Mirror of https://github.com/GreptimeTeam/greptimedb.git, synced 2025-12-27 08:29:59 +00:00

Compare commits: v0.9.3...test/skope (3 commits: b4b105ad35, e1d0bb3749, 867d6ab600)

@@ -4,6 +4,9 @@ inputs:
arch:
description: Architecture to build
required: true
rust-toolchain:
description: Rust toolchain to use
required: true
cargo-profile:
description: Cargo profile to build
required: true
@@ -40,9 +43,10 @@ runs:
brew install protobuf

- name: Install rust toolchain
uses: actions-rust-lang/setup-rust-toolchain@v1
uses: dtolnay/rust-toolchain@master
with:
target: ${{ inputs.arch }}
toolchain: ${{ inputs.rust-toolchain }}
targets: ${{ inputs.arch }}

- name: Start etcd # For integration tests.
if: ${{ inputs.disable-run-tests == 'false' }}

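The recurring change in these composite actions and workflows is the swap of the Rust toolchain setup step. As a readability aid, here is a sketch of the two variants as they appear in the hunks (indentation and step layout are assumed; the input names match the published inputs of each action):

```yaml
steps:
  # Variant on one side of the compare: setup-rust-toolchain picks up the
  # toolchain from rust-toolchain.toml and takes a singular `target` input.
  - name: Install rust toolchain
    uses: actions-rust-lang/setup-rust-toolchain@v1
    with:
      target: ${{ inputs.arch }}

  # Variant on the other side: dtolnay/rust-toolchain needs an explicit
  # toolchain (passed via the new `rust-toolchain` input) and uses a plural
  # `targets` input.
  - name: Install rust toolchain
    uses: dtolnay/rust-toolchain@master
    with:
      toolchain: ${{ inputs.rust-toolchain }}
      targets: ${{ inputs.arch }}
```
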
@@ -4,6 +4,9 @@ inputs:
arch:
description: Architecture to build
required: true
rust-toolchain:
description: Rust toolchain to use
required: true
cargo-profile:
description: Cargo profile to build
required: true
@@ -25,9 +28,10 @@ runs:
- uses: arduino/setup-protoc@v3

- name: Install rust toolchain
uses: actions-rust-lang/setup-rust-toolchain@v1
uses: dtolnay/rust-toolchain@master
with:
target: ${{ inputs.arch }}
toolchain: ${{ inputs.rust-toolchain }}
targets: ${{ inputs.arch }}
components: llvm-tools-preview

- name: Rust Cache

@@ -18,8 +18,6 @@ runs:
--set replicaCount=${{ inputs.etcd-replicas }} \
--set resources.requests.cpu=50m \
--set resources.requests.memory=128Mi \
--set resources.limits.cpu=1000m \
--set resources.limits.memory=2Gi \
--set auth.rbac.create=false \
--set auth.rbac.token.enabled=false \
--set persistence.size=2Gi \

.github/actions/start-runner/action.yml (2 lines changed)

@@ -38,7 +38,7 @@ runs:
steps:
- name: Configure AWS credentials
if: startsWith(inputs.runner, 'ec2')
uses: aws-actions/configure-aws-credentials@v4
uses: aws-actions/configure-aws-credentials@v2
with:
aws-access-key-id: ${{ inputs.aws-access-key-id }}
aws-secret-access-key: ${{ inputs.aws-secret-access-key }}

.github/actions/stop-runner/action.yml (2 lines changed)

@@ -25,7 +25,7 @@ runs:
steps:
- name: Configure AWS credentials
if: ${{ inputs.label && inputs.ec2-instance-id }}
uses: aws-actions/configure-aws-credentials@v4
uses: aws-actions/configure-aws-credentials@v2
with:
aws-access-key-id: ${{ inputs.aws-access-key-id }}
aws-secret-access-key: ${{ inputs.aws-secret-access-key }}

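Both runner actions change only the pinned version of the AWS credentials action. A minimal sketch of the step they share, reconstructed from the two hunks above (indentation assumed; one side pins `@v4`, the other `@v2`, with identical inputs):

```yaml
steps:
  - name: Configure AWS credentials
    # start-runner guards on the runner label; stop-runner guards on
    # `inputs.label && inputs.ec2-instance-id` instead.
    if: startsWith(inputs.runner, 'ec2')
    uses: aws-actions/configure-aws-credentials@v4
    with:
      aws-access-key-id: ${{ inputs.aws-access-key-id }}
      aws-secret-access-key: ${{ inputs.aws-secret-access-key }}
```
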
.github/workflows/apidoc.yml (7 lines changed)

@@ -12,6 +12,9 @@ on:

name: Build API docs

env:
RUST_TOOLCHAIN: nightly-2024-06-06

jobs:
apidoc:
runs-on: ubuntu-20.04
@@ -20,7 +23,9 @@ jobs:
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1
- uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
- run: cargo doc --workspace --no-deps --document-private-items
- run: |
cat <<EOF > target/doc/index.html

.github/workflows/develop.yml (74 lines changed)

@@ -29,6 +29,9 @@ concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
cancel-in-progress: true

env:
RUST_TOOLCHAIN: nightly-2024-06-06

jobs:
check-typos-and-docs:
name: Check typos and docs
@@ -61,7 +64,9 @@ jobs:
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1
- uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
@@ -77,7 +82,9 @@ jobs:
timeout-minutes: 60
steps:
- uses: actions/checkout@v4
- uses: actions-rust-lang/setup-rust-toolchain@v1
- uses: dtolnay/rust-toolchain@master
with:
toolchain: stable
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
@@ -100,7 +107,9 @@ jobs:
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1
- uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
- uses: Swatinem/rust-cache@v2
with:
# Shares across multiple jobs
@@ -152,7 +161,9 @@ jobs:
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1
- uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
@@ -170,7 +181,7 @@ jobs:
name: bins
path: .
- name: Unzip binaries
run: |
run: |
tar -xvf ./bins.tar.gz
rm ./bins.tar.gz
- name: Run GreptimeDB
@@ -210,7 +221,9 @@ jobs:
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1
- uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
@@ -261,7 +274,9 @@ jobs:
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1
- uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
- uses: Swatinem/rust-cache@v2
with:
# Shares across multiple jobs
@@ -272,7 +287,7 @@ jobs:
- name: Build greptime bianry
shell: bash
# `cargo gc` will invoke `cargo build` with specified args
run: cargo gc --profile ci -- --bin greptime
run: cargo gc --profile ci -- --bin greptime
- name: Pack greptime binary
shell: bash
run: |
@@ -286,7 +301,7 @@ jobs:
artifacts-dir: bin
version: current

distributed-fuzztest:
distributed-fuzztest:
name: Fuzz Test (Distributed, ${{ matrix.mode.name }}, ${{ matrix.target }})
runs-on: ubuntu-latest
needs: build-greptime-ci
@@ -329,7 +344,9 @@ jobs:
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1
- uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
@@ -399,12 +416,12 @@ jobs:
- name: Describe Nodes
if: failure()
shell: bash
run: |
kubectl describe nodes
run: |
kubectl describe nodes
- name: Export kind logs
if: failure()
shell: bash
run: |
run: |
kind export logs /tmp/kind
- name: Upload logs
if: failure()
@@ -416,13 +433,13 @@ jobs:
- name: Delete cluster
if: success()
shell: bash
run: |
run: |
kind delete cluster
docker stop $(docker ps -a -q)
docker rm $(docker ps -a -q)
docker system prune -f

distributed-fuzztest-with-chaos:
distributed-fuzztest-with-chaos:
name: Fuzz Test with Chaos (Distributed, ${{ matrix.mode.name }}, ${{ matrix.target }})
runs-on: ubuntu-latest
needs: build-greptime-ci
@@ -430,7 +447,7 @@ jobs:
strategy:
matrix:
target: ["fuzz_migrate_mito_regions", "fuzz_failover_mito_regions", "fuzz_failover_metric_regions"]
mode:
mode:
- name: "Remote WAL"
minio: true
kafka: true
@@ -467,7 +484,9 @@ jobs:
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1
- uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
@@ -538,12 +557,12 @@ jobs:
- name: Describe Nodes
if: failure()
shell: bash
run: |
kubectl describe nodes
run: |
kubectl describe nodes
- name: Export kind logs
if: failure()
shell: bash
run: |
run: |
kind export logs /tmp/kind
- name: Upload logs
if: failure()
@@ -555,7 +574,7 @@ jobs:
- name: Delete cluster
if: success()
shell: bash
run: |
run: |
kind delete cluster
docker stop $(docker ps -a -q)
docker rm $(docker ps -a -q)
@@ -608,16 +627,17 @@ jobs:
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1
- uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
components: rustfmt
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
# Shares across multiple jobs
shared-key: "check-rust-fmt"
- name: Check format
run: make fmt-check
- name: Run cargo fmt
run: cargo fmt --all -- --check

clippy:
name: Clippy
@@ -628,8 +648,9 @@ jobs:
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1
- uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
components: clippy
- name: Rust Cache
uses: Swatinem/rust-cache@v2
@@ -653,8 +674,9 @@ jobs:
with:
version: "14.0"
- name: Install toolchain
uses: actions-rust-lang/setup-rust-toolchain@v1
uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
components: llvm-tools-preview
- name: Rust Cache
uses: Swatinem/rust-cache@v2

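Most jobs in this workflow repeat the same prologue, and every occurrence changes in the same way: the side of the compare that defines a workflow-level `RUST_TOOLCHAIN` env var pairs it with `dtolnay/rust-toolchain@master`, while the other side uses `actions-rust-lang/setup-rust-toolchain@v1`. A sketch of the repeated block under those assumptions (the `shared-key` value varies per job; `check-rust-fmt` is the value visible in the fmt hunk):

```yaml
env:
  RUST_TOOLCHAIN: nightly-2024-06-06

# Prologue repeated at the top of most jobs:
steps:
  - uses: actions/checkout@v4
  - uses: arduino/setup-protoc@v3
    with:
      repo-token: ${{ secrets.GITHUB_TOKEN }}
  - uses: dtolnay/rust-toolchain@master
    with:
      toolchain: ${{ env.RUST_TOOLCHAIN }}
  - name: Rust Cache
    uses: Swatinem/rust-cache@v2
    with:
      # Shares across multiple jobs
      shared-key: "check-rust-fmt"
```
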
.github/workflows/nightly-ci.yml (10 lines changed)

@@ -9,6 +9,9 @@ concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
cancel-in-progress: true

env:
RUST_TOOLCHAIN: nightly-2024-06-06

permissions:
issues: write

@@ -49,7 +52,9 @@ jobs:
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1
- uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
- name: Rust Cache
uses: Swatinem/rust-cache@v2
- name: Run sqlness
@@ -80,8 +85,9 @@ jobs:
with:
version: "14.0"
- name: Install Rust toolchain
uses: actions-rust-lang/setup-rust-toolchain@v1
uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
components: llvm-tools-preview
- name: Rust Cache
uses: Swatinem/rust-cache@v2

@@ -1,12 +1,6 @@
name: Release dev-builder images

on:
push:
branches:
- main
paths:
- rust-toolchain.toml
- 'docker/dev-builder/**'
workflow_dispatch: # Allows you to run this workflow manually.
inputs:
release_dev_builder_ubuntu_image:

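The hunk above flattens the workflow's trigger block; the `@@ -1,12 +1,6` header indicates that one side carries the `push` trigger while the other does not. Re-indented as YAML (structure assumed from the key names, shown for the side that has the push trigger; the remaining fields of the dispatch input are not visible in the hunk):

```yaml
name: Release dev-builder images

on:
  push:
    branches:
      - main
    paths:
      - rust-toolchain.toml
      - 'docker/dev-builder/**'
  workflow_dispatch: # Allows you to run this workflow manually.
    inputs:
      release_dev_builder_ubuntu_image:
        # further input definition not shown in the hunk
```
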
.github/workflows/release.yml (12 lines changed)

@@ -33,7 +33,6 @@ on:
description: The runner uses to build linux-arm64 artifacts
default: ec2-c6g.4xlarge-arm64
options:
- ubuntu-2204-32-cores-arm
- ec2-c6g.xlarge-arm64 # 4C8G
- ec2-c6g.2xlarge-arm64 # 8C16G
- ec2-c6g.4xlarge-arm64 # 16C32G
@@ -83,6 +82,7 @@ on:
# Use env variables to control all the release process.
env:
# The arguments of building greptime.
RUST_TOOLCHAIN: nightly-2024-06-06
CARGO_PROFILE: nightly

# Controls whether to run tests, include unit-test, integration-test and sqlness.
@@ -123,11 +123,6 @@ jobs:
with:
fetch-depth: 0

- name: Check Rust toolchain version
shell: bash
run: |
./scripts/check-builder-rust-version.sh

# The create-version will create a global variable named 'version' in the global workflows.
# - If it's a tag push release, the version is the tag name(${{ github.ref_name }});
# - If it's a scheduled release, the version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-$buildTime', like v0.2.0-nigthly-20230313;
@@ -249,11 +244,11 @@ jobs:
- uses: ./.github/actions/build-macos-artifacts
with:
arch: ${{ matrix.arch }}
rust-toolchain: ${{ env.RUST_TOOLCHAIN }}
cargo-profile: ${{ env.CARGO_PROFILE }}
features: ${{ matrix.features }}
version: ${{ needs.allocate-runners.outputs.version }}
# We decide to disable the integration tests on macOS because it's unnecessary and time-consuming.
disable-run-tests: true
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
artifacts-dir: ${{ matrix.artifacts-dir-prefix }}-${{ needs.allocate-runners.outputs.version }}

- name: Set build macos result
@@ -292,6 +287,7 @@ jobs:
- uses: ./.github/actions/build-windows-artifacts
with:
arch: ${{ matrix.arch }}
rust-toolchain: ${{ env.RUST_TOOLCHAIN }}
cargo-profile: ${{ env.CARGO_PROFILE }}
features: ${{ matrix.features }}
version: ${{ needs.allocate-runners.outputs.version }}

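In the release workflow, the macOS and Windows build jobs pass the toolchain and profile through to the composite actions. A sketch of the macOS invocation as shown in the hunk above (indentation assumed; all values are the ones visible in the diff — one side hard-codes `disable-run-tests: true`, the other gates it on `DISABLE_RUN_TESTS`):

```yaml
- uses: ./.github/actions/build-macos-artifacts
  with:
    arch: ${{ matrix.arch }}
    rust-toolchain: ${{ env.RUST_TOOLCHAIN }}
    cargo-profile: ${{ env.CARGO_PROFILE }}
    features: ${{ matrix.features }}
    version: ${{ needs.allocate-runners.outputs.version }}
    # One side disables integration tests outright; the other reads an env var.
    disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
    artifacts-dir: ${{ matrix.artifacts-dir-prefix }}-${{ needs.allocate-runners.outputs.version }}
```
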
Cargo.lock (generated, 215 lines changed)

The lockfile diff tracks the workspace version difference between the two refs: every workspace crate entry (api, auth, cache, catalog, client, cmd, the common-* crates, datanode, datatypes, file-engine, flow, frontend, index, log-store, meta-client, meta-srv, metric-engine, mito2, object-store, operator, partition, pipeline, plugins, promql, puffin, query, script, servers, session, sql, sqlness-runner, store-api, substrait, table, tests-fuzz, tests-integration) is listed as `version = "0.9.3"` on one side and `version = "0.9.2"` on the other, with the internal `substrait 0.9.3`/`substrait 0.9.2` dependency references changing to match. The remaining differences are in third-party entries: `opendal` is 0.49.1 versus 0.49.0 (with its dependency written as `backon 0.4.4` versus plain `backon`), one side carries additional entries for `backon 1.0.2`, `float_eq`, `geo-types`, `geohash`, `h3o`, `h3o-bit`, and `tracing-serde`, `common-function` differs on its `geohash` and `h3o` dependencies, `common-procedure` on `backon 1.0.2` versus `backon`, `log-store` on `delta-encoding`, and `tracing-subscriber` on its `serde`, `serde_json`, and `tracing-serde` dependencies.

@@ -64,7 +64,7 @@ members = [
resolver = "2"

[workspace.package]
version = "0.9.3"
version = "0.9.2"
edition = "2021"
license = "Apache-2.0"

Makefile (12 lines changed)

@@ -8,7 +8,6 @@ CARGO_BUILD_OPTS := --locked
IMAGE_REGISTRY ?= docker.io
IMAGE_NAMESPACE ?= greptime
IMAGE_TAG ?= latest
DEV_BUILDER_IMAGE_TAG ?= 2024-06-06-b4b105ad-20240827021230
BUILDX_MULTI_PLATFORM_BUILD ?= false
BUILDX_BUILDER_NAME ?= gtbuilder
BASE_IMAGE ?= ubuntu
@@ -78,7 +77,7 @@ build: ## Build debug version greptime.
build-by-dev-builder: ## Build greptime by dev-builder.
docker run --network=host \
-v ${PWD}:/greptimedb -v ${CARGO_REGISTRY_CACHE}:/root/.cargo/registry \
-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:${DEV_BUILDER_IMAGE_TAG} \
-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:latest \
make build \
CARGO_EXTENSION="${CARGO_EXTENSION}" \
CARGO_PROFILE=${CARGO_PROFILE} \
@@ -92,7 +91,7 @@ build-by-dev-builder: ## Build greptime by dev-builder.
build-android-bin: ## Build greptime binary for android.
docker run --network=host \
-v ${PWD}:/greptimedb -v ${CARGO_REGISTRY_CACHE}:/root/.cargo/registry \
-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-android:${DEV_BUILDER_IMAGE_TAG} \
-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-android:latest \
make build \
CARGO_EXTENSION="ndk --platform 23 -t aarch64-linux-android" \
CARGO_PROFILE=release \
@@ -106,7 +105,7 @@ build-android-bin: ## Build greptime binary for android.
strip-android-bin: build-android-bin ## Strip greptime binary for android.
docker run --network=host \
-v ${PWD}:/greptimedb \
-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-android:${DEV_BUILDER_IMAGE_TAG} \
-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-android:latest \
bash -c '$${NDK_ROOT}/toolchains/llvm/prebuilt/linux-x86_64/bin/llvm-strip --strip-debug /greptimedb/target/aarch64-linux-android/release/greptime'

.PHONY: clean
@@ -146,7 +145,7 @@ dev-builder: multi-platform-buildx ## Build dev-builder image.
docker buildx build --builder ${BUILDX_BUILDER_NAME} \
--build-arg="RUST_TOOLCHAIN=${RUST_TOOLCHAIN}" \
-f docker/dev-builder/${BASE_IMAGE}/Dockerfile \
-t ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:${DEV_BUILDER_IMAGE_TAG} ${BUILDX_MULTI_PLATFORM_BUILD_OPTS} .
-t ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:${IMAGE_TAG} ${BUILDX_MULTI_PLATFORM_BUILD_OPTS} .

.PHONY: multi-platform-buildx
multi-platform-buildx: ## Create buildx multi-platform builder.
@@ -191,7 +190,6 @@ fix-clippy: ## Fix clippy violations.
.PHONY: fmt-check
fmt-check: ## Check code format.
cargo fmt --all -- --check
python3 scripts/check-snafu.py

.PHONY: start-etcd
start-etcd: ## Start single node etcd for testing purpose.
@@ -205,7 +203,7 @@ stop-etcd: ## Stop single node etcd for testing purpose.
run-it-in-container: start-etcd ## Run integration tests in dev-builder.
docker run --network=host \
-v ${PWD}:/greptimedb -v ${CARGO_REGISTRY_CACHE}:/root/.cargo/registry -v /tmp:/tmp \
-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:${DEV_BUILDER_IMAGE_TAG} \
-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:latest \
make test sqlness-test BUILD_JOBS=${BUILD_JOBS}

.PHONY: start-cluster

Configuration reference tables (generated option docs, several hunks)

These hunks cover the generated per-component option tables and mirror the TOML configuration changes shown below. The two sides of the compare differ in the following rows: the `init_regions_in_background` and `init_regions_parallelism` rows; the `region_engine.mito.min_compaction_interval` row; the `logging.dir` description (one side adds "If set to empty, logs will not be written to files."); the `export_metrics.self_import` description (one side adds "You must create the database before enabling it."); the example database in `export_metrics.remote_write.url` (`db=greptime_metrics` versus `db=information_schema`); and, in the datanode table, the `http`, `http.addr`, `http.timeout`, and `http.body_limit` rows adjacent to the `grpc` options. The same logging/export_metrics row block repeats in several of the per-component tables with identical wording, so each of those tables changes in the same way.

@@ -39,18 +39,6 @@ rpc_max_recv_message_size = "512MB"
|
||||
## +toml2docs:none-default
|
||||
rpc_max_send_message_size = "512MB"
|
||||
|
||||
|
||||
## The HTTP server options.
|
||||
[http]
|
||||
## The address to bind the HTTP server.
|
||||
addr = "127.0.0.1:4000"
|
||||
## HTTP request timeout. Set to 0 to disable timeout.
|
||||
timeout = "30s"
|
||||
## HTTP request body limit.
|
||||
## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
|
||||
## Set to 0 to disable limit.
|
||||
body_limit = "64MB"
|
||||
|
||||
## The gRPC server options.
|
||||
[grpc]
|
||||
## The address to bind the gRPC server.
|
||||
@@ -336,7 +324,7 @@ credential_path = "test"
|
||||
## The credential of the google cloud storage.
|
||||
## **It's only used when the storage type is `Gcs`**.
|
||||
## +toml2docs:none-default
|
||||
credential = "base64-credential"
|
||||
credential= "base64-credential"
|
||||
|
||||
## The container of the azure account.
|
||||
## **It's only used when the storage type is `Azblob`**.
|
||||
@@ -360,23 +348,9 @@ region = "us-west-2"
|
||||
|
||||
# Custom storage options
|
||||
# [[storage.providers]]
|
||||
# name = "S3"
|
||||
# type = "S3"
|
||||
# bucket = "greptimedb"
|
||||
# root = "data"
|
||||
# access_key_id = "test"
|
||||
# secret_access_key = "123456"
|
||||
# endpoint = "https://s3.amazonaws.com"
|
||||
# region = "us-west-2"
|
||||
# [[storage.providers]]
|
||||
# name = "Gcs"
|
||||
# type = "Gcs"
|
||||
# bucket = "greptimedb"
|
||||
# root = "data"
|
||||
# scope = "test"
|
||||
# credential_path = "123456"
|
||||
# credential = "base64-credential"
|
||||
# endpoint = "https://storage.googleapis.com"
|
||||
|
||||
## The region engine options. You can configure multiple region engines.
|
||||
[[region_engine]]
|
||||
@@ -455,10 +429,6 @@ parallel_scan_channel_size = 32
|
||||
## Whether to allow stale WAL entries read during replay.
|
||||
allow_stale_entries = false
|
||||
|
||||
## Minimum time interval between two compactions.
|
||||
## To align with the old behavior, the default value is 0 (no restrictions).
|
||||
min_compaction_interval = "0m"
|
||||
|
||||
## The options for index in Mito engine.
|
||||
[region_engine.mito.index]
|
||||
|
||||
@@ -549,7 +519,7 @@ fork_dictionary_bytes = "1GiB"
|
||||
|
||||
## The logging options.
|
||||
[logging]
|
||||
## The directory to store the log files. If set to empty, logs will not be written to files.
|
||||
## The directory to store the log files.
|
||||
dir = "/tmp/greptimedb/logs"
|
||||
|
||||
## The log level. Can be `info`/`debug`/`warn`/`error`.
|
||||
@@ -565,9 +535,6 @@ otlp_endpoint = "http://localhost:4317"
|
||||
## Whether to append logs to stdout.
|
||||
append_stdout = true
|
||||
|
||||
## The log format. Can be `text`/`json`.
|
||||
log_format = "text"
|
||||
|
||||
## The percentage of tracing that will be sampled and exported.
## Valid range `[0, 1]`: 1 means all traces are sampled, 0 means none are sampled; the default value is 1.
## Ratios > 1 are treated as 1; fractions < 0 are treated as 0.
|
||||
@@ -585,13 +552,12 @@ enable = false
write_interval = "30s"

## For `standalone` mode, `self_import` is recommended to collect metrics generated by itself.
## You must create the database before enabling it.
[export_metrics.self_import]
## +toml2docs:none-default
db = "greptime_metrics"
db = "information_schema"

[export_metrics.remote_write]
## The URL the metrics are sent to, e.g. `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
## The URL the metrics are sent to, e.g. `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`.
url = ""

## HTTP headers carried by the Prometheus remote-write requests.

@@ -59,7 +59,7 @@ retry_interval = "3s"
|
||||
|
||||
## The logging options.
|
||||
[logging]
|
||||
## The directory to store the log files. If set to empty, logs will not be written to files.
|
||||
## The directory to store the log files.
|
||||
dir = "/tmp/greptimedb/logs"
|
||||
|
||||
## The log level. Can be `info`/`debug`/`warn`/`error`.
|
||||
@@ -75,9 +75,6 @@ otlp_endpoint = "http://localhost:4317"
|
||||
## Whether to append logs to stdout.
|
||||
append_stdout = true
|
||||
|
||||
## The log format. Can be `text`/`json`.
|
||||
log_format = "text"
|
||||
|
||||
## The percentage of tracing that will be sampled and exported.
## Valid range `[0, 1]`: 1 means all traces are sampled, 0 means none are sampled; the default value is 1.
## Ratios > 1 are treated as 1; fractions < 0 are treated as 0.
|
||||
|
||||
@@ -166,7 +166,7 @@ tcp_nodelay = true
|
||||
|
||||
## The logging options.
|
||||
[logging]
|
||||
## The directory to store the log files. If set to empty, logs will not be written to files.
|
||||
## The directory to store the log files.
|
||||
dir = "/tmp/greptimedb/logs"
|
||||
|
||||
## The log level. Can be `info`/`debug`/`warn`/`error`.
|
||||
@@ -182,9 +182,6 @@ otlp_endpoint = "http://localhost:4317"
|
||||
## Whether to append logs to stdout.
|
||||
append_stdout = true
|
||||
|
||||
## The log format. Can be `text`/`json`.
|
||||
log_format = "text"
|
||||
|
||||
## The percentage of tracing that will be sampled and exported.
## Valid range `[0, 1]`: 1 means all traces are sampled, 0 means none are sampled; the default value is 1.
## Ratios > 1 are treated as 1; fractions < 0 are treated as 0.
|
||||
@@ -202,13 +199,12 @@ enable = false
write_interval = "30s"

## For `standalone` mode, `self_import` is recommended to collect metrics generated by itself.
## You must create the database before enabling it.
[export_metrics.self_import]
## +toml2docs:none-default
db = "greptime_metrics"
db = "information_schema"

[export_metrics.remote_write]
## The URL the metrics are sent to, e.g. `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
## The URL the metrics are sent to, e.g. `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`.
url = ""

## HTTP headers carried by the Prometheus remote-write requests.

@@ -153,7 +153,7 @@ backoff_deadline = "5mins"
|
||||
|
||||
## The logging options.
|
||||
[logging]
|
||||
## The directory to store the log files. If set to empty, logs will not be written to files.
|
||||
## The directory to store the log files.
|
||||
dir = "/tmp/greptimedb/logs"
|
||||
|
||||
## The log level. Can be `info`/`debug`/`warn`/`error`.
|
||||
@@ -169,9 +169,6 @@ otlp_endpoint = "http://localhost:4317"
|
||||
## Whether to append logs to stdout.
|
||||
append_stdout = true
|
||||
|
||||
## The log format. Can be `text`/`json`.
|
||||
log_format = "text"
|
||||
|
||||
## The percentage of tracing that will be sampled and exported.
## Valid range `[0, 1]`: 1 means all traces are sampled, 0 means none are sampled; the default value is 1.
## Ratios > 1 are treated as 1; fractions < 0 are treated as 0.
|
||||
@@ -189,13 +186,12 @@ enable = false
write_interval = "30s"

## For `standalone` mode, `self_import` is recommended to collect metrics generated by itself.
## You must create the database before enabling it.
[export_metrics.self_import]
## +toml2docs:none-default
db = "greptime_metrics"
db = "information_schema"

[export_metrics.remote_write]
## The URL the metrics are sent to, e.g. `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
## The URL the metrics are sent to, e.g. `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`.
url = ""

## HTTP headers carried by the Prometheus remote-write requests.

@@ -8,13 +8,6 @@ enable_telemetry = true
|
||||
## +toml2docs:none-default
|
||||
default_timezone = "UTC"
|
||||
|
||||
## Initialize all regions in the background during the startup.
|
||||
## By default, it provides services after all regions have been initialized.
|
||||
init_regions_in_background = false
|
||||
|
||||
## Parallelism of initializing regions.
|
||||
init_regions_parallelism = 16
|
||||
|
||||
## The runtime options.
|
||||
[runtime]
|
||||
## The number of threads to execute the runtime for global read operations.
|
||||
@@ -398,23 +391,9 @@ region = "us-west-2"
|
||||
|
||||
# Custom storage options
|
||||
# [[storage.providers]]
|
||||
# name = "S3"
|
||||
# type = "S3"
|
||||
# bucket = "greptimedb"
|
||||
# root = "data"
|
||||
# access_key_id = "test"
|
||||
# secret_access_key = "123456"
|
||||
# endpoint = "https://s3.amazonaws.com"
|
||||
# region = "us-west-2"
|
||||
# [[storage.providers]]
|
||||
# name = "Gcs"
|
||||
# type = "Gcs"
|
||||
# bucket = "greptimedb"
|
||||
# root = "data"
|
||||
# scope = "test"
|
||||
# credential_path = "123456"
|
||||
# credential = "base64-credential"
|
||||
# endpoint = "https://storage.googleapis.com"
|
||||
|
||||
## The region engine options. You can configure multiple region engines.
|
||||
[[region_engine]]
|
||||
@@ -493,10 +472,6 @@ parallel_scan_channel_size = 32
|
||||
## Whether to allow stale WAL entries read during replay.
|
||||
allow_stale_entries = false
|
||||
|
||||
## Minimum time interval between two compactions.
|
||||
## To align with the old behavior, the default value is 0 (no restrictions).
|
||||
min_compaction_interval = "0m"
|
||||
|
||||
## The options for index in Mito engine.
|
||||
[region_engine.mito.index]
|
||||
|
||||
@@ -593,7 +568,7 @@ fork_dictionary_bytes = "1GiB"
|
||||
|
||||
## The logging options.
|
||||
[logging]
|
||||
## The directory to store the log files. If set to empty, logs will not be written to files.
|
||||
## The directory to store the log files.
|
||||
dir = "/tmp/greptimedb/logs"
|
||||
|
||||
## The log level. Can be `info`/`debug`/`warn`/`error`.
|
||||
@@ -609,9 +584,6 @@ otlp_endpoint = "http://localhost:4317"
|
||||
## Whether to append logs to stdout.
|
||||
append_stdout = true
|
||||
|
||||
## The log format. Can be `text`/`json`.
|
||||
log_format = "text"
|
||||
|
||||
## The percentage of tracing that will be sampled and exported.
## Valid range `[0, 1]`: 1 means all traces are sampled, 0 means none are sampled; the default value is 1.
## Ratios > 1 are treated as 1; fractions < 0 are treated as 0.
|
||||
@@ -629,13 +601,12 @@ enable = false
write_interval = "30s"

## For `standalone` mode, `self_import` is recommended to collect metrics generated by itself.
## You must create the database before enabling it.
[export_metrics.self_import]
## +toml2docs:none-default
db = "greptime_metrics"
db = "information_schema"

[export_metrics.remote_write]
## The URL the metrics are sent to, e.g. `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
## The URL the metrics are sent to, e.g. `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`.
url = ""

## HTTP headers carried by the Prometheus remote-write requests.

@@ -157,6 +157,26 @@
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byValue",
|
||||
"options": {
|
||||
"op": "gte",
|
||||
"reducer": "allIsZero",
|
||||
"value": 0
|
||||
}
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.hideFrom",
|
||||
"value": {
|
||||
"legend": true,
|
||||
"tooltip": true,
|
||||
"viz": true
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
@@ -306,6 +326,26 @@
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byValue",
|
||||
"options": {
|
||||
"op": "gte",
|
||||
"reducer": "allIsZero",
|
||||
"value": 0
|
||||
}
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.hideFrom",
|
||||
"value": {
|
||||
"legend": true,
|
||||
"tooltip": true,
|
||||
"viz": true
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
@@ -457,6 +497,26 @@
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byValue",
|
||||
"options": {
|
||||
"op": "gte",
|
||||
"reducer": "allIsZero",
|
||||
"value": 0
|
||||
}
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.hideFrom",
|
||||
"value": {
|
||||
"legend": true,
|
||||
"tooltip": true,
|
||||
"viz": true
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
@@ -608,6 +668,26 @@
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byValue",
|
||||
"options": {
|
||||
"op": "gte",
|
||||
"reducer": "allIsZero",
|
||||
"value": 0
|
||||
}
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.hideFrom",
|
||||
"value": {
|
||||
"legend": true,
|
||||
"tooltip": true,
|
||||
"viz": true
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
@@ -772,6 +852,26 @@
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byValue",
|
||||
"options": {
|
||||
"op": "gte",
|
||||
"reducer": "allIsZero",
|
||||
"value": 0
|
||||
}
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.hideFrom",
|
||||
"value": {
|
||||
"legend": true,
|
||||
"tooltip": true,
|
||||
"viz": true
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
@@ -923,6 +1023,26 @@
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byValue",
|
||||
"options": {
|
||||
"op": "gte",
|
||||
"reducer": "allIsZero",
|
||||
"value": 0
|
||||
}
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.hideFrom",
|
||||
"value": {
|
||||
"legend": true,
|
||||
"tooltip": true,
|
||||
"viz": true
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
@@ -1074,6 +1194,26 @@
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byValue",
|
||||
"options": {
|
||||
"op": "gte",
|
||||
"reducer": "allIsZero",
|
||||
"value": 0
|
||||
}
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.hideFrom",
|
||||
"value": {
|
||||
"legend": true,
|
||||
"tooltip": true,
|
||||
"viz": true
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
@@ -1225,6 +1365,26 @@
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byValue",
|
||||
"options": {
|
||||
"op": "gte",
|
||||
"reducer": "allIsZero",
|
||||
"value": 0
|
||||
}
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.hideFrom",
|
||||
"value": {
|
||||
"legend": true,
|
||||
"tooltip": true,
|
||||
"viz": true
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
@@ -1376,6 +1536,26 @@
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byValue",
|
||||
"options": {
|
||||
"op": "gte",
|
||||
"reducer": "allIsZero",
|
||||
"value": 0
|
||||
}
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.hideFrom",
|
||||
"value": {
|
||||
"legend": true,
|
||||
"tooltip": true,
|
||||
"viz": true
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
@@ -1527,6 +1707,26 @@
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byValue",
|
||||
"options": {
|
||||
"op": "gte",
|
||||
"reducer": "allIsZero",
|
||||
"value": 0
|
||||
}
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.hideFrom",
|
||||
"value": {
|
||||
"legend": true,
|
||||
"tooltip": true,
|
||||
"viz": true
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
@@ -1678,6 +1878,26 @@
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byValue",
|
||||
"options": {
|
||||
"op": "gte",
|
||||
"reducer": "allIsZero",
|
||||
"value": 0
|
||||
}
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.hideFrom",
|
||||
"value": {
|
||||
"legend": true,
|
||||
"tooltip": true,
|
||||
"viz": true
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
@@ -1842,6 +2062,26 @@
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byValue",
|
||||
"options": {
|
||||
"op": "gte",
|
||||
"reducer": "allIsZero",
|
||||
"value": 0
|
||||
}
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.hideFrom",
|
||||
"value": {
|
||||
"legend": true,
|
||||
"tooltip": true,
|
||||
"viz": true
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
@@ -1993,6 +2233,26 @@
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byValue",
|
||||
"options": {
|
||||
"op": "gte",
|
||||
"reducer": "allIsZero",
|
||||
"value": 0
|
||||
}
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.hideFrom",
|
||||
"value": {
|
||||
"legend": true,
|
||||
"tooltip": true,
|
||||
"viz": true
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
@@ -2157,6 +2417,26 @@
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byValue",
|
||||
"options": {
|
||||
"op": "gte",
|
||||
"reducer": "allIsZero",
|
||||
"value": 0
|
||||
}
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.hideFrom",
|
||||
"value": {
|
||||
"legend": true,
|
||||
"tooltip": true,
|
||||
"viz": true
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
@@ -2289,6 +2569,26 @@
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byValue",
|
||||
"options": {
|
||||
"op": "gte",
|
||||
"reducer": "allIsZero",
|
||||
"value": 0
|
||||
}
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.hideFrom",
|
||||
"value": {
|
||||
"legend": true,
|
||||
"tooltip": true,
|
||||
"viz": true
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
@@ -2451,6 +2751,26 @@
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byValue",
|
||||
"options": {
|
||||
"op": "gte",
|
||||
"reducer": "allIsZero",
|
||||
"value": 0
|
||||
}
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.hideFrom",
|
||||
"value": {
|
||||
"legend": true,
|
||||
"tooltip": true,
|
||||
"viz": true
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
@@ -2771,7 +3091,28 @@
|
||||
},
|
||||
"unit": "s"
|
||||
},
|
||||
"overrides": []
|
||||
"overrides": [
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byValue",
|
||||
"options": {
|
||||
"op": "gte",
|
||||
"reducer": "allIsZero",
|
||||
"value": 0
|
||||
}
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.hideFrom",
|
||||
"value": {
|
||||
"legend": true,
|
||||
"tooltip": true,
|
||||
"viz": true
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
@@ -2901,6 +3242,26 @@
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byValue",
|
||||
"options": {
|
||||
"op": "gte",
|
||||
"reducer": "allIsZero",
|
||||
"value": 0
|
||||
}
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.hideFrom",
|
||||
"value": {
|
||||
"legend": true,
|
||||
"tooltip": true,
|
||||
"viz": true
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
@@ -3068,6 +3429,26 @@
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byValue",
|
||||
"options": {
|
||||
"op": "gte",
|
||||
"reducer": "allIsZero",
|
||||
"value": 0
|
||||
}
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.hideFrom",
|
||||
"value": {
|
||||
"legend": true,
|
||||
"tooltip": true,
|
||||
"viz": true
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
@@ -3217,6 +3598,26 @@
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byValue",
|
||||
"options": {
|
||||
"op": "gte",
|
||||
"reducer": "allIsZero",
|
||||
"value": 0
|
||||
}
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.hideFrom",
|
||||
"value": {
|
||||
"legend": true,
|
||||
"tooltip": true,
|
||||
"viz": true
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
@@ -3339,7 +3740,28 @@
|
||||
},
|
||||
"unit": "s"
|
||||
},
|
||||
"overrides": []
|
||||
"overrides": [
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byValue",
|
||||
"options": {
|
||||
"op": "gte",
|
||||
"reducer": "allIsZero",
|
||||
"value": 0
|
||||
}
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.hideFrom",
|
||||
"value": {
|
||||
"legend": true,
|
||||
"tooltip": true,
|
||||
"viz": true
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
@@ -3679,6 +4101,26 @@
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byValue",
|
||||
"options": {
|
||||
"op": "gte",
|
||||
"reducer": "allIsZero",
|
||||
"value": 0
|
||||
}
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.hideFrom",
|
||||
"value": {
|
||||
"legend": true,
|
||||
"tooltip": true,
|
||||
"viz": true
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
@@ -3828,6 +4270,26 @@
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byValue",
|
||||
"options": {
|
||||
"op": "gte",
|
||||
"reducer": "allIsZero",
|
||||
"value": 0
|
||||
}
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.hideFrom",
|
||||
"value": {
|
||||
"legend": true,
|
||||
"tooltip": true,
|
||||
"viz": true
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
@@ -3977,6 +4439,26 @@
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byValue",
|
||||
"options": {
|
||||
"op": "gte",
|
||||
"reducer": "allIsZero",
|
||||
"value": 0
|
||||
}
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.hideFrom",
|
||||
"value": {
|
||||
"legend": true,
|
||||
"tooltip": true,
|
||||
"viz": true
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
@@ -4126,6 +4608,26 @@
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byValue",
|
||||
"options": {
|
||||
"op": "gte",
|
||||
"reducer": "allIsZero",
|
||||
"value": 0
|
||||
}
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.hideFrom",
|
||||
"value": {
|
||||
"legend": true,
|
||||
"tooltip": true,
|
||||
"viz": true
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
@@ -4275,6 +4777,26 @@
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byValue",
|
||||
"options": {
|
||||
"op": "gte",
|
||||
"reducer": "allIsZero",
|
||||
"value": 0
|
||||
}
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.hideFrom",
|
||||
"value": {
|
||||
"legend": true,
|
||||
"tooltip": true,
|
||||
"viz": true
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
@@ -4424,6 +4946,26 @@
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byValue",
|
||||
"options": {
|
||||
"op": "gte",
|
||||
"reducer": "allIsZero",
|
||||
"value": 0
|
||||
}
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.hideFrom",
|
||||
"value": {
|
||||
"legend": true,
|
||||
"tooltip": true,
|
||||
"viz": true
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
@@ -4573,6 +5115,26 @@
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byValue",
|
||||
"options": {
|
||||
"op": "gte",
|
||||
"reducer": "allIsZero",
|
||||
"value": 0
|
||||
}
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.hideFrom",
|
||||
"value": {
|
||||
"legend": true,
|
||||
"tooltip": true,
|
||||
"viz": true
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
@@ -4679,4 +5241,4 @@
|
||||
"uid": "ea35efe5-918e-44fa-9743-e9aa1a340a3f",
|
||||
"version": 11,
|
||||
"weekStart": ""
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,42 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
set -e
|
||||
|
||||
RUST_TOOLCHAIN_VERSION_FILE="rust-toolchain.toml"
|
||||
DEV_BUILDER_UBUNTU_REGISTRY="docker.io"
|
||||
DEV_BUILDER_UBUNTU_NAMESPACE="greptime"
|
||||
DEV_BUILDER_UBUNTU_NAME="dev-builder-ubuntu"
|
||||
|
||||
function check_rust_toolchain_version() {
|
||||
DEV_BUILDER_IMAGE_TAG=$(grep "DEV_BUILDER_IMAGE_TAG ?= " Makefile | cut -d= -f2 | sed 's/^[ \t]*//')
|
||||
if [ -z "$DEV_BUILDER_IMAGE_TAG" ]; then
|
||||
echo "Error: No DEV_BUILDER_IMAGE_TAG found in Makefile"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
DEV_BUILDER_UBUNTU_IMAGE="$DEV_BUILDER_UBUNTU_REGISTRY/$DEV_BUILDER_UBUNTU_NAMESPACE/$DEV_BUILDER_UBUNTU_NAME:$DEV_BUILDER_IMAGE_TAG"
|
||||
|
||||
CURRENT_VERSION=$(grep -Eo '[0-9]{4}-[0-9]{2}-[0-9]{2}' "$RUST_TOOLCHAIN_VERSION_FILE")
|
||||
if [ -z "$CURRENT_VERSION" ]; then
|
||||
echo "Error: No rust toolchain version found in $RUST_TOOLCHAIN_VERSION_FILE"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
RUST_TOOLCHAIN_VERSION_IN_BUILDER=$(docker run "$DEV_BUILDER_UBUNTU_IMAGE" rustc --version | grep -Eo '[0-9]{4}-[0-9]{2}-[0-9]{2}')
|
||||
if [ -z "$RUST_TOOLCHAIN_VERSION_IN_BUILDER" ]; then
|
||||
echo "Error: No rustc version found in $DEV_BUILDER_UBUNTU_IMAGE"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Compare the version and the difference should be less than 1 day.
|
||||
current_rust_toolchain_seconds=$(date -d "$CURRENT_VERSION" +%s)
|
||||
rust_toolchain_in_dev_builder_ubuntu_seconds=$(date -d "$RUST_TOOLCHAIN_VERSION_IN_BUILDER" +%s)
|
||||
date_diff=$(( (current_rust_toolchain_seconds - rust_toolchain_in_dev_builder_ubuntu_seconds) / 86400 ))
|
||||
|
||||
if [ $date_diff -gt 1 ]; then
|
||||
echo "Error: The rust toolchain '$RUST_TOOLCHAIN_VERSION_IN_BUILDER' in builder '$DEV_BUILDER_UBUNTU_IMAGE' maybe outdated, please update it to '$CURRENT_VERSION'"
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
check_rust_toolchain_version
|
||||
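The deleted script's core logic is a date comparison: the nightly toolchain date pinned in `rust-toolchain.toml` must not run ahead of the one baked into the dev-builder image by more than a day. A minimal sketch of that comparison in Rust, with placeholder dates instead of values parsed from the Makefile and `rustc --version`, could look like this:

```rust
// Sketch of the toolchain-skew check performed by the removed shell script.
// The two date strings are placeholders for the values the script extracted from
// rust-toolchain.toml and from `rustc --version` inside the builder image.
use chrono::NaiveDate;

fn main() {
    let toolchain_in_repo = NaiveDate::parse_from_str("2024-01-02", "%Y-%m-%d").unwrap();
    let toolchain_in_builder = NaiveDate::parse_from_str("2024-01-01", "%Y-%m-%d").unwrap();

    // Positive skew means the repo pins a newer nightly than the builder provides.
    let skew_days = (toolchain_in_repo - toolchain_in_builder).num_days();
    if skew_days > 1 {
        eprintln!(
            "builder toolchain {toolchain_in_builder} is older than repo toolchain {toolchain_in_repo}"
        );
        std::process::exit(1);
    }
    println!("toolchain skew is {skew_days} day(s), within tolerance");
}
```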
@@ -1,69 +0,0 @@
|
||||
# Copyright 2023 Greptime Team
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import os
|
||||
import re
|
||||
|
||||
|
||||
def find_rust_files(directory):
|
||||
error_files = []
|
||||
other_rust_files = []
|
||||
for root, _, files in os.walk(directory):
|
||||
for file in files:
|
||||
if file == "error.rs":
|
||||
error_files.append(os.path.join(root, file))
|
||||
elif file.endswith(".rs"):
|
||||
other_rust_files.append(os.path.join(root, file))
|
||||
return error_files, other_rust_files
|
||||
|
||||
|
||||
def extract_branch_names(file_content):
|
||||
pattern = re.compile(r"#\[snafu\(display\([^\)]*\)\)\]\s*(\w+)\s*\{")
|
||||
return pattern.findall(file_content)
|
||||
|
||||
|
||||
def check_snafu_in_files(branch_name, rust_files):
|
||||
branch_name_snafu = f"{branch_name}Snafu"
|
||||
for rust_file in rust_files:
|
||||
with open(rust_file, "r") as file:
|
||||
content = file.read()
|
||||
if branch_name_snafu in content:
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
def main():
|
||||
error_files, other_rust_files = find_rust_files(".")
|
||||
branch_names = []
|
||||
|
||||
for error_file in error_files:
|
||||
with open(error_file, "r") as file:
|
||||
content = file.read()
|
||||
branch_names.extend(extract_branch_names(content))
|
||||
|
||||
unused_snafu = [
|
||||
branch_name
|
||||
for branch_name in branch_names
|
||||
if not check_snafu_in_files(branch_name, other_rust_files)
|
||||
]
|
||||
|
||||
for name in unused_snafu:
|
||||
print(name)
|
||||
|
||||
if unused_snafu:
|
||||
raise SystemExit(1)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
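The deleted Python checker can be summarized as: collect every variant declared with `#[snafu(display(...))]` in the `error.rs` files, then flag any generated `<Variant>Snafu` selector that never appears in the rest of the sources. A rough Rust equivalent of that idea, using the `regex` crate and inline strings standing in for the scanned files, is sketched below:

```rust
// A minimal sketch of the check the removed Python script performed: extract the
// variant names declared with #[snafu(display(...))], then report any
// `<Name>Snafu` selector that never appears in the remaining sources.
// The inline strings are illustrative stand-ins for real files on disk.
use regex::Regex;

fn main() {
    let error_rs = r#"
        #[snafu(display("User not found: {}", username))]
        UserNotFound { username: String },

        #[snafu(display("Auth backend failure"))]
        AuthBackend { source: BoxedError },
    "#;
    let other_sources = r#"
        return UserNotFoundSnafu { username }.fail();
    "#;

    // Same pattern as the deleted script used to find snafu variant names.
    let pattern = Regex::new(r"#\[snafu\(display\([^)]*\)\)\]\s*(\w+)\s*\{").unwrap();
    for cap in pattern.captures_iter(error_rs) {
        let variant = &cap[1];
        let selector = format!("{variant}Snafu");
        if !other_sources.contains(&selector) {
            println!("unused selector: {selector}");
        }
    }
}
```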
@@ -13,11 +13,9 @@
|
||||
// limitations under the License.
|
||||
|
||||
use common_base::secrets::ExposeSecret;
|
||||
use common_error::ext::BoxedError;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
|
||||
use crate::error::{
|
||||
AccessDeniedSnafu, AuthBackendSnafu, Result, UnsupportedPasswordTypeSnafu, UserNotFoundSnafu,
|
||||
AccessDeniedSnafu, Result, UnsupportedPasswordTypeSnafu, UserNotFoundSnafu,
|
||||
UserPasswordMismatchSnafu,
|
||||
};
|
||||
use crate::user_info::DefaultUserInfo;
|
||||
@@ -51,19 +49,6 @@ impl MockUserProvider {
|
||||
info.schema.clone_into(&mut self.schema);
|
||||
info.username.clone_into(&mut self.username);
|
||||
}
|
||||
|
||||
// this is a deliberate function to ref AuthBackendSnafu
|
||||
// so that it won't get deleted in the future
|
||||
pub fn ref_auth_backend_snafu(&self) -> Result<()> {
|
||||
let none_option = None;
|
||||
|
||||
none_option
|
||||
.context(UserNotFoundSnafu {
|
||||
username: "no_user".to_string(),
|
||||
})
|
||||
.map_err(BoxedError::new)
|
||||
.context(AuthBackendSnafu)
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
|
||||
@@ -18,7 +18,6 @@ use std::sync::Arc;
|
||||
|
||||
use api::v1::greptime_request::Request;
|
||||
use auth::error::Error::InternalState;
|
||||
use auth::error::InternalStateSnafu;
|
||||
use auth::{PermissionChecker, PermissionCheckerRef, PermissionReq, PermissionResp, UserInfoRef};
|
||||
use sql::statements::show::{ShowDatabases, ShowKind};
|
||||
use sql::statements::statement::Statement;
|
||||
@@ -34,10 +33,9 @@ impl PermissionChecker for DummyPermissionChecker {
|
||||
match req {
|
||||
PermissionReq::GrpcRequest(_) => Ok(PermissionResp::Allow),
|
||||
PermissionReq::SqlStatement(_) => Ok(PermissionResp::Reject),
|
||||
_ => InternalStateSnafu {
|
||||
_ => Err(InternalState {
|
||||
msg: "testing".to_string(),
|
||||
}
|
||||
.fail(),
|
||||
}),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -97,6 +97,13 @@ pub enum Error {
|
||||
source: table::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("System catalog is not valid: {}", msg))]
|
||||
SystemCatalog {
|
||||
msg: String,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Cannot find catalog by name: {}", catalog_name))]
|
||||
CatalogNotFound {
|
||||
catalog_name: String,
|
||||
@@ -179,6 +186,13 @@ pub enum Error {
|
||||
source: common_query::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to perform metasrv operation"))]
|
||||
Metasrv {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: meta_client::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Invalid table info in catalog"))]
|
||||
InvalidTableInfoInCatalog {
|
||||
#[snafu(implicit)]
|
||||
@@ -274,6 +288,8 @@ impl ErrorExt for Error {
|
||||
|
||||
Error::FlowInfoNotFound { .. } => StatusCode::FlowNotFound,
|
||||
|
||||
Error::SystemCatalog { .. } => StatusCode::StorageUnavailable,
|
||||
|
||||
Error::UpgradeWeakCatalogManagerRef { .. } => StatusCode::Internal,
|
||||
|
||||
Error::CreateRecordBatch { source, .. } => source.status_code(),
|
||||
@@ -287,6 +303,7 @@ impl ErrorExt for Error {
|
||||
|
||||
Error::CreateTable { source, .. } => source.status_code(),
|
||||
|
||||
Error::Metasrv { source, .. } => source.status_code(),
|
||||
Error::DecodePlan { source, .. } => source.status_code(),
|
||||
Error::InvalidTableInfoInCatalog { source, .. } => source.status_code(),
|
||||
|
||||
@@ -321,6 +338,27 @@ mod tests {
|
||||
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
pub fn test_error_status_code() {
|
||||
assert_eq!(
|
||||
StatusCode::TableAlreadyExists,
|
||||
Error::TableExists {
|
||||
table: "some_table".to_string(),
|
||||
location: Location::generate(),
|
||||
}
|
||||
.status_code()
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
StatusCode::StorageUnavailable,
|
||||
Error::SystemCatalog {
|
||||
msg: String::default(),
|
||||
location: Location::generate(),
|
||||
}
|
||||
.status_code()
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
pub fn test_errors_to_datafusion_error() {
|
||||
let e: DataFusionError = Error::TableExists {
|
||||
|
||||
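The hunk above wires a new `SystemCatalog` variant into the `status_code` mapping and covers it with a unit test. Stripped of GreptimeDB's actual `ErrorExt`/`StatusCode` types, the pattern looks roughly like this self-contained sketch:

```rust
// Self-contained sketch of the variant-to-status-code mapping exercised above.
// `StatusCode` and `ErrorExt` here are illustrative stand-ins for the real
// common_error types, not their actual definitions.
#[derive(Debug, PartialEq)]
#[allow(dead_code)]
enum StatusCode {
    TableAlreadyExists,
    StorageUnavailable,
    Internal,
}

#[derive(Debug)]
#[allow(dead_code)]
enum Error {
    TableExists { table: String },
    SystemCatalog { msg: String },
    Unexpected,
}

trait ErrorExt {
    fn status_code(&self) -> StatusCode;
}

impl ErrorExt for Error {
    fn status_code(&self) -> StatusCode {
        match self {
            Error::TableExists { .. } => StatusCode::TableAlreadyExists,
            Error::SystemCatalog { .. } => StatusCode::StorageUnavailable,
            Error::Unexpected => StatusCode::Internal,
        }
    }
}

fn main() {
    let err = Error::SystemCatalog {
        msg: "invalid system catalog entry".to_string(),
    };
    assert_eq!(StatusCode::StorageUnavailable, err.status_code());
    println!("{err:?} maps to {:?}", err.status_code());
}
```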
@@ -20,8 +20,8 @@ use std::time::Duration;
|
||||
|
||||
use common_error::ext::BoxedError;
|
||||
use common_meta::cache_invalidator::KvCacheInvalidator;
|
||||
use common_meta::error::Error::CacheNotGet;
|
||||
use common_meta::error::{CacheNotGetSnafu, Error, ExternalSnafu, GetKvCacheSnafu, Result};
|
||||
use common_meta::error::Error::{CacheNotGet, GetKvCache};
|
||||
use common_meta::error::{CacheNotGetSnafu, Error, ExternalSnafu, Result};
|
||||
use common_meta::kv_backend::{KvBackend, KvBackendRef, TxnService};
|
||||
use common_meta::rpc::store::{
|
||||
BatchDeleteRequest, BatchDeleteResponse, BatchGetRequest, BatchGetResponse, BatchPutRequest,
|
||||
@@ -282,11 +282,8 @@ impl KvBackend for CachedMetaKvBackend {
|
||||
_ => Err(e),
|
||||
},
|
||||
}
|
||||
.map_err(|e| {
|
||||
GetKvCacheSnafu {
|
||||
err_msg: e.to_string(),
|
||||
}
|
||||
.build()
|
||||
.map_err(|e| GetKvCache {
|
||||
err_msg: e.to_string(),
|
||||
});
|
||||
|
||||
// "cache.invalidate_key" and "cache.try_get_with_by_ref" are not mutually exclusive. So we need
|
||||
|
||||
@@ -313,7 +313,7 @@ struct SystemCatalog {
|
||||
catalog_cache: Cache<String, Arc<InformationSchemaProvider>>,
|
||||
pg_catalog_cache: Cache<String, Arc<PGCatalogProvider>>,
|
||||
|
||||
// system_schema_provider for default catalog
|
||||
// system_schema_provier for default catalog
|
||||
information_schema_provider: Arc<InformationSchemaProvider>,
|
||||
pg_catalog_provider: Arc<PGCatalogProvider>,
|
||||
backend: KvBackendRef,
|
||||
|
||||
@@ -37,8 +37,7 @@ use tonic::metadata::AsciiMetadataKey;
|
||||
use tonic::transport::Channel;
|
||||
|
||||
use crate::error::{
|
||||
ConvertFlightDataSnafu, Error, FlightGetSnafu, IllegalFlightMessagesSnafu, InvalidAsciiSnafu,
|
||||
ServerSnafu,
|
||||
ConvertFlightDataSnafu, Error, IllegalFlightMessagesSnafu, InvalidAsciiSnafu, ServerSnafu,
|
||||
};
|
||||
use crate::{from_grpc_response, Client, Result};
|
||||
|
||||
@@ -226,18 +225,16 @@ impl Database {
|
||||
|
||||
let mut client = self.client.make_flight_client()?;
|
||||
|
||||
let response = client.mut_inner().do_get(request).await.or_else(|e| {
|
||||
let response = client.mut_inner().do_get(request).await.map_err(|e| {
|
||||
let tonic_code = e.code();
|
||||
let e: Error = e.into();
|
||||
let code = e.status_code();
|
||||
let msg = e.to_string();
|
||||
let error =
|
||||
Err(BoxedError::new(ServerSnafu { code, msg }.build())).with_context(|_| {
|
||||
FlightGetSnafu {
|
||||
addr: client.addr().to_string(),
|
||||
tonic_code,
|
||||
}
|
||||
});
|
||||
let error = Error::FlightGet {
|
||||
tonic_code,
|
||||
addr: client.addr().to_string(),
|
||||
source: BoxedError::new(ServerSnafu { code, msg }.build()),
|
||||
};
|
||||
error!(
|
||||
"Failed to do Flight get, addr: {}, code: {}, source: {:?}",
|
||||
client.addr(),
|
||||
|
||||
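A recurring change in these client hunks is replacing `.or_else(|e| { ...; Err(new_error) })` with `.map_err(|e| new_error)`. The two are equivalent when the closure always fails, but `map_err` states the intent directly and avoids wrapping in `Err` by hand. A small self-contained illustration follows; the error types are invented for the example:

```rust
// Illustration of the `.or_else(...)` -> `.map_err(...)` refactor used in the
// client hunks above. `TransportError` and `ClientError` are invented here;
// only the shape of the conversion matters.
#[derive(Debug)]
struct TransportError(String);

#[derive(Debug)]
#[allow(dead_code)]
struct ClientError {
    addr: String,
    message: String,
}

fn call_remote() -> Result<u32, TransportError> {
    Err(TransportError("connection refused".to_string()))
}

// Before: or_else must return a full Result, so the closure re-wraps the error.
fn with_or_else(addr: &str) -> Result<u32, ClientError> {
    call_remote().or_else(|e| {
        Err(ClientError {
            addr: addr.to_string(),
            message: e.0,
        })
    })
}

// After: map_err only transforms the error value, which is all that happens here.
fn with_map_err(addr: &str) -> Result<u32, ClientError> {
    call_remote().map_err(|e| ClientError {
        addr: addr.to_string(),
        message: e.0,
    })
}

fn main() {
    println!("{:?}", with_or_else("127.0.0.1:4001"));
    println!("{:?}", with_map_err("127.0.0.1:4001"));
}
```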
@@ -39,6 +39,13 @@ pub enum Error {
|
||||
source: BoxedError,
|
||||
},
|
||||
|
||||
#[snafu(display("Failure occurs during handling request"))]
|
||||
HandleRequest {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: BoxedError,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to convert FlightData"))]
|
||||
ConvertFlightData {
|
||||
#[snafu(implicit)]
|
||||
@@ -109,6 +116,13 @@ pub enum Error {
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to send request with streaming: {}", err_msg))]
|
||||
ClientStreaming {
|
||||
err_msg: String,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to parse ascii string: {}", value))]
|
||||
InvalidAscii {
|
||||
value: String,
|
||||
@@ -124,10 +138,12 @@ impl ErrorExt for Error {
|
||||
match self {
|
||||
Error::IllegalFlightMessages { .. }
|
||||
| Error::MissingField { .. }
|
||||
| Error::IllegalDatabaseResponse { .. } => StatusCode::Internal,
|
||||
| Error::IllegalDatabaseResponse { .. }
|
||||
| Error::ClientStreaming { .. } => StatusCode::Internal,
|
||||
|
||||
Error::Server { code, .. } => *code,
|
||||
Error::FlightGet { source, .. }
|
||||
| Error::HandleRequest { source, .. }
|
||||
| Error::RegionServer { source, .. }
|
||||
| Error::FlowServer { source, .. } => source.status_code(),
|
||||
Error::CreateChannel { source, .. }
|
||||
|
||||
@@ -16,9 +16,9 @@ use api::v1::flow::{FlowRequest, FlowResponse};
|
||||
use api::v1::region::InsertRequests;
|
||||
use common_error::ext::BoxedError;
|
||||
use common_meta::node_manager::Flownode;
|
||||
use snafu::ResultExt;
|
||||
use snafu::{location, ResultExt};
|
||||
|
||||
use crate::error::{FlowServerSnafu, Result};
|
||||
use crate::error::Result;
|
||||
use crate::Client;
|
||||
|
||||
#[derive(Debug)]
|
||||
@@ -57,10 +57,15 @@ impl FlowRequester {
|
||||
let response = client
|
||||
.handle_create_remove(request)
|
||||
.await
|
||||
.or_else(|e| {
|
||||
.map_err(|e| {
|
||||
let code = e.code();
|
||||
let err: crate::error::Error = e.into();
|
||||
Err(BoxedError::new(err)).context(FlowServerSnafu { addr, code })
|
||||
crate::error::Error::FlowServer {
|
||||
addr,
|
||||
code,
|
||||
source: BoxedError::new(err),
|
||||
location: location!(),
|
||||
}
|
||||
})?
|
||||
.into_inner();
|
||||
Ok(response)
|
||||
@@ -83,10 +88,15 @@ impl FlowRequester {
|
||||
let response = client
|
||||
.handle_mirror_request(requests)
|
||||
.await
|
||||
.or_else(|e| {
|
||||
.map_err(|e| {
|
||||
let code = e.code();
|
||||
let err: crate::error::Error = e.into();
|
||||
Err(BoxedError::new(err)).context(FlowServerSnafu { addr, code })
|
||||
crate::error::Error::FlowServer {
|
||||
addr,
|
||||
code,
|
||||
source: BoxedError::new(err),
|
||||
location: location!(),
|
||||
}
|
||||
})?
|
||||
.into_inner();
|
||||
Ok(response)
|
||||
|
||||
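The `FlowRequester` changes above construct `Error::FlowServer` directly and fill `location` with `snafu::location!()`, instead of going through the generated `FlowServerSnafu` context selector. The sketch below shows both styles side by side on an illustrative error type (not the client crate's real one):

```rust
// Both styles of building a snafu error variant, as flipped between in the hunk
// above: the generated context selector versus direct construction with
// `location!()`. The error type and the u32 code field are illustrative only.
use snafu::{location, Location, Snafu};

#[derive(Debug, Snafu)]
enum Error {
    #[snafu(display("Failed to call flownode {}, code: {}", addr, code))]
    FlowServer {
        addr: String,
        code: u32,
        #[snafu(implicit)]
        location: Location,
    },
}

// The selector fills `location` implicitly and converts `addr` via Into<String>.
fn via_selector(addr: &str, code: u32) -> Result<(), Error> {
    FlowServerSnafu { addr, code }.fail()
}

// Direct construction spells out every field, including the capture location.
fn via_direct_construction(addr: &str, code: u32) -> Result<(), Error> {
    Err(Error::FlowServer {
        addr: addr.to_string(),
        code,
        location: location!(),
    })
}

fn main() {
    println!("{:?}", via_selector("10.0.0.1:4001", 14));
    println!("{:?}", via_direct_construction("10.0.0.1:4001", 14));
}
```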
@@ -38,8 +38,8 @@ use substrait::{DFLogicalSubstraitConvertor, SubstraitPlan};
|
||||
use tokio_stream::StreamExt;
|
||||
|
||||
use crate::error::{
|
||||
self, ConvertFlightDataSnafu, FlightGetSnafu, IllegalDatabaseResponseSnafu,
|
||||
IllegalFlightMessagesSnafu, MissingFieldSnafu, Result, ServerSnafu,
|
||||
self, ConvertFlightDataSnafu, IllegalDatabaseResponseSnafu, IllegalFlightMessagesSnafu,
|
||||
MissingFieldSnafu, Result, ServerSnafu,
|
||||
};
|
||||
use crate::{metrics, Client, Error};
|
||||
|
||||
@@ -103,14 +103,11 @@ impl RegionRequester {
|
||||
let e: error::Error = e.into();
|
||||
let code = e.status_code();
|
||||
let msg = e.to_string();
|
||||
let error = ServerSnafu { code, msg }
|
||||
.fail::<()>()
|
||||
.map_err(BoxedError::new)
|
||||
.with_context(|_| FlightGetSnafu {
|
||||
tonic_code,
|
||||
addr: flight_client.addr().to_string(),
|
||||
})
|
||||
.unwrap_err();
|
||||
let error = Error::FlightGet {
|
||||
tonic_code,
|
||||
addr: flight_client.addr().to_string(),
|
||||
source: BoxedError::new(ServerSnafu { code, msg }.build()),
|
||||
};
|
||||
error!(
|
||||
e; "Failed to do Flight get, addr: {}, code: {}",
|
||||
flight_client.addr(),
|
||||
|
||||
@@ -21,8 +21,6 @@ mod export;
|
||||
mod helper;
|
||||
|
||||
// Wait for https://github.com/GreptimeTeam/greptimedb/issues/2373
|
||||
mod database;
|
||||
mod import;
|
||||
#[allow(unused)]
|
||||
mod repl;
|
||||
|
||||
@@ -34,7 +32,6 @@ pub use repl::Repl;
|
||||
use tracing_appender::non_blocking::WorkerGuard;
|
||||
|
||||
use self::export::ExportCommand;
|
||||
use crate::cli::import::ImportCommand;
|
||||
use crate::error::Result;
|
||||
use crate::options::GlobalOptions;
|
||||
use crate::App;
|
||||
@@ -117,7 +114,6 @@ enum SubCommand {
|
||||
// Attach(AttachCommand),
|
||||
Bench(BenchTableMetadataCommand),
|
||||
Export(ExportCommand),
|
||||
Import(ImportCommand),
|
||||
}
|
||||
|
||||
impl SubCommand {
|
||||
@@ -126,7 +122,6 @@ impl SubCommand {
|
||||
// SubCommand::Attach(cmd) => cmd.build().await,
|
||||
SubCommand::Bench(cmd) => cmd.build(guard).await,
|
||||
SubCommand::Export(cmd) => cmd.build(guard).await,
|
||||
SubCommand::Import(cmd) => cmd.build(guard).await,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,119 +0,0 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use base64::engine::general_purpose;
|
||||
use base64::Engine;
|
||||
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
|
||||
use serde_json::Value;
|
||||
use servers::http::greptime_result_v1::GreptimedbV1Response;
|
||||
use servers::http::GreptimeQueryOutput;
|
||||
use snafu::ResultExt;
|
||||
|
||||
use crate::error::{HttpQuerySqlSnafu, Result, SerdeJsonSnafu};
|
||||
|
||||
pub(crate) struct DatabaseClient {
|
||||
addr: String,
|
||||
catalog: String,
|
||||
auth_header: Option<String>,
|
||||
}
|
||||
|
||||
impl DatabaseClient {
|
||||
pub fn new(addr: String, catalog: String, auth_basic: Option<String>) -> Self {
|
||||
let auth_header = if let Some(basic) = auth_basic {
|
||||
let encoded = general_purpose::STANDARD.encode(basic);
|
||||
Some(format!("basic {}", encoded))
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
Self {
|
||||
addr,
|
||||
catalog,
|
||||
auth_header,
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn sql_in_public(&self, sql: &str) -> Result<Option<Vec<Vec<Value>>>> {
|
||||
self.sql(sql, DEFAULT_SCHEMA_NAME).await
|
||||
}
|
||||
|
||||
/// Execute sql query.
|
||||
pub async fn sql(&self, sql: &str, schema: &str) -> Result<Option<Vec<Vec<Value>>>> {
|
||||
let url = format!("http://{}/v1/sql", self.addr);
|
||||
let params = [
|
||||
("db", format!("{}-{}", self.catalog, schema)),
|
||||
("sql", sql.to_string()),
|
||||
];
|
||||
let mut request = reqwest::Client::new()
|
||||
.post(&url)
|
||||
.form(¶ms)
|
||||
.header("Content-Type", "application/x-www-form-urlencoded");
|
||||
if let Some(ref auth) = self.auth_header {
|
||||
request = request.header("Authorization", auth);
|
||||
}
|
||||
|
||||
let response = request.send().await.with_context(|_| HttpQuerySqlSnafu {
|
||||
reason: format!("bad url: {}", url),
|
||||
})?;
|
||||
let response = response
|
||||
.error_for_status()
|
||||
.with_context(|_| HttpQuerySqlSnafu {
|
||||
reason: format!("query failed: {}", sql),
|
||||
})?;
|
||||
|
||||
let text = response.text().await.with_context(|_| HttpQuerySqlSnafu {
|
||||
reason: "cannot get response text".to_string(),
|
||||
})?;
|
||||
|
||||
let body = serde_json::from_str::<GreptimedbV1Response>(&text).context(SerdeJsonSnafu)?;
|
||||
Ok(body.output().first().and_then(|output| match output {
|
||||
GreptimeQueryOutput::Records(records) => Some(records.rows().clone()),
|
||||
GreptimeQueryOutput::AffectedRows(_) => None,
|
||||
}))
|
||||
}
|
||||
}
|
||||
|
||||
/// Split at `-`.
|
||||
pub(crate) fn split_database(database: &str) -> Result<(String, Option<String>)> {
|
||||
let (catalog, schema) = match database.split_once('-') {
|
||||
Some((catalog, schema)) => (catalog, schema),
|
||||
None => (DEFAULT_CATALOG_NAME, database),
|
||||
};
|
||||
|
||||
if schema == "*" {
|
||||
Ok((catalog.to_string(), None))
|
||||
} else {
|
||||
Ok((catalog.to_string(), Some(schema.to_string())))
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_split_database() {
|
||||
let result = split_database("catalog-schema").unwrap();
|
||||
assert_eq!(result, ("catalog".to_string(), Some("schema".to_string())));
|
||||
|
||||
let result = split_database("schema").unwrap();
|
||||
assert_eq!(result, ("greptime".to_string(), Some("schema".to_string())));
|
||||
|
||||
let result = split_database("catalog-*").unwrap();
|
||||
assert_eq!(result, ("catalog".to_string(), None));
|
||||
|
||||
let result = split_database("*").unwrap();
|
||||
assert_eq!(result, ("greptime".to_string(), None));
|
||||
}
|
||||
}
|
||||
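The removed `cli/database.rs` above wrapped a plain HTTP call: POST the statement to `/v1/sql` with `db` (catalog-schema) and `sql` form fields, plus an optional basic-auth header. A condensed sketch of that request using `reqwest` and `tokio` follows; the endpoint, database name, and credentials are placeholders:

```rust
// Condensed sketch of the HTTP call the removed DatabaseClient performed.
// The address, database name, and the commented-out auth header are placeholders.
use std::collections::HashMap;

#[tokio::main]
async fn main() -> Result<(), reqwest::Error> {
    let addr = "127.0.0.1:4000";
    let url = format!("http://{addr}/v1/sql");

    let mut params = HashMap::new();
    params.insert("db", "greptime-public".to_string()); // "{catalog}-{schema}"
    params.insert("sql", "SHOW DATABASES".to_string());

    let response = reqwest::Client::new()
        .post(&url)
        .form(&params)
        .header("Content-Type", "application/x-www-form-urlencoded")
        // .header("Authorization", "basic <base64 of user:password>")
        .send()
        .await?
        .error_for_status()?;

    // The body is the GreptimeDB v1 JSON response; the removed code deserialized it
    // into GreptimedbV1Response and extracted the record rows.
    println!("{}", response.text().await?);
    Ok(())
}
```

The `export.rs` hunk that follows shows the same request shape, either inlined in `Export::sql` or delegated to the shared `DatabaseClient`, depending on which side of the diff you read.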
@@ -13,23 +13,30 @@
|
||||
// limitations under the License.
|
||||
|
||||
use std::collections::HashSet;
|
||||
use std::path::PathBuf;
|
||||
use std::path::Path;
|
||||
use std::sync::Arc;
|
||||
|
||||
use async_trait::async_trait;
|
||||
use base64::engine::general_purpose;
|
||||
use base64::Engine;
|
||||
use clap::{Parser, ValueEnum};
|
||||
use client::DEFAULT_SCHEMA_NAME;
|
||||
use common_catalog::consts::DEFAULT_CATALOG_NAME;
|
||||
use common_telemetry::{debug, error, info};
|
||||
use serde_json::Value;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use servers::http::greptime_result_v1::GreptimedbV1Response;
|
||||
use servers::http::GreptimeQueryOutput;
|
||||
use snafu::ResultExt;
|
||||
use tokio::fs::File;
|
||||
use tokio::io::{AsyncWriteExt, BufWriter};
|
||||
use tokio::sync::Semaphore;
|
||||
use tokio::time::Instant;
|
||||
use tracing_appender::non_blocking::WorkerGuard;
|
||||
|
||||
use crate::cli::database::DatabaseClient;
|
||||
use crate::cli::{database, Instance, Tool};
|
||||
use crate::error::{EmptyResultSnafu, Error, FileIoSnafu, Result, SchemaNotFoundSnafu};
|
||||
use crate::cli::{Instance, Tool};
|
||||
use crate::error::{
|
||||
EmptyResultSnafu, Error, FileIoSnafu, HttpQuerySqlSnafu, Result, SerdeJsonSnafu,
|
||||
};
|
||||
|
||||
type TableReference = (String, String, String);
|
||||
|
||||
@@ -87,21 +94,26 @@ pub struct ExportCommand {
|
||||
|
||||
impl ExportCommand {
|
||||
pub async fn build(&self, guard: Vec<WorkerGuard>) -> Result<Instance> {
|
||||
let (catalog, schema) = database::split_database(&self.database)?;
|
||||
let (catalog, schema) = split_database(&self.database)?;
|
||||
|
||||
let database_client =
|
||||
DatabaseClient::new(self.addr.clone(), catalog.clone(), self.auth_basic.clone());
|
||||
let auth_header = if let Some(basic) = &self.auth_basic {
|
||||
let encoded = general_purpose::STANDARD.encode(basic);
|
||||
Some(format!("basic {}", encoded))
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
Ok(Instance::new(
|
||||
Box::new(Export {
|
||||
addr: self.addr.clone(),
|
||||
catalog,
|
||||
schema,
|
||||
database_client,
|
||||
output_dir: self.output_dir.clone(),
|
||||
parallelism: self.export_jobs,
|
||||
target: self.target.clone(),
|
||||
start_time: self.start_time.clone(),
|
||||
end_time: self.end_time.clone(),
|
||||
auth_header,
|
||||
}),
|
||||
guard,
|
||||
))
|
||||
@@ -109,59 +121,78 @@ impl ExportCommand {
|
||||
}
|
||||
|
||||
pub struct Export {
|
||||
addr: String,
|
||||
catalog: String,
|
||||
schema: Option<String>,
|
||||
database_client: DatabaseClient,
|
||||
output_dir: String,
|
||||
parallelism: usize,
|
||||
target: ExportTarget,
|
||||
start_time: Option<String>,
|
||||
end_time: Option<String>,
|
||||
auth_header: Option<String>,
|
||||
}
|
||||
|
||||
impl Export {
|
||||
fn catalog_path(&self) -> PathBuf {
|
||||
PathBuf::from(&self.output_dir).join(&self.catalog)
|
||||
}
|
||||
/// Execute one single sql query.
|
||||
async fn sql(&self, sql: &str) -> Result<Option<Vec<Vec<Value>>>> {
|
||||
let url = format!(
|
||||
"http://{}/v1/sql?db={}-{}&sql={}",
|
||||
self.addr,
|
||||
self.catalog,
|
||||
self.schema.as_deref().unwrap_or(DEFAULT_SCHEMA_NAME),
|
||||
sql
|
||||
);
|
||||
|
||||
async fn get_db_names(&self) -> Result<Vec<String>> {
|
||||
let db_names = self.all_db_names().await?;
|
||||
let Some(schema) = &self.schema else {
|
||||
return Ok(db_names);
|
||||
};
|
||||
let mut request = reqwest::Client::new()
|
||||
.get(&url)
|
||||
.header("Content-Type", "application/x-www-form-urlencoded");
|
||||
if let Some(ref auth) = self.auth_header {
|
||||
request = request.header("Authorization", auth);
|
||||
}
|
||||
|
||||
// Check if the schema exists
|
||||
db_names
|
||||
.into_iter()
|
||||
.find(|db_name| db_name.to_lowercase() == schema.to_lowercase())
|
||||
.map(|name| vec![name])
|
||||
.context(SchemaNotFoundSnafu {
|
||||
catalog: &self.catalog,
|
||||
schema,
|
||||
})
|
||||
let response = request.send().await.with_context(|_| HttpQuerySqlSnafu {
|
||||
reason: format!("bad url: {}", url),
|
||||
})?;
|
||||
let response = response
|
||||
.error_for_status()
|
||||
.with_context(|_| HttpQuerySqlSnafu {
|
||||
reason: format!("query failed: {}", sql),
|
||||
})?;
|
||||
|
||||
let text = response.text().await.with_context(|_| HttpQuerySqlSnafu {
|
||||
reason: "cannot get response text".to_string(),
|
||||
})?;
|
||||
|
||||
let body = serde_json::from_str::<GreptimedbV1Response>(&text).context(SerdeJsonSnafu)?;
|
||||
Ok(body.output().first().and_then(|output| match output {
|
||||
GreptimeQueryOutput::Records(records) => Some(records.rows().clone()),
|
||||
GreptimeQueryOutput::AffectedRows(_) => None,
|
||||
}))
|
||||
}
|
||||
|
||||
/// Iterate over all db names.
|
||||
async fn all_db_names(&self) -> Result<Vec<String>> {
|
||||
let records = self
|
||||
.database_client
|
||||
.sql_in_public("SHOW DATABASES")
|
||||
.await?
|
||||
.context(EmptyResultSnafu)?;
|
||||
let mut result = Vec::with_capacity(records.len());
|
||||
for value in records {
|
||||
let Value::String(schema) = &value[0] else {
|
||||
unreachable!()
|
||||
///
|
||||
/// Newbie: `db_name` is catalog + schema.
|
||||
async fn iter_db_names(&self) -> Result<Vec<(String, String)>> {
|
||||
if let Some(schema) = &self.schema {
|
||||
Ok(vec![(self.catalog.clone(), schema.clone())])
|
||||
} else {
|
||||
let result = self.sql("SHOW DATABASES").await?;
|
||||
let Some(records) = result else {
|
||||
EmptyResultSnafu.fail()?
|
||||
};
|
||||
if schema == common_catalog::consts::INFORMATION_SCHEMA_NAME {
|
||||
continue;
|
||||
let mut result = Vec::with_capacity(records.len());
|
||||
for value in records {
|
||||
let Value::String(schema) = &value[0] else {
|
||||
unreachable!()
|
||||
};
|
||||
if schema == common_catalog::consts::INFORMATION_SCHEMA_NAME {
|
||||
continue;
|
||||
}
|
||||
result.push((self.catalog.clone(), schema.clone()));
|
||||
}
|
||||
if schema == common_catalog::consts::PG_CATALOG_NAME {
|
||||
continue;
|
||||
}
|
||||
result.push(schema.clone());
|
||||
Ok(result)
|
||||
}
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
/// Return a list of [`TableReference`] to be exported.
|
||||
@@ -170,11 +201,7 @@ impl Export {
|
||||
&self,
|
||||
catalog: &str,
|
||||
schema: &str,
|
||||
) -> Result<(
|
||||
Vec<TableReference>,
|
||||
Vec<TableReference>,
|
||||
Vec<TableReference>,
|
||||
)> {
|
||||
) -> Result<(Vec<TableReference>, Vec<TableReference>)> {
|
||||
// Puts all metric table first
|
||||
let sql = format!(
|
||||
"SELECT table_catalog, table_schema, table_name \
|
||||
@@ -183,16 +210,15 @@ impl Export {
|
||||
and table_catalog = \'{catalog}\' \
|
||||
and table_schema = \'{schema}\'"
|
||||
);
|
||||
let records = self
|
||||
.database_client
|
||||
.sql_in_public(&sql)
|
||||
.await?
|
||||
.context(EmptyResultSnafu)?;
|
||||
let result = self.sql(&sql).await?;
|
||||
let Some(records) = result else {
|
||||
EmptyResultSnafu.fail()?
|
||||
};
|
||||
let mut metric_physical_tables = HashSet::with_capacity(records.len());
|
||||
for value in records {
|
||||
let mut t = Vec::with_capacity(3);
|
||||
for v in &value {
|
||||
let Value::String(value) = v else {
|
||||
let serde_json::Value::String(value) = v else {
|
||||
unreachable!()
|
||||
};
|
||||
t.push(value);
|
||||
@@ -200,142 +226,100 @@ impl Export {
|
||||
metric_physical_tables.insert((t[0].clone(), t[1].clone(), t[2].clone()));
|
||||
}
|
||||
|
||||
// TODO: SQL injection hurts
|
||||
let sql = format!(
|
||||
"SELECT table_catalog, table_schema, table_name, table_type \
|
||||
"SELECT table_catalog, table_schema, table_name \
|
||||
FROM information_schema.tables \
|
||||
WHERE (table_type = \'BASE TABLE\' OR table_type = \'VIEW\') \
|
||||
WHERE table_type = \'BASE TABLE\' \
|
||||
and table_catalog = \'{catalog}\' \
|
||||
and table_schema = \'{schema}\'",
|
||||
);
|
||||
let records = self
|
||||
.database_client
|
||||
.sql_in_public(&sql)
|
||||
.await?
|
||||
.context(EmptyResultSnafu)?;
|
||||
let result = self.sql(&sql).await?;
|
||||
let Some(records) = result else {
|
||||
EmptyResultSnafu.fail()?
|
||||
};
|
||||
|
||||
debug!("Fetched table/view list: {:?}", records);
|
||||
debug!("Fetched table list: {:?}", records);
|
||||
|
||||
if records.is_empty() {
|
||||
return Ok((vec![], vec![], vec![]));
|
||||
return Ok((vec![], vec![]));
|
||||
}
|
||||
|
||||
let mut remaining_tables = Vec::with_capacity(records.len());
|
||||
let mut views = Vec::new();
|
||||
for value in records {
|
||||
let mut t = Vec::with_capacity(4);
|
||||
let mut t = Vec::with_capacity(3);
|
||||
for v in &value {
|
||||
let Value::String(value) = v else {
|
||||
let serde_json::Value::String(value) = v else {
|
||||
unreachable!()
|
||||
};
|
||||
t.push(value);
|
||||
}
|
||||
let table = (t[0].clone(), t[1].clone(), t[2].clone());
|
||||
let table_type = t[3].as_str();
|
||||
// Ignores the physical table
|
||||
if !metric_physical_tables.contains(&table) {
|
||||
if table_type == "VIEW" {
|
||||
views.push(table);
|
||||
} else {
|
||||
remaining_tables.push(table);
|
||||
}
|
||||
remaining_tables.push(table);
|
||||
}
|
||||
}
|
||||
|
||||
Ok((
|
||||
metric_physical_tables.into_iter().collect(),
|
||||
remaining_tables,
|
||||
views,
|
||||
))
|
||||
}
|
||||
|
||||
async fn show_create(
|
||||
&self,
|
||||
show_type: &str,
|
||||
catalog: &str,
|
||||
schema: &str,
|
||||
table: Option<&str>,
|
||||
) -> Result<String> {
|
||||
let sql = match table {
|
||||
Some(table) => format!(
|
||||
r#"SHOW CREATE {} "{}"."{}"."{}""#,
|
||||
show_type, catalog, schema, table
|
||||
),
|
||||
None => format!(r#"SHOW CREATE {} "{}"."{}""#, show_type, catalog, schema),
|
||||
async fn show_create_table(&self, catalog: &str, schema: &str, table: &str) -> Result<String> {
    let sql = format!(
        r#"SHOW CREATE TABLE "{}"."{}"."{}""#,
        catalog, schema, table
    );
    let result = self.sql(&sql).await?;
    let Some(records) = result else {
        EmptyResultSnafu.fail()?
    };
    let records = self
        .database_client
        .sql_in_public(&sql)
        .await?
        .context(EmptyResultSnafu)?;
    let Value::String(create) = &records[0][1] else {
    let Value::String(create_table) = &records[0][1] else {
        unreachable!()
    };

    Ok(format!("{};\n", create))
}

async fn export_create_database(&self) -> Result<()> {
    let timer = Instant::now();
    let db_names = self.get_db_names().await?;
    let db_count = db_names.len();
    for schema in db_names {
        let db_dir = self.catalog_path().join(format!("{schema}/"));
        tokio::fs::create_dir_all(&db_dir)
            .await
            .context(FileIoSnafu)?;
        let file = db_dir.join("create_database.sql");
        let mut file = File::create(file).await.context(FileIoSnafu)?;
        let create_database = self
            .show_create("DATABASE", &self.catalog, &schema, None)
            .await?;
        file.write_all(create_database.as_bytes())
            .await
            .context(FileIoSnafu)?;
    }

    let elapsed = timer.elapsed();
    info!("Success {db_count} jobs, cost: {elapsed:?}");

    Ok(())
    Ok(format!("{};\n", create_table))
}

async fn export_create_table(&self) -> Result<()> {
    let timer = Instant::now();
    let semaphore = Arc::new(Semaphore::new(self.parallelism));
    let db_names = self.get_db_names().await?;
    let db_names = self.iter_db_names().await?;
    let db_count = db_names.len();
    let mut tasks = Vec::with_capacity(db_names.len());
    for schema in db_names {
    for (catalog, schema) in db_names {
        let semaphore_moved = semaphore.clone();
        tasks.push(async move {
            let _permit = semaphore_moved.acquire().await.unwrap();
            let (metric_physical_tables, remaining_tables, views) =
                self.get_table_list(&self.catalog, &schema).await?;
            let table_count =
                metric_physical_tables.len() + remaining_tables.len() + views.len();
            let db_dir = self.catalog_path().join(format!("{schema}/"));
            tokio::fs::create_dir_all(&db_dir)
            let (metric_physical_tables, remaining_tables) =
                self.get_table_list(&catalog, &schema).await?;
            let table_count = metric_physical_tables.len() + remaining_tables.len();
            let output_dir = Path::new(&self.output_dir)
                .join(&catalog)
                .join(format!("{schema}/"));
            tokio::fs::create_dir_all(&output_dir)
                .await
                .context(FileIoSnafu)?;
            let file = db_dir.join("create_tables.sql");
            let mut file = File::create(file).await.context(FileIoSnafu)?;
            let output_file = Path::new(&output_dir).join("create_tables.sql");
            let mut file = File::create(output_file).await.context(FileIoSnafu)?;
            for (c, s, t) in metric_physical_tables.into_iter().chain(remaining_tables) {
                let create_table = self.show_create("TABLE", &c, &s, Some(&t)).await?;
                file.write_all(create_table.as_bytes())
                    .await
                    .context(FileIoSnafu)?;
            }
            for (c, s, v) in views {
                let create_view = self.show_create("VIEW", &c, &s, Some(&v)).await?;
                file.write_all(create_view.as_bytes())
                    .await
                    .context(FileIoSnafu)?;
                match self.show_create_table(&c, &s, &t).await {
                    Err(e) => {
                        error!(e; r#"Failed to export table "{}"."{}"."{}""#, c, s, t)
                    }
                    Ok(create_table) => {
                        file.write_all(create_table.as_bytes())
                            .await
                            .context(FileIoSnafu)?;
                    }
                }
            }

            info!(
                "Finished exporting {}.{schema} with {table_count} table schemas to path: {}",
                self.catalog,
                db_dir.to_string_lossy()
                "Finished exporting {catalog}.{schema} with {table_count} table schemas to path: {}",
                output_dir.to_string_lossy()
            );

            Ok::<(), Error>(())

@@ -348,14 +332,14 @@ impl Export {
|
||||
.filter(|r| match r {
|
||||
Ok(_) => true,
|
||||
Err(e) => {
|
||||
error!(e; "export schema job failed");
|
||||
error!(e; "export job failed");
|
||||
false
|
||||
}
|
||||
})
|
||||
.count();
|
||||
|
||||
let elapsed = timer.elapsed();
|
||||
info!("Success {success}/{db_count} jobs, cost: {elapsed:?}");
|
||||
info!("Success {success}/{db_count} jobs, cost: {:?}", elapsed);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@@ -363,15 +347,17 @@ impl Export {
|
||||
async fn export_database_data(&self) -> Result<()> {
|
||||
let timer = Instant::now();
|
||||
let semaphore = Arc::new(Semaphore::new(self.parallelism));
|
||||
let db_names = self.get_db_names().await?;
|
||||
let db_names = self.iter_db_names().await?;
|
||||
let db_count = db_names.len();
|
||||
let mut tasks = Vec::with_capacity(db_count);
|
||||
for schema in db_names {
|
||||
let mut tasks = Vec::with_capacity(db_names.len());
|
||||
for (catalog, schema) in db_names {
|
||||
let semaphore_moved = semaphore.clone();
|
||||
tasks.push(async move {
|
||||
let _permit = semaphore_moved.acquire().await.unwrap();
|
||||
let db_dir = self.catalog_path().join(format!("{schema}/"));
|
||||
tokio::fs::create_dir_all(&db_dir)
|
||||
let output_dir = Path::new(&self.output_dir)
|
||||
.join(&catalog)
|
||||
.join(format!("{schema}/"));
|
||||
tokio::fs::create_dir_all(&output_dir)
|
||||
.await
|
||||
.context(FileIoSnafu)?;
|
||||
|
||||
@@ -393,31 +379,30 @@ impl Export {
|
||||
|
||||
let sql = format!(
|
||||
r#"COPY DATABASE "{}"."{}" TO '{}' {};"#,
|
||||
self.catalog,
|
||||
catalog,
|
||||
schema,
|
||||
db_dir.to_str().unwrap(),
|
||||
output_dir.to_str().unwrap(),
|
||||
with_options
|
||||
);
|
||||
|
||||
info!("Executing sql: {sql}");
|
||||
|
||||
self.database_client.sql_in_public(&sql).await?;
|
||||
self.sql(&sql).await?;
|
||||
|
||||
info!(
|
||||
"Finished exporting {}.{schema} data into path: {}",
|
||||
self.catalog,
|
||||
db_dir.to_string_lossy()
|
||||
"Finished exporting {catalog}.{schema} data into path: {}",
|
||||
output_dir.to_string_lossy()
|
||||
);
|
||||
|
||||
// The export copy from sql
|
||||
let copy_from_file = db_dir.join("copy_from.sql");
|
||||
let copy_from_file = output_dir.join("copy_from.sql");
|
||||
let mut writer =
|
||||
BufWriter::new(File::create(copy_from_file).await.context(FileIoSnafu)?);
|
||||
let copy_database_from_sql = format!(
|
||||
r#"COPY DATABASE "{}"."{}" FROM '{}' WITH (FORMAT='parquet');"#,
|
||||
self.catalog,
|
||||
catalog,
|
||||
schema,
|
||||
db_dir.to_str().unwrap()
|
||||
output_dir.to_str().unwrap()
|
||||
);
|
||||
writer
|
||||
.write(copy_database_from_sql.as_bytes())
|
||||
@@ -425,7 +410,7 @@ impl Export {
|
||||
.context(FileIoSnafu)?;
|
||||
writer.flush().await.context(FileIoSnafu)?;
|
||||
|
||||
info!("Finished exporting {}.{schema} copy_from.sql", self.catalog);
|
||||
info!("Finished exporting {catalog}.{schema} copy_from.sql");
|
||||
|
||||
Ok::<(), Error>(())
|
||||
})
|
||||
@@ -444,23 +429,20 @@ impl Export {
|
||||
.count();
|
||||
let elapsed = timer.elapsed();
|
||||
|
||||
info!("Success {success}/{db_count} jobs, costs: {elapsed:?}");
|
||||
info!("Success {success}/{db_count} jobs, costs: {:?}", elapsed);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(deprecated)]
|
||||
#[async_trait]
|
||||
impl Tool for Export {
|
||||
async fn do_work(&self) -> Result<()> {
|
||||
match self.target {
|
||||
ExportTarget::Schema => {
|
||||
self.export_create_database().await?;
|
||||
self.export_create_table().await
|
||||
}
|
||||
ExportTarget::Schema => self.export_create_table().await,
|
||||
ExportTarget::Data => self.export_database_data().await,
|
||||
ExportTarget::All => {
|
||||
self.export_create_database().await?;
|
||||
self.export_create_table().await?;
|
||||
self.export_database_data().await
|
||||
}
|
||||
@@ -468,6 +450,20 @@ impl Tool for Export {
|
||||
}
|
||||
}
|
||||
|
||||
/// Split at `-`.
|
||||
fn split_database(database: &str) -> Result<(String, Option<String>)> {
|
||||
let (catalog, schema) = match database.split_once('-') {
|
||||
Some((catalog, schema)) => (catalog, schema),
|
||||
None => (DEFAULT_CATALOG_NAME, database),
|
||||
};
|
||||
|
||||
if schema == "*" {
|
||||
Ok((catalog.to_string(), None))
|
||||
} else {
|
||||
Ok((catalog.to_string(), Some(schema.to_string())))
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use clap::Parser;
|
||||
@@ -475,10 +471,26 @@ mod tests {
|
||||
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
|
||||
use common_telemetry::logging::LoggingOptions;
|
||||
|
||||
use crate::cli::export::split_database;
|
||||
use crate::error::Result as CmdResult;
|
||||
use crate::options::GlobalOptions;
|
||||
use crate::{cli, standalone, App};
|
||||
|
||||
#[test]
|
||||
fn test_split_database() {
|
||||
let result = split_database("catalog-schema").unwrap();
|
||||
assert_eq!(result, ("catalog".to_string(), Some("schema".to_string())));
|
||||
|
||||
let result = split_database("schema").unwrap();
|
||||
assert_eq!(result, ("greptime".to_string(), Some("schema".to_string())));
|
||||
|
||||
let result = split_database("catalog-*").unwrap();
|
||||
assert_eq!(result, ("catalog".to_string(), None));
|
||||
|
||||
let result = split_database("*").unwrap();
|
||||
assert_eq!(result, ("greptime".to_string(), None));
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread")]
|
||||
async fn test_export_create_table_with_quoted_names() -> CmdResult<()> {
|
||||
let output_dir = tempfile::tempdir().unwrap();
|
||||
|
||||
@@ -1,218 +0,0 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::path::PathBuf;
|
||||
use std::sync::Arc;
|
||||
|
||||
use async_trait::async_trait;
|
||||
use clap::{Parser, ValueEnum};
|
||||
use common_catalog::consts::DEFAULT_SCHEMA_NAME;
|
||||
use common_telemetry::{error, info, warn};
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use tokio::sync::Semaphore;
|
||||
use tokio::time::Instant;
|
||||
use tracing_appender::non_blocking::WorkerGuard;
|
||||
|
||||
use crate::cli::database::DatabaseClient;
|
||||
use crate::cli::{database, Instance, Tool};
|
||||
use crate::error::{Error, FileIoSnafu, Result, SchemaNotFoundSnafu};
|
||||
|
||||
#[derive(Debug, Default, Clone, ValueEnum)]
|
||||
enum ImportTarget {
|
||||
/// Import all table schemas into the database.
|
||||
Schema,
|
||||
/// Import all table data into the database.
|
||||
Data,
|
||||
/// Import all table schemas and data at once.
|
||||
#[default]
|
||||
All,
|
||||
}
|
||||
|
||||
#[derive(Debug, Default, Parser)]
|
||||
pub struct ImportCommand {
|
||||
/// Server address to connect
|
||||
#[clap(long)]
|
||||
addr: String,
|
||||
|
||||
/// Directory of the data. E.g.: /tmp/greptimedb-backup
|
||||
#[clap(long)]
|
||||
input_dir: String,
|
||||
|
||||
/// The name of the catalog to import.
|
||||
#[clap(long, default_value = "greptime-*")]
|
||||
database: String,
|
||||
|
||||
/// Parallelism of the import.
|
||||
#[clap(long, short = 'j', default_value = "1")]
|
||||
import_jobs: usize,
|
||||
|
||||
/// Max retry times for each job.
|
||||
#[clap(long, default_value = "3")]
|
||||
max_retry: usize,
|
||||
|
||||
/// Things to export
|
||||
#[clap(long, short = 't', value_enum, default_value = "all")]
|
||||
target: ImportTarget,
|
||||
|
||||
/// The basic authentication for connecting to the server
|
||||
#[clap(long)]
|
||||
auth_basic: Option<String>,
|
||||
}
|
||||
|
||||
impl ImportCommand {
|
||||
pub async fn build(&self, guard: Vec<WorkerGuard>) -> Result<Instance> {
|
||||
let (catalog, schema) = database::split_database(&self.database)?;
|
||||
let database_client =
|
||||
DatabaseClient::new(self.addr.clone(), catalog.clone(), self.auth_basic.clone());
|
||||
|
||||
Ok(Instance::new(
|
||||
Box::new(Import {
|
||||
catalog,
|
||||
schema,
|
||||
database_client,
|
||||
input_dir: self.input_dir.clone(),
|
||||
parallelism: self.import_jobs,
|
||||
target: self.target.clone(),
|
||||
}),
|
||||
guard,
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
pub struct Import {
|
||||
catalog: String,
|
||||
schema: Option<String>,
|
||||
database_client: DatabaseClient,
|
||||
input_dir: String,
|
||||
parallelism: usize,
|
||||
target: ImportTarget,
|
||||
}
|
||||
|
||||
impl Import {
|
||||
async fn import_create_table(&self) -> Result<()> {
|
||||
// Use the default db to create the other dbs
|
||||
self.do_sql_job("create_database.sql", Some(DEFAULT_SCHEMA_NAME))
|
||||
.await?;
|
||||
self.do_sql_job("create_tables.sql", None).await
|
||||
}
|
||||
|
||||
async fn import_database_data(&self) -> Result<()> {
|
||||
self.do_sql_job("copy_from.sql", None).await
|
||||
}
|
||||
|
||||
async fn do_sql_job(&self, filename: &str, exec_db: Option<&str>) -> Result<()> {
|
||||
let timer = Instant::now();
|
||||
let semaphore = Arc::new(Semaphore::new(self.parallelism));
|
||||
let db_names = self.get_db_names().await?;
|
||||
let db_count = db_names.len();
|
||||
let mut tasks = Vec::with_capacity(db_count);
|
||||
for schema in db_names {
|
||||
let semaphore_moved = semaphore.clone();
|
||||
tasks.push(async move {
|
||||
let _permit = semaphore_moved.acquire().await.unwrap();
|
||||
let database_input_dir = self.catalog_path().join(&schema);
|
||||
let sql_file = database_input_dir.join(filename);
|
||||
let sql = tokio::fs::read_to_string(sql_file)
|
||||
.await
|
||||
.context(FileIoSnafu)?;
|
||||
if sql.is_empty() {
|
||||
info!("Empty `{filename}` {database_input_dir:?}");
|
||||
} else {
|
||||
let db = exec_db.unwrap_or(&schema);
|
||||
self.database_client.sql(&sql, db).await?;
|
||||
info!("Imported `{filename}` for database {schema}");
|
||||
}
|
||||
|
||||
Ok::<(), Error>(())
|
||||
})
|
||||
}
|
||||
|
||||
let success = futures::future::join_all(tasks)
|
||||
.await
|
||||
.into_iter()
|
||||
.filter(|r| match r {
|
||||
Ok(_) => true,
|
||||
Err(e) => {
|
||||
error!(e; "import {filename} job failed");
|
||||
false
|
||||
}
|
||||
})
|
||||
.count();
|
||||
let elapsed = timer.elapsed();
|
||||
info!("Success {success}/{db_count} `{filename}` jobs, cost: {elapsed:?}");
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn catalog_path(&self) -> PathBuf {
|
||||
PathBuf::from(&self.input_dir).join(&self.catalog)
|
||||
}
|
||||
|
||||
async fn get_db_names(&self) -> Result<Vec<String>> {
|
||||
let db_names = self.all_db_names().await?;
|
||||
let Some(schema) = &self.schema else {
|
||||
return Ok(db_names);
|
||||
};
|
||||
|
||||
// Check if the schema exists
|
||||
db_names
|
||||
.into_iter()
|
||||
.find(|db_name| db_name.to_lowercase() == schema.to_lowercase())
|
||||
.map(|name| vec![name])
|
||||
.context(SchemaNotFoundSnafu {
|
||||
catalog: &self.catalog,
|
||||
schema,
|
||||
})
|
||||
}
|
||||
|
||||
// Get all database names in the input directory.
|
||||
// The directory structure should be like:
|
||||
// /tmp/greptimedb-backup
|
||||
// ├── greptime-1
|
||||
// │ ├── db1
|
||||
// │ └── db2
|
||||
async fn all_db_names(&self) -> Result<Vec<String>> {
|
||||
let mut db_names = vec![];
|
||||
let path = self.catalog_path();
|
||||
let mut entries = tokio::fs::read_dir(path).await.context(FileIoSnafu)?;
|
||||
while let Some(entry) = entries.next_entry().await.context(FileIoSnafu)? {
|
||||
let path = entry.path();
|
||||
if path.is_dir() {
|
||||
let db_name = match path.file_name() {
|
||||
Some(name) => name.to_string_lossy().to_string(),
|
||||
None => {
|
||||
warn!("Failed to get the file name of {:?}", path);
|
||||
continue;
|
||||
}
|
||||
};
|
||||
db_names.push(db_name);
|
||||
}
|
||||
}
|
||||
Ok(db_names)
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Tool for Import {
|
||||
async fn do_work(&self) -> Result<()> {
|
||||
match self.target {
|
||||
ImportTarget::Schema => self.import_create_table().await,
|
||||
ImportTarget::Data => self.import_database_data().await,
|
||||
ImportTarget::All => {
|
||||
self.import_create_table().await?;
|
||||
self.import_database_data().await
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -267,7 +267,7 @@ impl StartCommand {
|
||||
&opts.component.tracing,
|
||||
opts.component.node_id.map(|x| x.to_string()),
|
||||
);
|
||||
log_versions(version(), short_version(), APP_NAME);
|
||||
log_versions(version(), short_version());
|
||||
|
||||
info!("Datanode start command: {:#?}", self);
|
||||
info!("Datanode options: {:#?}", opts);
|
||||
|
||||
@@ -31,6 +31,13 @@ pub enum Error {
|
||||
source: common_meta::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to iter stream"))]
|
||||
IterStream {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: common_meta::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to init DDL manager"))]
|
||||
InitDdlManager {
|
||||
#[snafu(implicit)]
|
||||
@@ -230,6 +237,13 @@ pub enum Error {
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to start catalog manager"))]
|
||||
StartCatalogManager {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: catalog::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to connect to Etcd at {etcd_addr}"))]
|
||||
ConnectEtcd {
|
||||
etcd_addr: String,
|
||||
@@ -239,6 +253,14 @@ pub enum Error {
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to connect server at {addr}"))]
|
||||
ConnectServer {
|
||||
addr: String,
|
||||
source: client::error::Error,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to serde json"))]
|
||||
SerdeJson {
|
||||
#[snafu(source)]
|
||||
@@ -256,6 +278,12 @@ pub enum Error {
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Expect data from output, but got another thing"))]
|
||||
NotDataFromOutput {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Empty result from output"))]
|
||||
EmptyResult {
|
||||
#[snafu(implicit)]
|
||||
@@ -318,12 +346,13 @@ pub enum Error {
|
||||
source: meta_client::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Cannot find schema {schema} in catalog {catalog}"))]
|
||||
SchemaNotFound {
|
||||
catalog: String,
|
||||
schema: String,
|
||||
#[snafu(display("Tonic transport error: {error:?} with msg: {msg:?}"))]
|
||||
TonicTransport {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
#[snafu(source)]
|
||||
error: tonic::transport::Error,
|
||||
msg: Option<String>,
|
||||
},
|
||||
}
|
||||
|
||||
@@ -341,16 +370,18 @@ impl ErrorExt for Error {
|
||||
Error::BuildMetaServer { source, .. } => source.status_code(),
|
||||
Error::UnsupportedSelectorType { source, .. } => source.status_code(),
|
||||
|
||||
Error::InitMetadata { source, .. } | Error::InitDdlManager { source, .. } => {
|
||||
source.status_code()
|
||||
}
|
||||
Error::IterStream { source, .. }
|
||||
| Error::InitMetadata { source, .. }
|
||||
| Error::InitDdlManager { source, .. } => source.status_code(),
|
||||
|
||||
Error::ConnectServer { source, .. } => source.status_code(),
|
||||
Error::MissingConfig { .. }
|
||||
| Error::LoadLayeredConfig { .. }
|
||||
| Error::IllegalConfig { .. }
|
||||
| Error::InvalidReplCommand { .. }
|
||||
| Error::InitTimezone { .. }
|
||||
| Error::ConnectEtcd { .. }
|
||||
| Error::NotDataFromOutput { .. }
|
||||
| Error::CreateDir { .. }
|
||||
| Error::EmptyResult { .. } => StatusCode::InvalidArguments,
|
||||
|
||||
@@ -368,6 +399,7 @@ impl ErrorExt for Error {
|
||||
source.status_code()
|
||||
}
|
||||
Error::SubstraitEncodeLogicalPlan { source, .. } => source.status_code(),
|
||||
Error::StartCatalogManager { source, .. } => source.status_code(),
|
||||
|
||||
Error::SerdeJson { .. } | Error::FileIo { .. } | Error::SpawnThread { .. } => {
|
||||
StatusCode::Unexpected
|
||||
@@ -382,7 +414,7 @@ impl ErrorExt for Error {
|
||||
source.status_code()
|
||||
}
|
||||
Error::MetaClientInit { source, .. } => source.status_code(),
|
||||
Error::SchemaNotFound { .. } => StatusCode::DatabaseNotFound,
|
||||
Error::TonicTransport { .. } => StatusCode::Internal,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -215,7 +215,7 @@ impl StartCommand {
|
||||
&opts.component.tracing,
|
||||
opts.component.node_id.map(|x| x.to_string()),
|
||||
);
|
||||
log_versions(version(), short_version(), APP_NAME);
|
||||
log_versions(version(), short_version());
|
||||
|
||||
info!("Flownode start command: {:#?}", self);
|
||||
info!("Flownode options: {:#?}", opts);
|
||||
|
||||
@@ -261,7 +261,7 @@ impl StartCommand {
|
||||
&opts.component.tracing,
|
||||
opts.component.node_id.clone(),
|
||||
);
|
||||
log_versions(version(), short_version(), APP_NAME);
|
||||
log_versions(version(), short_version());
|
||||
|
||||
info!("Frontend start command: {:#?}", self);
|
||||
info!("Frontend options: {:#?}", opts);
|
||||
|
||||
@@ -30,7 +30,7 @@ pub mod standalone;
|
||||
|
||||
lazy_static::lazy_static! {
|
||||
static ref APP_VERSION: prometheus::IntGaugeVec =
|
||||
prometheus::register_int_gauge_vec!("greptime_app_version", "app version", &["version", "short_version", "app"]).unwrap();
|
||||
prometheus::register_int_gauge_vec!("greptime_app_version", "app version", &["version", "short_version"]).unwrap();
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
@@ -76,10 +76,10 @@ pub trait App: Send {
|
||||
/// Log the versions of the application, and the arguments passed to the cli.
|
||||
/// `version` should be the same as the output of cli "--version";
|
||||
/// and the `short_version` is the short version of the codes, often consist of git branch and commit.
|
||||
pub fn log_versions(version: &str, short_version: &str, app: &str) {
|
||||
pub fn log_versions(version: &str, short_version: &str) {
|
||||
// Report app version as gauge.
|
||||
APP_VERSION
|
||||
.with_label_values(&[env!("CARGO_PKG_VERSION"), short_version, app])
|
||||
.with_label_values(&[env!("CARGO_PKG_VERSION"), short_version])
|
||||
.inc();
|
||||
|
||||
// Log version and argument flags.
|
||||
|
||||
@@ -244,7 +244,7 @@ impl StartCommand {
|
||||
&opts.component.tracing,
|
||||
None,
|
||||
);
|
||||
log_versions(version(), short_version(), APP_NAME);
|
||||
log_versions(version(), short_version());
|
||||
|
||||
info!("Metasrv start command: {:#?}", self);
|
||||
info!("Metasrv options: {:#?}", opts);
|
||||
|
||||
@@ -141,8 +141,6 @@ pub struct StandaloneOptions {
|
||||
pub region_engine: Vec<RegionEngineConfig>,
|
||||
pub export_metrics: ExportMetricsOption,
|
||||
pub tracing: TracingOptions,
|
||||
pub init_regions_in_background: bool,
|
||||
pub init_regions_parallelism: usize,
|
||||
}
|
||||
|
||||
impl Default for StandaloneOptions {
|
||||
@@ -170,8 +168,6 @@ impl Default for StandaloneOptions {
|
||||
RegionEngineConfig::File(FileEngineConfig::default()),
|
||||
],
|
||||
tracing: TracingOptions::default(),
|
||||
init_regions_in_background: false,
|
||||
init_regions_parallelism: 16,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -182,16 +178,6 @@ impl Configurable for StandaloneOptions {
|
||||
}
|
||||
}
|
||||
|
||||
/// The [`StandaloneOptions`] is only defined in cmd crate,
|
||||
/// we don't want to make `frontend` depends on it, so impl [`Into`]
|
||||
/// rather than [`From`].
|
||||
#[allow(clippy::from_over_into)]
|
||||
impl Into<FrontendOptions> for StandaloneOptions {
|
||||
fn into(self) -> FrontendOptions {
|
||||
self.frontend_options()
|
||||
}
|
||||
}
|
||||
|
||||
impl StandaloneOptions {
|
||||
pub fn frontend_options(&self) -> FrontendOptions {
|
||||
let cloned_opts = self.clone();
|
||||
@@ -222,9 +208,6 @@ impl StandaloneOptions {
|
||||
storage: cloned_opts.storage,
|
||||
region_engine: cloned_opts.region_engine,
|
||||
grpc: cloned_opts.grpc,
|
||||
init_regions_in_background: cloned_opts.init_regions_in_background,
|
||||
init_regions_parallelism: cloned_opts.init_regions_parallelism,
|
||||
mode: Mode::Standalone,
|
||||
..Default::default()
|
||||
}
|
||||
}
|
||||
@@ -432,7 +415,7 @@ impl StartCommand {
|
||||
&opts.component.tracing,
|
||||
None,
|
||||
);
|
||||
log_versions(version(), short_version(), APP_NAME);
|
||||
log_versions(version(), short_version());
|
||||
|
||||
info!("Standalone start command: {:#?}", self);
|
||||
info!("Standalone options: {opts:#?}");
|
||||
@@ -527,7 +510,7 @@ impl StartCommand {
|
||||
.build(),
|
||||
);
|
||||
let wal_options_allocator = Arc::new(WalOptionsAllocator::new(
|
||||
opts.wal.clone().into(),
|
||||
opts.wal.into(),
|
||||
kv_backend.clone(),
|
||||
));
|
||||
let table_meta_allocator = Arc::new(TableMetadataAllocator::new(
|
||||
@@ -550,7 +533,7 @@ impl StartCommand {
|
||||
.await?;
|
||||
|
||||
let mut frontend = FrontendBuilder::new(
|
||||
fe_opts,
|
||||
fe_opts.clone(),
|
||||
kv_backend.clone(),
|
||||
layered_cache_registry.clone(),
|
||||
catalog_manager.clone(),
|
||||
@@ -578,7 +561,7 @@ impl StartCommand {
|
||||
|
||||
let (tx, _rx) = broadcast::channel(1);
|
||||
|
||||
let servers = Services::new(opts, Arc::new(frontend.clone()), plugins)
|
||||
let servers = Services::new(fe_opts, Arc::new(frontend.clone()), plugins)
|
||||
.build()
|
||||
.await
|
||||
.context(StartFrontendSnafu)?;
|
||||
|
||||
@@ -81,7 +81,6 @@ fn test_load_datanode_example_config() {
|
||||
sst_meta_cache_size: ReadableSize::mb(128),
|
||||
vector_cache_size: ReadableSize::mb(512),
|
||||
page_cache_size: ReadableSize::mb(512),
|
||||
selector_result_cache_size: ReadableSize::mb(512),
|
||||
max_background_jobs: 4,
|
||||
experimental_write_cache_ttl: Some(Duration::from_secs(60 * 60 * 8)),
|
||||
..Default::default()
|
||||
@@ -219,7 +218,6 @@ fn test_load_standalone_example_config() {
|
||||
sst_meta_cache_size: ReadableSize::mb(128),
|
||||
vector_cache_size: ReadableSize::mb(512),
|
||||
page_cache_size: ReadableSize::mb(512),
|
||||
selector_result_cache_size: ReadableSize::mb(512),
|
||||
max_background_jobs: 4,
|
||||
experimental_write_cache_ttl: Some(Duration::from_secs(60 * 60 * 8)),
|
||||
..Default::default()
|
||||
|
||||
46
src/common/catalog/src/error.rs
Normal file
@@ -0,0 +1,46 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::any::Any;
|
||||
|
||||
use common_error::ext::ErrorExt;
|
||||
use common_error::status_code::StatusCode;
|
||||
use common_macro::stack_trace_debug;
|
||||
use snafu::{Location, Snafu};
|
||||
|
||||
#[derive(Snafu)]
|
||||
#[snafu(visibility(pub))]
|
||||
#[stack_trace_debug]
|
||||
pub enum Error {
|
||||
#[snafu(display("Invalid full table name: {}", table_name))]
|
||||
InvalidFullTableName {
|
||||
table_name: String,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
}
|
||||
|
||||
impl ErrorExt for Error {
|
||||
fn status_code(&self) -> StatusCode {
|
||||
match self {
|
||||
Error::InvalidFullTableName { .. } => StatusCode::Unexpected,
|
||||
}
|
||||
}
|
||||
|
||||
fn as_any(&self) -> &dyn Any {
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
pub type Result<T> = std::result::Result<T, Error>;
|
||||
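The new common_catalog::error::Error above carries a single variant so far. A minimal usage sketch, assuming a hypothetical parse_full_table_name helper (not part of this change), raises it through the snafu-generated context selector:

use common_catalog::error::{InvalidFullTableNameSnafu, Result};
use snafu::ensure;

// Hypothetical helper: split a "catalog.schema.table" string, failing with
// Error::InvalidFullTableName when the shape is wrong.
fn parse_full_table_name(name: &str) -> Result<(&str, &str, &str)> {
    let parts: Vec<&str> = name.splitn(3, '.').collect();
    // ensure! builds the error through the selector; the implicit `location`
    // field is filled in by snafu automatically.
    ensure!(
        parts.len() == 3 && parts.iter().all(|p| !p.is_empty()),
        InvalidFullTableNameSnafu { table_name: name }
    );
    Ok((parts[0], parts[1], parts[2]))
}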
@@ -15,6 +15,7 @@
|
||||
use consts::DEFAULT_CATALOG_NAME;
|
||||
|
||||
pub mod consts;
|
||||
pub mod error;
|
||||
|
||||
#[inline]
|
||||
pub fn format_schema_name(catalog: &str, schema: &str) -> String {
|
||||
|
||||
@@ -7,10 +7,6 @@ license.workspace = true
|
||||
[lints]
|
||||
workspace = true
|
||||
|
||||
[features]
|
||||
default = ["geo"]
|
||||
geo = ["geohash", "h3o"]
|
||||
|
||||
[dependencies]
|
||||
api.workspace = true
|
||||
arc-swap = "1.0"
|
||||
@@ -27,8 +23,6 @@ common-time.workspace = true
|
||||
common-version.workspace = true
|
||||
datafusion.workspace = true
|
||||
datatypes.workspace = true
|
||||
geohash = { version = "0.13", optional = true }
|
||||
h3o = { version = "0.6", optional = true }
|
||||
num = "0.4"
|
||||
num-traits = "0.2"
|
||||
once_cell.workspace = true
|
||||
|
||||
@@ -116,10 +116,6 @@ pub static FUNCTION_REGISTRY: Lazy<Arc<FunctionRegistry>> = Lazy::new(|| {
|
||||
SystemFunction::register(&function_registry);
|
||||
TableFunction::register(&function_registry);
|
||||
|
||||
// Geo functions
|
||||
#[cfg(feature = "geo")]
|
||||
crate::scalars::geo::GeoFunctions::register(&function_registry);
|
||||
|
||||
Arc::new(function_registry)
|
||||
});
|
||||
|
||||
|
||||
@@ -15,8 +15,6 @@
|
||||
pub mod aggregate;
|
||||
pub(crate) mod date;
|
||||
pub mod expression;
|
||||
#[cfg(feature = "geo")]
|
||||
pub mod geo;
|
||||
pub mod matches;
|
||||
pub mod math;
|
||||
pub mod numpy;
|
||||
|
||||
@@ -1,31 +0,0 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::Arc;
|
||||
mod geohash;
|
||||
mod h3;
|
||||
|
||||
use geohash::GeohashFunction;
|
||||
use h3::H3Function;
|
||||
|
||||
use crate::function_registry::FunctionRegistry;
|
||||
|
||||
pub(crate) struct GeoFunctions;
|
||||
|
||||
impl GeoFunctions {
|
||||
pub fn register(registry: &FunctionRegistry) {
|
||||
registry.register(Arc::new(GeohashFunction));
|
||||
registry.register(Arc::new(H3Function));
|
||||
}
|
||||
}
|
||||
@@ -1,135 +0,0 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::fmt;
|
||||
|
||||
use common_error::ext::{BoxedError, PlainError};
|
||||
use common_error::status_code::StatusCode;
|
||||
use common_query::error::{self, InvalidFuncArgsSnafu, Result};
|
||||
use common_query::prelude::{Signature, TypeSignature};
|
||||
use datafusion::logical_expr::Volatility;
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use datatypes::scalars::ScalarVectorBuilder;
|
||||
use datatypes::value::Value;
|
||||
use datatypes::vectors::{MutableVector, StringVectorBuilder, VectorRef};
|
||||
use geohash::Coord;
|
||||
use snafu::{ensure, ResultExt};
|
||||
|
||||
use crate::function::{Function, FunctionContext};
|
||||
|
||||
/// Function that return geohash string for a given geospatial coordinate.
|
||||
#[derive(Clone, Debug, Default)]
|
||||
pub struct GeohashFunction;
|
||||
|
||||
const NAME: &str = "geohash";
|
||||
|
||||
impl Function for GeohashFunction {
|
||||
fn name(&self) -> &str {
|
||||
NAME
|
||||
}
|
||||
|
||||
fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
|
||||
Ok(ConcreteDataType::string_datatype())
|
||||
}
|
||||
|
||||
fn signature(&self) -> Signature {
|
||||
let mut signatures = Vec::new();
|
||||
for coord_type in &[
|
||||
ConcreteDataType::float32_datatype(),
|
||||
ConcreteDataType::float64_datatype(),
|
||||
] {
|
||||
for resolution_type in &[
|
||||
ConcreteDataType::int8_datatype(),
|
||||
ConcreteDataType::int16_datatype(),
|
||||
ConcreteDataType::int32_datatype(),
|
||||
ConcreteDataType::int64_datatype(),
|
||||
ConcreteDataType::uint8_datatype(),
|
||||
ConcreteDataType::uint16_datatype(),
|
||||
ConcreteDataType::uint32_datatype(),
|
||||
ConcreteDataType::uint64_datatype(),
|
||||
] {
|
||||
signatures.push(TypeSignature::Exact(vec![
|
||||
// latitude
|
||||
coord_type.clone(),
|
||||
// longitude
|
||||
coord_type.clone(),
|
||||
// resolution
|
||||
resolution_type.clone(),
|
||||
]));
|
||||
}
|
||||
}
|
||||
Signature::one_of(signatures, Volatility::Stable)
|
||||
}
|
||||
|
||||
fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
|
||||
ensure!(
|
||||
columns.len() == 3,
|
||||
InvalidFuncArgsSnafu {
|
||||
err_msg: format!(
|
||||
"The length of the args is not correct, expect 3, provided : {}",
|
||||
columns.len()
|
||||
),
|
||||
}
|
||||
);
|
||||
|
||||
let lat_vec = &columns[0];
|
||||
let lon_vec = &columns[1];
|
||||
let resolution_vec = &columns[2];
|
||||
|
||||
let size = lat_vec.len();
|
||||
let mut results = StringVectorBuilder::with_capacity(size);
|
||||
|
||||
for i in 0..size {
|
||||
let lat = lat_vec.get(i).as_f64_lossy();
|
||||
let lon = lon_vec.get(i).as_f64_lossy();
|
||||
let r = match resolution_vec.get(i) {
|
||||
Value::Int8(v) => v as usize,
|
||||
Value::Int16(v) => v as usize,
|
||||
Value::Int32(v) => v as usize,
|
||||
Value::Int64(v) => v as usize,
|
||||
Value::UInt8(v) => v as usize,
|
||||
Value::UInt16(v) => v as usize,
|
||||
Value::UInt32(v) => v as usize,
|
||||
Value::UInt64(v) => v as usize,
|
||||
_ => unreachable!(),
|
||||
};
|
||||
|
||||
let result = match (lat, lon) {
|
||||
(Some(lat), Some(lon)) => {
|
||||
let coord = Coord { x: lon, y: lat };
|
||||
let encoded = geohash::encode(coord, r)
|
||||
.map_err(|e| {
|
||||
BoxedError::new(PlainError::new(
|
||||
format!("Geohash error: {}", e),
|
||||
StatusCode::EngineExecuteQuery,
|
||||
))
|
||||
})
|
||||
.context(error::ExecuteSnafu)?;
|
||||
Some(encoded)
|
||||
}
|
||||
_ => None,
|
||||
};
|
||||
|
||||
results.push(result.as_deref());
|
||||
}
|
||||
|
||||
Ok(results.to_vector())
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for GeohashFunction {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
write!(f, "{}", NAME)
|
||||
}
|
||||
}
|
||||
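For context, the geohash UDF shown above (removed in this diff) is a thin wrapper over geohash::encode from the geohash crate. A minimal standalone sketch of that call, independent of the UDF machinery and using arbitrary example coordinates:

use geohash::Coord;

fn main() {
    // Longitude goes into `x`, latitude into `y`, matching the UDF above.
    let coord = Coord { x: -122.3889, y: 37.76938 };
    // 9 characters of precision, mirroring the UDF's `resolution` argument.
    let cell = geohash::encode(coord, 9).expect("valid coordinate and length");
    println!("geohash: {cell}");
}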
@@ -1,145 +0,0 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::fmt;
|
||||
|
||||
use common_error::ext::{BoxedError, PlainError};
|
||||
use common_error::status_code::StatusCode;
|
||||
use common_query::error::{self, InvalidFuncArgsSnafu, Result};
|
||||
use common_query::prelude::{Signature, TypeSignature};
|
||||
use datafusion::logical_expr::Volatility;
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use datatypes::scalars::ScalarVectorBuilder;
|
||||
use datatypes::value::Value;
|
||||
use datatypes::vectors::{MutableVector, StringVectorBuilder, VectorRef};
|
||||
use h3o::{LatLng, Resolution};
|
||||
use snafu::{ensure, ResultExt};
|
||||
|
||||
use crate::function::{Function, FunctionContext};
|
||||
|
||||
/// Function that returns [h3] encoding string for a given geospatial coordinate.
|
||||
///
|
||||
/// [h3]: https://h3geo.org/
|
||||
#[derive(Clone, Debug, Default)]
|
||||
pub struct H3Function;
|
||||
|
||||
const NAME: &str = "h3";
|
||||
|
||||
impl Function for H3Function {
|
||||
fn name(&self) -> &str {
|
||||
NAME
|
||||
}
|
||||
|
||||
fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
|
||||
Ok(ConcreteDataType::string_datatype())
|
||||
}
|
||||
|
||||
fn signature(&self) -> Signature {
|
||||
let mut signatures = Vec::new();
|
||||
for coord_type in &[
|
||||
ConcreteDataType::float32_datatype(),
|
||||
ConcreteDataType::float64_datatype(),
|
||||
] {
|
||||
for resolution_type in &[
|
||||
ConcreteDataType::int8_datatype(),
|
||||
ConcreteDataType::int16_datatype(),
|
||||
ConcreteDataType::int32_datatype(),
|
||||
ConcreteDataType::int64_datatype(),
|
||||
ConcreteDataType::uint8_datatype(),
|
||||
ConcreteDataType::uint16_datatype(),
|
||||
ConcreteDataType::uint32_datatype(),
|
||||
ConcreteDataType::uint64_datatype(),
|
||||
] {
|
||||
signatures.push(TypeSignature::Exact(vec![
|
||||
// latitude
|
||||
coord_type.clone(),
|
||||
// longitude
|
||||
coord_type.clone(),
|
||||
// resolution
|
||||
resolution_type.clone(),
|
||||
]));
|
||||
}
|
||||
}
|
||||
Signature::one_of(signatures, Volatility::Stable)
|
||||
}
|
||||
|
||||
fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
|
||||
ensure!(
|
||||
columns.len() == 3,
|
||||
InvalidFuncArgsSnafu {
|
||||
err_msg: format!(
|
||||
"The length of the args is not correct, expect 3, provided : {}",
|
||||
columns.len()
|
||||
),
|
||||
}
|
||||
);
|
||||
|
||||
let lat_vec = &columns[0];
|
||||
let lon_vec = &columns[1];
|
||||
let resolution_vec = &columns[2];
|
||||
|
||||
let size = lat_vec.len();
|
||||
let mut results = StringVectorBuilder::with_capacity(size);
|
||||
|
||||
for i in 0..size {
|
||||
let lat = lat_vec.get(i).as_f64_lossy();
|
||||
let lon = lon_vec.get(i).as_f64_lossy();
|
||||
let r = match resolution_vec.get(i) {
|
||||
Value::Int8(v) => v as u8,
|
||||
Value::Int16(v) => v as u8,
|
||||
Value::Int32(v) => v as u8,
|
||||
Value::Int64(v) => v as u8,
|
||||
Value::UInt8(v) => v,
|
||||
Value::UInt16(v) => v as u8,
|
||||
Value::UInt32(v) => v as u8,
|
||||
Value::UInt64(v) => v as u8,
|
||||
_ => unreachable!(),
|
||||
};
|
||||
|
||||
let result = match (lat, lon) {
|
||||
(Some(lat), Some(lon)) => {
|
||||
let coord = LatLng::new(lat, lon)
|
||||
.map_err(|e| {
|
||||
BoxedError::new(PlainError::new(
|
||||
format!("H3 error: {}", e),
|
||||
StatusCode::EngineExecuteQuery,
|
||||
))
|
||||
})
|
||||
.context(error::ExecuteSnafu)?;
|
||||
let r = Resolution::try_from(r)
|
||||
.map_err(|e| {
|
||||
BoxedError::new(PlainError::new(
|
||||
format!("H3 error: {}", e),
|
||||
StatusCode::EngineExecuteQuery,
|
||||
))
|
||||
})
|
||||
.context(error::ExecuteSnafu)?;
|
||||
let encoded = coord.to_cell(r).to_string();
|
||||
Some(encoded)
|
||||
}
|
||||
_ => None,
|
||||
};
|
||||
|
||||
results.push(result.as_deref());
|
||||
}
|
||||
|
||||
Ok(results.to_vector())
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for H3Function {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
write!(f, "{}", NAME)
|
||||
}
|
||||
}
|
||||
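Likewise, the h3 UDF above builds on the h3o crate: LatLng::new, Resolution::try_from, and LatLng::to_cell. A minimal standalone sketch with arbitrary example values:

use h3o::{LatLng, Resolution};

fn main() {
    // Same calls the UDF makes: build a LatLng, pick a resolution, derive the cell.
    let coord = LatLng::new(37.76938, -122.3889).expect("valid latitude/longitude");
    let resolution = Resolution::try_from(8u8).expect("resolution must be in 0..=15");
    let cell = coord.to_cell(resolution);
    println!("h3 cell: {cell}");
}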
@@ -64,6 +64,12 @@ pub enum Error {
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Invalid column proto: {}", err_msg))]
|
||||
InvalidColumnProto {
|
||||
err_msg: String,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
#[snafu(display("Failed to create vector"))]
|
||||
CreateVector {
|
||||
#[snafu(implicit)]
|
||||
@@ -131,6 +137,7 @@ impl ErrorExt for Error {
|
||||
Error::DuplicatedTimestampColumn { .. }
|
||||
| Error::DuplicatedColumnName { .. }
|
||||
| Error::MissingTimestampColumn { .. } => StatusCode::InvalidArguments,
|
||||
Error::InvalidColumnProto { .. } => StatusCode::InvalidArguments,
|
||||
Error::CreateVector { .. } => StatusCode::InvalidArguments,
|
||||
Error::MissingField { .. } => StatusCode::InvalidArguments,
|
||||
Error::InvalidColumnDef { source, .. } => source.status_code(),
|
||||
|
||||
@@ -24,7 +24,7 @@ use crate::key::table_info::TableInfoKey;
|
||||
use crate::key::table_name::TableNameKey;
|
||||
use crate::key::table_route::TableRouteKey;
|
||||
use crate::key::view_info::ViewInfoKey;
|
||||
use crate::key::MetadataKey;
|
||||
use crate::key::MetaKey;
|
||||
|
||||
/// KvBackend cache invalidator
|
||||
#[async_trait::async_trait]
|
||||
|
||||
@@ -15,12 +15,12 @@
|
||||
use common_catalog::consts::METRIC_ENGINE;
|
||||
use common_error::ext::BoxedError;
|
||||
use common_procedure::error::Error as ProcedureError;
|
||||
use snafu::{ensure, OptionExt, ResultExt};
|
||||
use snafu::{ensure, location, OptionExt};
|
||||
use store_api::metric_engine_consts::LOGICAL_TABLE_METADATA_KEY;
|
||||
use table::metadata::TableId;
|
||||
|
||||
use crate::ddl::DetectingRegion;
|
||||
use crate::error::{Error, OperateDatanodeSnafu, Result, TableNotFoundSnafu, UnsupportedSnafu};
|
||||
use crate::error::{Error, Result, TableNotFoundSnafu, UnsupportedSnafu};
|
||||
use crate::key::table_name::TableNameKey;
|
||||
use crate::key::TableMetadataManagerRef;
|
||||
use crate::peer::Peer;
|
||||
@@ -32,9 +32,11 @@ use crate::ClusterId;
|
||||
pub fn add_peer_context_if_needed(datanode: Peer) -> impl FnOnce(Error) -> Error {
|
||||
move |err| {
|
||||
if !err.is_retry_later() {
|
||||
return Err::<(), BoxedError>(BoxedError::new(err))
|
||||
.context(OperateDatanodeSnafu { peer: datanode })
|
||||
.unwrap_err();
|
||||
return Error::OperateDatanode {
|
||||
location: location!(),
|
||||
peer: datanode,
|
||||
source: BoxedError::new(err),
|
||||
};
|
||||
}
|
||||
err
|
||||
}
|
||||
|
||||
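The add_peer_context_if_needed helper above returns a closure suitable for map_err. A hedged usage sketch (send_region_request and its arguments are hypothetical placeholders, not from this diff):

// Wrap a datanode RPC failure with peer context unless it is a retry-later error.
let response = send_region_request(&datanode, request)
    .await
    .map_err(add_peer_context_if_needed(datanode.clone()))?;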
@@ -21,7 +21,7 @@ use common_macro::stack_trace_debug;
|
||||
use common_wal::options::WalOptions;
|
||||
use serde_json::error::Error as JsonError;
|
||||
use snafu::{Location, Snafu};
|
||||
use store_api::storage::RegionId;
|
||||
use store_api::storage::{RegionId, RegionNumber};
|
||||
use table::metadata::TableId;
|
||||
|
||||
use crate::peer::Peer;
|
||||
@@ -49,6 +49,20 @@ pub enum Error {
|
||||
region_id: RegionId,
|
||||
},
|
||||
|
||||
#[snafu(display("Invalid result with a txn response: {}", err_msg))]
|
||||
InvalidTxnResult {
|
||||
err_msg: String,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Invalid engine type: {}", engine_type))]
|
||||
InvalidEngineType {
|
||||
engine_type: String,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to connect to Etcd"))]
|
||||
ConnectEtcd {
|
||||
#[snafu(source)]
|
||||
@@ -81,6 +95,15 @@ pub enum Error {
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Sequence out of range: {}, start={}, step={}", name, start, step))]
|
||||
SequenceOutOfRange {
|
||||
name: String,
|
||||
start: u64,
|
||||
step: u64,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Unexpected sequence value: {}", err_msg))]
|
||||
UnexpectedSequenceValue {
|
||||
err_msg: String,
|
||||
@@ -304,6 +327,13 @@ pub enum Error {
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Catalog already exists, catalog: {}", catalog))]
|
||||
CatalogAlreadyExists {
|
||||
catalog: String,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Schema already exists, catalog:{}, schema: {}", catalog, schema))]
|
||||
SchemaAlreadyExists {
|
||||
catalog: String,
|
||||
@@ -355,8 +385,15 @@ pub enum Error {
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Invalid metadata, err: {}", err_msg))]
|
||||
InvalidMetadata {
|
||||
#[snafu(display("Failed to rename table, reason: {}", reason))]
|
||||
RenameTable {
|
||||
reason: String,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Invalid table metadata, err: {}", err_msg))]
|
||||
InvalidTableMetadata {
|
||||
err_msg: String,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
@@ -386,6 +423,27 @@ pub enum Error {
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display(
|
||||
"Failed to move region {} in table {}, err: {}",
|
||||
region,
|
||||
table_id,
|
||||
err_msg
|
||||
))]
|
||||
MoveRegion {
|
||||
table_id: TableId,
|
||||
region: RegionNumber,
|
||||
err_msg: String,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Invalid catalog value"))]
|
||||
InvalidCatalogValue {
|
||||
source: common_catalog::error::Error,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("External error"))]
|
||||
External {
|
||||
#[snafu(implicit)]
|
||||
@@ -554,6 +612,13 @@ pub enum Error {
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Delimiter not found, key: {}", key))]
|
||||
DelimiterNotFound {
|
||||
key: String,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Invalid prefix: {}, key: {}", prefix, key))]
|
||||
MismatchPrefix {
|
||||
prefix: String,
|
||||
@@ -637,12 +702,15 @@ impl ErrorExt for Error {
|
||||
| ParseOption { .. }
|
||||
| RouteInfoCorrupted { .. }
|
||||
| InvalidProtoMsg { .. }
|
||||
| InvalidMetadata { .. }
|
||||
| InvalidTableMetadata { .. }
|
||||
| MoveRegion { .. }
|
||||
| Unexpected { .. }
|
||||
| TableInfoNotFound { .. }
|
||||
| NextSequence { .. }
|
||||
| SequenceOutOfRange { .. }
|
||||
| UnexpectedSequenceValue { .. }
|
||||
| InvalidHeartbeatResponse { .. }
|
||||
| InvalidTxnResult { .. }
|
||||
| EncodeJson { .. }
|
||||
| DecodeJson { .. }
|
||||
| PayloadNotExist { .. }
|
||||
@@ -666,17 +734,22 @@ impl ErrorExt for Error {
|
||||
| MetadataCorruption { .. }
|
||||
| StrFromUtf8 { .. } => StatusCode::Unexpected,
|
||||
|
||||
SendMessage { .. } | GetKvCache { .. } | CacheNotGet { .. } => StatusCode::Internal,
|
||||
SendMessage { .. } | GetKvCache { .. } | CacheNotGet { .. } | RenameTable { .. } => {
|
||||
StatusCode::Internal
|
||||
}
|
||||
|
||||
SchemaAlreadyExists { .. } => StatusCode::DatabaseAlreadyExists,
|
||||
|
||||
ProcedureNotFound { .. }
|
||||
| InvalidViewInfo { .. }
|
||||
| PrimaryKeyNotFound { .. }
|
||||
| CatalogAlreadyExists { .. }
|
||||
| EmptyKey { .. }
|
||||
| InvalidEngineType { .. }
|
||||
| AlterLogicalTablesInvalidArguments { .. }
|
||||
| CreateLogicalTablesInvalidArguments { .. }
|
||||
| MismatchPrefix { .. }
|
||||
| DelimiterNotFound { .. }
|
||||
| TlsConfig { .. } => StatusCode::InvalidArguments,
|
||||
|
||||
FlowNotFound { .. } => StatusCode::FlowNotFound,
|
||||
@@ -694,6 +767,7 @@ impl ErrorExt for Error {
|
||||
OperateDatanode { source, .. } => source.status_code(),
|
||||
Table { source, .. } => source.status_code(),
|
||||
RetryLater { source, .. } => source.status_code(),
|
||||
InvalidCatalogValue { source, .. } => source.status_code(),
|
||||
ConvertAlterTableRequest { source, .. } => source.status_code(),
|
||||
|
||||
ParseProcedureId { .. }
|
||||
|
||||
@@ -153,9 +153,6 @@ pub struct UpgradeRegion {
|
||||
/// it's helpful to verify whether the leader region is ready.
|
||||
#[serde(with = "humantime_serde")]
|
||||
pub wait_for_replay_timeout: Option<Duration>,
|
||||
/// The hint for replaying memtable.
|
||||
#[serde(default)]
|
||||
pub location_id: Option<u64>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
|
||||
|
||||
@@ -211,7 +211,7 @@ lazy_static! {
|
||||
}
|
||||
|
||||
/// The key of metadata.
|
||||
pub trait MetadataKey<'a, T> {
|
||||
pub trait MetaKey<'a, T> {
|
||||
fn to_bytes(&self) -> Vec<u8>;
|
||||
|
||||
fn from_bytes(bytes: &'a [u8]) -> Result<T>;
|
||||
@@ -226,7 +226,7 @@ impl From<Vec<u8>> for BytesAdapter {
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> MetadataKey<'a, BytesAdapter> for BytesAdapter {
|
||||
impl<'a> MetaKey<'a, BytesAdapter> for BytesAdapter {
|
||||
fn to_bytes(&self) -> Vec<u8> {
|
||||
self.0.clone()
|
||||
}
|
||||
@@ -236,7 +236,7 @@ impl<'a> MetadataKey<'a, BytesAdapter> for BytesAdapter {
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) trait MetadataKeyGetTxnOp {
|
||||
pub(crate) trait TableMetaKeyGetTxnOp {
|
||||
fn build_get_op(
|
||||
&self,
|
||||
) -> (
|
||||
@@ -245,7 +245,7 @@ pub(crate) trait MetadataKeyGetTxnOp {
|
||||
);
|
||||
}
|
||||
|
||||
pub trait MetadataValue {
|
||||
pub trait TableMetaValue {
|
||||
fn try_from_raw_value(raw_value: &[u8]) -> Result<Self>
|
||||
where
|
||||
Self: Sized;
|
||||
@@ -330,7 +330,7 @@ impl<T: DeserializeOwned + Serialize> Serialize for DeserializedValueWithBytes<T
|
||||
}
|
||||
}
|
||||
|
||||
impl<'de, T: DeserializeOwned + Serialize + MetadataValue> Deserialize<'de>
|
||||
impl<'de, T: DeserializeOwned + Serialize + TableMetaValue> Deserialize<'de>
|
||||
for DeserializedValueWithBytes<T>
|
||||
{
|
||||
/// - Deserialize behaviors:
|
||||
@@ -359,7 +359,7 @@ impl<T: Serialize + DeserializeOwned + Clone> Clone for DeserializedValueWithByt
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Serialize + DeserializeOwned + MetadataValue> DeserializedValueWithBytes<T> {
|
||||
impl<T: Serialize + DeserializeOwned + TableMetaValue> DeserializedValueWithBytes<T> {
|
||||
/// Returns a struct containing a deserialized value and an original `bytes`.
|
||||
/// It accepts original bytes of inner.
|
||||
pub fn from_inner_bytes(bytes: Bytes) -> Result<Self> {
|
||||
@@ -1156,10 +1156,10 @@ impl TableMetadataManager {
|
||||
}
|
||||
|
||||
#[macro_export]
|
||||
macro_rules! impl_metadata_value {
|
||||
macro_rules! impl_table_meta_value {
|
||||
($($val_ty: ty), *) => {
|
||||
$(
|
||||
impl $crate::key::MetadataValue for $val_ty {
|
||||
impl $crate::key::TableMetaValue for $val_ty {
|
||||
fn try_from_raw_value(raw_value: &[u8]) -> Result<Self> {
|
||||
serde_json::from_slice(raw_value).context(SerdeJsonSnafu)
|
||||
}
|
||||
@@ -1172,10 +1172,10 @@ macro_rules! impl_metadata_value {
|
||||
}
|
||||
}
|
||||
|
||||
macro_rules! impl_metadata_key_get_txn_op {
|
||||
macro_rules! impl_meta_key_get_txn_op {
|
||||
($($key: ty), *) => {
|
||||
$(
|
||||
impl $crate::key::MetadataKeyGetTxnOp for $key {
|
||||
impl $crate::key::TableMetaKeyGetTxnOp for $key {
|
||||
/// Returns a [TxnOp] to retrieve the corresponding value
|
||||
/// and a filter to retrieve the value from the [TxnOpGetResponseSet]
|
||||
fn build_get_op(
|
||||
@@ -1197,7 +1197,7 @@ macro_rules! impl_metadata_key_get_txn_op {
|
||||
}
|
||||
}
|
||||
|
||||
impl_metadata_key_get_txn_op! {
|
||||
impl_meta_key_get_txn_op! {
|
||||
TableNameKey<'_>,
|
||||
TableInfoKey,
|
||||
ViewInfoKey,
|
||||
@@ -1206,7 +1206,7 @@ impl_metadata_key_get_txn_op! {
|
||||
}
|
||||
|
||||
#[macro_export]
|
||||
macro_rules! impl_optional_metadata_value {
|
||||
macro_rules! impl_optional_meta_value {
|
||||
($($val_ty: ty), *) => {
|
||||
$(
|
||||
impl $val_ty {
|
||||
@@ -1222,7 +1222,7 @@ macro_rules! impl_optional_metadata_value {
|
||||
}
|
||||
}
|
||||
|
||||
impl_metadata_value! {
|
||||
impl_table_meta_value! {
|
||||
TableNameValue,
|
||||
TableInfoValue,
|
||||
ViewInfoValue,
|
||||
@@ -1233,7 +1233,7 @@ impl_metadata_value! {
|
||||
TableFlowValue
|
||||
}
|
||||
|
||||
impl_optional_metadata_value! {
|
||||
impl_optional_meta_value! {
|
||||
CatalogNameValue,
|
||||
SchemaNameValue
|
||||
}
|
||||
|
||||
@@ -20,8 +20,8 @@ use futures::stream::BoxStream;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
|
||||
use crate::error::{self, Error, InvalidMetadataSnafu, Result};
|
||||
use crate::key::{MetadataKey, CATALOG_NAME_KEY_PATTERN, CATALOG_NAME_KEY_PREFIX};
|
||||
use crate::error::{self, Error, InvalidTableMetadataSnafu, Result};
|
||||
use crate::key::{MetaKey, CATALOG_NAME_KEY_PATTERN, CATALOG_NAME_KEY_PREFIX};
|
||||
use crate::kv_backend::KvBackendRef;
|
||||
use crate::range_stream::{PaginationStream, DEFAULT_PAGE_SIZE};
|
||||
use crate::rpc::store::RangeRequest;
|
||||
@@ -56,14 +56,14 @@ impl<'a> CatalogNameKey<'a> {
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> MetadataKey<'a, CatalogNameKey<'a>> for CatalogNameKey<'_> {
|
||||
impl<'a> MetaKey<'a, CatalogNameKey<'a>> for CatalogNameKey<'_> {
|
||||
fn to_bytes(&self) -> Vec<u8> {
|
||||
self.to_string().into_bytes()
|
||||
}
|
||||
|
||||
fn from_bytes(bytes: &'a [u8]) -> Result<CatalogNameKey<'a>> {
|
||||
let key = std::str::from_utf8(bytes).map_err(|e| {
|
||||
InvalidMetadataSnafu {
|
||||
InvalidTableMetadataSnafu {
|
||||
err_msg: format!(
|
||||
"CatalogNameKey '{}' is not a valid UTF8 string: {e}",
|
||||
String::from_utf8_lossy(bytes)
|
||||
@@ -87,7 +87,7 @@ impl<'a> TryFrom<&'a str> for CatalogNameKey<'a> {
|
||||
fn try_from(s: &'a str) -> Result<Self> {
|
||||
let captures = CATALOG_NAME_KEY_PATTERN
|
||||
.captures(s)
|
||||
.context(InvalidMetadataSnafu {
|
||||
.context(InvalidTableMetadataSnafu {
|
||||
err_msg: format!("Illegal CatalogNameKey format: '{s}'"),
|
||||
})?;
|
||||
|
||||
|
||||
@@ -22,10 +22,10 @@ use snafu::OptionExt;
|
||||
use store_api::storage::RegionNumber;
|
||||
use table::metadata::TableId;
|
||||
|
||||
use super::MetadataKey;
|
||||
use crate::error::{InvalidMetadataSnafu, Result};
|
||||
use super::MetaKey;
|
||||
use crate::error::{InvalidTableMetadataSnafu, Result};
|
||||
use crate::key::{
|
||||
MetadataValue, RegionDistribution, DATANODE_TABLE_KEY_PATTERN, DATANODE_TABLE_KEY_PREFIX,
|
||||
RegionDistribution, TableMetaValue, DATANODE_TABLE_KEY_PATTERN, DATANODE_TABLE_KEY_PREFIX,
|
||||
};
|
||||
use crate::kv_backend::txn::{Txn, TxnOp};
|
||||
use crate::kv_backend::KvBackendRef;
|
||||
@@ -77,14 +77,14 @@ impl DatanodeTableKey {
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> MetadataKey<'a, DatanodeTableKey> for DatanodeTableKey {
|
||||
impl<'a> MetaKey<'a, DatanodeTableKey> for DatanodeTableKey {
|
||||
fn to_bytes(&self) -> Vec<u8> {
|
||||
self.to_string().into_bytes()
|
||||
}
|
||||
|
||||
fn from_bytes(bytes: &[u8]) -> Result<DatanodeTableKey> {
|
||||
let key = std::str::from_utf8(bytes).map_err(|e| {
|
||||
InvalidMetadataSnafu {
|
||||
InvalidTableMetadataSnafu {
|
||||
err_msg: format!(
|
||||
"DatanodeTableKey '{}' is not a valid UTF8 string: {e}",
|
||||
String::from_utf8_lossy(bytes)
|
||||
@@ -92,11 +92,12 @@ impl<'a> MetadataKey<'a, DatanodeTableKey> for DatanodeTableKey {
|
||||
}
|
||||
.build()
|
||||
})?;
|
||||
let captures = DATANODE_TABLE_KEY_PATTERN
|
||||
.captures(key)
|
||||
.context(InvalidMetadataSnafu {
|
||||
err_msg: format!("Invalid DatanodeTableKey '{key}'"),
|
||||
})?;
|
||||
let captures =
|
||||
DATANODE_TABLE_KEY_PATTERN
|
||||
.captures(key)
|
||||
.context(InvalidTableMetadataSnafu {
|
||||
err_msg: format!("Invalid DatanodeTableKey '{key}'"),
|
||||
})?;
|
||||
// Safety: pass the regex check above
|
||||
let datanode_id = captures[1].parse::<DatanodeId>().unwrap();
|
||||
let table_id = captures[2].parse::<TableId>().unwrap();
|
||||
|
||||
@@ -38,7 +38,7 @@ use crate::key::flow::flow_name::FlowNameManager;
|
||||
use crate::key::flow::flownode_flow::FlownodeFlowManager;
|
||||
pub use crate::key::flow::table_flow::{TableFlowManager, TableFlowManagerRef};
|
||||
use crate::key::txn_helper::TxnOpGetResponseSet;
|
||||
use crate::key::{FlowId, MetadataKey};
|
||||
use crate::key::{FlowId, MetaKey};
|
||||
use crate::kv_backend::txn::Txn;
|
||||
use crate::kv_backend::KvBackendRef;
|
||||
use crate::rpc::store::BatchDeleteRequest;
|
||||
@@ -66,7 +66,7 @@ impl<T> FlowScoped<T> {
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, T: MetadataKey<'a, T>> MetadataKey<'a, FlowScoped<T>> for FlowScoped<T> {
|
||||
impl<'a, T: MetaKey<'a, T>> MetaKey<'a, FlowScoped<T>> for FlowScoped<T> {
|
||||
fn to_bytes(&self) -> Vec<u8> {
|
||||
let prefix = FlowScoped::<T>::PREFIX.as_bytes();
|
||||
let inner = self.inner.to_bytes();
|
||||
@@ -295,7 +295,7 @@ mod tests {
|
||||
inner: Vec<u8>,
|
||||
}
|
||||
|
||||
impl<'a> MetadataKey<'a, MockKey> for MockKey {
|
||||
impl<'a> MetaKey<'a, MockKey> for MockKey {
|
||||
fn to_bytes(&self) -> Vec<u8> {
|
||||
self.inner.clone()
|
||||
}
|
||||
|
||||
@@ -25,7 +25,7 @@ use table::table_name::TableName;
|
||||
use crate::error::{self, Result};
|
||||
use crate::key::flow::FlowScoped;
|
||||
use crate::key::txn_helper::TxnOpGetResponseSet;
|
||||
use crate::key::{DeserializedValueWithBytes, FlowId, FlowPartitionId, MetadataKey, MetadataValue};
|
||||
use crate::key::{DeserializedValueWithBytes, FlowId, FlowPartitionId, MetaKey, TableMetaValue};
|
||||
use crate::kv_backend::txn::Txn;
|
||||
use crate::kv_backend::KvBackendRef;
|
||||
use crate::FlownodeId;
|
||||
@@ -42,7 +42,7 @@ lazy_static! {
|
||||
/// The layout: `__flow/info/{flow_id}`.
|
||||
pub struct FlowInfoKey(FlowScoped<FlowInfoKeyInner>);
|
||||
|
||||
impl<'a> MetadataKey<'a, FlowInfoKey> for FlowInfoKey {
|
||||
impl<'a> MetaKey<'a, FlowInfoKey> for FlowInfoKey {
|
||||
fn to_bytes(&self) -> Vec<u8> {
|
||||
self.0.to_bytes()
|
||||
}
|
||||
@@ -80,14 +80,14 @@ impl FlowInfoKeyInner {
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> MetadataKey<'a, FlowInfoKeyInner> for FlowInfoKeyInner {
|
||||
impl<'a> MetaKey<'a, FlowInfoKeyInner> for FlowInfoKeyInner {
|
||||
fn to_bytes(&self) -> Vec<u8> {
|
||||
format!("{FLOW_INFO_KEY_PREFIX}/{}", self.flow_id).into_bytes()
|
||||
}
|
||||
|
||||
fn from_bytes(bytes: &'a [u8]) -> Result<FlowInfoKeyInner> {
|
||||
let key = std::str::from_utf8(bytes).map_err(|e| {
|
||||
error::InvalidMetadataSnafu {
|
||||
error::InvalidTableMetadataSnafu {
|
||||
err_msg: format!(
|
||||
"FlowInfoKeyInner '{}' is not a valid UTF8 string: {e}",
|
||||
String::from_utf8_lossy(bytes)
|
||||
@@ -98,7 +98,7 @@ impl<'a> MetadataKey<'a, FlowInfoKeyInner> for FlowInfoKeyInner {
|
||||
let captures =
|
||||
FLOW_INFO_KEY_PATTERN
|
||||
.captures(key)
|
||||
.context(error::InvalidMetadataSnafu {
|
||||
.context(error::InvalidTableMetadataSnafu {
|
||||
err_msg: format!("Invalid FlowInfoKeyInner '{key}'"),
|
||||
})?;
|
||||
// Safety: pass the regex check above
|
||||
|
||||
@@ -24,7 +24,7 @@ use crate::error::{self, Result};
|
||||
use crate::key::flow::FlowScoped;
|
||||
use crate::key::txn_helper::TxnOpGetResponseSet;
|
||||
use crate::key::{
|
||||
BytesAdapter, DeserializedValueWithBytes, FlowId, MetadataKey, MetadataValue, NAME_PATTERN,
|
||||
BytesAdapter, DeserializedValueWithBytes, FlowId, MetaKey, TableMetaValue, NAME_PATTERN,
|
||||
};
|
||||
use crate::kv_backend::txn::Txn;
|
||||
use crate::kv_backend::KvBackendRef;
|
||||
@@ -76,7 +76,7 @@ impl<'a> FlowNameKey<'a> {
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> MetadataKey<'a, FlowNameKey<'a>> for FlowNameKey<'a> {
|
||||
impl<'a> MetaKey<'a, FlowNameKey<'a>> for FlowNameKey<'a> {
|
||||
fn to_bytes(&self) -> Vec<u8> {
|
||||
self.0.to_bytes()
|
||||
}
|
||||
@@ -95,7 +95,7 @@ pub struct FlowNameKeyInner<'a> {
|
||||
pub flow_name: &'a str,
|
||||
}
|
||||
|
||||
impl<'a> MetadataKey<'a, FlowNameKeyInner<'a>> for FlowNameKeyInner<'_> {
|
||||
impl<'a> MetaKey<'a, FlowNameKeyInner<'a>> for FlowNameKeyInner<'_> {
|
||||
fn to_bytes(&self) -> Vec<u8> {
|
||||
format!(
|
||||
"{FLOW_NAME_KEY_PREFIX}/{}/{}",
|
||||
@@ -106,7 +106,7 @@ impl<'a> MetadataKey<'a, FlowNameKeyInner<'a>> for FlowNameKeyInner<'_> {
|
||||
|
||||
fn from_bytes(bytes: &'a [u8]) -> Result<FlowNameKeyInner> {
|
||||
let key = std::str::from_utf8(bytes).map_err(|e| {
|
||||
error::InvalidMetadataSnafu {
|
||||
error::InvalidTableMetadataSnafu {
|
||||
err_msg: format!(
|
||||
"FlowNameKeyInner '{}' is not a valid UTF8 string: {e}",
|
||||
String::from_utf8_lossy(bytes)
|
||||
@@ -117,7 +117,7 @@ impl<'a> MetadataKey<'a, FlowNameKeyInner<'a>> for FlowNameKeyInner<'_> {
|
||||
let captures =
|
||||
FLOW_NAME_KEY_PATTERN
|
||||
.captures(key)
|
||||
.context(error::InvalidMetadataSnafu {
|
||||
.context(error::InvalidTableMetadataSnafu {
|
||||
err_msg: format!("Invalid FlowNameKeyInner '{key}'"),
|
||||
})?;
|
||||
// Safety: pass the regex check above
|
||||
|
||||
@@ -22,7 +22,7 @@ use snafu::OptionExt;
|
||||
|
||||
use crate::error::{self, Result};
|
||||
use crate::key::flow::FlowScoped;
|
||||
use crate::key::{BytesAdapter, FlowId, FlowPartitionId, MetadataKey, MetadataValue};
|
||||
use crate::key::{BytesAdapter, FlowId, FlowPartitionId, MetaKey, TableMetaValue};
|
||||
use crate::kv_backend::txn::{Txn, TxnOp};
|
||||
use crate::kv_backend::KvBackendRef;
|
||||
use crate::peer::Peer;
|
||||
@@ -68,7 +68,7 @@ impl FlowRouteKey {
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> MetadataKey<'a, FlowRouteKey> for FlowRouteKey {
|
||||
impl<'a> MetaKey<'a, FlowRouteKey> for FlowRouteKey {
|
||||
fn to_bytes(&self) -> Vec<u8> {
|
||||
self.0.to_bytes()
|
||||
}
|
||||
@@ -101,7 +101,7 @@ impl FlowRouteKeyInner {
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> MetadataKey<'a, FlowRouteKeyInner> for FlowRouteKeyInner {
|
||||
impl<'a> MetaKey<'a, FlowRouteKeyInner> for FlowRouteKeyInner {
|
||||
fn to_bytes(&self) -> Vec<u8> {
|
||||
format!(
|
||||
"{FLOW_ROUTE_KEY_PREFIX}/{}/{}",
|
||||
@@ -112,7 +112,7 @@ impl<'a> MetadataKey<'a, FlowRouteKeyInner> for FlowRouteKeyInner {
|
||||
|
||||
fn from_bytes(bytes: &'a [u8]) -> Result<FlowRouteKeyInner> {
|
||||
let key = std::str::from_utf8(bytes).map_err(|e| {
|
||||
error::InvalidMetadataSnafu {
|
||||
error::InvalidTableMetadataSnafu {
|
||||
err_msg: format!(
|
||||
"FlowInfoKeyInner '{}' is not a valid UTF8 string: {e}",
|
||||
String::from_utf8_lossy(bytes)
|
||||
@@ -123,7 +123,7 @@ impl<'a> MetadataKey<'a, FlowRouteKeyInner> for FlowRouteKeyInner {
|
||||
let captures =
|
||||
FLOW_ROUTE_KEY_PATTERN
|
||||
.captures(key)
|
||||
.context(error::InvalidMetadataSnafu {
|
||||
.context(error::InvalidTableMetadataSnafu {
|
||||
err_msg: format!("Invalid FlowInfoKeyInner '{key}'"),
|
||||
})?;
|
||||
// Safety: pass the regex check above
|
||||
@@ -209,7 +209,7 @@ impl FlowRouteManager {
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::FlowRouteKey;
|
||||
use crate::key::MetadataKey;
|
||||
use crate::key::MetaKey;
|
||||
|
||||
#[test]
|
||||
fn test_key_serialization() {
|
||||
|
||||
@@ -22,7 +22,7 @@ use snafu::OptionExt;
|
||||
|
||||
use crate::error::{self, Result};
|
||||
use crate::key::flow::FlowScoped;
|
||||
use crate::key::{BytesAdapter, FlowId, FlowPartitionId, MetadataKey};
|
||||
use crate::key::{BytesAdapter, FlowId, FlowPartitionId, MetaKey};
|
||||
use crate::kv_backend::txn::{Txn, TxnOp};
|
||||
use crate::kv_backend::KvBackendRef;
|
||||
use crate::range_stream::{PaginationStream, DEFAULT_PAGE_SIZE};
|
||||
@@ -44,7 +44,7 @@ const FLOWNODE_FLOW_KEY_PREFIX: &str = "flownode";
|
||||
/// The layout `__flow/flownode/{flownode_id}/{flow_id}/{partition_id}`
|
||||
pub struct FlownodeFlowKey(FlowScoped<FlownodeFlowKeyInner>);
|
||||
|
||||
impl<'a> MetadataKey<'a, FlownodeFlowKey> for FlownodeFlowKey {
|
||||
impl<'a> MetaKey<'a, FlownodeFlowKey> for FlownodeFlowKey {
|
||||
fn to_bytes(&self) -> Vec<u8> {
|
||||
self.0.to_bytes()
|
||||
}
|
||||
@@ -113,7 +113,7 @@ impl FlownodeFlowKeyInner {
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> MetadataKey<'a, FlownodeFlowKeyInner> for FlownodeFlowKeyInner {
|
||||
impl<'a> MetaKey<'a, FlownodeFlowKeyInner> for FlownodeFlowKeyInner {
|
||||
fn to_bytes(&self) -> Vec<u8> {
|
||||
format!(
|
||||
"{FLOWNODE_FLOW_KEY_PREFIX}/{}/{}/{}",
|
||||
@@ -124,7 +124,7 @@ impl<'a> MetadataKey<'a, FlownodeFlowKeyInner> for FlownodeFlowKeyInner {
|
||||
|
||||
fn from_bytes(bytes: &'a [u8]) -> Result<FlownodeFlowKeyInner> {
|
||||
let key = std::str::from_utf8(bytes).map_err(|e| {
|
||||
error::InvalidMetadataSnafu {
|
||||
error::InvalidTableMetadataSnafu {
|
||||
err_msg: format!(
|
||||
"FlownodeFlowKeyInner '{}' is not a valid UTF8 string: {e}",
|
||||
String::from_utf8_lossy(bytes)
|
||||
@@ -135,7 +135,7 @@ impl<'a> MetadataKey<'a, FlownodeFlowKeyInner> for FlownodeFlowKeyInner {
|
||||
let captures =
|
||||
FLOWNODE_FLOW_KEY_PATTERN
|
||||
.captures(key)
|
||||
.context(error::InvalidMetadataSnafu {
|
||||
.context(error::InvalidTableMetadataSnafu {
|
||||
err_msg: format!("Invalid FlownodeFlowKeyInner '{key}'"),
|
||||
})?;
|
||||
// Safety: pass the regex check above
|
||||
@@ -208,7 +208,7 @@ impl FlownodeFlowManager {
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use crate::key::flow::flownode_flow::FlownodeFlowKey;
|
||||
use crate::key::MetadataKey;
|
||||
use crate::key::MetaKey;
|
||||
|
||||
#[test]
|
||||
fn test_key_serialization() {
|
||||
|
||||
@@ -23,7 +23,7 @@ use table::metadata::TableId;
|
||||
|
||||
use crate::error::{self, Result};
|
||||
use crate::key::flow::FlowScoped;
|
||||
use crate::key::{BytesAdapter, FlowId, FlowPartitionId, MetadataKey, MetadataValue};
|
||||
use crate::key::{BytesAdapter, FlowId, FlowPartitionId, MetaKey, TableMetaValue};
|
||||
use crate::kv_backend::txn::{Txn, TxnOp};
|
||||
use crate::kv_backend::KvBackendRef;
|
||||
use crate::peer::Peer;
|
||||
@@ -56,7 +56,7 @@ struct TableFlowKeyInner {
|
||||
#[derive(Debug, PartialEq)]
|
||||
pub struct TableFlowKey(FlowScoped<TableFlowKeyInner>);
|
||||
|
||||
impl<'a> MetadataKey<'a, TableFlowKey> for TableFlowKey {
|
||||
impl<'a> MetaKey<'a, TableFlowKey> for TableFlowKey {
|
||||
fn to_bytes(&self) -> Vec<u8> {
|
||||
self.0.to_bytes()
|
||||
}
|
||||
@@ -129,7 +129,7 @@ impl TableFlowKeyInner {
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> MetadataKey<'a, TableFlowKeyInner> for TableFlowKeyInner {
|
||||
impl<'a> MetaKey<'a, TableFlowKeyInner> for TableFlowKeyInner {
|
||||
fn to_bytes(&self) -> Vec<u8> {
|
||||
format!(
|
||||
"{TABLE_FLOW_KEY_PREFIX}/{}/{}/{}/{}",
|
||||
@@ -140,7 +140,7 @@ impl<'a> MetadataKey<'a, TableFlowKeyInner> for TableFlowKeyInner {
|
||||
|
||||
fn from_bytes(bytes: &'a [u8]) -> Result<TableFlowKeyInner> {
|
||||
let key = std::str::from_utf8(bytes).map_err(|e| {
|
||||
error::InvalidMetadataSnafu {
|
||||
error::InvalidTableMetadataSnafu {
|
||||
err_msg: format!(
|
||||
"TableFlowKeyInner '{}' is not a valid UTF8 string: {e}",
|
||||
String::from_utf8_lossy(bytes)
|
||||
@@ -151,7 +151,7 @@ impl<'a> MetadataKey<'a, TableFlowKeyInner> for TableFlowKeyInner {
|
||||
let captures =
|
||||
TABLE_FLOW_KEY_PATTERN
|
||||
.captures(key)
|
||||
.context(error::InvalidMetadataSnafu {
|
||||
.context(error::InvalidTableMetadataSnafu {
|
||||
err_msg: format!("Invalid TableFlowKeyInner '{key}'"),
|
||||
})?;
|
||||
// Safety: pass the regex check above
|
||||
|
||||
@@ -23,8 +23,8 @@ use humantime_serde::re::humantime;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
|
||||
use crate::error::{self, Error, InvalidMetadataSnafu, ParseOptionSnafu, Result};
|
||||
use crate::key::{MetadataKey, SCHEMA_NAME_KEY_PATTERN, SCHEMA_NAME_KEY_PREFIX};
|
||||
use crate::error::{self, Error, InvalidTableMetadataSnafu, ParseOptionSnafu, Result};
|
||||
use crate::key::{MetaKey, SCHEMA_NAME_KEY_PATTERN, SCHEMA_NAME_KEY_PREFIX};
|
||||
use crate::kv_backend::KvBackendRef;
|
||||
use crate::range_stream::{PaginationStream, DEFAULT_PAGE_SIZE};
|
||||
use crate::rpc::store::RangeRequest;
|
||||
@@ -89,19 +89,6 @@ impl TryFrom<&HashMap<String, String>> for SchemaNameValue {
|
||||
}
|
||||
}
|
||||
|
||||
impl From<SchemaNameValue> for HashMap<String, String> {
|
||||
fn from(value: SchemaNameValue) -> Self {
|
||||
let mut opts = HashMap::new();
|
||||
if let Some(ttl) = value.ttl {
|
||||
opts.insert(
|
||||
OPT_KEY_TTL.to_string(),
|
||||
format!("{}", humantime::format_duration(ttl)),
|
||||
);
|
||||
}
|
||||
opts
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> SchemaNameKey<'a> {
|
||||
pub fn new(catalog: &'a str, schema: &'a str) -> Self {
|
||||
Self { catalog, schema }
|
||||
@@ -122,14 +109,14 @@ impl Display for SchemaNameKey<'_> {
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> MetadataKey<'a, SchemaNameKey<'a>> for SchemaNameKey<'_> {
|
||||
impl<'a> MetaKey<'a, SchemaNameKey<'a>> for SchemaNameKey<'_> {
|
||||
fn to_bytes(&self) -> Vec<u8> {
|
||||
self.to_string().into_bytes()
|
||||
}
|
||||
|
||||
fn from_bytes(bytes: &'a [u8]) -> Result<SchemaNameKey<'a>> {
|
||||
let key = std::str::from_utf8(bytes).map_err(|e| {
|
||||
InvalidMetadataSnafu {
|
||||
InvalidTableMetadataSnafu {
|
||||
err_msg: format!(
|
||||
"SchemaNameKey '{}' is not a valid UTF8 string: {e}",
|
||||
String::from_utf8_lossy(bytes)
|
||||
@@ -155,7 +142,7 @@ impl<'a> TryFrom<&'a str> for SchemaNameKey<'a> {
|
||||
fn try_from(s: &'a str) -> Result<Self> {
|
||||
let captures = SCHEMA_NAME_KEY_PATTERN
|
||||
.captures(s)
|
||||
.context(InvalidMetadataSnafu {
|
||||
.context(InvalidTableMetadataSnafu {
|
||||
err_msg: format!("Illegal SchemaNameKey format: '{s}'"),
|
||||
})?;
|
||||
|
||||
|
||||
@@ -23,9 +23,9 @@ use table::table_name::TableName;
|
||||
use table::table_reference::TableReference;
|
||||
|
||||
use super::TABLE_INFO_KEY_PATTERN;
|
||||
use crate::error::{InvalidMetadataSnafu, Result};
|
||||
use crate::error::{InvalidTableMetadataSnafu, Result};
|
||||
use crate::key::txn_helper::TxnOpGetResponseSet;
|
||||
use crate::key::{DeserializedValueWithBytes, MetadataKey, MetadataValue, TABLE_INFO_KEY_PREFIX};
|
||||
use crate::key::{DeserializedValueWithBytes, MetaKey, TableMetaValue, TABLE_INFO_KEY_PREFIX};
|
||||
use crate::kv_backend::txn::Txn;
|
||||
use crate::kv_backend::KvBackendRef;
|
||||
use crate::rpc::store::BatchGetRequest;
|
||||
@@ -51,14 +51,14 @@ impl Display for TableInfoKey {
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> MetadataKey<'a, TableInfoKey> for TableInfoKey {
|
||||
impl<'a> MetaKey<'a, TableInfoKey> for TableInfoKey {
|
||||
fn to_bytes(&self) -> Vec<u8> {
|
||||
self.to_string().into_bytes()
|
||||
}
|
||||
|
||||
fn from_bytes(bytes: &[u8]) -> Result<TableInfoKey> {
|
||||
let key = std::str::from_utf8(bytes).map_err(|e| {
|
||||
InvalidMetadataSnafu {
|
||||
InvalidTableMetadataSnafu {
|
||||
err_msg: format!(
|
||||
"TableInfoKey '{}' is not a valid UTF8 string: {e}",
|
||||
String::from_utf8_lossy(bytes)
|
||||
@@ -68,7 +68,7 @@ impl<'a> MetadataKey<'a, TableInfoKey> for TableInfoKey {
|
||||
})?;
|
||||
let captures = TABLE_INFO_KEY_PATTERN
|
||||
.captures(key)
|
||||
.context(InvalidMetadataSnafu {
|
||||
.context(InvalidTableMetadataSnafu {
|
||||
err_msg: format!("Invalid TableInfoKey '{key}'"),
|
||||
})?;
|
||||
// Safety: pass the regex check above
|
||||
|
||||
@@ -22,8 +22,8 @@ use snafu::OptionExt;
|
||||
use table::metadata::TableId;
|
||||
use table::table_name::TableName;
|
||||
|
||||
use super::{MetadataKey, MetadataValue, TABLE_NAME_KEY_PATTERN, TABLE_NAME_KEY_PREFIX};
|
||||
use crate::error::{Error, InvalidMetadataSnafu, Result};
|
||||
use super::{MetaKey, TableMetaValue, TABLE_NAME_KEY_PATTERN, TABLE_NAME_KEY_PREFIX};
|
||||
use crate::error::{Error, InvalidTableMetadataSnafu, Result};
|
||||
use crate::kv_backend::memory::MemoryKvBackend;
|
||||
use crate::kv_backend::txn::{Txn, TxnOp};
|
||||
use crate::kv_backend::KvBackendRef;
|
||||
@@ -63,14 +63,14 @@ impl Display for TableNameKey<'_> {
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> MetadataKey<'a, TableNameKey<'a>> for TableNameKey<'_> {
|
||||
impl<'a> MetaKey<'a, TableNameKey<'a>> for TableNameKey<'_> {
|
||||
fn to_bytes(&self) -> Vec<u8> {
|
||||
self.to_string().into_bytes()
|
||||
}
|
||||
|
||||
fn from_bytes(bytes: &'a [u8]) -> Result<TableNameKey<'a>> {
|
||||
let key = std::str::from_utf8(bytes).map_err(|e| {
|
||||
InvalidMetadataSnafu {
|
||||
InvalidTableMetadataSnafu {
|
||||
err_msg: format!(
|
||||
"TableNameKey '{}' is not a valid UTF8 string: {e}",
|
||||
String::from_utf8_lossy(bytes)
|
||||
@@ -80,7 +80,7 @@ impl<'a> MetadataKey<'a, TableNameKey<'a>> for TableNameKey<'_> {
|
||||
})?;
|
||||
let captures = TABLE_NAME_KEY_PATTERN
|
||||
.captures(key)
|
||||
.context(InvalidMetadataSnafu {
|
||||
.context(InvalidTableMetadataSnafu {
|
||||
err_msg: format!("Invalid TableNameKey '{key}'"),
|
||||
})?;
|
||||
let catalog = captures.get(1).unwrap().as_str();
|
||||
@@ -128,7 +128,7 @@ impl<'a> TryFrom<&'a str> for TableNameKey<'a> {
|
||||
fn try_from(s: &'a str) -> Result<Self> {
|
||||
let captures = TABLE_NAME_KEY_PATTERN
|
||||
.captures(s)
|
||||
.context(InvalidMetadataSnafu {
|
||||
.context(InvalidTableMetadataSnafu {
|
||||
err_msg: format!("Illegal TableNameKey format: '{s}'"),
|
||||
})?;
|
||||
// Safety: pass the regex check above
|
||||
|
||||
@@ -22,12 +22,12 @@ use store_api::storage::{RegionId, RegionNumber};
|
||||
use table::metadata::TableId;
|
||||
|
||||
use crate::error::{
|
||||
self, InvalidMetadataSnafu, MetadataCorruptionSnafu, Result, SerdeJsonSnafu,
|
||||
self, InvalidTableMetadataSnafu, MetadataCorruptionSnafu, Result, SerdeJsonSnafu,
|
||||
TableRouteNotFoundSnafu, UnexpectedLogicalRouteTableSnafu,
|
||||
};
|
||||
use crate::key::txn_helper::TxnOpGetResponseSet;
|
||||
use crate::key::{
|
||||
DeserializedValueWithBytes, MetadataKey, MetadataValue, RegionDistribution,
|
||||
DeserializedValueWithBytes, MetaKey, RegionDistribution, TableMetaValue,
|
||||
TABLE_ROUTE_KEY_PATTERN, TABLE_ROUTE_PREFIX,
|
||||
};
|
||||
use crate::kv_backend::txn::Txn;
|
||||
@@ -199,7 +199,7 @@ impl TableRouteValue {
|
||||
}
|
||||
}
|
||||
|
||||
impl MetadataValue for TableRouteValue {
|
||||
impl TableMetaValue for TableRouteValue {
|
||||
fn try_from_raw_value(raw_value: &[u8]) -> Result<Self> {
|
||||
let r = serde_json::from_slice::<TableRouteValue>(raw_value);
|
||||
match r {
|
||||
@@ -244,14 +244,14 @@ impl LogicalTableRouteValue {
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> MetadataKey<'a, TableRouteKey> for TableRouteKey {
|
||||
impl<'a> MetaKey<'a, TableRouteKey> for TableRouteKey {
|
||||
fn to_bytes(&self) -> Vec<u8> {
|
||||
self.to_string().into_bytes()
|
||||
}
|
||||
|
||||
fn from_bytes(bytes: &[u8]) -> Result<TableRouteKey> {
|
||||
let key = std::str::from_utf8(bytes).map_err(|e| {
|
||||
InvalidMetadataSnafu {
|
||||
InvalidTableMetadataSnafu {
|
||||
err_msg: format!(
|
||||
"TableRouteKey '{}' is not a valid UTF8 string: {e}",
|
||||
String::from_utf8_lossy(bytes)
|
||||
@@ -259,11 +259,12 @@ impl<'a> MetadataKey<'a, TableRouteKey> for TableRouteKey {
|
||||
}
|
||||
.build()
|
||||
})?;
|
||||
let captures = TABLE_ROUTE_KEY_PATTERN
|
||||
.captures(key)
|
||||
.context(InvalidMetadataSnafu {
|
||||
err_msg: format!("Invalid TableRouteKey '{key}'"),
|
||||
})?;
|
||||
let captures =
|
||||
TABLE_ROUTE_KEY_PATTERN
|
||||
.captures(key)
|
||||
.context(InvalidTableMetadataSnafu {
|
||||
err_msg: format!("Invalid TableRouteKey '{key}'"),
|
||||
})?;
|
||||
// Safety: pass the regex check above
|
||||
let table_id = captures[1].parse::<TableId>().unwrap();
|
||||
Ok(TableRouteKey { table_id })
|
||||
|
||||
@@ -16,7 +16,7 @@ use serde::de::DeserializeOwned;
use serde::Serialize;

use crate::error::Result;
use crate::key::{DeserializedValueWithBytes, MetadataValue};
use crate::key::{DeserializedValueWithBytes, TableMetaValue};
use crate::kv_backend::txn::TxnOpResponse;
use crate::rpc::KeyValue;

@@ -41,7 +41,7 @@ impl TxnOpGetResponseSet {
) -> impl FnMut(&mut TxnOpGetResponseSet) -> Result<Option<DeserializedValueWithBytes<T>>>
where
F: FnMut(&mut TxnOpGetResponseSet) -> Option<Vec<u8>>,
T: Serialize + DeserializeOwned + MetadataValue,
T: Serialize + DeserializeOwned + TableMetaValue,
{
move |set| {
f(set)

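The bound change above only renames the value trait (MetadataValue vs TableMetaValue); the decode helper itself follows a common "decode optional raw bytes, then transpose" pattern. Below is a small serde_json-based sketch of that pattern; decode_value and SchemaNameValue here are illustrative, not the crate's API.

use serde::de::DeserializeOwned;
use serde::{Deserialize, Serialize};

// Decode optional raw bytes into an optional typed value, propagating errors.
fn decode_value<T>(raw: Option<Vec<u8>>) -> serde_json::Result<Option<T>>
where
    T: Serialize + DeserializeOwned,
{
    raw.map(|bytes| serde_json::from_slice::<T>(&bytes)).transpose()
}

#[derive(Debug, Serialize, Deserialize, PartialEq)]
struct SchemaNameValue {
    ttl_secs: Option<u64>,
}

fn main() {
    let raw = serde_json::to_vec(&SchemaNameValue { ttl_secs: Some(3600) }).unwrap();
    let decoded: Option<SchemaNameValue> = decode_value(Some(raw)).unwrap();
    assert_eq!(Some(SchemaNameValue { ttl_secs: Some(3600) }), decoded);

    let missing: Option<SchemaNameValue> = decode_value(None).unwrap();
    assert_eq!(None, missing);
}
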
@@ -24,7 +24,7 @@ use table::table_name::TableName;
|
||||
use super::VIEW_INFO_KEY_PATTERN;
|
||||
use crate::error::{InvalidViewInfoSnafu, Result};
|
||||
use crate::key::txn_helper::TxnOpGetResponseSet;
|
||||
use crate::key::{DeserializedValueWithBytes, MetadataKey, MetadataValue, VIEW_INFO_KEY_PREFIX};
|
||||
use crate::key::{DeserializedValueWithBytes, MetaKey, TableMetaValue, VIEW_INFO_KEY_PREFIX};
|
||||
use crate::kv_backend::txn::Txn;
|
||||
use crate::kv_backend::KvBackendRef;
|
||||
use crate::rpc::store::BatchGetRequest;
|
||||
@@ -53,7 +53,7 @@ impl Display for ViewInfoKey {
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> MetadataKey<'a, ViewInfoKey> for ViewInfoKey {
|
||||
impl<'a> MetaKey<'a, ViewInfoKey> for ViewInfoKey {
|
||||
fn to_bytes(&self) -> Vec<u8> {
|
||||
self.to_string().into_bytes()
|
||||
}
|
||||
|
||||
@@ -13,7 +13,7 @@ workspace = true
[dependencies]
async-stream.workspace = true
async-trait.workspace = true
backon = "1"
backon = "0.4"
common-base.workspace = true
common-error.workspace = true
common-macro.workspace = true

@@ -13,6 +13,7 @@
|
||||
// limitations under the License.
|
||||
|
||||
use std::any::Any;
|
||||
use std::string::FromUtf8Error;
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_error::ext::{BoxedError, ErrorExt};
|
||||
@@ -140,6 +141,12 @@ pub enum Error {
|
||||
procedure_id: ProcedureId,
|
||||
},
|
||||
|
||||
#[snafu(display("Corrupted data, error: "))]
|
||||
CorruptedData {
|
||||
#[snafu(source)]
|
||||
error: FromUtf8Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to start the remove_outdated_meta method, error"))]
|
||||
StartRemoveOutdatedMetaTask {
|
||||
source: common_runtime::error::Error,
|
||||
@@ -154,6 +161,14 @@ pub enum Error {
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Subprocedure {} failed", subprocedure_id))]
|
||||
SubprocedureFailed {
|
||||
subprocedure_id: ProcedureId,
|
||||
source: Arc<Error>,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to parse segment key: {key}"))]
|
||||
ParseSegmentKey {
|
||||
#[snafu(implicit)]
|
||||
@@ -203,11 +218,14 @@ impl ErrorExt for Error {
|
||||
StatusCode::InvalidArguments
|
||||
}
|
||||
Error::ProcedurePanic { .. }
|
||||
| Error::CorruptedData { .. }
|
||||
| Error::ParseSegmentKey { .. }
|
||||
| Error::Unexpected { .. } => StatusCode::Unexpected,
|
||||
Error::ProcedureExec { source, .. } => source.status_code(),
|
||||
Error::StartRemoveOutdatedMetaTask { source, .. }
|
||||
| Error::StopRemoveOutdatedMetaTask { source, .. } => source.status_code(),
|
||||
|
||||
Error::SubprocedureFailed { source, .. } => source.status_code(),
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -19,11 +19,10 @@ use std::time::Duration;
use backon::{BackoffBuilder, ExponentialBuilder};
use common_telemetry::{debug, error, info};
use rand::Rng;
use snafu::ResultExt;
use tokio::time;

use super::rwlock::OwnedKeyRwLockGuard;
use crate::error::{self, ProcedurePanicSnafu, Result, RollbackTimesExceededSnafu};
use crate::error::{self, ProcedurePanicSnafu, Result};
use crate::local::{ManagerContext, ProcedureMeta, ProcedureMetaRef};
use crate::procedure::{Output, StringKey};
use crate::store::{ProcedureMessage, ProcedureStore};
@@ -223,12 +222,12 @@ impl Runner {
if let Some(d) = rollback.next() {
self.wait_on_err(d, rollback_times).await;
} else {
let err = Err::<(), Arc<Error>>(error)
.context(RollbackTimesExceededSnafu {
self.meta.set_state(ProcedureState::failed(Arc::new(
Error::RollbackTimesExceeded {
source: error.clone(),
procedure_id: self.meta.id,
})
.unwrap_err();
self.meta.set_state(ProcedureState::failed(Arc::new(err)));
},
)));
return;
}
}
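The rollback branch above pulls successive retry delays from a backon backoff iterator before giving up. Here is a minimal sketch of that usage pattern; the specific builder methods (with_min_delay, with_max_times) are assumed to be available in the pinned backon version.

use std::time::Duration;

use backon::{BackoffBuilder, ExponentialBuilder};

fn main() {
    // Build a bounded exponential backoff: each `next()` yields the delay to
    // wait before the next retry, and `None` means the retry budget is spent.
    let mut rollback = ExponentialBuilder::default()
        .with_min_delay(Duration::from_millis(100))
        .with_max_times(3)
        .build();

    while let Some(delay) = rollback.next() {
        println!("retrying rollback after {delay:?}");
    }
    println!("rollback retries exceeded");
}
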
@@ -374,7 +373,7 @@ impl Runner {
procedure,
manager_ctx: self.manager_ctx.clone(),
step,
exponential_builder: self.exponential_builder,
exponential_builder: self.exponential_builder.clone(),
store: self.store.clone(),
rolling_back: false,
};

@@ -127,6 +127,12 @@ pub enum Error {
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Not expected to run ExecutionPlan more than once"))]
|
||||
ExecuteRepeatedly {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("General DataFusion error"))]
|
||||
GeneralDataFusion {
|
||||
#[snafu(source)]
|
||||
@@ -187,6 +193,12 @@ pub enum Error {
|
||||
source: BoxedError,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to join thread"))]
|
||||
ThreadJoin {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to decode logical plan: {source}"))]
|
||||
DecodePlan {
|
||||
#[snafu(implicit)]
|
||||
@@ -277,7 +289,9 @@ impl ErrorExt for Error {
|
||||
|
||||
Error::MissingTableMutationHandler { .. }
|
||||
| Error::MissingProcedureServiceHandler { .. }
|
||||
| Error::MissingFlowServiceHandler { .. } => StatusCode::Unexpected,
|
||||
| Error::MissingFlowServiceHandler { .. }
|
||||
| Error::ExecuteRepeatedly { .. }
|
||||
| Error::ThreadJoin { .. } => StatusCode::Unexpected,
|
||||
|
||||
Error::UnsupportedInputDataType { .. }
|
||||
| Error::TypeCast { .. }
|
||||
@@ -313,6 +327,7 @@ pub fn datafusion_status_code<T: ErrorExt + 'static>(
|
||||
match e {
|
||||
DataFusionError::Internal(_) => StatusCode::Internal,
|
||||
DataFusionError::NotImplemented(_) => StatusCode::Unsupported,
|
||||
DataFusionError::ResourcesExhausted(_) => StatusCode::RuntimeResourcesExhausted,
|
||||
DataFusionError::Plan(_) => StatusCode::PlanQuery,
|
||||
DataFusionError::External(e) => {
|
||||
if let Some(ext) = (*e).downcast_ref::<T>() {
|
||||
|
||||
@@ -172,13 +172,12 @@ impl ErrorExt for Error {
|
||||
|
||||
Error::DataTypes { .. }
|
||||
| Error::CreateRecordBatches { .. }
|
||||
| Error::PollStream { .. }
|
||||
| Error::Format { .. }
|
||||
| Error::ToArrowScalar { .. }
|
||||
| Error::ProjectArrowRecordBatch { .. }
|
||||
| Error::PhysicalExpr { .. } => StatusCode::Internal,
|
||||
|
||||
Error::PollStream { .. } => StatusCode::EngineExecuteQuery,
|
||||
|
||||
Error::ArrowCompute { .. } => StatusCode::IllegalState,
|
||||
|
||||
Error::ColumnNotExists { .. } => StatusCode::TableColumnNotFound,
|
||||
|
||||
@@ -17,6 +17,7 @@ use std::any::Any;
|
||||
use common_error::ext::{BoxedError, ErrorExt};
|
||||
use common_error::status_code::StatusCode;
|
||||
use common_macro::stack_trace_debug;
|
||||
use datafusion::error::DataFusionError;
|
||||
use prost::{DecodeError, EncodeError};
|
||||
use snafu::{Location, Snafu};
|
||||
|
||||
@@ -40,6 +41,14 @@ pub enum Error {
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Internal error from DataFusion"))]
|
||||
DFInternal {
|
||||
#[snafu(source)]
|
||||
error: DataFusionError,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Internal error"))]
|
||||
Internal {
|
||||
#[snafu(implicit)]
|
||||
@@ -47,6 +56,12 @@ pub enum Error {
|
||||
source: BoxedError,
|
||||
},
|
||||
|
||||
#[snafu(display("Cannot convert plan doesn't belong to GreptimeDB"))]
|
||||
UnknownPlan {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to encode DataFusion plan"))]
|
||||
EncodeDfPlan {
|
||||
#[snafu(source)]
|
||||
@@ -69,8 +84,10 @@ pub type Result<T> = std::result::Result<T, Error>;
|
||||
impl ErrorExt for Error {
|
||||
fn status_code(&self) -> StatusCode {
|
||||
match self {
|
||||
Error::EncodeRel { .. } | Error::DecodeRel { .. } => StatusCode::InvalidArguments,
|
||||
Error::Internal { .. } => StatusCode::Internal,
|
||||
Error::UnknownPlan { .. } | Error::EncodeRel { .. } | Error::DecodeRel { .. } => {
|
||||
StatusCode::InvalidArguments
|
||||
}
|
||||
Error::DFInternal { .. } | Error::Internal { .. } => StatusCode::Internal,
|
||||
Error::EncodeDfPlan { .. } | Error::DecodeDfPlan { .. } => StatusCode::Unexpected,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -34,4 +34,4 @@ tracing = "0.1"
tracing-appender = "0.2"
tracing-log = "0.1"
tracing-opentelemetry = "0.22.0"
tracing-subscriber = { version = "0.3", features = ["env-filter", "json", "fmt"] }
tracing-subscriber = { version = "0.3", features = ["env-filter"] }

@@ -21,7 +21,7 @@ mod panic_hook;
pub mod tracing_context;
mod tracing_sampler;

pub use logging::{init_default_ut_logging, init_global_logging, RELOAD_HANDLE};
pub use logging::{init_default_ut_logging, init_global_logging};
pub use metric::dump_metrics;
pub use panic_hook::set_panic_hook;
pub use {common_error, tracing, tracing_subscriber};
pub use {common_error, tracing};

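The RELOAD_HANDLE re-export removed above backs runtime log-level reloading via tracing-subscriber's reload layer. A standalone sketch of that mechanism follows, with the registry composition reduced to a single fmt layer; the real setup in the repository wires in more layers.

use tracing_subscriber::filter::Targets;
use tracing_subscriber::prelude::*;
use tracing_subscriber::{reload, Registry};

fn main() {
    // Wrap the target filter in a reload layer and keep the handle around.
    let filter: Targets = "info".parse().expect("valid filter");
    let (dyn_filter, reload_handle) = reload::Layer::new(filter);

    Registry::default()
        .with(dyn_filter)
        .with(tracing_subscriber::fmt::layer())
        .init();

    tracing::debug!("dropped by the initial info-level filter");

    // Swap in a more verbose filter at runtime through the handle.
    reload_handle
        .reload("debug".parse::<Targets>().expect("valid filter"))
        .expect("reload log filter");
    tracing::debug!("visible after the reload");
}
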
@@ -16,7 +16,7 @@
use std::env;
use std::sync::{Arc, Mutex, Once};

use once_cell::sync::{Lazy, OnceCell};
use once_cell::sync::Lazy;
use opentelemetry::{global, KeyValue};
use opentelemetry_otlp::WithExportConfig;
use opentelemetry_sdk::propagation::TraceContextPropagator;
@@ -26,7 +26,6 @@ use serde::{Deserialize, Serialize};
use tracing_appender::non_blocking::WorkerGuard;
use tracing_appender::rolling::{RollingFileAppender, Rotation};
use tracing_log::LogTracer;
use tracing_subscriber::filter::Targets;
use tracing_subscriber::fmt::Layer;
use tracing_subscriber::layer::SubscriberExt;
use tracing_subscriber::prelude::*;
@@ -36,41 +35,15 @@ use crate::tracing_sampler::{create_sampler, TracingSampleOptions};

pub const DEFAULT_OTLP_ENDPOINT: &str = "http://localhost:4317";

// Handle for reloading log level
pub static RELOAD_HANDLE: OnceCell<tracing_subscriber::reload::Handle<Targets, Registry>> =
OnceCell::new();

/// The logging options that used to initialize the logger.
|
||||
#[derive(Clone, Debug, Serialize, Deserialize)]
|
||||
#[serde(default)]
|
||||
pub struct LoggingOptions {
|
||||
/// The directory to store log files. If not set, logs will be written to stdout.
|
||||
pub dir: String,
|
||||
|
||||
/// The log level that can be one of "trace", "debug", "info", "warn", "error". Default is "info".
|
||||
pub level: Option<String>,
|
||||
|
||||
/// The log format that can be one of "json" or "text". Default is "text".
|
||||
pub log_format: LogFormat,
|
||||
|
||||
/// Whether to append logs to stdout. Default is true.
|
||||
pub append_stdout: bool,
|
||||
|
||||
/// Whether to enable tracing with OTLP. Default is false.
|
||||
pub enable_otlp_tracing: bool,
|
||||
|
||||
/// The endpoint of OTLP. Default is "http://localhost:4317".
|
||||
pub otlp_endpoint: Option<String>,
|
||||
|
||||
/// The tracing sample ratio.
|
||||
pub tracing_sample_ratio: Option<TracingSampleOptions>,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Copy, PartialEq, Eq, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "snake_case")]
|
||||
pub enum LogFormat {
|
||||
Json,
|
||||
Text,
|
||||
pub append_stdout: bool,
|
||||
}
|
||||
|
||||
impl PartialEq for LoggingOptions {
|
||||
@@ -91,7 +64,6 @@ impl Default for LoggingOptions {
|
||||
Self {
|
||||
dir: "/tmp/greptimedb/logs".to_string(),
|
||||
level: None,
|
||||
log_format: LogFormat::Text,
|
||||
enable_otlp_tracing: false,
|
||||
otlp_endpoint: None,
|
||||
tracing_sample_ratio: None,
|
||||
@@ -156,103 +128,62 @@ pub fn init_global_logging(
|
||||
let mut guards = vec![];
|
||||
|
||||
START.call_once(|| {
|
||||
let dir = &opts.dir;
|
||||
let level = &opts.level;
|
||||
let enable_otlp_tracing = opts.enable_otlp_tracing;
|
||||
|
||||
// Enable log compatible layer to convert log record to tracing span.
|
||||
LogTracer::init().expect("log tracer must be valid");
|
||||
|
||||
// Configure the stdout logging layer.
|
||||
// stdout log layer.
|
||||
let stdout_logging_layer = if opts.append_stdout {
|
||||
let (writer, guard) = tracing_appender::non_blocking(std::io::stdout());
|
||||
guards.push(guard);
|
||||
let (stdout_writer, stdout_guard) = tracing_appender::non_blocking(std::io::stdout());
|
||||
guards.push(stdout_guard);
|
||||
|
||||
if opts.log_format == LogFormat::Json {
|
||||
Some(
|
||||
Layer::new()
|
||||
.json()
|
||||
.with_writer(writer)
|
||||
.with_ansi(atty::is(atty::Stream::Stdout))
|
||||
.boxed(),
|
||||
)
|
||||
} else {
|
||||
Some(
|
||||
Layer::new()
|
||||
.with_writer(writer)
|
||||
.with_ansi(atty::is(atty::Stream::Stdout))
|
||||
.boxed(),
|
||||
)
|
||||
}
|
||||
Some(
|
||||
Layer::new()
|
||||
.with_writer(stdout_writer)
|
||||
.with_ansi(atty::is(atty::Stream::Stdout)),
|
||||
)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
// Configure the file logging layer with rolling policy.
|
||||
let file_logging_layer = if !opts.dir.is_empty() {
|
||||
let rolling_appender =
|
||||
RollingFileAppender::new(Rotation::HOURLY, &opts.dir, "greptimedb");
|
||||
let (writer, guard) = tracing_appender::non_blocking(rolling_appender);
|
||||
guards.push(guard);
|
||||
// file log layer.
|
||||
let rolling_appender = RollingFileAppender::new(Rotation::HOURLY, dir, app_name);
|
||||
let (rolling_writer, rolling_writer_guard) =
|
||||
tracing_appender::non_blocking(rolling_appender);
|
||||
let file_logging_layer = Layer::new().with_writer(rolling_writer).with_ansi(false);
|
||||
guards.push(rolling_writer_guard);
|
||||
|
||||
if opts.log_format == LogFormat::Json {
|
||||
Some(
|
||||
Layer::new()
|
||||
.json()
|
||||
.with_writer(writer)
|
||||
.with_ansi(false)
|
||||
.boxed(),
|
||||
)
|
||||
} else {
|
||||
Some(Layer::new().with_writer(writer).with_ansi(false).boxed())
|
||||
}
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
// Configure the error file logging layer with rolling policy.
|
||||
let err_file_logging_layer = if !opts.dir.is_empty() {
|
||||
let rolling_appender =
|
||||
RollingFileAppender::new(Rotation::HOURLY, &opts.dir, "greptimedb-err");
|
||||
let (writer, guard) = tracing_appender::non_blocking(rolling_appender);
|
||||
guards.push(guard);
|
||||
|
||||
if opts.log_format == LogFormat::Json {
|
||||
Some(
|
||||
Layer::new()
|
||||
.json()
|
||||
.with_writer(writer)
|
||||
.with_ansi(false)
|
||||
.with_filter(filter::LevelFilter::ERROR)
|
||||
.boxed(),
|
||||
)
|
||||
} else {
|
||||
Some(
|
||||
Layer::new()
|
||||
.with_writer(writer)
|
||||
.with_ansi(false)
|
||||
.with_filter(filter::LevelFilter::ERROR)
|
||||
.boxed(),
|
||||
)
|
||||
}
|
||||
} else {
|
||||
None
|
||||
};
|
||||
// error file log layer.
|
||||
let err_rolling_appender =
|
||||
RollingFileAppender::new(Rotation::HOURLY, dir, format!("{}-{}", app_name, "err"));
|
||||
let (err_rolling_writer, err_rolling_writer_guard) =
|
||||
tracing_appender::non_blocking(err_rolling_appender);
|
||||
let err_file_logging_layer = Layer::new()
|
||||
.with_writer(err_rolling_writer)
|
||||
.with_ansi(false);
|
||||
guards.push(err_rolling_writer_guard);
|
||||
|
||||
// resolve log level settings from:
|
||||
// - options from command line or config files
|
||||
// - environment variable: RUST_LOG
|
||||
// - default settings
|
||||
let filter = opts
|
||||
.level
|
||||
let rust_log_env = std::env::var(EnvFilter::DEFAULT_ENV).ok();
|
||||
let targets_string = level
|
||||
.as_deref()
|
||||
.or(env::var(EnvFilter::DEFAULT_ENV).ok().as_deref())
|
||||
.unwrap_or(DEFAULT_LOG_TARGETS)
|
||||
.or(rust_log_env.as_deref())
|
||||
.unwrap_or(DEFAULT_LOG_TARGETS);
|
||||
let filter = targets_string
|
||||
.parse::<filter::Targets>()
|
||||
.expect("error parsing log level string");
|
||||
|
||||
let (dyn_filter, reload_handle) = tracing_subscriber::reload::Layer::new(filter.clone());
|
||||
|
||||
RELOAD_HANDLE
|
||||
.set(reload_handle)
|
||||
.expect("reload handle already set, maybe init_global_logging get called twice?");
|
||||
|
||||
let sampler = opts
|
||||
.tracing_sample_ratio
|
||||
.as_ref()
|
||||
.map(create_sampler)
|
||||
.map(Sampler::ParentBased)
|
||||
.unwrap_or(Sampler::ParentBased(Box::new(Sampler::AlwaysOn)));
|
||||
// Must enable 'tokio_unstable' cfg to use this feature.
|
||||
// For example: `RUSTFLAGS="--cfg tokio_unstable" cargo run -F common-telemetry/console -- standalone start`
|
||||
#[cfg(feature = "tokio-console")]
|
||||
@@ -273,70 +204,59 @@ pub fn init_global_logging(
|
||||
None
|
||||
};
|
||||
|
||||
let stdout_logging_layer = stdout_logging_layer.map(|x| x.with_filter(filter.clone()));
|
||||
|
||||
let file_logging_layer = file_logging_layer.with_filter(filter);
|
||||
|
||||
Registry::default()
|
||||
.with(dyn_filter)
|
||||
.with(tokio_console_layer)
|
||||
.with(stdout_logging_layer)
|
||||
.with(file_logging_layer)
|
||||
.with(err_file_logging_layer)
|
||||
.with(err_file_logging_layer.with_filter(filter::LevelFilter::ERROR))
|
||||
};
|
||||
|
||||
// consume the `tracing_opts` to avoid "unused" warnings.
|
||||
// consume the `tracing_opts`, to avoid "unused" warnings
|
||||
let _ = tracing_opts;
|
||||
|
||||
#[cfg(not(feature = "tokio-console"))]
|
||||
let subscriber = Registry::default()
|
||||
.with(dyn_filter)
|
||||
.with(filter)
|
||||
.with(stdout_logging_layer)
|
||||
.with(file_logging_layer)
|
||||
.with(err_file_logging_layer);
|
||||
.with(err_file_logging_layer.with_filter(filter::LevelFilter::ERROR));
|
||||
|
||||
if opts.enable_otlp_tracing {
|
||||
if enable_otlp_tracing {
|
||||
global::set_text_map_propagator(TraceContextPropagator::new());
|
||||
|
||||
let sampler = opts
|
||||
.tracing_sample_ratio
|
||||
.as_ref()
|
||||
.map(create_sampler)
|
||||
.map(Sampler::ParentBased)
|
||||
.unwrap_or(Sampler::ParentBased(Box::new(Sampler::AlwaysOn)));
|
||||
|
||||
let trace_config = opentelemetry_sdk::trace::config()
|
||||
.with_sampler(sampler)
|
||||
.with_resource(opentelemetry_sdk::Resource::new(vec![
|
||||
KeyValue::new(resource::SERVICE_NAME, app_name.to_string()),
|
||||
KeyValue::new(
|
||||
resource::SERVICE_INSTANCE_ID,
|
||||
node_id.unwrap_or("none".to_string()),
|
||||
),
|
||||
KeyValue::new(resource::SERVICE_VERSION, env!("CARGO_PKG_VERSION")),
|
||||
KeyValue::new(resource::PROCESS_PID, std::process::id().to_string()),
|
||||
]));
|
||||
|
||||
let exporter = opentelemetry_otlp::new_exporter().tonic().with_endpoint(
|
||||
opts.otlp_endpoint
|
||||
.as_ref()
|
||||
.map(|e| {
|
||||
if e.starts_with("http") {
|
||||
e.to_string()
|
||||
} else {
|
||||
format!("http://{}", e)
|
||||
}
|
||||
})
|
||||
.unwrap_or(DEFAULT_OTLP_ENDPOINT.to_string()),
|
||||
);
|
||||
|
||||
// otlp exporter
|
||||
let tracer = opentelemetry_otlp::new_pipeline()
|
||||
.tracing()
|
||||
.with_exporter(exporter)
|
||||
.with_trace_config(trace_config)
|
||||
.with_exporter(
|
||||
opentelemetry_otlp::new_exporter().tonic().with_endpoint(
|
||||
opts.otlp_endpoint
|
||||
.as_ref()
|
||||
.map(|e| format!("http://{}", e))
|
||||
.unwrap_or(DEFAULT_OTLP_ENDPOINT.to_string()),
|
||||
),
|
||||
)
|
||||
.with_trace_config(
|
||||
opentelemetry_sdk::trace::config()
|
||||
.with_sampler(sampler)
|
||||
.with_resource(opentelemetry_sdk::Resource::new(vec![
|
||||
KeyValue::new(resource::SERVICE_NAME, app_name.to_string()),
|
||||
KeyValue::new(
|
||||
resource::SERVICE_INSTANCE_ID,
|
||||
node_id.unwrap_or("none".to_string()),
|
||||
),
|
||||
KeyValue::new(resource::SERVICE_VERSION, env!("CARGO_PKG_VERSION")),
|
||||
KeyValue::new(resource::PROCESS_PID, std::process::id().to_string()),
|
||||
])),
|
||||
)
|
||||
.install_batch(opentelemetry_sdk::runtime::Tokio)
|
||||
.expect("otlp tracer install failed");
|
||||
|
||||
tracing::subscriber::set_global_default(
|
||||
subscriber.with(tracing_opentelemetry::layer().with_tracer(tracer)),
|
||||
)
|
||||
.expect("error setting global tracing subscriber");
|
||||
let tracing_layer = Some(tracing_opentelemetry::layer().with_tracer(tracer));
|
||||
let subscriber = subscriber.with(tracing_layer);
|
||||
tracing::subscriber::set_global_default(subscriber)
|
||||
.expect("error setting global tracing subscriber");
|
||||
} else {
|
||||
tracing::subscriber::set_global_default(subscriber)
|
||||
.expect("error setting global tracing subscriber");
|
||||
|
||||
@@ -46,6 +46,13 @@ pub enum Error {
location: Location,
},

#[snafu(display("Failed to parse a string into Interval, raw string: {}", raw))]
ParseInterval {
raw: String,
#[snafu(implicit)]
location: Location,
},

#[snafu(display("Current timestamp overflow"))]
TimestampOverflow {
#[snafu(source)]
@@ -108,6 +115,7 @@ impl ErrorExt for Error {
Error::InvalidDateStr { .. } | Error::ArithmeticOverflow { .. } => {
StatusCode::InvalidArguments
}
Error::ParseInterval { .. } => StatusCode::InvalidArguments,
}
}

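The new ParseInterval variant above follows the usual snafu pattern: a display attribute plus a generated context selector for raising the error. A reduced sketch of that pattern follows; the variant keeps the field name from the diff, while everything else (the parser, the Result alias) is illustrative.

use snafu::Snafu;

#[derive(Debug, Snafu)]
pub enum Error {
    #[snafu(display("Failed to parse a string into Interval, raw string: {}", raw))]
    ParseInterval { raw: String },
}

pub type Result<T> = std::result::Result<T, Error>;

fn parse_interval(raw: &str) -> Result<u64> {
    // A real parser would go here; always fail to exercise the error path.
    ParseIntervalSnafu { raw }.fail()
}

fn main() {
    let err = parse_interval("5 parsecs").unwrap_err();
    println!("{err}");
}
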
@@ -47,8 +47,7 @@ pub enum ObjectStoreConfig {
}

impl ObjectStoreConfig {
/// Returns the object storage type name, such as `S3`, `Oss` etc.
pub fn provider_name(&self) -> &'static str {
pub fn name(&self) -> &'static str {
match self {
Self::File(_) => "File",
Self::S3(_) => "S3",
@@ -57,24 +56,6 @@ impl ObjectStoreConfig {
Self::Gcs(_) => "Gcs",
}
}

/// Returns the object storage configuration name, return the provider name if it's empty.
pub fn config_name(&self) -> &str {
let name = match self {
// file storage doesn't support name
Self::File(_) => self.provider_name(),
Self::S3(s3) => &s3.name,
Self::Oss(oss) => &oss.name,
Self::Azblob(az) => &az.name,
Self::Gcs(gcs) => &gcs.name,
};

if name.trim().is_empty() {
return self.provider_name();
}

name
}
}

/// Storage engine config
@@ -85,7 +66,6 @@ pub struct StorageConfig {
pub data_home: String,
#[serde(flatten)]
pub store: ObjectStoreConfig,
/// Object storage providers
pub providers: Vec<ObjectStoreConfig>,
}

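The removed config_name above falls back to the provider name whenever the user-supplied name is empty. The following is a trimmed, self-contained sketch of that fallback logic with only two variants; the real enum carries full per-provider configs.

#[derive(Default)]
struct S3Config {
    name: String,
}

enum ObjectStoreConfig {
    File,
    S3(S3Config),
}

impl ObjectStoreConfig {
    fn provider_name(&self) -> &'static str {
        match self {
            Self::File => "File",
            Self::S3(_) => "S3",
        }
    }

    // Prefer the configured name, fall back to the provider name when empty.
    fn config_name(&self) -> &str {
        let name = match self {
            Self::File => self.provider_name(),
            Self::S3(s3) => &s3.name,
        };
        if name.trim().is_empty() {
            return self.provider_name();
        }
        name
    }
}

fn main() {
    let file = ObjectStoreConfig::File;
    assert_eq!("File", file.config_name());

    let unnamed = ObjectStoreConfig::S3(S3Config::default());
    assert_eq!("S3", unnamed.config_name());

    let named = ObjectStoreConfig::S3(S3Config { name: "test".into() });
    assert_eq!("test", named.config_name());
}
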
@@ -115,7 +95,6 @@ pub struct ObjectStorageCacheConfig {
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
#[serde(default)]
|
||||
pub struct S3Config {
|
||||
pub name: String,
|
||||
pub bucket: String,
|
||||
pub root: String,
|
||||
#[serde(skip_serializing)]
|
||||
@@ -130,8 +109,7 @@ pub struct S3Config {
|
||||
|
||||
impl PartialEq for S3Config {
|
||||
fn eq(&self, other: &Self) -> bool {
|
||||
self.name == other.name
|
||||
&& self.bucket == other.bucket
|
||||
self.bucket == other.bucket
|
||||
&& self.root == other.root
|
||||
&& self.access_key_id.expose_secret() == other.access_key_id.expose_secret()
|
||||
&& self.secret_access_key.expose_secret() == other.secret_access_key.expose_secret()
|
||||
@@ -144,7 +122,6 @@ impl PartialEq for S3Config {
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
#[serde(default)]
|
||||
pub struct OssConfig {
|
||||
pub name: String,
|
||||
pub bucket: String,
|
||||
pub root: String,
|
||||
#[serde(skip_serializing)]
|
||||
@@ -158,8 +135,7 @@ pub struct OssConfig {
|
||||
|
||||
impl PartialEq for OssConfig {
|
||||
fn eq(&self, other: &Self) -> bool {
|
||||
self.name == other.name
|
||||
&& self.bucket == other.bucket
|
||||
self.bucket == other.bucket
|
||||
&& self.root == other.root
|
||||
&& self.access_key_id.expose_secret() == other.access_key_id.expose_secret()
|
||||
&& self.access_key_secret.expose_secret() == other.access_key_secret.expose_secret()
|
||||
@@ -171,7 +147,6 @@ impl PartialEq for OssConfig {
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
#[serde(default)]
|
||||
pub struct AzblobConfig {
|
||||
pub name: String,
|
||||
pub container: String,
|
||||
pub root: String,
|
||||
#[serde(skip_serializing)]
|
||||
@@ -186,8 +161,7 @@ pub struct AzblobConfig {
|
||||
|
||||
impl PartialEq for AzblobConfig {
|
||||
fn eq(&self, other: &Self) -> bool {
|
||||
self.name == other.name
|
||||
&& self.container == other.container
|
||||
self.container == other.container
|
||||
&& self.root == other.root
|
||||
&& self.account_name.expose_secret() == other.account_name.expose_secret()
|
||||
&& self.account_key.expose_secret() == other.account_key.expose_secret()
|
||||
@@ -200,7 +174,6 @@ impl PartialEq for AzblobConfig {
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
#[serde(default)]
|
||||
pub struct GcsConfig {
|
||||
pub name: String,
|
||||
pub root: String,
|
||||
pub bucket: String,
|
||||
pub scope: String,
|
||||
@@ -215,8 +188,7 @@ pub struct GcsConfig {
|
||||
|
||||
impl PartialEq for GcsConfig {
|
||||
fn eq(&self, other: &Self) -> bool {
|
||||
self.name == other.name
|
||||
&& self.root == other.root
|
||||
self.root == other.root
|
||||
&& self.bucket == other.bucket
|
||||
&& self.scope == other.scope
|
||||
&& self.credential_path.expose_secret() == other.credential_path.expose_secret()
|
||||
@@ -229,7 +201,6 @@ impl PartialEq for GcsConfig {
|
||||
impl Default for S3Config {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
name: String::default(),
|
||||
bucket: String::default(),
|
||||
root: String::default(),
|
||||
access_key_id: SecretString::from(String::default()),
|
||||
@@ -244,7 +215,6 @@ impl Default for S3Config {
|
||||
impl Default for OssConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
name: String::default(),
|
||||
bucket: String::default(),
|
||||
root: String::default(),
|
||||
access_key_id: SecretString::from(String::default()),
|
||||
@@ -258,7 +228,6 @@ impl Default for OssConfig {
|
||||
impl Default for AzblobConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
name: String::default(),
|
||||
container: String::default(),
|
||||
root: String::default(),
|
||||
account_name: SecretString::from(String::default()),
|
||||
@@ -273,7 +242,6 @@ impl Default for AzblobConfig {
|
||||
impl Default for GcsConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
name: String::default(),
|
||||
root: String::default(),
|
||||
bucket: String::default(),
|
||||
scope: String::default(),
|
||||
@@ -387,23 +355,6 @@ mod tests {
|
||||
let _parsed: DatanodeOptions = toml::from_str(&toml_string).unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_config_name() {
|
||||
let object_store_config = ObjectStoreConfig::default();
|
||||
assert_eq!("File", object_store_config.config_name());
|
||||
|
||||
let s3_config = ObjectStoreConfig::S3(S3Config::default());
|
||||
assert_eq!("S3", s3_config.config_name());
|
||||
assert_eq!("S3", s3_config.provider_name());
|
||||
|
||||
let s3_config = ObjectStoreConfig::S3(S3Config {
|
||||
name: "test".to_string(),
|
||||
..Default::default()
|
||||
});
|
||||
assert_eq!("test", s3_config.config_name());
|
||||
assert_eq!("S3", s3_config.provider_name());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_secstr() {
|
||||
let toml_str = r#"
|
||||
|
||||
@@ -273,11 +273,11 @@ impl DatanodeBuilder {
/// Builds [ObjectStoreManager] from [StorageConfig].
pub async fn build_object_store_manager(cfg: &StorageConfig) -> Result<ObjectStoreManagerRef> {
let object_store = store::new_object_store(cfg.store.clone(), &cfg.data_home).await?;
let default_name = cfg.store.config_name();
let default_name = cfg.store.name();
let mut object_store_manager = ObjectStoreManager::new(default_name, object_store);
for store in &cfg.providers {
object_store_manager.add(
store.config_name(),
store.name(),
store::new_object_store(store.clone(), &cfg.data_home).await?,
);
}

@@ -98,6 +98,13 @@ pub enum Error {
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display(
|
||||
"Columns and values number mismatch, columns: {}, values: {}",
|
||||
columns,
|
||||
values
|
||||
))]
|
||||
ColumnValuesNumberMismatch { columns: usize, values: usize },
|
||||
|
||||
#[snafu(display("Failed to delete value from table: {}", table_name))]
|
||||
Delete {
|
||||
table_name: String,
|
||||
@@ -149,6 +156,13 @@ pub enum Error {
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Runtime resource error"))]
|
||||
RuntimeResource {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: common_runtime::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Expect KvBackend but not found"))]
|
||||
MissingKvBackend {
|
||||
#[snafu(implicit)]
|
||||
@@ -158,6 +172,16 @@ pub enum Error {
|
||||
#[snafu(display("Invalid SQL, error: {}", msg))]
|
||||
InvalidSql { msg: String },
|
||||
|
||||
#[snafu(display("Not support SQL, error: {}", msg))]
|
||||
NotSupportSql { msg: String },
|
||||
|
||||
#[snafu(display("Specified timestamp key or primary key column not found: {}", name))]
|
||||
KeyColumnNotFound {
|
||||
name: String,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Illegal primary keys definition: {}", msg))]
|
||||
IllegalPrimaryKeysDef {
|
||||
msg: String,
|
||||
@@ -186,6 +210,14 @@ pub enum Error {
|
||||
source: meta_client::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display(
|
||||
"Table id provider not found, cannot execute SQL directly on datanode in distributed mode"
|
||||
))]
|
||||
TableIdProviderNotFound {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Missing node id in Datanode config"))]
|
||||
MissingNodeId {
|
||||
#[snafu(implicit)]
|
||||
@@ -199,6 +231,9 @@ pub enum Error {
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Cannot find requested database: {}-{}", catalog, schema))]
|
||||
DatabaseNotFound { catalog: String, schema: String },
|
||||
|
||||
#[snafu(display(
|
||||
"No valid default value can be built automatically, column: {}",
|
||||
column,
|
||||
@@ -229,6 +264,12 @@ pub enum Error {
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Missing WAL dir config"))]
|
||||
MissingWalDirConfig {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Unexpected, violated: {}", violated))]
|
||||
Unexpected {
|
||||
violated: String,
|
||||
@@ -279,6 +320,13 @@ pub enum Error {
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Unsupported gRPC request, kind: {}", kind))]
|
||||
UnsupportedGrpcRequest {
|
||||
kind: String,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Unsupported output type, expected: {}", expected))]
|
||||
UnsupportedOutput {
|
||||
expected: String,
|
||||
@@ -347,6 +395,20 @@ pub enum Error {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to setup plugin"))]
|
||||
SetupPlugin {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: BoxedError,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to start plugin"))]
|
||||
StartPlugin {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: BoxedError,
|
||||
},
|
||||
}
|
||||
|
||||
pub type Result<T> = std::result::Result<T, Error>;
|
||||
@@ -368,14 +430,19 @@ impl ErrorExt for Error {
|
||||
|
||||
Delete { source, .. } => source.status_code(),
|
||||
|
||||
InvalidSql { .. }
|
||||
ColumnValuesNumberMismatch { .. }
|
||||
| InvalidSql { .. }
|
||||
| NotSupportSql { .. }
|
||||
| KeyColumnNotFound { .. }
|
||||
| IllegalPrimaryKeysDef { .. }
|
||||
| MissingTimestampColumn { .. }
|
||||
| CatalogNotFound { .. }
|
||||
| SchemaNotFound { .. }
|
||||
| SchemaExists { .. }
|
||||
| DatabaseNotFound { .. }
|
||||
| MissingNodeId { .. }
|
||||
| ColumnNoneDefaultValue { .. }
|
||||
| MissingWalDirConfig { .. }
|
||||
| Catalog { .. }
|
||||
| MissingRequiredField { .. }
|
||||
| RegionEngineNotFound { .. }
|
||||
@@ -389,9 +456,12 @@ impl ErrorExt for Error {
|
||||
|
||||
AsyncTaskExecute { source, .. } => source.status_code(),
|
||||
|
||||
CreateDir { .. } | RemoveDir { .. } | ShutdownInstance { .. } | DataFusion { .. } => {
|
||||
StatusCode::Internal
|
||||
}
|
||||
CreateDir { .. }
|
||||
| RemoveDir { .. }
|
||||
| ShutdownInstance { .. }
|
||||
| DataFusion { .. }
|
||||
| SetupPlugin { .. }
|
||||
| StartPlugin { .. } => StatusCode::Internal,
|
||||
|
||||
RegionNotFound { .. } => StatusCode::RegionNotFound,
|
||||
RegionNotReady { .. } => StatusCode::RegionNotReady,
|
||||
@@ -402,8 +472,11 @@ impl ErrorExt for Error {
|
||||
InitBackend { .. } => StatusCode::StorageUnavailable,
|
||||
|
||||
OpenLogStore { source, .. } => source.status_code(),
|
||||
RuntimeResource { .. } => StatusCode::RuntimeResourcesExhausted,
|
||||
MetaClientInit { source, .. } => source.status_code(),
|
||||
UnsupportedOutput { .. } => StatusCode::Unsupported,
|
||||
UnsupportedOutput { .. }
|
||||
| TableIdProviderNotFound { .. }
|
||||
| UnsupportedGrpcRequest { .. } => StatusCode::Unsupported,
|
||||
HandleRegionRequest { source, .. }
|
||||
| GetRegionMetadata { source, .. }
|
||||
| HandleBatchOpenRequest { source, .. } => source.status_code(),
|
||||
|
||||
@@ -37,7 +37,7 @@ use crate::alive_keeper::RegionAliveKeeper;
|
||||
use crate::config::DatanodeOptions;
|
||||
use crate::error::{self, MetaClientInitSnafu, Result};
|
||||
use crate::event_listener::RegionServerEventReceiver;
|
||||
use crate::metrics::{self, HEARTBEAT_RECV_COUNT, HEARTBEAT_SENT_COUNT};
|
||||
use crate::metrics;
|
||||
use crate::region_server::RegionServer;
|
||||
|
||||
pub(crate) mod handler;
|
||||
@@ -231,12 +231,10 @@ impl HeartbeatTask {
|
||||
mailbox_message: Some(message),
|
||||
..Default::default()
|
||||
};
|
||||
HEARTBEAT_RECV_COUNT.with_label_values(&["success"]).inc();
|
||||
Some(req)
|
||||
}
|
||||
Err(e) => {
|
||||
error!(e; "Failed to encode mailbox messages!");
|
||||
HEARTBEAT_RECV_COUNT.with_label_values(&["error"]).inc();
|
||||
None
|
||||
}
|
||||
}
|
||||
@@ -306,8 +304,6 @@ impl HeartbeatTask {
|
||||
error!(e; "Failed to reconnect to metasrv!");
|
||||
}
|
||||
}
|
||||
} else {
|
||||
HEARTBEAT_SENT_COUNT.inc();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -206,7 +206,6 @@ mod tests {
|
||||
region_id,
|
||||
last_entry_id: None,
|
||||
wait_for_replay_timeout: None,
|
||||
location_id: None,
|
||||
});
|
||||
assert!(
|
||||
heartbeat_handler.is_acceptable(&heartbeat_env.create_handler_ctx((meta, instruction)))
|
||||
|
||||
@@ -27,7 +27,6 @@ impl HandlerContext {
|
||||
region_id,
|
||||
last_entry_id,
|
||||
wait_for_replay_timeout,
|
||||
location_id,
|
||||
}: UpgradeRegion,
|
||||
) -> BoxFuture<'static, InstructionReply> {
|
||||
Box::pin(async move {
|
||||
@@ -63,7 +62,6 @@ impl HandlerContext {
|
||||
RegionRequest::Catchup(RegionCatchupRequest {
|
||||
set_writable: true,
|
||||
entry_id: last_entry_id,
|
||||
location_id,
|
||||
}),
|
||||
)
|
||||
.await?;
|
||||
@@ -153,7 +151,6 @@ mod tests {
|
||||
region_id,
|
||||
last_entry_id: None,
|
||||
wait_for_replay_timeout,
|
||||
location_id: None,
|
||||
})
|
||||
.await;
|
||||
assert_matches!(reply, InstructionReply::UpgradeRegion(_));
|
||||
@@ -194,7 +191,6 @@ mod tests {
|
||||
region_id,
|
||||
last_entry_id: None,
|
||||
wait_for_replay_timeout,
|
||||
location_id: None,
|
||||
})
|
||||
.await;
|
||||
assert_matches!(reply, InstructionReply::UpgradeRegion(_));
|
||||
@@ -236,7 +232,6 @@ mod tests {
|
||||
region_id,
|
||||
last_entry_id: None,
|
||||
wait_for_replay_timeout,
|
||||
location_id: None,
|
||||
})
|
||||
.await;
|
||||
assert_matches!(reply, InstructionReply::UpgradeRegion(_));
|
||||
@@ -279,9 +274,8 @@ mod tests {
|
||||
.clone()
|
||||
.handle_upgrade_region_instruction(UpgradeRegion {
|
||||
region_id,
|
||||
wait_for_replay_timeout,
|
||||
last_entry_id: None,
|
||||
location_id: None,
|
||||
wait_for_replay_timeout,
|
||||
})
|
||||
.await;
|
||||
assert_matches!(reply, InstructionReply::UpgradeRegion(_));
|
||||
@@ -299,7 +293,6 @@ mod tests {
|
||||
region_id,
|
||||
last_entry_id: None,
|
||||
wait_for_replay_timeout: Some(Duration::from_millis(500)),
|
||||
location_id: None,
|
||||
})
|
||||
.await;
|
||||
assert_matches!(reply, InstructionReply::UpgradeRegion(_));
|
||||
@@ -344,7 +337,6 @@ mod tests {
|
||||
region_id,
|
||||
last_entry_id: None,
|
||||
wait_for_replay_timeout: None,
|
||||
location_id: None,
|
||||
})
|
||||
.await;
|
||||
assert_matches!(reply, InstructionReply::UpgradeRegion(_));
|
||||
@@ -362,7 +354,6 @@ mod tests {
|
||||
region_id,
|
||||
last_entry_id: None,
|
||||
wait_for_replay_timeout: Some(Duration::from_millis(200)),
|
||||
location_id: None,
|
||||
})
|
||||
.await;
|
||||
assert_matches!(reply, InstructionReply::UpgradeRegion(_));
|
||||
|
||||
@@ -54,17 +54,4 @@ lazy_static! {
&[REGION_ROLE]
)
.unwrap();
/// The number of heartbeats send by datanode.
pub static ref HEARTBEAT_SENT_COUNT: IntCounter = register_int_counter!(
"greptime_datanode_heartbeat_send_count",
"datanode heartbeat sent",
)
.unwrap();
/// The number of heartbeats received by datanode, labeled with result type.
pub static ref HEARTBEAT_RECV_COUNT: IntCounterVec = register_int_counter_vec!(
"greptime_datanode_heartbeat_recv_count",
"datanode heartbeat received",
&["result"]
)
.unwrap();
}

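The counters removed above are plain prometheus integer counters registered through lazy_static. A minimal sketch of how such counters are declared and bumped, assuming the prometheus and lazy_static crates; the metric names are copied from the hunk.

use lazy_static::lazy_static;
use prometheus::{register_int_counter, register_int_counter_vec, IntCounter, IntCounterVec};

lazy_static! {
    // Unlabeled counter for sent heartbeats.
    static ref HEARTBEAT_SENT_COUNT: IntCounter = register_int_counter!(
        "greptime_datanode_heartbeat_send_count",
        "datanode heartbeat sent"
    )
    .unwrap();
    // Labeled counter for received heartbeats, keyed by result.
    static ref HEARTBEAT_RECV_COUNT: IntCounterVec = register_int_counter_vec!(
        "greptime_datanode_heartbeat_recv_count",
        "datanode heartbeat received",
        &["result"]
    )
    .unwrap();
}

fn main() {
    HEARTBEAT_SENT_COUNT.inc();
    HEARTBEAT_RECV_COUNT.with_label_values(&["success"]).inc();
    HEARTBEAT_RECV_COUNT.with_label_values(&["error"]).inc();
    println!("sent = {}", HEARTBEAT_SENT_COUNT.get());
}
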
@@ -860,7 +860,7 @@ impl RegionServerInner {
// complains "higher-ranked lifetime error". Rust can't prove some future is legit.
// Possible related issue: https://github.com/rust-lang/rust/issues/102211
//
// The workaround is to put the async functions in the `common_runtime::spawn_global`. Or like
// The walkaround is to put the async functions in the `common_runtime::spawn_global`. Or like
// it here, collect the values first then use later separately.

let regions = self

@@ -268,23 +268,6 @@ impl Value {
}
}

/// Cast Value to f64. Return None if it's not castable;
pub fn as_f64_lossy(&self) -> Option<f64> {
match self {
Value::Float32(v) => Some(v.0 as _),
Value::Float64(v) => Some(v.0),
Value::Int8(v) => Some(*v as _),
Value::Int16(v) => Some(*v as _),
Value::Int32(v) => Some(*v as _),
Value::Int64(v) => Some(*v as _),
Value::UInt8(v) => Some(*v as _),
Value::UInt16(v) => Some(*v as _),
Value::UInt32(v) => Some(*v as _),
Value::UInt64(v) => Some(*v as _),
_ => None,
}
}

/// Returns the logical type of the value.
pub fn logical_type_id(&self) -> LogicalTypeId {
match self {

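The removed as_f64_lossy above performs a best-effort numeric widening and returns None for non-numeric values. Here is a reduced sketch over a trimmed value enum; the real Value type has many more variants (timestamps, decimals, and so on).

#[derive(Debug)]
enum Value {
    Float64(f64),
    Int64(i64),
    UInt64(u64),
    String(String),
}

impl Value {
    /// Cast the value to f64, returning None when it is not numeric.
    fn as_f64_lossy(&self) -> Option<f64> {
        match self {
            Value::Float64(v) => Some(*v),
            Value::Int64(v) => Some(*v as _),
            Value::UInt64(v) => Some(*v as _),
            _ => None,
        }
    }
}

fn main() {
    assert_eq!(Some(42.0), Value::Int64(42).as_f64_lossy());
    assert_eq!(Some(1.5), Value::Float64(1.5).as_f64_lossy());
    assert_eq!(Some(7.0), Value::UInt64(7).as_f64_lossy());
    assert_eq!(None, Value::String("x".into()).as_f64_lossy());
}
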
@@ -49,13 +49,13 @@ use crate::adapter::table_source::TableSource;
use crate::adapter::util::column_schemas_to_proto;
use crate::adapter::worker::{create_worker, Worker, WorkerHandle};
use crate::compute::ErrCollector;
use crate::df_optimizer::sql_to_flow_plan;
use crate::error::{ExternalSnafu, InternalSnafu, TableNotFoundSnafu, UnexpectedSnafu};
use crate::expr::GlobalId;
use crate::metrics::{
METRIC_FLOW_INPUT_BUF_SIZE, METRIC_FLOW_INSERT_ELAPSED, METRIC_FLOW_RUN_INTERVAL_MS,
};
use crate::repr::{self, DiffRow, Row, BATCH_SIZE};
use crate::transform::sql_to_flow_plan;

mod flownode_impl;
mod parse_expr;

@@ -28,7 +28,7 @@ use super::state::Scheduler;
|
||||
use crate::compute::state::DataflowState;
|
||||
use crate::compute::types::{Collection, CollectionBundle, ErrCollector, Toff};
|
||||
use crate::error::{Error, InvalidQuerySnafu, NotImplementedSnafu};
|
||||
use crate::expr::{self, Batch, GlobalId, LocalId};
|
||||
use crate::expr::{self, GlobalId, LocalId};
|
||||
use crate::plan::{Plan, TypedPlan};
|
||||
use crate::repr::{self, DiffRow};
|
||||
|
||||
@@ -87,38 +87,9 @@ impl<'referred, 'df> Context<'referred, 'df> {
|
||||
}
|
||||
|
||||
impl<'referred, 'df> Context<'referred, 'df> {
|
||||
/// Like `render_plan` but in Batch Mode
|
||||
pub fn render_plan_batch(&mut self, plan: TypedPlan) -> Result<CollectionBundle<Batch>, Error> {
|
||||
match plan.plan {
|
||||
Plan::Constant { rows } => Ok(self.render_constant_batch(rows)),
|
||||
Plan::Get { .. } => NotImplementedSnafu {
|
||||
reason: "Get is still WIP in batchmode",
|
||||
}
|
||||
.fail(),
|
||||
Plan::Let { .. } => NotImplementedSnafu {
|
||||
reason: "Let is still WIP in batchmode",
|
||||
}
|
||||
.fail(),
|
||||
Plan::Mfp { input, mfp } => self.render_mfp_batch(input, mfp),
|
||||
Plan::Reduce {
|
||||
input,
|
||||
key_val_plan,
|
||||
reduce_plan,
|
||||
} => self.render_reduce_batch(input, &key_val_plan, &reduce_plan, &plan.schema.typ),
|
||||
Plan::Join { .. } => NotImplementedSnafu {
|
||||
reason: "Join is still WIP",
|
||||
}
|
||||
.fail(),
|
||||
Plan::Union { .. } => NotImplementedSnafu {
|
||||
reason: "Union is still WIP",
|
||||
}
|
||||
.fail(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Interpret plan to dataflow and prepare them for execution
|
||||
/// Interpret and execute plan
|
||||
///
|
||||
/// return the output handler of this plan
|
||||
/// return the output of this plan
|
||||
pub fn render_plan(&mut self, plan: TypedPlan) -> Result<CollectionBundle, Error> {
|
||||
match plan.plan {
|
||||
Plan::Constant { rows } => Ok(self.render_constant(rows)),
|
||||
@@ -141,61 +112,17 @@ impl<'referred, 'df> Context<'referred, 'df> {
|
||||
}
|
||||
}
|
||||
|
||||
/// render Constant, take all rows that have a timestamp not greater than the current time
|
||||
/// This function is primarily used for testing
|
||||
/// Always assume input is sorted by timestamp
|
||||
pub fn render_constant_batch(&mut self, rows: Vec<DiffRow>) -> CollectionBundle<Batch> {
|
||||
let (send_port, recv_port) = self.df.make_edge::<_, Toff<Batch>>("constant_batch");
|
||||
let mut per_time: BTreeMap<repr::Timestamp, Vec<DiffRow>> = Default::default();
|
||||
for (key, group) in &rows.into_iter().group_by(|(_row, ts, _diff)| *ts) {
|
||||
per_time.entry(key).or_default().extend(group);
|
||||
}
|
||||
|
||||
let now = self.compute_state.current_time_ref();
|
||||
// TODO(discord9): better way to schedule future run
|
||||
let scheduler = self.compute_state.get_scheduler();
|
||||
let scheduler_inner = scheduler.clone();
|
||||
let err_collector = self.err_collector.clone();
|
||||
|
||||
let subgraph_id =
|
||||
self.df
|
||||
.add_subgraph_source("ConstantBatch", send_port, move |_ctx, send_port| {
|
||||
// find the first timestamp that is greater than now
|
||||
// use filter_map
|
||||
|
||||
let mut after = per_time.split_off(&(*now.borrow() + 1));
|
||||
// swap
|
||||
std::mem::swap(&mut per_time, &mut after);
|
||||
let not_great_than_now = after;
|
||||
|
||||
not_great_than_now.into_iter().for_each(|(_ts, rows)| {
|
||||
err_collector.run(|| {
|
||||
let rows = rows.into_iter().map(|(row, _ts, _diff)| row).collect();
|
||||
let batch = Batch::try_from_rows(rows)?;
|
||||
send_port.give(vec![batch]);
|
||||
Ok(())
|
||||
});
|
||||
});
|
||||
// schedule the next run
|
||||
if let Some(next_run_time) = per_time.keys().next().copied() {
|
||||
scheduler_inner.schedule_at(next_run_time);
|
||||
}
|
||||
});
|
||||
scheduler.set_cur_subgraph(subgraph_id);
|
||||
|
||||
CollectionBundle::from_collection(Collection::from_port(recv_port))
|
||||
}
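// A minimal sketch of the `split_off` + `swap` trick above: keys strictly greater
// than `now` are split off, the two maps are swapped, and what is left in `after`
// is exactly the "not greater than now" portion that gets emitted, while `per_time`
// retains the future rows for the next scheduled run.
fn example_split_not_greater_than(now: i64) {
    use std::collections::BTreeMap;

    let mut per_time: BTreeMap<i64, &str> = BTreeMap::from([(1, "a"), (5, "b"), (9, "c")]);
    let mut after = per_time.split_off(&(now + 1));
    std::mem::swap(&mut per_time, &mut after);
    // with now = 5: `after` now holds keys 1 and 5, `per_time` holds key 9
    let not_greater_than_now = after;
    assert!(not_greater_than_now.keys().all(|ts| *ts <= now));
}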
|
||||
/// render Constant, take all rows that have a timestamp not greater than the current time
|
||||
///
|
||||
/// Always assume input is sorted by timestamp
|
||||
pub fn render_constant(&mut self, rows: Vec<DiffRow>) -> CollectionBundle {
|
||||
let (send_port, recv_port) = self.df.make_edge::<_, Toff>("constant");
|
||||
let mut per_time: BTreeMap<repr::Timestamp, Vec<DiffRow>> = Default::default();
|
||||
for (key, group) in &rows.into_iter().group_by(|(_row, ts, _diff)| *ts) {
|
||||
per_time.entry(key).or_default().extend(group);
|
||||
}
|
||||
|
||||
let mut per_time: BTreeMap<repr::Timestamp, Vec<DiffRow>> = rows
|
||||
.into_iter()
|
||||
.group_by(|(_row, ts, _diff)| *ts)
|
||||
.into_iter()
|
||||
.map(|(k, v)| (k, v.into_iter().collect_vec()))
|
||||
.collect();
|
||||
let now = self.compute_state.current_time_ref();
|
||||
// TODO(discord9): better way to schedule future run
|
||||
let scheduler = self.compute_state.get_scheduler();
|
||||
|
||||
@@ -23,59 +23,12 @@ use crate::compute::render::Context;
|
||||
use crate::compute::state::Scheduler;
|
||||
use crate::compute::types::{Arranged, Collection, CollectionBundle, ErrCollector, Toff};
|
||||
use crate::error::{Error, PlanSnafu};
|
||||
use crate::expr::{Batch, EvalError, MapFilterProject, MfpPlan, ScalarExpr};
|
||||
use crate::expr::{EvalError, MapFilterProject, MfpPlan, ScalarExpr};
|
||||
use crate::plan::TypedPlan;
|
||||
use crate::repr::{self, DiffRow, KeyValDiffRow, Row};
|
||||
use crate::utils::ArrangeHandler;
|
||||
|
||||
impl<'referred, 'df> Context<'referred, 'df> {
|
||||
/// Like `render_mfp` but in batch mode
|
||||
pub fn render_mfp_batch(
|
||||
&mut self,
|
||||
input: Box<TypedPlan>,
|
||||
mfp: MapFilterProject,
|
||||
) -> Result<CollectionBundle<Batch>, Error> {
|
||||
let input = self.render_plan_batch(*input)?;
|
||||
|
||||
let (out_send_port, out_recv_port) = self.df.make_edge::<_, Toff<Batch>>("mfp_batch");
|
||||
|
||||
// This closure capture following variables:
|
||||
let mfp_plan = MfpPlan::create_from(mfp)?;
|
||||
|
||||
let err_collector = self.err_collector.clone();
|
||||
|
||||
// TODO(discord9): better way to schedule future run
|
||||
let scheduler = self.compute_state.get_scheduler();
|
||||
|
||||
let subgraph = self.df.add_subgraph_in_out(
|
||||
"mfp_batch",
|
||||
input.collection.into_inner(),
|
||||
out_send_port,
|
||||
move |_ctx, recv, send| {
|
||||
// mfp only need to passively receive updates from recvs
|
||||
let src_data = recv.take_inner().into_iter().flat_map(|v| v.into_iter());
|
||||
|
||||
let output_batches = src_data
|
||||
.filter_map(|mut input_batch| {
|
||||
err_collector.run(|| {
|
||||
let res_batch = mfp_plan.mfp.eval_batch_into(&mut input_batch)?;
|
||||
Ok(res_batch)
|
||||
})
|
||||
})
|
||||
.collect_vec();
|
||||
|
||||
send.give(output_batches);
|
||||
},
|
||||
);
|
||||
|
||||
// register current subgraph in scheduler for future scheduling
|
||||
scheduler.set_cur_subgraph(subgraph);
|
||||
|
||||
let bundle =
|
||||
CollectionBundle::from_collection(Collection::<Batch>::from_port(out_recv_port));
|
||||
Ok(bundle)
|
||||
}
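// A minimal sketch of the error-handling pattern in `render_mfp_batch` above:
// per-batch work is fallible, failed batches are dropped by `filter_map`, and the
// error is recorded instead of aborting the whole operator (a plain Vec stands in
// for the ErrCollector here).
fn example_collect_ok<T>(inputs: Vec<Result<T, String>>, errors: &mut Vec<String>) -> Vec<T> {
    inputs
        .into_iter()
        .filter_map(|res| match res {
            Ok(v) => Some(v),
            Err(e) => {
                // record the error and skip this batch, mirroring err_collector.run(..)
                errors.push(e);
                None
            }
        })
        .collect()
}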
|
||||
/// render MapFilterProject, will only emit the `rows` once. Assume all incoming rows' sys time is `now` and ignore each row's stated sys time
|
||||
/// TODO(discord9): schedule mfp operator to run when temporal filter need
|
||||
///
|
||||
|
||||
@@ -14,247 +14,23 @@
|
||||
|
||||
use std::collections::BTreeMap;
|
||||
use std::ops::Range;
|
||||
use std::sync::Arc;
|
||||
|
||||
use datatypes::data_type::ConcreteDataType;
|
||||
use datatypes::prelude::DataType;
|
||||
use datatypes::value::{ListValue, Value};
|
||||
use datatypes::vectors::NullVector;
|
||||
use hydroflow::scheduled::graph_ext::GraphExt;
|
||||
use itertools::Itertools;
|
||||
use snafu::{ensure, OptionExt, ResultExt};
|
||||
|
||||
use crate::compute::render::{Context, SubgraphArg};
|
||||
use crate::compute::types::{Arranged, Collection, CollectionBundle, ErrCollector, Toff};
|
||||
use crate::error::{Error, NotImplementedSnafu, PlanSnafu};
|
||||
use crate::error::{Error, PlanSnafu};
|
||||
use crate::expr::error::{DataAlreadyExpiredSnafu, DataTypeSnafu, InternalSnafu};
|
||||
use crate::expr::{Batch, EvalError, ScalarExpr};
|
||||
use crate::expr::{EvalError, ScalarExpr};
|
||||
use crate::plan::{AccumulablePlan, AggrWithIndex, KeyValPlan, ReducePlan, TypedPlan};
|
||||
use crate::repr::{self, DiffRow, KeyValDiffRow, RelationType, Row};
|
||||
use crate::utils::{ArrangeHandler, ArrangeReader, ArrangeWriter, KeyExpiryManager};
|
||||
|
||||
impl<'referred, 'df> Context<'referred, 'df> {
|
||||
const REDUCE_BATCH: &'static str = "reduce_batch";
|
||||
/// Like `render_reduce`, but for batch mode, and only a bare-bones implementation
|
||||
/// no support for distinct aggregation for now
|
||||
// There is a false positive in using `Vec<ScalarExpr>` as key due to `Value` have `bytes` variant
|
||||
#[allow(clippy::mutable_key_type)]
|
||||
pub fn render_reduce_batch(
|
||||
&mut self,
|
||||
input: Box<TypedPlan>,
|
||||
key_val_plan: &KeyValPlan,
|
||||
reduce_plan: &ReducePlan,
|
||||
output_type: &RelationType,
|
||||
) -> Result<CollectionBundle<Batch>, Error> {
|
||||
let accum_plan = if let ReducePlan::Accumulable(accum_plan) = reduce_plan {
|
||||
if !accum_plan.distinct_aggrs.is_empty() {
|
||||
NotImplementedSnafu {
|
||||
reason: "Distinct aggregation is not supported in batch mode",
|
||||
}
|
||||
.fail()?
|
||||
}
|
||||
accum_plan.clone()
|
||||
} else {
|
||||
NotImplementedSnafu {
|
||||
reason: "Only accumulable reduce plan is supported in batch mode",
|
||||
}
|
||||
.fail()?
|
||||
};
|
||||
|
||||
let input = self.render_plan_batch(*input)?;
|
||||
|
||||
// first assembly key&val to separate key and val columns(since this is batch mode)
|
||||
// Then stream kvs through a reduce operator
|
||||
|
||||
// the output is concat from key and val
|
||||
let output_key_arity = key_val_plan.key_plan.output_arity();
|
||||
|
||||
// TODO(discord9): config global expire time from self
|
||||
let arrange_handler = self.compute_state.new_arrange(None);
|
||||
|
||||
if let (Some(time_index), Some(expire_after)) =
|
||||
(output_type.time_index, self.compute_state.expire_after())
|
||||
{
|
||||
let expire_man =
|
||||
KeyExpiryManager::new(Some(expire_after), Some(ScalarExpr::Column(time_index)));
|
||||
arrange_handler.write().set_expire_state(expire_man);
|
||||
}
|
||||
|
||||
// reduce need full arrangement to be able to query all keys
|
||||
let arrange_handler_inner = arrange_handler.clone_full_arrange().context(PlanSnafu {
|
||||
reason: "No write is expected at this point",
|
||||
})?;
|
||||
let key_val_plan = key_val_plan.clone();
|
||||
|
||||
let now = self.compute_state.current_time_ref();
|
||||
|
||||
let err_collector = self.err_collector.clone();
|
||||
|
||||
// TODO(discord9): better way to schedule future run
|
||||
let scheduler = self.compute_state.get_scheduler();
|
||||
|
||||
let (out_send_port, out_recv_port) =
|
||||
self.df.make_edge::<_, Toff<Batch>>(Self::REDUCE_BATCH);
|
||||
|
||||
let subgraph =
|
||||
self.df.add_subgraph_in_out(
|
||||
Self::REDUCE_BATCH,
|
||||
input.collection.into_inner(),
|
||||
out_send_port,
|
||||
move |_ctx, recv, send| {
|
||||
let now = *(now.borrow());
|
||||
let arrange = arrange_handler_inner.clone();
|
||||
// mfp only need to passively receive updates from recvs
|
||||
let src_data = recv
|
||||
.take_inner()
|
||||
.into_iter()
|
||||
.flat_map(|v| v.into_iter())
|
||||
.collect_vec();
|
||||
|
||||
let mut key_to_many_vals = BTreeMap::<Row, Batch>::new();
|
||||
for batch in src_data {
|
||||
err_collector.run(|| {
|
||||
let (key_batch, val_batch) =
|
||||
batch_split_by_key_val(&batch, &key_val_plan, &err_collector);
|
||||
ensure!(
|
||||
key_batch.row_count() == val_batch.row_count(),
|
||||
InternalSnafu {
|
||||
reason: format!(
|
||||
"Key and val batch should have the same row count, found {} and {}",
|
||||
key_batch.row_count(),
|
||||
val_batch.row_count()
|
||||
)
|
||||
}
|
||||
);
|
||||
|
||||
for row_idx in 0..key_batch.row_count() {
|
||||
let key_row = key_batch.get_row(row_idx).unwrap();
|
||||
let val_row = val_batch.slice(row_idx, 1)?;
|
||||
let val_batch =
|
||||
key_to_many_vals.entry(Row::new(key_row)).or_default();
|
||||
val_batch.append_batch(val_row)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
});
|
||||
}
|
||||
|
||||
// write lock the arrange for the rest of the function body
|
||||
// to prevent weird race conditions
|
||||
let mut arrange = arrange.write();
|
||||
let mut all_arrange_updates = Vec::with_capacity(key_to_many_vals.len());
|
||||
let mut all_output_rows = Vec::with_capacity(key_to_many_vals.len());
|
||||
|
||||
for (key, val_batch) in key_to_many_vals {
|
||||
err_collector.run(|| -> Result<(), _> {
|
||||
let (accums, _, _) = arrange.get(now, &key).unwrap_or_default();
|
||||
let accum_list = from_accum_values_to_live_accums(
|
||||
accums.unpack(),
|
||||
accum_plan.simple_aggrs.len(),
|
||||
)?;
|
||||
|
||||
let mut accum_output = AccumOutput::new();
|
||||
for AggrWithIndex {
|
||||
expr,
|
||||
input_idx,
|
||||
output_idx,
|
||||
} in accum_plan.simple_aggrs.iter()
|
||||
{
|
||||
let cur_old_accum = accum_list.get(*output_idx).cloned().unwrap_or_default();
|
||||
// if batch is empty, input null instead
|
||||
let cur_input = val_batch.batch().get(*input_idx).cloned().unwrap_or_else(||Arc::new(NullVector::new(val_batch.row_count())));
|
||||
|
||||
let (output, new_accum) =
|
||||
expr.func.eval_batch(cur_old_accum, cur_input, None)?;
|
||||
|
||||
accum_output.insert_accum(*output_idx, new_accum);
|
||||
accum_output.insert_output(*output_idx, output);
|
||||
}
|
||||
|
||||
let (new_accums, res_val_row) = accum_output.into_accum_output()?;
|
||||
|
||||
let arrange_update = ((key.clone(), Row::new(new_accums)), now, 1);
|
||||
all_arrange_updates.push(arrange_update);
|
||||
|
||||
let mut key_val = key;
|
||||
key_val.extend(res_val_row);
|
||||
all_output_rows.push((key_val, now, 1));
|
||||
|
||||
Ok(())
|
||||
});
|
||||
}
|
||||
|
||||
err_collector.run(|| {
|
||||
arrange.apply_updates(now, all_arrange_updates)?;
|
||||
arrange.compact_to(now)
|
||||
});
|
||||
|
||||
// this output part is not supposed to be resource intensive
|
||||
// (because for every batch there usually aren't that many output rows),
|
||||
// so we can do some costly operation here
|
||||
let output_types = all_output_rows.first().map(|(row, _, _)| {
|
||||
row.iter()
|
||||
.map(|v| v.data_type())
|
||||
.collect::<Vec<ConcreteDataType>>()
|
||||
});
|
||||
|
||||
if let Some(output_types) = output_types {
|
||||
err_collector.run(|| {
|
||||
let column_cnt = output_types.len();
|
||||
let row_cnt = all_output_rows.len();
|
||||
|
||||
let mut output_builder = output_types
|
||||
.into_iter()
|
||||
.map(|t| t.create_mutable_vector(row_cnt))
|
||||
.collect_vec();
|
||||
|
||||
for (row, _, _) in all_output_rows {
|
||||
for (i, v) in row.into_iter().enumerate() {
|
||||
output_builder
|
||||
.get_mut(i)
|
||||
.context(InternalSnafu{
|
||||
reason: format!(
|
||||
"Output builder should have the same length as the row, expected at most {} but got {}",
|
||||
column_cnt-1,
|
||||
i
|
||||
)
|
||||
})?
|
||||
.try_push_value_ref(v.as_value_ref())
|
||||
.context(DataTypeSnafu {
|
||||
msg: "Failed to push value",
|
||||
})?;
|
||||
}
|
||||
}
|
||||
|
||||
let output_columns = output_builder
|
||||
.into_iter()
|
||||
.map(|mut b| b.to_vector())
|
||||
.collect_vec();
|
||||
|
||||
let output_batch = Batch::try_new(output_columns, row_cnt)?;
|
||||
send.give(vec![output_batch]);
|
||||
|
||||
Ok(())
|
||||
});
|
||||
}
|
||||
},
|
||||
);
|
||||
|
||||
scheduler.set_cur_subgraph(subgraph);
|
||||
|
||||
// by default the key of output arrange
|
||||
let arranged = BTreeMap::from([(
|
||||
(0..output_key_arity).map(ScalarExpr::Column).collect_vec(),
|
||||
Arranged::new(arrange_handler),
|
||||
)]);
|
||||
|
||||
let bundle = CollectionBundle {
|
||||
collection: Collection::from_port(out_recv_port),
|
||||
arranged,
|
||||
};
|
||||
Ok(bundle)
|
||||
}
|
||||
|
||||
const REDUCE: &'static str = "reduce";
|
||||
/// render `Plan::Reduce` into executable dataflow
|
||||
// There is a false positive in using `Vec<ScalarExpr>` as key due to `Value` have `bytes` variant
|
||||
@@ -375,18 +151,6 @@ impl<'referred, 'df> Context<'referred, 'df> {
|
||||
}
|
||||
}
|
||||
|
||||
fn from_accum_values_to_live_accums(
|
||||
accums: Vec<Value>,
|
||||
len: usize,
|
||||
) -> Result<Vec<Vec<Value>>, EvalError> {
|
||||
let accum_ranges = from_val_to_slice_idx(accums.first().cloned(), len)?;
|
||||
let mut accum_list = vec![];
|
||||
for range in accum_ranges.iter() {
|
||||
accum_list.push(accums.get(range.clone()).unwrap_or_default().to_vec());
|
||||
}
|
||||
Ok(accum_list)
|
||||
}
|
||||
|
||||
/// All arrange(aka state) used in reduce operator
|
||||
pub struct ReduceArrange {
|
||||
/// The output arrange of reduce operator
|
||||
@@ -396,40 +160,33 @@ pub struct ReduceArrange {
|
||||
distinct_input: Option<Vec<ArrangeHandler>>,
|
||||
}
|
||||
|
||||
fn batch_split_by_key_val(
|
||||
batch: &Batch,
|
||||
/// split a row into key and val by evaluating the key and val plan
|
||||
fn split_row_to_key_val(
|
||||
row: Row,
|
||||
sys_time: repr::Timestamp,
|
||||
diff: repr::Diff,
|
||||
key_val_plan: &KeyValPlan,
|
||||
err_collector: &ErrCollector,
|
||||
) -> (Batch, Batch) {
|
||||
let row_count = batch.row_count();
|
||||
let mut key_batch = Batch::empty();
|
||||
let mut val_batch = Batch::empty();
|
||||
|
||||
err_collector.run(|| {
|
||||
if key_val_plan.key_plan.output_arity() != 0 {
|
||||
key_batch = key_val_plan.key_plan.eval_batch_into(&mut batch.clone())?;
|
||||
}
|
||||
|
||||
if key_val_plan.val_plan.output_arity() != 0 {
|
||||
val_batch = key_val_plan.val_plan.eval_batch_into(&mut batch.clone())?;
|
||||
}
|
||||
Ok(())
|
||||
});
|
||||
|
||||
// deal with empty key or val
|
||||
if key_batch.row_count() == 0 && key_batch.column_count() == 0 {
|
||||
key_batch.set_row_count(row_count);
|
||||
row_buf: &mut Row,
|
||||
) -> Result<Option<KeyValDiffRow>, EvalError> {
|
||||
if let Some(key) = key_val_plan
|
||||
.key_plan
|
||||
.evaluate_into(&mut row.inner.clone(), row_buf)?
|
||||
{
|
||||
// val_plan is not supported to carry any filter predicate,
|
||||
let val = key_val_plan
|
||||
.val_plan
|
||||
.evaluate_into(&mut row.inner.clone(), row_buf)?
|
||||
.context(InternalSnafu {
|
||||
reason: "val_plan should not contain any filter predicate",
|
||||
})?;
|
||||
Ok(Some(((key, val), sys_time, diff)))
|
||||
} else {
|
||||
Ok(None)
|
||||
}
|
||||
|
||||
if val_batch.row_count() == 0 && val_batch.column_count() == 0 {
|
||||
val_batch.set_row_count(row_count);
|
||||
}
|
||||
|
||||
(key_batch, val_batch)
|
||||
}
|
||||
|
||||
/// split a row into key and val by evaluating the key and val plan
|
||||
fn split_rows_to_key_val(
|
||||
fn batch_split_rows_to_key_val(
|
||||
rows: impl IntoIterator<Item = DiffRow>,
|
||||
key_val_plan: KeyValPlan,
|
||||
err_collector: ErrCollector,
|
||||
@@ -478,7 +235,7 @@ fn reduce_subgraph(
|
||||
send,
|
||||
}: SubgraphArg,
|
||||
) {
|
||||
let key_val = split_rows_to_key_val(data, key_val_plan.clone(), err_collector.clone());
|
||||
let key_val = batch_split_rows_to_key_val(data, key_val_plan.clone(), err_collector.clone());
|
||||
// from here for distinct reduce and accum reduce, things are drastically different
|
||||
// for distinct reduce the arrange store the output,
|
||||
// but for accum reduce the arrange store the accum state, and output is
|
||||
@@ -1370,105 +1127,6 @@ mod test {
|
||||
run_and_check(&mut state, &mut df, 6..7, expected, output);
|
||||
}
|
||||
|
||||
/// Batch Mode Reduce Evaluation
|
||||
/// SELECT SUM(col) FROM table
|
||||
///
|
||||
/// table schema:
|
||||
/// | name | type |
|
||||
/// |------|-------|
|
||||
/// | col | Int64 |
|
||||
#[test]
|
||||
fn test_basic_batch_reduce_accum() {
|
||||
let mut df = Hydroflow::new();
|
||||
let mut state = DataflowState::default();
|
||||
let now = state.current_time_ref();
|
||||
let mut ctx = harness_test_ctx(&mut df, &mut state);
|
||||
|
||||
let rows = vec![
|
||||
(Row::new(vec![1i64.into()]), 1, 1),
|
||||
(Row::new(vec![2i64.into()]), 2, 1),
|
||||
(Row::new(vec![3i64.into()]), 3, 1),
|
||||
(Row::new(vec![1i64.into()]), 4, 1),
|
||||
(Row::new(vec![2i64.into()]), 5, 1),
|
||||
(Row::new(vec![3i64.into()]), 6, 1),
|
||||
];
|
||||
let input_plan = Plan::Constant { rows: rows.clone() };
|
||||
|
||||
let typ = RelationType::new(vec![ColumnType::new_nullable(
|
||||
ConcreteDataType::int64_datatype(),
|
||||
)]);
|
||||
let key_val_plan = KeyValPlan {
|
||||
key_plan: MapFilterProject::new(1).project([]).unwrap().into_safe(),
|
||||
val_plan: MapFilterProject::new(1).project([0]).unwrap().into_safe(),
|
||||
};
|
||||
|
||||
let simple_aggrs = vec![AggrWithIndex::new(
|
||||
AggregateExpr {
|
||||
func: AggregateFunc::SumInt64,
|
||||
expr: ScalarExpr::Column(0),
|
||||
distinct: false,
|
||||
},
|
||||
0,
|
||||
0,
|
||||
)];
|
||||
let accum_plan = AccumulablePlan {
|
||||
full_aggrs: vec![AggregateExpr {
|
||||
func: AggregateFunc::SumInt64,
|
||||
expr: ScalarExpr::Column(0),
|
||||
distinct: false,
|
||||
}],
|
||||
simple_aggrs,
|
||||
distinct_aggrs: vec![],
|
||||
};
|
||||
|
||||
let reduce_plan = ReducePlan::Accumulable(accum_plan);
|
||||
let bundle = ctx
|
||||
.render_reduce_batch(
|
||||
Box::new(input_plan.with_types(typ.into_unnamed())),
|
||||
&key_val_plan,
|
||||
&reduce_plan,
|
||||
&RelationType::empty(),
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
{
|
||||
let now_inner = now.clone();
|
||||
let expected = BTreeMap::<i64, Vec<i64>>::from([
|
||||
(1, vec![1i64]),
|
||||
(2, vec![3i64]),
|
||||
(3, vec![6i64]),
|
||||
(4, vec![7i64]),
|
||||
(5, vec![9i64]),
|
||||
(6, vec![12i64]),
|
||||
]);
|
||||
let collection = bundle.collection;
|
||||
ctx.df
|
||||
.add_subgraph_sink("test_sink", collection.into_inner(), move |_ctx, recv| {
|
||||
let now = *now_inner.borrow();
|
||||
let data = recv.take_inner();
|
||||
let res = data.into_iter().flat_map(|v| v.into_iter()).collect_vec();
|
||||
|
||||
if let Some(expected) = expected.get(&now) {
|
||||
let batch = expected.iter().map(|v| Value::from(*v)).collect_vec();
|
||||
let batch = Batch::try_from_rows(vec![batch.into()]).unwrap();
|
||||
assert_eq!(res.first(), Some(&batch));
|
||||
}
|
||||
});
|
||||
drop(ctx);
|
||||
|
||||
for now in 1..7 {
|
||||
state.set_current_ts(now);
|
||||
state.run_available_with_schedule(&mut df);
|
||||
if !state.get_err_collector().is_empty() {
|
||||
panic!(
|
||||
"Errors occur: {:?}",
|
||||
state.get_err_collector().get_all_blocking()
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
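// A worked note on the `expected` map in the test above: the input values
// 1, 2, 3, 1, 2, 3 arrive at timestamps 1..=6, so the running SUM(col) after each
// tick is the cumulative sum 1, 3, 6, 7, 9, 12, which are exactly the
// per-timestamp values asserted in the sink.
fn example_running_sums() {
    let inputs = [1i64, 2, 3, 1, 2, 3];
    let sums: Vec<i64> = inputs
        .iter()
        .scan(0i64, |acc, v| {
            *acc += v;
            Some(*acc)
        })
        .collect();
    assert_eq!(sums, vec![1, 3, 6, 7, 9, 12]);
}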
|
||||
/// SELECT SUM(col) FROM table
|
||||
///
|
||||
/// table schema:
|
||||
|
||||
@@ -27,67 +27,11 @@ use crate::compute::render::Context;
|
||||
use crate::compute::types::{Arranged, Collection, CollectionBundle, Toff};
|
||||
use crate::error::{Error, PlanSnafu};
|
||||
use crate::expr::error::InternalSnafu;
|
||||
use crate::expr::{Batch, EvalError};
|
||||
use crate::expr::EvalError;
|
||||
use crate::repr::{DiffRow, Row, BROADCAST_CAP};
|
||||
|
||||
#[allow(clippy::mutable_key_type)]
|
||||
impl<'referred, 'df> Context<'referred, 'df> {
|
||||
/// simply send the batch downstream, without fancy features like buffering
|
||||
pub fn render_source_batch(
|
||||
&mut self,
|
||||
mut src_recv: broadcast::Receiver<Batch>,
|
||||
) -> Result<CollectionBundle<Batch>, Error> {
|
||||
debug!("Rendering Source Batch");
|
||||
let (send_port, recv_port) = self.df.make_edge::<_, Toff<Batch>>("source_batch");
|
||||
|
||||
let schd = self.compute_state.get_scheduler();
|
||||
let inner_schd = schd.clone();
|
||||
let now = self.compute_state.current_time_ref();
|
||||
let err_collector = self.err_collector.clone();
|
||||
|
||||
let sub = self
|
||||
.df
|
||||
.add_subgraph_source("source_batch", send_port, move |_ctx, send| {
|
||||
loop {
|
||||
match src_recv.try_recv() {
|
||||
Ok(batch) => {
|
||||
send.give(vec![batch]);
|
||||
}
|
||||
Err(TryRecvError::Empty) => {
|
||||
break;
|
||||
}
|
||||
Err(TryRecvError::Lagged(lag_offset)) => {
|
||||
// use `err_collector` instead of `error!` to locate which operator caused the error
|
||||
err_collector.run(|| -> Result<(), EvalError> {
|
||||
InternalSnafu {
|
||||
reason: format!("Flow missing {} rows behind", lag_offset),
|
||||
}
|
||||
.fail()
|
||||
});
|
||||
break;
|
||||
}
|
||||
Err(TryRecvError::Closed) => {
|
||||
err_collector.run(|| -> Result<(), EvalError> {
|
||||
InternalSnafu {
|
||||
reason: "Source Batch Channel is closed".to_string(),
|
||||
}
|
||||
.fail()
|
||||
});
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let now = *now.borrow();
|
||||
// always schedule source to run at now so we can
|
||||
// repeatedly run source if needed
|
||||
inner_schd.schedule_at(now);
|
||||
});
|
||||
schd.set_cur_subgraph(sub);
|
||||
let bundle = CollectionBundle::from_collection(Collection::<Batch>::from_port(recv_port));
|
||||
Ok(bundle)
|
||||
}
|
||||
/// Render a source which comes from a broadcast channel into the dataflow
|
||||
/// will immediately send updates not greater than `now` and buffer the rest in arrangement
|
||||
pub fn render_source(
|
||||
@@ -170,32 +114,6 @@ impl<'referred, 'df> Context<'referred, 'df> {
|
||||
})
|
||||
}
|
||||
|
||||
pub fn render_unbounded_sink_batch(
|
||||
&mut self,
|
||||
bundle: CollectionBundle<Batch>,
|
||||
sender: mpsc::UnboundedSender<Batch>,
|
||||
) {
|
||||
let CollectionBundle {
|
||||
collection,
|
||||
arranged: _,
|
||||
} = bundle;
|
||||
|
||||
let _sink = self.df.add_subgraph_sink(
|
||||
"UnboundedSinkBatch",
|
||||
collection.into_inner(),
|
||||
move |_ctx, recv| {
|
||||
let data = recv.take_inner();
|
||||
for batch in data.into_iter().flat_map(|i| i.into_iter()) {
|
||||
// if the sender is closed unexpectedly, stop sending
|
||||
if sender.is_closed() || sender.send(batch).is_err() {
|
||||
common_telemetry::error!("UnboundedSinkBatch is closed");
|
||||
break;
|
||||
}
|
||||
}
|
||||
},
|
||||
);
|
||||
}
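// A minimal sketch, assuming tokio's unbounded mpsc as used above: forward items to
// the sink and stop as soon as the receiving side has gone away, which is exactly
// what the `is_closed() || send(..).is_err()` check does.
fn example_forward<T>(items: Vec<T>, sender: &tokio::sync::mpsc::UnboundedSender<T>) {
    for item in items {
        if sender.is_closed() || sender.send(item).is_err() {
            // the receiver was dropped; no point sending the rest
            break;
        }
    }
}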
|
||||
pub fn render_unbounded_sink(
|
||||
&mut self,
|
||||
bundle: CollectionBundle,
|
||||
|
||||
@@ -105,13 +105,11 @@ impl Arranged {
|
||||
/// This type maintains the invariant that it does contain at least one (or both) valid
|
||||
/// source of data, either a collection or at least one arrangement. This is for convenience
|
||||
/// of reading the data from the collection.
|
||||
///
|
||||
// TODO(discord9): make T default to Batch and obsolete the Row Mode
|
||||
pub struct CollectionBundle<T: 'static = DiffRow> {
|
||||
pub struct CollectionBundle {
|
||||
/// This is useful for passively reading the new updates from the collection
|
||||
///
|
||||
/// Invariant: the timestamp of the updates should always not greater than now, since future updates should be stored in the arrangement
|
||||
pub collection: Collection<T>,
|
||||
pub collection: Collection<DiffRow>,
|
||||
/// the key [`ScalarExpr`] indicate how the keys(also a [`Row`]) used in Arranged is extract from collection's [`Row`]
|
||||
/// So it is the "index" of the arrangement
|
||||
///
|
||||
@@ -123,16 +121,13 @@ pub struct CollectionBundle<T: 'static = DiffRow> {
|
||||
pub arranged: BTreeMap<Vec<ScalarExpr>, Arranged>,
|
||||
}
|
||||
|
||||
impl<T: 'static> CollectionBundle<T> {
|
||||
pub fn from_collection(collection: Collection<T>) -> Self {
|
||||
impl CollectionBundle {
|
||||
pub fn from_collection(collection: Collection<DiffRow>) -> Self {
|
||||
Self {
|
||||
collection,
|
||||
arranged: BTreeMap::default(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: 'static + Clone> CollectionBundle<T> {
|
||||
pub fn clone(&self, df: &mut Hydroflow) -> Self {
|
||||
Self {
|
||||
collection: self.collection.clone(df),
|
||||
|
||||
@@ -1,604 +0,0 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//! Datafusion optimizer for flow plan
|
||||
|
||||
#![warn(unused)]
|
||||
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_error::ext::BoxedError;
|
||||
use common_telemetry::debug;
|
||||
use datafusion::config::ConfigOptions;
|
||||
use datafusion::error::DataFusionError;
|
||||
use datafusion::optimizer::analyzer::type_coercion::TypeCoercion;
|
||||
use datafusion::optimizer::common_subexpr_eliminate::CommonSubexprEliminate;
|
||||
use datafusion::optimizer::optimize_projections::OptimizeProjections;
|
||||
use datafusion::optimizer::simplify_expressions::SimplifyExpressions;
|
||||
use datafusion::optimizer::unwrap_cast_in_comparison::UnwrapCastInComparison;
|
||||
use datafusion::optimizer::utils::NamePreserver;
|
||||
use datafusion::optimizer::{Analyzer, AnalyzerRule, Optimizer, OptimizerContext};
|
||||
use datafusion_common::tree_node::{
|
||||
Transformed, TreeNode, TreeNodeRecursion, TreeNodeRewriter, TreeNodeVisitor,
|
||||
};
|
||||
use datafusion_common::{Column, DFSchema, ScalarValue};
|
||||
use datafusion_expr::aggregate_function::AggregateFunction;
|
||||
use datafusion_expr::expr::AggregateFunctionDefinition;
|
||||
use datafusion_expr::utils::merge_schema;
|
||||
use datafusion_expr::{
|
||||
BinaryExpr, Expr, Operator, Projection, ScalarUDFImpl, Signature, TypeSignature, Volatility,
|
||||
};
|
||||
use query::parser::QueryLanguageParser;
|
||||
use query::plan::LogicalPlan;
|
||||
use query::query_engine::DefaultSerializer;
|
||||
use query::QueryEngine;
|
||||
use snafu::ResultExt;
|
||||
/// note here we are using the `substrait_proto_df` crate from the `substrait` module and
|
||||
/// rename it to `substrait_proto`
|
||||
use substrait::DFLogicalSubstraitConvertor;
|
||||
|
||||
use crate::adapter::FlownodeContext;
|
||||
use crate::error::{DatafusionSnafu, Error, ExternalSnafu, UnexpectedSnafu};
|
||||
use crate::expr::{TUMBLE_END, TUMBLE_START};
|
||||
use crate::plan::TypedPlan;
|
||||
|
||||
// TODO(discord9): use `Analyzer` to manage rules if more `AnalyzerRule` is needed
|
||||
pub async fn apply_df_optimizer(
|
||||
plan: datafusion_expr::LogicalPlan,
|
||||
) -> Result<datafusion_expr::LogicalPlan, Error> {
|
||||
let cfg = ConfigOptions::new();
|
||||
let analyzer = Analyzer::with_rules(vec![
|
||||
Arc::new(AvgExpandRule::new()),
|
||||
Arc::new(TumbleExpandRule::new()),
|
||||
Arc::new(CheckGroupByRule::new()),
|
||||
Arc::new(TypeCoercion::new()),
|
||||
]);
|
||||
let plan = analyzer
|
||||
.execute_and_check(plan, &cfg, |p, r| {
|
||||
debug!("After apply rule {}, get plan: \n{:?}", r.name(), p);
|
||||
})
|
||||
.context(DatafusionSnafu {
|
||||
context: "Fail to apply analyzer",
|
||||
})?;
|
||||
|
||||
let ctx = OptimizerContext::new();
|
||||
let optimizer = Optimizer::with_rules(vec![
|
||||
Arc::new(OptimizeProjections::new()),
|
||||
Arc::new(CommonSubexprEliminate::new()),
|
||||
Arc::new(SimplifyExpressions::new()),
|
||||
Arc::new(UnwrapCastInComparison::new()),
|
||||
]);
|
||||
let plan = optimizer
|
||||
.optimize(plan, &ctx, |_, _| {})
|
||||
.context(DatafusionSnafu {
|
||||
context: "Fail to apply optimizer",
|
||||
})?;
|
||||
|
||||
Ok(plan)
|
||||
}
|
||||
|
||||
/// To reuse existing code for parse sql, the sql is first parsed into a datafusion logical plan,
|
||||
/// then to a substrait plan, and finally to a flow plan.
|
||||
pub async fn sql_to_flow_plan(
|
||||
ctx: &mut FlownodeContext,
|
||||
engine: &Arc<dyn QueryEngine>,
|
||||
sql: &str,
|
||||
) -> Result<TypedPlan, Error> {
|
||||
let query_ctx = ctx.query_context.clone().ok_or_else(|| {
|
||||
UnexpectedSnafu {
|
||||
reason: "Query context is missing",
|
||||
}
|
||||
.build()
|
||||
})?;
|
||||
let stmt = QueryLanguageParser::parse_sql(sql, &query_ctx)
|
||||
.map_err(BoxedError::new)
|
||||
.context(ExternalSnafu)?;
|
||||
let plan = engine
|
||||
.planner()
|
||||
.plan(stmt, query_ctx)
|
||||
.await
|
||||
.map_err(BoxedError::new)
|
||||
.context(ExternalSnafu)?;
|
||||
let LogicalPlan::DfPlan(plan) = plan;
|
||||
|
||||
let opted_plan = apply_df_optimizer(plan).await?;
|
||||
|
||||
// TODO(discord9): add df optimization
|
||||
let sub_plan = DFLogicalSubstraitConvertor {}
|
||||
.to_sub_plan(&opted_plan, DefaultSerializer)
|
||||
.map_err(BoxedError::new)
|
||||
.context(ExternalSnafu)?;
|
||||
|
||||
let flow_plan = TypedPlan::from_substrait_plan(ctx, &sub_plan).await?;
|
||||
|
||||
Ok(flow_plan)
|
||||
}
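// A minimal usage sketch for `sql_to_flow_plan` above; the SQL text is illustrative
// and `flow_ctx` / `query_engine` are assumed to be constructed elsewhere.
async fn example_build_flow_plan(
    flow_ctx: &mut FlownodeContext,
    query_engine: &Arc<dyn QueryEngine>,
) -> Result<TypedPlan, Error> {
    sql_to_flow_plan(flow_ctx, query_engine, "SELECT sum(col) FROM numbers").await
}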
|
||||
struct AvgExpandRule {}
|
||||
|
||||
impl AvgExpandRule {
|
||||
pub fn new() -> Self {
|
||||
Self {}
|
||||
}
|
||||
}
|
||||
|
||||
impl AnalyzerRule for AvgExpandRule {
|
||||
fn analyze(
|
||||
&self,
|
||||
plan: datafusion_expr::LogicalPlan,
|
||||
_config: &ConfigOptions,
|
||||
) -> datafusion_common::Result<datafusion_expr::LogicalPlan> {
|
||||
let transformed = plan
|
||||
.transform_up_with_subqueries(expand_avg_analyzer)?
|
||||
.data
|
||||
.transform_down_with_subqueries(put_aggr_to_proj_analyzer)?
|
||||
.data;
|
||||
Ok(transformed)
|
||||
}
|
||||
|
||||
fn name(&self) -> &str {
|
||||
"avg_expand"
|
||||
}
|
||||
}
|
||||
|
||||
/// lift aggr's composite aggr_expr to outer proj, and leave aggr only with simple direct aggr expr
|
||||
/// i.e.
|
||||
/// ```ignore
|
||||
/// proj: avg(x)
|
||||
/// -- aggr: [sum(x)/count(x) as avg(x)]
|
||||
/// ```
|
||||
/// becomes:
|
||||
/// ```ignore
|
||||
/// proj: sum(x)/count(x) as avg(x)
|
||||
/// -- aggr: [sum(x), count(x)]
|
||||
/// ```
|
||||
fn put_aggr_to_proj_analyzer(
|
||||
plan: datafusion_expr::LogicalPlan,
|
||||
) -> Result<Transformed<datafusion_expr::LogicalPlan>, DataFusionError> {
|
||||
if let datafusion_expr::LogicalPlan::Projection(proj) = &plan {
|
||||
if let datafusion_expr::LogicalPlan::Aggregate(aggr) = proj.input.as_ref() {
|
||||
let mut replace_old_proj_exprs = HashMap::new();
|
||||
let mut expanded_aggr_exprs = vec![];
|
||||
for aggr_expr in &aggr.aggr_expr {
|
||||
let mut is_composite = false;
|
||||
if let Expr::AggregateFunction(_) = &aggr_expr {
|
||||
expanded_aggr_exprs.push(aggr_expr.clone());
|
||||
} else {
|
||||
let old_name = aggr_expr.name_for_alias()?;
|
||||
let new_proj_expr = aggr_expr
|
||||
.clone()
|
||||
.transform(|ch| {
|
||||
if let Expr::AggregateFunction(_) = &ch {
|
||||
is_composite = true;
|
||||
expanded_aggr_exprs.push(ch.clone());
|
||||
Ok(Transformed::yes(Expr::Column(Column::from_qualified_name(
|
||||
ch.name_for_alias()?,
|
||||
))))
|
||||
} else {
|
||||
Ok(Transformed::no(ch))
|
||||
}
|
||||
})?
|
||||
.data;
|
||||
replace_old_proj_exprs.insert(old_name, new_proj_expr);
|
||||
}
|
||||
}
|
||||
|
||||
if expanded_aggr_exprs.len() > aggr.aggr_expr.len() {
|
||||
let mut aggr = aggr.clone();
|
||||
aggr.aggr_expr = expanded_aggr_exprs;
|
||||
let mut aggr_plan = datafusion_expr::LogicalPlan::Aggregate(aggr);
|
||||
// important to recompute schema after changing aggr_expr
|
||||
aggr_plan = aggr_plan.recompute_schema()?;
|
||||
|
||||
// reconstruct proj with new proj_exprs
|
||||
let mut new_proj_exprs = proj.expr.clone();
|
||||
for proj_expr in new_proj_exprs.iter_mut() {
|
||||
if let Some(new_proj_expr) =
|
||||
replace_old_proj_exprs.get(&proj_expr.name_for_alias()?)
|
||||
{
|
||||
*proj_expr = new_proj_expr.clone();
|
||||
}
|
||||
*proj_expr = proj_expr
|
||||
.clone()
|
||||
.transform(|expr| {
|
||||
if let Some(new_expr) =
|
||||
replace_old_proj_exprs.get(&expr.name_for_alias()?)
|
||||
{
|
||||
Ok(Transformed::yes(new_expr.clone()))
|
||||
} else {
|
||||
Ok(Transformed::no(expr))
|
||||
}
|
||||
})?
|
||||
.data;
|
||||
}
|
||||
let proj = datafusion_expr::LogicalPlan::Projection(Projection::try_new(
|
||||
new_proj_exprs,
|
||||
Arc::new(aggr_plan),
|
||||
)?);
|
||||
return Ok(Transformed::yes(proj));
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(Transformed::no(plan))
|
||||
}
|
||||
|
||||
/// expand the `avg(<expr>)` function into `cast(sum(<expr>) AS f64) / count(<expr>)`
|
||||
fn expand_avg_analyzer(
|
||||
plan: datafusion_expr::LogicalPlan,
|
||||
) -> Result<Transformed<datafusion_expr::LogicalPlan>, DataFusionError> {
|
||||
let mut schema = merge_schema(plan.inputs());
|
||||
|
||||
if let datafusion_expr::LogicalPlan::TableScan(ts) = &plan {
|
||||
let source_schema =
|
||||
DFSchema::try_from_qualified_schema(ts.table_name.clone(), &ts.source.schema())?;
|
||||
schema.merge(&source_schema);
|
||||
}
|
||||
|
||||
let mut expr_rewrite = ExpandAvgRewriter::new(&schema);
|
||||
|
||||
let name_preserver = NamePreserver::new(&plan);
|
||||
// apply coercion rewrite all expressions in the plan individually
|
||||
plan.map_expressions(|expr| {
|
||||
let original_name = name_preserver.save(&expr)?;
|
||||
expr.rewrite(&mut expr_rewrite)?
|
||||
.map_data(|expr| original_name.restore(expr))
|
||||
})?
|
||||
.map_data(|plan| plan.recompute_schema())
|
||||
}
|
||||
|
||||
/// rewrite the `avg(<expr>)` function into `CASE WHEN count(<expr>) != 0 THEN cast(sum(<expr>) AS avg_return_type) / count(<expr>) ELSE NULL`
|
||||
///
|
||||
/// TODO(discord9): support avg return type decimal128
|
||||
///
|
||||
/// see impl details at https://github.com/apache/datafusion/blob/4ad4f90d86c57226a4e0fb1f79dfaaf0d404c273/datafusion/expr/src/type_coercion/aggregates.rs#L457-L462
|
||||
pub(crate) struct ExpandAvgRewriter<'a> {
|
||||
/// schema of the plan
|
||||
#[allow(unused)]
|
||||
pub(crate) schema: &'a DFSchema,
|
||||
}
|
||||
|
||||
impl<'a> ExpandAvgRewriter<'a> {
|
||||
fn new(schema: &'a DFSchema) -> Self {
|
||||
Self { schema }
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> TreeNodeRewriter for ExpandAvgRewriter<'a> {
|
||||
type Node = Expr;
|
||||
|
||||
fn f_up(&mut self, expr: Expr) -> Result<Transformed<Expr>, DataFusionError> {
|
||||
if let Expr::AggregateFunction(aggr_func) = &expr {
|
||||
if let AggregateFunctionDefinition::BuiltIn(AggregateFunction::Avg) =
|
||||
&aggr_func.func_def
|
||||
{
|
||||
let sum_expr = {
|
||||
let mut tmp = aggr_func.clone();
|
||||
tmp.func_def = AggregateFunctionDefinition::BuiltIn(AggregateFunction::Sum);
|
||||
Expr::AggregateFunction(tmp)
|
||||
};
|
||||
let sum_cast = {
|
||||
let mut tmp = sum_expr.clone();
|
||||
tmp = Expr::Cast(datafusion_expr::Cast {
|
||||
expr: Box::new(tmp),
|
||||
data_type: arrow_schema::DataType::Float64,
|
||||
});
|
||||
tmp
|
||||
};
|
||||
|
||||
let count_expr = {
|
||||
let mut tmp = aggr_func.clone();
|
||||
tmp.func_def = AggregateFunctionDefinition::BuiltIn(AggregateFunction::Count);
|
||||
|
||||
Expr::AggregateFunction(tmp)
|
||||
};
|
||||
let count_expr_ref =
|
||||
Expr::Column(Column::from_qualified_name(count_expr.name_for_alias()?));
|
||||
|
||||
let div =
|
||||
BinaryExpr::new(Box::new(sum_cast), Operator::Divide, Box::new(count_expr));
|
||||
let div_expr = Box::new(Expr::BinaryExpr(div));
|
||||
|
||||
let zero = Box::new(Expr::Literal(ScalarValue::Int64(Some(0))));
|
||||
let not_zero =
|
||||
BinaryExpr::new(Box::new(count_expr_ref), Operator::NotEq, zero.clone());
|
||||
let not_zero = Box::new(Expr::BinaryExpr(not_zero));
|
||||
let null = Box::new(Expr::Literal(ScalarValue::Null));
|
||||
|
||||
let case_when =
|
||||
datafusion_expr::Case::new(None, vec![(not_zero, div_expr)], Some(null));
|
||||
let case_when_expr = Expr::Case(case_when);
|
||||
|
||||
return Ok(Transformed::yes(case_when_expr));
|
||||
}
|
||||
}
|
||||
|
||||
Ok(Transformed::no(expr))
|
||||
}
|
||||
}
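// The rewrite performed by ExpandAvgRewriter above, written out in SQL shape as an
// illustration:
//
//   avg(x)
//     =>  CASE WHEN count(x) != 0
//              THEN CAST(sum(x) AS DOUBLE) / count(x)
//              ELSE NULL
//         END
//
// The division is guarded so an empty group yields NULL instead of a
// divide-by-zero, matching the `not_zero` / `null` branches built above.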
|
||||
/// expand tumble in aggr expr to tumble_start and tumble_end with column name like `window_start`
|
||||
struct TumbleExpandRule {}
|
||||
|
||||
impl TumbleExpandRule {
|
||||
pub fn new() -> Self {
|
||||
Self {}
|
||||
}
|
||||
}
|
||||
|
||||
impl AnalyzerRule for TumbleExpandRule {
|
||||
fn analyze(
|
||||
&self,
|
||||
plan: datafusion_expr::LogicalPlan,
|
||||
_config: &ConfigOptions,
|
||||
) -> datafusion_common::Result<datafusion_expr::LogicalPlan> {
|
||||
let transformed = plan
|
||||
.transform_up_with_subqueries(expand_tumble_analyzer)?
|
||||
.data;
|
||||
Ok(transformed)
|
||||
}
|
||||
|
||||
fn name(&self) -> &str {
|
||||
"tumble_expand"
|
||||
}
|
||||
}
|
||||
|
||||
/// expand `tumble` in aggr expr to `tumble_start` and `tumble_end`, also expand related alias and column ref
|
||||
///
|
||||
/// will add `tumble_start` and `tumble_end` to the outer projection if they do not already exist
|
||||
fn expand_tumble_analyzer(
|
||||
plan: datafusion_expr::LogicalPlan,
|
||||
) -> Result<Transformed<datafusion_expr::LogicalPlan>, DataFusionError> {
|
||||
if let datafusion_expr::LogicalPlan::Projection(proj) = &plan {
|
||||
if let datafusion_expr::LogicalPlan::Aggregate(aggr) = proj.input.as_ref() {
|
||||
let mut new_group_expr = vec![];
|
||||
let mut alias_to_expand = HashMap::new();
|
||||
let mut encountered_tumble = false;
|
||||
for expr in aggr.group_expr.iter() {
|
||||
match expr {
|
||||
datafusion_expr::Expr::ScalarFunction(func) if func.name() == "tumble" => {
|
||||
encountered_tumble = true;
|
||||
|
||||
let tumble_start = TumbleExpand::new(TUMBLE_START);
|
||||
let tumble_start = datafusion_expr::expr::ScalarFunction::new_udf(
|
||||
Arc::new(tumble_start.into()),
|
||||
func.args.clone(),
|
||||
);
|
||||
let tumble_start = datafusion_expr::Expr::ScalarFunction(tumble_start);
|
||||
let start_col_name = tumble_start.name_for_alias()?;
|
||||
new_group_expr.push(tumble_start);
|
||||
|
||||
let tumble_end = TumbleExpand::new(TUMBLE_END);
|
||||
let tumble_end = datafusion_expr::expr::ScalarFunction::new_udf(
|
||||
Arc::new(tumble_end.into()),
|
||||
func.args.clone(),
|
||||
);
|
||||
let tumble_end = datafusion_expr::Expr::ScalarFunction(tumble_end);
|
||||
let end_col_name = tumble_end.name_for_alias()?;
|
||||
new_group_expr.push(tumble_end);
|
||||
|
||||
alias_to_expand
|
||||
.insert(expr.name_for_alias()?, (start_col_name, end_col_name));
|
||||
}
|
||||
_ => new_group_expr.push(expr.clone()),
|
||||
}
|
||||
}
|
||||
if !encountered_tumble {
|
||||
return Ok(Transformed::no(plan));
|
||||
}
|
||||
let mut new_aggr = aggr.clone();
|
||||
new_aggr.group_expr = new_group_expr;
|
||||
let new_aggr = datafusion_expr::LogicalPlan::Aggregate(new_aggr).recompute_schema()?;
|
||||
// replace alias in projection if needed, and add new column ref if necessary
|
||||
let mut new_proj_expr = vec![];
|
||||
let mut have_expanded = false;
|
||||
|
||||
for proj_expr in proj.expr.iter() {
|
||||
if let Some((start_col_name, end_col_name)) =
|
||||
alias_to_expand.get(&proj_expr.name_for_alias()?)
|
||||
{
|
||||
let start_col = Column::from_qualified_name(start_col_name);
|
||||
let end_col = Column::from_qualified_name(end_col_name);
|
||||
new_proj_expr.push(datafusion_expr::Expr::Column(start_col));
|
||||
new_proj_expr.push(datafusion_expr::Expr::Column(end_col));
|
||||
have_expanded = true;
|
||||
} else {
|
||||
new_proj_expr.push(proj_expr.clone());
|
||||
}
|
||||
}
|
||||
|
||||
// append to end of projection if not exist
|
||||
if !have_expanded {
|
||||
for (start_col_name, end_col_name) in alias_to_expand.values() {
|
||||
let start_col = Column::from_qualified_name(start_col_name);
|
||||
let end_col = Column::from_qualified_name(end_col_name);
|
||||
new_proj_expr
|
||||
.push(datafusion_expr::Expr::Column(start_col).alias("window_start"));
|
||||
new_proj_expr.push(datafusion_expr::Expr::Column(end_col).alias("window_end"));
|
||||
}
|
||||
}
|
||||
|
||||
let new_proj = datafusion_expr::LogicalPlan::Projection(Projection::try_new(
|
||||
new_proj_expr,
|
||||
Arc::new(new_aggr),
|
||||
)?);
|
||||
return Ok(Transformed::yes(new_proj));
|
||||
}
|
||||
}
|
||||
|
||||
Ok(Transformed::no(plan))
|
||||
}
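// An illustration of the expansion above in SQL shape only; the table and column
// names are made up:
//
//   SELECT sum(v) FROM t GROUP BY tumble(ts, '1 hour')
//
// becomes, after this rule,
//
//   SELECT sum(v),
//          tumble_start(ts, '1 hour') AS window_start,
//          tumble_end(ts, '1 hour')   AS window_end
//   FROM t
//   GROUP BY tumble_start(ts, '1 hour'), tumble_end(ts, '1 hour')
//
// i.e. the single `tumble` group key is split into the window's start and end, and
// both are surfaced in the projection when the query did not already select them.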
|
||||
/// This is a placeholder for tumble_start and tumble_end function, so that datafusion can
|
||||
/// recognize them as scalar function
|
||||
#[derive(Debug)]
|
||||
pub struct TumbleExpand {
|
||||
signature: Signature,
|
||||
name: String,
|
||||
}
|
||||
|
||||
impl TumbleExpand {
|
||||
pub fn new(name: &str) -> Self {
|
||||
Self {
|
||||
signature: Signature::new(TypeSignature::UserDefined, Volatility::Immutable),
|
||||
name: name.to_string(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl ScalarUDFImpl for TumbleExpand {
|
||||
fn as_any(&self) -> &dyn std::any::Any {
|
||||
self
|
||||
}
|
||||
|
||||
fn name(&self) -> &str {
|
||||
&self.name
|
||||
}
|
||||
|
||||
/// elide the signature for now
|
||||
fn signature(&self) -> &Signature {
|
||||
&self.signature
|
||||
}
|
||||
|
||||
fn coerce_types(
|
||||
&self,
|
||||
arg_types: &[arrow_schema::DataType],
|
||||
) -> datafusion_common::Result<Vec<arrow_schema::DataType>> {
|
||||
match (arg_types.first(), arg_types.get(1), arg_types.get(2)) {
|
||||
(Some(ts), Some(window), opt) => {
|
||||
use arrow_schema::DataType::*;
|
||||
if !matches!(ts, Date32 | Date64 | Timestamp(_, _)) {
|
||||
return Err(DataFusionError::Plan(
|
||||
format!("Expect timestamp column as first arg for tumble_start, found {:?}", ts)
|
||||
));
|
||||
}
|
||||
if !matches!(window, Utf8 | Interval(_)) {
|
||||
return Err(DataFusionError::Plan(
|
||||
format!("Expect second arg for window size's type being interval for tumble_start, found {:?}", window),
|
||||
));
|
||||
}
|
||||
|
||||
if let Some(start_time) = opt{
|
||||
if !matches!(start_time, Utf8 | Date32 | Date64 | Timestamp(_, _)){
|
||||
return Err(DataFusionError::Plan(
|
||||
format!("Expect start_time to either be date, timestampe or string, found {:?}", start_time)
|
||||
));
|
||||
}
|
||||
}
|
||||
|
||||
Ok(arg_types.to_vec())
|
||||
}
|
||||
_ => Err(DataFusionError::Plan(
|
||||
"Expect tumble function have at least two arg(timestamp column and window size) and a third optional arg for starting time".to_string(),
|
||||
)),
|
||||
}
|
||||
}
|
||||
|
||||
fn return_type(
|
||||
&self,
|
||||
arg_types: &[arrow_schema::DataType],
|
||||
) -> Result<arrow_schema::DataType, DataFusionError> {
|
||||
arg_types.first().cloned().ok_or_else(|| {
|
||||
DataFusionError::Plan(
|
||||
"Expect tumble function have at least two arg(timestamp column and window size)"
|
||||
.to_string(),
|
||||
)
|
||||
})
|
||||
}
|
||||
|
||||
fn invoke(
|
||||
&self,
|
||||
_args: &[datafusion_expr::ColumnarValue],
|
||||
) -> Result<datafusion_expr::ColumnarValue, DataFusionError> {
|
||||
Err(DataFusionError::Plan(
|
||||
"This function should not be executed by datafusion".to_string(),
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
/// This rule checks all group by exprs and makes sure they also appear in the select clause of an aggr query
|
||||
struct CheckGroupByRule {}
|
||||
|
||||
impl CheckGroupByRule {
|
||||
pub fn new() -> Self {
|
||||
Self {}
|
||||
}
|
||||
}
|
||||
|
||||
impl AnalyzerRule for CheckGroupByRule {
|
||||
fn analyze(
|
||||
&self,
|
||||
plan: datafusion_expr::LogicalPlan,
|
||||
_config: &ConfigOptions,
|
||||
) -> datafusion_common::Result<datafusion_expr::LogicalPlan> {
|
||||
let transformed = plan
|
||||
.transform_up_with_subqueries(check_group_by_analyzer)?
|
||||
.data;
|
||||
Ok(transformed)
|
||||
}
|
||||
|
||||
fn name(&self) -> &str {
|
||||
"check_groupby"
|
||||
}
|
||||
}
|
||||
|
||||
/// make sure everything in group by's expr is in select
|
||||
fn check_group_by_analyzer(
|
||||
plan: datafusion_expr::LogicalPlan,
|
||||
) -> Result<Transformed<datafusion_expr::LogicalPlan>, DataFusionError> {
|
||||
if let datafusion_expr::LogicalPlan::Projection(proj) = &plan {
|
||||
if let datafusion_expr::LogicalPlan::Aggregate(aggr) = proj.input.as_ref() {
|
||||
let mut found_column_used = FindColumn::new();
|
||||
proj.expr
|
||||
.iter()
|
||||
.map(|i| i.visit(&mut found_column_used))
|
||||
.count();
|
||||
for expr in aggr.group_expr.iter() {
|
||||
if !found_column_used
|
||||
.names_for_alias
|
||||
.contains(&expr.name_for_alias()?)
|
||||
{
|
||||
return Err(DataFusionError::Plan(format!("Expect {} expr in group by also exist in select list, but select list only contain {:?}",expr.name_for_alias()?, found_column_used.names_for_alias)));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(Transformed::no(plan))
|
||||
}
|
||||
|
||||
/// Find all column names in a plan
|
||||
#[derive(Debug, Default)]
|
||||
struct FindColumn {
|
||||
names_for_alias: HashSet<String>,
|
||||
}
|
||||
|
||||
impl FindColumn {
|
||||
fn new() -> Self {
|
||||
Default::default()
|
||||
}
|
||||
}
|
||||
|
||||
impl TreeNodeVisitor<'_> for FindColumn {
|
||||
type Node = datafusion_expr::Expr;
|
||||
fn f_down(
|
||||
&mut self,
|
||||
node: &datafusion_expr::Expr,
|
||||
) -> Result<TreeNodeRecursion, DataFusionError> {
|
||||
if let datafusion_expr::Expr::Column(_) = node {
|
||||
self.names_for_alias.insert(node.name_for_alias()?);
|
||||
}
|
||||
Ok(TreeNodeRecursion::Continue)
|
||||
}
|
||||
}
|
||||
@@ -21,6 +21,7 @@ use common_error::ext::BoxedError;
|
||||
use common_macro::stack_trace_debug;
|
||||
use common_telemetry::common_error::ext::ErrorExt;
|
||||
use common_telemetry::common_error::status_code::StatusCode;
|
||||
use datatypes::value::Value;
|
||||
use snafu::{Location, Snafu};
|
||||
|
||||
use crate::adapter::FlowId;
|
||||
@@ -68,6 +69,13 @@ pub enum Error {
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Table already exist: {name}"))]
|
||||
TableAlreadyExist {
|
||||
name: String,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Flow not found, id={id}"))]
|
||||
FlowNotFound {
|
||||
id: FlowId,
|
||||
@@ -98,6 +106,13 @@ pub enum Error {
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Invalid query: prost can't decode substrait plan: {inner}"))]
|
||||
InvalidQueryProst {
|
||||
inner: api::DecodeError,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Invalid query: {reason}"))]
|
||||
InvalidQuery {
|
||||
reason: String,
|
||||
@@ -105,6 +120,13 @@ pub enum Error {
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("No protobuf type for value: {value}"))]
|
||||
NoProtoType {
|
||||
value: Value,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Not implement in flow: {reason}"))]
|
||||
NotImplemented {
|
||||
reason: String,
|
||||
@@ -192,18 +214,21 @@ pub type Result<T> = std::result::Result<T, Error>;
|
||||
impl ErrorExt for Error {
|
||||
fn status_code(&self) -> StatusCode {
|
||||
match self {
|
||||
Self::Eval { .. } | Self::JoinTask { .. } | Self::Datafusion { .. } => {
|
||||
Self::Eval { .. } | &Self::JoinTask { .. } | &Self::Datafusion { .. } => {
|
||||
StatusCode::Internal
|
||||
}
|
||||
Self::FlowAlreadyExist { .. } => StatusCode::TableAlreadyExists,
|
||||
&Self::TableAlreadyExist { .. } | Self::FlowAlreadyExist { .. } => {
|
||||
StatusCode::TableAlreadyExists
|
||||
}
|
||||
Self::TableNotFound { .. }
|
||||
| Self::TableNotFoundMeta { .. }
|
||||
| Self::FlowNotFound { .. }
|
||||
| Self::ListFlows { .. } => StatusCode::TableNotFound,
|
||||
Self::InvalidQuery { .. } | Self::Plan { .. } | Self::Datatypes { .. } => {
|
||||
StatusCode::PlanQuery
|
||||
}
|
||||
Self::Unexpected { .. } => StatusCode::Unexpected,
|
||||
Self::InvalidQueryProst { .. }
|
||||
| &Self::InvalidQuery { .. }
|
||||
| &Self::Plan { .. }
|
||||
| &Self::Datatypes { .. } => StatusCode::PlanQuery,
|
||||
Self::NoProtoType { .. } | Self::Unexpected { .. } => StatusCode::Unexpected,
|
||||
Self::NotImplemented { .. } | Self::UnsupportedTemporalFilter { .. } => {
|
||||
StatusCode::Unsupported
|
||||
}
|
||||
|
||||
@@ -24,7 +24,6 @@ mod scalar;
|
||||
mod signature;
|
||||
|
||||
use datatypes::prelude::DataType;
|
||||
use datatypes::value::Value;
|
||||
use datatypes::vectors::VectorRef;
|
||||
pub(crate) use df_func::{DfScalarFunction, RawDfScalarFn};
|
||||
pub(crate) use error::{EvalError, InvalidArgumentSnafu};
|
||||
@@ -38,168 +37,42 @@ use snafu::{ensure, ResultExt};
|
||||
|
||||
use crate::expr::error::DataTypeSnafu;
|
||||
|
||||
pub const TUMBLE_START: &str = "tumble_start";
|
||||
pub const TUMBLE_END: &str = "tumble_end";
|
||||
|
||||
/// A batch of vectors with the same length but without schema, only useful in dataflow
|
||||
///
|
||||
/// somewhat cheap to clone since it just contains a list of VectorRef (which is an `Arc`).
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct Batch {
|
||||
batch: Vec<VectorRef>,
|
||||
row_count: usize,
|
||||
/// describes whether the corresponding rows in the batch are inserts or deletes; None means all rows are inserts
|
||||
diffs: Option<VectorRef>,
|
||||
}
|
||||
|
||||
impl PartialEq for Batch {
|
||||
fn eq(&self, other: &Self) -> bool {
|
||||
let mut batch_eq = true;
|
||||
if self.batch.len() != other.batch.len() {
|
||||
return false;
|
||||
}
|
||||
for (left, right) in self.batch.iter().zip(other.batch.iter()) {
|
||||
batch_eq = batch_eq
|
||||
&& <dyn arrow::array::Array>::eq(&left.to_arrow_array(), &right.to_arrow_array());
|
||||
}
|
||||
|
||||
let diff_eq = match (&self.diffs, &other.diffs) {
|
||||
(Some(left), Some(right)) => {
|
||||
<dyn arrow::array::Array>::eq(&left.to_arrow_array(), &right.to_arrow_array())
|
||||
}
|
||||
(None, None) => true,
|
||||
_ => false,
|
||||
};
|
||||
batch_eq && diff_eq && self.row_count == other.row_count
|
||||
}
|
||||
}
|
||||
|
||||
impl Eq for Batch {}
|
||||
|
||||
impl Default for Batch {
|
||||
fn default() -> Self {
|
||||
Self::empty()
|
||||
}
|
||||
}
|
||||
|
||||
impl Batch {
|
||||
pub fn try_from_rows(rows: Vec<crate::repr::Row>) -> Result<Self, EvalError> {
|
||||
if rows.is_empty() {
|
||||
return Ok(Self::empty());
|
||||
}
|
||||
let len = rows.len();
|
||||
let mut builder = rows
|
||||
.first()
|
||||
.unwrap()
|
||||
.iter()
|
||||
.map(|v| v.data_type().create_mutable_vector(len))
|
||||
.collect_vec();
|
||||
for row in rows {
|
||||
ensure!(
|
||||
row.len() == builder.len(),
|
||||
InvalidArgumentSnafu {
|
||||
reason: format!(
|
||||
"row length not match, expect {}, found {}",
|
||||
builder.len(),
|
||||
row.len()
|
||||
)
|
||||
}
|
||||
);
|
||||
for (idx, value) in row.iter().enumerate() {
|
||||
builder[idx]
|
||||
.try_push_value_ref(value.as_value_ref())
|
||||
.context(DataTypeSnafu {
|
||||
msg: "Failed to convert rows to columns",
|
||||
})?;
|
||||
}
|
||||
}
|
||||
|
||||
let columns = builder.into_iter().map(|mut b| b.to_vector()).collect_vec();
|
||||
let batch = Self::try_new(columns, len)?;
|
||||
Ok(batch)
|
||||
}
|
||||
|
||||
pub fn empty() -> Self {
|
||||
Self {
|
||||
batch: vec![],
|
||||
row_count: 0,
|
||||
diffs: None,
|
||||
}
|
||||
}
|
||||
pub fn try_new(batch: Vec<VectorRef>, row_count: usize) -> Result<Self, EvalError> {
|
||||
ensure!(
|
||||
batch.iter().map(|v| v.len()).all_equal()
|
||||
&& batch.first().map(|v| v.len() == row_count).unwrap_or(true),
|
||||
InvalidArgumentSnafu {
|
||||
reason: "All columns should have same length".to_string()
|
||||
}
|
||||
);
|
||||
Ok(Self {
|
||||
batch,
|
||||
row_count,
|
||||
diffs: None,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn new_unchecked(batch: Vec<VectorRef>, row_count: usize) -> Self {
|
||||
Self {
|
||||
batch,
|
||||
row_count,
|
||||
diffs: None,
|
||||
}
|
||||
pub fn new(batch: Vec<VectorRef>, row_count: usize) -> Self {
|
||||
Self { batch, row_count }
|
||||
}
|
||||
|
||||
pub fn batch(&self) -> &[VectorRef] {
|
||||
&self.batch
|
||||
}
|
||||
|
||||
pub fn batch_mut(&mut self) -> &mut Vec<VectorRef> {
|
||||
&mut self.batch
|
||||
}
|
||||
|
||||
pub fn row_count(&self) -> usize {
|
||||
self.row_count
|
||||
}
|
||||
|
||||
pub fn set_row_count(&mut self, row_count: usize) {
|
||||
self.row_count = row_count;
|
||||
}
|
||||
|
||||
pub fn column_count(&self) -> usize {
|
||||
self.batch.len()
|
||||
}
|
||||
|
||||
pub fn get_row(&self, idx: usize) -> Result<Vec<Value>, EvalError> {
|
||||
ensure!(
|
||||
idx < self.row_count,
|
||||
InvalidArgumentSnafu {
|
||||
reason: format!(
|
||||
"Expect row index to be less than {}, found {}",
|
||||
self.row_count, idx
|
||||
)
|
||||
}
|
||||
);
|
||||
Ok(self.batch.iter().map(|v| v.get(idx)).collect_vec())
|
||||
}
|
||||
|
||||
/// Slices the `Batch`, returning a new `Batch`.
|
||||
pub fn slice(&self, offset: usize, length: usize) -> Result<Batch, EvalError> {
|
||||
///
|
||||
/// # Panics
|
||||
/// This function panics if `offset + length > self.row_count()`.
|
||||
pub fn slice(&self, offset: usize, length: usize) -> Batch {
|
||||
let batch = self
|
||||
.batch()
|
||||
.iter()
|
||||
.map(|v| v.slice(offset, length))
|
||||
.collect_vec();
|
||||
Batch::try_new(batch, length)
|
||||
Batch::new(batch, length)
|
||||
}
|
||||
|
||||
    /// append another batch to self
    ///
    /// NOTE: This is expensive since it will create new vectors for each column
    pub fn append_batch(&mut self, other: Batch) -> Result<(), EvalError> {
        ensure!(
            self.batch.len() == other.batch.len()
                || self.batch.is_empty()
                || other.batch.is_empty(),
            self.batch.len() == other.batch.len(),
            InvalidArgumentSnafu {
                reason: format!(
                    "Expect two batch to have same numbers of column, found {} and {} columns",
@@ -209,31 +82,21 @@ impl Batch {
            }
        );

        if self.batch.is_empty() {
            self.batch = other.batch;
            self.row_count = other.row_count;
            return Ok(());
        } else if other.batch.is_empty() {
            return Ok(());
        }

        let dts = if self.batch.is_empty() {
            other.batch.iter().map(|v| v.data_type()).collect_vec()
        } else {
            self.batch.iter().map(|v| v.data_type()).collect_vec()
        };

        let batch_builders = dts
        let batch_builders = self
            .batch
            .iter()
            .map(|dt| dt.create_mutable_vector(self.row_count() + other.row_count()))
            .map(|v| {
                v.data_type()
                    .create_mutable_vector(self.row_count() + other.row_count())
            })
            .collect_vec();

        let mut result = vec![];
        let self_row_count = self.row_count();
        let zelf_row_count = self.row_count();
        let other_row_count = other.row_count();
        for (idx, mut builder) in batch_builders.into_iter().enumerate() {
            builder
                .extend_slice_of(self.batch()[idx].as_ref(), 0, self_row_count)
                .extend_slice_of(self.batch()[idx].as_ref(), 0, zelf_row_count)
                .context(DataTypeSnafu {
                    msg: "Failed to extend vector",
                })?;
@@ -245,7 +108,7 @@ impl Batch {
            result.push(builder.to_vector());
        }
        self.batch = result;
        self.row_count = self_row_count + other_row_count;
        self.row_count = zelf_row_count + other_row_count;
        Ok(())
    }
}

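For reviewers, a minimal usage sketch of the Batch API touched above, assuming the Result-returning constructors on this side of the diff; the Int32Vector column and the function name are illustrative and not part of the change:

    use std::sync::Arc;
    use datatypes::vectors::{Int32Vector, VectorRef};

    fn batch_roundtrip() -> Result<(), EvalError> {
        // one column of three rows; try_new checks every column against row_count
        let col: VectorRef = Arc::new(Int32Vector::from_slice(&[1, 2, 3]));
        let mut batch = Batch::try_new(vec![col], 3)?;
        // slice copies the first two rows out into a new Batch
        let head = batch.slice(0, 2)?;
        // append_batch rebuilds every column, so this copies data; row count becomes 5
        batch.append_batch(head)?;
        assert_eq!(batch.row_count(), 5);
        Ok(())
    }
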
@@ -35,13 +35,13 @@ use snafu::{ensure, OptionExt, ResultExt};
use strum::{EnumIter, IntoEnumIterator};
use substrait::df_logical_plan::consumer::name_to_op;

use crate::error::{Error, ExternalSnafu, InvalidQuerySnafu, PlanSnafu, UnexpectedSnafu};
use crate::error::{Error, ExternalSnafu, InvalidQuerySnafu, PlanSnafu};
use crate::expr::error::{
    ArrowSnafu, CastValueSnafu, DataTypeSnafu, DivisionByZeroSnafu, EvalError, OverflowSnafu,
    TryFromValueSnafu, TypeMismatchSnafu,
};
use crate::expr::signature::{GenericFn, Signature};
use crate::expr::{Batch, InvalidArgumentSnafu, ScalarExpr, TypedExpr, TUMBLE_END, TUMBLE_START};
use crate::expr::{Batch, InvalidArgumentSnafu, ScalarExpr, TypedExpr};
use crate::repr::{self, value_to_internal_ts};

/// UnmaterializableFunc is a function that can't be eval independently,
@@ -87,10 +87,42 @@ impl UnmaterializableFunc {
    }

    /// Create a UnmaterializableFunc from a string of the function name
    pub fn from_str_args(name: &str, _args: Vec<TypedExpr>) -> Result<Self, Error> {
    pub fn from_str_args(name: &str, args: Vec<TypedExpr>) -> Result<Self, Error> {
        match name.to_lowercase().as_str() {
            "now" => Ok(Self::Now),
            "current_schema" => Ok(Self::CurrentSchema),
            "tumble" => {
                let ts = args.first().context(InvalidQuerySnafu {
                    reason: "Tumble window function requires a timestamp argument",
                })?;
                let window_size = args
                    .get(1)
                    .and_then(|expr| expr.expr.as_literal())
                    .context(InvalidQuerySnafu {
                        reason: "Tumble window function requires a window size argument"
                    })?.as_string() // TODO(discord9): since df to substrait convertor does not support interval type yet, we need to take a string and cast it to interval instead
                    .map(|s|cast(Value::from(s), &ConcreteDataType::interval_month_day_nano_datatype())).transpose().map_err(BoxedError::new).context(
                        ExternalSnafu
                    )?.and_then(|v|v.as_interval())
                    .with_context(||InvalidQuerySnafu {
                        reason: format!("Tumble window function requires window size argument to be a string describe a interval, found {:?}", args.get(1))
                    })?;
                let start_time = match args.get(2) {
                    Some(start_time) => start_time.expr.as_literal(),
                    None => None,
                }
                .map(|s| cast(s.clone(), &ConcreteDataType::datetime_datatype())).transpose().map_err(BoxedError::new).context(ExternalSnafu)?.map(|v|v.as_datetime().with_context(
                    ||InvalidQuerySnafu {
                        reason: format!("Tumble window function requires start time argument to be a datetime describe in string, found {:?}", args.get(2))
                    }
                )).transpose()?;

                Ok(Self::TumbleWindow {
                    ts: Box::new(ts.clone()),
                    window_size,
                    start_time,
                })
            }
            _ => InvalidQuerySnafu {
                reason: format!("Unknown unmaterializable function: {}", name),
            }
@@ -315,96 +347,6 @@ impl UnaryFunc {
        }
    }

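One note on the tumble branch above: because the df-to-substrait path cannot carry interval literals yet, the window size arrives as a string and is coerced with the datatypes cast. Isolated below with the error plumbing reduced to what the hunk itself uses; the helper name is ours, not the code's:

    fn window_size_from_str(raw: &str) -> Result<Value, Error> {
        // cast the string literal (e.g. "1 hour") to an IntervalMonthDayNano value,
        // mirroring the chain in from_str_args; callers then apply .as_interval()
        cast(
            Value::from(raw.to_string()),
            &ConcreteDataType::interval_month_day_nano_datatype(),
        )
        .map_err(BoxedError::new)
        .context(ExternalSnafu)
    }
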
    pub fn from_tumble_func(name: &str, args: &[TypedExpr]) -> Result<(Self, TypedExpr), Error> {
        match name.to_lowercase().as_str() {
            TUMBLE_START | TUMBLE_END => {
                let ts = args.first().context(InvalidQuerySnafu {
                    reason: "Tumble window function requires a timestamp argument",
                })?;
                let window_size = {
                    let window_size_untyped = args
                        .get(1)
                        .and_then(|expr| expr.expr.as_literal())
                        .context(InvalidQuerySnafu {
                            reason: "Tumble window function requires a window size argument",
                        })?;
                    if let Some(window_size) = window_size_untyped.as_string() {
                        // cast as interval
                        cast(
                            Value::from(window_size),
                            &ConcreteDataType::interval_month_day_nano_datatype(),
                        )
                        .map_err(BoxedError::new)
                        .context(ExternalSnafu)?
                        .as_interval()
                        .context(UnexpectedSnafu {
                            reason: "Expect window size arg to be interval after successful cast"
                                .to_string(),
                        })?
                    } else if let Some(interval) = window_size_untyped.as_interval() {
                        interval
                    } else {
                        InvalidQuerySnafu {
                            reason: format!(
                                "Tumble window function requires window size argument to be either a interval or a string describe a interval, found {:?}",
                                window_size_untyped
                            )
                        }.fail()?
                    }
                };

                // start time argument is optional
                let start_time = match args.get(2) {
                    Some(start_time) => {
                        if let Some(value) = start_time.expr.as_literal() {
                            // cast as DateTime
                            let ret = cast(value, &ConcreteDataType::datetime_datatype())
                                .map_err(BoxedError::new)
                                .context(ExternalSnafu)?
                                .as_datetime()
                                .context(UnexpectedSnafu {
                                    reason:
                                        "Expect start time arg to be datetime after successful cast"
                                            .to_string(),
                                })?;
                            Some(ret)
                        } else {
                            UnexpectedSnafu {
                                reason: "Expect start time arg to be literal",
                            }
                            .fail()?
                        }
                    }
                    None => None,
                };

                if name == TUMBLE_START {
                    Ok((
                        Self::TumbleWindowFloor {
                            window_size,
                            start_time,
                        },
                        ts.clone(),
                    ))
                } else if name == TUMBLE_END {
                    Ok((
                        Self::TumbleWindowCeiling {
                            window_size,
                            start_time,
                        },
                        ts.clone(),
                    ))
                } else {
                    unreachable!()
                }
            }
            _ => crate::error::InternalSnafu {
                reason: format!("Unknown tumble kind function: {}", name),
            }
            .fail()?,
        }
    }

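The two unary variants produced here, TumbleWindowFloor and TumbleWindowCeiling, align a timestamp down and up to the window that contains it. As intuition only, a toy over plain i64 milliseconds that ignores the start_time offset and the month part of the interval (both simplifications are ours, not the code's):

    /// Align `ts_ms` to the tumble window of `window_ms` milliseconds that contains it.
    fn tumble_bounds(ts_ms: i64, window_ms: i64) -> (i64, i64) {
        let floor = ts_ms - ts_ms.rem_euclid(window_ms); // tumble_start
        (floor, floor + window_ms)                       // tumble_end
    }

    // tumble_bounds(10_500, 1_000) == (10_000, 11_000)
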
    /// Evaluate the function with given values and expression
    ///
    /// # Arguments
@@ -770,8 +712,8 @@ impl BinaryFunc {
            t1 == t2,
            InvalidQuerySnafu {
                reason: format!(
                    "Binary function {:?} requires both arguments to have the same type, left={:?}, right={:?}",
                    generic, t1, t2
                    "Binary function {:?} requires both arguments to have the same type",
                    generic
                ),
            }
        );
