mirror of https://github.com/GreptimeTeam/greptimedb.git
synced 2026-01-06 05:12:54 +00:00

Compare commits: v0.10.0-ni ... feat/geo-f (6 commits)

| SHA1 |
|---|
| 0bff41e038 |
| e56709b545 |
| a64eb0a5bc |
| 2e4ab6dd91 |
| f761798f93 |
| 420446f19f |
@@ -42,7 +42,7 @@ runs:
       - name: Install rust toolchain
         uses: actions-rust-lang/setup-rust-toolchain@v1
         with:
-          target: ${{ inputs.arch }}
+          targets: ${{ inputs.arch }}

       - name: Start etcd # For integration tests.
         if: ${{ inputs.disable-run-tests == 'false' }}
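For context, `actions-rust-lang/setup-rust-toolchain@v1` accepts a comma-separated `targets` input (the singular `target` spelling is an older alias of the same input), so a step along the lines of this sketch should install an extra compilation target; the toolchain and target values here are illustrative, not taken from the repository:

```yaml
- name: Install rust toolchain
  uses: actions-rust-lang/setup-rust-toolchain@v1
  with:
    toolchain: stable                   # illustrative; the repo pins its own toolchain
    targets: aarch64-unknown-linux-gnu  # comma-separated list of target triples
```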
@@ -27,7 +27,7 @@ runs:
       - name: Install rust toolchain
         uses: actions-rust-lang/setup-rust-toolchain@v1
         with:
-          target: ${{ inputs.arch }}
+          targets: ${{ inputs.arch }}
           components: llvm-tools-preview

       - name: Rust Cache
@@ -18,8 +18,6 @@ runs:
         --set replicaCount=${{ inputs.etcd-replicas }} \
         --set resources.requests.cpu=50m \
         --set resources.requests.memory=128Mi \
         --set resources.limits.cpu=1000m \
         --set resources.limits.memory=2Gi \
-        --set auth.rbac.create=false \
-        --set auth.rbac.token.enabled=false \
         --set persistence.size=2Gi \
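These `--set` flags belong to a `helm` invocation that brings up etcd for integration tests; a rough sketch of the surrounding command, assuming the Bitnami chart and illustrative release and namespace names:

```sh
# Sketch only: chart source, release name, and namespace are assumptions.
helm install etcd oci://registry-1.docker.io/bitnamicharts/etcd \
  --namespace etcd --create-namespace \
  --set replicaCount=1 \
  --set resources.requests.cpu=50m \
  --set resources.requests.memory=128Mi \
  --set resources.limits.cpu=1000m \
  --set resources.limits.memory=2Gi \
  --set auth.rbac.create=false \
  --set auth.rbac.token.enabled=false \
  --set persistence.size=2Gi
```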
.github/actions/start-runner/action.yml (vendored): 2 changes
@@ -38,7 +38,7 @@ runs:
   steps:
     - name: Configure AWS credentials
       if: startsWith(inputs.runner, 'ec2')
-      uses: aws-actions/configure-aws-credentials@v4
+      uses: aws-actions/configure-aws-credentials@v2
       with:
         aws-access-key-id: ${{ inputs.aws-access-key-id }}
         aws-secret-access-key: ${{ inputs.aws-secret-access-key }}
.github/actions/stop-runner/action.yml (vendored): 2 changes
@@ -25,7 +25,7 @@ runs:
   steps:
     - name: Configure AWS credentials
       if: ${{ inputs.label && inputs.ec2-instance-id }}
-      uses: aws-actions/configure-aws-credentials@v4
+      uses: aws-actions/configure-aws-credentials@v2
       with:
         aws-access-key-id: ${{ inputs.aws-access-key-id }}
         aws-secret-access-key: ${{ inputs.aws-secret-access-key }}
.github/workflows/develop.yml (vendored): 4 changes
@@ -616,8 +616,8 @@ jobs:
         with:
           # Shares across multiple jobs
           shared-key: "check-rust-fmt"
-      - name: Check format
-        run: make fmt-check
+      - name: Run cargo fmt
+        run: cargo fmt --all -- --check

   clippy:
     name: Clippy
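The two variants run the same rustfmt check at heart; on the `make fmt-check` side the Makefile target also invokes a snafu lint script (see the Makefile hunk further down). Locally, either of these should reproduce the CI check:

```sh
# Direct invocation, as in the step that calls cargo directly:
cargo fmt --all -- --check

# Via the Makefile wrapper, as in the step that uses make:
make fmt-check
```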
.github/workflows/release.yml (vendored): 19 changes
@@ -33,7 +33,6 @@ on:
         description: The runner uses to build linux-arm64 artifacts
         default: ec2-c6g.4xlarge-arm64
         options:
-          - ubuntu-2204-32-cores-arm
           - ec2-c6g.xlarge-arm64 # 4C8G
           - ec2-c6g.2xlarge-arm64 # 8C16G
           - ec2-c6g.4xlarge-arm64 # 16C32G
@@ -99,6 +98,16 @@ permissions:
   contents: write # Allows the action to create a release.

 jobs:
+  check-builder-rust-version:
+    name: Check rust version in builder
+    runs-on: ubuntu-20.04
+    steps:
+      - uses: actions/checkout@v4
+      - name: Check Rust toolchain version
+        shell: bash
+        run: |
+          ./scripts/check-builder-rust-version.sh
+
   allocate-runners:
     name: Allocate runners
     if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
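The contents of `scripts/check-builder-rust-version.sh` are not shown in this diff. A purely hypothetical sketch of such a check might compare the toolchain pinned in `rust-toolchain.toml` against a version recorded for the builder image; the file layout and the `RUST_TOOLCHAIN_VERSION` variable below are assumptions, not the actual script:

```bash
#!/usr/bin/env bash
# Hypothetical sketch: compare the repo's pinned toolchain with the builder's.
set -euo pipefail

# Toolchain pinned by the repository, e.g. `channel = "nightly-2024-06-06"`.
repo_toolchain=$(sed -n 's/^channel = "\(.*\)"/\1/p' rust-toolchain.toml)

# Assumed location where the builder image's toolchain version is recorded.
builder_toolchain="${RUST_TOOLCHAIN_VERSION:?expected to be set by the builder image}"

if [ "$repo_toolchain" != "$builder_toolchain" ]; then
  echo "Rust toolchain mismatch: repo=$repo_toolchain builder=$builder_toolchain" >&2
  exit 1
fi
```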
@@ -123,11 +132,6 @@ jobs:
         with:
           fetch-depth: 0

-      - name: Check Rust toolchain version
-        shell: bash
-        run: |
-          ./scripts/check-builder-rust-version.sh
-
       # The create-version will create a global variable named 'version' in the global workflows.
       # - If it's a tag push release, the version is the tag name(${{ github.ref_name }});
       # - If it's a scheduled release, the version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-$buildTime', like v0.2.0-nigthly-20230313;
@@ -252,8 +256,7 @@ jobs:
           cargo-profile: ${{ env.CARGO_PROFILE }}
           features: ${{ matrix.features }}
           version: ${{ needs.allocate-runners.outputs.version }}
-          # We decide to disable the integration tests on macOS because it's unnecessary and time-consuming.
-          disable-run-tests: true
+          disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
           artifacts-dir: ${{ matrix.artifacts-dir-prefix }}-${{ needs.allocate-runners.outputs.version }}

       - name: Set build macos result
Cargo.lock (generated): 225 changes
@@ -214,7 +214,7 @@ checksum = "d301b3b94cb4b2f23d7917810addbbaff90738e0ca2be692bd027e70d7e0330c"

 [[package]]
 name = "api"
-version = "0.9.3"
+version = "0.9.2"
 dependencies = [
  "common-base",
  "common-decimal",
@@ -762,7 +762,7 @@ dependencies = [

 [[package]]
 name = "auth"
-version = "0.9.3"
+version = "0.9.2"
 dependencies = [
  "api",
  "async-trait",
@@ -1175,17 +1175,6 @@ dependencies = [
  "regex-automata 0.1.10",
 ]

-[[package]]
-name = "bstr"
-version = "1.10.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "40723b8fb387abc38f4f4a37c09073622e41dd12327033091ef8950659e6dc0c"
-dependencies = [
- "memchr",
- "regex-automata 0.4.7",
- "serde",
-]
-
 [[package]]
 name = "btoi"
 version = "0.4.3"
@@ -1297,7 +1286,7 @@ dependencies = [

 [[package]]
 name = "cache"
-version = "0.9.3"
+version = "0.9.2"
 dependencies = [
  "catalog",
  "common-error",
@@ -1305,7 +1294,7 @@ dependencies = [
  "common-meta",
  "moka",
  "snafu 0.8.4",
- "substrait 0.9.3",
+ "substrait 0.9.2",
 ]

 [[package]]
@@ -1332,7 +1321,7 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5"

 [[package]]
 name = "catalog"
-version = "0.9.3"
+version = "0.9.2"
 dependencies = [
  "api",
  "arrow",
@@ -1658,7 +1647,7 @@ checksum = "4b82cf0babdbd58558212896d1a4272303a57bdb245c2bf1147185fb45640e70"

 [[package]]
 name = "client"
-version = "0.9.3"
+version = "0.9.2"
 dependencies = [
  "api",
  "arc-swap",
@@ -1688,7 +1677,7 @@ dependencies = [
  "serde_json",
  "snafu 0.8.4",
  "substrait 0.37.3",
- "substrait 0.9.3",
+ "substrait 0.9.2",
  "tokio",
  "tokio-stream",
  "tonic 0.11.0",
@@ -1718,7 +1707,7 @@ dependencies = [

 [[package]]
 name = "cmd"
-version = "0.9.3"
+version = "0.9.2"
 dependencies = [
  "async-trait",
  "auth",
@@ -1772,10 +1761,9 @@ dependencies = [
  "serde_json",
  "servers",
  "session",
- "similar-asserts",
  "snafu 0.8.4",
  "store-api",
- "substrait 0.9.3",
+ "substrait 0.9.2",
  "table",
  "temp-env",
  "tempfile",
@@ -1821,15 +1809,13 @@ checksum = "55b672471b4e9f9e95499ea597ff64941a309b2cdbffcc46f2cc5e2d971fd335"

 [[package]]
 name = "common-base"
-version = "0.9.3"
+version = "0.9.2"
 dependencies = [
  "anymap",
  "async-trait",
  "bitvec",
  "bytes",
  "common-error",
  "common-macro",
  "futures",
  "paste",
  "serde",
  "snafu 0.8.4",
@@ -1839,7 +1825,7 @@ dependencies = [

 [[package]]
 name = "common-catalog"
-version = "0.9.3"
+version = "0.9.2"
 dependencies = [
  "chrono",
  "common-error",
@@ -1850,7 +1836,7 @@ dependencies = [

 [[package]]
 name = "common-config"
-version = "0.9.3"
+version = "0.9.2"
 dependencies = [
  "common-base",
  "common-error",
@@ -1873,7 +1859,7 @@ dependencies = [

 [[package]]
 name = "common-datasource"
-version = "0.9.3"
+version = "0.9.2"
 dependencies = [
  "arrow",
  "arrow-schema",
@@ -1910,7 +1896,7 @@ dependencies = [

 [[package]]
 name = "common-decimal"
-version = "0.9.3"
+version = "0.9.2"
 dependencies = [
  "bigdecimal",
  "common-error",
@@ -1923,7 +1909,7 @@ dependencies = [

 [[package]]
 name = "common-error"
-version = "0.9.3"
+version = "0.9.2"
 dependencies = [
  "snafu 0.8.4",
  "strum 0.25.0",
@@ -1932,7 +1918,7 @@ dependencies = [

 [[package]]
 name = "common-frontend"
-version = "0.9.3"
+version = "0.9.2"
 dependencies = [
  "api",
  "async-trait",
@@ -1947,7 +1933,7 @@ dependencies = [

 [[package]]
 name = "common-function"
-version = "0.9.3"
+version = "0.9.2"
 dependencies = [
  "api",
  "arc-swap",
@@ -1966,7 +1952,6 @@ dependencies = [
  "datatypes",
  "geohash",
  "h3o",
- "jsonb",
  "num",
  "num-traits",
  "once_cell",
@@ -1985,7 +1970,7 @@ dependencies = [

 [[package]]
 name = "common-greptimedb-telemetry"
-version = "0.9.3"
+version = "0.9.2"
 dependencies = [
  "async-trait",
  "common-runtime",
@@ -2002,7 +1987,7 @@ dependencies = [

 [[package]]
 name = "common-grpc"
-version = "0.9.3"
+version = "0.9.2"
 dependencies = [
  "api",
  "arrow-flight",
@@ -2028,7 +2013,7 @@ dependencies = [

 [[package]]
 name = "common-grpc-expr"
-version = "0.9.3"
+version = "0.9.2"
 dependencies = [
  "api",
  "common-base",
@@ -2046,7 +2031,7 @@ dependencies = [

 [[package]]
 name = "common-macro"
-version = "0.9.3"
+version = "0.9.2"
 dependencies = [
  "arc-swap",
  "common-query",
@@ -2060,7 +2045,7 @@ dependencies = [

 [[package]]
 name = "common-mem-prof"
-version = "0.9.3"
+version = "0.9.2"
 dependencies = [
  "common-error",
  "common-macro",
@@ -2073,7 +2058,7 @@ dependencies = [

 [[package]]
 name = "common-meta"
-version = "0.9.3"
+version = "0.9.2"
 dependencies = [
  "anymap2",
  "api",
@@ -2129,11 +2114,11 @@ dependencies = [

 [[package]]
 name = "common-plugins"
-version = "0.9.3"
+version = "0.9.2"

 [[package]]
 name = "common-procedure"
-version = "0.9.3"
+version = "0.9.2"
 dependencies = [
  "async-stream",
  "async-trait",
@@ -2159,7 +2144,7 @@ dependencies = [

 [[package]]
 name = "common-procedure-test"
-version = "0.9.3"
+version = "0.9.2"
 dependencies = [
  "async-trait",
  "common-procedure",
@@ -2167,7 +2152,7 @@ dependencies = [

 [[package]]
 name = "common-query"
-version = "0.9.3"
+version = "0.9.2"
 dependencies = [
  "api",
  "async-trait",
@@ -2193,7 +2178,7 @@ dependencies = [

 [[package]]
 name = "common-recordbatch"
-version = "0.9.3"
+version = "0.9.2"
 dependencies = [
  "arc-swap",
  "common-error",
@@ -2212,7 +2197,7 @@ dependencies = [

 [[package]]
 name = "common-runtime"
-version = "0.9.3"
+version = "0.9.2"
 dependencies = [
  "async-trait",
  "common-error",
@@ -2234,7 +2219,7 @@ dependencies = [

 [[package]]
 name = "common-telemetry"
-version = "0.9.3"
+version = "0.9.2"
 dependencies = [
  "atty",
  "backtrace",
@@ -2261,7 +2246,7 @@ dependencies = [

 [[package]]
 name = "common-test-util"
-version = "0.9.3"
+version = "0.9.2"
 dependencies = [
  "client",
  "common-query",
@@ -2273,7 +2258,7 @@ dependencies = [

 [[package]]
 name = "common-time"
-version = "0.9.3"
+version = "0.9.2"
 dependencies = [
  "arrow",
  "chrono",
@@ -2289,7 +2274,7 @@ dependencies = [

 [[package]]
 name = "common-version"
-version = "0.9.3"
+version = "0.9.2"
 dependencies = [
  "build-data",
  "const_format",
@@ -2300,7 +2285,7 @@ dependencies = [

 [[package]]
 name = "common-wal"
-version = "0.9.3"
+version = "0.9.2"
 dependencies = [
  "common-base",
  "common-error",
@@ -2308,7 +2293,6 @@ dependencies = [
  "common-telemetry",
  "futures-util",
  "humantime-serde",
  "num_cpus",
  "rskafka",
  "rustls 0.23.10",
  "rustls-native-certs",
@@ -3109,7 +3093,7 @@ dependencies = [

 [[package]]
 name = "datanode"
-version = "0.9.3"
+version = "0.9.2"
 dependencies = [
  "api",
  "arrow-flight",
@@ -3158,7 +3142,7 @@ dependencies = [
  "session",
  "snafu 0.8.4",
  "store-api",
- "substrait 0.9.3",
+ "substrait 0.9.2",
  "table",
  "tokio",
  "toml 0.8.14",
@@ -3167,12 +3151,11 @@ dependencies = [

 [[package]]
 name = "datatypes"
-version = "0.9.3"
+version = "0.9.2"
 dependencies = [
  "arrow",
  "arrow-array",
  "arrow-schema",
  "base64 0.21.7",
  "common-base",
  "common-decimal",
  "common-error",
@@ -3181,8 +3164,6 @@ dependencies = [
  "common-time",
  "datafusion-common",
  "enum_dispatch",
  "greptime-proto",
- "jsonb",
  "num",
  "num-traits",
  "ordered-float 3.9.2",
@@ -3715,12 +3696,6 @@ version = "0.1.9"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a"

-[[package]]
-name = "fast-float"
-version = "0.2.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "95765f67b4b18863968b4a1bd5bb576f732b29a4a28c7cd84c09fa3e2875f33c"
-
 [[package]]
 name = "fastdivide"
 version = "0.4.1"
@@ -3746,7 +3721,7 @@ dependencies = [

 [[package]]
 name = "file-engine"
-version = "0.9.3"
+version = "0.9.2"
 dependencies = [
  "api",
  "async-trait",
@@ -3848,7 +3823,7 @@ checksum = "28a80e3145d8ad11ba0995949bbcf48b9df2be62772b3d351ef017dff6ecb853"

 [[package]]
 name = "flow"
-version = "0.9.3"
+version = "0.9.2"
 dependencies = [
  "api",
  "arrow",
@@ -3905,7 +3880,7 @@ dependencies = [
  "snafu 0.8.4",
  "store-api",
  "strum 0.25.0",
- "substrait 0.9.3",
+ "substrait 0.9.2",
  "table",
  "tokio",
  "tonic 0.11.0",
@@ -3952,7 +3927,7 @@ checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa"

 [[package]]
 name = "frontend"
-version = "0.9.3"
+version = "0.9.2"
 dependencies = [
  "api",
  "arc-swap",
@@ -4325,7 +4300,7 @@ checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b"
 [[package]]
 name = "greptime-proto"
 version = "0.1.0"
-source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=973f49cde88a582fb65755cc572ebcf6fb93ccf7#973f49cde88a582fb65755cc572ebcf6fb93ccf7"
+source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=c437b55725b7f5224fe9d46db21072b4a682ee4b#c437b55725b7f5224fe9d46db21072b4a682ee4b"
 dependencies = [
  "prost 0.12.6",
  "serde",
@@ -5103,7 +5078,7 @@ dependencies = [

 [[package]]
 name = "index"
-version = "0.9.3"
+version = "0.9.2"
 dependencies = [
  "async-trait",
  "asynchronous-codec",
@@ -5432,21 +5407,6 @@ dependencies = [
  "serde",
 ]

-[[package]]
-name = "jsonb"
-version = "0.4.1"
-source = "git+https://github.com/CookiePieWw/jsonb.git?rev=d0166c130fce903bf6c58643417a3173a6172d31#d0166c130fce903bf6c58643417a3173a6172d31"
-dependencies = [
- "byteorder",
- "fast-float",
- "itoa",
- "nom",
- "ordered-float 4.2.0",
- "rand",
- "ryu",
- "serde_json",
-]
-
 [[package]]
 name = "jsonpath-rust"
 version = "0.5.1"
@@ -5898,7 +5858,7 @@ checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c"

 [[package]]
 name = "log-store"
-version = "0.9.3"
+version = "0.9.2"
 dependencies = [
  "async-stream",
  "async-trait",
@@ -6210,7 +6170,7 @@ dependencies = [

 [[package]]
 name = "meta-client"
-version = "0.9.3"
+version = "0.9.2"
 dependencies = [
  "api",
  "async-trait",
@@ -6236,7 +6196,7 @@ dependencies = [

 [[package]]
 name = "meta-srv"
-version = "0.9.3"
+version = "0.9.2"
 dependencies = [
  "api",
  "async-trait",
@@ -6314,7 +6274,7 @@ dependencies = [

 [[package]]
 name = "metric-engine"
-version = "0.9.3"
+version = "0.9.2"
 dependencies = [
  "api",
  "aquamarine",
@@ -6405,7 +6365,7 @@ dependencies = [

 [[package]]
 name = "mito2"
-version = "0.9.3"
+version = "0.9.2"
 dependencies = [
  "api",
  "aquamarine",
@@ -7052,7 +7012,7 @@ dependencies = [

 [[package]]
 name = "object-store"
-version = "0.9.3"
+version = "0.9.2"
 dependencies = [
  "anyhow",
  "bytes",
@@ -7240,11 +7200,9 @@ version = "0.5.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "3a8fddc9b68f5b80dae9d6f510b88e02396f006ad48cac349411fbecc80caae4"
 dependencies = [
- "hex",
  "opentelemetry 0.22.0",
  "opentelemetry_sdk 0.22.1",
  "prost 0.12.6",
- "serde",
  "tonic 0.11.0",
 ]

@@ -7301,7 +7259,7 @@ dependencies = [

 [[package]]
 name = "operator"
-version = "0.9.3"
+version = "0.9.2"
 dependencies = [
  "api",
  "async-trait",
@@ -7346,7 +7304,7 @@ dependencies = [
  "sql",
  "sqlparser 0.45.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=54a267ac89c09b11c0c88934690530807185d3e7)",
  "store-api",
- "substrait 0.9.3",
+ "substrait 0.9.2",
  "table",
  "tokio",
  "tokio-util",
@@ -7596,7 +7554,7 @@ dependencies = [

 [[package]]
 name = "partition"
-version = "0.9.3"
+version = "0.9.2"
 dependencies = [
  "api",
  "async-trait",
@@ -7885,7 +7843,7 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184"

 [[package]]
 name = "pipeline"
-version = "0.9.3"
+version = "0.9.2"
 dependencies = [
  "ahash 0.8.11",
  "api",
@@ -8046,7 +8004,7 @@ dependencies = [

 [[package]]
 name = "plugins"
-version = "0.9.3"
+version = "0.9.2"
 dependencies = [
  "auth",
  "common-base",
@@ -8102,8 +8060,6 @@ dependencies = [
  "chrono",
  "fallible-iterator",
  "postgres-protocol",
- "serde",
- "serde_json",
 ]

 [[package]]
@@ -8317,7 +8273,7 @@ dependencies = [

 [[package]]
 name = "promql"
-version = "0.9.3"
+version = "0.9.2"
 dependencies = [
  "ahash 0.8.11",
  "async-trait",
@@ -8552,7 +8508,7 @@ dependencies = [

 [[package]]
 name = "puffin"
-version = "0.9.3"
+version = "0.9.2"
 dependencies = [
  "async-compression 0.4.11",
  "async-trait",
@@ -8674,7 +8630,7 @@ dependencies = [

 [[package]]
 name = "query"
-version = "0.9.3"
+version = "0.9.2"
 dependencies = [
  "ahash 0.8.11",
  "api",
@@ -8737,7 +8693,7 @@ dependencies = [
  "stats-cli",
  "store-api",
  "streaming-stats",
- "substrait 0.9.3",
+ "substrait 0.9.2",
  "table",
  "tokio",
  "tokio-stream",
@@ -9669,7 +9625,7 @@ source = "git+https://github.com/discord9/RustPython?rev=9ed5137412#9ed51374125b
 dependencies = [
  "ascii",
  "bitflags 1.3.2",
- "bstr 0.2.17",
+ "bstr",
  "cfg-if",
  "hexf-parse",
  "itertools 0.10.5",
@@ -9704,7 +9660,7 @@ version = "0.2.0"
 source = "git+https://github.com/discord9/RustPython?rev=9ed5137412#9ed51374125b5f1a9e5cee5dd7e27023b8591f1e"
 dependencies = [
  "bitflags 1.3.2",
- "bstr 0.2.17",
+ "bstr",
  "itertools 0.10.5",
  "lz4_flex 0.9.5",
  "num-bigint",
@@ -9857,7 +9813,7 @@ dependencies = [
  "ascii",
  "atty",
  "bitflags 1.3.2",
- "bstr 0.2.17",
+ "bstr",
  "caseless",
  "cfg-if",
  "chrono",
@@ -10099,7 +10055,7 @@ checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49"

 [[package]]
 name = "script"
-version = "0.9.3"
+version = "0.9.2"
 dependencies = [
  "api",
  "arc-swap",
@@ -10393,7 +10349,7 @@ dependencies = [

 [[package]]
 name = "servers"
-version = "0.9.3"
+version = "0.9.2"
 dependencies = [
  "aide",
  "api",
@@ -10442,7 +10398,6 @@ dependencies = [
  "hyper 0.14.29",
  "influxdb_line_protocol",
  "itertools 0.10.5",
- "jsonb",
  "lazy_static",
  "mime_guess",
  "mysql_async",
@@ -10500,7 +10455,7 @@ dependencies = [

 [[package]]
 name = "session"
-version = "0.9.3"
+version = "0.9.2"
 dependencies = [
  "api",
  "arc-swap",
@@ -10625,26 +10580,6 @@ version = "0.1.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "f27f6278552951f1f2b8cf9da965d10969b2efdea95a6ec47987ab46edfe263a"

-[[package]]
-name = "similar"
-version = "2.6.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1de1d4f81173b03af4c0cbed3c898f6bff5b870e4a7f5d6f4057d62a7a4b686e"
-dependencies = [
- "bstr 1.10.0",
- "unicode-segmentation",
-]
-
-[[package]]
-name = "similar-asserts"
-version = "1.6.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cfe85670573cd6f0fa97940f26e7e6601213c3b0555246c24234131f88c5709e"
-dependencies = [
- "console",
- "similar",
-]
-
 [[package]]
 name = "simple_asn1"
 version = "0.6.2"
@@ -10821,7 +10756,7 @@ dependencies = [

 [[package]]
 name = "sql"
-version = "0.9.3"
+version = "0.9.2"
 dependencies = [
  "api",
  "chrono",
@@ -10842,7 +10777,6 @@ dependencies = [
  "hex",
  "iso8601",
  "itertools 0.10.5",
- "jsonb",
  "lazy_static",
  "regex",
  "serde_json",
@@ -10882,7 +10816,7 @@ dependencies = [

 [[package]]
 name = "sqlness-runner"
-version = "0.9.3"
+version = "0.9.2"
 dependencies = [
  "async-trait",
  "clap 4.5.7",
@@ -11099,7 +11033,7 @@ dependencies = [

 [[package]]
 name = "store-api"
-version = "0.9.3"
+version = "0.9.2"
 dependencies = [
  "api",
  "aquamarine",
@@ -11268,7 +11202,7 @@ dependencies = [

 [[package]]
 name = "substrait"
-version = "0.9.3"
+version = "0.9.2"
 dependencies = [
  "async-trait",
  "bytes",
@@ -11469,7 +11403,7 @@ dependencies = [

 [[package]]
 name = "table"
-version = "0.9.3"
+version = "0.9.2"
 dependencies = [
  "api",
  "async-trait",
@@ -11734,7 +11668,7 @@ checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76"

 [[package]]
 name = "tests-fuzz"
-version = "0.9.3"
+version = "0.9.2"
 dependencies = [
  "arbitrary",
  "async-trait",
@@ -11776,7 +11710,7 @@ dependencies = [

 [[package]]
 name = "tests-integration"
-version = "0.9.3"
+version = "0.9.2"
 dependencies = [
  "api",
  "arrow-flight",
@@ -11809,7 +11743,6 @@ dependencies = [
  "datanode",
  "datatypes",
  "dotenv",
  "flate2",
  "flow",
  "frontend",
  "futures",
@@ -11833,12 +11766,11 @@ dependencies = [
  "serde_json",
  "servers",
  "session",
- "similar-asserts",
  "snafu 0.8.4",
  "sql",
  "sqlx",
  "store-api",
- "substrait 0.9.3",
+ "substrait 0.9.2",
  "table",
  "tempfile",
  "time",
@@ -12557,16 +12489,6 @@ dependencies = [
  "web-time 0.2.4",
 ]

-[[package]]
-name = "tracing-serde"
-version = "0.1.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bc6b213177105856957181934e4920de57730fc69bf42c37ee5bb664d406d9e1"
-dependencies = [
- "serde",
- "tracing-core",
-]
-
 [[package]]
 name = "tracing-subscriber"
 version = "0.3.18"
@@ -12577,15 +12499,12 @@ dependencies = [
  "nu-ansi-term",
  "once_cell",
  "regex",
- "serde",
- "serde_json",
  "sharded-slab",
  "smallvec",
  "thread_local",
  "tracing",
  "tracing-core",
  "tracing-log 0.2.0",
- "tracing-serde",
 ]

 [[package]]
@@ -64,7 +64,7 @@ members = [
 resolver = "2"

 [workspace.package]
-version = "0.9.3"
+version = "0.9.2"
 edition = "2021"
 license = "Apache-2.0"
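All of the `0.9.3`/`0.9.2` pairs in the Cargo.lock hunks above trace back to this single field: member crates inherit the workspace version, roughly as in this sketch (the crate name is illustrative):

```toml
# Root Cargo.toml
[workspace.package]
version = "0.9.2"

# A member crate's Cargo.toml inheriting the shared version
[package]
name = "api"
version.workspace = true
```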
@@ -120,11 +120,10 @@ etcd-client = { version = "0.13" }
 fst = "0.4.7"
 futures = "0.3"
 futures-util = "0.3"
-greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "973f49cde88a582fb65755cc572ebcf6fb93ccf7" }
+greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "c437b55725b7f5224fe9d46db21072b4a682ee4b" }
 humantime = "2.1"
 humantime-serde = "1.1"
 itertools = "0.10"
-jsonb = { git = "https://github.com/CookiePieWw/jsonb.git", rev = "d0166c130fce903bf6c58643417a3173a6172d31", default-features = false }
 lazy_static = "1.4"
 meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "80eb97c24c88af4dd9a86f8bbaf50e741d4eb8cd" }
 mockall = "0.11.4"
@@ -136,7 +135,6 @@ opentelemetry-proto = { version = "0.5", features = [
  "gen-tonic",
  "metrics",
  "trace",
- "with-serde",
 ] }
 parquet = { version = "51.0.0", default-features = false, features = ["arrow", "async", "object_store"] }
 paste = "1.0"
@@ -169,7 +167,6 @@ shadow-rs = "0.31"
 smallvec = { version = "1", features = ["serde"] }
 snafu = "0.8"
 sysinfo = "0.30"
-similar-asserts = "1.6.0"
 # on branch v0.44.x
 sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "54a267ac89c09b11c0c88934690530807185d3e7", features = [
  "visitor",
Makefile: 3 changes
@@ -191,7 +191,6 @@ fix-clippy: ## Fix clippy violations.
 .PHONY: fmt-check
 fmt-check: ## Check code format.
 	cargo fmt --all -- --check
-	python3 scripts/check-snafu.py

 .PHONY: start-etcd
 start-etcd: ## Start single node etcd for testing purpose.
@@ -221,7 +220,7 @@ config-docs: ## Generate configuration documentation from toml files.
 	docker run --rm \
 		-v ${PWD}:/greptimedb \
 		-w /greptimedb/config \
-		toml2docs/toml2docs:v0.1.3 \
+		toml2docs/toml2docs:v0.1.1 \
 		-p '##' \
 		-t ./config-docs-template.md \
 		-o ./config.md
@@ -74,7 +74,7 @@ Our core developers have been building time-series data platforms for years. Bas

 * **Compatible with InfluxDB, Prometheus and more protocols**

-  Widely adopted database protocols and APIs, including MySQL, PostgreSQL, and Prometheus Remote Storage, etc. [Read more](https://docs.greptime.com/user-guide/protocols/overview).
+  Widely adopted database protocols and APIs, including MySQL, PostgreSQL, and Prometheus Remote Storage, etc. [Read more](https://docs.greptime.com/user-guide/clients/overview).

 ## Try GreptimeDB
config/config.md: 193 changes
@@ -14,9 +14,7 @@
 | --- | -----| ------- | ----------- |
 | `mode` | String | `standalone` | The running mode of the datanode. It can be `standalone` or `distributed`. |
 | `enable_telemetry` | Bool | `true` | Enable telemetry to collect anonymous usage data. |
-| `default_timezone` | String | Unset | The default timezone of the server. |
-| `init_regions_in_background` | Bool | `false` | Initialize all regions in the background during the startup.<br/>By default, it provides services after all regions have been initialized. |
-| `init_regions_parallelism` | Integer | `16` | Parallelism of initializing regions. |
+| `default_timezone` | String | `None` | The default timezone of the server. |
 | `runtime` | -- | -- | The runtime options. |
 | `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
 | `runtime.compact_rt_size` | Integer | `4` | The number of threads to execute the runtime for global write operations. |
@@ -29,8 +27,8 @@
 | `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
 | `grpc.tls` | -- | -- | gRPC server TLS options, see `mysql.tls` section. |
 | `grpc.tls.mode` | String | `disable` | TLS mode. |
-| `grpc.tls.cert_path` | String | Unset | Certificate file path. |
-| `grpc.tls.key_path` | String | Unset | Private key file path. |
+| `grpc.tls.cert_path` | String | `None` | Certificate file path. |
+| `grpc.tls.key_path` | String | `None` | Private key file path. |
 | `grpc.tls.watch` | Bool | `false` | Watch for Certificate and key file change and auto reload.<br/>For now, gRPC tls config does not support auto reload. |
 | `mysql` | -- | -- | MySQL server options. |
 | `mysql.enable` | Bool | `true` | Whether to enable. |
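For illustration, the gRPC TLS keys in the rows above map onto a TOML block like this sketch (certificate paths are placeholders):

```toml
[grpc.tls]
mode = "disable"
cert_path = "/path/to/server.crt"  # placeholder
key_path = "/path/to/server.key"   # placeholder
watch = false
```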
@@ -38,8 +36,8 @@
 | `mysql.runtime_size` | Integer | `2` | The number of server worker threads. |
 | `mysql.tls` | -- | -- | -- |
 | `mysql.tls.mode` | String | `disable` | TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html<br/>- `disable` (default value)<br/>- `prefer`<br/>- `require`<br/>- `verify-ca`<br/>- `verify-full` |
-| `mysql.tls.cert_path` | String | Unset | Certificate file path. |
-| `mysql.tls.key_path` | String | Unset | Private key file path. |
+| `mysql.tls.cert_path` | String | `None` | Certificate file path. |
+| `mysql.tls.key_path` | String | `None` | Private key file path. |
 | `mysql.tls.watch` | Bool | `false` | Watch for Certificate and key file change and auto reload |
 | `postgres` | -- | -- | PostgresSQL server options. |
 | `postgres.enable` | Bool | `true` | Whether to enable |
@@ -47,8 +45,8 @@
 | `postgres.runtime_size` | Integer | `2` | The number of server worker threads. |
 | `postgres.tls` | -- | -- | PostgresSQL server TLS options, see `mysql.tls` section. |
 | `postgres.tls.mode` | String | `disable` | TLS mode. |
-| `postgres.tls.cert_path` | String | Unset | Certificate file path. |
-| `postgres.tls.key_path` | String | Unset | Private key file path. |
+| `postgres.tls.cert_path` | String | `None` | Certificate file path. |
+| `postgres.tls.key_path` | String | `None` | Private key file path. |
 | `postgres.tls.watch` | Bool | `false` | Watch for Certificate and key file change and auto reload |
 | `opentsdb` | -- | -- | OpenTSDB protocol options. |
 | `opentsdb.enable` | Bool | `true` | Whether to enable OpenTSDB put in HTTP API. |
@@ -59,7 +57,7 @@
 | `prom_store.with_metric_engine` | Bool | `true` | Whether to store the data from Prometheus remote write in metric engine. |
 | `wal` | -- | -- | The WAL options. |
 | `wal.provider` | String | `raft_engine` | The provider of the WAL.<br/>- `raft_engine`: the wal is stored in the local file system by raft-engine.<br/>- `kafka`: it's remote wal that data is stored in Kafka. |
-| `wal.dir` | String | Unset | The directory to store the WAL files.<br/>**It's only used when the provider is `raft_engine`**. |
+| `wal.dir` | String | `None` | The directory to store the WAL files.<br/>**It's only used when the provider is `raft_engine`**. |
 | `wal.file_size` | String | `256MB` | The size of the WAL segment file.<br/>**It's only used when the provider is `raft_engine`**. |
 | `wal.purge_threshold` | String | `4GB` | The threshold of the WAL size to trigger a flush.<br/>**It's only used when the provider is `raft_engine`**. |
 | `wal.purge_interval` | String | `10m` | The interval to trigger a flush.<br/>**It's only used when the provider is `raft_engine`**. |
@@ -68,7 +66,6 @@
 | `wal.enable_log_recycle` | Bool | `true` | Whether to reuse logically truncated log files.<br/>**It's only used when the provider is `raft_engine`**. |
 | `wal.prefill_log_files` | Bool | `false` | Whether to pre-create log files on start up.<br/>**It's only used when the provider is `raft_engine`**. |
 | `wal.sync_period` | String | `10s` | Duration for fsyncing log files.<br/>**It's only used when the provider is `raft_engine`**. |
-| `wal.recovery_parallelism` | Integer | `2` | Parallelism during WAL recovery. |
 | `wal.broker_endpoints` | Array | -- | The Kafka broker endpoints.<br/>**It's only used when the provider is `kafka`**. |
 | `wal.auto_create_topics` | Bool | `true` | Automatically create topics for WAL.<br/>Set to `true` to automatically create topics for WAL.<br/>Otherwise, use topics named `topic_name_prefix_[0..num_topics)` |
 | `wal.num_topics` | Integer | `64` | Number of topics.<br/>**It's only used when the provider is `kafka`**. |
@@ -91,22 +88,22 @@
 | `storage` | -- | -- | The data storage options. |
 | `storage.data_home` | String | `/tmp/greptimedb/` | The working home directory. |
 | `storage.type` | String | `File` | The storage type used to store the data.<br/>- `File`: the data is stored in the local file system.<br/>- `S3`: the data is stored in the S3 object storage.<br/>- `Gcs`: the data is stored in the Google Cloud Storage.<br/>- `Azblob`: the data is stored in the Azure Blob Storage.<br/>- `Oss`: the data is stored in the Aliyun OSS. |
-| `storage.cache_path` | String | Unset | Cache configuration for object storage such as 'S3' etc.<br/>The local file cache directory. |
-| `storage.cache_capacity` | String | Unset | The local file cache capacity in bytes. |
-| `storage.bucket` | String | Unset | The S3 bucket name.<br/>**It's only used when the storage type is `S3`, `Oss` and `Gcs`**. |
-| `storage.root` | String | Unset | The S3 data will be stored in the specified prefix, for example, `s3://${bucket}/${root}`.<br/>**It's only used when the storage type is `S3`, `Oss` and `Azblob`**. |
-| `storage.access_key_id` | String | Unset | The access key id of the aws account.<br/>It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.<br/>**It's only used when the storage type is `S3` and `Oss`**. |
-| `storage.secret_access_key` | String | Unset | The secret access key of the aws account.<br/>It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.<br/>**It's only used when the storage type is `S3`**. |
-| `storage.access_key_secret` | String | Unset | The secret access key of the aliyun account.<br/>**It's only used when the storage type is `Oss`**. |
-| `storage.account_name` | String | Unset | The account key of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
-| `storage.account_key` | String | Unset | The account key of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
-| `storage.scope` | String | Unset | The scope of the google cloud storage.<br/>**It's only used when the storage type is `Gcs`**. |
-| `storage.credential_path` | String | Unset | The credential path of the google cloud storage.<br/>**It's only used when the storage type is `Gcs`**. |
-| `storage.credential` | String | Unset | The credential of the google cloud storage.<br/>**It's only used when the storage type is `Gcs`**. |
-| `storage.container` | String | Unset | The container of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
-| `storage.sas_token` | String | Unset | The sas token of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
-| `storage.endpoint` | String | Unset | The endpoint of the S3 service.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
-| `storage.region` | String | Unset | The region of the S3 service.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
+| `storage.cache_path` | String | `None` | Cache configuration for object storage such as 'S3' etc.<br/>The local file cache directory. |
+| `storage.cache_capacity` | String | `None` | The local file cache capacity in bytes. |
+| `storage.bucket` | String | `None` | The S3 bucket name.<br/>**It's only used when the storage type is `S3`, `Oss` and `Gcs`**. |
+| `storage.root` | String | `None` | The S3 data will be stored in the specified prefix, for example, `s3://${bucket}/${root}`.<br/>**It's only used when the storage type is `S3`, `Oss` and `Azblob`**. |
+| `storage.access_key_id` | String | `None` | The access key id of the aws account.<br/>It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.<br/>**It's only used when the storage type is `S3` and `Oss`**. |
+| `storage.secret_access_key` | String | `None` | The secret access key of the aws account.<br/>It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.<br/>**It's only used when the storage type is `S3`**. |
+| `storage.access_key_secret` | String | `None` | The secret access key of the aliyun account.<br/>**It's only used when the storage type is `Oss`**. |
+| `storage.account_name` | String | `None` | The account key of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
+| `storage.account_key` | String | `None` | The account key of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
+| `storage.scope` | String | `None` | The scope of the google cloud storage.<br/>**It's only used when the storage type is `Gcs`**. |
+| `storage.credential_path` | String | `None` | The credential path of the google cloud storage.<br/>**It's only used when the storage type is `Gcs`**. |
+| `storage.credential` | String | `None` | The credential of the google cloud storage.<br/>**It's only used when the storage type is `Gcs`**. |
+| `storage.container` | String | `None` | The container of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
+| `storage.sas_token` | String | `None` | The sas token of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
+| `storage.endpoint` | String | `None` | The endpoint of the S3 service.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
+| `storage.region` | String | `None` | The region of the S3 service.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
 | `[[region_engine]]` | -- | -- | The region engine options. You can configure multiple region engines. |
 | `region_engine.mito` | -- | -- | The Mito engine options. |
 | `region_engine.mito.num_workers` | Integer | `8` | Number of region workers. |
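For illustration, an S3-backed `[storage]` section built from a subset of the keys above might read as follows (bucket, root, and region values are placeholders; as the rows note, IAM roles are preferred over hardcoded keys):

```toml
[storage]
data_home = "/tmp/greptimedb/"
type = "S3"
bucket = "my-greptimedb-bucket"  # placeholder
root = "greptimedb"              # data lands under s3://my-greptimedb-bucket/greptimedb
region = "us-west-2"             # placeholder
```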
@@ -116,21 +113,20 @@
 | `region_engine.mito.compress_manifest` | Bool | `false` | Whether to compress manifest and checkpoint file by gzip (default false). |
 | `region_engine.mito.max_background_jobs` | Integer | `4` | Max number of running background jobs |
 | `region_engine.mito.auto_flush_interval` | String | `1h` | Interval to auto flush a region if it has not flushed yet. |
-| `region_engine.mito.global_write_buffer_size` | String | Auto | Global write buffer size for all regions. If not set, it's default to 1/8 of OS memory with a max limitation of 1GB. |
-| `region_engine.mito.global_write_buffer_reject_size` | String | Auto | Global write buffer size threshold to reject write requests. If not set, it's default to 2 times of `global_write_buffer_size`. |
-| `region_engine.mito.sst_meta_cache_size` | String | Auto | Cache size for SST metadata. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/32 of OS memory with a max limitation of 128MB. |
-| `region_engine.mito.vector_cache_size` | String | Auto | Cache size for vectors and arrow arrays. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
-| `region_engine.mito.page_cache_size` | String | Auto | Cache size for pages of SST row groups. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/8 of OS memory. |
-| `region_engine.mito.selector_result_cache_size` | String | Auto | Cache size for time series selector (e.g. `last_value()`). Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
+| `region_engine.mito.global_write_buffer_size` | String | `1GB` | Global write buffer size for all regions. If not set, it's default to 1/8 of OS memory with a max limitation of 1GB. |
+| `region_engine.mito.global_write_buffer_reject_size` | String | `2GB` | Global write buffer size threshold to reject write requests. If not set, it's default to 2 times of `global_write_buffer_size` |
+| `region_engine.mito.sst_meta_cache_size` | String | `128MB` | Cache size for SST metadata. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/32 of OS memory with a max limitation of 128MB. |
+| `region_engine.mito.vector_cache_size` | String | `512MB` | Cache size for vectors and arrow arrays. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
+| `region_engine.mito.page_cache_size` | String | `512MB` | Cache size for pages of SST row groups. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/8 of OS memory. |
+| `region_engine.mito.selector_result_cache_size` | String | `512MB` | Cache size for time series selector (e.g. `last_value()`). Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
 | `region_engine.mito.enable_experimental_write_cache` | Bool | `false` | Whether to enable the experimental write cache. |
 | `region_engine.mito.experimental_write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}/write_cache`. |
 | `region_engine.mito.experimental_write_cache_size` | String | `512MB` | Capacity for write cache. |
-| `region_engine.mito.experimental_write_cache_ttl` | String | Unset | TTL for write cache. |
+| `region_engine.mito.experimental_write_cache_ttl` | String | `None` | TTL for write cache. |
 | `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
 | `region_engine.mito.scan_parallelism` | Integer | `0` | Parallelism to scan a region (default: 1/4 of cpu cores).<br/>- `0`: using the default value (1/4 of cpu cores).<br/>- `1`: scan in current thread.<br/>- `n`: scan in parallelism n. |
 | `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
 | `region_engine.mito.allow_stale_entries` | Bool | `false` | Whether to allow stale WAL entries read during replay. |
-| `region_engine.mito.min_compaction_interval` | String | `0m` | Minimum time interval between two compactions.<br/>To align with the old behavior, the default value is 0 (no restrictions). |
 | `region_engine.mito.index` | -- | -- | The options for index in Mito engine. |
 | `region_engine.mito.index.aux_path` | String | `""` | Auxiliary directory path for the index in filesystem, used to store intermediate files for<br/>creating the index and staging files for searching the index, defaults to `{data_home}/index_intermediate`.<br/>The default name for this directory is `index_intermediate` for backward compatibility.<br/><br/>This path contains two subdirectories:<br/>- `__intm`: for storing intermediate files used during creating index.<br/>- `staging`: for storing staging files used during searching index. |
 | `region_engine.mito.index.staging_size` | String | `2GB` | The max capacity of the staging directory. |
@@ -154,24 +150,23 @@
 | `region_engine.mito.memtable.fork_dictionary_bytes` | String | `1GiB` | Max dictionary bytes.<br/>Only available for `partition_tree` memtable. |
 | `region_engine.file` | -- | -- | Enable the file engine. |
 | `logging` | -- | -- | The logging options. |
-| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
-| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
+| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. |
+| `logging.level` | String | `None` | The log level. Can be `info`/`debug`/`warn`/`error`. |
 | `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
 | `logging.otlp_endpoint` | String | `http://localhost:4317` | The OTLP tracing endpoint. |
 | `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
 | `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. |
 | `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing will be sampled and exported.<br/>Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.<br/>ratio > 1 are treated as 1. Fractions < 0 are treated as 0 |
 | `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
 | `export_metrics` | -- | -- | The datanode can export its metrics and send to Prometheus compatible service (e.g. send to `greptimedb` itself) from remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape. |
 | `export_metrics.enable` | Bool | `false` | whether enable export metrics. |
 | `export_metrics.write_interval` | String | `30s` | The interval of export metrics. |
-| `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommended to collect metrics generated by itself<br/>You must create the database before enabling it. |
-| `export_metrics.self_import.db` | String | Unset | -- |
+| `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommend to collect metrics generated by itself<br/>You must create the database before enabling it. |
+| `export_metrics.self_import.db` | String | `None` | -- |
 | `export_metrics.remote_write` | -- | -- | -- |
 | `export_metrics.remote_write.url` | String | `""` | The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. |
 | `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
 | `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
-| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
+| `tracing.tokio_console_addr` | String | `None` | The tokio console address. |

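For illustration, the logging rows above correspond to a TOML block along these lines:

```toml
[logging]
dir = "/tmp/greptimedb/logs"
level = "info"
log_format = "text"
append_stdout = true
```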
## Distributed Mode
@@ -180,7 +175,7 @@

 | Key | Type | Default | Descriptions |
 | --- | -----| ------- | ----------- |
-| `default_timezone` | String | Unset | The default timezone of the server. |
+| `default_timezone` | String | `None` | The default timezone of the server. |
 | `runtime` | -- | -- | The runtime options. |
 | `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
 | `runtime.compact_rt_size` | Integer | `4` | The number of threads to execute the runtime for global write operations. |
@@ -197,8 +192,8 @@
 | `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
 | `grpc.tls` | -- | -- | gRPC server TLS options, see `mysql.tls` section. |
 | `grpc.tls.mode` | String | `disable` | TLS mode. |
-| `grpc.tls.cert_path` | String | Unset | Certificate file path. |
-| `grpc.tls.key_path` | String | Unset | Private key file path. |
+| `grpc.tls.cert_path` | String | `None` | Certificate file path. |
+| `grpc.tls.key_path` | String | `None` | Private key file path. |
 | `grpc.tls.watch` | Bool | `false` | Watch for Certificate and key file change and auto reload.<br/>For now, gRPC tls config does not support auto reload. |
 | `mysql` | -- | -- | MySQL server options. |
 | `mysql.enable` | Bool | `true` | Whether to enable. |
@@ -206,8 +201,8 @@
 | `mysql.runtime_size` | Integer | `2` | The number of server worker threads. |
 | `mysql.tls` | -- | -- | -- |
 | `mysql.tls.mode` | String | `disable` | TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html<br/>- `disable` (default value)<br/>- `prefer`<br/>- `require`<br/>- `verify-ca`<br/>- `verify-full` |
-| `mysql.tls.cert_path` | String | Unset | Certificate file path. |
-| `mysql.tls.key_path` | String | Unset | Private key file path. |
+| `mysql.tls.cert_path` | String | `None` | Certificate file path. |
+| `mysql.tls.key_path` | String | `None` | Private key file path. |
 | `mysql.tls.watch` | Bool | `false` | Watch for Certificate and key file change and auto reload |
 | `postgres` | -- | -- | PostgresSQL server options. |
 | `postgres.enable` | Bool | `true` | Whether to enable |
@@ -215,8 +210,8 @@
 | `postgres.runtime_size` | Integer | `2` | The number of server worker threads. |
 | `postgres.tls` | -- | -- | PostgresSQL server TLS options, see `mysql.tls` section. |
 | `postgres.tls.mode` | String | `disable` | TLS mode. |
-| `postgres.tls.cert_path` | String | Unset | Certificate file path. |
-| `postgres.tls.key_path` | String | Unset | Private key file path. |
+| `postgres.tls.cert_path` | String | `None` | Certificate file path. |
+| `postgres.tls.key_path` | String | `None` | Private key file path. |
 | `postgres.tls.watch` | Bool | `false` | Watch for Certificate and key file change and auto reload |
 | `opentsdb` | -- | -- | OpenTSDB protocol options. |
 | `opentsdb.enable` | Bool | `true` | Whether to enable OpenTSDB put in HTTP API. |
@@ -240,24 +235,23 @@
 | `datanode.client.connect_timeout` | String | `10s` | -- |
 | `datanode.client.tcp_nodelay` | Bool | `true` | -- |
 | `logging` | -- | -- | The logging options. |
-| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
-| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
+| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. |
+| `logging.level` | String | `None` | The log level. Can be `info`/`debug`/`warn`/`error`. |
 | `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
 | `logging.otlp_endpoint` | String | `http://localhost:4317` | The OTLP tracing endpoint. |
 | `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
 | `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. |
 | `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing will be sampled and exported.<br/>Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.<br/>ratio > 1 are treated as 1. Fractions < 0 are treated as 0 |
 | `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
 | `export_metrics` | -- | -- | The datanode can export its metrics and send to Prometheus compatible service (e.g. send to `greptimedb` itself) from remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape. |
 | `export_metrics.enable` | Bool | `false` | whether enable export metrics. |
 | `export_metrics.write_interval` | String | `30s` | The interval of export metrics. |
 | `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommend to collect metrics generated by itself<br/>You must create the database before enabling it. |
-| `export_metrics.self_import.db` | String | Unset | -- |
+| `export_metrics.self_import.db` | String | `None` | -- |
 | `export_metrics.remote_write` | -- | -- | -- |
 | `export_metrics.remote_write.url` | String | `""` | The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. |
 | `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
 | `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
-| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
+| `tracing.tokio_console_addr` | String | `None` | The tokio console address. |

### Metasrv
@@ -305,24 +299,23 @@
|
||||
| `wal.backoff_base` | Integer | `2` | Exponential backoff rate, i.e. next backoff = base * current backoff. |
|
||||
| `wal.backoff_deadline` | String | `5mins` | Stop reconnecting if the total wait time reaches the deadline. If this config is missing, the reconnecting won't terminate. |
|
||||
| `logging` | -- | -- | The logging options. |
|
||||
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
|
||||
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
|
||||
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. |
|
||||
| `logging.level` | String | `None` | The log level. Can be `info`/`debug`/`warn`/`error`. |
|
||||
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
|
||||
| `logging.otlp_endpoint` | String | `http://localhost:4317` | The OTLP tracing endpoint. |
|
||||
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
|
||||
| `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. |
|
||||
| `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing that will be sampled and exported.<br/>Valid range `[0, 1]`: 1 means all traces are sampled, 0 means no traces are sampled; the default value is 1.<br/>Ratios > 1 are treated as 1; ratios < 0 are treated as 0. |
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
| `export_metrics` | -- | -- | The datanode can export its metrics and send them to a Prometheus-compatible service (e.g. `greptimedb` itself) via the remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally; it's different from a Prometheus scrape. |
| `export_metrics.enable` | Bool | `false` | Whether to enable exporting metrics. |
| `export_metrics.write_interval` | String | `30s` | The interval of exporting metrics. |
| `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommended to collect metrics generated by itself.<br/>You must create the database before enabling it. |
| `export_metrics.self_import.db` | String | Unset | -- |
| `export_metrics.self_import.db` | String | `None` | -- |
| `export_metrics.remote_write` | -- | -- | -- |
| `export_metrics.remote_write.url` | String | `""` | The URL to send the metrics to, for example: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. |
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers carried by the Prometheus remote-write requests. |
| `tracing` | -- | -- | The tracing options. Only takes effect when compiled with the `tokio-console` feature. |
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
| `tracing.tokio_console_addr` | String | `None` | The tokio console address. |
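The `export_metrics` options above support two paths; the sketch below shows both, with the database name and URL as placeholders, assuming that in practice only one of `self_import` and `remote_write` would be kept.

```toml
## Illustrative sketch: export internal metrics every 30 seconds.
[export_metrics]
enable = true
write_interval = "30s"

## Option A (standalone mode): write metrics back into this instance.
## The target database must already exist.
# [export_metrics.self_import]
# db = "greptime_metrics"

## Option B: push metrics to any Prometheus remote-write endpoint.
[export_metrics.remote_write]
url = "http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics"
headers = { }
```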
### Datanode
|
||||
@@ -330,16 +323,16 @@
|
||||
| Key | Type | Default | Descriptions |
|
||||
| --- | -----| ------- | ----------- |
|
||||
| `mode` | String | `standalone` | The running mode of the datanode. It can be `standalone` or `distributed`. |
|
||||
| `node_id` | Integer | Unset | The datanode identifier; it should be unique in the cluster. |
|
||||
| `node_id` | Integer | `None` | The datanode identifier; it should be unique in the cluster. |
|
||||
| `require_lease_before_startup` | Bool | `false` | Start services after regions have obtained leases.<br/>It will block the datanode start if it can't receive leases in the heartbeat from metasrv. |
|
||||
| `init_regions_in_background` | Bool | `false` | Initialize all regions in the background during the startup.<br/>By default, it provides services after all regions have been initialized. |
|
||||
| `enable_telemetry` | Bool | `true` | Enable telemetry to collect anonymous usage data. |
|
||||
| `init_regions_parallelism` | Integer | `16` | Parallelism of initializing regions. |
|
||||
| `rpc_addr` | String | Unset | Deprecated, use `grpc.addr` instead. |
|
||||
| `rpc_hostname` | String | Unset | Deprecated, use `grpc.hostname` instead. |
|
||||
| `rpc_runtime_size` | Integer | Unset | Deprecated, use `grpc.runtime_size` instead. |
|
||||
| `rpc_max_recv_message_size` | String | Unset | Deprecated, use `grpc.rpc_max_recv_message_size` instead. |
|
||||
| `rpc_max_send_message_size` | String | Unset | Deprecated, use `grpc.rpc_max_send_message_size` instead. |
|
||||
| `rpc_addr` | String | `None` | Deprecated, use `grpc.addr` instead. |
|
||||
| `rpc_hostname` | String | `None` | Deprecated, use `grpc.hostname` instead. |
|
||||
| `rpc_runtime_size` | Integer | `None` | Deprecated, use `grpc.runtime_size` instead. |
|
||||
| `rpc_max_recv_message_size` | String | `None` | Deprecated, use `grpc.rpc_max_recv_message_size` instead. |
|
||||
| `rpc_max_send_message_size` | String | `None` | Deprecated, use `grpc.rpc_max_send_message_size` instead. |
|
||||
| `http` | -- | -- | The HTTP server options. |
|
||||
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
|
||||
| `http.timeout` | String | `30s` | HTTP request timeout. Set to 0 to disable timeout. |
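The deprecated top-level `rpc_*` keys above map onto the `grpc` section; a hedged before/after sketch, with placeholder addresses, might look like this:

```toml
## Deprecated form (still accepted, shown only for comparison):
# rpc_addr = "127.0.0.1:3001"
# rpc_hostname = "127.0.0.1"
# rpc_runtime_size = 8

## Preferred form: the same settings under the `grpc` section.
[grpc]
addr = "127.0.0.1:3001"
hostname = "127.0.0.1"
runtime_size = 8
```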
|
||||
@@ -352,8 +345,8 @@
|
||||
| `grpc.max_send_message_size` | String | `512MB` | The maximum send message size for gRPC server. |
|
||||
| `grpc.tls` | -- | -- | gRPC server TLS options, see `mysql.tls` section. |
|
||||
| `grpc.tls.mode` | String | `disable` | TLS mode. |
|
||||
| `grpc.tls.cert_path` | String | Unset | Certificate file path. |
|
||||
| `grpc.tls.key_path` | String | Unset | Private key file path. |
|
||||
| `grpc.tls.cert_path` | String | `None` | Certificate file path. |
|
||||
| `grpc.tls.key_path` | String | `None` | Private key file path. |
|
||||
| `grpc.tls.watch` | Bool | `false` | Watch for Certificate and key file change and auto reload.<br/>For now, gRPC tls config does not support auto reload. |
|
||||
| `runtime` | -- | -- | The runtime options. |
|
||||
| `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
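A sketch of the `grpc.tls` options above; the certificate paths are placeholders, the accepted values for `mode` are the ones described in the `mysql.tls` section, and, per the note above, auto reload is not yet supported for gRPC TLS.

```toml
## Illustrative gRPC TLS setup (paths are placeholders).
[grpc.tls]
## Change to one of the modes described in the `mysql.tls` section to enable TLS.
mode = "disable"
cert_path = "/path/to/server.crt"
key_path = "/path/to/server.key"
## gRPC TLS does not support auto reload yet, so this stays false.
watch = false
```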
|
||||
@@ -373,7 +366,7 @@
|
||||
| `meta_client.metadata_cache_tti` | String | `5m` | -- |
|
||||
| `wal` | -- | -- | The WAL options. |
|
||||
| `wal.provider` | String | `raft_engine` | The provider of the WAL.<br/>- `raft_engine`: the wal is stored in the local file system by raft-engine.<br/>- `kafka`: it's remote wal that data is stored in Kafka. |
|
||||
| `wal.dir` | String | Unset | The directory to store the WAL files.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||
| `wal.dir` | String | `None` | The directory to store the WAL files.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||
| `wal.file_size` | String | `256MB` | The size of the WAL segment file.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||
| `wal.purge_threshold` | String | `4GB` | The threshold of the WAL size to trigger a flush.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||
| `wal.purge_interval` | String | `10m` | The interval to trigger a flush.<br/>**It's only used when the provider is `raft_engine`**. |
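Putting the `raft_engine` WAL options above together, a sketch with an illustrative directory:

```toml
## Local WAL via raft-engine (illustrative values; the directory is a placeholder).
[wal]
provider = "raft_engine"
## Only used by the raft_engine provider.
dir = "/tmp/greptimedb/wal"
file_size = "256MB"
## A flush is triggered once the WAL grows past this threshold or the interval elapses.
purge_threshold = "4GB"
purge_interval = "10m"
```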
|
||||
@@ -382,7 +375,6 @@
|
||||
| `wal.enable_log_recycle` | Bool | `true` | Whether to reuse logically truncated log files.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||
| `wal.prefill_log_files` | Bool | `false` | Whether to pre-create log files on start up.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||
| `wal.sync_period` | String | `10s` | Duration for fsyncing log files.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||
| `wal.recovery_parallelism` | Integer | `2` | Parallelism during WAL recovery. |
|
||||
| `wal.broker_endpoints` | Array | -- | The Kafka broker endpoints.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.max_batch_bytes` | String | `1MB` | The max size of a single producer batch.<br/>Warning: Kafka has a default limit of 1MB per message in a topic.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.consumer_wait_timeout` | String | `100ms` | The consumer wait timeout.<br/>**It's only used when the provider is `kafka`**. |
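For the `kafka` provider, the corresponding sketch (broker addresses are placeholders):

```toml
## Remote WAL backed by Kafka (illustrative values).
[wal]
provider = "kafka"
broker_endpoints = ["127.0.0.1:9092"]
## Keep producer batches at or below Kafka's default 1MB per-message limit.
max_batch_bytes = "1MB"
consumer_wait_timeout = "100ms"
```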
|
||||
@@ -395,22 +387,22 @@
|
||||
| `storage` | -- | -- | The data storage options. |
|
||||
| `storage.data_home` | String | `/tmp/greptimedb/` | The working home directory. |
|
||||
| `storage.type` | String | `File` | The storage type used to store the data.<br/>- `File`: the data is stored in the local file system.<br/>- `S3`: the data is stored in the S3 object storage.<br/>- `Gcs`: the data is stored in the Google Cloud Storage.<br/>- `Azblob`: the data is stored in the Azure Blob Storage.<br/>- `Oss`: the data is stored in the Aliyun OSS. |
|
||||
| `storage.cache_path` | String | Unset | Cache configuration for object storage such as 'S3' etc.<br/>The local file cache directory. |
|
||||
| `storage.cache_capacity` | String | Unset | The local file cache capacity in bytes. |
|
||||
| `storage.bucket` | String | Unset | The S3 bucket name.<br/>**It's only used when the storage type is `S3`, `Oss` and `Gcs`**. |
|
||||
| `storage.root` | String | Unset | The S3 data will be stored in the specified prefix, for example, `s3://${bucket}/${root}`.<br/>**It's only used when the storage type is `S3`, `Oss` and `Azblob`**. |
|
||||
| `storage.access_key_id` | String | Unset | The access key id of the aws account.<br/>It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.<br/>**It's only used when the storage type is `S3` and `Oss`**. |
|
||||
| `storage.secret_access_key` | String | Unset | The secret access key of the aws account.<br/>It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.<br/>**It's only used when the storage type is `S3`**. |
|
||||
| `storage.access_key_secret` | String | Unset | The secret access key of the aliyun account.<br/>**It's only used when the storage type is `Oss`**. |
|
||||
| `storage.account_name` | String | Unset | The account key of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
|
||||
| `storage.account_key` | String | Unset | The account key of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
|
||||
| `storage.scope` | String | Unset | The scope of the google cloud storage.<br/>**It's only used when the storage type is `Gcs`**. |
|
||||
| `storage.credential_path` | String | Unset | The credential path of the google cloud storage.<br/>**It's only used when the storage type is `Gcs`**. |
|
||||
| `storage.credential` | String | Unset | The credential of the google cloud storage.<br/>**It's only used when the storage type is `Gcs`**. |
|
||||
| `storage.container` | String | Unset | The container of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
|
||||
| `storage.sas_token` | String | Unset | The sas token of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
|
||||
| `storage.endpoint` | String | Unset | The endpoint of the S3 service.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
|
||||
| `storage.region` | String | Unset | The region of the S3 service.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
|
||||
| `storage.cache_path` | String | `None` | Cache configuration for object storage such as 'S3' etc.<br/>The local file cache directory. |
|
||||
| `storage.cache_capacity` | String | `None` | The local file cache capacity in bytes. |
|
||||
| `storage.bucket` | String | `None` | The S3 bucket name.<br/>**It's only used when the storage type is `S3`, `Oss` and `Gcs`**. |
|
||||
| `storage.root` | String | `None` | The S3 data will be stored in the specified prefix, for example, `s3://${bucket}/${root}`.<br/>**It's only used when the storage type is `S3`, `Oss` and `Azblob`**. |
|
||||
| `storage.access_key_id` | String | `None` | The access key id of the aws account.<br/>It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.<br/>**It's only used when the storage type is `S3` and `Oss`**. |
|
||||
| `storage.secret_access_key` | String | `None` | The secret access key of the aws account.<br/>It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.<br/>**It's only used when the storage type is `S3`**. |
|
||||
| `storage.access_key_secret` | String | `None` | The secret access key of the aliyun account.<br/>**It's only used when the storage type is `Oss`**. |
|
||||
| `storage.account_name` | String | `None` | The account key of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
|
||||
| `storage.account_key` | String | `None` | The account key of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
|
||||
| `storage.scope` | String | `None` | The scope of the google cloud storage.<br/>**It's only used when the storage type is `Gcs`**. |
|
||||
| `storage.credential_path` | String | `None` | The credential path of the google cloud storage.<br/>**It's only used when the storage type is `Gcs`**. |
|
||||
| `storage.credential` | String | `None` | The credential of the google cloud storage.<br/>**It's only used when the storage type is `Gcs`**. |
|
||||
| `storage.container` | String | `None` | The container of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
|
||||
| `storage.sas_token` | String | `None` | The sas token of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
|
||||
| `storage.endpoint` | String | `None` | The endpoint of the S3 service.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
|
||||
| `storage.region` | String | `None` | The region of the S3 service.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
|
||||
| `[[region_engine]]` | -- | -- | The region engine options. You can configure multiple region engines. |
|
||||
| `region_engine.mito` | -- | -- | The Mito engine options. |
|
||||
| `region_engine.mito.num_workers` | Integer | `8` | Number of region workers. |
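As one possible instance of the `storage` options documented above, an S3 setup might look like the sketch below; the bucket, prefix, cache paths and credentials are placeholders, and the table itself recommends IAM roles over hardcoded keys.

```toml
## Illustrative S3 storage layout: data lives under s3://my-bucket/greptimedb/.
[storage]
data_home = "/tmp/greptimedb/"
type = "S3"
bucket = "my-bucket"
root = "greptimedb"
endpoint = "https://s3.amazonaws.com"
region = "us-west-2"
## Prefer IAM roles; these keys are placeholders shown for completeness only.
# access_key_id = "<placeholder>"
# secret_access_key = "<placeholder>"
## Optional local read cache for the object store.
cache_path = "/tmp/greptimedb/object_cache"
cache_capacity = "256MB"
```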
|
||||
@@ -420,21 +412,20 @@
|
||||
| `region_engine.mito.compress_manifest` | Bool | `false` | Whether to compress manifest and checkpoint file by gzip (default false). |
|
||||
| `region_engine.mito.max_background_jobs` | Integer | `4` | Max number of running background jobs |
|
||||
| `region_engine.mito.auto_flush_interval` | String | `1h` | Interval to auto flush a region if it has not flushed yet. |
|
||||
| `region_engine.mito.global_write_buffer_size` | String | Auto | Global write buffer size for all regions. If not set, it defaults to 1/8 of OS memory, with a maximum of 1GB. |
| `region_engine.mito.global_write_buffer_reject_size` | String | Auto | Global write buffer size threshold to reject write requests. If not set, it defaults to twice `global_write_buffer_size`. |
| `region_engine.mito.sst_meta_cache_size` | String | Auto | Cache size for SST metadata. Set it to 0 to disable the cache.<br/>If not set, it defaults to 1/32 of OS memory, with a maximum of 128MB. |
| `region_engine.mito.vector_cache_size` | String | Auto | Cache size for vectors and arrow arrays. Set it to 0 to disable the cache.<br/>If not set, it defaults to 1/16 of OS memory, with a maximum of 512MB. |
| `region_engine.mito.page_cache_size` | String | Auto | Cache size for pages of SST row groups. Set it to 0 to disable the cache.<br/>If not set, it defaults to 1/8 of OS memory. |
| `region_engine.mito.selector_result_cache_size` | String | Auto | Cache size for time series selector (e.g. `last_value()`). Set it to 0 to disable the cache.<br/>If not set, it defaults to 1/16 of OS memory, with a maximum of 512MB. |
| `region_engine.mito.global_write_buffer_size` | String | `1GB` | Global write buffer size for all regions. If not set, it defaults to 1/8 of OS memory, with a maximum of 1GB. |
| `region_engine.mito.global_write_buffer_reject_size` | String | `2GB` | Global write buffer size threshold to reject write requests. If not set, it defaults to twice `global_write_buffer_size`. |
| `region_engine.mito.sst_meta_cache_size` | String | `128MB` | Cache size for SST metadata. Set it to 0 to disable the cache.<br/>If not set, it defaults to 1/32 of OS memory, with a maximum of 128MB. |
| `region_engine.mito.vector_cache_size` | String | `512MB` | Cache size for vectors and arrow arrays. Set it to 0 to disable the cache.<br/>If not set, it defaults to 1/16 of OS memory, with a maximum of 512MB. |
| `region_engine.mito.page_cache_size` | String | `512MB` | Cache size for pages of SST row groups. Set it to 0 to disable the cache.<br/>If not set, it defaults to 1/8 of OS memory. |
| `region_engine.mito.selector_result_cache_size` | String | `512MB` | Cache size for time series selector (e.g. `last_value()`). Set it to 0 to disable the cache.<br/>If not set, it defaults to 1/16 of OS memory, with a maximum of 512MB. |
|
||||
| `region_engine.mito.enable_experimental_write_cache` | Bool | `false` | Whether to enable the experimental write cache. |
|
||||
| `region_engine.mito.experimental_write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}/write_cache`. |
|
||||
| `region_engine.mito.experimental_write_cache_size` | String | `512MB` | Capacity for write cache. |
|
||||
| `region_engine.mito.experimental_write_cache_ttl` | String | Unset | TTL for write cache. |
|
||||
| `region_engine.mito.experimental_write_cache_ttl` | String | `None` | TTL for write cache. |
|
||||
| `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
|
||||
| `region_engine.mito.scan_parallelism` | Integer | `0` | Parallelism to scan a region (default: 1/4 of cpu cores).<br/>- `0`: using the default value (1/4 of cpu cores).<br/>- `1`: scan in current thread.<br/>- `n`: scan in parallelism n. |
|
||||
| `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
|
||||
| `region_engine.mito.allow_stale_entries` | Bool | `false` | Whether to allow stale WAL entries read during replay. |
|
||||
| `region_engine.mito.min_compaction_interval` | String | `0m` | Minimum time interval between two compactions.<br/>To align with the old behavior, the default value is 0 (no restrictions). |
|
||||
| `region_engine.mito.index` | -- | -- | The options for index in Mito engine. |
|
||||
| `region_engine.mito.index.aux_path` | String | `""` | Auxiliary directory path for the index in filesystem, used to store intermediate files for<br/>creating the index and staging files for searching the index, defaults to `{data_home}/index_intermediate`.<br/>The default name for this directory is `index_intermediate` for backward compatibility.<br/><br/>This path contains two subdirectories:<br/>- `__intm`: for storing intermediate files used during creating index.<br/>- `staging`: for storing staging files used during searching index. |
|
||||
| `region_engine.mito.index.staging_size` | String | `2GB` | The max capacity of the staging directory. |
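A hedged sketch of the Mito sizing knobs above; the commented values are the documented defaults, and leaving them unset lets the engine derive sizes from OS memory.

```toml
## Illustrative Mito engine tuning (values mirror the documented defaults).
[[region_engine]]
[region_engine.mito]
num_workers = 8
## Leave the buffer/cache sizes unset to let the engine size them automatically.
# global_write_buffer_size = "1GB"
# page_cache_size = "512MB"
## Experimental write cache, mainly useful with object storage backends.
enable_experimental_write_cache = false
experimental_write_cache_size = "512MB"

[region_engine.mito.index]
## Intermediate and staging files for index building/search live under aux_path.
# aux_path = ""   # defaults to {data_home}/index_intermediate
staging_size = "2GB"
```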
|
||||
@@ -456,24 +447,23 @@
|
||||
| `region_engine.mito.memtable.fork_dictionary_bytes` | String | `1GiB` | Max dictionary bytes.<br/>Only available for `partition_tree` memtable. |
|
||||
| `region_engine.file` | -- | -- | Enable the file engine. |
|
||||
| `logging` | -- | -- | The logging options. |
|
||||
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
|
||||
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
|
||||
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. |
|
||||
| `logging.level` | String | `None` | The log level. Can be `info`/`debug`/`warn`/`error`. |
|
||||
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
|
||||
| `logging.otlp_endpoint` | String | `http://localhost:4317` | The OTLP tracing endpoint. |
|
||||
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
|
||||
| `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. |
|
||||
| `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing that will be sampled and exported.<br/>Valid range `[0, 1]`: 1 means all traces are sampled, 0 means no traces are sampled; the default value is 1.<br/>Ratios > 1 are treated as 1; ratios < 0 are treated as 0. |
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
| `export_metrics` | -- | -- | The datanode can export its metrics and send them to a Prometheus-compatible service (e.g. `greptimedb` itself) via the remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally; it's different from a Prometheus scrape. |
| `export_metrics.enable` | Bool | `false` | Whether to enable exporting metrics. |
| `export_metrics.write_interval` | String | `30s` | The interval of exporting metrics. |
| `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommended to collect metrics generated by itself.<br/>You must create the database before enabling it. |
| `export_metrics.self_import.db` | String | Unset | -- |
| `export_metrics.self_import.db` | String | `None` | -- |
| `export_metrics.remote_write` | -- | -- | -- |
| `export_metrics.remote_write.url` | String | `""` | The URL to send the metrics to, for example: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. |
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers carried by the Prometheus remote-write requests. |
| `tracing` | -- | -- | The tracing options. Only takes effect when compiled with the `tokio-console` feature. |
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
| `tracing.tokio_console_addr` | String | `None` | The tokio console address. |
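Because `[[region_engine]]` is an array of tables, running the Mito engine alongside the file engine (both documented above) is a matter of listing both entries; in this minimal sketch the empty `[region_engine.file]` table is assumed to be enough to enable the file engine.

```toml
## Illustrative: run the default Mito engine together with the file engine.
[[region_engine]]
[region_engine.mito]
num_workers = 8

[[region_engine]]
## Listing the file engine here enables it; it takes no extra options in this sketch.
[region_engine.file]
```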
### Flownode
|
||||
@@ -481,7 +471,7 @@
|
||||
| Key | Type | Default | Descriptions |
|
||||
| --- | -----| ------- | ----------- |
|
||||
| `mode` | String | `distributed` | The running mode of the flownode. It can be `standalone` or `distributed`. |
|
||||
| `node_id` | Integer | Unset | The flownode identifier; it should be unique in the cluster. |
|
||||
| `node_id` | Integer | `None` | The flownode identifier; it should be unique in the cluster. |
|
||||
| `grpc` | -- | -- | The gRPC server options. |
|
||||
| `grpc.addr` | String | `127.0.0.1:6800` | The address to bind the gRPC server. |
|
||||
| `grpc.hostname` | String | `127.0.0.1` | The hostname advertised to the metasrv,<br/>and used for connections from outside the host |
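A minimal sketch of the flownode identity and gRPC options above, with placeholder addresses:

```toml
## Illustrative flownode identity and gRPC binding.
node_id = 14

[grpc]
addr = "127.0.0.1:6800"
## Hostname advertised to the metasrv, used for connections from outside the host.
hostname = "127.0.0.1"
```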
|
||||
@@ -502,13 +492,12 @@
|
||||
| `heartbeat.interval` | String | `3s` | Interval for sending heartbeat messages to the metasrv. |
|
||||
| `heartbeat.retry_interval` | String | `3s` | Interval for retrying to send heartbeat messages to the metasrv. |
|
||||
| `logging` | -- | -- | The logging options. |
|
||||
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
|
||||
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
|
||||
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. |
|
||||
| `logging.level` | String | `None` | The log level. Can be `info`/`debug`/`warn`/`error`. |
|
||||
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
|
||||
| `logging.otlp_endpoint` | String | `http://localhost:4317` | The OTLP tracing endpoint. |
|
||||
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
|
||||
| `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. |
|
||||
| `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing that will be sampled and exported.<br/>Valid range `[0, 1]`: 1 means all traces are sampled, 0 means no traces are sampled; the default value is 1.<br/>Ratios > 1 are treated as 1; ratios < 0 are treated as 0. |
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
| `tracing` | -- | -- | The tracing options. Only takes effect when compiled with the `tokio-console` feature. |
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
| `tracing.tokio_console_addr` | String | `None` | The tokio console address. |
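The heartbeat options above translate into a small block such as this sketch:

```toml
## Illustrative heartbeat settings for a flownode talking to the metasrv.
[heartbeat]
## Send a heartbeat to the metasrv every 3 seconds...
interval = "3s"
## ...and retry at the same cadence if sending fails.
retry_interval = "3s"
```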
@@ -2,7 +2,7 @@
|
||||
mode = "standalone"
|
||||
|
||||
## The datanode identifier and should be unique in the cluster.
|
||||
## @toml2docs:none-default
|
||||
## +toml2docs:none-default
|
||||
node_id = 42
|
||||
|
||||
## Start services after regions have obtained leases.
|
||||
@@ -20,23 +20,23 @@ enable_telemetry = true
|
||||
init_regions_parallelism = 16
|
||||
|
||||
## Deprecated, use `grpc.addr` instead.
|
||||
## @toml2docs:none-default
|
||||
## +toml2docs:none-default
|
||||
rpc_addr = "127.0.0.1:3001"
|
||||
|
||||
## Deprecated, use `grpc.hostname` instead.
|
||||
## @toml2docs:none-default
|
||||
## +toml2docs:none-default
|
||||
rpc_hostname = "127.0.0.1"
|
||||
|
||||
## Deprecated, use `grpc.runtime_size` instead.
|
||||
## @toml2docs:none-default
|
||||
## +toml2docs:none-default
|
||||
rpc_runtime_size = 8
|
||||
|
||||
## Deprecated, use `grpc.rpc_max_recv_message_size` instead.
|
||||
## @toml2docs:none-default
|
||||
## +toml2docs:none-default
|
||||
rpc_max_recv_message_size = "512MB"
|
||||
|
||||
## Deprecated, use `grpc.rpc_max_send_message_size` instead.
|
||||
## @toml2docs:none-default
|
||||
## +toml2docs:none-default
|
||||
rpc_max_send_message_size = "512MB"
|
||||
|
||||
|
||||
@@ -71,11 +71,11 @@ max_send_message_size = "512MB"
|
||||
mode = "disable"
|
||||
|
||||
## Certificate file path.
|
||||
## @toml2docs:none-default
|
||||
## +toml2docs:none-default
|
||||
cert_path = ""
|
||||
|
||||
## Private key file path.
|
||||
## @toml2docs:none-default
|
||||
## +toml2docs:none-default
|
||||
key_path = ""
|
||||
|
||||
## Watch for Certificate and key file change and auto reload.
|
||||
@@ -83,11 +83,11 @@ key_path = ""
|
||||
watch = false
|
||||
|
||||
## The runtime options.
|
||||
#+ [runtime]
|
||||
[runtime]
|
||||
## The number of threads to execute the runtime for global read operations.
|
||||
#+ global_rt_size = 8
|
||||
global_rt_size = 8
|
||||
## The number of threads to execute the runtime for global write operations.
|
||||
#+ compact_rt_size = 4
|
||||
compact_rt_size = 4
|
||||
|
||||
## The heartbeat options.
|
||||
[heartbeat]
|
||||
@@ -135,7 +135,7 @@ provider = "raft_engine"
|
||||
|
||||
## The directory to store the WAL files.
|
||||
## **It's only used when the provider is `raft_engine`**.
|
||||
## @toml2docs:none-default
|
||||
## +toml2docs:none-default
|
||||
dir = "/tmp/greptimedb/wal"
|
||||
|
||||
## The size of the WAL segment file.
|
||||
@@ -170,9 +170,6 @@ prefill_log_files = false
|
||||
## **It's only used when the provider is `raft_engine`**.
|
||||
sync_period = "10s"
|
||||
|
||||
## Parallelism during WAL recovery.
|
||||
recovery_parallelism = 2
|
||||
|
||||
## The Kafka broker endpoints.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
broker_endpoints = ["127.0.0.1:9092"]
|
||||
@@ -282,104 +279,90 @@ type = "File"
|
||||
|
||||
## Cache configuration for object storage such as 'S3' etc.
|
||||
## The local file cache directory.
|
||||
## @toml2docs:none-default
|
||||
## +toml2docs:none-default
|
||||
cache_path = "/path/local_cache"
|
||||
|
||||
## The local file cache capacity in bytes.
|
||||
## @toml2docs:none-default
|
||||
## +toml2docs:none-default
|
||||
cache_capacity = "256MB"
|
||||
|
||||
## The S3 bucket name.
|
||||
## **It's only used when the storage type is `S3`, `Oss` and `Gcs`**.
|
||||
## @toml2docs:none-default
|
||||
## +toml2docs:none-default
|
||||
bucket = "greptimedb"
|
||||
|
||||
## The S3 data will be stored in the specified prefix, for example, `s3://${bucket}/${root}`.
|
||||
## **It's only used when the storage type is `S3`, `Oss` and `Azblob`**.
|
||||
## @toml2docs:none-default
|
||||
## +toml2docs:none-default
|
||||
root = "greptimedb"
|
||||
|
||||
## The access key id of the aws account.
|
||||
## It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.
|
||||
## **It's only used when the storage type is `S3` and `Oss`**.
|
||||
## @toml2docs:none-default
|
||||
## +toml2docs:none-default
|
||||
access_key_id = "test"
|
||||
|
||||
## The secret access key of the aws account.
|
||||
## It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.
|
||||
## **It's only used when the storage type is `S3`**.
|
||||
## @toml2docs:none-default
|
||||
## +toml2docs:none-default
|
||||
secret_access_key = "test"
|
||||
|
||||
## The secret access key of the aliyun account.
|
||||
## **It's only used when the storage type is `Oss`**.
|
||||
## @toml2docs:none-default
|
||||
## +toml2docs:none-default
|
||||
access_key_secret = "test"
|
||||
|
||||
## The account key of the azure account.
|
||||
## **It's only used when the storage type is `Azblob`**.
|
||||
## @toml2docs:none-default
|
||||
## +toml2docs:none-default
|
||||
account_name = "test"
|
||||
|
||||
## The account key of the azure account.
|
||||
## **It's only used when the storage type is `Azblob`**.
|
||||
## @toml2docs:none-default
|
||||
## +toml2docs:none-default
|
||||
account_key = "test"
|
||||
|
||||
## The scope of the google cloud storage.
|
||||
## **It's only used when the storage type is `Gcs`**.
|
||||
## @toml2docs:none-default
|
||||
## +toml2docs:none-default
|
||||
scope = "test"
|
||||
|
||||
## The credential path of the google cloud storage.
|
||||
## **It's only used when the storage type is `Gcs`**.
|
||||
## @toml2docs:none-default
|
||||
## +toml2docs:none-default
|
||||
credential_path = "test"
|
||||
|
||||
## The credential of the google cloud storage.
|
||||
## **It's only used when the storage type is `Gcs`**.
|
||||
## @toml2docs:none-default
|
||||
credential = "base64-credential"
|
||||
## +toml2docs:none-default
|
||||
credential= "base64-credential"
|
||||
|
||||
## The container of the azure account.
|
||||
## **It's only used when the storage type is `Azblob`**.
|
||||
## @toml2docs:none-default
|
||||
## +toml2docs:none-default
|
||||
container = "greptimedb"
|
||||
|
||||
## The sas token of the azure account.
|
||||
## **It's only used when the storage type is `Azblob`**.
|
||||
## @toml2docs:none-default
|
||||
## +toml2docs:none-default
|
||||
sas_token = ""
|
||||
|
||||
## The endpoint of the S3 service.
|
||||
## **It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**.
|
||||
## @toml2docs:none-default
|
||||
## +toml2docs:none-default
|
||||
endpoint = "https://s3.amazonaws.com"
|
||||
|
||||
## The region of the S3 service.
|
||||
## **It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**.
|
||||
## @toml2docs:none-default
|
||||
## +toml2docs:none-default
|
||||
region = "us-west-2"
|
||||
|
||||
# Custom storage options
|
||||
# [[storage.providers]]
|
||||
# name = "S3"
|
||||
# type = "S3"
|
||||
# bucket = "greptimedb"
|
||||
# root = "data"
|
||||
# access_key_id = "test"
|
||||
# secret_access_key = "123456"
|
||||
# endpoint = "https://s3.amazonaws.com"
|
||||
# region = "us-west-2"
|
||||
# [[storage.providers]]
|
||||
# name = "Gcs"
|
||||
# type = "Gcs"
|
||||
# bucket = "greptimedb"
|
||||
# root = "data"
|
||||
# scope = "test"
|
||||
# credential_path = "123456"
|
||||
# credential = "base64-credential"
|
||||
# endpoint = "https://storage.googleapis.com"
|
||||
|
||||
## The region engine options. You can configure multiple region engines.
|
||||
[[region_engine]]
|
||||
@@ -388,7 +371,7 @@ region = "us-west-2"
|
||||
[region_engine.mito]
|
||||
|
||||
## Number of region workers.
|
||||
#+ num_workers = 8
|
||||
num_workers = 8
|
||||
|
||||
## Request channel size of each worker.
|
||||
worker_channel_size = 128
|
||||
@@ -409,32 +392,26 @@ max_background_jobs = 4
|
||||
auto_flush_interval = "1h"
|
||||
|
||||
## Global write buffer size for all regions. If not set, it's default to 1/8 of OS memory with a max limitation of 1GB.
|
||||
## @toml2docs:none-default="Auto"
|
||||
#+ global_write_buffer_size = "1GB"
|
||||
global_write_buffer_size = "1GB"
|
||||
|
||||
## Global write buffer size threshold to reject write requests. If not set, it's default to 2 times of `global_write_buffer_size`
|
||||
## @toml2docs:none-default="Auto"
|
||||
#+ global_write_buffer_reject_size = "2GB"
|
||||
global_write_buffer_reject_size = "2GB"
|
||||
|
||||
## Cache size for SST metadata. Setting it to 0 to disable the cache.
|
||||
## If not set, it's default to 1/32 of OS memory with a max limitation of 128MB.
|
||||
## @toml2docs:none-default="Auto"
|
||||
#+ sst_meta_cache_size = "128MB"
|
||||
sst_meta_cache_size = "128MB"
|
||||
|
||||
## Cache size for vectors and arrow arrays. Setting it to 0 to disable the cache.
|
||||
## If not set, it's default to 1/16 of OS memory with a max limitation of 512MB.
|
||||
## @toml2docs:none-default="Auto"
|
||||
#+ vector_cache_size = "512MB"
|
||||
vector_cache_size = "512MB"
|
||||
|
||||
## Cache size for pages of SST row groups. Setting it to 0 to disable the cache.
|
||||
## If not set, it's default to 1/8 of OS memory.
|
||||
## @toml2docs:none-default="Auto"
|
||||
#+ page_cache_size = "512MB"
|
||||
page_cache_size = "512MB"
|
||||
|
||||
## Cache size for time series selector (e.g. `last_value()`). Setting it to 0 to disable the cache.
|
||||
## If not set, it's default to 1/16 of OS memory with a max limitation of 512MB.
|
||||
## @toml2docs:none-default="Auto"
|
||||
#+ selector_result_cache_size = "512MB"
|
||||
selector_result_cache_size = "512MB"
|
||||
|
||||
## Whether to enable the experimental write cache.
|
||||
enable_experimental_write_cache = false
|
||||
@@ -446,7 +423,7 @@ experimental_write_cache_path = ""
|
||||
experimental_write_cache_size = "512MB"
|
||||
|
||||
## TTL for write cache.
|
||||
## @toml2docs:none-default
|
||||
## +toml2docs:none-default
|
||||
experimental_write_cache_ttl = "8h"
|
||||
|
||||
## Buffer size for SST writing.
|
||||
@@ -464,10 +441,6 @@ parallel_scan_channel_size = 32
|
||||
## Whether to allow stale WAL entries read during replay.
|
||||
allow_stale_entries = false
|
||||
|
||||
## Minimum time interval between two compactions.
|
||||
## To align with the old behavior, the default value is 0 (no restrictions).
|
||||
min_compaction_interval = "0m"
|
||||
|
||||
## The options for index in Mito engine.
|
||||
[region_engine.mito.index]
|
||||
|
||||
@@ -558,11 +531,11 @@ fork_dictionary_bytes = "1GiB"
|
||||
|
||||
## The logging options.
|
||||
[logging]
|
||||
## The directory to store the log files. If set to empty, logs will not be written to files.
|
||||
## The directory to store the log files.
|
||||
dir = "/tmp/greptimedb/logs"
|
||||
|
||||
## The log level. Can be `info`/`debug`/`warn`/`error`.
|
||||
## @toml2docs:none-default
|
||||
## +toml2docs:none-default
|
||||
level = "info"
|
||||
|
||||
## Enable OTLP tracing.
|
||||
@@ -574,9 +547,6 @@ otlp_endpoint = "http://localhost:4317"
|
||||
## Whether to append logs to stdout.
|
||||
append_stdout = true
|
||||
|
||||
## The log format. Can be `text`/`json`.
|
||||
log_format = "text"
|
||||
|
||||
## The percentage of tracing will be sampled and exported.
|
||||
## Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.
|
||||
## ratio > 1 are treated as 1. Fractions < 0 are treated as 0
|
||||
@@ -596,7 +566,7 @@ write_interval = "30s"
|
||||
## For `standalone` mode, `self_import` is recommend to collect metrics generated by itself
|
||||
## You must create the database before enabling it.
|
||||
[export_metrics.self_import]
|
||||
## @toml2docs:none-default
|
||||
## +toml2docs:none-default
|
||||
db = "greptime_metrics"
|
||||
|
||||
[export_metrics.remote_write]
|
||||
@@ -609,5 +579,5 @@ headers = { }
|
||||
## The tracing options. Only effect when compiled with `tokio-console` feature.
|
||||
[tracing]
|
||||
## The tokio console address.
|
||||
## @toml2docs:none-default
|
||||
## +toml2docs:none-default
|
||||
tokio_console_addr = "127.0.0.1"
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
mode = "distributed"
|
||||
|
||||
## The flownode identifier and should be unique in the cluster.
|
||||
## @toml2docs:none-default
|
||||
## +toml2docs:none-default
|
||||
node_id = 14
|
||||
|
||||
## The gRPC server options.
|
||||
@@ -59,11 +59,11 @@ retry_interval = "3s"
|
||||
|
||||
## The logging options.
|
||||
[logging]
|
||||
## The directory to store the log files. If set to empty, logs will not be written to files.
|
||||
## The directory to store the log files.
|
||||
dir = "/tmp/greptimedb/logs"
|
||||
|
||||
## The log level. Can be `info`/`debug`/`warn`/`error`.
|
||||
## @toml2docs:none-default
|
||||
## +toml2docs:none-default
|
||||
level = "info"
|
||||
|
||||
## Enable OTLP tracing.
|
||||
@@ -75,9 +75,6 @@ otlp_endpoint = "http://localhost:4317"
|
||||
## Whether to append logs to stdout.
|
||||
append_stdout = true
|
||||
|
||||
## The log format. Can be `text`/`json`.
|
||||
log_format = "text"
|
||||
|
||||
## The percentage of tracing will be sampled and exported.
|
||||
## Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.
|
||||
## ratio > 1 are treated as 1. Fractions < 0 are treated as 0
|
||||
@@ -87,6 +84,6 @@ default_ratio = 1.0
|
||||
## The tracing options. Only effect when compiled with `tokio-console` feature.
|
||||
[tracing]
|
||||
## The tokio console address.
|
||||
## @toml2docs:none-default
|
||||
## +toml2docs:none-default
|
||||
tokio_console_addr = "127.0.0.1"
|
||||
|
||||
|
||||
@@ -1,13 +1,13 @@
|
||||
## The default timezone of the server.
|
||||
## @toml2docs:none-default
|
||||
## +toml2docs:none-default
|
||||
default_timezone = "UTC"
|
||||
|
||||
## The runtime options.
|
||||
#+ [runtime]
|
||||
[runtime]
|
||||
## The number of threads to execute the runtime for global read operations.
|
||||
#+ global_rt_size = 8
|
||||
global_rt_size = 8
|
||||
## The number of threads to execute the runtime for global write operations.
|
||||
#+ compact_rt_size = 4
|
||||
compact_rt_size = 4
|
||||
|
||||
## The heartbeat options.
|
||||
[heartbeat]
|
||||
@@ -44,11 +44,11 @@ runtime_size = 8
|
||||
mode = "disable"
|
||||
|
||||
## Certificate file path.
|
||||
## @toml2docs:none-default
|
||||
## +toml2docs:none-default
|
||||
cert_path = ""
|
||||
|
||||
## Private key file path.
|
||||
## @toml2docs:none-default
|
||||
## +toml2docs:none-default
|
||||
key_path = ""
|
||||
|
||||
## Watch for Certificate and key file change and auto reload.
|
||||
@@ -76,11 +76,11 @@ runtime_size = 2
|
||||
mode = "disable"
|
||||
|
||||
## Certificate file path.
|
||||
## @toml2docs:none-default
|
||||
## +toml2docs:none-default
|
||||
cert_path = ""
|
||||
|
||||
## Private key file path.
|
||||
## @toml2docs:none-default
|
||||
## +toml2docs:none-default
|
||||
key_path = ""
|
||||
|
||||
## Watch for Certificate and key file change and auto reload
|
||||
@@ -101,11 +101,11 @@ runtime_size = 2
|
||||
mode = "disable"
|
||||
|
||||
## Certificate file path.
|
||||
## @toml2docs:none-default
|
||||
## +toml2docs:none-default
|
||||
cert_path = ""
|
||||
|
||||
## Private key file path.
|
||||
## @toml2docs:none-default
|
||||
## +toml2docs:none-default
|
||||
key_path = ""
|
||||
|
||||
## Watch for Certificate and key file change and auto reload
|
||||
@@ -166,11 +166,11 @@ tcp_nodelay = true
|
||||
|
||||
## The logging options.
|
||||
[logging]
|
||||
## The directory to store the log files. If set to empty, logs will not be written to files.
|
||||
## The directory to store the log files.
|
||||
dir = "/tmp/greptimedb/logs"
|
||||
|
||||
## The log level. Can be `info`/`debug`/`warn`/`error`.
|
||||
## @toml2docs:none-default
|
||||
## +toml2docs:none-default
|
||||
level = "info"
|
||||
|
||||
## Enable OTLP tracing.
|
||||
@@ -182,9 +182,6 @@ otlp_endpoint = "http://localhost:4317"
|
||||
## Whether to append logs to stdout.
|
||||
append_stdout = true
|
||||
|
||||
## The log format. Can be `text`/`json`.
|
||||
log_format = "text"
|
||||
|
||||
## The percentage of tracing will be sampled and exported.
|
||||
## Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.
|
||||
## ratio > 1 are treated as 1. Fractions < 0 are treated as 0
|
||||
@@ -204,7 +201,7 @@ write_interval = "30s"
|
||||
## For `standalone` mode, `self_import` is recommend to collect metrics generated by itself
|
||||
## You must create the database before enabling it.
|
||||
[export_metrics.self_import]
|
||||
## @toml2docs:none-default
|
||||
## +toml2docs:none-default
|
||||
db = "greptime_metrics"
|
||||
|
||||
[export_metrics.remote_write]
|
||||
@@ -217,5 +214,5 @@ headers = { }
|
||||
## The tracing options. Only effect when compiled with `tokio-console` feature.
|
||||
[tracing]
|
||||
## The tokio console address.
|
||||
## @toml2docs:none-default
|
||||
## +toml2docs:none-default
|
||||
tokio_console_addr = "127.0.0.1"
|
||||
|
||||
@@ -36,11 +36,11 @@ enable_region_failover = false
|
||||
backend = "EtcdStore"
|
||||
|
||||
## The runtime options.
|
||||
#+ [runtime]
|
||||
[runtime]
|
||||
## The number of threads to execute the runtime for global read operations.
|
||||
#+ global_rt_size = 8
|
||||
global_rt_size = 8
|
||||
## The number of threads to execute the runtime for global write operations.
|
||||
#+ compact_rt_size = 4
|
||||
compact_rt_size = 4
|
||||
|
||||
## Procedure storage options.
|
||||
[procedure]
|
||||
@@ -153,11 +153,11 @@ backoff_deadline = "5mins"
|
||||
|
||||
## The logging options.
|
||||
[logging]
|
||||
## The directory to store the log files. If set to empty, logs will not be written to files.
|
||||
## The directory to store the log files.
|
||||
dir = "/tmp/greptimedb/logs"
|
||||
|
||||
## The log level. Can be `info`/`debug`/`warn`/`error`.
|
||||
## @toml2docs:none-default
|
||||
## +toml2docs:none-default
|
||||
level = "info"
|
||||
|
||||
## Enable OTLP tracing.
|
||||
@@ -169,9 +169,6 @@ otlp_endpoint = "http://localhost:4317"
|
||||
## Whether to append logs to stdout.
|
||||
append_stdout = true
|
||||
|
||||
## The log format. Can be `text`/`json`.
|
||||
log_format = "text"
|
||||
|
||||
## The percentage of tracing will be sampled and exported.
|
||||
## Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.
|
||||
## ratio > 1 are treated as 1. Fractions < 0 are treated as 0
|
||||
@@ -191,7 +188,7 @@ write_interval = "30s"
|
||||
## For `standalone` mode, `self_import` is recommend to collect metrics generated by itself
|
||||
## You must create the database before enabling it.
|
||||
[export_metrics.self_import]
|
||||
## @toml2docs:none-default
|
||||
## +toml2docs:none-default
|
||||
db = "greptime_metrics"
|
||||
|
||||
[export_metrics.remote_write]
|
||||
@@ -204,5 +201,5 @@ headers = { }
|
||||
## The tracing options. Only effect when compiled with `tokio-console` feature.
|
||||
[tracing]
|
||||
## The tokio console address.
|
||||
## @toml2docs:none-default
|
||||
## +toml2docs:none-default
|
||||
tokio_console_addr = "127.0.0.1"
|
||||
|
||||
@@ -5,22 +5,15 @@ mode = "standalone"
|
||||
enable_telemetry = true
|
||||
|
||||
## The default timezone of the server.
|
||||
## @toml2docs:none-default
|
||||
## +toml2docs:none-default
|
||||
default_timezone = "UTC"
|
||||
|
||||
## Initialize all regions in the background during the startup.
|
||||
## By default, it provides services after all regions have been initialized.
|
||||
init_regions_in_background = false
|
||||
|
||||
## Parallelism of initializing regions.
|
||||
init_regions_parallelism = 16
|
||||
|
||||
## The runtime options.
|
||||
#+ [runtime]
|
||||
[runtime]
|
||||
## The number of threads to execute the runtime for global read operations.
|
||||
#+ global_rt_size = 8
|
||||
global_rt_size = 8
|
||||
## The number of threads to execute the runtime for global write operations.
|
||||
#+ compact_rt_size = 4
|
||||
compact_rt_size = 4
|
||||
|
||||
## The HTTP server options.
|
||||
[http]
|
||||
@@ -46,11 +39,11 @@ runtime_size = 8
|
||||
mode = "disable"
|
||||
|
||||
## Certificate file path.
|
||||
## @toml2docs:none-default
|
||||
## +toml2docs:none-default
|
||||
cert_path = ""
|
||||
|
||||
## Private key file path.
|
||||
## @toml2docs:none-default
|
||||
## +toml2docs:none-default
|
||||
key_path = ""
|
||||
|
||||
## Watch for Certificate and key file change and auto reload.
|
||||
@@ -78,11 +71,11 @@ runtime_size = 2
|
||||
mode = "disable"
|
||||
|
||||
## Certificate file path.
|
||||
## @toml2docs:none-default
|
||||
## +toml2docs:none-default
|
||||
cert_path = ""
|
||||
|
||||
## Private key file path.
|
||||
## @toml2docs:none-default
|
||||
## +toml2docs:none-default
|
||||
key_path = ""
|
||||
|
||||
## Watch for Certificate and key file change and auto reload
|
||||
@@ -103,11 +96,11 @@ runtime_size = 2
|
||||
mode = "disable"
|
||||
|
||||
## Certificate file path.
|
||||
## @toml2docs:none-default
|
||||
## +toml2docs:none-default
|
||||
cert_path = ""
|
||||
|
||||
## Private key file path.
|
||||
## @toml2docs:none-default
|
||||
## +toml2docs:none-default
|
||||
key_path = ""
|
||||
|
||||
## Watch for Certificate and key file change and auto reload
|
||||
@@ -139,7 +132,7 @@ provider = "raft_engine"
|
||||
|
||||
## The directory to store the WAL files.
|
||||
## **It's only used when the provider is `raft_engine`**.
|
||||
## @toml2docs:none-default
|
||||
## +toml2docs:none-default
|
||||
dir = "/tmp/greptimedb/wal"
|
||||
|
||||
## The size of the WAL segment file.
|
||||
@@ -174,9 +167,6 @@ prefill_log_files = false
|
||||
## **It's only used when the provider is `raft_engine`**.
|
||||
sync_period = "10s"
|
||||
|
||||
## Parallelism during WAL recovery.
|
||||
recovery_parallelism = 2
|
||||
|
||||
## The Kafka broker endpoints.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
broker_endpoints = ["127.0.0.1:9092"]
|
||||
@@ -320,104 +310,90 @@ type = "File"
|
||||
|
||||
## Cache configuration for object storage such as 'S3' etc.
|
||||
## The local file cache directory.
|
||||
## @toml2docs:none-default
|
||||
## +toml2docs:none-default
|
||||
cache_path = "/path/local_cache"
|
||||
|
||||
## The local file cache capacity in bytes.
|
||||
## @toml2docs:none-default
|
||||
## +toml2docs:none-default
|
||||
cache_capacity = "256MB"
|
||||
|
||||
## The S3 bucket name.
|
||||
## **It's only used when the storage type is `S3`, `Oss` and `Gcs`**.
|
||||
## @toml2docs:none-default
|
||||
## +toml2docs:none-default
|
||||
bucket = "greptimedb"
|
||||
|
||||
## The S3 data will be stored in the specified prefix, for example, `s3://${bucket}/${root}`.
|
||||
## **It's only used when the storage type is `S3`, `Oss` and `Azblob`**.
|
||||
## @toml2docs:none-default
|
||||
## +toml2docs:none-default
|
||||
root = "greptimedb"
|
||||
|
||||
## The access key id of the aws account.
|
||||
## It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.
|
||||
## **It's only used when the storage type is `S3` and `Oss`**.
|
||||
## @toml2docs:none-default
|
||||
## +toml2docs:none-default
|
||||
access_key_id = "test"
|
||||
|
||||
## The secret access key of the aws account.
|
||||
## It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.
|
||||
## **It's only used when the storage type is `S3`**.
|
||||
## @toml2docs:none-default
|
||||
## +toml2docs:none-default
|
||||
secret_access_key = "test"
|
||||
|
||||
## The secret access key of the aliyun account.
|
||||
## **It's only used when the storage type is `Oss`**.
|
||||
## @toml2docs:none-default
|
||||
## +toml2docs:none-default
|
||||
access_key_secret = "test"
|
||||
|
||||
## The account key of the azure account.
|
||||
## **It's only used when the storage type is `Azblob`**.
|
||||
## @toml2docs:none-default
|
||||
## +toml2docs:none-default
|
||||
account_name = "test"
|
||||
|
||||
## The account key of the azure account.
|
||||
## **It's only used when the storage type is `Azblob`**.
|
||||
## @toml2docs:none-default
|
||||
## +toml2docs:none-default
|
||||
account_key = "test"
|
||||
|
||||
## The scope of the google cloud storage.
|
||||
## **It's only used when the storage type is `Gcs`**.
|
||||
## @toml2docs:none-default
|
||||
## +toml2docs:none-default
|
||||
scope = "test"
|
||||
|
||||
## The credential path of the google cloud storage.
|
||||
## **It's only used when the storage type is `Gcs`**.
|
||||
## @toml2docs:none-default
|
||||
## +toml2docs:none-default
|
||||
credential_path = "test"
|
||||
|
||||
## The credential of the google cloud storage.
|
||||
## **It's only used when the storage type is `Gcs`**.
|
||||
## @toml2docs:none-default
|
||||
## +toml2docs:none-default
|
||||
credential = "base64-credential"
|
||||
|
||||
## The container of the azure account.
|
||||
## **It's only used when the storage type is `Azblob`**.
|
||||
## @toml2docs:none-default
|
||||
## +toml2docs:none-default
|
||||
container = "greptimedb"
|
||||
|
||||
## The sas token of the azure account.
|
||||
## **It's only used when the storage type is `Azblob`**.
|
||||
## @toml2docs:none-default
|
||||
## +toml2docs:none-default
|
||||
sas_token = ""
|
||||
|
||||
## The endpoint of the S3 service.
|
||||
## **It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**.
|
||||
## @toml2docs:none-default
|
||||
## +toml2docs:none-default
|
||||
endpoint = "https://s3.amazonaws.com"
|
||||
|
||||
## The region of the S3 service.
|
||||
## **It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**.
|
||||
## @toml2docs:none-default
|
||||
## +toml2docs:none-default
|
||||
region = "us-west-2"
|
||||
|
||||
# Custom storage options
|
||||
# [[storage.providers]]
|
||||
# name = "S3"
|
||||
# type = "S3"
|
||||
# bucket = "greptimedb"
|
||||
# root = "data"
|
||||
# access_key_id = "test"
|
||||
# secret_access_key = "123456"
|
||||
# endpoint = "https://s3.amazonaws.com"
|
||||
# region = "us-west-2"
|
||||
# [[storage.providers]]
|
||||
# name = "Gcs"
|
||||
# type = "Gcs"
|
||||
# bucket = "greptimedb"
|
||||
# root = "data"
|
||||
# scope = "test"
|
||||
# credential_path = "123456"
|
||||
# credential = "base64-credential"
|
||||
# endpoint = "https://storage.googleapis.com"
|
||||
|
||||
## The region engine options. You can configure multiple region engines.
|
||||
[[region_engine]]
|
||||
@@ -426,7 +402,7 @@ region = "us-west-2"
|
||||
[region_engine.mito]
|
||||
|
||||
## Number of region workers.
|
||||
#+ num_workers = 8
|
||||
num_workers = 8
|
||||
|
||||
## Request channel size of each worker.
|
||||
worker_channel_size = 128
|
||||
@@ -447,32 +423,26 @@ max_background_jobs = 4
|
||||
auto_flush_interval = "1h"
|
||||
|
||||
## Global write buffer size for all regions. If not set, it's default to 1/8 of OS memory with a max limitation of 1GB.
|
||||
## @toml2docs:none-default="Auto"
|
||||
#+ global_write_buffer_size = "1GB"
|
||||
global_write_buffer_size = "1GB"
|
||||
|
||||
## Global write buffer size threshold to reject write requests. If not set, it's default to 2 times of `global_write_buffer_size`.
|
||||
## @toml2docs:none-default="Auto"
|
||||
#+ global_write_buffer_reject_size = "2GB"
|
||||
## Global write buffer size threshold to reject write requests. If not set, it's default to 2 times of `global_write_buffer_size`
|
||||
global_write_buffer_reject_size = "2GB"
|
||||
|
||||
## Cache size for SST metadata. Setting it to 0 to disable the cache.
|
||||
## If not set, it's default to 1/32 of OS memory with a max limitation of 128MB.
|
||||
## @toml2docs:none-default="Auto"
|
||||
#+ sst_meta_cache_size = "128MB"
|
||||
sst_meta_cache_size = "128MB"
|
||||
|
||||
## Cache size for vectors and arrow arrays. Setting it to 0 to disable the cache.
|
||||
## If not set, it's default to 1/16 of OS memory with a max limitation of 512MB.
|
||||
## @toml2docs:none-default="Auto"
|
||||
#+ vector_cache_size = "512MB"
|
||||
vector_cache_size = "512MB"
|
||||
|
||||
## Cache size for pages of SST row groups. Setting it to 0 to disable the cache.
|
||||
## If not set, it's default to 1/8 of OS memory.
|
||||
## @toml2docs:none-default="Auto"
|
||||
#+ page_cache_size = "512MB"
|
||||
page_cache_size = "512MB"
|
||||
|
||||
## Cache size for time series selector (e.g. `last_value()`). Setting it to 0 to disable the cache.
|
||||
## If not set, it's default to 1/16 of OS memory with a max limitation of 512MB.
|
||||
## @toml2docs:none-default="Auto"
|
||||
#+ selector_result_cache_size = "512MB"
|
||||
selector_result_cache_size = "512MB"
|
||||
|
||||
## Whether to enable the experimental write cache.
|
||||
enable_experimental_write_cache = false
|
||||
@@ -484,7 +454,7 @@ experimental_write_cache_path = ""
|
||||
experimental_write_cache_size = "512MB"
|
||||
|
||||
## TTL for write cache.
|
||||
## @toml2docs:none-default
|
||||
## +toml2docs:none-default
|
||||
experimental_write_cache_ttl = "8h"
|
||||
|
||||
## Buffer size for SST writing.
|
||||
@@ -502,10 +472,6 @@ parallel_scan_channel_size = 32
|
||||
## Whether to allow stale WAL entries read during replay.
|
||||
allow_stale_entries = false
|
||||
|
||||
## Minimum time interval between two compactions.
|
||||
## To align with the old behavior, the default value is 0 (no restrictions).
|
||||
min_compaction_interval = "0m"
|
||||
|
||||
## The options for index in Mito engine.
|
||||
[region_engine.mito.index]
|
||||
|
||||
@@ -602,11 +568,11 @@ fork_dictionary_bytes = "1GiB"
|
||||
|
||||
## The logging options.
|
||||
[logging]
|
||||
## The directory to store the log files. If set to empty, logs will not be written to files.
|
||||
## The directory to store the log files.
|
||||
dir = "/tmp/greptimedb/logs"
|
||||
|
||||
## The log level. Can be `info`/`debug`/`warn`/`error`.
|
||||
## @toml2docs:none-default
|
||||
## +toml2docs:none-default
|
||||
level = "info"
|
||||
|
||||
## Enable OTLP tracing.
|
||||
@@ -618,9 +584,6 @@ otlp_endpoint = "http://localhost:4317"
|
||||
## Whether to append logs to stdout.
|
||||
append_stdout = true
|
||||
|
||||
## The log format. Can be `text`/`json`.
|
||||
log_format = "text"
|
||||
|
||||
## The percentage of tracing will be sampled and exported.
|
||||
## Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.
|
||||
## ratio > 1 are treated as 1. Fractions < 0 are treated as 0
|
||||
@@ -637,10 +600,10 @@ enable = false
|
||||
## The interval of export metrics.
|
||||
write_interval = "30s"
|
||||
|
||||
## For `standalone` mode, `self_import` is recommended to collect metrics generated by itself
|
||||
## For `standalone` mode, `self_import` is recommend to collect metrics generated by itself
|
||||
## You must create the database before enabling it.
|
||||
[export_metrics.self_import]
|
||||
## @toml2docs:none-default
|
||||
## +toml2docs:none-default
|
||||
db = "greptime_metrics"
|
||||
|
||||
[export_metrics.remote_write]
|
||||
@@ -653,5 +616,5 @@ headers = { }
|
||||
## The tracing options. Only effect when compiled with `tokio-console` feature.
|
||||
[tracing]
|
||||
## The tokio console address.
|
||||
## @toml2docs:none-default
|
||||
## +toml2docs:none-default
|
||||
tokio_console_addr = "127.0.0.1"
|
||||
|
||||
@@ -1,69 +0,0 @@
# Copyright 2023 Greptime Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import re


def find_rust_files(directory):
    error_files = []
    other_rust_files = []
    for root, _, files in os.walk(directory):
        for file in files:
            if file == "error.rs":
                error_files.append(os.path.join(root, file))
            elif file.endswith(".rs"):
                other_rust_files.append(os.path.join(root, file))
    return error_files, other_rust_files


def extract_branch_names(file_content):
    pattern = re.compile(r"#\[snafu\(display\([^\)]*\)\)\]\s*(\w+)\s*\{")
    return pattern.findall(file_content)


def check_snafu_in_files(branch_name, rust_files):
    branch_name_snafu = f"{branch_name}Snafu"
    for rust_file in rust_files:
        with open(rust_file, "r") as file:
            content = file.read()
            if branch_name_snafu in content:
                return True
    return False


def main():
    error_files, other_rust_files = find_rust_files(".")
    branch_names = []

    for error_file in error_files:
        with open(error_file, "r") as file:
            content = file.read()
            branch_names.extend(extract_branch_names(content))

    unused_snafu = [
        branch_name
        for branch_name in branch_names
        if not check_snafu_in_files(branch_name, other_rust_files)
    ]

    for name in unused_snafu:
        print(name)

    if unused_snafu:
        raise SystemExit(1)


if __name__ == "__main__":
    main()
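The deleted script above relies on snafu's code generation: every `#[snafu(display(...))]` variant declared in an `error.rs` gets a companion `<Variant>Snafu` context selector, and the checker flags variants whose selector is never referenced in other files. A minimal sketch of that convention, with a made-up error variant and path:

```rust
// Illustration of the `<Variant>Snafu` selector the script searches for;
// the error type and message here are invented for the example.
use snafu::{ResultExt, Snafu};

#[derive(Debug, Snafu)]
enum Error {
    #[snafu(display("Failed to read config from {}", path))]
    ReadConfig {
        path: String,
        source: std::io::Error,
    },
}

fn load(path: &str) -> Result<String, Error> {
    // `ReadConfigSnafu` is generated for the `ReadConfig` variant; a usage
    // like this is what keeps the variant from being reported as unused.
    std::fs::read_to_string(path).context(ReadConfigSnafu { path })
}
```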
@@ -42,8 +42,7 @@ use greptime_proto::v1::greptime_request::Request;
use greptime_proto::v1::query_request::Query;
use greptime_proto::v1::value::ValueData;
use greptime_proto::v1::{
ColumnDataTypeExtension, DdlRequest, DecimalTypeExtension, JsonTypeExtension, QueryRequest,
Row, SemanticType,
ColumnDataTypeExtension, DdlRequest, DecimalTypeExtension, QueryRequest, Row, SemanticType,
};
use paste::paste;
use snafu::prelude::*;
@@ -104,17 +103,7 @@ impl From<ColumnDataTypeWrapper> for ConcreteDataType {
ColumnDataType::Uint64 => ConcreteDataType::uint64_datatype(),
ColumnDataType::Float32 => ConcreteDataType::float32_datatype(),
ColumnDataType::Float64 => ConcreteDataType::float64_datatype(),
ColumnDataType::Binary => {
if let Some(TypeExt::JsonType(_)) = datatype_wrapper
.datatype_ext
.as_ref()
.and_then(|datatype_ext| datatype_ext.type_ext.as_ref())
{
ConcreteDataType::json_datatype()
} else {
ConcreteDataType::binary_datatype()
}
}
ColumnDataType::Binary => ConcreteDataType::binary_datatype(),
ColumnDataType::String => ConcreteDataType::string_datatype(),
ColumnDataType::Date => ConcreteDataType::date_datatype(),
ColumnDataType::Datetime => ConcreteDataType::datetime_datatype(),
@@ -247,7 +236,7 @@ impl TryFrom<ConcreteDataType> for ColumnDataTypeWrapper {
ConcreteDataType::UInt64(_) => ColumnDataType::Uint64,
ConcreteDataType::Float32(_) => ColumnDataType::Float32,
ConcreteDataType::Float64(_) => ColumnDataType::Float64,
ConcreteDataType::Binary(_) | ConcreteDataType::Json(_) => ColumnDataType::Binary,
ConcreteDataType::Binary(_) => ColumnDataType::Binary,
ConcreteDataType::String(_) => ColumnDataType::String,
ConcreteDataType::Date(_) => ColumnDataType::Date,
ConcreteDataType::DateTime(_) => ColumnDataType::Datetime,
@@ -287,16 +276,6 @@ impl TryFrom<ConcreteDataType> for ColumnDataTypeWrapper {
})),
})
}
ColumnDataType::Binary => {
if datatype == ConcreteDataType::json_datatype() {
// Json is the same as binary in proto. The extension marks the binary in proto is actually a json.
Some(ColumnDataTypeExtension {
type_ext: Some(TypeExt::JsonType(JsonTypeExtension::JsonBinary.into())),
})
} else {
None
}
}
_ => None,
};
Ok(Self {
@@ -670,8 +649,7 @@ pub fn pb_values_to_vector_ref(data_type: &ConcreteDataType, values: Values) ->
ConcreteDataType::Null(_)
| ConcreteDataType::List(_)
| ConcreteDataType::Dictionary(_)
| ConcreteDataType::Duration(_)
| ConcreteDataType::Json(_) => {
| ConcreteDataType::Duration(_) => {
unreachable!()
}
}
@@ -835,8 +813,7 @@ pub fn pb_values_to_values(data_type: &ConcreteDataType, values: Values) -> Vec<
ConcreteDataType::Null(_)
| ConcreteDataType::List(_)
| ConcreteDataType::Dictionary(_)
| ConcreteDataType::Duration(_)
| ConcreteDataType::Json(_) => {
| ConcreteDataType::Duration(_) => {
unreachable!()
}
}
@@ -854,13 +831,7 @@ pub fn is_column_type_value_eq(
expect_type: &ConcreteDataType,
) -> bool {
ColumnDataTypeWrapper::try_new(type_value, type_extension)
.map(|wrapper| {
let datatype = ConcreteDataType::from(wrapper);
(datatype == *expect_type)
// Json type leverage binary type in pb, so this is valid.
|| (datatype == ConcreteDataType::binary_datatype()
&& *expect_type == ConcreteDataType::json_datatype())
})
.map(|wrapper| ConcreteDataType::from(wrapper) == *expect_type)
.unwrap_or(false)
}

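In the hunks above, JSON columns reuse the protobuf `Binary` data type and are told apart only by a `JsonType` extension, so `is_column_type_value_eq` has to accept a decoded binary type when the schema expects JSON. A stand-in sketch of that comparison rule (the enums are simplified placeholders, not the real types):

```rust
// Stand-in types for illustration; not the real ConcreteDataType/ColumnDataType.
#[derive(PartialEq, Clone, Copy, Debug)]
enum WireType {
    Binary,
    String,
}

#[derive(PartialEq, Clone, Copy, Debug)]
enum SchemaType {
    Binary,
    Json,
    String,
}

fn type_matches(decoded: WireType, expected: SchemaType) -> bool {
    match (decoded, expected) {
        // JSON has no wire type of its own, so a binary value is accepted
        // whenever the schema expects JSON.
        (WireType::Binary, SchemaType::Json) => true,
        (WireType::Binary, SchemaType::Binary) => true,
        (WireType::String, SchemaType::String) => true,
        _ => false,
    }
}

fn main() {
    assert!(type_matches(WireType::Binary, SchemaType::Json));
    assert!(!type_matches(WireType::String, SchemaType::Json));
}
```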
@@ -21,14 +21,14 @@ use greptime_proto::v1::region::RegionResponse as RegionResponseV1;
#[derive(Debug)]
pub struct RegionResponse {
pub affected_rows: AffectedRows,
pub extensions: HashMap<String, Vec<u8>>,
pub extension: HashMap<String, Vec<u8>>,
}

impl RegionResponse {
pub fn from_region_response(region_response: RegionResponseV1) -> Self {
Self {
affected_rows: region_response.affected_rows as _,
extensions: region_response.extensions,
extension: region_response.extension,
}
}

@@ -36,7 +36,7 @@ impl RegionResponse {
pub fn new(affected_rows: AffectedRows) -> Self {
Self {
affected_rows,
extensions: Default::default(),
extension: Default::default(),
}
}
}

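The hunk above renames the response's payload map from `extension` to `extensions`. A hedged usage sketch of the renamed field, with `AffectedRows` assumed to be a plain integer alias and the key name invented for the example:

```rust
use std::collections::HashMap;

// Assumption for the sketch: AffectedRows is an integer alias.
type AffectedRows = usize;

#[derive(Debug, Default)]
pub struct RegionResponse {
    pub affected_rows: AffectedRows,
    pub extensions: HashMap<String, Vec<u8>>,
}

fn main() {
    let mut resp = RegionResponse {
        affected_rows: 3,
        ..Default::default()
    };
    // Extensions carry opaque, engine-specific payloads keyed by name.
    resp.extensions
        .insert("example_payload".to_string(), 42u64.to_le_bytes().to_vec());
    println!("{resp:?}");
}
```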
@@ -13,11 +13,9 @@
|
||||
// limitations under the License.
|
||||
|
||||
use common_base::secrets::ExposeSecret;
|
||||
use common_error::ext::BoxedError;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
|
||||
use crate::error::{
|
||||
AccessDeniedSnafu, AuthBackendSnafu, Result, UnsupportedPasswordTypeSnafu, UserNotFoundSnafu,
|
||||
AccessDeniedSnafu, Result, UnsupportedPasswordTypeSnafu, UserNotFoundSnafu,
|
||||
UserPasswordMismatchSnafu,
|
||||
};
|
||||
use crate::user_info::DefaultUserInfo;
|
||||
@@ -51,19 +49,6 @@ impl MockUserProvider {
|
||||
info.schema.clone_into(&mut self.schema);
|
||||
info.username.clone_into(&mut self.username);
|
||||
}
|
||||
|
||||
// this is a deliberate function to ref AuthBackendSnafu
|
||||
// so that it won't get deleted in the future
|
||||
pub fn ref_auth_backend_snafu(&self) -> Result<()> {
|
||||
let none_option = None;
|
||||
|
||||
none_option
|
||||
.context(UserNotFoundSnafu {
|
||||
username: "no_user".to_string(),
|
||||
})
|
||||
.map_err(BoxedError::new)
|
||||
.context(AuthBackendSnafu)
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
|
||||
@@ -18,7 +18,6 @@ use std::sync::Arc;
|
||||
|
||||
use api::v1::greptime_request::Request;
|
||||
use auth::error::Error::InternalState;
|
||||
use auth::error::InternalStateSnafu;
|
||||
use auth::{PermissionChecker, PermissionCheckerRef, PermissionReq, PermissionResp, UserInfoRef};
|
||||
use sql::statements::show::{ShowDatabases, ShowKind};
|
||||
use sql::statements::statement::Statement;
|
||||
@@ -34,10 +33,9 @@ impl PermissionChecker for DummyPermissionChecker {
|
||||
match req {
|
||||
PermissionReq::GrpcRequest(_) => Ok(PermissionResp::Allow),
|
||||
PermissionReq::SqlStatement(_) => Ok(PermissionResp::Reject),
|
||||
_ => InternalStateSnafu {
|
||||
_ => Err(InternalState {
|
||||
msg: "testing".to_string(),
|
||||
}
|
||||
.fail(),
|
||||
}),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -97,6 +97,13 @@ pub enum Error {
|
||||
source: table::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("System catalog is not valid: {}", msg))]
|
||||
SystemCatalog {
|
||||
msg: String,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Cannot find catalog by name: {}", catalog_name))]
|
||||
CatalogNotFound {
|
||||
catalog_name: String,
|
||||
@@ -179,6 +186,13 @@ pub enum Error {
|
||||
source: common_query::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to perform metasrv operation"))]
|
||||
Metasrv {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: meta_client::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Invalid table info in catalog"))]
|
||||
InvalidTableInfoInCatalog {
|
||||
#[snafu(implicit)]
|
||||
@@ -274,6 +288,8 @@ impl ErrorExt for Error {
|
||||
|
||||
Error::FlowInfoNotFound { .. } => StatusCode::FlowNotFound,
|
||||
|
||||
Error::SystemCatalog { .. } => StatusCode::StorageUnavailable,
|
||||
|
||||
Error::UpgradeWeakCatalogManagerRef { .. } => StatusCode::Internal,
|
||||
|
||||
Error::CreateRecordBatch { source, .. } => source.status_code(),
|
||||
@@ -287,6 +303,7 @@ impl ErrorExt for Error {
|
||||
|
||||
Error::CreateTable { source, .. } => source.status_code(),
|
||||
|
||||
Error::Metasrv { source, .. } => source.status_code(),
|
||||
Error::DecodePlan { source, .. } => source.status_code(),
|
||||
Error::InvalidTableInfoInCatalog { source, .. } => source.status_code(),
|
||||
|
||||
@@ -321,6 +338,27 @@ mod tests {
|
||||
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
pub fn test_error_status_code() {
|
||||
assert_eq!(
|
||||
StatusCode::TableAlreadyExists,
|
||||
Error::TableExists {
|
||||
table: "some_table".to_string(),
|
||||
location: Location::generate(),
|
||||
}
|
||||
.status_code()
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
StatusCode::StorageUnavailable,
|
||||
Error::SystemCatalog {
|
||||
msg: String::default(),
|
||||
location: Location::generate(),
|
||||
}
|
||||
.status_code()
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
pub fn test_errors_to_datafusion_error() {
|
||||
let e: DataFusionError = Error::TableExists {
|
||||
|
||||
@@ -20,8 +20,8 @@ use std::time::Duration;
|
||||
|
||||
use common_error::ext::BoxedError;
|
||||
use common_meta::cache_invalidator::KvCacheInvalidator;
|
||||
use common_meta::error::Error::CacheNotGet;
|
||||
use common_meta::error::{CacheNotGetSnafu, Error, ExternalSnafu, GetKvCacheSnafu, Result};
|
||||
use common_meta::error::Error::{CacheNotGet, GetKvCache};
|
||||
use common_meta::error::{CacheNotGetSnafu, Error, ExternalSnafu, Result};
|
||||
use common_meta::kv_backend::{KvBackend, KvBackendRef, TxnService};
|
||||
use common_meta::rpc::store::{
|
||||
BatchDeleteRequest, BatchDeleteResponse, BatchGetRequest, BatchGetResponse, BatchPutRequest,
|
||||
@@ -282,11 +282,8 @@ impl KvBackend for CachedMetaKvBackend {
|
||||
_ => Err(e),
|
||||
},
|
||||
}
|
||||
.map_err(|e| {
|
||||
GetKvCacheSnafu {
|
||||
err_msg: e.to_string(),
|
||||
}
|
||||
.build()
|
||||
.map_err(|e| GetKvCache {
|
||||
err_msg: e.to_string(),
|
||||
});
|
||||
|
||||
// "cache.invalidate_key" and "cache.try_get_with_by_ref" are not mutually exclusive. So we need
|
||||
|
||||
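The hunk above swaps a hand-built `GetKvCache` error value for the generated `GetKvCacheSnafu` selector plus `.build()`. A small sketch of that pattern with a stand-in error type:

```rust
// Stand-in error type; only the selector `.build()` pattern is the point here.
use snafu::Snafu;

#[derive(Debug, Snafu)]
enum Error {
    #[snafu(display("Failed to get KV cache: {}", err_msg))]
    GetKvCache { err_msg: String },
}

fn to_error(msg: &str) -> Error {
    // For a variant without a `source`, the generated selector offers
    // `.build()` to construct the error value directly.
    GetKvCacheSnafu { err_msg: msg }.build()
}

fn main() {
    println!("{}", to_error("backend unreachable"));
}
```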
@@ -36,7 +36,6 @@ use futures_util::{StreamExt, TryStreamExt};
|
||||
use meta_client::client::MetaClient;
|
||||
use moka::sync::Cache;
|
||||
use partition::manager::{PartitionRuleManager, PartitionRuleManagerRef};
|
||||
use session::context::{Channel, QueryContext};
|
||||
use snafu::prelude::*;
|
||||
use table::dist_table::DistTable;
|
||||
use table::table::numbers::{NumbersTable, NUMBERS_TABLE_NAME};
|
||||
@@ -153,11 +152,7 @@ impl CatalogManager for KvBackendCatalogManager {
|
||||
Ok(keys)
|
||||
}
|
||||
|
||||
async fn schema_names(
|
||||
&self,
|
||||
catalog: &str,
|
||||
query_ctx: Option<&QueryContext>,
|
||||
) -> Result<Vec<String>> {
|
||||
async fn schema_names(&self, catalog: &str) -> Result<Vec<String>> {
|
||||
let stream = self
|
||||
.table_metadata_manager
|
||||
.schema_manager()
|
||||
@@ -168,17 +163,12 @@ impl CatalogManager for KvBackendCatalogManager {
|
||||
.map_err(BoxedError::new)
|
||||
.context(ListSchemasSnafu { catalog })?;
|
||||
|
||||
keys.extend(self.system_catalog.schema_names(query_ctx));
|
||||
keys.extend(self.system_catalog.schema_names());
|
||||
|
||||
Ok(keys.into_iter().collect())
|
||||
}
|
||||
|
||||
async fn table_names(
|
||||
&self,
|
||||
catalog: &str,
|
||||
schema: &str,
|
||||
query_ctx: Option<&QueryContext>,
|
||||
) -> Result<Vec<String>> {
|
||||
async fn table_names(&self, catalog: &str, schema: &str) -> Result<Vec<String>> {
|
||||
let stream = self
|
||||
.table_metadata_manager
|
||||
.table_name_manager()
|
||||
@@ -191,7 +181,7 @@ impl CatalogManager for KvBackendCatalogManager {
|
||||
.into_iter()
|
||||
.map(|(k, _)| k)
|
||||
.collect::<Vec<_>>();
|
||||
tables.extend_from_slice(&self.system_catalog.table_names(schema, query_ctx));
|
||||
tables.extend_from_slice(&self.system_catalog.table_names(schema));
|
||||
|
||||
Ok(tables.into_iter().collect())
|
||||
}
|
||||
@@ -204,13 +194,8 @@ impl CatalogManager for KvBackendCatalogManager {
|
||||
.context(TableMetadataManagerSnafu)
|
||||
}
|
||||
|
||||
async fn schema_exists(
|
||||
&self,
|
||||
catalog: &str,
|
||||
schema: &str,
|
||||
query_ctx: Option<&QueryContext>,
|
||||
) -> Result<bool> {
|
||||
if self.system_catalog.schema_exists(schema, query_ctx) {
|
||||
async fn schema_exists(&self, catalog: &str, schema: &str) -> Result<bool> {
|
||||
if self.system_catalog.schema_exists(schema) {
|
||||
return Ok(true);
|
||||
}
|
||||
|
||||
@@ -221,14 +206,8 @@ impl CatalogManager for KvBackendCatalogManager {
|
||||
.context(TableMetadataManagerSnafu)
|
||||
}
|
||||
|
||||
async fn table_exists(
|
||||
&self,
|
||||
catalog: &str,
|
||||
schema: &str,
|
||||
table: &str,
|
||||
query_ctx: Option<&QueryContext>,
|
||||
) -> Result<bool> {
|
||||
if self.system_catalog.table_exists(schema, table, query_ctx) {
|
||||
async fn table_exists(&self, catalog: &str, schema: &str, table: &str) -> Result<bool> {
|
||||
if self.system_catalog.table_exists(schema, table) {
|
||||
return Ok(true);
|
||||
}
|
||||
|
||||
@@ -246,12 +225,10 @@ impl CatalogManager for KvBackendCatalogManager {
|
||||
catalog_name: &str,
|
||||
schema_name: &str,
|
||||
table_name: &str,
|
||||
query_ctx: Option<&QueryContext>,
|
||||
) -> Result<Option<TableRef>> {
|
||||
let channel = query_ctx.map_or(Channel::Unknown, |ctx| ctx.channel());
|
||||
if let Some(table) =
|
||||
self.system_catalog
|
||||
.table(catalog_name, schema_name, table_name, query_ctx)
|
||||
if let Some(table) = self
|
||||
.system_catalog
|
||||
.table(catalog_name, schema_name, table_name)
|
||||
{
|
||||
return Ok(Some(table));
|
||||
}
|
||||
@@ -259,45 +236,23 @@ impl CatalogManager for KvBackendCatalogManager {
|
||||
let table_cache: TableCacheRef = self.cache_registry.get().context(CacheNotFoundSnafu {
|
||||
name: "table_cache",
|
||||
})?;
|
||||
if let Some(table) = table_cache
|
||||
|
||||
table_cache
|
||||
.get_by_ref(&TableName {
|
||||
catalog_name: catalog_name.to_string(),
|
||||
schema_name: schema_name.to_string(),
|
||||
table_name: table_name.to_string(),
|
||||
})
|
||||
.await
|
||||
.context(GetTableCacheSnafu)?
|
||||
{
|
||||
return Ok(Some(table));
|
||||
}
|
||||
|
||||
if channel == Channel::Postgres {
|
||||
// fall back to pg_catalog
|
||||
if let Some(table) =
|
||||
self.system_catalog
|
||||
.table(catalog_name, PG_CATALOG_NAME, table_name, query_ctx)
|
||||
{
|
||||
return Ok(Some(table));
|
||||
}
|
||||
}
|
||||
|
||||
return Ok(None);
|
||||
.context(GetTableCacheSnafu)
|
||||
}
|
||||
|
||||
fn tables<'a>(
|
||||
&'a self,
|
||||
catalog: &'a str,
|
||||
schema: &'a str,
|
||||
query_ctx: Option<&'a QueryContext>,
|
||||
) -> BoxStream<'a, Result<TableRef>> {
|
||||
fn tables<'a>(&'a self, catalog: &'a str, schema: &'a str) -> BoxStream<'a, Result<TableRef>> {
|
||||
let sys_tables = try_stream!({
|
||||
// System tables
|
||||
let sys_table_names = self.system_catalog.table_names(schema, query_ctx);
|
||||
let sys_table_names = self.system_catalog.table_names(schema);
|
||||
for table_name in sys_table_names {
|
||||
if let Some(table) =
|
||||
self.system_catalog
|
||||
.table(catalog, schema, &table_name, query_ctx)
|
||||
{
|
||||
if let Some(table) = self.system_catalog.table(catalog, schema, &table_name) {
|
||||
yield table;
|
||||
}
|
||||
}
|
||||
@@ -358,34 +313,25 @@ struct SystemCatalog {
|
||||
catalog_cache: Cache<String, Arc<InformationSchemaProvider>>,
|
||||
pg_catalog_cache: Cache<String, Arc<PGCatalogProvider>>,
|
||||
|
||||
// system_schema_provider for default catalog
|
||||
// system_schema_provier for default catalog
|
||||
information_schema_provider: Arc<InformationSchemaProvider>,
|
||||
pg_catalog_provider: Arc<PGCatalogProvider>,
|
||||
backend: KvBackendRef,
|
||||
}
|
||||
|
||||
impl SystemCatalog {
|
||||
fn schema_names(&self, query_ctx: Option<&QueryContext>) -> Vec<String> {
|
||||
let channel = query_ctx.map_or(Channel::Unknown, |ctx| ctx.channel());
|
||||
match channel {
|
||||
// pg_catalog only visible under postgres protocol
|
||||
Channel::Postgres => vec![
|
||||
INFORMATION_SCHEMA_NAME.to_string(),
|
||||
PG_CATALOG_NAME.to_string(),
|
||||
],
|
||||
_ => {
|
||||
vec![INFORMATION_SCHEMA_NAME.to_string()]
|
||||
}
|
||||
}
|
||||
// TODO(j0hn50n133): remove the duplicated hard-coded table names logic
|
||||
fn schema_names(&self) -> Vec<String> {
|
||||
vec![
|
||||
INFORMATION_SCHEMA_NAME.to_string(),
|
||||
PG_CATALOG_NAME.to_string(),
|
||||
]
|
||||
}
|
||||
|
||||
fn table_names(&self, schema: &str, query_ctx: Option<&QueryContext>) -> Vec<String> {
|
||||
let channel = query_ctx.map_or(Channel::Unknown, |ctx| ctx.channel());
|
||||
fn table_names(&self, schema: &str) -> Vec<String> {
|
||||
match schema {
|
||||
INFORMATION_SCHEMA_NAME => self.information_schema_provider.table_names(),
|
||||
PG_CATALOG_NAME if channel == Channel::Postgres => {
|
||||
self.pg_catalog_provider.table_names()
|
||||
}
|
||||
PG_CATALOG_NAME => self.pg_catalog_provider.table_names(),
|
||||
DEFAULT_SCHEMA_NAME => {
|
||||
vec![NUMBERS_TABLE_NAME.to_string()]
|
||||
}
|
||||
@@ -393,35 +339,23 @@ impl SystemCatalog {
|
||||
}
|
||||
}
|
||||
|
||||
fn schema_exists(&self, schema: &str, query_ctx: Option<&QueryContext>) -> bool {
|
||||
let channel = query_ctx.map_or(Channel::Unknown, |ctx| ctx.channel());
|
||||
match channel {
|
||||
Channel::Postgres => schema == PG_CATALOG_NAME || schema == INFORMATION_SCHEMA_NAME,
|
||||
_ => schema == INFORMATION_SCHEMA_NAME,
|
||||
}
|
||||
fn schema_exists(&self, schema: &str) -> bool {
|
||||
schema == INFORMATION_SCHEMA_NAME || schema == PG_CATALOG_NAME
|
||||
}
|
||||
|
||||
fn table_exists(&self, schema: &str, table: &str, query_ctx: Option<&QueryContext>) -> bool {
|
||||
let channel = query_ctx.map_or(Channel::Unknown, |ctx| ctx.channel());
|
||||
fn table_exists(&self, schema: &str, table: &str) -> bool {
|
||||
if schema == INFORMATION_SCHEMA_NAME {
|
||||
self.information_schema_provider.table(table).is_some()
|
||||
} else if schema == DEFAULT_SCHEMA_NAME {
|
||||
table == NUMBERS_TABLE_NAME
|
||||
} else if schema == PG_CATALOG_NAME && channel == Channel::Postgres {
|
||||
} else if schema == PG_CATALOG_NAME {
|
||||
self.pg_catalog_provider.table(table).is_some()
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
fn table(
|
||||
&self,
|
||||
catalog: &str,
|
||||
schema: &str,
|
||||
table_name: &str,
|
||||
query_ctx: Option<&QueryContext>,
|
||||
) -> Option<TableRef> {
|
||||
let channel = query_ctx.map_or(Channel::Unknown, |ctx| ctx.channel());
|
||||
fn table(&self, catalog: &str, schema: &str, table_name: &str) -> Option<TableRef> {
|
||||
if schema == INFORMATION_SCHEMA_NAME {
|
||||
let information_schema_provider =
|
||||
self.catalog_cache.get_with_by_ref(catalog, move || {
|
||||
@@ -432,7 +366,7 @@ impl SystemCatalog {
|
||||
))
|
||||
});
|
||||
information_schema_provider.table(table_name)
|
||||
} else if schema == PG_CATALOG_NAME && channel == Channel::Postgres {
|
||||
} else if schema == PG_CATALOG_NAME {
|
||||
if catalog == DEFAULT_CATALOG_NAME {
|
||||
self.pg_catalog_provider.table(table_name)
|
||||
} else {
|
||||
|
||||
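The `SystemCatalog` hunks above gate `pg_catalog` behind the session channel, so it is only listed and resolvable for Postgres clients. A simplified stand-in of that gating (the names here are placeholders, not the real session types):

```rust
#[derive(Clone, Copy)]
enum Channel {
    Postgres,
    Mysql,
    Unknown,
}

fn visible_system_schemas(channel: Channel) -> Vec<&'static str> {
    match channel {
        // pg_catalog is surfaced only to Postgres clients.
        Channel::Postgres => vec!["information_schema", "pg_catalog"],
        _ => vec!["information_schema"],
    }
}

fn main() {
    assert!(visible_system_schemas(Channel::Postgres).contains(&"pg_catalog"));
    assert!(!visible_system_schemas(Channel::Mysql).contains(&"pg_catalog"));
}
```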
@@ -20,10 +20,8 @@ use std::fmt::{Debug, Formatter};
|
||||
use std::sync::Arc;
|
||||
|
||||
use api::v1::CreateTableExpr;
|
||||
use common_catalog::consts::{INFORMATION_SCHEMA_NAME, PG_CATALOG_NAME};
|
||||
use futures::future::BoxFuture;
|
||||
use futures_util::stream::BoxStream;
|
||||
use session::context::QueryContext;
|
||||
use table::metadata::TableId;
|
||||
use table::TableRef;
|
||||
|
||||
@@ -46,35 +44,15 @@ pub trait CatalogManager: Send + Sync {
|
||||
|
||||
async fn catalog_names(&self) -> Result<Vec<String>>;
|
||||
|
||||
async fn schema_names(
|
||||
&self,
|
||||
catalog: &str,
|
||||
query_ctx: Option<&QueryContext>,
|
||||
) -> Result<Vec<String>>;
|
||||
async fn schema_names(&self, catalog: &str) -> Result<Vec<String>>;
|
||||
|
||||
async fn table_names(
|
||||
&self,
|
||||
catalog: &str,
|
||||
schema: &str,
|
||||
query_ctx: Option<&QueryContext>,
|
||||
) -> Result<Vec<String>>;
|
||||
async fn table_names(&self, catalog: &str, schema: &str) -> Result<Vec<String>>;
|
||||
|
||||
async fn catalog_exists(&self, catalog: &str) -> Result<bool>;
|
||||
|
||||
async fn schema_exists(
|
||||
&self,
|
||||
catalog: &str,
|
||||
schema: &str,
|
||||
query_ctx: Option<&QueryContext>,
|
||||
) -> Result<bool>;
|
||||
async fn schema_exists(&self, catalog: &str, schema: &str) -> Result<bool>;
|
||||
|
||||
async fn table_exists(
|
||||
&self,
|
||||
catalog: &str,
|
||||
schema: &str,
|
||||
table: &str,
|
||||
query_ctx: Option<&QueryContext>,
|
||||
) -> Result<bool>;
|
||||
async fn table_exists(&self, catalog: &str, schema: &str, table: &str) -> Result<bool>;
|
||||
|
||||
/// Returns the table by catalog, schema and table name.
|
||||
async fn table(
|
||||
@@ -82,25 +60,10 @@ pub trait CatalogManager: Send + Sync {
|
||||
catalog: &str,
|
||||
schema: &str,
|
||||
table_name: &str,
|
||||
query_ctx: Option<&QueryContext>,
|
||||
) -> Result<Option<TableRef>>;
|
||||
|
||||
/// Returns all tables with a stream by catalog and schema.
|
||||
fn tables<'a>(
|
||||
&'a self,
|
||||
catalog: &'a str,
|
||||
schema: &'a str,
|
||||
query_ctx: Option<&'a QueryContext>,
|
||||
) -> BoxStream<'a, Result<TableRef>>;
|
||||
|
||||
/// Check if `schema` is a reserved schema name
|
||||
fn is_reserved_schema_name(&self, schema: &str) -> bool {
|
||||
// We have to check whether a schema name is reserved before create schema.
|
||||
// We need this rather than use schema_exists directly because `pg_catalog` is
|
||||
// only visible via postgres protocol. So if we don't check, a mysql client may
|
||||
// create a schema named `pg_catalog` which is somehow malformed.
|
||||
schema == INFORMATION_SCHEMA_NAME || schema == PG_CATALOG_NAME
|
||||
}
|
||||
fn tables<'a>(&'a self, catalog: &'a str, schema: &'a str) -> BoxStream<'a, Result<TableRef>>;
|
||||
}
|
||||
|
||||
pub type CatalogManagerRef = Arc<dyn CatalogManager>;
|
||||
|
||||
@@ -26,7 +26,6 @@ use common_catalog::consts::{
|
||||
use common_meta::key::flow::FlowMetadataManager;
|
||||
use common_meta::kv_backend::memory::MemoryKvBackend;
|
||||
use futures_util::stream::BoxStream;
|
||||
use session::context::QueryContext;
|
||||
use snafu::OptionExt;
|
||||
use table::TableRef;
|
||||
|
||||
@@ -54,11 +53,7 @@ impl CatalogManager for MemoryCatalogManager {
|
||||
Ok(self.catalogs.read().unwrap().keys().cloned().collect())
|
||||
}
|
||||
|
||||
async fn schema_names(
|
||||
&self,
|
||||
catalog: &str,
|
||||
_query_ctx: Option<&QueryContext>,
|
||||
) -> Result<Vec<String>> {
|
||||
async fn schema_names(&self, catalog: &str) -> Result<Vec<String>> {
|
||||
Ok(self
|
||||
.catalogs
|
||||
.read()
|
||||
@@ -72,12 +67,7 @@ impl CatalogManager for MemoryCatalogManager {
|
||||
.collect())
|
||||
}
|
||||
|
||||
async fn table_names(
|
||||
&self,
|
||||
catalog: &str,
|
||||
schema: &str,
|
||||
_query_ctx: Option<&QueryContext>,
|
||||
) -> Result<Vec<String>> {
|
||||
async fn table_names(&self, catalog: &str, schema: &str) -> Result<Vec<String>> {
|
||||
Ok(self
|
||||
.catalogs
|
||||
.read()
|
||||
@@ -97,22 +87,11 @@ impl CatalogManager for MemoryCatalogManager {
|
||||
self.catalog_exist_sync(catalog)
|
||||
}
|
||||
|
||||
async fn schema_exists(
|
||||
&self,
|
||||
catalog: &str,
|
||||
schema: &str,
|
||||
_query_ctx: Option<&QueryContext>,
|
||||
) -> Result<bool> {
|
||||
async fn schema_exists(&self, catalog: &str, schema: &str) -> Result<bool> {
|
||||
self.schema_exist_sync(catalog, schema)
|
||||
}
|
||||
|
||||
async fn table_exists(
|
||||
&self,
|
||||
catalog: &str,
|
||||
schema: &str,
|
||||
table: &str,
|
||||
_query_ctx: Option<&QueryContext>,
|
||||
) -> Result<bool> {
|
||||
async fn table_exists(&self, catalog: &str, schema: &str, table: &str) -> Result<bool> {
|
||||
let catalogs = self.catalogs.read().unwrap();
|
||||
Ok(catalogs
|
||||
.get(catalog)
|
||||
@@ -129,7 +108,6 @@ impl CatalogManager for MemoryCatalogManager {
|
||||
catalog: &str,
|
||||
schema: &str,
|
||||
table_name: &str,
|
||||
_query_ctx: Option<&QueryContext>,
|
||||
) -> Result<Option<TableRef>> {
|
||||
let result = try {
|
||||
self.catalogs
|
||||
@@ -143,12 +121,7 @@ impl CatalogManager for MemoryCatalogManager {
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
fn tables<'a>(
|
||||
&'a self,
|
||||
catalog: &'a str,
|
||||
schema: &'a str,
|
||||
_query_ctx: Option<&QueryContext>,
|
||||
) -> BoxStream<'a, Result<TableRef>> {
|
||||
fn tables<'a>(&'a self, catalog: &'a str, schema: &'a str) -> BoxStream<'a, Result<TableRef>> {
|
||||
let catalogs = self.catalogs.read().unwrap();
|
||||
|
||||
let Some(schemas) = catalogs.get(catalog) else {
|
||||
@@ -398,12 +371,11 @@ mod tests {
|
||||
DEFAULT_CATALOG_NAME,
|
||||
DEFAULT_SCHEMA_NAME,
|
||||
NUMBERS_TABLE_NAME,
|
||||
None,
|
||||
)
|
||||
.await
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
let stream = catalog_list.tables(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, None);
|
||||
let stream = catalog_list.tables(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME);
|
||||
let tables = stream.try_collect::<Vec<_>>().await.unwrap();
|
||||
assert_eq!(tables.len(), 1);
|
||||
assert_eq!(
|
||||
@@ -412,12 +384,7 @@ mod tests {
|
||||
);
|
||||
|
||||
assert!(catalog_list
|
||||
.table(
|
||||
DEFAULT_CATALOG_NAME,
|
||||
DEFAULT_SCHEMA_NAME,
|
||||
"not_exists",
|
||||
None
|
||||
)
|
||||
.table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, "not_exists")
|
||||
.await
|
||||
.unwrap()
|
||||
.is_none());
|
||||
@@ -444,7 +411,7 @@ mod tests {
|
||||
};
|
||||
catalog.register_table_sync(register_table_req).unwrap();
|
||||
assert!(catalog
|
||||
.table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, table_name, None)
|
||||
.table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, table_name)
|
||||
.await
|
||||
.unwrap()
|
||||
.is_some());
|
||||
@@ -456,7 +423,7 @@ mod tests {
|
||||
};
|
||||
catalog.deregister_table_sync(deregister_table_req).unwrap();
|
||||
assert!(catalog
|
||||
.table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, table_name, None)
|
||||
.table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, table_name)
|
||||
.await
|
||||
.unwrap()
|
||||
.is_none());
|
||||
|
||||
@@ -257,8 +257,8 @@ impl InformationSchemaColumnsBuilder {
|
||||
.context(UpgradeWeakCatalogManagerRefSnafu)?;
|
||||
let predicates = Predicates::from_scan_request(&request);
|
||||
|
||||
for schema_name in catalog_manager.schema_names(&catalog_name, None).await? {
|
||||
let mut stream = catalog_manager.tables(&catalog_name, &schema_name, None);
|
||||
for schema_name in catalog_manager.schema_names(&catalog_name).await? {
|
||||
let mut stream = catalog_manager.tables(&catalog_name, &schema_name);
|
||||
|
||||
while let Some(table) = stream.try_next().await? {
|
||||
let keys = &table.table_info().meta.primary_key_indices;
|
||||
|
||||
@@ -212,8 +212,8 @@ impl InformationSchemaKeyColumnUsageBuilder {
|
||||
.context(UpgradeWeakCatalogManagerRefSnafu)?;
|
||||
let predicates = Predicates::from_scan_request(&request);
|
||||
|
||||
for schema_name in catalog_manager.schema_names(&catalog_name, None).await? {
|
||||
let mut stream = catalog_manager.tables(&catalog_name, &schema_name, None);
|
||||
for schema_name in catalog_manager.schema_names(&catalog_name).await? {
|
||||
let mut stream = catalog_manager.tables(&catalog_name, &schema_name);
|
||||
|
||||
while let Some(table) = stream.try_next().await? {
|
||||
let mut primary_constraints = vec![];
|
||||
|
||||
@@ -240,9 +240,9 @@ impl InformationSchemaPartitionsBuilder {
|
||||
|
||||
let predicates = Predicates::from_scan_request(&request);
|
||||
|
||||
for schema_name in catalog_manager.schema_names(&catalog_name, None).await? {
|
||||
for schema_name in catalog_manager.schema_names(&catalog_name).await? {
|
||||
let table_info_stream = catalog_manager
|
||||
.tables(&catalog_name, &schema_name, None)
|
||||
.tables(&catalog_name, &schema_name)
|
||||
.try_filter_map(|t| async move {
|
||||
let table_info = t.table_info();
|
||||
if table_info.table_type == TableType::Temporary {
|
||||
|
||||
@@ -176,9 +176,9 @@ impl InformationSchemaRegionPeersBuilder {
|
||||
|
||||
let predicates = Predicates::from_scan_request(&request);
|
||||
|
||||
for schema_name in catalog_manager.schema_names(&catalog_name, None).await? {
|
||||
for schema_name in catalog_manager.schema_names(&catalog_name).await? {
|
||||
let table_id_stream = catalog_manager
|
||||
.tables(&catalog_name, &schema_name, None)
|
||||
.tables(&catalog_name, &schema_name)
|
||||
.try_filter_map(|t| async move {
|
||||
let table_info = t.table_info();
|
||||
if table_info.table_type == TableType::Temporary {
|
||||
|
||||
@@ -171,7 +171,7 @@ impl InformationSchemaSchemataBuilder {
|
||||
let table_metadata_manager = utils::table_meta_manager(&self.catalog_manager)?;
|
||||
let predicates = Predicates::from_scan_request(&request);
|
||||
|
||||
for schema_name in catalog_manager.schema_names(&catalog_name, None).await? {
|
||||
for schema_name in catalog_manager.schema_names(&catalog_name).await? {
|
||||
let opts = if let Some(table_metadata_manager) = &table_metadata_manager {
|
||||
table_metadata_manager
|
||||
.schema_manager()
|
||||
|
||||
@@ -176,8 +176,8 @@ impl InformationSchemaTableConstraintsBuilder {
|
||||
.context(UpgradeWeakCatalogManagerRefSnafu)?;
|
||||
let predicates = Predicates::from_scan_request(&request);
|
||||
|
||||
for schema_name in catalog_manager.schema_names(&catalog_name, None).await? {
|
||||
let mut stream = catalog_manager.tables(&catalog_name, &schema_name, None);
|
||||
for schema_name in catalog_manager.schema_names(&catalog_name).await? {
|
||||
let mut stream = catalog_manager.tables(&catalog_name, &schema_name);
|
||||
|
||||
while let Some(table) = stream.try_next().await? {
|
||||
let keys = &table.table_info().meta.primary_key_indices;
|
||||
|
||||
@@ -234,8 +234,8 @@ impl InformationSchemaTablesBuilder {
|
||||
.context(UpgradeWeakCatalogManagerRefSnafu)?;
|
||||
let predicates = Predicates::from_scan_request(&request);
|
||||
|
||||
for schema_name in catalog_manager.schema_names(&catalog_name, None).await? {
|
||||
let mut stream = catalog_manager.tables(&catalog_name, &schema_name, None);
|
||||
for schema_name in catalog_manager.schema_names(&catalog_name).await? {
|
||||
let mut stream = catalog_manager.tables(&catalog_name, &schema_name);
|
||||
|
||||
while let Some(table) = stream.try_next().await? {
|
||||
let table_info = table.table_info();
|
||||
|
||||
@@ -192,8 +192,8 @@ impl InformationSchemaViewsBuilder {
|
||||
.context(CastManagerSnafu)?
|
||||
.view_info_cache()?;
|
||||
|
||||
for schema_name in catalog_manager.schema_names(&catalog_name, None).await? {
|
||||
let mut stream = catalog_manager.tables(&catalog_name, &schema_name, None);
|
||||
for schema_name in catalog_manager.schema_names(&catalog_name).await? {
|
||||
let mut stream = catalog_manager.tables(&catalog_name, &schema_name);
|
||||
|
||||
while let Some(table) = stream.try_next().await? {
|
||||
let table_info = table.table_info();
|
||||
|
||||
@@ -18,16 +18,15 @@ mod pg_namespace;
|
||||
mod table_names;
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::sync::{Arc, LazyLock, Weak};
|
||||
use std::sync::{Arc, Weak};
|
||||
|
||||
use common_catalog::consts::{self, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, PG_CATALOG_NAME};
|
||||
use common_catalog::consts::{self, PG_CATALOG_NAME};
|
||||
use datatypes::schema::ColumnSchema;
|
||||
use lazy_static::lazy_static;
|
||||
use paste::paste;
|
||||
use pg_catalog_memory_table::get_schema_columns;
|
||||
use pg_class::PGClass;
|
||||
use pg_namespace::PGNamespace;
|
||||
use session::context::{Channel, QueryContext};
|
||||
use table::TableRef;
|
||||
pub use table_names::*;
|
||||
|
||||
@@ -143,12 +142,3 @@ impl SystemSchemaProviderInner for PGCatalogProvider {
|
||||
&self.catalog_name
|
||||
}
|
||||
}
|
||||
|
||||
/// Provide query context to call the [`CatalogManager`]'s method.
|
||||
static PG_QUERY_CTX: LazyLock<QueryContext> = LazyLock::new(|| {
|
||||
QueryContext::with_channel(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, Channel::Postgres)
|
||||
});
|
||||
|
||||
fn query_ctx() -> Option<&'static QueryContext> {
|
||||
Some(&PG_QUERY_CTX)
|
||||
}
|
||||
|
||||
@@ -32,7 +32,7 @@ use store_api::storage::ScanRequest;
|
||||
use table::metadata::TableType;
|
||||
|
||||
use super::pg_namespace::oid_map::PGNamespaceOidMapRef;
|
||||
use super::{query_ctx, OID_COLUMN_NAME, PG_CLASS};
|
||||
use super::{OID_COLUMN_NAME, PG_CLASS};
|
||||
use crate::error::{
|
||||
CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
|
||||
};
|
||||
@@ -202,11 +202,8 @@ impl PGClassBuilder {
|
||||
.upgrade()
|
||||
.context(UpgradeWeakCatalogManagerRefSnafu)?;
|
||||
let predicates = Predicates::from_scan_request(&request);
|
||||
for schema_name in catalog_manager
|
||||
.schema_names(&catalog_name, query_ctx())
|
||||
.await?
|
||||
{
|
||||
let mut stream = catalog_manager.tables(&catalog_name, &schema_name, query_ctx());
|
||||
for schema_name in catalog_manager.schema_names(&catalog_name).await? {
|
||||
let mut stream = catalog_manager.tables(&catalog_name, &schema_name);
|
||||
while let Some(table) = stream.try_next().await? {
|
||||
let table_info = table.table_info();
|
||||
self.add_class(
|
||||
|
||||
@@ -31,7 +31,7 @@ use datatypes::vectors::{StringVectorBuilder, UInt32VectorBuilder, VectorRef};
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use store_api::storage::ScanRequest;
|
||||
|
||||
use super::{query_ctx, PGNamespaceOidMapRef, OID_COLUMN_NAME, PG_NAMESPACE};
|
||||
use super::{PGNamespaceOidMapRef, OID_COLUMN_NAME, PG_NAMESPACE};
|
||||
use crate::error::{
|
||||
CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
|
||||
};
|
||||
@@ -180,10 +180,7 @@ impl PGNamespaceBuilder {
|
||||
.upgrade()
|
||||
.context(UpgradeWeakCatalogManagerRefSnafu)?;
|
||||
let predicates = Predicates::from_scan_request(&request);
|
||||
for schema_name in catalog_manager
|
||||
.schema_names(&catalog_name, query_ctx())
|
||||
.await?
|
||||
{
|
||||
for schema_name in catalog_manager.schema_names(&catalog_name).await? {
|
||||
self.add_namespace(&predicates, &schema_name);
|
||||
}
|
||||
self.finish()
|
||||
|
||||
@@ -23,7 +23,7 @@ use datafusion::datasource::view::ViewTable;
|
||||
use datafusion::datasource::{provider_as_source, TableProvider};
|
||||
use datafusion::logical_expr::TableSource;
|
||||
use itertools::Itertools;
|
||||
use session::context::QueryContextRef;
|
||||
use session::context::QueryContext;
|
||||
use snafu::{ensure, OptionExt, ResultExt};
|
||||
use table::metadata::TableType;
|
||||
use table::table::adapter::DfTableProviderAdapter;
|
||||
@@ -45,7 +45,6 @@ pub struct DfTableSourceProvider {
|
||||
disallow_cross_catalog_query: bool,
|
||||
default_catalog: String,
|
||||
default_schema: String,
|
||||
query_ctx: QueryContextRef,
|
||||
plan_decoder: SubstraitPlanDecoderRef,
|
||||
enable_ident_normalization: bool,
|
||||
}
|
||||
@@ -54,7 +53,7 @@ impl DfTableSourceProvider {
|
||||
pub fn new(
|
||||
catalog_manager: CatalogManagerRef,
|
||||
disallow_cross_catalog_query: bool,
|
||||
query_ctx: QueryContextRef,
|
||||
query_ctx: &QueryContext,
|
||||
plan_decoder: SubstraitPlanDecoderRef,
|
||||
enable_ident_normalization: bool,
|
||||
) -> Self {
|
||||
@@ -64,7 +63,6 @@ impl DfTableSourceProvider {
|
||||
resolved_tables: HashMap::new(),
|
||||
default_catalog: query_ctx.current_catalog().to_owned(),
|
||||
default_schema: query_ctx.current_schema(),
|
||||
query_ctx,
|
||||
plan_decoder,
|
||||
enable_ident_normalization,
|
||||
}
|
||||
@@ -73,7 +71,8 @@ impl DfTableSourceProvider {
|
||||
pub fn resolve_table_ref(&self, table_ref: TableReference) -> Result<ResolvedTableReference> {
|
||||
if self.disallow_cross_catalog_query {
|
||||
match &table_ref {
|
||||
TableReference::Bare { .. } | TableReference::Partial { .. } => {}
|
||||
TableReference::Bare { .. } => (),
|
||||
TableReference::Partial { .. } => {}
|
||||
TableReference::Full {
|
||||
catalog, schema, ..
|
||||
} => {
|
||||
@@ -108,7 +107,7 @@ impl DfTableSourceProvider {
|
||||
|
||||
let table = self
|
||||
.catalog_manager
|
||||
.table(catalog_name, schema_name, table_name, Some(&self.query_ctx))
|
||||
.table(catalog_name, schema_name, table_name)
|
||||
.await?
|
||||
.with_context(|| TableNotExistSnafu {
|
||||
table: format_full_table_name(catalog_name, schema_name, table_name),
|
||||
@@ -211,12 +210,12 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_validate_table_ref() {
|
||||
let query_ctx = Arc::new(QueryContext::with("greptime", "public"));
|
||||
let query_ctx = &QueryContext::with("greptime", "public");
|
||||
|
||||
let table_provider = DfTableSourceProvider::new(
|
||||
MemoryCatalogManager::with_default_setup(),
|
||||
true,
|
||||
query_ctx.clone(),
|
||||
query_ctx,
|
||||
DummyDecoder::arc(),
|
||||
true,
|
||||
);
|
||||
@@ -309,7 +308,7 @@ mod tests {
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_resolve_view() {
|
||||
let query_ctx = Arc::new(QueryContext::with("greptime", "public"));
|
||||
let query_ctx = &QueryContext::with("greptime", "public");
|
||||
let backend = Arc::new(MemoryKvBackend::default());
|
||||
let layered_cache_builder = LayeredCacheRegistryBuilder::default()
|
||||
.add_cache_registry(CacheRegistryBuilder::default().build());
|
||||
@@ -345,13 +344,8 @@ mod tests {
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let mut table_provider = DfTableSourceProvider::new(
|
||||
catalog_manager,
|
||||
true,
|
||||
query_ctx.clone(),
|
||||
MockDecoder::arc(),
|
||||
true,
|
||||
);
|
||||
let mut table_provider =
|
||||
DfTableSourceProvider::new(catalog_manager, true, query_ctx, MockDecoder::arc(), true);
|
||||
|
||||
// View not found
|
||||
let table_ref = TableReference::bare("not_exists_view");
|
||||
|
||||
@@ -112,7 +112,7 @@ impl SchemaProvider for DummySchemaProvider {
|
||||
async fn table(&self, name: &str) -> datafusion::error::Result<Option<Arc<dyn TableProvider>>> {
|
||||
let table = self
|
||||
.catalog_manager
|
||||
.table(&self.catalog_name, &self.schema_name, name, None)
|
||||
.table(&self.catalog_name, &self.schema_name, name)
|
||||
.await?
|
||||
.with_context(|| TableNotExistSnafu {
|
||||
table: format_full_table_name(&self.catalog_name, &self.schema_name, name),
|
||||
|
||||
@@ -37,8 +37,7 @@ use tonic::metadata::AsciiMetadataKey;
|
||||
use tonic::transport::Channel;
|
||||
|
||||
use crate::error::{
|
||||
ConvertFlightDataSnafu, Error, FlightGetSnafu, IllegalFlightMessagesSnafu, InvalidAsciiSnafu,
|
||||
ServerSnafu,
|
||||
ConvertFlightDataSnafu, Error, IllegalFlightMessagesSnafu, InvalidAsciiSnafu, ServerSnafu,
|
||||
};
|
||||
use crate::{from_grpc_response, Client, Result};
|
||||
|
||||
@@ -226,18 +225,16 @@ impl Database {
|
||||
|
||||
let mut client = self.client.make_flight_client()?;
|
||||
|
||||
let response = client.mut_inner().do_get(request).await.or_else(|e| {
|
||||
let response = client.mut_inner().do_get(request).await.map_err(|e| {
|
||||
let tonic_code = e.code();
|
||||
let e: Error = e.into();
|
||||
let code = e.status_code();
|
||||
let msg = e.to_string();
|
||||
let error =
|
||||
Err(BoxedError::new(ServerSnafu { code, msg }.build())).with_context(|_| {
|
||||
FlightGetSnafu {
|
||||
addr: client.addr().to_string(),
|
||||
tonic_code,
|
||||
}
|
||||
});
|
||||
let error = Error::FlightGet {
|
||||
tonic_code,
|
||||
addr: client.addr().to_string(),
|
||||
source: BoxedError::new(ServerSnafu { code, msg }.build()),
|
||||
};
|
||||
error!(
|
||||
"Failed to do Flight get, addr: {}, code: {}, source: {:?}",
|
||||
client.addr(),
|
||||
|
||||
@@ -39,6 +39,13 @@ pub enum Error {
|
||||
source: BoxedError,
|
||||
},
|
||||
|
||||
#[snafu(display("Failure occurs during handling request"))]
|
||||
HandleRequest {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: BoxedError,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to convert FlightData"))]
|
||||
ConvertFlightData {
|
||||
#[snafu(implicit)]
|
||||
@@ -109,6 +116,13 @@ pub enum Error {
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to send request with streaming: {}", err_msg))]
|
||||
ClientStreaming {
|
||||
err_msg: String,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to parse ascii string: {}", value))]
|
||||
InvalidAscii {
|
||||
value: String,
|
||||
@@ -124,10 +138,12 @@ impl ErrorExt for Error {
|
||||
match self {
|
||||
Error::IllegalFlightMessages { .. }
|
||||
| Error::MissingField { .. }
|
||||
| Error::IllegalDatabaseResponse { .. } => StatusCode::Internal,
|
||||
| Error::IllegalDatabaseResponse { .. }
|
||||
| Error::ClientStreaming { .. } => StatusCode::Internal,
|
||||
|
||||
Error::Server { code, .. } => *code,
|
||||
Error::FlightGet { source, .. }
|
||||
| Error::HandleRequest { source, .. }
|
||||
| Error::RegionServer { source, .. }
|
||||
| Error::FlowServer { source, .. } => source.status_code(),
|
||||
Error::CreateChannel { source, .. }
|
||||
|
||||
@@ -16,9 +16,9 @@ use api::v1::flow::{FlowRequest, FlowResponse};
|
||||
use api::v1::region::InsertRequests;
|
||||
use common_error::ext::BoxedError;
|
||||
use common_meta::node_manager::Flownode;
|
||||
use snafu::ResultExt;
|
||||
use snafu::{location, ResultExt};
|
||||
|
||||
use crate::error::{FlowServerSnafu, Result};
|
||||
use crate::error::Result;
|
||||
use crate::Client;
|
||||
|
||||
#[derive(Debug)]
|
||||
@@ -57,10 +57,15 @@ impl FlowRequester {
|
||||
let response = client
|
||||
.handle_create_remove(request)
|
||||
.await
|
||||
.or_else(|e| {
|
||||
.map_err(|e| {
|
||||
let code = e.code();
|
||||
let err: crate::error::Error = e.into();
|
||||
Err(BoxedError::new(err)).context(FlowServerSnafu { addr, code })
|
||||
crate::error::Error::FlowServer {
|
||||
addr,
|
||||
code,
|
||||
source: BoxedError::new(err),
|
||||
location: location!(),
|
||||
}
|
||||
})?
|
||||
.into_inner();
|
||||
Ok(response)
|
||||
@@ -83,10 +88,15 @@ impl FlowRequester {
|
||||
let response = client
|
||||
.handle_mirror_request(requests)
|
||||
.await
|
||||
.or_else(|e| {
|
||||
.map_err(|e| {
|
||||
let code = e.code();
|
||||
let err: crate::error::Error = e.into();
|
||||
Err(BoxedError::new(err)).context(FlowServerSnafu { addr, code })
|
||||
crate::error::Error::FlowServer {
|
||||
addr,
|
||||
code,
|
||||
source: BoxedError::new(err),
|
||||
location: location!(),
|
||||
}
|
||||
})?
|
||||
.into_inner();
|
||||
Ok(response)
|
||||
|
||||
@@ -38,8 +38,8 @@ use substrait::{DFLogicalSubstraitConvertor, SubstraitPlan};
|
||||
use tokio_stream::StreamExt;
|
||||
|
||||
use crate::error::{
|
||||
self, ConvertFlightDataSnafu, FlightGetSnafu, IllegalDatabaseResponseSnafu,
|
||||
IllegalFlightMessagesSnafu, MissingFieldSnafu, Result, ServerSnafu,
|
||||
self, ConvertFlightDataSnafu, IllegalDatabaseResponseSnafu, IllegalFlightMessagesSnafu,
|
||||
MissingFieldSnafu, Result, ServerSnafu,
|
||||
};
|
||||
use crate::{metrics, Client, Error};
|
||||
|
||||
@@ -103,14 +103,11 @@ impl RegionRequester {
|
||||
let e: error::Error = e.into();
|
||||
let code = e.status_code();
|
||||
let msg = e.to_string();
|
||||
let error = ServerSnafu { code, msg }
|
||||
.fail::<()>()
|
||||
.map_err(BoxedError::new)
|
||||
.with_context(|_| FlightGetSnafu {
|
||||
tonic_code,
|
||||
addr: flight_client.addr().to_string(),
|
||||
})
|
||||
.unwrap_err();
|
||||
let error = Error::FlightGet {
|
||||
tonic_code,
|
||||
addr: flight_client.addr().to_string(),
|
||||
source: BoxedError::new(ServerSnafu { code, msg }.build()),
|
||||
};
|
||||
error!(
|
||||
e; "Failed to do Flight get, addr: {}, code: {}",
|
||||
flight_client.addr(),
|
||||
|
||||
@@ -70,7 +70,6 @@ serde.workspace = true
|
||||
serde_json.workspace = true
|
||||
servers.workspace = true
|
||||
session.workspace = true
|
||||
similar-asserts.workspace = true
|
||||
snafu.workspace = true
|
||||
store-api.workspace = true
|
||||
substrait.workspace = true
|
||||
|
||||
@@ -21,8 +21,6 @@ mod export;
|
||||
mod helper;
|
||||
|
||||
// Wait for https://github.com/GreptimeTeam/greptimedb/issues/2373
|
||||
mod database;
|
||||
mod import;
|
||||
#[allow(unused)]
|
||||
mod repl;
|
||||
|
||||
@@ -34,7 +32,6 @@ pub use repl::Repl;
|
||||
use tracing_appender::non_blocking::WorkerGuard;
|
||||
|
||||
use self::export::ExportCommand;
|
||||
use crate::cli::import::ImportCommand;
|
||||
use crate::error::Result;
|
||||
use crate::options::GlobalOptions;
|
||||
use crate::App;
|
||||
@@ -117,7 +114,6 @@ enum SubCommand {
|
||||
// Attach(AttachCommand),
|
||||
Bench(BenchTableMetadataCommand),
|
||||
Export(ExportCommand),
|
||||
Import(ImportCommand),
|
||||
}
|
||||
|
||||
impl SubCommand {
|
||||
@@ -126,7 +122,6 @@ impl SubCommand {
|
||||
// SubCommand::Attach(cmd) => cmd.build().await,
|
||||
SubCommand::Bench(cmd) => cmd.build(guard).await,
|
||||
SubCommand::Export(cmd) => cmd.build(guard).await,
|
||||
SubCommand::Import(cmd) => cmd.build(guard).await,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,119 +0,0 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use base64::engine::general_purpose;
|
||||
use base64::Engine;
|
||||
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
|
||||
use serde_json::Value;
|
||||
use servers::http::greptime_result_v1::GreptimedbV1Response;
|
||||
use servers::http::GreptimeQueryOutput;
|
||||
use snafu::ResultExt;
|
||||
|
||||
use crate::error::{HttpQuerySqlSnafu, Result, SerdeJsonSnafu};
|
||||
|
||||
pub(crate) struct DatabaseClient {
|
||||
addr: String,
|
||||
catalog: String,
|
||||
auth_header: Option<String>,
|
||||
}
|
||||
|
||||
impl DatabaseClient {
|
||||
pub fn new(addr: String, catalog: String, auth_basic: Option<String>) -> Self {
|
||||
let auth_header = if let Some(basic) = auth_basic {
|
||||
let encoded = general_purpose::STANDARD.encode(basic);
|
||||
Some(format!("basic {}", encoded))
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
Self {
|
||||
addr,
|
||||
catalog,
|
||||
auth_header,
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn sql_in_public(&self, sql: &str) -> Result<Option<Vec<Vec<Value>>>> {
|
||||
self.sql(sql, DEFAULT_SCHEMA_NAME).await
|
||||
}
|
||||
|
||||
/// Execute sql query.
|
||||
pub async fn sql(&self, sql: &str, schema: &str) -> Result<Option<Vec<Vec<Value>>>> {
|
||||
let url = format!("http://{}/v1/sql", self.addr);
|
||||
let params = [
|
||||
("db", format!("{}-{}", self.catalog, schema)),
|
||||
("sql", sql.to_string()),
|
||||
];
|
||||
let mut request = reqwest::Client::new()
|
||||
.post(&url)
|
||||
.form(¶ms)
|
||||
.header("Content-Type", "application/x-www-form-urlencoded");
|
||||
if let Some(ref auth) = self.auth_header {
|
||||
request = request.header("Authorization", auth);
|
||||
}
|
||||
|
||||
let response = request.send().await.with_context(|_| HttpQuerySqlSnafu {
|
||||
reason: format!("bad url: {}", url),
|
||||
})?;
|
||||
let response = response
|
||||
.error_for_status()
|
||||
.with_context(|_| HttpQuerySqlSnafu {
|
||||
reason: format!("query failed: {}", sql),
|
||||
})?;
|
||||
|
||||
let text = response.text().await.with_context(|_| HttpQuerySqlSnafu {
|
||||
reason: "cannot get response text".to_string(),
|
||||
})?;
|
||||
|
||||
let body = serde_json::from_str::<GreptimedbV1Response>(&text).context(SerdeJsonSnafu)?;
|
||||
Ok(body.output().first().and_then(|output| match output {
|
||||
GreptimeQueryOutput::Records(records) => Some(records.rows().clone()),
|
||||
GreptimeQueryOutput::AffectedRows(_) => None,
|
||||
}))
|
||||
}
|
||||
}
|
||||
|
||||
/// Split at `-`.
|
||||
pub(crate) fn split_database(database: &str) -> Result<(String, Option<String>)> {
|
||||
let (catalog, schema) = match database.split_once('-') {
|
||||
Some((catalog, schema)) => (catalog, schema),
|
||||
None => (DEFAULT_CATALOG_NAME, database),
|
||||
};
|
||||
|
||||
if schema == "*" {
|
||||
Ok((catalog.to_string(), None))
|
||||
} else {
|
||||
Ok((catalog.to_string(), Some(schema.to_string())))
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_split_database() {
|
||||
let result = split_database("catalog-schema").unwrap();
|
||||
assert_eq!(result, ("catalog".to_string(), Some("schema".to_string())));
|
||||
|
||||
let result = split_database("schema").unwrap();
|
||||
assert_eq!(result, ("greptime".to_string(), Some("schema".to_string())));
|
||||
|
||||
let result = split_database("catalog-*").unwrap();
|
||||
assert_eq!(result, ("catalog".to_string(), None));
|
||||
|
||||
let result = split_database("*").unwrap();
|
||||
assert_eq!(result, ("greptime".to_string(), None));
|
||||
}
|
||||
}
|
||||
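`DatabaseClient` above is a thin wrapper over GreptimeDB's HTTP `/v1/sql` endpoint. Since the type is crate-private, the following is only an illustrative sketch of the call shape, assuming a local instance on the default HTTP port and a tokio runtime:

```rust
// Illustrative only: assumes a running GreptimeDB HTTP endpoint at 127.0.0.1:4000.
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = DatabaseClient::new(
        "127.0.0.1:4000".to_string(), // HTTP address, no scheme
        "greptime".to_string(),       // catalog
        None,                         // no basic auth
    );
    // Queries run against the default `public` schema; rows come back as JSON values.
    if let Some(rows) = client.sql_in_public("SHOW DATABASES").await? {
        for row in rows {
            println!("{row:?}");
        }
    }
    Ok(())
}
```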
@@ -13,23 +13,30 @@
|
||||
// limitations under the License.
|
||||
|
||||
use std::collections::HashSet;
|
||||
use std::path::PathBuf;
|
||||
use std::path::Path;
|
||||
use std::sync::Arc;
|
||||
|
||||
use async_trait::async_trait;
|
||||
use base64::engine::general_purpose;
|
||||
use base64::Engine;
|
||||
use clap::{Parser, ValueEnum};
|
||||
use client::DEFAULT_SCHEMA_NAME;
|
||||
use common_catalog::consts::DEFAULT_CATALOG_NAME;
|
||||
use common_telemetry::{debug, error, info};
|
||||
use serde_json::Value;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use servers::http::greptime_result_v1::GreptimedbV1Response;
|
||||
use servers::http::GreptimeQueryOutput;
|
||||
use snafu::ResultExt;
|
||||
use tokio::fs::File;
|
||||
use tokio::io::{AsyncWriteExt, BufWriter};
|
||||
use tokio::sync::Semaphore;
|
||||
use tokio::time::Instant;
|
||||
use tracing_appender::non_blocking::WorkerGuard;
|
||||
|
||||
use crate::cli::database::DatabaseClient;
|
||||
use crate::cli::{database, Instance, Tool};
|
||||
use crate::error::{EmptyResultSnafu, Error, FileIoSnafu, Result, SchemaNotFoundSnafu};
|
||||
use crate::cli::{Instance, Tool};
|
||||
use crate::error::{
|
||||
EmptyResultSnafu, Error, FileIoSnafu, HttpQuerySqlSnafu, Result, SerdeJsonSnafu,
|
||||
};
|
||||
|
||||
type TableReference = (String, String, String);
|
||||
|
||||
@@ -87,21 +94,26 @@ pub struct ExportCommand {
|
||||
|
||||
impl ExportCommand {
|
||||
pub async fn build(&self, guard: Vec<WorkerGuard>) -> Result<Instance> {
|
||||
let (catalog, schema) = database::split_database(&self.database)?;
|
||||
let (catalog, schema) = split_database(&self.database)?;
|
||||
|
||||
let database_client =
|
||||
DatabaseClient::new(self.addr.clone(), catalog.clone(), self.auth_basic.clone());
|
||||
let auth_header = if let Some(basic) = &self.auth_basic {
|
||||
let encoded = general_purpose::STANDARD.encode(basic);
|
||||
Some(format!("basic {}", encoded))
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
Ok(Instance::new(
|
||||
Box::new(Export {
|
||||
addr: self.addr.clone(),
|
||||
catalog,
|
||||
schema,
|
||||
database_client,
|
||||
output_dir: self.output_dir.clone(),
|
||||
parallelism: self.export_jobs,
|
||||
target: self.target.clone(),
|
||||
start_time: self.start_time.clone(),
|
||||
end_time: self.end_time.clone(),
|
||||
auth_header,
|
||||
}),
|
||||
guard,
|
||||
))
|
||||
@@ -109,59 +121,78 @@ impl ExportCommand {
|
||||
}
|
||||
|
||||
pub struct Export {
|
||||
addr: String,
|
||||
catalog: String,
|
||||
schema: Option<String>,
|
||||
database_client: DatabaseClient,
|
||||
output_dir: String,
|
||||
parallelism: usize,
|
||||
target: ExportTarget,
|
||||
start_time: Option<String>,
|
||||
end_time: Option<String>,
|
||||
auth_header: Option<String>,
|
||||
}
|
||||
|
||||
impl Export {
|
||||
fn catalog_path(&self) -> PathBuf {
|
||||
PathBuf::from(&self.output_dir).join(&self.catalog)
|
||||
}
|
||||
/// Execute one single sql query.
|
||||
async fn sql(&self, sql: &str) -> Result<Option<Vec<Vec<Value>>>> {
|
||||
let url = format!(
|
||||
"http://{}/v1/sql?db={}-{}&sql={}",
|
||||
self.addr,
|
||||
self.catalog,
|
||||
self.schema.as_deref().unwrap_or(DEFAULT_SCHEMA_NAME),
|
||||
sql
|
||||
);
|
||||
|
||||
async fn get_db_names(&self) -> Result<Vec<String>> {
|
||||
let db_names = self.all_db_names().await?;
|
||||
let Some(schema) = &self.schema else {
|
||||
return Ok(db_names);
|
||||
};
|
||||
let mut request = reqwest::Client::new()
|
||||
.get(&url)
|
||||
.header("Content-Type", "application/x-www-form-urlencoded");
|
||||
if let Some(ref auth) = self.auth_header {
|
||||
request = request.header("Authorization", auth);
|
||||
}
|
||||
|
||||
// Check if the schema exists
|
||||
db_names
|
||||
.into_iter()
|
||||
.find(|db_name| db_name.to_lowercase() == schema.to_lowercase())
|
||||
.map(|name| vec![name])
|
||||
.context(SchemaNotFoundSnafu {
|
||||
catalog: &self.catalog,
|
||||
schema,
|
||||
})
|
||||
let response = request.send().await.with_context(|_| HttpQuerySqlSnafu {
|
||||
reason: format!("bad url: {}", url),
|
||||
})?;
|
||||
let response = response
|
||||
.error_for_status()
|
||||
.with_context(|_| HttpQuerySqlSnafu {
|
||||
reason: format!("query failed: {}", sql),
|
||||
})?;
|
||||
|
||||
let text = response.text().await.with_context(|_| HttpQuerySqlSnafu {
|
||||
reason: "cannot get response text".to_string(),
|
||||
})?;
|
||||
|
||||
let body = serde_json::from_str::<GreptimedbV1Response>(&text).context(SerdeJsonSnafu)?;
|
||||
Ok(body.output().first().and_then(|output| match output {
|
||||
GreptimeQueryOutput::Records(records) => Some(records.rows().clone()),
|
||||
GreptimeQueryOutput::AffectedRows(_) => None,
|
||||
}))
|
||||
}
|
||||
|
||||
/// Iterate over all db names.
|
||||
async fn all_db_names(&self) -> Result<Vec<String>> {
|
||||
let records = self
|
||||
.database_client
|
||||
.sql_in_public("SHOW DATABASES")
|
||||
.await?
|
||||
.context(EmptyResultSnafu)?;
|
||||
let mut result = Vec::with_capacity(records.len());
|
||||
for value in records {
|
||||
let Value::String(schema) = &value[0] else {
|
||||
unreachable!()
|
||||
///
|
||||
/// Newbie: `db_name` is catalog + schema.
|
||||
async fn iter_db_names(&self) -> Result<Vec<(String, String)>> {
|
||||
if let Some(schema) = &self.schema {
|
||||
Ok(vec![(self.catalog.clone(), schema.clone())])
|
||||
} else {
|
||||
let result = self.sql("SHOW DATABASES").await?;
|
||||
let Some(records) = result else {
|
||||
EmptyResultSnafu.fail()?
|
||||
};
|
||||
if schema == common_catalog::consts::INFORMATION_SCHEMA_NAME {
|
||||
continue;
|
||||
let mut result = Vec::with_capacity(records.len());
|
||||
for value in records {
|
||||
let Value::String(schema) = &value[0] else {
|
||||
unreachable!()
|
||||
};
|
||||
if schema == common_catalog::consts::INFORMATION_SCHEMA_NAME {
|
||||
continue;
|
||||
}
|
||||
result.push((self.catalog.clone(), schema.clone()));
|
||||
}
|
||||
if schema == common_catalog::consts::PG_CATALOG_NAME {
|
||||
continue;
|
||||
}
|
||||
result.push(schema.clone());
|
||||
Ok(result)
|
||||
}
|
||||
Ok(result)
|
||||
}
/// Return a list of [`TableReference`] to be exported.
@@ -170,11 +201,7 @@ impl Export {
    &self,
    catalog: &str,
    schema: &str,
) -> Result<(
    Vec<TableReference>,
    Vec<TableReference>,
    Vec<TableReference>,
)> {
) -> Result<(Vec<TableReference>, Vec<TableReference>)> {
    // Puts all metric table first
    let sql = format!(
        "SELECT table_catalog, table_schema, table_name \
@@ -183,16 +210,15 @@ impl Export {
        and table_catalog = \'{catalog}\' \
        and table_schema = \'{schema}\'"
    );
    let records = self
        .database_client
        .sql_in_public(&sql)
        .await?
        .context(EmptyResultSnafu)?;
    let result = self.sql(&sql).await?;
    let Some(records) = result else {
        EmptyResultSnafu.fail()?
    };
    let mut metric_physical_tables = HashSet::with_capacity(records.len());
    for value in records {
        let mut t = Vec::with_capacity(3);
        for v in &value {
            let Value::String(value) = v else {
            let serde_json::Value::String(value) = v else {
                unreachable!()
            };
            t.push(value);
@@ -200,142 +226,100 @@ impl Export {
        metric_physical_tables.insert((t[0].clone(), t[1].clone(), t[2].clone()));
    }

    // TODO: SQL injection hurts
    let sql = format!(
        "SELECT table_catalog, table_schema, table_name, table_type \
        "SELECT table_catalog, table_schema, table_name \
        FROM information_schema.tables \
        WHERE (table_type = \'BASE TABLE\' OR table_type = \'VIEW\') \
        WHERE table_type = \'BASE TABLE\' \
        and table_catalog = \'{catalog}\' \
        and table_schema = \'{schema}\'",
    );
    let records = self
        .database_client
        .sql_in_public(&sql)
        .await?
        .context(EmptyResultSnafu)?;
    let result = self.sql(&sql).await?;
    let Some(records) = result else {
        EmptyResultSnafu.fail()?
    };

    debug!("Fetched table/view list: {:?}", records);
    debug!("Fetched table list: {:?}", records);

    if records.is_empty() {
        return Ok((vec![], vec![], vec![]));
        return Ok((vec![], vec![]));
    }

    let mut remaining_tables = Vec::with_capacity(records.len());
    let mut views = Vec::new();
    for value in records {
        let mut t = Vec::with_capacity(4);
        let mut t = Vec::with_capacity(3);
        for v in &value {
            let Value::String(value) = v else {
            let serde_json::Value::String(value) = v else {
                unreachable!()
            };
            t.push(value);
        }
        let table = (t[0].clone(), t[1].clone(), t[2].clone());
        let table_type = t[3].as_str();
        // Ignores the physical table
        if !metric_physical_tables.contains(&table) {
            if table_type == "VIEW" {
                views.push(table);
            } else {
                remaining_tables.push(table);
            }
            remaining_tables.push(table);
        }
    }

    Ok((
        metric_physical_tables.into_iter().collect(),
        remaining_tables,
        views,
    ))
}
async fn show_create(
    &self,
    show_type: &str,
    catalog: &str,
    schema: &str,
    table: Option<&str>,
) -> Result<String> {
    let sql = match table {
        Some(table) => format!(
            r#"SHOW CREATE {} "{}"."{}"."{}""#,
            show_type, catalog, schema, table
        ),
        None => format!(r#"SHOW CREATE {} "{}"."{}""#, show_type, catalog, schema),
async fn show_create_table(&self, catalog: &str, schema: &str, table: &str) -> Result<String> {
    let sql = format!(
        r#"SHOW CREATE TABLE "{}"."{}"."{}""#,
        catalog, schema, table
    );
    let result = self.sql(&sql).await?;
    let Some(records) = result else {
        EmptyResultSnafu.fail()?
    };
    let records = self
        .database_client
        .sql_in_public(&sql)
        .await?
        .context(EmptyResultSnafu)?;
    let Value::String(create) = &records[0][1] else {
    let Value::String(create_table) = &records[0][1] else {
        unreachable!()
    };

    Ok(format!("{};\n", create))
}

async fn export_create_database(&self) -> Result<()> {
    let timer = Instant::now();
    let db_names = self.get_db_names().await?;
    let db_count = db_names.len();
    for schema in db_names {
        let db_dir = self.catalog_path().join(format!("{schema}/"));
        tokio::fs::create_dir_all(&db_dir)
            .await
            .context(FileIoSnafu)?;
        let file = db_dir.join("create_database.sql");
        let mut file = File::create(file).await.context(FileIoSnafu)?;
        let create_database = self
            .show_create("DATABASE", &self.catalog, &schema, None)
            .await?;
        file.write_all(create_database.as_bytes())
            .await
            .context(FileIoSnafu)?;
    }

    let elapsed = timer.elapsed();
    info!("Success {db_count} jobs, cost: {elapsed:?}");

    Ok(())
    Ok(format!("{};\n", create_table))
}
async fn export_create_table(&self) -> Result<()> {
    let timer = Instant::now();
    let semaphore = Arc::new(Semaphore::new(self.parallelism));
    let db_names = self.get_db_names().await?;
    let db_names = self.iter_db_names().await?;
    let db_count = db_names.len();
    let mut tasks = Vec::with_capacity(db_names.len());
    for schema in db_names {
    for (catalog, schema) in db_names {
        let semaphore_moved = semaphore.clone();
        tasks.push(async move {
            let _permit = semaphore_moved.acquire().await.unwrap();
            let (metric_physical_tables, remaining_tables, views) =
                self.get_table_list(&self.catalog, &schema).await?;
            let table_count =
                metric_physical_tables.len() + remaining_tables.len() + views.len();
            let db_dir = self.catalog_path().join(format!("{schema}/"));
            tokio::fs::create_dir_all(&db_dir)
            let (metric_physical_tables, remaining_tables) =
                self.get_table_list(&catalog, &schema).await?;
            let table_count = metric_physical_tables.len() + remaining_tables.len();
            let output_dir = Path::new(&self.output_dir)
                .join(&catalog)
                .join(format!("{schema}/"));
            tokio::fs::create_dir_all(&output_dir)
                .await
                .context(FileIoSnafu)?;
            let file = db_dir.join("create_tables.sql");
            let mut file = File::create(file).await.context(FileIoSnafu)?;
            let output_file = Path::new(&output_dir).join("create_tables.sql");
            let mut file = File::create(output_file).await.context(FileIoSnafu)?;
            for (c, s, t) in metric_physical_tables.into_iter().chain(remaining_tables) {
                let create_table = self.show_create("TABLE", &c, &s, Some(&t)).await?;
                file.write_all(create_table.as_bytes())
                    .await
                    .context(FileIoSnafu)?;
            }
            for (c, s, v) in views {
                let create_view = self.show_create("VIEW", &c, &s, Some(&v)).await?;
                file.write_all(create_view.as_bytes())
                    .await
                    .context(FileIoSnafu)?;
                match self.show_create_table(&c, &s, &t).await {
                    Err(e) => {
                        error!(e; r#"Failed to export table "{}"."{}"."{}""#, c, s, t)
                    }
                    Ok(create_table) => {
                        file.write_all(create_table.as_bytes())
                            .await
                            .context(FileIoSnafu)?;
                    }
                }
            }

            info!(
                "Finished exporting {}.{schema} with {table_count} table schemas to path: {}",
                self.catalog,
                db_dir.to_string_lossy()
                "Finished exporting {catalog}.{schema} with {table_count} table schemas to path: {}",
                output_dir.to_string_lossy()
            );

            Ok::<(), Error>(())
@@ -348,14 +332,14 @@ impl Export {
        .filter(|r| match r {
            Ok(_) => true,
            Err(e) => {
                error!(e; "export schema job failed");
                error!(e; "export job failed");
                false
            }
        })
        .count();

    let elapsed = timer.elapsed();
    info!("Success {success}/{db_count} jobs, cost: {elapsed:?}");
    info!("Success {success}/{db_count} jobs, cost: {:?}", elapsed);

    Ok(())
}
@@ -363,15 +347,17 @@ impl Export {
async fn export_database_data(&self) -> Result<()> {
    let timer = Instant::now();
    let semaphore = Arc::new(Semaphore::new(self.parallelism));
    let db_names = self.get_db_names().await?;
    let db_names = self.iter_db_names().await?;
    let db_count = db_names.len();
    let mut tasks = Vec::with_capacity(db_count);
    for schema in db_names {
    let mut tasks = Vec::with_capacity(db_names.len());
    for (catalog, schema) in db_names {
        let semaphore_moved = semaphore.clone();
        tasks.push(async move {
            let _permit = semaphore_moved.acquire().await.unwrap();
            let db_dir = self.catalog_path().join(format!("{schema}/"));
            tokio::fs::create_dir_all(&db_dir)
            let output_dir = Path::new(&self.output_dir)
                .join(&catalog)
                .join(format!("{schema}/"));
            tokio::fs::create_dir_all(&output_dir)
                .await
                .context(FileIoSnafu)?;

@@ -393,31 +379,30 @@ impl Export {

            let sql = format!(
                r#"COPY DATABASE "{}"."{}" TO '{}' {};"#,
                self.catalog,
                catalog,
                schema,
                db_dir.to_str().unwrap(),
                output_dir.to_str().unwrap(),
                with_options
            );

            info!("Executing sql: {sql}");

            self.database_client.sql_in_public(&sql).await?;
            self.sql(&sql).await?;

            info!(
                "Finished exporting {}.{schema} data into path: {}",
                self.catalog,
                db_dir.to_string_lossy()
                "Finished exporting {catalog}.{schema} data into path: {}",
                output_dir.to_string_lossy()
            );

            // The export copy from sql
            let copy_from_file = db_dir.join("copy_from.sql");
            let copy_from_file = output_dir.join("copy_from.sql");
            let mut writer =
                BufWriter::new(File::create(copy_from_file).await.context(FileIoSnafu)?);
            let copy_database_from_sql = format!(
                r#"COPY DATABASE "{}"."{}" FROM '{}' WITH (FORMAT='parquet');"#,
                self.catalog,
                catalog,
                schema,
                db_dir.to_str().unwrap()
                output_dir.to_str().unwrap()
            );
            writer
                .write(copy_database_from_sql.as_bytes())
@@ -425,7 +410,7 @@ impl Export {
                .context(FileIoSnafu)?;
            writer.flush().await.context(FileIoSnafu)?;

            info!("Finished exporting {}.{schema} copy_from.sql", self.catalog);
            info!("Finished exporting {catalog}.{schema} copy_from.sql");

            Ok::<(), Error>(())
        })
@@ -444,23 +429,20 @@ impl Export {
        .count();
    let elapsed = timer.elapsed();

    info!("Success {success}/{db_count} jobs, costs: {elapsed:?}");
    info!("Success {success}/{db_count} jobs, costs: {:?}", elapsed);

    Ok(())
}
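// For concreteness (paths and names are hypothetical; the format strings are
// the ones used in export_database_data above), exporting greptime.public to
// /tmp/backup would run a statement shaped like
//
//   COPY DATABASE "greptime"."public" TO '/tmp/backup/greptime/public/' <with_options>;
//
// and write a copy_from.sql containing
//
//   COPY DATABASE "greptime"."public" FROM '/tmp/backup/greptime/public/' WITH (FORMAT='parquet');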
}

#[allow(deprecated)]
#[async_trait]
impl Tool for Export {
    async fn do_work(&self) -> Result<()> {
        match self.target {
            ExportTarget::Schema => {
                self.export_create_database().await?;
                self.export_create_table().await
            }
            ExportTarget::Schema => self.export_create_table().await,
            ExportTarget::Data => self.export_database_data().await,
            ExportTarget::All => {
                self.export_create_database().await?;
                self.export_create_table().await?;
                self.export_database_data().await
            }
@@ -468,6 +450,20 @@ impl Tool for Export {
        }
    }
}
/// Split at `-`.
fn split_database(database: &str) -> Result<(String, Option<String>)> {
    let (catalog, schema) = match database.split_once('-') {
        Some((catalog, schema)) => (catalog, schema),
        None => (DEFAULT_CATALOG_NAME, database),
    };

    if schema == "*" {
        Ok((catalog.to_string(), None))
    } else {
        Ok((catalog.to_string(), Some(schema.to_string())))
    }
}
#[cfg(test)]
mod tests {
    use clap::Parser;
@@ -475,10 +471,26 @@ mod tests {
    use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
    use common_telemetry::logging::LoggingOptions;

    use crate::cli::export::split_database;
    use crate::error::Result as CmdResult;
    use crate::options::GlobalOptions;
    use crate::{cli, standalone, App};

    #[test]
    fn test_split_database() {
        let result = split_database("catalog-schema").unwrap();
        assert_eq!(result, ("catalog".to_string(), Some("schema".to_string())));

        let result = split_database("schema").unwrap();
        assert_eq!(result, ("greptime".to_string(), Some("schema".to_string())));

        let result = split_database("catalog-*").unwrap();
        assert_eq!(result, ("catalog".to_string(), None));

        let result = split_database("*").unwrap();
        assert_eq!(result, ("greptime".to_string(), None));
    }

    #[tokio::test(flavor = "multi_thread")]
    async fn test_export_create_table_with_quoted_names() -> CmdResult<()> {
        let output_dir = tempfile::tempdir().unwrap();
@@ -1,218 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::path::PathBuf;
use std::sync::Arc;

use async_trait::async_trait;
use clap::{Parser, ValueEnum};
use common_catalog::consts::DEFAULT_SCHEMA_NAME;
use common_telemetry::{error, info, warn};
use snafu::{OptionExt, ResultExt};
use tokio::sync::Semaphore;
use tokio::time::Instant;
use tracing_appender::non_blocking::WorkerGuard;

use crate::cli::database::DatabaseClient;
use crate::cli::{database, Instance, Tool};
use crate::error::{Error, FileIoSnafu, Result, SchemaNotFoundSnafu};

#[derive(Debug, Default, Clone, ValueEnum)]
enum ImportTarget {
    /// Import all table schemas into the database.
    Schema,
    /// Import all table data into the database.
    Data,
    /// Import all table schemas and data at once.
    #[default]
    All,
}

#[derive(Debug, Default, Parser)]
pub struct ImportCommand {
    /// Server address to connect
    #[clap(long)]
    addr: String,

    /// Directory of the data. E.g.: /tmp/greptimedb-backup
    #[clap(long)]
    input_dir: String,

    /// The name of the catalog to import.
    #[clap(long, default_value = "greptime-*")]
    database: String,

    /// Parallelism of the import.
    #[clap(long, short = 'j', default_value = "1")]
    import_jobs: usize,

    /// Max retry times for each job.
    #[clap(long, default_value = "3")]
    max_retry: usize,

    /// Things to import
    #[clap(long, short = 't', value_enum, default_value = "all")]
    target: ImportTarget,

    /// The basic authentication for connecting to the server
    #[clap(long)]
    auth_basic: Option<String>,
}

impl ImportCommand {
    pub async fn build(&self, guard: Vec<WorkerGuard>) -> Result<Instance> {
        let (catalog, schema) = database::split_database(&self.database)?;
        let database_client =
            DatabaseClient::new(self.addr.clone(), catalog.clone(), self.auth_basic.clone());

        Ok(Instance::new(
            Box::new(Import {
                catalog,
                schema,
                database_client,
                input_dir: self.input_dir.clone(),
                parallelism: self.import_jobs,
                target: self.target.clone(),
            }),
            guard,
        ))
    }
}

pub struct Import {
    catalog: String,
    schema: Option<String>,
    database_client: DatabaseClient,
    input_dir: String,
    parallelism: usize,
    target: ImportTarget,
}

impl Import {
    async fn import_create_table(&self) -> Result<()> {
        // Use default db to create other dbs
        self.do_sql_job("create_database.sql", Some(DEFAULT_SCHEMA_NAME))
            .await?;
        self.do_sql_job("create_tables.sql", None).await
    }

    async fn import_database_data(&self) -> Result<()> {
        self.do_sql_job("copy_from.sql", None).await
    }

    async fn do_sql_job(&self, filename: &str, exec_db: Option<&str>) -> Result<()> {
        let timer = Instant::now();
        let semaphore = Arc::new(Semaphore::new(self.parallelism));
        let db_names = self.get_db_names().await?;
        let db_count = db_names.len();
        let mut tasks = Vec::with_capacity(db_count);
        for schema in db_names {
            let semaphore_moved = semaphore.clone();
            tasks.push(async move {
                let _permit = semaphore_moved.acquire().await.unwrap();
                let database_input_dir = self.catalog_path().join(&schema);
                let sql_file = database_input_dir.join(filename);
                let sql = tokio::fs::read_to_string(sql_file)
                    .await
                    .context(FileIoSnafu)?;
                if sql.is_empty() {
                    info!("Empty `{filename}` {database_input_dir:?}");
                } else {
                    let db = exec_db.unwrap_or(&schema);
                    self.database_client.sql(&sql, db).await?;
                    info!("Imported `{filename}` for database {schema}");
                }

                Ok::<(), Error>(())
            })
        }

        let success = futures::future::join_all(tasks)
            .await
            .into_iter()
            .filter(|r| match r {
                Ok(_) => true,
                Err(e) => {
                    error!(e; "import {filename} job failed");
                    false
                }
            })
            .count();
        let elapsed = timer.elapsed();
        info!("Success {success}/{db_count} `{filename}` jobs, cost: {elapsed:?}");

        Ok(())
    }

    fn catalog_path(&self) -> PathBuf {
        PathBuf::from(&self.input_dir).join(&self.catalog)
    }

    async fn get_db_names(&self) -> Result<Vec<String>> {
        let db_names = self.all_db_names().await?;
        let Some(schema) = &self.schema else {
            return Ok(db_names);
        };

        // Check if the schema exists
        db_names
            .into_iter()
            .find(|db_name| db_name.to_lowercase() == schema.to_lowercase())
            .map(|name| vec![name])
            .context(SchemaNotFoundSnafu {
                catalog: &self.catalog,
                schema,
            })
    }

    // Get all database names in the input directory.
    // The directory structure should be like:
    // /tmp/greptimedb-backup
    // ├── greptime-1
    // │   ├── db1
    // │   └── db2
    async fn all_db_names(&self) -> Result<Vec<String>> {
        let mut db_names = vec![];
        let path = self.catalog_path();
        let mut entries = tokio::fs::read_dir(path).await.context(FileIoSnafu)?;
        while let Some(entry) = entries.next_entry().await.context(FileIoSnafu)? {
            let path = entry.path();
            if path.is_dir() {
                let db_name = match path.file_name() {
                    Some(name) => name.to_string_lossy().to_string(),
                    None => {
                        warn!("Failed to get the file name of {:?}", path);
                        continue;
                    }
                };
                db_names.push(db_name);
            }
        }
        Ok(db_names)
    }
}

#[async_trait]
impl Tool for Import {
    async fn do_work(&self) -> Result<()> {
        match self.target {
            ImportTarget::Schema => self.import_create_table().await,
            ImportTarget::Data => self.import_database_data().await,
            ImportTarget::All => {
                self.import_create_table().await?;
                self.import_database_data().await
            }
        }
    }
}
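Both the export and import tools above bound their parallelism the same way: each per-database task first acquires a permit from a shared Semaphore sized by the jobs option, and futures::future::join_all then drives all the tasks while at most `parallelism` of them run at once. A stripped-down sketch of that pattern (illustrative only; names are not from the patch):

    use std::sync::Arc;

    use tokio::sync::Semaphore;

    async fn run_bounded(parallelism: usize, jobs: Vec<String>) {
        let semaphore = Arc::new(Semaphore::new(parallelism));
        let mut tasks = Vec::with_capacity(jobs.len());
        for job in jobs {
            let semaphore = semaphore.clone();
            tasks.push(async move {
                // Only `parallelism` tasks hold a permit at any moment.
                let _permit = semaphore.acquire().await.unwrap();
                println!("running {job}");
            });
        }
        // The futures make progress only when polled; join_all polls them all.
        futures::future::join_all(tasks).await;
    }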
@@ -31,6 +31,13 @@ pub enum Error {
        source: common_meta::error::Error,
    },

    #[snafu(display("Failed to iter stream"))]
    IterStream {
        #[snafu(implicit)]
        location: Location,
        source: common_meta::error::Error,
    },

    #[snafu(display("Failed to init DDL manager"))]
    InitDdlManager {
        #[snafu(implicit)]
@@ -230,6 +237,13 @@ pub enum Error {
        location: Location,
    },

    #[snafu(display("Failed to start catalog manager"))]
    StartCatalogManager {
        #[snafu(implicit)]
        location: Location,
        source: catalog::error::Error,
    },

    #[snafu(display("Failed to connect to Etcd at {etcd_addr}"))]
    ConnectEtcd {
        etcd_addr: String,
@@ -239,6 +253,14 @@ pub enum Error {
        location: Location,
    },

    #[snafu(display("Failed to connect server at {addr}"))]
    ConnectServer {
        addr: String,
        source: client::error::Error,
        #[snafu(implicit)]
        location: Location,
    },

    #[snafu(display("Failed to serde json"))]
    SerdeJson {
        #[snafu(source)]
@@ -256,6 +278,12 @@ pub enum Error {
        location: Location,
    },

    #[snafu(display("Expect data from output, but got another thing"))]
    NotDataFromOutput {
        #[snafu(implicit)]
        location: Location,
    },

    #[snafu(display("Empty result from output"))]
    EmptyResult {
        #[snafu(implicit)]
@@ -318,12 +346,13 @@ pub enum Error {
        source: meta_client::error::Error,
    },

    #[snafu(display("Cannot find schema {schema} in catalog {catalog}"))]
    SchemaNotFound {
        catalog: String,
        schema: String,
    #[snafu(display("Tonic transport error: {error:?} with msg: {msg:?}"))]
    TonicTransport {
        #[snafu(implicit)]
        location: Location,
        #[snafu(source)]
        error: tonic::transport::Error,
        msg: Option<String>,
    },
}

@@ -341,16 +370,18 @@ impl ErrorExt for Error {
            Error::BuildMetaServer { source, .. } => source.status_code(),
            Error::UnsupportedSelectorType { source, .. } => source.status_code(),

            Error::InitMetadata { source, .. } | Error::InitDdlManager { source, .. } => {
                source.status_code()
            }
            Error::IterStream { source, .. }
            | Error::InitMetadata { source, .. }
            | Error::InitDdlManager { source, .. } => source.status_code(),

            Error::ConnectServer { source, .. } => source.status_code(),
            Error::MissingConfig { .. }
            | Error::LoadLayeredConfig { .. }
            | Error::IllegalConfig { .. }
            | Error::InvalidReplCommand { .. }
            | Error::InitTimezone { .. }
            | Error::ConnectEtcd { .. }
            | Error::NotDataFromOutput { .. }
            | Error::CreateDir { .. }
            | Error::EmptyResult { .. } => StatusCode::InvalidArguments,

@@ -368,6 +399,7 @@ impl ErrorExt for Error {
                source.status_code()
            }
            Error::SubstraitEncodeLogicalPlan { source, .. } => source.status_code(),
            Error::StartCatalogManager { source, .. } => source.status_code(),

            Error::SerdeJson { .. } | Error::FileIo { .. } | Error::SpawnThread { .. } => {
                StatusCode::Unexpected
@@ -382,7 +414,7 @@ impl ErrorExt for Error {
                source.status_code()
            }
            Error::MetaClientInit { source, .. } => source.status_code(),
            Error::SchemaNotFound { .. } => StatusCode::DatabaseNotFound,
            Error::TonicTransport { .. } => StatusCode::Internal,
        }
    }
@@ -141,8 +141,6 @@ pub struct StandaloneOptions {
    pub region_engine: Vec<RegionEngineConfig>,
    pub export_metrics: ExportMetricsOption,
    pub tracing: TracingOptions,
    pub init_regions_in_background: bool,
    pub init_regions_parallelism: usize,
}

impl Default for StandaloneOptions {
@@ -170,8 +168,6 @@ impl Default for StandaloneOptions {
                RegionEngineConfig::File(FileEngineConfig::default()),
            ],
            tracing: TracingOptions::default(),
            init_regions_in_background: false,
            init_regions_parallelism: 16,
        }
    }
}
@@ -222,9 +218,6 @@ impl StandaloneOptions {
            storage: cloned_opts.storage,
            region_engine: cloned_opts.region_engine,
            grpc: cloned_opts.grpc,
            init_regions_in_background: cloned_opts.init_regions_in_background,
            init_regions_parallelism: cloned_opts.init_regions_parallelism,
            mode: Mode::Standalone,
            ..Default::default()
        }
    }
@@ -16,10 +16,12 @@ use std::time::Duration;

use cmd::options::GreptimeOptions;
use cmd::standalone::StandaloneOptions;
use common_base::readable_size::ReadableSize;
use common_config::Configurable;
use common_grpc::channel_manager::{
    DEFAULT_MAX_GRPC_RECV_MESSAGE_SIZE, DEFAULT_MAX_GRPC_SEND_MESSAGE_SIZE,
};
use common_runtime::global::RuntimeOptions;
use common_telemetry::logging::{LoggingOptions, DEFAULT_OTLP_ENDPOINT};
use common_wal::config::raft_engine::RaftEngineConfig;
use common_wal::config::DatanodeWalConfig;
@@ -43,6 +45,10 @@ fn test_load_datanode_example_config() {
        .unwrap();

    let expected = GreptimeOptions::<DatanodeOptions> {
        runtime: RuntimeOptions {
            global_rt_size: 8,
            compact_rt_size: 4,
        },
        component: DatanodeOptions {
            node_id: Some(42),
            meta_client: Some(MetaClientOptions {
@@ -59,7 +65,6 @@ fn test_load_datanode_example_config() {
            wal: DatanodeWalConfig::RaftEngine(RaftEngineConfig {
                dir: Some("/tmp/greptimedb/wal".to_string()),
                sync_period: Some(Duration::from_secs(10)),
                recovery_parallelism: 2,
                ..Default::default()
            }),
            storage: StorageConfig {
@@ -68,8 +73,15 @@ fn test_load_datanode_example_config() {
            },
            region_engine: vec![
                RegionEngineConfig::Mito(MitoConfig {
                    num_workers: 8,
                    auto_flush_interval: Duration::from_secs(3600),
                    scan_parallelism: 0,
                    global_write_buffer_size: ReadableSize::gb(1),
                    global_write_buffer_reject_size: ReadableSize::gb(2),
                    sst_meta_cache_size: ReadableSize::mb(128),
                    vector_cache_size: ReadableSize::mb(512),
                    page_cache_size: ReadableSize::mb(512),
                    max_background_jobs: 4,
                    experimental_write_cache_ttl: Some(Duration::from_secs(60 * 60 * 8)),
                    ..Default::default()
                }),
@@ -94,10 +106,9 @@ fn test_load_datanode_example_config() {
            rpc_max_send_message_size: Some(DEFAULT_MAX_GRPC_SEND_MESSAGE_SIZE),
            ..Default::default()
        },
        ..Default::default()
    };

    similar_asserts::assert_eq!(options, expected);
    assert_eq!(options, expected);
}

#[test]
@@ -107,6 +118,10 @@ fn test_load_frontend_example_config() {
    GreptimeOptions::<FrontendOptions>::load_layered_options(example_config.to_str(), "")
        .unwrap();
    let expected = GreptimeOptions::<FrontendOptions> {
        runtime: RuntimeOptions {
            global_rt_size: 8,
            compact_rt_size: 4,
        },
        component: FrontendOptions {
            default_timezone: Some("UTC".to_string()),
            meta_client: Some(MetaClientOptions {
@@ -139,9 +154,8 @@ fn test_load_frontend_example_config() {
            },
            ..Default::default()
        },
        ..Default::default()
    };
    similar_asserts::assert_eq!(options, expected);
    assert_eq!(options, expected);
}

#[test]
@@ -151,6 +165,10 @@ fn test_load_metasrv_example_config() {
    GreptimeOptions::<MetasrvOptions>::load_layered_options(example_config.to_str(), "")
        .unwrap();
    let expected = GreptimeOptions::<MetasrvOptions> {
        runtime: RuntimeOptions {
            global_rt_size: 8,
            compact_rt_size: 4,
        },
        component: MetasrvOptions {
            selector: SelectorType::default(),
            data_home: "/tmp/metasrv/".to_string(),
@@ -168,9 +186,8 @@ fn test_load_metasrv_example_config() {
            },
            ..Default::default()
        },
        ..Default::default()
    };
    similar_asserts::assert_eq!(options, expected);
    assert_eq!(options, expected);
}

#[test]
@@ -180,19 +197,30 @@ fn test_load_standalone_example_config() {
    GreptimeOptions::<StandaloneOptions>::load_layered_options(example_config.to_str(), "")
        .unwrap();
    let expected = GreptimeOptions::<StandaloneOptions> {
        runtime: RuntimeOptions {
            global_rt_size: 8,
            compact_rt_size: 4,
        },
        component: StandaloneOptions {
            default_timezone: Some("UTC".to_string()),
            wal: DatanodeWalConfig::RaftEngine(RaftEngineConfig {
                dir: Some("/tmp/greptimedb/wal".to_string()),
                sync_period: Some(Duration::from_secs(10)),
                recovery_parallelism: 2,
                ..Default::default()
            }),
            region_engine: vec![
                RegionEngineConfig::Mito(MitoConfig {
                    num_workers: 8,
                    auto_flush_interval: Duration::from_secs(3600),
                    experimental_write_cache_ttl: Some(Duration::from_secs(60 * 60 * 8)),
                    scan_parallelism: 0,
                    global_write_buffer_size: ReadableSize::gb(1),
                    global_write_buffer_reject_size: ReadableSize::gb(2),
                    sst_meta_cache_size: ReadableSize::mb(128),
                    vector_cache_size: ReadableSize::mb(512),
                    page_cache_size: ReadableSize::mb(512),
                    selector_result_cache_size: ReadableSize::mb(512),
                    max_background_jobs: 4,
                    experimental_write_cache_ttl: Some(Duration::from_secs(60 * 60 * 8)),
                    ..Default::default()
                }),
                RegionEngineConfig::File(EngineConfig {}),
@@ -214,7 +242,6 @@ fn test_load_standalone_example_config() {
            },
            ..Default::default()
        },
        ..Default::default()
    };
    similar_asserts::assert_eq!(options, expected);
    assert_eq!(options, expected);
}
@@ -9,12 +9,10 @@ workspace = true

[dependencies]
anymap = "1.0.0-beta.2"
async-trait.workspace = true
bitvec = "1.0"
bytes.workspace = true
common-error.workspace = true
common-macro.workspace = true
futures.workspace = true
paste = "1.0"
serde = { version = "1.0", features = ["derive"] }
snafu.workspace = true
src/common/base/src/buffer.rs (new file, 242 lines)
@@ -0,0 +1,242 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::any::Any;
use std::io::{Read, Write};

use bytes::{Buf, BufMut, BytesMut};
use common_error::ext::ErrorExt;
use common_macro::stack_trace_debug;
use paste::paste;
use snafu::{ensure, Location, ResultExt, Snafu};

#[derive(Snafu)]
#[snafu(visibility(pub))]
#[stack_trace_debug]
pub enum Error {
    #[snafu(display(
        "Destination buffer overflow, src_len: {}, dst_len: {}",
        src_len,
        dst_len
    ))]
    Overflow {
        src_len: usize,
        dst_len: usize,
        #[snafu(implicit)]
        location: Location,
    },

    #[snafu(display("Buffer underflow"))]
    Underflow {
        #[snafu(implicit)]
        location: Location,
    },

    #[snafu(display("IO operation reach EOF"))]
    Eof {
        #[snafu(source)]
        error: std::io::Error,
        #[snafu(implicit)]
        location: Location,
    },
}

pub type Result<T> = std::result::Result<T, Error>;

impl ErrorExt for Error {
    fn as_any(&self) -> &dyn Any {
        self
    }
}

macro_rules! impl_read_le {
    ( $($num_ty: ty), *) => {
        $(
            paste!{
                // TODO(hl): default implementation requires allocating a
                // temp buffer. maybe use more efficient impls in concrete buffers.
                // see https://github.com/GrepTimeTeam/greptimedb/pull/97#discussion_r930798941
                fn [<read_ $num_ty _le>](&mut self) -> Result<$num_ty> {
                    let mut buf = [0u8; std::mem::size_of::<$num_ty>()];
                    self.read_to_slice(&mut buf)?;
                    Ok($num_ty::from_le_bytes(buf))
                }

                fn [<peek_ $num_ty _le>](&mut self) -> Result<$num_ty> {
                    let mut buf = [0u8; std::mem::size_of::<$num_ty>()];
                    self.peek_to_slice(&mut buf)?;
                    Ok($num_ty::from_le_bytes(buf))
                }
            }
        )*
    }
}
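// Illustration (not part of the original file): for `u32` the macro above
// expands to methods equivalent to
//
//     fn read_u32_le(&mut self) -> Result<u32> {
//         let mut buf = [0u8; 4];
//         self.read_to_slice(&mut buf)?;
//         Ok(u32::from_le_bytes(buf))
//     }
//
// with `peek_u32_le` identical except that it calls `peek_to_slice` and
// therefore leaves the read cursor untouched.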
macro_rules! impl_write_le {
    ( $($num_ty: ty), *) => {
        $(
            paste!{
                fn [<write_ $num_ty _le>](&mut self, n: $num_ty) -> Result<()> {
                    self.write_from_slice(&n.to_le_bytes())?;
                    Ok(())
                }
            }
        )*
    }
}

pub trait Buffer {
    /// Returns remaining data size for read.
    fn remaining_size(&self) -> usize;

    /// Returns true if buffer has no data for read.
    fn is_empty(&self) -> bool {
        self.remaining_size() == 0
    }

    /// Peeks data into dst. This method should not change internal cursor,
    /// invoke `advance_by` if needed.
    /// # Panics
    /// This method **may** panic if buffer does not have enough data to be copied to dst.
    fn peek_to_slice(&self, dst: &mut [u8]) -> Result<()>;

    /// Reads data into dst. This method will change internal cursor.
    /// # Panics
    /// This method **may** panic if buffer does not have enough data to be copied to dst.
    fn read_to_slice(&mut self, dst: &mut [u8]) -> Result<()> {
        self.peek_to_slice(dst)?;
        self.advance_by(dst.len());
        Ok(())
    }

    /// Advances internal cursor for next read.
    /// # Panics
    /// This method **may** panic if the offset after advancing exceeds the length of underlying buffer.
    fn advance_by(&mut self, by: usize);

    impl_read_le![u8, i8, u16, i16, u32, i32, u64, i64, f32, f64];
}

macro_rules! impl_buffer_for_bytes {
    ( $($buf_ty:ty), *) => {
        $(
            impl Buffer for $buf_ty {
                fn remaining_size(&self) -> usize {
                    self.len()
                }

                fn peek_to_slice(&self, dst: &mut [u8]) -> Result<()> {
                    let dst_len = dst.len();
                    ensure!(
                        self.remaining() >= dst.len(),
                        OverflowSnafu {
                            src_len: self.remaining_size(),
                            dst_len,
                        }
                    );
                    dst.copy_from_slice(&self[0..dst_len]);
                    Ok(())
                }

                #[inline]
                fn advance_by(&mut self, by: usize) {
                    self.advance(by);
                }
            }
        )*
    };
}

impl_buffer_for_bytes![bytes::Bytes, bytes::BytesMut];

impl Buffer for &[u8] {
    fn remaining_size(&self) -> usize {
        self.len()
    }

    fn peek_to_slice(&self, dst: &mut [u8]) -> Result<()> {
        let dst_len = dst.len();
        ensure!(
            self.len() >= dst.len(),
            OverflowSnafu {
                src_len: self.remaining_size(),
                dst_len,
            }
        );
        dst.copy_from_slice(&self[0..dst_len]);
        Ok(())
    }

    fn read_to_slice(&mut self, dst: &mut [u8]) -> Result<()> {
        ensure!(
            self.len() >= dst.len(),
            OverflowSnafu {
                src_len: self.remaining_size(),
                dst_len: dst.len(),
            }
        );
        self.read_exact(dst).context(EofSnafu)
    }

    fn advance_by(&mut self, by: usize) {
        *self = &self[by..];
    }
}

/// Mutable buffer.
pub trait BufferMut {
    fn as_slice(&self) -> &[u8];

    fn write_from_slice(&mut self, src: &[u8]) -> Result<()>;

    impl_write_le![i8, u8, i16, u16, i32, u32, i64, u64, f32, f64];
}

impl BufferMut for BytesMut {
    fn as_slice(&self) -> &[u8] {
        self
    }

    fn write_from_slice(&mut self, src: &[u8]) -> Result<()> {
        self.put_slice(src);
        Ok(())
    }
}

impl BufferMut for &mut [u8] {
    fn as_slice(&self) -> &[u8] {
        self
    }

    fn write_from_slice(&mut self, src: &[u8]) -> Result<()> {
        // see std::io::Write::write_all
        // https://doc.rust-lang.org/src/std/io/impls.rs.html#363
        self.write_all(src).map_err(|_| {
            OverflowSnafu {
                src_len: src.len(),
                dst_len: self.as_slice().len(),
            }
            .build()
        })
    }
}

impl BufferMut for Vec<u8> {
    fn as_slice(&self) -> &[u8] {
        self
    }

    fn write_from_slice(&mut self, src: &[u8]) -> Result<()> {
        self.extend_from_slice(src);
        Ok(())
    }
}
@@ -44,12 +44,6 @@ impl From<Vec<u8>> for Bytes {
    }
}

impl From<Bytes> for Vec<u8> {
    fn from(bytes: Bytes) -> Vec<u8> {
        bytes.0.into()
    }
}

impl Deref for Bytes {
    type Target = [u8];
@@ -13,9 +13,9 @@
// limitations under the License.

pub mod bit_vec;
pub mod buffer;
pub mod bytes;
pub mod plugins;
pub mod range_read;
#[allow(clippy::all)]
pub mod readable_size;
pub mod secrets;
@@ -1,105 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::io;
use std::ops::Range;

use async_trait::async_trait;
use bytes::{BufMut, Bytes};
use futures::{AsyncReadExt, AsyncSeekExt};

/// `Metadata` contains the metadata of a source.
pub struct Metadata {
    /// The length of the source in bytes.
    pub content_length: u64,
}

/// `RangeReader` reads a range of bytes from a source.
#[async_trait]
pub trait RangeReader: Send + Unpin {
    /// Returns the metadata of the source.
    async fn metadata(&mut self) -> io::Result<Metadata>;

    /// Reads the bytes in the given range.
    async fn read(&mut self, range: Range<u64>) -> io::Result<Bytes>;

    /// Reads the bytes in the given range into the buffer.
    ///
    /// Handles the buffer based on its capacity:
    /// - If the buffer is insufficient to hold the bytes, it will either:
    ///   - Allocate additional space (e.g., for `Vec<u8>`)
    ///   - Panic (e.g., for `&mut [u8]`)
    async fn read_into(
        &mut self,
        range: Range<u64>,
        buf: &mut (impl BufMut + Send),
    ) -> io::Result<()> {
        let bytes = self.read(range).await?;
        buf.put_slice(&bytes);
        Ok(())
    }

    /// Reads the bytes in the given ranges.
    async fn read_vec(&mut self, ranges: &[Range<u64>]) -> io::Result<Vec<Bytes>> {
        let mut result = Vec::with_capacity(ranges.len());
        for range in ranges {
            result.push(self.read(range.clone()).await?);
        }
        Ok(result)
    }
}

#[async_trait]
impl<R: RangeReader + Send + Unpin> RangeReader for &mut R {
    async fn metadata(&mut self) -> io::Result<Metadata> {
        (*self).metadata().await
    }

    async fn read(&mut self, range: Range<u64>) -> io::Result<Bytes> {
        (*self).read(range).await
    }

    async fn read_into(
        &mut self,
        range: Range<u64>,
        buf: &mut (impl BufMut + Send),
    ) -> io::Result<()> {
        (*self).read_into(range, buf).await
    }

    async fn read_vec(&mut self, ranges: &[Range<u64>]) -> io::Result<Vec<Bytes>> {
        (*self).read_vec(ranges).await
    }
}

/// `RangeReaderAdapter` bridges `RangeReader` and `AsyncRead + AsyncSeek`.
pub struct RangeReaderAdapter<R>(pub R);

/// Implements `RangeReader` for a type that implements `AsyncRead + AsyncSeek`.
///
/// TODO(zhongzc): It's a temporary solution for porting the codebase from `AsyncRead + AsyncSeek` to `RangeReader`.
/// Until the codebase is fully ported to `RangeReader`, remove this implementation.
#[async_trait]
impl<R: futures::AsyncRead + futures::AsyncSeek + Send + Unpin> RangeReader
    for RangeReaderAdapter<R>
{
    async fn metadata(&mut self) -> io::Result<Metadata> {
        let content_length = self.0.seek(io::SeekFrom::End(0)).await?;
        Ok(Metadata { content_length })
    }

    async fn read(&mut self, range: Range<u64>) -> io::Result<Bytes> {
        let mut buf = vec![0; (range.end - range.start) as usize];
        self.0.seek(io::SeekFrom::Start(range.start)).await?;
        self.0.read_exact(&mut buf).await?;
        Ok(Bytes::from(buf))
    }
}
src/common/base/tests/buffer_tests.rs (new file, 182 lines)
@@ -0,0 +1,182 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#![feature(assert_matches)]

#[cfg(test)]
mod tests {
    use std::assert_matches::assert_matches;

    use bytes::{Buf, Bytes, BytesMut};
    use common_base::buffer::Error::Overflow;
    use common_base::buffer::{Buffer, BufferMut};
    use paste::paste;

    #[test]
    pub fn test_buffer_read_write() {
        let mut buf = BytesMut::with_capacity(16);
        buf.write_u64_le(1234u64).unwrap();
        let result = buf.peek_u64_le().unwrap();
        assert_eq!(1234u64, result);
        buf.advance_by(8);

        buf.write_from_slice("hello, world".as_bytes()).unwrap();
        let mut content = vec![0u8; 5];
        buf.peek_to_slice(&mut content).unwrap();
        let read = String::from_utf8_lossy(&content);
        assert_eq!("hello", read);
        buf.advance_by(5);
        // after read, buffer should still have 7 bytes to read.
        assert_eq!(7, buf.remaining());

        let mut content = vec![0u8; 6];
        buf.read_to_slice(&mut content).unwrap();
        let read = String::from_utf8_lossy(&content);
        assert_eq!(", worl", read);
        // after read, buffer should still have 1 byte to read.
        assert_eq!(1, buf.remaining());
    }

    #[test]
    pub fn test_buffer_read() {
        let mut bytes = Bytes::from_static("hello".as_bytes());
        assert_eq!(5, bytes.remaining_size());
        assert_eq!(b'h', bytes.peek_u8_le().unwrap());
        bytes.advance_by(1);
        assert_eq!(4, bytes.remaining_size());
    }

    macro_rules! test_primitive_read_write {
        ( $($num_ty: ty), *) => {
            $(
                paste!{
                    #[test]
                    fn [<test_read_write_ $num_ty>]() {
                        assert_eq!($num_ty::MAX, (&mut $num_ty::MAX.to_le_bytes() as &[u8]).[<read_ $num_ty _le>]().unwrap());
                        assert_eq!($num_ty::MIN, (&mut $num_ty::MIN.to_le_bytes() as &[u8]).[<read_ $num_ty _le>]().unwrap());
                    }
                }
            )*
        }
    }

    test_primitive_read_write![u8, u16, u32, u64, i8, i16, i32, i64, f32, f64];

    #[test]
    pub fn test_read_write_from_slice_buffer() {
        let mut buf = "hello".as_bytes();
        assert_eq!(104, buf.peek_u8_le().unwrap());
        buf.advance_by(1);
        assert_eq!(101, buf.peek_u8_le().unwrap());
        buf.advance_by(1);
        assert_eq!(108, buf.peek_u8_le().unwrap());
        buf.advance_by(1);
        assert_eq!(108, buf.peek_u8_le().unwrap());
        buf.advance_by(1);
        assert_eq!(111, buf.peek_u8_le().unwrap());
        buf.advance_by(1);
        assert_matches!(buf.peek_u8_le(), Err(Overflow { .. }));
    }

    #[test]
    pub fn test_read_u8_from_slice_buffer() {
        let mut buf = "hello".as_bytes();
        assert_eq!(104, buf.read_u8_le().unwrap());
        assert_eq!(101, buf.read_u8_le().unwrap());
        assert_eq!(108, buf.read_u8_le().unwrap());
        assert_eq!(108, buf.read_u8_le().unwrap());
        assert_eq!(111, buf.read_u8_le().unwrap());
        assert_matches!(buf.read_u8_le(), Err(Overflow { .. }));
    }

    #[test]
    pub fn test_read_write_numbers() {
        let mut buf: Vec<u8> = vec![];
        buf.write_u64_le(1234).unwrap();
        assert_eq!(1234, (&buf[..]).read_u64_le().unwrap());

        buf.write_u32_le(4242).unwrap();
        let mut p = &buf[..];
        assert_eq!(1234, p.read_u64_le().unwrap());
        assert_eq!(4242, p.read_u32_le().unwrap());
    }

    macro_rules! test_primitive_vec_read_write {
        ( $($num_ty: ty), *) => {
            $(
                paste!{
                    #[test]
                    fn [<test_read_write_ $num_ty _from_vec_buffer>]() {
                        let mut buf = vec![];
                        let _ = buf.[<write_ $num_ty _le>]($num_ty::MAX).unwrap();
                        assert_eq!($num_ty::MAX, buf.as_slice().[<read_ $num_ty _le>]().unwrap());
                    }
                }
            )*
        }
    }

    test_primitive_vec_read_write![u8, u16, u32, u64, i8, i16, i32, i64, f32, f64];

    #[test]
    pub fn test_peek_write_from_vec_buffer() {
        let mut buf: Vec<u8> = vec![];
        buf.write_from_slice("hello".as_bytes()).unwrap();
        let mut slice = buf.as_slice();
        assert_eq!(104, slice.peek_u8_le().unwrap());
        slice.advance_by(1);
        assert_eq!(101, slice.peek_u8_le().unwrap());
        slice.advance_by(1);
        assert_eq!(108, slice.peek_u8_le().unwrap());
        slice.advance_by(1);
        assert_eq!(108, slice.peek_u8_le().unwrap());
        slice.advance_by(1);
        assert_eq!(111, slice.peek_u8_le().unwrap());
        slice.advance_by(1);
        assert_matches!(slice.read_u8_le(), Err(Overflow { .. }));
    }

    macro_rules! test_primitive_bytes_read_write {
        ( $($num_ty: ty), *) => {
            $(
                paste!{
                    #[test]
                    fn [<test_read_write_ $num_ty _from_bytes>]() {
                        let mut bytes = bytes::Bytes::from($num_ty::MAX.to_le_bytes().to_vec());
                        assert_eq!($num_ty::MAX, bytes.[<read_ $num_ty _le>]().unwrap());

                        let mut bytes = bytes::Bytes::from($num_ty::MIN.to_le_bytes().to_vec());
                        assert_eq!($num_ty::MIN, bytes.[<read_ $num_ty _le>]().unwrap());
                    }
                }
            )*
        }
    }

    test_primitive_bytes_read_write![u8, u16, u32, u64, i8, i16, i32, i64, f32, f64];

    #[test]
    pub fn test_write_overflow() {
        let mut buf = [0u8; 4];
        assert_matches!(
            (&mut buf[..]).write_from_slice("hell".as_bytes()),
            Ok { .. }
        );

        assert_matches!(
            (&mut buf[..]).write_from_slice("hello".as_bytes()),
            Err(common_base::buffer::Error::Overflow { .. })
        );
    }
}
src/common/catalog/src/error.rs (new file, 46 lines)
@@ -0,0 +1,46 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::any::Any;

use common_error::ext::ErrorExt;
use common_error::status_code::StatusCode;
use common_macro::stack_trace_debug;
use snafu::{Location, Snafu};

#[derive(Snafu)]
#[snafu(visibility(pub))]
#[stack_trace_debug]
pub enum Error {
    #[snafu(display("Invalid full table name: {}", table_name))]
    InvalidFullTableName {
        table_name: String,
        #[snafu(implicit)]
        location: Location,
    },
}

impl ErrorExt for Error {
    fn status_code(&self) -> StatusCode {
        match self {
            Error::InvalidFullTableName { .. } => StatusCode::Unexpected,
        }
    }

    fn as_any(&self) -> &dyn Any {
        self
    }
}

pub type Result<T> = std::result::Result<T, Error>;
@@ -15,6 +15,7 @@
use consts::DEFAULT_CATALOG_NAME;

pub mod consts;
pub mod error;

#[inline]
pub fn format_schema_name(catalog: &str, schema: &str) -> String {
@@ -29,7 +29,6 @@ datafusion.workspace = true
datatypes.workspace = true
geohash = { version = "0.13", optional = true }
h3o = { version = "0.6", optional = true }
jsonb.workspace = true
num = "0.4"
num-traits = "0.2"
once_cell.workspace = true
@@ -22,7 +22,6 @@ use crate::function::{AsyncFunctionRef, FunctionRef};
use crate::scalars::aggregate::{AggregateFunctionMetaRef, AggregateFunctions};
use crate::scalars::date::DateFunction;
use crate::scalars::expression::ExpressionFunction;
use crate::scalars::json::JsonFunction;
use crate::scalars::matches::MatchesFunction;
use crate::scalars::math::MathFunction;
use crate::scalars::numpy::NumpyFunction;
@@ -117,9 +116,6 @@ pub static FUNCTION_REGISTRY: Lazy<Arc<FunctionRegistry>> = Lazy::new(|| {
    SystemFunction::register(&function_registry);
    TableFunction::register(&function_registry);

    // Json related functions
    JsonFunction::register(&function_registry);

    // Geo functions
    #[cfg(feature = "geo")]
    crate::scalars::geo::GeoFunctions::register(&function_registry);
@@ -17,11 +17,9 @@ pub(crate) mod date;
pub mod expression;
#[cfg(feature = "geo")]
pub mod geo;
pub mod json;
pub mod matches;
pub mod math;
pub mod numpy;

#[cfg(test)]
pub(crate) mod test;
pub(crate) mod timestamp;
@@ -28,9 +28,7 @@ use snafu::{ensure, ResultExt};

use crate::function::{Function, FunctionContext};

/// Function that returns [h3] encoding string for a given geospatial coordinate.
///
/// [h3]: https://h3geo.org/
/// Function that return h3 encoding string for a given geospatial coordinate.
#[derive(Clone, Debug, Default)]
pub struct H3Function;

@@ -117,7 +115,7 @@ impl Function for H3Function {
            ))
        })
        .context(error::ExecuteSnafu)?;
    let r = Resolution::try_from(r)
    let r = Resolution::try_from(r as u8)
        .map_err(|e| {
            BoxedError::new(PlainError::new(
                format!("H3 error: {}", e),
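The `r as u8` cast is the substance of this hunk: the resolution value comes out of an integer column as an `i64`, while h3o's `Resolution` conversion is defined for small unsigned integers (valid H3 resolutions are 0..=15). A hedged sketch of the conversion, assuming the h3o 0.6 API pinned in the Cargo.toml above:

    use h3o::Resolution;

    fn to_resolution(r: i64) -> Result<Resolution, String> {
        // Out-of-range values (e.g. 16, or negatives wrapped by the cast)
        // are rejected by try_from and surface as an "H3 error" above.
        Resolution::try_from(r as u8).map_err(|e| format!("H3 error: {}", e))
    }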
@@ -1,38 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::sync::Arc;

mod json_get;
mod json_to_string;
mod to_json;

use json_get::{JsonGetBool, JsonGetFloat, JsonGetInt, JsonGetString};
use json_to_string::JsonToStringFunction;
use to_json::ToJsonFunction;

use crate::function_registry::FunctionRegistry;

pub(crate) struct JsonFunction;

impl JsonFunction {
    pub fn register(registry: &FunctionRegistry) {
        registry.register(Arc::new(JsonToStringFunction));
        registry.register(Arc::new(ToJsonFunction));

        registry.register(Arc::new(JsonGetInt));
        registry.register(Arc::new(JsonGetFloat));
        registry.register(Arc::new(JsonGetString));
        registry.register(Arc::new(JsonGetBool));
    }
}
@@ -1,454 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::fmt::{self, Display};

use common_query::error::{InvalidFuncArgsSnafu, Result, UnsupportedInputDataTypeSnafu};
use common_query::prelude::Signature;
use datafusion::logical_expr::Volatility;
use datatypes::data_type::ConcreteDataType;
use datatypes::prelude::VectorRef;
use datatypes::scalars::ScalarVectorBuilder;
use datatypes::vectors::{
    BooleanVectorBuilder, Float64VectorBuilder, Int64VectorBuilder, MutableVector,
    StringVectorBuilder,
};
use snafu::ensure;

use crate::function::{Function, FunctionContext};

fn get_json_by_path(json: &[u8], path: &str) -> Option<Vec<u8>> {
    let json_path = jsonb::jsonpath::parse_json_path(path.as_bytes());
    match json_path {
        Ok(json_path) => {
            let mut sub_jsonb = Vec::new();
            let mut sub_offsets = Vec::new();
            match jsonb::get_by_path(json, json_path, &mut sub_jsonb, &mut sub_offsets) {
                Ok(_) => Some(sub_jsonb),
                Err(_) => None,
            }
        }
        _ => None,
    }
}
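// Illustration (not part of the original file): extracting the path "$.a"
// from the JSONB encoding of {"a": 1} with the helper above would look like
//
//     let json = jsonb::parse_value(br#"{"a": 1}"#).unwrap().to_vec();
//     assert!(get_json_by_path(&json, "$.a").is_some());
//
// An unparsable path yields None rather than an error, per the match arms above.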
|
||||
/// Get the value from the JSONB by the given path and return it as the specified type.
/// If the path does not exist or the value is not the type specified, return `NULL`.
macro_rules! json_get {
    // e.g. name = JsonGetInt, type = Int64, rust_type = i64, doc = "Get the value from the JSONB by the given path and return it as an integer."
    ($name: ident, $type: ident, $rust_type: ident, $doc:expr) => {
        paste::paste! {
            #[doc = $doc]
            #[derive(Clone, Debug, Default)]
            pub struct $name;

            impl Function for $name {
                fn name(&self) -> &str {
                    stringify!([<$name:snake>])
                }

                fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
                    Ok(ConcreteDataType::[<$type:snake _datatype>]())
                }

                fn signature(&self) -> Signature {
                    Signature::exact(
                        vec![
                            ConcreteDataType::json_datatype(),
                            ConcreteDataType::string_datatype(),
                        ],
                        Volatility::Immutable,
                    )
                }

                fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
                    ensure!(
                        columns.len() == 2,
                        InvalidFuncArgsSnafu {
                            err_msg: format!(
                                "The length of the args is not correct, expect exactly two, have: {}",
                                columns.len()
                            ),
                        }
                    );
                    let jsons = &columns[0];
                    let paths = &columns[1];

                    let size = jsons.len();
                    let datatype = jsons.data_type();
                    let mut results = [<$type VectorBuilder>]::with_capacity(size);

                    match datatype {
                        // JSON data type uses binary vector
                        ConcreteDataType::Binary(_) => {
                            for i in 0..size {
                                let json = jsons.get_ref(i);
                                let path = paths.get_ref(i);

                                let json = json.as_binary();
                                let path = path.as_string();
                                let result = match (json, path) {
                                    (Ok(Some(json)), Ok(Some(path))) => {
                                        get_json_by_path(json, path)
                                            .and_then(|json| { jsonb::[<to_ $rust_type>](&json).ok() })
                                    }
                                    _ => None,
                                };

                                results.push(result);
                            }
                        }
                        _ => {
                            return UnsupportedInputDataTypeSnafu {
                                function: stringify!([<$name:snake>]),
                                datatypes: columns.iter().map(|c| c.data_type()).collect::<Vec<_>>(),
                            }
                            .fail();
                        }
                    }

                    Ok(results.to_vector())
                }
            }

            impl Display for $name {
                fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
                    write!(f, "{}", stringify!([<$name:snake>]).to_ascii_uppercase())
                }
            }
        }
    };
}

json_get!(
    JsonGetInt,
    Int64,
    i64,
    "Get the value from the JSONB by the given path and return it as an integer."
);

json_get!(
    JsonGetFloat,
    Float64,
    f64,
    "Get the value from the JSONB by the given path and return it as a float."
);

json_get!(
    JsonGetBool,
    Boolean,
    bool,
    "Get the value from the JSONB by the given path and return it as a boolean."
);
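The three invocations above stamp out `JsonGetInt`, `JsonGetFloat`, and `JsonGetBool`; the SQL-visible function name is the struct name converted to snake case by the `paste` crate. A tiny sketch of that name derivation, assuming `paste` is available as a dependency:

// Sketch of the snake-case name derivation done inside `json_get!`.
fn main() {
    paste::paste! {
        // `[<JsonGetInt:snake>]` pastes the identifier `json_get_int`;
        // `stringify!` then turns it into the name exposed to SQL.
        assert_eq!("json_get_int", stringify!([<JsonGetInt:snake>]));
    }
}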
/// Get the value from the JSONB by the given path and return it as a string.
#[derive(Clone, Debug, Default)]
pub struct JsonGetString;

impl Function for JsonGetString {
    fn name(&self) -> &str {
        "json_get_string"
    }

    fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
        Ok(ConcreteDataType::string_datatype())
    }

    fn signature(&self) -> Signature {
        Signature::exact(
            vec![
                ConcreteDataType::json_datatype(),
                ConcreteDataType::string_datatype(),
            ],
            Volatility::Immutable,
        )
    }

    fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
        ensure!(
            columns.len() == 2,
            InvalidFuncArgsSnafu {
                err_msg: format!(
                    "The length of the args is not correct, expect exactly two, have: {}",
                    columns.len()
                ),
            }
        );
        let jsons = &columns[0];
        let paths = &columns[1];

        let size = jsons.len();
        let datatype = jsons.data_type();
        let mut results = StringVectorBuilder::with_capacity(size);

        match datatype {
            // JSON data type uses binary vector
            ConcreteDataType::Binary(_) => {
                for i in 0..size {
                    let json = jsons.get_ref(i);
                    let path = paths.get_ref(i);

                    let json = json.as_binary();
                    let path = path.as_string();
                    let result = match (json, path) {
                        (Ok(Some(json)), Ok(Some(path))) => {
                            get_json_by_path(json, path).and_then(|json| jsonb::to_str(&json).ok())
                        }
                        _ => None,
                    };

                    results.push(result.as_deref());
                }
            }
            _ => {
                return UnsupportedInputDataTypeSnafu {
                    function: "json_get_string",
                    datatypes: columns.iter().map(|c| c.data_type()).collect::<Vec<_>>(),
                }
                .fail();
            }
        }

        Ok(results.to_vector())
    }
}

impl Display for JsonGetString {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", "json_get_string".to_ascii_uppercase())
    }
}
#[cfg(test)]
mod tests {
    use std::sync::Arc;

    use common_query::prelude::TypeSignature;
    use datatypes::scalars::ScalarVector;
    use datatypes::vectors::{BinaryVector, StringVector};

    use super::*;

    #[test]
    fn test_json_get_int() {
        let json_get_int = JsonGetInt;

        assert_eq!("json_get_int", json_get_int.name());
        assert_eq!(
            ConcreteDataType::int64_datatype(),
            json_get_int
                .return_type(&[
                    ConcreteDataType::json_datatype(),
                    ConcreteDataType::string_datatype()
                ])
                .unwrap()
        );

        assert!(matches!(json_get_int.signature(),
            Signature {
                type_signature: TypeSignature::Exact(valid_types),
                volatility: Volatility::Immutable
            } if valid_types == vec![ConcreteDataType::json_datatype(), ConcreteDataType::string_datatype()]
        ));

        let json_strings = [
            r#"{"a": {"b": 2}, "b": 2, "c": 3}"#,
            r#"{"a": 4, "b": {"c": 6}, "c": 6}"#,
            r#"{"a": 7, "b": 8, "c": {"a": 7}}"#,
        ];
        let paths = vec!["$.a.b", "$.a", "$.c"];
        let results = [Some(2), Some(4), None];

        let jsonbs = json_strings
            .iter()
            .map(|s| {
                let value = jsonb::parse_value(s.as_bytes()).unwrap();
                value.to_vec()
            })
            .collect::<Vec<_>>();

        let json_vector = BinaryVector::from_vec(jsonbs);
        let path_vector = StringVector::from_vec(paths);
        let args: Vec<VectorRef> = vec![Arc::new(json_vector), Arc::new(path_vector)];
        let vector = json_get_int
            .eval(FunctionContext::default(), &args)
            .unwrap();

        assert_eq!(3, vector.len());
        for (i, gt) in results.iter().enumerate() {
            let result = vector.get_ref(i);
            let result = result.as_i64().unwrap();
            assert_eq!(*gt, result);
        }
    }

    #[test]
    fn test_json_get_float() {
        let json_get_float = JsonGetFloat;

        assert_eq!("json_get_float", json_get_float.name());
        assert_eq!(
            ConcreteDataType::float64_datatype(),
            json_get_float
                .return_type(&[
                    ConcreteDataType::json_datatype(),
                    ConcreteDataType::string_datatype()
                ])
                .unwrap()
        );

        assert!(matches!(json_get_float.signature(),
            Signature {
                type_signature: TypeSignature::Exact(valid_types),
                volatility: Volatility::Immutable
            } if valid_types == vec![ConcreteDataType::json_datatype(), ConcreteDataType::string_datatype()]
        ));

        let json_strings = [
            r#"{"a": {"b": 2.1}, "b": 2.2, "c": 3.3}"#,
            r#"{"a": 4.4, "b": {"c": 6.6}, "c": 6.6}"#,
            r#"{"a": 7.7, "b": 8.8, "c": {"a": 7.7}}"#,
        ];
        let paths = vec!["$.a.b", "$.a", "$.c"];
        let results = [Some(2.1), Some(4.4), None];

        let jsonbs = json_strings
            .iter()
            .map(|s| {
                let value = jsonb::parse_value(s.as_bytes()).unwrap();
                value.to_vec()
            })
            .collect::<Vec<_>>();

        let json_vector = BinaryVector::from_vec(jsonbs);
        let path_vector = StringVector::from_vec(paths);
        let args: Vec<VectorRef> = vec![Arc::new(json_vector), Arc::new(path_vector)];
        let vector = json_get_float
            .eval(FunctionContext::default(), &args)
            .unwrap();

        assert_eq!(3, vector.len());
        for (i, gt) in results.iter().enumerate() {
            let result = vector.get_ref(i);
            let result = result.as_f64().unwrap();
            assert_eq!(*gt, result);
        }
    }

    #[test]
    fn test_json_get_bool() {
        let json_get_bool = JsonGetBool;

        assert_eq!("json_get_bool", json_get_bool.name());
        assert_eq!(
            ConcreteDataType::boolean_datatype(),
            json_get_bool
                .return_type(&[
                    ConcreteDataType::json_datatype(),
                    ConcreteDataType::string_datatype()
                ])
                .unwrap()
        );

        assert!(matches!(json_get_bool.signature(),
            Signature {
                type_signature: TypeSignature::Exact(valid_types),
                volatility: Volatility::Immutable
            } if valid_types == vec![ConcreteDataType::json_datatype(), ConcreteDataType::string_datatype()]
        ));

        let json_strings = [
            r#"{"a": {"b": true}, "b": false, "c": true}"#,
            r#"{"a": false, "b": {"c": true}, "c": false}"#,
            r#"{"a": true, "b": false, "c": {"a": true}}"#,
        ];
        let paths = vec!["$.a.b", "$.a", "$.c"];
        let results = [Some(true), Some(false), None];

        let jsonbs = json_strings
            .iter()
            .map(|s| {
                let value = jsonb::parse_value(s.as_bytes()).unwrap();
                value.to_vec()
            })
            .collect::<Vec<_>>();

        let json_vector = BinaryVector::from_vec(jsonbs);
        let path_vector = StringVector::from_vec(paths);
        let args: Vec<VectorRef> = vec![Arc::new(json_vector), Arc::new(path_vector)];
        let vector = json_get_bool
            .eval(FunctionContext::default(), &args)
            .unwrap();

        assert_eq!(3, vector.len());
        for (i, gt) in results.iter().enumerate() {
            let result = vector.get_ref(i);
            let result = result.as_boolean().unwrap();
            assert_eq!(*gt, result);
        }
    }

    #[test]
    fn test_json_get_string() {
        let json_get_string = JsonGetString;

        assert_eq!("json_get_string", json_get_string.name());
        assert_eq!(
            ConcreteDataType::string_datatype(),
            json_get_string
                .return_type(&[
                    ConcreteDataType::json_datatype(),
                    ConcreteDataType::string_datatype()
                ])
                .unwrap()
        );

        assert!(matches!(json_get_string.signature(),
            Signature {
                type_signature: TypeSignature::Exact(valid_types),
                volatility: Volatility::Immutable
            } if valid_types == vec![ConcreteDataType::json_datatype(), ConcreteDataType::string_datatype()]
        ));

        let json_strings = [
            r#"{"a": {"b": "a"}, "b": "b", "c": "c"}"#,
            r#"{"a": "d", "b": {"c": "e"}, "c": "f"}"#,
            r#"{"a": "g", "b": "h", "c": {"a": "g"}}"#,
        ];
        let paths = vec!["$.a.b", "$.a", ""];
        let results = [Some("a"), Some("d"), None];

        let jsonbs = json_strings
            .iter()
            .map(|s| {
                let value = jsonb::parse_value(s.as_bytes()).unwrap();
                value.to_vec()
            })
            .collect::<Vec<_>>();

        let json_vector = BinaryVector::from_vec(jsonbs);
        let path_vector = StringVector::from_vec(paths);
        let args: Vec<VectorRef> = vec![Arc::new(json_vector), Arc::new(path_vector)];
        let vector = json_get_string
            .eval(FunctionContext::default(), &args)
            .unwrap();

        assert_eq!(3, vector.len());
        for (i, gt) in results.iter().enumerate() {
            let result = vector.get_ref(i);
            let result = result.as_string().unwrap();
            assert_eq!(*gt, result);
        }
    }
}
@@ -1,174 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::fmt::{self, Display};

use common_query::error::{InvalidFuncArgsSnafu, Result, UnsupportedInputDataTypeSnafu};
use common_query::prelude::Signature;
use datafusion::logical_expr::Volatility;
use datatypes::data_type::ConcreteDataType;
use datatypes::prelude::VectorRef;
use datatypes::scalars::ScalarVectorBuilder;
use datatypes::vectors::{MutableVector, StringVectorBuilder};
use snafu::ensure;

use crate::function::{Function, FunctionContext};

/// Converts the `JSONB` into `String`. It's useful for displaying JSONB content.
#[derive(Clone, Debug, Default)]
pub struct JsonToStringFunction;

const NAME: &str = "json_to_string";

impl Function for JsonToStringFunction {
    fn name(&self) -> &str {
        NAME
    }

    fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
        Ok(ConcreteDataType::string_datatype())
    }

    fn signature(&self) -> Signature {
        Signature::exact(
            vec![ConcreteDataType::json_datatype()],
            Volatility::Immutable,
        )
    }

    fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
        ensure!(
            columns.len() == 1,
            InvalidFuncArgsSnafu {
                err_msg: format!(
                    "The length of the args is not correct, expect exactly one, have: {}",
                    columns.len()
                ),
            }
        );
        let jsons = &columns[0];

        let size = jsons.len();
        let datatype = jsons.data_type();
        let mut results = StringVectorBuilder::with_capacity(size);

        match datatype {
            // JSON data type uses binary vector
            ConcreteDataType::Binary(_) => {
                for i in 0..size {
                    let json = jsons.get_ref(i);

                    let json = json.as_binary();
                    let result = match json {
                        Ok(Some(json)) => match jsonb::from_slice(json) {
                            Ok(json) => {
                                let json = json.to_string();
                                Some(json)
                            }
                            Err(_) => {
                                return InvalidFuncArgsSnafu {
                                    err_msg: format!("Illegal json binary: {:?}", json),
                                }
                                .fail()
                            }
                        },
                        _ => None,
                    };

                    results.push(result.as_deref());
                }
            }
            _ => {
                return UnsupportedInputDataTypeSnafu {
                    function: NAME,
                    datatypes: columns.iter().map(|c| c.data_type()).collect::<Vec<_>>(),
                }
                .fail();
            }
        }

        Ok(results.to_vector())
    }
}

impl Display for JsonToStringFunction {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "JSON_TO_STRING")
    }
}

#[cfg(test)]
mod tests {
    use std::sync::Arc;

    use common_query::prelude::TypeSignature;
    use datatypes::scalars::ScalarVector;
    use datatypes::vectors::BinaryVector;

    use super::*;

    #[test]
    fn test_get_by_path_function() {
        let json_to_string = JsonToStringFunction;

        assert_eq!("json_to_string", json_to_string.name());
        assert_eq!(
            ConcreteDataType::string_datatype(),
            json_to_string
                .return_type(&[ConcreteDataType::json_datatype()])
                .unwrap()
        );

        assert!(matches!(json_to_string.signature(),
            Signature {
                type_signature: TypeSignature::Exact(valid_types),
                volatility: Volatility::Immutable
            } if valid_types == vec![ConcreteDataType::json_datatype()]
        ));

        let json_strings = [
            r#"{"a": {"b": 2}, "b": 2, "c": 3}"#,
            r#"{"a": 4, "b": {"c": 6}, "c": 6}"#,
            r#"{"a": 7, "b": 8, "c": {"a": 7}}"#,
        ];

        let jsonbs = json_strings
            .iter()
            .map(|s| {
                let value = jsonb::parse_value(s.as_bytes()).unwrap();
                value.to_vec()
            })
            .collect::<Vec<_>>();

        let json_vector = BinaryVector::from_vec(jsonbs);
        let args: Vec<VectorRef> = vec![Arc::new(json_vector)];
        let vector = json_to_string
            .eval(FunctionContext::default(), &args)
            .unwrap();

        assert_eq!(3, vector.len());
        for (i, gt) in json_strings.iter().enumerate() {
            let result = vector.get_ref(i);
            let result = result.as_string().unwrap().unwrap();
            // remove whitespaces
            assert_eq!(gt.replace(" ", ""), result);
        }

        let invalid_jsonb = vec![b"invalid json"];
        let invalid_json_vector = BinaryVector::from_vec(invalid_jsonb);
        let args: Vec<VectorRef> = vec![Arc::new(invalid_json_vector)];
        let vector = json_to_string.eval(FunctionContext::default(), &args);
        assert!(vector.is_err());
    }
}
@@ -1,165 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::fmt::{self, Display};

use common_query::error::{InvalidFuncArgsSnafu, Result, UnsupportedInputDataTypeSnafu};
use common_query::prelude::Signature;
use datafusion::logical_expr::Volatility;
use datatypes::data_type::ConcreteDataType;
use datatypes::prelude::VectorRef;
use datatypes::scalars::ScalarVectorBuilder;
use datatypes::vectors::{BinaryVectorBuilder, MutableVector};
use snafu::ensure;

use crate::function::{Function, FunctionContext};

/// Parses the `String` into `JSONB`.
#[derive(Clone, Debug, Default)]
pub struct ToJsonFunction;

const NAME: &str = "to_json";

impl Function for ToJsonFunction {
    fn name(&self) -> &str {
        NAME
    }

    fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
        Ok(ConcreteDataType::json_datatype())
    }

    fn signature(&self) -> Signature {
        Signature::exact(
            vec![ConcreteDataType::string_datatype()],
            Volatility::Immutable,
        )
    }

    fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
        ensure!(
            columns.len() == 1,
            InvalidFuncArgsSnafu {
                err_msg: format!(
                    "The length of the args is not correct, expect exactly one, have: {}",
                    columns.len()
                ),
            }
        );
        let json_strings = &columns[0];

        let size = json_strings.len();
        let datatype = json_strings.data_type();
        let mut results = BinaryVectorBuilder::with_capacity(size);

        match datatype {
            ConcreteDataType::String(_) => {
                for i in 0..size {
                    let json_string = json_strings.get_ref(i);

                    let json_string = json_string.as_string();
                    let result = match json_string {
                        Ok(Some(json_string)) => match jsonb::parse_value(json_string.as_bytes()) {
                            Ok(json) => Some(json.to_vec()),
                            Err(_) => {
                                return InvalidFuncArgsSnafu {
                                    err_msg: format!(
                                        "Cannot convert the string to json, have: {}",
                                        json_string
                                    ),
                                }
                                .fail()
                            }
                        },
                        _ => None,
                    };

                    results.push(result.as_deref());
                }
            }
            _ => {
                return UnsupportedInputDataTypeSnafu {
                    function: NAME,
                    datatypes: columns.iter().map(|c| c.data_type()).collect::<Vec<_>>(),
                }
                .fail();
            }
        }

        Ok(results.to_vector())
    }
}

impl Display for ToJsonFunction {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "TO_JSON")
    }
}

#[cfg(test)]
mod tests {
    use std::sync::Arc;

    use common_query::prelude::TypeSignature;
    use datatypes::scalars::ScalarVector;
    use datatypes::vectors::StringVector;

    use super::*;

    #[test]
    fn test_get_by_path_function() {
        let to_json = ToJsonFunction;

        assert_eq!("to_json", to_json.name());
        assert_eq!(
            ConcreteDataType::json_datatype(),
            to_json
                .return_type(&[ConcreteDataType::json_datatype()])
                .unwrap()
        );

        assert!(matches!(to_json.signature(),
            Signature {
                type_signature: TypeSignature::Exact(valid_types),
                volatility: Volatility::Immutable
            } if valid_types == vec![ConcreteDataType::string_datatype()]
        ));

        let json_strings = [
            r#"{"a": {"b": 2}, "b": 2, "c": 3}"#,
            r#"{"a": 4, "b": {"c": 6}, "c": 6}"#,
            r#"{"a": 7, "b": 8, "c": {"a": 7}}"#,
        ];

        let jsonbs = json_strings
            .iter()
            .map(|s| {
                let value = jsonb::parse_value(s.as_bytes()).unwrap();
                value.to_vec()
            })
            .collect::<Vec<_>>();

        let json_string_vector = StringVector::from_vec(json_strings.to_vec());
        let args: Vec<VectorRef> = vec![Arc::new(json_string_vector)];
        let vector = to_json.eval(FunctionContext::default(), &args).unwrap();

        assert_eq!(3, vector.len());
        for (i, gt) in jsonbs.iter().enumerate() {
            let result = vector.get_ref(i);
            let result = result.as_binary().unwrap().unwrap();
            // compare the encoded JSONB bytes directly
            assert_eq!(gt, result);
        }
    }
}
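Taken together, `to_json` and `json_to_string` form a round trip: text is parsed into JSONB bytes, and the bytes render back as compact JSON. A small sketch of that round trip using the same `jsonb` calls as the two eval bodies above (the sample document is illustrative only):

// Round-trip sketch: `to_json` parses text into JSONB bytes, and
// `json_to_string` renders those bytes back as compact JSON text.
fn main() {
    let text = r#"{"a": {"b": 2}, "b": 2}"#;

    // to_json: String -> JSONB bytes
    let bytes = jsonb::parse_value(text.as_bytes()).unwrap().to_vec();

    // json_to_string: JSONB bytes -> String (whitespace is not preserved)
    let rendered = jsonb::from_slice(&bytes).unwrap().to_string();
    assert_eq!(text.replace(' ', ""), rendered);
}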
@@ -19,7 +19,6 @@ use common_query::error::Result;
use common_query::prelude::{Signature, Volatility};
use datatypes::data_type::ConcreteDataType;
use datatypes::vectors::{StringVector, VectorRef};
use session::context::Channel;

use crate::function::{Function, FunctionContext};

@@ -45,22 +44,11 @@ impl Function for VersionFunction {
        Signature::exact(vec![], Volatility::Immutable)
    }

    fn eval(&self, func_ctx: FunctionContext, _columns: &[VectorRef]) -> Result<VectorRef> {
        let version = match func_ctx.query_ctx.channel() {
            Channel::Mysql => {
                format!(
                    "{}-greptimedb-{}",
                    std::env::var("GREPTIMEDB_MYSQL_SERVER_VERSION")
                        .unwrap_or_else(|_| "8.4.2".to_string()),
                    env!("CARGO_PKG_VERSION")
                )
            }
            Channel::Postgres => {
                format!("16.3-greptimedb-{}", env!("CARGO_PKG_VERSION"))
            }
            _ => env!("CARGO_PKG_VERSION").to_string(),
        };
        let result = StringVector::from(vec![version]);
    fn eval(&self, _func_ctx: FunctionContext, _columns: &[VectorRef]) -> Result<VectorRef> {
        let result = StringVector::from(vec![format!(
            "5.7.20-greptimedb-{}",
            env!("CARGO_PKG_VERSION")
        )]);
        Ok(Arc::new(result))
    }
}
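The removed eval above derived the advertised server version from the client channel, with an environment-variable override for MySQL. A sketch of that formatting logic in isolation, assuming GREPTIMEDB_MYSQL_SERVER_VERSION is unset so the "8.4.2" fallback applies:

// Sketch of the channel-aware version string the removed MySQL branch produced.
fn main() {
    let version = format!(
        "{}-greptimedb-{}",
        std::env::var("GREPTIMEDB_MYSQL_SERVER_VERSION").unwrap_or_else(|_| "8.4.2".to_string()),
        env!("CARGO_PKG_VERSION")
    );
    // e.g. "8.4.2-greptimedb-<crate version>" for a MySQL client channel
    println!("{version}");
}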
@@ -64,6 +64,12 @@ pub enum Error {
        location: Location,
    },

    #[snafu(display("Invalid column proto: {}", err_msg))]
    InvalidColumnProto {
        err_msg: String,
        #[snafu(implicit)]
        location: Location,
    },
    #[snafu(display("Failed to create vector"))]
    CreateVector {
        #[snafu(implicit)]
@@ -131,6 +137,7 @@ impl ErrorExt for Error {
            Error::DuplicatedTimestampColumn { .. }
            | Error::DuplicatedColumnName { .. }
            | Error::MissingTimestampColumn { .. } => StatusCode::InvalidArguments,
            Error::InvalidColumnProto { .. } => StatusCode::InvalidArguments,
            Error::CreateVector { .. } => StatusCode::InvalidArguments,
            Error::MissingField { .. } => StatusCode::InvalidArguments,
            Error::InvalidColumnDef { source, .. } => source.status_code(),
@@ -14,10 +14,11 @@

use api::helper;
use api::v1::column::Values;
use api::v1::{Column, CreateTableExpr};
use api::v1::{AddColumns, Column, CreateTableExpr};
use common_base::BitVec;
use datatypes::data_type::{ConcreteDataType, DataType};
use datatypes::prelude::VectorRef;
use datatypes::schema::SchemaRef;
use snafu::{ensure, ResultExt};
use table::metadata::TableId;
use table::table_reference::TableReference;
@@ -26,6 +27,11 @@ use crate::error::{CreateVectorSnafu, Result, UnexpectedValuesLengthSnafu};
use crate::util;
use crate::util::ColumnExpr;

pub fn find_new_columns(schema: &SchemaRef, columns: &[Column]) -> Result<Option<AddColumns>> {
    let column_exprs = ColumnExpr::from_columns(columns);
    util::extract_new_columns(schema, column_exprs)
}

/// Try to build create table request from insert data.
pub fn build_create_expr_from_insertion(
    catalog_name: &str,
@@ -108,6 +114,7 @@ mod tests {
    use super::*;
    use crate::error;
    use crate::error::ColumnDataTypeSnafu;
    use crate::insert::find_new_columns;

    #[inline]
    fn build_column_schema(
@@ -274,18 +281,11 @@ mod tests {

        let schema = Arc::new(SchemaBuilder::try_from(columns).unwrap().build().unwrap());

        assert!(
            util::extract_new_columns(&schema, ColumnExpr::from_columns(&[]))
                .unwrap()
                .is_none()
        );
        assert!(find_new_columns(&schema, &[]).unwrap().is_none());

        let insert_batch = mock_insert_batch();

        let add_columns =
            util::extract_new_columns(&schema, ColumnExpr::from_columns(&insert_batch.0))
                .unwrap()
                .unwrap();
        let add_columns = find_new_columns(&schema, &insert_batch.0).unwrap().unwrap();

        assert_eq!(5, add_columns.add_columns.len());
        let host_column = &add_columns.add_columns[0];

@@ -19,4 +19,4 @@ pub mod insert;
pub mod util;

pub use alter::{alter_expr_to_request, create_table_schema};
pub use insert::build_create_expr_from_insertion;
pub use insert::{build_create_expr_from_insertion, find_new_columns};
@@ -70,7 +70,7 @@ macro_rules! convert_arrow_array_to_grpc_vals {
                return Ok(vals);
            },
        )+
        ConcreteDataType::Null(_) | ConcreteDataType::List(_) | ConcreteDataType::Dictionary(_) | ConcreteDataType::Duration(_) | ConcreteDataType::Json(_) => unreachable!("Should not send {:?} in gRPC", $data_type),
        ConcreteDataType::Null(_) | ConcreteDataType::List(_) | ConcreteDataType::Dictionary(_) | ConcreteDataType::Duration(_) => unreachable!("Should not send {:?} in gRPC", $data_type),
    }
    }};
}
@@ -24,7 +24,7 @@ use crate::key::table_info::TableInfoKey;
use crate::key::table_name::TableNameKey;
use crate::key::table_route::TableRouteKey;
use crate::key::view_info::ViewInfoKey;
use crate::key::MetadataKey;
use crate::key::MetaKey;

/// KvBackend cache invalidator
#[async_trait::async_trait]

@@ -39,7 +39,7 @@ use crate::key::DeserializedValueWithBytes;
use crate::lock_key::{CatalogLock, SchemaLock, TableLock};
use crate::rpc::ddl::AlterTableTask;
use crate::rpc::router::find_leaders;
use crate::{metrics, ClusterId};
use crate::{cache_invalidator, metrics, ClusterId};

pub struct AlterLogicalTablesProcedure {
    pub context: DdlContext,
@@ -131,7 +131,7 @@ impl AlterLogicalTablesProcedure {
        let phy_raw_schemas = future::join_all(alter_region_tasks)
            .await
            .into_iter()
            .map(|res| res.map(|mut res| res.extensions.remove(ALTER_PHYSICAL_EXTENSION_KEY)))
            .map(|res| res.map(|mut res| res.extension.remove(ALTER_PHYSICAL_EXTENSION_KEY)))
            .collect::<Result<Vec<_>>>()?;

        if phy_raw_schemas.is_empty() {
@@ -170,11 +170,12 @@ impl AlterLogicalTablesProcedure {
    }

    pub(crate) async fn on_invalidate_table_cache(&mut self) -> Result<Status> {
        let ctx = cache_invalidator::Context::default();
        let to_invalidate = self.build_table_cache_keys_to_invalidate();

        self.context
            .cache_invalidator
            .invalidate(&Default::default(), &to_invalidate)
            .invalidate(&ctx, &to_invalidate)
            .await?;
        Ok(Status::done())
    }
@@ -157,7 +157,7 @@ impl CreateLogicalTablesProcedure {
        let phy_raw_schemas = join_all(create_region_tasks)
            .await
            .into_iter()
            .map(|res| res.map(|mut res| res.extensions.remove(ALTER_PHYSICAL_EXTENSION_KEY)))
            .map(|res| res.map(|mut res| res.extension.remove(ALTER_PHYSICAL_EXTENSION_KEY)))
            .collect::<Result<Vec<_>>>()?;

        if phy_raw_schemas.is_empty() {

@@ -15,12 +15,12 @@
use common_catalog::consts::METRIC_ENGINE;
use common_error::ext::BoxedError;
use common_procedure::error::Error as ProcedureError;
use snafu::{ensure, OptionExt, ResultExt};
use snafu::{ensure, location, OptionExt};
use store_api::metric_engine_consts::LOGICAL_TABLE_METADATA_KEY;
use table::metadata::TableId;

use crate::ddl::DetectingRegion;
use crate::error::{Error, OperateDatanodeSnafu, Result, TableNotFoundSnafu, UnsupportedSnafu};
use crate::error::{Error, Result, TableNotFoundSnafu, UnsupportedSnafu};
use crate::key::table_name::TableNameKey;
use crate::key::TableMetadataManagerRef;
use crate::peer::Peer;
@@ -32,9 +32,11 @@ use crate::ClusterId;
pub fn add_peer_context_if_needed(datanode: Peer) -> impl FnOnce(Error) -> Error {
    move |err| {
        if !err.is_retry_later() {
            return Err::<(), BoxedError>(BoxedError::new(err))
                .context(OperateDatanodeSnafu { peer: datanode })
                .unwrap_err();
            return Error::OperateDatanode {
                location: location!(),
                peer: datanode,
                source: BoxedError::new(err),
            };
        }
        err
    }
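The rewrite above constructs the `OperateDatanode` error directly instead of round-tripping through `Err(...).unwrap_err()`. A self-contained sketch of the decorator pattern, with the error type reduced to a toy enum for illustration:

// Toy version of `add_peer_context_if_needed`: wrap a non-retryable error
// with peer context, pass retryable ones through untouched.
#[derive(Debug)]
enum Error {
    RetryLater,
    OperateDatanode { peer: String, source: Box<Error> },
}

impl Error {
    fn is_retry_later(&self) -> bool {
        matches!(self, Error::RetryLater)
    }
}

fn add_peer_context_if_needed(peer: String) -> impl FnOnce(Error) -> Error {
    move |err| {
        if !err.is_retry_later() {
            return Error::OperateDatanode {
                peer,
                source: Box::new(err),
            };
        }
        err
    }
}

fn main() {
    // A retryable error is returned unchanged so callers can retry it.
    let passthrough = add_peer_context_if_needed("datanode-1".into())(Error::RetryLater);
    assert!(passthrough.is_retry_later());
}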
@@ -441,9 +441,11 @@ async fn handle_alter_table_task(
        .table_metadata_manager()
        .table_route_manager()
        .table_route_storage()
        .get(table_id)
        .get_raw(table_id)
        .await?
        .context(TableRouteNotFoundSnafu { table_id })?;
        .context(TableRouteNotFoundSnafu { table_id })?
        .into_inner();

    ensure!(
        table_route_value.is_physical(),
        UnexpectedLogicalRouteTableSnafu {
@@ -21,7 +21,7 @@ use common_macro::stack_trace_debug;
use common_wal::options::WalOptions;
use serde_json::error::Error as JsonError;
use snafu::{Location, Snafu};
use store_api::storage::RegionId;
use store_api::storage::{RegionId, RegionNumber};
use table::metadata::TableId;

use crate::peer::Peer;
@@ -49,6 +49,20 @@ pub enum Error {
        region_id: RegionId,
    },

    #[snafu(display("Invalid result with a txn response: {}", err_msg))]
    InvalidTxnResult {
        err_msg: String,
        #[snafu(implicit)]
        location: Location,
    },

    #[snafu(display("Invalid engine type: {}", engine_type))]
    InvalidEngineType {
        engine_type: String,
        #[snafu(implicit)]
        location: Location,
    },

    #[snafu(display("Failed to connect to Etcd"))]
    ConnectEtcd {
        #[snafu(source)]
@@ -81,6 +95,15 @@ pub enum Error {
        location: Location,
    },

    #[snafu(display("Sequence out of range: {}, start={}, step={}", name, start, step))]
    SequenceOutOfRange {
        name: String,
        start: u64,
        step: u64,
        #[snafu(implicit)]
        location: Location,
    },

    #[snafu(display("Unexpected sequence value: {}", err_msg))]
    UnexpectedSequenceValue {
        err_msg: String,
@@ -304,6 +327,13 @@ pub enum Error {
        location: Location,
    },

    #[snafu(display("Catalog already exists, catalog: {}", catalog))]
    CatalogAlreadyExists {
        catalog: String,
        #[snafu(implicit)]
        location: Location,
    },

    #[snafu(display("Schema already exists, catalog:{}, schema: {}", catalog, schema))]
    SchemaAlreadyExists {
        catalog: String,
@@ -355,8 +385,15 @@ pub enum Error {
        location: Location,
    },

    #[snafu(display("Invalid metadata, err: {}", err_msg))]
    InvalidMetadata {
    #[snafu(display("Failed to rename table, reason: {}", reason))]
    RenameTable {
        reason: String,
        #[snafu(implicit)]
        location: Location,
    },

    #[snafu(display("Invalid table metadata, err: {}", err_msg))]
    InvalidTableMetadata {
        err_msg: String,
        #[snafu(implicit)]
        location: Location,
@@ -386,6 +423,27 @@ pub enum Error {
        location: Location,
    },

    #[snafu(display(
        "Failed to move region {} in table {}, err: {}",
        region,
        table_id,
        err_msg
    ))]
    MoveRegion {
        table_id: TableId,
        region: RegionNumber,
        err_msg: String,
        #[snafu(implicit)]
        location: Location,
    },

    #[snafu(display("Invalid catalog value"))]
    InvalidCatalogValue {
        source: common_catalog::error::Error,
        #[snafu(implicit)]
        location: Location,
    },

    #[snafu(display("External error"))]
    External {
        #[snafu(implicit)]
@@ -554,6 +612,13 @@ pub enum Error {
        location: Location,
    },

    #[snafu(display("Delimiter not found, key: {}", key))]
    DelimiterNotFound {
        key: String,
        #[snafu(implicit)]
        location: Location,
    },

    #[snafu(display("Invalid prefix: {}, key: {}", prefix, key))]
    MismatchPrefix {
        prefix: String,
@@ -637,12 +702,15 @@ impl ErrorExt for Error {
            | ParseOption { .. }
            | RouteInfoCorrupted { .. }
            | InvalidProtoMsg { .. }
            | InvalidMetadata { .. }
            | InvalidTableMetadata { .. }
            | MoveRegion { .. }
            | Unexpected { .. }
            | TableInfoNotFound { .. }
            | NextSequence { .. }
            | SequenceOutOfRange { .. }
            | UnexpectedSequenceValue { .. }
            | InvalidHeartbeatResponse { .. }
            | InvalidTxnResult { .. }
            | EncodeJson { .. }
            | DecodeJson { .. }
            | PayloadNotExist { .. }
@@ -666,17 +734,22 @@ impl ErrorExt for Error {
            | MetadataCorruption { .. }
            | StrFromUtf8 { .. } => StatusCode::Unexpected,

            SendMessage { .. } | GetKvCache { .. } | CacheNotGet { .. } => StatusCode::Internal,
            SendMessage { .. } | GetKvCache { .. } | CacheNotGet { .. } | RenameTable { .. } => {
                StatusCode::Internal
            }

            SchemaAlreadyExists { .. } => StatusCode::DatabaseAlreadyExists,

            ProcedureNotFound { .. }
            | InvalidViewInfo { .. }
            | PrimaryKeyNotFound { .. }
            | CatalogAlreadyExists { .. }
            | EmptyKey { .. }
            | InvalidEngineType { .. }
            | AlterLogicalTablesInvalidArguments { .. }
            | CreateLogicalTablesInvalidArguments { .. }
            | MismatchPrefix { .. }
            | DelimiterNotFound { .. }
            | TlsConfig { .. } => StatusCode::InvalidArguments,

            FlowNotFound { .. } => StatusCode::FlowNotFound,
@@ -694,6 +767,7 @@ impl ErrorExt for Error {
            OperateDatanode { source, .. } => source.status_code(),
            Table { source, .. } => source.status_code(),
            RetryLater { source, .. } => source.status_code(),
            InvalidCatalogValue { source, .. } => source.status_code(),
            ConvertAlterTableRequest { source, .. } => source.status_code(),

            ParseProcedureId { .. }
@@ -90,7 +90,6 @@
pub mod catalog_name;
pub mod datanode_table;
pub mod flow;
pub mod node_address;
pub mod schema_name;
pub mod table_info;
pub mod table_name;
@@ -103,7 +102,7 @@ pub mod view_info;

use std::collections::{BTreeMap, HashMap, HashSet};
use std::fmt::Debug;
use std::ops::{Deref, DerefMut};
use std::ops::Deref;
use std::sync::Arc;

use bytes::Bytes;
@@ -135,7 +134,6 @@ use self::table_route::{TableRouteManager, TableRouteValue};
use self::tombstone::TombstoneManager;
use crate::ddl::utils::region_storage_path;
use crate::error::{self, Result, SerdeJsonSnafu};
use crate::key::node_address::NodeAddressValue;
use crate::key::table_route::TableRouteKey;
use crate::key::txn_helper::TxnOpGetResponseSet;
use crate::kv_backend::txn::{Txn, TxnOp};
@@ -154,15 +152,12 @@ pub const TABLE_NAME_KEY_PREFIX: &str = "__table_name";
pub const CATALOG_NAME_KEY_PREFIX: &str = "__catalog_name";
pub const SCHEMA_NAME_KEY_PREFIX: &str = "__schema_name";
pub const TABLE_ROUTE_PREFIX: &str = "__table_route";
pub const NODE_ADDRESS_PREFIX: &str = "__node_address";

/// The keys with these prefixes will be loaded into the cache when the leader starts.
pub const CACHE_KEY_PREFIXES: [&str; 5] = [
pub const CACHE_KEY_PREFIXES: [&str; 4] = [
    TABLE_NAME_KEY_PREFIX,
    CATALOG_NAME_KEY_PREFIX,
    SCHEMA_NAME_KEY_PREFIX,
    TABLE_ROUTE_PREFIX,
    NODE_ADDRESS_PREFIX,
];

pub type RegionDistribution = BTreeMap<DatanodeId, Vec<RegionNumber>>;
@@ -215,13 +210,8 @@ lazy_static! {
        .unwrap();
}

lazy_static! {
    static ref NODE_ADDRESS_PATTERN: Regex =
        Regex::new(&format!("^{NODE_ADDRESS_PREFIX}/([0-9]+)/([0-9]+)$")).unwrap();
}

/// The key of metadata.
pub trait MetadataKey<'a, T> {
pub trait MetaKey<'a, T> {
    fn to_bytes(&self) -> Vec<u8>;

    fn from_bytes(bytes: &'a [u8]) -> Result<T>;
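The renamed `MetaKey` trait is a plain bytes round trip: a key serializes itself and parses back, much like `MockKey` in the tests further below. A self-contained toy implementation, with the trait redefined locally and the crate's `Result` replaced by `Result<_, String>` so the sketch compiles on its own (the `__node_id` prefix is made up for the example):

// Minimal sketch of the `MetaKey` contract after the rename.
pub trait MetaKey<'a, T> {
    fn to_bytes(&self) -> Vec<u8>;
    fn from_bytes(bytes: &'a [u8]) -> Result<T, String>; // error type simplified
}

#[derive(Debug, PartialEq)]
struct NodeIdKey(u64);

impl<'a> MetaKey<'a, NodeIdKey> for NodeIdKey {
    fn to_bytes(&self) -> Vec<u8> {
        format!("__node_id/{}", self.0).into_bytes()
    }

    fn from_bytes(bytes: &'a [u8]) -> Result<NodeIdKey, String> {
        let s = std::str::from_utf8(bytes).map_err(|e| e.to_string())?;
        let id = s
            .strip_prefix("__node_id/")
            .ok_or_else(|| format!("invalid key: {s}"))?
            .parse::<u64>()
            .map_err(|e| e.to_string())?;
        Ok(NodeIdKey(id))
    }
}

fn main() {
    // Serialize and parse back: the round trip must be lossless.
    let bytes = NodeIdKey(42).to_bytes();
    assert_eq!(Ok(NodeIdKey(42)), NodeIdKey::from_bytes(&bytes));
}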
@@ -236,7 +226,7 @@ impl From<Vec<u8>> for BytesAdapter {
    }
}

impl<'a> MetadataKey<'a, BytesAdapter> for BytesAdapter {
impl<'a> MetaKey<'a, BytesAdapter> for BytesAdapter {
    fn to_bytes(&self) -> Vec<u8> {
        self.0.clone()
    }
@@ -246,7 +236,7 @@ impl<'a> MetadataKey<'a, BytesAdapter> for BytesAdapter {
    }
}

pub(crate) trait MetadataKeyGetTxnOp {
pub(crate) trait TableMetaKeyGetTxnOp {
    fn build_get_op(
        &self,
    ) -> (
@@ -255,7 +245,7 @@ pub(crate) trait MetadataKeyGetTxnOp {
    );
}

pub trait MetadataValue {
pub trait TableMetaValue {
    fn try_from_raw_value(raw_value: &[u8]) -> Result<Self>
    where
        Self: Sized;
@@ -316,12 +306,6 @@ impl<T: DeserializeOwned + Serialize> Deref for DeserializedValueWithBytes<T> {
    }
}

impl<T: DeserializeOwned + Serialize> DerefMut for DeserializedValueWithBytes<T> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.inner
    }
}

impl<T: DeserializeOwned + Serialize + Debug> Debug for DeserializedValueWithBytes<T> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(
@@ -346,7 +330,7 @@ impl<T: DeserializeOwned + Serialize> Serialize for DeserializedValueWithBytes<T
    }
}

impl<'de, T: DeserializeOwned + Serialize + MetadataValue> Deserialize<'de>
impl<'de, T: DeserializeOwned + Serialize + TableMetaValue> Deserialize<'de>
    for DeserializedValueWithBytes<T>
{
    /// - Deserialize behaviors:
@@ -375,7 +359,7 @@ impl<T: Serialize + DeserializeOwned + Clone> Clone for DeserializedValueWithByt
    }
}

impl<T: Serialize + DeserializeOwned + MetadataValue> DeserializedValueWithBytes<T> {
impl<T: Serialize + DeserializeOwned + TableMetaValue> DeserializedValueWithBytes<T> {
    /// Returns a struct containing a deserialized value and an original `bytes`.
    /// It accepts original bytes of inner.
    pub fn from_inner_bytes(bytes: Bytes) -> Result<Self> {
@@ -1172,10 +1156,10 @@ impl TableMetadataManager {
}

#[macro_export]
macro_rules! impl_metadata_value {
macro_rules! impl_table_meta_value {
    ($($val_ty: ty), *) => {
        $(
            impl $crate::key::MetadataValue for $val_ty {
            impl $crate::key::TableMetaValue for $val_ty {
                fn try_from_raw_value(raw_value: &[u8]) -> Result<Self> {
                    serde_json::from_slice(raw_value).context(SerdeJsonSnafu)
                }
@@ -1188,10 +1172,10 @@ macro_rules! impl_metadata_value {
    }
}

macro_rules! impl_metadata_key_get_txn_op {
macro_rules! impl_meta_key_get_txn_op {
    ($($key: ty), *) => {
        $(
            impl $crate::key::MetadataKeyGetTxnOp for $key {
            impl $crate::key::TableMetaKeyGetTxnOp for $key {
                /// Returns a [TxnOp] to retrieve the corresponding value
                /// and a filter to retrieve the value from the [TxnOpGetResponseSet]
                fn build_get_op(
@@ -1213,7 +1197,7 @@ macro_rules! impl_metadata_key_get_txn_op {
    }
}

impl_metadata_key_get_txn_op! {
impl_meta_key_get_txn_op! {
    TableNameKey<'_>,
    TableInfoKey,
    ViewInfoKey,
@@ -1222,7 +1206,7 @@ impl_metadata_key_get_txn_op! {
}

#[macro_export]
macro_rules! impl_optional_metadata_value {
macro_rules! impl_optional_meta_value {
    ($($val_ty: ty), *) => {
        $(
            impl $val_ty {
@@ -1238,7 +1222,7 @@ macro_rules! impl_optional_metadata_value {
    }
}

impl_metadata_value! {
impl_table_meta_value! {
    TableNameValue,
    TableInfoValue,
    ViewInfoValue,
@@ -1246,11 +1230,10 @@ impl_metadata_value! {
    FlowInfoValue,
    FlowNameValue,
    FlowRouteValue,
    TableFlowValue,
    NodeAddressValue
    TableFlowValue
}

impl_optional_metadata_value! {
impl_optional_meta_value! {
    CatalogNameValue,
    SchemaNameValue
}
@@ -1969,7 +1952,7 @@ mod tests {
        let table_route_value = table_metadata_manager
            .table_route_manager
            .table_route_storage()
            .get_with_raw_bytes(table_id)
            .get_raw(table_id)
            .await
            .unwrap()
            .unwrap();
@@ -2022,7 +2005,7 @@ mod tests {
        let table_route_value = table_metadata_manager
            .table_route_manager
            .table_route_storage()
            .get_with_raw_bytes(table_id)
            .get_raw(table_id)
            .await
            .unwrap()
            .unwrap();
@@ -20,8 +20,8 @@ use futures::stream::BoxStream;
use serde::{Deserialize, Serialize};
use snafu::{OptionExt, ResultExt};

use crate::error::{self, Error, InvalidMetadataSnafu, Result};
use crate::key::{MetadataKey, CATALOG_NAME_KEY_PATTERN, CATALOG_NAME_KEY_PREFIX};
use crate::error::{self, Error, InvalidTableMetadataSnafu, Result};
use crate::key::{MetaKey, CATALOG_NAME_KEY_PATTERN, CATALOG_NAME_KEY_PREFIX};
use crate::kv_backend::KvBackendRef;
use crate::range_stream::{PaginationStream, DEFAULT_PAGE_SIZE};
use crate::rpc::store::RangeRequest;
@@ -56,14 +56,14 @@ impl<'a> CatalogNameKey<'a> {
    }
}

impl<'a> MetadataKey<'a, CatalogNameKey<'a>> for CatalogNameKey<'_> {
impl<'a> MetaKey<'a, CatalogNameKey<'a>> for CatalogNameKey<'_> {
    fn to_bytes(&self) -> Vec<u8> {
        self.to_string().into_bytes()
    }

    fn from_bytes(bytes: &'a [u8]) -> Result<CatalogNameKey<'a>> {
        let key = std::str::from_utf8(bytes).map_err(|e| {
            InvalidMetadataSnafu {
            InvalidTableMetadataSnafu {
                err_msg: format!(
                    "CatalogNameKey '{}' is not a valid UTF8 string: {e}",
                    String::from_utf8_lossy(bytes)
@@ -87,7 +87,7 @@ impl<'a> TryFrom<&'a str> for CatalogNameKey<'a> {
    fn try_from(s: &'a str) -> Result<Self> {
        let captures = CATALOG_NAME_KEY_PATTERN
            .captures(s)
            .context(InvalidMetadataSnafu {
            .context(InvalidTableMetadataSnafu {
                err_msg: format!("Illegal CatalogNameKey format: '{s}'"),
            })?;
@@ -22,10 +22,10 @@ use snafu::OptionExt;
use store_api::storage::RegionNumber;
use table::metadata::TableId;

use super::MetadataKey;
use crate::error::{InvalidMetadataSnafu, Result};
use super::MetaKey;
use crate::error::{InvalidTableMetadataSnafu, Result};
use crate::key::{
    MetadataValue, RegionDistribution, DATANODE_TABLE_KEY_PATTERN, DATANODE_TABLE_KEY_PREFIX,
    RegionDistribution, TableMetaValue, DATANODE_TABLE_KEY_PATTERN, DATANODE_TABLE_KEY_PREFIX,
};
use crate::kv_backend::txn::{Txn, TxnOp};
use crate::kv_backend::KvBackendRef;
@@ -77,14 +77,14 @@ impl DatanodeTableKey {
    }
}

impl<'a> MetadataKey<'a, DatanodeTableKey> for DatanodeTableKey {
impl<'a> MetaKey<'a, DatanodeTableKey> for DatanodeTableKey {
    fn to_bytes(&self) -> Vec<u8> {
        self.to_string().into_bytes()
    }

    fn from_bytes(bytes: &[u8]) -> Result<DatanodeTableKey> {
        let key = std::str::from_utf8(bytes).map_err(|e| {
            InvalidMetadataSnafu {
            InvalidTableMetadataSnafu {
                err_msg: format!(
                    "DatanodeTableKey '{}' is not a valid UTF8 string: {e}",
                    String::from_utf8_lossy(bytes)
@@ -92,11 +92,12 @@ impl<'a> MetadataKey<'a, DatanodeTableKey> for DatanodeTableKey {
            }
            .build()
        })?;
        let captures = DATANODE_TABLE_KEY_PATTERN
            .captures(key)
            .context(InvalidMetadataSnafu {
                err_msg: format!("Invalid DatanodeTableKey '{key}'"),
            })?;
        let captures =
            DATANODE_TABLE_KEY_PATTERN
                .captures(key)
                .context(InvalidTableMetadataSnafu {
                    err_msg: format!("Invalid DatanodeTableKey '{key}'"),
                })?;
        // Safety: pass the regex check above
        let datanode_id = captures[1].parse::<DatanodeId>().unwrap();
        let table_id = captures[2].parse::<TableId>().unwrap();
@@ -38,7 +38,7 @@ use crate::key::flow::flow_name::FlowNameManager;
|
||||
use crate::key::flow::flownode_flow::FlownodeFlowManager;
|
||||
pub use crate::key::flow::table_flow::{TableFlowManager, TableFlowManagerRef};
|
||||
use crate::key::txn_helper::TxnOpGetResponseSet;
|
||||
use crate::key::{FlowId, MetadataKey};
|
||||
use crate::key::{FlowId, MetaKey};
|
||||
use crate::kv_backend::txn::Txn;
|
||||
use crate::kv_backend::KvBackendRef;
|
||||
use crate::rpc::store::BatchDeleteRequest;
|
||||
@@ -66,7 +66,7 @@ impl<T> FlowScoped<T> {
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, T: MetadataKey<'a, T>> MetadataKey<'a, FlowScoped<T>> for FlowScoped<T> {
|
||||
impl<'a, T: MetaKey<'a, T>> MetaKey<'a, FlowScoped<T>> for FlowScoped<T> {
|
||||
fn to_bytes(&self) -> Vec<u8> {
|
||||
let prefix = FlowScoped::<T>::PREFIX.as_bytes();
|
||||
let inner = self.inner.to_bytes();
|
||||
@@ -295,7 +295,7 @@ mod tests {
|
||||
inner: Vec<u8>,
|
||||
}
|
||||
|
||||
impl<'a> MetadataKey<'a, MockKey> for MockKey {
|
||||
impl<'a> MetaKey<'a, MockKey> for MockKey {
|
||||
fn to_bytes(&self) -> Vec<u8> {
|
||||
self.inner.clone()
|
||||
}
|
||||
|
||||
@@ -25,7 +25,7 @@ use table::table_name::TableName;
|
||||
use crate::error::{self, Result};
|
||||
use crate::key::flow::FlowScoped;
|
||||
use crate::key::txn_helper::TxnOpGetResponseSet;
|
||||
use crate::key::{DeserializedValueWithBytes, FlowId, FlowPartitionId, MetadataKey, MetadataValue};
|
||||
use crate::key::{DeserializedValueWithBytes, FlowId, FlowPartitionId, MetaKey, TableMetaValue};
|
||||
use crate::kv_backend::txn::Txn;
|
||||
use crate::kv_backend::KvBackendRef;
|
||||
use crate::FlownodeId;
|
||||
@@ -42,7 +42,7 @@ lazy_static! {
|
||||
/// The layout: `__flow/info/{flow_id}`.
|
||||
pub struct FlowInfoKey(FlowScoped<FlowInfoKeyInner>);
|
||||
|
||||
impl<'a> MetadataKey<'a, FlowInfoKey> for FlowInfoKey {
|
||||
impl<'a> MetaKey<'a, FlowInfoKey> for FlowInfoKey {
|
||||
fn to_bytes(&self) -> Vec<u8> {
|
||||
self.0.to_bytes()
|
||||
}
|
||||
@@ -80,14 +80,14 @@ impl FlowInfoKeyInner {
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> MetadataKey<'a, FlowInfoKeyInner> for FlowInfoKeyInner {
|
||||
impl<'a> MetaKey<'a, FlowInfoKeyInner> for FlowInfoKeyInner {
|
||||
fn to_bytes(&self) -> Vec<u8> {
|
||||
format!("{FLOW_INFO_KEY_PREFIX}/{}", self.flow_id).into_bytes()
|
||||
}
|
||||
|
||||
fn from_bytes(bytes: &'a [u8]) -> Result<FlowInfoKeyInner> {
|
||||
let key = std::str::from_utf8(bytes).map_err(|e| {
|
||||
error::InvalidMetadataSnafu {
|
||||
error::InvalidTableMetadataSnafu {
|
||||
err_msg: format!(
|
||||
"FlowInfoKeyInner '{}' is not a valid UTF8 string: {e}",
|
||||
String::from_utf8_lossy(bytes)
|
||||
@@ -98,7 +98,7 @@ impl<'a> MetadataKey<'a, FlowInfoKeyInner> for FlowInfoKeyInner {
|
||||
let captures =
|
||||
FLOW_INFO_KEY_PATTERN
|
||||
.captures(key)
|
||||
.context(error::InvalidMetadataSnafu {
|
||||
.context(error::InvalidTableMetadataSnafu {
|
||||
err_msg: format!("Invalid FlowInfoKeyInner '{key}'"),
|
||||
})?;
|
||||
// Safety: pass the regex check above
|
||||
|
||||
@@ -24,7 +24,7 @@ use crate::error::{self, Result};
|
||||
use crate::key::flow::FlowScoped;
|
||||
use crate::key::txn_helper::TxnOpGetResponseSet;
|
||||
use crate::key::{
|
||||
BytesAdapter, DeserializedValueWithBytes, FlowId, MetadataKey, MetadataValue, NAME_PATTERN,
|
||||
BytesAdapter, DeserializedValueWithBytes, FlowId, MetaKey, TableMetaValue, NAME_PATTERN,
|
||||
};
|
||||
use crate::kv_backend::txn::Txn;
|
||||
use crate::kv_backend::KvBackendRef;
|
||||
@@ -76,7 +76,7 @@ impl<'a> FlowNameKey<'a> {
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> MetadataKey<'a, FlowNameKey<'a>> for FlowNameKey<'a> {
|
||||
impl<'a> MetaKey<'a, FlowNameKey<'a>> for FlowNameKey<'a> {
|
||||
fn to_bytes(&self) -> Vec<u8> {
|
||||
self.0.to_bytes()
|
||||
}
|
||||
@@ -95,7 +95,7 @@ pub struct FlowNameKeyInner<'a> {
|
||||
pub flow_name: &'a str,
|
||||
}
|
||||
|
||||
impl<'a> MetadataKey<'a, FlowNameKeyInner<'a>> for FlowNameKeyInner<'_> {
|
||||
impl<'a> MetaKey<'a, FlowNameKeyInner<'a>> for FlowNameKeyInner<'_> {
|
||||
fn to_bytes(&self) -> Vec<u8> {
|
||||
format!(
|
||||
"{FLOW_NAME_KEY_PREFIX}/{}/{}",
|
||||
@@ -106,7 +106,7 @@ impl<'a> MetadataKey<'a, FlowNameKeyInner<'a>> for FlowNameKeyInner<'_> {
|
||||
|
||||
fn from_bytes(bytes: &'a [u8]) -> Result<FlowNameKeyInner> {
|
||||
let key = std::str::from_utf8(bytes).map_err(|e| {
|
||||
error::InvalidMetadataSnafu {
|
||||
error::InvalidTableMetadataSnafu {
|
||||
err_msg: format!(
|
||||
"FlowNameKeyInner '{}' is not a valid UTF8 string: {e}",
|
||||
String::from_utf8_lossy(bytes)
|
||||
@@ -117,7 +117,7 @@ impl<'a> MetadataKey<'a, FlowNameKeyInner<'a>> for FlowNameKeyInner<'_> {
|
||||
let captures =
|
||||
FLOW_NAME_KEY_PATTERN
|
||||
.captures(key)
|
||||
.context(error::InvalidMetadataSnafu {
|
||||
.context(error::InvalidTableMetadataSnafu {
|
||||
err_msg: format!("Invalid FlowNameKeyInner '{key}'"),
|
||||
})?;
|
||||
// Safety: pass the regex check above
|
||||
|
||||
@@ -22,7 +22,7 @@ use snafu::OptionExt;
|
||||
|
||||
use crate::error::{self, Result};
|
||||
use crate::key::flow::FlowScoped;
|
||||
use crate::key::{BytesAdapter, FlowId, FlowPartitionId, MetadataKey, MetadataValue};
|
||||
use crate::key::{BytesAdapter, FlowId, FlowPartitionId, MetaKey, TableMetaValue};
|
||||
use crate::kv_backend::txn::{Txn, TxnOp};
|
||||
use crate::kv_backend::KvBackendRef;
|
||||
use crate::peer::Peer;
|
||||
@@ -68,7 +68,7 @@ impl FlowRouteKey {
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> MetadataKey<'a, FlowRouteKey> for FlowRouteKey {
|
||||
impl<'a> MetaKey<'a, FlowRouteKey> for FlowRouteKey {
|
||||
fn to_bytes(&self) -> Vec<u8> {
|
||||
self.0.to_bytes()
|
||||
}
|
||||
@@ -101,7 +101,7 @@ impl FlowRouteKeyInner {
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> MetadataKey<'a, FlowRouteKeyInner> for FlowRouteKeyInner {
|
||||
impl<'a> MetaKey<'a, FlowRouteKeyInner> for FlowRouteKeyInner {
|
||||
fn to_bytes(&self) -> Vec<u8> {
|
||||
format!(
|
||||
"{FLOW_ROUTE_KEY_PREFIX}/{}/{}",
|
||||
@@ -112,7 +112,7 @@ impl<'a> MetadataKey<'a, FlowRouteKeyInner> for FlowRouteKeyInner {
|
||||
|
||||
fn from_bytes(bytes: &'a [u8]) -> Result<FlowRouteKeyInner> {
|
||||
let key = std::str::from_utf8(bytes).map_err(|e| {
|
||||
error::InvalidMetadataSnafu {
|
||||
error::InvalidTableMetadataSnafu {
|
||||
err_msg: format!(
|
||||
"FlowInfoKeyInner '{}' is not a valid UTF8 string: {e}",
|
||||
String::from_utf8_lossy(bytes)
|
||||
@@ -123,7 +123,7 @@ impl<'a> MetadataKey<'a, FlowRouteKeyInner> for FlowRouteKeyInner {
|
||||
let captures =
|
||||
FLOW_ROUTE_KEY_PATTERN
|
||||
.captures(key)
|
||||
.context(error::InvalidMetadataSnafu {
|
||||
.context(error::InvalidTableMetadataSnafu {
|
||||
err_msg: format!("Invalid FlowInfoKeyInner '{key}'"),
|
||||
})?;
|
||||
// Safety: pass the regex check above
|
||||
@@ -209,7 +209,7 @@ impl FlowRouteManager {
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::FlowRouteKey;
|
||||
use crate::key::MetadataKey;
|
||||
use crate::key::MetaKey;
|
||||
|
||||
#[test]
|
||||
fn test_key_serialization() {
|
||||
|
||||
@@ -22,7 +22,7 @@ use snafu::OptionExt;

use crate::error::{self, Result};
use crate::key::flow::FlowScoped;
use crate::key::{BytesAdapter, FlowId, FlowPartitionId, MetadataKey};
use crate::key::{BytesAdapter, FlowId, FlowPartitionId, MetaKey};
use crate::kv_backend::txn::{Txn, TxnOp};
use crate::kv_backend::KvBackendRef;
use crate::range_stream::{PaginationStream, DEFAULT_PAGE_SIZE};

@@ -44,7 +44,7 @@ const FLOWNODE_FLOW_KEY_PREFIX: &str = "flownode";
/// The layout `__flow/flownode/{flownode_id}/{flow_id}/{partition_id}`
pub struct FlownodeFlowKey(FlowScoped<FlownodeFlowKeyInner>);

impl<'a> MetadataKey<'a, FlownodeFlowKey> for FlownodeFlowKey {
impl<'a> MetaKey<'a, FlownodeFlowKey> for FlownodeFlowKey {
    fn to_bytes(&self) -> Vec<u8> {
        self.0.to_bytes()
    }

@@ -113,7 +113,7 @@ impl FlownodeFlowKeyInner {
    }
}

impl<'a> MetadataKey<'a, FlownodeFlowKeyInner> for FlownodeFlowKeyInner {
impl<'a> MetaKey<'a, FlownodeFlowKeyInner> for FlownodeFlowKeyInner {
    fn to_bytes(&self) -> Vec<u8> {
        format!(
            "{FLOWNODE_FLOW_KEY_PREFIX}/{}/{}/{}",

@@ -124,7 +124,7 @@ impl<'a> MetadataKey<'a, FlownodeFlowKeyInner> for FlownodeFlowKeyInner {

    fn from_bytes(bytes: &'a [u8]) -> Result<FlownodeFlowKeyInner> {
        let key = std::str::from_utf8(bytes).map_err(|e| {
            error::InvalidMetadataSnafu {
            error::InvalidTableMetadataSnafu {
                err_msg: format!(
                    "FlownodeFlowKeyInner '{}' is not a valid UTF8 string: {e}",
                    String::from_utf8_lossy(bytes)

@@ -135,7 +135,7 @@ impl<'a> MetadataKey<'a, FlownodeFlowKeyInner> for FlownodeFlowKeyInner {
        let captures =
            FLOWNODE_FLOW_KEY_PATTERN
                .captures(key)
                .context(error::InvalidMetadataSnafu {
                .context(error::InvalidTableMetadataSnafu {
                    err_msg: format!("Invalid FlownodeFlowKeyInner '{key}'"),
                })?;
        // Safety: pass the regex check above

@@ -208,7 +208,7 @@ impl FlownodeFlowManager {
#[cfg(test)]
mod tests {
    use crate::key::flow::flownode_flow::FlownodeFlowKey;
    use crate::key::MetadataKey;
    use crate::key::MetaKey;

    #[test]
    fn test_key_serialization() {

@@ -23,7 +23,7 @@ use table::metadata::TableId;

use crate::error::{self, Result};
use crate::key::flow::FlowScoped;
use crate::key::{BytesAdapter, FlowId, FlowPartitionId, MetadataKey, MetadataValue};
use crate::key::{BytesAdapter, FlowId, FlowPartitionId, MetaKey, TableMetaValue};
use crate::kv_backend::txn::{Txn, TxnOp};
use crate::kv_backend::KvBackendRef;
use crate::peer::Peer;

@@ -56,7 +56,7 @@ struct TableFlowKeyInner {
#[derive(Debug, PartialEq)]
pub struct TableFlowKey(FlowScoped<TableFlowKeyInner>);

impl<'a> MetadataKey<'a, TableFlowKey> for TableFlowKey {
impl<'a> MetaKey<'a, TableFlowKey> for TableFlowKey {
    fn to_bytes(&self) -> Vec<u8> {
        self.0.to_bytes()
    }

@@ -129,7 +129,7 @@ impl TableFlowKeyInner {
    }
}

impl<'a> MetadataKey<'a, TableFlowKeyInner> for TableFlowKeyInner {
impl<'a> MetaKey<'a, TableFlowKeyInner> for TableFlowKeyInner {
    fn to_bytes(&self) -> Vec<u8> {
        format!(
            "{TABLE_FLOW_KEY_PREFIX}/{}/{}/{}/{}",

@@ -140,7 +140,7 @@ impl<'a> MetadataKey<'a, TableFlowKeyInner> for TableFlowKeyInner {

    fn from_bytes(bytes: &'a [u8]) -> Result<TableFlowKeyInner> {
        let key = std::str::from_utf8(bytes).map_err(|e| {
            error::InvalidMetadataSnafu {
            error::InvalidTableMetadataSnafu {
                err_msg: format!(
                    "TableFlowKeyInner '{}' is not a valid UTF8 string: {e}",
                    String::from_utf8_lossy(bytes)

@@ -151,7 +151,7 @@ impl<'a> MetadataKey<'a, TableFlowKeyInner> for TableFlowKeyInner {
        let captures =
            TABLE_FLOW_KEY_PATTERN
                .captures(key)
                .context(error::InvalidMetadataSnafu {
                .context(error::InvalidTableMetadataSnafu {
                    err_msg: format!("Invalid TableFlowKeyInner '{key}'"),
                })?;
        // Safety: pass the regex check above

@@ -1,114 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::fmt::Display;

use api::v1::meta::Role;
use serde::{Deserialize, Serialize};
use snafu::OptionExt;

use crate::error::{InvalidMetadataSnafu, Result};
use crate::key::{MetadataKey, NODE_ADDRESS_PATTERN, NODE_ADDRESS_PREFIX};
use crate::peer::Peer;

/// The key stores node address.
///
/// The layout: `__node_address/{role}/{node_id}`
#[derive(Debug, PartialEq)]
pub struct NodeAddressKey {
    pub role: Role,
    pub node_id: u64,
}

impl NodeAddressKey {
    pub fn new(role: Role, node_id: u64) -> Self {
        Self { role, node_id }
    }

    pub fn with_datanode(node_id: u64) -> Self {
        Self::new(Role::Datanode, node_id)
    }
}

#[derive(Debug, PartialEq, Serialize, Deserialize, Clone)]
pub struct NodeAddressValue {
    pub peer: Peer,
}

impl NodeAddressValue {
    pub fn new(peer: Peer) -> Self {
        Self { peer }
    }
}

impl<'a> MetadataKey<'a, NodeAddressKey> for NodeAddressKey {
    fn to_bytes(&self) -> Vec<u8> {
        self.to_string().into_bytes()
    }

    fn from_bytes(bytes: &[u8]) -> Result<NodeAddressKey> {
        let key = std::str::from_utf8(bytes).map_err(|e| {
            InvalidMetadataSnafu {
                err_msg: format!(
                    "NodeAddressKey '{}' is not a valid UTF8 string: {e}",
                    String::from_utf8_lossy(bytes)
                ),
            }
            .build()
        })?;
        let captures = NODE_ADDRESS_PATTERN
            .captures(key)
            .context(InvalidMetadataSnafu {
                err_msg: format!("Invalid NodeAddressKey '{key}'"),
            })?;
        // Safety: pass the regex check above
        let role = captures[1].parse::<i32>().unwrap();
        let role = Role::try_from(role).map_err(|_| {
            InvalidMetadataSnafu {
                err_msg: format!("Invalid Role value: {role}"),
            }
            .build()
        })?;
        let node_id = captures[2].parse::<u64>().unwrap();
        Ok(NodeAddressKey::new(role, node_id))
    }
}

impl Display for NodeAddressKey {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(
            f,
            "{}/{}/{}",
            NODE_ADDRESS_PREFIX, self.role as i32, self.node_id
        )
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_node_address_key() {
        let key = NodeAddressKey::new(Role::Datanode, 1);
        let bytes = key.to_bytes();
        let key2 = NodeAddressKey::from_bytes(&bytes).unwrap();
        assert_eq!(key, key2);

        let key = NodeAddressKey::new(Role::Flownode, 3);
        let bytes = key.to_bytes();
        let key2 = NodeAddressKey::from_bytes(&bytes).unwrap();
        assert_eq!(key, key2);
    }
}

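The deleted file keeps the key and its `Display` form in lockstep: the key renders as `__node_address/{role}/{node_id}` with the role stored as an integer, and `from_bytes` reverses it. A std-only sketch of encoding an enum discriminant into such a key and recovering it (toy types; the discriminant values are illustrative, not the protobuf ones):

use std::fmt;

#[derive(Clone, Copy)]
enum Role {
    // Discriminants are illustrative, not the real protobuf values.
    Datanode = 0,
    Flownode = 1,
}

struct NodeAddressKey {
    role: Role,
    node_id: u64,
}

impl fmt::Display for NodeAddressKey {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Mirrors the `__node_address/{role}/{node_id}` layout above.
        write!(f, "__node_address/{}/{}", self.role as i32, self.node_id)
    }
}

fn parse(key: &str) -> Option<(i32, u64)> {
    let rest = key.strip_prefix("__node_address/")?;
    let (role, node_id) = rest.split_once('/')?;
    Some((role.parse().ok()?, node_id.parse().ok()?))
}

fn main() {
    let key = NodeAddressKey { role: Role::Datanode, node_id: 1 };
    assert_eq!(parse(&key.to_string()), Some((0, 1)));
    let key = NodeAddressKey { role: Role::Flownode, node_id: 3 };
    assert_eq!(parse(&key.to_string()), Some((1, 3)));
}
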
@@ -23,8 +23,8 @@ use humantime_serde::re::humantime;
use serde::{Deserialize, Serialize};
use snafu::{OptionExt, ResultExt};

use crate::error::{self, Error, InvalidMetadataSnafu, ParseOptionSnafu, Result};
use crate::key::{MetadataKey, SCHEMA_NAME_KEY_PATTERN, SCHEMA_NAME_KEY_PREFIX};
use crate::error::{self, Error, InvalidTableMetadataSnafu, ParseOptionSnafu, Result};
use crate::key::{MetaKey, SCHEMA_NAME_KEY_PATTERN, SCHEMA_NAME_KEY_PREFIX};
use crate::kv_backend::KvBackendRef;
use crate::range_stream::{PaginationStream, DEFAULT_PAGE_SIZE};
use crate::rpc::store::RangeRequest;

@@ -89,19 +89,6 @@ impl TryFrom<&HashMap<String, String>> for SchemaNameValue {
    }
}

impl From<SchemaNameValue> for HashMap<String, String> {
    fn from(value: SchemaNameValue) -> Self {
        let mut opts = HashMap::new();
        if let Some(ttl) = value.ttl {
            opts.insert(
                OPT_KEY_TTL.to_string(),
                format!("{}", humantime::format_duration(ttl)),
            );
        }
        opts
    }
}

impl<'a> SchemaNameKey<'a> {
    pub fn new(catalog: &'a str, schema: &'a str) -> Self {
        Self { catalog, schema }

@@ -122,14 +109,14 @@ impl Display for SchemaNameKey<'_> {
    }
}

impl<'a> MetadataKey<'a, SchemaNameKey<'a>> for SchemaNameKey<'_> {
impl<'a> MetaKey<'a, SchemaNameKey<'a>> for SchemaNameKey<'_> {
    fn to_bytes(&self) -> Vec<u8> {
        self.to_string().into_bytes()
    }

    fn from_bytes(bytes: &'a [u8]) -> Result<SchemaNameKey<'a>> {
        let key = std::str::from_utf8(bytes).map_err(|e| {
            InvalidMetadataSnafu {
            InvalidTableMetadataSnafu {
                err_msg: format!(
                    "SchemaNameKey '{}' is not a valid UTF8 string: {e}",
                    String::from_utf8_lossy(bytes)

@@ -155,7 +142,7 @@ impl<'a> TryFrom<&'a str> for SchemaNameKey<'a> {
    fn try_from(s: &'a str) -> Result<Self> {
        let captures = SCHEMA_NAME_KEY_PATTERN
            .captures(s)
            .context(InvalidMetadataSnafu {
            .context(InvalidTableMetadataSnafu {
                err_msg: format!("Illegal SchemaNameKey format: '{s}'"),
            })?;

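The removed `From<SchemaNameValue> for HashMap<String, String>` conversion rendered the schema TTL with humantime, which is what makes the stored `ttl` option human-readable. A small illustration of that formatting and its inverse (assumes the humantime crate, which the hunk above pulls in via `humantime_serde`):

use std::time::Duration;

fn main() {
    // 90 seconds renders as "1m 30s"; parsing reverses it.
    let ttl = Duration::from_secs(90);
    let rendered = format!("{}", humantime::format_duration(ttl));
    assert_eq!(rendered, "1m 30s");
    assert_eq!(humantime::parse_duration(&rendered).unwrap(), ttl);
}
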
@@ -23,9 +23,9 @@ use table::table_name::TableName;
use table::table_reference::TableReference;

use super::TABLE_INFO_KEY_PATTERN;
use crate::error::{InvalidMetadataSnafu, Result};
use crate::error::{InvalidTableMetadataSnafu, Result};
use crate::key::txn_helper::TxnOpGetResponseSet;
use crate::key::{DeserializedValueWithBytes, MetadataKey, MetadataValue, TABLE_INFO_KEY_PREFIX};
use crate::key::{DeserializedValueWithBytes, MetaKey, TableMetaValue, TABLE_INFO_KEY_PREFIX};
use crate::kv_backend::txn::Txn;
use crate::kv_backend::KvBackendRef;
use crate::rpc::store::BatchGetRequest;

@@ -51,14 +51,14 @@ impl Display for TableInfoKey {
    }
}

impl<'a> MetadataKey<'a, TableInfoKey> for TableInfoKey {
impl<'a> MetaKey<'a, TableInfoKey> for TableInfoKey {
    fn to_bytes(&self) -> Vec<u8> {
        self.to_string().into_bytes()
    }

    fn from_bytes(bytes: &[u8]) -> Result<TableInfoKey> {
        let key = std::str::from_utf8(bytes).map_err(|e| {
            InvalidMetadataSnafu {
            InvalidTableMetadataSnafu {
                err_msg: format!(
                    "TableInfoKey '{}' is not a valid UTF8 string: {e}",
                    String::from_utf8_lossy(bytes)

@@ -68,7 +68,7 @@ impl<'a> MetadataKey<'a, TableInfoKey> for TableInfoKey {
        })?;
        let captures = TABLE_INFO_KEY_PATTERN
            .captures(key)
            .context(InvalidMetadataSnafu {
            .context(InvalidTableMetadataSnafu {
                err_msg: format!("Invalid TableInfoKey '{key}'"),
            })?;
        // Safety: pass the regex check above

@@ -22,8 +22,8 @@ use snafu::OptionExt;
use table::metadata::TableId;
use table::table_name::TableName;

use super::{MetadataKey, MetadataValue, TABLE_NAME_KEY_PATTERN, TABLE_NAME_KEY_PREFIX};
use crate::error::{Error, InvalidMetadataSnafu, Result};
use super::{MetaKey, TableMetaValue, TABLE_NAME_KEY_PATTERN, TABLE_NAME_KEY_PREFIX};
use crate::error::{Error, InvalidTableMetadataSnafu, Result};
use crate::kv_backend::memory::MemoryKvBackend;
use crate::kv_backend::txn::{Txn, TxnOp};
use crate::kv_backend::KvBackendRef;

@@ -63,14 +63,14 @@ impl Display for TableNameKey<'_> {
    }
}

impl<'a> MetadataKey<'a, TableNameKey<'a>> for TableNameKey<'_> {
impl<'a> MetaKey<'a, TableNameKey<'a>> for TableNameKey<'_> {
    fn to_bytes(&self) -> Vec<u8> {
        self.to_string().into_bytes()
    }

    fn from_bytes(bytes: &'a [u8]) -> Result<TableNameKey<'a>> {
        let key = std::str::from_utf8(bytes).map_err(|e| {
            InvalidMetadataSnafu {
            InvalidTableMetadataSnafu {
                err_msg: format!(
                    "TableNameKey '{}' is not a valid UTF8 string: {e}",
                    String::from_utf8_lossy(bytes)

@@ -80,7 +80,7 @@ impl<'a> MetadataKey<'a, TableNameKey<'a>> for TableNameKey<'_> {
        })?;
        let captures = TABLE_NAME_KEY_PATTERN
            .captures(key)
            .context(InvalidMetadataSnafu {
            .context(InvalidTableMetadataSnafu {
                err_msg: format!("Invalid TableNameKey '{key}'"),
            })?;
        let catalog = captures.get(1).unwrap().as_str();

@@ -128,7 +128,7 @@ impl<'a> TryFrom<&'a str> for TableNameKey<'a> {
    fn try_from(s: &'a str) -> Result<Self> {
        let captures = TABLE_NAME_KEY_PATTERN
            .captures(s)
            .context(InvalidMetadataSnafu {
            .context(InvalidTableMetadataSnafu {
                err_msg: format!("Illegal TableNameKey format: '{s}'"),
            })?;
        // Safety: pass the regex check above

@@ -22,13 +22,12 @@ use store_api::storage::{RegionId, RegionNumber};
use table::metadata::TableId;

use crate::error::{
    InvalidMetadataSnafu, MetadataCorruptionSnafu, Result, SerdeJsonSnafu, TableRouteNotFoundSnafu,
    UnexpectedLogicalRouteTableSnafu,
    self, InvalidTableMetadataSnafu, MetadataCorruptionSnafu, Result, SerdeJsonSnafu,
    TableRouteNotFoundSnafu, UnexpectedLogicalRouteTableSnafu,
};
use crate::key::node_address::{NodeAddressKey, NodeAddressValue};
use crate::key::txn_helper::TxnOpGetResponseSet;
use crate::key::{
    DeserializedValueWithBytes, MetadataKey, MetadataValue, RegionDistribution,
    DeserializedValueWithBytes, MetaKey, RegionDistribution, TableMetaValue,
    TABLE_ROUTE_KEY_PATTERN, TABLE_ROUTE_PREFIX,
};
use crate::kv_backend::txn::Txn;

@@ -86,7 +85,7 @@ impl TableRouteValue {
                debug_assert_eq!(region.region.id.table_id(), physical_table_id);
                RegionId::new(table_id, region.region.id.region_number())
            })
            .collect();
            .collect::<Vec<_>>();
        TableRouteValue::logical(physical_table_id, region_routes)
    }
}

@@ -190,17 +189,17 @@ impl TableRouteValue {
                .region_routes
                .iter()
                .map(|region_route| region_route.region.id.region_number())
                .collect(),
                .collect::<Vec<_>>(),
            TableRouteValue::Logical(x) => x
                .region_ids()
                .iter()
                .map(|region_id| region_id.region_number())
                .collect(),
                .collect::<Vec<_>>(),
        }
    }
}

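The `.collect()` to `.collect::<Vec<_>>()` edits above change no behavior; the turbofish only names the collection type where surrounding code gives inference nothing to go on. A one-screen illustration:

fn main() {
    let ids = [1u32, 129, 300];
    // With an annotated binding, plain `.collect()` would also work; the
    // turbofish keeps the pipeline self-describing when the target type
    // (for example, inside a match arm) cannot guide inference.
    let region_numbers = ids.iter().map(|id| id % 128).collect::<Vec<_>>();
    assert_eq!(region_numbers, vec![1, 1, 44]);
}
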
impl MetadataValue for TableRouteValue {
impl TableMetaValue for TableRouteValue {
    fn try_from_raw_value(raw_value: &[u8]) -> Result<Self> {
        let r = serde_json::from_slice::<TableRouteValue>(raw_value);
        match r {

@@ -245,14 +244,14 @@ impl LogicalTableRouteValue {
    }
}

impl<'a> MetadataKey<'a, TableRouteKey> for TableRouteKey {
impl<'a> MetaKey<'a, TableRouteKey> for TableRouteKey {
    fn to_bytes(&self) -> Vec<u8> {
        self.to_string().into_bytes()
    }

    fn from_bytes(bytes: &[u8]) -> Result<TableRouteKey> {
        let key = std::str::from_utf8(bytes).map_err(|e| {
            InvalidMetadataSnafu {
            InvalidTableMetadataSnafu {
                err_msg: format!(
                    "TableRouteKey '{}' is not a valid UTF8 string: {e}",
                    String::from_utf8_lossy(bytes)

@@ -260,11 +259,12 @@ impl<'a> MetadataKey<'a, TableRouteKey> for TableRouteKey {
            }
            .build()
        })?;
        let captures = TABLE_ROUTE_KEY_PATTERN
            .captures(key)
            .context(InvalidMetadataSnafu {
                err_msg: format!("Invalid TableRouteKey '{key}'"),
            })?;
        let captures =
            TABLE_ROUTE_KEY_PATTERN
                .captures(key)
                .context(InvalidTableMetadataSnafu {
                    err_msg: format!("Invalid TableRouteKey '{key}'"),
                })?;
        // Safety: pass the regex check above
        let table_id = captures[1].parse::<TableId>().unwrap();
        Ok(TableRouteKey { table_id })

@@ -302,7 +302,7 @@ impl TableRouteManager {
            Some(route) => {
                ensure!(
                    route.is_physical(),
                    UnexpectedLogicalRouteTableSnafu {
                    error::UnexpectedLogicalRouteTableSnafu {
                        err_msg: format!("{route:?} is a non-physical TableRouteValue.")
                    }
                );

@@ -322,7 +322,7 @@ impl TableRouteManager {
    ) -> Result<TableId> {
        let table_route = self
            .storage
            .get_inner(logical_or_physical_table_id)
            .get(logical_or_physical_table_id)
            .await?
            .context(TableRouteNotFoundSnafu {
                table_id: logical_or_physical_table_id,

@@ -336,7 +336,7 @@ impl TableRouteManager {

    /// Returns the [TableRouteValue::Physical] recursively.
    ///
    /// Returns a [TableRouteNotFound](error::Error::TableRouteNotFound) Error if:
    /// Returns a [TableRouteNotFound](crate::error::Error::TableRouteNotFound) Error if:
    /// - the physical table(`logical_or_physical_table_id`) does not exist
    /// - the corresponding physical table of the logical table(`logical_or_physical_table_id`) does not exist.
    pub async fn get_physical_table_route(

@@ -529,15 +529,6 @@ impl TableRouteStorage {

    /// Returns the [`TableRouteValue`].
    pub async fn get(&self, table_id: TableId) -> Result<Option<TableRouteValue>> {
        let mut table_route = self.get_inner(table_id).await?;
        if let Some(table_route) = &mut table_route {
            self.remap_route_address(table_route).await?;
        };

        Ok(table_route)
    }

    async fn get_inner(&self, table_id: TableId) -> Result<Option<TableRouteValue>> {
        let key = TableRouteKey::new(table_id);
        self.kv_backend
            .get(&key.to_bytes())

@@ -547,19 +538,7 @@ impl TableRouteStorage {
    }

    /// Returns the [`TableRouteValue`] wrapped with [`DeserializedValueWithBytes`].
    pub async fn get_with_raw_bytes(
        &self,
        table_id: TableId,
    ) -> Result<Option<DeserializedValueWithBytes<TableRouteValue>>> {
        let mut table_route = self.get_with_raw_bytes_inner(table_id).await?;
        if let Some(table_route) = &mut table_route {
            self.remap_route_address(table_route).await?;
        };

        Ok(table_route)
    }

    async fn get_with_raw_bytes_inner(
    pub async fn get_raw(
        &self,
        table_id: TableId,
    ) -> Result<Option<DeserializedValueWithBytes<TableRouteValue>>> {

@@ -576,27 +555,27 @@ impl TableRouteStorage {
    /// Returns a [TableRouteNotFound](crate::error::Error::TableRouteNotFound) Error if:
    /// - the physical table(`logical_or_physical_table_id`) does not exist
    /// - the corresponding physical table of the logical table(`logical_or_physical_table_id`) does not exist.
    pub async fn get_physical_table_route_with_raw_bytes(
    pub async fn get_raw_physical_table_route(
        &self,
        logical_or_physical_table_id: TableId,
    ) -> Result<(TableId, DeserializedValueWithBytes<TableRouteValue>)> {
        let table_route = self
            .get_with_raw_bytes(logical_or_physical_table_id)
            .await?
            .context(TableRouteNotFoundSnafu {
                table_id: logical_or_physical_table_id,
            })?;
        let table_route =
            self.get_raw(logical_or_physical_table_id)
                .await?
                .context(TableRouteNotFoundSnafu {
                    table_id: logical_or_physical_table_id,
                })?;

        match table_route.get_inner_ref() {
            TableRouteValue::Physical(_) => Ok((logical_or_physical_table_id, table_route)),
            TableRouteValue::Logical(x) => {
                let physical_table_id = x.physical_table_id();
                let physical_table_route = self
                    .get_with_raw_bytes(physical_table_id)
                    .await?
                    .context(TableRouteNotFoundSnafu {
                        table_id: physical_table_id,
                    })?;
                let physical_table_route =
                    self.get_raw(physical_table_id)
                        .await?
                        .context(TableRouteNotFoundSnafu {
                            table_id: physical_table_id,
                        })?;
                Ok((physical_table_id, physical_table_route))
            }
        }

@@ -604,13 +583,6 @@ impl TableRouteStorage {

    /// Returns batch of [`TableRouteValue`] that respects the order of `table_ids`.
    pub async fn batch_get(&self, table_ids: &[TableId]) -> Result<Vec<Option<TableRouteValue>>> {
        let mut table_routes = self.batch_get_inner(table_ids).await?;
        self.remap_routes_addresses(&mut table_routes).await?;

        Ok(table_routes)
    }

    async fn batch_get_inner(&self, table_ids: &[TableId]) -> Result<Vec<Option<TableRouteValue>>> {
        let keys = table_ids
            .iter()
            .map(|id| TableRouteKey::new(*id).to_bytes())

@@ -633,107 +605,8 @@ impl TableRouteStorage {
                    Ok(None)
                }
            })
            .collect()
            .collect::<Result<Vec<_>>>()
    }

    async fn remap_routes_addresses(
        &self,
        table_routes: &mut [Option<TableRouteValue>],
    ) -> Result<()> {
        let keys = table_routes
            .iter()
            .flat_map(|table_route| {
                table_route
                    .as_ref()
                    .map(extract_address_keys)
                    .unwrap_or_default()
            })
            .collect::<HashSet<_>>()
            .into_iter()
            .collect();
        let node_addrs = self.get_node_addresses(keys).await?;
        for table_route in table_routes.iter_mut().flatten() {
            set_addresses(&node_addrs, table_route)?;
        }

        Ok(())
    }

    async fn remap_route_address(&self, table_route: &mut TableRouteValue) -> Result<()> {
        let keys = extract_address_keys(table_route).into_iter().collect();
        let node_addrs = self.get_node_addresses(keys).await?;
        set_addresses(&node_addrs, table_route)?;

        Ok(())
    }

    async fn get_node_addresses(
        &self,
        keys: Vec<Vec<u8>>,
    ) -> Result<HashMap<u64, NodeAddressValue>> {
        if keys.is_empty() {
            return Ok(HashMap::default());
        }

        self.kv_backend
            .batch_get(BatchGetRequest { keys })
            .await?
            .kvs
            .into_iter()
            .map(|kv| {
                let node_id = NodeAddressKey::from_bytes(&kv.key)?.node_id;
                let node_addr = NodeAddressValue::try_from_raw_value(&kv.value)?;
                Ok((node_id, node_addr))
            })
            .collect()
    }
}

fn set_addresses(
    node_addrs: &HashMap<u64, NodeAddressValue>,
    table_route: &mut TableRouteValue,
) -> Result<()> {
    let TableRouteValue::Physical(physical_table_route) = table_route else {
        return Ok(());
    };

    for region_route in &mut physical_table_route.region_routes {
        if let Some(leader) = &mut region_route.leader_peer {
            if let Some(node_addr) = node_addrs.get(&leader.id) {
                leader.addr = node_addr.peer.addr.clone();
            }
        }
        for follower in &mut region_route.follower_peers {
            if let Some(node_addr) = node_addrs.get(&follower.id) {
                follower.addr = node_addr.peer.addr.clone();
            }
        }
    }

    Ok(())
}

fn extract_address_keys(table_route: &TableRouteValue) -> HashSet<Vec<u8>> {
    let TableRouteValue::Physical(physical_table_route) = table_route else {
        return HashSet::default();
    };

    physical_table_route
        .region_routes
        .iter()
        .flat_map(|region_route| {
            region_route
                .follower_peers
                .iter()
                .map(|peer| NodeAddressKey::with_datanode(peer.id).to_bytes())
                .chain(
                    region_route
                        .leader_peer
                        .as_ref()
                        .map(|leader| NodeAddressKey::with_datanode(leader.id).to_bytes()),
                )
        })
        .collect()
}

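The helpers above (`remap_routes_addresses`, `remap_route_address`, `get_node_addresses`, `set_addresses`, `extract_address_keys`) implement one small idea: after loading a route, overwrite each peer's cached address with the latest one registered under its node id, and leave peers without a fresh record untouched. A self-contained sketch of that pattern with a toy `Peer` type (not GreptimeDB's):

use std::collections::HashMap;

struct Peer {
    id: u64,
    addr: String,
}

// Overwrite each peer's cached address with the latest one registered
// for its node id; peers without an entry keep their stored address.
fn remap_addresses(peers: &mut [Peer], latest: &HashMap<u64, String>) {
    for peer in peers {
        if let Some(addr) = latest.get(&peer.id) {
            peer.addr = addr.clone();
        }
    }
}

fn main() {
    let mut peers = vec![
        Peer { id: 1, addr: "stale:4001".into() },
        Peer { id: 2, addr: "stale:4002".into() },
    ];
    let latest = HashMap::from([(1, "node1:4001".to_string())]);
    remap_addresses(&mut peers, &latest);
    assert_eq!(peers[0].addr, "node1:4001");
    assert_eq!(peers[1].addr, "stale:4002"); // no fresh record; unchanged
}
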
#[cfg(test)]
@@ -742,9 +615,7 @@ mod tests {

    use super::*;
    use crate::kv_backend::memory::MemoryKvBackend;
    use crate::kv_backend::{KvBackend, TxnService};
    use crate::peer::Peer;
    use crate::rpc::store::PutRequest;
    use crate::kv_backend::TxnService;

    #[test]
    fn test_table_route_compatibility() {

@@ -773,18 +644,18 @@ mod tests {
    }

    #[tokio::test]
    async fn test_table_route_storage_get_with_raw_bytes_empty() {
    async fn test_table_route_storage_get_raw_empty() {
        let kv = Arc::new(MemoryKvBackend::default());
        let table_route_storage = TableRouteStorage::new(kv);
        let table_route = table_route_storage.get_with_raw_bytes(1024).await.unwrap();
        let table_route = table_route_storage.get_raw(1024).await.unwrap();
        assert!(table_route.is_none());
    }

    #[tokio::test]
    async fn test_table_route_storage_get_with_raw_bytes() {
    async fn test_table_route_storage_get_raw() {
        let kv = Arc::new(MemoryKvBackend::default());
        let table_route_storage = TableRouteStorage::new(kv.clone());
        let table_route = table_route_storage.get_with_raw_bytes(1024).await.unwrap();
        let table_route = table_route_storage.get_raw(1024).await.unwrap();
        assert!(table_route.is_none());
        let table_route_manager = TableRouteManager::new(kv.clone());
        let table_route_value = TableRouteValue::Logical(LogicalTableRouteValue {

@@ -797,7 +668,7 @@ mod tests {
            .unwrap();
        let r = kv.txn(txn).await.unwrap();
        assert!(r.succeeded);
        let table_route = table_route_storage.get_with_raw_bytes(1024).await.unwrap();
        let table_route = table_route_storage.get_raw(1024).await.unwrap();
        assert!(table_route.is_some());
        let got = table_route.unwrap().inner;
        assert_eq!(got, table_route_value);

@@ -848,61 +719,4 @@ mod tests {
        assert!(results[2].is_none());
        assert_eq!(results[3].as_ref().unwrap(), &routes[0].1);
    }

    #[tokio::test]
    async fn remap_route_address_updates_addresses() {
        let kv = Arc::new(MemoryKvBackend::default());
        let table_route_storage = TableRouteStorage::new(kv.clone());
        let mut table_route = TableRouteValue::Physical(PhysicalTableRouteValue {
            region_routes: vec![RegionRoute {
                leader_peer: Some(Peer {
                    id: 1,
                    ..Default::default()
                }),
                follower_peers: vec![Peer {
                    id: 2,
                    ..Default::default()
                }],
                ..Default::default()
            }],
            version: 0,
        });

        kv.put(PutRequest {
            key: NodeAddressKey::with_datanode(1).to_bytes(),
            value: NodeAddressValue {
                peer: Peer {
                    addr: "addr1".to_string(),
                    ..Default::default()
                },
            }
            .try_as_raw_value()
            .unwrap(),
            ..Default::default()
        })
        .await
        .unwrap();

        table_route_storage
            .remap_route_address(&mut table_route)
            .await
            .unwrap();

        if let TableRouteValue::Physical(physical_table_route) = table_route {
            assert_eq!(
                physical_table_route.region_routes[0]
                    .leader_peer
                    .as_ref()
                    .unwrap()
                    .addr,
                "addr1"
            );
            assert_eq!(
                physical_table_route.region_routes[0].follower_peers[0].addr,
                ""
            );
        } else {
            panic!("Expected PhysicalTableRouteValue");
        }
    }
}

@@ -16,7 +16,7 @@ use serde::de::DeserializeOwned;
use serde::Serialize;

use crate::error::Result;
use crate::key::{DeserializedValueWithBytes, MetadataValue};
use crate::key::{DeserializedValueWithBytes, TableMetaValue};
use crate::kv_backend::txn::TxnOpResponse;
use crate::rpc::KeyValue;

@@ -41,7 +41,7 @@ impl TxnOpGetResponseSet {
    ) -> impl FnMut(&mut TxnOpGetResponseSet) -> Result<Option<DeserializedValueWithBytes<T>>>
    where
        F: FnMut(&mut TxnOpGetResponseSet) -> Option<Vec<u8>>,
        T: Serialize + DeserializeOwned + MetadataValue,
        T: Serialize + DeserializeOwned + TableMetaValue,
    {
        move |set| {
            f(set)

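The hunk above shows a decoder factory: a function that accepts a byte extractor `F` and returns a closure turning the extracted bytes into typed values. A minimal standalone sketch of the same shape, with serde_json standing in for the project's value codec (assumes the serde and serde_json crates):

use serde::de::DeserializeOwned;

// Wrap a raw-bytes extractor into a typed decoder. Each call pulls the
// next raw payload (if any) and deserializes it as JSON into `T`.
fn decoder<F, T>(mut f: F) -> impl FnMut() -> Option<serde_json::Result<T>>
where
    F: FnMut() -> Option<Vec<u8>>,
    T: DeserializeOwned,
{
    move || f().map(|bytes| serde_json::from_slice::<T>(&bytes))
}

fn main() {
    let mut source = vec![b"2".to_vec(), b"1".to_vec()];
    let mut next_u32 = decoder::<_, u32>(move || source.pop());
    assert_eq!(next_u32().unwrap().unwrap(), 1);
    assert_eq!(next_u32().unwrap().unwrap(), 2);
    assert!(next_u32().is_none());
}
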
@@ -24,7 +24,7 @@ use table::table_name::TableName;
use super::VIEW_INFO_KEY_PATTERN;
use crate::error::{InvalidViewInfoSnafu, Result};
use crate::key::txn_helper::TxnOpGetResponseSet;
use crate::key::{DeserializedValueWithBytes, MetadataKey, MetadataValue, VIEW_INFO_KEY_PREFIX};
use crate::key::{DeserializedValueWithBytes, MetaKey, TableMetaValue, VIEW_INFO_KEY_PREFIX};
use crate::kv_backend::txn::Txn;
use crate::kv_backend::KvBackendRef;
use crate::rpc::store::BatchGetRequest;

@@ -53,7 +53,7 @@ impl Display for ViewInfoKey {
    }
}

impl<'a> MetadataKey<'a, ViewInfoKey> for ViewInfoKey {
impl<'a> MetaKey<'a, ViewInfoKey> for ViewInfoKey {
    fn to_bytes(&self) -> Vec<u8> {
        self.to_string().into_bytes()
    }

@@ -13,6 +13,7 @@
// limitations under the License.

use std::any::Any;
use std::string::FromUtf8Error;
use std::sync::Arc;

use common_error::ext::{BoxedError, ErrorExt};

@@ -140,6 +141,12 @@ pub enum Error {
        procedure_id: ProcedureId,
    },

    #[snafu(display("Corrupted data, error: "))]
    CorruptedData {
        #[snafu(source)]
        error: FromUtf8Error,
    },

    #[snafu(display("Failed to start the remove_outdated_meta method, error"))]
    StartRemoveOutdatedMetaTask {
        source: common_runtime::error::Error,

@@ -154,6 +161,14 @@ pub enum Error {
        location: Location,
    },

    #[snafu(display("Subprocedure {} failed", subprocedure_id))]
    SubprocedureFailed {
        subprocedure_id: ProcedureId,
        source: Arc<Error>,
        #[snafu(implicit)]
        location: Location,
    },

    #[snafu(display("Failed to parse segment key: {key}"))]
    ParseSegmentKey {
        #[snafu(implicit)]

@@ -203,11 +218,14 @@ impl ErrorExt for Error {
                StatusCode::InvalidArguments
            }
            Error::ProcedurePanic { .. }
            | Error::CorruptedData { .. }
            | Error::ParseSegmentKey { .. }
            | Error::Unexpected { .. } => StatusCode::Unexpected,
            Error::ProcedureExec { source, .. } => source.status_code(),
            Error::StartRemoveOutdatedMetaTask { source, .. }
            | Error::StopRemoveOutdatedMetaTask { source, .. } => source.status_code(),

            Error::SubprocedureFailed { source, .. } => source.status_code(),
        }
    }

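These hunks add snafu error variants and wire their status codes. A minimal standalone sketch of the variant-plus-source pattern they use (assumes the snafu crate; `Location` fields and status codes omitted):

use snafu::{ResultExt, Snafu};

#[derive(Debug, Snafu)]
enum Error {
    // A field named `source` is captured automatically when the
    // generated context selector is applied.
    #[snafu(display("Corrupted data"))]
    CorruptedData { source: std::string::FromUtf8Error },
}

fn decode(bytes: Vec<u8>) -> Result<String, Error> {
    // `.context(...)` converts the std error into our variant.
    String::from_utf8(bytes).context(CorruptedDataSnafu)
}

fn main() {
    assert_eq!(decode(b"ok".to_vec()).unwrap(), "ok");
    assert!(decode(vec![0xff]).is_err());
}
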
@@ -19,11 +19,10 @@ use std::time::Duration;
use backon::{BackoffBuilder, ExponentialBuilder};
use common_telemetry::{debug, error, info};
use rand::Rng;
use snafu::ResultExt;
use tokio::time;

use super::rwlock::OwnedKeyRwLockGuard;
use crate::error::{self, ProcedurePanicSnafu, Result, RollbackTimesExceededSnafu};
use crate::error::{self, ProcedurePanicSnafu, Result};
use crate::local::{ManagerContext, ProcedureMeta, ProcedureMetaRef};
use crate::procedure::{Output, StringKey};
use crate::store::{ProcedureMessage, ProcedureStore};

@@ -223,12 +222,12 @@ impl Runner {
                if let Some(d) = rollback.next() {
                    self.wait_on_err(d, rollback_times).await;
                } else {
                    let err = Err::<(), Arc<Error>>(error)
                        .context(RollbackTimesExceededSnafu {
                    self.meta.set_state(ProcedureState::failed(Arc::new(
                        Error::RollbackTimesExceeded {
                            source: error.clone(),
                            procedure_id: self.meta.id,
                        })
                        .unwrap_err();
                    self.meta.set_state(ProcedureState::failed(Arc::new(err)));
                        },
                    )));
                    return;
                }

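The runner hunk rewrites the give-up path of the rollback loop: while the backoff iterator still yields a delay, the runner waits and retries; once it is exhausted, the procedure is marked failed with a wrapping `RollbackTimesExceeded` error. A std-only sketch of that control flow (no backon or tokio; the real code sleeps asynchronously):

use std::time::Duration;

#[derive(Debug)]
enum State {
    Failed(String),
    Done,
}

fn run_with_rollback<I>(mut backoff: I, mut attempt: impl FnMut() -> Result<(), String>) -> State
where
    I: Iterator<Item = Duration>,
{
    loop {
        match attempt() {
            Ok(()) => return State::Done,
            Err(e) => {
                if let Some(delay) = backoff.next() {
                    // In the real runner this is an async wait.
                    std::thread::sleep(delay);
                } else {
                    // Retry budget exhausted: record a terminal failure.
                    return State::Failed(format!("rollback times exceeded: {e}"));
                }
            }
        }
    }
}

fn main() {
    let backoff = std::iter::repeat(Duration::from_millis(1)).take(2);
    let mut tries = 0;
    let state = run_with_rollback(backoff, || {
        tries += 1;
        Err("boom".to_string())
    });
    println!("{state:?} after {tries} tries"); // Failed(...) after 3 tries
}
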
@@ -127,6 +127,12 @@ pub enum Error {
        location: Location,
    },

    #[snafu(display("Not expected to run ExecutionPlan more than once"))]
    ExecuteRepeatedly {
        #[snafu(implicit)]
        location: Location,
    },

    #[snafu(display("General DataFusion error"))]
    GeneralDataFusion {
        #[snafu(source)]

@@ -187,6 +193,12 @@ pub enum Error {
        source: BoxedError,
    },

    #[snafu(display("Failed to join thread"))]
    ThreadJoin {
        #[snafu(implicit)]
        location: Location,
    },

    #[snafu(display("Failed to decode logical plan: {source}"))]
    DecodePlan {
        #[snafu(implicit)]

@@ -277,7 +289,9 @@ impl ErrorExt for Error {

            Error::MissingTableMutationHandler { .. }
            | Error::MissingProcedureServiceHandler { .. }
            | Error::MissingFlowServiceHandler { .. } => StatusCode::Unexpected,
            | Error::MissingFlowServiceHandler { .. }
            | Error::ExecuteRepeatedly { .. }
            | Error::ThreadJoin { .. } => StatusCode::Unexpected,

            Error::UnsupportedInputDataType { .. }
            | Error::TypeCast { .. }

@@ -313,6 +327,7 @@ pub fn datafusion_status_code<T: ErrorExt + 'static>(
    match e {
        DataFusionError::Internal(_) => StatusCode::Internal,
        DataFusionError::NotImplemented(_) => StatusCode::Unsupported,
        DataFusionError::ResourcesExhausted(_) => StatusCode::RuntimeResourcesExhausted,
        DataFusionError::Plan(_) => StatusCode::PlanQuery,
        DataFusionError::External(e) => {
            if let Some(ext) = (*e).downcast_ref::<T>() {

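The `DataFusionError::External` arm recovers a concrete error type from a boxed trait object before deciding on a status code. The same `downcast_ref` mechanic with plain std errors, as a self-contained illustration (not the project's types):

use std::error::Error;
use std::fmt;

#[derive(Debug)]
struct MyError(&'static str);

impl fmt::Display for MyError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "my error: {}", self.0)
    }
}

impl Error for MyError {}

fn classify(e: &(dyn Error + 'static)) -> &'static str {
    // If the boxed error is actually a MyError, use its information;
    // otherwise fall back to a generic classification.
    match e.downcast_ref::<MyError>() {
        Some(my) => my.0,
        None => "unknown",
    }
}

fn main() {
    let boxed: Box<dyn Error> = Box::new(MyError("plan failed"));
    assert_eq!(classify(boxed.as_ref()), "plan failed");
}
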
@@ -17,6 +17,7 @@ use std::any::Any;
use common_error::ext::{BoxedError, ErrorExt};
use common_error::status_code::StatusCode;
use common_macro::stack_trace_debug;
use datafusion::error::DataFusionError;
use prost::{DecodeError, EncodeError};
use snafu::{Location, Snafu};

@@ -40,6 +41,14 @@ pub enum Error {
        location: Location,
    },

    #[snafu(display("Internal error from DataFusion"))]
    DFInternal {
        #[snafu(source)]
        error: DataFusionError,
        #[snafu(implicit)]
        location: Location,
    },

    #[snafu(display("Internal error"))]
    Internal {
        #[snafu(implicit)]

@@ -47,6 +56,12 @@ pub enum Error {
        source: BoxedError,
    },

    #[snafu(display("Cannot convert plan doesn't belong to GreptimeDB"))]
    UnknownPlan {
        #[snafu(implicit)]
        location: Location,
    },

    #[snafu(display("Failed to encode DataFusion plan"))]
    EncodeDfPlan {
        #[snafu(source)]

@@ -69,8 +84,10 @@ pub type Result<T> = std::result::Result<T, Error>;
impl ErrorExt for Error {
    fn status_code(&self) -> StatusCode {
        match self {
            Error::EncodeRel { .. } | Error::DecodeRel { .. } => StatusCode::InvalidArguments,
            Error::Internal { .. } => StatusCode::Internal,
            Error::UnknownPlan { .. } | Error::EncodeRel { .. } | Error::DecodeRel { .. } => {
                StatusCode::InvalidArguments
            }
            Error::DFInternal { .. } | Error::Internal { .. } => StatusCode::Internal,
            Error::EncodeDfPlan { .. } | Error::DecodeDfPlan { .. } => StatusCode::Unexpected,
        }
    }

@@ -34,4 +34,4 @@ tracing = "0.1"
|
||||
tracing-appender = "0.2"
|
||||
tracing-log = "0.1"
|
||||
tracing-opentelemetry = "0.22.0"
|
||||
tracing-subscriber = { version = "0.3", features = ["env-filter", "json", "fmt"] }
|
||||
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
|
||||
|
||||
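Note that `features = ["env-filter"]` still leaves tracing-subscriber's default features (including the fmt layer) enabled; only the explicit `json` output support disappears on this branch. For reference, a minimal sketch of what the env-filter feature provides (assumes the tracing and tracing-subscriber crates):

use tracing_subscriber::EnvFilter;

fn main() {
    // Honor RUST_LOG if set, otherwise default to `info`.
    let filter = EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new("info"));
    tracing_subscriber::fmt().with_env_filter(filter).init();
    tracing::info!("logger ready");
}
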
Some files were not shown because too many files have changed in this diff.