Mirror of https://github.com/GreptimeTeam/greptimedb.git, synced 2025-12-22 22:20:02 +00:00

Compare commits: 146 commits, v1.0.0-bet ... 497dfde90b
.github/CODEOWNERS (vendored, 22 lines changed)

@@ -5,23 +5,23 @@
 * @GreptimeTeam/db-approver

 ## [Module] Database Engine
-/src/index @zhongzc
+/src/index @evenyag @discord9 @WenyXu
 /src/mito2 @evenyag @v0y4g3r @waynexia
-/src/query @evenyag
+/src/query @evenyag @waynexia @discord9

 ## [Module] Distributed
-/src/common/meta @MichaelScofield
-/src/common/procedure @MichaelScofield
-/src/meta-client @MichaelScofield
-/src/meta-srv @MichaelScofield
+/src/common/meta @MichaelScofield @WenyXu
+/src/common/procedure @MichaelScofield @WenyXu
+/src/meta-client @MichaelScofield @WenyXu
+/src/meta-srv @MichaelScofield @WenyXu

 ## [Module] Write Ahead Log
-/src/log-store @v0y4g3r
-/src/store-api @v0y4g3r
+/src/log-store @v0y4g3r @WenyXu
+/src/store-api @v0y4g3r @evenyag

 ## [Module] Metrics Engine
-/src/metric-engine @waynexia
-/src/promql @waynexia
+/src/metric-engine @waynexia @WenyXu
+/src/promql @waynexia @evenyag @discord9

 ## [Module] Flow
-/src/flow @zhongzc @waynexia
+/src/flow @discord9 @waynexia
.github/actions/build-greptime-binary/action.yml (vendored, 17 lines changed)

@@ -32,9 +32,23 @@ inputs:
     description: Image Registry
     required: false
     default: 'docker.io'
+  large-page-size:
+    description: Build GreptimeDB with large page size (65536).
+    required: false
+    default: 'false'

 runs:
   using: composite
   steps:
+    - name: Set extra build environment variables
+      shell: bash
+      run: |
+        if [[ '${{ inputs.large-page-size }}' == 'true' ]]; then
+          echo 'EXTRA_BUILD_ENVS="JEMALLOC_SYS_WITH_LG_PAGE=16"' >> $GITHUB_ENV
+        else
+          echo 'EXTRA_BUILD_ENVS=' >> $GITHUB_ENV
+        fi
+
     - name: Build greptime binary
       shell: bash
       if: ${{ inputs.build-android-artifacts == 'false' }}
@@ -45,7 +59,8 @@ runs:
         FEATURES=${{ inputs.features }} \
         BASE_IMAGE=${{ inputs.base-image }} \
         IMAGE_NAMESPACE=${{ inputs.image-namespace }} \
-        IMAGE_REGISTRY=${{ inputs.image-registry }}
+        IMAGE_REGISTRY=${{ inputs.image-registry }} \
+        EXTRA_BUILD_ENVS=$EXTRA_BUILD_ENVS

     - name: Upload artifacts
       uses: ./.github/actions/upload-artifacts
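The `large-page-size` input works by exporting `JEMALLOC_SYS_WITH_LG_PAGE=16` into the build environment: the value is the log2 of the assumed page size, so 16 corresponds to the 65536-byte pages mentioned in the input description. A minimal sketch of the equivalent local build; the exact cargo invocation and flags are illustrative assumptions, not taken from this diff:

```bash
# Hypothetical local equivalent of the large-page-size build path.
# JEMALLOC_SYS_WITH_LG_PAGE is the log2 of the page size jemalloc assumes: 16 -> 65536 bytes.
export JEMALLOC_SYS_WITH_LG_PAGE=16
cargo build --release --bin greptime
```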
(The following hunks belong to a second composite action; its file header is missing from this listing.)

@@ -27,6 +27,10 @@ inputs:
     description: Working directory to build the artifacts
     required: false
     default: .
+  large-page-size:
+    description: Build GreptimeDB with large page size (65536).
+    required: false
+    default: 'false'
 runs:
   using: composite
   steps:
@@ -59,6 +63,7 @@ runs:
       working-dir: ${{ inputs.working-dir }}
       image-registry: ${{ inputs.image-registry }}
       image-namespace: ${{ inputs.image-namespace }}
+      large-page-size: ${{ inputs.large-page-size }}

     - name: Clean up the target directory # Clean up the target directory for the centos7 base image, or it will still use the objects of last build.
       shell: bash
@@ -77,6 +82,7 @@ runs:
       working-dir: ${{ inputs.working-dir }}
       image-registry: ${{ inputs.image-registry }}
       image-namespace: ${{ inputs.image-namespace }}
+      large-page-size: ${{ inputs.large-page-size }}

     - name: Build greptime on android base image
       uses: ./.github/actions/build-greptime-binary
@@ -89,3 +95,4 @@ runs:
       build-android-artifacts: true
       image-registry: ${{ inputs.image-registry }}
       image-namespace: ${{ inputs.image-namespace }}
+      large-page-size: ${{ inputs.large-page-size }}
(Hunk from another action, also missing its file header, in a step that installs the GreptimeDB Helm chart.)

@@ -51,7 +51,7 @@ runs:
     run: |
       helm upgrade \
         --install my-greptimedb \
-        --set meta.backendStorage.etcd.endpoints=${{ inputs.etcd-endpoints }} \
+        --set 'meta.backendStorage.etcd.endpoints[0]=${{ inputs.etcd-endpoints }}' \
        --set meta.enableRegionFailover=${{ inputs.enable-region-failover }} \
        --set image.registry=${{ inputs.image-registry }} \
        --set image.repository=${{ inputs.image-repository }} \
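The change switches `meta.backendStorage.etcd.endpoints` from a plain assignment to an indexed one because the chart expects a list of endpoints, and `--set key=value` would set a single string. A hedged sketch of the difference; the chart reference and endpoint hostnames are placeholders, not taken from this diff:

```bash
# Indexed --set entries build a proper YAML array; repeat the pattern per endpoint.
helm upgrade --install my-greptimedb greptime/greptimedb-cluster \
  --set 'meta.backendStorage.etcd.endpoints[0]=etcd-0.etcd:2379' \
  --set 'meta.backendStorage.etcd.endpoints[1]=etcd-1.etcd:2379'
```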
.github/scripts/create-version.sh (vendored, 11 lines changed)

@@ -49,6 +49,17 @@ function create_version() {
       echo "GITHUB_REF_NAME is empty in push event" >&2
       exit 1
     fi

+    # For tag releases, ensure GITHUB_REF_NAME matches the version in Cargo.toml
+    CARGO_VERSION=$(grep '^version = ' Cargo.toml | cut -d '"' -f 2 | head -n 1)
+    EXPECTED_REF_NAME="v${CARGO_VERSION}"
+
+    if [ "$GITHUB_REF_NAME" != "$EXPECTED_REF_NAME" ]; then
+      echo "Error: GITHUB_REF_NAME '$GITHUB_REF_NAME' does not match Cargo.toml version 'v${CARGO_VERSION}'" >&2
+      echo "Expected tag name: '$EXPECTED_REF_NAME'" >&2
+      exit 1
+    fi
+
     echo "$GITHUB_REF_NAME"
   elif [ "$GITHUB_EVENT_NAME" = workflow_dispatch ]; then
     echo "$NEXT_RELEASE_VERSION-$(git rev-parse --short HEAD)-$(date "+%Y%m%d-%s")"
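The new guard derives the expected tag from the workspace version, so a release tag is rejected unless it is exactly `v` plus the `version` in `Cargo.toml` (1.0.0-beta.3 in this changeset). A quick way to sanity-check a tag locally before pushing, reusing the same extraction pipeline; the tag value here is only an example:

```bash
# Reproduce the tag/version consistency check outside CI (tag name is an example).
TAG="v1.0.0-beta.3"
CARGO_VERSION=$(grep '^version = ' Cargo.toml | cut -d '"' -f 2 | head -n 1)
[ "$TAG" = "v${CARGO_VERSION}" ] && echo "tag matches Cargo.toml ($CARGO_VERSION)" \
  || { echo "mismatch: tag=$TAG cargo=v${CARGO_VERSION}" >&2; exit 1; }
```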
.github/scripts/deploy-greptimedb.sh (vendored, 4 lines changed)

@@ -81,7 +81,7 @@ function deploy_greptimedb_cluster() {
     --create-namespace \
     --set image.tag="$GREPTIMEDB_IMAGE_TAG" \
     --set initializer.tag="$GREPTIMEDB_INITIALIZER_IMAGE_TAG" \
-    --set meta.backendStorage.etcd.endpoints="etcd.$install_namespace:2379" \
+    --set "meta.backendStorage.etcd.endpoints[0]=etcd.$install_namespace.svc.cluster.local:2379" \
     --set meta.backendStorage.etcd.storeKeyPrefix="$cluster_name" \
     -n "$install_namespace"
@@ -119,7 +119,7 @@ function deploy_greptimedb_cluster_with_s3_storage() {
     --create-namespace \
     --set image.tag="$GREPTIMEDB_IMAGE_TAG" \
     --set initializer.tag="$GREPTIMEDB_INITIALIZER_IMAGE_TAG" \
-    --set meta.backendStorage.etcd.endpoints="etcd.$install_namespace:2379" \
+    --set "meta.backendStorage.etcd.endpoints[0]=etcd.$install_namespace.svc.cluster.local:2379" \
     --set meta.backendStorage.etcd.storeKeyPrefix="$cluster_name" \
     --set objectStorage.s3.bucket="$AWS_CI_TEST_BUCKET" \
     --set objectStorage.s3.region="$AWS_REGION" \
.github/workflows/check-git-deps.yml (vendored, new file, 154 lines)

@@ -0,0 +1,154 @@
name: Check Git Dependencies on Main Branch

on:
  pull_request:
    branches: [main]
    paths:
      - 'Cargo.toml'
  push:
    branches: [main]
    paths:
      - 'Cargo.toml'

jobs:
  check-git-deps:
    runs-on: ubuntu-latest

    steps:
      - name: Checkout repository
        uses: actions/checkout@v6

      - name: Check git dependencies
        env:
          WHITELIST_DEPS: "greptime-proto,meter-core,meter-macros"
        run: |
          #!/bin/bash
          set -e

          echo "Checking whitelisted git dependencies..."

          # Function to check if a commit is on main branch
          check_commit_on_main() {
            local repo_url="$1"
            local commit="$2"
            local repo_name=$(basename "$repo_url" .git)

            echo "Checking $repo_name"
            echo "Repo: $repo_url"
            echo "Commit: $commit"

            # Create a temporary directory for cloning
            local temp_dir=$(mktemp -d)

            # Clone the repository
            if git clone "$repo_url" "$temp_dir" 2>/dev/null; then
              cd "$temp_dir"

              # Try to determine the main branch name
              local main_branch="main"
              if ! git rev-parse --verify origin/main >/dev/null 2>&1; then
                if git rev-parse --verify origin/master >/dev/null 2>&1; then
                  main_branch="master"
                else
                  # Try to get the default branch
                  main_branch=$(git symbolic-ref refs/remotes/origin/HEAD | sed 's@^refs/remotes/origin/@@')
                fi
              fi

              echo "Main branch: $main_branch"

              # Check if commit exists
              if git cat-file -e "$commit" 2>/dev/null; then
                # Check if commit is on main branch
                if git merge-base --is-ancestor "$commit" "origin/$main_branch" 2>/dev/null; then
                  echo "PASS: Commit $commit is on $main_branch branch"
                  cd - >/dev/null
                  rm -rf "$temp_dir"
                  return 0
                else
                  echo "FAIL: Commit $commit is NOT on $main_branch branch"

                  # Try to find which branch contains this commit
                  local branch_name=$(git branch -r --contains "$commit" 2>/dev/null | head -1 | sed 's/^[[:space:]]*origin\///' | sed 's/[[:space:]]*$//')
                  if [[ -n "$branch_name" ]]; then
                    echo "Found on branch: $branch_name"
                  fi
                  cd - >/dev/null
                  rm -rf "$temp_dir"
                  return 1
                fi
              else
                echo "FAIL: Commit $commit not found in repository"
                cd - >/dev/null
                rm -rf "$temp_dir"
                return 1
              fi
            else
              echo "FAIL: Failed to clone $repo_url"
              rm -rf "$temp_dir"
              return 1
            fi
          }

          # Extract whitelisted git dependencies from Cargo.toml
          echo "Extracting git dependencies from Cargo.toml..."

          # Create temporary array to store dependencies
          declare -a deps=()

          # Build awk pattern from whitelist
          IFS=',' read -ra WHITELIST <<< "$WHITELIST_DEPS"
          awk_pattern=""
          for dep in "${WHITELIST[@]}"; do
            if [[ -n "$awk_pattern" ]]; then
              awk_pattern="$awk_pattern|"
            fi
            awk_pattern="$awk_pattern$dep"
          done

          # Extract whitelisted dependencies
          while IFS= read -r line; do
            if [[ -n "$line" ]]; then
              deps+=("$line")
            fi
          done < <(awk -v pattern="$awk_pattern" '
            $0 ~ pattern ".*git = \"https:/" {
              match($0, /git = "([^"]+)"/, arr)
              git_url = arr[1]
              if (match($0, /rev = "([^"]+)"/, rev_arr)) {
                rev = rev_arr[1]
                print git_url " " rev
              } else {
                # Check next line for rev
                getline
                if (match($0, /rev = "([^"]+)"/, rev_arr)) {
                  rev = rev_arr[1]
                  print git_url " " rev
                }
              }
            }
          ' Cargo.toml)

          echo "Found ${#deps[@]} dependencies to check:"
          for dep in "${deps[@]}"; do
            echo " $dep"
          done

          failed=0

          for dep in "${deps[@]}"; do
            read -r repo_url commit <<< "$dep"
            if ! check_commit_on_main "$repo_url" "$commit"; then
              failed=1
            fi
          done

          echo "Check completed."

          if [[ $failed -eq 1 ]]; then
            echo "ERROR: Some git dependencies are not on their main branches!"
            echo "Please update the commits to point to main branch commits."
            exit 1
          else
            echo "SUCCESS: All git dependencies are on their main branches!"
          fi
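The core of this workflow is `git merge-base --is-ancestor`, which succeeds only when the pinned revision is reachable from the repository's default branch. A hand-run sketch for a single whitelisted dependency; the repository URL and revision below are the ones pinned in this changeset's `Cargo.toml`:

```bash
# Manually verify that a pinned rev is on main for one whitelisted dependency.
repo=https://github.com/GreptimeTeam/greptime-proto.git
rev=173efe5ec62722089db7c531c0b0d470a072b915
tmp=$(mktemp -d)
git clone --quiet "$repo" "$tmp"
git -C "$tmp" merge-base --is-ancestor "$rev" origin/main \
  && echo "PASS: $rev is on main" || echo "FAIL: $rev is not on main"
rm -rf "$tmp"
```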
.github/workflows/dev-build.yml (vendored, 9 lines changed)

@@ -4,10 +4,11 @@ name: GreptimeDB Development Build
 on:
   workflow_dispatch: # Allows you to run this workflow manually.
     inputs:
-      repository:
-        description: The public repository to build
+      large-page-size:
+        description: Build GreptimeDB with large page size (65536).
+        type: boolean
         required: false
-        default: GreptimeTeam/greptimedb
+        default: false
       commit: # Note: We only pull the source code and use the current workflow to build the artifacts.
         description: The commit to build
         required: true
@@ -181,6 +182,7 @@ jobs:
       working-dir: ${{ env.CHECKOUT_GREPTIMEDB_PATH }}
       image-registry: ${{ vars.ECR_IMAGE_REGISTRY }}
       image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }}
+      large-page-size: ${{ inputs.large-page-size }}

   build-linux-arm64-artifacts:
     name: Build linux-arm64 artifacts
@@ -214,6 +216,7 @@ jobs:
       working-dir: ${{ env.CHECKOUT_GREPTIMEDB_PATH }}
       image-registry: ${{ vars.ECR_IMAGE_REGISTRY }}
       image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }}
+      large-page-size: ${{ inputs.large-page-size }}

   release-images-to-dockerhub:
     name: Build and push images to DockerHub
.github/workflows/multi-lang-tests.yml (vendored, new file, 57 lines)

@@ -0,0 +1,57 @@
name: Multi-language Integration Tests

on:
  push:
    branches:
      - main
  workflow_dispatch:

concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

jobs:
  build-greptimedb:
    if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
    name: Build GreptimeDB binary
    runs-on: ubuntu-latest
    timeout-minutes: 60
    steps:
      - uses: actions/checkout@v4
        with:
          persist-credentials: false
      - uses: arduino/setup-protoc@v3
        with:
          repo-token: ${{ secrets.GITHUB_TOKEN }}
      - uses: actions-rust-lang/setup-rust-toolchain@v1
      - uses: Swatinem/rust-cache@v2
        with:
          shared-key: "multi-lang-build"
          cache-all-crates: "true"
          save-if: ${{ github.ref == 'refs/heads/main' }}
      - name: Install cargo-gc-bin
        shell: bash
        run: cargo install cargo-gc-bin --force
      - name: Build greptime binary
        shell: bash
        run: cargo gc -- --bin greptime --features "pg_kvbackend,mysql_kvbackend"
      - name: Pack greptime binary
        shell: bash
        run: |
          mkdir bin && \
          mv ./target/debug/greptime bin
      - name: Print greptime binary info
        run: ls -lh bin
      - name: Upload greptime binary
        uses: actions/upload-artifact@v4
        with:
          name: greptime-bin
          path: bin/
          retention-days: 1

  run-multi-lang-tests:
    name: Run Multi-language SDK Tests
    needs: build-greptimedb
    uses: ./.github/workflows/run-multi-lang-tests.yml
    with:
      artifact-name: greptime-bin
.github/workflows/nightly-build.yml (vendored, 21 lines changed)

@@ -174,6 +174,18 @@ jobs:
       image-registry: ${{ vars.ECR_IMAGE_REGISTRY }}
       image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }}

+  run-multi-lang-tests:
+    name: Run Multi-language SDK Tests
+    if: ${{ inputs.build_linux_amd64_artifacts || github.event_name == 'schedule' }}
+    needs: [
+      allocate-runners,
+      build-linux-amd64-artifacts,
+    ]
+    uses: ./.github/workflows/run-multi-lang-tests.yml
+    with:
+      artifact-name: greptime-linux-amd64-${{ needs.allocate-runners.outputs.version }}
+      artifact-is-tarball: true
+
   release-images-to-dockerhub:
     name: Build and push images to DockerHub
     if: ${{ inputs.release_images || github.event_name == 'schedule' }}
@@ -301,7 +313,8 @@
     if: ${{ github.repository == 'GreptimeTeam/greptimedb' && always() }} # Not requiring successful dependent jobs, always run.
     name: Send notification to Greptime team
     needs: [
-      release-images-to-dockerhub
+      release-images-to-dockerhub,
+      run-multi-lang-tests,
     ]
     runs-on: ubuntu-latest
     permissions:
@@ -319,17 +332,17 @@
       run: pnpm tsx bin/report-ci-failure.ts
       env:
         GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        CI_REPORT_STATUS: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result == 'success' }}
+        CI_REPORT_STATUS: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result == 'success' && (needs.run-multi-lang-tests.result == 'success' || needs.run-multi-lang-tests.result == 'skipped') }}
     - name: Notify nightly build successful result
       uses: slackapi/slack-github-action@v1.23.0
-      if: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result == 'success' }}
+      if: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result == 'success' && (needs.run-multi-lang-tests.result == 'success' || needs.run-multi-lang-tests.result == 'skipped') }}
       with:
         payload: |
           {"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has completed successfully."}

     - name: Notify nightly build failed result
       uses: slackapi/slack-github-action@v1.23.0
-      if: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result != 'success' }}
+      if: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result != 'success' || needs.run-multi-lang-tests.result == 'failure' }}
       with:
         payload: |
           {"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has failed, please check ${{ steps.report-ci-status.outputs.html_url }}."}
.github/workflows/release.yml (vendored, 44 lines changed)

@@ -49,14 +49,9 @@ on:
       description: Do not run integration tests during the build
       type: boolean
       default: true
-    build_linux_amd64_artifacts:
+    build_linux_artifacts:
       type: boolean
-      description: Build linux-amd64 artifacts
-      required: false
-      default: false
-    build_linux_arm64_artifacts:
-      type: boolean
-      description: Build linux-arm64 artifacts
+      description: Build linux artifacts (both amd64 and arm64)
       required: false
       default: false
     build_macos_artifacts:
@@ -144,7 +139,7 @@ jobs:
         ./.github/scripts/check-version.sh "${{ steps.create-version.outputs.version }}"

     - name: Allocate linux-amd64 runner
-      if: ${{ inputs.build_linux_amd64_artifacts || github.event_name == 'push' || github.event_name == 'schedule' }}
+      if: ${{ inputs.build_linux_artifacts || github.event_name == 'push' || github.event_name == 'schedule' }}
       uses: ./.github/actions/start-runner
       id: start-linux-amd64-runner
       with:
@@ -158,7 +153,7 @@
         subnet-id: ${{ vars.EC2_RUNNER_SUBNET_ID }}

     - name: Allocate linux-arm64 runner
-      if: ${{ inputs.build_linux_arm64_artifacts || github.event_name == 'push' || github.event_name == 'schedule' }}
+      if: ${{ inputs.build_linux_artifacts || github.event_name == 'push' || github.event_name == 'schedule' }}
       uses: ./.github/actions/start-runner
       id: start-linux-arm64-runner
       with:
@@ -173,7 +168,7 @@

   build-linux-amd64-artifacts:
     name: Build linux-amd64 artifacts
-    if: ${{ inputs.build_linux_amd64_artifacts || github.event_name == 'push' || github.event_name == 'schedule' }}
+    if: ${{ inputs.build_linux_artifacts || github.event_name == 'push' || github.event_name == 'schedule' }}
     needs: [
       allocate-runners,
     ]
@@ -195,7 +190,7 @@

   build-linux-arm64-artifacts:
     name: Build linux-arm64 artifacts
-    if: ${{ inputs.build_linux_arm64_artifacts || github.event_name == 'push' || github.event_name == 'schedule' }}
+    if: ${{ inputs.build_linux_artifacts || github.event_name == 'push' || github.event_name == 'schedule' }}
     needs: [
       allocate-runners,
     ]
@@ -215,6 +210,18 @@
       image-registry: ${{ vars.ECR_IMAGE_REGISTRY }}
       image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }}

+  run-multi-lang-tests:
+    name: Run Multi-language SDK Tests
+    if: ${{ inputs.build_linux_artifacts || github.event_name == 'push' || github.event_name == 'schedule' }}
+    needs: [
+      allocate-runners,
+      build-linux-amd64-artifacts,
+    ]
+    uses: ./.github/workflows/run-multi-lang-tests.yml
+    with:
+      artifact-name: greptime-linux-amd64-${{ needs.allocate-runners.outputs.version }}
+      artifact-is-tarball: true
+
   build-macos-artifacts:
     name: Build macOS artifacts
     strategy:
@@ -303,6 +310,7 @@
       allocate-runners,
       build-linux-amd64-artifacts,
       build-linux-arm64-artifacts,
+      run-multi-lang-tests,
     ]
     runs-on: ubuntu-latest
     outputs:
@@ -373,7 +381,18 @@

   publish-github-release:
     name: Create GitHub release and upload artifacts
-    if: ${{ inputs.publish_github_release || github.event_name == 'push' || github.event_name == 'schedule' }}
+    # Use always() to run even when optional jobs (macos, windows) are skipped.
+    # Then check that required jobs succeeded and optional jobs didn't fail.
+    if: |
+      always() &&
+      (inputs.publish_github_release || github.event_name == 'push' || github.event_name == 'schedule') &&
+      needs.allocate-runners.result == 'success' &&
+      (needs.build-linux-amd64-artifacts.result == 'success' || needs.build-linux-amd64-artifacts.result == 'skipped') &&
+      (needs.build-linux-arm64-artifacts.result == 'success' || needs.build-linux-arm64-artifacts.result == 'skipped') &&
+      (needs.build-macos-artifacts.result == 'success' || needs.build-macos-artifacts.result == 'skipped') &&
+      (needs.build-windows-artifacts.result == 'success' || needs.build-windows-artifacts.result == 'skipped') &&
+      (needs.release-images-to-dockerhub.result == 'success' || needs.release-images-to-dockerhub.result == 'skipped') &&
+      (needs.run-multi-lang-tests.result == 'success' || needs.run-multi-lang-tests.result == 'skipped')
     needs: [ # The job have to wait for all the artifacts are built.
       allocate-runners,
       build-linux-amd64-artifacts,
@@ -381,6 +400,7 @@
       build-macos-artifacts,
       build-windows-artifacts,
       release-images-to-dockerhub,
+      run-multi-lang-tests,
     ]
     runs-on: ubuntu-latest
     steps:
.github/workflows/run-multi-lang-tests.yml (vendored, new file, 194 lines)

@@ -0,0 +1,194 @@
# Reusable workflow for running multi-language SDK tests against GreptimeDB
# Used by: multi-lang-tests.yml, release.yml, nightly-build.yml
# Supports both direct binary artifacts and tarball artifacts

name: Run Multi-language SDK Tests

on:
  workflow_call:
    inputs:
      artifact-name:
        required: true
        type: string
        description: 'Name of the artifact containing greptime binary'
      http-port:
        required: false
        type: string
        default: '4000'
        description: 'HTTP server port'
      mysql-port:
        required: false
        type: string
        default: '4002'
        description: 'MySQL server port'
      postgres-port:
        required: false
        type: string
        default: '4003'
        description: 'PostgreSQL server port'
      db-name:
        required: false
        type: string
        default: 'test_db'
        description: 'Test database name'
      username:
        required: false
        type: string
        default: 'greptime_user'
        description: 'Authentication username'
      password:
        required: false
        type: string
        default: 'greptime_pwd'
        description: 'Authentication password'
      timeout-minutes:
        required: false
        type: number
        default: 30
        description: 'Job timeout in minutes'
      artifact-is-tarball:
        required: false
        type: boolean
        default: false
        description: 'Whether the artifact is a tarball (tar.gz) that needs to be extracted'

jobs:
  run-tests:
    name: Run Multi-language SDK Tests
    runs-on: ubuntu-latest
    timeout-minutes: ${{ inputs.timeout-minutes }}
    steps:
      - name: Checkout greptimedb-tests repository
        uses: actions/checkout@v4
        with:
          repository: GreptimeTeam/greptimedb-tests
          persist-credentials: false

      - name: Download pre-built greptime binary
        uses: actions/download-artifact@v4
        with:
          name: ${{ inputs.artifact-name }}
          path: artifact

      - name: Setup greptime binary
        run: |
          mkdir -p bin
          if [ "${{ inputs.artifact-is-tarball }}" = "true" ]; then
            # Extract tarball and find greptime binary
            tar -xzf artifact/*.tar.gz -C artifact
            find artifact -name "greptime" -type f -exec cp {} bin/greptime \;
          else
            # Direct binary format
            if [ -f artifact/greptime ]; then
              cp artifact/greptime bin/greptime
            else
              cp artifact/* bin/greptime
            fi
          fi
          chmod +x ./bin/greptime
          ls -lh ./bin/greptime
          ./bin/greptime --version

      - name: Setup Java 17
        uses: actions/setup-java@v4
        with:
          distribution: 'temurin'
          java-version: '17'
          cache: 'maven'

      - name: Setup Python 3.8
        uses: actions/setup-python@v5
        with:
          python-version: '3.8'

      - name: Setup Go 1.24
        uses: actions/setup-go@v5
        with:
          go-version: '1.24'
          cache: true
          cache-dependency-path: go-tests/go.sum

      - name: Set up Node.js
        uses: actions/setup-node@v4
        with:
          node-version: '18'

      - name: Install Python dependencies
        run: |
          pip install mysql-connector-python psycopg2-binary
          python3 -c "import mysql.connector; print(f'mysql-connector-python {mysql.connector.__version__}')"
          python3 -c "import psycopg2; print(f'psycopg2 {psycopg2.__version__}')"

      - name: Install Go dependencies
        working-directory: go-tests
        run: |
          go mod download
          go mod verify
          go version

      - name: Kill existing GreptimeDB processes
        run: |
          pkill -f greptime || true
          sleep 2

      - name: Start GreptimeDB standalone
        run: |
          ./bin/greptime standalone start \
            --http-addr 0.0.0.0:${{ inputs.http-port }} \
            --rpc-addr 0.0.0.0:4001 \
            --mysql-addr 0.0.0.0:${{ inputs.mysql-port }} \
            --postgres-addr 0.0.0.0:${{ inputs.postgres-port }} \
            --user-provider=static_user_provider:cmd:${{ inputs.username }}=${{ inputs.password }} > /tmp/greptimedb.log 2>&1 &

      - name: Wait for GreptimeDB to be ready
        run: |
          echo "Waiting for GreptimeDB..."
          for i in {1..60}; do
            if curl -sf http://localhost:${{ inputs.http-port }}/health > /dev/null; then
              echo "✅ GreptimeDB is ready"
              exit 0
            fi
            sleep 2
          done
          echo "❌ GreptimeDB failed to start"
          cat /tmp/greptimedb.log
          exit 1

      - name: Run multi-language tests
        env:
          DB_NAME: ${{ inputs.db-name }}
          MYSQL_HOST: 127.0.0.1
          MYSQL_PORT: ${{ inputs.mysql-port }}
          POSTGRES_HOST: 127.0.0.1
          POSTGRES_PORT: ${{ inputs.postgres-port }}
          HTTP_HOST: 127.0.0.1
          HTTP_PORT: ${{ inputs.http-port }}
          GREPTIME_USERNAME: ${{ inputs.username }}
          GREPTIME_PASSWORD: ${{ inputs.password }}
        run: |
          chmod +x ./run_tests.sh
          ./run_tests.sh

      - name: Collect logs on failure
        if: failure()
        run: |
          echo "=== GreptimeDB Logs ==="
          cat /tmp/greptimedb.log || true

      - name: Upload test logs on failure
        if: failure()
        uses: actions/upload-artifact@v4
        with:
          name: test-logs
          path: |
            /tmp/greptimedb.log
            java-tests/target/surefire-reports/
            python-tests/.pytest_cache/
            go-tests/*.log
            **/test-output/
          retention-days: 7

      - name: Cleanup
        if: always()
        run: |
          pkill -f greptime || true
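Once a standalone instance has been started with the defaults above, it can be probed manually over each protocol the SDK tests exercise. A hedged sketch; it assumes the `mysql` and `psql` clients are installed, and the PostgreSQL database name `public` is an assumption rather than something stated in this workflow:

```bash
# Quick manual probes against an instance started with the workflow defaults.
curl -sf http://127.0.0.1:4000/health && echo "HTTP OK"
mysql -h 127.0.0.1 -P 4002 -u greptime_user -pgreptime_pwd -e 'SELECT 1'
PGPASSWORD=greptime_pwd psql -h 127.0.0.1 -p 4003 -U greptime_user -d public -c 'SELECT 1'
```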
AUTHOR.md (64 lines changed)

@@ -2,41 +2,41 @@

 ## Individual Committers (in alphabetical order)

-* [CookiePieWw](https://github.com/CookiePieWw)
-* [etolbakov](https://github.com/etolbakov)
-* [irenjj](https://github.com/irenjj)
-* [KKould](https://github.com/KKould)
-* [Lanqing Yang](https://github.com/lyang24)
-* [NiwakaDev](https://github.com/NiwakaDev)
-* [tisonkun](https://github.com/tisonkun)
+- [apdong2022](https://github.com/apdong2022)
+- [beryl678](https://github.com/beryl678)
+- [CookiePieWw](https://github.com/CookiePieWw)
+- [etolbakov](https://github.com/etolbakov)
+- [irenjj](https://github.com/irenjj)
+- [KKould](https://github.com/KKould)
+- [Lanqing Yang](https://github.com/lyang24)
+- [nicecui](https://github.com/nicecui)
+- [NiwakaDev](https://github.com/NiwakaDev)
+- [paomian](https://github.com/paomian)
+- [tisonkun](https://github.com/tisonkun)
+- [Wenjie0329](https://github.com/Wenjie0329)
+- [zhaoyingnan01](https://github.com/zhaoyingnan01)
+- [zhongzc](https://github.com/zhongzc)
+- [ZonaHex](https://github.com/ZonaHex)
+- [zyy17](https://github.com/zyy17)

 ## Team Members (in alphabetical order)

-* [apdong2022](https://github.com/apdong2022)
-* [beryl678](https://github.com/beryl678)
-* [daviderli614](https://github.com/daviderli614)
-* [discord9](https://github.com/discord9)
-* [evenyag](https://github.com/evenyag)
-* [fengjiachun](https://github.com/fengjiachun)
-* [fengys1996](https://github.com/fengys1996)
-* [GrepTime](https://github.com/GrepTime)
-* [holalengyu](https://github.com/holalengyu)
-* [killme2008](https://github.com/killme2008)
-* [MichaelScofield](https://github.com/MichaelScofield)
-* [nicecui](https://github.com/nicecui)
-* [paomian](https://github.com/paomian)
-* [shuiyisong](https://github.com/shuiyisong)
-* [sunchanglong](https://github.com/sunchanglong)
-* [sunng87](https://github.com/sunng87)
-* [v0y4g3r](https://github.com/v0y4g3r)
-* [waynexia](https://github.com/waynexia)
-* [Wenjie0329](https://github.com/Wenjie0329)
-* [WenyXu](https://github.com/WenyXu)
-* [xtang](https://github.com/xtang)
-* [zhaoyingnan01](https://github.com/zhaoyingnan01)
-* [zhongzc](https://github.com/zhongzc)
-* [ZonaHex](https://github.com/ZonaHex)
-* [zyy17](https://github.com/zyy17)
+- [daviderli614](https://github.com/daviderli614)
+- [discord9](https://github.com/discord9)
+- [evenyag](https://github.com/evenyag)
+- [fengjiachun](https://github.com/fengjiachun)
+- [fengys1996](https://github.com/fengys1996)
+- [GrepTime](https://github.com/GrepTime)
+- [holalengyu](https://github.com/holalengyu)
+- [killme2008](https://github.com/killme2008)
+- [MichaelScofield](https://github.com/MichaelScofield)
+- [shuiyisong](https://github.com/shuiyisong)
+- [sunchanglong](https://github.com/sunchanglong)
+- [sunng87](https://github.com/sunng87)
+- [v0y4g3r](https://github.com/v0y4g3r)
+- [waynexia](https://github.com/waynexia)
+- [WenyXu](https://github.com/WenyXu)
+- [xtang](https://github.com/xtang)

 ## All Contributors
Cargo.lock (generated, 399 lines changed): file diff suppressed because it is too large.
Cargo.toml (17 lines changed)

@@ -21,6 +21,7 @@ members = [
     "src/common/grpc-expr",
     "src/common/macro",
     "src/common/mem-prof",
+    "src/common/memory-manager",
     "src/common/meta",
     "src/common/options",
     "src/common/plugins",
@@ -74,7 +75,7 @@ members = [
 resolver = "2"

 [workspace.package]
-version = "1.0.0-beta.1"
+version = "1.0.0-beta.3"
 edition = "2024"
 license = "Apache-2.0"

@@ -131,7 +132,7 @@ datafusion-functions = "50"
 datafusion-functions-aggregate-common = "50"
 datafusion-optimizer = "50"
 datafusion-orc = "0.5"
-datafusion-pg-catalog = "0.12.1"
+datafusion-pg-catalog = "0.12.3"
 datafusion-physical-expr = "50"
 datafusion-physical-plan = "50"
 datafusion-sql = "50"
@@ -139,16 +140,17 @@ datafusion-substrait = "50"
 deadpool = "0.12"
 deadpool-postgres = "0.14"
 derive_builder = "0.20"
+derive_more = { version = "2.1", features = ["full"] }
 dotenv = "0.15"
 either = "1.15"
-etcd-client = { git = "https://github.com/GreptimeTeam/etcd-client", rev = "f62df834f0cffda355eba96691fe1a9a332b75a7", features = [
+etcd-client = { version = "0.16.1", features = [
     "tls",
     "tls-roots",
 ] }
 fst = "0.4.7"
 futures = "0.3"
 futures-util = "0.3"
-greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "14b9dc40bdc8288742b0cefc7bb024303b7429ef" }
+greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "173efe5ec62722089db7c531c0b0d470a072b915" }
 hex = "0.4"
 http = "1"
 humantime = "2.1"
@@ -200,7 +202,8 @@ reqwest = { version = "0.12", default-features = false, features = [
     "stream",
     "multipart",
 ] }
-rskafka = { git = "https://github.com/WenyXu/rskafka.git", rev = "7b0f31ed39db049b4ee2e5f1e95b5a30be9baf76", features = [
+# Branch: feat/request-timeout
+rskafka = { git = "https://github.com/GreptimeTeam/rskafka.git", rev = "f5688f83e7da591cda3f2674c2408b4c0ed4ed50", features = [
     "transport-tls",
 ] }
 rstest = "0.25"
@@ -234,6 +237,7 @@ tower = "0.5"
 tower-http = "0.6"
 tracing = "0.1"
 tracing-appender = "0.2"
+tracing-opentelemetry = "0.31.0"
 tracing-subscriber = { version = "0.3", features = ["env-filter", "json", "fmt"] }
 typetag = "0.2"
 uuid = { version = "1.17", features = ["serde", "v4", "fast-rng"] }
@@ -263,6 +267,7 @@ common-grpc = { path = "src/common/grpc" }
 common-grpc-expr = { path = "src/common/grpc-expr" }
 common-macro = { path = "src/common/macro" }
 common-mem-prof = { path = "src/common/mem-prof" }
+common-memory-manager = { path = "src/common/memory-manager" }
 common-meta = { path = "src/common/meta" }
 common-options = { path = "src/common/options" }
 common-plugins = { path = "src/common/plugins" }
@@ -327,7 +332,7 @@ datafusion-physical-plan = { git = "https://github.com/GreptimeTeam/datafusion.g
 datafusion-datasource = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
 datafusion-sql = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
 datafusion-substrait = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
-sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "4b519a5caa95472cc3988f5556813a583dd35af1" } # branch = "v0.58.x"
+sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "a0ce2bc6eb3e804532932f39833c32432f5c9a39" } # branch = "v0.58.x"

 [profile.release]
 debug = 1
Makefile (3 lines changed)

@@ -17,6 +17,8 @@ CARGO_REGISTRY_CACHE ?= ${HOME}/.cargo/registry
 ARCH := $(shell uname -m | sed 's/x86_64/amd64/' | sed 's/aarch64/arm64/')
 OUTPUT_DIR := $(shell if [ "$(RELEASE)" = "true" ]; then echo "release"; elif [ ! -z "$(CARGO_PROFILE)" ]; then echo "$(CARGO_PROFILE)" ; else echo "debug"; fi)
 SQLNESS_OPTS ?=
+EXTRA_BUILD_ENVS ?=
+ASSEMBLED_EXTRA_BUILD_ENV := $(foreach var,$(EXTRA_BUILD_ENVS),-e $(var))

 # The arguments for running integration tests.
 ETCD_VERSION ?= v3.5.9
@@ -83,6 +85,7 @@ build: ## Build debug version greptime.
 .PHONY: build-by-dev-builder
 build-by-dev-builder: ## Build greptime by dev-builder.
 	docker run --network=host \
+		${ASSEMBLED_EXTRA_BUILD_ENV} \
 		-v ${PWD}:/greptimedb -v ${CARGO_REGISTRY_CACHE}:/root/.cargo/registry \
 		-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:${DEV_BUILDER_IMAGE_TAG} \
 		make build \
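With this change, every space-separated `VAR=value` entry in `EXTRA_BUILD_ENVS` is expanded by `$(foreach ...)` into a `-e VAR=value` flag on the `docker run` command, which is how the CI's `JEMALLOC_SYS_WITH_LG_PAGE=16` setting reaches the dev-builder container. A hedged sketch of an invocation; the second variable is only an example, not something this diff requires:

```bash
# Each VAR=value in EXTRA_BUILD_ENVS becomes a `docker run -e VAR=value` flag.
make build-by-dev-builder EXTRA_BUILD_ENVS="JEMALLOC_SYS_WITH_LG_PAGE=16 CARGO_BUILD_JOBS=8"
# expands inside the recipe to: docker run --network=host -e JEMALLOC_SYS_WITH_LG_PAGE=16 -e CARGO_BUILD_JOBS=8 ...
```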
(Configuration reference tables; the file header for this section is missing from this listing.)

@@ -83,6 +83,8 @@
| `wal.sync_period` | String | `10s` | Duration for fsyncing log files.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.recovery_parallelism` | Integer | `2` | Parallelism during WAL recovery. |
| `wal.broker_endpoints` | Array | -- | The Kafka broker endpoints.<br/>**It's only used when the provider is `kafka`**. |
| `wal.connect_timeout` | String | `3s` | The connect timeout for kafka client.<br/>**It's only used when the provider is `kafka`**. |
| `wal.timeout` | String | `3s` | The timeout for kafka client.<br/>**It's only used when the provider is `kafka`**. |
| `wal.auto_create_topics` | Bool | `true` | Automatically create topics for WAL.<br/>Set to `true` to automatically create topics for WAL.<br/>Otherwise, use topics named `topic_name_prefix_[0..num_topics)` |
| `wal.num_topics` | Integer | `64` | Number of topics.<br/>**It's only used when the provider is `kafka`**. |
| `wal.selector_type` | String | `round_robin` | Topic selector type.<br/>Available selector types:<br/>- `round_robin` (default)<br/>**It's only used when the provider is `kafka`**. |

@@ -108,9 +110,6 @@
| `storage` | -- | -- | The data storage options. |
| `storage.data_home` | String | `./greptimedb_data` | The working home directory. |
| `storage.type` | String | `File` | The storage type used to store the data.<br/>- `File`: the data is stored in the local file system.<br/>- `S3`: the data is stored in the S3 object storage.<br/>- `Gcs`: the data is stored in the Google Cloud Storage.<br/>- `Azblob`: the data is stored in the Azure Blob Storage.<br/>- `Oss`: the data is stored in the Aliyun OSS. |
| `storage.enable_read_cache` | Bool | `true` | Whether to enable read cache. If not set, the read cache will be enabled by default when using object storage. |
| `storage.cache_path` | String | Unset | Read cache configuration for object storage such as 'S3' etc, it's configured by default when using object storage. It is recommended to configure it when using object storage for better performance.<br/>A local file directory, defaults to `{data_home}`. An empty string means disabling. |
| `storage.cache_capacity` | String | Unset | The local file cache capacity in bytes. If your disk space is sufficient, it is recommended to set it larger. |
| `storage.bucket` | String | Unset | The S3 bucket name.<br/>**It's only used when the storage type is `S3`, `Oss` and `Gcs`**. |
| `storage.root` | String | Unset | The S3 data will be stored in the specified prefix, for example, `s3://${bucket}/${root}`.<br/>**It's only used when the storage type is `S3`, `Oss` and `Azblob`**. |
| `storage.access_key_id` | String | Unset | The access key id of the aws account.<br/>It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.<br/>**It's only used when the storage type is `S3` and `Oss`**. |

@@ -141,6 +140,8 @@
| `region_engine.mito.max_background_flushes` | Integer | Auto | Max number of running background flush jobs (default: 1/2 of cpu cores). |
| `region_engine.mito.max_background_compactions` | Integer | Auto | Max number of running background compaction jobs (default: 1/4 of cpu cores). |
| `region_engine.mito.max_background_purges` | Integer | Auto | Max number of running background purge jobs (default: number of cpu cores). |
| `region_engine.mito.experimental_compaction_memory_limit` | String | 0 | Memory budget for compaction tasks. Setting it to 0 or "unlimited" disables the limit. |
| `region_engine.mito.experimental_compaction_on_exhausted` | String | wait | Behavior when compaction cannot acquire memory from the budget.<br/>Options: "wait" (default, 10s), "wait(<duration>)", "fail" |
| `region_engine.mito.auto_flush_interval` | String | `1h` | Interval to auto flush a region if it has not flushed yet. |
| `region_engine.mito.global_write_buffer_size` | String | Auto | Global write buffer size for all regions. If not set, it's default to 1/8 of OS memory with a max limitation of 1GB. |
| `region_engine.mito.global_write_buffer_reject_size` | String | Auto | Global write buffer size threshold to reject write requests. If not set, it's default to 2 times of `global_write_buffer_size`. |

@@ -154,6 +155,8 @@
| `region_engine.mito.write_cache_ttl` | String | Unset | TTL for write cache. |
| `region_engine.mito.preload_index_cache` | Bool | `true` | Preload index (puffin) files into cache on region open (default: true).<br/>When enabled, index files are loaded into the write cache during region initialization,<br/>which can improve query performance at the cost of longer startup times. |
| `region_engine.mito.index_cache_percent` | Integer | `20` | Percentage of write cache capacity allocated for index (puffin) files (default: 20).<br/>The remaining capacity is used for data (parquet) files.<br/>Must be between 0 and 100 (exclusive). For example, with a 5GiB write cache and 20% allocation,<br/>1GiB is reserved for index files and 4GiB for data files. |
| `region_engine.mito.enable_refill_cache_on_read` | Bool | `true` | Enable refilling cache on read operations (default: true).<br/>When disabled, cache refilling on read won't happen. |
| `region_engine.mito.manifest_cache_size` | String | `256MB` | Capacity for manifest cache (default: 256MB). |
| `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
| `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
| `region_engine.mito.max_concurrent_scan_files` | Integer | `384` | Maximum number of SST files to scan concurrently. |

@@ -210,14 +213,6 @@
| `slow_query.record_type` | String | Unset | The record type of slow queries. It can be `system_table` or `log`. |
| `slow_query.threshold` | String | Unset | The threshold of slow query. |
| `slow_query.sample_ratio` | Float | Unset | The sampling ratio of slow query log. The value should be in the range of (0, 1]. |
| `export_metrics` | -- | -- | The standalone can export its metrics and send to Prometheus compatible service (e.g. `greptimedb`) from remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape. |
| `export_metrics.enable` | Bool | `false` | whether enable export metrics. |
| `export_metrics.write_interval` | String | `30s` | The interval of export metrics. |
| `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommended to collect metrics generated by itself<br/>You must create the database before enabling it. |
| `export_metrics.self_import.db` | String | Unset | -- |
| `export_metrics.remote_write` | -- | -- | -- |
| `export_metrics.remote_write.url` | String | `""` | The prometheus remote write endpoint that the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. |
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
| `memory` | -- | -- | The memory options. |

@@ -302,7 +297,6 @@
| `meta_client` | -- | -- | The metasrv client options. |
| `meta_client.metasrv_addrs` | Array | -- | The addresses of the metasrv. |
| `meta_client.timeout` | String | `3s` | Operation timeout. |
| `meta_client.heartbeat_timeout` | String | `500ms` | Heartbeat timeout. |
| `meta_client.ddl_timeout` | String | `10s` | DDL timeout. |
| `meta_client.connect_timeout` | String | `1s` | Connect server timeout. |
| `meta_client.tcp_nodelay` | Bool | `true` | `TCP_NODELAY` option for accepted connections. |

@@ -335,12 +329,6 @@
| `slow_query.threshold` | String | `30s` | The threshold of slow query. It can be human readable time string, for example: `10s`, `100ms`, `1s`. |
| `slow_query.sample_ratio` | Float | `1.0` | The sampling ratio of slow query log. The value should be in the range of (0, 1]. For example, `0.1` means 10% of the slow queries will be logged and `1.0` means all slow queries will be logged. |
| `slow_query.ttl` | String | `90d` | The TTL of the `slow_queries` system table. Default is `90d` when `record_type` is `system_table`. |
| `export_metrics` | -- | -- | The frontend can export its metrics and send to Prometheus compatible service (e.g. `greptimedb` itself) from remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape. |
| `export_metrics.enable` | Bool | `false` | whether enable export metrics. |
| `export_metrics.write_interval` | String | `30s` | The interval of export metrics. |
| `export_metrics.remote_write` | -- | -- | -- |
| `export_metrics.remote_write.url` | String | `""` | The prometheus remote write endpoint that the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. |
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
| `memory` | -- | -- | The memory options. |

@@ -354,7 +342,7 @@
| Key | Type | Default | Descriptions |
| --- | -----| ------- | ----------- |
| `data_home` | String | `./greptimedb_data` | The working home directory. |
| `store_addrs` | Array | -- | Store server address default to etcd store.<br/>For postgres store, the format is:<br/>"password=password dbname=postgres user=postgres host=localhost port=5432"<br/>For etcd store, the format is:<br/>"127.0.0.1:2379" |
| `store_addrs` | Array | -- | Store server address(es). The format depends on the selected backend.<br/><br/>For etcd: a list of "host:port" endpoints.<br/>e.g. ["192.168.1.1:2379", "192.168.1.2:2379"]<br/><br/>For PostgreSQL: a connection string in libpq format or URI.<br/>e.g.<br/>- "host=localhost port=5432 user=postgres password=<PASSWORD> dbname=postgres"<br/>- "postgresql://user:password@localhost:5432/mydb?connect_timeout=10"<br/>The detail see: https://docs.rs/tokio-postgres/latest/tokio_postgres/config/struct.Config.html<br/><br/>For mysql store, the format is a MySQL connection URL.<br/>e.g. "mysql://user:password@localhost:3306/greptime_meta?ssl-mode=VERIFY_CA&ssl-ca=/path/to/ca.pem" |
| `store_key_prefix` | String | `""` | If it's not empty, the metasrv will store all data with this key prefix. |
| `backend` | String | `etcd_store` | The datastore for meta server.<br/>Available values:<br/>- `etcd_store` (default value)<br/>- `memory_store`<br/>- `postgres_store`<br/>- `mysql_store` |
| `meta_table_name` | String | `greptime_metakv` | Table name in RDS to store metadata. Effect when using a RDS kvbackend.<br/>**Only used when backend is `postgres_store`.** |
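The rewritten `store_addrs` description documents a backend-specific format for etcd, PostgreSQL, and MySQL. A hedged sketch of what a matching metasrv configuration for a PostgreSQL backend might look like, using only keys documented in the table above; the file name, password, and the `-c` config-file flag are illustrative assumptions, not taken from this table:

```bash
# Sketch: write a minimal metasrv config using the documented keys, then start the binary with it.
cat > metasrv.toml <<'EOF'
backend = "postgres_store"
store_addrs = ["host=localhost port=5432 user=postgres password=example dbname=postgres"]
meta_table_name = "greptime_metakv"
EOF
greptime metasrv start -c metasrv.toml
```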
@@ -366,22 +354,28 @@
|
||||
| `region_failure_detector_initialization_delay` | String | `10m` | The delay before starting region failure detection.<br/>This delay helps prevent Metasrv from triggering unnecessary region failovers before all Datanodes are fully started.<br/>Especially useful when the cluster is not deployed with GreptimeDB Operator and maintenance mode is not enabled. |
|
||||
| `allow_region_failover_on_local_wal` | Bool | `false` | Whether to allow region failover on local WAL.<br/>**This option is not recommended to be set to true, because it may lead to data loss during failover.** |
|
||||
| `node_max_idle_time` | String | `24hours` | Max allowed idle time before removing node info from metasrv memory. |
|
||||
| `heartbeat_interval` | String | `3s` | Base heartbeat interval for calculating distributed time constants.<br/>The frontend heartbeat interval is 6 times of the base heartbeat interval.<br/>The flownode/datanode heartbeat interval is 1 times of the base heartbeat interval.<br/>e.g., If the base heartbeat interval is 3s, the frontend heartbeat interval is 18s, the flownode/datanode heartbeat interval is 3s.<br/>If you change this value, you need to change the heartbeat interval of the flownode/frontend/datanode accordingly. |
|
||||
| `enable_telemetry` | Bool | `true` | Whether to enable greptimedb telemetry. Enabled by default. |
|
||||
| `runtime` | -- | -- | The runtime options. |
|
||||
| `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
|
||||
| `runtime.compact_rt_size` | Integer | `4` | The number of threads to execute the runtime for global write operations. |
|
||||
| `backend_tls` | -- | -- | TLS configuration for kv store backend (applicable for etcd, PostgreSQL, and MySQL backends)<br/>When using etcd, PostgreSQL, or MySQL as metadata store, you can configure TLS here |
|
||||
| `backend_tls` | -- | -- | TLS configuration for kv store backend (applicable for etcd, PostgreSQL, and MySQL backends)<br/>When using etcd, PostgreSQL, or MySQL as metadata store, you can configure TLS here<br/><br/>Note: if TLS is configured in both this section and the `store_addrs` connection string, the<br/>settings here will override the TLS settings in `store_addrs`. |
|
||||
| `backend_tls.mode` | String | `prefer` | TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html<br/>- "disable" - No TLS<br/>- "prefer" (default) - Try TLS, fallback to plain<br/>- "require" - Require TLS<br/>- "verify_ca" - Require TLS and verify CA<br/>- "verify_full" - Require TLS and verify hostname |
|
||||
| `backend_tls.cert_path` | String | `""` | Path to client certificate file (for client authentication)<br/>Like "/path/to/client.crt" |
|
||||
| `backend_tls.key_path` | String | `""` | Path to client private key file (for client authentication)<br/>Like "/path/to/client.key" |
|
||||
| `backend_tls.ca_cert_path` | String | `""` | Path to CA certificate file (for server certificate verification)<br/>Required when using custom CAs or self-signed certificates<br/>Leave empty to use system root certificates only<br/>Like "/path/to/ca.crt" |
|
||||
| `backend_tls.watch` | Bool | `false` | Watch for certificate file changes and auto reload |
|
||||
| `backend_client` | -- | -- | The backend client options.<br/>Currently, only applicable when using etcd as the metadata store. |
|
||||
| `backend_client.keep_alive_timeout` | String | `3s` | The keep alive timeout for backend client. |
|
||||
| `backend_client.keep_alive_interval` | String | `10s` | The keep alive interval for backend client. |
|
||||
| `backend_client.connect_timeout` | String | `3s` | The connect timeout for backend client. |
|
||||
| `grpc` | -- | -- | The gRPC server options. |
|
||||
| `grpc.bind_addr` | String | `127.0.0.1:3002` | The address to bind the gRPC server. |
|
||||
| `grpc.server_addr` | String | `127.0.0.1:3002` | The communication server address for the frontend and datanode to connect to metasrv.<br/>If left empty or unset, the server will automatically use the IP address of the first network interface<br/>on the host, with the same port number as the one specified in `bind_addr`. |
|
||||
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
|
||||
| `grpc.max_recv_message_size` | String | `512MB` | The maximum receive message size for gRPC server. |
|
||||
| `grpc.max_send_message_size` | String | `512MB` | The maximum send message size for gRPC server. |
|
||||
| `grpc.http2_keep_alive_interval` | String | `10s` | The server side HTTP/2 keep-alive interval |
|
||||
| `grpc.http2_keep_alive_timeout` | String | `3s` | The server side HTTP/2 keep-alive timeout. |
|
||||
| `http` | -- | -- | The HTTP server options. |
|
||||
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
|
||||
| `http.timeout` | String | `0s` | HTTP request timeout. Set to 0 to disable timeout. |
|
||||
@@ -430,12 +424,6 @@
|
||||
| `logging.otlp_headers` | -- | -- | Additional OTLP headers, only valid when using OTLP http |
|
||||
| `logging.tracing_sample_ratio` | -- | Unset | The percentage of tracing will be sampled and exported.<br/>Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.<br/>ratio > 1 are treated as 1. Fractions < 0 are treated as 0 |
|
||||
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
|
||||
| `export_metrics` | -- | -- | The metasrv can export its metrics and send to Prometheus compatible service (e.g. `greptimedb` itself) from remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape. |
|
||||
| `export_metrics.enable` | Bool | `false` | Whether to enable metrics export. |
|
||||
| `export_metrics.write_interval` | String | `30s` | The interval at which metrics are exported. |
|
||||
| `export_metrics.remote_write` | -- | -- | -- |
|
||||
| `export_metrics.remote_write.url` | String | `""` | The Prometheus remote-write endpoint that the metrics are sent to, for example: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. |
|
||||
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers carried by the Prometheus remote-write requests. |
|
||||
| `tracing` | -- | -- | The tracing options. Only takes effect when compiled with the `tokio-console` feature. |
|
||||
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
|
||||
| `memory` | -- | -- | The memory options. |
|
||||
@@ -478,7 +466,6 @@
|
||||
| `meta_client` | -- | -- | The metasrv client options. |
|
||||
| `meta_client.metasrv_addrs` | Array | -- | The addresses of the metasrv. |
|
||||
| `meta_client.timeout` | String | `3s` | Operation timeout. |
|
||||
| `meta_client.heartbeat_timeout` | String | `500ms` | Heartbeat timeout. |
|
||||
| `meta_client.ddl_timeout` | String | `10s` | DDL timeout. |
|
||||
| `meta_client.connect_timeout` | String | `1s` | Connect server timeout. |
|
||||
| `meta_client.tcp_nodelay` | Bool | `true` | `TCP_NODELAY` option for accepted connections. |
|
||||
@@ -498,6 +485,8 @@
|
||||
| `wal.sync_period` | String | `10s` | Duration for fsyncing log files.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||
| `wal.recovery_parallelism` | Integer | `2` | Parallelism during WAL recovery. |
|
||||
| `wal.broker_endpoints` | Array | -- | The Kafka broker endpoints.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.connect_timeout` | String | `3s` | The connect timeout for kafka client.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.timeout` | String | `3s` | The timeout for kafka client.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.max_batch_bytes` | String | `1MB` | The max size of a single producer batch.<br/>Warning: Kafka has a default limit of 1MB per message in a topic.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.consumer_wait_timeout` | String | `100ms` | The consumer wait timeout.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.create_index` | Bool | `true` | Whether to enable WAL index creation.<br/>**It's only used when the provider is `kafka`**. |
|
||||
@@ -509,9 +498,6 @@
|
||||
| `storage` | -- | -- | The data storage options. |
|
||||
| `storage.data_home` | String | `./greptimedb_data` | The working home directory. |
|
||||
| `storage.type` | String | `File` | The storage type used to store the data.<br/>- `File`: the data is stored in the local file system.<br/>- `S3`: the data is stored in the S3 object storage.<br/>- `Gcs`: the data is stored in the Google Cloud Storage.<br/>- `Azblob`: the data is stored in the Azure Blob Storage.<br/>- `Oss`: the data is stored in the Aliyun OSS. |
|
||||
| `storage.cache_path` | String | Unset | Read cache configuration for object storage such as 'S3'. It is configured by default when using object storage and is recommended for better performance.<br/>A local file directory, defaults to `{data_home}`. An empty string disables the cache. |
|
||||
| `storage.enable_read_cache` | Bool | `true` | Whether to enable read cache. If not set, the read cache will be enabled by default when using object storage. |
|
||||
| `storage.cache_capacity` | String | Unset | The local file cache capacity in bytes. If your disk space is sufficient, it is recommended to set it larger. |
|
||||
| `storage.bucket` | String | Unset | The S3 bucket name.<br/>**It's only used when the storage type is `S3`, `Oss` and `Gcs`**. |
|
||||
| `storage.root` | String | Unset | The S3 data will be stored in the specified prefix, for example, `s3://${bucket}/${root}`.<br/>**It's only used when the storage type is `S3`, `Oss` and `Azblob`**. |
|
||||
| `storage.access_key_id` | String | Unset | The access key id of the aws account.<br/>It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.<br/>**It's only used when the storage type is `S3` and `Oss`**. |
|
||||
@@ -544,6 +530,8 @@
|
||||
| `region_engine.mito.max_background_flushes` | Integer | Auto | Max number of running background flush jobs (default: 1/2 of cpu cores). |
|
||||
| `region_engine.mito.max_background_compactions` | Integer | Auto | Max number of running background compaction jobs (default: 1/4 of cpu cores). |
|
||||
| `region_engine.mito.max_background_purges` | Integer | Auto | Max number of running background purge jobs (default: number of cpu cores). |
|
||||
| `region_engine.mito.experimental_compaction_memory_limit` | String | 0 | Memory budget for compaction tasks. Setting it to 0 or "unlimited" disables the limit. |
|
||||
| `region_engine.mito.experimental_compaction_on_exhausted` | String | wait | Behavior when compaction cannot acquire memory from the budget.<br/>Options: "wait" (default, 10s), "wait(<duration>)", "fail" |
|
||||
| `region_engine.mito.auto_flush_interval` | String | `1h` | Interval to auto flush a region if it has not flushed yet. |
|
||||
| `region_engine.mito.global_write_buffer_size` | String | Auto | Global write buffer size for all regions. If not set, it's default to 1/8 of OS memory with a max limitation of 1GB. |
|
||||
| `region_engine.mito.global_write_buffer_reject_size` | String | Auto | Global write buffer size threshold to reject write requests. If not set, it's default to 2 times of `global_write_buffer_size` |
|
||||
@@ -557,6 +545,8 @@
|
||||
| `region_engine.mito.write_cache_ttl` | String | Unset | TTL for write cache. |
|
||||
| `region_engine.mito.preload_index_cache` | Bool | `true` | Preload index (puffin) files into cache on region open (default: true).<br/>When enabled, index files are loaded into the write cache during region initialization,<br/>which can improve query performance at the cost of longer startup times. |
|
||||
| `region_engine.mito.index_cache_percent` | Integer | `20` | Percentage of write cache capacity allocated for index (puffin) files (default: 20).<br/>The remaining capacity is used for data (parquet) files.<br/>Must be between 0 and 100 (exclusive). For example, with a 5GiB write cache and 20% allocation,<br/>1GiB is reserved for index files and 4GiB for data files. |
|
||||
| `region_engine.mito.enable_refill_cache_on_read` | Bool | `true` | Enable refilling cache on read operations (default: true).<br/>When disabled, cache refilling on read won't happen. |
|
||||
| `region_engine.mito.manifest_cache_size` | String | `256MB` | Capacity for manifest cache (default: 256MB). |
|
||||
| `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
|
||||
| `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
|
||||
| `region_engine.mito.max_concurrent_scan_files` | Integer | `384` | Maximum number of SST files to scan concurrently. |
|
||||
@@ -608,12 +598,6 @@
|
||||
| `logging.otlp_headers` | -- | -- | Additional OTLP headers, only valid when using OTLP http |
|
||||
| `logging.tracing_sample_ratio` | -- | Unset | The percentage of traces that will be sampled and exported.<br/>Valid range `[0, 1]`: 1 means all traces are sampled, 0 means no traces are sampled; the default value is 1.<br/>Ratios > 1 are treated as 1 and ratios < 0 as 0. |
|
||||
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
|
||||
| `export_metrics` | -- | -- | The datanode can export its metrics and send them to a Prometheus-compatible service (e.g. `greptimedb` itself) via the remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from Prometheus scraping. |
|
||||
| `export_metrics.enable` | Bool | `false` | Whether to enable metrics export. |
|
||||
| `export_metrics.write_interval` | String | `30s` | The interval at which metrics are exported. |
|
||||
| `export_metrics.remote_write` | -- | -- | -- |
|
||||
| `export_metrics.remote_write.url` | String | `""` | The Prometheus remote-write endpoint that the metrics are sent to, for example: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. |
|
||||
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers carried by the Prometheus remote-write requests. |
|
||||
| `tracing` | -- | -- | The tracing options. Only takes effect when compiled with the `tokio-console` feature. |
|
||||
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
|
||||
| `memory` | -- | -- | The memory options. |
|
||||
@@ -656,7 +640,6 @@
|
||||
| `meta_client` | -- | -- | The metasrv client options. |
|
||||
| `meta_client.metasrv_addrs` | Array | -- | The addresses of the metasrv. |
|
||||
| `meta_client.timeout` | String | `3s` | Operation timeout. |
|
||||
| `meta_client.heartbeat_timeout` | String | `500ms` | Heartbeat timeout. |
|
||||
| `meta_client.ddl_timeout` | String | `10s` | DDL timeout. |
|
||||
| `meta_client.connect_timeout` | String | `1s` | Connect server timeout. |
|
||||
| `meta_client.tcp_nodelay` | Bool | `true` | `TCP_NODELAY` option for accepted connections. |
|
||||
|
||||
@@ -99,9 +99,6 @@ metasrv_addrs = ["127.0.0.1:3002"]
|
||||
## Operation timeout.
|
||||
timeout = "3s"
|
||||
|
||||
## Heartbeat timeout.
|
||||
heartbeat_timeout = "500ms"
|
||||
|
||||
## DDL timeout.
|
||||
ddl_timeout = "10s"
|
||||
|
||||
@@ -172,6 +169,14 @@ recovery_parallelism = 2
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
broker_endpoints = ["127.0.0.1:9092"]
|
||||
|
||||
## The connect timeout for kafka client.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
#+ connect_timeout = "3s"
|
||||
|
||||
## The timeout for kafka client.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
#+ timeout = "3s"
|
||||
|
||||
## The max size of a single producer batch.
|
||||
## Warning: Kafka has a default limit of 1MB per message in a topic.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
@@ -228,6 +233,7 @@ overwrite_entry_start_id = false
|
||||
# endpoint = "https://s3.amazonaws.com"
|
||||
# region = "us-west-2"
|
||||
# enable_virtual_host_style = false
|
||||
# disable_ec2_metadata = false
|
||||
|
||||
# Example of using Oss as the storage.
|
||||
# [storage]
|
||||
@@ -284,18 +290,6 @@ data_home = "./greptimedb_data"
|
||||
## - `Oss`: the data is stored in the Aliyun OSS.
|
||||
type = "File"
|
||||
|
||||
## Read cache configuration for object storage such as 'S3'. It is configured by default when using object storage and is recommended for better performance.
|
||||
## A local file directory, defaults to `{data_home}`. An empty string means disabling.
|
||||
## @toml2docs:none-default
|
||||
#+ cache_path = ""
|
||||
|
||||
## Whether to enable read cache. If not set, the read cache will be enabled by default when using object storage.
|
||||
#+ enable_read_cache = true
|
||||
|
||||
## The local file cache capacity in bytes. If your disk space is sufficient, it is recommended to set it larger.
|
||||
## @toml2docs:none-default
|
||||
cache_capacity = "5GiB"
|
||||
|
||||
## The S3 bucket name.
|
||||
## **It's only used when the storage type is `S3`, `Oss` and `Gcs`**.
|
||||
## @toml2docs:none-default
|
||||
@@ -455,6 +449,15 @@ compress_manifest = false
|
||||
## @toml2docs:none-default="Auto"
|
||||
#+ max_background_purges = 8
|
||||
|
||||
## Memory budget for compaction tasks. Setting it to 0 or "unlimited" disables the limit.
|
||||
## @toml2docs:none-default="0"
|
||||
#+ experimental_compaction_memory_limit = "0"
|
||||
|
||||
## Behavior when compaction cannot acquire memory from the budget.
|
||||
## Options: "wait" (default, 10s), "wait(<duration>)", "fail"
|
||||
## @toml2docs:none-default="wait"
|
||||
#+ experimental_compaction_on_exhausted = "wait"
|
||||
|
||||
## Interval to auto flush a region if it has not flushed yet.
|
||||
auto_flush_interval = "1h"
|
||||
|
||||
@@ -510,6 +513,13 @@ preload_index_cache = true
|
||||
## 1GiB is reserved for index files and 4GiB for data files.
|
||||
index_cache_percent = 20
|
||||
|
||||
## Enable refilling cache on read operations (default: true).
|
||||
## When disabled, cache refilling on read won't happen.
|
||||
enable_refill_cache_on_read = true
|
||||
|
||||
## Capacity for manifest cache (default: 256MB).
|
||||
manifest_cache_size = "256MB"
|
||||
|
||||
## Buffer size for SST writing.
|
||||
sst_write_buffer_size = "8MB"
|
||||
|
||||
@@ -712,21 +722,6 @@ otlp_export_protocol = "http"
|
||||
[logging.tracing_sample_ratio]
|
||||
default_ratio = 1.0
|
||||
|
||||
## The datanode can export its metrics and send to Prometheus compatible service (e.g. `greptimedb` itself) from remote-write API.
|
||||
## This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
|
||||
[export_metrics]
|
||||
## Whether to enable metrics export.
|
||||
enable = false
|
||||
## The interval of export metrics.
|
||||
write_interval = "30s"
|
||||
|
||||
[export_metrics.remote_write]
|
||||
## The prometheus remote write endpoint that the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
|
||||
url = ""
|
||||
|
||||
## HTTP headers of Prometheus remote-write carry.
|
||||
headers = { }
|
||||
|
||||
## The tracing options. Only effect when compiled with `tokio-console` feature.
|
||||
#+ [tracing]
|
||||
## The tokio console address.
|
||||
|
||||
@@ -78,9 +78,6 @@ metasrv_addrs = ["127.0.0.1:3002"]
|
||||
## Operation timeout.
|
||||
timeout = "3s"
|
||||
|
||||
## Heartbeat timeout.
|
||||
heartbeat_timeout = "500ms"
|
||||
|
||||
## DDL timeout.
|
||||
ddl_timeout = "10s"
|
||||
|
||||
|
||||
@@ -131,7 +131,6 @@ key_path = ""
|
||||
## For now, gRPC tls config does not support auto reload.
|
||||
watch = false
|
||||
|
||||
|
||||
## MySQL server options.
|
||||
[mysql]
|
||||
## Whether to enable.
|
||||
@@ -226,9 +225,6 @@ metasrv_addrs = ["127.0.0.1:3002"]
|
||||
## Operation timeout.
|
||||
timeout = "3s"
|
||||
|
||||
## Heartbeat timeout.
|
||||
heartbeat_timeout = "500ms"
|
||||
|
||||
## DDL timeout.
|
||||
ddl_timeout = "10s"
|
||||
|
||||
@@ -329,21 +325,6 @@ sample_ratio = 1.0
|
||||
## The TTL of the `slow_queries` system table. Default is `90d` when `record_type` is `system_table`.
|
||||
ttl = "90d"
|
||||
|
||||
## The frontend can export its metrics and send to Prometheus compatible service (e.g. `greptimedb` itself) from remote-write API.
|
||||
## This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
|
||||
[export_metrics]
|
||||
## Whether to enable metrics export.
|
||||
enable = false
|
||||
## The interval of export metrics.
|
||||
write_interval = "30s"
|
||||
|
||||
[export_metrics.remote_write]
|
||||
## The prometheus remote write endpoint that the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
|
||||
url = ""
|
||||
|
||||
## HTTP headers of Prometheus remote-write carry.
|
||||
headers = { }
|
||||
|
||||
## The tracing options. Only effect when compiled with `tokio-console` feature.
|
||||
#+ [tracing]
|
||||
## The tokio console address.
|
||||
|
||||
@@ -1,11 +1,19 @@
|
||||
## The working home directory.
|
||||
data_home = "./greptimedb_data"
|
||||
|
||||
## Store server address default to etcd store.
|
||||
## For postgres store, the format is:
|
||||
## "password=password dbname=postgres user=postgres host=localhost port=5432"
|
||||
## For etcd store, the format is:
|
||||
## "127.0.0.1:2379"
|
||||
## Store server address(es). The format depends on the selected backend.
|
||||
##
|
||||
## For etcd: a list of "host:port" endpoints.
|
||||
## e.g. ["192.168.1.1:2379", "192.168.1.2:2379"]
|
||||
##
|
||||
## For PostgreSQL: a connection string in libpq format or URI.
|
||||
## e.g.
|
||||
## - "host=localhost port=5432 user=postgres password=<PASSWORD> dbname=postgres"
|
||||
## - "postgresql://user:password@localhost:5432/mydb?connect_timeout=10"
|
||||
## For details, see: https://docs.rs/tokio-postgres/latest/tokio_postgres/config/struct.Config.html
|
||||
##
|
||||
## For MySQL: a connection URL.
|
||||
## e.g. "mysql://user:password@localhost:3306/greptime_meta?ssl-mode=VERIFY_CA&ssl-ca=/path/to/ca.pem"
|
||||
store_addrs = ["127.0.0.1:2379"]
|
||||
|
||||
## If it's not empty, the metasrv will store all data with this key prefix.
|
||||
@@ -63,6 +71,13 @@ allow_region_failover_on_local_wal = false
|
||||
## Max allowed idle time before removing node info from metasrv memory.
|
||||
node_max_idle_time = "24hours"
|
||||
|
||||
## Base heartbeat interval for calculating distributed time constants.
|
||||
## The frontend heartbeat interval is 6 times the base heartbeat interval.
|
||||
## The flownode/datanode heartbeat interval is equal to the base heartbeat interval.
|
||||
## e.g., If the base heartbeat interval is 3s, the frontend heartbeat interval is 18s, the flownode/datanode heartbeat interval is 3s.
|
||||
## If you change this value, you need to change the heartbeat interval of the flownode/frontend/datanode accordingly.
|
||||
#+ heartbeat_interval = "3s"
|
||||
|
||||
## Whether to enable greptimedb telemetry. Enabled by default.
|
||||
#+ enable_telemetry = true
|
||||
|
||||
@@ -75,6 +90,9 @@ node_max_idle_time = "24hours"
|
||||
|
||||
## TLS configuration for kv store backend (applicable for etcd, PostgreSQL, and MySQL backends)
|
||||
## When using etcd, PostgreSQL, or MySQL as metadata store, you can configure TLS here
|
||||
##
|
||||
## Note: if TLS is configured in both this section and the `store_addrs` connection string, the
|
||||
## settings here will override the TLS settings in `store_addrs`.
|
||||
[backend_tls]
|
||||
## TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html
|
||||
## - "disable" - No TLS
|
||||
@@ -98,8 +116,15 @@ key_path = ""
|
||||
## Like "/path/to/ca.crt"
|
||||
ca_cert_path = ""
|
||||
|
||||
## Watch for certificate file changes and auto reload
|
||||
watch = false
|
||||
## The backend client options.
|
||||
## Currently, only applicable when using etcd as the metadata store.
|
||||
#+ [backend_client]
|
||||
## The keep alive timeout for backend client.
|
||||
#+ keep_alive_timeout = "3s"
|
||||
## The keep alive interval for backend client.
|
||||
#+ keep_alive_interval = "10s"
|
||||
## The connect timeout for backend client.
|
||||
#+ connect_timeout = "3s"
|
||||
|
||||
## The gRPC server options.
|
||||
[grpc]
|
||||
@@ -115,6 +140,10 @@ runtime_size = 8
|
||||
max_recv_message_size = "512MB"
|
||||
## The maximum send message size for gRPC server.
|
||||
max_send_message_size = "512MB"
|
||||
## The server side HTTP/2 keep-alive interval
|
||||
#+ http2_keep_alive_interval = "10s"
|
||||
## The server side HTTP/2 keep-alive timeout.
|
||||
#+ http2_keep_alive_timeout = "3s"
|
||||
|
||||
## The HTTP server options.
|
||||
[http]
|
||||
@@ -323,21 +352,6 @@ otlp_export_protocol = "http"
|
||||
[logging.tracing_sample_ratio]
|
||||
default_ratio = 1.0
|
||||
|
||||
## The metasrv can export its metrics and send to Prometheus compatible service (e.g. `greptimedb` itself) from remote-write API.
|
||||
## This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
|
||||
[export_metrics]
|
||||
## Whether to enable metrics export.
|
||||
enable = false
|
||||
## The interval of export metrics.
|
||||
write_interval = "30s"
|
||||
|
||||
[export_metrics.remote_write]
|
||||
## The prometheus remote write endpoint that the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
|
||||
url = ""
|
||||
|
||||
## HTTP headers of Prometheus remote-write carry.
|
||||
headers = { }
|
||||
|
||||
## The tracing options. Only effect when compiled with `tokio-console` feature.
|
||||
#+ [tracing]
|
||||
## The tokio console address.
|
||||
|
||||
@@ -230,6 +230,14 @@ recovery_parallelism = 2
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
broker_endpoints = ["127.0.0.1:9092"]
|
||||
|
||||
## The connect timeout for kafka client.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
#+ connect_timeout = "3s"
|
||||
|
||||
## The timeout for kafka client.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
#+ timeout = "3s"
|
||||
|
||||
## Automatically create topics for WAL.
|
||||
## Set to `true` to automatically create topics for WAL.
|
||||
## Otherwise, use topics named `topic_name_prefix_[0..num_topics)`
|
||||
@@ -332,6 +340,7 @@ max_running_procedures = 128
|
||||
# endpoint = "https://s3.amazonaws.com"
|
||||
# region = "us-west-2"
|
||||
# enable_virtual_host_style = false
|
||||
# disable_ec2_metadata = false
|
||||
|
||||
# Example of using Oss as the storage.
|
||||
# [storage]
|
||||
@@ -388,18 +397,6 @@ data_home = "./greptimedb_data"
|
||||
## - `Oss`: the data is stored in the Aliyun OSS.
|
||||
type = "File"
|
||||
|
||||
## Whether to enable read cache. If not set, the read cache will be enabled by default when using object storage.
|
||||
#+ enable_read_cache = true
|
||||
|
||||
## Read cache configuration for object storage such as 'S3'. It is configured by default when using object storage and is recommended for better performance.
|
||||
## A local file directory, defaults to `{data_home}`. An empty string means disabling.
|
||||
## @toml2docs:none-default
|
||||
#+ cache_path = ""
|
||||
|
||||
## The local file cache capacity in bytes. If your disk space is sufficient, it is recommended to set it larger.
|
||||
## @toml2docs:none-default
|
||||
cache_capacity = "5GiB"
|
||||
|
||||
## The S3 bucket name.
|
||||
## **It's only used when the storage type is `S3`, `Oss` and `Gcs`**.
|
||||
## @toml2docs:none-default
|
||||
@@ -546,6 +543,15 @@ compress_manifest = false
|
||||
## @toml2docs:none-default="Auto"
|
||||
#+ max_background_purges = 8
|
||||
|
||||
## Memory budget for compaction tasks. Setting it to 0 or "unlimited" disables the limit.
|
||||
## @toml2docs:none-default="0"
|
||||
#+ experimental_compaction_memory_limit = "0"
|
||||
|
||||
## Behavior when compaction cannot acquire memory from the budget.
|
||||
## Options: "wait" (default, 10s), "wait(<duration>)", "fail"
|
||||
## @toml2docs:none-default="wait"
|
||||
#+ experimental_compaction_on_exhausted = "wait"
|
||||
|
||||
## Interval to auto flush a region if it has not flushed yet.
|
||||
auto_flush_interval = "1h"
|
||||
|
||||
@@ -601,6 +607,13 @@ preload_index_cache = true
|
||||
## 1GiB is reserved for index files and 4GiB for data files.
|
||||
index_cache_percent = 20
|
||||
|
||||
## Enable refilling cache on read operations (default: true).
|
||||
## When disabled, cache refilling on read won't happen.
|
||||
enable_refill_cache_on_read = true
|
||||
|
||||
## Capacity for manifest cache (default: 256MB).
|
||||
manifest_cache_size = "256MB"
|
||||
|
||||
## Buffer size for SST writing.
|
||||
sst_write_buffer_size = "8MB"
|
||||
|
||||
@@ -820,27 +833,6 @@ default_ratio = 1.0
|
||||
## @toml2docs:none-default
|
||||
#+ sample_ratio = 1.0
|
||||
|
||||
## The standalone can export its metrics and send to Prometheus compatible service (e.g. `greptimedb`) from remote-write API.
|
||||
## This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
|
||||
[export_metrics]
|
||||
## Whether to enable metrics export.
|
||||
enable = false
|
||||
## The interval of export metrics.
|
||||
write_interval = "30s"
|
||||
|
||||
## For `standalone` mode, `self_import` is recommended to collect metrics generated by itself
|
||||
## You must create the database before enabling it.
|
||||
[export_metrics.self_import]
|
||||
## @toml2docs:none-default
|
||||
db = "greptime_metrics"
|
||||
|
||||
[export_metrics.remote_write]
|
||||
## The prometheus remote write endpoint that the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
|
||||
url = ""
|
||||
|
||||
## HTTP headers of Prometheus remote-write carry.
|
||||
headers = { }
|
||||
|
||||
## The tracing options. Only effect when compiled with `tokio-console` feature.
|
||||
#+ [tracing]
|
||||
## The tokio console address.
|
||||
|
||||
@@ -1,10 +1,10 @@
|
||||
FROM centos:7 as builder
|
||||
FROM centos:7 AS builder
|
||||
|
||||
ARG CARGO_PROFILE
|
||||
ARG FEATURES
|
||||
ARG OUTPUT_DIR
|
||||
|
||||
ENV LANG en_US.utf8
|
||||
ENV LANG=en_US.utf8
|
||||
WORKDIR /greptimedb
|
||||
|
||||
# Install dependencies
|
||||
@@ -22,7 +22,7 @@ RUN unzip protoc-3.15.8-linux-x86_64.zip -d /usr/local/
|
||||
# Install Rust
|
||||
SHELL ["/bin/bash", "-c"]
|
||||
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
|
||||
ENV PATH /usr/local/bin:/root/.cargo/bin/:$PATH
|
||||
ENV PATH=/usr/local/bin:/root/.cargo/bin/:$PATH
|
||||
|
||||
# Build the project in release mode.
|
||||
RUN --mount=target=.,rw \
|
||||
@@ -33,7 +33,7 @@ RUN --mount=target=.,rw \
|
||||
TARGET_DIR=/out/target
|
||||
|
||||
# Export the binary to the clean image.
|
||||
FROM centos:7 as base
|
||||
FROM centos:7 AS base
|
||||
|
||||
ARG OUTPUT_DIR
|
||||
|
||||
@@ -45,7 +45,7 @@ RUN yum install -y epel-release \
|
||||
|
||||
WORKDIR /greptime
|
||||
COPY --from=builder /out/target/${OUTPUT_DIR}/greptime /greptime/bin/
|
||||
ENV PATH /greptime/bin/:$PATH
|
||||
ENV PATH=/greptime/bin/:$PATH
|
||||
|
||||
ENV MALLOC_CONF="prof:true,prof_active:false"
|
||||
|
||||
|
||||
65
docker/buildx/distroless/Dockerfile
Normal file
@@ -0,0 +1,65 @@
|
||||
FROM ubuntu:22.04 AS builder
|
||||
|
||||
ARG CARGO_PROFILE
|
||||
ARG FEATURES
|
||||
ARG OUTPUT_DIR
|
||||
|
||||
ENV LANG=en_US.utf8
|
||||
WORKDIR /greptimedb
|
||||
|
||||
RUN apt-get update && \
|
||||
DEBIAN_FRONTEND=noninteractive apt-get install -y software-properties-common
|
||||
|
||||
# Install dependencies.
|
||||
RUN --mount=type=cache,target=/var/cache/apt \
|
||||
apt-get update && apt-get install -y \
|
||||
libssl-dev \
|
||||
protobuf-compiler \
|
||||
curl \
|
||||
git \
|
||||
build-essential \
|
||||
pkg-config
|
||||
|
||||
# Install Rust.
|
||||
SHELL ["/bin/bash", "-c"]
|
||||
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
|
||||
ENV PATH=/root/.cargo/bin/:$PATH
|
||||
|
||||
# Build the project in release mode.
|
||||
RUN --mount=target=. \
|
||||
--mount=type=cache,target=/root/.cargo/registry \
|
||||
make build \
|
||||
CARGO_PROFILE=${CARGO_PROFILE} \
|
||||
FEATURES=${FEATURES} \
|
||||
TARGET_DIR=/out/target
|
||||
|
||||
FROM ubuntu:22.04 AS libs
|
||||
|
||||
ARG TARGETARCH
|
||||
|
||||
# Copy required library dependencies based on architecture
|
||||
RUN if [ "$TARGETARCH" = "amd64" ]; then \
|
||||
cp /lib/x86_64-linux-gnu/libz.so.1.2.11 /lib/x86_64-linux-gnu/libz.so.1; \
|
||||
elif [ "$TARGETARCH" = "arm64" ]; then \
|
||||
cp /lib/aarch64-linux-gnu/libz.so.1.2.11 /lib/aarch64-linux-gnu/libz.so.1; \
|
||||
else \
|
||||
echo "Unsupported architecture: $TARGETARCH" && exit 1; \
|
||||
fi
|
||||
|
||||
# Export the binary to the clean distroless image.
|
||||
FROM gcr.io/distroless/cc-debian12:latest AS base
|
||||
|
||||
ARG OUTPUT_DIR
|
||||
ARG TARGETARCH
|
||||
|
||||
# Copy required library dependencies
|
||||
COPY --from=libs /lib /lib
|
||||
COPY --from=busybox:stable /bin/busybox /bin/busybox
|
||||
|
||||
WORKDIR /greptime
|
||||
COPY --from=builder /out/target/${OUTPUT_DIR}/greptime /greptime/bin/greptime
|
||||
ENV PATH=/greptime/bin/:$PATH
|
||||
|
||||
ENV MALLOC_CONF="prof:true,prof_active:false"
|
||||
|
||||
ENTRYPOINT ["greptime"]
|
||||
@@ -1,10 +1,10 @@
|
||||
FROM ubuntu:22.04 as builder
|
||||
FROM ubuntu:22.04 AS builder
|
||||
|
||||
ARG CARGO_PROFILE
|
||||
ARG FEATURES
|
||||
ARG OUTPUT_DIR
|
||||
|
||||
ENV LANG en_US.utf8
|
||||
ENV LANG=en_US.utf8
|
||||
WORKDIR /greptimedb
|
||||
|
||||
RUN apt-get update && \
|
||||
@@ -23,7 +23,7 @@ RUN --mount=type=cache,target=/var/cache/apt \
|
||||
# Install Rust.
|
||||
SHELL ["/bin/bash", "-c"]
|
||||
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
|
||||
ENV PATH /root/.cargo/bin/:$PATH
|
||||
ENV PATH=/root/.cargo/bin/:$PATH
|
||||
|
||||
# Build the project in release mode.
|
||||
RUN --mount=target=. \
|
||||
@@ -35,7 +35,7 @@ RUN --mount=target=. \
|
||||
|
||||
# Export the binary to the clean image.
|
||||
# TODO(zyy17): Maybe should use the more secure container image.
|
||||
FROM ubuntu:22.04 as base
|
||||
FROM ubuntu:22.04 AS base
|
||||
|
||||
ARG OUTPUT_DIR
|
||||
|
||||
@@ -45,7 +45,7 @@ RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get \
|
||||
|
||||
WORKDIR /greptime
|
||||
COPY --from=builder /out/target/${OUTPUT_DIR}/greptime /greptime/bin/
|
||||
ENV PATH /greptime/bin/:$PATH
|
||||
ENV PATH=/greptime/bin/:$PATH
|
||||
|
||||
ENV MALLOC_CONF="prof:true,prof_active:false"
|
||||
|
||||
|
||||
@@ -13,7 +13,7 @@ ARG TARGETARCH
|
||||
|
||||
ADD $TARGETARCH/greptime /greptime/bin/
|
||||
|
||||
ENV PATH /greptime/bin/:$PATH
|
||||
ENV PATH=/greptime/bin/:$PATH
|
||||
|
||||
ENV MALLOC_CONF="prof:true,prof_active:false"
|
||||
|
||||
|
||||
40
docker/ci/distroless/Dockerfile
Normal file
@@ -0,0 +1,40 @@
|
||||
FROM ubuntu:22.04 AS libs
|
||||
|
||||
ARG TARGETARCH
|
||||
|
||||
# Copy required library dependencies based on architecture
|
||||
# TARGETARCH values: amd64, arm64
|
||||
# Ubuntu library paths: x86_64-linux-gnu, aarch64-linux-gnu
|
||||
RUN if [ "$TARGETARCH" = "amd64" ]; then \
|
||||
mkdir -p /output/x86_64-linux-gnu && \
|
||||
cp /lib/x86_64-linux-gnu/libz.so.1.2.11 /output/x86_64-linux-gnu/libz.so.1; \
|
||||
elif [ "$TARGETARCH" = "arm64" ]; then \
|
||||
mkdir -p /output/aarch64-linux-gnu && \
|
||||
cp /lib/aarch64-linux-gnu/libz.so.1.2.11 /output/aarch64-linux-gnu/libz.so.1; \
|
||||
else \
|
||||
echo "Unsupported architecture: $TARGETARCH" && exit 1; \
|
||||
fi
|
||||
|
||||
FROM gcr.io/distroless/cc-debian12:latest
|
||||
|
||||
# The root path under which contains all the dependencies to build this Dockerfile.
|
||||
ARG DOCKER_BUILD_ROOT=.
|
||||
# The binary name of GreptimeDB executable.
|
||||
# Defaults to "greptime", but sometimes in other projects it might be different.
|
||||
ARG TARGET_BIN=greptime
|
||||
|
||||
ARG TARGETARCH
|
||||
|
||||
# Copy required library dependencies
|
||||
COPY --from=libs /output /lib
|
||||
COPY --from=busybox:stable /bin/busybox /bin/busybox
|
||||
|
||||
ADD $TARGETARCH/$TARGET_BIN /greptime/bin/
|
||||
|
||||
ENV PATH=/greptime/bin/:$PATH
|
||||
|
||||
ENV TARGET_BIN=$TARGET_BIN
|
||||
|
||||
ENV MALLOC_CONF="prof:true,prof_active:false"
|
||||
|
||||
ENTRYPOINT ["greptime"]
|
||||
@@ -14,7 +14,7 @@ ARG TARGETARCH
|
||||
|
||||
ADD $TARGETARCH/$TARGET_BIN /greptime/bin/
|
||||
|
||||
ENV PATH /greptime/bin/:$PATH
|
||||
ENV PATH=/greptime/bin/:$PATH
|
||||
|
||||
ENV TARGET_BIN=$TARGET_BIN
|
||||
|
||||
|
||||
@@ -13,4 +13,19 @@ Log Level changed from Some("info") to "trace,flow=debug"%
|
||||
|
||||
The data is a string in the format of `global_level,module1=level1,module2=level2,...` that follows the same rule of `RUST_LOG`.
|
||||
|
||||
The module is the module name of the log, and the level is the log level. The log level can be one of the following: `trace`, `debug`, `info`, `warn`, `error`, `off` (case insensitive).
|
||||
|
||||
# Enable/Disable Trace on the Fly
|
||||
|
||||
## HTTP API
|
||||
|
||||
Example:
|
||||
```bash
|
||||
curl --data "true" 127.0.0.1:4000/debug/enable_trace
|
||||
```
|
||||
And the database will reply with something like:
|
||||
```
|
||||
trace enabled%
|
||||
```
|
||||
|
||||
Possible values are "true" or "false".
|
||||
|
||||
@@ -106,6 +106,37 @@ This mechanism may be too complex to implement at once. We can consider a two-ph
|
||||
Also, the read replica's manifest version shouldn't lag behind by more than the lingering time of obsolete files; otherwise it might reference files that have already been deleted by the GC worker.
|
||||
- Need to upload the temporary manifest to object storage, which may introduce additional complexity and potential performance overhead. But since long-running queries are typically infrequent, the performance impact is expected to be minimal.
|
||||
|
||||
One potential race condition with region migration is illustrated below:
|
||||
|
||||
```mermaid
|
||||
sequenceDiagram
|
||||
participant gc_worker as GC Worker(same dn as region 1)
|
||||
participant region1 as Region 1 (Leader → Follower)
|
||||
participant region2 as Region 2 (Follower → Leader)
|
||||
participant region_dir as Region Directory
|
||||
|
||||
gc_worker->>region1: Start GC, get region manifest
|
||||
activate region1
|
||||
region1-->>gc_worker: Region 1 manifest
|
||||
deactivate region1
|
||||
gc_worker->>region_dir: Scan region directory
|
||||
|
||||
Note over region1,region2: Region Migration Occurs
|
||||
region1-->>region2: Downgrade to Follower
|
||||
region2-->>region1: Becomes Leader
|
||||
|
||||
region2->>region_dir: Add new file
|
||||
|
||||
gc_worker->>region_dir: Continue scanning
|
||||
gc_worker-->>region_dir: Discovers new file
|
||||
Note over gc_worker: New file not in Region 1's manifest
|
||||
gc_worker->>gc_worker: Mark file as orphan(incorrectly)
|
||||
```
|
||||
This could cause the GC worker to incorrectly mark the new file as orphan and delete it, if the configured lingering time for orphan files (files not referenced anywhere, whether in use or unused) is not long enough.
|
||||
|
||||
A good-enough solution could be to use a lock that prevents the GC worker from running on a region while region migration is in progress on that region, and vice versa.
|
||||
|
||||
The race condition between the GC worker and repartition also needs to be considered carefully. For now, acquiring locks for both region migration and repartition during the GC worker process could be a simple solution; a rough sketch of this idea follows.
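
To make the locking idea above more concrete, here is a minimal sketch of a shared per-region "busy" table. All names here (`RegionLockSet`, `RegionBusy`, `try_acquire`) are hypothetical illustrations rather than existing GreptimeDB APIs; a real implementation would more likely reuse the procedure framework's existing key locks.

```rust
use std::collections::HashMap;
use std::sync::{Arc, Mutex};

/// Hypothetical region identifier; the real codebase uses its own `RegionId` type.
type RegionId = u64;

/// Why a region is currently considered busy.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum RegionBusy {
    Migration,
    Repartition,
    Gc,
}

/// A minimal per-region "busy" table shared by the GC worker, region migration
/// and repartition. Whoever marks a region busy first wins; the others back off.
#[derive(Default, Clone)]
struct RegionLockSet {
    busy: Arc<Mutex<HashMap<RegionId, RegionBusy>>>,
}

impl RegionLockSet {
    /// Try to mark `region` busy for `reason`; returns `false` if it is already
    /// busy (e.g. GC must skip a region under migration, and vice versa).
    fn try_acquire(&self, region: RegionId, reason: RegionBusy) -> bool {
        let mut busy = self.busy.lock().unwrap();
        if busy.contains_key(&region) {
            false
        } else {
            busy.insert(region, reason);
            true
        }
    }

    /// Release the region once the GC pass / migration / repartition finishes.
    fn release(&self, region: RegionId) {
        self.busy.lock().unwrap().remove(&region);
    }
}

fn gc_region(locks: &RegionLockSet, region: RegionId) {
    // The GC worker only scans a region it managed to mark busy; otherwise it
    // skips the region in this round and retries in the next GC cycle.
    if !locks.try_acquire(region, RegionBusy::Gc) {
        return;
    }
    // ... scan the manifest and region directory, delete orphan files ...
    locks.release(region);
}
```

With such a table, a GC pass that loses the race simply skips the region for the current round instead of risking deletion of files written by the new leader.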
|
||||
|
||||
## Conclusion and Rationale
|
||||
|
||||
|
||||
20
flake.lock
generated
@@ -8,11 +8,11 @@
|
||||
"rust-analyzer-src": "rust-analyzer-src"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1760078406,
|
||||
"narHash": "sha256-JeJK0ZA845PtkCHkfo4KjeI1mYrsr2s3cxBYKhF4BoE=",
|
||||
"lastModified": 1765252472,
|
||||
"narHash": "sha256-byMt/uMi7DJ8tRniFopDFZMO3leSjGp6GS4zWOFT+uQ=",
|
||||
"owner": "nix-community",
|
||||
"repo": "fenix",
|
||||
"rev": "351277c60d104944122ee389cdf581c5ce2c6732",
|
||||
"rev": "8456b985f6652e3eef0632ee9992b439735c5544",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
@@ -41,16 +41,16 @@
|
||||
},
|
||||
"nixpkgs": {
|
||||
"locked": {
|
||||
"lastModified": 1759994382,
|
||||
"narHash": "sha256-wSK+3UkalDZRVHGCRikZ//CyZUJWDJkBDTQX1+G77Ow=",
|
||||
"lastModified": 1764983851,
|
||||
"narHash": "sha256-y7RPKl/jJ/KAP/VKLMghMgXTlvNIJMHKskl8/Uuar7o=",
|
||||
"owner": "NixOS",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "5da4a26309e796daa7ffca72df93dbe53b8164c7",
|
||||
"rev": "d9bc5c7dceb30d8d6fafa10aeb6aa8a48c218454",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "NixOS",
|
||||
"ref": "nixos-25.05",
|
||||
"ref": "nixos-25.11",
|
||||
"repo": "nixpkgs",
|
||||
"type": "github"
|
||||
}
|
||||
@@ -65,11 +65,11 @@
|
||||
"rust-analyzer-src": {
|
||||
"flake": false,
|
||||
"locked": {
|
||||
"lastModified": 1760014945,
|
||||
"narHash": "sha256-ySdl7F9+oeWNHVrg3QL/brazqmJvYFEdpGnF3pyoDH8=",
|
||||
"lastModified": 1765120009,
|
||||
"narHash": "sha256-nG76b87rkaDzibWbnB5bYDm6a52b78A+fpm+03pqYIw=",
|
||||
"owner": "rust-lang",
|
||||
"repo": "rust-analyzer",
|
||||
"rev": "90d2e1ce4dfe7dc49250a8b88a0f08ffdb9cb23f",
|
||||
"rev": "5e3e9c4e61bba8a5e72134b9ffefbef8f531d008",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
description = "Development environment flake";
|
||||
|
||||
inputs = {
|
||||
nixpkgs.url = "github:NixOS/nixpkgs/nixos-25.05";
|
||||
nixpkgs.url = "github:NixOS/nixpkgs/nixos-25.11";
|
||||
fenix = {
|
||||
url = "github:nix-community/fenix";
|
||||
inputs.nixpkgs.follows = "nixpkgs";
|
||||
@@ -48,7 +48,7 @@
|
||||
gnuplot ## for cargo bench
|
||||
];
|
||||
|
||||
LD_LIBRARY_PATH = pkgs.lib.makeLibraryPath buildInputs;
|
||||
buildInputs = buildInputs;
|
||||
NIX_HARDENING_ENABLE = "";
|
||||
};
|
||||
});
|
||||
|
||||
@@ -12,7 +12,7 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::collections::HashSet;
|
||||
use std::collections::{BTreeMap, HashSet};
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_decimal::Decimal128;
|
||||
@@ -20,13 +20,12 @@ use common_decimal::decimal128::{DECIMAL128_DEFAULT_SCALE, DECIMAL128_MAX_PRECIS
|
||||
use common_time::time::Time;
|
||||
use common_time::timestamp::TimeUnit;
|
||||
use common_time::{Date, IntervalDayTime, IntervalMonthDayNano, IntervalYearMonth, Timestamp};
|
||||
use datatypes::json::value::{JsonNumber, JsonValue, JsonValueRef, JsonVariant};
|
||||
use datatypes::prelude::{ConcreteDataType, ValueRef};
|
||||
use datatypes::types::{
|
||||
IntervalType, JsonFormat, StructField, StructType, TimeType, TimestampType,
|
||||
};
|
||||
use datatypes::value::{
|
||||
ListValue, ListValueRef, OrderedF32, OrderedF64, StructValue, StructValueRef, Value,
|
||||
IntervalType, JsonFormat, JsonType, StructField, StructType, TimeType, TimestampType,
|
||||
};
|
||||
use datatypes::value::{ListValueRef, OrderedF32, OrderedF64, StructValueRef, Value};
|
||||
use datatypes::vectors::VectorRef;
|
||||
use greptime_proto::v1::column_data_type_extension::TypeExt;
|
||||
use greptime_proto::v1::ddl_request::Expr;
|
||||
@@ -34,9 +33,9 @@ use greptime_proto::v1::greptime_request::Request;
|
||||
use greptime_proto::v1::query_request::Query;
|
||||
use greptime_proto::v1::value::ValueData;
|
||||
use greptime_proto::v1::{
|
||||
self, ColumnDataTypeExtension, DdlRequest, DecimalTypeExtension, JsonNativeTypeExtension,
|
||||
JsonTypeExtension, ListTypeExtension, QueryRequest, Row, SemanticType, StructTypeExtension,
|
||||
VectorTypeExtension,
|
||||
self, ColumnDataTypeExtension, DdlRequest, DecimalTypeExtension, DictionaryTypeExtension,
|
||||
JsonList, JsonNativeTypeExtension, JsonObject, JsonTypeExtension, ListTypeExtension,
|
||||
QueryRequest, Row, SemanticType, StructTypeExtension, VectorTypeExtension, json_value,
|
||||
};
|
||||
use paste::paste;
|
||||
use snafu::prelude::*;
|
||||
@@ -81,6 +80,10 @@ impl ColumnDataTypeWrapper {
|
||||
pub fn to_parts(&self) -> (ColumnDataType, Option<ColumnDataTypeExtension>) {
|
||||
(self.datatype, self.datatype_ext.clone())
|
||||
}
|
||||
|
||||
pub fn into_parts(self) -> (ColumnDataType, Option<ColumnDataTypeExtension>) {
|
||||
(self.datatype, self.datatype_ext)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<ColumnDataTypeWrapper> for ConcreteDataType {
|
||||
@@ -126,6 +129,7 @@ impl From<ColumnDataTypeWrapper> for ConcreteDataType {
|
||||
};
|
||||
ConcreteDataType::json_native_datatype(inner_type.into())
|
||||
}
|
||||
None => ConcreteDataType::Json(JsonType::null()),
|
||||
_ => {
|
||||
// invalid state, type extension is missing or invalid
|
||||
ConcreteDataType::null_datatype()
|
||||
@@ -215,6 +219,26 @@ impl From<ColumnDataTypeWrapper> for ConcreteDataType {
|
||||
ConcreteDataType::null_datatype()
|
||||
}
|
||||
}
|
||||
ColumnDataType::Dictionary => {
|
||||
if let Some(TypeExt::DictionaryType(d)) = datatype_wrapper
|
||||
.datatype_ext
|
||||
.as_ref()
|
||||
.and_then(|datatype_ext| datatype_ext.type_ext.as_ref())
|
||||
{
|
||||
let key_type = ColumnDataTypeWrapper {
|
||||
datatype: d.key_datatype(),
|
||||
datatype_ext: d.key_datatype_extension.clone().map(|ext| *ext),
|
||||
};
|
||||
let value_type = ColumnDataTypeWrapper {
|
||||
datatype: d.value_datatype(),
|
||||
datatype_ext: d.value_datatype_extension.clone().map(|ext| *ext),
|
||||
};
|
||||
ConcreteDataType::dictionary_datatype(key_type.into(), value_type.into())
|
||||
} else {
|
||||
// invalid state: type extension not found
|
||||
ConcreteDataType::null_datatype()
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -338,13 +362,30 @@ impl ColumnDataTypeWrapper {
|
||||
}),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn dictionary_datatype(
|
||||
key_type: ColumnDataTypeWrapper,
|
||||
value_type: ColumnDataTypeWrapper,
|
||||
) -> Self {
|
||||
ColumnDataTypeWrapper {
|
||||
datatype: ColumnDataType::Dictionary,
|
||||
datatype_ext: Some(ColumnDataTypeExtension {
|
||||
type_ext: Some(TypeExt::DictionaryType(Box::new(DictionaryTypeExtension {
|
||||
key_datatype: key_type.datatype().into(),
|
||||
key_datatype_extension: key_type.datatype_ext.map(Box::new),
|
||||
value_datatype: value_type.datatype().into(),
|
||||
value_datatype_extension: value_type.datatype_ext.map(Box::new),
|
||||
}))),
|
||||
}),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl TryFrom<ConcreteDataType> for ColumnDataTypeWrapper {
|
||||
type Error = error::Error;
|
||||
|
||||
fn try_from(datatype: ConcreteDataType) -> Result<Self> {
|
||||
let column_datatype = match datatype {
|
||||
let column_datatype = match &datatype {
|
||||
ConcreteDataType::Boolean(_) => ColumnDataType::Boolean,
|
||||
ConcreteDataType::Int8(_) => ColumnDataType::Int8,
|
||||
ConcreteDataType::Int16(_) => ColumnDataType::Int16,
|
||||
@@ -381,9 +422,8 @@ impl TryFrom<ConcreteDataType> for ColumnDataTypeWrapper {
|
||||
ConcreteDataType::Vector(_) => ColumnDataType::Vector,
|
||||
ConcreteDataType::List(_) => ColumnDataType::List,
|
||||
ConcreteDataType::Struct(_) => ColumnDataType::Struct,
|
||||
ConcreteDataType::Null(_)
|
||||
| ConcreteDataType::Dictionary(_)
|
||||
| ConcreteDataType::Duration(_) => {
|
||||
ConcreteDataType::Dictionary(_) => ColumnDataType::Dictionary,
|
||||
ConcreteDataType::Null(_) | ConcreteDataType::Duration(_) => {
|
||||
return error::IntoColumnDataTypeSnafu { from: datatype }.fail();
|
||||
}
|
||||
};
|
||||
@@ -404,16 +444,22 @@ impl TryFrom<ConcreteDataType> for ColumnDataTypeWrapper {
|
||||
JsonFormat::Jsonb => Some(ColumnDataTypeExtension {
|
||||
type_ext: Some(TypeExt::JsonType(JsonTypeExtension::JsonBinary.into())),
|
||||
}),
|
||||
JsonFormat::Native(inner) => {
|
||||
let inner_type = ColumnDataTypeWrapper::try_from(*inner.clone())?;
|
||||
Some(ColumnDataTypeExtension {
|
||||
type_ext: Some(TypeExt::JsonNativeType(Box::new(
|
||||
JsonNativeTypeExtension {
|
||||
datatype: inner_type.datatype.into(),
|
||||
datatype_extension: inner_type.datatype_ext.map(Box::new),
|
||||
},
|
||||
))),
|
||||
})
|
||||
JsonFormat::Native(native_type) => {
|
||||
if native_type.is_null() {
|
||||
None
|
||||
} else {
|
||||
let native_type = ConcreteDataType::from(native_type.as_ref());
|
||||
let (datatype, datatype_extension) =
|
||||
ColumnDataTypeWrapper::try_from(native_type)?.into_parts();
|
||||
Some(ColumnDataTypeExtension {
|
||||
type_ext: Some(TypeExt::JsonNativeType(Box::new(
|
||||
JsonNativeTypeExtension {
|
||||
datatype: datatype as i32,
|
||||
datatype_extension: datatype_extension.map(Box::new),
|
||||
},
|
||||
))),
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
@@ -463,6 +509,25 @@ impl TryFrom<ConcreteDataType> for ColumnDataTypeWrapper {
|
||||
None
|
||||
}
|
||||
}
|
||||
ColumnDataType::Dictionary => {
|
||||
if let ConcreteDataType::Dictionary(dict_type) = &datatype {
|
||||
let key_type = ColumnDataTypeWrapper::try_from(dict_type.key_type().clone())?;
|
||||
let value_type =
|
||||
ColumnDataTypeWrapper::try_from(dict_type.value_type().clone())?;
|
||||
Some(ColumnDataTypeExtension {
|
||||
type_ext: Some(TypeExt::DictionaryType(Box::new(
|
||||
DictionaryTypeExtension {
|
||||
key_datatype: key_type.datatype.into(),
|
||||
key_datatype_extension: key_type.datatype_ext.map(Box::new),
|
||||
value_datatype: value_type.datatype.into(),
|
||||
value_datatype_extension: value_type.datatype_ext.map(Box::new),
|
||||
},
|
||||
))),
|
||||
})
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
_ => None,
|
||||
};
|
||||
Ok(Self {
|
||||
@@ -601,6 +666,9 @@ pub fn values_with_capacity(datatype: ColumnDataType, capacity: usize) -> Values
|
||||
struct_values: Vec::with_capacity(capacity),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDataType::Dictionary => Values {
|
||||
..Default::default()
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
@@ -640,6 +708,7 @@ fn ddl_request_type(request: &DdlRequest) -> &'static str {
|
||||
Some(Expr::CreateView(_)) => "ddl.create_view",
|
||||
Some(Expr::DropView(_)) => "ddl.drop_view",
|
||||
Some(Expr::AlterDatabase(_)) => "ddl.alter_database",
|
||||
Some(Expr::CommentOn(_)) => "ddl.comment_on",
|
||||
None => "ddl.empty",
|
||||
}
|
||||
}
|
||||
@@ -801,21 +870,8 @@ pub fn pb_value_to_value_ref<'a>(
|
||||
}
|
||||
|
||||
ValueData::JsonValue(inner_value) => {
|
||||
let json_datatype_ext = datatype_ext
|
||||
.as_ref()
|
||||
.and_then(|ext| {
|
||||
if let Some(TypeExt::JsonNativeType(l)) = &ext.type_ext {
|
||||
Some(l)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
.expect("json value must contain datatype ext");
|
||||
|
||||
ValueRef::Json(Box::new(pb_value_to_value_ref(
|
||||
inner_value,
|
||||
json_datatype_ext.datatype_extension.as_deref(),
|
||||
)))
|
||||
let value = decode_json_value(inner_value);
|
||||
ValueRef::Json(Box::new(value))
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -839,125 +895,64 @@ pub fn is_column_type_value_eq(
|
||||
.unwrap_or(false)
|
||||
}
|
||||
|
||||
/// Convert value into proto's value.
|
||||
pub fn to_proto_value(value: Value) -> v1::Value {
|
||||
match value {
|
||||
Value::Null => v1::Value { value_data: None },
|
||||
Value::Boolean(v) => v1::Value {
|
||||
value_data: Some(ValueData::BoolValue(v)),
|
||||
},
|
||||
Value::UInt8(v) => v1::Value {
|
||||
value_data: Some(ValueData::U8Value(v.into())),
|
||||
},
|
||||
Value::UInt16(v) => v1::Value {
|
||||
value_data: Some(ValueData::U16Value(v.into())),
|
||||
},
|
||||
Value::UInt32(v) => v1::Value {
|
||||
value_data: Some(ValueData::U32Value(v)),
|
||||
},
|
||||
Value::UInt64(v) => v1::Value {
|
||||
value_data: Some(ValueData::U64Value(v)),
|
||||
},
|
||||
Value::Int8(v) => v1::Value {
|
||||
value_data: Some(ValueData::I8Value(v.into())),
|
||||
},
|
||||
Value::Int16(v) => v1::Value {
|
||||
value_data: Some(ValueData::I16Value(v.into())),
|
||||
},
|
||||
Value::Int32(v) => v1::Value {
|
||||
value_data: Some(ValueData::I32Value(v)),
|
||||
},
|
||||
Value::Int64(v) => v1::Value {
|
||||
value_data: Some(ValueData::I64Value(v)),
|
||||
},
|
||||
Value::Float32(v) => v1::Value {
|
||||
value_data: Some(ValueData::F32Value(*v)),
|
||||
},
|
||||
Value::Float64(v) => v1::Value {
|
||||
value_data: Some(ValueData::F64Value(*v)),
|
||||
},
|
||||
Value::String(v) => v1::Value {
|
||||
value_data: Some(ValueData::StringValue(v.as_utf8().to_string())),
|
||||
},
|
||||
Value::Binary(v) => v1::Value {
|
||||
value_data: Some(ValueData::BinaryValue(v.to_vec())),
|
||||
},
|
||||
Value::Date(v) => v1::Value {
|
||||
value_data: Some(ValueData::DateValue(v.val())),
|
||||
},
|
||||
Value::Timestamp(v) => match v.unit() {
|
||||
TimeUnit::Second => v1::Value {
|
||||
value_data: Some(ValueData::TimestampSecondValue(v.value())),
|
||||
},
|
||||
TimeUnit::Millisecond => v1::Value {
|
||||
value_data: Some(ValueData::TimestampMillisecondValue(v.value())),
|
||||
},
|
||||
TimeUnit::Microsecond => v1::Value {
|
||||
value_data: Some(ValueData::TimestampMicrosecondValue(v.value())),
|
||||
},
|
||||
TimeUnit::Nanosecond => v1::Value {
|
||||
value_data: Some(ValueData::TimestampNanosecondValue(v.value())),
|
||||
},
|
||||
},
|
||||
Value::Time(v) => match v.unit() {
|
||||
TimeUnit::Second => v1::Value {
|
||||
value_data: Some(ValueData::TimeSecondValue(v.value())),
|
||||
},
|
||||
TimeUnit::Millisecond => v1::Value {
|
||||
value_data: Some(ValueData::TimeMillisecondValue(v.value())),
|
||||
},
|
||||
TimeUnit::Microsecond => v1::Value {
|
||||
value_data: Some(ValueData::TimeMicrosecondValue(v.value())),
|
||||
},
|
||||
TimeUnit::Nanosecond => v1::Value {
|
||||
value_data: Some(ValueData::TimeNanosecondValue(v.value())),
|
||||
},
|
||||
},
|
||||
Value::IntervalYearMonth(v) => v1::Value {
|
||||
value_data: Some(ValueData::IntervalYearMonthValue(v.to_i32())),
|
||||
},
|
||||
Value::IntervalDayTime(v) => v1::Value {
|
||||
value_data: Some(ValueData::IntervalDayTimeValue(v.to_i64())),
|
||||
},
|
||||
Value::IntervalMonthDayNano(v) => v1::Value {
|
||||
value_data: Some(ValueData::IntervalMonthDayNanoValue(
|
||||
convert_month_day_nano_to_pb(v),
|
||||
)),
|
||||
},
|
||||
Value::Decimal128(v) => v1::Value {
|
||||
value_data: Some(ValueData::Decimal128Value(convert_to_pb_decimal128(v))),
|
||||
},
|
||||
Value::List(list_value) => v1::Value {
|
||||
value_data: Some(ValueData::ListValue(v1::ListValue {
|
||||
items: convert_list_to_pb_values(list_value),
|
||||
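/// Encodes a [`JsonValue`] into its protobuf `v1::JsonValue` representation, recursively converting nested arrays and objects.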
fn encode_json_value(value: JsonValue) -> v1::JsonValue {
|
||||
fn helper(json: JsonVariant) -> v1::JsonValue {
|
||||
let value = match json {
|
||||
JsonVariant::Null => None,
|
||||
JsonVariant::Bool(x) => Some(json_value::Value::Boolean(x)),
|
||||
JsonVariant::Number(x) => Some(match x {
|
||||
JsonNumber::PosInt(i) => json_value::Value::Uint(i),
|
||||
JsonNumber::NegInt(i) => json_value::Value::Int(i),
|
||||
JsonNumber::Float(f) => json_value::Value::Float(f.0),
|
||||
}),
|
||||
JsonVariant::String(x) => Some(json_value::Value::Str(x)),
|
||||
JsonVariant::Array(x) => Some(json_value::Value::Array(JsonList {
|
||||
items: x.into_iter().map(helper).collect::<Vec<_>>(),
|
||||
})),
|
||||
},
|
||||
Value::Struct(struct_value) => v1::Value {
|
||||
value_data: Some(ValueData::StructValue(v1::StructValue {
|
||||
items: convert_struct_to_pb_values(struct_value),
|
||||
})),
|
||||
},
|
||||
Value::Json(v) => v1::Value {
|
||||
value_data: Some(ValueData::JsonValue(Box::new(to_proto_value(*v)))),
|
||||
},
|
||||
Value::Duration(_) => v1::Value { value_data: None },
|
||||
JsonVariant::Object(x) => {
|
||||
let entries = x
|
||||
.into_iter()
|
||||
.map(|(key, v)| v1::json_object::Entry {
|
||||
key,
|
||||
value: Some(helper(v)),
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
Some(json_value::Value::Object(JsonObject { entries }))
|
||||
}
|
||||
};
|
||||
v1::JsonValue { value }
|
||||
}
|
||||
helper(value.into_variant())
|
||||
}
|
||||
|
||||
fn convert_list_to_pb_values(list_value: ListValue) -> Vec<v1::Value> {
|
||||
list_value
|
||||
.take_items()
|
||||
.into_iter()
|
||||
.map(to_proto_value)
|
||||
.collect()
|
||||
}
|
||||
|
||||
fn convert_struct_to_pb_values(struct_value: StructValue) -> Vec<v1::Value> {
|
||||
struct_value
|
||||
.take_items()
|
||||
.into_iter()
|
||||
.map(to_proto_value)
|
||||
.collect()
|
||||
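/// Decodes a protobuf `v1::JsonValue` into a borrowed [`JsonValueRef`]; a missing value is decoded as JSON null.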
fn decode_json_value(value: &v1::JsonValue) -> JsonValueRef<'_> {
|
||||
let Some(value) = &value.value else {
|
||||
return JsonValueRef::null();
|
||||
};
|
||||
match value {
|
||||
json_value::Value::Boolean(x) => (*x).into(),
|
||||
json_value::Value::Int(x) => (*x).into(),
|
||||
json_value::Value::Uint(x) => (*x).into(),
|
||||
json_value::Value::Float(x) => (*x).into(),
|
||||
json_value::Value::Str(x) => (x.as_str()).into(),
|
||||
json_value::Value::Array(array) => array
|
||||
.items
|
||||
.iter()
|
||||
.map(|x| decode_json_value(x).into_variant())
|
||||
.collect::<Vec<_>>()
|
||||
.into(),
|
||||
json_value::Value::Object(x) => x
|
||||
.entries
|
||||
.iter()
|
||||
.filter_map(|entry| {
|
||||
entry
|
||||
.value
|
||||
.as_ref()
|
||||
.map(|v| (entry.key.as_str(), decode_json_value(v).into_variant()))
|
||||
})
|
||||
.collect::<BTreeMap<_, _>>()
|
||||
.into(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the [ColumnDataTypeWrapper] of the value.
|
||||
@@ -1006,14 +1001,14 @@ pub fn vectors_to_rows<'a>(
|
||||
let mut rows = vec![Row { values: vec![] }; row_count];
|
||||
for column in columns {
|
||||
for (row_index, row) in rows.iter_mut().enumerate() {
|
||||
row.values.push(value_to_grpc_value(column.get(row_index)))
|
||||
row.values.push(to_grpc_value(column.get(row_index)))
|
||||
}
|
||||
}
|
||||
|
||||
rows
|
||||
}
|
||||
|
||||
pub fn value_to_grpc_value(value: Value) -> GrpcValue {
|
||||
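/// Converts a [`Value`] into its gRPC protobuf representation ([`GrpcValue`]).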
pub fn to_grpc_value(value: Value) -> GrpcValue {
|
||||
GrpcValue {
|
||||
value_data: match value {
|
||||
Value::Null => None,
|
||||
@@ -1053,7 +1048,7 @@ pub fn value_to_grpc_value(value: Value) -> GrpcValue {
|
||||
let items = list_value
|
||||
.take_items()
|
||||
.into_iter()
|
||||
.map(value_to_grpc_value)
|
||||
.map(to_grpc_value)
|
||||
.collect();
|
||||
Some(ValueData::ListValue(v1::ListValue { items }))
|
||||
}
|
||||
@@ -1061,13 +1056,11 @@ pub fn value_to_grpc_value(value: Value) -> GrpcValue {
|
||||
let items = struct_value
|
||||
.take_items()
|
||||
.into_iter()
|
||||
.map(value_to_grpc_value)
|
||||
.map(to_grpc_value)
|
||||
.collect();
|
||||
Some(ValueData::StructValue(v1::StructValue { items }))
|
||||
}
|
||||
Value::Json(inner_value) => Some(ValueData::JsonValue(Box::new(value_to_grpc_value(
|
||||
*inner_value,
|
||||
)))),
|
||||
Value::Json(v) => Some(ValueData::JsonValue(encode_json_value(*v))),
|
||||
Value::Duration(_) => unreachable!(),
|
||||
},
|
||||
}
|
||||
@@ -1163,6 +1156,7 @@ mod tests {
|
||||
use common_time::interval::IntervalUnit;
|
||||
use datatypes::scalars::ScalarVector;
|
||||
use datatypes::types::{Int8Type, Int32Type, UInt8Type, UInt32Type};
|
||||
use datatypes::value::{ListValue, StructValue};
|
||||
use datatypes::vectors::{
|
||||
BooleanVector, DateVector, Float32Vector, PrimitiveVector, StringVector,
|
||||
};
|
||||
@@ -1259,6 +1253,9 @@ mod tests {
|
||||
let values = values_with_capacity(ColumnDataType::Json, 2);
|
||||
assert_eq!(2, values.json_values.capacity());
|
||||
assert_eq!(2, values.string_values.capacity());
|
||||
|
||||
let values = values_with_capacity(ColumnDataType::Dictionary, 2);
|
||||
assert!(values.bool_values.is_empty());
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -1355,6 +1352,17 @@ mod tests {
|
||||
ConcreteDataType::list_datatype(Arc::new(ConcreteDataType::string_datatype())),
|
||||
ColumnDataTypeWrapper::list_datatype(ColumnDataTypeWrapper::string_datatype()).into()
|
||||
);
|
||||
assert_eq!(
|
||||
ConcreteDataType::dictionary_datatype(
|
||||
ConcreteDataType::int32_datatype(),
|
||||
ConcreteDataType::string_datatype()
|
||||
),
|
||||
ColumnDataTypeWrapper::dictionary_datatype(
|
||||
ColumnDataTypeWrapper::int32_datatype(),
|
||||
ColumnDataTypeWrapper::string_datatype()
|
||||
)
|
||||
.into()
|
||||
);
|
||||
let struct_type = StructType::new(Arc::new(vec![
|
||||
StructField::new("id".to_string(), ConcreteDataType::int64_datatype(), true),
|
||||
StructField::new(
|
||||
@@ -1525,6 +1533,18 @@ mod tests {
|
||||
ColumnDataTypeWrapper::vector_datatype(3),
|
||||
ConcreteDataType::vector_datatype(3).try_into().unwrap()
|
||||
);
|
||||
assert_eq!(
|
||||
ColumnDataTypeWrapper::dictionary_datatype(
|
||||
ColumnDataTypeWrapper::int32_datatype(),
|
||||
ColumnDataTypeWrapper::string_datatype()
|
||||
),
|
||||
ConcreteDataType::dictionary_datatype(
|
||||
ConcreteDataType::int32_datatype(),
|
||||
ConcreteDataType::string_datatype()
|
||||
)
|
||||
.try_into()
|
||||
.unwrap()
|
||||
);
|
||||
|
||||
let result: Result<ColumnDataTypeWrapper> = ConcreteDataType::null_datatype().try_into();
|
||||
assert!(result.is_err());
|
||||
@@ -1580,6 +1600,20 @@ mod tests {
|
||||
datatype_extension: Some(Box::new(ColumnDataTypeExtension {
|
||||
type_ext: Some(TypeExt::StructType(StructTypeExtension {
|
||||
fields: vec![
|
||||
v1::StructField {
|
||||
name: "address".to_string(),
|
||||
datatype: ColumnDataTypeWrapper::string_datatype()
|
||||
.datatype()
|
||||
.into(),
|
||||
datatype_extension: None
|
||||
},
|
||||
v1::StructField {
|
||||
name: "age".to_string(),
|
||||
datatype: ColumnDataTypeWrapper::int64_datatype()
|
||||
.datatype()
|
||||
.into(),
|
||||
datatype_extension: None
|
||||
},
|
||||
v1::StructField {
|
||||
name: "id".to_string(),
|
||||
datatype: ColumnDataTypeWrapper::int64_datatype()
|
||||
@@ -1594,20 +1628,6 @@ mod tests {
|
||||
.into(),
|
||||
datatype_extension: None
|
||||
},
|
||||
v1::StructField {
|
||||
name: "age".to_string(),
|
||||
datatype: ColumnDataTypeWrapper::int32_datatype()
|
||||
.datatype()
|
||||
.into(),
|
||||
datatype_extension: None
|
||||
},
|
||||
v1::StructField {
|
||||
name: "address".to_string(),
|
||||
datatype: ColumnDataTypeWrapper::string_datatype()
|
||||
.datatype()
|
||||
.into(),
|
||||
datatype_extension: None
|
||||
}
|
||||
]
|
||||
}))
|
||||
}))
|
||||
@@ -1740,7 +1760,7 @@ mod tests {
|
||||
Arc::new(ConcreteDataType::boolean_datatype()),
|
||||
));
|
||||
|
||||
let pb_value = to_proto_value(value);
|
||||
let pb_value = to_grpc_value(value);
|
||||
|
||||
match pb_value.value_data.unwrap() {
|
||||
ValueData::ListValue(pb_list_value) => {
|
||||
@@ -1769,7 +1789,7 @@ mod tests {
|
||||
.unwrap(),
|
||||
);
|
||||
|
||||
let pb_value = to_proto_value(value);
|
||||
let pb_value = to_grpc_value(value);
|
||||
|
||||
match pb_value.value_data.unwrap() {
|
||||
ValueData::StructValue(pb_struct_value) => {
|
||||
@@ -1778,4 +1798,199 @@ mod tests {
|
||||
_ => panic!("Unexpected value type"),
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_encode_decode_json_value() {
|
||||
let json = JsonValue::null();
|
||||
let proto = encode_json_value(json.clone());
|
||||
assert!(proto.value.is_none());
|
||||
let value = decode_json_value(&proto);
|
||||
assert_eq!(json.as_ref(), value);
|
||||
|
||||
let json: JsonValue = true.into();
|
||||
let proto = encode_json_value(json.clone());
|
||||
assert_eq!(proto.value, Some(json_value::Value::Boolean(true)));
|
||||
let value = decode_json_value(&proto);
|
||||
assert_eq!(json.as_ref(), value);
|
||||
|
||||
let json: JsonValue = (-1i64).into();
|
||||
let proto = encode_json_value(json.clone());
|
||||
assert_eq!(proto.value, Some(json_value::Value::Int(-1)));
|
||||
let value = decode_json_value(&proto);
|
||||
assert_eq!(json.as_ref(), value);
|
||||
|
||||
let json: JsonValue = 1u64.into();
|
||||
let proto = encode_json_value(json.clone());
|
||||
assert_eq!(proto.value, Some(json_value::Value::Uint(1)));
|
||||
let value = decode_json_value(&proto);
|
||||
assert_eq!(json.as_ref(), value);
|
||||
|
||||
let json: JsonValue = 1.0f64.into();
|
||||
let proto = encode_json_value(json.clone());
|
||||
assert_eq!(proto.value, Some(json_value::Value::Float(1.0)));
|
||||
let value = decode_json_value(&proto);
|
||||
assert_eq!(json.as_ref(), value);
|
||||
|
||||
let json: JsonValue = "s".into();
|
||||
let proto = encode_json_value(json.clone());
|
||||
assert_eq!(proto.value, Some(json_value::Value::Str("s".to_string())));
|
||||
let value = decode_json_value(&proto);
|
||||
assert_eq!(json.as_ref(), value);
|
||||
|
||||
let json: JsonValue = [1i64, 2, 3].into();
|
||||
let proto = encode_json_value(json.clone());
|
||||
assert_eq!(
|
||||
proto.value,
|
||||
Some(json_value::Value::Array(JsonList {
|
||||
items: vec![
|
||||
v1::JsonValue {
|
||||
value: Some(json_value::Value::Int(1))
|
||||
},
|
||||
v1::JsonValue {
|
||||
value: Some(json_value::Value::Int(2))
|
||||
},
|
||||
v1::JsonValue {
|
||||
value: Some(json_value::Value::Int(3))
|
||||
}
|
||||
]
|
||||
}))
|
||||
);
|
||||
let value = decode_json_value(&proto);
|
||||
assert_eq!(json.as_ref(), value);
|
||||
|
||||
let json: JsonValue = [(); 0].into();
|
||||
let proto = encode_json_value(json.clone());
|
||||
assert_eq!(
|
||||
proto.value,
|
||||
Some(json_value::Value::Array(JsonList { items: vec![] }))
|
||||
);
|
||||
let value = decode_json_value(&proto);
|
||||
assert_eq!(json.as_ref(), value);
|
||||
|
||||
let json: JsonValue = [("k3", 3i64), ("k2", 2i64), ("k1", 1i64)].into();
|
||||
let proto = encode_json_value(json.clone());
|
||||
assert_eq!(
|
||||
proto.value,
|
||||
Some(json_value::Value::Object(JsonObject {
|
||||
entries: vec![
|
||||
v1::json_object::Entry {
|
||||
key: "k1".to_string(),
|
||||
value: Some(v1::JsonValue {
|
||||
value: Some(json_value::Value::Int(1))
|
||||
}),
|
||||
},
|
||||
v1::json_object::Entry {
|
||||
key: "k2".to_string(),
|
||||
value: Some(v1::JsonValue {
|
||||
value: Some(json_value::Value::Int(2))
|
||||
}),
|
||||
},
|
||||
v1::json_object::Entry {
|
||||
key: "k3".to_string(),
|
||||
value: Some(v1::JsonValue {
|
||||
value: Some(json_value::Value::Int(3))
|
||||
}),
|
||||
},
|
||||
]
|
||||
}))
|
||||
);
|
||||
let value = decode_json_value(&proto);
|
||||
assert_eq!(json.as_ref(), value);
|
||||
|
||||
let json: JsonValue = [("null", ()); 0].into();
|
||||
let proto = encode_json_value(json.clone());
|
||||
assert_eq!(
|
||||
proto.value,
|
||||
Some(json_value::Value::Object(JsonObject { entries: vec![] }))
|
||||
);
|
||||
let value = decode_json_value(&proto);
|
||||
assert_eq!(json.as_ref(), value);
|
||||
|
||||
let json: JsonValue = [
|
||||
("null", JsonVariant::from(())),
|
||||
("bool", false.into()),
|
||||
("list", ["hello", "world"].into()),
|
||||
(
|
||||
"object",
|
||||
[
|
||||
("positive_i", JsonVariant::from(42u64)),
|
||||
("negative_i", (-42i64).into()),
|
||||
("nested", [("what", "blah")].into()),
|
||||
]
|
||||
.into(),
|
||||
),
|
||||
]
|
||||
.into();
|
||||
let proto = encode_json_value(json.clone());
|
||||
assert_eq!(
|
||||
proto.value,
|
||||
Some(json_value::Value::Object(JsonObject {
|
||||
entries: vec![
|
||||
v1::json_object::Entry {
|
||||
key: "bool".to_string(),
|
||||
value: Some(v1::JsonValue {
|
||||
value: Some(json_value::Value::Boolean(false))
|
||||
}),
|
||||
},
|
||||
v1::json_object::Entry {
|
||||
key: "list".to_string(),
|
||||
value: Some(v1::JsonValue {
|
||||
value: Some(json_value::Value::Array(JsonList {
|
||||
items: vec![
|
||||
v1::JsonValue {
|
||||
value: Some(json_value::Value::Str("hello".to_string()))
|
||||
},
|
||||
v1::JsonValue {
|
||||
value: Some(json_value::Value::Str("world".to_string()))
|
||||
},
|
||||
]
|
||||
}))
|
||||
}),
|
||||
},
|
||||
v1::json_object::Entry {
|
||||
key: "null".to_string(),
|
||||
value: Some(v1::JsonValue { value: None }),
|
||||
},
|
||||
v1::json_object::Entry {
|
||||
key: "object".to_string(),
|
||||
value: Some(v1::JsonValue {
|
||||
value: Some(json_value::Value::Object(JsonObject {
|
||||
entries: vec![
|
||||
v1::json_object::Entry {
|
||||
key: "negative_i".to_string(),
|
||||
value: Some(v1::JsonValue {
|
||||
value: Some(json_value::Value::Int(-42))
|
||||
}),
|
||||
},
|
||||
v1::json_object::Entry {
|
||||
key: "nested".to_string(),
|
||||
value: Some(v1::JsonValue {
|
||||
value: Some(json_value::Value::Object(JsonObject {
|
||||
entries: vec![v1::json_object::Entry {
|
||||
key: "what".to_string(),
|
||||
value: Some(v1::JsonValue {
|
||||
value: Some(json_value::Value::Str(
|
||||
"blah".to_string()
|
||||
))
|
||||
}),
|
||||
},]
|
||||
}))
|
||||
}),
|
||||
},
|
||||
v1::json_object::Entry {
|
||||
key: "positive_i".to_string(),
|
||||
value: Some(v1::JsonValue {
|
||||
value: Some(json_value::Value::Uint(42))
|
||||
}),
|
||||
},
|
||||
]
|
||||
}))
|
||||
}),
|
||||
},
|
||||
]
|
||||
}))
|
||||
);
|
||||
let value = decode_json_value(&proto);
|
||||
assert_eq!(json.as_ref(), value);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -15,11 +15,11 @@ workspace = true
api.workspace = true
async-trait.workspace = true
common-base.workspace = true
common-config.workspace = true
common-error.workspace = true
common-macro.workspace = true
common-telemetry.workspace = true
digest = "0.10"
notify.workspace = true
sha1 = "0.10"
snafu.workspace = true
sql.workspace = true

@@ -75,11 +75,12 @@ pub enum Error {
username: String,
},

#[snafu(display("Failed to initialize a watcher for file {}", path))]
#[snafu(display("Failed to initialize a file watcher"))]
FileWatch {
path: String,
#[snafu(source)]
error: notify::Error,
source: common_config::error::Error,
#[snafu(implicit)]
location: Location,
},

#[snafu(display("User is not authorized to perform this action"))]

@@ -12,16 +12,14 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use std::path::Path;
use std::sync::mpsc::channel;
use std::sync::{Arc, Mutex};

use async_trait::async_trait;
use common_config::file_watcher::{FileWatcherBuilder, FileWatcherConfig};
use common_telemetry::{info, warn};
use notify::{EventKind, RecursiveMode, Watcher};
use snafu::{ResultExt, ensure};
use snafu::ResultExt;

use crate::error::{FileWatchSnafu, InvalidConfigSnafu, Result};
use crate::error::{FileWatchSnafu, Result};
use crate::user_provider::{UserInfoMap, authenticate_with_credential, load_credential_from_file};
use crate::{Identity, Password, UserInfoRef, UserProvider};

@@ -41,61 +39,36 @@ impl WatchFileUserProvider {
pub fn new(filepath: &str) -> Result<Self> {
let credential = load_credential_from_file(filepath)?;
let users = Arc::new(Mutex::new(credential));
let this = WatchFileUserProvider {
users: users.clone(),
};

let (tx, rx) = channel::<notify::Result<notify::Event>>();
let mut debouncer =
notify::recommended_watcher(tx).context(FileWatchSnafu { path: "<none>" })?;
let mut dir = Path::new(filepath).to_path_buf();
ensure!(
dir.pop(),
InvalidConfigSnafu {
value: filepath,
msg: "UserProvider path must be a file path",
}
);
debouncer
.watch(&dir, RecursiveMode::NonRecursive)
.context(FileWatchSnafu { path: filepath })?;
let users_clone = users.clone();
let filepath_owned = filepath.to_string();

let filepath = filepath.to_string();
std::thread::spawn(move || {
let filename = Path::new(&filepath).file_name();
let _hold = debouncer;
while let Ok(res) = rx.recv() {
if let Ok(event) = res {
let is_this_file = event.paths.iter().any(|p| p.file_name() == filename);
let is_relevant_event = matches!(
event.kind,
EventKind::Modify(_) | EventKind::Create(_) | EventKind::Remove(_)
FileWatcherBuilder::new()
.watch_path(filepath)
.context(FileWatchSnafu)?
.config(FileWatcherConfig::new())
.spawn(move || match load_credential_from_file(&filepath_owned) {
Ok(credential) => {
let mut users = users_clone.lock().expect("users credential must be valid");
#[cfg(not(test))]
info!("User provider file {} reloaded", &filepath_owned);
#[cfg(test)]
info!(
"User provider file {} reloaded: {:?}",
&filepath_owned, credential
);
if is_this_file && is_relevant_event {
info!(?event.kind, "User provider file {} changed", &filepath);
match load_credential_from_file(&filepath) {
Ok(credential) => {
let mut users =
users.lock().expect("users credential must be valid");
#[cfg(not(test))]
info!("User provider file {filepath} reloaded");
#[cfg(test)]
info!("User provider file {filepath} reloaded: {credential:?}");
*users = credential;
}
Err(err) => {
warn!(
?err,
"Fail to load credential from file {filepath}; keep the old one",
)
}
}
}
*users = credential;
}
}
});
Err(err) => {
warn!(
?err,
"Fail to load credential from file {}; keep the old one", &filepath_owned
)
}
})
.context(FileWatchSnafu)?;

Ok(this)
Ok(WatchFileUserProvider { users })
}
}

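The reload path above now goes through `FileWatcherBuilder` instead of a hand-rolled `notify` watcher thread. As a rough orientation, here is a minimal sketch of the same pattern for any hot-reloaded file; the builder's method signatures are inferred from the diff above and the closure body is purely illustrative:

```rust
use std::sync::{Arc, Mutex};

use common_config::file_watcher::{FileWatcherBuilder, FileWatcherConfig};
use snafu::ResultExt;

use crate::error::{FileWatchSnafu, Result};

/// Re-reads `path` whenever the watcher fires and swaps the shared contents.
fn watch_and_reload(path: &str, slot: Arc<Mutex<String>>) -> Result<()> {
    let path_owned = path.to_string();
    FileWatcherBuilder::new()
        .watch_path(path)
        .context(FileWatchSnafu)?
        .config(FileWatcherConfig::new())
        .spawn(move || {
            // Keep the previous value if the file is temporarily unreadable.
            if let Ok(content) = std::fs::read_to_string(&path_owned) {
                *slot.lock().expect("lock poisoned") = content;
            }
        })
        .context(FileWatchSnafu)?;
    Ok(())
}
```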
@@ -5,7 +5,6 @@ edition.workspace = true
license.workspace = true

[features]
enterprise = []
testing = []

[lints]

@@ -12,13 +12,14 @@
// See the License for the specific language governing permissions and
// limitations under the License.

pub use client::{CachedKvBackend, CachedKvBackendBuilder, MetaKvBackend};

mod builder;
mod client;
mod manager;
mod table_cache;

pub use builder::KvBackendCatalogManagerBuilder;
pub use builder::{
CatalogManagerConfigurator, CatalogManagerConfiguratorRef, KvBackendCatalogManagerBuilder,
};
pub use client::{CachedKvBackend, CachedKvBackendBuilder, MetaKvBackend};
pub use manager::KvBackendCatalogManager;
pub use table_cache::{TableCache, TableCacheRef, new_table_cache};

@@ -12,9 +12,11 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use std::collections::HashMap;
use std::sync::Arc;

use common_catalog::consts::DEFAULT_CATALOG_NAME;
use common_error::ext::BoxedError;
use common_meta::cache::LayeredCacheRegistryRef;
use common_meta::key::TableMetadataManager;
use common_meta::key::flow::FlowMetadataManager;
@@ -23,24 +25,34 @@ use common_procedure::ProcedureManagerRef;
use moka::sync::Cache;
use partition::manager::PartitionRuleManager;

#[cfg(feature = "enterprise")]
use crate::information_schema::InformationSchemaTableFactoryRef;
use crate::information_schema::{InformationExtensionRef, InformationSchemaProvider};
use crate::information_schema::{
InformationExtensionRef, InformationSchemaProvider, InformationSchemaTableFactoryRef,
};
use crate::kvbackend::KvBackendCatalogManager;
use crate::kvbackend::manager::{CATALOG_CACHE_MAX_CAPACITY, SystemCatalog};
use crate::process_manager::ProcessManagerRef;
use crate::system_schema::numbers_table_provider::NumbersTableProvider;
use crate::system_schema::pg_catalog::PGCatalogProvider;

/// The configurator that customizes or enhances the [`KvBackendCatalogManagerBuilder`].
#[async_trait::async_trait]
pub trait CatalogManagerConfigurator<C>: Send + Sync {
async fn configure(
&self,
builder: KvBackendCatalogManagerBuilder,
ctx: C,
) -> std::result::Result<KvBackendCatalogManagerBuilder, BoxedError>;
}

pub type CatalogManagerConfiguratorRef<C> = Arc<dyn CatalogManagerConfigurator<C>>;

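To illustrate the new hook, here is a hypothetical configurator sketch (the type and its no-op body are illustrative, not part of this change); a real implementation would typically add extra information_schema table factories or other enterprise wiring before handing the builder back:

```rust
/// Illustrative only: passes the builder through unchanged.
struct PassThroughConfigurator;

#[async_trait::async_trait]
impl CatalogManagerConfigurator<()> for PassThroughConfigurator {
    async fn configure(
        &self,
        builder: KvBackendCatalogManagerBuilder,
        _ctx: (),
    ) -> std::result::Result<KvBackendCatalogManagerBuilder, BoxedError> {
        // e.g. builder.with_extra_information_table_factories(...) could be called here.
        Ok(builder)
    }
}
```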
pub struct KvBackendCatalogManagerBuilder {
information_extension: InformationExtensionRef,
backend: KvBackendRef,
cache_registry: LayeredCacheRegistryRef,
procedure_manager: Option<ProcedureManagerRef>,
process_manager: Option<ProcessManagerRef>,
#[cfg(feature = "enterprise")]
extra_information_table_factories:
std::collections::HashMap<String, InformationSchemaTableFactoryRef>,
extra_information_table_factories: HashMap<String, InformationSchemaTableFactoryRef>,
}

impl KvBackendCatalogManagerBuilder {
@@ -55,8 +67,7 @@ impl KvBackendCatalogManagerBuilder {
cache_registry,
procedure_manager: None,
process_manager: None,
#[cfg(feature = "enterprise")]
extra_information_table_factories: std::collections::HashMap::new(),
extra_information_table_factories: HashMap::new(),
}
}

@@ -71,10 +82,9 @@ impl KvBackendCatalogManagerBuilder {
}

/// Sets the extra information tables.
#[cfg(feature = "enterprise")]
pub fn with_extra_information_table_factories(
mut self,
factories: std::collections::HashMap<String, InformationSchemaTableFactoryRef>,
factories: HashMap<String, InformationSchemaTableFactoryRef>,
) -> Self {
self.extra_information_table_factories = factories;
self
@@ -87,7 +97,6 @@ impl KvBackendCatalogManagerBuilder {
cache_registry,
procedure_manager,
process_manager,
#[cfg(feature = "enterprise")]
extra_information_table_factories,
} = self;
Arc::new_cyclic(|me| KvBackendCatalogManager {
@@ -111,7 +120,6 @@ impl KvBackendCatalogManagerBuilder {
process_manager.clone(),
backend.clone(),
);
#[cfg(feature = "enterprise")]
let provider = provider
.with_extra_table_factories(extra_information_table_factories.clone());
Arc::new(provider)
@@ -123,7 +131,6 @@ impl KvBackendCatalogManagerBuilder {
numbers_table_provider: NumbersTableProvider,
backend,
process_manager,
#[cfg(feature = "enterprise")]
extra_information_table_factories,
},
cache_registry,

@@ -53,9 +53,9 @@ use crate::error::{
CacheNotFoundSnafu, GetTableCacheSnafu, InvalidTableInfoInCatalogSnafu, ListCatalogsSnafu,
ListSchemasSnafu, ListTablesSnafu, Result, TableMetadataManagerSnafu,
};
#[cfg(feature = "enterprise")]
use crate::information_schema::InformationSchemaTableFactoryRef;
use crate::information_schema::{InformationExtensionRef, InformationSchemaProvider};
use crate::information_schema::{
InformationExtensionRef, InformationSchemaProvider, InformationSchemaTableFactoryRef,
};
use crate::kvbackend::TableCacheRef;
use crate::process_manager::ProcessManagerRef;
use crate::system_schema::SystemSchemaProvider;
@@ -557,7 +557,6 @@ pub(super) struct SystemCatalog {
pub(super) numbers_table_provider: NumbersTableProvider,
pub(super) backend: KvBackendRef,
pub(super) process_manager: Option<ProcessManagerRef>,
#[cfg(feature = "enterprise")]
pub(super) extra_information_table_factories:
std::collections::HashMap<String, InformationSchemaTableFactoryRef>,
}
@@ -628,7 +627,6 @@ impl SystemCatalog {
self.process_manager.clone(),
self.backend.clone(),
);
#[cfg(feature = "enterprise")]
let provider = provider
.with_extra_table_factories(self.extra_information_table_factories.clone());
Arc::new(provider)

@@ -22,7 +22,6 @@ mod procedure_info;
pub mod process_list;
pub mod region_peers;
mod region_statistics;
mod runtime_metrics;
pub mod schemata;
mod ssts;
mod table_constraints;
@@ -65,7 +64,6 @@ use crate::system_schema::information_schema::information_memory_table::get_sche
use crate::system_schema::information_schema::key_column_usage::InformationSchemaKeyColumnUsage;
use crate::system_schema::information_schema::partitions::InformationSchemaPartitions;
use crate::system_schema::information_schema::region_peers::InformationSchemaRegionPeers;
use crate::system_schema::information_schema::runtime_metrics::InformationSchemaMetrics;
use crate::system_schema::information_schema::schemata::InformationSchemaSchemata;
use crate::system_schema::information_schema::ssts::{
InformationSchemaSstsIndexMeta, InformationSchemaSstsManifest, InformationSchemaSstsStorage,
@@ -119,7 +117,6 @@ macro_rules! setup_memory_table {
};
}

#[cfg(feature = "enterprise")]
pub struct MakeInformationTableRequest {
pub catalog_name: String,
pub catalog_manager: Weak<dyn CatalogManager>,
@@ -130,12 +127,10 @@ pub struct MakeInformationTableRequest {
///
/// This trait allows for extensibility of the information schema by providing
/// a way to dynamically create custom information schema tables.
#[cfg(feature = "enterprise")]
pub trait InformationSchemaTableFactory {
fn make_information_table(&self, req: MakeInformationTableRequest) -> SystemTableRef;
}

#[cfg(feature = "enterprise")]
pub type InformationSchemaTableFactoryRef = Arc<dyn InformationSchemaTableFactory + Send + Sync>;

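As a rough sketch of how this factory trait can be satisfied (hypothetical type, assuming `SystemTableRef` is a cloneable `Arc` handle), a factory may simply hand back a pre-built table and ignore the request:

```rust
/// Hypothetical factory that always returns the same pre-built table.
struct StaticTableFactory {
    table: SystemTableRef,
}

impl InformationSchemaTableFactory for StaticTableFactory {
    fn make_information_table(&self, _req: MakeInformationTableRequest) -> SystemTableRef {
        // A real factory would usually consult req.catalog_name / req.catalog_manager.
        self.table.clone()
    }
}
```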
/// The `information_schema` tables info provider.
@@ -145,9 +140,7 @@ pub struct InformationSchemaProvider {
process_manager: Option<ProcessManagerRef>,
flow_metadata_manager: Arc<FlowMetadataManager>,
tables: HashMap<String, TableRef>,
#[allow(dead_code)]
kv_backend: KvBackendRef,
#[cfg(feature = "enterprise")]
extra_table_factories: HashMap<String, InformationSchemaTableFactoryRef>,
}

@@ -168,7 +161,6 @@ impl SystemSchemaProviderInner for InformationSchemaProvider {
}

fn system_table(&self, name: &str) -> Option<SystemTableRef> {
#[cfg(feature = "enterprise")]
if let Some(factory) = self.extra_table_factories.get(name) {
let req = MakeInformationTableRequest {
catalog_name: self.catalog_name.clone(),
@@ -216,7 +208,6 @@ impl SystemSchemaProviderInner for InformationSchemaProvider {
self.catalog_name.clone(),
self.catalog_manager.clone(),
)) as _),
RUNTIME_METRICS => Some(Arc::new(InformationSchemaMetrics::new())),
PARTITIONS => Some(Arc::new(InformationSchemaPartitions::new(
self.catalog_name.clone(),
self.catalog_manager.clone(),
@@ -284,7 +275,6 @@ impl InformationSchemaProvider {
process_manager,
tables: HashMap::new(),
kv_backend,
#[cfg(feature = "enterprise")]
extra_table_factories: HashMap::new(),
};

@@ -293,7 +283,6 @@ impl InformationSchemaProvider {
provider
}

#[cfg(feature = "enterprise")]
pub(crate) fn with_extra_table_factories(
mut self,
factories: HashMap<String, InformationSchemaTableFactoryRef>,
@@ -311,10 +300,6 @@ impl InformationSchemaProvider {
// authentication details, and other critical information.
// Only put these tables under `greptime` catalog to prevent info leak.
if self.catalog_name == DEFAULT_CATALOG_NAME {
tables.insert(
RUNTIME_METRICS.to_string(),
self.build_table(RUNTIME_METRICS).unwrap(),
);
tables.insert(
BUILD_INFO.to_string(),
self.build_table(BUILD_INFO).unwrap(),
@@ -365,7 +350,6 @@ impl InformationSchemaProvider {
if let Some(process_list) = self.build_table(PROCESS_LIST) {
tables.insert(PROCESS_LIST.to_string(), process_list);
}
#[cfg(feature = "enterprise")]
for name in self.extra_table_factories.keys() {
tables.insert(name.clone(), self.build_table(name).expect(name));
}
@@ -444,7 +428,7 @@ pub trait InformationExtension {
}

/// The request to inspect the datanode.
#[derive(Debug, Clone, PartialEq, Eq)]
#[derive(Debug, Clone, PartialEq)]
pub struct DatanodeInspectRequest {
/// Kind to fetch from datanode.
pub kind: DatanodeInspectKind,

@@ -211,6 +211,7 @@ struct InformationSchemaPartitionsBuilder {
partition_names: StringVectorBuilder,
partition_ordinal_positions: Int64VectorBuilder,
partition_expressions: StringVectorBuilder,
partition_descriptions: StringVectorBuilder,
create_times: TimestampSecondVectorBuilder,
partition_ids: UInt64VectorBuilder,
}
@@ -231,6 +232,7 @@ impl InformationSchemaPartitionsBuilder {
partition_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
partition_ordinal_positions: Int64VectorBuilder::with_capacity(INIT_CAPACITY),
partition_expressions: StringVectorBuilder::with_capacity(INIT_CAPACITY),
partition_descriptions: StringVectorBuilder::with_capacity(INIT_CAPACITY),
create_times: TimestampSecondVectorBuilder::with_capacity(INIT_CAPACITY),
partition_ids: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
}
@@ -319,6 +321,21 @@ impl InformationSchemaPartitionsBuilder {
return;
}

// Get partition column names (shared by all partitions)
// In MySQL, PARTITION_EXPRESSION is the partitioning function expression (e.g., column name)
let partition_columns: String = table_info
.meta
.partition_column_names()
.cloned()
.collect::<Vec<_>>()
.join(", ");

let partition_expr_str = if partition_columns.is_empty() {
None
} else {
Some(partition_columns)
};

for (index, partition) in partitions.iter().enumerate() {
let partition_name = format!("p{index}");

@@ -328,8 +345,12 @@ impl InformationSchemaPartitionsBuilder {
self.partition_names.push(Some(&partition_name));
self.partition_ordinal_positions
.push(Some((index + 1) as i64));
let expression = partition.partition_expr.as_ref().map(|e| e.to_string());
self.partition_expressions.push(expression.as_deref());
// PARTITION_EXPRESSION: partition column names (same for all partitions)
self.partition_expressions
.push(partition_expr_str.as_deref());
// PARTITION_DESCRIPTION: partition boundary expression (different for each partition)
let description = partition.partition_expr.as_ref().map(|e| e.to_string());
self.partition_descriptions.push(description.as_deref());
self.create_times.push(Some(TimestampSecond::from(
table_info.meta.created_on.timestamp(),
)));
@@ -369,7 +390,7 @@ impl InformationSchemaPartitionsBuilder {
null_string_vector.clone(),
Arc::new(self.partition_expressions.finish()),
null_string_vector.clone(),
null_string_vector.clone(),
Arc::new(self.partition_descriptions.finish()),
// TODO(dennis): rows and index statistics info
null_i64_vector.clone(),
null_i64_vector.clone(),

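The net effect of the change above: PARTITION_EXPRESSION now repeats the partition column list for every row, while PARTITION_DESCRIPTION keeps the per-partition boundary expression. A tiny sketch (values are made up for illustration) of the column-name join that feeds PARTITION_EXPRESSION:

```rust
#[test]
fn partition_expression_is_the_column_list() {
    // Hypothetical table partitioned on a single `host` column.
    let partition_columns = vec!["host".to_string()];
    let partition_expr_str = if partition_columns.is_empty() {
        None
    } else {
        Some(partition_columns.join(", "))
    };
    // Every partition row shares this expression ...
    assert_eq!(partition_expr_str.as_deref(), Some("host"));
    // ... while PARTITION_DESCRIPTION carries the per-partition bound,
    // e.g. "host < 'h1'" for p0 and "host >= 'h1'" for p1.
}
```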
@@ -1,265 +0,0 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use arrow_schema::SchemaRef as ArrowSchemaRef;
|
||||
use common_catalog::consts::INFORMATION_SCHEMA_RUNTIME_METRICS_TABLE_ID;
|
||||
use common_error::ext::BoxedError;
|
||||
use common_recordbatch::adapter::RecordBatchStreamAdapter;
|
||||
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
|
||||
use common_time::util::current_time_millis;
|
||||
use datafusion::execution::TaskContext;
|
||||
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
|
||||
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
|
||||
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
|
||||
use datatypes::prelude::{ConcreteDataType, MutableVector};
|
||||
use datatypes::scalars::ScalarVectorBuilder;
|
||||
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
|
||||
use datatypes::vectors::{
|
||||
ConstantVector, Float64VectorBuilder, StringVectorBuilder, TimestampMillisecondVector,
|
||||
VectorRef,
|
||||
};
|
||||
use itertools::Itertools;
|
||||
use snafu::ResultExt;
|
||||
use store_api::storage::{ScanRequest, TableId};
|
||||
|
||||
use crate::error::{CreateRecordBatchSnafu, InternalSnafu, Result};
|
||||
use crate::system_schema::information_schema::{InformationTable, RUNTIME_METRICS};
|
||||
|
||||
#[derive(Debug)]
|
||||
pub(super) struct InformationSchemaMetrics {
|
||||
schema: SchemaRef,
|
||||
}
|
||||
|
||||
const METRIC_NAME: &str = "metric_name";
|
||||
const METRIC_VALUE: &str = "value";
|
||||
const METRIC_LABELS: &str = "labels";
|
||||
const PEER_ADDR: &str = "peer_addr";
|
||||
const PEER_TYPE: &str = "peer_type";
|
||||
const TIMESTAMP: &str = "timestamp";
|
||||
|
||||
/// The `information_schema.runtime_metrics` virtual table.
|
||||
/// It provides the GreptimeDB runtime metrics for the users by SQL.
|
||||
impl InformationSchemaMetrics {
|
||||
pub(super) fn new() -> Self {
|
||||
Self {
|
||||
schema: Self::schema(),
|
||||
}
|
||||
}
|
||||
|
||||
fn schema() -> SchemaRef {
|
||||
Arc::new(Schema::new(vec![
|
||||
ColumnSchema::new(METRIC_NAME, ConcreteDataType::string_datatype(), false),
|
||||
ColumnSchema::new(METRIC_VALUE, ConcreteDataType::float64_datatype(), false),
|
||||
ColumnSchema::new(METRIC_LABELS, ConcreteDataType::string_datatype(), true),
|
||||
ColumnSchema::new(PEER_ADDR, ConcreteDataType::string_datatype(), true),
|
||||
ColumnSchema::new(PEER_TYPE, ConcreteDataType::string_datatype(), false),
|
||||
ColumnSchema::new(
|
||||
TIMESTAMP,
|
||||
ConcreteDataType::timestamp_millisecond_datatype(),
|
||||
false,
|
||||
),
|
||||
]))
|
||||
}
|
||||
|
||||
fn builder(&self) -> InformationSchemaMetricsBuilder {
|
||||
InformationSchemaMetricsBuilder::new(self.schema.clone())
|
||||
}
|
||||
}
|
||||
|
||||
impl InformationTable for InformationSchemaMetrics {
|
||||
fn table_id(&self) -> TableId {
|
||||
INFORMATION_SCHEMA_RUNTIME_METRICS_TABLE_ID
|
||||
}
|
||||
|
||||
fn table_name(&self) -> &'static str {
|
||||
RUNTIME_METRICS
|
||||
}
|
||||
|
||||
fn schema(&self) -> SchemaRef {
|
||||
self.schema.clone()
|
||||
}
|
||||
|
||||
fn to_stream(&self, request: ScanRequest) -> Result<SendableRecordBatchStream> {
|
||||
let schema = self.schema.arrow_schema().clone();
|
||||
let mut builder = self.builder();
|
||||
let stream = Box::pin(DfRecordBatchStreamAdapter::new(
|
||||
schema,
|
||||
futures::stream::once(async move {
|
||||
builder
|
||||
.make_metrics(Some(request))
|
||||
.await
|
||||
.map(|x| x.into_df_record_batch())
|
||||
.map_err(Into::into)
|
||||
}),
|
||||
));
|
||||
|
||||
Ok(Box::pin(
|
||||
RecordBatchStreamAdapter::try_new(stream)
|
||||
.map_err(BoxedError::new)
|
||||
.context(InternalSnafu)?,
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
struct InformationSchemaMetricsBuilder {
|
||||
schema: SchemaRef,
|
||||
|
||||
metric_names: StringVectorBuilder,
|
||||
metric_values: Float64VectorBuilder,
|
||||
metric_labels: StringVectorBuilder,
|
||||
peer_addrs: StringVectorBuilder,
|
||||
peer_types: StringVectorBuilder,
|
||||
}
|
||||
|
||||
impl InformationSchemaMetricsBuilder {
|
||||
fn new(schema: SchemaRef) -> Self {
|
||||
Self {
|
||||
schema,
|
||||
metric_names: StringVectorBuilder::with_capacity(42),
|
||||
metric_values: Float64VectorBuilder::with_capacity(42),
|
||||
metric_labels: StringVectorBuilder::with_capacity(42),
|
||||
peer_addrs: StringVectorBuilder::with_capacity(42),
|
||||
peer_types: StringVectorBuilder::with_capacity(42),
|
||||
}
|
||||
}
|
||||
|
||||
fn add_metric(
|
||||
&mut self,
|
||||
metric_name: &str,
|
||||
labels: String,
|
||||
metric_value: f64,
|
||||
peer: Option<&str>,
|
||||
peer_type: &str,
|
||||
) {
|
||||
self.metric_names.push(Some(metric_name));
|
||||
self.metric_values.push(Some(metric_value));
|
||||
self.metric_labels.push(Some(&labels));
|
||||
self.peer_addrs.push(peer);
|
||||
self.peer_types.push(Some(peer_type));
|
||||
}
|
||||
|
||||
async fn make_metrics(&mut self, _request: Option<ScanRequest>) -> Result<RecordBatch> {
|
||||
let metric_families = prometheus::gather();
|
||||
|
||||
let write_request =
|
||||
common_telemetry::metric::convert_metric_to_write_request(metric_families, None, 0);
|
||||
|
||||
for ts in write_request.timeseries {
|
||||
//Safety: always has `__name__` label
|
||||
let metric_name = ts
|
||||
.labels
|
||||
.iter()
|
||||
.find_map(|label| {
|
||||
if label.name == "__name__" {
|
||||
Some(label.value.clone())
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
.unwrap();
|
||||
|
||||
self.add_metric(
|
||||
&metric_name,
|
||||
ts.labels
|
||||
.into_iter()
|
||||
.filter_map(|label| {
|
||||
if label.name == "__name__" {
|
||||
None
|
||||
} else {
|
||||
Some(format!("{}={}", label.name, label.value))
|
||||
}
|
||||
})
|
||||
.join(", "),
|
||||
// Safety: always has a sample
|
||||
ts.samples[0].value,
|
||||
// The peer column is always `None` for standalone
|
||||
None,
|
||||
"STANDALONE",
|
||||
);
|
||||
}
|
||||
|
||||
// FIXME(dennis): fetching other peers metrics
|
||||
self.finish()
|
||||
}
|
||||
|
||||
fn finish(&mut self) -> Result<RecordBatch> {
|
||||
let rows_num = self.metric_names.len();
|
||||
|
||||
let timestamps = Arc::new(ConstantVector::new(
|
||||
Arc::new(TimestampMillisecondVector::from_slice([
|
||||
current_time_millis(),
|
||||
])),
|
||||
rows_num,
|
||||
));
|
||||
|
||||
let columns: Vec<VectorRef> = vec![
|
||||
Arc::new(self.metric_names.finish()),
|
||||
Arc::new(self.metric_values.finish()),
|
||||
Arc::new(self.metric_labels.finish()),
|
||||
Arc::new(self.peer_addrs.finish()),
|
||||
Arc::new(self.peer_types.finish()),
|
||||
timestamps,
|
||||
];
|
||||
|
||||
RecordBatch::new(self.schema.clone(), columns).context(CreateRecordBatchSnafu)
|
||||
}
|
||||
}
|
||||
|
||||
impl DfPartitionStream for InformationSchemaMetrics {
|
||||
fn schema(&self) -> &ArrowSchemaRef {
|
||||
self.schema.arrow_schema()
|
||||
}
|
||||
|
||||
fn execute(&self, _: Arc<TaskContext>) -> DfSendableRecordBatchStream {
|
||||
let schema = self.schema.arrow_schema().clone();
|
||||
let mut builder = self.builder();
|
||||
Box::pin(DfRecordBatchStreamAdapter::new(
|
||||
schema,
|
||||
futures::stream::once(async move {
|
||||
builder
|
||||
.make_metrics(None)
|
||||
.await
|
||||
.map(|x| x.into_df_record_batch())
|
||||
.map_err(Into::into)
|
||||
}),
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use common_recordbatch::RecordBatches;
|
||||
|
||||
use super::*;
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_make_metrics() {
|
||||
let metrics = InformationSchemaMetrics::new();
|
||||
|
||||
let stream = metrics.to_stream(ScanRequest::default()).unwrap();
|
||||
|
||||
let batches = RecordBatches::try_collect(stream).await.unwrap();
|
||||
|
||||
let result_literal = batches.pretty_print().unwrap();
|
||||
|
||||
assert!(result_literal.contains(METRIC_NAME));
|
||||
assert!(result_literal.contains(METRIC_VALUE));
|
||||
assert!(result_literal.contains(METRIC_LABELS));
|
||||
assert!(result_literal.contains(PEER_ADDR));
|
||||
assert!(result_literal.contains(PEER_TYPE));
|
||||
assert!(result_literal.contains(TIMESTAMP));
|
||||
}
|
||||
}
|
||||
@@ -38,7 +38,6 @@ pub const TABLE_PRIVILEGES: &str = "table_privileges";
pub const TRIGGERS: &str = "triggers";
pub const GLOBAL_STATUS: &str = "global_status";
pub const SESSION_STATUS: &str = "session_status";
pub const RUNTIME_METRICS: &str = "runtime_metrics";
pub const PARTITIONS: &str = "partitions";
pub const REGION_PEERS: &str = "region_peers";
pub const TABLE_CONSTRAINTS: &str = "table_constraints";

@@ -12,7 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use std::collections::HashSet;
use std::sync::{Arc, Weak};

use arrow_schema::SchemaRef as ArrowSchemaRef;
@@ -255,14 +254,17 @@ impl InformationSchemaTablesBuilder {
// TODO(dennis): `region_stats` API is not stable in distributed cluster because of network issue etc.
// But we don't want the statements such as `show tables` fail,
// so using `unwrap_or_else` here instead of `?` operator.
let region_stats = information_extension
.region_stats()
.await
.map_err(|e| {
error!(e; "Failed to call region_stats");
e
})
.unwrap_or_else(|_| vec![]);
let region_stats = {
let mut x = information_extension
.region_stats()
.await
.unwrap_or_else(|e| {
error!(e; "Failed to find region stats in information_schema, fallback to all empty");
vec![]
});
x.sort_unstable_by_key(|x| x.id);
x
};

for schema_name in catalog_manager.schema_names(&catalog_name, None).await? {
let mut stream = catalog_manager.tables(&catalog_name, &schema_name, None);
@@ -273,16 +275,16 @@ impl InformationSchemaTablesBuilder {
// TODO(dennis): make it working for metric engine
let table_region_stats =
if table_info.meta.engine == MITO_ENGINE || table_info.is_physical_table() {
let region_ids = table_info
table_info
.meta
.region_numbers
.iter()
.map(|n| RegionId::new(table_info.ident.table_id, *n))
.collect::<HashSet<_>>();

region_stats
.iter()
.filter(|stat| region_ids.contains(&stat.id))
.flat_map(|region_id| {
region_stats
.binary_search_by_key(&region_id, |x| x.id)
.map(|i| &region_stats[i])
})
.collect::<Vec<_>>()
} else {
vec![]

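The rewrite above replaces the per-table `HashSet` plus linear filter with a one-time sort of `region_stats` followed by a binary search per region id. A self-contained sketch of that lookup pattern (the `Stat` type and values are illustrative, not from the codebase):

```rust
#[derive(Debug)]
struct Stat {
    id: u64,
    rows: u64,
}

/// `stats` must already be sorted by `id`, mirroring `sort_unstable_by_key` above.
fn lookup<'a>(stats: &'a [Stat], ids: &[u64]) -> Vec<&'a Stat> {
    ids.iter()
        .flat_map(|id| stats.binary_search_by_key(id, |s| s.id).map(|i| &stats[i]))
        .collect()
}

fn main() {
    let mut stats = vec![Stat { id: 3, rows: 10 }, Stat { id: 1, rows: 7 }];
    stats.sort_unstable_by_key(|s| s.id);
    assert_eq!(lookup(&stats, &[3, 1]).len(), 2);
    assert!(lookup(&stats, &[2]).is_empty()); // misses are skipped, not errors
}
```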
@@ -67,6 +67,7 @@ tracing-appender.workspace = true

[dev-dependencies]
common-meta = { workspace = true, features = ["testing"] }
common-test-util.workspace = true
common-version.workspace = true
serde.workspace = true
tempfile.workspace = true

@@ -15,5 +15,8 @@
mod object_store;
mod store;

pub use object_store::{ObjectStoreConfig, new_fs_object_store};
pub use object_store::{
ObjectStoreConfig, PrefixedAzblobConnection, PrefixedGcsConnection, PrefixedOssConnection,
PrefixedS3Connection, new_fs_object_store,
};
pub use store::StoreConfig;

@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use common_base::secrets::SecretString;
use common_base::secrets::{ExposeSecret, SecretString};
use common_error::ext::BoxedError;
use object_store::services::{Azblob, Fs, Gcs, Oss, S3};
use object_store::util::{with_instrument_layers, with_retry_layers};
@@ -22,9 +22,69 @@ use snafu::ResultExt;

use crate::error::{self};

/// Trait to convert CLI field types to target struct field types.
/// This enables `Option<SecretString>` (CLI) -> `SecretString` (target) conversions,
/// allowing us to distinguish "not provided" from "provided but empty".
trait IntoField<T> {
fn into_field(self) -> T;
}

/// Identity conversion for types that are the same.
impl<T> IntoField<T> for T {
fn into_field(self) -> T {
self
}
}

/// Convert `Option<SecretString>` to `SecretString`, using default for None.
impl IntoField<SecretString> for Option<SecretString> {
fn into_field(self) -> SecretString {
self.unwrap_or_default()
}
}

/// Trait for checking if a field is effectively empty.
///
/// **`is_empty()`**: Checks if the field has no meaningful value
/// - Used when backend is enabled to validate required fields
/// - `None`, `Some("")`, `false`, or `""` are considered empty
trait FieldValidator {
/// Check if the field is empty (has no meaningful value).
fn is_empty(&self) -> bool;
}

/// String fields: empty if the string is empty
impl FieldValidator for String {
fn is_empty(&self) -> bool {
self.is_empty()
}
}

/// Bool fields: false is considered "empty", true is "provided"
impl FieldValidator for bool {
fn is_empty(&self) -> bool {
!self
}
}

/// Option<String> fields: None or empty content is empty
impl FieldValidator for Option<String> {
fn is_empty(&self) -> bool {
self.as_ref().is_none_or(|s| s.is_empty())
}
}

/// Option<SecretString> fields: None or empty secret is empty
/// For secrets, Some("") is treated as "not provided" for both checks
impl FieldValidator for Option<SecretString> {
fn is_empty(&self) -> bool {
self.as_ref().is_none_or(|s| s.expose_secret().is_empty())
}
}

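A quick sketch of how these emptiness rules behave, using only the impls shown above; since `FieldValidator` is private to this module, such assertions would live in a local unit test:

```rust
#[test]
fn field_validator_emptiness() {
    assert!(FieldValidator::is_empty(&String::new()));
    assert!(FieldValidator::is_empty(&false)); // bool: false counts as "not provided"
    assert!(FieldValidator::is_empty(&Option::<String>::None));
    assert!(FieldValidator::is_empty(&Some(String::new()))); // Some("") is still empty
    assert!(!FieldValidator::is_empty(&"bucket".to_string()));
}
```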
macro_rules! wrap_with_clap_prefix {
|
||||
(
|
||||
$new_name:ident, $prefix:literal, $base:ty, {
|
||||
$new_name:ident, $prefix:literal, $enable_flag:literal, $base:ty, {
|
||||
$( $( #[doc = $doc:expr] )? $( #[alias = $alias:literal] )? $field:ident : $type:ty $( = $default:expr )? ),* $(,)?
|
||||
}
|
||||
) => {
|
||||
@@ -34,15 +94,16 @@ macro_rules! wrap_with_clap_prefix {
|
||||
$(
|
||||
$( #[doc = $doc] )?
|
||||
$( #[clap(alias = $alias)] )?
|
||||
#[clap(long $(, default_value_t = $default )? )]
|
||||
[<$prefix $field>]: $type,
|
||||
#[clap(long, requires = $enable_flag $(, default_value_t = $default )? )]
|
||||
pub [<$prefix $field>]: $type,
|
||||
)*
|
||||
}
|
||||
|
||||
impl From<$new_name> for $base {
|
||||
fn from(w: $new_name) -> Self {
|
||||
Self {
|
||||
$( $field: w.[<$prefix $field>] ),*
|
||||
// Use into_field() to handle Option<SecretString> -> SecretString conversion
|
||||
$( $field: w.[<$prefix $field>].into_field() ),*
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -50,9 +111,90 @@ macro_rules! wrap_with_clap_prefix {
|
||||
};
|
||||
}
|
||||
|
||||
/// Macro for declarative backend validation.
|
||||
///
|
||||
/// # Validation Rules
|
||||
///
|
||||
/// For each storage backend (S3, OSS, GCS, Azblob), this function validates:
|
||||
/// **When backend is enabled** (e.g., `--s3`): All required fields must be non-empty
|
||||
///
|
||||
/// Note: When backend is disabled, clap's `requires` attribute ensures no configuration
|
||||
/// fields can be provided at parse time.
|
||||
///
|
||||
/// # Syntax
|
||||
///
|
||||
/// ```ignore
|
||||
/// validate_backend!(
|
||||
/// enable: self.enable_s3,
|
||||
/// name: "S3",
|
||||
/// required: [(field1, "name1"), (field2, "name2"), ...],
|
||||
/// custom_validator: |missing| { ... } // optional
|
||||
/// )
|
||||
/// ```
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// - `enable`: Boolean expression indicating if backend is enabled
|
||||
/// - `name`: Human-readable backend name for error messages
|
||||
/// - `required`: Array of (field_ref, field_name) tuples for required fields
|
||||
/// - `custom_validator`: Optional closure for complex validation logic
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```ignore
|
||||
/// validate_backend!(
|
||||
/// enable: self.enable_s3,
|
||||
/// name: "S3",
|
||||
/// required: [
|
||||
/// (&self.s3.s3_bucket, "bucket"),
|
||||
/// (&self.s3.s3_access_key_id, "access key ID"),
|
||||
/// ]
|
||||
/// )
|
||||
/// ```
|
||||
macro_rules! validate_backend {
|
||||
(
|
||||
enable: $enable:expr,
|
||||
name: $backend_name:expr,
|
||||
required: [ $( ($field:expr, $field_name:expr) ),* $(,)? ]
|
||||
$(, custom_validator: $custom_validator:expr)?
|
||||
) => {{
|
||||
if $enable {
|
||||
// Check required fields when backend is enabled
|
||||
let mut missing = Vec::new();
|
||||
$(
|
||||
if FieldValidator::is_empty($field) {
|
||||
missing.push($field_name);
|
||||
}
|
||||
)*
|
||||
|
||||
// Run custom validation if provided
|
||||
$(
|
||||
$custom_validator(&mut missing);
|
||||
)?
|
||||
|
||||
if !missing.is_empty() {
|
||||
return Err(BoxedError::new(
|
||||
error::MissingConfigSnafu {
|
||||
msg: format!(
|
||||
"{} {} must be set when --{} is enabled.",
|
||||
$backend_name,
|
||||
missing.join(", "),
|
||||
$backend_name.to_lowercase()
|
||||
),
|
||||
}
|
||||
.build(),
|
||||
));
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}};
|
||||
}
|
||||
|
||||
wrap_with_clap_prefix! {
|
||||
PrefixedAzblobConnection,
|
||||
"azblob-",
|
||||
"enable_azblob",
|
||||
AzblobConnection,
|
||||
{
|
||||
#[doc = "The container of the object store."]
|
||||
@@ -60,9 +202,9 @@ wrap_with_clap_prefix! {
|
||||
#[doc = "The root of the object store."]
|
||||
root: String = Default::default(),
|
||||
#[doc = "The account name of the object store."]
|
||||
account_name: SecretString = Default::default(),
|
||||
account_name: Option<SecretString>,
|
||||
#[doc = "The account key of the object store."]
|
||||
account_key: SecretString = Default::default(),
|
||||
account_key: Option<SecretString>,
|
||||
#[doc = "The endpoint of the object store."]
|
||||
endpoint: String = Default::default(),
|
||||
#[doc = "The SAS token of the object store."]
|
||||
@@ -70,9 +212,33 @@ wrap_with_clap_prefix! {
|
||||
}
|
||||
}
|
||||
|
||||
impl PrefixedAzblobConnection {
|
||||
pub fn validate(&self) -> Result<(), BoxedError> {
|
||||
validate_backend!(
|
||||
enable: true,
|
||||
name: "AzBlob",
|
||||
required: [
|
||||
(&self.azblob_container, "container"),
|
||||
(&self.azblob_root, "root"),
|
||||
(&self.azblob_account_name, "account name"),
|
||||
(&self.azblob_endpoint, "endpoint"),
|
||||
],
|
||||
custom_validator: |missing: &mut Vec<&str>| {
|
||||
// account_key is only required if sas_token is not provided
|
||||
if self.azblob_sas_token.is_none()
|
||||
&& self.azblob_account_key.is_empty()
|
||||
{
|
||||
missing.push("account key (when sas_token is not provided)");
|
||||
}
|
||||
}
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
wrap_with_clap_prefix! {
|
||||
PrefixedS3Connection,
|
||||
"s3-",
|
||||
"enable_s3",
|
||||
S3Connection,
|
||||
{
|
||||
#[doc = "The bucket of the object store."]
|
||||
@@ -80,21 +246,39 @@ wrap_with_clap_prefix! {
|
||||
#[doc = "The root of the object store."]
|
||||
root: String = Default::default(),
|
||||
#[doc = "The access key ID of the object store."]
|
||||
access_key_id: SecretString = Default::default(),
|
||||
access_key_id: Option<SecretString>,
|
||||
#[doc = "The secret access key of the object store."]
|
||||
secret_access_key: SecretString = Default::default(),
|
||||
secret_access_key: Option<SecretString>,
|
||||
#[doc = "The endpoint of the object store."]
|
||||
endpoint: Option<String>,
|
||||
#[doc = "The region of the object store."]
|
||||
region: Option<String>,
|
||||
#[doc = "Enable virtual host style for the object store."]
|
||||
enable_virtual_host_style: bool = Default::default(),
|
||||
#[doc = "Disable EC2 metadata service for the object store."]
|
||||
disable_ec2_metadata: bool = Default::default(),
|
||||
}
|
||||
}
|
||||
|
||||
impl PrefixedS3Connection {
|
||||
pub fn validate(&self) -> Result<(), BoxedError> {
|
||||
validate_backend!(
|
||||
enable: true,
|
||||
name: "S3",
|
||||
required: [
|
||||
(&self.s3_bucket, "bucket"),
|
||||
(&self.s3_access_key_id, "access key ID"),
|
||||
(&self.s3_secret_access_key, "secret access key"),
|
||||
(&self.s3_region, "region"),
|
||||
]
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
wrap_with_clap_prefix! {
|
||||
PrefixedOssConnection,
|
||||
"oss-",
|
||||
"enable_oss",
|
||||
OssConnection,
|
||||
{
|
||||
#[doc = "The bucket of the object store."]
|
||||
@@ -102,17 +286,33 @@ wrap_with_clap_prefix! {
|
||||
#[doc = "The root of the object store."]
|
||||
root: String = Default::default(),
|
||||
#[doc = "The access key ID of the object store."]
|
||||
access_key_id: SecretString = Default::default(),
|
||||
access_key_id: Option<SecretString>,
|
||||
#[doc = "The access key secret of the object store."]
|
||||
access_key_secret: SecretString = Default::default(),
|
||||
access_key_secret: Option<SecretString>,
|
||||
#[doc = "The endpoint of the object store."]
|
||||
endpoint: String = Default::default(),
|
||||
}
|
||||
}
|
||||
|
||||
impl PrefixedOssConnection {
|
||||
pub fn validate(&self) -> Result<(), BoxedError> {
|
||||
validate_backend!(
|
||||
enable: true,
|
||||
name: "OSS",
|
||||
required: [
|
||||
(&self.oss_bucket, "bucket"),
|
||||
(&self.oss_access_key_id, "access key ID"),
|
||||
(&self.oss_access_key_secret, "access key secret"),
|
||||
(&self.oss_endpoint, "endpoint"),
|
||||
]
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
wrap_with_clap_prefix! {
|
||||
PrefixedGcsConnection,
|
||||
"gcs-",
|
||||
"enable_gcs",
|
||||
GcsConnection,
|
||||
{
|
||||
#[doc = "The root of the object store."]
|
||||
@@ -122,40 +322,72 @@ wrap_with_clap_prefix! {
|
||||
#[doc = "The scope of the object store."]
|
||||
scope: String = Default::default(),
|
||||
#[doc = "The credential path of the object store."]
|
||||
credential_path: SecretString = Default::default(),
|
||||
credential_path: Option<SecretString>,
|
||||
#[doc = "The credential of the object store."]
|
||||
credential: SecretString = Default::default(),
|
||||
credential: Option<SecretString>,
|
||||
#[doc = "The endpoint of the object store."]
|
||||
endpoint: String = Default::default(),
|
||||
}
|
||||
}
|
||||
|
||||
/// common config for object store.
|
||||
impl PrefixedGcsConnection {
|
||||
pub fn validate(&self) -> Result<(), BoxedError> {
|
||||
validate_backend!(
|
||||
enable: true,
|
||||
name: "GCS",
|
||||
required: [
|
||||
(&self.gcs_bucket, "bucket"),
|
||||
(&self.gcs_root, "root"),
|
||||
(&self.gcs_scope, "scope"),
|
||||
]
|
||||
// No custom_validator needed: GCS supports Application Default Credentials (ADC)
|
||||
// where neither credential_path nor credential is required.
|
||||
// Endpoint is also optional (defaults to https://storage.googleapis.com).
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
/// Common config for object store.
|
||||
///
|
||||
/// # Dependency Enforcement
|
||||
///
|
||||
/// Each backend's configuration fields (e.g., `--s3-bucket`) requires its corresponding
|
||||
/// enable flag (e.g., `--s3`) to be present. This is enforced by `clap` at parse time
|
||||
/// using the `requires` attribute.
|
||||
///
|
||||
/// For example, attempting to use `--s3-bucket my-bucket` without `--s3` will result in:
|
||||
/// ```text
|
||||
/// error: The argument '--s3-bucket <BUCKET>' requires '--s3'
|
||||
/// ```
|
||||
///
|
||||
/// This ensures that users cannot accidentally provide backend-specific configuration
|
||||
/// without explicitly enabling that backend.
|
||||
#[derive(clap::Parser, Debug, Clone, PartialEq, Default)]
|
||||
#[clap(group(clap::ArgGroup::new("storage_backend").required(false).multiple(false)))]
|
||||
pub struct ObjectStoreConfig {
|
||||
/// Whether to use S3 object store.
|
||||
#[clap(long, alias = "s3")]
|
||||
#[clap(long = "s3", group = "storage_backend")]
|
||||
pub enable_s3: bool,
|
||||
|
||||
#[clap(flatten)]
|
||||
pub s3: PrefixedS3Connection,
|
||||
|
||||
/// Whether to use OSS.
|
||||
#[clap(long, alias = "oss")]
|
||||
#[clap(long = "oss", group = "storage_backend")]
|
||||
pub enable_oss: bool,
|
||||
|
||||
#[clap(flatten)]
|
||||
pub oss: PrefixedOssConnection,
|
||||
|
||||
/// Whether to use GCS.
|
||||
#[clap(long, alias = "gcs")]
|
||||
#[clap(long = "gcs", group = "storage_backend")]
|
||||
pub enable_gcs: bool,
|
||||
|
||||
#[clap(flatten)]
|
||||
pub gcs: PrefixedGcsConnection,
|
||||
|
||||
/// Whether to use Azure Blob.
|
||||
#[clap(long, alias = "azblob")]
|
||||
#[clap(long = "azblob", group = "storage_backend")]
|
||||
pub enable_azblob: bool,
|
||||
|
||||
#[clap(flatten)]
|
||||
@@ -173,52 +405,66 @@ pub fn new_fs_object_store(root: &str) -> std::result::Result<ObjectStore, Boxed
|
||||
Ok(with_instrument_layers(object_store, false))
|
||||
}
|
||||
|
||||
macro_rules! gen_object_store_builder {
|
||||
($method:ident, $field:ident, $conn_type:ty, $service_type:ty) => {
|
||||
pub fn $method(&self) -> Result<ObjectStore, BoxedError> {
|
||||
let config = <$conn_type>::from(self.$field.clone());
|
||||
common_telemetry::info!(
|
||||
"Building object store with {}: {:?}",
|
||||
stringify!($field),
|
||||
config
|
||||
);
|
||||
let object_store = ObjectStore::new(<$service_type>::from(&config))
|
||||
.context(error::InitBackendSnafu)
|
||||
.map_err(BoxedError::new)?
|
||||
.finish();
|
||||
Ok(with_instrument_layers(
|
||||
with_retry_layers(object_store),
|
||||
false,
|
||||
))
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
impl ObjectStoreConfig {
|
||||
gen_object_store_builder!(build_s3, s3, S3Connection, S3);
|
||||
|
||||
gen_object_store_builder!(build_oss, oss, OssConnection, Oss);
|
||||
|
||||
gen_object_store_builder!(build_gcs, gcs, GcsConnection, Gcs);
|
||||
|
||||
gen_object_store_builder!(build_azblob, azblob, AzblobConnection, Azblob);
|
||||
|
||||
pub fn validate(&self) -> Result<(), BoxedError> {
|
||||
if self.enable_s3 {
|
||||
self.s3.validate()?;
|
||||
}
|
||||
if self.enable_oss {
|
||||
self.oss.validate()?;
|
||||
}
|
||||
if self.enable_gcs {
|
||||
self.gcs.validate()?;
|
||||
}
|
||||
if self.enable_azblob {
|
||||
self.azblob.validate()?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Builds the object store from the config.
|
||||
pub fn build(&self) -> Result<Option<ObjectStore>, BoxedError> {
|
||||
let object_store = if self.enable_s3 {
|
||||
let s3 = S3Connection::from(self.s3.clone());
|
||||
common_telemetry::info!("Building object store with s3: {:?}", s3);
|
||||
Some(
|
||||
ObjectStore::new(S3::from(&s3))
|
||||
.context(error::InitBackendSnafu)
|
||||
.map_err(BoxedError::new)?
|
||||
.finish(),
|
||||
)
|
||||
self.validate()?;
|
||||
|
||||
if self.enable_s3 {
|
||||
self.build_s3().map(Some)
|
||||
} else if self.enable_oss {
|
||||
let oss = OssConnection::from(self.oss.clone());
|
||||
common_telemetry::info!("Building object store with oss: {:?}", oss);
|
||||
Some(
|
||||
ObjectStore::new(Oss::from(&oss))
|
||||
.context(error::InitBackendSnafu)
|
||||
.map_err(BoxedError::new)?
|
||||
.finish(),
|
||||
)
|
||||
self.build_oss().map(Some)
|
||||
} else if self.enable_gcs {
|
||||
let gcs = GcsConnection::from(self.gcs.clone());
|
||||
common_telemetry::info!("Building object store with gcs: {:?}", gcs);
|
||||
Some(
|
||||
ObjectStore::new(Gcs::from(&gcs))
|
||||
.context(error::InitBackendSnafu)
|
||||
.map_err(BoxedError::new)?
|
||||
.finish(),
|
||||
)
|
||||
self.build_gcs().map(Some)
|
||||
} else if self.enable_azblob {
|
||||
let azblob = AzblobConnection::from(self.azblob.clone());
|
||||
common_telemetry::info!("Building object store with azblob: {:?}", azblob);
|
||||
Some(
|
||||
ObjectStore::new(Azblob::from(&azblob))
|
||||
.context(error::InitBackendSnafu)
|
||||
.map_err(BoxedError::new)?
|
||||
.finish(),
|
||||
)
|
||||
self.build_azblob().map(Some)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
let object_store = object_store
|
||||
.map(|object_store| with_instrument_layers(with_retry_layers(object_store), false));
|
||||
|
||||
Ok(object_store)
|
||||
Ok(None)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -19,7 +19,7 @@ use common_error::ext::BoxedError;
use common_meta::kv_backend::KvBackendRef;
use common_meta::kv_backend::chroot::ChrootKvBackend;
use common_meta::kv_backend::etcd::EtcdStore;
use meta_srv::metasrv::BackendImpl;
use meta_srv::metasrv::{BackendClientOptions, BackendImpl};
use meta_srv::utils::etcd::create_etcd_client_with_tls;
use servers::tls::{TlsMode, TlsOption};

@@ -112,9 +112,13 @@ impl StoreConfig {
let kvbackend = match self.backend {
BackendImpl::EtcdStore => {
let tls_config = self.tls_config();
let etcd_client = create_etcd_client_with_tls(store_addrs, tls_config.as_ref())
.await
.map_err(BoxedError::new)?;
let etcd_client = create_etcd_client_with_tls(
store_addrs,
&BackendClientOptions::default(),
tls_config.as_ref(),
)
.await
.map_err(BoxedError::new)?;
Ok(EtcdStore::with_etcd_client(etcd_client, max_txn_ops))
}
#[cfg(feature = "pg_kvbackend")]

@@ -14,6 +14,7 @@

mod export;
mod import;
mod storage_export;

use clap::Subcommand;
use client::DEFAULT_CATALOG_NAME;

File diff suppressed because it is too large
@@ -56,9 +56,11 @@ pub struct ImportCommand {
|
||||
#[clap(long, default_value_t = default_database())]
|
||||
database: String,
|
||||
|
||||
/// Parallelism of the import.
|
||||
#[clap(long, short = 'j', default_value = "1")]
|
||||
import_jobs: usize,
|
||||
/// The number of databases imported in parallel.
|
||||
/// For example, if there are 20 databases and `db_parallelism` is 4,
|
||||
/// 4 databases will be imported concurrently.
|
||||
#[clap(long, short = 'j', default_value = "1", alias = "import-jobs")]
|
||||
db_parallelism: usize,
|
||||
|
||||
/// Max retry times for each job.
|
||||
#[clap(long, default_value = "3")]
|
||||
@@ -109,7 +111,7 @@ impl ImportCommand {
|
||||
schema,
|
||||
database_client,
|
||||
input_dir: self.input_dir.clone(),
|
||||
parallelism: self.import_jobs,
|
||||
parallelism: self.db_parallelism,
|
||||
target: self.target.clone(),
|
||||
}))
|
||||
}
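// The `db_parallelism` flag (alias: `import-jobs`) bounds how many databases
// are imported at once. The snippet below is a generic sketch of that kind of
// bounded concurrency using the `futures` and `tokio` crates; it is not the
// import command's actual implementation, and `import_database` is a placeholder.
use futures::{StreamExt, stream};

async fn import_database(db: String) {
    // Placeholder for the real per-database import work.
    println!("importing {db}");
}

#[tokio::main]
async fn main() {
    let databases: Vec<String> = (0..20).map(|i| format!("db_{i}")).collect();
    let db_parallelism = 4;

    // With 20 databases and db_parallelism = 4, at most 4 imports run concurrently.
    stream::iter(databases)
        .for_each_concurrent(db_parallelism, import_database)
        .await;
}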
|
||||
|
||||
373
src/cli/src/data/storage_export.rs
Normal file
@@ -0,0 +1,373 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::path::PathBuf;
|
||||
|
||||
use common_base::secrets::{ExposeSecret, SecretString};
|
||||
use common_error::ext::BoxedError;
|
||||
|
||||
use crate::common::{
|
||||
PrefixedAzblobConnection, PrefixedGcsConnection, PrefixedOssConnection, PrefixedS3Connection,
|
||||
};
|
||||
|
||||
/// Helper function to extract secret string from Option<SecretString>.
|
||||
/// Returns empty string if None.
|
||||
fn expose_optional_secret(secret: &Option<SecretString>) -> &str {
|
||||
secret
|
||||
.as_ref()
|
||||
.map(|s| s.expose_secret().as_str())
|
||||
.unwrap_or("")
|
||||
}
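// Small example of the helper above: a missing secret becomes an empty string,
// which the masking helper later skips, so absent credentials never produce
// "[REDACTED]". This assumes the usual `From<String>` conversion for `SecretString`.
#[cfg(test)]
mod expose_secret_example {
    use common_base::secrets::SecretString;

    use super::*;

    #[test]
    fn missing_secret_becomes_empty_string() {
        assert_eq!(expose_optional_secret(&None), "");
        let secret = Some(SecretString::from("token".to_string()));
        assert_eq!(expose_optional_secret(&secret), "token");
    }
}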
|
||||
|
||||
/// Helper function to format root path with leading slash if non-empty.
|
||||
fn format_root_path(root: &str) -> String {
|
||||
if root.is_empty() {
|
||||
String::new()
|
||||
} else {
|
||||
format!("/{}", root)
|
||||
}
|
||||
}
|
||||
|
||||
/// Helper function to mask multiple secrets in a string.
|
||||
fn mask_secrets(mut sql: String, secrets: &[&str]) -> String {
|
||||
for secret in secrets {
|
||||
if !secret.is_empty() {
|
||||
sql = sql.replace(secret, "[REDACTED]");
|
||||
}
|
||||
}
|
||||
sql
|
||||
}
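// Illustration of the masking behaviour: every non-empty secret occurring in
// the SQL text is replaced with "[REDACTED]" before the statement is logged.
#[cfg(test)]
mod mask_secrets_example {
    use super::*;

    #[test]
    fn masks_each_non_empty_secret() {
        let sql =
            "COPY DATABASE d TO 's3://b/' CONNECTION (SECRET_ACCESS_KEY='abc123')".to_string();
        let masked = mask_secrets(sql, &["abc123", ""]);
        assert_eq!(
            masked,
            "COPY DATABASE d TO 's3://b/' CONNECTION (SECRET_ACCESS_KEY='[REDACTED]')"
        );
    }
}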
|
||||
|
||||
/// Helper function to format storage URI.
|
||||
fn format_uri(scheme: &str, bucket: &str, root: &str, path: &str) -> String {
|
||||
let root = format_root_path(root);
|
||||
format!("{}://{}{}/{}", scheme, bucket, root, path)
|
||||
}
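// Worked example of the URI helpers: an empty root contributes nothing, while
// a non-empty root gains a leading slash between the bucket and the path.
#[cfg(test)]
mod format_uri_example {
    use super::*;

    #[test]
    fn formats_with_and_without_root() {
        assert_eq!(
            format_uri("s3", "my-bucket", "exports", "greptime/public/"),
            "s3://my-bucket/exports/greptime/public/"
        );
        assert_eq!(
            format_uri("oss", "my-bucket", "", "greptime/public/"),
            "oss://my-bucket/greptime/public/"
        );
    }
}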
|
||||
|
||||
/// Trait for storage backends that can be used for data export.
|
||||
pub trait StorageExport: Send + Sync {
|
||||
/// Generates the storage path for the `COPY DATABASE` command.
/// Returns `(path, connection_string)`, where `connection_string` includes the `CONNECTION` clause.
|
||||
fn get_storage_path(&self, catalog: &str, schema: &str) -> (String, String);
|
||||
|
||||
/// Format the output path for logging purposes.
|
||||
fn format_output_path(&self, file_path: &str) -> String;
|
||||
|
||||
/// Mask sensitive information in SQL commands for safe logging.
|
||||
fn mask_sensitive_info(&self, sql: &str) -> String;
|
||||
}
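// Sketch of how a caller could combine the two halves returned by
// `get_storage_path` and log the statement safely. The `COPY DATABASE` syntax
// and the `build_copy_database_sql` name are illustrative only; the export
// command's real SQL construction is not part of this file.
#[allow(dead_code)]
fn build_copy_database_sql(
    backend: &dyn StorageExport,
    catalog: &str,
    schema: &str,
) -> (String, String) {
    let (path, connection) = backend.get_storage_path(catalog, schema);
    let sql = format!("COPY DATABASE \"{schema}\" TO '{path}'{connection}");
    // Log only the masked form so credentials never reach the logs.
    let masked_for_logging = backend.mask_sensitive_info(&sql);
    (sql, masked_for_logging)
}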
|
||||
|
||||
macro_rules! define_backend {
|
||||
($name:ident, $config:ty) => {
|
||||
#[derive(Clone)]
|
||||
pub struct $name {
|
||||
config: $config,
|
||||
}
|
||||
|
||||
impl $name {
|
||||
pub fn new(config: $config) -> Result<Self, BoxedError> {
|
||||
config.validate()?;
|
||||
Ok(Self { config })
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
/// Local file system storage backend.
|
||||
#[derive(Clone)]
|
||||
pub struct FsBackend {
|
||||
output_dir: String,
|
||||
}
|
||||
|
||||
impl FsBackend {
|
||||
pub fn new(output_dir: String) -> Self {
|
||||
Self { output_dir }
|
||||
}
|
||||
}
|
||||
|
||||
impl StorageExport for FsBackend {
|
||||
fn get_storage_path(&self, catalog: &str, schema: &str) -> (String, String) {
|
||||
if self.output_dir.is_empty() {
|
||||
unreachable!("output_dir must be set when not using remote storage")
|
||||
}
|
||||
let path = PathBuf::from(&self.output_dir)
|
||||
.join(catalog)
|
||||
.join(format!("{schema}/"))
|
||||
.to_string_lossy()
|
||||
.to_string();
|
||||
(path, String::new())
|
||||
}
|
||||
|
||||
fn format_output_path(&self, file_path: &str) -> String {
|
||||
format!("{}/{}", self.output_dir, file_path)
|
||||
}
|
||||
|
||||
fn mask_sensitive_info(&self, sql: &str) -> String {
|
||||
sql.to_string()
|
||||
}
|
||||
}
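// Example of the local-filesystem backend on a Unix-style path: the COPY
// target is <output_dir>/<catalog>/<schema>/ and no CONNECTION clause is emitted.
#[cfg(test)]
mod fs_backend_example {
    use super::*;

    #[test]
    fn builds_local_path_without_connection() {
        let backend = FsBackend::new("/tmp/export".to_string());
        let (path, connection) = backend.get_storage_path("greptime", "public");
        assert_eq!(path, "/tmp/export/greptime/public/");
        assert!(connection.is_empty());
    }
}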
|
||||
|
||||
define_backend!(S3Backend, PrefixedS3Connection);
|
||||
|
||||
impl StorageExport for S3Backend {
|
||||
fn get_storage_path(&self, catalog: &str, schema: &str) -> (String, String) {
|
||||
let s3_path = format_uri(
|
||||
"s3",
|
||||
&self.config.s3_bucket,
|
||||
&self.config.s3_root,
|
||||
&format!("{}/{}/", catalog, schema),
|
||||
);
|
||||
|
||||
let mut connection_options = vec![
|
||||
format!(
|
||||
"ACCESS_KEY_ID='{}'",
|
||||
expose_optional_secret(&self.config.s3_access_key_id)
|
||||
),
|
||||
format!(
|
||||
"SECRET_ACCESS_KEY='{}'",
|
||||
expose_optional_secret(&self.config.s3_secret_access_key)
|
||||
),
|
||||
];
|
||||
|
||||
if let Some(region) = &self.config.s3_region {
|
||||
connection_options.push(format!("REGION='{}'", region));
|
||||
}
|
||||
|
||||
if let Some(endpoint) = &self.config.s3_endpoint {
|
||||
connection_options.push(format!("ENDPOINT='{}'", endpoint));
|
||||
}
|
||||
|
||||
let connection_str = format!(" CONNECTION ({})", connection_options.join(", "));
|
||||
(s3_path, connection_str)
|
||||
}
|
||||
|
||||
fn format_output_path(&self, file_path: &str) -> String {
|
||||
format_uri(
|
||||
"s3",
|
||||
&self.config.s3_bucket,
|
||||
&self.config.s3_root,
|
||||
file_path,
|
||||
)
|
||||
}
|
||||
|
||||
fn mask_sensitive_info(&self, sql: &str) -> String {
|
||||
mask_secrets(
|
||||
sql.to_string(),
|
||||
&[
|
||||
expose_optional_secret(&self.config.s3_access_key_id),
|
||||
expose_optional_secret(&self.config.s3_secret_access_key),
|
||||
],
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
define_backend!(OssBackend, PrefixedOssConnection);
|
||||
|
||||
impl StorageExport for OssBackend {
|
||||
fn get_storage_path(&self, catalog: &str, schema: &str) -> (String, String) {
|
||||
let oss_path = format_uri(
|
||||
"oss",
|
||||
&self.config.oss_bucket,
|
||||
&self.config.oss_root,
|
||||
&format!("{}/{}/", catalog, schema),
|
||||
);
|
||||
|
||||
let connection_options = [
|
||||
format!(
|
||||
"ACCESS_KEY_ID='{}'",
|
||||
expose_optional_secret(&self.config.oss_access_key_id)
|
||||
),
|
||||
format!(
|
||||
"ACCESS_KEY_SECRET='{}'",
|
||||
expose_optional_secret(&self.config.oss_access_key_secret)
|
||||
),
|
||||
];
|
||||
|
||||
let connection_str = format!(" CONNECTION ({})", connection_options.join(", "));
|
||||
(oss_path, connection_str)
|
||||
}
|
||||
|
||||
fn format_output_path(&self, file_path: &str) -> String {
|
||||
format_uri(
|
||||
"oss",
|
||||
&self.config.oss_bucket,
|
||||
&self.config.oss_root,
|
||||
file_path,
|
||||
)
|
||||
}
|
||||
|
||||
fn mask_sensitive_info(&self, sql: &str) -> String {
|
||||
mask_secrets(
|
||||
sql.to_string(),
|
||||
&[
|
||||
expose_optional_secret(&self.config.oss_access_key_id),
|
||||
expose_optional_secret(&self.config.oss_access_key_secret),
|
||||
],
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
define_backend!(GcsBackend, PrefixedGcsConnection);
|
||||
|
||||
impl StorageExport for GcsBackend {
|
||||
fn get_storage_path(&self, catalog: &str, schema: &str) -> (String, String) {
|
||||
let gcs_path = format_uri(
|
||||
"gcs",
|
||||
&self.config.gcs_bucket,
|
||||
&self.config.gcs_root,
|
||||
&format!("{}/{}/", catalog, schema),
|
||||
);
|
||||
|
||||
let mut connection_options = Vec::new();
|
||||
|
||||
let credential_path = expose_optional_secret(&self.config.gcs_credential_path);
|
||||
if !credential_path.is_empty() {
|
||||
connection_options.push(format!("CREDENTIAL_PATH='{}'", credential_path));
|
||||
}
|
||||
|
||||
let credential = expose_optional_secret(&self.config.gcs_credential);
|
||||
if !credential.is_empty() {
|
||||
connection_options.push(format!("CREDENTIAL='{}'", credential));
|
||||
}
|
||||
|
||||
if !self.config.gcs_endpoint.is_empty() {
|
||||
connection_options.push(format!("ENDPOINT='{}'", self.config.gcs_endpoint));
|
||||
}
|
||||
|
||||
let connection_str = if connection_options.is_empty() {
|
||||
String::new()
|
||||
} else {
|
||||
format!(" CONNECTION ({})", connection_options.join(", "))
|
||||
};
|
||||
|
||||
(gcs_path, connection_str)
|
||||
}
|
||||
|
||||
fn format_output_path(&self, file_path: &str) -> String {
|
||||
format_uri(
|
||||
"gcs",
|
||||
&self.config.gcs_bucket,
|
||||
&self.config.gcs_root,
|
||||
file_path,
|
||||
)
|
||||
}
|
||||
|
||||
fn mask_sensitive_info(&self, sql: &str) -> String {
|
||||
mask_secrets(
|
||||
sql.to_string(),
|
||||
&[
|
||||
expose_optional_secret(&self.config.gcs_credential_path),
|
||||
expose_optional_secret(&self.config.gcs_credential),
|
||||
],
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
define_backend!(AzblobBackend, PrefixedAzblobConnection);
|
||||
|
||||
impl StorageExport for AzblobBackend {
|
||||
fn get_storage_path(&self, catalog: &str, schema: &str) -> (String, String) {
|
||||
let azblob_path = format_uri(
|
||||
"azblob",
|
||||
&self.config.azblob_container,
|
||||
&self.config.azblob_root,
|
||||
&format!("{}/{}/", catalog, schema),
|
||||
);
|
||||
|
||||
let mut connection_options = vec![
|
||||
format!(
|
||||
"ACCOUNT_NAME='{}'",
|
||||
expose_optional_secret(&self.config.azblob_account_name)
|
||||
),
|
||||
format!(
|
||||
"ACCOUNT_KEY='{}'",
|
||||
expose_optional_secret(&self.config.azblob_account_key)
|
||||
),
|
||||
];
|
||||
|
||||
if let Some(sas_token) = &self.config.azblob_sas_token {
|
||||
connection_options.push(format!("SAS_TOKEN='{}'", sas_token));
|
||||
}
|
||||
|
||||
let connection_str = format!(" CONNECTION ({})", connection_options.join(", "));
|
||||
(azblob_path, connection_str)
|
||||
}
|
||||
|
||||
fn format_output_path(&self, file_path: &str) -> String {
|
||||
format_uri(
|
||||
"azblob",
|
||||
&self.config.azblob_container,
|
||||
&self.config.azblob_root,
|
||||
file_path,
|
||||
)
|
||||
}
|
||||
|
||||
fn mask_sensitive_info(&self, sql: &str) -> String {
|
||||
mask_secrets(
|
||||
sql.to_string(),
|
||||
&[
|
||||
expose_optional_secret(&self.config.azblob_account_name),
|
||||
expose_optional_secret(&self.config.azblob_account_key),
|
||||
],
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub enum StorageType {
|
||||
Fs(FsBackend),
|
||||
S3(S3Backend),
|
||||
Oss(OssBackend),
|
||||
Gcs(GcsBackend),
|
||||
Azblob(AzblobBackend),
|
||||
}
|
||||
|
||||
impl StorageExport for StorageType {
|
||||
fn get_storage_path(&self, catalog: &str, schema: &str) -> (String, String) {
|
||||
match self {
|
||||
StorageType::Fs(backend) => backend.get_storage_path(catalog, schema),
|
||||
StorageType::S3(backend) => backend.get_storage_path(catalog, schema),
|
||||
StorageType::Oss(backend) => backend.get_storage_path(catalog, schema),
|
||||
StorageType::Gcs(backend) => backend.get_storage_path(catalog, schema),
|
||||
StorageType::Azblob(backend) => backend.get_storage_path(catalog, schema),
|
||||
}
|
||||
}
|
||||
|
||||
fn format_output_path(&self, file_path: &str) -> String {
|
||||
match self {
|
||||
StorageType::Fs(backend) => backend.format_output_path(file_path),
|
||||
StorageType::S3(backend) => backend.format_output_path(file_path),
|
||||
StorageType::Oss(backend) => backend.format_output_path(file_path),
|
||||
StorageType::Gcs(backend) => backend.format_output_path(file_path),
|
||||
StorageType::Azblob(backend) => backend.format_output_path(file_path),
|
||||
}
|
||||
}
|
||||
|
||||
fn mask_sensitive_info(&self, sql: &str) -> String {
|
||||
match self {
|
||||
StorageType::Fs(backend) => backend.mask_sensitive_info(sql),
|
||||
StorageType::S3(backend) => backend.mask_sensitive_info(sql),
|
||||
StorageType::Oss(backend) => backend.mask_sensitive_info(sql),
|
||||
StorageType::Gcs(backend) => backend.mask_sensitive_info(sql),
|
||||
StorageType::Azblob(backend) => backend.mask_sensitive_info(sql),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl StorageType {
|
||||
/// Returns true if the storage backend is remote (not local filesystem).
|
||||
pub fn is_remote_storage(&self) -> bool {
|
||||
!matches!(self, StorageType::Fs(_))
|
||||
}
|
||||
}
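// Only the filesystem variant counts as local; every object-store variant is
// treated as remote storage. Small usage example:
#[cfg(test)]
mod storage_type_example {
    use super::*;

    #[test]
    fn fs_is_local_everything_else_is_remote() {
        let local = StorageType::Fs(FsBackend::new("/tmp/export".to_string()));
        assert!(!local.is_remote_storage());
    }
}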
|
||||
@@ -253,12 +253,6 @@ pub enum Error {
|
||||
error: ObjectStoreError,
|
||||
},
|
||||
|
||||
#[snafu(display("S3 config need be set"))]
|
||||
S3ConfigNotSet {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Output directory not set"))]
|
||||
OutputDirNotSet {
|
||||
#[snafu(implicit)]
|
||||
@@ -364,9 +358,9 @@ impl ErrorExt for Error {
|
||||
|
||||
Error::Other { source, .. } => source.status_code(),
|
||||
Error::OpenDal { .. } | Error::InitBackend { .. } => StatusCode::Internal,
|
||||
Error::S3ConfigNotSet { .. }
|
||||
| Error::OutputDirNotSet { .. }
|
||||
| Error::EmptyStoreAddrs { .. } => StatusCode::InvalidArguments,
|
||||
Error::OutputDirNotSet { .. } | Error::EmptyStoreAddrs { .. } => {
|
||||
StatusCode::InvalidArguments
|
||||
}
|
||||
|
||||
Error::BuildRuntime { source, .. } => source.status_code(),
|
||||
|
||||
|
||||
@@ -21,7 +21,7 @@ use api::v1::prometheus_gateway_client::PrometheusGatewayClient;
|
||||
use api::v1::region::region_client::RegionClient as PbRegionClient;
|
||||
use arrow_flight::flight_service_client::FlightServiceClient;
|
||||
use common_grpc::channel_manager::{
|
||||
ChannelConfig, ChannelManager, ClientTlsOption, load_tls_config,
|
||||
ChannelConfig, ChannelManager, ClientTlsOption, load_client_tls_config,
|
||||
};
|
||||
use parking_lot::RwLock;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
@@ -95,9 +95,9 @@ impl Client {
|
||||
U: AsRef<str>,
|
||||
A: AsRef<[U]>,
|
||||
{
|
||||
let channel_config = ChannelConfig::default().client_tls_config(client_tls);
|
||||
let tls_config = load_tls_config(channel_config.client_tls.as_ref())
|
||||
.context(error::CreateTlsChannelSnafu)?;
|
||||
let channel_config = ChannelConfig::default().client_tls_config(client_tls.clone());
|
||||
let tls_config =
|
||||
load_client_tls_config(Some(client_tls)).context(error::CreateTlsChannelSnafu)?;
|
||||
let channel_manager = ChannelManager::with_config(channel_config, tls_config);
|
||||
Ok(Self::with_manager_and_urls(channel_manager, urls))
|
||||
}
|
||||
|
||||
@@ -435,10 +435,10 @@ impl Database {
|
||||
.context(ExternalSnafu)?;
|
||||
match flight_message {
|
||||
FlightMessage::RecordBatch(arrow_batch) => {
|
||||
yield RecordBatch::try_from_df_record_batch(
|
||||
yield Ok(RecordBatch::from_df_record_batch(
|
||||
schema_cloned.clone(),
|
||||
arrow_batch,
|
||||
)
|
||||
))
|
||||
}
|
||||
FlightMessage::Metrics(_) => {}
|
||||
FlightMessage::AffectedRows(_) | FlightMessage::Schema(_) => {
|
||||
|
||||
@@ -182,10 +182,8 @@ impl RegionRequester {
|
||||
|
||||
match flight_message {
|
||||
FlightMessage::RecordBatch(record_batch) => {
|
||||
let result_to_yield = RecordBatch::try_from_df_record_batch(
|
||||
schema_cloned.clone(),
|
||||
record_batch,
|
||||
);
|
||||
let result_to_yield =
|
||||
RecordBatch::from_df_record_batch(schema_cloned.clone(), record_batch);
|
||||
|
||||
// get the next message from the stream. normally it should be a metrics message.
|
||||
if let Some(next_flight_message_result) = flight_message_stream.next().await
|
||||
@@ -219,7 +217,7 @@ impl RegionRequester {
|
||||
stream_ended = true;
|
||||
}
|
||||
|
||||
yield result_to_yield;
|
||||
yield Ok(result_to_yield);
|
||||
}
|
||||
FlightMessage::Metrics(s) => {
|
||||
// just a branch in case of some metrics message comes after other things.
|
||||
|
||||
@@ -16,7 +16,7 @@ default = [
|
||||
"meta-srv/pg_kvbackend",
|
||||
"meta-srv/mysql_kvbackend",
|
||||
]
|
||||
enterprise = ["common-meta/enterprise", "frontend/enterprise", "meta-srv/enterprise", "catalog/enterprise"]
|
||||
enterprise = ["common-meta/enterprise", "frontend/enterprise", "meta-srv/enterprise"]
|
||||
tokio-console = ["common-telemetry/tokio-console"]
|
||||
|
||||
[lints]
|
||||
|
||||
@@ -145,6 +145,17 @@ impl ObjbenchCommand {
|
||||
let region_meta = extract_region_metadata(&self.source, &parquet_meta)?;
|
||||
let num_rows = parquet_meta.file_metadata().num_rows() as u64;
|
||||
let num_row_groups = parquet_meta.num_row_groups() as u64;
|
||||
let max_row_group_uncompressed_size: u64 = parquet_meta
|
||||
.row_groups()
|
||||
.iter()
|
||||
.map(|rg| {
|
||||
rg.columns()
|
||||
.iter()
|
||||
.map(|c| c.uncompressed_size() as u64)
|
||||
.sum::<u64>()
|
||||
})
|
||||
.max()
|
||||
.unwrap_or(0);
|
||||
|
||||
println!(
|
||||
"{} Metadata loaded - rows: {}, size: {} bytes",
|
||||
@@ -160,9 +171,11 @@ impl ObjbenchCommand {
|
||||
time_range: Default::default(),
|
||||
level: 0,
|
||||
file_size,
|
||||
max_row_group_uncompressed_size,
|
||||
available_indexes: Default::default(),
|
||||
indexes: Default::default(),
|
||||
index_file_size: 0,
|
||||
index_file_id: None,
|
||||
index_version: 0,
|
||||
num_rows,
|
||||
num_row_groups,
|
||||
sequence: None,
|
||||
@@ -563,7 +576,7 @@ fn new_noop_file_purger() -> FilePurgerRef {
|
||||
#[derive(Debug)]
|
||||
struct Noop;
|
||||
impl FilePurger for Noop {
|
||||
fn remove_file(&self, _file_meta: FileMeta, _is_delete: bool) {}
|
||||
fn remove_file(&self, _file_meta: FileMeta, _is_delete: bool, _index_outdated: bool) {}
|
||||
}
|
||||
Arc::new(Noop)
|
||||
}
|
||||
|
||||
@@ -99,13 +99,6 @@ pub enum Error {
|
||||
source: flow::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Servers error"))]
|
||||
Servers {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: servers::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to start frontend"))]
|
||||
StartFrontend {
|
||||
#[snafu(implicit)]
|
||||
@@ -336,7 +329,6 @@ impl ErrorExt for Error {
|
||||
Error::ShutdownFrontend { source, .. } => source.status_code(),
|
||||
Error::StartMetaServer { source, .. } => source.status_code(),
|
||||
Error::ShutdownMetaServer { source, .. } => source.status_code(),
|
||||
Error::Servers { source, .. } => source.status_code(),
|
||||
Error::BuildMetaServer { source, .. } => source.status_code(),
|
||||
Error::UnsupportedSelectorType { source, .. } => source.status_code(),
|
||||
Error::BuildCli { source, .. } => source.status_code(),
|
||||
|
||||
@@ -12,6 +12,7 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::fmt::Debug;
|
||||
use std::path::Path;
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
@@ -39,12 +40,14 @@ use flow::{
|
||||
get_flow_auth_options,
|
||||
};
|
||||
use meta_client::{MetaClientOptions, MetaClientType};
|
||||
use plugins::flownode::context::GrpcConfigureContext;
|
||||
use servers::configurator::GrpcBuilderConfiguratorRef;
|
||||
use snafu::{OptionExt, ResultExt, ensure};
|
||||
use tracing_appender::non_blocking::WorkerGuard;
|
||||
|
||||
use crate::error::{
|
||||
BuildCacheRegistrySnafu, InitMetadataSnafu, LoadLayeredConfigSnafu, MetaClientInitSnafu,
|
||||
MissingConfigSnafu, Result, ShutdownFlownodeSnafu, StartFlownodeSnafu,
|
||||
MissingConfigSnafu, OtherSnafu, Result, ShutdownFlownodeSnafu, StartFlownodeSnafu,
|
||||
};
|
||||
use crate::options::{GlobalOptions, GreptimeOptions};
|
||||
use crate::{App, create_resource_limit_metrics, log_versions, maybe_activate_heap_profile};
|
||||
@@ -55,33 +58,14 @@ type FlownodeOptions = GreptimeOptions<flow::FlownodeOptions>;
|
||||
|
||||
pub struct Instance {
|
||||
flownode: FlownodeInstance,
|
||||
|
||||
// The components of flownode, which make it easier to expand based
|
||||
// on the components.
|
||||
#[cfg(feature = "enterprise")]
|
||||
components: Components,
|
||||
|
||||
// Keep the logging guard to prevent the worker from being dropped.
|
||||
_guard: Vec<WorkerGuard>,
|
||||
}
|
||||
|
||||
#[cfg(feature = "enterprise")]
|
||||
pub struct Components {
|
||||
pub catalog_manager: catalog::CatalogManagerRef,
|
||||
pub fe_client: Arc<FrontendClient>,
|
||||
pub kv_backend: common_meta::kv_backend::KvBackendRef,
|
||||
}
|
||||
|
||||
impl Instance {
|
||||
pub fn new(
|
||||
flownode: FlownodeInstance,
|
||||
#[cfg(feature = "enterprise")] components: Components,
|
||||
guard: Vec<WorkerGuard>,
|
||||
) -> Self {
|
||||
pub fn new(flownode: FlownodeInstance, guard: Vec<WorkerGuard>) -> Self {
|
||||
Self {
|
||||
flownode,
|
||||
#[cfg(feature = "enterprise")]
|
||||
components,
|
||||
_guard: guard,
|
||||
}
|
||||
}
|
||||
@@ -94,11 +78,6 @@ impl Instance {
|
||||
pub fn flownode_mut(&mut self) -> &mut FlownodeInstance {
|
||||
&mut self.flownode
|
||||
}
|
||||
|
||||
#[cfg(feature = "enterprise")]
|
||||
pub fn components(&self) -> &Components {
|
||||
&self.components
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
@@ -396,7 +375,7 @@ impl StartCommand {
|
||||
let frontend_client = Arc::new(frontend_client);
|
||||
let flownode_builder = FlownodeBuilder::new(
|
||||
opts.clone(),
|
||||
plugins,
|
||||
plugins.clone(),
|
||||
table_metadata_manager,
|
||||
catalog_manager.clone(),
|
||||
flow_metadata_manager,
|
||||
@@ -405,8 +384,29 @@ impl StartCommand {
|
||||
.with_heartbeat_task(heartbeat_task);
|
||||
|
||||
let mut flownode = flownode_builder.build().await.context(StartFlownodeSnafu)?;
|
||||
|
||||
let builder =
|
||||
FlownodeServiceBuilder::grpc_server_builder(&opts, flownode.flownode_server());
|
||||
let builder = if let Some(configurator) =
|
||||
plugins.get::<GrpcBuilderConfiguratorRef<GrpcConfigureContext>>()
|
||||
{
|
||||
let context = GrpcConfigureContext {
|
||||
kv_backend: cached_meta_backend.clone(),
|
||||
fe_client: frontend_client.clone(),
|
||||
flownode_id: member_id,
|
||||
catalog_manager: catalog_manager.clone(),
|
||||
};
|
||||
configurator
|
||||
.configure(builder, context)
|
||||
.await
|
||||
.context(OtherSnafu)?
|
||||
} else {
|
||||
builder
|
||||
};
|
||||
let grpc_server = builder.build();
|
||||
|
||||
let services = FlownodeServiceBuilder::new(&opts)
|
||||
.with_default_grpc_server(flownode.flownode_server())
|
||||
.with_grpc_server(grpc_server)
|
||||
.enable_http_service()
|
||||
.build()
|
||||
.context(StartFlownodeSnafu)?;
|
||||
@@ -430,16 +430,6 @@ impl StartCommand {
|
||||
.set_frontend_invoker(invoker)
|
||||
.await;
|
||||
|
||||
#[cfg(feature = "enterprise")]
|
||||
let components = Components {
|
||||
catalog_manager: catalog_manager.clone(),
|
||||
fe_client: frontend_client,
|
||||
kv_backend: cached_meta_backend,
|
||||
};
|
||||
|
||||
#[cfg(not(feature = "enterprise"))]
|
||||
return Ok(Instance::new(flownode, guard));
|
||||
#[cfg(feature = "enterprise")]
|
||||
Ok(Instance::new(flownode, components, guard))
|
||||
Ok(Instance::new(flownode, guard))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -12,6 +12,7 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::fmt::Debug;
|
||||
use std::path::Path;
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
@@ -19,7 +20,10 @@ use std::time::Duration;
|
||||
use async_trait::async_trait;
|
||||
use cache::{build_fundamental_cache_registry, with_default_composite_cache_registry};
|
||||
use catalog::information_extension::DistributedInformationExtension;
|
||||
use catalog::kvbackend::{CachedKvBackendBuilder, KvBackendCatalogManagerBuilder, MetaKvBackend};
|
||||
use catalog::kvbackend::{
|
||||
CachedKvBackendBuilder, CatalogManagerConfiguratorRef, KvBackendCatalogManagerBuilder,
|
||||
MetaKvBackend,
|
||||
};
|
||||
use catalog::process_manager::ProcessManager;
|
||||
use clap::Parser;
|
||||
use client::client_manager::NodeClients;
|
||||
@@ -31,6 +35,7 @@ use common_meta::cache::{CacheRegistryBuilder, LayeredCacheRegistryBuilder};
|
||||
use common_meta::heartbeat::handler::HandlerGroupExecutor;
|
||||
use common_meta::heartbeat::handler::invalidate_table_cache::InvalidateCacheHandler;
|
||||
use common_meta::heartbeat::handler::parse_mailbox_message::ParseMailboxMessageHandler;
|
||||
use common_meta::heartbeat::handler::suspend::SuspendHandler;
|
||||
use common_query::prelude::set_default_prefix;
|
||||
use common_stat::ResourceStatImpl;
|
||||
use common_telemetry::info;
|
||||
@@ -41,15 +46,17 @@ use frontend::frontend::Frontend;
|
||||
use frontend::heartbeat::HeartbeatTask;
|
||||
use frontend::instance::builder::FrontendBuilder;
|
||||
use frontend::server::Services;
|
||||
use meta_client::{MetaClientOptions, MetaClientType};
|
||||
use meta_client::{MetaClientOptions, MetaClientRef, MetaClientType};
|
||||
use plugins::frontend::context::{
|
||||
CatalogManagerConfigureContext, DistributedCatalogManagerConfigureContext,
|
||||
};
|
||||
use servers::addrs;
|
||||
use servers::export_metrics::ExportMetricsTask;
|
||||
use servers::grpc::GrpcOptions;
|
||||
use servers::tls::{TlsMode, TlsOption};
|
||||
use servers::tls::{TlsMode, TlsOption, merge_tls_option};
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use tracing_appender::non_blocking::WorkerGuard;
|
||||
|
||||
use crate::error::{self, Result};
|
||||
use crate::error::{self, OtherSnafu, Result};
|
||||
use crate::options::{GlobalOptions, GreptimeOptions};
|
||||
use crate::{App, create_resource_limit_metrics, log_versions, maybe_activate_heap_profile};
|
||||
|
||||
@@ -249,7 +256,7 @@ impl StartCommand {
|
||||
|
||||
if let Some(addr) = &self.rpc_bind_addr {
|
||||
opts.grpc.bind_addr.clone_from(addr);
|
||||
opts.grpc.tls = tls_opts.clone();
|
||||
opts.grpc.tls = merge_tls_option(&opts.grpc.tls, tls_opts.clone());
|
||||
}
|
||||
|
||||
if let Some(addr) = &self.rpc_server_addr {
|
||||
@@ -284,13 +291,13 @@ impl StartCommand {
|
||||
if let Some(addr) = &self.mysql_addr {
|
||||
opts.mysql.enable = true;
|
||||
opts.mysql.addr.clone_from(addr);
|
||||
opts.mysql.tls = tls_opts.clone();
|
||||
opts.mysql.tls = merge_tls_option(&opts.mysql.tls, tls_opts.clone());
|
||||
}
|
||||
|
||||
if let Some(addr) = &self.postgres_addr {
|
||||
opts.postgres.enable = true;
|
||||
opts.postgres.addr.clone_from(addr);
|
||||
opts.postgres.tls = tls_opts;
|
||||
opts.postgres.tls = merge_tls_option(&opts.postgres.tls, tls_opts.clone());
|
||||
}
|
||||
|
||||
if let Some(enable) = self.influxdb_enable {
|
||||
@@ -417,38 +424,30 @@ impl StartCommand {
|
||||
layered_cache_registry.clone(),
|
||||
)
|
||||
.with_process_manager(process_manager.clone());
|
||||
#[cfg(feature = "enterprise")]
|
||||
let builder = if let Some(factories) = plugins.get() {
|
||||
builder.with_extra_information_table_factories(factories)
|
||||
let builder = if let Some(configurator) =
|
||||
plugins.get::<CatalogManagerConfiguratorRef<CatalogManagerConfigureContext>>()
|
||||
{
|
||||
let ctx = DistributedCatalogManagerConfigureContext {
|
||||
meta_client: meta_client.clone(),
|
||||
};
|
||||
let ctx = CatalogManagerConfigureContext::Distributed(ctx);
|
||||
|
||||
configurator
|
||||
.configure(builder, ctx)
|
||||
.await
|
||||
.context(OtherSnafu)?
|
||||
} else {
|
||||
builder
|
||||
};
|
||||
let catalog_manager = builder.build();
|
||||
|
||||
let executor = HandlerGroupExecutor::new(vec![
|
||||
Arc::new(ParseMailboxMessageHandler),
|
||||
Arc::new(InvalidateCacheHandler::new(layered_cache_registry.clone())),
|
||||
]);
|
||||
|
||||
let mut resource_stat = ResourceStatImpl::default();
|
||||
resource_stat.start_collect_cpu_usage();
|
||||
|
||||
let heartbeat_task = HeartbeatTask::new(
|
||||
&opts,
|
||||
meta_client.clone(),
|
||||
opts.heartbeat.clone(),
|
||||
Arc::new(executor),
|
||||
Arc::new(resource_stat),
|
||||
);
|
||||
let heartbeat_task = Some(heartbeat_task);
|
||||
|
||||
let instance = FrontendBuilder::new(
|
||||
opts.clone(),
|
||||
cached_meta_backend.clone(),
|
||||
layered_cache_registry.clone(),
|
||||
catalog_manager,
|
||||
client,
|
||||
meta_client,
|
||||
meta_client.clone(),
|
||||
process_manager,
|
||||
)
|
||||
.with_plugin(plugins.clone())
|
||||
@@ -456,10 +455,10 @@ impl StartCommand {
|
||||
.try_build()
|
||||
.await
|
||||
.context(error::StartFrontendSnafu)?;
|
||||
let instance = Arc::new(instance);
|
||||
|
||||
let export_metrics_task = ExportMetricsTask::try_new(&opts.export_metrics, Some(&plugins))
|
||||
.context(error::ServersSnafu)?;
|
||||
let heartbeat_task = Some(create_heartbeat_task(&opts, meta_client, &instance));
|
||||
|
||||
let instance = Arc::new(instance);
|
||||
|
||||
let servers = Services::new(opts, instance.clone(), plugins)
|
||||
.build()
|
||||
@@ -469,13 +468,34 @@ impl StartCommand {
|
||||
instance,
|
||||
servers,
|
||||
heartbeat_task,
|
||||
export_metrics_task,
|
||||
};
|
||||
|
||||
Ok(Instance::new(frontend, guard))
|
||||
}
|
||||
}
|
||||
|
||||
pub fn create_heartbeat_task(
|
||||
options: &frontend::frontend::FrontendOptions,
|
||||
meta_client: MetaClientRef,
|
||||
instance: &frontend::instance::Instance,
|
||||
) -> HeartbeatTask {
|
||||
let executor = Arc::new(HandlerGroupExecutor::new(vec![
|
||||
Arc::new(ParseMailboxMessageHandler),
|
||||
Arc::new(SuspendHandler::new(instance.suspend_state())),
|
||||
Arc::new(InvalidateCacheHandler::new(
|
||||
instance.cache_invalidator().clone(),
|
||||
)),
|
||||
]));
|
||||
|
||||
let stat = {
|
||||
let mut stat = ResourceStatImpl::default();
|
||||
stat.start_collect_cpu_usage();
|
||||
Arc::new(stat)
|
||||
};
|
||||
|
||||
HeartbeatTask::new(options, meta_client, executor, stat)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::io::Write;
|
||||
|
||||
@@ -12,7 +12,7 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::fmt;
|
||||
use std::fmt::{self, Debug};
|
||||
use std::path::Path;
|
||||
use std::time::Duration;
|
||||
|
||||
@@ -20,10 +20,11 @@ use async_trait::async_trait;
|
||||
use clap::Parser;
|
||||
use common_base::Plugins;
|
||||
use common_config::Configurable;
|
||||
use common_meta::distributed_time_constants::init_distributed_time_constants;
|
||||
use common_telemetry::info;
|
||||
use common_telemetry::logging::{DEFAULT_LOGGING_DIR, TracingOptions};
|
||||
use common_version::{short_version, verbose_version};
|
||||
use meta_srv::bootstrap::MetasrvInstance;
|
||||
use meta_srv::bootstrap::{MetasrvInstance, metasrv_builder};
|
||||
use meta_srv::metasrv::BackendImpl;
|
||||
use snafu::ResultExt;
|
||||
use tracing_appender::non_blocking::WorkerGuard;
|
||||
@@ -177,7 +178,7 @@ pub struct StartCommand {
|
||||
backend: Option<BackendImpl>,
|
||||
}
|
||||
|
||||
impl fmt::Debug for StartCommand {
|
||||
impl Debug for StartCommand {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
f.debug_struct("StartCommand")
|
||||
.field("rpc_bind_addr", &self.rpc_bind_addr)
|
||||
@@ -327,6 +328,7 @@ impl StartCommand {
|
||||
log_versions(verbose_version(), short_version(), APP_NAME);
|
||||
maybe_activate_heap_profile(&opts.component.memory);
|
||||
create_resource_limit_metrics(APP_NAME);
|
||||
init_distributed_time_constants(opts.component.heartbeat_interval);
|
||||
|
||||
info!("Metasrv start command: {:#?}", self);
|
||||
|
||||
@@ -341,7 +343,7 @@ impl StartCommand {
|
||||
.await
|
||||
.context(StartMetaServerSnafu)?;
|
||||
|
||||
let builder = meta_srv::bootstrap::metasrv_builder(&opts, plugins, None)
|
||||
let builder = metasrv_builder(&opts, plugins, None)
|
||||
.await
|
||||
.context(error::BuildMetaServerSnafu)?;
|
||||
let metasrv = builder.build().await.context(error::BuildMetaServerSnafu)?;
|
||||
|
||||
@@ -12,6 +12,7 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::fmt::Debug;
|
||||
use std::net::SocketAddr;
|
||||
use std::path::Path;
|
||||
use std::sync::Arc;
|
||||
@@ -20,7 +21,7 @@ use std::{fs, path};
|
||||
use async_trait::async_trait;
|
||||
use cache::{build_fundamental_cache_registry, with_default_composite_cache_registry};
|
||||
use catalog::information_schema::InformationExtensionRef;
|
||||
use catalog::kvbackend::KvBackendCatalogManagerBuilder;
|
||||
use catalog::kvbackend::{CatalogManagerConfiguratorRef, KvBackendCatalogManagerBuilder};
|
||||
use catalog::process_manager::ProcessManager;
|
||||
use clap::Parser;
|
||||
use common_base::Plugins;
|
||||
@@ -31,7 +32,7 @@ use common_meta::cache::LayeredCacheRegistryBuilder;
|
||||
use common_meta::ddl::flow_meta::FlowMetadataAllocator;
|
||||
use common_meta::ddl::table_meta::TableMetadataAllocator;
|
||||
use common_meta::ddl::{DdlContext, NoopRegionFailureDetectorControl};
|
||||
use common_meta::ddl_manager::DdlManager;
|
||||
use common_meta::ddl_manager::{DdlManager, DdlManagerConfiguratorRef};
|
||||
use common_meta::key::flow::FlowMetadataManager;
|
||||
use common_meta::key::{TableMetadataManager, TableMetadataManagerRef};
|
||||
use common_meta::kv_backend::KvBackendRef;
|
||||
@@ -57,14 +58,17 @@ use frontend::instance::StandaloneDatanodeManager;
|
||||
use frontend::instance::builder::FrontendBuilder;
|
||||
use frontend::server::Services;
|
||||
use meta_srv::metasrv::{FLOW_ID_SEQ, TABLE_ID_SEQ};
|
||||
use servers::export_metrics::ExportMetricsTask;
|
||||
use servers::tls::{TlsMode, TlsOption};
|
||||
use plugins::frontend::context::{
|
||||
CatalogManagerConfigureContext, StandaloneCatalogManagerConfigureContext,
|
||||
};
|
||||
use plugins::standalone::context::DdlManagerConfigureContext;
|
||||
use servers::tls::{TlsMode, TlsOption, merge_tls_option};
|
||||
use snafu::ResultExt;
|
||||
use standalone::StandaloneInformationExtension;
|
||||
use standalone::options::StandaloneOptions;
|
||||
use tracing_appender::non_blocking::WorkerGuard;
|
||||
|
||||
use crate::error::{Result, StartFlownodeSnafu};
|
||||
use crate::error::{OtherSnafu, Result, StartFlownodeSnafu};
|
||||
use crate::options::{GlobalOptions, GreptimeOptions};
|
||||
use crate::{App, create_resource_limit_metrics, error, log_versions, maybe_activate_heap_profile};
|
||||
|
||||
@@ -117,34 +121,15 @@ pub struct Instance {
|
||||
flownode: FlownodeInstance,
|
||||
procedure_manager: ProcedureManagerRef,
|
||||
wal_options_allocator: WalOptionsAllocatorRef,
|
||||
|
||||
// The components of standalone, which make it easier to expand based
|
||||
// on the components.
|
||||
#[cfg(feature = "enterprise")]
|
||||
components: Components,
|
||||
|
||||
// Keep the logging guard to prevent the worker from being dropped.
|
||||
_guard: Vec<WorkerGuard>,
|
||||
}
|
||||
|
||||
#[cfg(feature = "enterprise")]
|
||||
pub struct Components {
|
||||
pub plugins: Plugins,
|
||||
pub kv_backend: KvBackendRef,
|
||||
pub frontend_client: Arc<FrontendClient>,
|
||||
pub catalog_manager: catalog::CatalogManagerRef,
|
||||
}
|
||||
|
||||
impl Instance {
|
||||
/// Find the socket addr of a server by its `name`.
|
||||
pub fn server_addr(&self, name: &str) -> Option<SocketAddr> {
|
||||
self.frontend.server_handlers().addr(name)
|
||||
}
|
||||
|
||||
#[cfg(feature = "enterprise")]
|
||||
pub fn components(&self) -> &Components {
|
||||
&self.components
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
@@ -308,19 +293,20 @@ impl StartCommand {
|
||||
),
|
||||
}.fail();
|
||||
}
|
||||
opts.grpc.bind_addr.clone_from(addr)
|
||||
opts.grpc.bind_addr.clone_from(addr);
|
||||
opts.grpc.tls = merge_tls_option(&opts.grpc.tls, tls_opts.clone());
|
||||
}
|
||||
|
||||
if let Some(addr) = &self.mysql_addr {
|
||||
opts.mysql.enable = true;
|
||||
opts.mysql.addr.clone_from(addr);
|
||||
opts.mysql.tls = tls_opts.clone();
|
||||
opts.mysql.tls = merge_tls_option(&opts.mysql.tls, tls_opts.clone());
|
||||
}
|
||||
|
||||
if let Some(addr) = &self.postgres_addr {
|
||||
opts.postgres.enable = true;
|
||||
opts.postgres.addr.clone_from(addr);
|
||||
opts.postgres.tls = tls_opts;
|
||||
opts.postgres.tls = merge_tls_option(&opts.postgres.tls, tls_opts.clone());
|
||||
}
|
||||
|
||||
if self.influxdb_enable {
|
||||
@@ -416,6 +402,13 @@ impl StartCommand {
|
||||
plugins.insert::<InformationExtensionRef>(information_extension.clone());
|
||||
|
||||
let process_manager = Arc::new(ProcessManager::new(opts.grpc.server_addr.clone(), None));
|
||||
|
||||
// Standalone mode does not use gRPC here; it only obtains a handle to the
// frontend gRPC client without actually making a connection.
|
||||
let (frontend_client, frontend_instance_handler) =
|
||||
FrontendClient::from_empty_grpc_handler(opts.query.clone());
|
||||
let frontend_client = Arc::new(frontend_client);
|
||||
|
||||
let builder = KvBackendCatalogManagerBuilder::new(
|
||||
information_extension.clone(),
|
||||
kv_backend.clone(),
|
||||
@@ -423,9 +416,17 @@ impl StartCommand {
|
||||
)
|
||||
.with_procedure_manager(procedure_manager.clone())
|
||||
.with_process_manager(process_manager.clone());
|
||||
#[cfg(feature = "enterprise")]
|
||||
let builder = if let Some(factories) = plugins.get() {
|
||||
builder.with_extra_information_table_factories(factories)
|
||||
let builder = if let Some(configurator) =
|
||||
plugins.get::<CatalogManagerConfiguratorRef<CatalogManagerConfigureContext>>()
|
||||
{
|
||||
let ctx = StandaloneCatalogManagerConfigureContext {
|
||||
fe_client: frontend_client.clone(),
|
||||
};
|
||||
let ctx = CatalogManagerConfigureContext::Standalone(ctx);
|
||||
configurator
|
||||
.configure(builder, ctx)
|
||||
.await
|
||||
.context(OtherSnafu)?
|
||||
} else {
|
||||
builder
|
||||
};
|
||||
@@ -440,11 +441,6 @@ impl StartCommand {
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
// for standalone not use grpc, but get a handler to frontend grpc client without
|
||||
// actually make a connection
|
||||
let (frontend_client, frontend_instance_handler) =
|
||||
FrontendClient::from_empty_grpc_handler(opts.query.clone());
|
||||
let frontend_client = Arc::new(frontend_client);
|
||||
let flow_builder = FlownodeBuilder::new(
|
||||
flownode_options,
|
||||
plugins.clone(),
|
||||
@@ -515,11 +511,21 @@ impl StartCommand {
|
||||
|
||||
let ddl_manager = DdlManager::try_new(ddl_context, procedure_manager.clone(), true)
|
||||
.context(error::InitDdlManagerSnafu)?;
|
||||
#[cfg(feature = "enterprise")]
|
||||
let ddl_manager = {
|
||||
let trigger_ddl_manager: Option<common_meta::ddl_manager::TriggerDdlManagerRef> =
|
||||
plugins.get();
|
||||
ddl_manager.with_trigger_ddl_manager(trigger_ddl_manager)
|
||||
|
||||
let ddl_manager = if let Some(configurator) =
|
||||
plugins.get::<DdlManagerConfiguratorRef<DdlManagerConfigureContext>>()
|
||||
{
|
||||
let ctx = DdlManagerConfigureContext {
|
||||
kv_backend: kv_backend.clone(),
|
||||
fe_client: frontend_client.clone(),
|
||||
catalog_manager: catalog_manager.clone(),
|
||||
};
|
||||
configurator
|
||||
.configure(ddl_manager, ctx)
|
||||
.await
|
||||
.context(OtherSnafu)?
|
||||
} else {
|
||||
ddl_manager
|
||||
};
|
||||
|
||||
let procedure_executor = Arc::new(LocalProcedureExecutor::new(
|
||||
@@ -546,9 +552,8 @@ impl StartCommand {
|
||||
let grpc_handler = fe_instance.clone() as Arc<dyn GrpcQueryHandlerWithBoxedError>;
|
||||
let weak_grpc_handler = Arc::downgrade(&grpc_handler);
|
||||
frontend_instance_handler
|
||||
.lock()
|
||||
.unwrap()
|
||||
.replace(weak_grpc_handler);
|
||||
.set_handler(weak_grpc_handler)
|
||||
.await;
|
||||
|
||||
// set the frontend invoker for flownode
|
||||
let flow_streaming_engine = flownode.flow_engine().streaming_engine();
|
||||
@@ -565,9 +570,6 @@ impl StartCommand {
|
||||
.context(StartFlownodeSnafu)?;
|
||||
flow_streaming_engine.set_frontend_invoker(invoker).await;
|
||||
|
||||
let export_metrics_task = ExportMetricsTask::try_new(&opts.export_metrics, Some(&plugins))
|
||||
.context(error::ServersSnafu)?;
|
||||
|
||||
let servers = Services::new(opts, fe_instance.clone(), plugins.clone())
|
||||
.build()
|
||||
.context(error::StartFrontendSnafu)?;
|
||||
@@ -576,15 +578,6 @@ impl StartCommand {
|
||||
instance: fe_instance,
|
||||
servers,
|
||||
heartbeat_task: None,
|
||||
export_metrics_task,
|
||||
};
|
||||
|
||||
#[cfg(feature = "enterprise")]
|
||||
let components = Components {
|
||||
plugins,
|
||||
kv_backend,
|
||||
frontend_client,
|
||||
catalog_manager,
|
||||
};
|
||||
|
||||
Ok(Instance {
|
||||
@@ -593,8 +586,6 @@ impl StartCommand {
|
||||
flownode,
|
||||
procedure_manager,
|
||||
wal_options_allocator,
|
||||
#[cfg(feature = "enterprise")]
|
||||
components,
|
||||
_guard: guard,
|
||||
})
|
||||
}
|
||||
@@ -774,7 +765,6 @@ mod tests {
|
||||
user_provider: Some("static_user_provider:cmd:test=test".to_string()),
|
||||
mysql_addr: Some("127.0.0.1:4002".to_string()),
|
||||
postgres_addr: Some("127.0.0.1:4003".to_string()),
|
||||
tls_watch: true,
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
@@ -791,8 +781,6 @@ mod tests {
|
||||
|
||||
assert_eq!("./greptimedb_data/test/logs", opts.logging.dir);
|
||||
assert_eq!("debug", opts.logging.level.unwrap());
|
||||
assert!(opts.mysql.tls.watch);
|
||||
assert!(opts.postgres.tls.watch);
|
||||
}
|
||||
|
||||
#[test]
|
||||
|
||||
@@ -31,7 +31,6 @@ use meta_srv::selector::SelectorType;
|
||||
use metric_engine::config::EngineConfig as MetricEngineConfig;
|
||||
use mito2::config::MitoConfig;
|
||||
use query::options::QueryOptions;
|
||||
use servers::export_metrics::ExportMetricsOption;
|
||||
use servers::grpc::GrpcOptions;
|
||||
use servers::http::HttpOptions;
|
||||
use servers::tls::{TlsMode, TlsOption};
|
||||
@@ -53,7 +52,6 @@ fn test_load_datanode_example_config() {
|
||||
meta_client: Some(MetaClientOptions {
|
||||
metasrv_addrs: vec!["127.0.0.1:3002".to_string()],
|
||||
timeout: Duration::from_secs(3),
|
||||
heartbeat_timeout: Duration::from_millis(500),
|
||||
ddl_timeout: Duration::from_secs(10),
|
||||
connect_timeout: Duration::from_secs(1),
|
||||
tcp_nodelay: true,
|
||||
@@ -95,11 +93,6 @@ fn test_load_datanode_example_config() {
|
||||
tracing_sample_ratio: Some(Default::default()),
|
||||
..Default::default()
|
||||
},
|
||||
export_metrics: ExportMetricsOption {
|
||||
self_import: None,
|
||||
remote_write: Some(Default::default()),
|
||||
..Default::default()
|
||||
},
|
||||
grpc: GrpcOptions::default()
|
||||
.with_bind_addr("127.0.0.1:3001")
|
||||
.with_server_addr("127.0.0.1:3001"),
|
||||
@@ -124,7 +117,6 @@ fn test_load_frontend_example_config() {
|
||||
meta_client: Some(MetaClientOptions {
|
||||
metasrv_addrs: vec!["127.0.0.1:3002".to_string()],
|
||||
timeout: Duration::from_secs(3),
|
||||
heartbeat_timeout: Duration::from_millis(500),
|
||||
ddl_timeout: Duration::from_secs(10),
|
||||
connect_timeout: Duration::from_secs(1),
|
||||
tcp_nodelay: true,
|
||||
@@ -146,11 +138,6 @@ fn test_load_frontend_example_config() {
|
||||
..Default::default()
|
||||
},
|
||||
},
|
||||
export_metrics: ExportMetricsOption {
|
||||
self_import: None,
|
||||
remote_write: Some(Default::default()),
|
||||
..Default::default()
|
||||
},
|
||||
grpc: GrpcOptions {
|
||||
bind_addr: "127.0.0.1:4001".to_string(),
|
||||
server_addr: "127.0.0.1:4001".to_string(),
|
||||
@@ -201,11 +188,6 @@ fn test_load_metasrv_example_config() {
|
||||
tcp_nodelay: true,
|
||||
},
|
||||
},
|
||||
export_metrics: ExportMetricsOption {
|
||||
self_import: None,
|
||||
remote_write: Some(Default::default()),
|
||||
..Default::default()
|
||||
},
|
||||
backend_tls: Some(TlsOption {
|
||||
mode: TlsMode::Prefer,
|
||||
cert_path: String::new(),
|
||||
@@ -257,7 +239,6 @@ fn test_load_flownode_example_config() {
|
||||
meta_client: Some(MetaClientOptions {
|
||||
metasrv_addrs: vec!["127.0.0.1:3002".to_string()],
|
||||
timeout: Duration::from_secs(3),
|
||||
heartbeat_timeout: Duration::from_millis(500),
|
||||
ddl_timeout: Duration::from_secs(10),
|
||||
connect_timeout: Duration::from_secs(1),
|
||||
tcp_nodelay: true,
|
||||
@@ -317,11 +298,6 @@ fn test_load_standalone_example_config() {
|
||||
tracing_sample_ratio: Some(Default::default()),
|
||||
..Default::default()
|
||||
},
|
||||
export_metrics: ExportMetricsOption {
|
||||
self_import: Some(Default::default()),
|
||||
remote_write: Some(Default::default()),
|
||||
..Default::default()
|
||||
},
|
||||
http: HttpOptions {
|
||||
cors_allowed_origins: vec!["https://example.com".to_string()],
|
||||
..Default::default()
|
||||
|
||||
@@ -32,7 +32,12 @@ impl Plugins {
|
||||
|
||||
pub fn insert<T: 'static + Send + Sync>(&self, value: T) {
|
||||
let last = self.write().insert(value);
|
||||
assert!(last.is_none(), "each type of plugins must be one and only");
|
||||
if last.is_some() {
|
||||
panic!(
|
||||
"Plugin of type {} already exists",
|
||||
std::any::type_name::<T>()
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn get<T: 'static + Send + Sync + Clone>(&self) -> Option<T> {
|
||||
@@ -140,7 +145,7 @@ mod tests {
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[should_panic(expected = "each type of plugins must be one and only")]
|
||||
#[should_panic(expected = "Plugin of type i32 already exists")]
|
||||
fn test_plugin_uniqueness() {
|
||||
let plugins = Plugins::new();
|
||||
plugins.insert(1i32);
|
||||
|
||||
@@ -86,8 +86,6 @@ pub const INFORMATION_SCHEMA_TRIGGERS_TABLE_ID: u32 = 24;
|
||||
pub const INFORMATION_SCHEMA_GLOBAL_STATUS_TABLE_ID: u32 = 25;
|
||||
/// id for information_schema.SESSION_STATUS
|
||||
pub const INFORMATION_SCHEMA_SESSION_STATUS_TABLE_ID: u32 = 26;
|
||||
/// id for information_schema.RUNTIME_METRICS
|
||||
pub const INFORMATION_SCHEMA_RUNTIME_METRICS_TABLE_ID: u32 = 27;
|
||||
/// id for information_schema.PARTITIONS
|
||||
pub const INFORMATION_SCHEMA_PARTITIONS_TABLE_ID: u32 = 28;
|
||||
/// id for information_schema.REGION_PEERS
|
||||
@@ -112,6 +110,8 @@ pub const INFORMATION_SCHEMA_SSTS_MANIFEST_TABLE_ID: u32 = 37;
|
||||
pub const INFORMATION_SCHEMA_SSTS_STORAGE_TABLE_ID: u32 = 38;
|
||||
/// id for information_schema.ssts_index_meta
|
||||
pub const INFORMATION_SCHEMA_SSTS_INDEX_META_TABLE_ID: u32 = 39;
|
||||
/// id for information_schema.alerts
|
||||
pub const INFORMATION_SCHEMA_ALERTS_TABLE_ID: u32 = 40;
|
||||
|
||||
// ----- End of information_schema tables -----
|
||||
|
||||
|
||||
@@ -11,8 +11,10 @@ workspace = true
|
||||
common-base.workspace = true
|
||||
common-error.workspace = true
|
||||
common-macro.workspace = true
|
||||
common-telemetry.workspace = true
|
||||
config.workspace = true
|
||||
humantime-serde.workspace = true
|
||||
notify.workspace = true
|
||||
object-store.workspace = true
|
||||
serde.workspace = true
|
||||
serde_json.workspace = true
|
||||
|
||||
@@ -49,14 +49,31 @@ pub enum Error {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to watch file: {}", path))]
|
||||
FileWatch {
|
||||
path: String,
|
||||
#[snafu(source)]
|
||||
error: notify::Error,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Invalid path '{}': expected a file, not a directory", path))]
|
||||
InvalidPath {
|
||||
path: String,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
}
|
||||
|
||||
impl ErrorExt for Error {
|
||||
fn status_code(&self) -> StatusCode {
|
||||
match self {
|
||||
Error::TomlFormat { .. } | Error::LoadLayeredConfig { .. } => {
|
||||
StatusCode::InvalidArguments
|
||||
}
|
||||
Error::TomlFormat { .. }
|
||||
| Error::LoadLayeredConfig { .. }
|
||||
| Error::FileWatch { .. }
|
||||
| Error::InvalidPath { .. } => StatusCode::InvalidArguments,
|
||||
Error::SerdeJson { .. } => StatusCode::Unexpected,
|
||||
}
|
||||
}
|
||||
|
||||
277
src/common/config/src/file_watcher.rs
Normal file
@@ -0,0 +1,277 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//! Common file watching utilities for configuration hot-reloading.
|
||||
//!
|
||||
//! This module provides a generic file watcher that can be used to watch
|
||||
//! files for changes and trigger callbacks when changes occur.
|
||||
//!
|
||||
//! The watcher monitors the parent directory of each file rather than the
|
||||
//! file itself. This ensures that file deletions and recreations are properly
|
||||
//! tracked, which is common with editors that use atomic saves or when
|
||||
//! configuration files are replaced.
|
||||
|
||||
use std::collections::HashSet;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::sync::mpsc::channel;
|
||||
|
||||
use common_telemetry::{error, info, warn};
|
||||
use notify::{EventKind, RecursiveMode, Watcher};
|
||||
use snafu::ResultExt;
|
||||
|
||||
use crate::error::{FileWatchSnafu, InvalidPathSnafu, Result};
|
||||
|
||||
/// Configuration for the file watcher behavior.
|
||||
#[derive(Debug, Clone, Default)]
|
||||
pub struct FileWatcherConfig {
|
||||
/// Whether to include Remove events in addition to Modify and Create.
|
||||
pub include_remove_events: bool,
|
||||
}
|
||||
|
||||
impl FileWatcherConfig {
|
||||
pub fn new() -> Self {
|
||||
Default::default()
|
||||
}
|
||||
|
||||
pub fn include_remove_events(mut self) -> Self {
|
||||
self.include_remove_events = true;
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
/// A builder for creating file watchers with flexible configuration.
|
||||
///
|
||||
/// The watcher monitors the parent directory of each file to handle file
|
||||
/// deletion and recreation properly. Events are filtered to only trigger
|
||||
/// callbacks for the specific files being watched.
|
||||
pub struct FileWatcherBuilder {
|
||||
config: FileWatcherConfig,
|
||||
/// Paths of the files to watch; the parent directory of each file is what gets watched.
|
||||
file_paths: Vec<PathBuf>,
|
||||
}
|
||||
|
||||
impl FileWatcherBuilder {
|
||||
/// Create a new builder with default configuration.
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
config: FileWatcherConfig::default(),
|
||||
file_paths: Vec::new(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Set the watcher configuration.
|
||||
pub fn config(mut self, config: FileWatcherConfig) -> Self {
|
||||
self.config = config;
|
||||
self
|
||||
}
|
||||
|
||||
/// Add a file path to watch.
|
||||
///
|
||||
/// Returns an error if the path is a directory.
|
||||
/// The path is stored as given; its parent directory is the actual watch target.
|
||||
pub fn watch_path<P: AsRef<Path>>(mut self, path: P) -> Result<Self> {
|
||||
let path = path.as_ref();
|
||||
snafu::ensure!(
|
||||
path.is_file(),
|
||||
InvalidPathSnafu {
|
||||
path: path.display().to_string(),
|
||||
}
|
||||
);
|
||||
|
||||
self.file_paths.push(path.to_path_buf());
|
||||
Ok(self)
|
||||
}
|
||||
|
||||
/// Add multiple file paths to watch.
|
||||
///
|
||||
/// Returns an error if any path is a directory.
|
||||
pub fn watch_paths<P: AsRef<Path>, I: IntoIterator<Item = P>>(
|
||||
mut self,
|
||||
paths: I,
|
||||
) -> Result<Self> {
|
||||
for path in paths {
|
||||
self = self.watch_path(path)?;
|
||||
}
|
||||
Ok(self)
|
||||
}
|
||||
|
||||
/// Build and spawn the file watcher with the given callback.
|
||||
///
|
||||
/// The callback is invoked when relevant file events are detected for
|
||||
/// the watched files. The watcher monitors the parent directories to
|
||||
/// handle file deletion and recreation properly.
|
||||
///
|
||||
/// The spawned watcher thread runs for the lifetime of the process.
|
||||
pub fn spawn<F>(self, callback: F) -> Result<()>
|
||||
where
|
||||
F: Fn() + Send + 'static,
|
||||
{
|
||||
let (tx, rx) = channel::<notify::Result<notify::Event>>();
|
||||
let mut watcher =
|
||||
notify::recommended_watcher(tx).context(FileWatchSnafu { path: "<none>" })?;
|
||||
|
||||
// Collect unique parent directories to watch
|
||||
let mut watched_dirs: HashSet<PathBuf> = HashSet::new();
|
||||
for file_path in &self.file_paths {
|
||||
if let Some(parent) = file_path.parent()
|
||||
&& watched_dirs.insert(parent.to_path_buf())
|
||||
{
|
||||
watcher
|
||||
.watch(parent, RecursiveMode::NonRecursive)
|
||||
.context(FileWatchSnafu {
|
||||
path: parent.display().to_string(),
|
||||
})?;
|
||||
}
|
||||
}
|
||||
|
||||
let config = self.config;
|
||||
|
||||
info!(
|
||||
"Spawning file watcher for paths: {:?} (watching parent directories)",
|
||||
self.file_paths
|
||||
.iter()
|
||||
.map(|p| p.display().to_string())
|
||||
.collect::<Vec<_>>()
|
||||
);
|
||||
|
||||
std::thread::spawn(move || {
|
||||
// Keep watcher alive in the thread
|
||||
let _watcher = watcher;
|
||||
|
||||
while let Ok(res) = rx.recv() {
|
||||
match res {
|
||||
Ok(event) => {
|
||||
if !is_relevant_event(&event.kind, &config) {
|
||||
continue;
|
||||
}
|
||||
|
||||
info!(?event.kind, ?event.paths, "Detected folder change");
|
||||
callback();
|
||||
}
|
||||
Err(err) => {
|
||||
warn!("File watcher error: {}", err);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
error!("File watcher channel closed unexpectedly");
|
||||
});
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for FileWatcherBuilder {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
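// Minimal usage sketch for configuration hot-reloading. The path below is a
// made-up example and the callback body is a placeholder; the builder calls
// themselves match the API defined above.
#[allow(dead_code)]
fn watch_config_file_example() -> Result<()> {
    FileWatcherBuilder::new()
        .watch_path("/etc/greptimedb/config.toml")? // hypothetical path; must exist as a file
        .config(FileWatcherConfig::new().include_remove_events())
        .spawn(|| {
            // Placeholder: re-read and apply the configuration here.
            common_telemetry::info!("watched config file changed, reloading");
        })
}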
|
||||
|
||||
/// Check if an event kind is relevant based on the configuration.
|
||||
fn is_relevant_event(kind: &EventKind, config: &FileWatcherConfig) -> bool {
|
||||
match kind {
|
||||
EventKind::Modify(_) | EventKind::Create(_) => true,
|
||||
EventKind::Remove(_) => config.include_remove_events,
|
||||
_ => false,
|
||||
}
|
||||
}
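// Remove events are only considered relevant when the watcher was built with
// `include_remove_events`; Modify and Create events always trigger the callback.
#[cfg(test)]
mod relevance_example {
    use notify::EventKind;
    use notify::event::{ModifyKind, RemoveKind};

    use super::*;

    #[test]
    fn remove_events_are_opt_in() {
        let default_cfg = FileWatcherConfig::new();
        let with_removes = FileWatcherConfig::new().include_remove_events();

        assert!(is_relevant_event(&EventKind::Modify(ModifyKind::Any), &default_cfg));
        assert!(!is_relevant_event(&EventKind::Remove(RemoveKind::Any), &default_cfg));
        assert!(is_relevant_event(&EventKind::Remove(RemoveKind::Any), &with_removes));
    }
}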
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::sync::Arc;
|
||||
use std::sync::atomic::{AtomicUsize, Ordering};
|
||||
use std::time::Duration;
|
||||
|
||||
use common_test_util::temp_dir::create_temp_dir;
|
||||
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_file_watcher_detects_changes() {
|
||||
common_telemetry::init_default_ut_logging();
|
||||
|
||||
let dir = create_temp_dir("test_file_watcher");
|
||||
let file_path = dir.path().join("test_file.txt");
|
||||
|
||||
// Create initial file
|
||||
std::fs::write(&file_path, "initial content").unwrap();
|
||||
|
||||
let counter = Arc::new(AtomicUsize::new(0));
|
||||
let counter_clone = counter.clone();
|
||||
|
||||
FileWatcherBuilder::new()
|
||||
.watch_path(&file_path)
|
||||
.unwrap()
|
||||
.config(FileWatcherConfig::new())
|
||||
.spawn(move || {
|
||||
counter_clone.fetch_add(1, Ordering::SeqCst);
|
||||
})
|
||||
.unwrap();
|
||||
|
||||
// Give watcher time to start
|
||||
std::thread::sleep(Duration::from_millis(100));
|
||||
|
||||
// Modify the file
|
||||
std::fs::write(&file_path, "modified content").unwrap();
|
||||
|
||||
// Wait for the event to be processed
|
||||
std::thread::sleep(Duration::from_millis(500));
|
||||
|
||||
assert!(
|
||||
counter.load(Ordering::SeqCst) >= 1,
|
||||
"Watcher should have detected at least one change"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_file_watcher_detects_delete_and_recreate() {
|
||||
common_telemetry::init_default_ut_logging();
|
||||
|
||||
let dir = create_temp_dir("test_file_watcher_recreate");
|
||||
let file_path = dir.path().join("test_file.txt");
|
||||
|
||||
// Create initial file
|
||||
std::fs::write(&file_path, "initial content").unwrap();
|
||||
|
||||
let counter = Arc::new(AtomicUsize::new(0));
|
||||
let counter_clone = counter.clone();
|
||||
|
||||
FileWatcherBuilder::new()
|
||||
.watch_path(&file_path)
|
||||
.unwrap()
|
||||
.config(FileWatcherConfig::new())
|
||||
.spawn(move || {
|
||||
counter_clone.fetch_add(1, Ordering::SeqCst);
|
||||
})
|
||||
.unwrap();
|
||||
|
||||
// Give watcher time to start
|
||||
std::thread::sleep(Duration::from_millis(100));
|
||||
|
||||
// Delete the file
|
||||
std::fs::remove_file(&file_path).unwrap();
|
||||
std::thread::sleep(Duration::from_millis(100));
|
||||
|
||||
// Recreate the file - this should still be detected because we watch the directory
|
||||
std::fs::write(&file_path, "recreated content").unwrap();
|
||||
|
||||
// Wait for the event to be processed
|
||||
std::thread::sleep(Duration::from_millis(500));
|
||||
|
||||
assert!(
|
||||
counter.load(Ordering::SeqCst) >= 1,
|
||||
"Watcher should have detected file recreation"
|
||||
);
|
||||
}
|
||||
}
|
||||
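A minimal usage sketch of the builder above, assuming `FileWatcherBuilder`, `FileWatcherConfig`, `watch_path`, and `spawn` keep the signatures exercised by the tests; the certificate-reload scenario and the `reload_tls` helper are purely illustrative, not part of this change.

    // Hedged sketch: invoke a callback whenever a watched file changes.
    fn watch_cert(cert_path: &std::path::Path) -> Result<()> {
        FileWatcherBuilder::new()
            .watch_path(cert_path)?
            .config(FileWatcherConfig::new())
            .spawn(|| {
                // Runs on the dedicated watcher thread for every relevant event.
                reload_tls();
            })
    }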
@@ -14,6 +14,7 @@

pub mod config;
pub mod error;
pub mod file_watcher;

use std::time::Duration;
@@ -12,28 +12,11 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use std::future::Future;

use arrow::record_batch::RecordBatch;
use async_trait::async_trait;
use datafusion::parquet::format::FileMetaData;
use snafu::{OptionExt, ResultExt};
use tokio::io::{AsyncWrite, AsyncWriteExt};

use crate::error::{self, Result};
use crate::share_buffer::SharedBuffer;

pub struct LazyBufferedWriter<T, U, F> {
    path: String,
    writer_factory: F,
    writer: Option<T>,
    /// None stands for [`LazyBufferedWriter`] closed.
    encoder: Option<U>,
    buffer: SharedBuffer,
    rows_written: usize,
    bytes_written: u64,
    threshold: usize,
}
use crate::error::Result;

pub trait DfRecordBatchEncoder {
    fn write(&mut self, batch: &RecordBatch) -> Result<()>;
@@ -43,126 +26,3 @@ pub trait DfRecordBatchEncoder {
pub trait ArrowWriterCloser {
    async fn close(mut self) -> Result<FileMetaData>;
}

impl<
    T: AsyncWrite + Send + Unpin,
    U: DfRecordBatchEncoder + ArrowWriterCloser,
    F: Fn(String) -> Fut,
    Fut: Future<Output = Result<T>>,
> LazyBufferedWriter<T, U, F>
{
    /// Closes `LazyBufferedWriter` and optionally flushes all data to underlying storage
    /// if any row's been written.
    pub async fn close_with_arrow_writer(mut self) -> Result<(FileMetaData, u64)> {
        let encoder = self
            .encoder
            .take()
            .context(error::BufferedWriterClosedSnafu)?;
        let metadata = encoder.close().await?;

        // It's important to shut down! flushes all pending writes
        self.close_inner_writer().await?;
        Ok((metadata, self.bytes_written))
    }
}

impl<
    T: AsyncWrite + Send + Unpin,
    U: DfRecordBatchEncoder,
    F: Fn(String) -> Fut,
    Fut: Future<Output = Result<T>>,
> LazyBufferedWriter<T, U, F>
{
    /// Closes the writer and flushes the buffer data.
    pub async fn close_inner_writer(&mut self) -> Result<()> {
        // Use `rows_written` to keep a track of if any rows have been written.
        // If no row's been written, then we can simply close the underlying
        // writer without flush so that no file will be actually created.
        if self.rows_written != 0 {
            self.bytes_written += self.try_flush(true).await?;
        }

        if let Some(writer) = &mut self.writer {
            writer.shutdown().await.context(error::AsyncWriteSnafu)?;
        }
        Ok(())
    }

    pub fn new(
        threshold: usize,
        buffer: SharedBuffer,
        encoder: U,
        path: impl AsRef<str>,
        writer_factory: F,
    ) -> Self {
        Self {
            path: path.as_ref().to_string(),
            threshold,
            encoder: Some(encoder),
            buffer,
            rows_written: 0,
            bytes_written: 0,
            writer_factory,
            writer: None,
        }
    }

    pub async fn write(&mut self, batch: &RecordBatch) -> Result<()> {
        let encoder = self
            .encoder
            .as_mut()
            .context(error::BufferedWriterClosedSnafu)?;
        encoder.write(batch)?;
        self.rows_written += batch.num_rows();
        self.bytes_written += self.try_flush(false).await?;
        Ok(())
    }

    async fn try_flush(&mut self, all: bool) -> Result<u64> {
        let mut bytes_written: u64 = 0;

        // Once buffered data size reaches threshold, split the data in chunks (typically 4MB)
        // and write to underlying storage.
        while self.buffer.buffer.lock().unwrap().len() >= self.threshold {
            let chunk = {
                let mut buffer = self.buffer.buffer.lock().unwrap();
                buffer.split_to(self.threshold)
            };
            let size = chunk.len();

            self.maybe_init_writer()
                .await?
                .write_all(&chunk)
                .await
                .context(error::AsyncWriteSnafu)?;

            bytes_written += size as u64;
        }

        if all {
            bytes_written += self.try_flush_all().await?;
        }
        Ok(bytes_written)
    }

    /// Only initiates underlying file writer when rows have been written.
    async fn maybe_init_writer(&mut self) -> Result<&mut T> {
        if let Some(ref mut writer) = self.writer {
            Ok(writer)
        } else {
            let writer = (self.writer_factory)(self.path.clone()).await?;
            Ok(self.writer.insert(writer))
        }
    }

    async fn try_flush_all(&mut self) -> Result<u64> {
        let remain = self.buffer.buffer.lock().unwrap().split();
        let size = remain.len();
        self.maybe_init_writer()
            .await?
            .write_all(&remain)
            .await
            .context(error::AsyncWriteSnafu)?;
        Ok(size as u64)
    }
}
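The threshold-based flush in `try_flush` (also reused by the new `stream_to_file` below) carves fixed-size chunks off the shared buffer and leaves the tail for the final flush. A standalone sketch of that chunking, assuming `SharedBuffer` wraps a `Mutex<bytes::BytesMut>` as the `lock()/split_to()` calls suggest:

    use bytes::BytesMut;

    // Illustrative only: drain complete chunks, keep the remainder buffered.
    fn drain_in_chunks(buffer: &mut BytesMut, threshold: usize) -> Vec<BytesMut> {
        let mut chunks = Vec::new();
        while buffer.len() >= threshold {
            // split_to() moves the first `threshold` bytes out without copying the rest.
            chunks.push(buffer.split_to(threshold));
        }
        chunks
    }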
src/common/datasource/src/compressed_writer.rs (new file, 202 lines)
@@ -0,0 +1,202 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::io;
use std::pin::Pin;
use std::task::{Context, Poll};

use async_compression::tokio::write::{BzEncoder, GzipEncoder, XzEncoder, ZstdEncoder};
use snafu::ResultExt;
use tokio::io::{AsyncWrite, AsyncWriteExt};

use crate::compression::CompressionType;
use crate::error::{self, Result};

/// A compressed writer that wraps an underlying async writer with compression.
///
/// This writer supports multiple compression formats including GZIP, BZIP2, XZ, and ZSTD.
/// It provides transparent compression for any async writer implementation.
pub struct CompressedWriter {
    inner: Box<dyn AsyncWrite + Unpin + Send>,
    compression_type: CompressionType,
}

impl CompressedWriter {
    /// Creates a new compressed writer with the specified compression type.
    ///
    /// # Arguments
    ///
    /// * `writer` - The underlying writer to wrap with compression
    /// * `compression_type` - The type of compression to apply
    pub fn new(
        writer: impl AsyncWrite + Unpin + Send + 'static,
        compression_type: CompressionType,
    ) -> Self {
        let inner: Box<dyn AsyncWrite + Unpin + Send> = match compression_type {
            CompressionType::Gzip => Box::new(GzipEncoder::new(writer)),
            CompressionType::Bzip2 => Box::new(BzEncoder::new(writer)),
            CompressionType::Xz => Box::new(XzEncoder::new(writer)),
            CompressionType::Zstd => Box::new(ZstdEncoder::new(writer)),
            CompressionType::Uncompressed => Box::new(writer),
        };

        Self {
            inner,
            compression_type,
        }
    }

    /// Returns the compression type used by this writer.
    pub fn compression_type(&self) -> CompressionType {
        self.compression_type
    }

    /// Flush the writer and shutdown compression
    pub async fn shutdown(mut self) -> Result<()> {
        self.inner
            .shutdown()
            .await
            .context(error::AsyncWriteSnafu)?;
        Ok(())
    }
}

impl AsyncWrite for CompressedWriter {
    fn poll_write(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &[u8],
    ) -> Poll<io::Result<usize>> {
        Pin::new(&mut self.inner).poll_write(cx, buf)
    }

    fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
        Pin::new(&mut self.inner).poll_flush(cx)
    }

    fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
        Pin::new(&mut self.inner).poll_shutdown(cx)
    }
}

/// A trait for converting async writers into compressed writers.
///
/// This trait is automatically implemented for all types that implement [`AsyncWrite`].
pub trait IntoCompressedWriter {
    /// Converts this writer into a [`CompressedWriter`] with the specified compression type.
    ///
    /// # Arguments
    ///
    /// * `self` - The underlying writer to wrap with compression
    /// * `compression_type` - The type of compression to apply
    fn into_compressed_writer(self, compression_type: CompressionType) -> CompressedWriter
    where
        Self: AsyncWrite + Unpin + Send + 'static + Sized,
    {
        CompressedWriter::new(self, compression_type)
    }
}

impl<W: AsyncWrite + Unpin + Send + 'static> IntoCompressedWriter for W {}

#[cfg(test)]
mod tests {
    use tokio::io::{AsyncReadExt, AsyncWriteExt, duplex};

    use super::*;

    #[tokio::test]
    async fn test_compressed_writer_gzip() {
        let (duplex_writer, mut duplex_reader) = duplex(1024);
        let mut writer = duplex_writer.into_compressed_writer(CompressionType::Gzip);
        let original = b"test data for gzip compression";

        writer.write_all(original).await.unwrap();
        writer.shutdown().await.unwrap();

        let mut buffer = Vec::new();
        duplex_reader.read_to_end(&mut buffer).await.unwrap();

        // The compressed data should be different from the original
        assert_ne!(buffer, original);
        assert!(!buffer.is_empty());
    }

    #[tokio::test]
    async fn test_compressed_writer_bzip2() {
        let (duplex_writer, mut duplex_reader) = duplex(1024);
        let mut writer = duplex_writer.into_compressed_writer(CompressionType::Bzip2);
        let original = b"test data for bzip2 compression";

        writer.write_all(original).await.unwrap();
        writer.shutdown().await.unwrap();

        let mut buffer = Vec::new();
        duplex_reader.read_to_end(&mut buffer).await.unwrap();

        // The compressed data should be different from the original
        assert_ne!(buffer, original);
        assert!(!buffer.is_empty());
    }

    #[tokio::test]
    async fn test_compressed_writer_xz() {
        let (duplex_writer, mut duplex_reader) = duplex(1024);
        let mut writer = duplex_writer.into_compressed_writer(CompressionType::Xz);
        let original = b"test data for xz compression";

        writer.write_all(original).await.unwrap();
        writer.shutdown().await.unwrap();

        let mut buffer = Vec::new();
        duplex_reader.read_to_end(&mut buffer).await.unwrap();

        // The compressed data should be different from the original
        assert_ne!(buffer, original);
        assert!(!buffer.is_empty());
    }

    #[tokio::test]
    async fn test_compressed_writer_zstd() {
        let (duplex_writer, mut duplex_reader) = duplex(1024);
        let mut writer = duplex_writer.into_compressed_writer(CompressionType::Zstd);
        let original = b"test data for zstd compression";

        writer.write_all(original).await.unwrap();
        writer.shutdown().await.unwrap();

        let mut buffer = Vec::new();
        duplex_reader.read_to_end(&mut buffer).await.unwrap();

        // The compressed data should be different from the original
        assert_ne!(buffer, original);
        assert!(!buffer.is_empty());
    }

    #[tokio::test]
    async fn test_compressed_writer_uncompressed() {
        let (duplex_writer, mut duplex_reader) = duplex(1024);
        let mut writer = duplex_writer.into_compressed_writer(CompressionType::Uncompressed);
        let original = b"test data for uncompressed";

        writer.write_all(original).await.unwrap();
        writer.shutdown().await.unwrap();

        let mut buffer = Vec::new();
        duplex_reader.read_to_end(&mut buffer).await.unwrap();

        // Uncompressed data should be the same as the original
        assert_eq!(buffer, original);
    }
}
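A minimal sketch of the extension trait in everyday use, assuming tokio's `fs` feature is enabled and that `error::AsyncWriteSnafu` accepts `std::io::Error` as the tests above suggest; the file path and payload are illustrative.

    // Hedged sketch: gzip a byte payload straight into a local file.
    async fn write_compressed_file(path: &str, payload: &[u8]) -> Result<()> {
        let file = tokio::fs::File::create(path)
            .await
            .context(error::AsyncWriteSnafu)?;
        let mut writer = file.into_compressed_writer(CompressionType::Gzip);
        writer.write_all(payload).await.context(error::AsyncWriteSnafu)?;
        // shutdown() finalizes the compression stream and flushes the underlying file.
        writer.shutdown().await
    }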
@@ -194,12 +194,6 @@ pub enum Error {
        location: Location,
    },

    #[snafu(display("Buffered writer closed"))]
    BufferedWriterClosed {
        #[snafu(implicit)]
        location: Location,
    },

    #[snafu(display("Failed to write parquet file, path: {}", path))]
    WriteParquet {
        path: String,
@@ -208,6 +202,14 @@ pub enum Error {
        #[snafu(source)]
        error: parquet::errors::ParquetError,
    },

    #[snafu(display("Failed to build file stream"))]
    BuildFileStream {
        #[snafu(implicit)]
        location: Location,
        #[snafu(source)]
        error: datafusion::error::DataFusionError,
    },
}

pub type Result<T> = std::result::Result<T, Error>;
@@ -239,7 +241,7 @@ impl ErrorExt for Error {
            | ReadRecordBatch { .. }
            | WriteRecordBatch { .. }
            | EncodeRecordBatch { .. }
            | BufferedWriterClosed { .. }
            | BuildFileStream { .. }
            | OrcReader { .. } => StatusCode::Unexpected,
        }
    }
@@ -30,12 +30,22 @@ use arrow::record_batch::RecordBatch;
use arrow_schema::{ArrowError, Schema as ArrowSchema};
use async_trait::async_trait;
use bytes::{Buf, Bytes};
use datafusion::datasource::physical_plan::FileOpenFuture;
use common_recordbatch::DfSendableRecordBatchStream;
use datafusion::datasource::file_format::file_compression_type::FileCompressionType as DfCompressionType;
use datafusion::datasource::listing::PartitionedFile;
use datafusion::datasource::object_store::ObjectStoreUrl;
use datafusion::datasource::physical_plan::{
    FileGroup, FileOpenFuture, FileScanConfigBuilder, FileSource, FileStream,
};
use datafusion::error::{DataFusionError, Result as DataFusionResult};
use datafusion::physical_plan::SendableRecordBatchStream;
use datafusion::physical_plan::metrics::ExecutionPlanMetricsSet;
use datatypes::arrow::datatypes::SchemaRef;
use futures::{StreamExt, TryStreamExt};
use object_store::ObjectStore;
use object_store_opendal::OpendalStore;
use snafu::ResultExt;
use tokio::io::AsyncWriteExt;
use tokio_util::compat::FuturesAsyncWriteCompatExt;

use self::csv::CsvFormat;
@@ -43,7 +53,8 @@ use self::json::JsonFormat;
use self::orc::OrcFormat;
use self::parquet::ParquetFormat;
use crate::DEFAULT_WRITE_BUFFER_SIZE;
use crate::buffered_writer::{DfRecordBatchEncoder, LazyBufferedWriter};
use crate::buffered_writer::DfRecordBatchEncoder;
use crate::compressed_writer::{CompressedWriter, IntoCompressedWriter};
use crate::compression::CompressionType;
use crate::error::{self, Result};
use crate::share_buffer::SharedBuffer;
@@ -195,33 +206,128 @@ pub async fn infer_schemas(
    ArrowSchema::try_merge(schemas).context(error::MergeSchemaSnafu)
}

pub async fn stream_to_file<T: DfRecordBatchEncoder, U: Fn(SharedBuffer) -> T>(
/// Writes data to a compressed writer if the data is not empty.
///
/// Does nothing if `data` is empty; otherwise writes all data and returns any error.
async fn write_to_compressed_writer(
    compressed_writer: &mut CompressedWriter,
    data: &[u8],
) -> Result<()> {
    if !data.is_empty() {
        compressed_writer
            .write_all(data)
            .await
            .context(error::AsyncWriteSnafu)?;
    }
    Ok(())
}

/// Streams [SendableRecordBatchStream] to a file with optional compression support.
/// Data is buffered and flushed according to the given `threshold`.
/// Ensures that writer resources are cleanly released and that an empty file is not
/// created if no rows are written.
///
/// Returns the total number of rows successfully written.
pub async fn stream_to_file<E>(
    mut stream: SendableRecordBatchStream,
    store: ObjectStore,
    path: &str,
    threshold: usize,
    concurrency: usize,
    encoder_factory: U,
) -> Result<usize> {
    compression_type: CompressionType,
    encoder_factory: impl Fn(SharedBuffer) -> E,
) -> Result<usize>
where
    E: DfRecordBatchEncoder,
{
    // Create the file writer with OpenDAL's built-in buffering
    let writer = store
        .writer_with(path)
        .concurrent(concurrency)
        .chunk(DEFAULT_WRITE_BUFFER_SIZE.as_bytes() as usize)
        .await
        .with_context(|_| error::WriteObjectSnafu { path })?
        .into_futures_async_write()
        .compat_write();

    // Apply compression if needed
    let mut compressed_writer = writer.into_compressed_writer(compression_type);

    // Create a buffer for the encoder
    let buffer = SharedBuffer::with_capacity(threshold);
    let encoder = encoder_factory(buffer.clone());
    let mut writer = LazyBufferedWriter::new(threshold, buffer, encoder, path, |path| async {
        store
            .writer_with(&path)
            .concurrent(concurrency)
            .chunk(DEFAULT_WRITE_BUFFER_SIZE.as_bytes() as usize)
            .await
            .map(|v| v.into_futures_async_write().compat_write())
            .context(error::WriteObjectSnafu { path })
    });
    let mut encoder = encoder_factory(buffer.clone());

    let mut rows = 0;

    // Process each record batch
    while let Some(batch) = stream.next().await {
        let batch = batch.context(error::ReadRecordBatchSnafu)?;
        writer.write(&batch).await?;

        // Write batch using the encoder
        encoder.write(&batch)?;
        rows += batch.num_rows();

        loop {
            let chunk = {
                let mut buffer_guard = buffer.buffer.lock().unwrap();
                if buffer_guard.len() < threshold {
                    break;
                }
                buffer_guard.split_to(threshold)
            };
            write_to_compressed_writer(&mut compressed_writer, &chunk).await?;
        }
    }
    writer.close_inner_writer().await?;

    // If no row's been written, just simply close the underlying writer
    // without flush so that no file will be actually created.
    if rows != 0 {
        // Final flush of any remaining data
        let final_data = {
            let mut buffer_guard = buffer.buffer.lock().unwrap();
            buffer_guard.split()
        };
        write_to_compressed_writer(&mut compressed_writer, &final_data).await?;
    }

    // Shutdown compression and close writer
    compressed_writer.shutdown().await?;

    Ok(rows)
}

/// Creates a [FileStream] for reading data from a file with optional column projection
/// and compression support.
///
/// Returns [SendableRecordBatchStream].
pub async fn file_to_stream(
    store: &ObjectStore,
    filename: &str,
    file_schema: SchemaRef,
    file_source: Arc<dyn FileSource>,
    projection: Option<Vec<usize>>,
    compression_type: CompressionType,
) -> Result<DfSendableRecordBatchStream> {
    let df_compression: DfCompressionType = compression_type.into();
    let config = FileScanConfigBuilder::new(
        ObjectStoreUrl::local_filesystem(),
        file_schema,
        file_source.clone(),
    )
    .with_file_group(FileGroup::new(vec![PartitionedFile::new(
        filename.to_string(),
        0,
    )]))
    .with_projection(projection)
    .with_file_compression_type(df_compression)
    .build();

    let store = Arc::new(OpendalStore::new(store.clone()));
    let file_opener = file_source
        .with_projection(&config)
        .create_file_opener(store, &config, 0);
    let stream = FileStream::new(&config, 0, file_opener, &ExecutionPlanMetricsSet::new())
        .context(error::BuildFileStreamSnafu)?;

    Ok(Box::pin(stream))
}
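A hedged sketch of wiring an encoder factory into the new `stream_to_file` signature; the output path and tuning numbers are illustrative, and the arrow CSV writer is used here only because the crate already implements `DfRecordBatchEncoder` for it (see csv.rs below).

    // Illustrative only: export a record-batch stream as gzip-compressed CSV.
    async fn export_gzip_csv(
        stream: SendableRecordBatchStream,
        store: ObjectStore,
    ) -> Result<usize> {
        stream_to_file(
            stream,
            store,
            "data/output.csv.gz",  // illustrative object-store path
            8 * 1024 * 1024,       // flush threshold in bytes
            4,                     // concurrent multipart uploads
            CompressionType::Gzip, // transparent compression on the way out
            |buffer| arrow::csv::WriterBuilder::new().build(buffer),
        )
        .await
    }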
@@ -157,19 +157,27 @@ pub async fn stream_to_csv(
    concurrency: usize,
    format: &CsvFormat,
) -> Result<usize> {
    stream_to_file(stream, store, path, threshold, concurrency, |buffer| {
        let mut builder = WriterBuilder::new();
        if let Some(timestamp_format) = &format.timestamp_format {
            builder = builder.with_timestamp_format(timestamp_format.to_owned())
        }
        if let Some(date_format) = &format.date_format {
            builder = builder.with_date_format(date_format.to_owned())
        }
        if let Some(time_format) = &format.time_format {
            builder = builder.with_time_format(time_format.to_owned())
        }
        builder.build(buffer)
    })
    stream_to_file(
        stream,
        store,
        path,
        threshold,
        concurrency,
        format.compression_type,
        |buffer| {
            let mut builder = WriterBuilder::new();
            if let Some(timestamp_format) = &format.timestamp_format {
                builder = builder.with_timestamp_format(timestamp_format.to_owned())
            }
            if let Some(date_format) = &format.date_format {
                builder = builder.with_date_format(date_format.to_owned())
            }
            if let Some(time_format) = &format.time_format {
                builder = builder.with_time_format(time_format.to_owned())
            }
            builder.build(buffer)
        },
    )
    .await
}

@@ -181,13 +189,21 @@ impl DfRecordBatchEncoder for csv::Writer<SharedBuffer> {

#[cfg(test)]
mod tests {
    use std::sync::Arc;

    use common_recordbatch::adapter::DfRecordBatchStreamAdapter;
    use common_recordbatch::{RecordBatch, RecordBatches};
    use common_test_util::find_workspace_path;
    use datafusion::datasource::physical_plan::{CsvSource, FileSource};
    use datatypes::prelude::ConcreteDataType;
    use datatypes::schema::{ColumnSchema, Schema};
    use datatypes::vectors::{Float64Vector, StringVector, UInt32Vector, VectorRef};
    use futures::TryStreamExt;

    use super::*;
    use crate::file_format::{
        FORMAT_COMPRESSION_TYPE, FORMAT_DELIMITER, FORMAT_HAS_HEADER,
        FORMAT_SCHEMA_INFER_MAX_RECORD, FileFormat,
        FORMAT_SCHEMA_INFER_MAX_RECORD, FileFormat, file_to_stream,
    };
    use crate::test_util::{format_schema, test_store};

@@ -297,4 +313,166 @@ mod tests {
        }
    );
}

    #[tokio::test]
    async fn test_compressed_csv() {
        // Create test data
        let column_schemas = vec![
            ColumnSchema::new("id", ConcreteDataType::uint32_datatype(), false),
            ColumnSchema::new("name", ConcreteDataType::string_datatype(), false),
            ColumnSchema::new("value", ConcreteDataType::float64_datatype(), false),
        ];
        let schema = Arc::new(Schema::new(column_schemas));

        // Create multiple record batches with different data
        let batch1_columns: Vec<VectorRef> = vec![
            Arc::new(UInt32Vector::from_slice(vec![1, 2, 3])),
            Arc::new(StringVector::from(vec!["Alice", "Bob", "Charlie"])),
            Arc::new(Float64Vector::from_slice(vec![10.5, 20.3, 30.7])),
        ];
        let batch1 = RecordBatch::new(schema.clone(), batch1_columns).unwrap();

        let batch2_columns: Vec<VectorRef> = vec![
            Arc::new(UInt32Vector::from_slice(vec![4, 5, 6])),
            Arc::new(StringVector::from(vec!["David", "Eva", "Frank"])),
            Arc::new(Float64Vector::from_slice(vec![40.1, 50.2, 60.3])),
        ];
        let batch2 = RecordBatch::new(schema.clone(), batch2_columns).unwrap();

        let batch3_columns: Vec<VectorRef> = vec![
            Arc::new(UInt32Vector::from_slice(vec![7, 8, 9])),
            Arc::new(StringVector::from(vec!["Grace", "Henry", "Ivy"])),
            Arc::new(Float64Vector::from_slice(vec![70.4, 80.5, 90.6])),
        ];
        let batch3 = RecordBatch::new(schema.clone(), batch3_columns).unwrap();

        // Combine all batches into a RecordBatches collection
        let recordbatches = RecordBatches::try_new(schema, vec![batch1, batch2, batch3]).unwrap();

        // Test with different compression types
        let compression_types = vec![
            CompressionType::Gzip,
            CompressionType::Bzip2,
            CompressionType::Xz,
            CompressionType::Zstd,
        ];

        // Create a temporary file path
        let temp_dir = common_test_util::temp_dir::create_temp_dir("test_compressed_csv");
        for compression_type in compression_types {
            let format = CsvFormat {
                compression_type,
                ..CsvFormat::default()
            };

            // Use correct format without Debug formatter
            let compressed_file_name =
                format!("test_compressed_csv.{}", compression_type.file_extension());
            let compressed_file_path = temp_dir.path().join(&compressed_file_name);
            let compressed_file_path_str = compressed_file_path.to_str().unwrap();

            // Create a simple file store for testing
            let store = test_store("/");

            // Export CSV with compression
            let rows = stream_to_csv(
                Box::pin(DfRecordBatchStreamAdapter::new(recordbatches.as_stream())),
                store,
                compressed_file_path_str,
                1024,
                1,
                &format,
            )
            .await
            .unwrap();

            assert_eq!(rows, 9);

            // Verify compressed file was created and has content
            assert!(compressed_file_path.exists());
            let file_size = std::fs::metadata(&compressed_file_path).unwrap().len();
            assert!(file_size > 0);

            // Verify the file is actually compressed
            let file_content = std::fs::read(&compressed_file_path).unwrap();
            // Compressed files should not start with CSV header
            // They should have compression magic bytes
            match compression_type {
                CompressionType::Gzip => {
                    // Gzip magic bytes: 0x1f 0x8b
                    assert_eq!(file_content[0], 0x1f, "Gzip file should start with 0x1f");
                    assert_eq!(
                        file_content[1], 0x8b,
                        "Gzip file should have 0x8b as second byte"
                    );
                }
                CompressionType::Bzip2 => {
                    // Bzip2 magic bytes: 'BZ'
                    assert_eq!(file_content[0], b'B', "Bzip2 file should start with 'B'");
                    assert_eq!(
                        file_content[1], b'Z',
                        "Bzip2 file should have 'Z' as second byte"
                    );
                }
                CompressionType::Xz => {
                    // XZ magic bytes: 0xFD '7zXZ'
                    assert_eq!(file_content[0], 0xFD, "XZ file should start with 0xFD");
                }
                CompressionType::Zstd => {
                    // Zstd magic bytes: 0x28 0xB5 0x2F 0xFD
                    assert_eq!(file_content[0], 0x28, "Zstd file should start with 0x28");
                    assert_eq!(
                        file_content[1], 0xB5,
                        "Zstd file should have 0xB5 as second byte"
                    );
                }
                _ => {}
            }

            // Verify the compressed file can be decompressed and content matches original data
            let store = test_store("/");
            let schema = Arc::new(
                CsvFormat {
                    compression_type,
                    ..Default::default()
                }
                .infer_schema(&store, compressed_file_path_str)
                .await
                .unwrap(),
            );
            let csv_source = CsvSource::new(true, b',', b'"')
                .with_schema(schema.clone())
                .with_batch_size(8192);

            let stream = file_to_stream(
                &store,
                compressed_file_path_str,
                schema.clone(),
                csv_source.clone(),
                None,
                compression_type,
            )
            .await
            .unwrap();

            let batches = stream.try_collect::<Vec<_>>().await.unwrap();
            let pretty_print = arrow::util::pretty::pretty_format_batches(&batches)
                .unwrap()
                .to_string();
            let expected = r#"+----+---------+-------+
| id | name    | value |
+----+---------+-------+
| 1  | Alice   | 10.5  |
| 2  | Bob     | 20.3  |
| 3  | Charlie | 30.7  |
| 4  | David   | 40.1  |
| 5  | Eva     | 50.2  |
| 6  | Frank   | 60.3  |
| 7  | Grace   | 70.4  |
| 8  | Henry   | 80.5  |
| 9  | Ivy     | 90.6  |
+----+---------+-------+"#;
            assert_eq!(expected, pretty_print);
        }
    }
}
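The magic-byte assertions above generalize into a small codec sniffer; this helper is not part of the PR, just an illustrative restatement of the same byte signatures.

    // Illustrative only: guess the compression codec from a file's leading bytes.
    fn sniff_compression(content: &[u8]) -> CompressionType {
        match content {
            [0x1f, 0x8b, ..] => CompressionType::Gzip,
            [b'B', b'Z', ..] => CompressionType::Bzip2,
            [0xFD, b'7', b'z', b'X', b'Z', ..] => CompressionType::Xz,
            [0x28, 0xB5, 0x2F, 0xFD, ..] => CompressionType::Zstd,
            _ => CompressionType::Uncompressed,
        }
    }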
@@ -115,10 +115,17 @@ pub async fn stream_to_json(
    path: &str,
    threshold: usize,
    concurrency: usize,
    format: &JsonFormat,
) -> Result<usize> {
    stream_to_file(stream, store, path, threshold, concurrency, |buffer| {
        json::LineDelimitedWriter::new(buffer)
    })
    stream_to_file(
        stream,
        store,
        path,
        threshold,
        concurrency,
        format.compression_type,
        json::LineDelimitedWriter::new,
    )
    .await
}

@@ -130,10 +137,21 @@ impl DfRecordBatchEncoder for json::Writer<SharedBuffer, LineDelimited> {

#[cfg(test)]
mod tests {
    use std::sync::Arc;

    use common_recordbatch::adapter::DfRecordBatchStreamAdapter;
    use common_recordbatch::{RecordBatch, RecordBatches};
    use common_test_util::find_workspace_path;
    use datafusion::datasource::physical_plan::{FileSource, JsonSource};
    use datatypes::prelude::ConcreteDataType;
    use datatypes::schema::{ColumnSchema, Schema};
    use datatypes::vectors::{Float64Vector, StringVector, UInt32Vector, VectorRef};
    use futures::TryStreamExt;

    use super::*;
    use crate::file_format::{FORMAT_COMPRESSION_TYPE, FORMAT_SCHEMA_INFER_MAX_RECORD, FileFormat};
    use crate::file_format::{
        FORMAT_COMPRESSION_TYPE, FORMAT_SCHEMA_INFER_MAX_RECORD, FileFormat, file_to_stream,
    };
    use crate::test_util::{format_schema, test_store};

    fn test_data_root() -> String {
@@ -203,4 +221,165 @@ mod tests {
        }
    );
}

    #[tokio::test]
    async fn test_compressed_json() {
        // Create test data
        let column_schemas = vec![
            ColumnSchema::new("id", ConcreteDataType::uint32_datatype(), false),
            ColumnSchema::new("name", ConcreteDataType::string_datatype(), false),
            ColumnSchema::new("value", ConcreteDataType::float64_datatype(), false),
        ];
        let schema = Arc::new(Schema::new(column_schemas));

        // Create multiple record batches with different data
        let batch1_columns: Vec<VectorRef> = vec![
            Arc::new(UInt32Vector::from_slice(vec![1, 2, 3])),
            Arc::new(StringVector::from(vec!["Alice", "Bob", "Charlie"])),
            Arc::new(Float64Vector::from_slice(vec![10.5, 20.3, 30.7])),
        ];
        let batch1 = RecordBatch::new(schema.clone(), batch1_columns).unwrap();

        let batch2_columns: Vec<VectorRef> = vec![
            Arc::new(UInt32Vector::from_slice(vec![4, 5, 6])),
            Arc::new(StringVector::from(vec!["David", "Eva", "Frank"])),
            Arc::new(Float64Vector::from_slice(vec![40.1, 50.2, 60.3])),
        ];
        let batch2 = RecordBatch::new(schema.clone(), batch2_columns).unwrap();

        let batch3_columns: Vec<VectorRef> = vec![
            Arc::new(UInt32Vector::from_slice(vec![7, 8, 9])),
            Arc::new(StringVector::from(vec!["Grace", "Henry", "Ivy"])),
            Arc::new(Float64Vector::from_slice(vec![70.4, 80.5, 90.6])),
        ];
        let batch3 = RecordBatch::new(schema.clone(), batch3_columns).unwrap();

        // Combine all batches into a RecordBatches collection
        let recordbatches = RecordBatches::try_new(schema, vec![batch1, batch2, batch3]).unwrap();

        // Test with different compression types
        let compression_types = vec![
            CompressionType::Gzip,
            CompressionType::Bzip2,
            CompressionType::Xz,
            CompressionType::Zstd,
        ];

        // Create a temporary file path
        let temp_dir = common_test_util::temp_dir::create_temp_dir("test_compressed_json");
        for compression_type in compression_types {
            let format = JsonFormat {
                compression_type,
                ..JsonFormat::default()
            };

            let compressed_file_name =
                format!("test_compressed_json.{}", compression_type.file_extension());
            let compressed_file_path = temp_dir.path().join(&compressed_file_name);
            let compressed_file_path_str = compressed_file_path.to_str().unwrap();

            // Create a simple file store for testing
            let store = test_store("/");

            // Export JSON with compression
            let rows = stream_to_json(
                Box::pin(DfRecordBatchStreamAdapter::new(recordbatches.as_stream())),
                store,
                compressed_file_path_str,
                1024,
                1,
                &format,
            )
            .await
            .unwrap();

            assert_eq!(rows, 9);

            // Verify compressed file was created and has content
            assert!(compressed_file_path.exists());
            let file_size = std::fs::metadata(&compressed_file_path).unwrap().len();
            assert!(file_size > 0);

            // Verify the file is actually compressed
            let file_content = std::fs::read(&compressed_file_path).unwrap();
            // Compressed files should not start with '{' (JSON character)
            // They should have compression magic bytes
            match compression_type {
                CompressionType::Gzip => {
                    // Gzip magic bytes: 0x1f 0x8b
                    assert_eq!(file_content[0], 0x1f, "Gzip file should start with 0x1f");
                    assert_eq!(
                        file_content[1], 0x8b,
                        "Gzip file should have 0x8b as second byte"
                    );
                }
                CompressionType::Bzip2 => {
                    // Bzip2 magic bytes: 'BZ'
                    assert_eq!(file_content[0], b'B', "Bzip2 file should start with 'B'");
                    assert_eq!(
                        file_content[1], b'Z',
                        "Bzip2 file should have 'Z' as second byte"
                    );
                }
                CompressionType::Xz => {
                    // XZ magic bytes: 0xFD '7zXZ'
                    assert_eq!(file_content[0], 0xFD, "XZ file should start with 0xFD");
                }
                CompressionType::Zstd => {
                    // Zstd magic bytes: 0x28 0xB5 0x2F 0xFD
                    assert_eq!(file_content[0], 0x28, "Zstd file should start with 0x28");
                    assert_eq!(
                        file_content[1], 0xB5,
                        "Zstd file should have 0xB5 as second byte"
                    );
                }
                _ => {}
            }

            // Verify the compressed file can be decompressed and content matches original data
            let store = test_store("/");
            let schema = Arc::new(
                JsonFormat {
                    compression_type,
                    ..Default::default()
                }
                .infer_schema(&store, compressed_file_path_str)
                .await
                .unwrap(),
            );
            let json_source = JsonSource::new()
                .with_schema(schema.clone())
                .with_batch_size(8192);

            let stream = file_to_stream(
                &store,
                compressed_file_path_str,
                schema.clone(),
                json_source.clone(),
                None,
                compression_type,
            )
            .await
            .unwrap();

            let batches = stream.try_collect::<Vec<_>>().await.unwrap();
            let pretty_print = arrow::util::pretty::pretty_format_batches(&batches)
                .unwrap()
                .to_string();
            let expected = r#"+----+---------+-------+
| id | name    | value |
+----+---------+-------+
| 1  | Alice   | 10.5  |
| 2  | Bob     | 20.3  |
| 3  | Charlie | 30.7  |
| 4  | David   | 40.1  |
| 5  | Eva     | 50.2  |
| 6  | Frank   | 60.3  |
| 7  | Grace   | 70.4  |
| 8  | Henry   | 80.5  |
| 9  | Ivy     | 90.6  |
+----+---------+-------+"#;
            assert_eq!(expected, pretty_print);
        }
    }
}
@@ -16,6 +16,7 @@
#![feature(type_alias_impl_trait)]

pub mod buffered_writer;
pub mod compressed_writer;
pub mod compression;
pub mod error;
pub mod file_format;
@@ -27,6 +27,7 @@ const SECRET_ACCESS_KEY: &str = "secret_access_key";
const SESSION_TOKEN: &str = "session_token";
const REGION: &str = "region";
const ENABLE_VIRTUAL_HOST_STYLE: &str = "enable_virtual_host_style";
const DISABLE_EC2_METADATA: &str = "disable_ec2_metadata";

pub fn is_supported_in_s3(key: &str) -> bool {
    [
@@ -36,6 +37,7 @@ pub fn is_supported_in_s3(key: &str) -> bool {
        SESSION_TOKEN,
        REGION,
        ENABLE_VIRTUAL_HOST_STYLE,
        DISABLE_EC2_METADATA,
    ]
    .contains(&key)
}
@@ -82,6 +84,21 @@ pub fn build_s3_backend(
        }
    }

    if let Some(disable_str) = connection.get(DISABLE_EC2_METADATA) {
        let disable = disable_str.as_str().parse::<bool>().map_err(|e| {
            error::InvalidConnectionSnafu {
                msg: format!(
                    "failed to parse the option {}={}, {}",
                    DISABLE_EC2_METADATA, disable_str, e
                ),
            }
            .build()
        })?;
        if disable {
            builder = builder.disable_ec2_metadata();
        }
    }

    // TODO(weny): Consider finding a better way to eliminate duplicate code.
    Ok(ObjectStore::new(builder)
        .context(error::BuildBackendSnafu)?
@@ -109,6 +126,7 @@ mod tests {
        assert!(is_supported_in_s3(SESSION_TOKEN));
        assert!(is_supported_in_s3(REGION));
        assert!(is_supported_in_s3(ENABLE_VIRTUAL_HOST_STYLE));
        assert!(is_supported_in_s3(DISABLE_EC2_METADATA));
        assert!(!is_supported_in_s3("foo"))
    }
}

@@ -28,7 +28,7 @@ use object_store::ObjectStore;
use object_store::services::Fs;

use crate::file_format::csv::{CsvFormat, stream_to_csv};
use crate::file_format::json::stream_to_json;
use crate::file_format::json::{JsonFormat, stream_to_json};
use crate::test_util;

pub const TEST_BATCH_SIZE: usize = 100;
@@ -122,13 +122,16 @@ pub async fn setup_stream_to_json_test(origin_path: &str, threshold: impl Fn(usi

    let output_path = format!("{}/{}", dir.path().display(), "output");

    let json_format = JsonFormat::default();

    assert!(
        stream_to_json(
            Box::pin(stream),
            tmp_store.clone(),
            &output_path,
            threshold(size),
            8
            8,
            &json_format,
        )
        .await
        .is_ok()

@@ -21,6 +21,8 @@ pub mod status_code;
use http::{HeaderMap, HeaderValue};
pub use snafu;

use crate::status_code::StatusCode;

// HACK - these headers are here for shared in gRPC services. For common HTTP headers,
// please define in `src/servers/src/http/header.rs`.
pub const GREPTIME_DB_HEADER_ERROR_CODE: &str = "x-greptime-err-code";
@@ -46,6 +48,29 @@ pub fn from_err_code_msg_to_header(code: u32, msg: &str) -> HeaderMap {
    header
}

/// Extract [StatusCode] and error message from [HeaderMap], if any.
///
/// Note that if the [StatusCode] is illegal, for example, a random number that is not pre-defined
/// as a [StatusCode], the result is still `None`.
pub fn from_header_to_err_code_msg(headers: &HeaderMap) -> Option<(StatusCode, &str)> {
    let code = headers
        .get(GREPTIME_DB_HEADER_ERROR_CODE)
        .and_then(|value| {
            value
                .to_str()
                .ok()
                .and_then(|x| x.parse::<u32>().ok())
                .and_then(StatusCode::from_u32)
        });
    let msg = headers
        .get(GREPTIME_DB_HEADER_ERROR_MSG)
        .and_then(|x| x.to_str().ok());
    match (code, msg) {
        (Some(code), Some(msg)) => Some((code, msg)),
        _ => None,
    }
}

/// Returns the external root cause of the source error (exclude the current error).
pub fn root_source(err: &dyn std::error::Error) -> Option<&dyn std::error::Error> {
    // There are some divergence about the behavior of the `sources()` API
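A hedged round-trip sketch for the header helpers above, assuming `from_err_code_msg_to_header(code: u32, msg: &str)` keeps the signature shown in the hunk header; the message text is illustrative.

    // Illustrative only: encode an error into gRPC headers, then read it back.
    fn demo_header_roundtrip() {
        let headers = from_err_code_msg_to_header(
            StatusCode::DeadlineExceeded as u32,
            "query took too long",
        );
        // None is returned if either header is missing or the code is unknown.
        if let Some((code, _msg)) = from_header_to_err_code_msg(&headers) {
            assert_eq!(code as u32, 1008);
        }
    }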
@@ -42,6 +42,8 @@ pub enum StatusCode {
    External = 1007,
    /// The request is deadline exceeded (typically server-side).
    DeadlineExceeded = 1008,
    /// Service got suspended for various reason. For example, resources exceed limit.
    Suspended = 1009,
    // ====== End of common status code ================

    // ====== Begin of SQL related status code =========
@@ -175,7 +177,8 @@ impl StatusCode {
            | StatusCode::AccessDenied
            | StatusCode::PermissionDenied
            | StatusCode::RequestOutdated
            | StatusCode::External => false,
            | StatusCode::External
            | StatusCode::Suspended => false,
        }
    }

@@ -223,7 +226,8 @@ impl StatusCode {
            | StatusCode::InvalidAuthHeader
            | StatusCode::AccessDenied
            | StatusCode::PermissionDenied
            | StatusCode::RequestOutdated => false,
            | StatusCode::RequestOutdated
            | StatusCode::Suspended => false,
        }
    }

@@ -347,7 +351,8 @@ pub fn status_to_tonic_code(status_code: StatusCode) -> Code {
            | StatusCode::RegionNotReady => Code::Unavailable,
        StatusCode::RuntimeResourcesExhausted
            | StatusCode::RateLimited
            | StatusCode::RegionBusy => Code::ResourceExhausted,
            | StatusCode::RegionBusy
            | StatusCode::Suspended => Code::ResourceExhausted,
        StatusCode::UnsupportedPasswordType
            | StatusCode::UserPasswordMismatch
            | StatusCode::AuthHeaderNotFound
@@ -97,9 +97,9 @@ pub trait Event: Send + Sync + Debug {
        vec![]
    }

    /// Add the extra row to the event with the default row.
    fn extra_row(&self) -> Result<Row> {
        Ok(Row { values: vec![] })
    /// Add the extra rows to the event with the default row.
    fn extra_rows(&self) -> Result<Vec<Row>> {
        Ok(vec![Row { values: vec![] }])
    }

    /// Returns the event as any type.
@@ -159,15 +159,17 @@ pub fn build_row_inserts_request(events: &[&Box<dyn Event>]) -> Result<RowInsert

    let mut rows: Vec<Row> = Vec::with_capacity(events.len());
    for event in events {
        let extra_row = event.extra_row()?;
        let mut values = Vec::with_capacity(3 + extra_row.values.len());
        values.extend([
            ValueData::StringValue(event.event_type().to_string()).into(),
            ValueData::BinaryValue(event.json_payload()?.into_bytes()).into(),
            ValueData::TimestampNanosecondValue(event.timestamp().value()).into(),
        ]);
        values.extend(extra_row.values);
        rows.push(Row { values });
        let extra_rows = event.extra_rows()?;
        for extra_row in extra_rows {
            let mut values = Vec::with_capacity(3 + extra_row.values.len());
            values.extend([
                ValueData::StringValue(event.event_type().to_string()).into(),
                ValueData::BinaryValue(event.json_payload()?.into_bytes()).into(),
                ValueData::TimestampNanosecondValue(event.timestamp().value()).into(),
            ]);
            values.extend(extra_row.values);
            rows.push(Row { values });
        }
    }

    Ok(RowInsertRequests {

@@ -107,8 +107,8 @@ impl Event for SlowQueryEvent {
        ]
    }

    fn extra_row(&self) -> Result<Row> {
        Ok(Row {
    fn extra_rows(&self) -> Result<Vec<Row>> {
        Ok(vec![Row {
            values: vec![
                ValueData::U64Value(self.cost).into(),
                ValueData::U64Value(self.threshold).into(),
@@ -119,7 +119,7 @@ impl Event for SlowQueryEvent {
                ValueData::TimestampMillisecondValue(self.promql_start.unwrap_or(0)).into(),
                ValueData::TimestampMillisecondValue(self.promql_end.unwrap_or(0)).into(),
            ],
        })
        }])
    }

    fn json_payload(&self) -> Result<String> {
@@ -19,7 +19,7 @@ arc-swap = "1.0"
arrow.workspace = true
arrow-schema.workspace = true
async-trait.workspace = true
bincode = "1.3"
bincode = "=1.3.3"
catalog.workspace = true
chrono.workspace = true
common-base.workspace = true
@@ -39,7 +39,7 @@ datafusion-functions-aggregate-common.workspace = true
datafusion-pg-catalog.workspace = true
datafusion-physical-expr.workspace = true
datatypes.workspace = true
derive_more = { version = "1", default-features = false, features = ["display"] }
derive_more.workspace = true
geo = { version = "0.29", optional = true }
geo-types = { version = "0.7", optional = true }
geohash = { version = "0.13", optional = true }
@@ -47,6 +47,7 @@ h3o = { version = "0.6", optional = true }
hyperloglogplus = "0.4"
jsonb.workspace = true
memchr = "2.7"
mito-codec.workspace = true
nalgebra.workspace = true
num = "0.4"
num-traits = "0.2"
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

mod build_index_table;
mod flush_compact_region;
mod flush_compact_table;
mod migrate_region;
@@ -26,6 +27,7 @@ use reconcile_catalog::ReconcileCatalogFunction;
use reconcile_database::ReconcileDatabaseFunction;
use reconcile_table::ReconcileTableFunction;

use crate::admin::build_index_table::BuildIndexFunction;
use crate::flush_flow::FlushFlowFunction;
use crate::function_registry::FunctionRegistry;

@@ -40,6 +42,7 @@ impl AdminFunction {
        registry.register(CompactRegionFunction::factory());
        registry.register(FlushTableFunction::factory());
        registry.register(CompactTableFunction::factory());
        registry.register(BuildIndexFunction::factory());
        registry.register(FlushFlowFunction::factory());
        registry.register(ReconcileCatalogFunction::factory());
        registry.register(ReconcileDatabaseFunction::factory());

src/common/function/src/admin/build_index_table.rs (new file, 80 lines)
@@ -0,0 +1,80 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use arrow::datatypes::DataType as ArrowDataType;
use common_error::ext::BoxedError;
use common_macro::admin_fn;
use common_query::error::{
    InvalidFuncArgsSnafu, MissingTableMutationHandlerSnafu, Result, TableMutationSnafu,
    UnsupportedInputDataTypeSnafu,
};
use datafusion_expr::{Signature, Volatility};
use datatypes::prelude::*;
use session::context::QueryContextRef;
use session::table_name::table_name_to_full_name;
use snafu::{ResultExt, ensure};
use table::requests::BuildIndexTableRequest;

use crate::handlers::TableMutationHandlerRef;

#[admin_fn(
    name = BuildIndexFunction,
    display_name = build_index,
    sig_fn = build_index_signature,
    ret = uint64
)]
pub(crate) async fn build_index(
    table_mutation_handler: &TableMutationHandlerRef,
    query_ctx: &QueryContextRef,
    params: &[ValueRef<'_>],
) -> Result<Value> {
    ensure!(
        params.len() == 1,
        InvalidFuncArgsSnafu {
            err_msg: format!(
                "The length of the args is not correct, expect 1, have: {}",
                params.len()
            ),
        }
    );

    let ValueRef::String(table_name) = params[0] else {
        return UnsupportedInputDataTypeSnafu {
            function: "build_index",
            datatypes: params.iter().map(|v| v.data_type()).collect::<Vec<_>>(),
        }
        .fail();
    };

    let (catalog_name, schema_name, table_name) = table_name_to_full_name(table_name, query_ctx)
        .map_err(BoxedError::new)
        .context(TableMutationSnafu)?;

    let affected_rows = table_mutation_handler
        .build_index(
            BuildIndexTableRequest {
                catalog_name,
                schema_name,
                table_name,
            },
            query_ctx.clone(),
        )
        .await?;

    Ok(Value::from(affected_rows as u64))
}

fn build_index_signature() -> Signature {
    Signature::uniform(1, vec![ArrowDataType::Utf8], Volatility::Immutable)
}
@@ -34,6 +34,7 @@ use crate::scalars::json::JsonFunction;
use crate::scalars::matches::MatchesFunction;
use crate::scalars::matches_term::MatchesTermFunction;
use crate::scalars::math::MathFunction;
use crate::scalars::primary_key::DecodePrimaryKeyFunction;
use crate::scalars::string::register_string_functions;
use crate::scalars::timestamp::TimestampFunction;
use crate::scalars::uddsketch_calc::UddSketchCalcFunction;
@@ -143,6 +144,7 @@ pub static FUNCTION_REGISTRY: LazyLock<Arc<FunctionRegistry>> = LazyLock::new(||
    ExpressionFunction::register(&function_registry);
    UddSketchCalcFunction::register(&function_registry);
    HllCalcFunction::register(&function_registry);
    DecodePrimaryKeyFunction::register(&function_registry);

    // Full text search function
    MatchesFunction::register(&function_registry);

@@ -25,7 +25,9 @@ use common_query::Output;
use common_query::error::Result;
use session::context::QueryContextRef;
use store_api::storage::RegionId;
use table::requests::{CompactTableRequest, DeleteRequest, FlushTableRequest, InsertRequest};
use table::requests::{
    BuildIndexTableRequest, CompactTableRequest, DeleteRequest, FlushTableRequest, InsertRequest,
};

/// A trait for handling table mutations in `QueryEngine`.
#[async_trait]
@@ -47,6 +49,13 @@ pub trait TableMutationHandler: Send + Sync {
        ctx: QueryContextRef,
    ) -> Result<AffectedRows>;

    /// Trigger an index build task for the table.
    async fn build_index(
        &self,
        request: BuildIndexTableRequest,
        ctx: QueryContextRef,
    ) -> Result<AffectedRows>;

    /// Trigger a flush task for a table region.
    async fn flush_region(&self, region_id: RegionId, ctx: QueryContextRef)
        -> Result<AffectedRows>;

@@ -20,6 +20,7 @@ pub mod json;
pub mod matches;
pub mod matches_term;
pub mod math;
pub mod primary_key;
pub(crate) mod string;
pub mod vector;

@@ -14,6 +14,7 @@
mod binary;
mod ctx;
mod if_func;
mod is_null;
mod unary;

@@ -22,6 +23,7 @@ pub use ctx::EvalContext;
pub use unary::scalar_unary_op;

use crate::function_registry::FunctionRegistry;
use crate::scalars::expression::if_func::IfFunction;
use crate::scalars::expression::is_null::IsNullFunction;

pub(crate) struct ExpressionFunction;
@@ -29,5 +31,6 @@ pub(crate) struct ExpressionFunction;
impl ExpressionFunction {
    pub fn register(registry: &FunctionRegistry) {
        registry.register_scalar(IsNullFunction::default());
        registry.register_scalar(IfFunction::default());
    }
}
src/common/function/src/scalars/expression/if_func.rs (new file, 404 lines)
@@ -0,0 +1,404 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::fmt;
use std::fmt::Display;

use arrow::array::ArrowNativeTypeOp;
use arrow::datatypes::ArrowPrimitiveType;
use datafusion::arrow::array::{Array, ArrayRef, AsArray, BooleanArray, PrimitiveArray};
use datafusion::arrow::compute::kernels::zip::zip;
use datafusion::arrow::datatypes::DataType;
use datafusion_common::DataFusionError;
use datafusion_expr::type_coercion::binary::comparison_coercion;
use datafusion_expr::{ColumnarValue, ScalarFunctionArgs, Signature, Volatility};

use crate::function::Function;

const NAME: &str = "if";

/// MySQL-compatible IF function: IF(condition, true_value, false_value)
///
/// Returns true_value if condition is TRUE (not NULL and not 0),
/// otherwise returns false_value.
///
/// MySQL truthy rules:
/// - NULL -> false
/// - 0 (numeric zero) -> false
/// - Any non-zero numeric -> true
/// - Boolean true/false -> use directly
#[derive(Clone, Debug)]
pub struct IfFunction {
    signature: Signature,
}

impl Default for IfFunction {
    fn default() -> Self {
        Self {
            signature: Signature::any(3, Volatility::Immutable),
        }
    }
}

impl Display for IfFunction {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", NAME.to_ascii_uppercase())
    }
}

impl Function for IfFunction {
    fn name(&self) -> &str {
        NAME
    }

    fn return_type(&self, input_types: &[DataType]) -> datafusion_common::Result<DataType> {
        // Return the common type of true_value and false_value (args[1] and args[2])
        if input_types.len() < 3 {
            return Err(DataFusionError::Plan(format!(
                "{} requires 3 arguments, got {}",
                NAME,
                input_types.len()
            )));
        }
        let true_type = &input_types[1];
        let false_type = &input_types[2];

        // Use comparison_coercion to find common type
        comparison_coercion(true_type, false_type).ok_or_else(|| {
            DataFusionError::Plan(format!(
                "Cannot find common type for IF function between {:?} and {:?}",
                true_type, false_type
            ))
        })
    }

    fn signature(&self) -> &Signature {
        &self.signature
    }

    fn invoke_with_args(
        &self,
        args: ScalarFunctionArgs,
    ) -> datafusion_common::Result<ColumnarValue> {
        if args.args.len() != 3 {
            return Err(DataFusionError::Plan(format!(
                "{} requires exactly 3 arguments, got {}",
                NAME,
                args.args.len()
            )));
        }

        let condition = &args.args[0];
        let true_value = &args.args[1];
        let false_value = &args.args[2];

        // Convert condition to boolean array using MySQL truthy rules
        let bool_array = to_boolean_array(condition, args.number_rows)?;

        // Convert true and false values to arrays
        let true_array = true_value.to_array(args.number_rows)?;
        let false_array = false_value.to_array(args.number_rows)?;

        // Use zip to select values based on condition
        // zip expects &dyn Datum, and ArrayRef (Arc<dyn Array>) implements Datum
        let result = zip(&bool_array, &true_array, &false_array)?;
        Ok(ColumnarValue::Array(result))
    }
}

/// Convert a ColumnarValue to a BooleanArray using MySQL truthy rules:
/// - NULL -> false
/// - 0 (any numeric zero) -> false
/// - Non-zero numeric -> true
/// - Boolean -> use directly
fn to_boolean_array(
    value: &ColumnarValue,
    num_rows: usize,
) -> datafusion_common::Result<BooleanArray> {
    let array = value.to_array(num_rows)?;
    array_to_bool(array)
}

/// Convert an integer PrimitiveArray to BooleanArray using MySQL truthy rules:
/// NULL -> false, 0 -> false, non-zero -> true
fn int_array_to_bool<T>(array: &PrimitiveArray<T>) -> BooleanArray
where
    T: ArrowPrimitiveType,
    T::Native: ArrowNativeTypeOp,
{
    BooleanArray::from_iter(
        array
            .iter()
            .map(|opt| Some(opt.is_some_and(|v| !v.is_zero()))),
    )
}

/// Convert a float PrimitiveArray to BooleanArray using MySQL truthy rules:
/// NULL -> false, 0 (including -0.0) -> false, NaN -> true, other non-zero -> true
fn float_array_to_bool<T>(array: &PrimitiveArray<T>) -> BooleanArray
where
    T: ArrowPrimitiveType,
    T::Native: ArrowNativeTypeOp + num_traits::Float,
{
    use num_traits::Float;
    BooleanArray::from_iter(
        array
            .iter()
            .map(|opt| Some(opt.is_some_and(|v| v.is_nan() || !v.is_zero()))),
    )
}

/// Convert an Array to BooleanArray using MySQL truthy rules
fn array_to_bool(array: ArrayRef) -> datafusion_common::Result<BooleanArray> {
    use arrow::datatypes::*;

    match array.data_type() {
        DataType::Boolean => {
            let bool_array = array.as_boolean();
            Ok(BooleanArray::from_iter(
                bool_array.iter().map(|opt| Some(opt.unwrap_or(false))),
            ))
        }
        DataType::Int8 => Ok(int_array_to_bool(array.as_primitive::<Int8Type>())),
        DataType::Int16 => Ok(int_array_to_bool(array.as_primitive::<Int16Type>())),
        DataType::Int32 => Ok(int_array_to_bool(array.as_primitive::<Int32Type>())),
        DataType::Int64 => Ok(int_array_to_bool(array.as_primitive::<Int64Type>())),
        DataType::UInt8 => Ok(int_array_to_bool(array.as_primitive::<UInt8Type>())),
        DataType::UInt16 => Ok(int_array_to_bool(array.as_primitive::<UInt16Type>())),
        DataType::UInt32 => Ok(int_array_to_bool(array.as_primitive::<UInt32Type>())),
        DataType::UInt64 => Ok(int_array_to_bool(array.as_primitive::<UInt64Type>())),
        // Float16 needs special handling since half::f16 doesn't implement num_traits::Float
        DataType::Float16 => {
            let typed_array = array.as_primitive::<Float16Type>();
            Ok(BooleanArray::from_iter(typed_array.iter().map(|opt| {
                Some(opt.is_some_and(|v| {
                    let f = v.to_f32();
                    f.is_nan() || !f.is_zero()
                }))
            })))
        }
        DataType::Float32 => Ok(float_array_to_bool(array.as_primitive::<Float32Type>())),
        DataType::Float64 => Ok(float_array_to_bool(array.as_primitive::<Float64Type>())),
        // Null type is always false.
        // Note: NullArray::is_null() returns false (physical null), so we must handle it explicitly.
        // See: https://github.com/apache/arrow-rs/issues/4840
|
||||
DataType::Null => Ok(BooleanArray::from(vec![false; array.len()])),
|
||||
// For other types, treat non-null as true
|
||||
_ => {
|
||||
let len = array.len();
|
||||
Ok(BooleanArray::from_iter(
|
||||
(0..len).map(|i| Some(!array.is_null(i))),
|
||||
))
|
||||
}
|
||||
}
|
||||
}
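
// --- Illustrative sketch (not part of the original patch) -------------------------------
// The helper below shows, in isolation, how the MySQL truthy conversion above combines
// with arrow's `zip` kernel inside `invoke_with_args`: the boolean mask selects from the
// "yes" array where the condition is truthy and from the "no" array elsewhere. It only
// uses items already imported in this file; the name `truthy_zip_example` is illustrative,
// not an existing API.
#[allow(dead_code)]
fn truthy_zip_example() -> datafusion_common::Result<ArrayRef> {
    use std::sync::Arc;

    use arrow::array::{Float64Array, StringArray};

    // IF(cond, 'yes', 'no') over [1.0, 0.0, NULL, NaN]
    let cond: ArrayRef = Arc::new(Float64Array::from(vec![
        Some(1.0),
        Some(0.0),
        None,
        Some(f64::NAN),
    ]));
    let yes: ArrayRef = Arc::new(StringArray::from(vec!["yes"; 4]));
    let no: ArrayRef = Arc::new(StringArray::from(vec!["no"; 4]));

    // MySQL rules: 1.0 and NaN are truthy; 0.0 and NULL are falsy.
    let mask = array_to_bool(cond)?;
    // Expected output: ["yes", "no", "no", "yes"]
    Ok(zip(&mask, &yes, &no)?)
}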

#[cfg(test)]
mod tests {
use std::sync::Arc;

use arrow_schema::Field;
use datafusion_common::ScalarValue;
use datafusion_common::arrow::array::{AsArray, Int32Array, StringArray};

use super::*;

#[test]
fn test_if_function_basic() {
let if_func = IfFunction::default();
assert_eq!("if", if_func.name());

// Test IF(true, 'yes', 'no') -> 'yes'
let result = if_func
.invoke_with_args(ScalarFunctionArgs {
args: vec![
ColumnarValue::Scalar(ScalarValue::Boolean(Some(true))),
ColumnarValue::Scalar(ScalarValue::Utf8(Some("yes".to_string()))),
ColumnarValue::Scalar(ScalarValue::Utf8(Some("no".to_string()))),
],
arg_fields: vec![],
number_rows: 1,
return_field: Arc::new(Field::new("", DataType::Utf8, true)),
config_options: Arc::new(Default::default()),
})
.unwrap();

if let ColumnarValue::Array(arr) = result {
let str_arr = arr.as_string::<i32>();
assert_eq!(str_arr.value(0), "yes");
} else {
panic!("Expected Array result");
}
}

#[test]
fn test_if_function_false() {
let if_func = IfFunction::default();

// Test IF(false, 'yes', 'no') -> 'no'
let result = if_func
.invoke_with_args(ScalarFunctionArgs {
args: vec![
ColumnarValue::Scalar(ScalarValue::Boolean(Some(false))),
ColumnarValue::Scalar(ScalarValue::Utf8(Some("yes".to_string()))),
ColumnarValue::Scalar(ScalarValue::Utf8(Some("no".to_string()))),
],
arg_fields: vec![],
number_rows: 1,
return_field: Arc::new(Field::new("", DataType::Utf8, true)),
config_options: Arc::new(Default::default()),
})
.unwrap();

if let ColumnarValue::Array(arr) = result {
let str_arr = arr.as_string::<i32>();
assert_eq!(str_arr.value(0), "no");
} else {
panic!("Expected Array result");
}
}

#[test]
fn test_if_function_null_is_false() {
let if_func = IfFunction::default();

// Test IF(NULL, 'yes', 'no') -> 'no' (NULL is treated as false)
// Using Boolean(None) - typed null
let result = if_func
.invoke_with_args(ScalarFunctionArgs {
args: vec![
ColumnarValue::Scalar(ScalarValue::Boolean(None)),
ColumnarValue::Scalar(ScalarValue::Utf8(Some("yes".to_string()))),
ColumnarValue::Scalar(ScalarValue::Utf8(Some("no".to_string()))),
],
arg_fields: vec![],
number_rows: 1,
return_field: Arc::new(Field::new("", DataType::Utf8, true)),
config_options: Arc::new(Default::default()),
})
.unwrap();

if let ColumnarValue::Array(arr) = result {
let str_arr = arr.as_string::<i32>();
assert_eq!(str_arr.value(0), "no");
} else {
panic!("Expected Array result");
}

// Test IF(NULL, 'yes', 'no') -> 'no' using ScalarValue::Null (untyped null from SQL NULL literal)
let result = if_func
.invoke_with_args(ScalarFunctionArgs {
args: vec![
ColumnarValue::Scalar(ScalarValue::Null),
ColumnarValue::Scalar(ScalarValue::Utf8(Some("yes".to_string()))),
ColumnarValue::Scalar(ScalarValue::Utf8(Some("no".to_string()))),
],
arg_fields: vec![],
number_rows: 1,
return_field: Arc::new(Field::new("", DataType::Utf8, true)),
config_options: Arc::new(Default::default()),
})
.unwrap();

if let ColumnarValue::Array(arr) = result {
let str_arr = arr.as_string::<i32>();
assert_eq!(str_arr.value(0), "no");
} else {
panic!("Expected Array result");
}
}

#[test]
fn test_if_function_numeric_truthy() {
let if_func = IfFunction::default();

// Test IF(1, 'yes', 'no') -> 'yes' (non-zero is true)
let result = if_func
.invoke_with_args(ScalarFunctionArgs {
args: vec![
ColumnarValue::Scalar(ScalarValue::Int32(Some(1))),
ColumnarValue::Scalar(ScalarValue::Utf8(Some("yes".to_string()))),
ColumnarValue::Scalar(ScalarValue::Utf8(Some("no".to_string()))),
],
arg_fields: vec![],
number_rows: 1,
return_field: Arc::new(Field::new("", DataType::Utf8, true)),
config_options: Arc::new(Default::default()),
})
.unwrap();

if let ColumnarValue::Array(arr) = result {
let str_arr = arr.as_string::<i32>();
assert_eq!(str_arr.value(0), "yes");
} else {
panic!("Expected Array result");
}

// Test IF(0, 'yes', 'no') -> 'no' (zero is false)
let result = if_func
.invoke_with_args(ScalarFunctionArgs {
args: vec![
ColumnarValue::Scalar(ScalarValue::Int32(Some(0))),
ColumnarValue::Scalar(ScalarValue::Utf8(Some("yes".to_string()))),
ColumnarValue::Scalar(ScalarValue::Utf8(Some("no".to_string()))),
],
arg_fields: vec![],
number_rows: 1,
return_field: Arc::new(Field::new("", DataType::Utf8, true)),
config_options: Arc::new(Default::default()),
})
.unwrap();

if let ColumnarValue::Array(arr) = result {
let str_arr = arr.as_string::<i32>();
assert_eq!(str_arr.value(0), "no");
} else {
panic!("Expected Array result");
}
}

#[test]
fn test_if_function_with_arrays() {
let if_func = IfFunction::default();

// Test with array condition
let condition = Int32Array::from(vec![Some(1), Some(0), None, Some(5)]);
let true_val = StringArray::from(vec!["yes", "yes", "yes", "yes"]);
let false_val = StringArray::from(vec!["no", "no", "no", "no"]);

let result = if_func
.invoke_with_args(ScalarFunctionArgs {
args: vec![
ColumnarValue::Array(Arc::new(condition)),
ColumnarValue::Array(Arc::new(true_val)),
ColumnarValue::Array(Arc::new(false_val)),
],
arg_fields: vec![],
number_rows: 4,
return_field: Arc::new(Field::new("", DataType::Utf8, true)),
config_options: Arc::new(Default::default()),
})
.unwrap();

if let ColumnarValue::Array(arr) = result {
let str_arr = arr.as_string::<i32>();
assert_eq!(str_arr.value(0), "yes"); // 1 is true
assert_eq!(str_arr.value(1), "no"); // 0 is false
assert_eq!(str_arr.value(2), "no"); // NULL is false
assert_eq!(str_arr.value(3), "yes"); // 5 is true
} else {
panic!("Expected Array result");
}
}
}
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use std::fmt::Display;
use std::sync::Arc;

use datafusion_common::arrow::array::{Array, AsArray, BooleanBuilder};

@@ -19,7 +19,7 @@ mod json_path_match;
mod json_to_string;
mod parse_json;

use json_get::{JsonGetBool, JsonGetFloat, JsonGetInt, JsonGetString};
use json_get::{JsonGetBool, JsonGetFloat, JsonGetInt, JsonGetObject, JsonGetString};
use json_is::{
JsonIsArray, JsonIsBool, JsonIsFloat, JsonIsInt, JsonIsNull, JsonIsObject, JsonIsString,
};
@@ -39,6 +39,7 @@ impl JsonFunction {
registry.register_scalar(JsonGetFloat::default());
registry.register_scalar(JsonGetString::default());
registry.register_scalar(JsonGetBool::default());
registry.register_scalar(JsonGetObject::default());

registry.register_scalar(JsonIsNull::default());
registry.register_scalar(JsonIsInt::default());

@@ -16,10 +16,13 @@ use std::fmt::{self, Display};
use std::sync::Arc;

use arrow::compute;
use datafusion_common::DataFusionError;
use datafusion_common::arrow::array::{
Array, AsArray, BooleanBuilder, Float64Builder, Int64Builder, StringViewBuilder,
Array, AsArray, BinaryViewBuilder, BooleanBuilder, Float64Builder, Int64Builder,
StringViewBuilder,
};
use datafusion_common::arrow::datatypes::DataType;
use datafusion_expr::type_coercion::aggregates::STRINGS;
use datafusion_expr::{ColumnarValue, ScalarFunctionArgs, Signature};

use crate::function::{Function, extract_args};
@@ -212,13 +215,92 @@ impl Display for JsonGetString {
}
}

/// Get the object from JSON value by path.
pub(super) struct JsonGetObject {
signature: Signature,
}

impl JsonGetObject {
const NAME: &'static str = "json_get_object";
}

impl Default for JsonGetObject {
fn default() -> Self {
Self {
signature: helper::one_of_sigs2(
vec![
DataType::Binary,
DataType::LargeBinary,
DataType::BinaryView,
],
STRINGS.to_vec(),
),
}
}
}

impl Function for JsonGetObject {
fn name(&self) -> &str {
Self::NAME
}

fn return_type(&self, _: &[DataType]) -> datafusion_common::Result<DataType> {
Ok(DataType::BinaryView)
}

fn signature(&self) -> &Signature {
&self.signature
}

fn invoke_with_args(
&self,
args: ScalarFunctionArgs,
) -> datafusion_common::Result<ColumnarValue> {
let [arg0, arg1] = extract_args(self.name(), &args)?;
let arg0 = compute::cast(&arg0, &DataType::BinaryView)?;
let jsons = arg0.as_binary_view();
let arg1 = compute::cast(&arg1, &DataType::Utf8View)?;
let paths = arg1.as_string_view();

let len = jsons.len();
let mut builder = BinaryViewBuilder::with_capacity(len);

for i in 0..len {
let json = jsons.is_valid(i).then(|| jsons.value(i));
let path = paths.is_valid(i).then(|| paths.value(i));
let result = if let (Some(json), Some(path)) = (json, path) {
let result = jsonb::jsonpath::parse_json_path(path.as_bytes()).and_then(|path| {
let mut data = Vec::new();
let mut offset = Vec::new();
jsonb::get_by_path(json, path, &mut data, &mut offset)
.map(|()| jsonb::is_object(&data).then_some(data))
});
result.map_err(|e| DataFusionError::Execution(e.to_string()))?
} else {
None
};
builder.append_option(result);
}

Ok(ColumnarValue::Array(Arc::new(builder.finish())))
}
}

impl Display for JsonGetObject {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", Self::NAME.to_ascii_uppercase())
}
}

#[cfg(test)]
mod tests {
use std::sync::Arc;

use arrow_schema::Field;
use datafusion_common::arrow::array::{BinaryArray, StringArray};
use datafusion_common::ScalarValue;
use datafusion_common::arrow::array::{BinaryArray, BinaryViewArray, StringArray};
use datafusion_common::arrow::datatypes::{Float64Type, Int64Type};
use datatypes::types::parse_string_to_jsonb;

use super::*;

@@ -425,4 +507,49 @@ mod tests {
assert_eq!(*gt, result);
}
}

#[test]
fn test_json_get_object() -> datafusion_common::Result<()> {
let udf = JsonGetObject::default();
assert_eq!("json_get_object", udf.name());
assert_eq!(
DataType::BinaryView,
udf.return_type(&[DataType::BinaryView, DataType::Utf8View])?
);

let json_value = parse_string_to_jsonb(r#"{"a": {"b": {"c": {"d": 1}}}}"#).unwrap();
let paths = vec!["$", "$.a", "$.a.b", "$.a.b.c", "$.a.b.c.d", "$.e", "$.a.e"];
let number_rows = paths.len();

let args = ScalarFunctionArgs {
args: vec![
ColumnarValue::Scalar(ScalarValue::Binary(Some(json_value))),
ColumnarValue::Array(Arc::new(StringArray::from_iter_values(paths))),
],
arg_fields: vec![],
number_rows,
return_field: Arc::new(Field::new("x", DataType::Binary, false)),
config_options: Arc::new(Default::default()),
};
let result = udf
.invoke_with_args(args)
.and_then(|x| x.to_array(number_rows))?;
let result = result.as_binary_view();

let expected = &BinaryViewArray::from_iter(
vec![
Some(r#"{"a": {"b": {"c": {"d": 1}}}}"#),
Some(r#"{"b": {"c": {"d": 1}}}"#),
Some(r#"{"c": {"d": 1}}"#),
Some(r#"{"d": 1}"#),
None,
None,
None,
]
.into_iter()
.map(|x| x.and_then(|s| parse_string_to_jsonb(s).ok())),
);
assert_eq!(result, expected);
Ok(())
}
}
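
// --- Illustrative sketch (not part of the original patch) -------------------------------
// `JsonGetObject::invoke_with_args` above extracts a sub-document per row. The helper
// below isolates that per-value logic for a single JSONB value and path, reusing only the
// `jsonb` calls that already appear in the patch (their signatures are assumed from that
// usage); the name `get_object_at_path` is illustrative.
#[allow(dead_code)]
fn get_object_at_path(json: &[u8], path: &str) -> Option<Vec<u8>> {
    // Parse the JSON path expression, e.g. "$.a.b".
    let path = jsonb::jsonpath::parse_json_path(path.as_bytes()).ok()?;
    let mut data = Vec::new();
    let mut offset = Vec::new();
    // Extract the value at the path into `data`.
    jsonb::get_by_path(json, path, &mut data, &mut offset).ok()?;
    // Keep the result only when it is a JSONB object; scalars and arrays yield None,
    // matching the behaviour of json_get_object.
    jsonb::is_object(&data).then_some(data)
}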

@@ -32,7 +32,15 @@ impl Default for JsonToStringFunction {
fn default() -> Self {
Self {
// TODO(LFC): Use a more clear type here instead of "Binary" for Json input, once we have a "Json" type.
signature: Signature::exact(vec![DataType::Binary], Volatility::Immutable),
signature: Signature::uniform(
1,
vec![
DataType::Binary,
DataType::LargeBinary,
DataType::BinaryView,
],
Volatility::Immutable,
),
}
}
}
@@ -57,7 +65,8 @@ impl Function for JsonToStringFunction {
args: ScalarFunctionArgs,
) -> datafusion_common::Result<ColumnarValue> {
let [arg0] = extract_args(self.name(), &args)?;
let jsons = arg0.as_binary::<i32>();
let arg0 = arrow::compute::cast(&arg0, &DataType::BinaryView)?;
let jsons = arg0.as_binary_view();

let size = jsons.len();
let mut builder = StringViewBuilder::with_capacity(size);

src/common/function/src/scalars/primary_key.rs (new file, 521 lines)
@@ -0,0 +1,521 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::collections::HashMap;
use std::fmt::{self, Display};
use std::sync::Arc;

use datafusion_common::arrow::array::{
Array, ArrayRef, BinaryArray, BinaryViewArray, DictionaryArray, ListBuilder, StringBuilder,
};
use datafusion_common::arrow::datatypes::{DataType, Field};
use datafusion_common::{DataFusionError, ScalarValue};
use datafusion_expr::{ColumnarValue, ScalarFunctionArgs, Signature, Volatility};
use datatypes::arrow::datatypes::UInt32Type;
use datatypes::value::Value;
use mito_codec::row_converter::{
CompositeValues, PrimaryKeyCodec, SortField, build_primary_key_codec_with_fields,
};
use store_api::codec::PrimaryKeyEncoding;
use store_api::metadata::RegionMetadata;
use store_api::storage::ColumnId;
use store_api::storage::consts::{PRIMARY_KEY_COLUMN_NAME, ReservedColumnId};

use crate::function::{Function, extract_args};
use crate::function_registry::FunctionRegistry;

type NameValuePair = (String, Option<String>);

#[derive(Clone, Debug)]
pub(crate) struct DecodePrimaryKeyFunction {
signature: Signature,
}

const NAME: &str = "decode_primary_key";
const NULL_VALUE_LITERAL: &str = "null";

impl Default for DecodePrimaryKeyFunction {
fn default() -> Self {
Self {
signature: Signature::any(3, Volatility::Immutable),
}
}
}

impl DecodePrimaryKeyFunction {
pub fn register(registry: &FunctionRegistry) {
registry.register_scalar(Self::default());
}

fn return_data_type() -> DataType {
DataType::List(Arc::new(Field::new("item", DataType::Utf8, true)))
}
}

impl Function for DecodePrimaryKeyFunction {
fn name(&self) -> &str {
NAME
}

fn return_type(&self, _: &[DataType]) -> datafusion_common::Result<DataType> {
Ok(Self::return_data_type())
}

fn signature(&self) -> &Signature {
&self.signature
}

fn invoke_with_args(
&self,
args: ScalarFunctionArgs,
) -> datafusion_common::Result<ColumnarValue> {
let [encoded, _, _] = extract_args(self.name(), &args)?;
let number_rows = args.number_rows;

let encoding = parse_encoding(&args.args[1])?;
let metadata = parse_region_metadata(&args.args[2])?;
let codec = build_codec(&metadata, encoding);
let name_lookup: HashMap<_, _> = metadata
.column_metadatas
.iter()
.map(|c| (c.column_id, c.column_schema.name.clone()))
.collect();

let decoded_rows = decode_primary_keys(encoded, number_rows, codec.as_ref(), &name_lookup)?;
let array = build_list_array(&decoded_rows)?;

Ok(ColumnarValue::Array(array))
}
}

impl Display for DecodePrimaryKeyFunction {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "DECODE_PRIMARY_KEY")
}
}

fn parse_encoding(arg: &ColumnarValue) -> datafusion_common::Result<PrimaryKeyEncoding> {
let encoding = match arg {
ColumnarValue::Scalar(ScalarValue::Utf8(Some(v)))
| ColumnarValue::Scalar(ScalarValue::LargeUtf8(Some(v))) => v.as_str(),
ColumnarValue::Scalar(value) => {
return Err(DataFusionError::Execution(format!(
"encoding must be a string literal, got {value:?}"
)));
}
ColumnarValue::Array(_) => {
return Err(DataFusionError::Execution(
"encoding must be a scalar string".to_string(),
));
}
};

match encoding.to_ascii_lowercase().as_str() {
"dense" => Ok(PrimaryKeyEncoding::Dense),
"sparse" => Ok(PrimaryKeyEncoding::Sparse),
_ => Err(DataFusionError::Execution(format!(
"unsupported primary key encoding: {encoding}"
))),
}
}

fn build_codec(
metadata: &RegionMetadata,
encoding: PrimaryKeyEncoding,
) -> Arc<dyn PrimaryKeyCodec> {
let fields = metadata.primary_key_columns().map(|c| {
(
c.column_id,
SortField::new(c.column_schema.data_type.clone()),
)
});
build_primary_key_codec_with_fields(encoding, fields)
}

fn parse_region_metadata(arg: &ColumnarValue) -> datafusion_common::Result<RegionMetadata> {
let json = match arg {
ColumnarValue::Scalar(ScalarValue::Utf8(Some(v)))
| ColumnarValue::Scalar(ScalarValue::LargeUtf8(Some(v))) => v.as_str(),
ColumnarValue::Scalar(value) => {
return Err(DataFusionError::Execution(format!(
"region metadata must be a string literal, got {value:?}"
)));
}
ColumnarValue::Array(_) => {
return Err(DataFusionError::Execution(
"region metadata must be a scalar string".to_string(),
));
}
};

RegionMetadata::from_json(json)
.map_err(|e| DataFusionError::Execution(format!("failed to parse region metadata: {e:?}")))
}

fn decode_primary_keys(
encoded: ArrayRef,
number_rows: usize,
codec: &dyn PrimaryKeyCodec,
name_lookup: &HashMap<ColumnId, String>,
) -> datafusion_common::Result<Vec<Vec<NameValuePair>>> {
if let Some(dict) = encoded
.as_any()
.downcast_ref::<DictionaryArray<UInt32Type>>()
{
decode_dictionary(dict, number_rows, codec, name_lookup)
} else if let Some(array) = encoded.as_any().downcast_ref::<BinaryArray>() {
decode_binary_array(array, codec, name_lookup)
} else if let Some(array) = encoded.as_any().downcast_ref::<BinaryViewArray>() {
decode_binary_view_array(array, codec, name_lookup)
} else {
Err(DataFusionError::Execution(format!(
"column {PRIMARY_KEY_COLUMN_NAME} must be binary or dictionary(binary) array"
)))
}
}

fn decode_dictionary(
dict: &DictionaryArray<UInt32Type>,
number_rows: usize,
codec: &dyn PrimaryKeyCodec,
name_lookup: &HashMap<ColumnId, String>,
) -> datafusion_common::Result<Vec<Vec<NameValuePair>>> {
let values = dict
.values()
.as_any()
.downcast_ref::<BinaryArray>()
.ok_or_else(|| {
DataFusionError::Execution("primary key dictionary values are not binary".to_string())
})?;

let mut decoded_values = Vec::with_capacity(values.len());
for i in 0..values.len() {
let pk = values.value(i);
let pairs = decode_one(pk, codec, name_lookup)?;
decoded_values.push(pairs);
}

let mut rows = Vec::with_capacity(number_rows);
let keys = dict.keys();
for i in 0..number_rows {
let dict_index = keys.value(i) as usize;
rows.push(decoded_values[dict_index].clone());
}

Ok(rows)
}

fn decode_binary_array(
array: &BinaryArray,
codec: &dyn PrimaryKeyCodec,
name_lookup: &HashMap<ColumnId, String>,
) -> datafusion_common::Result<Vec<Vec<NameValuePair>>> {
(0..array.len())
.map(|i| decode_one(array.value(i), codec, name_lookup))
.collect()
}

fn decode_binary_view_array(
array: &BinaryViewArray,
codec: &dyn PrimaryKeyCodec,
name_lookup: &HashMap<ColumnId, String>,
) -> datafusion_common::Result<Vec<Vec<NameValuePair>>> {
(0..array.len())
.map(|i| decode_one(array.value(i), codec, name_lookup))
.collect()
}

fn decode_one(
pk: &[u8],
codec: &dyn PrimaryKeyCodec,
name_lookup: &HashMap<ColumnId, String>,
) -> datafusion_common::Result<Vec<NameValuePair>> {
let decoded = codec
.decode(pk)
.map_err(|e| DataFusionError::Execution(format!("failed to decode primary key: {e}")))?;

Ok(match decoded {
CompositeValues::Dense(values) => values
.into_iter()
.map(|(column_id, value)| (column_name(column_id, name_lookup), value_to_string(value)))
.collect(),
CompositeValues::Sparse(values) => {
let mut values: Vec<_> = values
.iter()
.map(|(column_id, value)| {
(
*column_id,
column_name(*column_id, name_lookup),
value_to_string(value.clone()),
)
})
.collect();
values.sort_by_key(|(column_id, _, _)| {
(ReservedColumnId::is_reserved(*column_id), *column_id)
});
values
.into_iter()
.map(|(_, name, value)| (name, value))
.collect()
}
})
}
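
// --- Illustrative sketch (not part of the original patch) -------------------------------
// For sparse primary keys, `decode_one` above sorts the decoded pairs so that user-defined
// tag columns come first (ordered by column id) and reserved columns such as __table_id
// and __tsid are pushed to the end. The helper below shows that ordering rule on its own;
// the name `order_sparse_column_ids` is illustrative.
#[allow(dead_code)]
fn order_sparse_column_ids(mut column_ids: Vec<ColumnId>) -> Vec<ColumnId> {
    // `false` sorts before `true`, so non-reserved (user) columns precede reserved ones.
    column_ids.sort_by_key(|id| (ReservedColumnId::is_reserved(*id), *id));
    column_ids
}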

fn column_name(column_id: ColumnId, name_lookup: &HashMap<ColumnId, String>) -> String {
if let Some(name) = name_lookup.get(&column_id) {
return name.clone();
}

if column_id == ReservedColumnId::table_id() {
return "__table_id".to_string();
}
if column_id == ReservedColumnId::tsid() {
return "__tsid".to_string();
}

column_id.to_string()
}

fn value_to_string(value: Value) -> Option<String> {
match value {
Value::Null => None,
_ => Some(value.to_string()),
}
}

fn build_list_array(rows: &[Vec<NameValuePair>]) -> datafusion_common::Result<ArrayRef> {
let mut builder = ListBuilder::new(StringBuilder::new());

for row in rows {
for (key, value) in row {
let value = value.as_deref().unwrap_or(NULL_VALUE_LITERAL);
builder.values().append_value(format!("{key} : {value}"));
}
builder.append(true);
}

Ok(Arc::new(builder.finish()))
}

#[cfg(test)]
mod tests {
use api::v1::SemanticType;
use datafusion_common::ScalarValue;
use datatypes::arrow::array::builder::BinaryDictionaryBuilder;
use datatypes::arrow::array::{BinaryArray, ListArray, StringArray};
use datatypes::arrow::datatypes::UInt32Type;
use datatypes::prelude::ConcreteDataType;
use datatypes::schema::ColumnSchema;
use datatypes::value::Value;
use mito_codec::row_converter::{
DensePrimaryKeyCodec, PrimaryKeyCodecExt, SortField, SparsePrimaryKeyCodec,
};
use store_api::codec::PrimaryKeyEncoding;
use store_api::metadata::{ColumnMetadata, RegionMetadataBuilder};
use store_api::storage::consts::ReservedColumnId;
use store_api::storage::{ColumnId, RegionId};

use super::*;

fn pk_field() -> Arc<Field> {
Arc::new(Field::new_dictionary(
PRIMARY_KEY_COLUMN_NAME,
DataType::UInt32,
DataType::Binary,
false,
))
}

fn region_metadata_json(
columns: &[(ColumnId, &str, ConcreteDataType)],
encoding: PrimaryKeyEncoding,
) -> String {
let mut builder = RegionMetadataBuilder::new(RegionId::new(1, 1));
builder.push_column_metadata(ColumnMetadata {
column_schema: ColumnSchema::new(
"ts",
ConcreteDataType::timestamp_millisecond_datatype(),
false,
),
semantic_type: SemanticType::Timestamp,
column_id: 100,
});
builder.primary_key_encoding(encoding);
for (id, name, ty) in columns {
builder.push_column_metadata(ColumnMetadata {
column_schema: ColumnSchema::new((*name).to_string(), ty.clone(), true),
semantic_type: SemanticType::Tag,
column_id: *id,
});
}
builder.primary_key(columns.iter().map(|(id, _, _)| *id).collect());

builder.build().unwrap().to_json().unwrap()
}

fn list_row(list: &ListArray, row_idx: usize) -> Vec<String> {
let values = list.value(row_idx);
let values = values.as_any().downcast_ref::<StringArray>().unwrap();
(0..values.len())
.map(|i| values.value(i).to_string())
.collect()
}

#[test]
fn test_decode_dense_primary_key() {
let columns = vec![
(0, "host", ConcreteDataType::string_datatype()),
(1, "core", ConcreteDataType::int64_datatype()),
];
let metadata_json = region_metadata_json(&columns, PrimaryKeyEncoding::Dense);
let codec = DensePrimaryKeyCodec::with_fields(
columns
.iter()
.map(|(id, _, ty)| (*id, SortField::new(ty.clone())))
.collect(),
);

let rows = vec![
vec![Value::from("a"), Value::from(1_i64)],
vec![Value::from("b"), Value::from(2_i64)],
vec![Value::from("a"), Value::from(1_i64)],
];

let mut builder = BinaryDictionaryBuilder::<UInt32Type>::new();
for row in &rows {
let encoded = codec.encode(row.iter().map(|v| v.as_value_ref())).unwrap();
builder.append(encoded.as_slice()).unwrap();
}
let dict_array: ArrayRef = Arc::new(builder.finish());

let args = ScalarFunctionArgs {
args: vec![
ColumnarValue::Array(dict_array),
ColumnarValue::Scalar(ScalarValue::Utf8(Some("dense".to_string()))),
ColumnarValue::Scalar(ScalarValue::Utf8(Some(metadata_json))),
],
arg_fields: vec![
pk_field(),
Arc::new(Field::new("encoding", DataType::Utf8, false)),
Arc::new(Field::new("region_metadata", DataType::Utf8, false)),
],
number_rows: 3,
return_field: Arc::new(Field::new(
"decoded",
DecodePrimaryKeyFunction::return_data_type(),
false,
)),
config_options: Default::default(),
};

let func = DecodePrimaryKeyFunction::default();
let result = func
.invoke_with_args(args)
.and_then(|v| v.to_array(3))
.unwrap();
let list = result.as_any().downcast_ref::<ListArray>().unwrap();

let expected = [
vec!["host : a".to_string(), "core : 1".to_string()],
vec!["host : b".to_string(), "core : 2".to_string()],
vec!["host : a".to_string(), "core : 1".to_string()],
];

for (row_idx, expected_row) in expected.iter().enumerate() {
assert_eq!(*expected_row, list_row(list, row_idx));
}
}

#[test]
fn test_decode_sparse_primary_key() {
let columns = vec![
(10, "k0", ConcreteDataType::string_datatype()),
(11, "k1", ConcreteDataType::string_datatype()),
];
let metadata_json = region_metadata_json(&columns, PrimaryKeyEncoding::Sparse);
let codec = SparsePrimaryKeyCodec::schemaless();

let rows = vec![
vec![
(ReservedColumnId::table_id(), Value::UInt32(1)),
(ReservedColumnId::tsid(), Value::UInt64(100)),
(10, Value::from("a")),
(11, Value::from("b")),
],
vec![
(ReservedColumnId::table_id(), Value::UInt32(1)),
(ReservedColumnId::tsid(), Value::UInt64(200)),
(10, Value::from("c")),
(11, Value::from("d")),
],
];

let mut encoded_values = Vec::with_capacity(rows.len());
for row in &rows {
let mut buf = Vec::new();
codec.encode_values(row, &mut buf).unwrap();
encoded_values.push(buf);
}

let pk_array: ArrayRef = Arc::new(BinaryArray::from_iter_values(
encoded_values.iter().cloned(),
));

let args = ScalarFunctionArgs {
args: vec![
ColumnarValue::Array(pk_array),
ColumnarValue::Scalar(ScalarValue::Utf8(Some("sparse".to_string()))),
ColumnarValue::Scalar(ScalarValue::Utf8(Some(metadata_json))),
],
arg_fields: vec![
pk_field(),
Arc::new(Field::new("encoding", DataType::Utf8, false)),
Arc::new(Field::new("region_metadata", DataType::Utf8, false)),
],
number_rows: rows.len(),
return_field: Arc::new(Field::new(
"decoded",
DecodePrimaryKeyFunction::return_data_type(),
false,
)),
config_options: Default::default(),
};

let func = DecodePrimaryKeyFunction::default();
let result = func
.invoke_with_args(args)
.and_then(|v| v.to_array(rows.len()))
.unwrap();
let list = result.as_any().downcast_ref::<ListArray>().unwrap();

let expected = [
vec![
"k0 : a".to_string(),
"k1 : b".to_string(),
"__tsid : 100".to_string(),
"__table_id : 1".to_string(),
],
vec![
"k0 : c".to_string(),
"k1 : d".to_string(),
"__tsid : 200".to_string(),
"__table_id : 1".to_string(),
],
];

for (row_idx, expected_row) in expected.iter().enumerate() {
assert_eq!(*expected_row, list_row(list, row_idx));
}
}
}
@@ -44,7 +44,8 @@ impl FunctionState {
use session::context::QueryContextRef;
use store_api::storage::RegionId;
use table::requests::{
CompactTableRequest, DeleteRequest, FlushTableRequest, InsertRequest,
BuildIndexTableRequest, CompactTableRequest, DeleteRequest, FlushTableRequest,
InsertRequest,
};

use crate::handlers::{FlowServiceHandler, ProcedureServiceHandler, TableMutationHandler};
@@ -120,6 +121,14 @@ impl FunctionState {
Ok(ROWS)
}

async fn build_index(
&self,
_request: BuildIndexTableRequest,
_ctx: QueryContextRef,
) -> Result<AffectedRows> {
Ok(ROWS)
}

async fn flush_region(
&self,
_region_id: RegionId,

@@ -12,14 +12,12 @@
// See the License for the specific language governing permissions and
// limitations under the License.

mod version;

use std::sync::Arc;

use common_catalog::consts::{
DEFAULT_PRIVATE_SCHEMA_NAME, INFORMATION_SCHEMA_NAME, PG_CATALOG_NAME,
};
use datafusion::arrow::array::{ArrayRef, StringArray, as_boolean_array};
use datafusion::arrow::array::{ArrayRef, StringArray, StringBuilder, as_boolean_array};
use datafusion::catalog::TableFunction;
use datafusion::common::ScalarValue;
use datafusion::common::utils::SingleRowListArrayBuilder;
@@ -27,7 +25,6 @@ use datafusion_expr::{ColumnarValue, ScalarFunctionArgs, Signature, TypeSignatur
use datafusion_pg_catalog::pg_catalog::{self, PgCatalogStaticTables};
use datatypes::arrow::datatypes::{DataType, Field};
use derive_more::derive::Display;
use version::PGVersionFunction;

use crate::function::{Function, find_function_context};
use crate::function_registry::FunctionRegistry;
@@ -37,10 +34,15 @@ const CURRENT_SCHEMA_FUNCTION_NAME: &str = "current_schema";
const CURRENT_SCHEMAS_FUNCTION_NAME: &str = "current_schemas";
const SESSION_USER_FUNCTION_NAME: &str = "session_user";
const CURRENT_DATABASE_FUNCTION_NAME: &str = "current_database";
const OBJ_DESCRIPTION_FUNCTION_NAME: &str = "obj_description";
const COL_DESCRIPTION_FUNCTION_NAME: &str = "col_description";
const SHOBJ_DESCRIPTION_FUNCTION_NAME: &str = "shobj_description";
const PG_MY_TEMP_SCHEMA_FUNCTION_NAME: &str = "pg_my_temp_schema";

define_nullary_udf!(CurrentSchemaFunction);
define_nullary_udf!(SessionUserFunction);
define_nullary_udf!(CurrentDatabaseFunction);
define_nullary_udf!(PgMyTempSchemaFunction);

impl Function for CurrentDatabaseFunction {
fn name(&self) -> &str {
@@ -176,6 +178,175 @@ impl Function for CurrentSchemasFunction {
}
}

/// PostgreSQL obj_description - returns NULL for compatibility
#[derive(Display, Debug, Clone)]
#[display("{}", self.name())]
pub(super) struct ObjDescriptionFunction {
signature: Signature,
}

impl ObjDescriptionFunction {
pub fn new() -> Self {
Self {
signature: Signature::one_of(
vec![
TypeSignature::Exact(vec![DataType::Int64, DataType::Utf8]),
TypeSignature::Exact(vec![DataType::UInt32, DataType::Utf8]),
TypeSignature::Exact(vec![DataType::Int64]),
TypeSignature::Exact(vec![DataType::UInt32]),
],
Volatility::Stable,
),
}
}
}

impl Function for ObjDescriptionFunction {
fn name(&self) -> &str {
OBJ_DESCRIPTION_FUNCTION_NAME
}

fn return_type(&self, _: &[DataType]) -> datafusion_common::Result<DataType> {
Ok(DataType::Utf8)
}

fn signature(&self) -> &Signature {
&self.signature
}

fn invoke_with_args(
&self,
args: ScalarFunctionArgs,
) -> datafusion_common::Result<ColumnarValue> {
let num_rows = args.number_rows;
let mut builder = StringBuilder::with_capacity(num_rows, 0);
for _ in 0..num_rows {
builder.append_null();
}
Ok(ColumnarValue::Array(Arc::new(builder.finish())))
}
}

/// PostgreSQL col_description - returns NULL for compatibility
#[derive(Display, Debug, Clone)]
#[display("{}", self.name())]
pub(super) struct ColDescriptionFunction {
signature: Signature,
}

impl ColDescriptionFunction {
pub fn new() -> Self {
Self {
signature: Signature::one_of(
vec![
TypeSignature::Exact(vec![DataType::Int64, DataType::Int32]),
TypeSignature::Exact(vec![DataType::UInt32, DataType::Int32]),
TypeSignature::Exact(vec![DataType::Int64, DataType::Int64]),
TypeSignature::Exact(vec![DataType::UInt32, DataType::Int64]),
],
Volatility::Stable,
),
}
}
}

impl Function for ColDescriptionFunction {
fn name(&self) -> &str {
COL_DESCRIPTION_FUNCTION_NAME
}

fn return_type(&self, _: &[DataType]) -> datafusion_common::Result<DataType> {
Ok(DataType::Utf8)
}

fn signature(&self) -> &Signature {
&self.signature
}

fn invoke_with_args(
&self,
args: ScalarFunctionArgs,
) -> datafusion_common::Result<ColumnarValue> {
let num_rows = args.number_rows;
let mut builder = StringBuilder::with_capacity(num_rows, 0);
for _ in 0..num_rows {
builder.append_null();
}
Ok(ColumnarValue::Array(Arc::new(builder.finish())))
}
}

/// PostgreSQL shobj_description - returns NULL for compatibility
#[derive(Display, Debug, Clone)]
#[display("{}", self.name())]
pub(super) struct ShobjDescriptionFunction {
signature: Signature,
}

impl ShobjDescriptionFunction {
pub fn new() -> Self {
Self {
signature: Signature::one_of(
vec![
TypeSignature::Exact(vec![DataType::Int64, DataType::Utf8]),
TypeSignature::Exact(vec![DataType::UInt64, DataType::Utf8]),
TypeSignature::Exact(vec![DataType::Int32, DataType::Utf8]),
TypeSignature::Exact(vec![DataType::UInt32, DataType::Utf8]),
],
Volatility::Stable,
),
}
}
}

impl Function for ShobjDescriptionFunction {
fn name(&self) -> &str {
SHOBJ_DESCRIPTION_FUNCTION_NAME
}

fn return_type(&self, _: &[DataType]) -> datafusion_common::Result<DataType> {
Ok(DataType::Utf8)
}

fn signature(&self) -> &Signature {
&self.signature
}

fn invoke_with_args(
&self,
args: ScalarFunctionArgs,
) -> datafusion_common::Result<ColumnarValue> {
let num_rows = args.number_rows;
let mut builder = StringBuilder::with_capacity(num_rows, 0);
for _ in 0..num_rows {
builder.append_null();
}
Ok(ColumnarValue::Array(Arc::new(builder.finish())))
}
}
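
// --- Illustrative sketch (not part of the original patch) -------------------------------
// obj_description, col_description and shobj_description above all answer every row with a
// NULL string for PostgreSQL compatibility. A shared helper like the sketch below would
// express that once and let each `invoke_with_args` delegate to it; the name
// `all_null_utf8_rows` is illustrative and not part of the patch.
#[allow(dead_code)]
fn all_null_utf8_rows(num_rows: usize) -> ColumnarValue {
    let mut builder = StringBuilder::with_capacity(num_rows, 0);
    for _ in 0..num_rows {
        builder.append_null();
    }
    ColumnarValue::Array(Arc::new(builder.finish()))
}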

/// PostgreSQL pg_my_temp_schema - returns 0 (no temp schema) for compatibility
impl Function for PgMyTempSchemaFunction {
fn name(&self) -> &str {
PG_MY_TEMP_SCHEMA_FUNCTION_NAME
}

fn return_type(&self, _: &[DataType]) -> datafusion_common::Result<DataType> {
Ok(DataType::UInt32)
}

fn signature(&self) -> &Signature {
&self.signature
}

fn invoke_with_args(
&self,
_args: ScalarFunctionArgs,
) -> datafusion_common::Result<ColumnarValue> {
Ok(ColumnarValue::Scalar(ScalarValue::UInt32(Some(0))))
}
}

pub(super) struct PGCatalogFunction;

impl PGCatalogFunction {
@@ -183,7 +354,6 @@ impl PGCatalogFunction {
let static_tables =
Arc::new(PgCatalogStaticTables::try_new().expect("load postgres static tables"));

registry.register_scalar(PGVersionFunction::default());
registry.register_scalar(CurrentSchemaFunction::default());
registry.register_scalar(CurrentSchemasFunction::new());
registry.register_scalar(SessionUserFunction::default());
@@ -216,5 +386,100 @@ impl PGCatalogFunction {
registry.register(pg_catalog::create_pg_total_relation_size_udf());
registry.register(pg_catalog::create_pg_stat_get_numscans());
registry.register(pg_catalog::create_pg_get_constraintdef());
registry.register(pg_catalog::create_pg_get_partition_ancestors_udf());
registry.register(pg_catalog::quote_ident_udf::create_quote_ident_udf());
registry.register(pg_catalog::quote_ident_udf::create_parse_ident_udf());
registry.register_scalar(ObjDescriptionFunction::new());
registry.register_scalar(ColDescriptionFunction::new());
registry.register_scalar(ShobjDescriptionFunction::new());
registry.register_scalar(PgMyTempSchemaFunction::default());
}
}

#[cfg(test)]
mod tests {
use std::sync::Arc;

use arrow_schema::Field;
use datafusion::arrow::array::Array;
use datafusion_common::ScalarValue;
use datafusion_expr::ColumnarValue;

use super::*;

fn create_test_args(args: Vec<ColumnarValue>, number_rows: usize) -> ScalarFunctionArgs {
ScalarFunctionArgs {
args,
arg_fields: vec![],
number_rows,
return_field: Arc::new(Field::new("result", DataType::Utf8, true)),
config_options: Arc::new(Default::default()),
}
}

#[test]
fn test_obj_description_function() {
let func = ObjDescriptionFunction::new();
assert_eq!("obj_description", func.name());
assert_eq!(DataType::Utf8, func.return_type(&[]).unwrap());

let args = create_test_args(
vec![
ColumnarValue::Scalar(ScalarValue::Int64(Some(1234))),
ColumnarValue::Scalar(ScalarValue::Utf8(Some("pg_class".to_string()))),
],
1,
);
let result = func.invoke_with_args(args).unwrap();
if let ColumnarValue::Array(arr) = result {
assert_eq!(1, arr.len());
assert!(arr.is_null(0));
} else {
panic!("Expected Array result");
}
}

#[test]
fn test_col_description_function() {
let func = ColDescriptionFunction::new();
assert_eq!("col_description", func.name());
assert_eq!(DataType::Utf8, func.return_type(&[]).unwrap());

let args = create_test_args(
vec![
ColumnarValue::Scalar(ScalarValue::Int64(Some(1234))),
ColumnarValue::Scalar(ScalarValue::Int64(Some(1))),
],
1,
);
let result = func.invoke_with_args(args).unwrap();
if let ColumnarValue::Array(arr) = result {
assert_eq!(1, arr.len());
assert!(arr.is_null(0));
} else {
panic!("Expected Array result");
}
}

#[test]
fn test_shobj_description_function() {
let func = ShobjDescriptionFunction::new();
assert_eq!("shobj_description", func.name());
assert_eq!(DataType::Utf8, func.return_type(&[]).unwrap());

let args = create_test_args(
vec![
ColumnarValue::Scalar(ScalarValue::Int64(Some(1))),
ColumnarValue::Scalar(ScalarValue::Utf8(Some("pg_database".to_string()))),
],
1,
);
let result = func.invoke_with_args(args).unwrap();
if let ColumnarValue::Array(arr) = result {
assert_eq!(1, arr.len());
assert!(arr.is_null(0));
} else {
panic!("Expected Array result");
}
}
}

@@ -1,61 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::fmt;

use datafusion::arrow::datatypes::DataType;
use datafusion_common::ScalarValue;
use datafusion_expr::{ColumnarValue, ScalarFunctionArgs, Signature, Volatility};

use crate::function::Function;

#[derive(Clone, Debug)]
pub(crate) struct PGVersionFunction {
signature: Signature,
}

impl Default for PGVersionFunction {
fn default() -> Self {
Self {
signature: Signature::exact(vec![], Volatility::Immutable),
}
}
}

impl fmt::Display for PGVersionFunction {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "pg_catalog.VERSION")
}
}

impl Function for PGVersionFunction {
fn name(&self) -> &str {
"pg_catalog.version"
}

fn return_type(&self, _: &[DataType]) -> datafusion_common::Result<DataType> {
Ok(DataType::Utf8View)
}

fn signature(&self) -> &Signature {
&self.signature
}

fn invoke_with_args(&self, _: ScalarFunctionArgs) -> datafusion_common::Result<ColumnarValue> {
Ok(ColumnarValue::Scalar(ScalarValue::Utf8View(Some(format!(
"PostgreSQL 16.3 GreptimeDB {}",
common_version::version()
)))))
}
}
Some files were not shown because too many files have changed in this diff.