Compare commits


1 commit

Author: discord9
SHA1: 13582c9efb
Message: bytes trace
Signed-off-by: discord9 <discord9@163.com>
Date: 2025-11-04 11:19:07 +08:00
635 changed files with 9558 additions and 40738 deletions

.github/CODEOWNERS (vendored): 22 changed lines

@@ -5,23 +5,23 @@
* @GreptimeTeam/db-approver
## [Module] Database Engine
/src/index @evenyag @discord9 @WenyXu
/src/index @zhongzc
/src/mito2 @evenyag @v0y4g3r @waynexia
/src/query @evenyag @waynexia @discord9
/src/query @evenyag
## [Module] Distributed
/src/common/meta @MichaelScofield @WenyXu
/src/common/procedure @MichaelScofield @WenyXu
/src/meta-client @MichaelScofield @WenyXu
/src/meta-srv @MichaelScofield @WenyXu
/src/common/meta @MichaelScofield
/src/common/procedure @MichaelScofield
/src/meta-client @MichaelScofield
/src/meta-srv @MichaelScofield
## [Module] Write Ahead Log
/src/log-store @v0y4g3r @WenyXu
/src/store-api @v0y4g3r @evenyag
/src/log-store @v0y4g3r
/src/store-api @v0y4g3r
## [Module] Metrics Engine
/src/metric-engine @waynexia @WenyXu
/src/promql @waynexia @evenyag @discord9
/src/metric-engine @waynexia
/src/promql @waynexia
## [Module] Flow
/src/flow @discord9 @waynexia
/src/flow @zhongzc @waynexia


@@ -32,23 +32,9 @@ inputs:
description: Image Registry
required: false
default: 'docker.io'
large-page-size:
description: Build GreptimeDB with large page size (65536).
required: false
default: 'false'
runs:
using: composite
steps:
- name: Set extra build environment variables
shell: bash
run: |
if [[ '${{ inputs.large-page-size }}' == 'true' ]]; then
echo 'EXTRA_BUILD_ENVS="JEMALLOC_SYS_WITH_LG_PAGE=16"' >> $GITHUB_ENV
else
echo 'EXTRA_BUILD_ENVS=' >> $GITHUB_ENV
fi
- name: Build greptime binary
shell: bash
if: ${{ inputs.build-android-artifacts == 'false' }}
@@ -59,8 +45,7 @@ runs:
FEATURES=${{ inputs.features }} \
BASE_IMAGE=${{ inputs.base-image }} \
IMAGE_NAMESPACE=${{ inputs.image-namespace }} \
IMAGE_REGISTRY=${{ inputs.image-registry }} \
EXTRA_BUILD_ENVS=$EXTRA_BUILD_ENVS
IMAGE_REGISTRY=${{ inputs.image-registry }}
- name: Upload artifacts
uses: ./.github/actions/upload-artifacts
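For context on the `large-page-size` plumbing removed above: when the input was `'true'`, the deleted step exported `JEMALLOC_SYS_WITH_LG_PAGE=16` (jemalloc built with 2^16 = 65536-byte pages) through `EXTRA_BUILD_ENVS`. A minimal standalone sketch of that mapping, with `LARGE_PAGE_SIZE` as an illustrative stand-in for the action input:

```shell
# Sketch only: mirrors the removed composite-action step.
# LARGE_PAGE_SIZE is a placeholder variable, not an input of the remaining action.
if [[ "${LARGE_PAGE_SIZE:-false}" == "true" ]]; then
  echo 'EXTRA_BUILD_ENVS="JEMALLOC_SYS_WITH_LG_PAGE=16"' >> "$GITHUB_ENV"
else
  echo 'EXTRA_BUILD_ENVS=' >> "$GITHUB_ENV"
fi
```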


@@ -27,10 +27,6 @@ inputs:
description: Working directory to build the artifacts
required: false
default: .
large-page-size:
description: Build GreptimeDB with large page size (65536).
required: false
default: 'false'
runs:
using: composite
steps:
@@ -63,7 +59,6 @@ runs:
working-dir: ${{ inputs.working-dir }}
image-registry: ${{ inputs.image-registry }}
image-namespace: ${{ inputs.image-namespace }}
large-page-size: ${{ inputs.large-page-size }}
- name: Clean up the target directory # Clean up the target directory for the centos7 base image, or it will still use the objects of last build.
shell: bash
@@ -82,7 +77,6 @@ runs:
working-dir: ${{ inputs.working-dir }}
image-registry: ${{ inputs.image-registry }}
image-namespace: ${{ inputs.image-namespace }}
large-page-size: ${{ inputs.large-page-size }}
- name: Build greptime on android base image
uses: ./.github/actions/build-greptime-binary
@@ -95,4 +89,3 @@ runs:
build-android-artifacts: true
image-registry: ${{ inputs.image-registry }}
image-namespace: ${{ inputs.image-namespace }}
large-page-size: ${{ inputs.large-page-size }}


@@ -39,11 +39,8 @@ update_helm_charts_version() {
--body "This PR updates the GreptimeDB version." \
--base main \
--head $BRANCH_NAME \
--reviewer sunng87 \
--reviewer daviderli614 \
--reviewer killme2008 \
--reviewer evenyag \
--reviewer fengjiachun
--reviewer zyy17 \
--reviewer daviderli614
}
update_helm_charts_version


@@ -35,11 +35,8 @@ update_homebrew_greptime_version() {
--body "This PR updates the GreptimeDB version." \
--base main \
--head $BRANCH_NAME \
--reviewer sunng87 \
--reviewer daviderli614 \
--reviewer killme2008 \
--reviewer evenyag \
--reviewer fengjiachun
--reviewer zyy17 \
--reviewer daviderli614
}
update_homebrew_greptime_version


@@ -4,11 +4,10 @@ name: GreptimeDB Development Build
on:
workflow_dispatch: # Allows you to run this workflow manually.
inputs:
large-page-size:
description: Build GreptimeDB with large page size (65536).
type: boolean
repository:
description: The public repository to build
required: false
default: false
default: GreptimeTeam/greptimedb
commit: # Note: We only pull the source code and use the current workflow to build the artifacts.
description: The commit to build
required: true
@@ -182,7 +181,6 @@ jobs:
working-dir: ${{ env.CHECKOUT_GREPTIMEDB_PATH }}
image-registry: ${{ vars.ECR_IMAGE_REGISTRY }}
image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }}
large-page-size: ${{ inputs.large-page-size }}
build-linux-arm64-artifacts:
name: Build linux-arm64 artifacts
@@ -216,7 +214,6 @@ jobs:
working-dir: ${{ env.CHECKOUT_GREPTIMEDB_PATH }}
image-registry: ${{ vars.ECR_IMAGE_REGISTRY }}
image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }}
large-page-size: ${{ inputs.large-page-size }}
release-images-to-dockerhub:
name: Build and push images to DockerHub


@@ -613,9 +613,6 @@ jobs:
- name: "MySQL Kvbackend"
opts: "--setup-mysql"
kafka: false
- name: "Flat format"
opts: "--enable-flat-format"
kafka: false
timeout-minutes: 60
steps:
- uses: actions/checkout@v4
@@ -811,7 +808,7 @@ jobs:
- name: Setup external services
working-directory: tests-integration/fixtures
run: ../../.github/scripts/pull-test-deps-images.sh && docker compose up -d --wait
- name: Run nextest cases
run: cargo llvm-cov nextest --workspace --lcov --output-path lcov.info -F dashboard -F pg_kvbackend -F mysql_kvbackend
env:


@@ -92,6 +92,5 @@ jobs:
mode:
- name: "Basic"
- name: "Remote WAL"
- name: "Flat format"
steps:
- run: 'echo "No action required"'


@@ -1,57 +0,0 @@
name: Multi-language Integration Tests
on:
push:
branches:
- main
workflow_dispatch:
concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
build-greptimedb:
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
name: Build GreptimeDB binary
runs-on: ubuntu-latest
timeout-minutes: 60
steps:
- uses: actions/checkout@v4
with:
persist-credentials: false
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1
- uses: Swatinem/rust-cache@v2
with:
shared-key: "multi-lang-build"
cache-all-crates: "true"
save-if: ${{ github.ref == 'refs/heads/main' }}
- name: Install cargo-gc-bin
shell: bash
run: cargo install cargo-gc-bin --force
- name: Build greptime binary
shell: bash
run: cargo gc -- --bin greptime --features "pg_kvbackend,mysql_kvbackend"
- name: Pack greptime binary
shell: bash
run: |
mkdir bin && \
mv ./target/debug/greptime bin
- name: Print greptime binary info
run: ls -lh bin
- name: Upload greptime binary
uses: actions/upload-artifact@v4
with:
name: greptime-bin
path: bin/
retention-days: 1
run-multi-lang-tests:
name: Run Multi-language SDK Tests
needs: build-greptimedb
uses: ./.github/workflows/run-multi-lang-tests.yml
with:
artifact-name: greptime-bin


@@ -174,18 +174,6 @@ jobs:
image-registry: ${{ vars.ECR_IMAGE_REGISTRY }}
image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }}
run-multi-lang-tests:
name: Run Multi-language SDK Tests
if: ${{ inputs.build_linux_amd64_artifacts || github.event_name == 'schedule' }}
needs: [
allocate-runners,
build-linux-amd64-artifacts,
]
uses: ./.github/workflows/run-multi-lang-tests.yml
with:
artifact-name: greptime-linux-amd64-${{ needs.allocate-runners.outputs.version }}
artifact-is-tarball: true
release-images-to-dockerhub:
name: Build and push images to DockerHub
if: ${{ inputs.release_images || github.event_name == 'schedule' }}
@@ -313,8 +301,7 @@ jobs:
if: ${{ github.repository == 'GreptimeTeam/greptimedb' && always() }} # Not requiring successful dependent jobs, always run.
name: Send notification to Greptime team
needs: [
release-images-to-dockerhub,
run-multi-lang-tests,
release-images-to-dockerhub
]
runs-on: ubuntu-latest
permissions:
@@ -332,17 +319,17 @@ jobs:
run: pnpm tsx bin/report-ci-failure.ts
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
CI_REPORT_STATUS: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result == 'success' && (needs.run-multi-lang-tests.result == 'success' || needs.run-multi-lang-tests.result == 'skipped') }}
CI_REPORT_STATUS: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result == 'success' }}
- name: Notify nightly build successful result
uses: slackapi/slack-github-action@v1.23.0
if: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result == 'success' && (needs.run-multi-lang-tests.result == 'success' || needs.run-multi-lang-tests.result == 'skipped') }}
if: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result == 'success' }}
with:
payload: |
{"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has completed successfully."}
- name: Notify nightly build failed result
uses: slackapi/slack-github-action@v1.23.0
if: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result != 'success' || needs.run-multi-lang-tests.result == 'failure' }}
if: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result != 'success' }}
with:
payload: |
{"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has failed, please check ${{ steps.report-ci-status.outputs.html_url }}."}


@@ -215,18 +215,6 @@ jobs:
image-registry: ${{ vars.ECR_IMAGE_REGISTRY }}
image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }}
run-multi-lang-tests:
name: Run Multi-language SDK Tests
if: ${{ inputs.build_linux_amd64_artifacts || github.event_name == 'push' || github.event_name == 'schedule' }}
needs: [
allocate-runners,
build-linux-amd64-artifacts,
]
uses: ./.github/workflows/run-multi-lang-tests.yml
with:
artifact-name: greptime-linux-amd64-${{ needs.allocate-runners.outputs.version }}
artifact-is-tarball: true
build-macos-artifacts:
name: Build macOS artifacts
strategy:
@@ -315,7 +303,6 @@ jobs:
allocate-runners,
build-linux-amd64-artifacts,
build-linux-arm64-artifacts,
run-multi-lang-tests,
]
runs-on: ubuntu-latest
outputs:
@@ -394,7 +381,6 @@ jobs:
build-macos-artifacts,
build-windows-artifacts,
release-images-to-dockerhub,
run-multi-lang-tests,
]
runs-on: ubuntu-latest
steps:


@@ -1,194 +0,0 @@
# Reusable workflow for running multi-language SDK tests against GreptimeDB
# Used by: multi-lang-tests.yml, release.yml, nightly-build.yml
# Supports both direct binary artifacts and tarball artifacts
name: Run Multi-language SDK Tests
on:
workflow_call:
inputs:
artifact-name:
required: true
type: string
description: 'Name of the artifact containing greptime binary'
http-port:
required: false
type: string
default: '4000'
description: 'HTTP server port'
mysql-port:
required: false
type: string
default: '4002'
description: 'MySQL server port'
postgres-port:
required: false
type: string
default: '4003'
description: 'PostgreSQL server port'
db-name:
required: false
type: string
default: 'test_db'
description: 'Test database name'
username:
required: false
type: string
default: 'greptime_user'
description: 'Authentication username'
password:
required: false
type: string
default: 'greptime_pwd'
description: 'Authentication password'
timeout-minutes:
required: false
type: number
default: 30
description: 'Job timeout in minutes'
artifact-is-tarball:
required: false
type: boolean
default: false
description: 'Whether the artifact is a tarball (tar.gz) that needs to be extracted'
jobs:
run-tests:
name: Run Multi-language SDK Tests
runs-on: ubuntu-latest
timeout-minutes: ${{ inputs.timeout-minutes }}
steps:
- name: Checkout greptimedb-tests repository
uses: actions/checkout@v4
with:
repository: GreptimeTeam/greptimedb-tests
persist-credentials: false
- name: Download pre-built greptime binary
uses: actions/download-artifact@v4
with:
name: ${{ inputs.artifact-name }}
path: artifact
- name: Setup greptime binary
run: |
mkdir -p bin
if [ "${{ inputs.artifact-is-tarball }}" = "true" ]; then
# Extract tarball and find greptime binary
tar -xzf artifact/*.tar.gz -C artifact
find artifact -name "greptime" -type f -exec cp {} bin/greptime \;
else
# Direct binary format
if [ -f artifact/greptime ]; then
cp artifact/greptime bin/greptime
else
cp artifact/* bin/greptime
fi
fi
chmod +x ./bin/greptime
ls -lh ./bin/greptime
./bin/greptime --version
- name: Setup Java 17
uses: actions/setup-java@v4
with:
distribution: 'temurin'
java-version: '17'
cache: 'maven'
- name: Setup Python 3.8
uses: actions/setup-python@v5
with:
python-version: '3.8'
- name: Setup Go 1.24
uses: actions/setup-go@v5
with:
go-version: '1.24'
cache: true
cache-dependency-path: go-tests/go.sum
- name: Set up Node.js
uses: actions/setup-node@v4
with:
node-version: '18'
- name: Install Python dependencies
run: |
pip install mysql-connector-python psycopg2-binary
python3 -c "import mysql.connector; print(f'mysql-connector-python {mysql.connector.__version__}')"
python3 -c "import psycopg2; print(f'psycopg2 {psycopg2.__version__}')"
- name: Install Go dependencies
working-directory: go-tests
run: |
go mod download
go mod verify
go version
- name: Kill existing GreptimeDB processes
run: |
pkill -f greptime || true
sleep 2
- name: Start GreptimeDB standalone
run: |
./bin/greptime standalone start \
--http-addr 0.0.0.0:${{ inputs.http-port }} \
--rpc-addr 0.0.0.0:4001 \
--mysql-addr 0.0.0.0:${{ inputs.mysql-port }} \
--postgres-addr 0.0.0.0:${{ inputs.postgres-port }} \
--user-provider=static_user_provider:cmd:${{ inputs.username }}=${{ inputs.password }} > /tmp/greptimedb.log 2>&1 &
- name: Wait for GreptimeDB to be ready
run: |
echo "Waiting for GreptimeDB..."
for i in {1..60}; do
if curl -sf http://localhost:${{ inputs.http-port }}/health > /dev/null; then
echo "✅ GreptimeDB is ready"
exit 0
fi
sleep 2
done
echo "❌ GreptimeDB failed to start"
cat /tmp/greptimedb.log
exit 1
- name: Run multi-language tests
env:
DB_NAME: ${{ inputs.db-name }}
MYSQL_HOST: 127.0.0.1
MYSQL_PORT: ${{ inputs.mysql-port }}
POSTGRES_HOST: 127.0.0.1
POSTGRES_PORT: ${{ inputs.postgres-port }}
HTTP_HOST: 127.0.0.1
HTTP_PORT: ${{ inputs.http-port }}
GREPTIME_USERNAME: ${{ inputs.username }}
GREPTIME_PASSWORD: ${{ inputs.password }}
run: |
chmod +x ./run_tests.sh
./run_tests.sh
- name: Collect logs on failure
if: failure()
run: |
echo "=== GreptimeDB Logs ==="
cat /tmp/greptimedb.log || true
- name: Upload test logs on failure
if: failure()
uses: actions/upload-artifact@v4
with:
name: test-logs
path: |
/tmp/greptimedb.log
java-tests/target/surefire-reports/
python-tests/.pytest_cache/
go-tests/*.log
**/test-output/
retention-days: 7
- name: Cleanup
if: always()
run: |
pkill -f greptime || true
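The deleted reusable workflow above started a standalone server with a static user provider and exported MySQL/PostgreSQL/HTTP endpoints to the SDK tests. As a hedged illustration of what such a test exercises, here is a minimal connectivity check using the `mysql-connector-python` dependency the workflow installed; host, port, and credentials mirror the workflow defaults, and the query itself is illustrative:

```python
# Illustration only: a smoke test in the spirit of the removed workflow.
import mysql.connector

conn = mysql.connector.connect(
    host="127.0.0.1",
    port=4002,                # mysql-port default from the workflow inputs
    user="greptime_user",     # username default
    password="greptime_pwd",  # password default
)
cur = conn.cursor()
cur.execute("SELECT 1")       # any trivial statement proves the protocol path works
print(cur.fetchall())
conn.close()
```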


@@ -2,41 +2,41 @@
## Individual Committers (in alphabetical order)
- [apdong2022](https://github.com/apdong2022)
- [beryl678](https://github.com/beryl678)
- [CookiePieWw](https://github.com/CookiePieWw)
- [etolbakov](https://github.com/etolbakov)
- [irenjj](https://github.com/irenjj)
- [KKould](https://github.com/KKould)
- [Lanqing Yang](https://github.com/lyang24)
- [nicecui](https://github.com/nicecui)
- [NiwakaDev](https://github.com/NiwakaDev)
- [paomian](https://github.com/paomian)
- [tisonkun](https://github.com/tisonkun)
- [Wenjie0329](https://github.com/Wenjie0329)
- [zhaoyingnan01](https://github.com/zhaoyingnan01)
- [zhongzc](https://github.com/zhongzc)
- [ZonaHex](https://github.com/ZonaHex)
- [zyy17](https://github.com/zyy17)
* [CookiePieWw](https://github.com/CookiePieWw)
* [etolbakov](https://github.com/etolbakov)
* [irenjj](https://github.com/irenjj)
* [KKould](https://github.com/KKould)
* [Lanqing Yang](https://github.com/lyang24)
* [NiwakaDev](https://github.com/NiwakaDev)
* [tisonkun](https://github.com/tisonkun)
## Team Members (in alphabetical order)
- [daviderli614](https://github.com/daviderli614)
- [discord9](https://github.com/discord9)
- [evenyag](https://github.com/evenyag)
- [fengjiachun](https://github.com/fengjiachun)
- [fengys1996](https://github.com/fengys1996)
- [GrepTime](https://github.com/GrepTime)
- [holalengyu](https://github.com/holalengyu)
- [killme2008](https://github.com/killme2008)
- [MichaelScofield](https://github.com/MichaelScofield)
- [shuiyisong](https://github.com/shuiyisong)
- [sunchanglong](https://github.com/sunchanglong)
- [sunng87](https://github.com/sunng87)
- [v0y4g3r](https://github.com/v0y4g3r)
- [waynexia](https://github.com/waynexia)
- [WenyXu](https://github.com/WenyXu)
- [xtang](https://github.com/xtang)
* [apdong2022](https://github.com/apdong2022)
* [beryl678](https://github.com/beryl678)
* [daviderli614](https://github.com/daviderli614)
* [discord9](https://github.com/discord9)
* [evenyag](https://github.com/evenyag)
* [fengjiachun](https://github.com/fengjiachun)
* [fengys1996](https://github.com/fengys1996)
* [GrepTime](https://github.com/GrepTime)
* [holalengyu](https://github.com/holalengyu)
* [killme2008](https://github.com/killme2008)
* [MichaelScofield](https://github.com/MichaelScofield)
* [nicecui](https://github.com/nicecui)
* [paomian](https://github.com/paomian)
* [shuiyisong](https://github.com/shuiyisong)
* [sunchanglong](https://github.com/sunchanglong)
* [sunng87](https://github.com/sunng87)
* [v0y4g3r](https://github.com/v0y4g3r)
* [waynexia](https://github.com/waynexia)
* [Wenjie0329](https://github.com/Wenjie0329)
* [WenyXu](https://github.com/WenyXu)
* [xtang](https://github.com/xtang)
* [zhaoyingnan01](https://github.com/zhaoyingnan01)
* [zhongzc](https://github.com/zhongzc)
* [ZonaHex](https://github.com/ZonaHex)
* [zyy17](https://github.com/zyy17)
## All Contributors

Cargo.lock (generated): 301 changed lines

@@ -212,9 +212,8 @@ checksum = "d301b3b94cb4b2f23d7917810addbbaff90738e0ca2be692bd027e70d7e0330c"
[[package]]
name = "api"
version = "1.0.0-beta.2"
version = "0.18.0"
dependencies = [
"arrow-schema",
"common-base",
"common-decimal",
"common-error",
@@ -733,7 +732,7 @@ dependencies = [
[[package]]
name = "auth"
version = "1.0.0-beta.2"
version = "0.18.0"
dependencies = [
"api",
"async-trait",
@@ -1337,9 +1336,13 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b"
[[package]]
name = "bytes"
version = "1.10.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d71b6127be86fdcfddb610f7182ac57211d4b18a3e9c82eb2d17662f2227ad6a"
source = "git+https://github.com/discord9/bytes?rev=1572ab22c3cbad0e9b6681d1f68eca4139322a2a#1572ab22c3cbad0e9b6681d1f68eca4139322a2a"
dependencies = [
"backtrace",
"crossbeam-channel",
"inferno 0.12.2",
"papaya",
"quanta",
"serde",
]
@@ -1383,7 +1386,7 @@ dependencies = [
[[package]]
name = "cache"
version = "1.0.0-beta.2"
version = "0.18.0"
dependencies = [
"catalog",
"common-error",
@@ -1418,7 +1421,7 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5"
[[package]]
name = "catalog"
version = "1.0.0-beta.2"
version = "0.18.0"
dependencies = [
"api",
"arrow",
@@ -1630,7 +1633,6 @@ dependencies = [
"chrono",
"chrono-tz-build",
"phf 0.11.3",
"uncased",
]
[[package]]
@@ -1641,8 +1643,6 @@ checksum = "8f10f8c9340e31fc120ff885fcdb54a0b48e474bbd77cab557f0c30a3e569402"
dependencies = [
"parse-zoneinfo",
"phf_codegen 0.11.3",
"phf_shared 0.11.3",
"uncased",
]
[[package]]
@@ -1763,7 +1763,7 @@ checksum = "b94f61472cee1439c0b966b47e3aca9ae07e45d070759512cd390ea2bebc6675"
[[package]]
name = "cli"
version = "1.0.0-beta.2"
version = "0.18.0"
dependencies = [
"async-stream",
"async-trait",
@@ -1816,7 +1816,7 @@ dependencies = [
[[package]]
name = "client"
version = "1.0.0-beta.2"
version = "0.18.0"
dependencies = [
"api",
"arc-swap",
@@ -1848,8 +1848,8 @@ dependencies = [
"serde_json",
"snafu 0.8.6",
"store-api",
"substrait 0.18.0",
"substrait 0.37.3",
"substrait 1.0.0-beta.2",
"tokio",
"tokio-stream",
"tonic 0.13.1",
@@ -1889,7 +1889,7 @@ dependencies = [
[[package]]
name = "cmd"
version = "1.0.0-beta.2"
version = "0.18.0"
dependencies = [
"async-trait",
"auth",
@@ -2012,7 +2012,7 @@ checksum = "55b672471b4e9f9e95499ea597ff64941a309b2cdbffcc46f2cc5e2d971fd335"
[[package]]
name = "common-base"
version = "1.0.0-beta.2"
version = "0.18.0"
dependencies = [
"anymap2",
"async-trait",
@@ -2036,14 +2036,14 @@ dependencies = [
[[package]]
name = "common-catalog"
version = "1.0.0-beta.2"
version = "0.18.0"
dependencies = [
"const_format",
]
[[package]]
name = "common-config"
version = "1.0.0-beta.2"
version = "0.18.0"
dependencies = [
"common-base",
"common-error",
@@ -2067,7 +2067,7 @@ dependencies = [
[[package]]
name = "common-datasource"
version = "1.0.0-beta.2"
version = "0.18.0"
dependencies = [
"arrow",
"arrow-schema",
@@ -2102,7 +2102,7 @@ dependencies = [
[[package]]
name = "common-decimal"
version = "1.0.0-beta.2"
version = "0.18.0"
dependencies = [
"bigdecimal 0.4.8",
"common-error",
@@ -2115,7 +2115,7 @@ dependencies = [
[[package]]
name = "common-error"
version = "1.0.0-beta.2"
version = "0.18.0"
dependencies = [
"common-macro",
"http 1.3.1",
@@ -2126,7 +2126,7 @@ dependencies = [
[[package]]
name = "common-event-recorder"
version = "1.0.0-beta.2"
version = "0.18.0"
dependencies = [
"api",
"async-trait",
@@ -2148,7 +2148,7 @@ dependencies = [
[[package]]
name = "common-frontend"
version = "1.0.0-beta.2"
version = "0.18.0"
dependencies = [
"api",
"async-trait",
@@ -2170,7 +2170,7 @@ dependencies = [
[[package]]
name = "common-function"
version = "1.0.0-beta.2"
version = "0.18.0"
dependencies = [
"ahash 0.8.12",
"api",
@@ -2208,7 +2208,6 @@ dependencies = [
"hyperloglogplus",
"jsonb",
"memchr",
"mito-codec",
"nalgebra",
"num",
"num-traits",
@@ -2230,7 +2229,7 @@ dependencies = [
[[package]]
name = "common-greptimedb-telemetry"
version = "1.0.0-beta.2"
version = "0.18.0"
dependencies = [
"async-trait",
"common-runtime",
@@ -2247,7 +2246,7 @@ dependencies = [
[[package]]
name = "common-grpc"
version = "1.0.0-beta.2"
version = "0.18.0"
dependencies = [
"api",
"arrow-flight",
@@ -2266,13 +2265,11 @@ dependencies = [
"hyper 1.6.0",
"hyper-util",
"lazy_static",
"notify",
"prost 0.13.5",
"rand 0.9.1",
"serde",
"serde_json",
"snafu 0.8.6",
"tempfile",
"tokio",
"tokio-util",
"tonic 0.13.1",
@@ -2282,7 +2279,7 @@ dependencies = [
[[package]]
name = "common-grpc-expr"
version = "1.0.0-beta.2"
version = "0.18.0"
dependencies = [
"api",
"common-base",
@@ -2302,7 +2299,7 @@ dependencies = [
[[package]]
name = "common-macro"
version = "1.0.0-beta.2"
version = "0.18.0"
dependencies = [
"greptime-proto",
"once_cell",
@@ -2313,7 +2310,7 @@ dependencies = [
[[package]]
name = "common-mem-prof"
version = "1.0.0-beta.2"
version = "0.18.0"
dependencies = [
"anyhow",
"common-error",
@@ -2329,7 +2326,7 @@ dependencies = [
[[package]]
name = "common-meta"
version = "1.0.0-beta.2"
version = "0.18.0"
dependencies = [
"anymap2",
"api",
@@ -2401,7 +2398,7 @@ dependencies = [
[[package]]
name = "common-options"
version = "1.0.0-beta.2"
version = "0.18.0"
dependencies = [
"common-grpc",
"humantime-serde",
@@ -2410,11 +2407,11 @@ dependencies = [
[[package]]
name = "common-plugins"
version = "1.0.0-beta.2"
version = "0.18.0"
[[package]]
name = "common-pprof"
version = "1.0.0-beta.2"
version = "0.18.0"
dependencies = [
"common-error",
"common-macro",
@@ -2426,7 +2423,7 @@ dependencies = [
[[package]]
name = "common-procedure"
version = "1.0.0-beta.2"
version = "0.18.0"
dependencies = [
"api",
"async-stream",
@@ -2455,7 +2452,7 @@ dependencies = [
[[package]]
name = "common-procedure-test"
version = "1.0.0-beta.2"
version = "0.18.0"
dependencies = [
"async-trait",
"common-procedure",
@@ -2465,7 +2462,7 @@ dependencies = [
[[package]]
name = "common-query"
version = "1.0.0-beta.2"
version = "0.18.0"
dependencies = [
"api",
"async-trait",
@@ -2491,7 +2488,7 @@ dependencies = [
[[package]]
name = "common-recordbatch"
version = "1.0.0-beta.2"
version = "0.18.0"
dependencies = [
"arc-swap",
"common-base",
@@ -2515,7 +2512,7 @@ dependencies = [
[[package]]
name = "common-runtime"
version = "1.0.0-beta.2"
version = "0.18.0"
dependencies = [
"async-trait",
"clap 4.5.40",
@@ -2544,7 +2541,7 @@ dependencies = [
[[package]]
name = "common-session"
version = "1.0.0-beta.2"
version = "0.18.0"
dependencies = [
"serde",
"strum 0.27.1",
@@ -2552,7 +2549,7 @@ dependencies = [
[[package]]
name = "common-sql"
version = "1.0.0-beta.2"
version = "0.18.0"
dependencies = [
"common-base",
"common-decimal",
@@ -2570,7 +2567,7 @@ dependencies = [
[[package]]
name = "common-stat"
version = "1.0.0-beta.2"
version = "0.18.0"
dependencies = [
"common-base",
"common-runtime",
@@ -2585,7 +2582,7 @@ dependencies = [
[[package]]
name = "common-telemetry"
version = "1.0.0-beta.2"
version = "0.18.0"
dependencies = [
"backtrace",
"common-base",
@@ -2614,7 +2611,7 @@ dependencies = [
[[package]]
name = "common-test-util"
version = "1.0.0-beta.2"
version = "0.18.0"
dependencies = [
"client",
"common-grpc",
@@ -2627,7 +2624,7 @@ dependencies = [
[[package]]
name = "common-time"
version = "1.0.0-beta.2"
version = "0.18.0"
dependencies = [
"arrow",
"chrono",
@@ -2645,7 +2642,7 @@ dependencies = [
[[package]]
name = "common-version"
version = "1.0.0-beta.2"
version = "0.18.0"
dependencies = [
"build-data",
"cargo-manifest",
@@ -2656,7 +2653,7 @@ dependencies = [
[[package]]
name = "common-wal"
version = "1.0.0-beta.2"
version = "0.18.0"
dependencies = [
"common-base",
"common-error",
@@ -2679,7 +2676,7 @@ dependencies = [
[[package]]
name = "common-workload"
version = "1.0.0-beta.2"
version = "0.18.0"
dependencies = [
"common-telemetry",
"serde",
@@ -3741,9 +3738,9 @@ dependencies = [
[[package]]
name = "datafusion-pg-catalog"
version = "0.12.2"
version = "0.12.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "755393864c0c2dd95575ceed4b25e348686028e1b83d06f8f39914209999f821"
checksum = "15824c98ff2009c23b0398d441499b147f7c5ac0e5ee993e7a473d79040e3626"
dependencies = [
"async-trait",
"datafusion",
@@ -3916,7 +3913,7 @@ dependencies = [
[[package]]
name = "datanode"
version = "1.0.0-beta.2"
version = "0.18.0"
dependencies = [
"api",
"arrow-flight",
@@ -3980,7 +3977,7 @@ dependencies = [
[[package]]
name = "datatypes"
version = "1.0.0-beta.2"
version = "0.18.0"
dependencies = [
"arrow",
"arrow-array",
@@ -4652,7 +4649,7 @@ checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be"
[[package]]
name = "file-engine"
version = "1.0.0-beta.2"
version = "0.18.0"
dependencies = [
"api",
"async-trait",
@@ -4784,7 +4781,7 @@ checksum = "8bf7cc16383c4b8d58b9905a8509f02926ce3058053c056376248d958c9df1e8"
[[package]]
name = "flow"
version = "1.0.0-beta.2"
version = "0.18.0"
dependencies = [
"api",
"arrow",
@@ -4853,7 +4850,7 @@ dependencies = [
"sql",
"store-api",
"strum 0.27.1",
"substrait 1.0.0-beta.2",
"substrait 0.18.0",
"table",
"tokio",
"tonic 0.13.1",
@@ -4908,7 +4905,7 @@ checksum = "28dd6caf6059519a65843af8fe2a3ae298b14b80179855aeb4adc2c1934ee619"
[[package]]
name = "frontend"
version = "1.0.0-beta.2"
version = "0.18.0"
dependencies = [
"api",
"arc-swap",
@@ -5351,7 +5348,7 @@ dependencies = [
[[package]]
name = "greptime-proto"
version = "0.1.0"
source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=0df99f09f1d6785055b2d9da96fc4ecc2bdf6803#0df99f09f1d6785055b2d9da96fc4ecc2bdf6803"
source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=14b9dc40bdc8288742b0cefc7bb024303b7429ef#14b9dc40bdc8288742b0cefc7bb024303b7429ef"
dependencies = [
"prost 0.13.5",
"prost-types 0.13.5",
@@ -6119,7 +6116,7 @@ dependencies = [
[[package]]
name = "index"
version = "1.0.0-beta.2"
version = "0.18.0"
dependencies = [
"async-trait",
"asynchronous-codec",
@@ -7048,7 +7045,7 @@ checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94"
[[package]]
name = "log-query"
version = "1.0.0-beta.2"
version = "0.18.0"
dependencies = [
"chrono",
"common-error",
@@ -7060,7 +7057,7 @@ dependencies = [
[[package]]
name = "log-store"
version = "1.0.0-beta.2"
version = "0.18.0"
dependencies = [
"async-stream",
"async-trait",
@@ -7367,7 +7364,7 @@ dependencies = [
[[package]]
name = "meta-client"
version = "1.0.0-beta.2"
version = "0.18.0"
dependencies = [
"api",
"async-trait",
@@ -7395,7 +7392,7 @@ dependencies = [
[[package]]
name = "meta-srv"
version = "1.0.0-beta.2"
version = "0.18.0"
dependencies = [
"api",
"async-trait",
@@ -7443,9 +7440,7 @@ dependencies = [
"lazy_static",
"local-ip-address",
"once_cell",
"ordered-float 4.6.0",
"parking_lot 0.12.4",
"partition",
"prometheus",
"prost 0.13.5",
"rand 0.9.1",
@@ -7495,7 +7490,7 @@ dependencies = [
[[package]]
name = "metric-engine"
version = "1.0.0-beta.2"
version = "0.18.0"
dependencies = [
"api",
"aquamarine",
@@ -7513,12 +7508,9 @@ dependencies = [
"common-telemetry",
"common-test-util",
"common-time",
"common-wal",
"criterion 0.4.0",
"datafusion",
"datatypes",
"futures-util",
"fxhash",
"humantime-serde",
"itertools 0.14.0",
"lazy_static",
@@ -7592,7 +7584,7 @@ dependencies = [
[[package]]
name = "mito-codec"
version = "1.0.0-beta.2"
version = "0.18.0"
dependencies = [
"api",
"bytes",
@@ -7617,7 +7609,7 @@ dependencies = [
[[package]]
name = "mito2"
version = "1.0.0-beta.2"
version = "0.18.0"
dependencies = [
"api",
"aquamarine",
@@ -8355,7 +8347,7 @@ dependencies = [
[[package]]
name = "object-store"
version = "1.0.0-beta.2"
version = "0.18.0"
dependencies = [
"anyhow",
"bytes",
@@ -8364,7 +8356,6 @@ dependencies = [
"common-macro",
"common-telemetry",
"common-test-util",
"derive_builder 0.20.2",
"futures",
"humantime-serde",
"lazy_static",
@@ -8535,7 +8526,7 @@ dependencies = [
[[package]]
name = "opensrv-mysql"
version = "0.8.0"
source = "git+https://github.com/datafuselabs/opensrv?tag=v0.10.0#074bd8fb81da3c9e6d6a098a482f3380478b9c0b"
source = "git+https://github.com/datafuselabs/opensrv?rev=a1fb4da215c8693c7e4f62be249a01b7fec52997#a1fb4da215c8693c7e4f62be249a01b7fec52997"
dependencies = [
"async-trait",
"byteorder",
@@ -8641,7 +8632,7 @@ dependencies = [
[[package]]
name = "operator"
version = "1.0.0-beta.2"
version = "0.18.0"
dependencies = [
"ahash 0.8.12",
"api",
@@ -8667,7 +8658,6 @@ dependencies = [
"common-recordbatch",
"common-runtime",
"common-sql",
"common-stat",
"common-telemetry",
"common-test-util",
"common-time",
@@ -8679,7 +8669,6 @@ dependencies = [
"futures",
"futures-util",
"humantime",
"itertools 0.14.0",
"jsonb",
"lazy_static",
"meta-client",
@@ -8701,7 +8690,7 @@ dependencies = [
"sql",
"sqlparser",
"store-api",
"substrait 1.0.0-beta.2",
"substrait 0.18.0",
"table",
"tokio",
"tokio-util",
@@ -8877,6 +8866,16 @@ dependencies = [
"unicode-width 0.1.14",
]
[[package]]
name = "papaya"
version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f92dd0b07c53a0a0c764db2ace8c541dc47320dad97c2200c2a637ab9dd2328f"
dependencies = [
"equivalent",
"seize",
]
[[package]]
name = "parking"
version = "2.2.1"
@@ -8987,7 +8986,7 @@ dependencies = [
[[package]]
name = "partition"
version = "1.0.0-beta.2"
version = "0.18.0"
dependencies = [
"api",
"async-trait",
@@ -9190,22 +9189,11 @@ dependencies = [
"serde",
]
[[package]]
name = "pg_interval"
version = "0.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fe46640b465e284b048ef065cbed8ef17a622878d310c724578396b4cfd00df2"
dependencies = [
"bytes",
"chrono",
"postgres-types",
]
[[package]]
name = "pgwire"
version = "0.36.3"
version = "0.34.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "70a2bcdcc4b20a88e0648778ecf00415bbd5b447742275439c22176835056f99"
checksum = "4f56a81b4fcc69016028f657a68f9b8e8a2a4b7d07684ca3298f2d3e7ff199ce"
dependencies = [
"async-trait",
"base64 0.22.1",
@@ -9221,7 +9209,6 @@ dependencies = [
"ring",
"rust_decimal",
"rustls-pki-types",
"ryu",
"serde",
"serde_json",
"stringprep",
@@ -9298,7 +9285,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "67eabc2ef2a60eb7faa00097bd1ffdb5bd28e62bf39990626a582201b7a754e5"
dependencies = [
"siphasher",
"uncased",
]
[[package]]
@@ -9344,7 +9330,7 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184"
[[package]]
name = "pipeline"
version = "1.0.0-beta.2"
version = "0.18.0"
dependencies = [
"ahash 0.8.12",
"api",
@@ -9500,10 +9486,9 @@ dependencies = [
[[package]]
name = "plugins"
version = "1.0.0-beta.2"
version = "0.18.0"
dependencies = [
"auth",
"catalog",
"clap 4.5.40",
"cli",
"common-base",
@@ -9512,7 +9497,6 @@ dependencies = [
"datanode",
"flow",
"frontend",
"meta-client",
"meta-srv",
"serde",
"snafu 0.8.6",
@@ -9802,7 +9786,7 @@ dependencies = [
[[package]]
name = "promql"
version = "1.0.0-beta.2"
version = "0.18.0"
dependencies = [
"ahash 0.8.12",
"async-trait",
@@ -10085,7 +10069,7 @@ dependencies = [
[[package]]
name = "puffin"
version = "1.0.0-beta.2"
version = "0.18.0"
dependencies = [
"async-compression 0.4.19",
"async-trait",
@@ -10125,9 +10109,24 @@ dependencies = [
"variadics",
]
[[package]]
name = "quanta"
version = "0.12.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f3ab5a9d756f0d97bdc89019bd2e4ea098cf9cde50ee7564dde6b81ccc8f06c7"
dependencies = [
"crossbeam-utils",
"libc",
"once_cell",
"raw-cpuid",
"wasi 0.11.1+wasi-snapshot-preview1",
"web-sys",
"winapi",
]
[[package]]
name = "query"
version = "1.0.0-beta.2"
version = "0.18.0"
dependencies = [
"ahash 0.8.12",
"api",
@@ -10151,7 +10150,6 @@ dependencies = [
"common-query",
"common-recordbatch",
"common-runtime",
"common-stat",
"common-telemetry",
"common-time",
"datafusion",
@@ -10194,7 +10192,7 @@ dependencies = [
"sql",
"sqlparser",
"store-api",
"substrait 1.0.0-beta.2",
"substrait 0.18.0",
"table",
"tokio",
"tokio-stream",
@@ -10425,6 +10423,15 @@ dependencies = [
"thiserror 1.0.69",
]
[[package]]
name = "raw-cpuid"
version = "11.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "498cd0dc59d73224351ee52a95fee0f1a617a2eae0e7d9d720cc622c73a54186"
dependencies = [
"bitflags 2.9.1",
]
[[package]]
name = "rawpointer"
version = "0.2.1"
@@ -11365,6 +11372,16 @@ dependencies = [
"libc",
]
[[package]]
name = "seize"
version = "0.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5b55fb86dfd3a2f5f76ea78310a88f96c4ea21a3031f8d212443d56123fd0521"
dependencies = [
"libc",
"windows-sys 0.52.0",
]
[[package]]
name = "semver"
version = "1.0.26"
@@ -11530,7 +11547,7 @@ dependencies = [
[[package]]
name = "servers"
version = "1.0.0-beta.2"
version = "0.18.0"
dependencies = [
"ahash 0.8.12",
"api",
@@ -11607,7 +11624,6 @@ dependencies = [
"otel-arrow-rust",
"parking_lot 0.12.4",
"permutation",
"pg_interval",
"pgwire",
"pin-project",
"pipeline",
@@ -11649,7 +11665,6 @@ dependencies = [
"tower 0.5.2",
"tower-http 0.6.6",
"tracing",
"tracing-opentelemetry",
"urlencoding",
"uuid",
"vrl",
@@ -11658,7 +11673,7 @@ dependencies = [
[[package]]
name = "session"
version = "1.0.0-beta.2"
version = "0.18.0"
dependencies = [
"ahash 0.8.12",
"api",
@@ -11992,7 +12007,7 @@ dependencies = [
[[package]]
name = "sql"
version = "1.0.0-beta.2"
version = "0.18.0"
dependencies = [
"api",
"arrow-buffer",
@@ -12052,7 +12067,7 @@ dependencies = [
[[package]]
name = "sqlness-runner"
version = "1.0.0-beta.2"
version = "0.18.0"
dependencies = [
"async-trait",
"clap 4.5.40",
@@ -12329,7 +12344,7 @@ dependencies = [
[[package]]
name = "standalone"
version = "1.0.0-beta.2"
version = "0.18.0"
dependencies = [
"async-trait",
"catalog",
@@ -12370,7 +12385,7 @@ checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f"
[[package]]
name = "store-api"
version = "1.0.0-beta.2"
version = "0.18.0"
dependencies = [
"api",
"aquamarine",
@@ -12535,6 +12550,28 @@ dependencies = [
"winapi",
]
[[package]]
name = "substrait"
version = "0.18.0"
dependencies = [
"async-trait",
"bytes",
"common-error",
"common-function",
"common-macro",
"common-telemetry",
"datafusion",
"datafusion-common",
"datafusion-expr",
"datafusion-substrait",
"datatypes",
"promql",
"prost 0.13.5",
"snafu 0.8.6",
"substrait 0.37.3",
"tokio",
]
[[package]]
name = "substrait"
version = "0.37.3"
@@ -12581,28 +12618,6 @@ dependencies = [
"walkdir",
]
[[package]]
name = "substrait"
version = "1.0.0-beta.2"
dependencies = [
"async-trait",
"bytes",
"common-error",
"common-function",
"common-macro",
"common-telemetry",
"datafusion",
"datafusion-common",
"datafusion-expr",
"datafusion-substrait",
"datatypes",
"promql",
"prost 0.13.5",
"snafu 0.8.6",
"substrait 0.37.3",
"tokio",
]
[[package]]
name = "subtle"
version = "2.6.1"
@@ -12706,7 +12721,7 @@ dependencies = [
[[package]]
name = "table"
version = "1.0.0-beta.2"
version = "0.18.0"
dependencies = [
"api",
"async-trait",
@@ -12945,7 +12960,7 @@ dependencies = [
"getrandom 0.3.3",
"once_cell",
"rustix 1.0.7",
"windows-sys 0.61.2",
"windows-sys 0.59.0",
]
[[package]]
@@ -12975,7 +12990,7 @@ checksum = "8f50febec83f5ee1df3015341d8bd429f2d1cc62bcba7ea2076759d315084683"
[[package]]
name = "tests-fuzz"
version = "1.0.0-beta.2"
version = "0.18.0"
dependencies = [
"arbitrary",
"async-trait",
@@ -13019,7 +13034,7 @@ dependencies = [
[[package]]
name = "tests-integration"
version = "1.0.0-beta.2"
version = "0.18.0"
dependencies = [
"api",
"arrow-flight",
@@ -13069,7 +13084,6 @@ dependencies = [
"loki-proto",
"meta-client",
"meta-srv",
"mito2",
"moka",
"mysql_async",
"object-store",
@@ -13094,7 +13108,7 @@ dependencies = [
"sqlx",
"standalone",
"store-api",
"substrait 1.0.0-beta.2",
"substrait 0.18.0",
"table",
"tempfile",
"time",
@@ -14004,15 +14018,6 @@ dependencies = [
"serde",
]
[[package]]
name = "uncased"
version = "0.9.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e1b88fcfe09e89d3866a5c11019378088af2d24c3fbd4f0543f96b479ec90697"
dependencies = [
"version_check",
]
[[package]]
name = "unescaper"
version = "0.1.6"
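The lockfile changes above resolve `bytes` to discord9's fork and pull in `backtrace`, `crossbeam-channel`, `inferno`, `papaya`, and `quanta`, which matches the commit message "bytes trace": instrumenting where byte buffers are allocated. The fork's internals are not shown in this diff; the snippet below is only a hedged illustration of the general idea (accumulating byte counts per allocation-site backtrace), with every name invented for the example:

```rust
// Illustration only: not the fork's API.
use std::collections::HashMap;
use std::sync::Mutex;

use backtrace::Backtrace;

// Hypothetical registry mapping a formatted call stack to total bytes observed.
static SITES: Mutex<Option<HashMap<String, usize>>> = Mutex::new(None);

/// Record `len` bytes against the caller's current stack.
fn record_allocation(len: usize) {
    let stack = format!("{:?}", Backtrace::new());
    let mut sites = SITES.lock().unwrap();
    *sites
        .get_or_insert_with(HashMap::new)
        .entry(stack)
        .or_insert(0) += len;
}

fn main() {
    record_allocation(4096);
    let sites = SITES.lock().unwrap();
    println!(
        "tracked {} allocation site(s)",
        sites.as_ref().map_or(0, |m| m.len())
    );
}
```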

View File

@@ -74,7 +74,7 @@ members = [
resolver = "2"
[workspace.package]
version = "1.0.0-beta.2"
version = "0.18.0"
edition = "2024"
license = "Apache-2.0"
@@ -118,7 +118,7 @@ bitflags = "2.4.1"
bytemuck = "1.12"
bytes = { version = "1.7", features = ["serde"] }
chrono = { version = "0.4", features = ["serde"] }
chrono-tz = { version = "0.10.1", features = ["case-insensitive"] }
chrono-tz = "0.10.1"
clap = { version = "4.4", features = ["derive"] }
config = "0.13.0"
const_format = "0.2"
@@ -131,7 +131,7 @@ datafusion-functions = "50"
datafusion-functions-aggregate-common = "50"
datafusion-optimizer = "50"
datafusion-orc = "0.5"
datafusion-pg-catalog = "0.12.2"
datafusion-pg-catalog = "0.12.1"
datafusion-physical-expr = "50"
datafusion-physical-plan = "50"
datafusion-sql = "50"
@@ -148,7 +148,7 @@ etcd-client = { git = "https://github.com/GreptimeTeam/etcd-client", rev = "f62d
fst = "0.4.7"
futures = "0.3"
futures-util = "0.3"
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "0df99f09f1d6785055b2d9da96fc4ecc2bdf6803" }
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "14b9dc40bdc8288742b0cefc7bb024303b7429ef" }
hex = "0.4"
http = "1"
humantime = "2.1"
@@ -219,7 +219,12 @@ similar-asserts = "1.6.0"
smallvec = { version = "1", features = ["serde"] }
snafu = "0.8"
sqlparser = { version = "0.58.0", default-features = false, features = ["std", "visitor", "serde"] }
sqlx = { version = "0.8", default-features = false, features = ["any", "macros", "json", "runtime-tokio-rustls"] }
sqlx = { version = "0.8", features = [
"runtime-tokio-rustls",
"mysql",
"postgres",
"chrono",
] }
strum = { version = "0.27", features = ["derive"] }
sysinfo = "0.33"
tempfile = "3"
@@ -234,7 +239,6 @@ tower = "0.5"
tower-http = "0.6"
tracing = "0.1"
tracing-appender = "0.2"
tracing-opentelemetry = "0.31.0"
tracing-subscriber = { version = "0.3", features = ["env-filter", "json", "fmt"] }
typetag = "0.2"
uuid = { version = "1.17", features = ["serde", "v4", "fast-rng"] }
@@ -329,6 +333,7 @@ datafusion-datasource = { git = "https://github.com/GreptimeTeam/datafusion.git"
datafusion-sql = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
datafusion-substrait = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "4b519a5caa95472cc3988f5556813a583dd35af1" } # branch = "v0.58.x"
bytes = { git = "https://github.com/discord9/bytes", rev = "1572ab22c3cbad0e9b6681d1f68eca4139322a2a" }
[profile.release]
debug = 1
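The `bytes` override added above sits next to the existing datafusion and sqlparser git overrides, presumably in the workspace's `[patch.crates-io]` table, so every crate in the graph that asks crates.io for `bytes` is redirected to the pinned fork. A hedged sketch of such a patch table (only the `bytes` line is taken from the diff; the table header is assumed):

```toml
# Assumed layout: a Cargo [patch.crates-io] override redirecting the whole
# dependency graph to the traced fork of `bytes`.
[patch.crates-io]
bytes = { git = "https://github.com/discord9/bytes", rev = "1572ab22c3cbad0e9b6681d1f68eca4139322a2a" }
```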


@@ -17,8 +17,6 @@ CARGO_REGISTRY_CACHE ?= ${HOME}/.cargo/registry
ARCH := $(shell uname -m | sed 's/x86_64/amd64/' | sed 's/aarch64/arm64/')
OUTPUT_DIR := $(shell if [ "$(RELEASE)" = "true" ]; then echo "release"; elif [ ! -z "$(CARGO_PROFILE)" ]; then echo "$(CARGO_PROFILE)" ; else echo "debug"; fi)
SQLNESS_OPTS ?=
EXTRA_BUILD_ENVS ?=
ASSEMBLED_EXTRA_BUILD_ENV := $(foreach var,$(EXTRA_BUILD_ENVS),-e $(var))
# The arguments for running integration tests.
ETCD_VERSION ?= v3.5.9
@@ -85,7 +83,6 @@ build: ## Build debug version greptime.
.PHONY: build-by-dev-builder
build-by-dev-builder: ## Build greptime by dev-builder.
docker run --network=host \
${ASSEMBLED_EXTRA_BUILD_ENV} \
-v ${PWD}:/greptimedb -v ${CARGO_REGISTRY_CACHE}:/root/.cargo/registry \
-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:${DEV_BUILDER_IMAGE_TAG} \
make build \
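For reference, the removed `ASSEMBLED_EXTRA_BUILD_ENV` expansion turned each `VAR=value` entry of `EXTRA_BUILD_ENVS` into a `-e` flag on the `docker run` line, forwarding build-time environment variables into the dev-builder container. A hedged shell sketch of the same pattern, with the image name and mounts reduced to placeholders:

```shell
# Sketch only: what the removed Makefile variables did, outside of make.
EXTRA_BUILD_ENVS="JEMALLOC_SYS_WITH_LG_PAGE=16"
env_flags=""
for kv in $EXTRA_BUILD_ENVS; do
  env_flags="$env_flags -e $kv"   # each VAR=value becomes `-e VAR=value`
done
docker run --network=host $env_flags \
  -v "$PWD:/greptimedb" -w /greptimedb \
  example/dev-builder:latest \
  make build
```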


@@ -12,7 +12,8 @@
<div align="center">
<h3 align="center">
<a href="https://docs.greptime.com/user-guide/overview/">User Guide</a> |
<a href="https://greptime.com/product/cloud">GreptimeCloud</a> |
<a href="https://docs.greptime.com/">User Guide</a> |
<a href="https://greptimedb.rs/">API Docs</a> |
<a href="https://github.com/GreptimeTeam/greptimedb/issues/5446">Roadmap 2025</a>
</h4>
@@ -66,24 +67,17 @@
## Introduction
**GreptimeDB** is an open-source, cloud-native database that unifies metrics, logs, and traces, enabling real-time observability at any scale — across edge, cloud, and hybrid environments.
**GreptimeDB** is an open-source, cloud-native database purpose-built for the unified collection and analysis of observability data (metrics, logs, and traces). Whether you're operating on the edge, in the cloud, or across hybrid environments, GreptimeDB empowers real-time insights at massive scale — all in one system.
## Features
| Feature | Description |
| --------- | ----------- |
| [All-in-One Observability](https://docs.greptime.com/user-guide/concepts/why-greptimedb) | OpenTelemetry-native platform unifying metrics, logs, and traces. Query via [SQL](https://docs.greptime.com/user-guide/query-data/sql), [PromQL](https://docs.greptime.com/user-guide/query-data/promql), and [Flow](https://docs.greptime.com/user-guide/flow-computation/overview). |
| [High Performance](https://docs.greptime.com/user-guide/manage-data/data-index) | Written in Rust with [rich indexing](https://docs.greptime.com/user-guide/manage-data/data-index) (inverted, fulltext, skipping, vector), delivering sub-second responses at PB scale. |
| [Cost Efficiency](https://docs.greptime.com/user-guide/concepts/architecture) | 50x lower operational and storage costs with compute-storage separation and native object storage (S3, Azure Blob, etc.). |
| [Cloud-Native & Scalable](https://docs.greptime.com/user-guide/deployments-administration/deploy-on-kubernetes/greptimedb-operator-management) | Purpose-built for [Kubernetes](https://docs.greptime.com/user-guide/deployments-administration/deploy-on-kubernetes/greptimedb-operator-management) with unlimited cross-cloud scaling, handling hundreds of thousands of concurrent requests. |
| [Developer-Friendly](https://docs.greptime.com/user-guide/protocols/overview) | SQL/PromQL interfaces, built-in web dashboard, REST API, MySQL/PostgreSQL protocol compatibility, and native [OpenTelemetry](https://docs.greptime.com/user-guide/ingest-data/for-observability/opentelemetry/) support. |
| [Flexible Deployment](https://docs.greptime.com/user-guide/deployments-administration/overview) | Deploy anywhere from ARM-based edge devices (including [Android](https://docs.greptime.com/user-guide/deployments-administration/run-on-android)) to cloud, with unified APIs and efficient data sync. |
**Perfect for:**
- Unified observability stack replacing Prometheus + Loki + Tempo
- Large-scale metrics with high cardinality (millions to billions of time series)
- Large-scale observability platform requiring cost efficiency and scalability
- IoT and edge computing with resource and bandwidth constraints
| [Unified Observability Data](https://docs.greptime.com/user-guide/concepts/why-greptimedb) | Store metrics, logs, and traces as timestamped, contextual wide events. Query via [SQL](https://docs.greptime.com/user-guide/query-data/sql), [PromQL](https://docs.greptime.com/user-guide/query-data/promql), and [streaming](https://docs.greptime.com/user-guide/flow-computation/overview). |
| [High Performance & Cost Effective](https://docs.greptime.com/user-guide/manage-data/data-index) | Written in Rust, with a distributed query engine, [rich indexing](https://docs.greptime.com/user-guide/manage-data/data-index), and optimized columnar storage, delivering sub-second responses at PB scale. |
| [Cloud-Native Architecture](https://docs.greptime.com/user-guide/concepts/architecture) | Designed for [Kubernetes](https://docs.greptime.com/user-guide/deployments-administration/deploy-on-kubernetes/greptimedb-operator-management), with compute/storage separation, native object storage (AWS S3, Azure Blob, etc.) and seamless cross-cloud access. |
| [Developer-Friendly](https://docs.greptime.com/user-guide/protocols/overview) | Access via SQL/PromQL interfaces, REST API, MySQL/PostgreSQL protocols, and popular ingestion [protocols](https://docs.greptime.com/user-guide/protocols/overview). |
| [Flexible Deployment](https://docs.greptime.com/user-guide/deployments-administration/overview) | Deploy anywhere: edge (including ARM/[Android](https://docs.greptime.com/user-guide/deployments-administration/run-on-android)) or cloud, with unified APIs and efficient data sync. |
Learn more in [Why GreptimeDB](https://docs.greptime.com/user-guide/concepts/why-greptimedb) and [Observability 2.0 and the Database for It](https://greptime.com/blogs/2025-04-25-greptimedb-observability2-new-database).
@@ -92,10 +86,10 @@ Learn more in [Why GreptimeDB](https://docs.greptime.com/user-guide/concepts/why
| Feature | GreptimeDB | Traditional TSDB | Log Stores |
|----------------------------------|-----------------------|--------------------|-----------------|
| Data Types | Metrics, Logs, Traces | Metrics only | Logs only |
| Query Language | SQL, PromQL | Custom/PromQL | Custom/DSL |
| Query Language | SQL, PromQL, Streaming| Custom/PromQL | Custom/DSL |
| Deployment | Edge + Cloud | Cloud/On-prem | Mostly central |
| Indexing & Performance | PB-Scale, Sub-second | Varies | Varies |
| Integration | REST API, SQL, Common protocols | Varies | Varies |
| Integration | REST, SQL, Common protocols | Varies | Varies |
**Performance:**
* [GreptimeDB tops JSONBench's billion-record cold run test!](https://greptime.com/blogs/2025-03-18-jsonbench-greptimedb-performance)
@@ -105,18 +99,22 @@ Read [more benchmark reports](https://docs.greptime.com/user-guide/concepts/feat
## Architecture
GreptimeDB can run in two modes:
* **Standalone Mode** - Single binary for development and small deployments
* **Distributed Mode** - Separate components for production scale:
- Frontend: Query processing and protocol handling
- Datanode: Data storage and retrieval
- Metasrv: Metadata management and coordination
Read the [architecture](https://docs.greptime.com/contributor-guide/overview/#architecture) document. [DeepWiki](https://deepwiki.com/GreptimeTeam/greptimedb/1-overview) provides an in-depth look at GreptimeDB:
* Read the [architecture](https://docs.greptime.com/contributor-guide/overview/#architecture) document.
* [DeepWiki](https://deepwiki.com/GreptimeTeam/greptimedb/1-overview) provides an in-depth look at GreptimeDB:
<img alt="GreptimeDB System Overview" src="docs/architecture.png">
## Try GreptimeDB
### 1. [Live Demo](https://greptime.com/playground)
Experience GreptimeDB directly in your browser.
### 2. [GreptimeCloud](https://console.greptime.cloud/)
Start instantly with a free cluster.
### 3. Docker (Local Quickstart)
```shell
docker pull greptime/greptimedb
```
@@ -132,8 +130,7 @@ docker run -p 127.0.0.1:4000-4003:4000-4003 \
--postgres-addr 0.0.0.0:4003
```
Dashboard: [http://localhost:4000/dashboard](http://localhost:4000/dashboard)
Read more in the [full Install Guide](https://docs.greptime.com/getting-started/installation/overview).
[Full Install Guide](https://docs.greptime.com/getting-started/installation/overview)
**Troubleshooting:**
* Cannot connect to the database? Ensure that ports `4000`, `4001`, `4002`, and `4003` are not blocked by a firewall or used by other services.
@@ -162,26 +159,21 @@ cargo run -- standalone start
## Tools & Extensions
- **Kubernetes**: [GreptimeDB Operator](https://github.com/GrepTimeTeam/greptimedb-operator)
- **Helm Charts**: [Greptime Helm Charts](https://github.com/GreptimeTeam/helm-charts)
- **Dashboard**: [Web UI](https://github.com/GreptimeTeam/dashboard)
- **gRPC Ingester**: [Go](https://github.com/GreptimeTeam/greptimedb-ingester-go), [Java](https://github.com/GreptimeTeam/greptimedb-ingester-java), [C++](https://github.com/GreptimeTeam/greptimedb-ingester-cpp), [Erlang](https://github.com/GreptimeTeam/greptimedb-ingester-erl), [Rust](https://github.com/GreptimeTeam/greptimedb-ingester-rust)
- **Grafana Data Source**: [GreptimeDB Grafana data source plugin](https://github.com/GreptimeTeam/greptimedb-grafana-datasource)
- **Grafana Dashboard**: [Official Dashboard for monitoring](https://github.com/GreptimeTeam/greptimedb/blob/main/grafana/README.md)
- **Kubernetes:** [GreptimeDB Operator](https://github.com/GrepTimeTeam/greptimedb-operator)
- **Helm Charts:** [Greptime Helm Charts](https://github.com/GreptimeTeam/helm-charts)
- **Dashboard:** [Web UI](https://github.com/GreptimeTeam/dashboard)
- **SDKs/Ingester:** [Go](https://github.com/GreptimeTeam/greptimedb-ingester-go), [Java](https://github.com/GreptimeTeam/greptimedb-ingester-java), [C++](https://github.com/GreptimeTeam/greptimedb-ingester-cpp), [Erlang](https://github.com/GreptimeTeam/greptimedb-ingester-erl), [Rust](https://github.com/GreptimeTeam/greptimedb-ingester-rust), [JS](https://github.com/GreptimeTeam/greptimedb-ingester-js)
- **Grafana**: [Official Dashboard](https://github.com/GreptimeTeam/greptimedb/blob/main/grafana/README.md)
## Project Status
> **Status:** Beta — marching toward v1.0 GA!
> **GA (v1.0):** January 10, 2026
> **Status:** Beta.
> **GA (v1.0):** Targeted for mid 2025.
- Deployed in production by open-source projects and commercial users
- Being used in production by early adopters
- Stable, actively maintained, with regular releases ([version info](https://docs.greptime.com/nightly/reference/about-greptimedb-version))
- Suitable for evaluation and pilot deployments
GreptimeDB v1.0 represents a major milestone toward maturity — marking stable APIs, production readiness, and proven performance.
**Roadmap:** Beta1 (Nov 10) → Beta2 (Nov 24) → RC1 (Dec 8) → GA (Jan 10, 2026), please read [v1.0 highlights and release plan](https://greptime.com/blogs/2025-11-05-greptimedb-v1-highlights) for details.
For production use, we recommend using the latest stable release.
[![Star History Chart](https://api.star-history.com/svg?repos=GreptimeTeam/GreptimeDB&type=Date)](https://www.star-history.com/#GreptimeTeam/GreptimeDB&Date)
@@ -222,5 +214,5 @@ Special thanks to all contributors! See [AUTHORS.md](https://github.com/Greptime
- Uses [Apache Arrow™](https://arrow.apache.org/) (memory model)
- [Apache Parquet™](https://parquet.apache.org/) (file storage)
- [Apache DataFusion™](https://arrow.apache.org/datafusion/) (query engine)
- [Apache Arrow DataFusion™](https://arrow.apache.org/datafusion/) (query engine)
- [Apache OpenDAL™](https://opendal.apache.org/) (data access abstraction)


@@ -16,7 +16,7 @@
| `default_column_prefix` | String | Unset | The default column prefix for auto-created time index and value columns. |
| `init_regions_in_background` | Bool | `false` | Initialize all regions in the background during the startup.<br/>By default, it provides services after all regions have been initialized. |
| `init_regions_parallelism` | Integer | `16` | Parallelism of initializing regions. |
| `max_concurrent_queries` | Integer | `0` | The maximum current queries allowed to be executed. Zero means unlimited.<br/>NOTE: This setting affects scan_memory_limit's privileged tier allocation.<br/>When set, 70% of queries get privileged memory access (full scan_memory_limit).<br/>The remaining 30% get standard tier access (70% of scan_memory_limit). |
| `max_concurrent_queries` | Integer | `0` | The maximum current queries allowed to be executed. Zero means unlimited. |
| `enable_telemetry` | Bool | `true` | Enable telemetry to collect anonymous usage data. Enabled by default. |
| `max_in_flight_write_bytes` | String | Unset | The maximum in-flight write bytes. |
| `runtime` | -- | -- | The runtime options. |
@@ -104,7 +104,6 @@
| `flow.num_workers` | Integer | `0` | The number of flow worker in flownode.<br/>Not setting(or set to 0) this value will use the number of CPU cores divided by 2. |
| `query` | -- | -- | The query engine options. |
| `query.parallelism` | Integer | `0` | Parallelism of the query engine.<br/>Default to 0, which means the number of CPU cores. |
| `query.memory_pool_size` | String | `50%` | Memory pool size for query execution operators (aggregation, sorting, join).<br/>Supports absolute size (e.g., "2GB", "4GB") or percentage of system memory (e.g., "20%").<br/>Setting it to 0 disables the limit (unbounded, default behavior).<br/>When this limit is reached, queries will fail with ResourceExhausted error.<br/>NOTE: This does NOT limit memory used by table scans. |
| `storage` | -- | -- | The data storage options. |
| `storage.data_home` | String | `./greptimedb_data` | The working home directory. |
| `storage.type` | String | `File` | The storage type used to store the data.<br/>- `File`: the data is stored in the local file system.<br/>- `S3`: the data is stored in the S3 object storage.<br/>- `Gcs`: the data is stored in the Google Cloud Storage.<br/>- `Azblob`: the data is stored in the Azure Blob Storage.<br/>- `Oss`: the data is stored in the Aliyun OSS. |
@@ -152,13 +151,10 @@
| `region_engine.mito.write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}`. |
| `region_engine.mito.write_cache_size` | String | `5GiB` | Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger. |
| `region_engine.mito.write_cache_ttl` | String | Unset | TTL for write cache. |
| `region_engine.mito.preload_index_cache` | Bool | `true` | Preload index (puffin) files into cache on region open (default: true).<br/>When enabled, index files are loaded into the write cache during region initialization,<br/>which can improve query performance at the cost of longer startup times. |
| `region_engine.mito.index_cache_percent` | Integer | `20` | Percentage of write cache capacity allocated for index (puffin) files (default: 20).<br/>The remaining capacity is used for data (parquet) files.<br/>Must be between 0 and 100 (exclusive). For example, with a 5GiB write cache and 20% allocation,<br/>1GiB is reserved for index files and 4GiB for data files. |
| `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
| `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
| `region_engine.mito.max_concurrent_scan_files` | Integer | `384` | Maximum number of SST files to scan concurrently. |
| `region_engine.mito.allow_stale_entries` | Bool | `false` | Whether to allow stale WAL entries read during replay. |
| `region_engine.mito.scan_memory_limit` | String | `50%` | Memory limit for table scans across all queries.<br/>Supports absolute size (e.g., "2GB") or percentage of system memory (e.g., "20%").<br/>Setting it to 0 disables the limit.<br/>NOTE: Works with max_concurrent_queries for tiered memory allocation.<br/>- If max_concurrent_queries is set: 70% of queries get full access, 30% get 70% access.<br/>- If max_concurrent_queries is 0 (unlimited): first 20 queries get full access, rest get 70% access. |
| `region_engine.mito.min_compaction_interval` | String | `0m` | Minimum time interval between two compactions.<br/>To align with the old behavior, the default value is 0 (no restrictions). |
| `region_engine.mito.default_experimental_flat_format` | Bool | `false` | Whether to enable experimental flat format as the default format. |
| `region_engine.mito.index` | -- | -- | The options for index in Mito engine. |
@@ -192,7 +188,7 @@
| `region_engine.mito.memtable.fork_dictionary_bytes` | String | `1GiB` | Max dictionary bytes.<br/>Only available for `partition_tree` memtable. |
| `region_engine.file` | -- | -- | Enable the file engine. |
| `region_engine.metric` | -- | -- | Metric engine options. |
| `region_engine.metric.sparse_primary_key_encoding` | Bool | `true` | Whether to use sparse primary key encoding. |
| `region_engine.metric.experimental_sparse_primary_key_encoding` | Bool | `false` | Whether to enable the experimental sparse primary key encoding. |
| `logging` | -- | -- | The logging options. |
| `logging.dir` | String | `./greptimedb_data/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
@@ -210,6 +206,14 @@
| `slow_query.record_type` | String | Unset | The record type of slow queries. It can be `system_table` or `log`. |
| `slow_query.threshold` | String | Unset | The threshold of slow query. |
| `slow_query.sample_ratio` | Float | Unset | The sampling ratio of slow query log. The value should be in the range of (0, 1]. |
| `export_metrics` | -- | -- | The standalone instance can export its metrics and send them to a Prometheus-compatible service (e.g. `greptimedb`) via the remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from Prometheus scraping. |
| `export_metrics.enable` | Bool | `false` | Whether to enable exporting metrics. |
| `export_metrics.write_interval` | String | `30s` | The interval at which metrics are exported. |
| `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommended to collect the metrics generated by the instance itself.<br/>You must create the database before enabling it. |
| `export_metrics.self_import.db` | String | Unset | -- |
| `export_metrics.remote_write` | -- | -- | -- |
| `export_metrics.remote_write.url` | String | `""` | The Prometheus remote-write endpoint that the metrics are sent to. An example URL: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. |
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers to carry in the Prometheus remote-write requests. |
| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
| `memory` | -- | -- | The memory options. |
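To make the `export_metrics` rows above concrete, here is a minimal, hedged TOML sketch for a standalone instance using self-import (an illustration only, not a complete configuration; the target database must be created beforehand):

```toml
## Export the standalone instance's own metrics back into itself.
[export_metrics]
enable = true
## How often the collected metrics are written out.
write_interval = "30s"

## Self-import writes the metrics into a local database;
## create it first, e.g. `CREATE DATABASE greptime_metrics`.
[export_metrics.self_import]
db = "greptime_metrics"
```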
@@ -294,6 +298,7 @@
| `meta_client` | -- | -- | The metasrv client options. |
| `meta_client.metasrv_addrs` | Array | -- | The addresses of the metasrv. |
| `meta_client.timeout` | String | `3s` | Operation timeout. |
| `meta_client.heartbeat_timeout` | String | `500ms` | Heartbeat timeout. |
| `meta_client.ddl_timeout` | String | `10s` | DDL timeout. |
| `meta_client.connect_timeout` | String | `1s` | Connect server timeout. |
| `meta_client.tcp_nodelay` | Bool | `true` | `TCP_NODELAY` option for accepted connections. |
@@ -303,7 +308,6 @@
| `query` | -- | -- | The query engine options. |
| `query.parallelism` | Integer | `0` | Parallelism of the query engine.<br/>Default to 0, which means the number of CPU cores. |
| `query.allow_query_fallback` | Bool | `false` | Whether to allow query fallback when push-down optimization fails.<br/>Defaults to `false`, meaning an error message is returned when push-down optimization fails. |
| `query.memory_pool_size` | String | `50%` | Memory pool size for query execution operators (aggregation, sorting, join).<br/>Supports absolute size (e.g., "4GB", "8GB") or percentage of system memory (e.g., "30%").<br/>Setting it to 0 disables the limit (unbounded, default behavior).<br/>When this limit is reached, queries will fail with ResourceExhausted error.<br/>NOTE: This does NOT limit memory used by table scans (only applies to datanodes). |
| `datanode` | -- | -- | Datanode options. |
| `datanode.client` | -- | -- | Datanode client options. |
| `datanode.client.connect_timeout` | String | `10s` | -- |
@@ -326,6 +330,12 @@
| `slow_query.threshold` | String | `30s` | The threshold of slow query. It can be human readable time string, for example: `10s`, `100ms`, `1s`. |
| `slow_query.sample_ratio` | Float | `1.0` | The sampling ratio of slow query log. The value should be in the range of (0, 1]. For example, `0.1` means 10% of the slow queries will be logged and `1.0` means all slow queries will be logged. |
| `slow_query.ttl` | String | `90d` | The TTL of the `slow_queries` system table. Default is `90d` when `record_type` is `system_table`. |
| `export_metrics` | -- | -- | The frontend can export its metrics and send them to a Prometheus-compatible service (e.g. `greptimedb` itself) via the remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from Prometheus scraping. |
| `export_metrics.enable` | Bool | `false` | Whether to enable exporting metrics. |
| `export_metrics.write_interval` | String | `30s` | The interval at which metrics are exported. |
| `export_metrics.remote_write` | -- | -- | -- |
| `export_metrics.remote_write.url` | String | `""` | The Prometheus remote-write endpoint that the metrics are sent to. An example URL: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. |
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers to carry in the Prometheus remote-write requests. |
| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
| `memory` | -- | -- | The memory options. |
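As a quick illustration of the frontend options documented above, a hedged sketch combining the metasrv-client, slow-query, and metric-export settings (values are the defaults and examples from the table; unrelated options are omitted):

```toml
[meta_client]
metasrv_addrs = ["127.0.0.1:3002"]
## Give up on a heartbeat round-trip after 500ms.
heartbeat_timeout = "500ms"

## Record slow queries into the `slow_queries` system table.
[slow_query]
record_type = "system_table"
threshold = "30s"
sample_ratio = 1.0
ttl = "90d"

## Push the frontend's own metrics to a Prometheus remote-write endpoint.
[export_metrics]
enable = true
write_interval = "30s"

[export_metrics.remote_write]
url = "http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics"
headers = { }
```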
@@ -339,7 +349,7 @@
| Key | Type | Default | Descriptions |
| --- | -----| ------- | ----------- |
| `data_home` | String | `./greptimedb_data` | The working home directory. |
| `store_addrs` | Array | -- | Store server address(es). The format depends on the selected backend.<br/><br/>For etcd: a list of "host:port" endpoints.<br/>e.g. ["192.168.1.1:2379", "192.168.1.2:2379"]<br/><br/>For PostgreSQL: a connection string in libpq format or URI.<br/>e.g.<br/>- "host=localhost port=5432 user=postgres password=<PASSWORD> dbname=postgres"<br/>- "postgresql://user:password@localhost:5432/mydb?connect_timeout=10"<br/>For details, see: https://docs.rs/tokio-postgres/latest/tokio_postgres/config/struct.Config.html<br/><br/>For the MySQL store, the format is a MySQL connection URL.<br/>e.g. "mysql://user:password@localhost:3306/greptime_meta?ssl-mode=VERIFY_CA&ssl-ca=/path/to/ca.pem" |
| `store_addrs` | Array | -- | Store server address(es); defaults to the etcd store.<br/>For the postgres store, the format is:<br/>"password=password dbname=postgres user=postgres host=localhost port=5432"<br/>For the etcd store, the format is:<br/>"127.0.0.1:2379" |
| `store_key_prefix` | String | `""` | If it's not empty, the metasrv will store all data with this key prefix. |
| `backend` | String | `etcd_store` | The datastore for meta server.<br/>Available values:<br/>- `etcd_store` (default value)<br/>- `memory_store`<br/>- `postgres_store`<br/>- `mysql_store` |
| `meta_table_name` | String | `greptime_metakv` | Table name in the RDS used to store metadata. Takes effect when using an RDS kv backend.<br/>**Only used when backend is `postgres_store`.** |
@@ -355,11 +365,12 @@
| `runtime` | -- | -- | The runtime options. |
| `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
| `runtime.compact_rt_size` | Integer | `4` | The number of threads to execute the runtime for global write operations. |
| `backend_tls` | -- | -- | TLS configuration for kv store backend (applicable for etcd, PostgreSQL, and MySQL backends)<br/>When using etcd, PostgreSQL, or MySQL as metadata store, you can configure TLS here<br/><br/>Note: if TLS is configured in both this section and the `store_addrs` connection string, the<br/>settings here will override the TLS settings in `store_addrs`. |
| `backend_tls` | -- | -- | TLS configuration for kv store backend (applicable for etcd, PostgreSQL, and MySQL backends)<br/>When using etcd, PostgreSQL, or MySQL as metadata store, you can configure TLS here |
| `backend_tls.mode` | String | `prefer` | TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html<br/>- "disable" - No TLS<br/>- "prefer" (default) - Try TLS, fallback to plain<br/>- "require" - Require TLS<br/>- "verify_ca" - Require TLS and verify CA<br/>- "verify_full" - Require TLS and verify hostname |
| `backend_tls.cert_path` | String | `""` | Path to client certificate file (for client authentication)<br/>Like "/path/to/client.crt" |
| `backend_tls.key_path` | String | `""` | Path to client private key file (for client authentication)<br/>Like "/path/to/client.key" |
| `backend_tls.ca_cert_path` | String | `""` | Path to CA certificate file (for server certificate verification)<br/>Required when using custom CAs or self-signed certificates<br/>Leave empty to use system root certificates only<br/>Like "/path/to/ca.crt" |
| `backend_tls.watch` | Bool | `false` | Watch for certificate file changes and auto reload |
| `grpc` | -- | -- | The gRPC server options. |
| `grpc.bind_addr` | String | `127.0.0.1:3002` | The address to bind the gRPC server. |
| `grpc.server_addr` | String | `127.0.0.1:3002` | The communication server address for the frontend and datanode to connect to metasrv.<br/>If left empty or unset, the server will automatically use the IP address of the first network interface<br/>on the host, with the same port number as the one specified in `bind_addr`. |
@@ -414,6 +425,12 @@
| `logging.otlp_headers` | -- | -- | Additional OTLP headers, only valid when using OTLP http |
| `logging.tracing_sample_ratio` | -- | Unset | The percentage of traces that will be sampled and exported.<br/>Valid range `[0, 1]`: 1 means all traces are sampled, 0 means no traces are sampled; the default value is 1.<br/>Ratios > 1 are treated as 1 and ratios < 0 are treated as 0. |
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
| `export_metrics` | -- | -- | The metasrv can export its metrics and send them to a Prometheus-compatible service (e.g. `greptimedb` itself) via the remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from Prometheus scraping. |
| `export_metrics.enable` | Bool | `false` | Whether to enable exporting metrics. |
| `export_metrics.write_interval` | String | `30s` | The interval at which metrics are exported. |
| `export_metrics.remote_write` | -- | -- | -- |
| `export_metrics.remote_write.url` | String | `""` | The Prometheus remote-write endpoint that the metrics are sent to. An example URL: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. |
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers to carry in the Prometheus remote-write requests. |
| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
| `memory` | -- | -- | The memory options. |
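To show how the metasrv storage and TLS rows above fit together, a hedged sketch of a PostgreSQL-backed metasrv with TLS towards the metadata store (the connection string and certificate paths are placeholders):

```toml
## Use PostgreSQL instead of the default etcd backend.
backend = "postgres_store"
store_addrs = ["password=<PASSWORD> dbname=postgres user=postgres host=localhost port=5432"]
## Table used to store metadata in the RDS backend.
meta_table_name = "greptime_metakv"

## TLS for the connection to the metadata store.
[backend_tls]
mode = "verify_ca"
cert_path = "/path/to/client.crt"
key_path = "/path/to/client.key"
ca_cert_path = "/path/to/ca.crt"
## Reload certificates automatically when the files change.
watch = true
```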
@@ -429,7 +446,7 @@
| `require_lease_before_startup` | Bool | `false` | Start services after regions have obtained leases.<br/>It will block the datanode start if it can't receive leases in the heartbeat from metasrv. |
| `init_regions_in_background` | Bool | `false` | Initialize all regions in the background during the startup.<br/>By default, it provides services after all regions have been initialized. |
| `init_regions_parallelism` | Integer | `16` | Parallelism of initializing regions. |
| `max_concurrent_queries` | Integer | `0` | The maximum number of concurrent queries allowed to be executed. Zero means unlimited.<br/>NOTE: This setting affects scan_memory_limit's privileged tier allocation.<br/>When set, 70% of queries get privileged memory access (full scan_memory_limit).<br/>The remaining 30% get standard tier access (70% of scan_memory_limit). |
| `max_concurrent_queries` | Integer | `0` | The maximum number of concurrent queries allowed to be executed. Zero means unlimited. |
| `enable_telemetry` | Bool | `true` | Enable telemetry to collect anonymous usage data. Enabled by default. |
| `http` | -- | -- | The HTTP server options. |
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
@@ -456,6 +473,7 @@
| `meta_client` | -- | -- | The metasrv client options. |
| `meta_client.metasrv_addrs` | Array | -- | The addresses of the metasrv. |
| `meta_client.timeout` | String | `3s` | Operation timeout. |
| `meta_client.heartbeat_timeout` | String | `500ms` | Heartbeat timeout. |
| `meta_client.ddl_timeout` | String | `10s` | DDL timeout. |
| `meta_client.connect_timeout` | String | `1s` | Connect server timeout. |
| `meta_client.tcp_nodelay` | Bool | `true` | `TCP_NODELAY` option for accepted connections. |
@@ -482,7 +500,6 @@
| `wal.overwrite_entry_start_id` | Bool | `false` | Ignore missing entries during read WAL.<br/>**It's only used when the provider is `kafka`**.<br/><br/>This option ensures that when Kafka messages are deleted, the system<br/>can still successfully replay memtable data without throwing an<br/>out-of-range error.<br/>However, enabling this option might lead to unexpected data loss,<br/>as the system will skip over missing entries instead of treating<br/>them as critical errors. |
| `query` | -- | -- | The query engine options. |
| `query.parallelism` | Integer | `0` | Parallelism of the query engine.<br/>Default to 0, which means the number of CPU cores. |
| `query.memory_pool_size` | String | `50%` | Memory pool size for query execution operators (aggregation, sorting, join).<br/>Supports absolute size (e.g., "2GB", "4GB") or percentage of system memory (e.g., "20%").<br/>Setting it to 0 disables the limit (unbounded, default behavior).<br/>When this limit is reached, queries will fail with ResourceExhausted error.<br/>NOTE: This does NOT limit memory used by table scans. |
| `storage` | -- | -- | The data storage options. |
| `storage.data_home` | String | `./greptimedb_data` | The working home directory. |
| `storage.type` | String | `File` | The storage type used to store the data.<br/>- `File`: the data is stored in the local file system.<br/>- `S3`: the data is stored in the S3 object storage.<br/>- `Gcs`: the data is stored in the Google Cloud Storage.<br/>- `Azblob`: the data is stored in the Azure Blob Storage.<br/>- `Oss`: the data is stored in the Aliyun OSS. |
@@ -532,13 +549,10 @@
| `region_engine.mito.write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}`. |
| `region_engine.mito.write_cache_size` | String | `5GiB` | Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger. |
| `region_engine.mito.write_cache_ttl` | String | Unset | TTL for write cache. |
| `region_engine.mito.preload_index_cache` | Bool | `true` | Preload index (puffin) files into cache on region open (default: true).<br/>When enabled, index files are loaded into the write cache during region initialization,<br/>which can improve query performance at the cost of longer startup times. |
| `region_engine.mito.index_cache_percent` | Integer | `20` | Percentage of write cache capacity allocated for index (puffin) files (default: 20).<br/>The remaining capacity is used for data (parquet) files.<br/>Must be between 0 and 100 (exclusive). For example, with a 5GiB write cache and 20% allocation,<br/>1GiB is reserved for index files and 4GiB for data files. |
| `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
| `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
| `region_engine.mito.max_concurrent_scan_files` | Integer | `384` | Maximum number of SST files to scan concurrently. |
| `region_engine.mito.allow_stale_entries` | Bool | `false` | Whether to allow stale WAL entries read during replay. |
| `region_engine.mito.scan_memory_limit` | String | `50%` | Memory limit for table scans across all queries.<br/>Supports absolute size (e.g., "2GB") or percentage of system memory (e.g., "20%").<br/>Setting it to 0 disables the limit.<br/>NOTE: Works with max_concurrent_queries for tiered memory allocation.<br/>- If max_concurrent_queries is set: 70% of queries get full access, 30% get 70% access.<br/>- If max_concurrent_queries is 0 (unlimited): first 20 queries get full access, rest get 70% access. |
| `region_engine.mito.min_compaction_interval` | String | `0m` | Minimum time interval between two compactions.<br/>To align with the old behavior, the default value is 0 (no restrictions). |
| `region_engine.mito.default_experimental_flat_format` | Bool | `false` | Whether to enable experimental flat format as the default format. |
| `region_engine.mito.index` | -- | -- | The options for index in Mito engine. |
@@ -572,7 +586,7 @@
| `region_engine.mito.memtable.fork_dictionary_bytes` | String | `1GiB` | Max dictionary bytes.<br/>Only available for `partition_tree` memtable. |
| `region_engine.file` | -- | -- | Enable the file engine. |
| `region_engine.metric` | -- | -- | Metric engine options. |
| `region_engine.metric.sparse_primary_key_encoding` | Bool | `true` | Whether to use sparse primary key encoding. |
| `region_engine.metric.experimental_sparse_primary_key_encoding` | Bool | `false` | Whether to enable the experimental sparse primary key encoding. |
| `logging` | -- | -- | The logging options. |
| `logging.dir` | String | `./greptimedb_data/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
@@ -585,6 +599,12 @@
| `logging.otlp_headers` | -- | -- | Additional OTLP headers, only valid when using OTLP http |
| `logging.tracing_sample_ratio` | -- | Unset | The percentage of traces that will be sampled and exported.<br/>Valid range `[0, 1]`: 1 means all traces are sampled, 0 means no traces are sampled; the default value is 1.<br/>Ratios > 1 are treated as 1 and ratios < 0 are treated as 0. |
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
| `export_metrics` | -- | -- | The datanode can export its metrics and send them to a Prometheus-compatible service (e.g. `greptimedb` itself) via the remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from Prometheus scraping. |
| `export_metrics.enable` | Bool | `false` | Whether to enable exporting metrics. |
| `export_metrics.write_interval` | String | `30s` | The interval at which metrics are exported. |
| `export_metrics.remote_write` | -- | -- | -- |
| `export_metrics.remote_write.url` | String | `""` | The Prometheus remote-write endpoint that the metrics are sent to. An example URL: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. |
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers to carry in the Prometheus remote-write requests. |
| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
| `memory` | -- | -- | The memory options. |
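For orientation, a hedged sketch of the mito write-cache and compaction options from the datanode rows above (values are the documented defaults; the `[[region_engine]]`/`[region_engine.mito]` layout is assumed from the example configs in this change):

```toml
[[region_engine]]
## Mito engine options (only the cache/compaction knobs are shown here).
[region_engine.mito]
## Local path for the write cache; defaults to `{data_home}` when empty.
write_cache_path = ""
## Give the write cache more room if disk space allows.
write_cache_size = "5GiB"
write_cache_ttl = "8h"
## Buffer size used while writing SST files.
sst_write_buffer_size = "8MB"
## Cap on the number of SST files scanned concurrently.
max_concurrent_scan_files = 384
## "0m" means no restriction between two compactions.
min_compaction_interval = "0m"
```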
@@ -627,6 +647,7 @@
| `meta_client` | -- | -- | The metasrv client options. |
| `meta_client.metasrv_addrs` | Array | -- | The addresses of the metasrv. |
| `meta_client.timeout` | String | `3s` | Operation timeout. |
| `meta_client.heartbeat_timeout` | String | `500ms` | Heartbeat timeout. |
| `meta_client.ddl_timeout` | String | `10s` | DDL timeout. |
| `meta_client.connect_timeout` | String | `1s` | Connect server timeout. |
| `meta_client.tcp_nodelay` | Bool | `true` | `TCP_NODELAY` option for accepted connections. |
@@ -652,6 +673,5 @@
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
| `query` | -- | -- | -- |
| `query.parallelism` | Integer | `1` | Parallelism of the query engine for queries sent by the flownode.<br/>Defaults to 1 so that it won't use too much CPU or memory. |
| `query.memory_pool_size` | String | `50%` | Memory pool size for query execution operators (aggregation, sorting, join).<br/>Supports absolute size (e.g., "1GB", "2GB") or percentage of system memory (e.g., "20%").<br/>Setting it to 0 disables the limit (unbounded, default behavior).<br/>When this limit is reached, queries will fail with ResourceExhausted error.<br/>NOTE: This does NOT limit memory used by table scans. |
| `memory` | -- | -- | The memory options. |
| `memory.enable_heap_profiling` | Bool | `true` | Whether to enable heap profiling activation during startup.<br/>When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable<br/>is set to "prof:true,prof_active:false". The official image adds this env variable.<br/>Default is true. |

View File

@@ -18,9 +18,6 @@ init_regions_in_background = false
init_regions_parallelism = 16
## The maximum number of concurrent queries allowed to be executed. Zero means unlimited.
## NOTE: This setting affects scan_memory_limit's privileged tier allocation.
## When set, 70% of queries get privileged memory access (full scan_memory_limit).
## The remaining 30% get standard tier access (70% of scan_memory_limit).
max_concurrent_queries = 0
## Enable telemetry to collect anonymous usage data. Enabled by default.
@@ -99,6 +96,9 @@ metasrv_addrs = ["127.0.0.1:3002"]
## Operation timeout.
timeout = "3s"
## Heartbeat timeout.
heartbeat_timeout = "500ms"
## DDL timeout.
ddl_timeout = "10s"
@@ -261,13 +261,6 @@ overwrite_entry_start_id = false
## Default to 0, which means the number of CPU cores.
parallelism = 0
## Memory pool size for query execution operators (aggregation, sorting, join).
## Supports absolute size (e.g., "2GB", "4GB") or percentage of system memory (e.g., "20%").
## Setting it to 0 disables the limit (unbounded, default behavior).
## When this limit is reached, queries will fail with ResourceExhausted error.
## NOTE: This does NOT limit memory used by table scans.
memory_pool_size = "50%"
## The data storage options.
[storage]
## The working home directory.
@@ -496,17 +489,6 @@ write_cache_size = "5GiB"
## @toml2docs:none-default
write_cache_ttl = "8h"
## Preload index (puffin) files into cache on region open (default: true).
## When enabled, index files are loaded into the write cache during region initialization,
## which can improve query performance at the cost of longer startup times.
preload_index_cache = true
## Percentage of write cache capacity allocated for index (puffin) files (default: 20).
## The remaining capacity is used for data (parquet) files.
## Must be between 0 and 100 (exclusive). For example, with a 5GiB write cache and 20% allocation,
## 1GiB is reserved for index files and 4GiB for data files.
index_cache_percent = 20
## Buffer size for SST writing.
sst_write_buffer_size = "8MB"
@@ -519,14 +501,6 @@ max_concurrent_scan_files = 384
## Whether to allow stale WAL entries read during replay.
allow_stale_entries = false
## Memory limit for table scans across all queries.
## Supports absolute size (e.g., "2GB") or percentage of system memory (e.g., "20%").
## Setting it to 0 disables the limit.
## NOTE: Works with max_concurrent_queries for tiered memory allocation.
## - If max_concurrent_queries is set: 70% of queries get full access, 30% get 70% access.
## - If max_concurrent_queries is 0 (unlimited): first 20 queries get full access, rest get 70% access.
scan_memory_limit = "50%"
## Minimum time interval between two compactions.
## To align with the old behavior, the default value is 0 (no restrictions).
min_compaction_interval = "0m"
@@ -666,8 +640,8 @@ fork_dictionary_bytes = "1GiB"
[[region_engine]]
## Metric engine options.
[region_engine.metric]
## Whether to use sparse primary key encoding.
sparse_primary_key_encoding = true
## Whether to enable the experimental sparse primary key encoding.
experimental_sparse_primary_key_encoding = false
## The logging options.
[logging]
@@ -709,6 +683,21 @@ otlp_export_protocol = "http"
[logging.tracing_sample_ratio]
default_ratio = 1.0
## The datanode can export its metrics and send them to a Prometheus-compatible service (e.g. `greptimedb` itself) via the remote-write API.
## This is only used for `greptimedb` to export its own metrics internally. It's different from Prometheus scraping.
[export_metrics]
## Whether to enable exporting metrics.
enable = false
## The interval at which metrics are exported.
write_interval = "30s"
[export_metrics.remote_write]
## The Prometheus remote-write endpoint that the metrics are sent to. An example URL: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
url = ""
## HTTP headers to carry in the Prometheus remote-write requests.
headers = { }
## The tracing options. Only effect when compiled with `tokio-console` feature.
#+ [tracing]
## The tokio console address.

View File

@@ -78,6 +78,9 @@ metasrv_addrs = ["127.0.0.1:3002"]
## Operation timeout.
timeout = "3s"
## Heartbeat timeout.
heartbeat_timeout = "500ms"
## DDL timeout.
ddl_timeout = "10s"
@@ -155,13 +158,6 @@ default_ratio = 1.0
## Defaults to 1 so that it won't use too much CPU or memory
parallelism = 1
## Memory pool size for query execution operators (aggregation, sorting, join).
## Supports absolute size (e.g., "1GB", "2GB") or percentage of system memory (e.g., "20%").
## Setting it to 0 disables the limit (unbounded, default behavior).
## When this limit is reached, queries will fail with ResourceExhausted error.
## NOTE: This does NOT limit memory used by table scans.
memory_pool_size = "50%"
## The memory options.
[memory]
## Whether to enable heap profiling activation during startup.

View File

@@ -226,6 +226,9 @@ metasrv_addrs = ["127.0.0.1:3002"]
## Operation timeout.
timeout = "3s"
## Heartbeat timeout.
heartbeat_timeout = "500ms"
## DDL timeout.
ddl_timeout = "10s"
@@ -253,13 +256,6 @@ parallelism = 0
## Defaults to false, meaning an error is returned when push-down optimization fails
allow_query_fallback = false
## Memory pool size for query execution operators (aggregation, sorting, join).
## Supports absolute size (e.g., "4GB", "8GB") or percentage of system memory (e.g., "30%").
## Setting it to 0 disables the limit (unbounded, default behavior).
## When this limit is reached, queries will fail with ResourceExhausted error.
## NOTE: This does NOT limit memory used by table scans (only applies to datanodes).
memory_pool_size = "50%"
## Datanode options.
[datanode]
## Datanode client options.
@@ -326,6 +322,21 @@ sample_ratio = 1.0
## The TTL of the `slow_queries` system table. Default is `90d` when `record_type` is `system_table`.
ttl = "90d"
## The frontend can export its metrics and send them to a Prometheus-compatible service (e.g. `greptimedb` itself) via the remote-write API.
## This is only used for `greptimedb` to export its own metrics internally. It's different from Prometheus scraping.
[export_metrics]
## Whether to enable exporting metrics.
enable = false
## The interval at which metrics are exported.
write_interval = "30s"
[export_metrics.remote_write]
## The Prometheus remote-write endpoint that the metrics are sent to. An example URL: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
url = ""
## HTTP headers to carry in the Prometheus remote-write requests.
headers = { }
## The tracing options. Only effect when compiled with `tokio-console` feature.
#+ [tracing]
## The tokio console address.

View File

@@ -1,19 +1,11 @@
## The working home directory.
data_home = "./greptimedb_data"
## Store server address(es). The format depends on the selected backend.
##
## For etcd: a list of "host:port" endpoints.
## e.g. ["192.168.1.1:2379", "192.168.1.2:2379"]
##
## For PostgreSQL: a connection string in libpq format or URI.
## e.g.
## - "host=localhost port=5432 user=postgres password=<PASSWORD> dbname=postgres"
## - "postgresql://user:password@localhost:5432/mydb?connect_timeout=10"
## For details, see: https://docs.rs/tokio-postgres/latest/tokio_postgres/config/struct.Config.html
##
## For mysql store, the format is a MySQL connection URL.
## e.g. "mysql://user:password@localhost:3306/greptime_meta?ssl-mode=VERIFY_CA&ssl-ca=/path/to/ca.pem"
## Store server address; defaults to the etcd store.
## For the postgres store, the format is:
## "password=password dbname=postgres user=postgres host=localhost port=5432"
## For the etcd store, the format is:
## "127.0.0.1:2379"
store_addrs = ["127.0.0.1:2379"]
## If it's not empty, the metasrv will store all data with this key prefix.
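## For comparison with the etcd `store_addrs` above, hedged sketches of pointing the
## metasrv at a PostgreSQL or MySQL backend instead (credentials and hosts are
## placeholders in the formats described in the comments above):
##
## PostgreSQL backend, libpq-style connection string:
# backend = "postgres_store"
# store_addrs = ["password=password dbname=postgres user=postgres host=localhost port=5432"]
##
## MySQL backend, connection URL form:
# backend = "mysql_store"
# store_addrs = ["mysql://user:password@localhost:3306/greptime_meta"]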
@@ -83,9 +75,6 @@ node_max_idle_time = "24hours"
## TLS configuration for kv store backend (applicable for etcd, PostgreSQL, and MySQL backends)
## When using etcd, PostgreSQL, or MySQL as metadata store, you can configure TLS here
##
## Note: if TLS is configured in both this section and the `store_addrs` connection string, the
## settings here will override the TLS settings in `store_addrs`.
[backend_tls]
## TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html
## - "disable" - No TLS
@@ -109,6 +98,9 @@ key_path = ""
## Like "/path/to/ca.crt"
ca_cert_path = ""
## Watch for certificate file changes and auto reload
watch = false
## The gRPC server options.
[grpc]
## The address to bind the gRPC server.
@@ -331,6 +323,21 @@ otlp_export_protocol = "http"
[logging.tracing_sample_ratio]
default_ratio = 1.0
## The metasrv can export its metrics and send them to a Prometheus-compatible service (e.g. `greptimedb` itself) via the remote-write API.
## This is only used for `greptimedb` to export its own metrics internally. It's different from Prometheus scraping.
[export_metrics]
## Whether to enable exporting metrics.
enable = false
## The interval at which metrics are exported.
write_interval = "30s"
[export_metrics.remote_write]
## The Prometheus remote-write endpoint that the metrics are sent to. An example URL: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
url = ""
## HTTP headers to carry in the Prometheus remote-write requests.
headers = { }
## The tracing options. Only effect when compiled with `tokio-console` feature.
#+ [tracing]
## The tokio console address.

View File

@@ -14,9 +14,6 @@ init_regions_in_background = false
init_regions_parallelism = 16
## The maximum number of concurrent queries allowed to be executed. Zero means unlimited.
## NOTE: This setting affects scan_memory_limit's privileged tier allocation.
## When set, 70% of queries get privileged memory access (full scan_memory_limit).
## The remaining 30% get standard tier access (70% of scan_memory_limit).
max_concurrent_queries = 0
## Enable telemetry to collect anonymous usage data. Enabled by default.
@@ -368,13 +365,6 @@ max_running_procedures = 128
## Default to 0, which means the number of CPU cores.
parallelism = 0
## Memory pool size for query execution operators (aggregation, sorting, join).
## Supports absolute size (e.g., "2GB", "4GB") or percentage of system memory (e.g., "20%").
## Setting it to 0 disables the limit (unbounded, default behavior).
## When this limit is reached, queries will fail with ResourceExhausted error.
## NOTE: This does NOT limit memory used by table scans.
memory_pool_size = "50%"
## The data storage options.
[storage]
## The working home directory.
@@ -590,17 +580,6 @@ write_cache_size = "5GiB"
## @toml2docs:none-default
write_cache_ttl = "8h"
## Preload index (puffin) files into cache on region open (default: true).
## When enabled, index files are loaded into the write cache during region initialization,
## which can improve query performance at the cost of longer startup times.
preload_index_cache = true
## Percentage of write cache capacity allocated for index (puffin) files (default: 20).
## The remaining capacity is used for data (parquet) files.
## Must be between 0 and 100 (exclusive). For example, with a 5GiB write cache and 20% allocation,
## 1GiB is reserved for index files and 4GiB for data files.
index_cache_percent = 20
## Buffer size for SST writing.
sst_write_buffer_size = "8MB"
@@ -613,14 +592,6 @@ max_concurrent_scan_files = 384
## Whether to allow stale WAL entries read during replay.
allow_stale_entries = false
## Memory limit for table scans across all queries.
## Supports absolute size (e.g., "2GB") or percentage of system memory (e.g., "20%").
## Setting it to 0 disables the limit.
## NOTE: Works with max_concurrent_queries for tiered memory allocation.
## - If max_concurrent_queries is set: 70% of queries get full access, 30% get 70% access.
## - If max_concurrent_queries is 0 (unlimited): first 20 queries get full access, rest get 70% access.
scan_memory_limit = "50%"
## Minimum time interval between two compactions.
## To align with the old behavior, the default value is 0 (no restrictions).
min_compaction_interval = "0m"
@@ -760,8 +731,8 @@ fork_dictionary_bytes = "1GiB"
[[region_engine]]
## Metric engine options.
[region_engine.metric]
## Whether to use sparse primary key encoding.
sparse_primary_key_encoding = true
## Whether to enable the experimental sparse primary key encoding.
experimental_sparse_primary_key_encoding = false
## The logging options.
[logging]
@@ -820,6 +791,27 @@ default_ratio = 1.0
## @toml2docs:none-default
#+ sample_ratio = 1.0
## The standalone instance can export its metrics and send them to a Prometheus-compatible service (e.g. `greptimedb`) via the remote-write API.
## This is only used for `greptimedb` to export its own metrics internally. It's different from Prometheus scraping.
[export_metrics]
## Whether to enable exporting metrics.
enable = false
## The interval at which metrics are exported.
write_interval = "30s"
## For `standalone` mode, `self_import` is recommended to collect the metrics generated by the instance itself.
## You must create the database before enabling it.
[export_metrics.self_import]
## @toml2docs:none-default
db = "greptime_metrics"
[export_metrics.remote_write]
## The Prometheus remote-write endpoint that the metrics are sent to. An example URL: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
url = ""
## HTTP headers to carry in the Prometheus remote-write requests.
headers = { }
## The tracing options. Only effect when compiled with `tokio-console` feature.
#+ [tracing]
## The tokio console address.

View File

@@ -1,10 +1,10 @@
FROM centos:7 AS builder
FROM centos:7 as builder
ARG CARGO_PROFILE
ARG FEATURES
ARG OUTPUT_DIR
ENV LANG=en_US.utf8
ENV LANG en_US.utf8
WORKDIR /greptimedb
# Install dependencies
@@ -22,7 +22,7 @@ RUN unzip protoc-3.15.8-linux-x86_64.zip -d /usr/local/
# Install Rust
SHELL ["/bin/bash", "-c"]
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
ENV PATH=/usr/local/bin:/root/.cargo/bin/:$PATH
ENV PATH /usr/local/bin:/root/.cargo/bin/:$PATH
# Build the project in release mode.
RUN --mount=target=.,rw \
@@ -33,7 +33,7 @@ RUN --mount=target=.,rw \
TARGET_DIR=/out/target
# Export the binary to the clean image.
FROM centos:7 AS base
FROM centos:7 as base
ARG OUTPUT_DIR
@@ -45,7 +45,7 @@ RUN yum install -y epel-release \
WORKDIR /greptime
COPY --from=builder /out/target/${OUTPUT_DIR}/greptime /greptime/bin/
ENV PATH=/greptime/bin/:$PATH
ENV PATH /greptime/bin/:$PATH
ENV MALLOC_CONF="prof:true,prof_active:false"

View File

@@ -1,65 +0,0 @@
FROM ubuntu:22.04 AS builder
ARG CARGO_PROFILE
ARG FEATURES
ARG OUTPUT_DIR
ENV LANG=en_US.utf8
WORKDIR /greptimedb
RUN apt-get update && \
DEBIAN_FRONTEND=noninteractive apt-get install -y software-properties-common
# Install dependencies.
RUN --mount=type=cache,target=/var/cache/apt \
apt-get update && apt-get install -y \
libssl-dev \
protobuf-compiler \
curl \
git \
build-essential \
pkg-config
# Install Rust.
SHELL ["/bin/bash", "-c"]
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
ENV PATH=/root/.cargo/bin/:$PATH
# Build the project in release mode.
RUN --mount=target=. \
--mount=type=cache,target=/root/.cargo/registry \
make build \
CARGO_PROFILE=${CARGO_PROFILE} \
FEATURES=${FEATURES} \
TARGET_DIR=/out/target
FROM ubuntu:22.04 AS libs
ARG TARGETARCH
# Copy required library dependencies based on architecture
RUN if [ "$TARGETARCH" = "amd64" ]; then \
cp /lib/x86_64-linux-gnu/libz.so.1.2.11 /lib/x86_64-linux-gnu/libz.so.1; \
elif [ "$TARGETARCH" = "arm64" ]; then \
cp /lib/aarch64-linux-gnu/libz.so.1.2.11 /lib/aarch64-linux-gnu/libz.so.1; \
else \
echo "Unsupported architecture: $TARGETARCH" && exit 1; \
fi
# Export the binary to the clean distroless image.
FROM gcr.io/distroless/cc-debian12:latest AS base
ARG OUTPUT_DIR
ARG TARGETARCH
# Copy required library dependencies
COPY --from=libs /lib /lib
COPY --from=busybox:stable /bin/busybox /bin/busybox
WORKDIR /greptime
COPY --from=builder /out/target/${OUTPUT_DIR}/greptime /greptime/bin/greptime
ENV PATH=/greptime/bin/:$PATH
ENV MALLOC_CONF="prof:true,prof_active:false"
ENTRYPOINT ["greptime"]

View File

@@ -1,10 +1,10 @@
FROM ubuntu:22.04 AS builder
FROM ubuntu:22.04 as builder
ARG CARGO_PROFILE
ARG FEATURES
ARG OUTPUT_DIR
ENV LANG=en_US.utf8
ENV LANG en_US.utf8
WORKDIR /greptimedb
RUN apt-get update && \
@@ -23,7 +23,7 @@ RUN --mount=type=cache,target=/var/cache/apt \
# Install Rust.
SHELL ["/bin/bash", "-c"]
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
ENV PATH=/root/.cargo/bin/:$PATH
ENV PATH /root/.cargo/bin/:$PATH
# Build the project in release mode.
RUN --mount=target=. \
@@ -35,7 +35,7 @@ RUN --mount=target=. \
# Export the binary to the clean image.
# TODO(zyy17): Maybe should use the more secure container image.
FROM ubuntu:22.04 AS base
FROM ubuntu:22.04 as base
ARG OUTPUT_DIR
@@ -45,7 +45,7 @@ RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get \
WORKDIR /greptime
COPY --from=builder /out/target/${OUTPUT_DIR}/greptime /greptime/bin/
ENV PATH=/greptime/bin/:$PATH
ENV PATH /greptime/bin/:$PATH
ENV MALLOC_CONF="prof:true,prof_active:false"

View File

@@ -13,7 +13,7 @@ ARG TARGETARCH
ADD $TARGETARCH/greptime /greptime/bin/
ENV PATH=/greptime/bin/:$PATH
ENV PATH /greptime/bin/:$PATH
ENV MALLOC_CONF="prof:true,prof_active:false"

View File

@@ -1,40 +0,0 @@
FROM ubuntu:22.04 AS libs
ARG TARGETARCH
# Copy required library dependencies based on architecture
# TARGETARCH values: amd64, arm64
# Ubuntu library paths: x86_64-linux-gnu, aarch64-linux-gnu
RUN if [ "$TARGETARCH" = "amd64" ]; then \
mkdir -p /output/x86_64-linux-gnu && \
cp /lib/x86_64-linux-gnu/libz.so.1.2.11 /output/x86_64-linux-gnu/libz.so.1; \
elif [ "$TARGETARCH" = "arm64" ]; then \
mkdir -p /output/aarch64-linux-gnu && \
cp /lib/aarch64-linux-gnu/libz.so.1.2.11 /output/aarch64-linux-gnu/libz.so.1; \
else \
echo "Unsupported architecture: $TARGETARCH" && exit 1; \
fi
FROM gcr.io/distroless/cc-debian12:latest
# The root path under which contains all the dependencies to build this Dockerfile.
ARG DOCKER_BUILD_ROOT=.
# The binary name of GreptimeDB executable.
# Defaults to "greptime", but sometimes in other projects it might be different.
ARG TARGET_BIN=greptime
ARG TARGETARCH
# Copy required library dependencies
COPY --from=libs /output /lib
COPY --from=busybox:stable /bin/busybox /bin/busybox
ADD $TARGETARCH/$TARGET_BIN /greptime/bin/
ENV PATH=/greptime/bin/:$PATH
ENV TARGET_BIN=$TARGET_BIN
ENV MALLOC_CONF="prof:true,prof_active:false"
ENTRYPOINT ["greptime"]

View File

@@ -14,7 +14,7 @@ ARG TARGETARCH
ADD $TARGETARCH/$TARGET_BIN /greptime/bin/
ENV PATH=/greptime/bin/:$PATH
ENV PATH /greptime/bin/:$PATH
ENV TARGET_BIN=$TARGET_BIN

View File

@@ -13,19 +13,4 @@ Log Level changed from Some("info") to "trace,flow=debug"%
The data is a string in the format of `global_level,module1=level1,module2=level2,...`, following the same rules as `RUST_LOG`.
The module is the module name of the log, and the level is the log level. The log level can be one of the following: `trace`, `debug`, `info`, `warn`, `error`, `off`(case insensitive).
# Enable/Disable Trace on the Fly
## HTTP API
example:
```bash
curl --data "true" 127.0.0.1:4000/debug/enable_trace
```
And the database will reply with something like:
```
trace enabled%
```
Possible values are "true" or "false".
The module is the module name of the log, and the level is the log level. The log level can be one of the following: `trace`, `debug`, `info`, `warn`, `error`, `off`(case insensitive).

View File

@@ -92,6 +92,9 @@ curl -X POST localhost:4000/debug/prof/mem > greptime.hprof
curl -X POST "localhost:4000/debug/prof/mem?output=flamegraph" > greptime.svg
# or output pprof format
curl -X POST "localhost:4000/debug/prof/mem?output=proto" > greptime.pprof
curl -X POST "localhost:4000/debug/prof/bytes" > greptime.svg
```
You can periodically dump profiling data and compare them to find the delta memory usage.

View File

@@ -106,37 +106,6 @@ This mechanism may be too complex to implement at once. We can consider a two-ph
Also, the read replica's manifest version shouldn't lag behind by more than the lingering time of obsolete files; otherwise it might reference files that have already been deleted by the GC worker.
- It needs to upload a temporary manifest to object storage, which may introduce additional complexity and potential performance overhead. But since long-running queries are typically infrequent, the performance impact is expected to be minimal.
One potential race condition with region migration is illustrated below:
```mermaid
sequenceDiagram
participant gc_worker as GC Worker(same dn as region 1)
participant region1 as Region 1 (Leader → Follower)
participant region2 as Region 2 (Follower → Leader)
participant region_dir as Region Directory
gc_worker->>region1: Start GC, get region manifest
activate region1
region1-->>gc_worker: Region 1 manifest
deactivate region1
gc_worker->>region_dir: Scan region directory
Note over region1,region2: Region Migration Occurs
region1-->>region2: Downgrade to Follower
region2-->>region1: Becomes Leader
region2->>region_dir: Add new file
gc_worker->>region_dir: Continue scanning
gc_worker-->>region_dir: Discovers new file
Note over gc_worker: New file not in Region 1's manifest
gc_worker->>gc_worker: Mark file as orphan(incorrectly)
```
which could cause the GC worker to incorrectly mark the new file as an orphan and delete it, if the configured lingering time for orphan files (files not referenced anywhere, whether in use or not) is not long enough.
A good enough solution could be to use a lock to prevent the GC worker from running on a region while region migration is happening on that region, and vice versa.
The race condition between the GC worker and repartition also needs to be considered carefully. For now, acquiring a lock for both region migration and repartition during the GC worker process could be a simple solution.
## Conclusion and Rationale

View File

@@ -1,94 +0,0 @@
---
Feature Name: Vector Index
Tracking Issue: TBD
Date: 2025-12-04
Author: "TBD"
---
# Summary
Introduce a per-SST approximate nearest neighbor (ANN) index for `VECTOR(dim)` columns with a pluggable engine. USearch HNSW is the initial engine, while the design keeps VSAG (default when linked) and future engines selectable at DDL or alter time and encoded in the index metadata. The index is built alongside SST creation and accelerates `ORDER BY vec_*_distance(column, <literal vector>) LIMIT k` queries, falling back to the existing brute-force path when an index is unavailable or ineligible.
# Motivation
Vector distances are currently computed with nalgebra across all rows (O(N)) before sorting, which does not scale to millions of vectors. An on-disk ANN index with sub-linear search reduces latency and compute cost for common RAG, semantic search, and recommendation workloads without changing SQL.
# Details
## Current Behavior
`VECTOR(dim)` values are stored as binary blobs. Queries call `vec_cos_distance`/`vec_l2sq_distance`/`vec_dot_product` via nalgebra for every row and then sort; there is no indexing or caching.
## Index Eligibility and Configuration
Only `VECTOR(dim)` columns can be indexed. A column metadata flag follows the existing column-option pattern with an intentionally small surface area:
- `engine`: `vsag` (default when the binding is built) or `usearch`. If a configured engine is unavailable at runtime, the builder logs and falls back to `usearch` while leaving the option intact for future rebuilds.
- `metric`: `cosine` (default), `l2sq`, or `dot`; mismatches with query functions force brute-force execution.
- `m`: HNSW graph connectivity (higher = denser graph, more memory, better recall), default `16`.
- `ef_construct`: build-time expansion, default `128`.
- `ef_search`: query-time expansion, default `64`; engines may clamp values.
Option semantics mirror HNSW defaults so both USearch and VSAG can honor them; engine-specific tunables stay in reserved key-value pairs inside the blob header for forward compatibility.
DDL reuses column extensions similar to inverted/fulltext indexes:
```sql
CREATE TABLE embeddings (
ts TIMESTAMP TIME INDEX,
id STRING PRIMARY KEY,
vec VECTOR(384) VECTOR INDEX WITH (engine = 'vsag', metric = 'cosine', ef_search = 64)
);
```
Altering column options toggles the flag, can switch engines (for example `usearch` -> `vsag`), and triggers rebuilds through the existing alter/compaction flow. Engine choice stays in table metadata and each blob header; new SSTs use the configured engine while older SSTs remain readable under their recorded engine until compaction or a manual rebuild rewrites them.
## Storage and Format
- One vector index per indexed column per SST, stored as a Puffin blob with type `greptime-vector-index-v1`.
- Each blob records the engine (`usearch`, `vsag`, future values) and engine parameters in the header so readers can select the matching decoder. Mixed-engine SSTs remain readable because the engine id travels with the blob.
- USearch uses `f32` vectors and SST row offsets (`u64`) as keys; nulls and `OpType::Delete` rows are skipped. Row ids are the absolute SST ordinal so readers can derive `RowSelection` directly from parquet row group lengths without extra side tables.
- Blob layout:
- Header: version, column id, dimension, engine id, metric, `m`, `ef_construct`, `ef_search`, and reserved engine-specific key-value pairs.
- Counts: total rows written and indexed rows.
- Payload: USearch binary produced by `save_to_buffer`.
- An empty index (no eligible vectors) results in no available index entry for that column.
- `puffin_manager` registers the blob type so caches and readers discover it alongside inverted/fulltext/bloom blobs in the same index file.
## Row Visibility and Duplicates
- The indexer increments `row_offset` for every incoming row (including skipped/null/delete rows) so offsets stay aligned with parquet ordering across row groups.
- Only `OpType::Put` rows with the expected dimension are inserted; `OpType::Delete` and malformed rows are skipped but still advance `row_offset`, matching the data plane's visibility rules.
- Multiple versions of the same primary key remain in the graph; the read path intersects search hits with the standard mito2 deduplication/visibility pipeline (sequence-aware dedup, delete filtering, projection) before returning results.
- Searches overfetch beyond `k` to compensate for rows discarded by visibility checks and to avoid reissuing index reads.
## Build Path (mito2 write)
Extend `sst::index::Indexer` to optionally create a `VectorIndexer` when region metadata marks a column as vector-indexed, mirroring how inverted/fulltext/bloom filters attach to `IndexerBuilderImpl` in `mito2`.
The indexer consumes `Batch`/`RecordBatch` data and shares memory tracking and abort semantics with existing indexers:
- Maintain a running `row_offset` that follows SST write order and spans row groups so the search result can be turned into `RowSelection`.
- For each `OpType::Put`, if the vector is non-null and matches the declared dimension, insert into USearch with `row_offset` as the key; otherwise skip.
- Track memory with existing index build metrics; on failure, abort only the vector index while keeping SST writing unaffected.
Engine selection is table-driven: the builder picks the configured engine (default `vsag`, fallback `usearch` if `vsag` is not compiled in) and dispatches to the matching implementation. Unknown engines skip index build with a warning.
On `finish`, serialize the engine-tagged index into the Puffin writer and record `IndexType::Vector` metadata for the column. `IndexOutput` and `FileMeta::indexes/available_indexes` gain a vector entry so manifest updates and `RegionVersion` surface per-column presence, following patterns used by inverted/fulltext/bloom indexes. Planner/metadata validation ensures that mismatched dimensions only reduce the indexed-row count and do not break reads.
## Read Path (mito2 query)
A planner rule in `query` identifies eligible plans on mito2 tables: a single `ORDER BY vec_cos_distance|vec_l2sq_distance|vec_dot_product(<vector column>, <literal vector>)` in ascending order plus a `LIMIT`/`TopK`. The rule rejects plans with multiple sort keys, non-literal query vectors, or additional projections that would change the distance expression and falls back to brute-force in those cases.
For eligible scans, build a `VectorIndexScan` execution node that:
- Consults SST metadata for `IndexType::Vector`, loads the index via Puffin using the existing `mito2::cache::index` infrastructure, and dispatches to the engine declared in the blob header (USearch/VSAG/etc.).
- Runs the engine's `search` with an overfetch (for example 2×k) to tolerate rows filtered by deletes, dimension mismatches, or late-stage dedup; keys already match SST row offsets produced by the writer.
- Converts hits to `RowSelection` using parquet row group lengths and reuses the parquet reader so visibility, projection, and deduplication logic stay unchanged; distances are recomputed with `vec_*_distance` before the final trim to k to guarantee ordering and to merge distributed partial results deterministically.
Any unsupported shape, load error, or cache miss falls back to the current brute-force execution path.
## Lifecycle and Maintenance
Lifecycle piggybacks on the existing SST/index flow: rebuilds run where other secondary indexes do, graphs are always rebuilt from source rows (no HNSW merge), and cleanup/versioning/caching reuse the existing Puffin and index cache paths.
# Implementation Plan
1. Add the `usearch` dependency (wrapper module in `index` or `mito2`) and map minimal HNSW options; keep an engine trait that allows plugging VSAG without changing the rest of the pipeline.
2. Introduce `IndexType::Vector` and a column metadata key for vector index options (including `engine`); add SQL parser and `SHOW CREATE TABLE` support for `VECTOR INDEX WITH (...)`.
3. Implement `vector_index` build/read modules under `mito2` (and `index` if shared), including Puffin serialization that records engine id, blob-type registration with `puffin_manager`, and integration with the `Indexer` builder, `IndexOutput`, manifest updates, and compaction rebuild.
4. Extend the query planner/execution to detect eligible plans and drive a `RowSelection`-based ANN scan with a fallback path, dispatching by engine at read time and using existing Puffin and index caches.
5. Add unit tests for serialization/search correctness and an end-to-end test covering plan rewrite, cache usage, engine selection, and fallback; add a mixed-engine test to confirm old USearch blobs still serve after a VSAG switch.
6. Follow up with an optional VSAG engine binding (feature flag), validate parity with USearch on dense vectors, exercise alternative algorithms (for example PQ), and flip the default `engine` to `vsag` when the binding is present.
# Alternatives
- **VSAG (follow-up engine):** C++ library with HNSW and additional algorithms (for example SINDI for sparse vectors and PQ) targeting in-memory and disk-friendly search. Provides parameter generators and a roadmap for GPU-assisted build and graph compression. Compared to FAISS it is newer with fewer integrations but bundles sparse/dense coverage and out-of-core focus in one engine. Fits the pluggable-engine design and would become the default `engine = 'vsag'` when linked; USearch remains available for lighter dependencies.
- **FAISS:** Broad feature set (IVF/IVFPQ/PQ/HNSW, GPU acceleration, scalar filtering, pre/post filters) and battle-tested performance across datasets, but it requires a heavier C++/GPU toolchain, has no official Rust binding, and is less disk-centric than VSAG; integrating it would add more build/distribution burden than USearch/VSAG.
- **Do nothing:** Keep brute-force evaluation, which remains O(N) and unacceptable at scale.

View File

@@ -8,7 +8,6 @@ license.workspace = true
workspace = true
[dependencies]
arrow-schema.workspace = true
common-base.workspace = true
common-decimal.workspace = true
common-error.workspace = true

View File

@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::{BTreeMap, HashSet};
use std::collections::HashSet;
use std::sync::Arc;
use common_decimal::Decimal128;
@@ -20,12 +20,13 @@ use common_decimal::decimal128::{DECIMAL128_DEFAULT_SCALE, DECIMAL128_MAX_PRECIS
use common_time::time::Time;
use common_time::timestamp::TimeUnit;
use common_time::{Date, IntervalDayTime, IntervalMonthDayNano, IntervalYearMonth, Timestamp};
use datatypes::json::value::{JsonNumber, JsonValue, JsonValueRef, JsonVariant};
use datatypes::prelude::{ConcreteDataType, ValueRef};
use datatypes::types::{
IntervalType, JsonFormat, JsonType, StructField, StructType, TimeType, TimestampType,
IntervalType, JsonFormat, StructField, StructType, TimeType, TimestampType,
};
use datatypes::value::{
ListValue, ListValueRef, OrderedF32, OrderedF64, StructValue, StructValueRef, Value,
};
use datatypes::value::{ListValueRef, OrderedF32, OrderedF64, StructValueRef, Value};
use datatypes::vectors::VectorRef;
use greptime_proto::v1::column_data_type_extension::TypeExt;
use greptime_proto::v1::ddl_request::Expr;
@@ -33,9 +34,9 @@ use greptime_proto::v1::greptime_request::Request;
use greptime_proto::v1::query_request::Query;
use greptime_proto::v1::value::ValueData;
use greptime_proto::v1::{
self, ColumnDataTypeExtension, DdlRequest, DecimalTypeExtension, DictionaryTypeExtension,
JsonList, JsonNativeTypeExtension, JsonObject, JsonTypeExtension, ListTypeExtension,
QueryRequest, Row, SemanticType, StructTypeExtension, VectorTypeExtension, json_value,
self, ColumnDataTypeExtension, DdlRequest, DecimalTypeExtension, JsonNativeTypeExtension,
JsonTypeExtension, ListTypeExtension, QueryRequest, Row, SemanticType, StructTypeExtension,
VectorTypeExtension,
};
use paste::paste;
use snafu::prelude::*;
@@ -80,10 +81,6 @@ impl ColumnDataTypeWrapper {
pub fn to_parts(&self) -> (ColumnDataType, Option<ColumnDataTypeExtension>) {
(self.datatype, self.datatype_ext.clone())
}
pub fn into_parts(self) -> (ColumnDataType, Option<ColumnDataTypeExtension>) {
(self.datatype, self.datatype_ext)
}
}
impl From<ColumnDataTypeWrapper> for ConcreteDataType {
@@ -129,7 +126,6 @@ impl From<ColumnDataTypeWrapper> for ConcreteDataType {
};
ConcreteDataType::json_native_datatype(inner_type.into())
}
None => ConcreteDataType::Json(JsonType::null()),
_ => {
// invalid state, type extension is missing or invalid
ConcreteDataType::null_datatype()
@@ -219,26 +215,6 @@ impl From<ColumnDataTypeWrapper> for ConcreteDataType {
ConcreteDataType::null_datatype()
}
}
ColumnDataType::Dictionary => {
if let Some(TypeExt::DictionaryType(d)) = datatype_wrapper
.datatype_ext
.as_ref()
.and_then(|datatype_ext| datatype_ext.type_ext.as_ref())
{
let key_type = ColumnDataTypeWrapper {
datatype: d.key_datatype(),
datatype_ext: d.key_datatype_extension.clone().map(|ext| *ext),
};
let value_type = ColumnDataTypeWrapper {
datatype: d.value_datatype(),
datatype_ext: d.value_datatype_extension.clone().map(|ext| *ext),
};
ConcreteDataType::dictionary_datatype(key_type.into(), value_type.into())
} else {
// invalid state: type extension not found
ConcreteDataType::null_datatype()
}
}
}
}
}
@@ -362,30 +338,13 @@ impl ColumnDataTypeWrapper {
}),
}
}
pub fn dictionary_datatype(
key_type: ColumnDataTypeWrapper,
value_type: ColumnDataTypeWrapper,
) -> Self {
ColumnDataTypeWrapper {
datatype: ColumnDataType::Dictionary,
datatype_ext: Some(ColumnDataTypeExtension {
type_ext: Some(TypeExt::DictionaryType(Box::new(DictionaryTypeExtension {
key_datatype: key_type.datatype().into(),
key_datatype_extension: key_type.datatype_ext.map(Box::new),
value_datatype: value_type.datatype().into(),
value_datatype_extension: value_type.datatype_ext.map(Box::new),
}))),
}),
}
}
}
impl TryFrom<ConcreteDataType> for ColumnDataTypeWrapper {
type Error = error::Error;
fn try_from(datatype: ConcreteDataType) -> Result<Self> {
let column_datatype = match &datatype {
let column_datatype = match datatype {
ConcreteDataType::Boolean(_) => ColumnDataType::Boolean,
ConcreteDataType::Int8(_) => ColumnDataType::Int8,
ConcreteDataType::Int16(_) => ColumnDataType::Int16,
@@ -422,8 +381,9 @@ impl TryFrom<ConcreteDataType> for ColumnDataTypeWrapper {
ConcreteDataType::Vector(_) => ColumnDataType::Vector,
ConcreteDataType::List(_) => ColumnDataType::List,
ConcreteDataType::Struct(_) => ColumnDataType::Struct,
ConcreteDataType::Dictionary(_) => ColumnDataType::Dictionary,
ConcreteDataType::Null(_) | ConcreteDataType::Duration(_) => {
ConcreteDataType::Null(_)
| ConcreteDataType::Dictionary(_)
| ConcreteDataType::Duration(_) => {
return error::IntoColumnDataTypeSnafu { from: datatype }.fail();
}
};
@@ -444,22 +404,16 @@ impl TryFrom<ConcreteDataType> for ColumnDataTypeWrapper {
JsonFormat::Jsonb => Some(ColumnDataTypeExtension {
type_ext: Some(TypeExt::JsonType(JsonTypeExtension::JsonBinary.into())),
}),
JsonFormat::Native(native_type) => {
if native_type.is_null() {
None
} else {
let native_type = ConcreteDataType::from(native_type.as_ref());
let (datatype, datatype_extension) =
ColumnDataTypeWrapper::try_from(native_type)?.into_parts();
Some(ColumnDataTypeExtension {
type_ext: Some(TypeExt::JsonNativeType(Box::new(
JsonNativeTypeExtension {
datatype: datatype as i32,
datatype_extension: datatype_extension.map(Box::new),
},
))),
})
}
JsonFormat::Native(inner) => {
let inner_type = ColumnDataTypeWrapper::try_from(*inner.clone())?;
Some(ColumnDataTypeExtension {
type_ext: Some(TypeExt::JsonNativeType(Box::new(
JsonNativeTypeExtension {
datatype: inner_type.datatype.into(),
datatype_extension: inner_type.datatype_ext.map(Box::new),
},
))),
})
}
}
} else {
@@ -509,25 +463,6 @@ impl TryFrom<ConcreteDataType> for ColumnDataTypeWrapper {
None
}
}
ColumnDataType::Dictionary => {
if let ConcreteDataType::Dictionary(dict_type) = &datatype {
let key_type = ColumnDataTypeWrapper::try_from(dict_type.key_type().clone())?;
let value_type =
ColumnDataTypeWrapper::try_from(dict_type.value_type().clone())?;
Some(ColumnDataTypeExtension {
type_ext: Some(TypeExt::DictionaryType(Box::new(
DictionaryTypeExtension {
key_datatype: key_type.datatype.into(),
key_datatype_extension: key_type.datatype_ext.map(Box::new),
value_datatype: value_type.datatype.into(),
value_datatype_extension: value_type.datatype_ext.map(Box::new),
},
))),
})
} else {
None
}
}
_ => None,
};
Ok(Self {
@@ -666,9 +601,6 @@ pub fn values_with_capacity(datatype: ColumnDataType, capacity: usize) -> Values
struct_values: Vec::with_capacity(capacity),
..Default::default()
},
ColumnDataType::Dictionary => Values {
..Default::default()
},
}
}
@@ -869,8 +801,21 @@ pub fn pb_value_to_value_ref<'a>(
}
ValueData::JsonValue(inner_value) => {
let value = decode_json_value(inner_value);
ValueRef::Json(Box::new(value))
let json_datatype_ext = datatype_ext
.as_ref()
.and_then(|ext| {
if let Some(TypeExt::JsonNativeType(l)) = &ext.type_ext {
Some(l)
} else {
None
}
})
.expect("json value must contain datatype ext");
ValueRef::Json(Box::new(pb_value_to_value_ref(
inner_value,
json_datatype_ext.datatype_extension.as_deref(),
)))
}
}
}
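A minimal sketch (not part of the diff; field names are taken from the hunks above) of the extension shape this JSON branch expects: a `ColumnDataTypeExtension` whose `type_ext` carries the inner native type of the JSON column.

// Sketch only: builds the extension that the JsonValue branch unwraps via
// TypeExt::JsonNativeType; an i64-typed native JSON column is assumed here.
fn example_json_native_extension() -> ColumnDataTypeExtension {
    ColumnDataTypeExtension {
        type_ext: Some(TypeExt::JsonNativeType(Box::new(JsonNativeTypeExtension {
            datatype: ColumnDataType::Int64 as i32,
            datatype_extension: None,
        }))),
    }
}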
@@ -894,64 +839,125 @@ pub fn is_column_type_value_eq(
.unwrap_or(false)
}
fn encode_json_value(value: JsonValue) -> v1::JsonValue {
fn helper(json: JsonVariant) -> v1::JsonValue {
let value = match json {
JsonVariant::Null => None,
JsonVariant::Bool(x) => Some(json_value::Value::Boolean(x)),
JsonVariant::Number(x) => Some(match x {
JsonNumber::PosInt(i) => json_value::Value::Uint(i),
JsonNumber::NegInt(i) => json_value::Value::Int(i),
JsonNumber::Float(f) => json_value::Value::Float(f.0),
}),
JsonVariant::String(x) => Some(json_value::Value::Str(x)),
JsonVariant::Array(x) => Some(json_value::Value::Array(JsonList {
items: x.into_iter().map(helper).collect::<Vec<_>>(),
/// Converts a value into its proto representation.
pub fn to_proto_value(value: Value) -> v1::Value {
match value {
Value::Null => v1::Value { value_data: None },
Value::Boolean(v) => v1::Value {
value_data: Some(ValueData::BoolValue(v)),
},
Value::UInt8(v) => v1::Value {
value_data: Some(ValueData::U8Value(v.into())),
},
Value::UInt16(v) => v1::Value {
value_data: Some(ValueData::U16Value(v.into())),
},
Value::UInt32(v) => v1::Value {
value_data: Some(ValueData::U32Value(v)),
},
Value::UInt64(v) => v1::Value {
value_data: Some(ValueData::U64Value(v)),
},
Value::Int8(v) => v1::Value {
value_data: Some(ValueData::I8Value(v.into())),
},
Value::Int16(v) => v1::Value {
value_data: Some(ValueData::I16Value(v.into())),
},
Value::Int32(v) => v1::Value {
value_data: Some(ValueData::I32Value(v)),
},
Value::Int64(v) => v1::Value {
value_data: Some(ValueData::I64Value(v)),
},
Value::Float32(v) => v1::Value {
value_data: Some(ValueData::F32Value(*v)),
},
Value::Float64(v) => v1::Value {
value_data: Some(ValueData::F64Value(*v)),
},
Value::String(v) => v1::Value {
value_data: Some(ValueData::StringValue(v.as_utf8().to_string())),
},
Value::Binary(v) => v1::Value {
value_data: Some(ValueData::BinaryValue(v.to_vec())),
},
Value::Date(v) => v1::Value {
value_data: Some(ValueData::DateValue(v.val())),
},
Value::Timestamp(v) => match v.unit() {
TimeUnit::Second => v1::Value {
value_data: Some(ValueData::TimestampSecondValue(v.value())),
},
TimeUnit::Millisecond => v1::Value {
value_data: Some(ValueData::TimestampMillisecondValue(v.value())),
},
TimeUnit::Microsecond => v1::Value {
value_data: Some(ValueData::TimestampMicrosecondValue(v.value())),
},
TimeUnit::Nanosecond => v1::Value {
value_data: Some(ValueData::TimestampNanosecondValue(v.value())),
},
},
Value::Time(v) => match v.unit() {
TimeUnit::Second => v1::Value {
value_data: Some(ValueData::TimeSecondValue(v.value())),
},
TimeUnit::Millisecond => v1::Value {
value_data: Some(ValueData::TimeMillisecondValue(v.value())),
},
TimeUnit::Microsecond => v1::Value {
value_data: Some(ValueData::TimeMicrosecondValue(v.value())),
},
TimeUnit::Nanosecond => v1::Value {
value_data: Some(ValueData::TimeNanosecondValue(v.value())),
},
},
Value::IntervalYearMonth(v) => v1::Value {
value_data: Some(ValueData::IntervalYearMonthValue(v.to_i32())),
},
Value::IntervalDayTime(v) => v1::Value {
value_data: Some(ValueData::IntervalDayTimeValue(v.to_i64())),
},
Value::IntervalMonthDayNano(v) => v1::Value {
value_data: Some(ValueData::IntervalMonthDayNanoValue(
convert_month_day_nano_to_pb(v),
)),
},
Value::Decimal128(v) => v1::Value {
value_data: Some(ValueData::Decimal128Value(convert_to_pb_decimal128(v))),
},
Value::List(list_value) => v1::Value {
value_data: Some(ValueData::ListValue(v1::ListValue {
items: convert_list_to_pb_values(list_value),
})),
JsonVariant::Object(x) => {
let entries = x
.into_iter()
.map(|(key, v)| v1::json_object::Entry {
key,
value: Some(helper(v)),
})
.collect::<Vec<_>>();
Some(json_value::Value::Object(JsonObject { entries }))
}
};
v1::JsonValue { value }
},
Value::Struct(struct_value) => v1::Value {
value_data: Some(ValueData::StructValue(v1::StructValue {
items: convert_struct_to_pb_values(struct_value),
})),
},
Value::Json(v) => v1::Value {
value_data: Some(ValueData::JsonValue(Box::new(to_proto_value(*v)))),
},
Value::Duration(_) => v1::Value { value_data: None },
}
helper(value.into_variant())
}
fn decode_json_value(value: &v1::JsonValue) -> JsonValueRef<'_> {
let Some(value) = &value.value else {
return JsonValueRef::null();
};
match value {
json_value::Value::Boolean(x) => (*x).into(),
json_value::Value::Int(x) => (*x).into(),
json_value::Value::Uint(x) => (*x).into(),
json_value::Value::Float(x) => (*x).into(),
json_value::Value::Str(x) => (x.as_str()).into(),
json_value::Value::Array(array) => array
.items
.iter()
.map(|x| decode_json_value(x).into_variant())
.collect::<Vec<_>>()
.into(),
json_value::Value::Object(x) => x
.entries
.iter()
.filter_map(|entry| {
entry
.value
.as_ref()
.map(|v| (entry.key.as_str(), decode_json_value(v).into_variant()))
})
.collect::<BTreeMap<_, _>>()
.into(),
}
fn convert_list_to_pb_values(list_value: ListValue) -> Vec<v1::Value> {
list_value
.take_items()
.into_iter()
.map(to_proto_value)
.collect()
}
fn convert_struct_to_pb_values(struct_value: StructValue) -> Vec<v1::Value> {
struct_value
.take_items()
.into_iter()
.map(to_proto_value)
.collect()
}
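A small usage sketch for the conversion defined above; the scalar mappings follow directly from the match arms, while the test-style wrapper itself is only illustrative.

// Sketch: a scalar Value maps to a v1::Value carrying the matching ValueData variant.
#[test]
fn example_to_proto_value_scalars() {
    assert_eq!(
        to_proto_value(Value::Int64(7)).value_data,
        Some(ValueData::I64Value(7))
    );
    assert_eq!(to_proto_value(Value::Null).value_data, None);
}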
/// Returns the [ColumnDataTypeWrapper] of the value.
@@ -1000,14 +1006,14 @@ pub fn vectors_to_rows<'a>(
let mut rows = vec![Row { values: vec![] }; row_count];
for column in columns {
for (row_index, row) in rows.iter_mut().enumerate() {
row.values.push(to_grpc_value(column.get(row_index)))
row.values.push(value_to_grpc_value(column.get(row_index)))
}
}
rows
}
pub fn to_grpc_value(value: Value) -> GrpcValue {
pub fn value_to_grpc_value(value: Value) -> GrpcValue {
GrpcValue {
value_data: match value {
Value::Null => None,
@@ -1047,7 +1053,7 @@ pub fn to_grpc_value(value: Value) -> GrpcValue {
let items = list_value
.take_items()
.into_iter()
.map(to_grpc_value)
.map(value_to_grpc_value)
.collect();
Some(ValueData::ListValue(v1::ListValue { items }))
}
@@ -1055,11 +1061,13 @@ pub fn to_grpc_value(value: Value) -> GrpcValue {
let items = struct_value
.take_items()
.into_iter()
.map(to_grpc_value)
.map(value_to_grpc_value)
.collect();
Some(ValueData::StructValue(v1::StructValue { items }))
}
Value::Json(v) => Some(ValueData::JsonValue(encode_json_value(*v))),
Value::Json(inner_value) => Some(ValueData::JsonValue(Box::new(value_to_grpc_value(
*inner_value,
)))),
Value::Duration(_) => unreachable!(),
},
}
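A hedged usage sketch for `vectors_to_rows`: judging from the loop above it takes an iterator over column vectors plus a row count, but that signature is an assumption, and the vector constructors below come from the test imports further down.

// Sketch: two columns of three rows yield three Rows with two values each.
#[test]
fn example_vectors_to_rows() {
    let columns: Vec<VectorRef> = vec![
        Arc::new(BooleanVector::from(vec![true, false, true])),
        Arc::new(StringVector::from(vec!["a", "b", "c"])),
    ];
    let rows = vectors_to_rows(columns.iter(), 3);
    assert_eq!(rows.len(), 3);
    assert_eq!(rows[0].values.len(), 2);
}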
@@ -1155,7 +1163,6 @@ mod tests {
use common_time::interval::IntervalUnit;
use datatypes::scalars::ScalarVector;
use datatypes::types::{Int8Type, Int32Type, UInt8Type, UInt32Type};
use datatypes::value::{ListValue, StructValue};
use datatypes::vectors::{
BooleanVector, DateVector, Float32Vector, PrimitiveVector, StringVector,
};
@@ -1252,9 +1259,6 @@ mod tests {
let values = values_with_capacity(ColumnDataType::Json, 2);
assert_eq!(2, values.json_values.capacity());
assert_eq!(2, values.string_values.capacity());
let values = values_with_capacity(ColumnDataType::Dictionary, 2);
assert!(values.bool_values.is_empty());
}
#[test]
@@ -1351,17 +1355,6 @@ mod tests {
ConcreteDataType::list_datatype(Arc::new(ConcreteDataType::string_datatype())),
ColumnDataTypeWrapper::list_datatype(ColumnDataTypeWrapper::string_datatype()).into()
);
assert_eq!(
ConcreteDataType::dictionary_datatype(
ConcreteDataType::int32_datatype(),
ConcreteDataType::string_datatype()
),
ColumnDataTypeWrapper::dictionary_datatype(
ColumnDataTypeWrapper::int32_datatype(),
ColumnDataTypeWrapper::string_datatype()
)
.into()
);
let struct_type = StructType::new(Arc::new(vec![
StructField::new("id".to_string(), ConcreteDataType::int64_datatype(), true),
StructField::new(
@@ -1532,18 +1525,6 @@ mod tests {
ColumnDataTypeWrapper::vector_datatype(3),
ConcreteDataType::vector_datatype(3).try_into().unwrap()
);
assert_eq!(
ColumnDataTypeWrapper::dictionary_datatype(
ColumnDataTypeWrapper::int32_datatype(),
ColumnDataTypeWrapper::string_datatype()
),
ConcreteDataType::dictionary_datatype(
ConcreteDataType::int32_datatype(),
ConcreteDataType::string_datatype()
)
.try_into()
.unwrap()
);
let result: Result<ColumnDataTypeWrapper> = ConcreteDataType::null_datatype().try_into();
assert!(result.is_err());
@@ -1599,20 +1580,6 @@ mod tests {
datatype_extension: Some(Box::new(ColumnDataTypeExtension {
type_ext: Some(TypeExt::StructType(StructTypeExtension {
fields: vec![
v1::StructField {
name: "address".to_string(),
datatype: ColumnDataTypeWrapper::string_datatype()
.datatype()
.into(),
datatype_extension: None
},
v1::StructField {
name: "age".to_string(),
datatype: ColumnDataTypeWrapper::int64_datatype()
.datatype()
.into(),
datatype_extension: None
},
v1::StructField {
name: "id".to_string(),
datatype: ColumnDataTypeWrapper::int64_datatype()
@@ -1627,6 +1594,20 @@ mod tests {
.into(),
datatype_extension: None
},
v1::StructField {
name: "age".to_string(),
datatype: ColumnDataTypeWrapper::int32_datatype()
.datatype()
.into(),
datatype_extension: None
},
v1::StructField {
name: "address".to_string(),
datatype: ColumnDataTypeWrapper::string_datatype()
.datatype()
.into(),
datatype_extension: None
}
]
}))
}))
@@ -1759,7 +1740,7 @@ mod tests {
Arc::new(ConcreteDataType::boolean_datatype()),
));
let pb_value = to_grpc_value(value);
let pb_value = to_proto_value(value);
match pb_value.value_data.unwrap() {
ValueData::ListValue(pb_list_value) => {
@@ -1788,7 +1769,7 @@ mod tests {
.unwrap(),
);
let pb_value = to_grpc_value(value);
let pb_value = to_proto_value(value);
match pb_value.value_data.unwrap() {
ValueData::StructValue(pb_struct_value) => {
@@ -1797,199 +1778,4 @@ mod tests {
_ => panic!("Unexpected value type"),
}
}
#[test]
fn test_encode_decode_json_value() {
let json = JsonValue::null();
let proto = encode_json_value(json.clone());
assert!(proto.value.is_none());
let value = decode_json_value(&proto);
assert_eq!(json.as_ref(), value);
let json: JsonValue = true.into();
let proto = encode_json_value(json.clone());
assert_eq!(proto.value, Some(json_value::Value::Boolean(true)));
let value = decode_json_value(&proto);
assert_eq!(json.as_ref(), value);
let json: JsonValue = (-1i64).into();
let proto = encode_json_value(json.clone());
assert_eq!(proto.value, Some(json_value::Value::Int(-1)));
let value = decode_json_value(&proto);
assert_eq!(json.as_ref(), value);
let json: JsonValue = 1u64.into();
let proto = encode_json_value(json.clone());
assert_eq!(proto.value, Some(json_value::Value::Uint(1)));
let value = decode_json_value(&proto);
assert_eq!(json.as_ref(), value);
let json: JsonValue = 1.0f64.into();
let proto = encode_json_value(json.clone());
assert_eq!(proto.value, Some(json_value::Value::Float(1.0)));
let value = decode_json_value(&proto);
assert_eq!(json.as_ref(), value);
let json: JsonValue = "s".into();
let proto = encode_json_value(json.clone());
assert_eq!(proto.value, Some(json_value::Value::Str("s".to_string())));
let value = decode_json_value(&proto);
assert_eq!(json.as_ref(), value);
let json: JsonValue = [1i64, 2, 3].into();
let proto = encode_json_value(json.clone());
assert_eq!(
proto.value,
Some(json_value::Value::Array(JsonList {
items: vec![
v1::JsonValue {
value: Some(json_value::Value::Int(1))
},
v1::JsonValue {
value: Some(json_value::Value::Int(2))
},
v1::JsonValue {
value: Some(json_value::Value::Int(3))
}
]
}))
);
let value = decode_json_value(&proto);
assert_eq!(json.as_ref(), value);
let json: JsonValue = [(); 0].into();
let proto = encode_json_value(json.clone());
assert_eq!(
proto.value,
Some(json_value::Value::Array(JsonList { items: vec![] }))
);
let value = decode_json_value(&proto);
assert_eq!(json.as_ref(), value);
let json: JsonValue = [("k3", 3i64), ("k2", 2i64), ("k1", 1i64)].into();
let proto = encode_json_value(json.clone());
assert_eq!(
proto.value,
Some(json_value::Value::Object(JsonObject {
entries: vec![
v1::json_object::Entry {
key: "k1".to_string(),
value: Some(v1::JsonValue {
value: Some(json_value::Value::Int(1))
}),
},
v1::json_object::Entry {
key: "k2".to_string(),
value: Some(v1::JsonValue {
value: Some(json_value::Value::Int(2))
}),
},
v1::json_object::Entry {
key: "k3".to_string(),
value: Some(v1::JsonValue {
value: Some(json_value::Value::Int(3))
}),
},
]
}))
);
let value = decode_json_value(&proto);
assert_eq!(json.as_ref(), value);
let json: JsonValue = [("null", ()); 0].into();
let proto = encode_json_value(json.clone());
assert_eq!(
proto.value,
Some(json_value::Value::Object(JsonObject { entries: vec![] }))
);
let value = decode_json_value(&proto);
assert_eq!(json.as_ref(), value);
let json: JsonValue = [
("null", JsonVariant::from(())),
("bool", false.into()),
("list", ["hello", "world"].into()),
(
"object",
[
("positive_i", JsonVariant::from(42u64)),
("negative_i", (-42i64).into()),
("nested", [("what", "blah")].into()),
]
.into(),
),
]
.into();
let proto = encode_json_value(json.clone());
assert_eq!(
proto.value,
Some(json_value::Value::Object(JsonObject {
entries: vec![
v1::json_object::Entry {
key: "bool".to_string(),
value: Some(v1::JsonValue {
value: Some(json_value::Value::Boolean(false))
}),
},
v1::json_object::Entry {
key: "list".to_string(),
value: Some(v1::JsonValue {
value: Some(json_value::Value::Array(JsonList {
items: vec![
v1::JsonValue {
value: Some(json_value::Value::Str("hello".to_string()))
},
v1::JsonValue {
value: Some(json_value::Value::Str("world".to_string()))
},
]
}))
}),
},
v1::json_object::Entry {
key: "null".to_string(),
value: Some(v1::JsonValue { value: None }),
},
v1::json_object::Entry {
key: "object".to_string(),
value: Some(v1::JsonValue {
value: Some(json_value::Value::Object(JsonObject {
entries: vec![
v1::json_object::Entry {
key: "negative_i".to_string(),
value: Some(v1::JsonValue {
value: Some(json_value::Value::Int(-42))
}),
},
v1::json_object::Entry {
key: "nested".to_string(),
value: Some(v1::JsonValue {
value: Some(json_value::Value::Object(JsonObject {
entries: vec![v1::json_object::Entry {
key: "what".to_string(),
value: Some(v1::JsonValue {
value: Some(json_value::Value::Str(
"blah".to_string()
))
}),
},]
}))
}),
},
v1::json_object::Entry {
key: "positive_i".to_string(),
value: Some(v1::JsonValue {
value: Some(json_value::Value::Uint(42))
}),
},
]
}))
}),
},
]
}))
);
let value = decode_json_value(&proto);
assert_eq!(json.as_ref(), value);
}
}

View File

@@ -14,11 +14,10 @@
use std::collections::HashMap;
use arrow_schema::extension::{EXTENSION_TYPE_METADATA_KEY, EXTENSION_TYPE_NAME_KEY};
use datatypes::schema::{
COMMENT_KEY, ColumnDefaultConstraint, ColumnSchema, FULLTEXT_KEY, FulltextAnalyzer,
FulltextBackend, FulltextOptions, INVERTED_INDEX_KEY, SKIPPING_INDEX_KEY, SkippingIndexOptions,
SkippingIndexType,
FulltextBackend, FulltextOptions, INVERTED_INDEX_KEY, JSON_STRUCTURE_SETTINGS_KEY,
SKIPPING_INDEX_KEY, SkippingIndexOptions, SkippingIndexType,
};
use greptime_proto::v1::{
Analyzer, FulltextBackend as PbFulltextBackend, SkippingIndexType as PbSkippingIndexType,
@@ -69,14 +68,8 @@ pub fn try_as_column_schema(column_def: &ColumnDef) -> Result<ColumnSchema> {
if let Some(skipping_index) = options.options.get(SKIPPING_INDEX_GRPC_KEY) {
metadata.insert(SKIPPING_INDEX_KEY.to_string(), skipping_index.to_owned());
}
if let Some(extension_name) = options.options.get(EXTENSION_TYPE_NAME_KEY) {
metadata.insert(EXTENSION_TYPE_NAME_KEY.to_string(), extension_name.clone());
}
if let Some(extension_metadata) = options.options.get(EXTENSION_TYPE_METADATA_KEY) {
metadata.insert(
EXTENSION_TYPE_METADATA_KEY.to_string(),
extension_metadata.clone(),
);
if let Some(settings) = options.options.get(JSON_STRUCTURE_SETTINGS_KEY) {
metadata.insert(JSON_STRUCTURE_SETTINGS_KEY.to_string(), settings.clone());
}
}
@@ -149,16 +142,10 @@ pub fn options_from_column_schema(column_schema: &ColumnSchema) -> Option<Column
.options
.insert(SKIPPING_INDEX_GRPC_KEY.to_string(), skipping_index.clone());
}
if let Some(extension_name) = column_schema.metadata().get(EXTENSION_TYPE_NAME_KEY) {
if let Some(settings) = column_schema.metadata().get(JSON_STRUCTURE_SETTINGS_KEY) {
options
.options
.insert(EXTENSION_TYPE_NAME_KEY.to_string(), extension_name.clone());
}
if let Some(extension_metadata) = column_schema.metadata().get(EXTENSION_TYPE_METADATA_KEY) {
options.options.insert(
EXTENSION_TYPE_METADATA_KEY.to_string(),
extension_metadata.clone(),
);
.insert(JSON_STRUCTURE_SETTINGS_KEY.to_string(), settings.clone());
}
(!options.options.is_empty()).then_some(options)

View File

@@ -5,6 +5,7 @@ edition.workspace = true
license.workspace = true
[features]
enterprise = []
testing = []
[lints]

View File

@@ -12,14 +12,13 @@
// See the License for the specific language governing permissions and
// limitations under the License.
pub use client::{CachedKvBackend, CachedKvBackendBuilder, MetaKvBackend};
mod builder;
mod client;
mod manager;
mod table_cache;
pub use builder::{
CatalogManagerConfigurator, CatalogManagerConfiguratorRef, KvBackendCatalogManagerBuilder,
};
pub use client::{CachedKvBackend, CachedKvBackendBuilder, MetaKvBackend};
pub use builder::KvBackendCatalogManagerBuilder;
pub use manager::KvBackendCatalogManager;
pub use table_cache::{TableCache, TableCacheRef, new_table_cache};

View File

@@ -12,11 +12,9 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::HashMap;
use std::sync::Arc;
use common_catalog::consts::DEFAULT_CATALOG_NAME;
use common_error::ext::BoxedError;
use common_meta::cache::LayeredCacheRegistryRef;
use common_meta::key::TableMetadataManager;
use common_meta::key::flow::FlowMetadataManager;
@@ -25,34 +23,24 @@ use common_procedure::ProcedureManagerRef;
use moka::sync::Cache;
use partition::manager::PartitionRuleManager;
use crate::information_schema::{
InformationExtensionRef, InformationSchemaProvider, InformationSchemaTableFactoryRef,
};
#[cfg(feature = "enterprise")]
use crate::information_schema::InformationSchemaTableFactoryRef;
use crate::information_schema::{InformationExtensionRef, InformationSchemaProvider};
use crate::kvbackend::KvBackendCatalogManager;
use crate::kvbackend::manager::{CATALOG_CACHE_MAX_CAPACITY, SystemCatalog};
use crate::process_manager::ProcessManagerRef;
use crate::system_schema::numbers_table_provider::NumbersTableProvider;
use crate::system_schema::pg_catalog::PGCatalogProvider;
/// The configurator that customizes or enhances the [`KvBackendCatalogManagerBuilder`].
#[async_trait::async_trait]
pub trait CatalogManagerConfigurator<C>: Send + Sync {
async fn configure(
&self,
builder: KvBackendCatalogManagerBuilder,
ctx: C,
) -> std::result::Result<KvBackendCatalogManagerBuilder, BoxedError>;
}
pub type CatalogManagerConfiguratorRef<C> = Arc<dyn CatalogManagerConfigurator<C>>;
pub struct KvBackendCatalogManagerBuilder {
information_extension: InformationExtensionRef,
backend: KvBackendRef,
cache_registry: LayeredCacheRegistryRef,
procedure_manager: Option<ProcedureManagerRef>,
process_manager: Option<ProcessManagerRef>,
extra_information_table_factories: HashMap<String, InformationSchemaTableFactoryRef>,
#[cfg(feature = "enterprise")]
extra_information_table_factories:
std::collections::HashMap<String, InformationSchemaTableFactoryRef>,
}
impl KvBackendCatalogManagerBuilder {
@@ -67,7 +55,8 @@ impl KvBackendCatalogManagerBuilder {
cache_registry,
procedure_manager: None,
process_manager: None,
extra_information_table_factories: HashMap::new(),
#[cfg(feature = "enterprise")]
extra_information_table_factories: std::collections::HashMap::new(),
}
}
@@ -82,9 +71,10 @@ impl KvBackendCatalogManagerBuilder {
}
/// Sets the extra information tables.
#[cfg(feature = "enterprise")]
pub fn with_extra_information_table_factories(
mut self,
factories: HashMap<String, InformationSchemaTableFactoryRef>,
factories: std::collections::HashMap<String, InformationSchemaTableFactoryRef>,
) -> Self {
self.extra_information_table_factories = factories;
self
@@ -97,6 +87,7 @@ impl KvBackendCatalogManagerBuilder {
cache_registry,
procedure_manager,
process_manager,
#[cfg(feature = "enterprise")]
extra_information_table_factories,
} = self;
Arc::new_cyclic(|me| KvBackendCatalogManager {
@@ -120,6 +111,7 @@ impl KvBackendCatalogManagerBuilder {
process_manager.clone(),
backend.clone(),
);
#[cfg(feature = "enterprise")]
let provider = provider
.with_extra_table_factories(extra_information_table_factories.clone());
Arc::new(provider)
@@ -131,6 +123,7 @@ impl KvBackendCatalogManagerBuilder {
numbers_table_provider: NumbersTableProvider,
backend,
process_manager,
#[cfg(feature = "enterprise")]
extra_information_table_factories,
},
cache_registry,

View File

@@ -53,9 +53,9 @@ use crate::error::{
CacheNotFoundSnafu, GetTableCacheSnafu, InvalidTableInfoInCatalogSnafu, ListCatalogsSnafu,
ListSchemasSnafu, ListTablesSnafu, Result, TableMetadataManagerSnafu,
};
use crate::information_schema::{
InformationExtensionRef, InformationSchemaProvider, InformationSchemaTableFactoryRef,
};
#[cfg(feature = "enterprise")]
use crate::information_schema::InformationSchemaTableFactoryRef;
use crate::information_schema::{InformationExtensionRef, InformationSchemaProvider};
use crate::kvbackend::TableCacheRef;
use crate::process_manager::ProcessManagerRef;
use crate::system_schema::SystemSchemaProvider;
@@ -557,6 +557,7 @@ pub(super) struct SystemCatalog {
pub(super) numbers_table_provider: NumbersTableProvider,
pub(super) backend: KvBackendRef,
pub(super) process_manager: Option<ProcessManagerRef>,
#[cfg(feature = "enterprise")]
pub(super) extra_information_table_factories:
std::collections::HashMap<String, InformationSchemaTableFactoryRef>,
}
@@ -627,6 +628,7 @@ impl SystemCatalog {
self.process_manager.clone(),
self.backend.clone(),
);
#[cfg(feature = "enterprise")]
let provider = provider
.with_extra_table_factories(self.extra_information_table_factories.clone());
Arc::new(provider)

View File

@@ -22,6 +22,7 @@ mod procedure_info;
pub mod process_list;
pub mod region_peers;
mod region_statistics;
mod runtime_metrics;
pub mod schemata;
mod ssts;
mod table_constraints;
@@ -64,6 +65,7 @@ use crate::system_schema::information_schema::information_memory_table::get_sche
use crate::system_schema::information_schema::key_column_usage::InformationSchemaKeyColumnUsage;
use crate::system_schema::information_schema::partitions::InformationSchemaPartitions;
use crate::system_schema::information_schema::region_peers::InformationSchemaRegionPeers;
use crate::system_schema::information_schema::runtime_metrics::InformationSchemaMetrics;
use crate::system_schema::information_schema::schemata::InformationSchemaSchemata;
use crate::system_schema::information_schema::ssts::{
InformationSchemaSstsIndexMeta, InformationSchemaSstsManifest, InformationSchemaSstsStorage,
@@ -95,6 +97,7 @@ lazy_static! {
ROUTINES,
SCHEMA_PRIVILEGES,
TABLE_PRIVILEGES,
TRIGGERS,
GLOBAL_STATUS,
SESSION_STATUS,
PARTITIONS,
@@ -117,6 +120,7 @@ macro_rules! setup_memory_table {
};
}
#[cfg(feature = "enterprise")]
pub struct MakeInformationTableRequest {
pub catalog_name: String,
pub catalog_manager: Weak<dyn CatalogManager>,
@@ -127,10 +131,12 @@ pub struct MakeInformationTableRequest {
///
/// This trait allows for extensibility of the information schema by providing
/// a way to dynamically create custom information schema tables.
#[cfg(feature = "enterprise")]
pub trait InformationSchemaTableFactory {
fn make_information_table(&self, req: MakeInformationTableRequest) -> SystemTableRef;
}
#[cfg(feature = "enterprise")]
pub type InformationSchemaTableFactoryRef = Arc<dyn InformationSchemaTableFactory + Send + Sync>;
/// The `information_schema` tables info provider.
@@ -140,7 +146,9 @@ pub struct InformationSchemaProvider {
process_manager: Option<ProcessManagerRef>,
flow_metadata_manager: Arc<FlowMetadataManager>,
tables: HashMap<String, TableRef>,
#[allow(dead_code)]
kv_backend: KvBackendRef,
#[cfg(feature = "enterprise")]
extra_table_factories: HashMap<String, InformationSchemaTableFactoryRef>,
}
@@ -161,6 +169,7 @@ impl SystemSchemaProviderInner for InformationSchemaProvider {
}
fn system_table(&self, name: &str) -> Option<SystemTableRef> {
#[cfg(feature = "enterprise")]
if let Some(factory) = self.extra_table_factories.get(name) {
let req = MakeInformationTableRequest {
catalog_name: self.catalog_name.clone(),
@@ -198,6 +207,7 @@ impl SystemSchemaProviderInner for InformationSchemaProvider {
ROUTINES => setup_memory_table!(ROUTINES),
SCHEMA_PRIVILEGES => setup_memory_table!(SCHEMA_PRIVILEGES),
TABLE_PRIVILEGES => setup_memory_table!(TABLE_PRIVILEGES),
TRIGGERS => setup_memory_table!(TRIGGERS),
GLOBAL_STATUS => setup_memory_table!(GLOBAL_STATUS),
SESSION_STATUS => setup_memory_table!(SESSION_STATUS),
KEY_COLUMN_USAGE => Some(Arc::new(InformationSchemaKeyColumnUsage::new(
@@ -208,6 +218,7 @@ impl SystemSchemaProviderInner for InformationSchemaProvider {
self.catalog_name.clone(),
self.catalog_manager.clone(),
)) as _),
RUNTIME_METRICS => Some(Arc::new(InformationSchemaMetrics::new())),
PARTITIONS => Some(Arc::new(InformationSchemaPartitions::new(
self.catalog_name.clone(),
self.catalog_manager.clone(),
@@ -275,6 +286,7 @@ impl InformationSchemaProvider {
process_manager,
tables: HashMap::new(),
kv_backend,
#[cfg(feature = "enterprise")]
extra_table_factories: HashMap::new(),
};
@@ -283,6 +295,7 @@ impl InformationSchemaProvider {
provider
}
#[cfg(feature = "enterprise")]
pub(crate) fn with_extra_table_factories(
mut self,
factories: HashMap<String, InformationSchemaTableFactoryRef>,
@@ -300,6 +313,10 @@ impl InformationSchemaProvider {
// authentication details, and other critical information.
// Only put these tables under the `greptime` catalog to prevent information leaks.
if self.catalog_name == DEFAULT_CATALOG_NAME {
tables.insert(
RUNTIME_METRICS.to_string(),
self.build_table(RUNTIME_METRICS).unwrap(),
);
tables.insert(
BUILD_INFO.to_string(),
self.build_table(BUILD_INFO).unwrap(),
@@ -350,6 +367,7 @@ impl InformationSchemaProvider {
if let Some(process_list) = self.build_table(PROCESS_LIST) {
tables.insert(PROCESS_LIST.to_string(), process_list);
}
#[cfg(feature = "enterprise")]
for name in self.extra_table_factories.keys() {
tables.insert(name.clone(), self.build_table(name).expect(name));
}

View File

@@ -15,7 +15,8 @@
use std::sync::Arc;
use common_catalog::consts::{METRIC_ENGINE, MITO_ENGINE};
use datatypes::schema::{Schema, SchemaRef};
use datatypes::data_type::ConcreteDataType;
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
use datatypes::vectors::{Int64Vector, StringVector, VectorRef};
use crate::system_schema::information_schema::table_names::*;
@@ -365,6 +366,16 @@ pub(super) fn get_schema_columns(table_name: &str) -> (SchemaRef, Vec<VectorRef>
vec![],
),
TRIGGERS => (
vec![
string_column("TRIGGER_NAME"),
ColumnSchema::new("trigger_id", ConcreteDataType::uint64_datatype(), false),
string_column("TRIGGER_DEFINITION"),
ColumnSchema::new("flownode_id", ConcreteDataType::uint64_datatype(), true),
],
vec![],
),
// TODO: Consider storing internal metrics in the `global_status` and
// `session_status` tables.
GLOBAL_STATUS => (

View File

@@ -211,7 +211,6 @@ struct InformationSchemaPartitionsBuilder {
partition_names: StringVectorBuilder,
partition_ordinal_positions: Int64VectorBuilder,
partition_expressions: StringVectorBuilder,
partition_descriptions: StringVectorBuilder,
create_times: TimestampSecondVectorBuilder,
partition_ids: UInt64VectorBuilder,
}
@@ -232,7 +231,6 @@ impl InformationSchemaPartitionsBuilder {
partition_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
partition_ordinal_positions: Int64VectorBuilder::with_capacity(INIT_CAPACITY),
partition_expressions: StringVectorBuilder::with_capacity(INIT_CAPACITY),
partition_descriptions: StringVectorBuilder::with_capacity(INIT_CAPACITY),
create_times: TimestampSecondVectorBuilder::with_capacity(INIT_CAPACITY),
partition_ids: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
}
@@ -321,21 +319,6 @@ impl InformationSchemaPartitionsBuilder {
return;
}
// Get partition column names (shared by all partitions)
// In MySQL, PARTITION_EXPRESSION is the partitioning function expression (e.g., column name)
let partition_columns: String = table_info
.meta
.partition_column_names()
.cloned()
.collect::<Vec<_>>()
.join(", ");
let partition_expr_str = if partition_columns.is_empty() {
None
} else {
Some(partition_columns)
};
for (index, partition) in partitions.iter().enumerate() {
let partition_name = format!("p{index}");
@@ -345,12 +328,8 @@ impl InformationSchemaPartitionsBuilder {
self.partition_names.push(Some(&partition_name));
self.partition_ordinal_positions
.push(Some((index + 1) as i64));
// PARTITION_EXPRESSION: partition column names (same for all partitions)
self.partition_expressions
.push(partition_expr_str.as_deref());
// PARTITION_DESCRIPTION: partition boundary expression (different for each partition)
let description = partition.partition_expr.as_ref().map(|e| e.to_string());
self.partition_descriptions.push(description.as_deref());
let expression = partition.partition_expr.as_ref().map(|e| e.to_string());
self.partition_expressions.push(expression.as_deref());
self.create_times.push(Some(TimestampSecond::from(
table_info.meta.created_on.timestamp(),
)));
@@ -390,7 +369,7 @@ impl InformationSchemaPartitionsBuilder {
null_string_vector.clone(),
Arc::new(self.partition_expressions.finish()),
null_string_vector.clone(),
Arc::new(self.partition_descriptions.finish()),
null_string_vector.clone(),
// TODO(dennis): rows and index statistics info
null_i64_vector.clone(),
null_i64_vector.clone(),

View File

@@ -0,0 +1,265 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::sync::Arc;
use arrow_schema::SchemaRef as ArrowSchemaRef;
use common_catalog::consts::INFORMATION_SCHEMA_RUNTIME_METRICS_TABLE_ID;
use common_error::ext::BoxedError;
use common_recordbatch::adapter::RecordBatchStreamAdapter;
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
use common_time::util::current_time_millis;
use datafusion::execution::TaskContext;
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
use datatypes::prelude::{ConcreteDataType, MutableVector};
use datatypes::scalars::ScalarVectorBuilder;
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
use datatypes::vectors::{
ConstantVector, Float64VectorBuilder, StringVectorBuilder, TimestampMillisecondVector,
VectorRef,
};
use itertools::Itertools;
use snafu::ResultExt;
use store_api::storage::{ScanRequest, TableId};
use crate::error::{CreateRecordBatchSnafu, InternalSnafu, Result};
use crate::system_schema::information_schema::{InformationTable, RUNTIME_METRICS};
#[derive(Debug)]
pub(super) struct InformationSchemaMetrics {
schema: SchemaRef,
}
const METRIC_NAME: &str = "metric_name";
const METRIC_VALUE: &str = "value";
const METRIC_LABELS: &str = "labels";
const PEER_ADDR: &str = "peer_addr";
const PEER_TYPE: &str = "peer_type";
const TIMESTAMP: &str = "timestamp";
/// The `information_schema.runtime_metrics` virtual table.
/// It exposes GreptimeDB runtime metrics to users through SQL.
impl InformationSchemaMetrics {
pub(super) fn new() -> Self {
Self {
schema: Self::schema(),
}
}
fn schema() -> SchemaRef {
Arc::new(Schema::new(vec![
ColumnSchema::new(METRIC_NAME, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(METRIC_VALUE, ConcreteDataType::float64_datatype(), false),
ColumnSchema::new(METRIC_LABELS, ConcreteDataType::string_datatype(), true),
ColumnSchema::new(PEER_ADDR, ConcreteDataType::string_datatype(), true),
ColumnSchema::new(PEER_TYPE, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(
TIMESTAMP,
ConcreteDataType::timestamp_millisecond_datatype(),
false,
),
]))
}
fn builder(&self) -> InformationSchemaMetricsBuilder {
InformationSchemaMetricsBuilder::new(self.schema.clone())
}
}
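As a usage illustration only (the query itself is hypothetical; the column names come from the schema above), this virtual table is meant to be read with ordinary SQL:

// Illustrative query against the runtime_metrics virtual table.
const EXAMPLE_RUNTIME_METRICS_QUERY: &str =
    "SELECT metric_name, value, labels FROM information_schema.runtime_metrics \
     WHERE metric_name LIKE 'greptime%'";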
impl InformationTable for InformationSchemaMetrics {
fn table_id(&self) -> TableId {
INFORMATION_SCHEMA_RUNTIME_METRICS_TABLE_ID
}
fn table_name(&self) -> &'static str {
RUNTIME_METRICS
}
fn schema(&self) -> SchemaRef {
self.schema.clone()
}
fn to_stream(&self, request: ScanRequest) -> Result<SendableRecordBatchStream> {
let schema = self.schema.arrow_schema().clone();
let mut builder = self.builder();
let stream = Box::pin(DfRecordBatchStreamAdapter::new(
schema,
futures::stream::once(async move {
builder
.make_metrics(Some(request))
.await
.map(|x| x.into_df_record_batch())
.map_err(Into::into)
}),
));
Ok(Box::pin(
RecordBatchStreamAdapter::try_new(stream)
.map_err(BoxedError::new)
.context(InternalSnafu)?,
))
}
}
struct InformationSchemaMetricsBuilder {
schema: SchemaRef,
metric_names: StringVectorBuilder,
metric_values: Float64VectorBuilder,
metric_labels: StringVectorBuilder,
peer_addrs: StringVectorBuilder,
peer_types: StringVectorBuilder,
}
impl InformationSchemaMetricsBuilder {
fn new(schema: SchemaRef) -> Self {
Self {
schema,
metric_names: StringVectorBuilder::with_capacity(42),
metric_values: Float64VectorBuilder::with_capacity(42),
metric_labels: StringVectorBuilder::with_capacity(42),
peer_addrs: StringVectorBuilder::with_capacity(42),
peer_types: StringVectorBuilder::with_capacity(42),
}
}
fn add_metric(
&mut self,
metric_name: &str,
labels: String,
metric_value: f64,
peer: Option<&str>,
peer_type: &str,
) {
self.metric_names.push(Some(metric_name));
self.metric_values.push(Some(metric_value));
self.metric_labels.push(Some(&labels));
self.peer_addrs.push(peer);
self.peer_types.push(Some(peer_type));
}
async fn make_metrics(&mut self, _request: Option<ScanRequest>) -> Result<RecordBatch> {
let metric_families = prometheus::gather();
let write_request =
common_telemetry::metric::convert_metric_to_write_request(metric_families, None, 0);
for ts in write_request.timeseries {
// Safety: the time series always has a `__name__` label
let metric_name = ts
.labels
.iter()
.find_map(|label| {
if label.name == "__name__" {
Some(label.value.clone())
} else {
None
}
})
.unwrap();
self.add_metric(
&metric_name,
ts.labels
.into_iter()
.filter_map(|label| {
if label.name == "__name__" {
None
} else {
Some(format!("{}={}", label.name, label.value))
}
})
.join(", "),
// Safety: the time series always has at least one sample
ts.samples[0].value,
// The peer column is always `None` in standalone mode
None,
"STANDALONE",
);
}
// FIXME(dennis): fetch metrics from other peers as well
self.finish()
}
fn finish(&mut self) -> Result<RecordBatch> {
let rows_num = self.metric_names.len();
let timestamps = Arc::new(ConstantVector::new(
Arc::new(TimestampMillisecondVector::from_slice([
current_time_millis(),
])),
rows_num,
));
let columns: Vec<VectorRef> = vec![
Arc::new(self.metric_names.finish()),
Arc::new(self.metric_values.finish()),
Arc::new(self.metric_labels.finish()),
Arc::new(self.peer_addrs.finish()),
Arc::new(self.peer_types.finish()),
timestamps,
];
RecordBatch::new(self.schema.clone(), columns).context(CreateRecordBatchSnafu)
}
}
impl DfPartitionStream for InformationSchemaMetrics {
fn schema(&self) -> &ArrowSchemaRef {
self.schema.arrow_schema()
}
fn execute(&self, _: Arc<TaskContext>) -> DfSendableRecordBatchStream {
let schema = self.schema.arrow_schema().clone();
let mut builder = self.builder();
Box::pin(DfRecordBatchStreamAdapter::new(
schema,
futures::stream::once(async move {
builder
.make_metrics(None)
.await
.map(|x| x.into_df_record_batch())
.map_err(Into::into)
}),
))
}
}
#[cfg(test)]
mod tests {
use common_recordbatch::RecordBatches;
use super::*;
#[tokio::test]
async fn test_make_metrics() {
let metrics = InformationSchemaMetrics::new();
let stream = metrics.to_stream(ScanRequest::default()).unwrap();
let batches = RecordBatches::try_collect(stream).await.unwrap();
let result_literal = batches.pretty_print().unwrap();
assert!(result_literal.contains(METRIC_NAME));
assert!(result_literal.contains(METRIC_VALUE));
assert!(result_literal.contains(METRIC_LABELS));
assert!(result_literal.contains(PEER_ADDR));
assert!(result_literal.contains(PEER_TYPE));
assert!(result_literal.contains(TIMESTAMP));
}
}

View File

@@ -38,6 +38,7 @@ pub const TABLE_PRIVILEGES: &str = "table_privileges";
pub const TRIGGERS: &str = "triggers";
pub const GLOBAL_STATUS: &str = "global_status";
pub const SESSION_STATUS: &str = "session_status";
pub const RUNTIME_METRICS: &str = "runtime_metrics";
pub const PARTITIONS: &str = "partitions";
pub const REGION_PEERS: &str = "region_peers";
pub const TABLE_CONSTRAINTS: &str = "table_constraints";

View File

@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::HashSet;
use std::sync::{Arc, Weak};
use arrow_schema::SchemaRef as ArrowSchemaRef;
@@ -254,17 +255,14 @@ impl InformationSchemaTablesBuilder {
// TODO(dennis): the `region_stats` API is not stable in distributed clusters because of network issues, etc.
// But we don't want statements such as `show tables` to fail,
// so we use `unwrap_or_else` here instead of the `?` operator.
let region_stats = {
let mut x = information_extension
.region_stats()
.await
.unwrap_or_else(|e| {
error!(e; "Failed to find region stats in information_schema, fallback to all empty");
vec![]
});
x.sort_unstable_by_key(|x| x.id);
x
};
let region_stats = information_extension
.region_stats()
.await
.map_err(|e| {
error!(e; "Failed to call region_stats");
e
})
.unwrap_or_else(|_| vec![]);
for schema_name in catalog_manager.schema_names(&catalog_name, None).await? {
let mut stream = catalog_manager.tables(&catalog_name, &schema_name, None);
@@ -275,16 +273,16 @@ impl InformationSchemaTablesBuilder {
// TODO(dennis): make it work for the metric engine
let table_region_stats =
if table_info.meta.engine == MITO_ENGINE || table_info.is_physical_table() {
table_info
let region_ids = table_info
.meta
.region_numbers
.iter()
.map(|n| RegionId::new(table_info.ident.table_id, *n))
.flat_map(|region_id| {
region_stats
.binary_search_by_key(&region_id, |x| x.id)
.map(|i| &region_stats[i])
})
.collect::<HashSet<_>>();
region_stats
.iter()
.filter(|stat| region_ids.contains(&stat.id))
.collect::<Vec<_>>()
} else {
vec![]

View File

@@ -23,8 +23,6 @@ use crate::Tool;
use crate::data::export::ExportCommand;
use crate::data::import::ImportCommand;
pub(crate) const COPY_PATH_PLACEHOLDER: &str = "<PATH/TO/FILES>";
/// Command for data operations including exporting data from and importing data into GreptimeDB.
#[derive(Subcommand)]
pub enum DataCommand {

View File

@@ -30,7 +30,7 @@ use snafu::{OptionExt, ResultExt};
use tokio::sync::Semaphore;
use tokio::time::Instant;
use crate::data::{COPY_PATH_PLACEHOLDER, default_database};
use crate::data::default_database;
use crate::database::{DatabaseClient, parse_proxy_opts};
use crate::error::{
EmptyResultSnafu, Error, OpenDalSnafu, OutputDirNotSetSnafu, Result, S3ConfigNotSetSnafu,
@@ -67,17 +67,9 @@ pub struct ExportCommand {
#[clap(long, default_value_t = default_database())]
database: String,
/// The number of databases exported in parallel.
/// For example, if there are 20 databases and `db_parallelism` is 4,
/// 4 databases will be exported concurrently.
#[clap(long, short = 'j', default_value = "1", alias = "export-jobs")]
db_parallelism: usize,
/// The number of tables exported in parallel within a single database.
/// For example, if a database has 30 tables and `parallelism` is 8,
/// 8 tables will be exported concurrently.
#[clap(long, default_value = "4")]
table_parallelism: usize,
/// Parallelism of the export.
#[clap(long, short = 'j', default_value = "1")]
export_jobs: usize,
/// Max retry times for each job.
#[clap(long, default_value = "3")]
@@ -218,11 +210,10 @@ impl ExportCommand {
schema,
database_client,
output_dir: self.output_dir.clone(),
export_jobs: self.db_parallelism,
parallelism: self.export_jobs,
target: self.target.clone(),
start_time: self.start_time.clone(),
end_time: self.end_time.clone(),
parallelism: self.table_parallelism,
s3: self.s3,
ddl_local_dir: self.ddl_local_dir.clone(),
s3_bucket: self.s3_bucket.clone(),
@@ -260,11 +251,10 @@ pub struct Export {
schema: Option<String>,
database_client: DatabaseClient,
output_dir: Option<String>,
export_jobs: usize,
parallelism: usize,
target: ExportTarget,
start_time: Option<String>,
end_time: Option<String>,
parallelism: usize,
s3: bool,
ddl_local_dir: Option<String>,
s3_bucket: Option<String>,
@@ -474,7 +464,7 @@ impl Export {
async fn export_create_table(&self) -> Result<()> {
let timer = Instant::now();
let semaphore = Arc::new(Semaphore::new(self.export_jobs));
let semaphore = Arc::new(Semaphore::new(self.parallelism));
let db_names = self.get_db_names().await?;
let db_count = db_names.len();
let operator = Arc::new(self.build_prefer_fs_operator().await?);
@@ -635,13 +625,13 @@ impl Export {
async fn export_database_data(&self) -> Result<()> {
let timer = Instant::now();
let semaphore = Arc::new(Semaphore::new(self.export_jobs));
let semaphore = Arc::new(Semaphore::new(self.parallelism));
let db_names = self.get_db_names().await?;
let db_count = db_names.len();
let mut tasks = Vec::with_capacity(db_count);
let operator = Arc::new(self.build_operator().await?);
let fs_first_operator = Arc::new(self.build_prefer_fs_operator().await?);
let with_options = build_with_options(&self.start_time, &self.end_time, self.parallelism);
let with_options = build_with_options(&self.start_time, &self.end_time);
for schema in db_names {
let semaphore_moved = semaphore.clone();
@@ -678,26 +668,10 @@ impl Export {
);
// Create copy_from.sql file
let copy_database_from_sql = {
let command_without_connection = format!(
r#"COPY DATABASE "{}"."{}" FROM '{}' WITH ({});"#,
export_self.catalog, schema, COPY_PATH_PLACEHOLDER, with_options_clone
);
if connection_part.is_empty() {
command_without_connection
} else {
let command_with_connection = format!(
r#"COPY DATABASE "{}"."{}" FROM '{}' WITH ({}){};"#,
export_self.catalog, schema, path, with_options_clone, connection_part
);
format!(
"-- {}\n{}",
command_with_connection, command_without_connection
)
}
};
let copy_database_from_sql = format!(
r#"COPY DATABASE "{}"."{}" FROM '{}' WITH ({}){};"#,
export_self.catalog, schema, path, with_options_clone, connection_part
);
let copy_from_path = export_self.get_file_path(&schema, "copy_from.sql");
export_self
@@ -898,11 +872,7 @@ impl Tool for Export {
}
/// Builds the WITH options string for SQL commands, assuming consistent syntax across S3 and local exports.
fn build_with_options(
start_time: &Option<String>,
end_time: &Option<String>,
parallelism: usize,
) -> String {
fn build_with_options(start_time: &Option<String>, end_time: &Option<String>) -> String {
let mut options = vec!["format = 'parquet'".to_string()];
if let Some(start) = start_time {
options.push(format!("start_time = '{}'", start));
@@ -910,6 +880,5 @@ fn build_with_options(
if let Some(end) = end_time {
options.push(format!("end_time = '{}'", end));
}
options.push(format!("parallelism = {}", parallelism));
options.join(", ")
}
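For illustration, assuming the two-argument form of `build_with_options` shown in this hunk, the produced option list looks like the following (the timestamps are placeholder values):

// Sketch: with both bounds set, the helper yields a comma-separated option list.
#[test]
fn example_build_with_options() {
    let with_options = build_with_options(
        &Some("2024-01-01 00:00:00".to_string()),
        &Some("2024-01-02 00:00:00".to_string()),
    );
    assert_eq!(
        with_options,
        "format = 'parquet', start_time = '2024-01-01 00:00:00', end_time = '2024-01-02 00:00:00'"
    );
}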

View File

@@ -21,13 +21,13 @@ use clap::{Parser, ValueEnum};
use common_catalog::consts::DEFAULT_SCHEMA_NAME;
use common_error::ext::BoxedError;
use common_telemetry::{error, info, warn};
use snafu::{OptionExt, ResultExt, ensure};
use snafu::{OptionExt, ResultExt};
use tokio::sync::Semaphore;
use tokio::time::Instant;
use crate::data::{COPY_PATH_PLACEHOLDER, default_database};
use crate::data::default_database;
use crate::database::{DatabaseClient, parse_proxy_opts};
use crate::error::{Error, FileIoSnafu, InvalidArgumentsSnafu, Result, SchemaNotFoundSnafu};
use crate::error::{Error, FileIoSnafu, Result, SchemaNotFoundSnafu};
use crate::{Tool, database};
#[derive(Debug, Default, Clone, ValueEnum)]
@@ -56,11 +56,9 @@ pub struct ImportCommand {
#[clap(long, default_value_t = default_database())]
database: String,
/// The number of databases imported in parallel.
/// For example, if there are 20 databases and `db_parallelism` is 4,
/// 4 databases will be imported concurrently.
#[clap(long, short = 'j', default_value = "1", alias = "import-jobs")]
db_parallelism: usize,
/// Parallelism of the import.
#[clap(long, short = 'j', default_value = "1")]
import_jobs: usize,
/// Max retry times for each job.
#[clap(long, default_value = "3")]
@@ -111,7 +109,7 @@ impl ImportCommand {
schema,
database_client,
input_dir: self.input_dir.clone(),
parallelism: self.db_parallelism,
parallelism: self.import_jobs,
target: self.target.clone(),
}))
}
@@ -150,15 +148,12 @@ impl Import {
let _permit = semaphore_moved.acquire().await.unwrap();
let database_input_dir = self.catalog_path().join(&schema);
let sql_file = database_input_dir.join(filename);
let mut sql = tokio::fs::read_to_string(sql_file)
let sql = tokio::fs::read_to_string(sql_file)
.await
.context(FileIoSnafu)?;
if sql.trim().is_empty() {
if sql.is_empty() {
info!("Empty `{filename}` {database_input_dir:?}");
} else {
if filename == "copy_from.sql" {
sql = self.rewrite_copy_database_sql(&schema, &sql)?;
}
let db = exec_db.unwrap_or(&schema);
self.database_client.sql(&sql, db).await?;
info!("Imported `{filename}` for database {schema}");
@@ -231,57 +226,6 @@ impl Import {
}
Ok(db_names)
}
fn rewrite_copy_database_sql(&self, schema: &str, sql: &str) -> Result<String> {
let target_location = self.build_copy_database_location(schema);
let escaped_location = target_location.replace('\'', "''");
let mut first_stmt_checked = false;
for line in sql.lines() {
let trimmed = line.trim_start();
if trimmed.is_empty() || trimmed.starts_with("--") {
continue;
}
ensure!(
trimmed.starts_with("COPY DATABASE"),
InvalidArgumentsSnafu {
msg: "Expected COPY DATABASE statement at start of copy_from.sql"
}
);
first_stmt_checked = true;
break;
}
ensure!(
first_stmt_checked,
InvalidArgumentsSnafu {
msg: "COPY DATABASE statement not found in copy_from.sql"
}
);
ensure!(
sql.contains(COPY_PATH_PLACEHOLDER),
InvalidArgumentsSnafu {
msg: format!(
"Placeholder `{}` not found in COPY DATABASE statement",
COPY_PATH_PLACEHOLDER
)
}
);
Ok(sql.replacen(COPY_PATH_PLACEHOLDER, &escaped_location, 1))
}
fn build_copy_database_location(&self, schema: &str) -> String {
let mut path = self.catalog_path();
path.push(schema);
let mut path_str = path.to_string_lossy().into_owned();
if !path_str.ends_with('/') {
path_str.push('/');
}
path_str
}
}
#[async_trait]
@@ -297,52 +241,3 @@ impl Tool for Import {
}
}
}
#[cfg(test)]
mod tests {
use std::time::Duration;
use super::*;
fn build_import(input_dir: &str) -> Import {
Import {
catalog: "catalog".to_string(),
schema: None,
database_client: DatabaseClient::new(
"127.0.0.1:4000".to_string(),
"catalog".to_string(),
None,
Duration::from_secs(0),
None,
),
input_dir: input_dir.to_string(),
parallelism: 1,
target: ImportTarget::Data,
}
}
#[test]
fn rewrite_copy_database_sql_replaces_placeholder() {
let import = build_import("/tmp/export-path");
let comment = "-- COPY DATABASE \"catalog\".\"schema\" FROM 's3://bucket/demo/' WITH (format = 'parquet') CONNECTION (region = 'us-west-2')";
let sql = format!(
"{comment}\nCOPY DATABASE \"catalog\".\"schema\" FROM '{}' WITH (format = 'parquet');",
COPY_PATH_PLACEHOLDER
);
let rewritten = import.rewrite_copy_database_sql("schema", &sql).unwrap();
let expected_location = import.build_copy_database_location("schema");
let escaped = expected_location.replace('\'', "''");
assert!(rewritten.starts_with(comment));
assert!(rewritten.contains(&format!("FROM '{escaped}'")));
assert!(!rewritten.contains(COPY_PATH_PLACEHOLDER));
}
#[test]
fn rewrite_copy_database_sql_requires_placeholder() {
let import = build_import("/tmp/export-path");
let sql = "COPY DATABASE \"catalog\".\"schema\" FROM '/tmp/export-path/catalog/schema/' WITH (format = 'parquet');";
assert!(import.rewrite_copy_database_sql("schema", sql).is_err());
}
}

View File

@@ -20,9 +20,7 @@ use api::v1::health_check_client::HealthCheckClient;
use api::v1::prometheus_gateway_client::PrometheusGatewayClient;
use api::v1::region::region_client::RegionClient as PbRegionClient;
use arrow_flight::flight_service_client::FlightServiceClient;
use common_grpc::channel_manager::{
ChannelConfig, ChannelManager, ClientTlsOption, load_client_tls_config,
};
use common_grpc::channel_manager::{ChannelConfig, ChannelManager, ClientTlsOption};
use parking_lot::RwLock;
use snafu::{OptionExt, ResultExt};
use tonic::codec::CompressionEncoding;
@@ -95,10 +93,9 @@ impl Client {
U: AsRef<str>,
A: AsRef<[U]>,
{
let channel_config = ChannelConfig::default().client_tls_config(client_tls.clone());
let tls_config =
load_client_tls_config(Some(client_tls)).context(error::CreateTlsChannelSnafu)?;
let channel_manager = ChannelManager::with_config(channel_config, tls_config);
let channel_config = ChannelConfig::default().client_tls_config(client_tls);
let channel_manager = ChannelManager::with_tls_config(channel_config)
.context(error::CreateTlsChannelSnafu)?;
Ok(Self::with_manager_and_urls(channel_manager, urls))
}

View File

@@ -74,7 +74,7 @@ impl FlownodeManager for NodeClients {
impl NodeClients {
pub fn new(config: ChannelConfig) -> Self {
Self {
channel_manager: ChannelManager::with_config(config, None),
channel_manager: ChannelManager::with_config(config),
clients: CacheBuilder::new(1024)
.time_to_live(Duration::from_secs(30 * 60))
.time_to_idle(Duration::from_secs(5 * 60))

View File

@@ -435,10 +435,10 @@ impl Database {
.context(ExternalSnafu)?;
match flight_message {
FlightMessage::RecordBatch(arrow_batch) => {
yield Ok(RecordBatch::from_df_record_batch(
yield RecordBatch::try_from_df_record_batch(
schema_cloned.clone(),
arrow_batch,
))
)
}
FlightMessage::Metrics(_) => {}
FlightMessage::AffectedRows(_) | FlightMessage::Schema(_) => {

View File

@@ -182,8 +182,10 @@ impl RegionRequester {
match flight_message {
FlightMessage::RecordBatch(record_batch) => {
let result_to_yield =
RecordBatch::from_df_record_batch(schema_cloned.clone(), record_batch);
let result_to_yield = RecordBatch::try_from_df_record_batch(
schema_cloned.clone(),
record_batch,
);
// Get the next message from the stream; normally it should be a metrics message.
if let Some(next_flight_message_result) = flight_message_stream.next().await
@@ -217,7 +219,7 @@ impl RegionRequester {
stream_ended = true;
}
yield Ok(result_to_yield);
yield result_to_yield;
}
FlightMessage::Metrics(s) => {
// Just a branch in case a metrics message arrives after the other messages.

View File

@@ -16,7 +16,7 @@ default = [
"meta-srv/pg_kvbackend",
"meta-srv/mysql_kvbackend",
]
enterprise = ["common-meta/enterprise", "frontend/enterprise", "meta-srv/enterprise"]
enterprise = ["common-meta/enterprise", "frontend/enterprise", "meta-srv/enterprise", "catalog/enterprise"]
tokio-console = ["common-telemetry/tokio-console"]
[lints]

View File

@@ -161,9 +161,7 @@ impl ObjbenchCommand {
level: 0,
file_size,
available_indexes: Default::default(),
indexes: Default::default(),
index_file_size: 0,
index_file_id: None,
num_rows,
num_row_groups,
sequence: None,

View File

@@ -99,6 +99,13 @@ pub enum Error {
source: flow::Error,
},
#[snafu(display("Servers error"))]
Servers {
#[snafu(implicit)]
location: Location,
source: servers::error::Error,
},
#[snafu(display("Failed to start frontend"))]
StartFrontend {
#[snafu(implicit)]
@@ -329,6 +336,7 @@ impl ErrorExt for Error {
Error::ShutdownFrontend { source, .. } => source.status_code(),
Error::StartMetaServer { source, .. } => source.status_code(),
Error::ShutdownMetaServer { source, .. } => source.status_code(),
Error::Servers { source, .. } => source.status_code(),
Error::BuildMetaServer { source, .. } => source.status_code(),
Error::UnsupportedSelectorType { source, .. } => source.status_code(),
Error::BuildCli { source, .. } => source.status_code(),

View File

@@ -12,7 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use std::fmt::Debug;
use std::path::Path;
use std::sync::Arc;
use std::time::Duration;
@@ -40,14 +39,12 @@ use flow::{
get_flow_auth_options,
};
use meta_client::{MetaClientOptions, MetaClientType};
use plugins::flownode::context::GrpcConfigureContext;
use servers::configurator::GrpcBuilderConfiguratorRef;
use snafu::{OptionExt, ResultExt, ensure};
use tracing_appender::non_blocking::WorkerGuard;
use crate::error::{
BuildCacheRegistrySnafu, InitMetadataSnafu, LoadLayeredConfigSnafu, MetaClientInitSnafu,
MissingConfigSnafu, OtherSnafu, Result, ShutdownFlownodeSnafu, StartFlownodeSnafu,
MissingConfigSnafu, Result, ShutdownFlownodeSnafu, StartFlownodeSnafu,
};
use crate::options::{GlobalOptions, GreptimeOptions};
use crate::{App, create_resource_limit_metrics, log_versions, maybe_activate_heap_profile};
@@ -58,14 +55,33 @@ type FlownodeOptions = GreptimeOptions<flow::FlownodeOptions>;
pub struct Instance {
flownode: FlownodeInstance,
// The components of flownode, which make it easier to expand based
// on the components.
#[cfg(feature = "enterprise")]
components: Components,
// Keep the logging guard to prevent the worker from being dropped.
_guard: Vec<WorkerGuard>,
}
#[cfg(feature = "enterprise")]
pub struct Components {
pub catalog_manager: catalog::CatalogManagerRef,
pub fe_client: Arc<FrontendClient>,
pub kv_backend: common_meta::kv_backend::KvBackendRef,
}
impl Instance {
pub fn new(flownode: FlownodeInstance, guard: Vec<WorkerGuard>) -> Self {
pub fn new(
flownode: FlownodeInstance,
#[cfg(feature = "enterprise")] components: Components,
guard: Vec<WorkerGuard>,
) -> Self {
Self {
flownode,
#[cfg(feature = "enterprise")]
components,
_guard: guard,
}
}
@@ -78,6 +94,11 @@ impl Instance {
pub fn flownode_mut(&mut self) -> &mut FlownodeInstance {
&mut self.flownode
}
#[cfg(feature = "enterprise")]
pub fn components(&self) -> &Components {
&self.components
}
}
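To illustrate the intent of the `components()` accessor above, a minimal, hypothetical sketch of an enterprise-only caller; the function name and what it does with the handles are illustrative assumptions, not part of this change.

// Hedged sketch: a hypothetical enterprise-only consumer of `Instance::components()`.
#[cfg(feature = "enterprise")]
fn wire_enterprise_extensions(instance: &Instance) {
    let components = instance.components();
    // Each field is a shared, reference-counted handle that can be cloned cheaply.
    let _catalog = components.catalog_manager.clone();
    let _frontend = components.fe_client.clone();
    let _kv_backend = components.kv_backend.clone();
}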
#[async_trait::async_trait]
@@ -375,7 +396,7 @@ impl StartCommand {
let frontend_client = Arc::new(frontend_client);
let flownode_builder = FlownodeBuilder::new(
opts.clone(),
plugins.clone(),
plugins,
table_metadata_manager,
catalog_manager.clone(),
flow_metadata_manager,
@@ -384,29 +405,8 @@ impl StartCommand {
.with_heartbeat_task(heartbeat_task);
let mut flownode = flownode_builder.build().await.context(StartFlownodeSnafu)?;
let builder =
FlownodeServiceBuilder::grpc_server_builder(&opts, flownode.flownode_server());
let builder = if let Some(configurator) =
plugins.get::<GrpcBuilderConfiguratorRef<GrpcConfigureContext>>()
{
let context = GrpcConfigureContext {
kv_backend: cached_meta_backend.clone(),
fe_client: frontend_client.clone(),
flownode_id: member_id,
catalog_manager: catalog_manager.clone(),
};
configurator
.configure(builder, context)
.await
.context(OtherSnafu)?
} else {
builder
};
let grpc_server = builder.build();
let services = FlownodeServiceBuilder::new(&opts)
.with_grpc_server(grpc_server)
.with_default_grpc_server(flownode.flownode_server())
.enable_http_service()
.build()
.context(StartFlownodeSnafu)?;
@@ -430,6 +430,16 @@ impl StartCommand {
.set_frontend_invoker(invoker)
.await;
Ok(Instance::new(flownode, guard))
#[cfg(feature = "enterprise")]
let components = Components {
catalog_manager: catalog_manager.clone(),
fe_client: frontend_client,
kv_backend: cached_meta_backend,
};
#[cfg(not(feature = "enterprise"))]
return Ok(Instance::new(flownode, guard));
#[cfg(feature = "enterprise")]
Ok(Instance::new(flownode, components, guard))
}
}

View File

@@ -12,7 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use std::fmt::Debug;
use std::path::Path;
use std::sync::Arc;
use std::time::Duration;
@@ -20,10 +19,7 @@ use std::time::Duration;
use async_trait::async_trait;
use cache::{build_fundamental_cache_registry, with_default_composite_cache_registry};
use catalog::information_extension::DistributedInformationExtension;
use catalog::kvbackend::{
CachedKvBackendBuilder, CatalogManagerConfiguratorRef, KvBackendCatalogManagerBuilder,
MetaKvBackend,
};
use catalog::kvbackend::{CachedKvBackendBuilder, KvBackendCatalogManagerBuilder, MetaKvBackend};
use catalog::process_manager::ProcessManager;
use clap::Parser;
use client::client_manager::NodeClients;
@@ -46,16 +42,14 @@ use frontend::heartbeat::HeartbeatTask;
use frontend::instance::builder::FrontendBuilder;
use frontend::server::Services;
use meta_client::{MetaClientOptions, MetaClientType};
use plugins::frontend::context::{
CatalogManagerConfigureContext, DistributedCatalogManagerConfigureContext,
};
use servers::addrs;
use servers::export_metrics::ExportMetricsTask;
use servers::grpc::GrpcOptions;
use servers::tls::{TlsMode, TlsOption};
use snafu::{OptionExt, ResultExt};
use tracing_appender::non_blocking::WorkerGuard;
use crate::error::{self, OtherSnafu, Result};
use crate::error::{self, Result};
use crate::options::{GlobalOptions, GreptimeOptions};
use crate::{App, create_resource_limit_metrics, log_versions, maybe_activate_heap_profile};
@@ -183,8 +177,6 @@ pub struct StartCommand {
#[clap(long)]
tls_key_path: Option<String>,
#[clap(long)]
tls_watch: bool,
#[clap(long)]
user_provider: Option<String>,
#[clap(long)]
disable_dashboard: Option<bool>,
@@ -238,7 +230,6 @@ impl StartCommand {
self.tls_mode.clone(),
self.tls_cert_path.clone(),
self.tls_key_path.clone(),
self.tls_watch,
);
if let Some(addr) = &self.http_addr {
@@ -423,18 +414,9 @@ impl StartCommand {
layered_cache_registry.clone(),
)
.with_process_manager(process_manager.clone());
let builder = if let Some(configurator) =
plugins.get::<CatalogManagerConfiguratorRef<CatalogManagerConfigureContext>>()
{
let ctx = DistributedCatalogManagerConfigureContext {
meta_client: meta_client.clone(),
};
let ctx = CatalogManagerConfigureContext::Distributed(ctx);
configurator
.configure(builder, ctx)
.await
.context(OtherSnafu)?
#[cfg(feature = "enterprise")]
let builder = if let Some(factories) = plugins.get() {
builder.with_extra_information_table_factories(factories)
} else {
builder
};
@@ -473,6 +455,9 @@ impl StartCommand {
.context(error::StartFrontendSnafu)?;
let instance = Arc::new(instance);
let export_metrics_task = ExportMetricsTask::try_new(&opts.export_metrics, Some(&plugins))
.context(error::ServersSnafu)?;
let servers = Services::new(opts, instance.clone(), plugins)
.build()
.context(error::StartFrontendSnafu)?;
@@ -481,6 +466,7 @@ impl StartCommand {
instance,
servers,
heartbeat_task,
export_metrics_task,
};
Ok(Instance::new(frontend, guard))

View File

@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use std::fmt::{self, Debug};
use std::fmt;
use std::path::Path;
use std::time::Duration;
@@ -23,7 +23,7 @@ use common_config::Configurable;
use common_telemetry::info;
use common_telemetry::logging::{DEFAULT_LOGGING_DIR, TracingOptions};
use common_version::{short_version, verbose_version};
use meta_srv::bootstrap::{MetasrvInstance, metasrv_builder};
use meta_srv::bootstrap::MetasrvInstance;
use meta_srv::metasrv::BackendImpl;
use snafu::ResultExt;
use tracing_appender::non_blocking::WorkerGuard;
@@ -177,7 +177,7 @@ pub struct StartCommand {
backend: Option<BackendImpl>,
}
impl Debug for StartCommand {
impl fmt::Debug for StartCommand {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("StartCommand")
.field("rpc_bind_addr", &self.rpc_bind_addr)
@@ -341,7 +341,7 @@ impl StartCommand {
.await
.context(StartMetaServerSnafu)?;
let builder = metasrv_builder(&opts, plugins, None)
let builder = meta_srv::bootstrap::metasrv_builder(&opts, plugins, None)
.await
.context(error::BuildMetaServerSnafu)?;
let metasrv = builder.build().await.context(error::BuildMetaServerSnafu)?;

View File

@@ -12,7 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use std::fmt::Debug;
use std::net::SocketAddr;
use std::path::Path;
use std::sync::Arc;
@@ -21,7 +20,7 @@ use std::{fs, path};
use async_trait::async_trait;
use cache::{build_fundamental_cache_registry, with_default_composite_cache_registry};
use catalog::information_schema::InformationExtensionRef;
use catalog::kvbackend::{CatalogManagerConfiguratorRef, KvBackendCatalogManagerBuilder};
use catalog::kvbackend::KvBackendCatalogManagerBuilder;
use catalog::process_manager::ProcessManager;
use clap::Parser;
use common_base::Plugins;
@@ -32,7 +31,7 @@ use common_meta::cache::LayeredCacheRegistryBuilder;
use common_meta::ddl::flow_meta::FlowMetadataAllocator;
use common_meta::ddl::table_meta::TableMetadataAllocator;
use common_meta::ddl::{DdlContext, NoopRegionFailureDetectorControl};
use common_meta::ddl_manager::{DdlManager, DdlManagerConfiguratorRef};
use common_meta::ddl_manager::DdlManager;
use common_meta::key::flow::FlowMetadataManager;
use common_meta::key::{TableMetadataManager, TableMetadataManagerRef};
use common_meta::kv_backend::KvBackendRef;
@@ -58,17 +57,14 @@ use frontend::instance::StandaloneDatanodeManager;
use frontend::instance::builder::FrontendBuilder;
use frontend::server::Services;
use meta_srv::metasrv::{FLOW_ID_SEQ, TABLE_ID_SEQ};
use plugins::frontend::context::{
CatalogManagerConfigureContext, StandaloneCatalogManagerConfigureContext,
};
use plugins::standalone::context::DdlManagerConfigureContext;
use servers::export_metrics::ExportMetricsTask;
use servers::tls::{TlsMode, TlsOption};
use snafu::ResultExt;
use standalone::StandaloneInformationExtension;
use standalone::options::StandaloneOptions;
use tracing_appender::non_blocking::WorkerGuard;
use crate::error::{OtherSnafu, Result, StartFlownodeSnafu};
use crate::error::{Result, StartFlownodeSnafu};
use crate::options::{GlobalOptions, GreptimeOptions};
use crate::{App, create_resource_limit_metrics, error, log_versions, maybe_activate_heap_profile};
@@ -121,15 +117,34 @@ pub struct Instance {
flownode: FlownodeInstance,
procedure_manager: ProcedureManagerRef,
wal_options_allocator: WalOptionsAllocatorRef,
// The components of the standalone instance, kept here to make it easier to
// build additional functionality on top of them.
#[cfg(feature = "enterprise")]
components: Components,
// Keep the logging guard to prevent the worker from being dropped.
_guard: Vec<WorkerGuard>,
}
#[cfg(feature = "enterprise")]
pub struct Components {
pub plugins: Plugins,
pub kv_backend: KvBackendRef,
pub frontend_client: Arc<FrontendClient>,
pub catalog_manager: catalog::CatalogManagerRef,
}
impl Instance {
/// Find the socket addr of a server by its `name`.
pub fn server_addr(&self, name: &str) -> Option<SocketAddr> {
self.frontend.server_handlers().addr(name)
}
#[cfg(feature = "enterprise")]
pub fn components(&self) -> &Components {
&self.components
}
}
#[async_trait]
@@ -213,8 +228,6 @@ pub struct StartCommand {
#[clap(long)]
tls_key_path: Option<String>,
#[clap(long)]
tls_watch: bool,
#[clap(long)]
user_provider: Option<String>,
#[clap(long, default_value = "GREPTIMEDB_STANDALONE")]
pub env_prefix: String,
@@ -264,7 +277,6 @@ impl StartCommand {
self.tls_mode.clone(),
self.tls_cert_path.clone(),
self.tls_key_path.clone(),
self.tls_watch,
);
if let Some(addr) = &self.http_addr {
@@ -401,13 +413,6 @@ impl StartCommand {
plugins.insert::<InformationExtensionRef>(information_extension.clone());
let process_manager = Arc::new(ProcessManager::new(opts.grpc.server_addr.clone(), None));
// Standalone mode does not use gRPC here; it only obtains a handle to the
// frontend gRPC client without actually making a connection.
let (frontend_client, frontend_instance_handler) =
FrontendClient::from_empty_grpc_handler(opts.query.clone());
let frontend_client = Arc::new(frontend_client);
let builder = KvBackendCatalogManagerBuilder::new(
information_extension.clone(),
kv_backend.clone(),
@@ -415,17 +420,9 @@ impl StartCommand {
)
.with_procedure_manager(procedure_manager.clone())
.with_process_manager(process_manager.clone());
let builder = if let Some(configurator) =
plugins.get::<CatalogManagerConfiguratorRef<CatalogManagerConfigureContext>>()
{
let ctx = StandaloneCatalogManagerConfigureContext {
fe_client: frontend_client.clone(),
};
let ctx = CatalogManagerConfigureContext::Standalone(ctx);
configurator
.configure(builder, ctx)
.await
.context(OtherSnafu)?
#[cfg(feature = "enterprise")]
let builder = if let Some(factories) = plugins.get() {
builder.with_extra_information_table_factories(factories)
} else {
builder
};
@@ -440,6 +437,11 @@ impl StartCommand {
..Default::default()
};
// Standalone mode does not use gRPC here; it only obtains a handle to the
// frontend gRPC client without actually making a connection.
let (frontend_client, frontend_instance_handler) =
FrontendClient::from_empty_grpc_handler(opts.query.clone());
let frontend_client = Arc::new(frontend_client);
let flow_builder = FlownodeBuilder::new(
flownode_options,
plugins.clone(),
@@ -510,21 +512,11 @@ impl StartCommand {
let ddl_manager = DdlManager::try_new(ddl_context, procedure_manager.clone(), true)
.context(error::InitDdlManagerSnafu)?;
let ddl_manager = if let Some(configurator) =
plugins.get::<DdlManagerConfiguratorRef<DdlManagerConfigureContext>>()
{
let ctx = DdlManagerConfigureContext {
kv_backend: kv_backend.clone(),
fe_client: frontend_client.clone(),
catalog_manager: catalog_manager.clone(),
};
configurator
.configure(ddl_manager, ctx)
.await
.context(OtherSnafu)?
} else {
ddl_manager
#[cfg(feature = "enterprise")]
let ddl_manager = {
let trigger_ddl_manager: Option<common_meta::ddl_manager::TriggerDdlManagerRef> =
plugins.get();
ddl_manager.with_trigger_ddl_manager(trigger_ddl_manager)
};
let procedure_executor = Arc::new(LocalProcedureExecutor::new(
@@ -570,6 +562,9 @@ impl StartCommand {
.context(StartFlownodeSnafu)?;
flow_streaming_engine.set_frontend_invoker(invoker).await;
let export_metrics_task = ExportMetricsTask::try_new(&opts.export_metrics, Some(&plugins))
.context(error::ServersSnafu)?;
let servers = Services::new(opts, fe_instance.clone(), plugins.clone())
.build()
.context(error::StartFrontendSnafu)?;
@@ -578,6 +573,15 @@ impl StartCommand {
instance: fe_instance,
servers,
heartbeat_task: None,
export_metrics_task,
};
#[cfg(feature = "enterprise")]
let components = Components {
plugins,
kv_backend,
frontend_client,
catalog_manager,
};
Ok(Instance {
@@ -586,6 +590,8 @@ impl StartCommand {
flownode,
procedure_manager,
wal_options_allocator,
#[cfg(feature = "enterprise")]
components,
_guard: guard,
})
}
@@ -763,9 +769,6 @@ mod tests {
fn test_load_log_options_from_cli() {
let cmd = StartCommand {
user_provider: Some("static_user_provider:cmd:test=test".to_string()),
mysql_addr: Some("127.0.0.1:4002".to_string()),
postgres_addr: Some("127.0.0.1:4003".to_string()),
tls_watch: true,
..Default::default()
};
@@ -782,8 +785,6 @@ mod tests {
assert_eq!("./greptimedb_data/test/logs", opts.logging.dir);
assert_eq!("debug", opts.logging.level.unwrap());
assert!(opts.mysql.tls.watch);
assert!(opts.postgres.tls.watch);
}
#[test]

View File

@@ -15,7 +15,6 @@
use std::time::Duration;
use cmd::options::GreptimeOptions;
use common_base::memory_limit::MemoryLimit;
use common_config::{Configurable, DEFAULT_DATA_HOME};
use common_options::datanode::{ClientOptions, DatanodeClientOptions};
use common_telemetry::logging::{DEFAULT_LOGGING_DIR, DEFAULT_OTLP_HTTP_ENDPOINT, LoggingOptions};
@@ -31,6 +30,7 @@ use meta_srv::selector::SelectorType;
use metric_engine::config::EngineConfig as MetricEngineConfig;
use mito2::config::MitoConfig;
use query::options::QueryOptions;
use servers::export_metrics::ExportMetricsOption;
use servers::grpc::GrpcOptions;
use servers::http::HttpOptions;
use servers::tls::{TlsMode, TlsOption};
@@ -52,6 +52,7 @@ fn test_load_datanode_example_config() {
meta_client: Some(MetaClientOptions {
metasrv_addrs: vec!["127.0.0.1:3002".to_string()],
timeout: Duration::from_secs(3),
heartbeat_timeout: Duration::from_millis(500),
ddl_timeout: Duration::from_secs(10),
connect_timeout: Duration::from_secs(1),
tcp_nodelay: true,
@@ -73,19 +74,14 @@ fn test_load_datanode_example_config() {
RegionEngineConfig::Mito(MitoConfig {
auto_flush_interval: Duration::from_secs(3600),
write_cache_ttl: Some(Duration::from_secs(60 * 60 * 8)),
scan_memory_limit: MemoryLimit::Percentage(50),
..Default::default()
}),
RegionEngineConfig::File(FileEngineConfig {}),
RegionEngineConfig::Metric(MetricEngineConfig {
sparse_primary_key_encoding: true,
experimental_sparse_primary_key_encoding: false,
flush_metadata_region_interval: Duration::from_secs(30),
}),
],
query: QueryOptions {
memory_pool_size: MemoryLimit::Percentage(50),
..Default::default()
},
logging: LoggingOptions {
level: Some("info".to_string()),
dir: format!("{}/{}", DEFAULT_DATA_HOME, DEFAULT_LOGGING_DIR),
@@ -93,6 +89,11 @@ fn test_load_datanode_example_config() {
tracing_sample_ratio: Some(Default::default()),
..Default::default()
},
export_metrics: ExportMetricsOption {
self_import: None,
remote_write: Some(Default::default()),
..Default::default()
},
grpc: GrpcOptions::default()
.with_bind_addr("127.0.0.1:3001")
.with_server_addr("127.0.0.1:3001"),
@@ -117,6 +118,7 @@ fn test_load_frontend_example_config() {
meta_client: Some(MetaClientOptions {
metasrv_addrs: vec!["127.0.0.1:3002".to_string()],
timeout: Duration::from_secs(3),
heartbeat_timeout: Duration::from_millis(500),
ddl_timeout: Duration::from_secs(10),
connect_timeout: Duration::from_secs(1),
tcp_nodelay: true,
@@ -138,6 +140,11 @@ fn test_load_frontend_example_config() {
..Default::default()
},
},
export_metrics: ExportMetricsOption {
self_import: None,
remote_write: Some(Default::default()),
..Default::default()
},
grpc: GrpcOptions {
bind_addr: "127.0.0.1:4001".to_string(),
server_addr: "127.0.0.1:4001".to_string(),
@@ -148,10 +155,6 @@ fn test_load_frontend_example_config() {
cors_allowed_origins: vec!["https://example.com".to_string()],
..Default::default()
},
query: QueryOptions {
memory_pool_size: MemoryLimit::Percentage(50),
..Default::default()
},
..Default::default()
},
..Default::default()
@@ -188,6 +191,11 @@ fn test_load_metasrv_example_config() {
tcp_nodelay: true,
},
},
export_metrics: ExportMetricsOption {
self_import: None,
remote_write: Some(Default::default()),
..Default::default()
},
backend_tls: Some(TlsOption {
mode: TlsMode::Prefer,
cert_path: String::new(),
@@ -234,11 +242,11 @@ fn test_load_flownode_example_config() {
query: QueryOptions {
parallelism: 1,
allow_query_fallback: false,
memory_pool_size: MemoryLimit::Percentage(50),
},
meta_client: Some(MetaClientOptions {
metasrv_addrs: vec!["127.0.0.1:3002".to_string()],
timeout: Duration::from_secs(3),
heartbeat_timeout: Duration::from_millis(500),
ddl_timeout: Duration::from_secs(10),
connect_timeout: Duration::from_secs(1),
tcp_nodelay: true,
@@ -278,12 +286,11 @@ fn test_load_standalone_example_config() {
RegionEngineConfig::Mito(MitoConfig {
auto_flush_interval: Duration::from_secs(3600),
write_cache_ttl: Some(Duration::from_secs(60 * 60 * 8)),
scan_memory_limit: MemoryLimit::Percentage(50),
..Default::default()
}),
RegionEngineConfig::File(FileEngineConfig {}),
RegionEngineConfig::Metric(MetricEngineConfig {
sparse_primary_key_encoding: true,
experimental_sparse_primary_key_encoding: false,
flush_metadata_region_interval: Duration::from_secs(30),
}),
],
@@ -298,14 +305,16 @@ fn test_load_standalone_example_config() {
tracing_sample_ratio: Some(Default::default()),
..Default::default()
},
export_metrics: ExportMetricsOption {
self_import: Some(Default::default()),
remote_write: Some(Default::default()),
..Default::default()
},
http: HttpOptions {
cors_allowed_origins: vec!["https://example.com".to_string()],
..Default::default()
},
query: QueryOptions {
memory_pool_size: MemoryLimit::Percentage(50),
..Default::default()
},
..Default::default()
},
..Default::default()

View File

@@ -15,7 +15,6 @@
pub mod bit_vec;
pub mod bytes;
pub mod cancellation;
pub mod memory_limit;
pub mod plugins;
pub mod range_read;
#[allow(clippy::all)]

View File

@@ -1,265 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::fmt::{self, Display};
use std::str::FromStr;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use crate::readable_size::ReadableSize;
/// Memory limit configuration that supports both absolute size and percentage.
///
/// Examples:
/// - Absolute size: "2GB", "4GiB", "512MB"
/// - Percentage: "50%", "75%"
/// - Unlimited: "unlimited", "0"
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub enum MemoryLimit {
/// Absolute memory size.
Size(ReadableSize),
/// Percentage of total system memory (0-100).
Percentage(u8),
/// No memory limit.
#[default]
Unlimited,
}
impl MemoryLimit {
/// Resolve the memory limit to bytes based on total system memory.
/// Returns 0 if the limit is unlimited.
pub fn resolve(&self, total_memory_bytes: u64) -> u64 {
match self {
MemoryLimit::Size(size) => size.as_bytes(),
MemoryLimit::Percentage(pct) => total_memory_bytes * (*pct as u64) / 100,
MemoryLimit::Unlimited => 0,
}
}
/// Returns true if this limit is unlimited.
pub fn is_unlimited(&self) -> bool {
match self {
MemoryLimit::Size(size) => size.as_bytes() == 0,
MemoryLimit::Percentage(pct) => *pct == 0,
MemoryLimit::Unlimited => true,
}
}
}
impl FromStr for MemoryLimit {
type Err = String;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let s = s.trim();
if s.eq_ignore_ascii_case("unlimited") {
return Ok(MemoryLimit::Unlimited);
}
if let Some(pct_str) = s.strip_suffix('%') {
let pct = pct_str
.trim()
.parse::<u8>()
.map_err(|e| format!("invalid percentage value '{}': {}", pct_str, e))?;
if pct > 100 {
return Err(format!("percentage must be between 0 and 100, got {}", pct));
}
if pct == 0 {
Ok(MemoryLimit::Unlimited)
} else {
Ok(MemoryLimit::Percentage(pct))
}
} else {
let size = ReadableSize::from_str(s)?;
if size.as_bytes() == 0 {
Ok(MemoryLimit::Unlimited)
} else {
Ok(MemoryLimit::Size(size))
}
}
}
}
impl Display for MemoryLimit {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
MemoryLimit::Size(size) => write!(f, "{}", size),
MemoryLimit::Percentage(pct) => write!(f, "{}%", pct),
MemoryLimit::Unlimited => write!(f, "unlimited"),
}
}
}
impl Serialize for MemoryLimit {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
serializer.serialize_str(&self.to_string())
}
}
impl<'de> Deserialize<'de> for MemoryLimit {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
let s = String::deserialize(deserializer)?;
MemoryLimit::from_str(&s).map_err(serde::de::Error::custom)
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_parse_absolute_size() {
assert_eq!(
"2GB".parse::<MemoryLimit>().unwrap(),
MemoryLimit::Size(ReadableSize(2 * 1024 * 1024 * 1024))
);
assert_eq!(
"512MB".parse::<MemoryLimit>().unwrap(),
MemoryLimit::Size(ReadableSize(512 * 1024 * 1024))
);
assert_eq!("0".parse::<MemoryLimit>().unwrap(), MemoryLimit::Unlimited);
}
#[test]
fn test_parse_percentage() {
assert_eq!(
"50%".parse::<MemoryLimit>().unwrap(),
MemoryLimit::Percentage(50)
);
assert_eq!(
"75%".parse::<MemoryLimit>().unwrap(),
MemoryLimit::Percentage(75)
);
assert_eq!("0%".parse::<MemoryLimit>().unwrap(), MemoryLimit::Unlimited);
}
#[test]
fn test_parse_invalid() {
assert!("150%".parse::<MemoryLimit>().is_err());
assert!("-10%".parse::<MemoryLimit>().is_err());
assert!("invalid".parse::<MemoryLimit>().is_err());
}
#[test]
fn test_resolve() {
let total = 8 * 1024 * 1024 * 1024; // 8GB
assert_eq!(
MemoryLimit::Size(ReadableSize(2 * 1024 * 1024 * 1024)).resolve(total),
2 * 1024 * 1024 * 1024
);
assert_eq!(
MemoryLimit::Percentage(50).resolve(total),
4 * 1024 * 1024 * 1024
);
assert_eq!(MemoryLimit::Unlimited.resolve(total), 0);
}
#[test]
fn test_is_unlimited() {
assert!(MemoryLimit::Unlimited.is_unlimited());
assert!(!MemoryLimit::Size(ReadableSize(1024)).is_unlimited());
assert!(!MemoryLimit::Percentage(50).is_unlimited());
assert!(!MemoryLimit::Percentage(1).is_unlimited());
// Defensive: these states shouldn't exist via public API, but check anyway
assert!(MemoryLimit::Size(ReadableSize(0)).is_unlimited());
assert!(MemoryLimit::Percentage(0).is_unlimited());
}
#[test]
fn test_parse_100_percent() {
assert_eq!(
"100%".parse::<MemoryLimit>().unwrap(),
MemoryLimit::Percentage(100)
);
}
#[test]
fn test_display_percentage() {
assert_eq!(MemoryLimit::Percentage(20).to_string(), "20%");
assert_eq!(MemoryLimit::Percentage(50).to_string(), "50%");
assert_eq!(MemoryLimit::Percentage(100).to_string(), "100%");
}
#[test]
fn test_parse_unlimited() {
assert_eq!(
"unlimited".parse::<MemoryLimit>().unwrap(),
MemoryLimit::Unlimited
);
assert_eq!(
"UNLIMITED".parse::<MemoryLimit>().unwrap(),
MemoryLimit::Unlimited
);
assert_eq!(
"Unlimited".parse::<MemoryLimit>().unwrap(),
MemoryLimit::Unlimited
);
}
#[test]
fn test_display_unlimited() {
assert_eq!(MemoryLimit::Unlimited.to_string(), "unlimited");
}
#[test]
fn test_parse_display_roundtrip() {
let cases = vec![
"50%",
"100%",
"1%",
"2GB",
"512MB",
"unlimited",
"UNLIMITED",
"0", // normalized to unlimited
"0%", // normalized to unlimited
];
for input in cases {
let parsed = input.parse::<MemoryLimit>().unwrap();
let displayed = parsed.to_string();
let reparsed = displayed.parse::<MemoryLimit>().unwrap();
assert_eq!(
parsed, reparsed,
"round-trip failed: '{}' -> '{}' -> '{:?}'",
input, displayed, reparsed
);
}
}
#[test]
fn test_zero_normalization() {
// All forms of zero should normalize to Unlimited
assert_eq!("0".parse::<MemoryLimit>().unwrap(), MemoryLimit::Unlimited);
assert_eq!("0%".parse::<MemoryLimit>().unwrap(), MemoryLimit::Unlimited);
assert_eq!("0B".parse::<MemoryLimit>().unwrap(), MemoryLimit::Unlimited);
assert_eq!(
"0KB".parse::<MemoryLimit>().unwrap(),
MemoryLimit::Unlimited
);
// Unlimited always displays as "unlimited"
assert_eq!(MemoryLimit::Unlimited.to_string(), "unlimited");
}
}
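For context on the module removed above, a minimal sketch of how a `MemoryLimit` value is parsed from a configuration string and resolved against total system memory; the 8 GiB total used here is an illustrative assumption.

// Hedged sketch of MemoryLimit usage; the total memory value is assumed for illustration.
fn example_memory_limit() {
    let total_memory: u64 = 8 * 1024 * 1024 * 1024; // assume an 8 GiB host

    // "50%" resolves to half of the total system memory.
    let limit = "50%".parse::<MemoryLimit>().unwrap();
    assert_eq!(limit.resolve(total_memory), 4 * 1024 * 1024 * 1024);

    // "0", "0%" and "unlimited" all normalize to Unlimited, which resolves to 0
    // and is interpreted by callers as "no limit".
    let unlimited = "unlimited".parse::<MemoryLimit>().unwrap();
    assert!(unlimited.is_unlimited());
    assert_eq!(unlimited.resolve(total_memory), 0);
}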

View File

@@ -32,12 +32,7 @@ impl Plugins {
pub fn insert<T: 'static + Send + Sync>(&self, value: T) {
let last = self.write().insert(value);
if last.is_some() {
panic!(
"Plugin of type {} already exists",
std::any::type_name::<T>()
);
}
assert!(last.is_none(), "each type of plugins must be one and only");
}
pub fn get<T: 'static + Send + Sync + Clone>(&self) -> Option<T> {
@@ -145,7 +140,7 @@ mod tests {
}
#[test]
#[should_panic(expected = "Plugin of type i32 already exists")]
#[should_panic(expected = "each type of plugins must be one and only")]
fn test_plugin_uniqueness() {
let plugins = Plugins::new();
plugins.insert(1i32);

View File

@@ -86,6 +86,8 @@ pub const INFORMATION_SCHEMA_TRIGGERS_TABLE_ID: u32 = 24;
pub const INFORMATION_SCHEMA_GLOBAL_STATUS_TABLE_ID: u32 = 25;
/// id for information_schema.SESSION_STATUS
pub const INFORMATION_SCHEMA_SESSION_STATUS_TABLE_ID: u32 = 26;
/// id for information_schema.RUNTIME_METRICS
pub const INFORMATION_SCHEMA_RUNTIME_METRICS_TABLE_ID: u32 = 27;
/// id for information_schema.PARTITIONS
pub const INFORMATION_SCHEMA_PARTITIONS_TABLE_ID: u32 = 28;
/// id for information_schema.REGION_PEERS
@@ -110,8 +112,6 @@ pub const INFORMATION_SCHEMA_SSTS_MANIFEST_TABLE_ID: u32 = 37;
pub const INFORMATION_SCHEMA_SSTS_STORAGE_TABLE_ID: u32 = 38;
/// id for information_schema.ssts_index_meta
pub const INFORMATION_SCHEMA_SSTS_INDEX_META_TABLE_ID: u32 = 39;
/// id for information_schema.alerts
pub const INFORMATION_SCHEMA_ALERTS_TABLE_ID: u32 = 40;
// ----- End of information_schema tables -----

View File

@@ -12,11 +12,28 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use std::future::Future;
use arrow::record_batch::RecordBatch;
use async_trait::async_trait;
use datafusion::parquet::format::FileMetaData;
use snafu::{OptionExt, ResultExt};
use tokio::io::{AsyncWrite, AsyncWriteExt};
use crate::error::Result;
use crate::error::{self, Result};
use crate::share_buffer::SharedBuffer;
pub struct LazyBufferedWriter<T, U, F> {
path: String,
writer_factory: F,
writer: Option<T>,
/// `None` indicates that the [`LazyBufferedWriter`] has been closed.
encoder: Option<U>,
buffer: SharedBuffer,
rows_written: usize,
bytes_written: u64,
threshold: usize,
}
pub trait DfRecordBatchEncoder {
fn write(&mut self, batch: &RecordBatch) -> Result<()>;
@@ -26,3 +43,126 @@ pub trait DfRecordBatchEncoder {
pub trait ArrowWriterCloser {
async fn close(mut self) -> Result<FileMetaData>;
}
impl<
T: AsyncWrite + Send + Unpin,
U: DfRecordBatchEncoder + ArrowWriterCloser,
F: Fn(String) -> Fut,
Fut: Future<Output = Result<T>>,
> LazyBufferedWriter<T, U, F>
{
/// Closes the `LazyBufferedWriter`, flushing all data to the underlying storage
/// if any rows have been written.
pub async fn close_with_arrow_writer(mut self) -> Result<(FileMetaData, u64)> {
let encoder = self
.encoder
.take()
.context(error::BufferedWriterClosedSnafu)?;
let metadata = encoder.close().await?;
// It's important to shut down: this flushes all pending writes.
self.close_inner_writer().await?;
Ok((metadata, self.bytes_written))
}
}
impl<
T: AsyncWrite + Send + Unpin,
U: DfRecordBatchEncoder,
F: Fn(String) -> Fut,
Fut: Future<Output = Result<T>>,
> LazyBufferedWriter<T, U, F>
{
/// Closes the writer and flushes the buffered data.
pub async fn close_inner_writer(&mut self) -> Result<()> {
// Use `rows_written` to track whether any rows have been written.
// If none have been written, simply close the underlying writer without
// flushing, so that no file is actually created.
if self.rows_written != 0 {
self.bytes_written += self.try_flush(true).await?;
}
if let Some(writer) = &mut self.writer {
writer.shutdown().await.context(error::AsyncWriteSnafu)?;
}
Ok(())
}
pub fn new(
threshold: usize,
buffer: SharedBuffer,
encoder: U,
path: impl AsRef<str>,
writer_factory: F,
) -> Self {
Self {
path: path.as_ref().to_string(),
threshold,
encoder: Some(encoder),
buffer,
rows_written: 0,
bytes_written: 0,
writer_factory,
writer: None,
}
}
pub async fn write(&mut self, batch: &RecordBatch) -> Result<()> {
let encoder = self
.encoder
.as_mut()
.context(error::BufferedWriterClosedSnafu)?;
encoder.write(batch)?;
self.rows_written += batch.num_rows();
self.bytes_written += self.try_flush(false).await?;
Ok(())
}
async fn try_flush(&mut self, all: bool) -> Result<u64> {
let mut bytes_written: u64 = 0;
// Once the buffered data size reaches the threshold, split it into chunks
// (typically 4MB) and write them to the underlying storage.
while self.buffer.buffer.lock().unwrap().len() >= self.threshold {
let chunk = {
let mut buffer = self.buffer.buffer.lock().unwrap();
buffer.split_to(self.threshold)
};
let size = chunk.len();
self.maybe_init_writer()
.await?
.write_all(&chunk)
.await
.context(error::AsyncWriteSnafu)?;
bytes_written += size as u64;
}
if all {
bytes_written += self.try_flush_all().await?;
}
Ok(bytes_written)
}
/// Lazily initializes the underlying file writer; it is only created once there is data to flush.
async fn maybe_init_writer(&mut self) -> Result<&mut T> {
if let Some(ref mut writer) = self.writer {
Ok(writer)
} else {
let writer = (self.writer_factory)(self.path.clone()).await?;
Ok(self.writer.insert(writer))
}
}
async fn try_flush_all(&mut self) -> Result<u64> {
let remain = self.buffer.buffer.lock().unwrap().split();
let size = remain.len();
self.maybe_init_writer()
.await?
.write_all(&remain)
.await
.context(error::AsyncWriteSnafu)?;
Ok(size as u64)
}
}
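A minimal usage sketch of the writer added above, assuming the module's existing imports; `make_encoder` and `open_writer` are placeholder closures, and the refactored `stream_to_file` later in this diff is the real call site.

// Hedged sketch of the LazyBufferedWriter lifecycle: write batches, then close.
async fn write_batches_sketch<E, W, F, Fut>(
    batches: &[RecordBatch],
    make_encoder: impl Fn(SharedBuffer) -> E,
    open_writer: F,
) -> Result<()>
where
    E: DfRecordBatchEncoder,
    W: AsyncWrite + Send + Unpin,
    F: Fn(String) -> Fut,
    Fut: Future<Output = Result<W>>,
{
    // 8 MiB flush threshold; the underlying file is only created once data is flushed.
    let buffer = SharedBuffer::with_capacity(8 * 1024 * 1024);
    let encoder = make_encoder(buffer.clone());
    let mut writer =
        LazyBufferedWriter::new(8 * 1024 * 1024, buffer, encoder, "example/output", open_writer);
    for batch in batches {
        writer.write(batch).await?;
    }
    // Flushes any remaining buffered data (if rows were written) and shuts the writer down.
    writer.close_inner_writer().await?;
    Ok(())
}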

View File

@@ -1,202 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::io;
use std::pin::Pin;
use std::task::{Context, Poll};
use async_compression::tokio::write::{BzEncoder, GzipEncoder, XzEncoder, ZstdEncoder};
use snafu::ResultExt;
use tokio::io::{AsyncWrite, AsyncWriteExt};
use crate::compression::CompressionType;
use crate::error::{self, Result};
/// A compressed writer that wraps an underlying async writer with compression.
///
/// This writer supports multiple compression formats including GZIP, BZIP2, XZ, and ZSTD.
/// It provides transparent compression for any async writer implementation.
pub struct CompressedWriter {
inner: Box<dyn AsyncWrite + Unpin + Send>,
compression_type: CompressionType,
}
impl CompressedWriter {
/// Creates a new compressed writer with the specified compression type.
///
/// # Arguments
///
/// * `writer` - The underlying writer to wrap with compression
/// * `compression_type` - The type of compression to apply
pub fn new(
writer: impl AsyncWrite + Unpin + Send + 'static,
compression_type: CompressionType,
) -> Self {
let inner: Box<dyn AsyncWrite + Unpin + Send> = match compression_type {
CompressionType::Gzip => Box::new(GzipEncoder::new(writer)),
CompressionType::Bzip2 => Box::new(BzEncoder::new(writer)),
CompressionType::Xz => Box::new(XzEncoder::new(writer)),
CompressionType::Zstd => Box::new(ZstdEncoder::new(writer)),
CompressionType::Uncompressed => Box::new(writer),
};
Self {
inner,
compression_type,
}
}
/// Returns the compression type used by this writer.
pub fn compression_type(&self) -> CompressionType {
self.compression_type
}
/// Flush the writer and shutdown compression
pub async fn shutdown(mut self) -> Result<()> {
self.inner
.shutdown()
.await
.context(error::AsyncWriteSnafu)?;
Ok(())
}
}
impl AsyncWrite for CompressedWriter {
fn poll_write(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &[u8],
) -> Poll<io::Result<usize>> {
Pin::new(&mut self.inner).poll_write(cx, buf)
}
fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
Pin::new(&mut self.inner).poll_flush(cx)
}
fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
Pin::new(&mut self.inner).poll_shutdown(cx)
}
}
/// A trait for converting async writers into compressed writers.
///
/// This trait is automatically implemented for all types that implement [`AsyncWrite`].
pub trait IntoCompressedWriter {
/// Converts this writer into a [`CompressedWriter`] with the specified compression type.
///
/// # Arguments
///
/// * `self` - The underlying writer to wrap with compression
/// * `compression_type` - The type of compression to apply
fn into_compressed_writer(self, compression_type: CompressionType) -> CompressedWriter
where
Self: AsyncWrite + Unpin + Send + 'static + Sized,
{
CompressedWriter::new(self, compression_type)
}
}
impl<W: AsyncWrite + Unpin + Send + 'static> IntoCompressedWriter for W {}
#[cfg(test)]
mod tests {
use tokio::io::{AsyncReadExt, AsyncWriteExt, duplex};
use super::*;
#[tokio::test]
async fn test_compressed_writer_gzip() {
let (duplex_writer, mut duplex_reader) = duplex(1024);
let mut writer = duplex_writer.into_compressed_writer(CompressionType::Gzip);
let original = b"test data for gzip compression";
writer.write_all(original).await.unwrap();
writer.shutdown().await.unwrap();
let mut buffer = Vec::new();
duplex_reader.read_to_end(&mut buffer).await.unwrap();
// The compressed data should be different from the original
assert_ne!(buffer, original);
assert!(!buffer.is_empty());
}
#[tokio::test]
async fn test_compressed_writer_bzip2() {
let (duplex_writer, mut duplex_reader) = duplex(1024);
let mut writer = duplex_writer.into_compressed_writer(CompressionType::Bzip2);
let original = b"test data for bzip2 compression";
writer.write_all(original).await.unwrap();
writer.shutdown().await.unwrap();
let mut buffer = Vec::new();
duplex_reader.read_to_end(&mut buffer).await.unwrap();
// The compressed data should be different from the original
assert_ne!(buffer, original);
assert!(!buffer.is_empty());
}
#[tokio::test]
async fn test_compressed_writer_xz() {
let (duplex_writer, mut duplex_reader) = duplex(1024);
let mut writer = duplex_writer.into_compressed_writer(CompressionType::Xz);
let original = b"test data for xz compression";
writer.write_all(original).await.unwrap();
writer.shutdown().await.unwrap();
let mut buffer = Vec::new();
duplex_reader.read_to_end(&mut buffer).await.unwrap();
// The compressed data should be different from the original
assert_ne!(buffer, original);
assert!(!buffer.is_empty());
}
#[tokio::test]
async fn test_compressed_writer_zstd() {
let (duplex_writer, mut duplex_reader) = duplex(1024);
let mut writer = duplex_writer.into_compressed_writer(CompressionType::Zstd);
let original = b"test data for zstd compression";
writer.write_all(original).await.unwrap();
writer.shutdown().await.unwrap();
let mut buffer = Vec::new();
duplex_reader.read_to_end(&mut buffer).await.unwrap();
// The compressed data should be different from the original
assert_ne!(buffer, original);
assert!(!buffer.is_empty());
}
#[tokio::test]
async fn test_compressed_writer_uncompressed() {
let (duplex_writer, mut duplex_reader) = duplex(1024);
let mut writer = duplex_writer.into_compressed_writer(CompressionType::Uncompressed);
let original = b"test data for uncompressed";
writer.write_all(original).await.unwrap();
writer.shutdown().await.unwrap();
let mut buffer = Vec::new();
duplex_reader.read_to_end(&mut buffer).await.unwrap();
// Uncompressed data should be the same as the original
assert_eq!(buffer, original);
}
}

View File

@@ -194,6 +194,12 @@ pub enum Error {
location: Location,
},
#[snafu(display("Buffered writer closed"))]
BufferedWriterClosed {
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Failed to write parquet file, path: {}", path))]
WriteParquet {
path: String,
@@ -202,14 +208,6 @@ pub enum Error {
#[snafu(source)]
error: parquet::errors::ParquetError,
},
#[snafu(display("Failed to build file stream"))]
BuildFileStream {
#[snafu(implicit)]
location: Location,
#[snafu(source)]
error: datafusion::error::DataFusionError,
},
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -241,7 +239,7 @@ impl ErrorExt for Error {
| ReadRecordBatch { .. }
| WriteRecordBatch { .. }
| EncodeRecordBatch { .. }
| BuildFileStream { .. }
| BufferedWriterClosed { .. }
| OrcReader { .. } => StatusCode::Unexpected,
}
}

View File

@@ -30,22 +30,12 @@ use arrow::record_batch::RecordBatch;
use arrow_schema::{ArrowError, Schema as ArrowSchema};
use async_trait::async_trait;
use bytes::{Buf, Bytes};
use common_recordbatch::DfSendableRecordBatchStream;
use datafusion::datasource::file_format::file_compression_type::FileCompressionType as DfCompressionType;
use datafusion::datasource::listing::PartitionedFile;
use datafusion::datasource::object_store::ObjectStoreUrl;
use datafusion::datasource::physical_plan::{
FileGroup, FileOpenFuture, FileScanConfigBuilder, FileSource, FileStream,
};
use datafusion::datasource::physical_plan::FileOpenFuture;
use datafusion::error::{DataFusionError, Result as DataFusionResult};
use datafusion::physical_plan::SendableRecordBatchStream;
use datafusion::physical_plan::metrics::ExecutionPlanMetricsSet;
use datatypes::arrow::datatypes::SchemaRef;
use futures::{StreamExt, TryStreamExt};
use object_store::ObjectStore;
use object_store_opendal::OpendalStore;
use snafu::ResultExt;
use tokio::io::AsyncWriteExt;
use tokio_util::compat::FuturesAsyncWriteCompatExt;
use self::csv::CsvFormat;
@@ -53,8 +43,7 @@ use self::json::JsonFormat;
use self::orc::OrcFormat;
use self::parquet::ParquetFormat;
use crate::DEFAULT_WRITE_BUFFER_SIZE;
use crate::buffered_writer::DfRecordBatchEncoder;
use crate::compressed_writer::{CompressedWriter, IntoCompressedWriter};
use crate::buffered_writer::{DfRecordBatchEncoder, LazyBufferedWriter};
use crate::compression::CompressionType;
use crate::error::{self, Result};
use crate::share_buffer::SharedBuffer;
@@ -206,128 +195,33 @@ pub async fn infer_schemas(
ArrowSchema::try_merge(schemas).context(error::MergeSchemaSnafu)
}
/// Writes data to a compressed writer if the data is not empty.
///
/// Does nothing if `data` is empty; otherwise writes all data and returns any error.
async fn write_to_compressed_writer(
compressed_writer: &mut CompressedWriter,
data: &[u8],
) -> Result<()> {
if !data.is_empty() {
compressed_writer
.write_all(data)
.await
.context(error::AsyncWriteSnafu)?;
}
Ok(())
}
/// Streams [SendableRecordBatchStream] to a file with optional compression support.
/// Data is buffered and flushed according to the given `threshold`.
/// Ensures that writer resources are cleanly released and that an empty file is not
/// created if no rows are written.
///
/// Returns the total number of rows successfully written.
pub async fn stream_to_file<E>(
pub async fn stream_to_file<T: DfRecordBatchEncoder, U: Fn(SharedBuffer) -> T>(
mut stream: SendableRecordBatchStream,
store: ObjectStore,
path: &str,
threshold: usize,
concurrency: usize,
compression_type: CompressionType,
encoder_factory: impl Fn(SharedBuffer) -> E,
) -> Result<usize>
where
E: DfRecordBatchEncoder,
{
// Create the file writer with OpenDAL's built-in buffering
let writer = store
.writer_with(path)
.concurrent(concurrency)
.chunk(DEFAULT_WRITE_BUFFER_SIZE.as_bytes() as usize)
.await
.with_context(|_| error::WriteObjectSnafu { path })?
.into_futures_async_write()
.compat_write();
// Apply compression if needed
let mut compressed_writer = writer.into_compressed_writer(compression_type);
// Create a buffer for the encoder
encoder_factory: U,
) -> Result<usize> {
let buffer = SharedBuffer::with_capacity(threshold);
let mut encoder = encoder_factory(buffer.clone());
let encoder = encoder_factory(buffer.clone());
let mut writer = LazyBufferedWriter::new(threshold, buffer, encoder, path, |path| async {
store
.writer_with(&path)
.concurrent(concurrency)
.chunk(DEFAULT_WRITE_BUFFER_SIZE.as_bytes() as usize)
.await
.map(|v| v.into_futures_async_write().compat_write())
.context(error::WriteObjectSnafu { path })
});
let mut rows = 0;
// Process each record batch
while let Some(batch) = stream.next().await {
let batch = batch.context(error::ReadRecordBatchSnafu)?;
// Write batch using the encoder
encoder.write(&batch)?;
writer.write(&batch).await?;
rows += batch.num_rows();
loop {
let chunk = {
let mut buffer_guard = buffer.buffer.lock().unwrap();
if buffer_guard.len() < threshold {
break;
}
buffer_guard.split_to(threshold)
};
write_to_compressed_writer(&mut compressed_writer, &chunk).await?;
}
}
// If no rows have been written, simply close the underlying writer without
// flushing, so that no file is actually created.
if rows != 0 {
// Final flush of any remaining data
let final_data = {
let mut buffer_guard = buffer.buffer.lock().unwrap();
buffer_guard.split()
};
write_to_compressed_writer(&mut compressed_writer, &final_data).await?;
}
// Shutdown compression and close writer
compressed_writer.shutdown().await?;
writer.close_inner_writer().await?;
Ok(rows)
}
/// Creates a [FileStream] for reading data from a file with optional column projection
/// and compression support.
///
/// Returns [SendableRecordBatchStream].
pub async fn file_to_stream(
store: &ObjectStore,
filename: &str,
file_schema: SchemaRef,
file_source: Arc<dyn FileSource>,
projection: Option<Vec<usize>>,
compression_type: CompressionType,
) -> Result<DfSendableRecordBatchStream> {
let df_compression: DfCompressionType = compression_type.into();
let config = FileScanConfigBuilder::new(
ObjectStoreUrl::local_filesystem(),
file_schema,
file_source.clone(),
)
.with_file_group(FileGroup::new(vec![PartitionedFile::new(
filename.to_string(),
0,
)]))
.with_projection(projection)
.with_file_compression_type(df_compression)
.build();
let store = Arc::new(OpendalStore::new(store.clone()));
let file_opener = file_source
.with_projection(&config)
.create_file_opener(store, &config, 0);
let stream = FileStream::new(&config, 0, file_opener, &ExecutionPlanMetricsSet::new())
.context(error::BuildFileStreamSnafu)?;
Ok(Box::pin(stream))
}

View File

@@ -157,27 +157,19 @@ pub async fn stream_to_csv(
concurrency: usize,
format: &CsvFormat,
) -> Result<usize> {
stream_to_file(
stream,
store,
path,
threshold,
concurrency,
format.compression_type,
|buffer| {
let mut builder = WriterBuilder::new();
if let Some(timestamp_format) = &format.timestamp_format {
builder = builder.with_timestamp_format(timestamp_format.to_owned())
}
if let Some(date_format) = &format.date_format {
builder = builder.with_date_format(date_format.to_owned())
}
if let Some(time_format) = &format.time_format {
builder = builder.with_time_format(time_format.to_owned())
}
builder.build(buffer)
},
)
stream_to_file(stream, store, path, threshold, concurrency, |buffer| {
let mut builder = WriterBuilder::new();
if let Some(timestamp_format) = &format.timestamp_format {
builder = builder.with_timestamp_format(timestamp_format.to_owned())
}
if let Some(date_format) = &format.date_format {
builder = builder.with_date_format(date_format.to_owned())
}
if let Some(time_format) = &format.time_format {
builder = builder.with_time_format(time_format.to_owned())
}
builder.build(buffer)
})
.await
}
@@ -189,21 +181,13 @@ impl DfRecordBatchEncoder for csv::Writer<SharedBuffer> {
#[cfg(test)]
mod tests {
use std::sync::Arc;
use common_recordbatch::adapter::DfRecordBatchStreamAdapter;
use common_recordbatch::{RecordBatch, RecordBatches};
use common_test_util::find_workspace_path;
use datafusion::datasource::physical_plan::{CsvSource, FileSource};
use datatypes::prelude::ConcreteDataType;
use datatypes::schema::{ColumnSchema, Schema};
use datatypes::vectors::{Float64Vector, StringVector, UInt32Vector, VectorRef};
use futures::TryStreamExt;
use super::*;
use crate::file_format::{
FORMAT_COMPRESSION_TYPE, FORMAT_DELIMITER, FORMAT_HAS_HEADER,
FORMAT_SCHEMA_INFER_MAX_RECORD, FileFormat, file_to_stream,
FORMAT_SCHEMA_INFER_MAX_RECORD, FileFormat,
};
use crate::test_util::{format_schema, test_store};
@@ -313,166 +297,4 @@ mod tests {
}
);
}
#[tokio::test]
async fn test_compressed_csv() {
// Create test data
let column_schemas = vec![
ColumnSchema::new("id", ConcreteDataType::uint32_datatype(), false),
ColumnSchema::new("name", ConcreteDataType::string_datatype(), false),
ColumnSchema::new("value", ConcreteDataType::float64_datatype(), false),
];
let schema = Arc::new(Schema::new(column_schemas));
// Create multiple record batches with different data
let batch1_columns: Vec<VectorRef> = vec![
Arc::new(UInt32Vector::from_slice(vec![1, 2, 3])),
Arc::new(StringVector::from(vec!["Alice", "Bob", "Charlie"])),
Arc::new(Float64Vector::from_slice(vec![10.5, 20.3, 30.7])),
];
let batch1 = RecordBatch::new(schema.clone(), batch1_columns).unwrap();
let batch2_columns: Vec<VectorRef> = vec![
Arc::new(UInt32Vector::from_slice(vec![4, 5, 6])),
Arc::new(StringVector::from(vec!["David", "Eva", "Frank"])),
Arc::new(Float64Vector::from_slice(vec![40.1, 50.2, 60.3])),
];
let batch2 = RecordBatch::new(schema.clone(), batch2_columns).unwrap();
let batch3_columns: Vec<VectorRef> = vec![
Arc::new(UInt32Vector::from_slice(vec![7, 8, 9])),
Arc::new(StringVector::from(vec!["Grace", "Henry", "Ivy"])),
Arc::new(Float64Vector::from_slice(vec![70.4, 80.5, 90.6])),
];
let batch3 = RecordBatch::new(schema.clone(), batch3_columns).unwrap();
// Combine all batches into a RecordBatches collection
let recordbatches = RecordBatches::try_new(schema, vec![batch1, batch2, batch3]).unwrap();
// Test with different compression types
let compression_types = vec![
CompressionType::Gzip,
CompressionType::Bzip2,
CompressionType::Xz,
CompressionType::Zstd,
];
// Create a temporary file path
let temp_dir = common_test_util::temp_dir::create_temp_dir("test_compressed_csv");
for compression_type in compression_types {
let format = CsvFormat {
compression_type,
..CsvFormat::default()
};
// Use correct format without Debug formatter
let compressed_file_name =
format!("test_compressed_csv.{}", compression_type.file_extension());
let compressed_file_path = temp_dir.path().join(&compressed_file_name);
let compressed_file_path_str = compressed_file_path.to_str().unwrap();
// Create a simple file store for testing
let store = test_store("/");
// Export CSV with compression
let rows = stream_to_csv(
Box::pin(DfRecordBatchStreamAdapter::new(recordbatches.as_stream())),
store,
compressed_file_path_str,
1024,
1,
&format,
)
.await
.unwrap();
assert_eq!(rows, 9);
// Verify compressed file was created and has content
assert!(compressed_file_path.exists());
let file_size = std::fs::metadata(&compressed_file_path).unwrap().len();
assert!(file_size > 0);
// Verify the file is actually compressed
let file_content = std::fs::read(&compressed_file_path).unwrap();
// Compressed files should not start with CSV header
// They should have compression magic bytes
match compression_type {
CompressionType::Gzip => {
// Gzip magic bytes: 0x1f 0x8b
assert_eq!(file_content[0], 0x1f, "Gzip file should start with 0x1f");
assert_eq!(
file_content[1], 0x8b,
"Gzip file should have 0x8b as second byte"
);
}
CompressionType::Bzip2 => {
// Bzip2 magic bytes: 'BZ'
assert_eq!(file_content[0], b'B', "Bzip2 file should start with 'B'");
assert_eq!(
file_content[1], b'Z',
"Bzip2 file should have 'Z' as second byte"
);
}
CompressionType::Xz => {
// XZ magic bytes: 0xFD '7zXZ'
assert_eq!(file_content[0], 0xFD, "XZ file should start with 0xFD");
}
CompressionType::Zstd => {
// Zstd magic bytes: 0x28 0xB5 0x2F 0xFD
assert_eq!(file_content[0], 0x28, "Zstd file should start with 0x28");
assert_eq!(
file_content[1], 0xB5,
"Zstd file should have 0xB5 as second byte"
);
}
_ => {}
}
// Verify the compressed file can be decompressed and content matches original data
let store = test_store("/");
let schema = Arc::new(
CsvFormat {
compression_type,
..Default::default()
}
.infer_schema(&store, compressed_file_path_str)
.await
.unwrap(),
);
let csv_source = CsvSource::new(true, b',', b'"')
.with_schema(schema.clone())
.with_batch_size(8192);
let stream = file_to_stream(
&store,
compressed_file_path_str,
schema.clone(),
csv_source.clone(),
None,
compression_type,
)
.await
.unwrap();
let batches = stream.try_collect::<Vec<_>>().await.unwrap();
let pretty_print = arrow::util::pretty::pretty_format_batches(&batches)
.unwrap()
.to_string();
let expected = r#"+----+---------+-------+
| id | name | value |
+----+---------+-------+
| 1 | Alice | 10.5 |
| 2 | Bob | 20.3 |
| 3 | Charlie | 30.7 |
| 4 | David | 40.1 |
| 5 | Eva | 50.2 |
| 6 | Frank | 60.3 |
| 7 | Grace | 70.4 |
| 8 | Henry | 80.5 |
| 9 | Ivy | 90.6 |
+----+---------+-------+"#;
assert_eq!(expected, pretty_print);
}
}
}

View File

@@ -115,17 +115,10 @@ pub async fn stream_to_json(
path: &str,
threshold: usize,
concurrency: usize,
format: &JsonFormat,
) -> Result<usize> {
stream_to_file(
stream,
store,
path,
threshold,
concurrency,
format.compression_type,
json::LineDelimitedWriter::new,
)
stream_to_file(stream, store, path, threshold, concurrency, |buffer| {
json::LineDelimitedWriter::new(buffer)
})
.await
}
@@ -137,21 +130,10 @@ impl DfRecordBatchEncoder for json::Writer<SharedBuffer, LineDelimited> {
#[cfg(test)]
mod tests {
use std::sync::Arc;
use common_recordbatch::adapter::DfRecordBatchStreamAdapter;
use common_recordbatch::{RecordBatch, RecordBatches};
use common_test_util::find_workspace_path;
use datafusion::datasource::physical_plan::{FileSource, JsonSource};
use datatypes::prelude::ConcreteDataType;
use datatypes::schema::{ColumnSchema, Schema};
use datatypes::vectors::{Float64Vector, StringVector, UInt32Vector, VectorRef};
use futures::TryStreamExt;
use super::*;
use crate::file_format::{
FORMAT_COMPRESSION_TYPE, FORMAT_SCHEMA_INFER_MAX_RECORD, FileFormat, file_to_stream,
};
use crate::file_format::{FORMAT_COMPRESSION_TYPE, FORMAT_SCHEMA_INFER_MAX_RECORD, FileFormat};
use crate::test_util::{format_schema, test_store};
fn test_data_root() -> String {
@@ -221,165 +203,4 @@ mod tests {
}
);
}
#[tokio::test]
async fn test_compressed_json() {
// Create test data
let column_schemas = vec![
ColumnSchema::new("id", ConcreteDataType::uint32_datatype(), false),
ColumnSchema::new("name", ConcreteDataType::string_datatype(), false),
ColumnSchema::new("value", ConcreteDataType::float64_datatype(), false),
];
let schema = Arc::new(Schema::new(column_schemas));
// Create multiple record batches with different data
let batch1_columns: Vec<VectorRef> = vec![
Arc::new(UInt32Vector::from_slice(vec![1, 2, 3])),
Arc::new(StringVector::from(vec!["Alice", "Bob", "Charlie"])),
Arc::new(Float64Vector::from_slice(vec![10.5, 20.3, 30.7])),
];
let batch1 = RecordBatch::new(schema.clone(), batch1_columns).unwrap();
let batch2_columns: Vec<VectorRef> = vec![
Arc::new(UInt32Vector::from_slice(vec![4, 5, 6])),
Arc::new(StringVector::from(vec!["David", "Eva", "Frank"])),
Arc::new(Float64Vector::from_slice(vec![40.1, 50.2, 60.3])),
];
let batch2 = RecordBatch::new(schema.clone(), batch2_columns).unwrap();
let batch3_columns: Vec<VectorRef> = vec![
Arc::new(UInt32Vector::from_slice(vec![7, 8, 9])),
Arc::new(StringVector::from(vec!["Grace", "Henry", "Ivy"])),
Arc::new(Float64Vector::from_slice(vec![70.4, 80.5, 90.6])),
];
let batch3 = RecordBatch::new(schema.clone(), batch3_columns).unwrap();
// Combine all batches into a RecordBatches collection
let recordbatches = RecordBatches::try_new(schema, vec![batch1, batch2, batch3]).unwrap();
// Test with different compression types
let compression_types = vec![
CompressionType::Gzip,
CompressionType::Bzip2,
CompressionType::Xz,
CompressionType::Zstd,
];
// Create a temporary file path
let temp_dir = common_test_util::temp_dir::create_temp_dir("test_compressed_json");
for compression_type in compression_types {
let format = JsonFormat {
compression_type,
..JsonFormat::default()
};
let compressed_file_name =
format!("test_compressed_json.{}", compression_type.file_extension());
let compressed_file_path = temp_dir.path().join(&compressed_file_name);
let compressed_file_path_str = compressed_file_path.to_str().unwrap();
// Create a simple file store for testing
let store = test_store("/");
// Export JSON with compression
let rows = stream_to_json(
Box::pin(DfRecordBatchStreamAdapter::new(recordbatches.as_stream())),
store,
compressed_file_path_str,
1024,
1,
&format,
)
.await
.unwrap();
assert_eq!(rows, 9);
// Verify compressed file was created and has content
assert!(compressed_file_path.exists());
let file_size = std::fs::metadata(&compressed_file_path).unwrap().len();
assert!(file_size > 0);
// Verify the file is actually compressed
let file_content = std::fs::read(&compressed_file_path).unwrap();
// Compressed files should not start with '{' (JSON character)
// They should have compression magic bytes
match compression_type {
CompressionType::Gzip => {
// Gzip magic bytes: 0x1f 0x8b
assert_eq!(file_content[0], 0x1f, "Gzip file should start with 0x1f");
assert_eq!(
file_content[1], 0x8b,
"Gzip file should have 0x8b as second byte"
);
}
CompressionType::Bzip2 => {
// Bzip2 magic bytes: 'BZ'
assert_eq!(file_content[0], b'B', "Bzip2 file should start with 'B'");
assert_eq!(
file_content[1], b'Z',
"Bzip2 file should have 'Z' as second byte"
);
}
CompressionType::Xz => {
// XZ magic bytes: 0xFD '7zXZ'
assert_eq!(file_content[0], 0xFD, "XZ file should start with 0xFD");
}
CompressionType::Zstd => {
// Zstd magic bytes: 0x28 0xB5 0x2F 0xFD
assert_eq!(file_content[0], 0x28, "Zstd file should start with 0x28");
assert_eq!(
file_content[1], 0xB5,
"Zstd file should have 0xB5 as second byte"
);
}
_ => {}
}
// Verify the compressed file can be decompressed and content matches original data
let store = test_store("/");
let schema = Arc::new(
JsonFormat {
compression_type,
..Default::default()
}
.infer_schema(&store, compressed_file_path_str)
.await
.unwrap(),
);
let json_source = JsonSource::new()
.with_schema(schema.clone())
.with_batch_size(8192);
let stream = file_to_stream(
&store,
compressed_file_path_str,
schema.clone(),
json_source.clone(),
None,
compression_type,
)
.await
.unwrap();
let batches = stream.try_collect::<Vec<_>>().await.unwrap();
let pretty_print = arrow::util::pretty::pretty_format_batches(&batches)
.unwrap()
.to_string();
let expected = r#"+----+---------+-------+
| id | name | value |
+----+---------+-------+
| 1 | Alice | 10.5 |
| 2 | Bob | 20.3 |
| 3 | Charlie | 30.7 |
| 4 | David | 40.1 |
| 5 | Eva | 50.2 |
| 6 | Frank | 60.3 |
| 7 | Grace | 70.4 |
| 8 | Henry | 80.5 |
| 9 | Ivy | 90.6 |
+----+---------+-------+"#;
assert_eq!(expected, pretty_print);
}
}
}

View File

@@ -16,7 +16,6 @@
#![feature(type_alias_impl_trait)]
pub mod buffered_writer;
pub mod compressed_writer;
pub mod compression;
pub mod error;
pub mod file_format;

View File

@@ -28,7 +28,7 @@ use object_store::ObjectStore;
use object_store::services::Fs;
use crate::file_format::csv::{CsvFormat, stream_to_csv};
use crate::file_format::json::{JsonFormat, stream_to_json};
use crate::file_format::json::stream_to_json;
use crate::test_util;
pub const TEST_BATCH_SIZE: usize = 100;
@@ -122,16 +122,13 @@ pub async fn setup_stream_to_json_test(origin_path: &str, threshold: impl Fn(usi
let output_path = format!("{}/{}", dir.path().display(), "output");
let json_format = JsonFormat::default();
assert!(
stream_to_json(
Box::pin(stream),
tmp_store.clone(),
&output_path,
threshold(size),
8,
&json_format,
8
)
.await
.is_ok()

View File

@@ -45,19 +45,3 @@ pub fn from_err_code_msg_to_header(code: u32, msg: &str) -> HeaderMap {
header.insert(GREPTIME_DB_HEADER_ERROR_MSG, msg);
header
}
/// Returns the external root cause of the error chain, excluding the current error itself.
pub fn root_source(err: &dyn std::error::Error) -> Option<&dyn std::error::Error> {
// There is some divergence over the behavior of the `sources()` API
// (see https://github.com/rust-lang/rust/issues/58520),
// so this function iterates the sources manually.
let mut root = err.source();
while let Some(r) = root {
if let Some(s) = r.source() {
root = Some(s);
} else {
break;
}
}
root
}
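For reference, a minimal sketch of how the removed `root_source` helper behaves on a two-level error chain; `Outer` and `Inner` are illustrative types defined only for this example.

// Hedged sketch: root_source skips the outermost error and returns the deepest source.
use std::fmt;

#[derive(Debug)]
struct Inner;
#[derive(Debug)]
struct Outer(Inner);

impl fmt::Display for Inner {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str("inner cause")
    }
}
impl fmt::Display for Outer {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str("outer error")
    }
}
impl std::error::Error for Inner {}
impl std::error::Error for Outer {
    fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
        Some(&self.0)
    }
}

fn example_root_source() {
    let err = Outer(Inner);
    // The chain is Outer -> Inner; root_source returns Inner, the innermost cause.
    let root = root_source(&err).expect("the chain has at least one source");
    assert_eq!(root.to_string(), "inner cause");
}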

View File

@@ -97,9 +97,9 @@ pub trait Event: Send + Sync + Debug {
vec![]
}
/// Add the extra rows to the event with the default row.
fn extra_rows(&self) -> Result<Vec<Row>> {
Ok(vec![Row { values: vec![] }])
/// Returns the extra row to add to the event; the default implementation returns an empty row.
fn extra_row(&self) -> Result<Row> {
Ok(Row { values: vec![] })
}
/// Returns the event as any type.
@@ -159,17 +159,15 @@ pub fn build_row_inserts_request(events: &[&Box<dyn Event>]) -> Result<RowInsert
let mut rows: Vec<Row> = Vec::with_capacity(events.len());
for event in events {
let extra_rows = event.extra_rows()?;
for extra_row in extra_rows {
let mut values = Vec::with_capacity(3 + extra_row.values.len());
values.extend([
ValueData::StringValue(event.event_type().to_string()).into(),
ValueData::BinaryValue(event.json_payload()?.into_bytes()).into(),
ValueData::TimestampNanosecondValue(event.timestamp().value()).into(),
]);
values.extend(extra_row.values);
rows.push(Row { values });
}
let extra_row = event.extra_row()?;
let mut values = Vec::with_capacity(3 + extra_row.values.len());
values.extend([
ValueData::StringValue(event.event_type().to_string()).into(),
ValueData::BinaryValue(event.json_payload()?.into_bytes()).into(),
ValueData::TimestampNanosecondValue(event.timestamp().value()).into(),
]);
values.extend(extra_row.values);
rows.push(Row { values });
}
Ok(RowInsertRequests {
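With this change an event contributes exactly one extra row instead of a list, and the request builder flattens it after the three fixed columns. A hedged sketch of that assembly as a standalone helper; the helper itself is hypothetical and the import paths are assumed, while Row and ValueData are the types used in the diff:

use api::v1::Row;
use api::v1::value::ValueData;

// Hypothetical helper: each event now yields one row made of the three fixed
// columns followed by the values of its single extra row.
fn assemble_row(event_type: &str, payload: Vec<u8>, ts_nanos: i64, extra: Row) -> Row {
    let mut values = Vec::with_capacity(3 + extra.values.len());
    values.extend([
        ValueData::StringValue(event_type.to_string()).into(),
        ValueData::BinaryValue(payload).into(),
        ValueData::TimestampNanosecondValue(ts_nanos).into(),
    ]);
    values.extend(extra.values);
    Row { values }
}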

View File

@@ -104,7 +104,7 @@ impl MetaClientSelector {
let cfg = ChannelConfig::new()
.connect_timeout(Duration::from_secs(30))
.timeout(Duration::from_secs(30));
let channel_manager = ChannelManager::with_config(cfg, None);
let channel_manager = ChannelManager::with_config(cfg);
Self {
meta_client,
channel_manager,

View File

@@ -107,8 +107,8 @@ impl Event for SlowQueryEvent {
]
}
fn extra_rows(&self) -> Result<Vec<Row>> {
Ok(vec![Row {
fn extra_row(&self) -> Result<Row> {
Ok(Row {
values: vec![
ValueData::U64Value(self.cost).into(),
ValueData::U64Value(self.threshold).into(),
@@ -119,7 +119,7 @@ impl Event for SlowQueryEvent {
ValueData::TimestampMillisecondValue(self.promql_start.unwrap_or(0)).into(),
ValueData::TimestampMillisecondValue(self.promql_end.unwrap_or(0)).into(),
],
}])
})
}
fn json_payload(&self) -> Result<String> {

View File

@@ -47,7 +47,6 @@ h3o = { version = "0.6", optional = true }
hyperloglogplus = "0.4"
jsonb.workspace = true
memchr = "2.7"
mito-codec.workspace = true
nalgebra.workspace = true
num = "0.4"
num-traits = "0.2"

View File

@@ -12,7 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
mod build_index_table;
mod flush_compact_region;
mod flush_compact_table;
mod migrate_region;
@@ -27,7 +26,6 @@ use reconcile_catalog::ReconcileCatalogFunction;
use reconcile_database::ReconcileDatabaseFunction;
use reconcile_table::ReconcileTableFunction;
use crate::admin::build_index_table::BuildIndexFunction;
use crate::flush_flow::FlushFlowFunction;
use crate::function_registry::FunctionRegistry;
@@ -42,7 +40,6 @@ impl AdminFunction {
registry.register(CompactRegionFunction::factory());
registry.register(FlushTableFunction::factory());
registry.register(CompactTableFunction::factory());
registry.register(BuildIndexFunction::factory());
registry.register(FlushFlowFunction::factory());
registry.register(ReconcileCatalogFunction::factory());
registry.register(ReconcileDatabaseFunction::factory());

View File

@@ -1,80 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use arrow::datatypes::DataType as ArrowDataType;
use common_error::ext::BoxedError;
use common_macro::admin_fn;
use common_query::error::{
InvalidFuncArgsSnafu, MissingTableMutationHandlerSnafu, Result, TableMutationSnafu,
UnsupportedInputDataTypeSnafu,
};
use datafusion_expr::{Signature, Volatility};
use datatypes::prelude::*;
use session::context::QueryContextRef;
use session::table_name::table_name_to_full_name;
use snafu::{ResultExt, ensure};
use table::requests::BuildIndexTableRequest;
use crate::handlers::TableMutationHandlerRef;
#[admin_fn(
name = BuildIndexFunction,
display_name = build_index,
sig_fn = build_index_signature,
ret = uint64
)]
pub(crate) async fn build_index(
table_mutation_handler: &TableMutationHandlerRef,
query_ctx: &QueryContextRef,
params: &[ValueRef<'_>],
) -> Result<Value> {
ensure!(
params.len() == 1,
InvalidFuncArgsSnafu {
err_msg: format!(
"The length of the args is not correct, expect 1, have: {}",
params.len()
),
}
);
let ValueRef::String(table_name) = params[0] else {
return UnsupportedInputDataTypeSnafu {
function: "build_index",
datatypes: params.iter().map(|v| v.data_type()).collect::<Vec<_>>(),
}
.fail();
};
let (catalog_name, schema_name, table_name) = table_name_to_full_name(table_name, query_ctx)
.map_err(BoxedError::new)
.context(TableMutationSnafu)?;
let affected_rows = table_mutation_handler
.build_index(
BuildIndexTableRequest {
catalog_name,
schema_name,
table_name,
},
query_ctx.clone(),
)
.await?;
Ok(Value::from(affected_rows as u64))
}
fn build_index_signature() -> Signature {
Signature::uniform(1, vec![ArrowDataType::Utf8], Volatility::Immutable)
}

View File

@@ -12,12 +12,10 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::aggrs::vector::avg::VectorAvg;
use crate::aggrs::vector::product::VectorProduct;
use crate::aggrs::vector::sum::VectorSum;
use crate::function_registry::FunctionRegistry;
mod avg;
mod product;
mod sum;
@@ -27,6 +25,5 @@ impl VectorFunction {
pub fn register(registry: &FunctionRegistry) {
registry.register_aggr(VectorSum::uadf_impl());
registry.register_aggr(VectorProduct::uadf_impl());
registry.register_aggr(VectorAvg::uadf_impl());
}
}

View File

@@ -1,270 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::borrow::Cow;
use std::sync::Arc;
use arrow::array::{Array, ArrayRef, AsArray, BinaryArray, LargeStringArray, StringArray};
use arrow::compute::sum;
use arrow::datatypes::UInt64Type;
use arrow_schema::{DataType, Field};
use datafusion_common::{Result, ScalarValue};
use datafusion_expr::{
Accumulator, AggregateUDF, Signature, SimpleAggregateUDF, TypeSignature, Volatility,
};
use datafusion_functions_aggregate_common::accumulator::AccumulatorArgs;
use nalgebra::{Const, DVector, DVectorView, Dyn, OVector};
use crate::scalars::vector::impl_conv::{
binlit_as_veclit, parse_veclit_from_strlit, veclit_to_binlit,
};
/// The accumulator for the `vec_avg` aggregate function.
#[derive(Debug, Default)]
pub struct VectorAvg {
sum: Option<OVector<f32, Dyn>>,
count: u64,
}
impl VectorAvg {
/// Create a new `AggregateUDF` for the `vec_avg` aggregate function.
pub fn uadf_impl() -> AggregateUDF {
let signature = Signature::one_of(
vec![
TypeSignature::Exact(vec![DataType::Utf8]),
TypeSignature::Exact(vec![DataType::LargeUtf8]),
TypeSignature::Exact(vec![DataType::Binary]),
],
Volatility::Immutable,
);
let udaf = SimpleAggregateUDF::new_with_signature(
"vec_avg",
signature,
DataType::Binary,
Arc::new(Self::accumulator),
vec![
Arc::new(Field::new("sum", DataType::Binary, true)),
Arc::new(Field::new("count", DataType::UInt64, true)),
],
);
AggregateUDF::from(udaf)
}
fn accumulator(args: AccumulatorArgs) -> Result<Box<dyn Accumulator>> {
if args.schema.fields().len() != 1 {
return Err(datafusion_common::DataFusionError::Internal(format!(
"expect creating `VEC_AVG` with only one input field, actual {}",
args.schema.fields().len()
)));
}
let t = args.schema.field(0).data_type();
if !matches!(t, DataType::Utf8 | DataType::LargeUtf8 | DataType::Binary) {
return Err(datafusion_common::DataFusionError::Internal(format!(
"unexpected input datatype {t} when creating `VEC_AVG`"
)));
}
Ok(Box::new(VectorAvg::default()))
}
fn inner(&mut self, len: usize) -> &mut OVector<f32, Dyn> {
self.sum
.get_or_insert_with(|| OVector::zeros_generic(Dyn(len), Const::<1>))
}
fn update(&mut self, values: &[ArrayRef], is_update: bool) -> Result<()> {
if values.is_empty() {
return Ok(());
};
let vectors = match values[0].data_type() {
DataType::Utf8 => {
let arr: &StringArray = values[0].as_string();
arr.iter()
.filter_map(|x| x.map(|s| parse_veclit_from_strlit(s).map_err(Into::into)))
.map(|x| x.map(Cow::Owned))
.collect::<Result<Vec<_>>>()?
}
DataType::LargeUtf8 => {
let arr: &LargeStringArray = values[0].as_string();
arr.iter()
.filter_map(|x| x.map(|s| parse_veclit_from_strlit(s).map_err(Into::into)))
.map(|x: Result<Vec<f32>>| x.map(Cow::Owned))
.collect::<Result<Vec<_>>>()?
}
DataType::Binary => {
let arr: &BinaryArray = values[0].as_binary();
arr.iter()
.filter_map(|x| x.map(|b| binlit_as_veclit(b).map_err(Into::into)))
.collect::<Result<Vec<_>>>()?
}
_ => {
return Err(datafusion_common::DataFusionError::NotImplemented(format!(
"unsupported data type {} for `VEC_AVG`",
values[0].data_type()
)));
}
};
if vectors.is_empty() {
return Ok(());
}
let len = if is_update {
vectors.len() as u64
} else {
sum(values[1].as_primitive::<UInt64Type>()).unwrap_or_default()
};
let dims = vectors[0].len();
let mut sum = DVector::zeros(dims);
for v in vectors {
if v.len() != dims {
return Err(datafusion_common::DataFusionError::Execution(
"vectors length not match: VEC_AVG".to_string(),
));
}
let v_view = DVectorView::from_slice(&v, dims);
sum += &v_view;
}
*self.inner(dims) += sum;
self.count += len;
Ok(())
}
}
impl Accumulator for VectorAvg {
fn state(&mut self) -> Result<Vec<ScalarValue>> {
let vector = match &self.sum {
None => ScalarValue::Binary(None),
Some(sum) => ScalarValue::Binary(Some(veclit_to_binlit(sum.as_slice()))),
};
Ok(vec![vector, ScalarValue::from(self.count)])
}
fn update_batch(&mut self, values: &[ArrayRef]) -> Result<()> {
self.update(values, true)
}
fn merge_batch(&mut self, states: &[ArrayRef]) -> Result<()> {
self.update(states, false)
}
fn evaluate(&mut self) -> Result<ScalarValue> {
match &self.sum {
None => Ok(ScalarValue::Binary(None)),
Some(sum) => Ok(ScalarValue::Binary(Some(veclit_to_binlit(
(sum / self.count as f32).as_slice(),
)))),
}
}
fn size(&self) -> usize {
size_of_val(self)
}
}
#[cfg(test)]
mod tests {
use std::sync::Arc;
use arrow::array::StringArray;
use datatypes::scalars::ScalarVector;
use datatypes::vectors::{ConstantVector, StringVector, Vector};
use super::*;
#[test]
fn test_update_batch() {
// test update empty batch, expect not updating anything
let mut vec_avg = VectorAvg::default();
vec_avg.update_batch(&[]).unwrap();
assert!(vec_avg.sum.is_none());
assert_eq!(ScalarValue::Binary(None), vec_avg.evaluate().unwrap());
// test update one not-null value
let mut vec_avg = VectorAvg::default();
let v: Vec<ArrayRef> = vec![Arc::new(StringArray::from(vec![
Some("[1.0,2.0,3.0]".to_string()),
Some("[4.0,5.0,6.0]".to_string()),
]))];
vec_avg.update_batch(&v).unwrap();
assert_eq!(
ScalarValue::Binary(Some(veclit_to_binlit(&[2.5, 3.5, 4.5]))),
vec_avg.evaluate().unwrap()
);
// test update one null value
let mut vec_avg = VectorAvg::default();
let v: Vec<ArrayRef> = vec![Arc::new(StringArray::from(vec![Option::<String>::None]))];
vec_avg.update_batch(&v).unwrap();
assert_eq!(ScalarValue::Binary(None), vec_avg.evaluate().unwrap());
// test update no null-value batch
let mut vec_avg = VectorAvg::default();
let v: Vec<ArrayRef> = vec![Arc::new(StringArray::from(vec![
Some("[1.0,2.0,3.0]".to_string()),
Some("[4.0,5.0,6.0]".to_string()),
Some("[7.0,8.0,9.0]".to_string()),
]))];
vec_avg.update_batch(&v).unwrap();
assert_eq!(
ScalarValue::Binary(Some(veclit_to_binlit(&[4.0, 5.0, 6.0]))),
vec_avg.evaluate().unwrap()
);
// test update null-value batch
let mut vec_avg = VectorAvg::default();
let v: Vec<ArrayRef> = vec![Arc::new(StringArray::from(vec![
Some("[1.0,2.0,3.0]".to_string()),
None,
Some("[7.0,8.0,9.0]".to_string()),
]))];
vec_avg.update_batch(&v).unwrap();
assert_eq!(
ScalarValue::Binary(Some(veclit_to_binlit(&[4.0, 5.0, 6.0]))),
vec_avg.evaluate().unwrap()
);
let mut vec_avg = VectorAvg::default();
let v: Vec<ArrayRef> = vec![Arc::new(StringArray::from(vec![
None,
Some("[4.0,5.0,6.0]".to_string()),
Some("[7.0,8.0,9.0]".to_string()),
]))];
vec_avg.update_batch(&v).unwrap();
assert_eq!(
ScalarValue::Binary(Some(veclit_to_binlit(&[5.5, 6.5, 7.5]))),
vec_avg.evaluate().unwrap()
);
// test update with constant vector
let mut vec_avg = VectorAvg::default();
let v: Vec<ArrayRef> = vec![
Arc::new(ConstantVector::new(
Arc::new(StringVector::from_vec(vec!["[1.0,2.0,3.0]".to_string()])),
4,
))
.to_arrow_array(),
];
vec_avg.update_batch(&v).unwrap();
assert_eq!(
ScalarValue::Binary(Some(veclit_to_binlit(&[1.0, 2.0, 3.0]))),
vec_avg.evaluate().unwrap()
);
}
}
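The removed accumulator keeps an elementwise running sum plus a row count and divides once at evaluation time. A minimal sketch of that arithmetic with nalgebra, the crate the code above used; this is a standalone illustration, not the UDAF itself:

use nalgebra::{DVector, DVectorView};

// Illustrative: elementwise average of same-length vectors, computed as a running
// sum plus a count and divided once at the end, as the removed accumulator did.
fn elementwise_avg(vectors: &[Vec<f32>]) -> Option<DVector<f32>> {
    let dims = vectors.first()?.len();
    let mut sum = DVector::<f32>::zeros(dims);
    for v in vectors {
        if v.len() != dims {
            return None; // dimension mismatch; the accumulator raised an error here
        }
        sum += &DVectorView::from_slice(v, dims);
    }
    Some(sum / vectors.len() as f32)
}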

View File

@@ -34,7 +34,6 @@ use crate::scalars::json::JsonFunction;
use crate::scalars::matches::MatchesFunction;
use crate::scalars::matches_term::MatchesTermFunction;
use crate::scalars::math::MathFunction;
use crate::scalars::primary_key::DecodePrimaryKeyFunction;
use crate::scalars::string::register_string_functions;
use crate::scalars::timestamp::TimestampFunction;
use crate::scalars::uddsketch_calc::UddSketchCalcFunction;
@@ -144,7 +143,6 @@ pub static FUNCTION_REGISTRY: LazyLock<Arc<FunctionRegistry>> = LazyLock::new(||
ExpressionFunction::register(&function_registry);
UddSketchCalcFunction::register(&function_registry);
HllCalcFunction::register(&function_registry);
DecodePrimaryKeyFunction::register(&function_registry);
// Full text search function
MatchesFunction::register(&function_registry);

View File

@@ -25,9 +25,7 @@ use common_query::Output;
use common_query::error::Result;
use session::context::QueryContextRef;
use store_api::storage::RegionId;
use table::requests::{
BuildIndexTableRequest, CompactTableRequest, DeleteRequest, FlushTableRequest, InsertRequest,
};
use table::requests::{CompactTableRequest, DeleteRequest, FlushTableRequest, InsertRequest};
/// A trait for handling table mutations in `QueryEngine`.
#[async_trait]
@@ -49,13 +47,6 @@ pub trait TableMutationHandler: Send + Sync {
ctx: QueryContextRef,
) -> Result<AffectedRows>;
/// Trigger an index build task for the table.
async fn build_index(
&self,
request: BuildIndexTableRequest,
ctx: QueryContextRef,
) -> Result<AffectedRows>;
/// Trigger a flush task for a table region.
async fn flush_region(&self, region_id: RegionId, ctx: QueryContextRef)
-> Result<AffectedRows>;

View File

@@ -20,7 +20,6 @@ pub mod json;
pub mod matches;
pub mod matches_term;
pub mod math;
pub mod primary_key;
pub(crate) mod string;
pub mod vector;

View File

@@ -14,7 +14,6 @@
mod binary;
mod ctx;
mod if_func;
mod is_null;
mod unary;
@@ -23,7 +22,6 @@ pub use ctx::EvalContext;
pub use unary::scalar_unary_op;
use crate::function_registry::FunctionRegistry;
use crate::scalars::expression::if_func::IfFunction;
use crate::scalars::expression::is_null::IsNullFunction;
pub(crate) struct ExpressionFunction;
@@ -31,6 +29,5 @@ pub(crate) struct ExpressionFunction;
impl ExpressionFunction {
pub fn register(registry: &FunctionRegistry) {
registry.register_scalar(IsNullFunction::default());
registry.register_scalar(IfFunction::default());
}
}

View File

@@ -1,404 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::fmt;
use std::fmt::Display;
use arrow::array::ArrowNativeTypeOp;
use arrow::datatypes::ArrowPrimitiveType;
use datafusion::arrow::array::{Array, ArrayRef, AsArray, BooleanArray, PrimitiveArray};
use datafusion::arrow::compute::kernels::zip::zip;
use datafusion::arrow::datatypes::DataType;
use datafusion_common::DataFusionError;
use datafusion_expr::type_coercion::binary::comparison_coercion;
use datafusion_expr::{ColumnarValue, ScalarFunctionArgs, Signature, Volatility};
use crate::function::Function;
const NAME: &str = "if";
/// MySQL-compatible IF function: IF(condition, true_value, false_value)
///
/// Returns true_value if condition is TRUE (not NULL and not 0),
/// otherwise returns false_value.
///
/// MySQL truthy rules:
/// - NULL -> false
/// - 0 (numeric zero) -> false
/// - Any non-zero numeric -> true
/// - Boolean true/false -> use directly
#[derive(Clone, Debug)]
pub struct IfFunction {
signature: Signature,
}
impl Default for IfFunction {
fn default() -> Self {
Self {
signature: Signature::any(3, Volatility::Immutable),
}
}
}
impl Display for IfFunction {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", NAME.to_ascii_uppercase())
}
}
impl Function for IfFunction {
fn name(&self) -> &str {
NAME
}
fn return_type(&self, input_types: &[DataType]) -> datafusion_common::Result<DataType> {
// Return the common type of true_value and false_value (args[1] and args[2])
if input_types.len() < 3 {
return Err(DataFusionError::Plan(format!(
"{} requires 3 arguments, got {}",
NAME,
input_types.len()
)));
}
let true_type = &input_types[1];
let false_type = &input_types[2];
// Use comparison_coercion to find common type
comparison_coercion(true_type, false_type).ok_or_else(|| {
DataFusionError::Plan(format!(
"Cannot find common type for IF function between {:?} and {:?}",
true_type, false_type
))
})
}
fn signature(&self) -> &Signature {
&self.signature
}
fn invoke_with_args(
&self,
args: ScalarFunctionArgs,
) -> datafusion_common::Result<ColumnarValue> {
if args.args.len() != 3 {
return Err(DataFusionError::Plan(format!(
"{} requires exactly 3 arguments, got {}",
NAME,
args.args.len()
)));
}
let condition = &args.args[0];
let true_value = &args.args[1];
let false_value = &args.args[2];
// Convert condition to boolean array using MySQL truthy rules
let bool_array = to_boolean_array(condition, args.number_rows)?;
// Convert true and false values to arrays
let true_array = true_value.to_array(args.number_rows)?;
let false_array = false_value.to_array(args.number_rows)?;
// Use zip to select values based on condition
// zip expects &dyn Datum, and ArrayRef (Arc<dyn Array>) implements Datum
let result = zip(&bool_array, &true_array, &false_array)?;
Ok(ColumnarValue::Array(result))
}
}
/// Convert a ColumnarValue to a BooleanArray using MySQL truthy rules:
/// - NULL -> false
/// - 0 (any numeric zero) -> false
/// - Non-zero numeric -> true
/// - Boolean -> use directly
fn to_boolean_array(
value: &ColumnarValue,
num_rows: usize,
) -> datafusion_common::Result<BooleanArray> {
let array = value.to_array(num_rows)?;
array_to_bool(array)
}
/// Convert an integer PrimitiveArray to BooleanArray using MySQL truthy rules:
/// NULL -> false, 0 -> false, non-zero -> true
fn int_array_to_bool<T>(array: &PrimitiveArray<T>) -> BooleanArray
where
T: ArrowPrimitiveType,
T::Native: ArrowNativeTypeOp,
{
BooleanArray::from_iter(
array
.iter()
.map(|opt| Some(opt.is_some_and(|v| !v.is_zero()))),
)
}
/// Convert a float PrimitiveArray to BooleanArray using MySQL truthy rules:
/// NULL -> false, 0 (including -0.0) -> false, NaN -> true, other non-zero -> true
fn float_array_to_bool<T>(array: &PrimitiveArray<T>) -> BooleanArray
where
T: ArrowPrimitiveType,
T::Native: ArrowNativeTypeOp + num_traits::Float,
{
use num_traits::Float;
BooleanArray::from_iter(
array
.iter()
.map(|opt| Some(opt.is_some_and(|v| v.is_nan() || !v.is_zero()))),
)
}
/// Convert an Array to BooleanArray using MySQL truthy rules
fn array_to_bool(array: ArrayRef) -> datafusion_common::Result<BooleanArray> {
use arrow::datatypes::*;
match array.data_type() {
DataType::Boolean => {
let bool_array = array.as_boolean();
Ok(BooleanArray::from_iter(
bool_array.iter().map(|opt| Some(opt.unwrap_or(false))),
))
}
DataType::Int8 => Ok(int_array_to_bool(array.as_primitive::<Int8Type>())),
DataType::Int16 => Ok(int_array_to_bool(array.as_primitive::<Int16Type>())),
DataType::Int32 => Ok(int_array_to_bool(array.as_primitive::<Int32Type>())),
DataType::Int64 => Ok(int_array_to_bool(array.as_primitive::<Int64Type>())),
DataType::UInt8 => Ok(int_array_to_bool(array.as_primitive::<UInt8Type>())),
DataType::UInt16 => Ok(int_array_to_bool(array.as_primitive::<UInt16Type>())),
DataType::UInt32 => Ok(int_array_to_bool(array.as_primitive::<UInt32Type>())),
DataType::UInt64 => Ok(int_array_to_bool(array.as_primitive::<UInt64Type>())),
// Float16 needs special handling since half::f16 doesn't implement num_traits::Float
DataType::Float16 => {
let typed_array = array.as_primitive::<Float16Type>();
Ok(BooleanArray::from_iter(typed_array.iter().map(|opt| {
Some(opt.is_some_and(|v| {
let f = v.to_f32();
f.is_nan() || !f.is_zero()
}))
})))
}
DataType::Float32 => Ok(float_array_to_bool(array.as_primitive::<Float32Type>())),
DataType::Float64 => Ok(float_array_to_bool(array.as_primitive::<Float64Type>())),
// Null type is always false.
// Note: NullArray::is_null() returns false (physical null), so we must handle it explicitly.
// See: https://github.com/apache/arrow-rs/issues/4840
DataType::Null => Ok(BooleanArray::from(vec![false; array.len()])),
// For other types, treat non-null as true
_ => {
let len = array.len();
Ok(BooleanArray::from_iter(
(0..len).map(|i| Some(!array.is_null(i))),
))
}
}
}
#[cfg(test)]
mod tests {
use std::sync::Arc;
use arrow_schema::Field;
use datafusion_common::ScalarValue;
use datafusion_common::arrow::array::{AsArray, Int32Array, StringArray};
use super::*;
#[test]
fn test_if_function_basic() {
let if_func = IfFunction::default();
assert_eq!("if", if_func.name());
// Test IF(true, 'yes', 'no') -> 'yes'
let result = if_func
.invoke_with_args(ScalarFunctionArgs {
args: vec![
ColumnarValue::Scalar(ScalarValue::Boolean(Some(true))),
ColumnarValue::Scalar(ScalarValue::Utf8(Some("yes".to_string()))),
ColumnarValue::Scalar(ScalarValue::Utf8(Some("no".to_string()))),
],
arg_fields: vec![],
number_rows: 1,
return_field: Arc::new(Field::new("", DataType::Utf8, true)),
config_options: Arc::new(Default::default()),
})
.unwrap();
if let ColumnarValue::Array(arr) = result {
let str_arr = arr.as_string::<i32>();
assert_eq!(str_arr.value(0), "yes");
} else {
panic!("Expected Array result");
}
}
#[test]
fn test_if_function_false() {
let if_func = IfFunction::default();
// Test IF(false, 'yes', 'no') -> 'no'
let result = if_func
.invoke_with_args(ScalarFunctionArgs {
args: vec![
ColumnarValue::Scalar(ScalarValue::Boolean(Some(false))),
ColumnarValue::Scalar(ScalarValue::Utf8(Some("yes".to_string()))),
ColumnarValue::Scalar(ScalarValue::Utf8(Some("no".to_string()))),
],
arg_fields: vec![],
number_rows: 1,
return_field: Arc::new(Field::new("", DataType::Utf8, true)),
config_options: Arc::new(Default::default()),
})
.unwrap();
if let ColumnarValue::Array(arr) = result {
let str_arr = arr.as_string::<i32>();
assert_eq!(str_arr.value(0), "no");
} else {
panic!("Expected Array result");
}
}
#[test]
fn test_if_function_null_is_false() {
let if_func = IfFunction::default();
// Test IF(NULL, 'yes', 'no') -> 'no' (NULL is treated as false)
// Using Boolean(None) - typed null
let result = if_func
.invoke_with_args(ScalarFunctionArgs {
args: vec![
ColumnarValue::Scalar(ScalarValue::Boolean(None)),
ColumnarValue::Scalar(ScalarValue::Utf8(Some("yes".to_string()))),
ColumnarValue::Scalar(ScalarValue::Utf8(Some("no".to_string()))),
],
arg_fields: vec![],
number_rows: 1,
return_field: Arc::new(Field::new("", DataType::Utf8, true)),
config_options: Arc::new(Default::default()),
})
.unwrap();
if let ColumnarValue::Array(arr) = result {
let str_arr = arr.as_string::<i32>();
assert_eq!(str_arr.value(0), "no");
} else {
panic!("Expected Array result");
}
// Test IF(NULL, 'yes', 'no') -> 'no' using ScalarValue::Null (untyped null from SQL NULL literal)
let result = if_func
.invoke_with_args(ScalarFunctionArgs {
args: vec![
ColumnarValue::Scalar(ScalarValue::Null),
ColumnarValue::Scalar(ScalarValue::Utf8(Some("yes".to_string()))),
ColumnarValue::Scalar(ScalarValue::Utf8(Some("no".to_string()))),
],
arg_fields: vec![],
number_rows: 1,
return_field: Arc::new(Field::new("", DataType::Utf8, true)),
config_options: Arc::new(Default::default()),
})
.unwrap();
if let ColumnarValue::Array(arr) = result {
let str_arr = arr.as_string::<i32>();
assert_eq!(str_arr.value(0), "no");
} else {
panic!("Expected Array result");
}
}
#[test]
fn test_if_function_numeric_truthy() {
let if_func = IfFunction::default();
// Test IF(1, 'yes', 'no') -> 'yes' (non-zero is true)
let result = if_func
.invoke_with_args(ScalarFunctionArgs {
args: vec![
ColumnarValue::Scalar(ScalarValue::Int32(Some(1))),
ColumnarValue::Scalar(ScalarValue::Utf8(Some("yes".to_string()))),
ColumnarValue::Scalar(ScalarValue::Utf8(Some("no".to_string()))),
],
arg_fields: vec![],
number_rows: 1,
return_field: Arc::new(Field::new("", DataType::Utf8, true)),
config_options: Arc::new(Default::default()),
})
.unwrap();
if let ColumnarValue::Array(arr) = result {
let str_arr = arr.as_string::<i32>();
assert_eq!(str_arr.value(0), "yes");
} else {
panic!("Expected Array result");
}
// Test IF(0, 'yes', 'no') -> 'no' (zero is false)
let result = if_func
.invoke_with_args(ScalarFunctionArgs {
args: vec![
ColumnarValue::Scalar(ScalarValue::Int32(Some(0))),
ColumnarValue::Scalar(ScalarValue::Utf8(Some("yes".to_string()))),
ColumnarValue::Scalar(ScalarValue::Utf8(Some("no".to_string()))),
],
arg_fields: vec![],
number_rows: 1,
return_field: Arc::new(Field::new("", DataType::Utf8, true)),
config_options: Arc::new(Default::default()),
})
.unwrap();
if let ColumnarValue::Array(arr) = result {
let str_arr = arr.as_string::<i32>();
assert_eq!(str_arr.value(0), "no");
} else {
panic!("Expected Array result");
}
}
#[test]
fn test_if_function_with_arrays() {
let if_func = IfFunction::default();
// Test with array condition
let condition = Int32Array::from(vec![Some(1), Some(0), None, Some(5)]);
let true_val = StringArray::from(vec!["yes", "yes", "yes", "yes"]);
let false_val = StringArray::from(vec!["no", "no", "no", "no"]);
let result = if_func
.invoke_with_args(ScalarFunctionArgs {
args: vec![
ColumnarValue::Array(Arc::new(condition)),
ColumnarValue::Array(Arc::new(true_val)),
ColumnarValue::Array(Arc::new(false_val)),
],
arg_fields: vec![],
number_rows: 4,
return_field: Arc::new(Field::new("", DataType::Utf8, true)),
config_options: Arc::new(Default::default()),
})
.unwrap();
if let ColumnarValue::Array(arr) = result {
let str_arr = arr.as_string::<i32>();
assert_eq!(str_arr.value(0), "yes"); // 1 is true
assert_eq!(str_arr.value(1), "no"); // 0 is false
assert_eq!(str_arr.value(2), "no"); // NULL is false
assert_eq!(str_arr.value(3), "yes"); // 5 is true
} else {
panic!("Expected Array result");
}
}
}
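For context, the removed IF function applied MySQL truthiness to the condition before choosing a branch, as its tests above exercise. A tiny sketch of that rule for a numeric condition, illustrative only:

// Illustrative only: the MySQL truthiness rule the removed IF function applied to
// a numeric condition before selecting between the two branches.
fn mysql_truthy(condition: Option<f64>) -> bool {
    match condition {
        None => false,                 // NULL counts as false
        Some(x) if x.is_nan() => true, // NaN counts as true
        Some(x) => x != 0.0,           // zero (including -0.0) is false, non-zero is true
    }
}
// e.g. IF(0, 'yes', 'no') -> 'no', IF(5, 'yes', 'no') -> 'yes', IF(NULL, 'yes', 'no') -> 'no'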

View File

@@ -19,7 +19,7 @@ mod json_path_match;
mod json_to_string;
mod parse_json;
use json_get::{JsonGetBool, JsonGetFloat, JsonGetInt, JsonGetObject, JsonGetString};
use json_get::{JsonGetBool, JsonGetFloat, JsonGetInt, JsonGetString};
use json_is::{
JsonIsArray, JsonIsBool, JsonIsFloat, JsonIsInt, JsonIsNull, JsonIsObject, JsonIsString,
};
@@ -39,7 +39,6 @@ impl JsonFunction {
registry.register_scalar(JsonGetFloat::default());
registry.register_scalar(JsonGetString::default());
registry.register_scalar(JsonGetBool::default());
registry.register_scalar(JsonGetObject::default());
registry.register_scalar(JsonIsNull::default());
registry.register_scalar(JsonIsInt::default());

View File

@@ -16,13 +16,10 @@ use std::fmt::{self, Display};
use std::sync::Arc;
use arrow::compute;
use datafusion_common::DataFusionError;
use datafusion_common::arrow::array::{
Array, AsArray, BinaryViewBuilder, BooleanBuilder, Float64Builder, Int64Builder,
StringViewBuilder,
Array, AsArray, BooleanBuilder, Float64Builder, Int64Builder, StringViewBuilder,
};
use datafusion_common::arrow::datatypes::DataType;
use datafusion_expr::type_coercion::aggregates::STRINGS;
use datafusion_expr::{ColumnarValue, ScalarFunctionArgs, Signature};
use crate::function::{Function, extract_args};
@@ -215,92 +212,13 @@ impl Display for JsonGetString {
}
}
/// Get the object from JSON value by path.
pub(super) struct JsonGetObject {
signature: Signature,
}
impl JsonGetObject {
const NAME: &'static str = "json_get_object";
}
impl Default for JsonGetObject {
fn default() -> Self {
Self {
signature: helper::one_of_sigs2(
vec![
DataType::Binary,
DataType::LargeBinary,
DataType::BinaryView,
],
STRINGS.to_vec(),
),
}
}
}
impl Function for JsonGetObject {
fn name(&self) -> &str {
Self::NAME
}
fn return_type(&self, _: &[DataType]) -> datafusion_common::Result<DataType> {
Ok(DataType::BinaryView)
}
fn signature(&self) -> &Signature {
&self.signature
}
fn invoke_with_args(
&self,
args: ScalarFunctionArgs,
) -> datafusion_common::Result<ColumnarValue> {
let [arg0, arg1] = extract_args(self.name(), &args)?;
let arg0 = compute::cast(&arg0, &DataType::BinaryView)?;
let jsons = arg0.as_binary_view();
let arg1 = compute::cast(&arg1, &DataType::Utf8View)?;
let paths = arg1.as_string_view();
let len = jsons.len();
let mut builder = BinaryViewBuilder::with_capacity(len);
for i in 0..len {
let json = jsons.is_valid(i).then(|| jsons.value(i));
let path = paths.is_valid(i).then(|| paths.value(i));
let result = if let (Some(json), Some(path)) = (json, path) {
let result = jsonb::jsonpath::parse_json_path(path.as_bytes()).and_then(|path| {
let mut data = Vec::new();
let mut offset = Vec::new();
jsonb::get_by_path(json, path, &mut data, &mut offset)
.map(|()| jsonb::is_object(&data).then_some(data))
});
result.map_err(|e| DataFusionError::Execution(e.to_string()))?
} else {
None
};
builder.append_option(result);
}
Ok(ColumnarValue::Array(Arc::new(builder.finish())))
}
}
impl Display for JsonGetObject {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", Self::NAME.to_ascii_uppercase())
}
}
#[cfg(test)]
mod tests {
use std::sync::Arc;
use arrow_schema::Field;
use datafusion_common::ScalarValue;
use datafusion_common::arrow::array::{BinaryArray, BinaryViewArray, StringArray};
use datafusion_common::arrow::array::{BinaryArray, StringArray};
use datafusion_common::arrow::datatypes::{Float64Type, Int64Type};
use datatypes::types::parse_string_to_jsonb;
use super::*;
@@ -507,49 +425,4 @@ mod tests {
assert_eq!(*gt, result);
}
}
#[test]
fn test_json_get_object() -> datafusion_common::Result<()> {
let udf = JsonGetObject::default();
assert_eq!("json_get_object", udf.name());
assert_eq!(
DataType::BinaryView,
udf.return_type(&[DataType::BinaryView, DataType::Utf8View])?
);
let json_value = parse_string_to_jsonb(r#"{"a": {"b": {"c": {"d": 1}}}}"#).unwrap();
let paths = vec!["$", "$.a", "$.a.b", "$.a.b.c", "$.a.b.c.d", "$.e", "$.a.e"];
let number_rows = paths.len();
let args = ScalarFunctionArgs {
args: vec![
ColumnarValue::Scalar(ScalarValue::Binary(Some(json_value))),
ColumnarValue::Array(Arc::new(StringArray::from_iter_values(paths))),
],
arg_fields: vec![],
number_rows,
return_field: Arc::new(Field::new("x", DataType::Binary, false)),
config_options: Arc::new(Default::default()),
};
let result = udf
.invoke_with_args(args)
.and_then(|x| x.to_array(number_rows))?;
let result = result.as_binary_view();
let expected = &BinaryViewArray::from_iter(
vec![
Some(r#"{"a": {"b": {"c": {"d": 1}}}}"#),
Some(r#"{"b": {"c": {"d": 1}}}"#),
Some(r#"{"c": {"d": 1}}"#),
Some(r#"{"d": 1}"#),
None,
None,
None,
]
.into_iter()
.map(|x| x.and_then(|s| parse_string_to_jsonb(s).ok())),
);
assert_eq!(result, expected);
Ok(())
}
}
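The removed json_get_object resolved a JSON path and returned the value only when it was an object, otherwise NULL. A sketch of the per-row extraction using the same jsonb calls the code above relied on; error handling is collapsed into Option for brevity:

// Illustrative: the per-row extraction the removed function performed, using the
// same jsonb calls; returns Some(encoded object) only when the path resolves to an
// object, and None otherwise (errors are collapsed into None here for brevity).
fn get_object_by_path(json: &[u8], path: &str) -> Option<Vec<u8>> {
    let parsed = jsonb::jsonpath::parse_json_path(path.as_bytes()).ok()?;
    let mut data = Vec::new();
    let mut offset = Vec::new();
    jsonb::get_by_path(json, parsed, &mut data, &mut offset).ok()?;
    jsonb::is_object(&data).then_some(data)
}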

View File

@@ -32,15 +32,7 @@ impl Default for JsonToStringFunction {
fn default() -> Self {
Self {
// TODO(LFC): Use a clearer type here instead of "Binary" for JSON input, once we have a "Json" type.
signature: Signature::uniform(
1,
vec![
DataType::Binary,
DataType::LargeBinary,
DataType::BinaryView,
],
Volatility::Immutable,
),
signature: Signature::exact(vec![DataType::Binary], Volatility::Immutable),
}
}
}
@@ -65,8 +57,7 @@ impl Function for JsonToStringFunction {
args: ScalarFunctionArgs,
) -> datafusion_common::Result<ColumnarValue> {
let [arg0] = extract_args(self.name(), &args)?;
let arg0 = arrow::compute::cast(&arg0, &DataType::BinaryView)?;
let jsons = arg0.as_binary_view();
let jsons = arg0.as_binary::<i32>();
let size = jsons.len();
let mut builder = StringViewBuilder::with_capacity(size);

View File

@@ -1,521 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::HashMap;
use std::fmt::{self, Display};
use std::sync::Arc;
use datafusion_common::arrow::array::{
Array, ArrayRef, BinaryArray, BinaryViewArray, DictionaryArray, ListBuilder, StringBuilder,
};
use datafusion_common::arrow::datatypes::{DataType, Field};
use datafusion_common::{DataFusionError, ScalarValue};
use datafusion_expr::{ColumnarValue, ScalarFunctionArgs, Signature, Volatility};
use datatypes::arrow::datatypes::UInt32Type;
use datatypes::value::Value;
use mito_codec::row_converter::{
CompositeValues, PrimaryKeyCodec, SortField, build_primary_key_codec_with_fields,
};
use store_api::codec::PrimaryKeyEncoding;
use store_api::metadata::RegionMetadata;
use store_api::storage::ColumnId;
use store_api::storage::consts::{PRIMARY_KEY_COLUMN_NAME, ReservedColumnId};
use crate::function::{Function, extract_args};
use crate::function_registry::FunctionRegistry;
type NameValuePair = (String, Option<String>);
#[derive(Clone, Debug)]
pub(crate) struct DecodePrimaryKeyFunction {
signature: Signature,
}
const NAME: &str = "decode_primary_key";
const NULL_VALUE_LITERAL: &str = "null";
impl Default for DecodePrimaryKeyFunction {
fn default() -> Self {
Self {
signature: Signature::any(3, Volatility::Immutable),
}
}
}
impl DecodePrimaryKeyFunction {
pub fn register(registry: &FunctionRegistry) {
registry.register_scalar(Self::default());
}
fn return_data_type() -> DataType {
DataType::List(Arc::new(Field::new("item", DataType::Utf8, true)))
}
}
impl Function for DecodePrimaryKeyFunction {
fn name(&self) -> &str {
NAME
}
fn return_type(&self, _: &[DataType]) -> datafusion_common::Result<DataType> {
Ok(Self::return_data_type())
}
fn signature(&self) -> &Signature {
&self.signature
}
fn invoke_with_args(
&self,
args: ScalarFunctionArgs,
) -> datafusion_common::Result<ColumnarValue> {
let [encoded, _, _] = extract_args(self.name(), &args)?;
let number_rows = args.number_rows;
let encoding = parse_encoding(&args.args[1])?;
let metadata = parse_region_metadata(&args.args[2])?;
let codec = build_codec(&metadata, encoding);
let name_lookup: HashMap<_, _> = metadata
.column_metadatas
.iter()
.map(|c| (c.column_id, c.column_schema.name.clone()))
.collect();
let decoded_rows = decode_primary_keys(encoded, number_rows, codec.as_ref(), &name_lookup)?;
let array = build_list_array(&decoded_rows)?;
Ok(ColumnarValue::Array(array))
}
}
impl Display for DecodePrimaryKeyFunction {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "DECODE_PRIMARY_KEY")
}
}
fn parse_encoding(arg: &ColumnarValue) -> datafusion_common::Result<PrimaryKeyEncoding> {
let encoding = match arg {
ColumnarValue::Scalar(ScalarValue::Utf8(Some(v)))
| ColumnarValue::Scalar(ScalarValue::LargeUtf8(Some(v))) => v.as_str(),
ColumnarValue::Scalar(value) => {
return Err(DataFusionError::Execution(format!(
"encoding must be a string literal, got {value:?}"
)));
}
ColumnarValue::Array(_) => {
return Err(DataFusionError::Execution(
"encoding must be a scalar string".to_string(),
));
}
};
match encoding.to_ascii_lowercase().as_str() {
"dense" => Ok(PrimaryKeyEncoding::Dense),
"sparse" => Ok(PrimaryKeyEncoding::Sparse),
_ => Err(DataFusionError::Execution(format!(
"unsupported primary key encoding: {encoding}"
))),
}
}
fn build_codec(
metadata: &RegionMetadata,
encoding: PrimaryKeyEncoding,
) -> Arc<dyn PrimaryKeyCodec> {
let fields = metadata.primary_key_columns().map(|c| {
(
c.column_id,
SortField::new(c.column_schema.data_type.clone()),
)
});
build_primary_key_codec_with_fields(encoding, fields)
}
fn parse_region_metadata(arg: &ColumnarValue) -> datafusion_common::Result<RegionMetadata> {
let json = match arg {
ColumnarValue::Scalar(ScalarValue::Utf8(Some(v)))
| ColumnarValue::Scalar(ScalarValue::LargeUtf8(Some(v))) => v.as_str(),
ColumnarValue::Scalar(value) => {
return Err(DataFusionError::Execution(format!(
"region metadata must be a string literal, got {value:?}"
)));
}
ColumnarValue::Array(_) => {
return Err(DataFusionError::Execution(
"region metadata must be a scalar string".to_string(),
));
}
};
RegionMetadata::from_json(json)
.map_err(|e| DataFusionError::Execution(format!("failed to parse region metadata: {e:?}")))
}
fn decode_primary_keys(
encoded: ArrayRef,
number_rows: usize,
codec: &dyn PrimaryKeyCodec,
name_lookup: &HashMap<ColumnId, String>,
) -> datafusion_common::Result<Vec<Vec<NameValuePair>>> {
if let Some(dict) = encoded
.as_any()
.downcast_ref::<DictionaryArray<UInt32Type>>()
{
decode_dictionary(dict, number_rows, codec, name_lookup)
} else if let Some(array) = encoded.as_any().downcast_ref::<BinaryArray>() {
decode_binary_array(array, codec, name_lookup)
} else if let Some(array) = encoded.as_any().downcast_ref::<BinaryViewArray>() {
decode_binary_view_array(array, codec, name_lookup)
} else {
Err(DataFusionError::Execution(format!(
"column {PRIMARY_KEY_COLUMN_NAME} must be binary or dictionary(binary) array"
)))
}
}
fn decode_dictionary(
dict: &DictionaryArray<UInt32Type>,
number_rows: usize,
codec: &dyn PrimaryKeyCodec,
name_lookup: &HashMap<ColumnId, String>,
) -> datafusion_common::Result<Vec<Vec<NameValuePair>>> {
let values = dict
.values()
.as_any()
.downcast_ref::<BinaryArray>()
.ok_or_else(|| {
DataFusionError::Execution("primary key dictionary values are not binary".to_string())
})?;
let mut decoded_values = Vec::with_capacity(values.len());
for i in 0..values.len() {
let pk = values.value(i);
let pairs = decode_one(pk, codec, name_lookup)?;
decoded_values.push(pairs);
}
let mut rows = Vec::with_capacity(number_rows);
let keys = dict.keys();
for i in 0..number_rows {
let dict_index = keys.value(i) as usize;
rows.push(decoded_values[dict_index].clone());
}
Ok(rows)
}
fn decode_binary_array(
array: &BinaryArray,
codec: &dyn PrimaryKeyCodec,
name_lookup: &HashMap<ColumnId, String>,
) -> datafusion_common::Result<Vec<Vec<NameValuePair>>> {
(0..array.len())
.map(|i| decode_one(array.value(i), codec, name_lookup))
.collect()
}
fn decode_binary_view_array(
array: &BinaryViewArray,
codec: &dyn PrimaryKeyCodec,
name_lookup: &HashMap<ColumnId, String>,
) -> datafusion_common::Result<Vec<Vec<NameValuePair>>> {
(0..array.len())
.map(|i| decode_one(array.value(i), codec, name_lookup))
.collect()
}
fn decode_one(
pk: &[u8],
codec: &dyn PrimaryKeyCodec,
name_lookup: &HashMap<ColumnId, String>,
) -> datafusion_common::Result<Vec<NameValuePair>> {
let decoded = codec
.decode(pk)
.map_err(|e| DataFusionError::Execution(format!("failed to decode primary key: {e}")))?;
Ok(match decoded {
CompositeValues::Dense(values) => values
.into_iter()
.map(|(column_id, value)| (column_name(column_id, name_lookup), value_to_string(value)))
.collect(),
CompositeValues::Sparse(values) => {
let mut values: Vec<_> = values
.iter()
.map(|(column_id, value)| {
(
*column_id,
column_name(*column_id, name_lookup),
value_to_string(value.clone()),
)
})
.collect();
values.sort_by_key(|(column_id, _, _)| {
(ReservedColumnId::is_reserved(*column_id), *column_id)
});
values
.into_iter()
.map(|(_, name, value)| (name, value))
.collect()
}
})
}
fn column_name(column_id: ColumnId, name_lookup: &HashMap<ColumnId, String>) -> String {
if let Some(name) = name_lookup.get(&column_id) {
return name.clone();
}
if column_id == ReservedColumnId::table_id() {
return "__table_id".to_string();
}
if column_id == ReservedColumnId::tsid() {
return "__tsid".to_string();
}
column_id.to_string()
}
fn value_to_string(value: Value) -> Option<String> {
match value {
Value::Null => None,
_ => Some(value.to_string()),
}
}
fn build_list_array(rows: &[Vec<NameValuePair>]) -> datafusion_common::Result<ArrayRef> {
let mut builder = ListBuilder::new(StringBuilder::new());
for row in rows {
for (key, value) in row {
let value = value.as_deref().unwrap_or(NULL_VALUE_LITERAL);
builder.values().append_value(format!("{key} : {value}"));
}
builder.append(true);
}
Ok(Arc::new(builder.finish()))
}
#[cfg(test)]
mod tests {
use api::v1::SemanticType;
use datafusion_common::ScalarValue;
use datatypes::arrow::array::builder::BinaryDictionaryBuilder;
use datatypes::arrow::array::{BinaryArray, ListArray, StringArray};
use datatypes::arrow::datatypes::UInt32Type;
use datatypes::prelude::ConcreteDataType;
use datatypes::schema::ColumnSchema;
use datatypes::value::Value;
use mito_codec::row_converter::{
DensePrimaryKeyCodec, PrimaryKeyCodecExt, SortField, SparsePrimaryKeyCodec,
};
use store_api::codec::PrimaryKeyEncoding;
use store_api::metadata::{ColumnMetadata, RegionMetadataBuilder};
use store_api::storage::consts::ReservedColumnId;
use store_api::storage::{ColumnId, RegionId};
use super::*;
fn pk_field() -> Arc<Field> {
Arc::new(Field::new_dictionary(
PRIMARY_KEY_COLUMN_NAME,
DataType::UInt32,
DataType::Binary,
false,
))
}
fn region_metadata_json(
columns: &[(ColumnId, &str, ConcreteDataType)],
encoding: PrimaryKeyEncoding,
) -> String {
let mut builder = RegionMetadataBuilder::new(RegionId::new(1, 1));
builder.push_column_metadata(ColumnMetadata {
column_schema: ColumnSchema::new(
"ts",
ConcreteDataType::timestamp_millisecond_datatype(),
false,
),
semantic_type: SemanticType::Timestamp,
column_id: 100,
});
builder.primary_key_encoding(encoding);
for (id, name, ty) in columns {
builder.push_column_metadata(ColumnMetadata {
column_schema: ColumnSchema::new((*name).to_string(), ty.clone(), true),
semantic_type: SemanticType::Tag,
column_id: *id,
});
}
builder.primary_key(columns.iter().map(|(id, _, _)| *id).collect());
builder.build().unwrap().to_json().unwrap()
}
fn list_row(list: &ListArray, row_idx: usize) -> Vec<String> {
let values = list.value(row_idx);
let values = values.as_any().downcast_ref::<StringArray>().unwrap();
(0..values.len())
.map(|i| values.value(i).to_string())
.collect()
}
#[test]
fn test_decode_dense_primary_key() {
let columns = vec![
(0, "host", ConcreteDataType::string_datatype()),
(1, "core", ConcreteDataType::int64_datatype()),
];
let metadata_json = region_metadata_json(&columns, PrimaryKeyEncoding::Dense);
let codec = DensePrimaryKeyCodec::with_fields(
columns
.iter()
.map(|(id, _, ty)| (*id, SortField::new(ty.clone())))
.collect(),
);
let rows = vec![
vec![Value::from("a"), Value::from(1_i64)],
vec![Value::from("b"), Value::from(2_i64)],
vec![Value::from("a"), Value::from(1_i64)],
];
let mut builder = BinaryDictionaryBuilder::<UInt32Type>::new();
for row in &rows {
let encoded = codec.encode(row.iter().map(|v| v.as_value_ref())).unwrap();
builder.append(encoded.as_slice()).unwrap();
}
let dict_array: ArrayRef = Arc::new(builder.finish());
let args = ScalarFunctionArgs {
args: vec![
ColumnarValue::Array(dict_array),
ColumnarValue::Scalar(ScalarValue::Utf8(Some("dense".to_string()))),
ColumnarValue::Scalar(ScalarValue::Utf8(Some(metadata_json))),
],
arg_fields: vec![
pk_field(),
Arc::new(Field::new("encoding", DataType::Utf8, false)),
Arc::new(Field::new("region_metadata", DataType::Utf8, false)),
],
number_rows: 3,
return_field: Arc::new(Field::new(
"decoded",
DecodePrimaryKeyFunction::return_data_type(),
false,
)),
config_options: Default::default(),
};
let func = DecodePrimaryKeyFunction::default();
let result = func
.invoke_with_args(args)
.and_then(|v| v.to_array(3))
.unwrap();
let list = result.as_any().downcast_ref::<ListArray>().unwrap();
let expected = [
vec!["host : a".to_string(), "core : 1".to_string()],
vec!["host : b".to_string(), "core : 2".to_string()],
vec!["host : a".to_string(), "core : 1".to_string()],
];
for (row_idx, expected_row) in expected.iter().enumerate() {
assert_eq!(*expected_row, list_row(list, row_idx));
}
}
#[test]
fn test_decode_sparse_primary_key() {
let columns = vec![
(10, "k0", ConcreteDataType::string_datatype()),
(11, "k1", ConcreteDataType::string_datatype()),
];
let metadata_json = region_metadata_json(&columns, PrimaryKeyEncoding::Sparse);
let codec = SparsePrimaryKeyCodec::schemaless();
let rows = vec![
vec![
(ReservedColumnId::table_id(), Value::UInt32(1)),
(ReservedColumnId::tsid(), Value::UInt64(100)),
(10, Value::from("a")),
(11, Value::from("b")),
],
vec![
(ReservedColumnId::table_id(), Value::UInt32(1)),
(ReservedColumnId::tsid(), Value::UInt64(200)),
(10, Value::from("c")),
(11, Value::from("d")),
],
];
let mut encoded_values = Vec::with_capacity(rows.len());
for row in &rows {
let mut buf = Vec::new();
codec.encode_values(row, &mut buf).unwrap();
encoded_values.push(buf);
}
let pk_array: ArrayRef = Arc::new(BinaryArray::from_iter_values(
encoded_values.iter().cloned(),
));
let args = ScalarFunctionArgs {
args: vec![
ColumnarValue::Array(pk_array),
ColumnarValue::Scalar(ScalarValue::Utf8(Some("sparse".to_string()))),
ColumnarValue::Scalar(ScalarValue::Utf8(Some(metadata_json))),
],
arg_fields: vec![
pk_field(),
Arc::new(Field::new("encoding", DataType::Utf8, false)),
Arc::new(Field::new("region_metadata", DataType::Utf8, false)),
],
number_rows: rows.len(),
return_field: Arc::new(Field::new(
"decoded",
DecodePrimaryKeyFunction::return_data_type(),
false,
)),
config_options: Default::default(),
};
let func = DecodePrimaryKeyFunction::default();
let result = func
.invoke_with_args(args)
.and_then(|v| v.to_array(rows.len()))
.unwrap();
let list = result.as_any().downcast_ref::<ListArray>().unwrap();
let expected = [
vec![
"k0 : a".to_string(),
"k1 : b".to_string(),
"__tsid : 100".to_string(),
"__table_id : 1".to_string(),
],
vec![
"k0 : c".to_string(),
"k1 : d".to_string(),
"__tsid : 200".to_string(),
"__table_id : 1".to_string(),
],
];
for (row_idx, expected_row) in expected.iter().enumerate() {
assert_eq!(*expected_row, list_row(list, row_idx));
}
}
}
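The removed decode_primary_key produced, for each input key, a list of column-to-value strings, rendering nulls as the literal null and surfacing reserved columns as __table_id and __tsid. A small sketch of that final formatting step; the decoding itself is shown in the code above:

// Illustrative: the final formatting step of the removed function, turning decoded
// (column, value) pairs into the "column : value" strings of the output list.
fn format_decoded(pairs: &[(String, Option<String>)]) -> Vec<String> {
    pairs
        .iter()
        .map(|(name, value)| format!("{name} : {}", value.as_deref().unwrap_or("null")))
        .collect()
}
// e.g. [("host", Some("a")), ("core", Some("1"))] becomes ["host : a", "core : 1"]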

View File

@@ -14,7 +14,6 @@
mod convert;
mod distance;
mod elem_avg;
mod elem_product;
mod elem_sum;
pub mod impl_conv;
@@ -65,7 +64,6 @@ impl VectorFunction {
registry.register_scalar(vector_subvector::VectorSubvectorFunction::default());
registry.register_scalar(elem_sum::ElemSumFunction::default());
registry.register_scalar(elem_product::ElemProductFunction::default());
registry.register_scalar(elem_avg::ElemAvgFunction::default());
}
}

View File

@@ -1,128 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::fmt::Display;
use datafusion::arrow::datatypes::DataType;
use datafusion::logical_expr::ColumnarValue;
use datafusion_common::ScalarValue;
use datafusion_expr::type_coercion::aggregates::{BINARYS, STRINGS};
use datafusion_expr::{ScalarFunctionArgs, Signature, TypeSignature, Volatility};
use nalgebra::DVectorView;
use crate::function::Function;
use crate::scalars::vector::{VectorCalculator, impl_conv};
const NAME: &str = "vec_elem_avg";
#[derive(Debug, Clone)]
pub(crate) struct ElemAvgFunction {
signature: Signature,
}
impl Default for ElemAvgFunction {
fn default() -> Self {
Self {
signature: Signature::one_of(
vec![
TypeSignature::Uniform(1, STRINGS.to_vec()),
TypeSignature::Uniform(1, BINARYS.to_vec()),
TypeSignature::Uniform(1, vec![DataType::BinaryView]),
],
Volatility::Immutable,
),
}
}
}
impl Function for ElemAvgFunction {
fn name(&self) -> &str {
NAME
}
fn return_type(&self, _: &[DataType]) -> datafusion_common::Result<DataType> {
Ok(DataType::Float32)
}
fn signature(&self) -> &Signature {
&self.signature
}
fn invoke_with_args(
&self,
args: ScalarFunctionArgs,
) -> datafusion_common::Result<ColumnarValue> {
let body = |v0: &ScalarValue| -> datafusion_common::Result<ScalarValue> {
let v0 =
impl_conv::as_veclit(v0)?.map(|v0| DVectorView::from_slice(&v0, v0.len()).mean());
Ok(ScalarValue::Float32(v0))
};
let calculator = VectorCalculator {
name: self.name(),
func: body,
};
calculator.invoke_with_single_argument(args)
}
}
impl Display for ElemAvgFunction {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{}", NAME.to_ascii_uppercase())
}
}
#[cfg(test)]
mod tests {
use std::sync::Arc;
use arrow::array::StringViewArray;
use arrow_schema::Field;
use datafusion::arrow::array::{Array, AsArray};
use datafusion::arrow::datatypes::Float32Type;
use datafusion_common::config::ConfigOptions;
use super::*;
#[test]
fn test_elem_avg() {
let func = ElemAvgFunction::default();
let input = Arc::new(StringViewArray::from(vec![
Some("[1.0,2.0,3.0]".to_string()),
Some("[4.0,5.0,6.0]".to_string()),
Some("[7.0,8.0,9.0]".to_string()),
None,
]));
let result = func
.invoke_with_args(ScalarFunctionArgs {
args: vec![ColumnarValue::Array(input.clone())],
arg_fields: vec![],
number_rows: input.len(),
return_field: Arc::new(Field::new("x", DataType::Float32, true)),
config_options: Arc::new(ConfigOptions::new()),
})
.and_then(|v| ColumnarValue::values_to_arrays(&[v]))
.map(|mut a| a.remove(0))
.unwrap();
let result = result.as_primitive::<Float32Type>();
assert_eq!(result.len(), 4);
assert_eq!(result.value(0), 2.0);
assert_eq!(result.value(1), 5.0);
assert_eq!(result.value(2), 8.0);
assert!(result.is_null(3));
}
}
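The removed vec_elem_avg returned the mean of the elements inside a single vector literal, with NULL in giving NULL out. A trivial sketch of that computation, kept deliberately plain:

// Illustrative: the per-value computation of the removed function, i.e. the mean of
// the elements inside one vector literal; the sketch treats an empty or NULL input
// as NULL, while the removed code used nalgebra's mean() on a vector view.
fn elem_avg(elements: &[f32]) -> Option<f32> {
    if elements.is_empty() {
        return None;
    }
    Some(elements.iter().sum::<f32>() / elements.len() as f32)
}
// e.g. elem_avg(&[1.0, 2.0, 3.0]) == Some(2.0), matching the removed test above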

View File

@@ -44,8 +44,7 @@ impl FunctionState {
use session::context::QueryContextRef;
use store_api::storage::RegionId;
use table::requests::{
BuildIndexTableRequest, CompactTableRequest, DeleteRequest, FlushTableRequest,
InsertRequest,
CompactTableRequest, DeleteRequest, FlushTableRequest, InsertRequest,
};
use crate::handlers::{FlowServiceHandler, ProcedureServiceHandler, TableMutationHandler};
@@ -121,14 +120,6 @@ impl FunctionState {
Ok(ROWS)
}
async fn build_index(
&self,
_request: BuildIndexTableRequest,
_ctx: QueryContextRef,
) -> Result<AffectedRows> {
Ok(ROWS)
}
async fn flush_region(
&self,
_region_id: RegionId,

View File

@@ -12,19 +12,21 @@
// See the License for the specific language governing permissions and
// limitations under the License.
mod version;
use std::sync::Arc;
use common_catalog::consts::{
DEFAULT_PRIVATE_SCHEMA_NAME, INFORMATION_SCHEMA_NAME, PG_CATALOG_NAME,
};
use datafusion::arrow::array::{ArrayRef, StringArray, StringBuilder, as_boolean_array};
use datafusion::arrow::array::{ArrayRef, StringArray, as_boolean_array};
use datafusion::catalog::TableFunction;
use datafusion::common::ScalarValue;
use datafusion::common::utils::SingleRowListArrayBuilder;
use datafusion_expr::{ColumnarValue, ScalarFunctionArgs, Signature, TypeSignature, Volatility};
use datafusion_expr::{ColumnarValue, ScalarFunctionArgs, Signature, Volatility};
use datafusion_pg_catalog::pg_catalog::{self, PgCatalogStaticTables};
use datatypes::arrow::datatypes::{DataType, Field};
use derive_more::derive::Display;
use version::PGVersionFunction;
use crate::function::{Function, find_function_context};
use crate::function_registry::FunctionRegistry;
@@ -34,15 +36,11 @@ const CURRENT_SCHEMA_FUNCTION_NAME: &str = "current_schema";
const CURRENT_SCHEMAS_FUNCTION_NAME: &str = "current_schemas";
const SESSION_USER_FUNCTION_NAME: &str = "session_user";
const CURRENT_DATABASE_FUNCTION_NAME: &str = "current_database";
const OBJ_DESCRIPTION_FUNCTION_NAME: &str = "obj_description";
const COL_DESCRIPTION_FUNCTION_NAME: &str = "col_description";
const SHOBJ_DESCRIPTION_FUNCTION_NAME: &str = "shobj_description";
const PG_MY_TEMP_SCHEMA_FUNCTION_NAME: &str = "pg_my_temp_schema";
define_nullary_udf!(CurrentSchemaFunction);
define_nullary_udf!(CurrentSchemasFunction);
define_nullary_udf!(SessionUserFunction);
define_nullary_udf!(CurrentDatabaseFunction);
define_nullary_udf!(PgMyTempSchemaFunction);
impl Function for CurrentDatabaseFunction {
fn name(&self) -> &str {
@@ -120,23 +118,6 @@ impl Function for SessionUserFunction {
}
}
#[derive(Display, Debug)]
#[display("{}", self.name())]
pub(super) struct CurrentSchemasFunction {
signature: Signature,
}
impl CurrentSchemasFunction {
pub fn new() -> Self {
Self {
signature: Signature::new(
TypeSignature::Exact(vec![DataType::Boolean]),
Volatility::Stable,
),
}
}
}
impl Function for CurrentSchemasFunction {
fn name(&self) -> &str {
CURRENT_SCHEMAS_FUNCTION_NAME
@@ -144,9 +125,9 @@ impl Function for CurrentSchemasFunction {
fn return_type(&self, _: &[DataType]) -> datafusion_common::Result<DataType> {
Ok(DataType::List(Arc::new(Field::new(
"item",
DataType::Utf8,
true,
"x",
DataType::Utf8View,
false,
))))
}
@@ -178,175 +159,6 @@ impl Function for CurrentSchemasFunction {
}
}
/// PostgreSQL obj_description - returns NULL for compatibility
#[derive(Display, Debug, Clone)]
#[display("{}", self.name())]
pub(super) struct ObjDescriptionFunction {
signature: Signature,
}
impl ObjDescriptionFunction {
pub fn new() -> Self {
Self {
signature: Signature::one_of(
vec![
TypeSignature::Exact(vec![DataType::Int64, DataType::Utf8]),
TypeSignature::Exact(vec![DataType::UInt32, DataType::Utf8]),
TypeSignature::Exact(vec![DataType::Int64]),
TypeSignature::Exact(vec![DataType::UInt32]),
],
Volatility::Stable,
),
}
}
}
impl Function for ObjDescriptionFunction {
fn name(&self) -> &str {
OBJ_DESCRIPTION_FUNCTION_NAME
}
fn return_type(&self, _: &[DataType]) -> datafusion_common::Result<DataType> {
Ok(DataType::Utf8)
}
fn signature(&self) -> &Signature {
&self.signature
}
fn invoke_with_args(
&self,
args: ScalarFunctionArgs,
) -> datafusion_common::Result<ColumnarValue> {
let num_rows = args.number_rows;
let mut builder = StringBuilder::with_capacity(num_rows, 0);
for _ in 0..num_rows {
builder.append_null();
}
Ok(ColumnarValue::Array(Arc::new(builder.finish())))
}
}
/// PostgreSQL col_description - returns NULL for compatibility
#[derive(Display, Debug, Clone)]
#[display("{}", self.name())]
pub(super) struct ColDescriptionFunction {
signature: Signature,
}
impl ColDescriptionFunction {
pub fn new() -> Self {
Self {
signature: Signature::one_of(
vec![
TypeSignature::Exact(vec![DataType::Int64, DataType::Int32]),
TypeSignature::Exact(vec![DataType::UInt32, DataType::Int32]),
TypeSignature::Exact(vec![DataType::Int64, DataType::Int64]),
TypeSignature::Exact(vec![DataType::UInt32, DataType::Int64]),
],
Volatility::Stable,
),
}
}
}
impl Function for ColDescriptionFunction {
fn name(&self) -> &str {
COL_DESCRIPTION_FUNCTION_NAME
}
fn return_type(&self, _: &[DataType]) -> datafusion_common::Result<DataType> {
Ok(DataType::Utf8)
}
fn signature(&self) -> &Signature {
&self.signature
}
fn invoke_with_args(
&self,
args: ScalarFunctionArgs,
) -> datafusion_common::Result<ColumnarValue> {
let num_rows = args.number_rows;
let mut builder = StringBuilder::with_capacity(num_rows, 0);
for _ in 0..num_rows {
builder.append_null();
}
Ok(ColumnarValue::Array(Arc::new(builder.finish())))
}
}
/// PostgreSQL shobj_description - returns NULL for compatibility
#[derive(Display, Debug, Clone)]
#[display("{}", self.name())]
pub(super) struct ShobjDescriptionFunction {
signature: Signature,
}
impl ShobjDescriptionFunction {
pub fn new() -> Self {
Self {
signature: Signature::one_of(
vec![
TypeSignature::Exact(vec![DataType::Int64, DataType::Utf8]),
TypeSignature::Exact(vec![DataType::UInt64, DataType::Utf8]),
TypeSignature::Exact(vec![DataType::Int32, DataType::Utf8]),
TypeSignature::Exact(vec![DataType::UInt32, DataType::Utf8]),
],
Volatility::Stable,
),
}
}
}
impl Function for ShobjDescriptionFunction {
fn name(&self) -> &str {
SHOBJ_DESCRIPTION_FUNCTION_NAME
}
fn return_type(&self, _: &[DataType]) -> datafusion_common::Result<DataType> {
Ok(DataType::Utf8)
}
fn signature(&self) -> &Signature {
&self.signature
}
fn invoke_with_args(
&self,
args: ScalarFunctionArgs,
) -> datafusion_common::Result<ColumnarValue> {
let num_rows = args.number_rows;
let mut builder = StringBuilder::with_capacity(num_rows, 0);
for _ in 0..num_rows {
builder.append_null();
}
Ok(ColumnarValue::Array(Arc::new(builder.finish())))
}
}
/// PostgreSQL pg_my_temp_schema - returns 0 (no temp schema) for compatibility
impl Function for PgMyTempSchemaFunction {
fn name(&self) -> &str {
PG_MY_TEMP_SCHEMA_FUNCTION_NAME
}
fn return_type(&self, _: &[DataType]) -> datafusion_common::Result<DataType> {
Ok(DataType::UInt32)
}
fn signature(&self) -> &Signature {
&self.signature
}
fn invoke_with_args(
&self,
_args: ScalarFunctionArgs,
) -> datafusion_common::Result<ColumnarValue> {
Ok(ColumnarValue::Scalar(ScalarValue::UInt32(Some(0))))
}
}
pub(super) struct PGCatalogFunction;
impl PGCatalogFunction {
@@ -354,8 +166,9 @@ impl PGCatalogFunction {
let static_tables =
Arc::new(PgCatalogStaticTables::try_new().expect("load postgres static tables"));
registry.register_scalar(PGVersionFunction::default());
registry.register_scalar(CurrentSchemaFunction::default());
registry.register_scalar(CurrentSchemasFunction::new());
registry.register_scalar(CurrentSchemasFunction::default());
registry.register_scalar(SessionUserFunction::default());
registry.register_scalar(CurrentDatabaseFunction::default());
registry.register(pg_catalog::format_type::create_format_type_udf());
@@ -386,98 +199,5 @@ impl PGCatalogFunction {
registry.register(pg_catalog::create_pg_total_relation_size_udf());
registry.register(pg_catalog::create_pg_stat_get_numscans());
registry.register(pg_catalog::create_pg_get_constraintdef());
registry.register(pg_catalog::create_pg_get_partition_ancestors_udf());
registry.register_scalar(ObjDescriptionFunction::new());
registry.register_scalar(ColDescriptionFunction::new());
registry.register_scalar(ShobjDescriptionFunction::new());
registry.register_scalar(PgMyTempSchemaFunction::default());
}
}
#[cfg(test)]
mod tests {
use std::sync::Arc;
use arrow_schema::Field;
use datafusion::arrow::array::Array;
use datafusion_common::ScalarValue;
use datafusion_expr::ColumnarValue;
use super::*;
fn create_test_args(args: Vec<ColumnarValue>, number_rows: usize) -> ScalarFunctionArgs {
ScalarFunctionArgs {
args,
arg_fields: vec![],
number_rows,
return_field: Arc::new(Field::new("result", DataType::Utf8, true)),
config_options: Arc::new(Default::default()),
}
}
#[test]
fn test_obj_description_function() {
let func = ObjDescriptionFunction::new();
assert_eq!("obj_description", func.name());
assert_eq!(DataType::Utf8, func.return_type(&[]).unwrap());
let args = create_test_args(
vec![
ColumnarValue::Scalar(ScalarValue::Int64(Some(1234))),
ColumnarValue::Scalar(ScalarValue::Utf8(Some("pg_class".to_string()))),
],
1,
);
let result = func.invoke_with_args(args).unwrap();
if let ColumnarValue::Array(arr) = result {
assert_eq!(1, arr.len());
assert!(arr.is_null(0));
} else {
panic!("Expected Array result");
}
}
#[test]
fn test_col_description_function() {
let func = ColDescriptionFunction::new();
assert_eq!("col_description", func.name());
assert_eq!(DataType::Utf8, func.return_type(&[]).unwrap());
let args = create_test_args(
vec![
ColumnarValue::Scalar(ScalarValue::Int64(Some(1234))),
ColumnarValue::Scalar(ScalarValue::Int64(Some(1))),
],
1,
);
let result = func.invoke_with_args(args).unwrap();
if let ColumnarValue::Array(arr) = result {
assert_eq!(1, arr.len());
assert!(arr.is_null(0));
} else {
panic!("Expected Array result");
}
}
#[test]
fn test_shobj_description_function() {
let func = ShobjDescriptionFunction::new();
assert_eq!("shobj_description", func.name());
assert_eq!(DataType::Utf8, func.return_type(&[]).unwrap());
let args = create_test_args(
vec![
ColumnarValue::Scalar(ScalarValue::Int64(Some(1))),
ColumnarValue::Scalar(ScalarValue::Utf8(Some("pg_database".to_string()))),
],
1,
);
let result = func.invoke_with_args(args).unwrap();
if let ColumnarValue::Array(arr) = result {
assert_eq!(1, arr.len());
assert!(arr.is_null(0));
} else {
panic!("Expected Array result");
}
}
}
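All three describe-style shims above (obj_description, col_description, shobj_description) share the same behavior: whatever the arguments, they produce a Utf8 column with one NULL per input row, which is exactly what the tests assert. As a standalone illustration of that pattern, not part of this diff and assuming only the arrow crate:

use arrow::array::{Array, StringArray, StringBuilder};

/// Builds a Utf8 array of `num_rows` NULLs, mirroring the invoke_with_args bodies above.
fn all_null_utf8(num_rows: usize) -> StringArray {
    let mut builder = StringBuilder::with_capacity(num_rows, 0);
    for _ in 0..num_rows {
        builder.append_null();
    }
    builder.finish()
}

fn main() {
    let arr = all_null_utf8(3);
    assert_eq!(arr.len(), 3);
    assert_eq!(arr.null_count(), 3); // every row is NULL
}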

View File

@@ -0,0 +1,61 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::fmt;
use datafusion::arrow::datatypes::DataType;
use datafusion_common::ScalarValue;
use datafusion_expr::{ColumnarValue, ScalarFunctionArgs, Signature, Volatility};
use crate::function::Function;
#[derive(Clone, Debug)]
pub(crate) struct PGVersionFunction {
signature: Signature,
}
impl Default for PGVersionFunction {
fn default() -> Self {
Self {
signature: Signature::exact(vec![], Volatility::Immutable),
}
}
}
impl fmt::Display for PGVersionFunction {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "pg_catalog.VERSION")
}
}
impl Function for PGVersionFunction {
fn name(&self) -> &str {
"pg_catalog.version"
}
fn return_type(&self, _: &[DataType]) -> datafusion_common::Result<DataType> {
Ok(DataType::Utf8View)
}
fn signature(&self) -> &Signature {
&self.signature
}
fn invoke_with_args(&self, _: ScalarFunctionArgs) -> datafusion_common::Result<ColumnarValue> {
Ok(ColumnarValue::Scalar(ScalarValue::Utf8View(Some(format!(
"PostgreSQL 16.3 GreptimeDB {}",
common_version::version()
)))))
}
}

View File

@@ -50,7 +50,7 @@ impl Function for VersionFunction {
)
}
Channel::Postgres => {
format!("PostgreSQL 16.3 GreptimeDB {}", common_version::version())
format!("16.3-greptimedb-{}", common_version::version())
}
_ => common_version::version().to_string(),
};
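For a client calling version() over the Postgres channel, the effect of this one-line swap is only the shape of the reported string: the removed line produced the long PostgreSQL-style banner, while the added line produces a compact tag. Roughly, with a stand-in build version (illustration only, not part of this diff):

fn main() {
    let v = "0.10.0"; // stand-in for common_version::version()
    let removed = format!("PostgreSQL 16.3 GreptimeDB {}", v); // "PostgreSQL 16.3 GreptimeDB 0.10.0"
    let added = format!("16.3-greptimedb-{}", v);              // "16.3-greptimedb-0.10.0"
    println!("{removed} vs {added}");
}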

View File

@@ -23,7 +23,6 @@ datatypes.workspace = true
flatbuffers = "25.2"
hyper.workspace = true
lazy_static.workspace = true
notify.workspace = true
prost.workspace = true
serde.workspace = true
serde_json.workspace = true
@@ -38,7 +37,6 @@ vec1 = "1.12"
criterion = "0.4"
hyper-util = { workspace = true, features = ["tokio"] }
rand.workspace = true
tempfile.workspace = true
[[bench]]
name = "bench_main"

View File

@@ -12,7 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use std::path::Path;
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, AtomicU64, AtomicUsize, Ordering};
use std::time::Duration;
@@ -23,15 +22,14 @@ use dashmap::DashMap;
use dashmap::mapref::entry::Entry;
use lazy_static::lazy_static;
use serde::{Deserialize, Serialize};
use snafu::ResultExt;
use snafu::{OptionExt, ResultExt};
use tokio_util::sync::CancellationToken;
use tonic::transport::{
Certificate, Channel as InnerChannel, ClientTlsConfig, Endpoint, Identity, Uri,
};
use tower::Service;
use crate::error::{CreateChannelSnafu, InvalidConfigFilePathSnafu, Result};
use crate::reloadable_tls::{ReloadableTlsConfig, TlsConfigLoader, maybe_watch_tls_config};
use crate::error::{CreateChannelSnafu, InvalidConfigFilePathSnafu, InvalidTlsConfigSnafu, Result};
const RECYCLE_CHANNEL_INTERVAL_SECS: u64 = 60;
pub const DEFAULT_GRPC_REQUEST_TIMEOUT_SECS: u64 = 10;
@@ -52,7 +50,7 @@ pub struct ChannelManager {
struct Inner {
id: u64,
config: ChannelConfig,
reloadable_client_tls_config: Option<Arc<ReloadableClientTlsConfig>>,
client_tls_config: Option<ClientTlsConfig>,
pool: Arc<Pool>,
channel_recycle_started: AtomicBool,
cancel: CancellationToken,
@@ -80,7 +78,7 @@ impl Inner {
Self {
id,
config,
reloadable_client_tls_config: None,
client_tls_config: None,
pool,
channel_recycle_started: AtomicBool::new(false),
cancel,
@@ -93,22 +91,57 @@ impl ChannelManager {
Default::default()
}
/// Create a ChannelManager with configuration and optional TLS config
///
/// Use [`load_client_tls_config`] to create TLS configuration from `ClientTlsOption`.
/// The TLS config supports both static (watch disabled) and dynamic reloading (watch enabled).
/// If you want to use dynamic reloading, please **manually** invoke [`maybe_watch_client_tls_config`] after this method.
pub fn with_config(
config: ChannelConfig,
reloadable_tls_config: Option<Arc<ReloadableClientTlsConfig>>,
) -> Self {
let mut inner = Inner::with_config(config.clone());
inner.reloadable_client_tls_config = reloadable_tls_config;
pub fn with_config(config: ChannelConfig) -> Self {
let inner = Inner::with_config(config);
Self {
inner: Arc::new(inner),
}
}
    /// Reads the TLS certificate and key files and creates a `ChannelManager` with the TLS config applied.
pub fn with_tls_config(config: ChannelConfig) -> Result<Self> {
let mut inner = Inner::with_config(config.clone());
        // Set up TLS from the configured client TLS options.
let path_config = config.client_tls.context(InvalidTlsConfigSnafu {
msg: "no config input",
})?;
if !path_config.enabled {
            // TLS is not enabled: ignore the remaining TLS options and leave
            // `client_tls_config` unset so that plain (non-TLS) channels are used.
return Ok(Self {
inner: Arc::new(inner),
});
}
let mut tls_config = ClientTlsConfig::new();
if let Some(server_ca) = path_config.server_ca_cert_path {
let server_root_ca_cert =
std::fs::read_to_string(server_ca).context(InvalidConfigFilePathSnafu)?;
let server_root_ca_cert = Certificate::from_pem(server_root_ca_cert);
tls_config = tls_config.ca_certificate(server_root_ca_cert);
}
if let (Some(client_cert_path), Some(client_key_path)) =
(&path_config.client_cert_path, &path_config.client_key_path)
{
let client_cert =
std::fs::read_to_string(client_cert_path).context(InvalidConfigFilePathSnafu)?;
let client_key =
std::fs::read_to_string(client_key_path).context(InvalidConfigFilePathSnafu)?;
let client_identity = Identity::from_pem(client_cert, client_key);
tls_config = tls_config.identity(client_identity);
}
inner.client_tls_config = Some(tls_config);
Ok(Self {
inner: Arc::new(inner),
})
}
pub fn config(&self) -> &ChannelConfig {
&self.inner.config
}
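A caller-side sketch of the static TLS path restored in this hunk, not part of this diff: the ClientTlsOption fields are taken from this file, while the module paths, ChannelConfig's public fields, and its Default impl are assumptions that may not match the real API.

// Sketch under stated assumptions; not the project's actual construction code.
use common_grpc::channel_manager::{ChannelConfig, ChannelManager, ClientTlsOption};

fn tls_manager() -> common_grpc::error::Result<ChannelManager> {
    let tls = ClientTlsOption {
        enabled: true,
        server_ca_cert_path: Some("/etc/greptimedb/ca.pem".to_string()),
        client_cert_path: Some("/etc/greptimedb/client.pem".to_string()),
        client_key_path: Some("/etc/greptimedb/client.key".to_string()),
    };
    // Certificates are read once inside with_tls_config; there is no file watching
    // or hot reload on this path, so rotated certs require a restart.
    let config = ChannelConfig {
        client_tls: Some(tls),
        ..ChannelConfig::default()
    };
    ChannelManager::with_tls_config(config)
}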
@@ -178,21 +211,8 @@ impl ChannelManager {
self.pool().retain_channel(f);
}
/// Clear all channels to force reconnection.
/// This should be called when TLS configuration changes to ensure new connections use updated certificates.
pub fn clear_all_channels(&self) {
self.pool().retain_channel(|_, _| false);
}
fn build_endpoint(&self, addr: &str) -> Result<Endpoint> {
// Get the latest TLS config from reloadable config (which handles both static and dynamic cases)
let tls_config = self
.inner
.reloadable_client_tls_config
.as_ref()
.and_then(|c| c.get_config());
let http_prefix = if tls_config.is_some() {
let http_prefix = if self.inner.client_tls_config.is_some() {
"https"
} else {
"http"
@@ -231,9 +251,9 @@ impl ChannelManager {
if let Some(enabled) = self.config().http2_adaptive_window {
endpoint = endpoint.http2_adaptive_window(enabled);
}
if let Some(tls_config) = tls_config {
if let Some(tls_config) = &self.inner.client_tls_config {
endpoint = endpoint
.tls_config(tls_config)
.tls_config(tls_config.clone())
.context(CreateChannelSnafu { addr })?;
}
@@ -267,97 +287,13 @@ impl ChannelManager {
}
}
fn load_tls_config(tls_option: Option<&ClientTlsOption>) -> Result<Option<ClientTlsConfig>> {
let path_config = match tls_option {
Some(path_config) if path_config.enabled => path_config,
_ => return Ok(None),
};
let mut tls_config = ClientTlsConfig::new();
if let Some(server_ca) = &path_config.server_ca_cert_path {
let server_root_ca_cert =
std::fs::read_to_string(server_ca).context(InvalidConfigFilePathSnafu)?;
let server_root_ca_cert = Certificate::from_pem(server_root_ca_cert);
tls_config = tls_config.ca_certificate(server_root_ca_cert);
}
if let (Some(client_cert_path), Some(client_key_path)) =
(&path_config.client_cert_path, &path_config.client_key_path)
{
let client_cert =
std::fs::read_to_string(client_cert_path).context(InvalidConfigFilePathSnafu)?;
let client_key =
std::fs::read_to_string(client_key_path).context(InvalidConfigFilePathSnafu)?;
let client_identity = Identity::from_pem(client_cert, client_key);
tls_config = tls_config.identity(client_identity);
}
Ok(Some(tls_config))
}
impl TlsConfigLoader<ClientTlsConfig> for ClientTlsOption {
type Error = crate::error::Error;
fn load(&self) -> Result<Option<ClientTlsConfig>> {
load_tls_config(Some(self))
}
fn watch_paths(&self) -> Vec<&Path> {
let mut paths = Vec::new();
if let Some(cert_path) = &self.client_cert_path {
paths.push(Path::new(cert_path.as_str()));
}
if let Some(key_path) = &self.client_key_path {
paths.push(Path::new(key_path.as_str()));
}
if let Some(ca_path) = &self.server_ca_cert_path {
paths.push(Path::new(ca_path.as_str()));
}
paths
}
fn watch_enabled(&self) -> bool {
self.enabled && self.watch
}
}
/// Type alias for client-side reloadable TLS config
pub type ReloadableClientTlsConfig = ReloadableTlsConfig<ClientTlsConfig, ClientTlsOption>;
/// Load client TLS configuration from `ClientTlsOption` and return a `ReloadableClientTlsConfig`.
/// This is the primary way to create TLS configuration for the ChannelManager.
pub fn load_client_tls_config(
tls_option: Option<ClientTlsOption>,
) -> Result<Option<Arc<ReloadableClientTlsConfig>>> {
match tls_option {
Some(option) if option.enabled => {
let reloadable = ReloadableClientTlsConfig::try_new(option)?;
Ok(Some(Arc::new(reloadable)))
}
_ => Ok(None),
}
}
pub fn maybe_watch_client_tls_config(
client_tls_config: Arc<ReloadableClientTlsConfig>,
channel_manager: ChannelManager,
) -> Result<()> {
maybe_watch_tls_config(client_tls_config, move || {
// Clear all existing channels to force reconnection with new certificates
channel_manager.clear_all_channels();
info!("Cleared all existing channels to use new TLS certificates.");
})
}
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize, Default)]
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct ClientTlsOption {
/// Whether to enable TLS for client.
pub enabled: bool,
pub server_ca_cert_path: Option<String>,
pub client_cert_path: Option<String>,
pub client_key_path: Option<String>,
#[serde(default)]
pub watch: bool,
}
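Dropping the watch field also has a quiet compatibility property: serde ignores unknown keys by default (no deny_unknown_fields appears on this struct), so an older config file that still sets watch keeps parsing. A throwaway sketch, assuming the toml crate and a guessed module path:

use common_grpc::channel_manager::ClientTlsOption; // module path is an assumption

fn main() {
    // The leftover `watch` key is silently ignored rather than rejected.
    let parsed: ClientTlsOption = toml::from_str(
        r#"
        enabled = true
        server_ca_cert_path = "/etc/greptimedb/ca.pem"
        watch = true
        "#,
    )
    .expect("valid client TLS options");
    assert!(parsed.enabled);
    assert!(parsed.client_cert_path.is_none()); // missing Option fields default to None
}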
#[derive(Clone, Debug, PartialEq, Eq)]
@@ -677,7 +613,6 @@ mod tests {
server_ca_cert_path: Some("some_server_path".to_string()),
client_cert_path: Some("some_cert_path".to_string()),
client_key_path: Some("some_key_path".to_string()),
watch: false,
});
assert_eq!(
@@ -699,7 +634,6 @@ mod tests {
server_ca_cert_path: Some("some_server_path".to_string()),
client_cert_path: Some("some_cert_path".to_string()),
client_key_path: Some("some_key_path".to_string()),
watch: false,
}),
max_recv_message_size: DEFAULT_MAX_GRPC_RECV_MESSAGE_SIZE,
max_send_message_size: DEFAULT_MAX_GRPC_SEND_MESSAGE_SIZE,
@@ -725,7 +659,7 @@ mod tests {
.http2_adaptive_window(true)
.tcp_keepalive(Duration::from_secs(2))
.tcp_nodelay(true);
let mgr = ChannelManager::with_config(config, None);
let mgr = ChannelManager::with_config(config);
let res = mgr.build_endpoint("test_addr");

View File

@@ -38,15 +38,6 @@ pub enum Error {
location: Location,
},
#[snafu(display("Failed to watch config file path: {}", path))]
FileWatch {
path: String,
#[snafu(source)]
error: notify::Error,
#[snafu(implicit)]
location: Location,
},
#[snafu(display(
"Write type mismatch, column name: {}, expected: {}, actual: {}",
column_name,
@@ -117,7 +108,6 @@ impl ErrorExt for Error {
match self {
Error::InvalidTlsConfig { .. }
| Error::InvalidConfigFilePath { .. }
| Error::FileWatch { .. }
| Error::TypeMismatch { .. }
| Error::InvalidFlightData { .. }
| Error::NotSupported { .. } => StatusCode::InvalidArguments,

Some files were not shown because too many files have changed in this diff.