Mirror of https://github.com/GreptimeTeam/greptimedb.git (synced 2025-12-25 07:30:02 +00:00)

Compare commits: poc_datafl...create-vie (2 commits)

| Author | SHA1 | Date |
|---|---|---|
|  | 94409967be |  |
|  | 7503992d61 |  |
@@ -24,7 +24,3 @@ GT_KAFKA_ENDPOINTS = localhost:9092

# Setting for fuzz tests
GT_MYSQL_ADDR = localhost:4002

# Setting for unstable fuzz tests
GT_FUZZ_BINARY_PATH=/path/to/
GT_FUZZ_INSTANCE_ROOT_DIR=/tmp/unstable_greptime
@@ -22,15 +22,15 @@ inputs:
  build-dev-builder-ubuntu:
    description: Build dev-builder-ubuntu image
    required: false
    default: "true"
    default: 'true'
  build-dev-builder-centos:
    description: Build dev-builder-centos image
    required: false
    default: "true"
    default: 'true'
  build-dev-builder-android:
    description: Build dev-builder-android image
    required: false
    default: "true"
    default: 'true'
runs:
  using: composite
  steps:
@@ -47,7 +47,7 @@ runs:
      run: |
        make dev-builder \
          BASE_IMAGE=ubuntu \
          BUILDX_MULTI_PLATFORM_BUILD=all \
          BUILDX_MULTI_PLATFORM_BUILD=true \
          IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \
          IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
          IMAGE_TAG=${{ inputs.version }}
@@ -58,7 +58,7 @@ runs:
      run: |
        make dev-builder \
          BASE_IMAGE=centos \
          BUILDX_MULTI_PLATFORM_BUILD=amd64 \
          BUILDX_MULTI_PLATFORM_BUILD=true \
          IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \
          IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
          IMAGE_TAG=${{ inputs.version }}
@@ -72,5 +72,5 @@ runs:
          IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \
          IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
          IMAGE_TAG=${{ inputs.version }} && \
        docker push ${{ inputs.dockerhub-image-registry }}/${{ inputs.dockerhub-image-namespace }}/dev-builder-android:${{ inputs.version }}

@@ -16,7 +16,7 @@ inputs:
  dev-mode:
    description: Enable dev mode, only build standard greptime
    required: false
    default: "false"
    default: 'false'
  working-dir:
    description: Working directory to build the artifacts
    required: false
@@ -68,7 +68,7 @@ runs:
    - name: Build greptime on centos base image
      uses: ./.github/actions/build-greptime-binary
      if: ${{ inputs.arch == 'amd64' && inputs.dev-mode == 'false' }} # Builds greptime for centos if the host machine is amd64.
      if: ${{ inputs.arch == 'amd64' && inputs.dev-mode == 'false' }} # Only build centos7 base image for amd64.
      with:
        base-image: centos
        features: servers/dashboard
@@ -79,7 +79,7 @@ runs:
    - name: Build greptime on android base image
      uses: ./.github/actions/build-greptime-binary
      if: ${{ inputs.arch == 'amd64' && inputs.dev-mode == 'false' }} # Builds arm64 greptime binary for android if the host machine is amd64.
      if: ${{ inputs.arch == 'amd64' && inputs.dev-mode == 'false' }} # Only build android base image on amd64.
      with:
        base-image: android
        artifacts-dir: greptime-android-arm64-${{ inputs.version }}

@@ -26,6 +26,8 @@ runs:
  using: composite
  steps:
    - uses: arduino/setup-protoc@v3
      with:
        repo-token: ${{ secrets.GITHUB_TOKEN }}

    - name: Install rust toolchain
      uses: dtolnay/rust-toolchain@master
12  .github/actions/fuzz-test/action.yaml  (vendored)
@@ -3,17 +3,11 @@ description: 'Fuzz test given setup and service'
inputs:
  target:
    description: "The fuzz target to test"
    required: true
  max-total-time:
    description: "Max total time(secs)"
    required: true
  unstable:
    default: 'false'
    description: "Enable unstable feature"
runs:
  using: composite
  steps:
    - name: Run Fuzz Test
      shell: bash
      run: cargo fuzz run ${{ inputs.target }} --fuzz-dir tests-fuzz -D -s none ${{ inputs.unstable == 'true' && '--features=unstable' || '' }} -- -max_total_time=${{ inputs.max-total-time }}
      run: cargo fuzz run ${{ inputs.target }} --fuzz-dir tests-fuzz -D -s none -- -max_total_time=120
      env:
        GT_MYSQL_ADDR: 127.0.0.1:4002
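For reference, a hedged sketch of what the templated command above resolves to for one of the unstable targets (assuming unstable: 'true' and max-total-time: 120; the target name comes from the develop.yml matrix further down):

cargo fuzz run unstable_fuzz_create_table_standalone --fuzz-dir tests-fuzz -D -s none --features=unstable -- -max_total_time=120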
13  .github/pr-title-breaking-change-label-config.json  (vendored, new file)
@@ -0,0 +1,13 @@
{
  "LABEL": {
    "name": "breaking change",
    "color": "D93F0B"
  },
  "CHECKS": {
    "regexp": "^(?:(?!!:).)*$",
    "ignoreLabels": [
      "ignore-title"
    ],
    "alwaysPassCI": true
  }
}
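Reading the new check: the regexp `^(?:(?!!:).)*$` matches only titles that contain no "!:" sequence, so a conventional breaking-change title such as `fix!: change the default WAL directory` (an illustrative example, not from this changeset) would fail the match and, as pr-title-checker appears to be configured here, pick up the "breaking change" label while `alwaysPassCI: true` keeps the workflow green.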
12  .github/pr-title-checker-config.json  (vendored, new file)
@@ -0,0 +1,12 @@
{
  "LABEL": {
    "name": "Invalid PR Title",
    "color": "B60205"
  },
  "CHECKS": {
    "regexp": "^(feat|fix|test|refactor|chore|style|docs|perf|build|ci|revert)(\\(.*\\))?\\!?:.*",
    "ignoreLabels": [
      "ignore-title"
    ]
  }
}
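By contrast, this config enforces a conventional-commit prefix: illustrative titles like `feat(flow): add dataflow operators` or `fix!: correct WAL replay` match the regexp, while a bare `Add dataflow operators` does not and would be labeled "Invalid PR Title".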
122  .github/workflows/develop.yml  (vendored)
@@ -38,20 +38,13 @@ jobs:
    runs-on: ubuntu-20.04
    steps:
      - uses: actions/checkout@v4
      - uses: crate-ci/typos@master
      - uses: crate-ci/typos@v1.13.10
      - name: Check the config docs
        run: |
          make config-docs && \
          git diff --name-only --exit-code ./config/config.md \
          || (echo "'config/config.md' is not up-to-date, please run 'make config-docs'." && exit 1)

  license-header-check:
    runs-on: ubuntu-20.04
    name: Check License Header
    steps:
      - uses: actions/checkout@v4
      - uses: korandoru/hawkeye@v5

  check:
    name: Check
    runs-on: ${{ matrix.os }}
@@ -114,13 +107,9 @@ jobs:
        with:
          # Shares across multiple jobs
          shared-key: "build-binaries"
      - name: Install cargo-gc-bin
        shell: bash
        run: cargo install cargo-gc-bin
      - name: Build greptime binaries
        shell: bash
        # `cargo gc` will invoke `cargo build` with specified args
        run: cargo gc -- --bin greptime --bin sqlness-runner
        run: cargo build --bin greptime --bin sqlness-runner
      - name: Pack greptime binaries
        shell: bash
        run: |
@@ -141,52 +130,7 @@ jobs:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        target: [ "fuzz_create_table", "fuzz_alter_table", "fuzz_create_database", "fuzz_create_logical_table", "fuzz_alter_logical_table", "fuzz_insert", "fuzz_insert_logical_table" ]
    steps:
      - uses: actions/checkout@v4
      - uses: arduino/setup-protoc@v3
        with:
          repo-token: ${{ secrets.GITHUB_TOKEN }}
      - uses: dtolnay/rust-toolchain@master
        with:
          toolchain: ${{ env.RUST_TOOLCHAIN }}
      - name: Rust Cache
        uses: Swatinem/rust-cache@v2
        with:
          # Shares across multiple jobs
          shared-key: "fuzz-test-targets"
      - name: Set Rust Fuzz
        shell: bash
        run: |
          sudo apt-get install -y libfuzzer-14-dev
          rustup install nightly
          cargo +nightly install cargo-fuzz
      - name: Download pre-built binaries
        uses: actions/download-artifact@v4
        with:
          name: bins
          path: .
      - name: Unzip binaries
        run: tar -xvf ./bins.tar.gz
      - name: Run GreptimeDB
        run: |
          ./bins/greptime standalone start&
      - name: Fuzz Test
        uses: ./.github/actions/fuzz-test
        env:
          CUSTOM_LIBFUZZER_PATH: /usr/lib/llvm-14/lib/libFuzzer.a
          GT_MYSQL_ADDR: 127.0.0.1:4002
        with:
          target: ${{ matrix.target }}
          max-total-time: 120

  unstable-fuzztest:
    name: Unstable Fuzz Test
    needs: build
    runs-on: ubuntu-latest
    strategy:
      matrix:
        target: [ "unstable_fuzz_create_table_standalone" ]
        target: [ "fuzz_create_table", "fuzz_alter_table", "fuzz_create_database" ]
    steps:
      - uses: actions/checkout@v4
      - uses: arduino/setup-protoc@v3
@@ -212,25 +156,15 @@ jobs:
          path: .
      - name: Unzip binaries
        run: tar -xvf ./bins.tar.gz
      - name: Run GreptimeDB
        run: |
          ./bins/greptime standalone start&
      - name: Fuzz Test
        uses: ./.github/actions/fuzz-test
        env:
          CUSTOM_LIBFUZZER_PATH: /usr/lib/llvm-14/lib/libFuzzer.a
          GT_MYSQL_ADDR: 127.0.0.1:4002
          GT_FUZZ_BINARY_PATH: ./bins/greptime
          GT_FUZZ_INSTANCE_ROOT_DIR: /tmp/unstable-greptime/
        with:
          target: ${{ matrix.target }}
          max-total-time: 120
          unstable: 'true'
      - name: Upload unstable fuzz test logs
        if: failure()
        uses: actions/upload-artifact@v4
        with:
          name: unstable-fuzz-logs
          path: /tmp/unstable-greptime/
          retention-days: 3

  sqlness:
    name: Sqlness Test
@@ -250,13 +184,13 @@ jobs:
      - name: Unzip binaries
        run: tar -xvf ./bins.tar.gz
      - name: Run sqlness
        run: RUST_BACKTRACE=1 ./bins/sqlness-runner -c ./tests/cases --bins-dir ./bins --preserve-state
        run: RUST_BACKTRACE=1 ./bins/sqlness-runner -c ./tests/cases --bins-dir ./bins
      - name: Upload sqlness logs
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: sqlness-logs
          path: /tmp/sqlness-*
          path: /tmp/greptime-*.log
          retention-days: 3

  sqlness-kafka-wal:
@@ -280,13 +214,13 @@ jobs:
        working-directory: tests-integration/fixtures/kafka
        run: docker compose -f docker-compose-standalone.yml up -d --wait
      - name: Run sqlness
        run: RUST_BACKTRACE=1 ./bins/sqlness-runner -w kafka -k 127.0.0.1:9092 -c ./tests/cases --bins-dir ./bins --preserve-state
        run: RUST_BACKTRACE=1 ./bins/sqlness-runner -w kafka -k 127.0.0.1:9092 -c ./tests/cases --bins-dir ./bins
      - name: Upload sqlness logs
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: sqlness-logs-with-kafka-wal
          path: /tmp/sqlness-*
          path: /tmp/greptime-*.log
          retention-days: 3

  fmt:
@@ -330,7 +264,7 @@ jobs:
          # Shares with `Check` job
          shared-key: "check-lint"
      - name: Run cargo clippy
        run: make clippy
        run: cargo clippy --workspace --all-targets -- -D warnings

  coverage:
    if: github.event.pull_request.draft == false
@@ -396,20 +330,20 @@ jobs:
          fail_ci_if_error: false
          verbose: true

  # compat:
  #   name: Compatibility Test
  #   needs: build
  #   runs-on: ubuntu-20.04
  #   timeout-minutes: 60
  #   steps:
  #     - uses: actions/checkout@v4
  #     - name: Download pre-built binaries
  #       uses: actions/download-artifact@v4
  #       with:
  #         name: bins
  #         path: .
  #     - name: Unzip binaries
  #       run: |
  #         mkdir -p ./bins/current
  #         tar -xvf ./bins.tar.gz --strip-components=1 -C ./bins/current
  #     - run: ./tests/compat/test-compat.sh 0.6.0
  compat:
    name: Compatibility Test
    needs: build
    runs-on: ubuntu-20.04
    timeout-minutes: 60
    steps:
      - uses: actions/checkout@v4
      - name: Download pre-built binaries
        uses: actions/download-artifact@v4
        with:
          name: bins
          path: .
      - name: Unzip binaries
        run: |
          mkdir -p ./bins/current
          tar -xvf ./bins.tar.gz --strip-components=1 -C ./bins/current
      - run: ./tests/compat/test-compat.sh 0.6.0
9  .github/workflows/docs.yml  (vendored)
@@ -34,14 +34,7 @@ jobs:
    runs-on: ubuntu-20.04
    steps:
      - uses: actions/checkout@v4
      - uses: crate-ci/typos@master

  license-header-check:
    runs-on: ubuntu-20.04
    name: Check License Header
    steps:
      - uses: actions/checkout@v4
      - uses: korandoru/hawkeye@v5
      - uses: crate-ci/typos@v1.13.10

  check:
    name: Check
16  .github/workflows/license.yaml  (vendored, new file)
@@ -0,0 +1,16 @@
name: License checker

on:
  push:
    branches:
      - main
  pull_request:
    types: [opened, synchronize, reopened, ready_for_review]
jobs:
  license-header-check:
    runs-on: ubuntu-20.04
    name: license-header-check
    steps:
      - uses: actions/checkout@v4
      - name: Check License Header
        uses: korandoru/hawkeye@v5
31  .github/workflows/nightly-ci.yml  (vendored)
@@ -1,3 +1,5 @@
# Nightly CI: runs tests every night for our second-tier platforms (Windows)

on:
  schedule:
    - cron: '0 23 * * 1-5'
@@ -13,29 +15,13 @@ env:
  RUST_TOOLCHAIN: nightly-2024-04-18

jobs:
  sqlness-test:
    name: Run sqlness test
  sqlness:
    name: Sqlness Test
    if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
    runs-on: ubuntu-22.04
    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Run sqlness test
        uses: ./.github/actions/sqlness-test
        with:
          data-root: sqlness-test
          aws-ci-test-bucket: ${{ vars.AWS_CI_TEST_BUCKET }}
          aws-region: ${{ vars.AWS_CI_TEST_BUCKET_REGION }}
          aws-access-key-id: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }}
          aws-secret-access-key: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }}

  sqlness-windows:
    name: Sqlness tests on Windows
    if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
    runs-on: windows-latest-8-cores
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        os: [ windows-latest-8-cores ]
    timeout-minutes: 60
    steps:
      - uses: actions/checkout@v4
@@ -66,7 +52,6 @@ jobs:
          retention-days: 3

  test-on-windows:
    name: Run tests on Windows
    if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
    runs-on: windows-latest-8-cores
    timeout-minutes: 60
27  .github/workflows/nightly-funtional-tests.yml  (vendored, new file)
@@ -0,0 +1,27 @@
name: Nightly functional tests

on:
  schedule:
    # At 00:00 on Tuesday.
    - cron: '0 0 * * 2'
  workflow_dispatch:

jobs:
  sqlness-test:
    name: Run sqlness test
    if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
    runs-on: ubuntu-22.04
    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Run sqlness test
        uses: ./.github/actions/sqlness-test
        with:
          data-root: sqlness-test
          aws-ci-test-bucket: ${{ vars.AWS_CI_TEST_BUCKET }}
          aws-region: ${{ vars.AWS_CI_TEST_BUCKET_REGION }}
          aws-access-key-id: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }}
          aws-secret-access-key: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }}
29  .github/workflows/pr-title-checker.yml  (vendored, new file)
@@ -0,0 +1,29 @@
name: "PR Title Checker"
on:
  pull_request_target:
    types:
      - opened
      - edited
      - synchronize
      - labeled
      - unlabeled

jobs:
  check:
    runs-on: ubuntu-20.04
    timeout-minutes: 10
    steps:
      - uses: thehanimo/pr-title-checker@v1.4.2
        with:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          pass_on_octokit_error: false
          configuration_path: ".github/pr-title-checker-config.json"
  breaking:
    runs-on: ubuntu-20.04
    timeout-minutes: 10
    steps:
      - uses: thehanimo/pr-title-checker@v1.4.2
        with:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          pass_on_octokit_error: false
          configuration_path: ".github/pr-title-breaking-change-label-config.json"
2  .github/workflows/release.yml  (vendored)
@@ -436,7 +436,7 @@ jobs:
          github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}

  notification:
    if: ${{ always() || github.repository == 'GreptimeTeam/greptimedb' }}
    if: ${{ always() }} # Not requiring successful dependent jobs, always run.
    name: Send notification to Greptime team
    needs: [
      release-images-to-dockerhub,
33  .github/workflows/schedule.yml  (vendored)
@@ -1,33 +0,0 @@
name: Schedule Management
on:
  schedule:
    - cron: '4 2 * * *'
  workflow_dispatch:

permissions:
  contents: read
  issues: write
  pull-requests: write

jobs:
  maintenance:
    name: Periodic Maintenance
    runs-on: ubuntu-latest
    if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-node@v4
        with:
          node-version: 22
      - uses: pnpm/action-setup@v3
        with:
          package_json_file: 'cyborg/package.json'
          run_install: true
      - name: Describe the Environment
        working-directory: cyborg
        run: pnpm tsx -v
      - name: Do Maintenance
        working-directory: cyborg
        run: pnpm tsx bin/schedule.ts
        env:
          GITHUB_TOKEN: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
30  .github/workflows/semantic-pull-request.yml  (vendored)
@@ -1,30 +0,0 @@
name: "Semantic Pull Request"

on:
  pull_request_target:
    types:
      - opened
      - reopened
      - edited

jobs:
  check:
    runs-on: ubuntu-20.04
    timeout-minutes: 10
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-node@v4
        with:
          node-version: 22
      - uses: pnpm/action-setup@v3
        with:
          package_json_file: 'cyborg/package.json'
          run_install: true
      - name: Describe the Environment
        working-directory: cyborg
        run: pnpm tsx -v
      - name: Check Pull Request
        working-directory: cyborg
        run: pnpm tsx bin/check-pull-request.ts
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
21  .github/workflows/unassign.yml  (vendored, new file)
@@ -0,0 +1,21 @@
name: Auto Unassign
on:
  schedule:
    - cron: '4 2 * * *'
  workflow_dispatch:

permissions:
  contents: read
  issues: write
  pull-requests: write

jobs:
  auto-unassign:
    name: Auto Unassign
    runs-on: ubuntu-latest
    steps:
      - name: Auto Unassign
        uses: tisonspieces/auto-unassign@main
        with:
          token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
          repository: ${{ github.repository }}
@@ -50,7 +50,7 @@ GreptimeDB uses the [Apache 2.0 license](https://github.com/GreptimeTeam/greptim

- To ensure that the community is free and confident in its ability to use your contributions, please sign the Contributor License Agreement (CLA), which is incorporated into the pull request process.
- Make sure all files have a proper license header (run `docker run --rm -v $(pwd):/github/workspace ghcr.io/korandoru/hawkeye-native:v3 format` from the project root).
- Make sure all your code is formatted and follows the [coding style](https://pingcap.github.io/style-guide/rust/) and [style guide](docs/style-guide.md).
- Make sure all your code is formatted and follows the [coding style](https://pingcap.github.io/style-guide/rust/) and [style guide](http://github.com/greptimeTeam/docs/style-guide.md).
- Make sure all unit tests pass (using `cargo test --workspace` or [nextest](https://nexte.st/index.html) `cargo nextest run`).
- Make sure all clippy warnings are fixed (you can check locally by running `cargo clippy --workspace --all-targets -- -D warnings`).
760  Cargo.lock  (generated)
File diff suppressed because it is too large.
12  Cargo.toml
@@ -11,7 +11,6 @@ members = [
    "src/common/config",
    "src/common/datasource",
    "src/common/error",
    "src/common/frontend",
    "src/common/function",
    "src/common/macro",
    "src/common/greptimedb-telemetry",
@@ -99,7 +98,6 @@ bytemuck = "1.12"
bytes = { version = "1.5", features = ["serde"] }
chrono = { version = "0.4", features = ["serde"] }
clap = { version = "4.4", features = ["derive"] }
crossbeam-utils = "0.8"
dashmap = "5.4"
datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
datafusion-common = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
@@ -116,7 +114,7 @@ etcd-client = { git = "https://github.com/MichaelScofield/etcd-client.git", rev
fst = "0.4.7"
futures = "0.3"
futures-util = "0.3"
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "219b2409bb701f75b43fc0ba64967d2ed8e75491" }
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "73ac0207ab71dfea48f30259ffdb611501b5ecb8" }
humantime = "2.1"
humantime-serde = "1.1"
itertools = "0.10"
@@ -182,7 +180,6 @@ common-config = { path = "src/common/config" }
common-datasource = { path = "src/common/datasource" }
common-decimal = { path = "src/common/decimal" }
common-error = { path = "src/common/error" }
common-frontend = { path = "src/common/frontend" }
common-function = { path = "src/common/function" }
common-greptimedb-telemetry = { path = "src/common/greptimedb-telemetry" }
common-grpc = { path = "src/common/grpc" }
@@ -204,7 +201,6 @@ common-wal = { path = "src/common/wal" }
datanode = { path = "src/datanode" }
datatypes = { path = "src/datatypes" }
file-engine = { path = "src/file-engine" }
flow = { path = "src/flow" }
frontend = { path = "src/frontend" }
index = { path = "src/index" }
log-store = { path = "src/log-store" }
@@ -226,8 +222,6 @@ sql = { path = "src/sql" }
store-api = { path = "src/store-api" }
substrait = { path = "src/common/substrait" }
table = { path = "src/table" }
# TODO some code depends on this
tests-integration = { path = "tests-integration" }

[workspace.dependencies.meter-macros]
git = "https://github.com/GreptimeTeam/greptime-meter.git"
@@ -242,7 +236,3 @@ strip = true
lto = "thin"
debug = false
incremental = false

[profile.dev.package.sqlness-runner]
debug = false
strip = true
4  Makefile
@@ -54,10 +54,8 @@ ifneq ($(strip $(RELEASE)),)
	CARGO_BUILD_OPTS += --release
endif

ifeq ($(BUILDX_MULTI_PLATFORM_BUILD), all)
ifeq ($(BUILDX_MULTI_PLATFORM_BUILD), true)
	BUILDX_MULTI_PLATFORM_BUILD_OPTS := --platform linux/amd64,linux/arm64 --push
else ifeq ($(BUILDX_MULTI_PLATFORM_BUILD), amd64)
	BUILDX_MULTI_PLATFORM_BUILD_OPTS := --platform linux/amd64 --push
else
	BUILDX_MULTI_PLATFORM_BUILD_OPTS := -o type=docker
endif
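Connecting this to the dev-builder action change above (a sketch using only values visible in these hunks; IMAGE_TAG=latest is an illustrative value): invoking

make dev-builder BASE_IMAGE=ubuntu BUILDX_MULTI_PLATFORM_BUILD=true IMAGE_TAG=latest

takes the branch whose checked value matches, so buildx runs with --platform linux/amd64,linux/arm64 --push, while any other value of BUILDX_MULTI_PLATFORM_BUILD falls through to the local-image default -o type=docker.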
@@ -33,8 +33,6 @@ rand.workspace = true
rskafka.workspace = true
serde.workspace = true
store-api.workspace = true
# TODO depend `Database` client
tests-integration.workspace = true
tokio.workspace = true
toml.workspace = true
uuid.workspace = true
513  benchmarks/src/bin/nyc-taxi.rs  (new file)
@@ -0,0 +1,513 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Use the taxi trip records from New York City dataset to bench. You can download the dataset from
//! [here](https://www1.nyc.gov/site/tlc/about/tlc-trip-record-data.page).

#![allow(clippy::print_stdout)]

use std::collections::HashMap;
use std::path::{Path, PathBuf};
use std::time::Instant;

use arrow::array::{ArrayRef, PrimitiveArray, StringArray, TimestampMicrosecondArray};
use arrow::datatypes::{DataType, Float64Type, Int64Type};
use arrow::record_batch::RecordBatch;
use clap::Parser;
use client::api::v1::column::Values;
use client::api::v1::{
    Column, ColumnDataType, ColumnDef, CreateTableExpr, InsertRequest, InsertRequests, SemanticType,
};
use client::{Client, Database, OutputData, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use futures_util::TryStreamExt;
use indicatif::{MultiProgress, ProgressBar, ProgressStyle};
use parquet::arrow::arrow_reader::ParquetRecordBatchReaderBuilder;
use tokio::task::JoinSet;

const CATALOG_NAME: &str = "greptime";
const SCHEMA_NAME: &str = "public";

#[derive(Parser)]
#[command(name = "NYC benchmark runner")]
struct Args {
    /// Path to the dataset
    #[arg(short, long)]
    path: Option<String>,

    /// Batch size of insert request.
    #[arg(short = 's', long = "batch-size", default_value_t = 4096)]
    batch_size: usize,

    /// Number of client threads on write (parallel on file level)
    #[arg(short = 't', long = "thread-num", default_value_t = 4)]
    thread_num: usize,

    /// Number of query iteration
    #[arg(short = 'i', long = "iter-num", default_value_t = 3)]
    iter_num: usize,

    #[arg(long = "skip-write")]
    skip_write: bool,

    #[arg(long = "skip-read")]
    skip_read: bool,

    #[arg(short, long, default_value_t = String::from("127.0.0.1:4001"))]
    endpoint: String,
}

fn get_file_list<P: AsRef<Path>>(path: P) -> Vec<PathBuf> {
    std::fs::read_dir(path)
        .unwrap()
        .map(|dir| dir.unwrap().path().canonicalize().unwrap())
        .collect()
}

fn new_table_name() -> String {
    format!("nyc_taxi_{}", chrono::Utc::now().timestamp())
}

async fn write_data(
    table_name: &str,
    batch_size: usize,
    db: &Database,
    path: PathBuf,
    mpb: MultiProgress,
    pb_style: ProgressStyle,
) -> u128 {
    let file = std::fs::File::open(&path).unwrap();
    let record_batch_reader_builder = ParquetRecordBatchReaderBuilder::try_new(file).unwrap();
    let row_num = record_batch_reader_builder
        .metadata()
        .file_metadata()
        .num_rows();
    let record_batch_reader = record_batch_reader_builder
        .with_batch_size(batch_size)
        .build()
        .unwrap();
    let progress_bar = mpb.add(ProgressBar::new(row_num as _));
    progress_bar.set_style(pb_style);
    progress_bar.set_message(format!("{path:?}"));

    let mut total_rpc_elapsed_ms = 0;

    for record_batch in record_batch_reader {
        let record_batch = record_batch.unwrap();
        if !is_record_batch_full(&record_batch) {
            continue;
        }
        let (columns, row_count) = convert_record_batch(record_batch);
        let request = InsertRequest {
            table_name: table_name.to_string(),
            columns,
            row_count,
        };
        let requests = InsertRequests {
            inserts: vec![request],
        };

        let now = Instant::now();
        db.insert(requests).await.unwrap();
        let elapsed = now.elapsed();
        total_rpc_elapsed_ms += elapsed.as_millis();
        progress_bar.inc(row_count as _);
    }

    progress_bar.finish_with_message(format!("file {path:?} done in {total_rpc_elapsed_ms}ms",));
    total_rpc_elapsed_ms
}

fn convert_record_batch(record_batch: RecordBatch) -> (Vec<Column>, u32) {
    let schema = record_batch.schema();
    let fields = schema.fields();
    let row_count = record_batch.num_rows();
    let mut columns = vec![];

    for (array, field) in record_batch.columns().iter().zip(fields.iter()) {
        let (values, datatype) = build_values(array);
        let semantic_type = match field.name().as_str() {
            "VendorID" => SemanticType::Tag,
            "tpep_pickup_datetime" => SemanticType::Timestamp,
            _ => SemanticType::Field,
        };

        let column = Column {
            column_name: field.name().clone(),
            values: Some(values),
            null_mask: array
                .to_data()
                .nulls()
                .map(|bitmap| bitmap.buffer().as_slice().to_vec())
                .unwrap_or_default(),
            datatype: datatype.into(),
            semantic_type: semantic_type as i32,
            ..Default::default()
        };
        columns.push(column);
    }

    (columns, row_count as _)
}

fn build_values(column: &ArrayRef) -> (Values, ColumnDataType) {
    match column.data_type() {
        DataType::Int64 => {
            let array = column
                .as_any()
                .downcast_ref::<PrimitiveArray<Int64Type>>()
                .unwrap();
            let values = array.values();
            (
                Values {
                    i64_values: values.to_vec(),
                    ..Default::default()
                },
                ColumnDataType::Int64,
            )
        }
        DataType::Float64 => {
            let array = column
                .as_any()
                .downcast_ref::<PrimitiveArray<Float64Type>>()
                .unwrap();
            let values = array.values();
            (
                Values {
                    f64_values: values.to_vec(),
                    ..Default::default()
                },
                ColumnDataType::Float64,
            )
        }
        DataType::Timestamp(_, _) => {
            let array = column
                .as_any()
                .downcast_ref::<TimestampMicrosecondArray>()
                .unwrap();
            let values = array.values();
            (
                Values {
                    timestamp_microsecond_values: values.to_vec(),
                    ..Default::default()
                },
                ColumnDataType::TimestampMicrosecond,
            )
        }
        DataType::Utf8 => {
            let array = column.as_any().downcast_ref::<StringArray>().unwrap();
            let values = array.iter().filter_map(|s| s.map(String::from)).collect();
            (
                Values {
                    string_values: values,
                    ..Default::default()
                },
                ColumnDataType::String,
            )
        }
        _ => unimplemented!(),
    }
}

fn is_record_batch_full(batch: &RecordBatch) -> bool {
    batch.columns().iter().all(|col| col.null_count() == 0)
}

fn create_table_expr(table_name: &str) -> CreateTableExpr {
    CreateTableExpr {
        catalog_name: CATALOG_NAME.to_string(),
        schema_name: SCHEMA_NAME.to_string(),
        table_name: table_name.to_string(),
        desc: String::default(),
        column_defs: vec![
            ColumnDef {
                name: "VendorID".to_string(),
                data_type: ColumnDataType::Int64 as i32,
                is_nullable: true,
                default_constraint: vec![],
                semantic_type: SemanticType::Tag as i32,
                comment: String::new(),
                ..Default::default()
            },
            ColumnDef {
                name: "tpep_pickup_datetime".to_string(),
                data_type: ColumnDataType::TimestampMicrosecond as i32,
                is_nullable: false,
                default_constraint: vec![],
                semantic_type: SemanticType::Timestamp as i32,
                comment: String::new(),
                ..Default::default()
            },
            ColumnDef {
                name: "tpep_dropoff_datetime".to_string(),
                data_type: ColumnDataType::TimestampMicrosecond as i32,
                is_nullable: true,
                default_constraint: vec![],
                semantic_type: SemanticType::Field as i32,
                comment: String::new(),
                ..Default::default()
            },
            ColumnDef {
                name: "passenger_count".to_string(),
                data_type: ColumnDataType::Float64 as i32,
                is_nullable: true,
                default_constraint: vec![],
                semantic_type: SemanticType::Field as i32,
                comment: String::new(),
                ..Default::default()
            },
            ColumnDef {
                name: "trip_distance".to_string(),
                data_type: ColumnDataType::Float64 as i32,
                is_nullable: true,
                default_constraint: vec![],
                semantic_type: SemanticType::Field as i32,
                comment: String::new(),
                ..Default::default()
            },
            ColumnDef {
                name: "RatecodeID".to_string(),
                data_type: ColumnDataType::Float64 as i32,
                is_nullable: true,
                default_constraint: vec![],
                semantic_type: SemanticType::Field as i32,
                comment: String::new(),
                ..Default::default()
            },
            ColumnDef {
                name: "store_and_fwd_flag".to_string(),
                data_type: ColumnDataType::String as i32,
                is_nullable: true,
                default_constraint: vec![],
                semantic_type: SemanticType::Field as i32,
                comment: String::new(),
                ..Default::default()
            },
            ColumnDef {
                name: "PULocationID".to_string(),
                data_type: ColumnDataType::Int64 as i32,
                is_nullable: true,
                default_constraint: vec![],
                semantic_type: SemanticType::Field as i32,
                comment: String::new(),
                ..Default::default()
            },
            ColumnDef {
                name: "DOLocationID".to_string(),
                data_type: ColumnDataType::Int64 as i32,
                is_nullable: true,
                default_constraint: vec![],
                semantic_type: SemanticType::Field as i32,
                comment: String::new(),
                ..Default::default()
            },
            ColumnDef {
                name: "payment_type".to_string(),
                data_type: ColumnDataType::Int64 as i32,
                is_nullable: true,
                default_constraint: vec![],
                semantic_type: SemanticType::Field as i32,
                comment: String::new(),
                ..Default::default()
            },
            ColumnDef {
                name: "fare_amount".to_string(),
                data_type: ColumnDataType::Float64 as i32,
                is_nullable: true,
                default_constraint: vec![],
                semantic_type: SemanticType::Field as i32,
                comment: String::new(),
                ..Default::default()
            },
            ColumnDef {
                name: "extra".to_string(),
                data_type: ColumnDataType::Float64 as i32,
                is_nullable: true,
                default_constraint: vec![],
                semantic_type: SemanticType::Field as i32,
                comment: String::new(),
                ..Default::default()
            },
            ColumnDef {
                name: "mta_tax".to_string(),
                data_type: ColumnDataType::Float64 as i32,
                is_nullable: true,
                default_constraint: vec![],
                semantic_type: SemanticType::Field as i32,
                comment: String::new(),
                ..Default::default()
            },
            ColumnDef {
                name: "tip_amount".to_string(),
                data_type: ColumnDataType::Float64 as i32,
                is_nullable: true,
                default_constraint: vec![],
                semantic_type: SemanticType::Field as i32,
                comment: String::new(),
                ..Default::default()
            },
            ColumnDef {
                name: "tolls_amount".to_string(),
                data_type: ColumnDataType::Float64 as i32,
                is_nullable: true,
                default_constraint: vec![],
                semantic_type: SemanticType::Field as i32,
                comment: String::new(),
                ..Default::default()
            },
            ColumnDef {
                name: "improvement_surcharge".to_string(),
                data_type: ColumnDataType::Float64 as i32,
                is_nullable: true,
                default_constraint: vec![],
                semantic_type: SemanticType::Field as i32,
                comment: String::new(),
                ..Default::default()
            },
            ColumnDef {
                name: "total_amount".to_string(),
                data_type: ColumnDataType::Float64 as i32,
                is_nullable: true,
                default_constraint: vec![],
                semantic_type: SemanticType::Field as i32,
                comment: String::new(),
                ..Default::default()
            },
            ColumnDef {
                name: "congestion_surcharge".to_string(),
                data_type: ColumnDataType::Float64 as i32,
                is_nullable: true,
                default_constraint: vec![],
                semantic_type: SemanticType::Field as i32,
                comment: String::new(),
                ..Default::default()
            },
            ColumnDef {
                name: "airport_fee".to_string(),
                data_type: ColumnDataType::Float64 as i32,
                is_nullable: true,
                default_constraint: vec![],
                semantic_type: SemanticType::Field as i32,
                comment: String::new(),
                ..Default::default()
            },
        ],
        time_index: "tpep_pickup_datetime".to_string(),
        primary_keys: vec!["VendorID".to_string()],
        create_if_not_exists: true,
        table_options: Default::default(),
        table_id: None,
        engine: "mito".to_string(),
    }
}

fn query_set(table_name: &str) -> HashMap<String, String> {
    HashMap::from([
        (
            "count_all".to_string(),
            format!("SELECT COUNT(*) FROM {table_name};"),
        ),
        (
            "fare_amt_by_passenger".to_string(),
            format!("SELECT passenger_count, MIN(fare_amount), MAX(fare_amount), SUM(fare_amount) FROM {table_name} GROUP BY passenger_count"),
        )
    ])
}

async fn do_write(args: &Args, db: &Database, table_name: &str) {
    let mut file_list = get_file_list(args.path.clone().expect("Specify data path in argument"));
    let mut write_jobs = JoinSet::new();

    let create_table_result = db.create(create_table_expr(table_name)).await;
    println!("Create table result: {create_table_result:?}");

    let progress_bar_style = ProgressStyle::with_template(
        "[{elapsed_precise}] {bar:60.cyan/blue} {pos:>7}/{len:7} {msg}",
    )
    .unwrap()
    .progress_chars("##-");
    let multi_progress_bar = MultiProgress::new();
    let file_progress = multi_progress_bar.add(ProgressBar::new(file_list.len() as _));
    file_progress.inc(0);

    let batch_size = args.batch_size;
    for _ in 0..args.thread_num {
        if let Some(path) = file_list.pop() {
            let db = db.clone();
            let mpb = multi_progress_bar.clone();
            let pb_style = progress_bar_style.clone();
            let table_name = table_name.to_string();
            let _ = write_jobs.spawn(async move {
                write_data(&table_name, batch_size, &db, path, mpb, pb_style).await
            });
        }
    }
    while write_jobs.join_next().await.is_some() {
        file_progress.inc(1);
        if let Some(path) = file_list.pop() {
            let db = db.clone();
            let mpb = multi_progress_bar.clone();
            let pb_style = progress_bar_style.clone();
            let table_name = table_name.to_string();
            let _ = write_jobs.spawn(async move {
                write_data(&table_name, batch_size, &db, path, mpb, pb_style).await
            });
        }
    }
}

async fn do_query(num_iter: usize, db: &Database, table_name: &str) {
    for (query_name, query) in query_set(table_name) {
        println!("Running query: {query}");
        for i in 0..num_iter {
            let now = Instant::now();
            let res = db.sql(&query).await.unwrap();
            match res.data {
                OutputData::AffectedRows(_) | OutputData::RecordBatches(_) => (),
                OutputData::Stream(stream) => {
                    stream.try_collect::<Vec<_>>().await.unwrap();
                }
            }
            let elapsed = now.elapsed();
            println!(
                "query {}, iteration {}: {}ms",
                query_name,
                i,
                elapsed.as_millis(),
            );
        }
    }
}

fn main() {
    let args = Args::parse();

    tokio::runtime::Builder::new_multi_thread()
        .worker_threads(args.thread_num)
        .enable_all()
        .build()
        .unwrap()
        .block_on(async {
            let client = Client::with_urls(vec![&args.endpoint]);
            let db = Database::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, client);
            let table_name = new_table_name();

            if !args.skip_write {
                do_write(&args, &db, &table_name).await;
            }

            if !args.skip_read {
                do_query(args.iter_num, &db, &table_name).await;
            }
        })
}
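A hedged usage sketch for the benchmark above, derived from its Args struct (the dataset directory is a placeholder; --endpoint defaults to 127.0.0.1:4001):

cargo run --release --bin nyc-taxi -- --path /path/to/nyc-taxi-parquet --batch-size 4096 --thread-num 4 --iter-num 3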
@@ -33,7 +33,9 @@
| `postgres.tls.key_path` | String | `None` | Private key file path. |
| `postgres.tls.watch` | Bool | `false` | Watch for Certificate and key file change and auto reload |
| `opentsdb` | -- | -- | OpenTSDB protocol options. |
| `opentsdb.enable` | Bool | `true` | Whether to enable OpenTSDB put in HTTP API. |
| `opentsdb.enable` | Bool | `true` | Whether to enable |
| `opentsdb.addr` | String | `127.0.0.1:4242` | OpenTSDB telnet API server address. |
| `opentsdb.runtime_size` | Integer | `2` | The number of server worker threads. |
| `influxdb` | -- | -- | InfluxDB protocol options. |
| `influxdb.enable` | Bool | `true` | Whether to enable InfluxDB protocol in HTTP API. |
| `prom_store` | -- | -- | Prometheus remote storage options |
@@ -166,7 +168,9 @@
| `postgres.tls.key_path` | String | `None` | Private key file path. |
| `postgres.tls.watch` | Bool | `false` | Watch for Certificate and key file change and auto reload |
| `opentsdb` | -- | -- | OpenTSDB protocol options. |
| `opentsdb.enable` | Bool | `true` | Whether to enable OpenTSDB put in HTTP API. |
| `opentsdb.enable` | Bool | `true` | Whether to enable |
| `opentsdb.addr` | String | `127.0.0.1:4242` | OpenTSDB telnet API server address. |
| `opentsdb.runtime_size` | Integer | `2` | The number of server worker threads. |
| `influxdb` | -- | -- | InfluxDB protocol options. |
| `influxdb.enable` | Bool | `true` | Whether to enable InfluxDB protocol in HTTP API. |
| `prom_store` | -- | -- | Prometheus remote storage options |
@@ -88,8 +88,12 @@ watch = false

## OpenTSDB protocol options.
[opentsdb]
## Whether to enable OpenTSDB put in HTTP API.
## Whether to enable
enable = true
## OpenTSDB telnet API server address.
addr = "127.0.0.1:4242"
## The number of server worker threads.
runtime_size = 2

## InfluxDB protocol options.
[influxdb]

@@ -83,8 +83,12 @@ watch = false

## OpenTSDB protocol options.
[opentsdb]
## Whether to enable OpenTSDB put in HTTP API.
## Whether to enable
enable = true
## OpenTSDB telnet API server address.
addr = "127.0.0.1:4242"
## The number of server worker threads.
runtime_size = 2

## InfluxDB protocol options.
[influxdb]
2  cyborg/.gitignore  (vendored)
@@ -1,2 +0,0 @@
node_modules
.env
@@ -1,79 +0,0 @@
/*
 * Copyright 2023 Greptime Team
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import * as core from '@actions/core'
import {handleError, obtainClient} from "@/common";
import {context} from "@actions/github";
import {PullRequestEvent} from "@octokit/webhooks-types";
import {Options, sync as conventionalCommitsParser} from 'conventional-commits-parser';
import conventionalCommitTypes from 'conventional-commit-types';
import _ from "lodash";

const defaultTypes = Object.keys(conventionalCommitTypes.types)
const breakingChangeLabel = "breaking-change"

// These options are copied from [1].
// [1] https://github.com/conventional-changelog/conventional-changelog/blob/3f60b464/packages/conventional-changelog-conventionalcommits/src/parser.js
export const parserOpts: Options = {
    headerPattern: /^(\w*)(?:\((.*)\))?!?: (.*)$/,
    breakingHeaderPattern: /^(\w*)(?:\((.*)\))?!: (.*)$/,
    headerCorrespondence: [
        'type',
        'scope',
        'subject'
    ],
    noteKeywords: ['BREAKING CHANGE', 'BREAKING-CHANGE'],
    revertPattern: /^(?:Revert|revert:)\s"?([\s\S]+?)"?\s*This reverts commit (\w*)\./i,
    revertCorrespondence: ['header', 'hash'],
    issuePrefixes: ['#']
}

async function main() {
    if (!context.payload.pull_request) {
        throw new Error(`Only pull request event supported. ${context.eventName} is unsupported.`)
    }

    const client = obtainClient("GITHUB_TOKEN")
    const payload = context.payload as PullRequestEvent
    const { owner, repo, number } = {
        owner: payload.pull_request.base.user.login,
        repo: payload.pull_request.base.repo.name,
        number: payload.pull_request.number,
    }
    const { data: pull_request } = await client.rest.pulls.get({
        owner, repo, pull_number: number,
    })

    const commit = conventionalCommitsParser(pull_request.title, parserOpts)
    core.info(`Receive commit: ${JSON.stringify(commit)}`)

    if (!commit.type) {
        throw Error(`Malformed commit: ${JSON.stringify(commit)}`)
    }

    if (!defaultTypes.includes(commit.type)) {
        throw Error(`Unexpected type ${JSON.stringify(commit.type)} of commit: ${JSON.stringify(commit)}`)
    }

    const breakingChanges = _.filter(commit.notes, _.matches({ title: 'BREAKING CHANGE'}))
    if (breakingChanges.length > 0) {
        await client.rest.issues.addLabels({
            owner, repo, issue_number: number, labels: [breakingChangeLabel]
        })
    }
}

main().catch(handleError)
@@ -1,73 +0,0 @@
/*
 * Copyright 2023 Greptime Team
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import * as core from '@actions/core'
import {GitHub} from "@actions/github/lib/utils"
import _ from "lodash";
import dayjs from "dayjs";
import {handleError, obtainClient} from "@/common";

async function main() {
    const client = obtainClient("GITHUB_TOKEN")
    await unassign(client)
}

async function unassign(client: InstanceType<typeof GitHub>) {
    const owner = "GreptimeTeam"
    const repo = "greptimedb"

    const dt = dayjs().subtract(14, 'days');
    core.info(`Open issues updated before ${dt.toISOString()} will be considered stale.`)

    const members = await client.paginate(client.rest.repos.listCollaborators, {
        owner,
        repo,
        permission: "push",
        per_page: 100
    }).then((members) => members.map((member) => member.login))
    core.info(`Members (${members.length}): ${members}`)

    const issues = await client.paginate(client.rest.issues.listForRepo, {
        owner,
        repo,
        state: "open",
        sort: "created",
        direction: "asc",
        per_page: 100
    })
    for (const issue of issues) {
        let assignees = [];
        if (issue.assignee) {
            assignees.push(issue.assignee.login)
        }
        for (const assignee of issue.assignees) {
            assignees.push(assignee.login)
        }
        assignees = _.uniq(assignees)
        assignees = _.difference(assignees, members)
        if (assignees.length > 0 && dayjs(issue.updated_at).isBefore(dt)) {
            core.info(`Assignees ${assignees} of issue ${issue.number} will be unassigned.`)
            await client.rest.issues.removeAssignees({
                owner,
                repo,
                issue_number: issue.number,
                assignees: assignees,
            })
        }
    }
}

main().catch(handleError)
@@ -1,25 +0,0 @@
{
  "name": "cyborg",
  "version": "1.0.0",
  "description": "Automator for GreptimeDB Repository Management",
  "private": true,
  "packageManager": "pnpm@8.15.5",
  "dependencies": {
    "@actions/core": "^1.10.1",
    "@actions/github": "^6.0.0",
    "@octokit/webhooks-types": "^7.5.1",
    "conventional-commit-types": "^3.0.0",
    "conventional-commits-parser": "^5.0.0",
    "dayjs": "^1.11.11",
    "dotenv": "^16.4.5",
    "lodash": "^4.17.21"
  },
  "devDependencies": {
    "@types/conventional-commits-parser": "^5.0.0",
    "@types/lodash": "^4.17.0",
    "@types/node": "^20.12.7",
    "tsconfig-paths": "^4.2.0",
    "tsx": "^4.8.2",
    "typescript": "^5.4.5"
  }
}
602  cyborg/pnpm-lock.yaml  (generated)
@@ -1,602 +0,0 @@
lockfileVersion: '6.0'

settings:
  autoInstallPeers: true
  excludeLinksFromLockfile: false

dependencies:
  '@actions/core':
    specifier: ^1.10.1
    version: 1.10.1
  '@actions/github':
    specifier: ^6.0.0
    version: 6.0.0
  '@octokit/webhooks-types':
    specifier: ^7.5.1
    version: 7.5.1
  conventional-commit-types:
    specifier: ^3.0.0
    version: 3.0.0
  conventional-commits-parser:
    specifier: ^5.0.0
    version: 5.0.0
  dayjs:
    specifier: ^1.11.11
    version: 1.11.11
  dotenv:
    specifier: ^16.4.5
    version: 16.4.5
  lodash:
    specifier: ^4.17.21
    version: 4.17.21

devDependencies:
  '@types/conventional-commits-parser':
    specifier: ^5.0.0
    version: 5.0.0
  '@types/lodash':
    specifier: ^4.17.0
    version: 4.17.0
  '@types/node':
    specifier: ^20.12.7
    version: 20.12.7
  tsconfig-paths:
    specifier: ^4.2.0
    version: 4.2.0
  tsx:
    specifier: ^4.8.2
    version: 4.8.2
  typescript:
    specifier: ^5.4.5
    version: 5.4.5

packages:

  /@actions/core@1.10.1:
    resolution: {integrity: sha512-3lBR9EDAY+iYIpTnTIXmWcNbX3T2kCkAEQGIQx4NVQ0575nk2k3GRZDTPQG+vVtS2izSLmINlxXf0uLtnrTP+g==}
    dependencies:
      '@actions/http-client': 2.2.1
      uuid: 8.3.2
    dev: false

  /@actions/github@6.0.0:
    resolution: {integrity: sha512-alScpSVnYmjNEXboZjarjukQEzgCRmjMv6Xj47fsdnqGS73bjJNDpiiXmp8jr0UZLdUB6d9jW63IcmddUP+l0g==}
    dependencies:
      '@actions/http-client': 2.2.1
      '@octokit/core': 5.2.0
      '@octokit/plugin-paginate-rest': 9.2.1(@octokit/core@5.2.0)
      '@octokit/plugin-rest-endpoint-methods': 10.4.1(@octokit/core@5.2.0)
    dev: false

  /@actions/http-client@2.2.1:
    resolution: {integrity: sha512-KhC/cZsq7f8I4LfZSJKgCvEwfkE8o1538VoBeoGzokVLLnbFDEAdFD3UhoMklxo2un9NJVBdANOresx7vTHlHw==}
    dependencies:
      tunnel: 0.0.6
      undici: 5.28.4
    dev: false

  /@esbuild/aix-ppc64@0.20.2:
    resolution: {integrity: sha512-D+EBOJHXdNZcLJRBkhENNG8Wji2kgc9AZ9KiPr1JuZjsNtyHzrsfLRrY0tk2H2aoFu6RANO1y1iPPUCDYWkb5g==}
    engines: {node: '>=12'}
    cpu: [ppc64]
    os: [aix]
    requiresBuild: true
    dev: true
    optional: true

  /@esbuild/android-arm64@0.20.2:
    resolution: {integrity: sha512-mRzjLacRtl/tWU0SvD8lUEwb61yP9cqQo6noDZP/O8VkwafSYwZ4yWy24kan8jE/IMERpYncRt2dw438LP3Xmg==}
    engines: {node: '>=12'}
    cpu: [arm64]
    os: [android]
    requiresBuild: true
    dev: true
    optional: true

  /@esbuild/android-arm@0.20.2:
    resolution: {integrity: sha512-t98Ra6pw2VaDhqNWO2Oph2LXbz/EJcnLmKLGBJwEwXX/JAN83Fym1rU8l0JUWK6HkIbWONCSSatf4sf2NBRx/w==}
    engines: {node: '>=12'}
    cpu: [arm]
    os: [android]
    requiresBuild: true
    dev: true
    optional: true

  /@esbuild/android-x64@0.20.2:
    resolution: {integrity: sha512-btzExgV+/lMGDDa194CcUQm53ncxzeBrWJcncOBxuC6ndBkKxnHdFJn86mCIgTELsooUmwUm9FkhSp5HYu00Rg==}
    engines: {node: '>=12'}
    cpu: [x64]
    os: [android]
    requiresBuild: true
    dev: true
    optional: true

  /@esbuild/darwin-arm64@0.20.2:
    resolution: {integrity: sha512-4J6IRT+10J3aJH3l1yzEg9y3wkTDgDk7TSDFX+wKFiWjqWp/iCfLIYzGyasx9l0SAFPT1HwSCR+0w/h1ES/MjA==}
    engines: {node: '>=12'}
    cpu: [arm64]
    os: [darwin]
    requiresBuild: true
    dev: true
    optional: true

  /@esbuild/darwin-x64@0.20.2:
    resolution: {integrity: sha512-tBcXp9KNphnNH0dfhv8KYkZhjc+H3XBkF5DKtswJblV7KlT9EI2+jeA8DgBjp908WEuYll6pF+UStUCfEpdysA==}
    engines: {node: '>=12'}
    cpu: [x64]
    os: [darwin]
    requiresBuild: true
    dev: true
    optional: true

  /@esbuild/freebsd-arm64@0.20.2:
    resolution: {integrity: sha512-d3qI41G4SuLiCGCFGUrKsSeTXyWG6yem1KcGZVS+3FYlYhtNoNgYrWcvkOoaqMhwXSMrZRl69ArHsGJ9mYdbbw==}
    engines: {node: '>=12'}
    cpu: [arm64]
    os: [freebsd]
    requiresBuild: true
    dev: true
    optional: true

  /@esbuild/freebsd-x64@0.20.2:
    resolution: {integrity: sha512-d+DipyvHRuqEeM5zDivKV1KuXn9WeRX6vqSqIDgwIfPQtwMP4jaDsQsDncjTDDsExT4lR/91OLjRo8bmC1e+Cw==}
    engines: {node: '>=12'}
    cpu: [x64]
    os: [freebsd]
    requiresBuild: true
    dev: true
    optional: true

  /@esbuild/linux-arm64@0.20.2:
    resolution: {integrity: sha512-9pb6rBjGvTFNira2FLIWqDk/uaf42sSyLE8j1rnUpuzsODBq7FvpwHYZxQ/It/8b+QOS1RYfqgGFNLRI+qlq2A==}
    engines: {node: '>=12'}
    cpu: [arm64]
    os: [linux]
    requiresBuild: true
    dev: true
    optional: true

  /@esbuild/linux-arm@0.20.2:
    resolution: {integrity: sha512-VhLPeR8HTMPccbuWWcEUD1Az68TqaTYyj6nfE4QByZIQEQVWBB8vup8PpR7y1QHL3CpcF6xd5WVBU/+SBEvGTg==}
    engines: {node: '>=12'}
    cpu: [arm]
    os: [linux]
    requiresBuild: true
    dev: true
    optional: true

  /@esbuild/linux-ia32@0.20.2:
    resolution: {integrity: sha512-o10utieEkNPFDZFQm9CoP7Tvb33UutoJqg3qKf1PWVeeJhJw0Q347PxMvBgVVFgouYLGIhFYG0UGdBumROyiig==}
    engines: {node: '>=12'}
    cpu: [ia32]
    os: [linux]
    requiresBuild: true
    dev: true
    optional: true

  /@esbuild/linux-loong64@0.20.2:
    resolution: {integrity: sha512-PR7sp6R/UC4CFVomVINKJ80pMFlfDfMQMYynX7t1tNTeivQ6XdX5r2XovMmha/VjR1YN/HgHWsVcTRIMkymrgQ==}
    engines: {node: '>=12'}
    cpu: [loong64]
    os: [linux]
    requiresBuild: true
    dev: true
    optional: true

  /@esbuild/linux-mips64el@0.20.2:
    resolution: {integrity: sha512-4BlTqeutE/KnOiTG5Y6Sb/Hw6hsBOZapOVF6njAESHInhlQAghVVZL1ZpIctBOoTFbQyGW+LsVYZ8lSSB3wkjA==}
    engines: {node: '>=12'}
    cpu: [mips64el]
    os: [linux]
    requiresBuild: true
    dev: true
    optional: true

  /@esbuild/linux-ppc64@0.20.2:
    resolution: {integrity: sha512-rD3KsaDprDcfajSKdn25ooz5J5/fWBylaaXkuotBDGnMnDP1Uv5DLAN/45qfnf3JDYyJv/ytGHQaziHUdyzaAg==}
    engines: {node: '>=12'}
    cpu: [ppc64]
    os: [linux]
    requiresBuild: true
    dev: true
    optional: true

  /@esbuild/linux-riscv64@0.20.2:
    resolution: {integrity: sha512-snwmBKacKmwTMmhLlz/3aH1Q9T8v45bKYGE3j26TsaOVtjIag4wLfWSiZykXzXuE1kbCE+zJRmwp+ZbIHinnVg==}
    engines: {node: '>=12'}
    cpu: [riscv64]
    os: [linux]
    requiresBuild: true
    dev: true
    optional: true

  /@esbuild/linux-s390x@0.20.2:
    resolution: {integrity: sha512-wcWISOobRWNm3cezm5HOZcYz1sKoHLd8VL1dl309DiixxVFoFe/o8HnwuIwn6sXre88Nwj+VwZUvJf4AFxkyrQ==}
    engines: {node: '>=12'}
    cpu: [s390x]
    os: [linux]
    requiresBuild: true
    dev: true
    optional: true

  /@esbuild/linux-x64@0.20.2:
    resolution: {integrity: sha512-1MdwI6OOTsfQfek8sLwgyjOXAu+wKhLEoaOLTjbijk6E2WONYpH9ZU2mNtR+lZ2B4uwr+usqGuVfFT9tMtGvGw==}
    engines: {node: '>=12'}
    cpu: [x64]
    os: [linux]
    requiresBuild: true
    dev: true
    optional: true

  /@esbuild/netbsd-x64@0.20.2:
    resolution: {integrity: sha512-K8/DhBxcVQkzYc43yJXDSyjlFeHQJBiowJ0uVL6Tor3jGQfSGHNNJcWxNbOI8v5k82prYqzPuwkzHt3J1T1iZQ==}
    engines: {node: '>=12'}
    cpu: [x64]
    os: [netbsd]
    requiresBuild: true
    dev: true
    optional: true

  /@esbuild/openbsd-x64@0.20.2:
    resolution: {integrity: sha512-eMpKlV0SThJmmJgiVyN9jTPJ2VBPquf6Kt/nAoo6DgHAoN57K15ZghiHaMvqjCye/uU4X5u3YSMgVBI1h3vKrQ==}
    engines: {node: '>=12'}
    cpu: [x64]
    os: [openbsd]
    requiresBuild: true
    dev: true
    optional: true

  /@esbuild/sunos-x64@0.20.2:
    resolution: {integrity: sha512-2UyFtRC6cXLyejf/YEld4Hajo7UHILetzE1vsRcGL3earZEW77JxrFjH4Ez2qaTiEfMgAXxfAZCm1fvM/G/o8w==}
    engines: {node: '>=12'}
    cpu: [x64]
    os: [sunos]
    requiresBuild: true
    dev: true
    optional: true

  /@esbuild/win32-arm64@0.20.2:
    resolution: {integrity: sha512-GRibxoawM9ZCnDxnP3usoUDO9vUkpAxIIZ6GQI+IlVmr5kP3zUq+l17xELTHMWTWzjxa2guPNyrpq1GWmPvcGQ==}
    engines: {node: '>=12'}
    cpu: [arm64]
    os: [win32]
    requiresBuild: true
    dev: true
    optional: true

  /@esbuild/win32-ia32@0.20.2:
    resolution: {integrity: sha512-HfLOfn9YWmkSKRQqovpnITazdtquEW8/SoHW7pWpuEeguaZI4QnCRW6b+oZTztdBnZOS2hqJ6im/D5cPzBTTlQ==}
    engines: {node: '>=12'}
    cpu: [ia32]
    os: [win32]
    requiresBuild: true
    dev: true
    optional: true

  /@esbuild/win32-x64@0.20.2:
    resolution: {integrity: sha512-N49X4lJX27+l9jbLKSqZ6bKNjzQvHaT8IIFUy+YIqmXQdjYCToGWwOItDrfby14c78aDd5NHQl29xingXfCdLQ==}
    engines: {node: '>=12'}
    cpu: [x64]
    os: [win32]
    requiresBuild: true
    dev: true
    optional: true

  /@fastify/busboy@2.1.1:
    resolution: {integrity: sha512-vBZP4NlzfOlerQTnba4aqZoMhE/a9HY7HRqoOPaETQcSQuWEIyZMHGfVu6w9wGtGK5fED5qRs2DteVCjOH60sA==}
|
||||
engines: {node: '>=14'}
|
||||
dev: false
|
||||
|
||||
/@octokit/auth-token@4.0.0:
|
||||
resolution: {integrity: sha512-tY/msAuJo6ARbK6SPIxZrPBms3xPbfwBrulZe0Wtr/DIY9lje2HeV1uoebShn6mx7SjCHif6EjMvoREj+gZ+SA==}
|
||||
engines: {node: '>= 18'}
|
||||
dev: false
|
||||
|
||||
/@octokit/core@5.2.0:
|
||||
resolution: {integrity: sha512-1LFfa/qnMQvEOAdzlQymH0ulepxbxnCYAKJZfMci/5XJyIHWgEYnDmgnKakbTh7CH2tFQ5O60oYDvns4i9RAIg==}
|
||||
engines: {node: '>= 18'}
|
||||
dependencies:
|
||||
'@octokit/auth-token': 4.0.0
|
||||
'@octokit/graphql': 7.1.0
|
||||
'@octokit/request': 8.4.0
|
||||
'@octokit/request-error': 5.1.0
|
||||
'@octokit/types': 13.5.0
|
||||
before-after-hook: 2.2.3
|
||||
universal-user-agent: 6.0.1
|
||||
dev: false
|
||||
|
||||
/@octokit/endpoint@9.0.5:
|
||||
resolution: {integrity: sha512-ekqR4/+PCLkEBF6qgj8WqJfvDq65RH85OAgrtnVp1mSxaXF03u2xW/hUdweGS5654IlC0wkNYC18Z50tSYTAFw==}
|
||||
engines: {node: '>= 18'}
|
||||
dependencies:
|
||||
'@octokit/types': 13.5.0
|
||||
universal-user-agent: 6.0.1
|
||||
dev: false
|
||||
|
||||
/@octokit/graphql@7.1.0:
|
||||
resolution: {integrity: sha512-r+oZUH7aMFui1ypZnAvZmn0KSqAUgE1/tUXIWaqUCa1758ts/Jio84GZuzsvUkme98kv0WFY8//n0J1Z+vsIsQ==}
|
||||
engines: {node: '>= 18'}
|
||||
dependencies:
|
||||
'@octokit/request': 8.4.0
|
||||
'@octokit/types': 13.5.0
|
||||
universal-user-agent: 6.0.1
|
||||
dev: false
|
||||
|
||||
/@octokit/openapi-types@20.0.0:
|
||||
resolution: {integrity: sha512-EtqRBEjp1dL/15V7WiX5LJMIxxkdiGJnabzYx5Apx4FkQIFgAfKumXeYAqqJCj1s+BMX4cPFIFC4OLCR6stlnA==}
|
||||
dev: false
|
||||
|
||||
/@octokit/openapi-types@22.2.0:
|
||||
resolution: {integrity: sha512-QBhVjcUa9W7Wwhm6DBFu6ZZ+1/t/oYxqc2tp81Pi41YNuJinbFRx8B133qVOrAaBbF7D/m0Et6f9/pZt9Rc+tg==}
|
||||
dev: false
|
||||
|
||||
/@octokit/plugin-paginate-rest@9.2.1(@octokit/core@5.2.0):
|
||||
resolution: {integrity: sha512-wfGhE/TAkXZRLjksFXuDZdmGnJQHvtU/joFQdweXUgzo1XwvBCD4o4+75NtFfjfLK5IwLf9vHTfSiU3sLRYpRw==}
|
||||
engines: {node: '>= 18'}
|
||||
peerDependencies:
|
||||
'@octokit/core': '5'
|
||||
dependencies:
|
||||
'@octokit/core': 5.2.0
|
||||
'@octokit/types': 12.6.0
|
||||
dev: false
|
||||
|
||||
/@octokit/plugin-rest-endpoint-methods@10.4.1(@octokit/core@5.2.0):
|
||||
resolution: {integrity: sha512-xV1b+ceKV9KytQe3zCVqjg+8GTGfDYwaT1ATU5isiUyVtlVAO3HNdzpS4sr4GBx4hxQ46s7ITtZrAsxG22+rVg==}
|
||||
engines: {node: '>= 18'}
|
||||
peerDependencies:
|
||||
'@octokit/core': '5'
|
||||
dependencies:
|
||||
'@octokit/core': 5.2.0
|
||||
'@octokit/types': 12.6.0
|
||||
dev: false
|
||||
|
||||
/@octokit/request-error@5.1.0:
|
||||
resolution: {integrity: sha512-GETXfE05J0+7H2STzekpKObFe765O5dlAKUTLNGeH+x47z7JjXHfsHKo5z21D/o/IOZTUEI6nyWyR+bZVP/n5Q==}
|
||||
engines: {node: '>= 18'}
|
||||
dependencies:
|
||||
'@octokit/types': 13.5.0
|
||||
deprecation: 2.3.1
|
||||
once: 1.4.0
|
||||
dev: false
|
||||
|
||||
/@octokit/request@8.4.0:
|
||||
resolution: {integrity: sha512-9Bb014e+m2TgBeEJGEbdplMVWwPmL1FPtggHQRkV+WVsMggPtEkLKPlcVYm/o8xKLkpJ7B+6N8WfQMtDLX2Dpw==}
|
||||
engines: {node: '>= 18'}
|
||||
dependencies:
|
||||
'@octokit/endpoint': 9.0.5
|
||||
'@octokit/request-error': 5.1.0
|
||||
'@octokit/types': 13.5.0
|
||||
universal-user-agent: 6.0.1
|
||||
dev: false
|
||||
|
||||
/@octokit/types@12.6.0:
|
||||
resolution: {integrity: sha512-1rhSOfRa6H9w4YwK0yrf5faDaDTb+yLyBUKOCV4xtCDB5VmIPqd/v9yr9o6SAzOAlRxMiRiCic6JVM1/kunVkw==}
|
||||
dependencies:
|
||||
'@octokit/openapi-types': 20.0.0
|
||||
dev: false
|
||||
|
||||
/@octokit/types@13.5.0:
|
||||
resolution: {integrity: sha512-HdqWTf5Z3qwDVlzCrP8UJquMwunpDiMPt5er+QjGzL4hqr/vBVY/MauQgS1xWxCDT1oMx1EULyqxncdCY/NVSQ==}
|
||||
dependencies:
|
||||
'@octokit/openapi-types': 22.2.0
|
||||
dev: false
|
||||
|
||||
/@octokit/webhooks-types@7.5.1:
|
||||
resolution: {integrity: sha512-1dozxWEP8lKGbtEu7HkRbK1F/nIPuJXNfT0gd96y6d3LcHZTtRtlf8xz3nicSJfesADxJyDh+mWBOsdLkqgzYw==}
|
||||
dev: false
|
||||
|
||||
/@types/conventional-commits-parser@5.0.0:
|
||||
resolution: {integrity: sha512-loB369iXNmAZglwWATL+WRe+CRMmmBPtpolYzIebFaX4YA3x+BEfLqhUAV9WanycKI3TG1IMr5bMJDajDKLlUQ==}
|
||||
dependencies:
|
||||
'@types/node': 20.12.7
|
||||
dev: true
|
||||
|
||||
/@types/lodash@4.17.0:
|
||||
resolution: {integrity: sha512-t7dhREVv6dbNj0q17X12j7yDG4bD/DHYX7o5/DbDxobP0HnGPgpRz2Ej77aL7TZT3DSw13fqUTj8J4mMnqa7WA==}
|
||||
dev: true
|
||||
|
||||
/@types/node@20.12.7:
|
||||
resolution: {integrity: sha512-wq0cICSkRLVaf3UGLMGItu/PtdY7oaXaI/RVU+xliKVOtRna3PRY57ZDfztpDL0n11vfymMUnXv8QwYCO7L1wg==}
|
||||
dependencies:
|
||||
undici-types: 5.26.5
|
||||
dev: true
|
||||
|
||||
/JSONStream@1.3.5:
|
||||
resolution: {integrity: sha512-E+iruNOY8VV9s4JEbe1aNEm6MiszPRr/UfcHMz0TQh1BXSxHK+ASV1R6W4HpjBhSeS+54PIsAMCBmwD06LLsqQ==}
|
||||
hasBin: true
|
||||
dependencies:
|
||||
jsonparse: 1.3.1
|
||||
through: 2.3.8
|
||||
dev: false
|
||||
|
||||
/before-after-hook@2.2.3:
|
||||
resolution: {integrity: sha512-NzUnlZexiaH/46WDhANlyR2bXRopNg4F/zuSA3OpZnllCUgRaOF2znDioDWrmbNVsuZk6l9pMquQB38cfBZwkQ==}
|
||||
dev: false
|
||||
|
||||
/conventional-commit-types@3.0.0:
|
||||
resolution: {integrity: sha512-SmmCYnOniSsAa9GqWOeLqc179lfr5TRu5b4QFDkbsrJ5TZjPJx85wtOr3zn+1dbeNiXDKGPbZ72IKbPhLXh/Lg==}
|
||||
dev: false
|
||||
|
||||
/conventional-commits-parser@5.0.0:
|
||||
resolution: {integrity: sha512-ZPMl0ZJbw74iS9LuX9YIAiW8pfM5p3yh2o/NbXHbkFuZzY5jvdi5jFycEOkmBW5H5I7nA+D6f3UcsCLP2vvSEA==}
|
||||
engines: {node: '>=16'}
|
||||
hasBin: true
|
||||
dependencies:
|
||||
JSONStream: 1.3.5
|
||||
is-text-path: 2.0.0
|
||||
meow: 12.1.1
|
||||
split2: 4.2.0
|
||||
dev: false
|
||||
|
||||
/dayjs@1.11.11:
|
||||
resolution: {integrity: sha512-okzr3f11N6WuqYtZSvm+F776mB41wRZMhKP+hc34YdW+KmtYYK9iqvHSwo2k9FEH3fhGXvOPV6yz2IcSrfRUDg==}
|
||||
dev: false
|
||||
|
||||
/deprecation@2.3.1:
|
||||
resolution: {integrity: sha512-xmHIy4F3scKVwMsQ4WnVaS8bHOx0DmVwRywosKhaILI0ywMDWPtBSku2HNxRvF7jtwDRsoEwYQSfbxj8b7RlJQ==}
|
||||
dev: false
|
||||
|
||||
/dotenv@16.4.5:
|
||||
resolution: {integrity: sha512-ZmdL2rui+eB2YwhsWzjInR8LldtZHGDoQ1ugH85ppHKwpUHL7j7rN0Ti9NCnGiQbhaZ11FpR+7ao1dNsmduNUg==}
|
||||
engines: {node: '>=12'}
|
||||
dev: false
|
||||
|
||||
/esbuild@0.20.2:
|
||||
resolution: {integrity: sha512-WdOOppmUNU+IbZ0PaDiTst80zjnrOkyJNHoKupIcVyU8Lvla3Ugx94VzkQ32Ijqd7UhHJy75gNWDMUekcrSJ6g==}
|
||||
engines: {node: '>=12'}
|
||||
hasBin: true
|
||||
requiresBuild: true
|
||||
optionalDependencies:
|
||||
'@esbuild/aix-ppc64': 0.20.2
|
||||
'@esbuild/android-arm': 0.20.2
|
||||
'@esbuild/android-arm64': 0.20.2
|
||||
'@esbuild/android-x64': 0.20.2
|
||||
'@esbuild/darwin-arm64': 0.20.2
|
||||
'@esbuild/darwin-x64': 0.20.2
|
||||
'@esbuild/freebsd-arm64': 0.20.2
|
||||
'@esbuild/freebsd-x64': 0.20.2
|
||||
'@esbuild/linux-arm': 0.20.2
|
||||
'@esbuild/linux-arm64': 0.20.2
|
||||
'@esbuild/linux-ia32': 0.20.2
|
||||
'@esbuild/linux-loong64': 0.20.2
|
||||
'@esbuild/linux-mips64el': 0.20.2
|
||||
'@esbuild/linux-ppc64': 0.20.2
|
||||
'@esbuild/linux-riscv64': 0.20.2
|
||||
'@esbuild/linux-s390x': 0.20.2
|
||||
'@esbuild/linux-x64': 0.20.2
|
||||
'@esbuild/netbsd-x64': 0.20.2
|
||||
'@esbuild/openbsd-x64': 0.20.2
|
||||
'@esbuild/sunos-x64': 0.20.2
|
||||
'@esbuild/win32-arm64': 0.20.2
|
||||
'@esbuild/win32-ia32': 0.20.2
|
||||
'@esbuild/win32-x64': 0.20.2
|
||||
dev: true
|
||||
|
||||
/fsevents@2.3.3:
|
||||
resolution: {integrity: sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==}
|
||||
engines: {node: ^8.16.0 || ^10.6.0 || >=11.0.0}
|
||||
os: [darwin]
|
||||
requiresBuild: true
|
||||
dev: true
|
||||
optional: true
|
||||
|
||||
/get-tsconfig@4.7.3:
|
||||
resolution: {integrity: sha512-ZvkrzoUA0PQZM6fy6+/Hce561s+faD1rsNwhnO5FelNjyy7EMGJ3Rz1AQ8GYDWjhRs/7dBLOEJvhK8MiEJOAFg==}
|
||||
dependencies:
|
||||
resolve-pkg-maps: 1.0.0
|
||||
dev: true
|
||||
|
||||
/is-text-path@2.0.0:
|
||||
resolution: {integrity: sha512-+oDTluR6WEjdXEJMnC2z6A4FRwFoYuvShVVEGsS7ewc0UTi2QtAKMDJuL4BDEVt+5T7MjFo12RP8ghOM75oKJw==}
|
||||
engines: {node: '>=8'}
|
||||
dependencies:
|
||||
text-extensions: 2.4.0
|
||||
dev: false
|
||||
|
||||
/json5@2.2.3:
|
||||
resolution: {integrity: sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==}
|
||||
engines: {node: '>=6'}
|
||||
hasBin: true
|
||||
dev: true
|
||||
|
||||
/jsonparse@1.3.1:
|
||||
resolution: {integrity: sha512-POQXvpdL69+CluYsillJ7SUhKvytYjW9vG/GKpnf+xP8UWgYEM/RaMzHHofbALDiKbbP1W8UEYmgGl39WkPZsg==}
|
||||
engines: {'0': node >= 0.2.0}
|
||||
dev: false
|
||||
|
||||
/lodash@4.17.21:
|
||||
resolution: {integrity: sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==}
|
||||
dev: false
|
||||
|
||||
/meow@12.1.1:
|
||||
resolution: {integrity: sha512-BhXM0Au22RwUneMPwSCnyhTOizdWoIEPU9sp0Aqa1PnDMR5Wv2FGXYDjuzJEIX+Eo2Rb8xuYe5jrnm5QowQFkw==}
|
||||
engines: {node: '>=16.10'}
|
||||
dev: false
|
||||
|
||||
/minimist@1.2.8:
|
||||
resolution: {integrity: sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==}
|
||||
dev: true
|
||||
|
||||
/once@1.4.0:
|
||||
resolution: {integrity: sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==}
|
||||
dependencies:
|
||||
wrappy: 1.0.2
|
||||
dev: false
|
||||
|
||||
/resolve-pkg-maps@1.0.0:
|
||||
resolution: {integrity: sha512-seS2Tj26TBVOC2NIc2rOe2y2ZO7efxITtLZcGSOnHHNOQ7CkiUBfw0Iw2ck6xkIhPwLhKNLS8BO+hEpngQlqzw==}
|
||||
dev: true
|
||||
|
||||
/split2@4.2.0:
|
||||
resolution: {integrity: sha512-UcjcJOWknrNkF6PLX83qcHM6KHgVKNkV62Y8a5uYDVv9ydGQVwAHMKqHdJje1VTWpljG0WYpCDhrCdAOYH4TWg==}
|
||||
engines: {node: '>= 10.x'}
|
||||
dev: false
|
||||
|
||||
/strip-bom@3.0.0:
|
||||
resolution: {integrity: sha512-vavAMRXOgBVNF6nyEEmL3DBK19iRpDcoIwW+swQ+CbGiu7lju6t+JklA1MHweoWtadgt4ISVUsXLyDq34ddcwA==}
|
||||
engines: {node: '>=4'}
|
||||
dev: true
|
||||
|
||||
/text-extensions@2.4.0:
|
||||
resolution: {integrity: sha512-te/NtwBwfiNRLf9Ijqx3T0nlqZiQ2XrrtBvu+cLL8ZRrGkO0NHTug8MYFKyoSrv/sHTaSKfilUkizV6XhxMJ3g==}
|
||||
engines: {node: '>=8'}
|
||||
dev: false
|
||||
|
||||
/through@2.3.8:
|
||||
resolution: {integrity: sha512-w89qg7PI8wAdvX60bMDP+bFoD5Dvhm9oLheFp5O4a2QF0cSBGsBX4qZmadPMvVqlLJBBci+WqGGOAPvcDeNSVg==}
|
||||
dev: false
|
||||
|
||||
/tsconfig-paths@4.2.0:
|
||||
resolution: {integrity: sha512-NoZ4roiN7LnbKn9QqE1amc9DJfzvZXxF4xDavcOWt1BPkdx+m+0gJuPM+S0vCe7zTJMYUP0R8pO2XMr+Y8oLIg==}
|
||||
engines: {node: '>=6'}
|
||||
dependencies:
|
||||
json5: 2.2.3
|
||||
minimist: 1.2.8
|
||||
strip-bom: 3.0.0
|
||||
dev: true
|
||||
|
||||
/tsx@4.8.2:
|
||||
resolution: {integrity: sha512-hmmzS4U4mdy1Cnzpl/NQiPUC2k34EcNSTZYVJThYKhdqTwuBeF+4cG9KUK/PFQ7KHaAaYwqlb7QfmsE2nuj+WA==}
|
||||
engines: {node: '>=18.0.0'}
|
||||
hasBin: true
|
||||
dependencies:
|
||||
esbuild: 0.20.2
|
||||
get-tsconfig: 4.7.3
|
||||
optionalDependencies:
|
||||
fsevents: 2.3.3
|
||||
dev: true
|
||||
|
||||
/tunnel@0.0.6:
|
||||
resolution: {integrity: sha512-1h/Lnq9yajKY2PEbBadPXj3VxsDDu844OnaAo52UVmIzIvwwtBPIuNvkjuzBlTWpfJyUbG3ez0KSBibQkj4ojg==}
|
||||
engines: {node: '>=0.6.11 <=0.7.0 || >=0.7.3'}
|
||||
dev: false
|
||||
|
||||
/typescript@5.4.5:
|
||||
resolution: {integrity: sha512-vcI4UpRgg81oIRUFwR0WSIHKt11nJ7SAVlYNIu+QpqeyXP+gpQJy/Z4+F0aGxSE4MqwjyXvW/TzgkLAx2AGHwQ==}
|
||||
engines: {node: '>=14.17'}
|
||||
hasBin: true
|
||||
dev: true
|
||||
|
||||
/undici-types@5.26.5:
|
||||
resolution: {integrity: sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==}
|
||||
dev: true
|
||||
|
||||
/undici@5.28.4:
|
||||
resolution: {integrity: sha512-72RFADWFqKmUb2hmmvNODKL3p9hcB6Gt2DOQMis1SEBaV6a4MH8soBvzg+95CYhCKPFedut2JY9bMfrDl9D23g==}
|
||||
engines: {node: '>=14.0'}
|
||||
dependencies:
|
||||
'@fastify/busboy': 2.1.1
|
||||
dev: false
|
||||
|
||||
/universal-user-agent@6.0.1:
|
||||
resolution: {integrity: sha512-yCzhz6FN2wU1NiiQRogkTQszlQSlpWaw8SvVegAc+bDxbzHgh1vX8uIe8OYyMH6DwH+sdTJsgMl36+mSMdRJIQ==}
|
||||
dev: false
|
||||
|
||||
/uuid@8.3.2:
|
||||
resolution: {integrity: sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg==}
|
||||
hasBin: true
|
||||
dev: false
|
||||
|
||||
/wrappy@1.0.2:
|
||||
resolution: {integrity: sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==}
|
||||
dev: false
|
||||
@@ -1,30 +0,0 @@
/*
 * Copyright 2023 Greptime Team
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import * as core from "@actions/core";
import {config} from "dotenv";
import {getOctokit} from "@actions/github";
import {GitHub} from "@actions/github/lib/utils";

export function handleError(err: any): void {
    console.error(err)
    core.setFailed(`Unhandled error: ${err}`)
}

export function obtainClient(token: string): InstanceType<typeof GitHub> {
    config()
    return getOctokit(process.env[token])
}
@@ -1,14 +0,0 @@
{
    "ts-node": {
        "require": ["tsconfig-paths/register"]
    },
    "compilerOptions": {
        "module": "NodeNext",
        "moduleResolution": "NodeNext",
        "target": "ES6",
        "paths": {
            "@/*": ["./src/*"]
        },
        "resolveJsonModule": true,
    }
}
@@ -1,136 +0,0 @@
# How to write fuzz tests

This document introduces how to write fuzz tests in GreptimeDB.

## What is a fuzz test
A fuzz test is a tool that leverages deterministic random generation to assist in finding bugs. The goal of a fuzz test is to identify generated inputs that cause the system to panic, crash, or behave unexpectedly. We use [cargo-fuzz](https://github.com/rust-fuzz/cargo-fuzz) to run our fuzz test targets; the basic shape of a target is sketched below.
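The sketch below is illustrative only (it is not a target that exists in this repository); it shows the general shape of a cargo-fuzz target: a closure that the fuzzer invokes repeatedly with generated input, where any panic or crash is reported as a finding.

```rust
// Hypothetical minimal cargo-fuzz target.
#![no_main]
use libfuzzer_sys::fuzz_target;

fuzz_target!(|data: &[u8]| {
    // The fuzzer calls this closure over and over with random bytes;
    // a panic here (or a crash caught by sanitizers) counts as a finding.
    if data.len() >= 2 && data[0] == b'G' && data[1] == b'T' {
        // Exercise the system under test with `data` here.
    }
});
```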
## Why we need them
- Find bugs by leveraging random generation
- Integrate with other tests (e.g., e2e)

## Resources
All fuzz test-related resources are located in the `/tests-fuzz` directory.
There are two types of resources: (1) fundamental components and (2) test targets.

### Fundamental components
They are located in the `/tests-fuzz/src` directory. The fundamental components define how to generate SQLs (including dialects for different protocols) and how to validate execution results (e.g., column attribute validation), etc.

### Test targets
They are located in the `/tests-fuzz/targets` directory, with each file representing an independent fuzz test case. A target utilizes the fundamental components to generate SQLs, sends the generated SQLs via a specified protocol, and validates the results of the SQL execution.

Figure 1 illustrates how the fundamental components provide the ability to generate random SQLs: a Random Number Generator (Rng) drives an ExprGenerator that produces the Intermediate Representation (IR), a DialectTranslator then turns the IR into the dialect of a specific protocol, and finally the fuzz test sends the generated SQL via that protocol and verifies that the execution results meet expectations.
```
Rng
 |
 v
ExprGenerator
 |
 v
Intermediate representation (IR)
 |
 +----------------------+----------------------+
 |                      |                      |
 v                      v                      v
MySQLTranslator   PostgreSQLTranslator   OtherDialectTranslator
 |                      |                      |
 v                      v                      v
SQL(MySQL Dialect)    .....                  .....
 |
 v
Fuzz Test
```
(Figure 1: Overview of fuzz tests)

For more details about fuzz targets and fundamental components, please refer to this [tracking issue](https://github.com/GreptimeTeam/greptimedb/issues/3174).

## How to add a fuzz test target

1. Create an empty Rust source file at `/tests-fuzz/targets/<fuzz-target>.rs`.

2. Register the fuzz test target in the `/tests-fuzz/Cargo.toml` file.

```toml
[[bin]]
name = "<fuzz-target>"
path = "targets/<fuzz-target>.rs"
test = false
bench = false
doc = false
```

3. Define the `FuzzInput` in `/tests-fuzz/targets/<fuzz-target>.rs`.

```rust
#![no_main]
use libfuzzer_sys::arbitrary::{self, Arbitrary, Unstructured};

#[derive(Clone, Debug)]
struct FuzzInput {
    seed: u64,
}

impl Arbitrary<'_> for FuzzInput {
    fn arbitrary(u: &mut Unstructured<'_>) -> arbitrary::Result<Self> {
        // Draw a seed from the fuzzer-provided bytes; all further
        // generation is derived deterministically from this seed.
        let seed = u.int_in_range(u64::MIN..=u64::MAX)?;
        Ok(FuzzInput { seed })
    }
}
```
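Because every random choice flows from this single seed, a failing case can be replayed exactly. The following standalone sketch (it only assumes the same `rand` and `rand_chacha` crates that the targets already use) shows why:

```rust
use rand::{Rng, SeedableRng};
use rand_chacha::ChaChaRng;

fn main() {
    // Two generators built from the same seed produce identical sequences,
    // so a seed reported by the fuzzer reproduces the exact same
    // generated SQL statements.
    let mut a = ChaChaRng::seed_from_u64(42);
    let mut b = ChaChaRng::seed_from_u64(42);
    for _ in 0..8 {
        assert_eq!(a.gen::<u64>(), b.gen::<u64>());
    }
    println!("deterministic: same seed, same sequence");
}
```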

4. Write your first fuzz test target in `/tests-fuzz/targets/<fuzz-target>.rs`.

```rust
use libfuzzer_sys::fuzz_target;
use rand::{Rng, SeedableRng};
use rand_chacha::ChaChaRng;
use snafu::ResultExt;
use sqlx::{MySql, Pool};
use tests_fuzz::fake::{
    merge_two_word_map_fn, random_capitalize_map, uppercase_and_keyword_backtick_map,
    MappedGenerator, WordGenerator,
};
use tests_fuzz::generator::create_expr::CreateTableExprGeneratorBuilder;
use tests_fuzz::generator::Generator;
use tests_fuzz::ir::CreateTableExpr;
use tests_fuzz::translator::mysql::create_expr::CreateTableExprTranslator;
use tests_fuzz::translator::DslTranslator;
use tests_fuzz::utils::{init_greptime_connections, Connections};

fuzz_target!(|input: FuzzInput| {
    common_telemetry::init_default_ut_logging();
    common_runtime::block_on_write(async {
        let Connections { mysql } = init_greptime_connections().await;
        // All randomness is derived from the seed carried by `FuzzInput`.
        let mut rng = ChaChaRng::seed_from_u64(input.seed);
        let columns = rng.gen_range(2..30);
        let if_not_exists = rng.gen_bool(0.5);
        let create_table_generator = CreateTableExprGeneratorBuilder::default()
            .name_generator(Box::new(MappedGenerator::new(
                WordGenerator,
                merge_two_word_map_fn(random_capitalize_map, uppercase_and_keyword_backtick_map),
            )))
            .columns(columns)
            .engine("mito")
            .if_not_exists(if_not_exists)
            .build()
            .unwrap();
        // Generate the IR, translate it into the MySQL dialect, then execute it.
        let expr = create_table_generator.generate(&mut rng).unwrap();
        let translator = CreateTableExprTranslator;
        let sql = translator.translate(&expr).unwrap();
        mysql.execute(sql.as_str()).await.unwrap();
    })
});
```

5. Run your fuzz test target.

```bash
cargo fuzz run <fuzz-target> --fuzz-dir tests-fuzz
```

For more details, please refer to this [document](/tests-fuzz/README.md).
@@ -73,7 +73,7 @@ CREATE TABLE cpu (
  usage_system DOUBLE,
  datacenter STRING,
  TIME INDEX (ts),
PRIMARY KEY(datacenter, host)) ENGINE=mito;
PRIMARY KEY(datacenter, host)) ENGINE=mito WITH(regions=1);
```

Then the table's `TableMeta` may look like this:

@@ -249,7 +249,7 @@ CREATE TABLE cpu (
  usage_system DOUBLE,
  datacenter STRING,
  TIME INDEX (ts),
PRIMARY KEY(datacenter, host)) ENGINE=mito;
PRIMARY KEY(datacenter, host)) ENGINE=mito WITH(regions=1);

select ts, usage_system from cpu;
```

@@ -7,60 +7,4 @@ Status notify: we are still working on this config. It's expected to change freq

# How to use

## `greptimedb.json`

Open the Grafana dashboard page, choose `New` -> `Import`, and upload the `greptimedb.json` file.

## `greptimedb-cluster.json`

This cluster dashboard provides a comprehensive view of incoming requests, response statuses, and internal activities such as flush and compaction, with a layered structure from frontend to datanode. Designed with a focus on alert functionality, its primary aim is to highlight any anomalies in metrics, allowing users to quickly pinpoint the cause of errors.

We use Prometheus to scrape metrics from the nodes in a GreptimeDB cluster and Grafana to visualize them; any compatible stack should work too.

__Note__: This dashboard is still in an early stage of development. Any issue or advice on improvement is welcome.

### Configuration

Please ensure the following configuration before importing the dashboard into Grafana.

__1. Prometheus scrape config__

Assign a `greptime_pod` label to each host target. We use this label to identify each node instance.

```yml
# example config
# only to indicate how to assign labels to each target
# modify yours accordingly
scrape_configs:
  - job_name: metasrv
    static_configs:
      - targets: ['<ip>:<port>']
        labels:
          greptime_pod: metasrv

  - job_name: datanode
    static_configs:
      - targets: ['<ip>:<port>']
        labels:
          greptime_pod: datanode1
      - targets: ['<ip>:<port>']
        labels:
          greptime_pod: datanode2
      - targets: ['<ip>:<port>']
        labels:
          greptime_pod: datanode3

  - job_name: frontend
    static_configs:
      - targets: ['<ip>:<port>']
        labels:
          greptime_pod: frontend
```

__2. Grafana config__

Create a Prometheus data source in Grafana before using this dashboard. We use `datasource` as a variable in the Grafana dashboard so that multiple environments are supported.

### Usage

Use `datasource` or `greptime_pod` on the upper-left corner to filter data for a certain node.

File diff suppressed because it is too large
@@ -17,15 +17,12 @@ headerPath = "Apache-2.0.txt"
includes = [
    "*.rs",
    "*.py",
    "*.ts",
]

excludes = [
    # copied sources
    "src/common/base/src/readable_size.rs",
    "src/common/base/src/secrets.rs",
    "src/servers/src/repeated_field.rs",
    "src/servers/src/http/test_helpers.rs",
]

[properties]

@@ -20,20 +20,21 @@ use common_decimal::Decimal128;
use common_time::interval::IntervalUnit;
use common_time::time::Time;
use common_time::timestamp::TimeUnit;
use common_time::{Date, DateTime, Interval, Timestamp};
use common_time::{Date, DateTime, Duration, Interval, Timestamp};
use datatypes::prelude::{ConcreteDataType, ValueRef};
use datatypes::scalars::ScalarVector;
use datatypes::types::{
    Int16Type, Int8Type, IntervalType, TimeType, TimestampType, UInt16Type, UInt8Type,
    DurationType, Int16Type, Int8Type, IntervalType, TimeType, TimestampType, UInt16Type, UInt8Type,
};
use datatypes::value::{OrderedF32, OrderedF64, Value};
use datatypes::vectors::{
    BinaryVector, BooleanVector, DateTimeVector, DateVector, Decimal128Vector, Float32Vector,
    Float64Vector, Int32Vector, Int64Vector, IntervalDayTimeVector, IntervalMonthDayNanoVector,
    IntervalYearMonthVector, PrimitiveVector, StringVector, TimeMicrosecondVector,
    TimeMillisecondVector, TimeNanosecondVector, TimeSecondVector, TimestampMicrosecondVector,
    TimestampMillisecondVector, TimestampNanosecondVector, TimestampSecondVector, UInt32Vector,
    UInt64Vector, VectorRef,
    BinaryVector, BooleanVector, DateTimeVector, DateVector, Decimal128Vector,
    DurationMicrosecondVector, DurationMillisecondVector, DurationNanosecondVector,
    DurationSecondVector, Float32Vector, Float64Vector, Int32Vector, Int64Vector,
    IntervalDayTimeVector, IntervalMonthDayNanoVector, IntervalYearMonthVector, PrimitiveVector,
    StringVector, TimeMicrosecondVector, TimeMillisecondVector, TimeNanosecondVector,
    TimeSecondVector, TimestampMicrosecondVector, TimestampMillisecondVector,
    TimestampNanosecondVector, TimestampSecondVector, UInt32Vector, UInt64Vector, VectorRef,
};
use greptime_proto::v1;
use greptime_proto::v1::column_data_type_extension::TypeExt;
@@ -126,6 +127,14 @@ impl From<ColumnDataTypeWrapper> for ConcreteDataType {
            ColumnDataType::IntervalMonthDayNano => {
                ConcreteDataType::interval_month_day_nano_datatype()
            }
            ColumnDataType::DurationSecond => ConcreteDataType::duration_second_datatype(),
            ColumnDataType::DurationMillisecond => {
                ConcreteDataType::duration_millisecond_datatype()
            }
            ColumnDataType::DurationMicrosecond => {
                ConcreteDataType::duration_microsecond_datatype()
            }
            ColumnDataType::DurationNanosecond => ConcreteDataType::duration_nanosecond_datatype(),
            ColumnDataType::Decimal128 => {
                if let Some(TypeExt::DecimalType(d)) = datatype_wrapper
                    .datatype_ext
@@ -203,7 +212,11 @@ impl_column_type_functions_with_snake!(
    TimeNanosecond,
    IntervalYearMonth,
    IntervalDayTime,
    IntervalMonthDayNano
    IntervalMonthDayNano,
    DurationSecond,
    DurationMillisecond,
    DurationMicrosecond,
    DurationNanosecond
);

impl ColumnDataTypeWrapper {
@@ -257,11 +270,16 @@ impl TryFrom<ConcreteDataType> for ColumnDataTypeWrapper {
                IntervalType::DayTime(_) => ColumnDataType::IntervalDayTime,
                IntervalType::MonthDayNano(_) => ColumnDataType::IntervalMonthDayNano,
            },
            ConcreteDataType::Duration(d) => match d {
                DurationType::Second(_) => ColumnDataType::DurationSecond,
                DurationType::Millisecond(_) => ColumnDataType::DurationMillisecond,
                DurationType::Microsecond(_) => ColumnDataType::DurationMicrosecond,
                DurationType::Nanosecond(_) => ColumnDataType::DurationNanosecond,
            },
            ConcreteDataType::Decimal128(_) => ColumnDataType::Decimal128,
            ConcreteDataType::Null(_)
            | ConcreteDataType::List(_)
            | ConcreteDataType::Dictionary(_)
            | ConcreteDataType::Duration(_) => {
            | ConcreteDataType::Dictionary(_) => {
                return error::IntoColumnDataTypeSnafu { from: datatype }.fail()
            }
        };
@@ -391,6 +409,22 @@ pub fn values_with_capacity(datatype: ColumnDataType, capacity: usize) -> Values
            interval_month_day_nano_values: Vec::with_capacity(capacity),
            ..Default::default()
        },
        ColumnDataType::DurationSecond => Values {
            duration_second_values: Vec::with_capacity(capacity),
            ..Default::default()
        },
        ColumnDataType::DurationMillisecond => Values {
            duration_millisecond_values: Vec::with_capacity(capacity),
            ..Default::default()
        },
        ColumnDataType::DurationMicrosecond => Values {
            duration_microsecond_values: Vec::with_capacity(capacity),
            ..Default::default()
        },
        ColumnDataType::DurationNanosecond => Values {
            duration_nanosecond_values: Vec::with_capacity(capacity),
            ..Default::default()
        },
        ColumnDataType::Decimal128 => Values {
            decimal128_values: Vec::with_capacity(capacity),
            ..Default::default()
@@ -442,8 +476,14 @@ pub fn push_vals(column: &mut Column, origin_count: usize, vector: VectorRef) {
                .interval_month_day_nano_values
                .push(convert_i128_to_interval(val.to_i128())),
        },
        Value::Duration(val) => match val.unit() {
            TimeUnit::Second => values.duration_second_values.push(val.value()),
            TimeUnit::Millisecond => values.duration_millisecond_values.push(val.value()),
            TimeUnit::Microsecond => values.duration_microsecond_values.push(val.value()),
            TimeUnit::Nanosecond => values.duration_nanosecond_values.push(val.value()),
        },
        Value::Decimal128(val) => values.decimal128_values.push(convert_to_pb_decimal128(val)),
        Value::List(_) | Value::Duration(_) => unreachable!(),
        Value::List(_) => unreachable!(),
    });
    column.null_mask = null_mask.into_vec();
}
@@ -478,8 +518,6 @@ fn ddl_request_type(request: &DdlRequest) -> &'static str {
        Some(Expr::Alter(_)) => "ddl.alter",
        Some(Expr::DropTable(_)) => "ddl.drop_table",
        Some(Expr::TruncateTable(_)) => "ddl.truncate_table",
        Some(Expr::CreateFlow(_)) => "ddl.create_flow",
        Some(Expr::DropFlow(_)) => "ddl.drop_flow",
        None => "ddl.empty",
    }
}
@@ -545,6 +583,10 @@ pub fn pb_value_to_value_ref<'a>(
            let interval = Interval::from_month_day_nano(v.months, v.days, v.nanoseconds);
            ValueRef::Interval(interval)
        }
        ValueData::DurationSecondValue(v) => ValueRef::Duration(Duration::new_second(*v)),
        ValueData::DurationMillisecondValue(v) => ValueRef::Duration(Duration::new_millisecond(*v)),
        ValueData::DurationMicrosecondValue(v) => ValueRef::Duration(Duration::new_microsecond(*v)),
        ValueData::DurationNanosecondValue(v) => ValueRef::Duration(Duration::new_nanosecond(*v)),
        ValueData::Decimal128Value(v) => {
            // get precision and scale from datatype_extension
            if let Some(TypeExt::DecimalType(d)) = datatype_ext
@@ -639,15 +681,26 @@ pub fn pb_values_to_vector_ref(data_type: &ConcreteDataType, values: Values) ->
                ))
            }
        },
        ConcreteDataType::Duration(unit) => match unit {
            DurationType::Second(_) => Arc::new(DurationSecondVector::from_vec(
                values.duration_second_values,
            )),
            DurationType::Millisecond(_) => Arc::new(DurationMillisecondVector::from_vec(
                values.duration_millisecond_values,
            )),
            DurationType::Microsecond(_) => Arc::new(DurationMicrosecondVector::from_vec(
                values.duration_microsecond_values,
            )),
            DurationType::Nanosecond(_) => Arc::new(DurationNanosecondVector::from_vec(
                values.duration_nanosecond_values,
            )),
        },
        ConcreteDataType::Decimal128(d) => Arc::new(Decimal128Vector::from_values(
            values.decimal128_values.iter().map(|x| {
                Decimal128::from_value_precision_scale(x.hi, x.lo, d.precision(), d.scale()).into()
            }),
        )),
        ConcreteDataType::Null(_)
        | ConcreteDataType::List(_)
        | ConcreteDataType::Dictionary(_)
        | ConcreteDataType::Duration(_) => {
        ConcreteDataType::Null(_) | ConcreteDataType::List(_) | ConcreteDataType::Dictionary(_) => {
            unreachable!()
        }
    }
@@ -796,6 +849,26 @@ pub fn pb_values_to_values(data_type: &ConcreteDataType, values: Values) -> Vec<
                ))
            })
            .collect(),
        ConcreteDataType::Duration(DurationType::Second(_)) => values
            .duration_second_values
            .into_iter()
            .map(|v| Value::Duration(Duration::new_second(v)))
            .collect(),
        ConcreteDataType::Duration(DurationType::Millisecond(_)) => values
            .duration_millisecond_values
            .into_iter()
            .map(|v| Value::Duration(Duration::new_millisecond(v)))
            .collect(),
        ConcreteDataType::Duration(DurationType::Microsecond(_)) => values
            .duration_microsecond_values
            .into_iter()
            .map(|v| Value::Duration(Duration::new_microsecond(v)))
            .collect(),
        ConcreteDataType::Duration(DurationType::Nanosecond(_)) => values
            .duration_nanosecond_values
            .into_iter()
            .map(|v| Value::Duration(Duration::new_nanosecond(v)))
            .collect(),
        ConcreteDataType::Decimal128(d) => values
            .decimal128_values
            .into_iter()
@@ -808,10 +881,7 @@ pub fn pb_values_to_values(data_type: &ConcreteDataType, values: Values) -> Vec<
                ))
            })
            .collect(),
        ConcreteDataType::Null(_)
        | ConcreteDataType::List(_)
        | ConcreteDataType::Dictionary(_)
        | ConcreteDataType::Duration(_) => {
        ConcreteDataType::Null(_) | ConcreteDataType::List(_) | ConcreteDataType::Dictionary(_) => {
            unreachable!()
        }
    }
@@ -923,10 +993,24 @@ pub fn to_proto_value(value: Value) -> Option<v1::Value> {
                )),
            },
        },
        Value::Duration(v) => match v.unit() {
            TimeUnit::Second => v1::Value {
                value_data: Some(ValueData::DurationSecondValue(v.value())),
            },
            TimeUnit::Millisecond => v1::Value {
                value_data: Some(ValueData::DurationMillisecondValue(v.value())),
            },
            TimeUnit::Microsecond => v1::Value {
                value_data: Some(ValueData::DurationMicrosecondValue(v.value())),
            },
            TimeUnit::Nanosecond => v1::Value {
                value_data: Some(ValueData::DurationNanosecondValue(v.value())),
            },
        },
        Value::Decimal128(v) => v1::Value {
            value_data: Some(ValueData::Decimal128Value(convert_to_pb_decimal128(v))),
        },
        Value::List(_) | Value::Duration(_) => return None,
        Value::List(_) => return None,
    };

    Some(proto_value)
@@ -963,6 +1047,10 @@ pub fn proto_value_type(value: &v1::Value) -> Option<ColumnDataType> {
        ValueData::IntervalYearMonthValue(_) => ColumnDataType::IntervalYearMonth,
        ValueData::IntervalDayTimeValue(_) => ColumnDataType::IntervalDayTime,
        ValueData::IntervalMonthDayNanoValue(_) => ColumnDataType::IntervalMonthDayNano,
        ValueData::DurationSecondValue(_) => ColumnDataType::DurationSecond,
        ValueData::DurationMillisecondValue(_) => ColumnDataType::DurationMillisecond,
        ValueData::DurationMicrosecondValue(_) => ColumnDataType::DurationMicrosecond,
        ValueData::DurationNanosecondValue(_) => ColumnDataType::DurationNanosecond,
        ValueData::Decimal128Value(_) => ColumnDataType::Decimal128,
    };
    Some(value_type)
@@ -1020,8 +1108,14 @@ pub fn value_to_grpc_value(value: Value) -> GrpcValue {
                ValueData::IntervalMonthDayNanoValue(convert_i128_to_interval(v.to_i128()))
            }
        }),
        Value::Duration(v) => Some(match v.unit() {
            TimeUnit::Second => ValueData::DurationSecondValue(v.value()),
            TimeUnit::Millisecond => ValueData::DurationMillisecondValue(v.value()),
            TimeUnit::Microsecond => ValueData::DurationMicrosecondValue(v.value()),
            TimeUnit::Nanosecond => ValueData::DurationNanosecondValue(v.value()),
        }),
        Value::Decimal128(v) => Some(ValueData::Decimal128Value(convert_to_pb_decimal128(v))),
        Value::List(_) | Value::Duration(_) => unreachable!(),
        Value::List(_) => unreachable!(),
        },
    }
}
@@ -1031,15 +1125,16 @@ mod tests {
    use std::sync::Arc;

    use datatypes::types::{
        Int32Type, IntervalDayTimeType, IntervalMonthDayNanoType, IntervalYearMonthType,
        TimeMillisecondType, TimeSecondType, TimestampMillisecondType, TimestampSecondType,
        UInt32Type,
        DurationMillisecondType, DurationSecondType, Int32Type, IntervalDayTimeType,
        IntervalMonthDayNanoType, IntervalYearMonthType, TimeMillisecondType, TimeSecondType,
        TimestampMillisecondType, TimestampSecondType, UInt32Type,
    };
    use datatypes::vectors::{
        BooleanVector, IntervalDayTimeVector, IntervalMonthDayNanoVector, IntervalYearMonthVector,
        TimeMicrosecondVector, TimeMillisecondVector, TimeNanosecondVector, TimeSecondVector,
        TimestampMicrosecondVector, TimestampMillisecondVector, TimestampNanosecondVector,
        TimestampSecondVector, Vector,
        BooleanVector, DurationMicrosecondVector, DurationMillisecondVector,
        DurationNanosecondVector, DurationSecondVector, IntervalDayTimeVector,
        IntervalMonthDayNanoVector, IntervalYearMonthVector, TimeMicrosecondVector,
        TimeMillisecondVector, TimeNanosecondVector, TimeSecondVector, TimestampMicrosecondVector,
        TimestampMillisecondVector, TimestampNanosecondVector, TimestampSecondVector, Vector,
    };
    use paste::paste;

@@ -1115,6 +1210,10 @@ mod tests {
        let values = values.interval_month_day_nano_values;
        assert_eq!(2, values.capacity());

        let values = values_with_capacity(ColumnDataType::DurationMillisecond, 2);
        let values = values.duration_millisecond_values;
        assert_eq!(2, values.capacity());

        let values = values_with_capacity(ColumnDataType::Decimal128, 2);
        let values = values.decimal128_values;
        assert_eq!(2, values.capacity());
@@ -1202,6 +1301,10 @@ mod tests {
            ConcreteDataType::interval_datatype(IntervalUnit::MonthDayNano),
            ColumnDataTypeWrapper::interval_month_day_nano_datatype().into()
        );
        assert_eq!(
            ConcreteDataType::duration_millisecond_datatype(),
            ColumnDataTypeWrapper::duration_millisecond_datatype().into()
        );
        assert_eq!(
            ConcreteDataType::decimal128_datatype(10, 2),
            ColumnDataTypeWrapper::decimal128_datatype(10, 2).into()
@@ -1294,6 +1397,12 @@ mod tests {
                .try_into()
                .unwrap()
        );
        assert_eq!(
            ColumnDataTypeWrapper::duration_millisecond_datatype(),
            ConcreteDataType::duration_millisecond_datatype()
                .try_into()
                .unwrap()
        );

        assert_eq!(
            ColumnDataTypeWrapper::decimal128_datatype(10, 2),
@@ -1447,6 +1556,48 @@ mod tests {
        });
    }

    #[test]
    fn test_column_put_duration_values() {
        let mut column = Column {
            column_name: "test".to_string(),
            semantic_type: 0,
            values: Some(Values {
                ..Default::default()
            }),
            null_mask: vec![],
            datatype: 0,
            ..Default::default()
        };

        let vector = Arc::new(DurationNanosecondVector::from_vec(vec![1, 2, 3]));
        push_vals(&mut column, 3, vector);
        assert_eq!(
            vec![1, 2, 3],
            column.values.as_ref().unwrap().duration_nanosecond_values
        );

        let vector = Arc::new(DurationMicrosecondVector::from_vec(vec![7, 8, 9]));
        push_vals(&mut column, 3, vector);
        assert_eq!(
            vec![7, 8, 9],
            column.values.as_ref().unwrap().duration_microsecond_values
        );

        let vector = Arc::new(DurationMillisecondVector::from_vec(vec![4, 5, 6]));
        push_vals(&mut column, 3, vector);
        assert_eq!(
            vec![4, 5, 6],
            column.values.as_ref().unwrap().duration_millisecond_values
        );

        let vector = Arc::new(DurationSecondVector::from_vec(vec![10, 11, 12]));
        push_vals(&mut column, 3, vector);
        assert_eq!(
            vec![10, 11, 12],
            column.values.as_ref().unwrap().duration_second_values
        );
    }

    #[test]
    fn test_column_put_vector() {
        use crate::v1::SemanticType;
@@ -1548,6 +1699,39 @@ mod tests {
        assert_eq!(expect, actual);
    }

    #[test]
    fn test_convert_duration_values() {
        // second
        let actual = pb_values_to_values(
            &ConcreteDataType::Duration(DurationType::Second(DurationSecondType)),
            Values {
                duration_second_values: vec![1_i64, 2_i64, 3_i64],
                ..Default::default()
            },
        );
        let expect = vec![
            Value::Duration(Duration::new_second(1_i64)),
            Value::Duration(Duration::new_second(2_i64)),
            Value::Duration(Duration::new_second(3_i64)),
        ];
        assert_eq!(expect, actual);

        // millisecond
        let actual = pb_values_to_values(
            &ConcreteDataType::Duration(DurationType::Millisecond(DurationMillisecondType)),
            Values {
                duration_millisecond_values: vec![1_i64, 2_i64, 3_i64],
                ..Default::default()
            },
        );
        let expect = vec![
            Value::Duration(Duration::new_millisecond(1_i64)),
            Value::Duration(Duration::new_millisecond(2_i64)),
            Value::Duration(Duration::new_millisecond(3_i64)),
        ];
        assert_eq!(expect, actual);
    }

    #[test]
    fn test_convert_interval_values() {
        // year_month

@@ -14,12 +14,12 @@ workspace = true
[dependencies]
api.workspace = true
async-trait.workspace = true
common-base.workspace = true
common-error.workspace = true
common-macro.workspace = true
common-telemetry.workspace = true
digest = "0.10"
notify.workspace = true
secrecy = { version = "0.8", features = ["serde", "alloc"] }
sha1 = "0.10"
snafu.workspace = true
sql.workspace = true

@@ -14,8 +14,8 @@

use std::sync::Arc;

use common_base::secrets::SecretString;
use digest::Digest;
use secrecy::SecretString;
use sha1::Sha1;
use snafu::{ensure, OptionExt};

@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use common_base::secrets::ExposeSecret;
use secrecy::ExposeSecret;

use crate::error::{
    AccessDeniedSnafu, Result, UnsupportedPasswordTypeSnafu, UserNotFoundSnafu,

@@ -21,7 +21,7 @@ use std::io;
use std::io::BufRead;
use std::path::Path;

use common_base::secrets::ExposeSecret;
use secrecy::ExposeSecret;
use snafu::{ensure, OptionExt, ResultExt};

use crate::common::{Identity, Password};

@@ -17,7 +17,6 @@ arrow-schema.workspace = true
async-stream.workspace = true
async-trait = "0.1"
common-catalog.workspace = true
common-config.workspace = true
common-error.workspace = true
common-macro.workspace = true
common-meta.workspace = true
@@ -31,7 +30,6 @@ datafusion.workspace = true
datatypes.workspace = true
futures = "0.3"
futures-util.workspace = true
humantime.workspace = true
itertools.workspace = true
lazy_static.workspace = true
meta-client.workspace = true

@@ -49,12 +49,6 @@ pub enum Error {
        source: BoxedError,
    },

    #[snafu(display("Failed to list nodes in cluster: {source}"))]
    ListNodes {
        location: Location,
        source: BoxedError,
    },

    #[snafu(display("Failed to re-compile script due to internal error"))]
    CompileScriptInternal {
        location: Location,
@@ -300,7 +294,6 @@ impl ErrorExt for Error {
            }

            Error::ListCatalogs { source, .. }
            | Error::ListNodes { source, .. }
            | Error::ListSchemas { source, .. }
            | Error::ListTables { source, .. } => source.status_code(),

@@ -12,7 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.

mod cluster_info;
pub mod columns;
pub mod key_column_usage;
mod memory_table;
@@ -24,7 +23,6 @@ pub mod schemata;
mod table_constraints;
mod table_names;
pub mod tables;
pub(crate) mod utils;

use std::collections::HashMap;
use std::sync::{Arc, Weak};
@@ -49,7 +47,6 @@ pub use table_names::*;

use self::columns::InformationSchemaColumns;
use crate::error::Result;
use crate::information_schema::cluster_info::InformationSchemaClusterInfo;
use crate::information_schema::key_column_usage::InformationSchemaKeyColumnUsage;
use crate::information_schema::memory_table::{get_schema_columns, MemoryTable};
use crate::information_schema::partitions::InformationSchemaPartitions;
@@ -153,7 +150,6 @@ impl InformationSchemaProvider {
    fn build_tables(&mut self) {
        let mut tables = HashMap::new();

        // SECURITY NOTE:
        // Carefully consider the tables that may expose sensitive cluster configurations,
        // authentication details, and other critical information.
        // Only put these tables under `greptime` catalog to prevent info leak.
@@ -170,10 +166,6 @@ impl InformationSchemaProvider {
                REGION_PEERS.to_string(),
                self.build_table(REGION_PEERS).unwrap(),
            );
            tables.insert(
                CLUSTER_INFO.to_string(),
                self.build_table(CLUSTER_INFO).unwrap(),
            );
        }

        tables.insert(TABLES.to_string(), self.build_table(TABLES).unwrap());
@@ -259,9 +251,6 @@ impl InformationSchemaProvider {
                self.catalog_name.clone(),
                self.catalog_manager.clone(),
            )) as _),
            CLUSTER_INFO => Some(Arc::new(InformationSchemaClusterInfo::new(
                self.catalog_manager.clone(),
            )) as _),
            _ => None,
        }
    }

@@ -1,317 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::sync::{Arc, Weak};
use std::time::Duration;

use arrow_schema::SchemaRef as ArrowSchemaRef;
use common_catalog::consts::INFORMATION_SCHEMA_CLUSTER_INFO_TABLE_ID;
use common_config::Mode;
use common_error::ext::BoxedError;
use common_meta::cluster::{ClusterInfo, NodeInfo, NodeStatus};
use common_meta::peer::Peer;
use common_query::physical_plan::TaskContext;
use common_recordbatch::adapter::RecordBatchStreamAdapter;
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
use common_telemetry::warn;
use common_time::timestamp::Timestamp;
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
use datatypes::prelude::{ConcreteDataType, ScalarVectorBuilder, VectorRef};
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
use datatypes::timestamp::TimestampMillisecond;
use datatypes::value::Value;
use datatypes::vectors::{
    Int64VectorBuilder, StringVectorBuilder, TimestampMillisecondVectorBuilder,
};
use snafu::ResultExt;
use store_api::storage::{ScanRequest, TableId};

use super::CLUSTER_INFO;
use crate::error::{CreateRecordBatchSnafu, InternalSnafu, ListNodesSnafu, Result};
use crate::information_schema::{utils, InformationTable, Predicates};
use crate::CatalogManager;

const PEER_ID: &str = "peer_id";
const PEER_TYPE: &str = "peer_type";
const PEER_ADDR: &str = "peer_addr";
const VERSION: &str = "version";
const GIT_COMMIT: &str = "git_commit";
const START_TIME: &str = "start_time";
const UPTIME: &str = "uptime";
const ACTIVE_TIME: &str = "active_time";

const INIT_CAPACITY: usize = 42;

/// The `CLUSTER_INFO` table provides information about the current topology information of the cluster.
///
/// - `peer_id`: the peer server id.
/// - `peer_type`: the peer type, such as `datanode`, `frontend`, `metasrv` etc.
/// - `peer_addr`: the peer gRPC address.
/// - `version`: the build package version of the peer.
/// - `git_commit`: the build git commit hash of the peer.
/// - `start_time`: the starting time of the peer.
/// - `uptime`: the uptime of the peer.
/// - `active_time`: the time since the last activity of the peer.
///
pub(super) struct InformationSchemaClusterInfo {
    schema: SchemaRef,
    catalog_manager: Weak<dyn CatalogManager>,
    start_time_ms: u64,
}

impl InformationSchemaClusterInfo {
    pub(super) fn new(catalog_manager: Weak<dyn CatalogManager>) -> Self {
        Self {
            schema: Self::schema(),
            catalog_manager,
            start_time_ms: common_time::util::current_time_millis() as u64,
        }
    }

    pub(crate) fn schema() -> SchemaRef {
        Arc::new(Schema::new(vec![
            ColumnSchema::new(PEER_ID, ConcreteDataType::int64_datatype(), false),
            ColumnSchema::new(PEER_TYPE, ConcreteDataType::string_datatype(), false),
            ColumnSchema::new(PEER_ADDR, ConcreteDataType::string_datatype(), true),
            ColumnSchema::new(VERSION, ConcreteDataType::string_datatype(), false),
            ColumnSchema::new(GIT_COMMIT, ConcreteDataType::string_datatype(), false),
            ColumnSchema::new(
                START_TIME,
                ConcreteDataType::timestamp_millisecond_datatype(),
                true,
            ),
            ColumnSchema::new(UPTIME, ConcreteDataType::string_datatype(), true),
            ColumnSchema::new(ACTIVE_TIME, ConcreteDataType::string_datatype(), true),
        ]))
    }

    fn builder(&self) -> InformationSchemaClusterInfoBuilder {
        InformationSchemaClusterInfoBuilder::new(
            self.schema.clone(),
            self.catalog_manager.clone(),
            self.start_time_ms,
        )
    }
}

impl InformationTable for InformationSchemaClusterInfo {
    fn table_id(&self) -> TableId {
        INFORMATION_SCHEMA_CLUSTER_INFO_TABLE_ID
    }

    fn table_name(&self) -> &'static str {
        CLUSTER_INFO
    }

    fn schema(&self) -> SchemaRef {
        self.schema.clone()
    }

    fn to_stream(&self, request: ScanRequest) -> Result<SendableRecordBatchStream> {
        let schema = self.schema.arrow_schema().clone();
        let mut builder = self.builder();
        let stream = Box::pin(DfRecordBatchStreamAdapter::new(
            schema,
            futures::stream::once(async move {
                builder
                    .make_cluster_info(Some(request))
                    .await
                    .map(|x| x.into_df_record_batch())
                    .map_err(Into::into)
            }),
        ));
        Ok(Box::pin(
            RecordBatchStreamAdapter::try_new(stream)
                .map_err(BoxedError::new)
                .context(InternalSnafu)?,
        ))
    }
}

struct InformationSchemaClusterInfoBuilder {
    schema: SchemaRef,
    start_time_ms: u64,
    catalog_manager: Weak<dyn CatalogManager>,

    peer_ids: Int64VectorBuilder,
    peer_types: StringVectorBuilder,
    peer_addrs: StringVectorBuilder,
    versions: StringVectorBuilder,
    git_commits: StringVectorBuilder,
    start_times: TimestampMillisecondVectorBuilder,
    uptimes: StringVectorBuilder,
    active_times: StringVectorBuilder,
}

impl InformationSchemaClusterInfoBuilder {
    fn new(
        schema: SchemaRef,
        catalog_manager: Weak<dyn CatalogManager>,
        start_time_ms: u64,
    ) -> Self {
        Self {
            schema,
            catalog_manager,
            peer_ids: Int64VectorBuilder::with_capacity(INIT_CAPACITY),
            peer_types: StringVectorBuilder::with_capacity(INIT_CAPACITY),
            peer_addrs: StringVectorBuilder::with_capacity(INIT_CAPACITY),
            versions: StringVectorBuilder::with_capacity(INIT_CAPACITY),
            git_commits: StringVectorBuilder::with_capacity(INIT_CAPACITY),
            start_times: TimestampMillisecondVectorBuilder::with_capacity(INIT_CAPACITY),
            uptimes: StringVectorBuilder::with_capacity(INIT_CAPACITY),
            active_times: StringVectorBuilder::with_capacity(INIT_CAPACITY),
            start_time_ms,
        }
    }

    /// Construct the `information_schema.cluster_info` virtual table
    async fn make_cluster_info(&mut self, request: Option<ScanRequest>) -> Result<RecordBatch> {
        let predicates = Predicates::from_scan_request(&request);
        let mode = utils::running_mode(&self.catalog_manager)?.unwrap_or(Mode::Standalone);

        match mode {
            Mode::Standalone => {
                let build_info = common_version::build_info();

                self.add_node_info(
                    &predicates,
                    NodeInfo {
                        // For the standalone:
                        // - id always 0
                        // - empty string for peer_addr
                        peer: Peer {
                            id: 0,
                            addr: "".to_string(),
                        },
                        last_activity_ts: -1,
                        status: NodeStatus::Standalone,
                        version: build_info.version.to_string(),
                        git_commit: build_info.commit_short.to_string(),
                        // Use `self.start_time_ms` instead.
                        // It's not precise but enough.
                        start_time_ms: self.start_time_ms,
                    },
                );
            }
            Mode::Distributed => {
                if let Some(meta_client) = utils::meta_client(&self.catalog_manager)? {
                    let node_infos = meta_client
                        .list_nodes(None)
                        .await
                        .map_err(BoxedError::new)
                        .context(ListNodesSnafu)?;

                    for node_info in node_infos {
                        self.add_node_info(&predicates, node_info);
                    }
                } else {
                    warn!("Could not find meta client in distributed mode.");
                }
            }
        }

        self.finish()
    }

    fn add_node_info(&mut self, predicates: &Predicates, node_info: NodeInfo) {
        let peer_type = node_info.status.role_name();

        let row = [
            (PEER_ID, &Value::from(node_info.peer.id)),
            (PEER_TYPE, &Value::from(peer_type)),
            (PEER_ADDR, &Value::from(node_info.peer.addr.as_str())),
            (VERSION, &Value::from(node_info.version.as_str())),
            (GIT_COMMIT, &Value::from(node_info.git_commit.as_str())),
        ];

        if !predicates.eval(&row) {
            return;
        }

        if peer_type == "FRONTEND" || peer_type == "METASRV" {
            // Always set peer_id to be -1 for frontends and metasrvs
            self.peer_ids.push(Some(-1));
        } else {
            self.peer_ids.push(Some(node_info.peer.id as i64));
        }

        self.peer_types.push(Some(peer_type));
        self.peer_addrs.push(Some(&node_info.peer.addr));
        self.versions.push(Some(&node_info.version));
        self.git_commits.push(Some(&node_info.git_commit));
        if node_info.start_time_ms > 0 {
            self.start_times
                .push(Some(TimestampMillisecond(Timestamp::new_millisecond(
                    node_info.start_time_ms as i64,
                ))));
            self.uptimes.push(Some(
                Self::format_duration_since(node_info.start_time_ms).as_str(),
            ));
        } else {
            self.start_times.push(None);
            self.uptimes.push(None);
        }

        if node_info.last_activity_ts > 0 {
            self.active_times.push(Some(
                Self::format_duration_since(node_info.last_activity_ts as u64).as_str(),
            ));
        } else {
            self.active_times.push(None);
        }
    }

    fn format_duration_since(ts: u64) -> String {
        let now = common_time::util::current_time_millis() as u64;
        let duration_since = now - ts;
        humantime::format_duration(Duration::from_millis(duration_since)).to_string()
    }

    fn finish(&mut self) -> Result<RecordBatch> {
        let columns: Vec<VectorRef> = vec![
            Arc::new(self.peer_ids.finish()),
            Arc::new(self.peer_types.finish()),
            Arc::new(self.peer_addrs.finish()),
            Arc::new(self.versions.finish()),
            Arc::new(self.git_commits.finish()),
            Arc::new(self.start_times.finish()),
            Arc::new(self.uptimes.finish()),
            Arc::new(self.active_times.finish()),
        ];
        RecordBatch::new(self.schema.clone(), columns).context(CreateRecordBatchSnafu)
    }
}

impl DfPartitionStream for InformationSchemaClusterInfo {
    fn schema(&self) -> &ArrowSchemaRef {
        self.schema.arrow_schema()
    }

    fn execute(&self, _: Arc<TaskContext>) -> DfSendableRecordBatchStream {
        let schema = self.schema.arrow_schema().clone();
        let mut builder = self.builder();
        Box::pin(DfRecordBatchStreamAdapter::new(
|
||||
schema,
|
||||
futures::stream::once(async move {
|
||||
builder
|
||||
.make_cluster_info(None)
|
||||
.await
|
||||
.map(|x| x.into_df_record_batch())
|
||||
.map_err(Into::into)
|
||||
}),
|
||||
))
|
||||
}
|
||||
}
|
||||
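Note: once registered, the simplest way to exercise this table is a plain SQL query through the gRPC client that this changeset also touches. A minimal sketch, assuming a standalone or frontend node listening on 127.0.0.1:4001 (the endpoint and the main wrapper are illustrative, not part of this diff):

    use client::{Client, Database};
    use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};

    #[tokio::main]
    async fn main() -> client::Result<()> {
        // Assumed endpoint; point this at a running GreptimeDB node.
        let client = Client::with_urls(vec!["127.0.0.1:4001"]);
        let db = Database::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, client);
        // Each row describes one node: peer_id, peer_type, peer_addr, version,
        // git_commit, start_time, uptime and active_time.
        let _output = db.sql("SELECT * FROM information_schema.cluster_info").await?;
        Ok(())
    }

In standalone mode this yields a single row with peer_id 0 and an empty peer_addr, matching the Mode::Standalone branch above.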
@@ -258,7 +258,7 @@ impl InformationSchemaColumnsBuilder {
        let predicates = Predicates::from_scan_request(&request);

        for schema_name in catalog_manager.schema_names(&catalog_name).await? {
            let mut stream = catalog_manager.tables(&catalog_name, &schema_name);
            let mut stream = catalog_manager.tables(&catalog_name, &schema_name).await;

            while let Some(table) = stream.try_next().await? {
                let keys = &table.table_info().meta.primary_key_indices;

@@ -243,6 +243,7 @@ impl InformationSchemaPartitionsBuilder {
        for schema_name in catalog_manager.schema_names(&catalog_name).await? {
            let table_info_stream = catalog_manager
                .tables(&catalog_name, &schema_name)
                .await
                .try_filter_map(|t| async move {
                    let table_info = t.table_info();
                    if table_info.table_type == TableType::Temporary {

@@ -55,7 +55,7 @@ const INIT_CAPACITY: usize = 42;
///
/// - `region_id`: the region id
/// - `peer_id`: the region storage datanode peer id
/// - `peer_addr`: the region storage datanode gRPC peer address
/// - `peer_addr`: the region storage datanode peer address
/// - `is_leader`: whether the peer is the leader
/// - `status`: the region status, `ALIVE` or `DOWNGRADED`.
/// - `down_seconds`: the duration of being offline, in seconds.

@@ -179,6 +179,7 @@ impl InformationSchemaRegionPeersBuilder {
        for schema_name in catalog_manager.schema_names(&catalog_name).await? {
            let table_id_stream = catalog_manager
                .tables(&catalog_name, &schema_name)
                .await
                .try_filter_map(|t| async move {
                    let table_info = t.table_info();
                    if table_info.table_type == TableType::Temporary {
@@ -28,8 +28,8 @@ use datatypes::prelude::{ConcreteDataType, MutableVector};
use datatypes::scalars::ScalarVectorBuilder;
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
use datatypes::vectors::{
    ConstantVector, Float64VectorBuilder, StringVectorBuilder, TimestampMillisecondVector,
    VectorRef,
    ConstantVector, Float64VectorBuilder, StringVector, StringVectorBuilder,
    TimestampMillisecondVector, VectorRef,
};
use itertools::Itertools;
use snafu::ResultExt;

@@ -45,8 +45,8 @@ pub(super) struct InformationSchemaMetrics {
const METRIC_NAME: &str = "metric_name";
const METRIC_VALUE: &str = "value";
const METRIC_LABELS: &str = "labels";
const PEER_ADDR: &str = "peer_addr";
const PEER_TYPE: &str = "peer_type";
const NODE: &str = "node";
const NODE_TYPE: &str = "node_type";
const TIMESTAMP: &str = "timestamp";

/// The `information_schema.runtime_metrics` virtual table.

@@ -63,8 +63,8 @@ impl InformationSchemaMetrics {
            ColumnSchema::new(METRIC_NAME, ConcreteDataType::string_datatype(), false),
            ColumnSchema::new(METRIC_VALUE, ConcreteDataType::float64_datatype(), false),
            ColumnSchema::new(METRIC_LABELS, ConcreteDataType::string_datatype(), true),
            ColumnSchema::new(PEER_ADDR, ConcreteDataType::string_datatype(), true),
            ColumnSchema::new(PEER_TYPE, ConcreteDataType::string_datatype(), false),
            ColumnSchema::new(NODE, ConcreteDataType::string_datatype(), false),
            ColumnSchema::new(NODE_TYPE, ConcreteDataType::string_datatype(), false),
            ColumnSchema::new(
                TIMESTAMP,
                ConcreteDataType::timestamp_millisecond_datatype(),

@@ -104,7 +104,6 @@ impl InformationTable for InformationSchemaMetrics {
                    .map_err(Into::into)
            }),
        ));

        Ok(Box::pin(
            RecordBatchStreamAdapter::try_new(stream)
                .map_err(BoxedError::new)

@@ -119,8 +118,6 @@ struct InformationSchemaMetricsBuilder {
    metric_names: StringVectorBuilder,
    metric_values: Float64VectorBuilder,
    metric_labels: StringVectorBuilder,
    peer_addrs: StringVectorBuilder,
    peer_types: StringVectorBuilder,
}

impl InformationSchemaMetricsBuilder {

@@ -130,24 +127,13 @@ impl InformationSchemaMetricsBuilder {
            metric_names: StringVectorBuilder::with_capacity(42),
            metric_values: Float64VectorBuilder::with_capacity(42),
            metric_labels: StringVectorBuilder::with_capacity(42),
            peer_addrs: StringVectorBuilder::with_capacity(42),
            peer_types: StringVectorBuilder::with_capacity(42),
        }
    }

    fn add_metric(
        &mut self,
        metric_name: &str,
        labels: String,
        metric_value: f64,
        peer: Option<&str>,
        peer_type: &str,
    ) {
    fn add_metric(&mut self, metric_name: &str, labels: String, metric_value: f64) {
        self.metric_names.push(Some(metric_name));
        self.metric_values.push(Some(metric_value));
        self.metric_labels.push(Some(&labels));
        self.peer_addrs.push(peer);
        self.peer_types.push(Some(peer_type));
    }

    async fn make_metrics(&mut self, _request: Option<ScanRequest>) -> Result<RecordBatch> {

@@ -184,19 +170,18 @@ impl InformationSchemaMetricsBuilder {
                    .join(", "),
                // Safety: always has a sample
                ts.samples[0].value,
                // The peer column is always `None` for standalone
                None,
                "STANDALONE",
            );
        }

        // FIXME(dennis): fetching other peers metrics
        self.finish()
    }

    fn finish(&mut self) -> Result<RecordBatch> {
        let rows_num = self.metric_names.len();

        let unknowns = Arc::new(ConstantVector::new(
            Arc::new(StringVector::from(vec!["unknown"])),
            rows_num,
        ));
        let timestamps = Arc::new(ConstantVector::new(
            Arc::new(TimestampMillisecondVector::from_slice([
                current_time_millis(),

@@ -208,8 +193,9 @@ impl InformationSchemaMetricsBuilder {
            Arc::new(self.metric_names.finish()),
            Arc::new(self.metric_values.finish()),
            Arc::new(self.metric_labels.finish()),
            Arc::new(self.peer_addrs.finish()),
            Arc::new(self.peer_types.finish()),
            // TODO(dennis): supports node and node_type for cluster
            unknowns.clone(),
            unknowns,
            timestamps,
        ];

@@ -257,8 +243,8 @@ mod tests {
        assert!(result_literal.contains(METRIC_NAME));
        assert!(result_literal.contains(METRIC_VALUE));
        assert!(result_literal.contains(METRIC_LABELS));
        assert!(result_literal.contains(PEER_ADDR));
        assert!(result_literal.contains(PEER_TYPE));
        assert!(result_literal.contains(NODE));
        assert!(result_literal.contains(NODE_TYPE));
        assert!(result_literal.contains(TIMESTAMP));
    }
}
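The reworked runtime_metrics schema can be checked the same way. A sketch reusing the db handle from the cluster_info snippet earlier (column names per this hunk; node and node_type are currently constant "unknown" placeholders, as built in finish() above):

    let _metrics = db
        .sql("SELECT metric_name, value, labels, node, node_type FROM information_schema.runtime_metrics")
        .await?;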
@@ -177,7 +177,7 @@ impl InformationSchemaTableConstraintsBuilder {
        let predicates = Predicates::from_scan_request(&request);

        for schema_name in catalog_manager.schema_names(&catalog_name).await? {
            let mut stream = catalog_manager.tables(&catalog_name, &schema_name);
            let mut stream = catalog_manager.tables(&catalog_name, &schema_name).await;

            while let Some(table) = stream.try_next().await? {
                let keys = &table.table_info().meta.primary_key_indices;

@@ -40,6 +40,5 @@ pub const GLOBAL_STATUS: &str = "global_status";
pub const SESSION_STATUS: &str = "session_status";
pub const RUNTIME_METRICS: &str = "runtime_metrics";
pub const PARTITIONS: &str = "partitions";
pub const REGION_PEERS: &str = "region_peers";
pub const REGION_PEERS: &str = "greptime_region_peers";
pub const TABLE_CONSTRAINTS: &str = "table_constraints";
pub const CLUSTER_INFO: &str = "cluster_info";

@@ -161,7 +161,7 @@ impl InformationSchemaTablesBuilder {
        let predicates = Predicates::from_scan_request(&request);

        for schema_name in catalog_manager.schema_names(&catalog_name).await? {
            let mut stream = catalog_manager.tables(&catalog_name, &schema_name);
            let mut stream = catalog_manager.tables(&catalog_name, &schema_name).await;

            while let Some(table) = stream.try_next().await? {
                let table_info = table.table_info();
@@ -1,53 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::sync::{Arc, Weak};

use common_config::Mode;
use meta_client::client::MetaClient;
use snafu::OptionExt;

use crate::error::{Result, UpgradeWeakCatalogManagerRefSnafu};
use crate::kvbackend::KvBackendCatalogManager;
use crate::CatalogManager;

/// Try to get the server running mode from `[CatalogManager]` weak reference.
pub fn running_mode(catalog_manager: &Weak<dyn CatalogManager>) -> Result<Option<Mode>> {
    let catalog_manager = catalog_manager
        .upgrade()
        .context(UpgradeWeakCatalogManagerRefSnafu)?;

    Ok(catalog_manager
        .as_any()
        .downcast_ref::<KvBackendCatalogManager>()
        .map(|manager| manager.running_mode())
        .copied())
}

/// Try to get the `[MetaClient]` from `[CatalogManager]` weak reference.
pub fn meta_client(catalog_manager: &Weak<dyn CatalogManager>) -> Result<Option<Arc<MetaClient>>> {
    let catalog_manager = catalog_manager
        .upgrade()
        .context(UpgradeWeakCatalogManagerRefSnafu)?;

    let meta_client = match catalog_manager
        .as_any()
        .downcast_ref::<KvBackendCatalogManager>()
    {
        None => None,
        Some(manager) => manager.meta_client(),
    };

    Ok(meta_client)
}
@@ -22,7 +22,6 @@ use common_catalog::consts::{
    DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, INFORMATION_SCHEMA_NAME, NUMBERS_TABLE_ID,
};
use common_catalog::format_full_table_name;
use common_config::Mode;
use common_error::ext::BoxedError;
use common_meta::cache_invalidator::{CacheInvalidator, Context, MultiCacheInvalidator};
use common_meta::instruction::CacheIdent;

@@ -34,7 +33,6 @@ use common_meta::key::{TableMetadataManager, TableMetadataManagerRef};
use common_meta::kv_backend::KvBackendRef;
use futures_util::stream::BoxStream;
use futures_util::{StreamExt, TryStreamExt};
use meta_client::client::MetaClient;
use moka::future::{Cache as AsyncCache, CacheBuilder};
use moka::sync::Cache;
use partition::manager::{PartitionRuleManager, PartitionRuleManagerRef};

@@ -58,8 +56,6 @@ use crate::CatalogManager;
/// comes from `SystemCatalog`, which is static and read-only.
#[derive(Clone)]
pub struct KvBackendCatalogManager {
    mode: Mode,
    meta_client: Option<Arc<MetaClient>>,
    partition_manager: PartitionRuleManagerRef,
    table_metadata_manager: TableMetadataManagerRef,
    /// A sub-CatalogManager that handles system tables

@@ -105,8 +101,6 @@ const TABLE_CACHE_TTI: Duration = Duration::from_secs(5 * 60);

impl KvBackendCatalogManager {
    pub async fn new(
        mode: Mode,
        meta_client: Option<Arc<MetaClient>>,
        backend: KvBackendRef,
        multi_cache_invalidator: Arc<MultiCacheInvalidator>,
    ) -> Arc<Self> {

@@ -119,8 +113,6 @@ impl KvBackendCatalogManager {
        .await;

        Arc::new_cyclic(|me| Self {
            mode,
            meta_client,
            partition_manager: Arc::new(PartitionRuleManager::new(backend.clone())),
            table_metadata_manager: Arc::new(TableMetadataManager::new(backend)),
            system_catalog: SystemCatalog {

@@ -135,16 +127,6 @@ impl KvBackendCatalogManager {
        })
    }

    /// Returns the server running mode.
    pub fn running_mode(&self) -> &Mode {
        &self.mode
    }

    /// Returns the `[MetaClient]`.
    pub fn meta_client(&self) -> Option<Arc<MetaClient>> {
        self.meta_client.clone()
    }

    pub fn partition_manager(&self) -> PartitionRuleManagerRef {
        self.partition_manager.clone()
    }

@@ -301,7 +283,11 @@ impl CatalogManager for KvBackendCatalogManager {
        })
    }

    fn tables<'a>(&'a self, catalog: &'a str, schema: &'a str) -> BoxStream<'a, Result<TableRef>> {
    async fn tables<'a>(
        &'a self,
        catalog: &'a str,
        schema: &'a str,
    ) -> BoxStream<'a, Result<TableRef>> {
        let sys_tables = try_stream!({
            // System tables
            let sys_table_names = self.system_catalog.table_names(schema);
@@ -59,7 +59,11 @@ pub trait CatalogManager: Send + Sync {
    ) -> Result<Option<TableRef>>;

    /// Returns all tables with a stream by catalog and schema.
    fn tables<'a>(&'a self, catalog: &'a str, schema: &'a str) -> BoxStream<'a, Result<TableRef>>;
    async fn tables<'a>(
        &'a self,
        catalog: &'a str,
        schema: &'a str,
    ) -> BoxStream<'a, Result<TableRef>>;
}

pub type CatalogManagerRef = Arc<dyn CatalogManager>;
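Since tables is now an async fn that returns the stream, every call site gains one extra .await before iteration, as the hunks around this one show. A minimal consumer sketch (the function name is illustrative; Result is the catalog crate's alias):

    use futures_util::TryStreamExt;

    async fn list_table_names(
        catalog_manager: &CatalogManagerRef,
        catalog: &str,
        schema: &str,
    ) -> Result<Vec<String>> {
        // One extra `.await` to obtain the stream, then the usual try_next loop.
        let mut stream = catalog_manager.tables(catalog, schema).await;
        let mut names = Vec::new();
        while let Some(table) = stream.try_next().await? {
            names.push(table.table_info().name.clone());
        }
        Ok(names)
    }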
@@ -117,7 +117,11 @@ impl CatalogManager for MemoryCatalogManager {
        Ok(result)
    }

    fn tables<'a>(&'a self, catalog: &'a str, schema: &'a str) -> BoxStream<'a, Result<TableRef>> {
    async fn tables<'a>(
        &'a self,
        catalog: &'a str,
        schema: &'a str,
    ) -> BoxStream<'a, Result<TableRef>> {
        let catalogs = self.catalogs.read().unwrap();

        let Some(schemas) = catalogs.get(catalog) else {

@@ -137,11 +141,11 @@ impl CatalogManager for MemoryCatalogManager {

        let tables = tables.values().cloned().collect::<Vec<_>>();

        Box::pin(try_stream!({
        return Box::pin(try_stream!({
            for table in tables {
                yield table;
            }
        }))
        }));
    }
}

@@ -364,7 +368,9 @@ mod tests {
        .await
        .unwrap()
        .unwrap();
        let stream = catalog_list.tables(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME);
        let stream = catalog_list
            .tables(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME)
            .await;
        let tables = stream.try_collect::<Vec<_>>().await.unwrap();
        assert_eq!(tables.len(), 1);
        assert_eq!(
src/client/examples/logical.rs (new file, 115 lines)
@@ -0,0 +1,115 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use api::v1::{ColumnDataType, ColumnDef, CreateTableExpr, SemanticType, TableId};
use client::{Client, Database};
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, MITO_ENGINE};
use prost::Message;
use substrait_proto::proto::plan_rel::RelType as PlanRelType;
use substrait_proto::proto::read_rel::{NamedTable, ReadType};
use substrait_proto::proto::rel::RelType;
use substrait_proto::proto::{PlanRel, ReadRel, Rel};
use tracing::{event, Level};

fn main() {
    tracing::subscriber::set_global_default(tracing_subscriber::FmtSubscriber::builder().finish())
        .unwrap();

    run();
}

#[tokio::main]
async fn run() {
    let client = Client::with_urls(vec!["127.0.0.1:3001"]);

    let create_table_expr = CreateTableExpr {
        catalog_name: "greptime".to_string(),
        schema_name: "public".to_string(),
        table_name: "test_logical_dist_exec".to_string(),
        desc: String::default(),
        column_defs: vec![
            ColumnDef {
                name: "timestamp".to_string(),
                data_type: ColumnDataType::TimestampMillisecond as i32,
                is_nullable: false,
                default_constraint: vec![],
                semantic_type: SemanticType::Timestamp as i32,
                comment: String::new(),
                ..Default::default()
            },
            ColumnDef {
                name: "key".to_string(),
                data_type: ColumnDataType::Uint64 as i32,
                is_nullable: false,
                default_constraint: vec![],
                semantic_type: SemanticType::Tag as i32,
                comment: String::new(),
                ..Default::default()
            },
            ColumnDef {
                name: "value".to_string(),
                data_type: ColumnDataType::Uint64 as i32,
                is_nullable: false,
                default_constraint: vec![],
                semantic_type: SemanticType::Field as i32,
                comment: String::new(),
                ..Default::default()
            },
        ],
        time_index: "timestamp".to_string(),
        primary_keys: vec!["key".to_string()],
        create_if_not_exists: false,
        table_options: Default::default(),
        table_id: Some(TableId { id: 1024 }),
        engine: MITO_ENGINE.to_string(),
    };

    let db = Database::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, client);
    let result = db.create(create_table_expr).await.unwrap();
    event!(Level::INFO, "create table result: {:#?}", result);

    let logical = mock_logical_plan();
    event!(Level::INFO, "plan size: {:#?}", logical.len());
    let result = db.logical_plan(logical).await.unwrap();

    event!(Level::INFO, "result: {:#?}", result);
}

fn mock_logical_plan() -> Vec<u8> {
    let catalog_name = "greptime".to_string();
    let schema_name = "public".to_string();
    let table_name = "test_logical_dist_exec".to_string();

    let named_table = NamedTable {
        names: vec![catalog_name, schema_name, table_name],
        advanced_extension: None,
    };
    let read_type = ReadType::NamedTable(named_table);

    let read_rel = ReadRel {
        read_type: Some(read_type),
        ..Default::default()
    };

    let mut buf = vec![];
    let rel = Rel {
        rel_type: Some(RelType::Read(Box::new(read_rel))),
    };
    let plan_rel = PlanRel {
        rel_type: Some(PlanRelType::Rel(rel)),
    };
    plan_rel.encode(&mut buf).unwrap();

    buf
}
src/client/examples/stream_ingest.rs (new file, 181 lines)
@@ -0,0 +1,181 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use api::v1::*;
use client::{Client, Database, DEFAULT_SCHEMA_NAME};
use derive_new::new;
use tracing::{error, info};

fn main() {
    tracing::subscriber::set_global_default(tracing_subscriber::FmtSubscriber::builder().finish())
        .unwrap();

    run();
}

#[tokio::main]
async fn run() {
    let greptimedb_endpoint =
        std::env::var("GREPTIMEDB_ENDPOINT").unwrap_or_else(|_| "localhost:4001".to_owned());

    let greptimedb_dbname =
        std::env::var("GREPTIMEDB_DBNAME").unwrap_or_else(|_| DEFAULT_SCHEMA_NAME.to_owned());

    let grpc_client = Client::with_urls(vec![&greptimedb_endpoint]);

    let client = Database::new_with_dbname(greptimedb_dbname, grpc_client);

    let stream_inserter = client.streaming_inserter().unwrap();

    if let Err(e) = stream_inserter
        .insert(vec![to_insert_request(weather_records_1())])
        .await
    {
        error!("Error: {e:?}");
    }

    if let Err(e) = stream_inserter
        .insert(vec![to_insert_request(weather_records_2())])
        .await
    {
        error!("Error: {e:?}");
    }

    let result = stream_inserter.finish().await;

    match result {
        Ok(rows) => {
            info!("Rows written: {rows}");
        }
        Err(e) => {
            error!("Error: {e:?}");
        }
    };
}

#[derive(new)]
struct WeatherRecord {
    timestamp_millis: i64,
    collector: String,
    temperature: f32,
    humidity: i32,
}

fn weather_records_1() -> Vec<WeatherRecord> {
    vec![
        WeatherRecord::new(1686109527000, "c1".to_owned(), 26.4, 15),
        WeatherRecord::new(1686023127000, "c1".to_owned(), 29.3, 20),
        WeatherRecord::new(1685936727000, "c1".to_owned(), 31.8, 13),
        WeatherRecord::new(1686109527000, "c2".to_owned(), 20.4, 67),
        WeatherRecord::new(1686023127000, "c2".to_owned(), 18.0, 74),
        WeatherRecord::new(1685936727000, "c2".to_owned(), 19.2, 81),
    ]
}

fn weather_records_2() -> Vec<WeatherRecord> {
    vec![
        WeatherRecord::new(1686109527001, "c3".to_owned(), 26.4, 15),
        WeatherRecord::new(1686023127002, "c3".to_owned(), 29.3, 20),
        WeatherRecord::new(1685936727003, "c3".to_owned(), 31.8, 13),
        WeatherRecord::new(1686109527004, "c4".to_owned(), 20.4, 67),
        WeatherRecord::new(1686023127005, "c4".to_owned(), 18.0, 74),
        WeatherRecord::new(1685936727006, "c4".to_owned(), 19.2, 81),
    ]
}

/// This function generates some random data and bundles it into an
/// `InsertRequest`.
///
/// Data structure:
///
/// - `ts`: a timestamp column
/// - `collector`: a tag column
/// - `temperature`: a value field of f32
/// - `humidity`: a value field of i32
///
fn to_insert_request(records: Vec<WeatherRecord>) -> InsertRequest {
    // convert records into columns
    let rows = records.len();

    // transpose records into columns
    let (timestamp_millis, collectors, temp, humidity) = records.into_iter().fold(
        (
            Vec::with_capacity(rows),
            Vec::with_capacity(rows),
            Vec::with_capacity(rows),
            Vec::with_capacity(rows),
        ),
        |mut acc, rec| {
            acc.0.push(rec.timestamp_millis);
            acc.1.push(rec.collector);
            acc.2.push(rec.temperature);
            acc.3.push(rec.humidity);

            acc
        },
    );

    let columns = vec![
        // timestamp column: `ts`
        Column {
            column_name: "ts".to_owned(),
            values: Some(column::Values {
                timestamp_millisecond_values: timestamp_millis,
                ..Default::default()
            }),
            semantic_type: SemanticType::Timestamp as i32,
            datatype: ColumnDataType::TimestampMillisecond as i32,
            ..Default::default()
        },
        // tag column: collectors
        Column {
            column_name: "collector".to_owned(),
            values: Some(column::Values {
                string_values: collectors.into_iter().collect(),
                ..Default::default()
            }),
            semantic_type: SemanticType::Tag as i32,
            datatype: ColumnDataType::String as i32,
            ..Default::default()
        },
        // field column: temperature
        Column {
            column_name: "temperature".to_owned(),
            values: Some(column::Values {
                f32_values: temp,
                ..Default::default()
            }),
            semantic_type: SemanticType::Field as i32,
            datatype: ColumnDataType::Float32 as i32,
            ..Default::default()
        },
        // field column: humidity
        Column {
            column_name: "humidity".to_owned(),
            values: Some(column::Values {
                i32_values: humidity,
                ..Default::default()
            }),
            semantic_type: SemanticType::Field as i32,
            datatype: ColumnDataType::Int32 as i32,
            ..Default::default()
        },
    ];

    InsertRequest {
        table_name: "weather_demo".to_owned(),
        columns,
        row_count: rows as u32,
    }
}
@@ -14,6 +14,7 @@

use std::sync::Arc;

use api::v1::greptime_database_client::GreptimeDatabaseClient;
use api::v1::health_check_client::HealthCheckClient;
use api::v1::prometheus_gateway_client::PrometheusGatewayClient;
use api::v1::region::region_client::RegionClient as PbRegionClient;

@@ -27,17 +28,21 @@ use tonic::transport::Channel;
use crate::load_balance::{LoadBalance, Loadbalancer};
use crate::{error, Result};

pub struct FlightClient {
pub(crate) struct DatabaseClient {
    pub(crate) inner: GreptimeDatabaseClient<Channel>,
}

pub(crate) struct FlightClient {
    addr: String,
    client: FlightServiceClient<Channel>,
}

impl FlightClient {
    pub fn addr(&self) -> &str {
    pub(crate) fn addr(&self) -> &str {
        &self.addr
    }

    pub fn mut_inner(&mut self) -> &mut FlightServiceClient<Channel> {
    pub(crate) fn mut_inner(&mut self) -> &mut FlightServiceClient<Channel> {
        &mut self.client
    }
}

@@ -133,7 +138,7 @@ impl Client {
        Ok((addr, channel))
    }

    pub fn max_grpc_recv_message_size(&self) -> usize {
    fn max_grpc_recv_message_size(&self) -> usize {
        self.inner
            .channel_manager
            .config()

@@ -141,7 +146,7 @@ impl Client {
            .as_bytes() as usize
    }

    pub fn max_grpc_send_message_size(&self) -> usize {
    fn max_grpc_send_message_size(&self) -> usize {
        self.inner
            .channel_manager
            .config()

@@ -149,7 +154,7 @@ impl Client {
            .as_bytes() as usize
    }

    pub fn make_flight_client(&self) -> Result<FlightClient> {
    pub(crate) fn make_flight_client(&self) -> Result<FlightClient> {
        let (addr, channel) = self.find_channel()?;
        Ok(FlightClient {
            addr,

@@ -159,6 +164,15 @@ impl Client {
        })
    }

    pub(crate) fn make_database_client(&self) -> Result<DatabaseClient> {
        let (_, channel) = self.find_channel()?;
        Ok(DatabaseClient {
            inner: GreptimeDatabaseClient::new(channel)
                .max_decoding_message_size(self.max_grpc_recv_message_size())
                .max_encoding_message_size(self.max_grpc_send_message_size()),
        })
    }

    pub(crate) fn raw_region_client(&self) -> Result<PbRegionClient<Channel>> {
        let (_, channel) = self.find_channel()?;
        Ok(PbRegionClient::new(channel)

@@ -17,7 +17,7 @@ use std::sync::Arc;
use std::time::Duration;

use common_grpc::channel_manager::{ChannelConfig, ChannelManager};
use common_meta::node_manager::{DatanodeRef, FlownodeRef, NodeManager};
use common_meta::datanode_manager::{Datanode, DatanodeManager};
use common_meta::peer::Peer;
use moka::future::{Cache, CacheBuilder};

@@ -44,17 +44,12 @@ impl Debug for DatanodeClients {
}

#[async_trait::async_trait]
impl NodeManager for DatanodeClients {
    async fn datanode(&self, datanode: &Peer) -> DatanodeRef {
impl DatanodeManager for DatanodeClients {
    async fn datanode(&self, datanode: &Peer) -> Arc<dyn Datanode> {
        let client = self.get_client(datanode).await;

        Arc::new(RegionRequester::new(client))
    }

    async fn flownode(&self, _node: &Peer) -> FlownodeRef {
        // TODO(weny): Support it.
        unimplemented!()
    }
}

impl DatanodeClients {
@@ -14,28 +14,28 @@

use api::v1::auth_header::AuthScheme;
use api::v1::ddl_request::Expr as DdlExpr;
use api::v1::greptime_database_client::GreptimeDatabaseClient;
use api::v1::greptime_request::Request;
use api::v1::query_request::Query;
use api::v1::{
    AlterExpr, AuthHeader, CreateTableExpr, DdlRequest, GreptimeRequest, InsertRequests,
    QueryRequest, RequestHeader,
    AlterExpr, AuthHeader, CreateTableExpr, DdlRequest, DeleteRequests, DropTableExpr,
    GreptimeRequest, InsertRequests, PromRangeQuery, QueryRequest, RequestHeader,
    RowInsertRequests, TruncateTableExpr,
};
use arrow_flight::Ticket;
use async_stream::stream;
use client::error::{ConvertFlightDataSnafu, Error, IllegalFlightMessagesSnafu, ServerSnafu};
use client::{from_grpc_response, Client, Result};
use common_error::ext::{BoxedError, ErrorExt};
use common_grpc::flight::{FlightDecoder, FlightMessage};
use common_query::Output;
use common_recordbatch::error::ExternalSnafu;
use common_recordbatch::RecordBatchStreamWrapper;
use common_telemetry::error;
use common_telemetry::logging;
use common_telemetry::tracing_context::W3cTrace;
use futures_util::StreamExt;
use prost::Message;
use snafu::{ensure, ResultExt};
use tonic::transport::Channel;

use crate::error::{ConvertFlightDataSnafu, Error, IllegalFlightMessagesSnafu, ServerSnafu};
use crate::{error, from_grpc_response, metrics, Client, Result, StreamInserter};

pub const DEFAULT_LOOKBACK_STRING: &str = "5m";

@@ -57,19 +57,6 @@ pub struct Database {
    ctx: FlightContext,
}

pub struct DatabaseClient {
    pub inner: GreptimeDatabaseClient<Channel>,
}

fn make_database_client(client: &Client) -> Result<DatabaseClient> {
    let (_, channel) = client.find_channel()?;
    Ok(DatabaseClient {
        inner: GreptimeDatabaseClient::new(channel)
            .max_decoding_message_size(client.max_grpc_recv_message_size())
            .max_encoding_message_size(client.max_grpc_send_message_size()),
    })
}

impl Database {
    /// Create database service client using catalog and schema
    pub fn new(catalog: impl Into<String>, schema: impl Into<String>, client: Client) -> Self {

@@ -101,14 +88,34 @@ impl Database {
        }
    }

    pub fn catalog(&self) -> &String {
        &self.catalog
    }

    pub fn set_catalog(&mut self, catalog: impl Into<String>) {
        self.catalog = catalog.into();
    }

    pub fn schema(&self) -> &String {
        &self.schema
    }

    pub fn set_schema(&mut self, schema: impl Into<String>) {
        self.schema = schema.into();
    }

    pub fn dbname(&self) -> &String {
        &self.dbname
    }

    pub fn set_dbname(&mut self, dbname: impl Into<String>) {
        self.dbname = dbname.into();
    }

    pub fn timezone(&self) -> &String {
        &self.timezone
    }

    pub fn set_timezone(&mut self, timezone: impl Into<String>) {
        self.timezone = timezone.into();
    }

@@ -120,11 +127,42 @@ impl Database {
    }

    pub async fn insert(&self, requests: InsertRequests) -> Result<u32> {
        let _timer = metrics::METRIC_GRPC_INSERT.start_timer();
        self.handle(Request::Inserts(requests)).await
    }

    pub async fn row_insert(&self, requests: RowInsertRequests) -> Result<u32> {
        let _timer = metrics::METRIC_GRPC_INSERT.start_timer();
        self.handle(Request::RowInserts(requests)).await
    }

    pub fn streaming_inserter(&self) -> Result<StreamInserter> {
        self.streaming_inserter_with_channel_size(65536)
    }

    pub fn streaming_inserter_with_channel_size(
        &self,
        channel_size: usize,
    ) -> Result<StreamInserter> {
        let client = self.client.make_database_client()?.inner;

        let stream_inserter = StreamInserter::new(
            client,
            self.dbname().to_string(),
            self.ctx.auth_header.clone(),
            channel_size,
        );

        Ok(stream_inserter)
    }

    pub async fn delete(&self, request: DeleteRequests) -> Result<u32> {
        let _timer = metrics::METRIC_GRPC_DELETE.start_timer();
        self.handle(Request::Deletes(request)).await
    }

    async fn handle(&self, request: Request) -> Result<u32> {
        let mut client = make_database_client(&self.client)?.inner;
        let mut client = self.client.make_database_client()?.inner;
        let request = self.to_rpc_request(request);
        let response = client.handle(request).await?.into_inner();
        from_grpc_response(response)

@@ -150,13 +188,43 @@ impl Database {
    where
        S: AsRef<str>,
    {
        let _timer = metrics::METRIC_GRPC_SQL.start_timer();
        self.do_get(Request::Query(QueryRequest {
            query: Some(Query::Sql(sql.as_ref().to_string())),
        }))
        .await
    }

    pub async fn logical_plan(&self, logical_plan: Vec<u8>) -> Result<Output> {
        let _timer = metrics::METRIC_GRPC_LOGICAL_PLAN.start_timer();
        self.do_get(Request::Query(QueryRequest {
            query: Some(Query::LogicalPlan(logical_plan)),
        }))
        .await
    }

    pub async fn prom_range_query(
        &self,
        promql: &str,
        start: &str,
        end: &str,
        step: &str,
    ) -> Result<Output> {
        let _timer = metrics::METRIC_GRPC_PROMQL_RANGE_QUERY.start_timer();
        self.do_get(Request::Query(QueryRequest {
            query: Some(Query::PromRangeQuery(PromRangeQuery {
                query: promql.to_string(),
                start: start.to_string(),
                end: end.to_string(),
                step: step.to_string(),
                lookback: DEFAULT_LOOKBACK_STRING.to_string(),
            })),
        }))
        .await
    }

    pub async fn create(&self, expr: CreateTableExpr) -> Result<Output> {
        let _timer = metrics::METRIC_GRPC_CREATE_TABLE.start_timer();
        self.do_get(Request::Ddl(DdlRequest {
            expr: Some(DdlExpr::CreateTable(expr)),
        }))

@@ -164,13 +232,32 @@ impl Database {
    }

    pub async fn alter(&self, expr: AlterExpr) -> Result<Output> {
        let _timer = metrics::METRIC_GRPC_ALTER.start_timer();
        self.do_get(Request::Ddl(DdlRequest {
            expr: Some(DdlExpr::Alter(expr)),
        }))
        .await
    }

    pub async fn drop_table(&self, expr: DropTableExpr) -> Result<Output> {
        let _timer = metrics::METRIC_GRPC_DROP_TABLE.start_timer();
        self.do_get(Request::Ddl(DdlRequest {
            expr: Some(DdlExpr::DropTable(expr)),
        }))
        .await
    }

    pub async fn truncate_table(&self, expr: TruncateTableExpr) -> Result<Output> {
        let _timer = metrics::METRIC_GRPC_TRUNCATE_TABLE.start_timer();
        self.do_get(Request::Ddl(DdlRequest {
            expr: Some(DdlExpr::TruncateTable(expr)),
        }))
        .await
    }

    async fn do_get(&self, request: Request) -> Result<Output> {
        // FIXME(paomian): should be added some labels for metrics
        let _timer = metrics::METRIC_GRPC_DO_GET.start_timer();
        let request = self.to_rpc_request(request);
        let request = Ticket {
            ticket: request.encode_to_vec().into(),

@@ -180,7 +267,7 @@ impl Database {

        let response = client.mut_inner().do_get(request).await.map_err(|e| {
            let tonic_code = e.code();
            let e: Error = e.into();
            let e: error::Error = e.into();
            let code = e.status_code();
            let msg = e.to_string();
            let error = Error::FlightGet {

@@ -188,7 +275,7 @@ impl Database {
                addr: client.addr().to_string(),
                source: BoxedError::new(ServerSnafu { code, msg }.build()),
            };
            error!(
            logging::error!(
                "Failed to do Flight get, addr: {}, code: {}, source: {:?}",
                client.addr(),
                tonic_code,

@@ -263,7 +350,7 @@ impl Database {
}

#[derive(Default, Debug, Clone)]
struct FlightContext {
pub struct FlightContext {
    auth_header: Option<AuthHeader>,
}

@@ -271,14 +358,8 @@ struct FlightContext {
mod tests {
    use api::v1::auth_header::AuthScheme;
    use api::v1::{AuthHeader, Basic};
    use clap::Parser;
    use client::Client;
    use cmd::error::Result as CmdResult;
    use cmd::options::{GlobalOptions, Options};
    use cmd::{cli, standalone, App};
    use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};

    use super::{Database, FlightContext};
    use crate::database::FlightContext;

    #[test]
    fn test_flight_ctx() {

@@ -301,72 +382,4 @@ mod tests {
            })
        ))
    }

    #[tokio::test(flavor = "multi_thread")]
    async fn test_export_create_table_with_quoted_names() -> CmdResult<()> {
        let output_dir = tempfile::tempdir().unwrap();

        let standalone = standalone::Command::parse_from([
            "standalone",
            "start",
            "--data-home",
            &*output_dir.path().to_string_lossy(),
        ]);
        let Options::Standalone(standalone_opts) =
            standalone.load_options(&GlobalOptions::default())?
        else {
            unreachable!()
        };
        let mut instance = standalone.build(*standalone_opts).await?;
        instance.start().await?;

        let client = Client::with_urls(["127.0.0.1:4001"]);
        let database = Database::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, client);
        database
            .sql(r#"CREATE DATABASE "cli.export.create_table";"#)
            .await
            .unwrap();
        database
            .sql(
                r#"CREATE TABLE "cli.export.create_table"."a.b.c"(
ts TIMESTAMP,
TIME INDEX (ts)
) engine=mito;
"#,
            )
            .await
            .unwrap();

        let output_dir = tempfile::tempdir().unwrap();
        let cli = cli::Command::parse_from([
            "cli",
            "export",
            "--addr",
            "127.0.0.1:4000",
            "--output-dir",
            &*output_dir.path().to_string_lossy(),
            "--target",
            "create-table",
        ]);
        let mut cli_app = cli.build().await?;
        cli_app.start().await?;

        instance.stop().await?;

        let output_file = output_dir
            .path()
            .join("greptime-cli.export.create_table.sql");
        let res = std::fs::read_to_string(output_file).unwrap();
        let expect = r#"CREATE TABLE IF NOT EXISTS "a.b.c" (
"ts" TIMESTAMP(3) NOT NULL,
TIME INDEX ("ts")
)

ENGINE=mito
;
"#;
        assert_eq!(res.trim(), expect.trim());

        Ok(())
    }
}
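For reference, a sketch of calling the newly added prom_range_query on a Database handle db (the metric, timestamps and step are placeholders; start, end and step take the same string forms as the PromQL HTTP range API):

    let _output = db
        .prom_range_query(
            "sum(rate(process_cpu_seconds_total[5m]))", // assumed metric
            "1690000000",                               // start
            "1690003600",                               // end
            "30s",                                      // step
        )
        .await?;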
@@ -14,10 +14,12 @@

mod client;
pub mod client_manager;
mod database;
pub mod error;
pub mod load_balance;
mod metrics;
pub mod region;
mod stream_insert;

pub use api;
use api::v1::greptime_response::Response;

@@ -29,7 +31,9 @@ pub use common_recordbatch::{RecordBatches, SendableRecordBatchStream};
use snafu::OptionExt;

pub use self::client::Client;
pub use self::database::Database;
pub use self::error::{Error, Result};
pub use self::stream_insert::StreamInserter;
use crate::error::{IllegalDatabaseResponseSnafu, ServerSnafu};

pub fn from_grpc_response(response: GreptimeResponse) -> Result<u32> {

@@ -24,8 +24,8 @@ use async_trait::async_trait;
use common_error::ext::{BoxedError, ErrorExt};
use common_error::status_code::StatusCode;
use common_grpc::flight::{FlightDecoder, FlightMessage};
use common_meta::datanode_manager::Datanode;
use common_meta::error::{self as meta_error, Result as MetaResult};
use common_meta::node_manager::Datanode;
use common_recordbatch::error::ExternalSnafu;
use common_recordbatch::{RecordBatchStreamWrapper, SendableRecordBatchStream};
use common_telemetry::error;
src/client/src/stream_insert.rs (new file, 118 lines)
@@ -0,0 +1,118 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use api::v1::greptime_database_client::GreptimeDatabaseClient;
use api::v1::greptime_request::Request;
use api::v1::{
    AuthHeader, GreptimeRequest, GreptimeResponse, InsertRequest, InsertRequests, RequestHeader,
    RowInsertRequest, RowInsertRequests,
};
use tokio::sync::mpsc;
use tokio::task::JoinHandle;
use tokio_stream::wrappers::ReceiverStream;
use tonic::transport::Channel;
use tonic::{Response, Status};

use crate::error::{self, Result};
use crate::from_grpc_response;

/// A structure that provides some methods for streaming data insert.
///
/// [`StreamInserter`] cannot be constructed via the `StreamInserter::new` method.
/// You can use the following way to obtain [`StreamInserter`].
///
/// ```ignore
/// let grpc_client = Client::with_urls(vec!["127.0.0.1:4002"]);
/// let client = Database::new_with_dbname("db_name", grpc_client);
/// let stream_inserter = client.streaming_inserter().unwrap();
/// ```
///
/// If you want to see a concrete usage example, please see
/// [stream_ingest.rs](https://github.com/GreptimeTeam/greptimedb/blob/main/src/client/examples/stream_ingest.rs).
pub struct StreamInserter {
    sender: mpsc::Sender<GreptimeRequest>,

    auth_header: Option<AuthHeader>,

    dbname: String,

    join: JoinHandle<std::result::Result<Response<GreptimeResponse>, Status>>,
}

impl StreamInserter {
    pub(crate) fn new(
        mut client: GreptimeDatabaseClient<Channel>,
        dbname: String,
        auth_header: Option<AuthHeader>,
        channel_size: usize,
    ) -> StreamInserter {
        let (send, recv) = tokio::sync::mpsc::channel(channel_size);

        let join: JoinHandle<std::result::Result<Response<GreptimeResponse>, Status>> =
            tokio::spawn(async move {
                let recv_stream = ReceiverStream::new(recv);
                client.handle_requests(recv_stream).await
            });

        StreamInserter {
            sender: send,
            auth_header,
            dbname,
            join,
        }
    }

    pub async fn insert(&self, requests: Vec<InsertRequest>) -> Result<()> {
        let inserts = InsertRequests { inserts: requests };
        let request = self.to_rpc_request(Request::Inserts(inserts));

        self.sender.send(request).await.map_err(|e| {
            error::ClientStreamingSnafu {
                err_msg: e.to_string(),
            }
            .build()
        })
    }

    pub async fn row_insert(&self, requests: Vec<RowInsertRequest>) -> Result<()> {
        let inserts = RowInsertRequests { inserts: requests };
        let request = self.to_rpc_request(Request::RowInserts(inserts));

        self.sender.send(request).await.map_err(|e| {
            error::ClientStreamingSnafu {
                err_msg: e.to_string(),
            }
            .build()
        })
    }

    pub async fn finish(self) -> Result<u32> {
        drop(self.sender);

        let response = self.join.await.unwrap()?;
        let response = response.into_inner();
        from_grpc_response(response)
    }

    fn to_rpc_request(&self, request: Request) -> GreptimeRequest {
        GreptimeRequest {
            header: Some(RequestHeader {
                authorization: self.auth_header.clone(),
                dbname: self.dbname.clone(),
                ..Default::default()
            }),
            request: Some(request),
        }
    }
}
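A compact sketch of the full streaming-insert flow built on this type, given a Database handle db and some prepared requests (placeholders; see the stream_ingest.rs example above for a complete program):

    let stream_inserter = db.streaming_inserter()?;
    // Batches are queued onto the bounded channel and shipped by the spawned task.
    stream_inserter.insert(vec![insert_request_a]).await?;
    stream_inserter.insert(vec![insert_request_b]).await?;
    // Dropping the sender closes the request stream; `finish` then awaits the
    // server's single response and returns the number of affected rows.
    let _affected_rows = stream_inserter.finish().await?;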
@@ -18,7 +18,6 @@ workspace = true
[dependencies]
async-trait.workspace = true
auth.workspace = true
base64.workspace = true
catalog.workspace = true
chrono.workspace = true
clap.workspace = true

@@ -37,7 +36,6 @@ common-telemetry = { workspace = true, features = [
    "deadlock_detection",
] }
common-time.workspace = true
common-version.workspace = true
common-wal.workspace = true
config = "0.13"
datanode.workspace = true

@@ -45,7 +43,6 @@ datatypes.workspace = true
either = "1.8"
etcd-client.workspace = true
file-engine.workspace = true
flow.workspace = true
frontend.workspace = true
futures.workspace = true
human-panic = "1.2.2"

@@ -60,7 +57,6 @@ prost.workspace = true
query.workspace = true
rand.workspace = true
regex.workspace = true
reqwest.workspace = true
rustyline = "10.1"
serde.workspace = true
serde_json.workspace = true
@@ -16,42 +16,23 @@

use std::fmt;

use clap::{Parser, Subcommand};
use clap::{FromArgMatches, Parser, Subcommand};
use cmd::error::Result;
use cmd::options::{GlobalOptions, Options};
use cmd::{cli, datanode, frontend, log_versions, metasrv, standalone, start_app, App};
use common_version::{short_version, version};
use cmd::options::{CliOptions, Options};
use cmd::{
    cli, datanode, frontend, greptimedb_cli, log_versions, metasrv, standalone, start_app, App,
};

#[derive(Parser)]
#[command(name = "greptime", author, version, long_version = version!(), about)]
#[command(propagate_version = true)]
pub(crate) struct Command {
    #[clap(subcommand)]
    pub(crate) subcmd: SubCommand,

    #[clap(flatten)]
    pub(crate) global_options: GlobalOptions,
}

#[derive(Subcommand)]
enum SubCommand {
    /// Start datanode service.
    #[clap(name = "datanode")]
    Datanode(datanode::Command),

    /// Start frontend service.
    #[clap(name = "frontend")]
    Frontend(frontend::Command),

    /// Start metasrv service.
    #[clap(name = "metasrv")]
    Metasrv(metasrv::Command),

    /// Run greptimedb as a standalone service.
    #[clap(name = "standalone")]
    Standalone(standalone::Command),

    /// Execute the cli tools for greptimedb.
    #[clap(name = "cli")]
    Cli(cli::Command),
}

@@ -85,13 +66,13 @@ impl SubCommand {
        Ok(app)
    }

    fn load_options(&self, global_options: &GlobalOptions) -> Result<Options> {
    fn load_options(&self, cli_options: &CliOptions) -> Result<Options> {
        match self {
            SubCommand::Datanode(cmd) => cmd.load_options(global_options),
            SubCommand::Frontend(cmd) => cmd.load_options(global_options),
            SubCommand::Metasrv(cmd) => cmd.load_options(global_options),
            SubCommand::Standalone(cmd) => cmd.load_options(global_options),
            SubCommand::Cli(cmd) => cmd.load_options(global_options),
            SubCommand::Datanode(cmd) => cmd.load_options(cli_options),
            SubCommand::Frontend(cmd) => cmd.load_options(cli_options),
            SubCommand::Metasrv(cmd) => cmd.load_options(cli_options),
            SubCommand::Standalone(cmd) => cmd.load_options(cli_options),
            SubCommand::Cli(cmd) => cmd.load_options(cli_options),
        }
    }
}

@@ -114,32 +95,6 @@ static ALLOC: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc;

#[tokio::main]
async fn main() -> Result<()> {
    setup_human_panic();
    start(Command::parse()).await
}

async fn start(cli: Command) -> Result<()> {
    let subcmd = cli.subcmd;

    let app_name = subcmd.to_string();

    let opts = subcmd.load_options(&cli.global_options)?;

    let _guard = common_telemetry::init_global_logging(
        &app_name,
        opts.logging_options(),
        cli.global_options.tracing_options(),
        opts.node_id(),
    );

    log_versions(version!(), short_version!());

    let app = subcmd.build(opts).await?;

    start_app(app).await
}

fn setup_human_panic() {
    let metadata = human_panic::Metadata {
        version: env!("CARGO_PKG_VERSION").into(),
        name: "GreptimeDB".into(),

@@ -149,4 +104,34 @@ fn setup_human_panic() {
    human_panic::setup_panic!(metadata);

    common_telemetry::set_panic_hook();

    let cli = greptimedb_cli();

    let cli = SubCommand::augment_subcommands(cli);

    let args = cli.get_matches();

    let subcmd = match SubCommand::from_arg_matches(&args) {
        Ok(subcmd) => subcmd,
        Err(e) => e.exit(),
    };

    let app_name = subcmd.to_string();

    let cli_options = CliOptions::new(&args);

    let opts = subcmd.load_options(&cli_options)?;

    let _guard = common_telemetry::init_global_logging(
        &app_name,
        opts.logging_options(),
        cli_options.tracing_options(),
        opts.node_id(),
    );

    log_versions();

    let app = subcmd.build(opts).await?;

    start_app(app).await
}
@@ -22,7 +22,7 @@ mod helper;

// Wait for https://github.com/GreptimeTeam/greptimedb/issues/2373
#[allow(unused)]
// mod repl;
mod repl;
// TODO(weny): Removes it
#[allow(deprecated)]
mod upgrade;

@@ -31,12 +31,12 @@ use async_trait::async_trait;
use bench::BenchTableMetadataCommand;
use clap::Parser;
use common_telemetry::logging::LoggingOptions;
// pub use repl::Repl;
pub use repl::Repl;
use upgrade::UpgradeCommand;

use self::export::ExportCommand;
use crate::error::Result;
use crate::options::{GlobalOptions, Options};
use crate::options::{CliOptions, Options};
use crate::App;

#[async_trait]

@@ -80,14 +80,14 @@ impl Command {
        self.cmd.build().await
    }

    pub fn load_options(&self, global_options: &GlobalOptions) -> Result<Options> {
    pub fn load_options(&self, cli_options: &CliOptions) -> Result<Options> {
        let mut logging_opts = LoggingOptions::default();

        if let Some(dir) = &global_options.log_dir {
        if let Some(dir) = &cli_options.log_dir {
            logging_opts.dir.clone_from(dir);
        }

        logging_opts.level.clone_from(&global_options.log_level);
        logging_opts.level.clone_from(&cli_options.log_level);

        Ok(Options::Cli(Box::new(logging_opts)))
    }

@@ -16,14 +16,14 @@ use std::path::Path;
use std::sync::Arc;

use async_trait::async_trait;
use base64::engine::general_purpose;
use base64::Engine;
use clap::{Parser, ValueEnum};
use client::DEFAULT_SCHEMA_NAME;
use client::api::v1::auth_header::AuthScheme;
use client::api::v1::Basic;
use client::{Client, Database, OutputData, DEFAULT_SCHEMA_NAME};
use common_recordbatch::util::collect;
use common_telemetry::{debug, error, info, warn};
use serde_json::Value;
use servers::http::greptime_result_v1::GreptimedbV1Response;
use servers::http::GreptimeQueryOutput;
use datatypes::scalars::ScalarVector;
use datatypes::vectors::{StringVector, Vector};
use snafu::{OptionExt, ResultExt};
use tokio::fs::File;
use tokio::io::{AsyncWriteExt, BufWriter};

@@ -31,8 +31,9 @@ use tokio::sync::Semaphore;

use crate::cli::{Instance, Tool};
use crate::error::{
    EmptyResultSnafu, Error, FileIoSnafu, HttpQuerySqlSnafu, InvalidDatabaseNameSnafu, Result,
    SerdeJsonSnafu,
    CollectRecordBatchesSnafu, ConnectServerSnafu, EmptyResultSnafu, Error, FileIoSnafu,
    IllegalConfigSnafu, InvalidDatabaseNameSnafu, NotDataFromOutputSnafu, RequestDatabaseSnafu,
    Result,
};

type TableReference = (String, String, String);

@@ -79,75 +80,51 @@ pub struct ExportCommand {

impl ExportCommand {
    pub async fn build(&self) -> Result<Instance> {
        let client = Client::with_urls([self.addr.clone()]);
        client
            .health_check()
            .await
            .with_context(|_| ConnectServerSnafu {
                addr: self.addr.clone(),
            })?;
        let (catalog, schema) = split_database(&self.database)?;
        let mut database_client = Database::new(
            catalog.clone(),
            schema.clone().unwrap_or(DEFAULT_SCHEMA_NAME.to_string()),
            client,
        );

        let auth_header = if let Some(basic) = &self.auth_basic {
            let encoded = general_purpose::STANDARD.encode(basic);
            Some(format!("basic {}", encoded))
        } else {
            None
        };
        if let Some(auth_basic) = &self.auth_basic {
            let (username, password) = auth_basic.split_once(':').context(IllegalConfigSnafu {
                msg: "auth_basic cannot be split by ':'".to_string(),
            })?;
            database_client.set_auth(AuthScheme::Basic(Basic {
                username: username.to_string(),
                password: password.to_string(),
            }));
        }

        Ok(Instance::new(Box::new(Export {
            addr: self.addr.clone(),
            client: database_client,
            catalog,
            schema,
            output_dir: self.output_dir.clone(),
            parallelism: self.export_jobs,
            target: self.target.clone(),
            auth_header,
        })))
    }
}

pub struct Export {
    addr: String,
    client: Database,
    catalog: String,
    schema: Option<String>,
    output_dir: String,
    parallelism: usize,
    target: ExportTarget,
    auth_header: Option<String>,
}

impl Export {
    /// Execute one single sql query.
    async fn sql(&self, sql: &str) -> Result<Option<Vec<Vec<Value>>>> {
        let url = format!(
            "http://{}/v1/sql?db={}-{}&sql={}",
            self.addr,
            self.catalog,
            self.schema.as_deref().unwrap_or(DEFAULT_SCHEMA_NAME),
            sql
        );

        let mut request = reqwest::Client::new()
            .get(&url)
            .header("Content-Type", "application/x-www-form-urlencoded");
        if let Some(ref auth) = self.auth_header {
            request = request.header("Authorization", auth);
        }

        let response = request.send().await.with_context(|_| HttpQuerySqlSnafu {
            reason: format!("bad url: {}", url),
        })?;
        let response = response
            .error_for_status()
            .with_context(|_| HttpQuerySqlSnafu {
                reason: format!("query failed: {}", sql),
            })?;

        let text = response.text().await.with_context(|_| HttpQuerySqlSnafu {
            reason: "cannot get response text".to_string(),
|
||||
})?;
|
||||
|
||||
let body = serde_json::from_str::<GreptimedbV1Response>(&text).context(SerdeJsonSnafu)?;
|
||||
Ok(body.output().first().and_then(|output| match output {
|
||||
GreptimeQueryOutput::Records(records) => Some(records.rows().clone()),
|
||||
GreptimeQueryOutput::AffectedRows(_) => None,
|
||||
}))
|
||||
}
|
||||
|
||||
/// Iterate over all db names.
|
||||
///
|
||||
/// Newbie: `db_name` is catalog + schema.
|
||||
@@ -155,19 +132,35 @@ impl Export {
|
||||
if let Some(schema) = &self.schema {
|
||||
Ok(vec![(self.catalog.clone(), schema.clone())])
|
||||
} else {
|
||||
let result = self.sql("show databases").await?;
|
||||
let Some(records) = result else {
|
||||
EmptyResultSnafu.fail()?
|
||||
let mut client = self.client.clone();
|
||||
client.set_catalog(self.catalog.clone());
|
||||
let result =
|
||||
client
|
||||
.sql("show databases")
|
||||
.await
|
||||
.with_context(|_| RequestDatabaseSnafu {
|
||||
sql: "show databases".to_string(),
|
||||
})?;
|
||||
let OutputData::Stream(stream) = result.data else {
|
||||
NotDataFromOutputSnafu.fail()?
|
||||
};
|
||||
let mut result = Vec::with_capacity(records.len());
|
||||
for value in records {
|
||||
let serde_json::Value::String(schema) = &value[0] else {
|
||||
unreachable!()
|
||||
};
|
||||
let record_batch = collect(stream)
|
||||
.await
|
||||
.context(CollectRecordBatchesSnafu)?
|
||||
.pop()
|
||||
.context(EmptyResultSnafu)?;
|
||||
let schemas = record_batch
|
||||
.column(0)
|
||||
.as_any()
|
||||
.downcast_ref::<StringVector>()
|
||||
.unwrap();
|
||||
let mut result = Vec::with_capacity(schemas.len());
|
||||
for i in 0..schemas.len() {
|
||||
let schema = schemas.get_data(i).unwrap().to_owned();
|
||||
if schema == common_catalog::consts::INFORMATION_SCHEMA_NAME {
|
||||
continue;
|
||||
}
|
||||
result.push((self.catalog.clone(), schema.clone()));
|
||||
result.push((self.catalog.clone(), schema));
|
||||
}
|
||||
Ok(result)
|
||||
}
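
Both `databases()` above and `get_table_list()` in the next hunk repeat the same downcast-and-collect pattern on a record batch column. A minimal sketch of that pattern as a standalone helper, assuming only the `common_recordbatch` and `datatypes` APIs already used in this diff (the helper itself is illustrative, not part of the change):

// Hypothetical helper; mirrors the column handling in this file.
fn string_column(record_batch: &common_recordbatch::RecordBatch, idx: usize) -> Vec<String> {
    let column = record_batch
        .column(idx)
        .as_any()
        .downcast_ref::<StringVector>()
        .unwrap(); // same panic-on-type-mismatch behavior as the hunks above
    (0..column.len())
        .map(|i| column.get_data(i).unwrap().to_owned())
        .collect()
}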
@@ -179,30 +172,54 @@ impl Export {

        // TODO: SQL injection hurts
        let sql = format!(
            "select table_catalog, table_schema, table_name from \
            information_schema.tables where table_type = \'BASE TABLE\' \
            information_schema.tables where table_type = \'BASE TABLE\'\
            and table_catalog = \'{catalog}\' and table_schema = \'{schema}\'",
        );
        let result = self.sql(&sql).await?;
        let Some(records) = result else {
            EmptyResultSnafu.fail()?
        let mut client = self.client.clone();
        client.set_catalog(catalog);
        client.set_schema(schema);
        let result = client
            .sql(&sql)
            .await
            .with_context(|_| RequestDatabaseSnafu { sql })?;
        let OutputData::Stream(stream) = result.data else {
            NotDataFromOutputSnafu.fail()?
        };
        let Some(record_batch) = collect(stream)
            .await
            .context(CollectRecordBatchesSnafu)?
            .pop()
        else {
            return Ok(vec![]);
        };

        debug!("Fetched table list: {:?}", records);
        debug!("Fetched table list: {}", record_batch.pretty_print());

        if records.is_empty() {
        if record_batch.num_rows() == 0 {
            return Ok(vec![]);
        }

        let mut result = Vec::with_capacity(records.len());
        for value in records {
            let mut t = Vec::with_capacity(3);
            for v in &value {
                let serde_json::Value::String(value) = v else {
                    unreachable!()
                };
                t.push(value);
            }
            result.push((t[0].clone(), t[1].clone(), t[2].clone()));
        let mut result = Vec::with_capacity(record_batch.num_rows());
        let catalog_column = record_batch
            .column(0)
            .as_any()
            .downcast_ref::<StringVector>()
            .unwrap();
        let schema_column = record_batch
            .column(1)
            .as_any()
            .downcast_ref::<StringVector>()
            .unwrap();
        let table_column = record_batch
            .column(2)
            .as_any()
            .downcast_ref::<StringVector>()
            .unwrap();
        for i in 0..record_batch.num_rows() {
            let catalog = catalog_column.get_data(i).unwrap().to_owned();
            let schema = schema_column.get_data(i).unwrap().to_owned();
            let table = table_column.get_data(i).unwrap().to_owned();
            result.push((catalog, schema, table));
        }

        Ok(result)
@@ -213,15 +230,30 @@ impl Export {
            r#"show create table "{}"."{}"."{}""#,
            catalog, schema, table
        );
        let result = self.sql(&sql).await?;
        let Some(records) = result else {
            EmptyResultSnafu.fail()?
        };
        let serde_json::Value::String(create_table) = &records[0][1] else {
            unreachable!()
        let mut client = self.client.clone();
        client.set_catalog(catalog);
        client.set_schema(schema);
        let result = client
            .sql(&sql)
            .await
            .with_context(|_| RequestDatabaseSnafu { sql })?;
        let OutputData::Stream(stream) = result.data else {
            NotDataFromOutputSnafu.fail()?
        };
        let record_batch = collect(stream)
            .await
            .context(CollectRecordBatchesSnafu)?
            .pop()
            .context(EmptyResultSnafu)?;
        let create_table = record_batch
            .column(1)
            .as_any()
            .downcast_ref::<StringVector>()
            .unwrap()
            .get_data(0)
            .unwrap();

        Ok(format!("{};\n", create_table))
        Ok(format!("{create_table};\n"))
    }

    async fn export_create_table(&self) -> Result<()> {
@@ -289,13 +321,20 @@ impl Export {
            .context(FileIoSnafu)?;
        let output_dir = Path::new(&self.output_dir).join(format!("{catalog}-{schema}/"));

        let mut client = self.client.clone();
        client.set_catalog(catalog.clone());
        client.set_schema(schema.clone());

        // copy database to
        let sql = format!(
            "copy database {} to '{}' with (format='parquet');",
            schema,
            output_dir.to_str().unwrap()
        );
        self.sql(&sql).await?;
        client
            .sql(sql.clone())
            .await
            .context(RequestDatabaseSnafu { sql })?;
        info!("finished exporting {catalog}.{schema} data");

        // export copy from sql
@@ -381,3 +420,84 @@ fn split_database(database: &str) -> Result<(String, Option<String>)> {
        Ok((catalog.to_string(), Some(schema.to_string())))
    }
}
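
For orientation, `split_database` is what turns the `--database` flag into the `(catalog, schema)` pair used throughout this file; the tail shown above is the hyphen-separated case. A hedged sketch of that case (other branches of the function are elided from this hunk):

// Sketch of the case visible in the hunk above: "catalog-schema" input.
let (catalog, schema) = split_database("greptime-public")?;
assert_eq!(catalog, "greptime");
assert_eq!(schema, Some("public".to_string()));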

#[cfg(test)]
mod tests {
    use clap::Parser;
    use client::{Client, Database};
    use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};

    use crate::error::Result;
    use crate::options::{CliOptions, Options};
    use crate::{cli, standalone, App};

    #[tokio::test(flavor = "multi_thread")]
    async fn test_export_create_table_with_quoted_names() -> Result<()> {
        let output_dir = tempfile::tempdir().unwrap();

        let standalone = standalone::Command::parse_from([
            "standalone",
            "start",
            "--data-home",
            &*output_dir.path().to_string_lossy(),
        ]);
        let Options::Standalone(standalone_opts) =
            standalone.load_options(&CliOptions::default())?
        else {
            unreachable!()
        };
        let mut instance = standalone.build(*standalone_opts).await?;
        instance.start().await?;

        let client = Client::with_urls(["127.0.0.1:4001"]);
        let database = Database::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, client);
        database
            .sql(r#"CREATE DATABASE "cli.export.create_table";"#)
            .await
            .unwrap();
        database
            .sql(
                r#"CREATE TABLE "cli.export.create_table"."a.b.c"(
ts TIMESTAMP,
TIME INDEX (ts)
) engine=mito;
"#,
            )
            .await
            .unwrap();

        let output_dir = tempfile::tempdir().unwrap();
        let cli = cli::Command::parse_from([
            "cli",
            "export",
            "--addr",
            "127.0.0.1:4001",
            "--output-dir",
            &*output_dir.path().to_string_lossy(),
            "--target",
            "create-table",
        ]);
        let mut cli_app = cli.build().await?;
        cli_app.start().await?;

        instance.stop().await?;

        let output_file = output_dir
            .path()
            .join("greptime-cli.export.create_table.sql");
        let res = std::fs::read_to_string(output_file).unwrap();
        let expect = r#"CREATE TABLE IF NOT EXISTS "a.b.c" (
"ts" TIMESTAMP(3) NOT NULL,
TIME INDEX ("ts")
)

ENGINE=mito
WITH(
regions = 1
);
"#;
        assert_eq!(res.trim(), expect.trim());

        Ok(())
    }
}

@@ -19,14 +19,13 @@ use std::time::Instant;
use catalog::kvbackend::{
    CachedMetaKvBackend, CachedMetaKvBackendBuilder, KvBackendCatalogManager,
};
use client::{Client, OutputData, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use client::{Client, Database, OutputData, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_base::Plugins;
use common_config::Mode;
use common_error::ext::ErrorExt;
use common_meta::cache_invalidator::MultiCacheInvalidator;
use common_query::Output;
use common_recordbatch::RecordBatches;
use common_telemetry::debug;
use common_telemetry::logging;
use either::Either;
use meta_client::client::MetaClientBuilder;
use query::datafusion::DatafusionQueryEngine;
@@ -78,7 +77,7 @@ impl Repl {

    let history_file = history_file();
    if let Err(e) = rl.load_history(&history_file) {
        debug!(
        logging::debug!(
            "failed to load history file on {}, error: {e}",
            history_file.display()
        );
@@ -161,10 +160,7 @@ impl Repl {
        let start = Instant::now();

        let output = if let Some(query_engine) = &self.query_engine {
            let query_ctx = Arc::new(QueryContext::with(
                self.database.catalog(),
                self.database.schema(),
            ));
            let query_ctx = QueryContext::with(self.database.catalog(), self.database.schema());

            let stmt = QueryLanguageParser::parse_sql(&sql, &query_ctx)
                .with_context(|_| ParseSqlSnafu { sql: sql.clone() })?;
@@ -228,7 +224,7 @@ impl Drop for Repl {
        if self.rl.helper().is_some() {
            let history_file = history_file();
            if let Err(e) = self.rl.save_history(&history_file) {
                debug!(
                logging::debug!(
                    "failed to save history file on {}, error: {e}",
                    history_file.display()
                );
@@ -260,13 +256,8 @@ async fn create_query_engine(meta_addr: &str) -> Result<DatafusionQueryEngine> {
    let multi_cache_invalidator = Arc::new(MultiCacheInvalidator::with_invalidators(vec![
        cached_meta_backend.clone(),
    ]));
    let catalog_list = KvBackendCatalogManager::new(
        Mode::Distributed,
        Some(meta_client.clone()),
        cached_meta_backend.clone(),
        multi_cache_invalidator,
    )
    .await;
    let catalog_list =
        KvBackendCatalogManager::new(cached_meta_backend.clone(), multi_cache_invalidator).await;
    let plugins: Plugins = Default::default();
    let state = Arc::new(QueryEngineState::new(
        catalog_list,

@@ -27,7 +27,7 @@ use common_meta::key::table_info::{TableInfoKey, TableInfoValue};
use common_meta::key::table_name::{TableNameKey, TableNameValue};
use common_meta::key::table_region::{TableRegionKey, TableRegionValue};
use common_meta::key::table_route::{TableRouteKey, TableRouteValue as NextTableRouteValue};
use common_meta::key::{MetaKey, RegionDistribution, TableMetaValue};
use common_meta::key::{RegionDistribution, TableMetaKey, TableMetaValue};
use common_meta::kv_backend::etcd::EtcdStore;
use common_meta::kv_backend::KvBackendRef;
use common_meta::range_stream::PaginationStream;
@@ -137,7 +137,7 @@ impl MigrateTableMetadata {
        while let Some((key, value)) = stream.try_next().await.context(error::IterStreamSnafu)? {
            let table_id = self.migrate_table_route_key(value).await?;
            keys.push(key);
            keys.push(TableRegionKey::new(table_id).to_bytes())
            keys.push(TableRegionKey::new(table_id).as_raw_key())
        }

        info!("Total migrated TableRouteKeys: {}", keys.len() / 2);
@@ -165,7 +165,7 @@ impl MigrateTableMetadata {
        self.etcd_store
            .put(
                PutRequest::new()
                    .with_key(new_key.to_bytes())
                    .with_key(new_key.as_raw_key())
                    .with_value(new_table_value.try_as_raw_value().unwrap()),
            )
            .await
@@ -192,10 +192,10 @@ impl MigrateTableMetadata {
                let key = v1SchemaKey::parse(key_str)
                    .unwrap_or_else(|e| panic!("schema key is corrupted: {e}, key: {key_str}"));

                Ok(key)
                Ok((key, ()))
            }),
        );
        while let Some(key) = stream.try_next().await.context(error::IterStreamSnafu)? {
        while let Some((key, _)) = stream.try_next().await.context(error::IterStreamSnafu)? {
            let _ = self.migrate_schema_key(&key).await;
            keys.push(key.to_string().as_bytes().to_vec());
        }
@@ -217,7 +217,7 @@ impl MigrateTableMetadata {
        self.etcd_store
            .put(
                PutRequest::new()
                    .with_key(new_key.to_bytes())
                    .with_key(new_key.as_raw_key())
                    .with_value(schema_name_value.try_as_raw_value().unwrap()),
            )
            .await
@@ -244,10 +244,10 @@ impl MigrateTableMetadata {
                let key = v1CatalogKey::parse(key_str)
                    .unwrap_or_else(|e| panic!("catalog key is corrupted: {e}, key: {key_str}"));

                Ok(key)
                Ok((key, ()))
            }),
        );
        while let Some(key) = stream.try_next().await.context(error::IterStreamSnafu)? {
        while let Some((key, _)) = stream.try_next().await.context(error::IterStreamSnafu)? {
            let _ = self.migrate_catalog_key(&key).await;
            keys.push(key.to_string().as_bytes().to_vec());
        }
@@ -269,7 +269,7 @@ impl MigrateTableMetadata {
        self.etcd_store
            .put(
                PutRequest::new()
                    .with_key(new_key.to_bytes())
                    .with_key(new_key.as_raw_key())
                    .with_value(catalog_name_value.try_as_raw_value().unwrap()),
            )
            .await
@@ -346,11 +346,11 @@ impl MigrateTableMetadata {
            .batch_put(
                BatchPutRequest::new()
                    .add_kv(
                        table_info_key.to_bytes(),
                        table_info_key.as_raw_key(),
                        table_info_value.try_as_raw_value().unwrap(),
                    )
                    .add_kv(
                        table_region_key.to_bytes(),
                        table_region_key.as_raw_key(),
                        table_region_value.try_as_raw_value().unwrap(),
                    ),
            )
@@ -378,7 +378,7 @@ impl MigrateTableMetadata {
        self.etcd_store
            .put(
                PutRequest::new()
                    .with_key(table_name_key.to_bytes())
                    .with_key(table_name_key.as_raw_key())
                    .with_value(table_name_value.try_as_raw_value().unwrap()),
            )
            .await
@@ -425,7 +425,7 @@ impl MigrateTableMetadata {
        } else {
            let mut req = BatchPutRequest::new();
            for (key, value) in datanode_table_kvs {
                req = req.add_kv(key.to_bytes(), value.try_as_raw_value().unwrap());
                req = req.add_kv(key.as_raw_key(), value.try_as_raw_value().unwrap());
            }
            self.etcd_store.batch_put(req).await.unwrap();
        }

@@ -18,7 +18,7 @@ use std::time::Duration;
use async_trait::async_trait;
use catalog::kvbackend::MetaKvBackend;
use clap::Parser;
use common_telemetry::info;
use common_telemetry::{info, logging};
use common_wal::config::DatanodeWalConfig;
use datanode::config::DatanodeOptions;
use datanode::datanode::{Datanode, DatanodeBuilder};
@@ -28,7 +28,7 @@ use servers::Mode;
use snafu::{OptionExt, ResultExt};

use crate::error::{MissingConfigSnafu, Result, ShutdownDatanodeSnafu, StartDatanodeSnafu};
use crate::options::{GlobalOptions, Options};
use crate::options::{CliOptions, Options};
use crate::App;

pub struct Instance {
@@ -82,8 +82,8 @@ impl Command {
        self.subcmd.build(opts).await
    }

    pub fn load_options(&self, global_options: &GlobalOptions) -> Result<Options> {
        self.subcmd.load_options(global_options)
    pub fn load_options(&self, cli_options: &CliOptions) -> Result<Options> {
        self.subcmd.load_options(cli_options)
    }
}

@@ -99,9 +99,9 @@ impl SubCommand {
        }
    }

    fn load_options(&self, global_options: &GlobalOptions) -> Result<Options> {
    fn load_options(&self, cli_options: &CliOptions) -> Result<Options> {
        match self {
            SubCommand::Start(cmd) => cmd.load_options(global_options),
            SubCommand::Start(cmd) => cmd.load_options(cli_options),
        }
    }
}
@@ -131,19 +131,19 @@ struct StartCommand {
}

impl StartCommand {
    fn load_options(&self, global_options: &GlobalOptions) -> Result<Options> {
    fn load_options(&self, cli_options: &CliOptions) -> Result<Options> {
        let mut opts: DatanodeOptions = Options::load_layered_options(
            self.config_file.as_deref(),
            self.env_prefix.as_ref(),
            DatanodeOptions::env_list_keys(),
        )?;

        if let Some(dir) = &global_options.log_dir {
        if let Some(dir) = &cli_options.log_dir {
            opts.logging.dir.clone_from(dir);
        }

        if global_options.log_level.is_some() {
            opts.logging.level.clone_from(&global_options.log_level);
        if cli_options.log_level.is_some() {
            opts.logging.level.clone_from(&cli_options.log_level);
        }

        if let Some(addr) = &self.rpc_addr {
@@ -210,8 +210,8 @@ impl StartCommand {
            .await
            .context(StartDatanodeSnafu)?;

        info!("Datanode start command: {:#?}", self);
        info!("Datanode options: {:#?}", opts);
        logging::info!("Datanode start command: {:#?}", self);
        logging::info!("Datanode options: {:#?}", opts);

        let node_id = opts
            .node_id
@@ -259,7 +259,7 @@ mod tests {
    use servers::Mode;

    use super::*;
    use crate::options::{GlobalOptions, ENV_VAR_SEP};
    use crate::options::{CliOptions, ENV_VAR_SEP};

    #[test]
    fn test_read_from_config_file() {
@@ -315,8 +315,7 @@ mod tests {
            ..Default::default()
        };

        let Options::Datanode(options) = cmd.load_options(&GlobalOptions::default()).unwrap()
        else {
        let Options::Datanode(options) = cmd.load_options(&CliOptions::default()).unwrap() else {
            unreachable!()
        };

@@ -378,7 +377,7 @@ mod tests {
    #[test]
    fn test_try_from_cmd() {
        if let Options::Datanode(opt) = StartCommand::default()
            .load_options(&GlobalOptions::default())
            .load_options(&CliOptions::default())
            .unwrap()
        {
            assert_eq!(Mode::Standalone, opt.mode)
@@ -389,7 +388,7 @@ mod tests {
            metasrv_addr: Some(vec!["127.0.0.1:3002".to_string()]),
            ..Default::default()
        })
        .load_options(&GlobalOptions::default())
        .load_options(&CliOptions::default())
        .unwrap()
        {
            assert_eq!(Mode::Distributed, opt.mode)
@@ -399,7 +398,7 @@ mod tests {
            metasrv_addr: Some(vec!["127.0.0.1:3002".to_string()]),
            ..Default::default()
        })
        .load_options(&GlobalOptions::default())
        .load_options(&CliOptions::default())
        .is_err());

        // Providing node_id but leave metasrv_addr absent is ok since metasrv_addr has default value
@@ -407,7 +406,7 @@ mod tests {
            node_id: Some(42),
            ..Default::default()
        })
        .load_options(&GlobalOptions::default())
        .load_options(&CliOptions::default())
        .is_ok());
    }

@@ -416,7 +415,7 @@ mod tests {
        let cmd = StartCommand::default();

        let options = cmd
            .load_options(&GlobalOptions {
            .load_options(&CliOptions {
                log_dir: Some("/tmp/greptimedb/test/logs".to_string()),
                log_level: Some("debug".to_string()),

@@ -505,8 +504,7 @@ mod tests {
            ..Default::default()
        };

        let Options::Datanode(opts) =
            command.load_options(&GlobalOptions::default()).unwrap()
        let Options::Datanode(opts) = command.load_options(&CliOptions::default()).unwrap()
        else {
            unreachable!()
        };

@@ -139,6 +139,13 @@ pub enum Error {
        location: Location,
    },

    #[snafu(display("Failed to request database, sql: {sql}"))]
    RequestDatabase {
        sql: String,
        location: Location,
        source: client::Error,
    },

    #[snafu(display("Failed to collect RecordBatches"))]
    CollectRecordBatches {
        location: Location,
@@ -211,14 +218,6 @@ pub enum Error {
        location: Location,
    },

    #[snafu(display("Failed to run http request: {reason}"))]
    HttpQuerySql {
        reason: String,
        #[snafu(source)]
        error: reqwest::Error,
        location: Location,
    },

    #[snafu(display("Expect data from output, but got another thing"))]
    NotDataFromOutput { location: Location },

@@ -291,9 +290,8 @@ impl ErrorExt for Error {
            Error::StartProcedureManager { source, .. }
            | Error::StopProcedureManager { source, .. } => source.status_code(),
            Error::StartWalOptionsAllocator { source, .. } => source.status_code(),
            Error::ReplCreation { .. } | Error::Readline { .. } | Error::HttpQuerySql { .. } => {
                StatusCode::Internal
            }
            Error::ReplCreation { .. } | Error::Readline { .. } => StatusCode::Internal,
            Error::RequestDatabase { source, .. } => source.status_code(),
            Error::CollectRecordBatches { source, .. }
            | Error::PrettyPrintRecordBatches { source, .. } => source.status_code(),
            Error::StartMetaClient { source, .. } => source.status_code(),

@@ -22,7 +22,7 @@ use client::client_manager::DatanodeClients;
use common_meta::cache_invalidator::MultiCacheInvalidator;
use common_meta::heartbeat::handler::parse_mailbox_message::ParseMailboxMessageHandler;
use common_meta::heartbeat::handler::HandlerGroupExecutor;
use common_telemetry::info;
use common_telemetry::logging;
use common_time::timezone::set_default_timezone;
use frontend::frontend::FrontendOptions;
use frontend::heartbeat::handler::invalidate_table_cache::InvalidateTableCacheHandler;
@@ -36,7 +36,7 @@ use servers::Mode;
use snafu::{OptionExt, ResultExt};

use crate::error::{self, InitTimezoneSnafu, MissingConfigSnafu, Result, StartFrontendSnafu};
use crate::options::{GlobalOptions, Options};
use crate::options::{CliOptions, Options};
use crate::App;

pub struct Instance {
@@ -90,8 +90,8 @@ impl Command {
        self.subcmd.build(opts).await
    }

    pub fn load_options(&self, global_options: &GlobalOptions) -> Result<Options> {
        self.subcmd.load_options(global_options)
    pub fn load_options(&self, cli_options: &CliOptions) -> Result<Options> {
        self.subcmd.load_options(cli_options)
    }
}

@@ -107,9 +107,9 @@ impl SubCommand {
        }
    }

    fn load_options(&self, global_options: &GlobalOptions) -> Result<Options> {
    fn load_options(&self, cli_options: &CliOptions) -> Result<Options> {
        match self {
            SubCommand::Start(cmd) => cmd.load_options(global_options),
            SubCommand::Start(cmd) => cmd.load_options(cli_options),
        }
    }
}
@@ -126,6 +126,8 @@ pub struct StartCommand {
    mysql_addr: Option<String>,
    #[clap(long)]
    postgres_addr: Option<String>,
    #[clap(long)]
    opentsdb_addr: Option<String>,
    #[clap(short, long)]
    config_file: Option<String>,
    #[clap(short, long)]
@@ -147,19 +149,19 @@ pub struct StartCommand {
}

impl StartCommand {
    fn load_options(&self, global_options: &GlobalOptions) -> Result<Options> {
    fn load_options(&self, cli_options: &CliOptions) -> Result<Options> {
        let mut opts: FrontendOptions = Options::load_layered_options(
            self.config_file.as_deref(),
            self.env_prefix.as_ref(),
            FrontendOptions::env_list_keys(),
        )?;

        if let Some(dir) = &global_options.log_dir {
        if let Some(dir) = &cli_options.log_dir {
            opts.logging.dir.clone_from(dir);
        }

        if global_options.log_level.is_some() {
            opts.logging.level.clone_from(&global_options.log_level);
        if cli_options.log_level.is_some() {
            opts.logging.level.clone_from(&cli_options.log_level);
        }

        let tls_opts = TlsOption::new(
@@ -196,6 +198,11 @@ impl StartCommand {
            opts.postgres.tls = tls_opts;
        }

        if let Some(addr) = &self.opentsdb_addr {
            opts.opentsdb.enable = true;
            opts.opentsdb.addr.clone_from(addr);
        }

        if let Some(enable) = self.influxdb_enable {
            opts.influxdb.enable = enable;
        }
@@ -219,8 +226,8 @@ impl StartCommand {
            .await
            .context(StartFrontendSnafu)?;

        info!("Frontend start command: {:#?}", self);
        info!("Frontend options: {:#?}", opts);
        logging::info!("Frontend start command: {:#?}", self);
        logging::info!("Frontend options: {:#?}", opts);

        set_default_timezone(opts.default_timezone.as_deref()).context(InitTimezoneSnafu)?;

@@ -246,8 +253,6 @@ impl StartCommand {
            cached_meta_backend.clone(),
        ]));
        let catalog_manager = KvBackendCatalogManager::new(
            opts.mode,
            Some(meta_client.clone()),
            cached_meta_backend.clone(),
            multi_cache_invalidator.clone(),
        )
@@ -261,7 +266,6 @@ impl StartCommand {
        ]);

        let heartbeat_task = HeartbeatTask::new(
            &opts,
            meta_client.clone(),
            opts.heartbeat.clone(),
            Arc::new(executor),
@@ -304,7 +308,7 @@ mod tests {
    use servers::http::HttpOptions;

    use super::*;
    use crate::options::{GlobalOptions, ENV_VAR_SEP};
    use crate::options::{CliOptions, ENV_VAR_SEP};

    #[test]
    fn test_try_from_start_command() {
@@ -312,13 +316,13 @@ mod tests {
            http_addr: Some("127.0.0.1:1234".to_string()),
            mysql_addr: Some("127.0.0.1:5678".to_string()),
            postgres_addr: Some("127.0.0.1:5432".to_string()),
            opentsdb_addr: Some("127.0.0.1:4321".to_string()),
            influxdb_enable: Some(false),
            disable_dashboard: Some(false),
            ..Default::default()
        };

        let Options::Frontend(opts) = command.load_options(&GlobalOptions::default()).unwrap()
        else {
        let Options::Frontend(opts) = command.load_options(&CliOptions::default()).unwrap() else {
            unreachable!()
        };

@@ -326,6 +330,7 @@ mod tests {
        assert_eq!(ReadableSize::mb(64), opts.http.body_limit);
        assert_eq!(opts.mysql.addr, "127.0.0.1:5678");
        assert_eq!(opts.postgres.addr, "127.0.0.1:5432");
        assert_eq!(opts.opentsdb.addr, "127.0.0.1:4321");

        let default_opts = FrontendOptions::default();

@@ -338,6 +343,10 @@ mod tests {
            default_opts.postgres.runtime_size
        );
        assert!(opts.opentsdb.enable);
        assert_eq!(
            opts.opentsdb.runtime_size,
            default_opts.opentsdb.runtime_size
        );

        assert!(!opts.influxdb.enable);
    }
@@ -353,9 +362,6 @@ mod tests {
timeout = "30s"
body_limit = "2GB"

[opentsdb]
enable = false

[logging]
level = "debug"
dir = "/tmp/greptimedb/test/logs"
@@ -368,7 +374,7 @@ mod tests {
            ..Default::default()
        };

        let Options::Frontend(fe_opts) = command.load_options(&GlobalOptions::default()).unwrap()
        let Options::Frontend(fe_opts) = command.load_options(&CliOptions::default()).unwrap()
        else {
            unreachable!()
        };
@@ -380,7 +386,6 @@ mod tests {

        assert_eq!("debug", fe_opts.logging.level.as_ref().unwrap());
        assert_eq!("/tmp/greptimedb/test/logs".to_string(), fe_opts.logging.dir);
        assert!(!fe_opts.opentsdb.enable);
    }

    #[tokio::test]
@@ -415,7 +420,7 @@ mod tests {
        };

        let options = cmd
            .load_options(&GlobalOptions {
            .load_options(&CliOptions {
                log_dir: Some("/tmp/greptimedb/test/logs".to_string()),
                log_level: Some("debug".to_string()),

@@ -501,7 +506,7 @@ mod tests {
        };

        let Options::Frontend(fe_opts) =
            command.load_options(&GlobalOptions::default()).unwrap()
            command.load_options(&CliOptions::default()).unwrap()
        else {
            unreachable!()
        };

@@ -15,6 +15,7 @@
#![feature(assert_matches, let_chains)]

use async_trait::async_trait;
use clap::arg;
use common_telemetry::{error, info};

pub mod cli;
@@ -63,21 +64,62 @@ pub async fn start_app(mut app: Box<dyn App>) -> error::Result<()> {
    Ok(())
}

/// Log the versions of the application, and the arguments passed to the cli.
/// `version_string` should be the same as the output of cli "--version";
/// and the `app_version` is the short version of the codes, often consist of git branch and commit.
pub fn log_versions(version_string: &str, app_version: &str) {
pub fn log_versions() {
    // Report app version as gauge.
    APP_VERSION
        .with_label_values(&[env!("CARGO_PKG_VERSION"), app_version])
        .with_label_values(&[short_version(), full_version()])
        .inc();

    // Log version and argument flags.
    info!("GreptimeDB version: {}", version_string);
    info!(
        "short_version: {}, full_version: {}",
        short_version(),
        full_version()
    );

    log_env_flags();
}

pub fn greptimedb_cli() -> clap::Command {
    let cmd = clap::Command::new("greptimedb")
        .version(print_version())
        .subcommand_required(true);

    #[cfg(feature = "tokio-console")]
    let cmd = cmd.arg(arg!(--"tokio-console-addr"[TOKIO_CONSOLE_ADDR]));

    cmd.args([arg!(--"log-dir"[LOG_DIR]), arg!(--"log-level"[LOG_LEVEL])])
}

fn print_version() -> &'static str {
    concat!(
        "\nbranch: ",
        env!("GIT_BRANCH"),
        "\ncommit: ",
        env!("GIT_COMMIT"),
        "\ndirty: ",
        env!("GIT_DIRTY"),
        "\nversion: ",
        env!("CARGO_PKG_VERSION")
    )
}

fn short_version() -> &'static str {
    env!("CARGO_PKG_VERSION")
}

// {app_name}-{branch_name}-{commit_short}
// The branch name (tag) of a release build should already contain the short
// version so the full version doesn't concat the short version explicitly.
fn full_version() -> &'static str {
    concat!(
        "greptimedb-",
        env!("GIT_BRANCH"),
        "-",
        env!("GIT_COMMIT_SHORT")
    )
}
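
A quick sketch of what the two helpers above evaluate to, assuming illustrative values for the build-time `GIT_*` environment variables (they are injected by the build script and are not shown in this diff):

// Assuming CARGO_PKG_VERSION = "0.7.0", GIT_BRANCH = "main", and
// GIT_COMMIT_SHORT = "abc1234" (made-up values, for illustration only):
assert_eq!(short_version(), "0.7.0");
assert_eq!(full_version(), "greptimedb-main-abc1234");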

fn log_env_flags() {
    info!("command line arguments");
    for argument in std::env::args() {

@@ -16,13 +16,13 @@ use std::time::Duration;

use async_trait::async_trait;
use clap::Parser;
use common_telemetry::info;
use common_telemetry::logging;
use meta_srv::bootstrap::MetasrvInstance;
use meta_srv::metasrv::MetasrvOptions;
use snafu::ResultExt;

use crate::error::{self, Result, StartMetaServerSnafu};
use crate::options::{GlobalOptions, Options};
use crate::options::{CliOptions, Options};
use crate::App;

pub struct Instance {
@@ -68,8 +68,8 @@ impl Command {
        self.subcmd.build(opts).await
    }

    pub fn load_options(&self, global_options: &GlobalOptions) -> Result<Options> {
        self.subcmd.load_options(global_options)
    pub fn load_options(&self, cli_options: &CliOptions) -> Result<Options> {
        self.subcmd.load_options(cli_options)
    }
}

@@ -85,9 +85,9 @@ impl SubCommand {
        }
    }

    fn load_options(&self, global_options: &GlobalOptions) -> Result<Options> {
    fn load_options(&self, cli_options: &CliOptions) -> Result<Options> {
        match self {
            SubCommand::Start(cmd) => cmd.load_options(global_options),
            SubCommand::Start(cmd) => cmd.load_options(cli_options),
        }
    }
}
@@ -126,19 +126,19 @@ struct StartCommand {
}

impl StartCommand {
    fn load_options(&self, global_options: &GlobalOptions) -> Result<Options> {
    fn load_options(&self, cli_options: &CliOptions) -> Result<Options> {
        let mut opts: MetasrvOptions = Options::load_layered_options(
            self.config_file.as_deref(),
            self.env_prefix.as_ref(),
            MetasrvOptions::env_list_keys(),
        )?;

        if let Some(dir) = &global_options.log_dir {
        if let Some(dir) = &cli_options.log_dir {
            opts.logging.dir.clone_from(dir);
        }

        if global_options.log_level.is_some() {
            opts.logging.level.clone_from(&global_options.log_level);
        if cli_options.log_level.is_some() {
            opts.logging.level.clone_from(&cli_options.log_level);
        }

        if let Some(addr) = &self.bind_addr {
@@ -198,8 +198,8 @@ impl StartCommand {
            .await
            .context(StartMetaServerSnafu)?;

        info!("Metasrv start command: {:#?}", self);
        info!("Metasrv options: {:#?}", opts);
        logging::info!("Metasrv start command: {:#?}", self);
        logging::info!("Metasrv options: {:#?}", opts);

        let builder = meta_srv::bootstrap::metasrv_builder(&opts, plugins.clone(), None)
            .await
@@ -235,7 +235,7 @@ mod tests {
            ..Default::default()
        };

        let Options::Metasrv(options) = cmd.load_options(&GlobalOptions::default()).unwrap() else {
        let Options::Metasrv(options) = cmd.load_options(&CliOptions::default()).unwrap() else {
            unreachable!()
        };
        assert_eq!("127.0.0.1:3002".to_string(), options.bind_addr);
@@ -270,7 +270,7 @@ mod tests {
            ..Default::default()
        };

        let Options::Metasrv(options) = cmd.load_options(&GlobalOptions::default()).unwrap() else {
        let Options::Metasrv(options) = cmd.load_options(&CliOptions::default()).unwrap() else {
            unreachable!()
        };
        assert_eq!("127.0.0.1:3002".to_string(), options.bind_addr);
@@ -315,7 +315,7 @@ mod tests {
        };

        let options = cmd
            .load_options(&GlobalOptions {
            .load_options(&CliOptions {
                log_dir: Some("/tmp/greptimedb/test/logs".to_string()),
                log_level: Some("debug".to_string()),

@@ -379,8 +379,7 @@ mod tests {
            ..Default::default()
        };

        let Options::Metasrv(opts) =
            command.load_options(&GlobalOptions::default()).unwrap()
        let Options::Metasrv(opts) = command.load_options(&CliOptions::default()).unwrap()
        else {
            unreachable!()
        };

@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use clap::Parser;
use clap::ArgMatches;
use common_config::KvBackendConfig;
use common_telemetry::logging::{LoggingOptions, TracingOptions};
use common_wal::config::MetasrvWalConfig;
@@ -61,23 +61,26 @@ pub enum Options {
    Cli(Box<LoggingOptions>),
}

#[derive(Parser, Default, Debug, Clone)]
pub struct GlobalOptions {
    #[clap(long, value_name = "LOG_DIR")]
    #[arg(global = true)]
#[derive(Default)]
pub struct CliOptions {
    pub log_dir: Option<String>,

    #[clap(long, value_name = "LOG_LEVEL")]
    #[arg(global = true)]
    pub log_level: Option<String>,

    #[cfg(feature = "tokio-console")]
    #[clap(long, value_name = "TOKIO_CONSOLE_ADDR")]
    #[arg(global = true)]
    pub tokio_console_addr: Option<String>,
}

impl GlobalOptions {
impl CliOptions {
    pub fn new(args: &ArgMatches) -> Self {
        Self {
            log_dir: args.get_one::<String>("log-dir").cloned(),
            log_level: args.get_one::<String>("log-level").cloned(),

            #[cfg(feature = "tokio-console")]
            tokio_console_addr: args.get_one::<String>("tokio-console-addr").cloned(),
        }
    }

    pub fn tracing_options(&self) -> TracingOptions {
        TracingOptions {
            #[cfg(feature = "tokio-console")]

@@ -18,17 +18,15 @@ use std::{fs, path};
use async_trait::async_trait;
use catalog::kvbackend::KvBackendCatalogManager;
use clap::Parser;
use common_catalog::consts::{MIN_USER_FLOW_ID, MIN_USER_TABLE_ID};
use common_catalog::consts::MIN_USER_TABLE_ID;
use common_config::{metadata_store_dir, KvBackendConfig};
use common_meta::cache_invalidator::{CacheInvalidatorRef, MultiCacheInvalidator};
use common_meta::ddl::flow_meta::{FlowMetadataAllocator, FlowMetadataAllocatorRef};
use common_meta::datanode_manager::DatanodeManagerRef;
use common_meta::ddl::table_meta::{TableMetadataAllocator, TableMetadataAllocatorRef};
use common_meta::ddl::{DdlContext, ProcedureExecutorRef};
use common_meta::ddl::ProcedureExecutorRef;
use common_meta::ddl_manager::DdlManager;
use common_meta::key::flow::{FlowMetadataManager, FlowMetadataManagerRef};
use common_meta::key::{TableMetadataManager, TableMetadataManagerRef};
use common_meta::kv_backend::KvBackendRef;
use common_meta::node_manager::NodeManagerRef;
use common_meta::region_keeper::MemoryRegionKeeper;
use common_meta::sequence::SequenceBuilder;
use common_meta::wal_options_allocator::{WalOptionsAllocator, WalOptionsAllocatorRef};
@@ -40,7 +38,6 @@ use common_wal::config::StandaloneWalConfig;
use datanode::config::{DatanodeOptions, ProcedureConfig, RegionEngineConfig, StorageConfig};
use datanode::datanode::{Datanode, DatanodeBuilder};
use file_engine::config::EngineConfig as FileEngineConfig;
use flow::FlownodeBuilder;
use frontend::frontend::FrontendOptions;
use frontend::instance::builder::FrontendBuilder;
use frontend::instance::{FrontendInstance, Instance as FeInstance, StandaloneDatanodeManager};
@@ -48,7 +45,6 @@ use frontend::server::Services;
use frontend::service_config::{
    GrpcOptions, InfluxdbOptions, MysqlOptions, OpentsdbOptions, PostgresOptions, PromStoreOptions,
};
use meta_srv::metasrv::{FLOW_ID_SEQ, TABLE_ID_SEQ};
use mito2::config::MitoConfig;
use serde::{Deserialize, Serialize};
use servers::export_metrics::ExportMetricsOption;
@@ -62,7 +58,7 @@ use crate::error::{
    Result, ShutdownDatanodeSnafu, ShutdownFrontendSnafu, StartDatanodeSnafu, StartFrontendSnafu,
    StartProcedureManagerSnafu, StartWalOptionsAllocatorSnafu, StopProcedureManagerSnafu,
};
use crate::options::{GlobalOptions, MixOptions, Options};
use crate::options::{CliOptions, MixOptions, Options};
use crate::App;

#[derive(Parser)]
@@ -76,8 +72,8 @@ impl Command {
        self.subcmd.build(opts).await
    }

    pub fn load_options(&self, global_options: &GlobalOptions) -> Result<Options> {
        self.subcmd.load_options(global_options)
    pub fn load_options(&self, cli_options: &CliOptions) -> Result<Options> {
        self.subcmd.load_options(cli_options)
    }
}

@@ -93,9 +89,9 @@ impl SubCommand {
        }
    }

    fn load_options(&self, global_options: &GlobalOptions) -> Result<Options> {
    fn load_options(&self, cli_options: &CliOptions) -> Result<Options> {
        match self {
            SubCommand::Start(cmd) => cmd.load_options(global_options),
            SubCommand::Start(cmd) => cmd.load_options(cli_options),
        }
    }
}
@@ -257,6 +253,8 @@ pub struct StartCommand {
    mysql_addr: Option<String>,
    #[clap(long)]
    postgres_addr: Option<String>,
    #[clap(long)]
    opentsdb_addr: Option<String>,
    #[clap(short, long)]
    influxdb_enable: bool,
    #[clap(short, long)]
@@ -277,29 +275,29 @@ pub struct StartCommand {
}

impl StartCommand {
    fn load_options(&self, global_options: &GlobalOptions) -> Result<Options> {
    fn load_options(&self, cli_options: &CliOptions) -> Result<Options> {
        let opts: StandaloneOptions = Options::load_layered_options(
            self.config_file.as_deref(),
            self.env_prefix.as_ref(),
            StandaloneOptions::env_list_keys(),
        )?;

        self.convert_options(global_options, opts)
        self.convert_options(cli_options, opts)
    }

    pub fn convert_options(
        &self,
        global_options: &GlobalOptions,
        cli_options: &CliOptions,
        mut opts: StandaloneOptions,
    ) -> Result<Options> {
        opts.mode = Mode::Standalone;

        if let Some(dir) = &global_options.log_dir {
        if let Some(dir) = &cli_options.log_dir {
            opts.logging.dir.clone_from(dir);
        }

        if global_options.log_level.is_some() {
            opts.logging.level.clone_from(&global_options.log_level);
        if cli_options.log_level.is_some() {
            opts.logging.level.clone_from(&cli_options.log_level);
        }

        let tls_opts = TlsOption::new(
@@ -342,6 +340,11 @@ impl StartCommand {
            opts.postgres.tls = tls_opts;
        }

        if let Some(addr) = &self.opentsdb_addr {
            opts.opentsdb.enable = true;
            opts.opentsdb.addr.clone_from(addr);
        }

        if self.influxdb_enable {
            opts.influxdb.enable = self.influxdb_enable;
        }
@@ -398,83 +401,53 @@ impl StartCommand {
            .context(StartFrontendSnafu)?;

        let multi_cache_invalidator = Arc::new(MultiCacheInvalidator::default());
        let catalog_manager = KvBackendCatalogManager::new(
            dn_opts.mode,
            None,
            kv_backend.clone(),
            multi_cache_invalidator.clone(),
        )
        .await;

        let table_metadata_manager =
            Self::create_table_metadata_manager(kv_backend.clone()).await?;

        let flow_builder = FlownodeBuilder::new(
            Default::default(),
            fe_plugins.clone(),
            table_metadata_manager.clone(),
            catalog_manager.clone(),
        )
        .with_kv_backend(kv_backend.clone());
        let flownode = Arc::new(flow_builder.build().await);
        let catalog_manager =
            KvBackendCatalogManager::new(kv_backend.clone(), multi_cache_invalidator.clone()).await;

        let builder =
            DatanodeBuilder::new(dn_opts, fe_plugins.clone()).with_kv_backend(kv_backend.clone());
        let datanode = builder.build().await.context(StartDatanodeSnafu)?;

        let node_manager = Arc::new(StandaloneDatanodeManager {
            region_server: datanode.region_server(),
            flow_server: flownode.clone(),
        });
        let datanode_manager = Arc::new(StandaloneDatanodeManager(datanode.region_server()));

        let table_id_sequence = Arc::new(
            SequenceBuilder::new(TABLE_ID_SEQ, kv_backend.clone())
            SequenceBuilder::new("table_id", kv_backend.clone())
                .initial(MIN_USER_TABLE_ID as u64)
                .step(10)
                .build(),
        );
        let flow_id_sequence = Arc::new(
            SequenceBuilder::new(FLOW_ID_SEQ, kv_backend.clone())
                .initial(MIN_USER_FLOW_ID as u64)
                .step(10)
                .build(),
        );
        let wal_options_allocator = Arc::new(WalOptionsAllocator::new(
            opts.wal_meta.clone(),
            kv_backend.clone(),
        ));
        let flow_metadata_manager = Arc::new(FlowMetadataManager::new(kv_backend.clone()));

        let table_metadata_manager =
            Self::create_table_metadata_manager(kv_backend.clone()).await?;

        let table_meta_allocator = Arc::new(TableMetadataAllocator::new(
            table_id_sequence,
            wal_options_allocator.clone(),
        ));
        let flow_meta_allocator = Arc::new(FlowMetadataAllocator::with_noop_peer_allocator(
            flow_id_sequence,
        ));

        let ddl_task_executor = Self::create_ddl_task_executor(
            procedure_manager.clone(),
            node_manager.clone(),
            multi_cache_invalidator,
            table_metadata_manager,
            procedure_manager.clone(),
            datanode_manager.clone(),
            multi_cache_invalidator,
            table_meta_allocator,
            flow_metadata_manager,
            flow_meta_allocator,
        )
        .await?;

        let mut frontend =
            FrontendBuilder::new(kv_backend, catalog_manager, node_manager, ddl_task_executor)
                .with_plugin(fe_plugins.clone())
                .try_build()
                .await
                .context(StartFrontendSnafu)?;

        // flow server need to be able to use frontend to write insert requests back
        flownode
            .set_frontend_invoker(Box::new(frontend.clone()))
            .await;
        let _handle = flownode.clone().run_background();
        let mut frontend = FrontendBuilder::new(
            kv_backend,
            catalog_manager,
            datanode_manager,
            ddl_task_executor,
        )
        .with_plugin(fe_plugins.clone())
        .try_build()
        .await
        .context(StartFrontendSnafu)?;

        let servers = Services::new(fe_opts.clone(), Arc::new(frontend.clone()), fe_plugins)
            .build()
@@ -493,26 +466,20 @@ impl StartCommand {
    }

    pub async fn create_ddl_task_executor(
        procedure_manager: ProcedureManagerRef,
        node_manager: NodeManagerRef,
        cache_invalidator: CacheInvalidatorRef,
        table_metadata_manager: TableMetadataManagerRef,
        table_metadata_allocator: TableMetadataAllocatorRef,
        flow_metadata_manager: FlowMetadataManagerRef,
        flow_metadata_allocator: FlowMetadataAllocatorRef,
        procedure_manager: ProcedureManagerRef,
        datanode_manager: DatanodeManagerRef,
        cache_invalidator: CacheInvalidatorRef,
        table_meta_allocator: TableMetadataAllocatorRef,
    ) -> Result<ProcedureExecutorRef> {
        let procedure_executor: ProcedureExecutorRef = Arc::new(
            DdlManager::try_new(
                DdlContext {
                    node_manager,
                    cache_invalidator,
                    memory_region_keeper: Arc::new(MemoryRegionKeeper::default()),
                    table_metadata_manager,
                    table_metadata_allocator,
                    flow_metadata_manager,
                    flow_metadata_allocator,
                },
                procedure_manager,
                datanode_manager,
                cache_invalidator,
                table_metadata_manager,
                table_meta_allocator,
                Arc::new(MemoryRegionKeeper::default()),
                true,
            )
            .context(InitDdlManagerSnafu)?,
@@ -549,7 +516,7 @@ mod tests {
    use servers::Mode;

    use super::*;
    use crate::options::{GlobalOptions, ENV_VAR_SEP};
    use crate::options::{CliOptions, ENV_VAR_SEP};

    #[tokio::test]
    async fn test_try_from_start_command_to_anymap() {
@@ -623,9 +590,6 @@ mod tests {
timeout = "33s"
body_limit = "128MB"

[opentsdb]
enable = true

[logging]
level = "debug"
dir = "/tmp/greptimedb/test/logs"
@@ -637,8 +601,7 @@ mod tests {
            ..Default::default()
        };

        let Options::Standalone(options) = cmd.load_options(&GlobalOptions::default()).unwrap()
        else {
        let Options::Standalone(options) = cmd.load_options(&CliOptions::default()).unwrap() else {
            unreachable!()
        };
        let fe_opts = options.frontend;
@@ -654,7 +617,6 @@ mod tests {
        assert_eq!(2, fe_opts.mysql.runtime_size);
        assert_eq!(None, fe_opts.mysql.reject_no_database);
        assert!(fe_opts.influxdb.enable);
        assert!(fe_opts.opentsdb.enable);

        let DatanodeWalConfig::RaftEngine(raft_engine_config) = dn_opts.wal else {
            unreachable!()
@@ -673,7 +635,7 @@ mod tests {
        match &dn_opts.storage.providers[1] {
            datanode::config::ObjectStoreConfig::S3(s3_config) => {
                assert_eq!(
                    "SecretBox<alloc::string::String>([REDACTED])".to_string(),
                    "Secret([REDACTED alloc::string::String])".to_string(),
                    format!("{:?}", s3_config.access_key_id)
                );
            }
@@ -694,7 +656,7 @@ mod tests {
        };

        let Options::Standalone(opts) = cmd
            .load_options(&GlobalOptions {
            .load_options(&CliOptions {
                log_dir: Some("/tmp/greptimedb/test/logs".to_string()),
                log_level: Some("debug".to_string()),

@@ -767,7 +729,7 @@ mod tests {
        };

        let Options::Standalone(opts) =
            command.load_options(&GlobalOptions::default()).unwrap()
            command.load_options(&CliOptions::default()).unwrap()
        else {
            unreachable!()
        };

@@ -16,7 +16,6 @@ common-macro.workspace = true
paste = "1.0"
serde = { version = "1.0", features = ["derive"] }
snafu.workspace = true
zeroize = { version = "1.6", default-features = false, features = ["alloc"] }

[dev-dependencies]
toml.workspace = true

@@ -53,6 +53,14 @@ impl ErrorExt for Error {
    fn as_any(&self) -> &dyn Any {
        self
    }

    fn location_opt(&self) -> Option<common_error::snafu::Location> {
        match self {
            Error::Overflow { location, .. } => Some(*location),
            Error::Underflow { location, .. } => Some(*location),
            Error::Eof { location, .. } => Some(*location),
        }
    }
}

macro_rules! impl_read_le {

@@ -15,12 +15,67 @@
pub mod bit_vec;
pub mod buffer;
pub mod bytes;
pub mod plugins;
#[allow(clippy::all)]
pub mod readable_size;
pub mod secrets;

use core::any::Any;
use std::sync::{Arc, Mutex, MutexGuard};

pub type AffectedRows = usize;

pub use bit_vec::BitVec;
pub use plugins::Plugins;

/// [`Plugins`] is a wrapper of Arc contents.
/// Make it Cloneable and we can treat it like an Arc struct.
#[derive(Default, Clone)]
pub struct Plugins {
    inner: Arc<Mutex<anymap::Map<dyn Any + Send + Sync>>>,
}

impl Plugins {
    pub fn new() -> Self {
        Self {
            inner: Arc::new(Mutex::new(anymap::Map::new())),
        }
    }

    fn lock(&self) -> MutexGuard<anymap::Map<dyn Any + Send + Sync>> {
        self.inner.lock().unwrap()
    }

    pub fn insert<T: 'static + Send + Sync>(&self, value: T) {
        let _ = self.lock().insert(value);
    }

    pub fn get<T: 'static + Send + Sync + Clone>(&self) -> Option<T> {
        let binding = self.lock();
        binding.get::<T>().cloned()
    }

    pub fn map_mut<T: 'static + Send + Sync, F, R>(&self, mapper: F) -> R
    where
        F: FnOnce(Option<&mut T>) -> R,
    {
        let mut binding = self.lock();
        let opt = binding.get_mut::<T>();
        mapper(opt)
    }

    pub fn map<T: 'static + Send + Sync, F, R>(&self, mapper: F) -> Option<R>
    where
        F: FnOnce(&T) -> R,
    {
        let binding = self.lock();
        binding.get::<T>().map(mapper)
    }

    pub fn len(&self) -> usize {
        let binding = self.lock();
        binding.len()
    }

    pub fn is_empty(&self) -> bool {
        let binding = self.lock();
        binding.is_empty()
    }
}
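
A minimal usage sketch of the `Plugins` map defined above, mirroring what the test in the standalone `plugins.rs` version below exercises (the plugin type here is illustrative):

// Hypothetical plugin type, not from this diff; it satisfies the
// `'static + Send + Sync + Clone` bounds required by `insert` and `get`.
#[derive(Clone)]
struct AuthPlugin(String);

let plugins = Plugins::new();
plugins.insert(AuthPlugin("basic".to_string()));
// `get` clones the stored value out from behind the lock.
assert_eq!(plugins.get::<AuthPlugin>().map(|p| p.0), Some("basic".to_string()));
assert_eq!(plugins.len(), 1);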
|
||||
|
||||
@@ -1,127 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::any::Any;
use std::sync::{Arc, RwLock, RwLockReadGuard, RwLockWriteGuard};

/// [`Plugins`] is a wrapper of [AnyMap](https://github.com/chris-morgan/anymap) and provides a thread-safe way to store and retrieve plugins.
/// It is `Clone`, so it can be shared and treated like an `Arc` struct.
#[derive(Default, Clone)]
pub struct Plugins {
    inner: Arc<RwLock<anymap::Map<dyn Any + Send + Sync>>>,
}

impl Plugins {
    pub fn new() -> Self {
        Self {
            inner: Arc::new(RwLock::new(anymap::Map::new())),
        }
    }

    pub fn insert<T: 'static + Send + Sync>(&self, value: T) {
        let _ = self.write().insert(value);
    }

    pub fn get<T: 'static + Send + Sync + Clone>(&self) -> Option<T> {
        self.read().get::<T>().cloned()
    }

    pub fn map_mut<T: 'static + Send + Sync, F, R>(&self, mapper: F) -> R
    where
        F: FnOnce(Option<&mut T>) -> R,
    {
        let mut binding = self.write();
        let opt = binding.get_mut::<T>();
        mapper(opt)
    }

    pub fn map<T: 'static + Send + Sync, F, R>(&self, mapper: F) -> Option<R>
    where
        F: FnOnce(&T) -> R,
    {
        self.read().get::<T>().map(mapper)
    }

    pub fn len(&self) -> usize {
        self.read().len()
    }

    pub fn is_empty(&self) -> bool {
        self.read().is_empty()
    }

    fn read(&self) -> RwLockReadGuard<anymap::Map<dyn Any + Send + Sync>> {
        self.inner.read().unwrap()
    }

    fn write(&self) -> RwLockWriteGuard<anymap::Map<dyn Any + Send + Sync>> {
        self.inner.write().unwrap()
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_plugins() {
        #[derive(Debug, Clone)]
        struct FooPlugin {
            x: i32,
        }

        #[derive(Debug, Clone)]
        struct BarPlugin {
            y: String,
        }

        let plugins = Plugins::new();

        let m = plugins.clone();
        let thread1 = std::thread::spawn(move || {
            m.insert(FooPlugin { x: 42 });

            if let Some(foo) = m.get::<FooPlugin>() {
                assert_eq!(foo.x, 42);
            }

            assert_eq!(m.map::<FooPlugin, _, _>(|foo| foo.x * 2), Some(84));
        });

        let m = plugins.clone();
        let thread2 = std::thread::spawn(move || {
            m.clone().insert(BarPlugin {
                y: "hello".to_string(),
            });

            if let Some(bar) = m.get::<BarPlugin>() {
                assert_eq!(bar.y, "hello");
            }

            m.map_mut::<BarPlugin, _, _>(|bar| {
                if let Some(bar) = bar {
                    bar.y = "world".to_string();
                }
            });

            assert_eq!(m.get::<BarPlugin>().unwrap().y, "world");
        });

        thread1.join().unwrap();
        thread2.join().unwrap();

        assert_eq!(plugins.len(), 2);
        assert!(!plugins.is_empty());
    }
}
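
For context on the swap above: the removed implementation used `RwLock`, which admits concurrent readers, while the inlined replacement serializes every access through one `Mutex`. A minimal sketch of that difference using only `std` primitives:

use std::sync::{Mutex, RwLock};

fn main() {
    let rw = RwLock::new(0u32);
    // Two read guards may be held at the same time.
    let r1 = rw.read().unwrap();
    let r2 = rw.read().unwrap();
    assert_eq!(*r1 + *r2, 0);
    drop((r1, r2));

    let m = Mutex::new(0u32);
    // A Mutex guard is exclusive: another lock() on a different thread
    // blocks until this guard is dropped.
    let g = m.lock().unwrap();
    assert_eq!(*g, 0);
}
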
@@ -1,218 +0,0 @@
// This file is copied from https://github.com/iqlusioninc/crates/blob/f98d4ccf/secrecy/src/lib.rs

//! [`SecretBox`] wrapper type for more carefully handling secret values
//! (e.g. passwords, cryptographic keys, access tokens or other credentials)
//!
//! # Goals
//!
//! - Make secret access explicit and easy-to-audit via the
//!   [`ExposeSecret`] and [`ExposeSecretMut`] traits.
//! - Prevent accidental leakage of secrets via channels like debug logging
//! - Ensure secrets are securely wiped from memory on drop
//!   (using the [`zeroize`] crate)
//!
//! Presently this crate favors a simple, `no_std`-friendly, safe (i.e.
//! `forbid(unsafe_code)`-based) implementation and does not provide more advanced
//! memory protection mechanisms, e.g. ones based on `mlock(2)`/`mprotect(2)`.
//! We may explore more advanced protection mechanisms in the future.
//! Those who don't mind `std` and `libc` dependencies should consider using
//! the [`secrets`](https://crates.io/crates/secrets) crate.
//!
//! # `serde` support
//!
//! When the `serde` feature of this crate is enabled, the [`SecretBox`] type will
//! receive a [`Deserialize`] impl for all `SecretBox<T>` types where
//! `T: DeserializeOwned`. This allows *loading* secret values from data
//! deserialized from `serde` (be careful to clean up any intermediate secrets
//! when doing this, e.g. the unparsed input!)
//!
//! To prevent exfiltration of secret values via `serde`, by default `SecretBox<T>`
//! does *not* receive a corresponding [`Serialize`] impl. If you would like
//! types of `SecretBox<T>` to be serializable with `serde`, you will need to impl
//! the [`SerializableSecret`] marker trait on `T`.

use std::fmt::Debug;
use std::{any, fmt};

use serde::{de, ser, Deserialize, Serialize};
use zeroize::{Zeroize, ZeroizeOnDrop};

/// Wrapper type for strings that contain secrets. See also [SecretBox].
pub type SecretString = SecretBox<String>;

impl From<String> for SecretString {
    fn from(value: String) -> Self {
        SecretString::new(Box::new(value))
    }
}

/// Wrapper type for values that contain secrets, which attempts to limit
/// accidental exposure and ensure secrets are wiped from memory when dropped.
/// (e.g. passwords, cryptographic keys, access tokens or other credentials)
///
/// Access to the secret inner value occurs through the [`ExposeSecret`]
/// or [`ExposeSecretMut`] traits, which provide methods for accessing the inner secret value.
pub struct SecretBox<S: Zeroize> {
    inner_secret: Box<S>,
}

impl<S: Zeroize> Zeroize for SecretBox<S> {
    fn zeroize(&mut self) {
        self.inner_secret.as_mut().zeroize()
    }
}

impl<S: Zeroize> Drop for SecretBox<S> {
    fn drop(&mut self) {
        self.zeroize()
    }
}

impl<S: Zeroize> ZeroizeOnDrop for SecretBox<S> {}

impl<S: Zeroize> From<Box<S>> for SecretBox<S> {
    fn from(source: Box<S>) -> Self {
        Self::new(source)
    }
}

impl<S: Zeroize> SecretBox<S> {
    /// Create a secret value using a pre-boxed value.
    pub fn new(boxed_secret: Box<S>) -> Self {
        Self {
            inner_secret: boxed_secret,
        }
    }
}

impl<S: Zeroize + Default> SecretBox<S> {
    /// Create a secret value using a function that can initialize the value in-place.
    pub fn new_with_mut(ctr: impl FnOnce(&mut S)) -> Self {
        let mut secret = Self::default();
        ctr(secret.expose_secret_mut());
        secret
    }
}

impl<S: Zeroize + Clone> SecretBox<S> {
    /// Create a secret value using the provided function as a constructor.
    ///
    /// The implementation makes an effort to zeroize the locally constructed value
    /// before it is copied to the heap, and constructing it inside the closure minimizes
    /// the possibility of it being accidentally copied by other code.
    ///
    /// **Note:** using [`Self::new`] or [`Self::new_with_mut`] is preferable when possible,
    /// since this method's safety relies on empirical evidence and may be violated on some targets.
    pub fn new_with_ctr(ctr: impl FnOnce() -> S) -> Self {
        let mut data = ctr();
        let secret = Self {
            inner_secret: Box::new(data.clone()),
        };
        data.zeroize();
        secret
    }

    /// Same as [`Self::new_with_ctr`], but the constructor can be fallible.
    ///
    /// **Note:** using [`Self::new`] or [`Self::new_with_mut`] is preferable when possible,
    /// since this method's safety relies on empirical evidence and may be violated on some targets.
    pub fn try_new_with_ctr<E>(ctr: impl FnOnce() -> Result<S, E>) -> Result<Self, E> {
        let mut data = ctr()?;
        let secret = Self {
            inner_secret: Box::new(data.clone()),
        };
        data.zeroize();
        Ok(secret)
    }
}

impl<S: Zeroize + Default> Default for SecretBox<S> {
    fn default() -> Self {
        Self {
            inner_secret: Box::<S>::default(),
        }
    }
}

impl<S: Zeroize> Debug for SecretBox<S> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "SecretBox<{}>([REDACTED])", any::type_name::<S>())
    }
}

impl<S> Clone for SecretBox<S>
where
    S: Clone + Zeroize,
{
    fn clone(&self) -> Self {
        SecretBox {
            inner_secret: self.inner_secret.clone(),
        }
    }
}

impl<S: Zeroize> ExposeSecret<S> for SecretBox<S> {
    fn expose_secret(&self) -> &S {
        self.inner_secret.as_ref()
    }
}

impl<S: Zeroize> ExposeSecretMut<S> for SecretBox<S> {
    fn expose_secret_mut(&mut self) -> &mut S {
        self.inner_secret.as_mut()
    }
}

/// Expose a reference to an inner secret
pub trait ExposeSecret<S> {
    /// Expose secret: this is the only method providing access to a secret.
    fn expose_secret(&self) -> &S;
}

/// Expose a mutable reference to an inner secret
pub trait ExposeSecretMut<S> {
    /// Expose secret: this is the only method providing mutable access to a secret.
    fn expose_secret_mut(&mut self) -> &mut S;
}

/// Marker trait for secret types which can be [`Serialize`]-d by [`serde`].
///
/// When the `serde` feature of this crate is enabled and types are marked with
/// this trait, they receive a [`Serialize` impl][1] for `SecretBox<T>`.
/// (NOTE: all types which impl `DeserializeOwned` receive a [`Deserialize`]
/// impl)
///
/// This is done deliberately to prevent accidental exfiltration of secrets
/// via `serde` serialization.
///
/// If you really want to have `serde` serialize those types, use the
/// [`serialize_with`][2] attribute to specify a serializer that exposes the secret.
///
/// [1]: https://docs.rs/secrecy/latest/secrecy/struct.Secret.html#implementations
/// [2]: https://serde.rs/field-attrs.html#serialize_with
pub trait SerializableSecret: Serialize {}

impl<'de, T> Deserialize<'de> for SecretBox<T>
where
    T: Zeroize + Clone + de::DeserializeOwned + Sized,
{
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: de::Deserializer<'de>,
    {
        Self::try_new_with_ctr(|| T::deserialize(deserializer))
    }
}

impl<T> Serialize for SecretBox<T>
where
    T: Zeroize + SerializableSecret + Serialize + Sized,
{
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: ser::Serializer,
    {
        self.expose_secret().serialize(serializer)
    }
}
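
A short usage sketch of the `SecretString`/`SecretBox` API shown above (assuming it is exported as `common_base::secrets`, per the `pub mod secrets;` line earlier in the diff):

use common_base::secrets::{ExposeSecret, SecretString};

fn main() {
    let password: SecretString = "super-secret".to_string().into();
    // Debug output redacts the inner value.
    assert_eq!(
        format!("{password:?}"),
        "SecretBox<alloc::string::String>([REDACTED])"
    );
    // Access to the plaintext is explicit and easy to grep for.
    assert_eq!(password.expose_secret(), "super-secret");
}
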
@@ -19,9 +19,6 @@ pub const DEFAULT_CATALOG_NAME: &str = "greptime";
pub const DEFAULT_SCHEMA_NAME: &str = "public";
pub const DEFAULT_PRIVATE_SCHEMA_NAME: &str = "greptime_private";

/// Reserves [0,MIN_USER_FLOW_ID) for internal usage.
/// User defined flow id starts from this value.
pub const MIN_USER_FLOW_ID: u32 = 1024;
/// Reserves [0,MIN_USER_TABLE_ID) for internal usage.
/// User defined table id starts from this value.
pub const MIN_USER_TABLE_ID: u32 = 1024;
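
A hypothetical helper showing how the reserved range is meant to be used (the constant is inlined so the sketch is self-contained):

const MIN_USER_TABLE_ID: u32 = 1024; // mirrors the constant above

// Hypothetical predicate: ids below MIN_USER_TABLE_ID are reserved for internal tables.
fn is_reserved_table_id(table_id: u32) -> bool {
    table_id < MIN_USER_TABLE_ID
}

fn main() {
    assert!(is_reserved_table_id(31)); // e.g. information_schema.cluster_info
    assert!(!is_reserved_table_id(1024)); // first user-defined table id
}
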
@@ -91,8 +88,6 @@ pub const INFORMATION_SCHEMA_PARTITIONS_TABLE_ID: u32 = 28;
pub const INFORMATION_SCHEMA_REGION_PEERS_TABLE_ID: u32 = 29;
/// id for information_schema.table_constraints
pub const INFORMATION_SCHEMA_TABLE_CONSTRAINTS_TABLE_ID: u32 = 30;
/// id for information_schema.cluster_info
pub const INFORMATION_SCHEMA_CLUSTER_INFO_TABLE_ID: u32 = 31;
/// ----- End of information_schema tables -----

pub const MITO_ENGINE: &str = "mito";

@@ -28,12 +28,6 @@ pub fn format_full_table_name(catalog: &str, schema: &str, table: &str) -> String {
    format!("{catalog}.{schema}.{table}")
}

/// Formats the fully-qualified flow name
#[inline]
pub fn format_full_flow_name(catalog: &str, flow: &str) -> String {
    format!("{catalog}.{flow}")
}

/// Build the db name from the catalog and schema strings
pub fn build_db_string(catalog: &str, schema: &str) -> String {
    if catalog == DEFAULT_CATALOG_NAME {

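A quick sanity check of the two formatters (functions copied from above so the sketch runs standalone):

fn format_full_table_name(catalog: &str, schema: &str, table: &str) -> String {
    format!("{catalog}.{schema}.{table}")
}

fn format_full_flow_name(catalog: &str, flow: &str) -> String {
    format!("{catalog}.{flow}")
}

fn main() {
    assert_eq!(
        format_full_table_name("greptime", "public", "metrics"),
        "greptime.public.metrics"
    );
    assert_eq!(format_full_flow_name("greptime", "my_flow"), "greptime.my_flow");
}
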
@@ -21,16 +21,6 @@ pub fn metadata_store_dir(store_dir: &str) -> String {
    format!("{store_dir}/metadata")
}

/// The server running mode
#[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq, Copy)]
#[serde(rename_all = "lowercase")]
pub enum Mode {
    // The single-process mode.
    Standalone,
    // The distributed cluster mode.
    Distributed,
}

#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
#[serde(default)]
pub struct KvBackendConfig {

@@ -213,4 +213,34 @@ impl ErrorExt for Error {
    fn as_any(&self) -> &dyn Any {
        self
    }

    fn location_opt(&self) -> Option<common_error::snafu::Location> {
        use Error::*;
        match self {
            OrcReader { location, .. } => Some(*location),
            BuildBackend { location, .. } => Some(*location),
            ReadObject { location, .. } => Some(*location),
            ListObjects { location, .. } => Some(*location),
            InferSchema { location, .. } => Some(*location),
            ReadParquet { location, .. } => Some(*location),
            ParquetToSchema { location, .. } => Some(*location),
            JoinHandle { location, .. } => Some(*location),
            ParseFormat { location, .. } => Some(*location),
            MergeSchema { location, .. } => Some(*location),
            WriteObject { location, .. } => Some(*location),
            ReadRecordBatch { location, .. } => Some(*location),
            WriteRecordBatch { location, .. } => Some(*location),
            AsyncWrite { location, .. } => Some(*location),
            EncodeRecordBatch { location, .. } => Some(*location),
            BufferedWriterClosed { location, .. } => Some(*location),

            UnsupportedBackendProtocol { location, .. } => Some(*location),
            EmptyHostPath { location, .. } => Some(*location),
            InvalidUrl { location, .. } => Some(*location),
            InvalidConnection { location, .. } => Some(*location),
            UnsupportedCompressionType { location, .. } => Some(*location),
            UnsupportedFormat { location, .. } => Some(*location),
            WriteParquet { location, .. } => Some(*location),
        }
    }
}

@@ -35,7 +35,7 @@ pub fn parse_url(url: &str) -> Result<(String, Option<String>, String)> {
    #[cfg(windows)]
    {
        // On Windows, the url may start with `C:/`.
        if handle_windows_path(url).is_some() {
        if let Some(_) = handle_windows_path(url) {
            return Ok((FS_SCHEMA.to_string(), None, url.to_string()));
        }
    }

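The two conditional spellings above are interchangeable when the binding is unused; clippy's `redundant_pattern_matching` lint in fact prefers `.is_some()`. A standalone sketch:

fn main() {
    let v: Option<u32> = Some(1);
    // These two tests produce identical control flow.
    assert_eq!(v.is_some(), matches!(v, Some(_)));
    if let Some(_) = v {
        // equivalent to `if v.is_some() { ... }`
    }
}
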
@@ -56,6 +56,14 @@ impl ErrorExt for Error {
        }
    }

    fn location_opt(&self) -> Option<common_error::snafu::Location> {
        match self {
            Error::BigDecimalOutOfRange { location, .. } => Some(*location),
            Error::InvalidPrecisionOrScale { location, .. } => Some(*location),
            Error::ParseRustDecimalStr { .. } | Error::ParseBigDecimalStr { .. } => None,
        }
    }

    fn as_any(&self) -> &dyn std::any::Any {
        self
    }

@@ -24,6 +24,13 @@ pub trait ErrorExt: StackError {
        StatusCode::Unknown
    }

    // TODO(ruihang): remove this default implementation
    /// Get the location of this error; returns None if the location is unavailable.
    /// The `_opt` suffix avoids confusion with the similar method on `std::error::Error`.
    fn location_opt(&self) -> Option<crate::snafu::Location> {
        None
    }

    /// Returns the error as [Any](std::any::Any) so that it can be
    /// downcast to a specific implementation.
    fn as_any(&self) -> &dyn Any;
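
A minimal sketch of overriding the new `location_opt` default on a concrete type (hypothetical `MyError`; the method is shown as an inherent one so the sketch stays self-contained, since the real trait also requires `as_any` and `StackError`):

use snafu::{GenerateImplicitData, Location};

#[derive(Debug)]
struct MyError {
    location: Location,
}

impl MyError {
    // Mirrors ErrorExt::location_opt for a type that carries a Location.
    fn location_opt(&self) -> Option<Location> {
        Some(self.location)
    }
}

fn main() {
    let err = MyError {
        location: Location::generate(),
    };
    assert!(err.location_opt().is_some());
}
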
@@ -109,9 +116,9 @@ impl BoxedError {

impl std::fmt::Debug for BoxedError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut buf = vec![];
        self.debug_fmt(0, &mut buf);
        write!(f, "{}", buf.join("\n"))
        // Use the pretty debug format of the inner error for this opaque error.
        let debug_format = crate::format::DebugFormat::new(&*self.inner);
        debug_format.fmt(f)
    }
}

@@ -132,6 +139,10 @@ impl crate::ext::ErrorExt for BoxedError {
        self.inner.status_code()
    }

    fn location_opt(&self) -> Option<crate::snafu::Location> {
        self.inner.location_opt()
    }

    fn as_any(&self) -> &dyn std::any::Any {
        self.inner.as_any()
    }
@@ -185,6 +196,10 @@ impl crate::ext::ErrorExt for PlainError {
        self.status_code
    }

    fn location_opt(&self) -> Option<crate::snafu::Location> {
        None
    }

    fn as_any(&self) -> &dyn std::any::Any {
        self as _
    }

src/common/error/src/format.rs (new file, 154 lines)
@@ -0,0 +1,154 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::fmt;

use crate::ext::ErrorExt;

/// Pretty debug format for error; also prints the source and location.
pub struct DebugFormat<'a, E: ?Sized>(&'a E);

impl<'a, E: ?Sized> DebugFormat<'a, E> {
    /// Create a new format struct from `err`.
    pub fn new(err: &'a E) -> Self {
        Self(err)
    }
}

impl<'a, E: ErrorExt + ?Sized> fmt::Debug for DebugFormat<'a, E> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}.", self.0)?;
        if let Some(source) = self.0.source() {
            // Use the source error's debug format for more verbose info.
            write!(f, " Caused by: {source:?}")?;
        }
        if let Some(location) = self.0.location_opt() {
            // Print the error location at the end.
            write!(f, " at: {location}")?;
        }

        Ok(())
    }
}

#[cfg(test)]
mod tests {
    use std::any::Any;

    use snafu::prelude::*;
    use snafu::{GenerateImplicitData, Location};

    use super::*;
    use crate::ext::StackError;

    #[derive(Debug, Snafu)]
    #[snafu(display("This is a leaf error"))]
    struct Leaf;

    impl ErrorExt for Leaf {
        fn location_opt(&self) -> Option<Location> {
            None
        }

        fn as_any(&self) -> &dyn Any {
            self
        }
    }

    impl StackError for Leaf {
        fn debug_fmt(&self, _: usize, _: &mut Vec<String>) {}

        fn next(&self) -> Option<&dyn StackError> {
            None
        }
    }

    #[derive(Debug, Snafu)]
    #[snafu(display("This is a leaf with location"))]
    struct LeafWithLocation {
        location: Location,
    }

    impl ErrorExt for LeafWithLocation {
        fn location_opt(&self) -> Option<Location> {
            None
        }

        fn as_any(&self) -> &dyn Any {
            self
        }
    }

    impl StackError for LeafWithLocation {
        fn debug_fmt(&self, _: usize, _: &mut Vec<String>) {}

        fn next(&self) -> Option<&dyn StackError> {
            None
        }
    }

    #[derive(Debug, Snafu)]
    #[snafu(display("Internal error"))]
    struct Internal {
        #[snafu(source)]
        source: Leaf,
        location: Location,
    }

    impl ErrorExt for Internal {
        fn location_opt(&self) -> Option<Location> {
            None
        }

        fn as_any(&self) -> &dyn Any {
            self
        }
    }

    impl StackError for Internal {
        fn debug_fmt(&self, layer: usize, buf: &mut Vec<String>) {
            buf.push(format!("{}: Internal error, at {}", layer, self.location));
            self.source.debug_fmt(layer + 1, buf);
        }

        fn next(&self) -> Option<&dyn StackError> {
            Some(&self.source)
        }
    }

    #[test]
    fn test_debug_format() {
        let err = Leaf;

        let msg = format!("{:?}", DebugFormat::new(&err));
        assert_eq!("This is a leaf error.", msg);

        let err = LeafWithLocation {
            location: Location::generate(),
        };

        // TODO(ruihang): display location here
        let msg = format!("{:?}", DebugFormat::new(&err));
        assert!(msg.starts_with("This is a leaf with location."));

        let err = Internal {
            source: Leaf,
            location: Location::generate(),
        };

        // TODO(ruihang): display location here
        let msg = format!("{:?}", DebugFormat::new(&err));
        assert!(msg.contains("Internal error. Caused by: Leaf"));
    }
}
@@ -15,6 +15,7 @@
#![feature(error_iter)]

pub mod ext;
pub mod format;
pub mod mock;
pub mod status_code;

@@ -17,6 +17,8 @@
use std::any::Any;
use std::fmt;

use snafu::Location;

use crate::ext::{ErrorExt, StackError};
use crate::status_code::StatusCode;

@@ -59,6 +61,10 @@ impl ErrorExt for MockError {
        self.code
    }

    fn location_opt(&self) -> Option<Location> {
        None
    }

    fn as_any(&self) -> &dyn Any {
        self
    }

@@ -59,7 +59,6 @@ pub enum StatusCode {
    RegionNotFound = 4005,
    RegionAlreadyExists = 4006,
    RegionReadonly = 4007,
    /// Region is not in a proper state to handle a specific request.
    RegionNotReady = 4008,
    // If mutually exclusive operations arrive at the same time, only one can
    // be executed; the other will get region busy.
@@ -97,11 +96,6 @@ pub enum StatusCode {
    /// User is not authorized to perform the operation
    PermissionDenied = 7006,
    // ====== End of auth related status code =====

    // ====== Begin of flow related status code =====
    FlowAlreadyExists = 8000,
    FlowNotFound = 8001,
    // ====== End of flow related status code =====
}

impl StatusCode {
@@ -130,10 +124,8 @@ impl StatusCode {
            | StatusCode::EngineExecuteQuery
            | StatusCode::TableAlreadyExists
            | StatusCode::TableNotFound
            | StatusCode::RegionAlreadyExists
            | StatusCode::RegionNotFound
            | StatusCode::FlowAlreadyExists
            | StatusCode::FlowNotFound
            | StatusCode::RegionAlreadyExists
            | StatusCode::RegionReadonly
            | StatusCode::TableColumnNotFound
            | StatusCode::TableColumnExists
@@ -168,12 +160,10 @@ impl StatusCode {
            | StatusCode::InvalidSyntax
            | StatusCode::TableAlreadyExists
            | StatusCode::TableNotFound
            | StatusCode::RegionAlreadyExists
            | StatusCode::RegionNotFound
            | StatusCode::FlowAlreadyExists
            | StatusCode::FlowNotFound
            | StatusCode::RegionNotReady
            | StatusCode::RegionBusy
            | StatusCode::RegionAlreadyExists
            | StatusCode::RegionReadonly
            | StatusCode::TableColumnNotFound
            | StatusCode::TableColumnExists

@@ -1,16 +0,0 @@
[package]
name = "common-frontend"
version.workspace = true
edition.workspace = true
license.workspace = true

[dependencies]
api.workspace = true
async-trait.workspace = true
common-base.workspace = true
common-error.workspace = true
common-macro.workspace = true
common-query.workspace = true
session.workspace = true
snafu.workspace = true
sql.workspace = true
@@ -1,44 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use common_error::ext::{BoxedError, ErrorExt};
use common_error::status_code::StatusCode;
use common_macro::stack_trace_debug;
use snafu::{Location, Snafu};

#[derive(Snafu)]
#[snafu(visibility(pub))]
#[stack_trace_debug]
pub enum Error {
    #[snafu(display("External error"))]
    External {
        location: Location,
        source: BoxedError,
    },
}

pub type Result<T> = std::result::Result<T, Error>;

impl ErrorExt for Error {
    fn status_code(&self) -> StatusCode {
        use Error::*;
        match self {
            External { source, .. } => source.status_code(),
        }
    }

    fn as_any(&self) -> &dyn std::any::Any {
        self
    }
}
@@ -1,38 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use api::v1::{RowDeleteRequests, RowInsertRequests};
use async_trait::async_trait;
use common_query::Output;
use session::context::QueryContextRef;

use crate::error::Result;

/// [FrontendInvoker] provides the ability to:
/// - Insert rows
/// - Delete rows
#[async_trait]
pub trait FrontendInvoker {
    async fn row_inserts(
        &self,
        requests: RowInsertRequests,
        ctx: QueryContextRef,
    ) -> Result<Output>;

    async fn row_deletes(
        &self,
        requests: RowDeleteRequests,
        ctx: QueryContextRef,
    ) -> Result<Output>;
}
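
A sketch of what an implementor of the removed trait looked like, with the GreptimeDB-specific types reduced to illustrative placeholders (`Rows`, `Ctx`, and `MockFrontend` are stand-ins; the sketch compiles as-is, though driving it needs an async runtime):

use async_trait::async_trait;

// Placeholder stand-ins for RowInsertRequests / RowDeleteRequests /
// QueryContextRef / Output from the real signatures above.
type Rows = Vec<String>;
type Ctx = ();
type Output = usize;

#[async_trait]
trait FrontendInvoker {
    async fn row_inserts(&self, requests: Rows, ctx: Ctx) -> Output;
    async fn row_deletes(&self, requests: Rows, ctx: Ctx) -> Output;
}

struct MockFrontend;

#[async_trait]
impl FrontendInvoker for MockFrontend {
    async fn row_inserts(&self, requests: Rows, _ctx: Ctx) -> Output {
        requests.len() // pretend every row was inserted
    }

    async fn row_deletes(&self, requests: Rows, _ctx: Ctx) -> Output {
        requests.len()
    }
}

fn main() {
    let _invoker = MockFrontend;
    // Calling the async methods requires an executor, e.g. tokio.
}
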
@@ -1,16 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

pub mod error;
pub mod handler;
@@ -35,7 +35,7 @@ impl FunctionContext {
    #[cfg(any(test, feature = "testing"))]
    pub fn mock() -> Self {
        Self {
            query_ctx: QueryContextBuilder::default().build().into(),
            query_ctx: QueryContextBuilder::default().build(),
            state: Arc::new(FunctionState::mock()),
        }
    }
@@ -44,7 +44,7 @@ impl FunctionContext {
impl Default for FunctionContext {
    fn default() -> Self {
        Self {
            query_ctx: QueryContextBuilder::default().build().into(),
            query_ctx: QueryContextBuilder::default().build(),
            state: Arc::new(FunctionState::default()),
        }
    }

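The two hunks above drop an `.into()` after `QueryContextBuilder::default().build()`, which suggests `build()` now returns the `QueryContextRef` directly. A sketch of the pattern with hypothetical types:

use std::sync::Arc;

#[derive(Default)]
struct QueryContext;
type QueryContextRef = Arc<QueryContext>;

#[derive(Default)]
struct QueryContextBuilder;

impl QueryContextBuilder {
    // Hypothetical: build() hands back the Arc'd ref directly,
    // so call sites no longer need a trailing `.into()`.
    fn build(self) -> QueryContextRef {
        Arc::new(QueryContext)
    }
}

fn main() {
    let _ctx: QueryContextRef = QueryContextBuilder::default().build();
}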