Mirror of https://github.com/GreptimeTeam/greptimedb.git, synced 2026-01-04 20:32:56 +00:00.
Compare commits: flow_p3_re ... poc_datafl (114 commits)

Commits (SHA1):
93fcb7454c, cfe28b6974, 9ec6107988, e840bb469d, 684850f451, 76aadb2223, f360b2e812, 138a2aba7f,
8f6462c0b0, 46d0b3cd64, a17a7f4e47, 50335dd53c, abaf881f06, e1a8215394, d7942a1a00, a6727e2e8d,
d5bdbedcd6, 878737f781, d88cff6f51, e7801abd0c, d7a132a02f, a3417f50cf, 099f414f63, c22185abce,
e33afa53f4, 7eaf471808, acba753500, 5736373820, 74dee25688, edcbc89c38, e88a40b58b, c7647759be,
d8a191a2db, ea40691c71, 640674b9bc, 3fb3fb18c2, 1067d3453d, 57e3912aca, ebcfb0f1d7, 6442c96847,
b19febc97c, 8240a1ace1, 89dbf6ddd2, 66aa08d815, b8a325d18c, ed95e99556, 5545a8b023, 5140d247e3,
f995f6099f, 7de62ef5d0, 0e05f85a9d, a6a702de4e, d99746385b, 9d8f72d611, c07a1babd5, cc8d6b1200,
5274806108, 6e1cc1df55, 65f80af9a9, a68072cb21, 71c1c7ca24, 1b5862223c, c0be0c30de, 154f561da1,
aa2934b422, 1b93a026c2, 530353785c, 573c19be32, f3b68253c2, 6e9e8fad26, 6e12e1b84b, 7d447c20c5,
9c3b9600ca, 73fe075049, 2748cec7e2, 65d47bab56, f6e2039eb8, 3b89b9ddd8, 695746193b, 573d369f77,
e6eca8ca0c, e84b1eefdf, 777bc3b89d, 81f3007f6f, 863ee608ca, 20cbc039e6, d11b1fa389, a0f4881c6e,
aba5e41799, 371d4cf9f5, 8e3515d396, 701aba9cdb, b493ea1b38, 336db38ce9, c387687262, 7ef18c0915,
1bbde15a15, 3dac7cbe37, 08263995f6, c0b909330a, dadee99d69, f29aebf89f, e154dc5fd4, ed8b13689e,
3112ced9c0, e410192560, eb3d2ca759, 934c7e3fef, d8ea7c5585, 77fc1e6de0, 4eadd9f8a8, 1ec595134d,
9206f60b28, 2d0f493040
```diff
@@ -24,3 +24,7 @@ GT_KAFKA_ENDPOINTS = localhost:9092
 # Setting for fuzz tests
 GT_MYSQL_ADDR = localhost:4002
 
+# Setting for unstable fuzz tests
+GT_FUZZ_BINARY_PATH=/path/to/
+GT_FUZZ_INSTANCE_ROOT_DIR=/tmp/unstable_greptime
+
```
12 .github/actions/fuzz-test/action.yaml vendored

```diff
@@ -3,11 +3,17 @@ description: 'Fuzz test given setup and service'
 inputs:
   target:
     description: "The fuzz target to test"
+    required: true
+  max-total-time:
+    description: "Max total time(secs)"
+    required: true
+  unstable:
+    default: 'false'
+    description: "Enable unstable feature"
 runs:
   using: composite
   steps:
     - name: Run Fuzz Test
       shell: bash
-      run: cargo fuzz run ${{ inputs.target }} --fuzz-dir tests-fuzz -D -s none -- -max_total_time=120
-      env:
-        GT_MYSQL_ADDR: 127.0.0.1:4002
+      run: cargo fuzz run ${{ inputs.target }} --fuzz-dir tests-fuzz -D -s none ${{ inputs.unstable == 'true' && '--features=unstable' || '' }} -- -max_total_time=${{ inputs.max-total-time }}
```
@@ -1,13 +0,0 @@ (file removed)

```json
{
  "LABEL": {
    "name": "breaking change",
    "color": "D93F0B"
  },
  "CHECKS": {
    "regexp": "^(?:(?!!:).)*$",
    "ignoreLabels": [
      "ignore-title"
    ],
    "alwaysPassCI": true
  }
}
```
12 .github/pr-title-checker-config.json vendored

@@ -1,12 +0,0 @@ (file removed)

```json
{
  "LABEL": {
    "name": "Invalid PR Title",
    "color": "B60205"
  },
  "CHECKS": {
    "regexp": "^(feat|fix|test|refactor|chore|style|docs|perf|build|ci|revert)(\\(.*\\))?\\!?:.*",
    "ignoreLabels": [
      "ignore-title"
    ]
  }
}
```
73 .github/workflows/develop.yml vendored

```diff
@@ -38,13 +38,20 @@ jobs:
     runs-on: ubuntu-20.04
     steps:
       - uses: actions/checkout@v4
-      - uses: crate-ci/typos@v1.13.10
+      - uses: crate-ci/typos@master
       - name: Check the config docs
        run: |
          make config-docs && \
          git diff --name-only --exit-code ./config/config.md \
          || (echo "'config/config.md' is not up-to-date, please run 'make config-docs'." && exit 1)
 
+  license-header-check:
+    runs-on: ubuntu-20.04
+    name: Check License Header
+    steps:
+      - uses: actions/checkout@v4
+      - uses: korandoru/hawkeye@v5
+
   check:
     name: Check
     runs-on: ${{ matrix.os }}
@@ -107,9 +114,13 @@ jobs:
         with:
          # Shares across multiple jobs
          shared-key: "build-binaries"
+      - name: Install cargo-gc-bin
+        shell: bash
+        run: cargo install cargo-gc-bin
       - name: Build greptime binaries
         shell: bash
-        run: cargo build --bin greptime --bin sqlness-runner
+        # `cargo gc` will invoke `cargo build` with specified args
+        run: cargo gc -- --bin greptime --bin sqlness-runner
       - name: Pack greptime binaries
         shell: bash
         run: |
@@ -130,7 +141,7 @@ jobs:
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        target: [ "fuzz_create_table", "fuzz_alter_table", "fuzz_create_database" ]
+        target: [ "fuzz_create_table", "fuzz_alter_table", "fuzz_create_database", "fuzz_create_logical_table", "fuzz_alter_logical_table", "fuzz_insert", "fuzz_insert_logical_table" ]
     steps:
       - uses: actions/checkout@v4
       - uses: arduino/setup-protoc@v3
@@ -164,8 +175,62 @@ jobs:
         uses: ./.github/actions/fuzz-test
         env:
           CUSTOM_LIBFUZZER_PATH: /usr/lib/llvm-14/lib/libFuzzer.a
+          GT_MYSQL_ADDR: 127.0.0.1:4002
         with:
           target: ${{ matrix.target }}
+          max-total-time: 120
+
+  unstable-fuzztest:
+    name: Unstable Fuzz Test
+    needs: build
+    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        target: [ "unstable_fuzz_create_table_standalone" ]
+    steps:
+      - uses: actions/checkout@v4
+      - uses: arduino/setup-protoc@v3
+        with:
+          repo-token: ${{ secrets.GITHUB_TOKEN }}
+      - uses: dtolnay/rust-toolchain@master
+        with:
+          toolchain: ${{ env.RUST_TOOLCHAIN }}
+      - name: Rust Cache
+        uses: Swatinem/rust-cache@v2
+        with:
+          # Shares across multiple jobs
+          shared-key: "fuzz-test-targets"
+      - name: Set Rust Fuzz
+        shell: bash
+        run: |
+          sudo apt update && sudo apt install -y libfuzzer-14-dev
+          cargo install cargo-fuzz
+      - name: Download pre-built binaries
+        uses: actions/download-artifact@v4
+        with:
+          name: bins
+          path: .
+      - name: Unzip binaries
+        run: tar -xvf ./bins.tar.gz
+      - name: Fuzz Test
+        uses: ./.github/actions/fuzz-test
+        env:
+          CUSTOM_LIBFUZZER_PATH: /usr/lib/llvm-14/lib/libFuzzer.a
+          GT_MYSQL_ADDR: 127.0.0.1:4002
+          GT_FUZZ_BINARY_PATH: ./bins/greptime
+          GT_FUZZ_INSTANCE_ROOT_DIR: /tmp/unstable-greptime/
+        with:
+          target: ${{ matrix.target }}
+          max-total-time: 120
+          unstable: 'true'
+      - name: Upload unstable fuzz test logs
+        if: failure()
+        uses: actions/upload-artifact@v4
+        with:
+          name: unstable-fuzz-logs
+          path: /tmp/unstable-greptime/
+          retention-days: 3
+
   sqlness:
     name: Sqlness Test
@@ -265,7 +330,7 @@ jobs:
          # Shares with `Check` job
          shared-key: "check-lint"
       - name: Run cargo clippy
-        run: cargo clippy --workspace --all-targets -- -D warnings
+        run: make clippy
 
   coverage:
     if: github.event.pull_request.draft == false
```
9 .github/workflows/docs.yml vendored

```diff
@@ -34,7 +34,14 @@ jobs:
     runs-on: ubuntu-20.04
     steps:
       - uses: actions/checkout@v4
-      - uses: crate-ci/typos@v1.13.10
+      - uses: crate-ci/typos@master
+
+  license-header-check:
+    runs-on: ubuntu-20.04
+    name: Check License Header
+    steps:
+      - uses: actions/checkout@v4
+      - uses: korandoru/hawkeye@v5
 
   check:
     name: Check
```
16 .github/workflows/license.yaml vendored

@@ -1,16 +0,0 @@ (file removed)

```yaml
name: License checker

on:
  push:
    branches:
      - main
  pull_request:
    types: [opened, synchronize, reopened, ready_for_review]
jobs:
  license-header-check:
    runs-on: ubuntu-20.04
    name: license-header-check
    steps:
      - uses: actions/checkout@v4
      - name: Check License Header
        uses: korandoru/hawkeye@v5
```
31 .github/workflows/nightly-ci.yml vendored

```diff
@@ -1,5 +1,3 @@
-# Nightly CI: runs tests every night for our second tier plaforms (Windows)
-
 on:
   schedule:
     - cron: '0 23 * * 1-5'
@@ -15,13 +13,29 @@ env:
   RUST_TOOLCHAIN: nightly-2024-04-18
 
 jobs:
-  sqlness:
-    name: Sqlness Test
+  sqlness-test:
+    name: Run sqlness test
     if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
-    runs-on: ${{ matrix.os }}
-    strategy:
-      matrix:
-        os: [ windows-latest-8-cores ]
+    runs-on: ubuntu-22.04
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+
+      - name: Run sqlness test
+        uses: ./.github/actions/sqlness-test
+        with:
+          data-root: sqlness-test
+          aws-ci-test-bucket: ${{ vars.AWS_CI_TEST_BUCKET }}
+          aws-region: ${{ vars.AWS_CI_TEST_BUCKET_REGION }}
+          aws-access-key-id: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }}
+          aws-secret-access-key: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }}
+
+  sqlness-windows:
+    name: Sqlness tests on Windows
+    if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
+    runs-on: windows-latest-8-cores
     timeout-minutes: 60
     steps:
       - uses: actions/checkout@v4
@@ -52,6 +66,7 @@ jobs:
       retention-days: 3
 
   test-on-windows:
+    name: Run tests on Windows
     if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
     runs-on: windows-latest-8-cores
     timeout-minutes: 60
```
27 .github/workflows/nightly-funtional-tests.yml vendored

@@ -1,27 +0,0 @@ (file removed)

```yaml
name: Nightly functional tests

on:
  schedule:
    # At 00:00 on Tuesday.
    - cron: '0 0 * * 2'
  workflow_dispatch:

jobs:
  sqlness-test:
    name: Run sqlness test
    if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
    runs-on: ubuntu-22.04
    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Run sqlness test
        uses: ./.github/actions/sqlness-test
        with:
          data-root: sqlness-test
          aws-ci-test-bucket: ${{ vars.AWS_CI_TEST_BUCKET }}
          aws-region: ${{ vars.AWS_CI_TEST_BUCKET_REGION }}
          aws-access-key-id: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }}
          aws-secret-access-key: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }}
```
29 .github/workflows/pr-title-checker.yml vendored

@@ -1,29 +0,0 @@ (file removed)

```yaml
name: "PR Title Checker"
on:
  pull_request_target:
    types:
      - opened
      - edited
      - synchronize
      - labeled
      - unlabeled

jobs:
  check:
    runs-on: ubuntu-20.04
    timeout-minutes: 10
    steps:
      - uses: thehanimo/pr-title-checker@v1.4.2
        with:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          pass_on_octokit_error: false
          configuration_path: ".github/pr-title-checker-config.json"
  breaking:
    runs-on: ubuntu-20.04
    timeout-minutes: 10
    steps:
      - uses: thehanimo/pr-title-checker@v1.4.2
        with:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          pass_on_octokit_error: false
          configuration_path: ".github/pr-title-breaking-change-label-config.json"
```
2 .github/workflows/release.yml vendored

```diff
@@ -436,7 +436,7 @@ jobs:
       github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
 
   notification:
-    if: ${{ always() }} # Not requiring successful dependent jobs, always run.
+    if: ${{ always() || github.repository == 'GreptimeTeam/greptimedb' }}
     name: Send notification to Greptime team
     needs: [
       release-images-to-dockerhub,
```
33 .github/workflows/schedule.yml vendored Normal file

@@ -0,0 +1,33 @@ (new file)

```yaml
name: Schedule Management
on:
  schedule:
    - cron: '4 2 * * *'
  workflow_dispatch:

permissions:
  contents: read
  issues: write
  pull-requests: write

jobs:
  maintenance:
    name: Periodic Maintenance
    runs-on: ubuntu-latest
    if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-node@v4
        with:
          node-version: 22
      - uses: pnpm/action-setup@v3
        with:
          package_json_file: 'cyborg/package.json'
          run_install: true
      - name: Describe the Environment
        working-directory: cyborg
        run: pnpm tsx -v
      - name: Do Maintenance
        working-directory: cyborg
        run: pnpm tsx bin/schedule.ts
        env:
          GITHUB_TOKEN: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
```
30 .github/workflows/semantic-pull-request.yml vendored Normal file

@@ -0,0 +1,30 @@ (new file)

```yaml
name: "Semantic Pull Request"

on:
  pull_request_target:
    types:
      - opened
      - reopened
      - edited

jobs:
  check:
    runs-on: ubuntu-20.04
    timeout-minutes: 10
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-node@v4
        with:
          node-version: 22
      - uses: pnpm/action-setup@v3
        with:
          package_json_file: 'cyborg/package.json'
          run_install: true
      - name: Describe the Environment
        working-directory: cyborg
        run: pnpm tsx -v
      - name: Check Pull Request
        working-directory: cyborg
        run: pnpm tsx bin/check-pull-request.ts
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
```
21 .github/workflows/unassign.yml vendored

@@ -1,21 +0,0 @@ (file removed)

```yaml
name: Auto Unassign
on:
  schedule:
    - cron: '4 2 * * *'
  workflow_dispatch:

permissions:
  contents: read
  issues: write
  pull-requests: write

jobs:
  auto-unassign:
    name: Auto Unassign
    runs-on: ubuntu-latest
    steps:
      - name: Auto Unassign
        uses: tisonspieces/auto-unassign@main
        with:
          token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
          repository: ${{ github.repository }}
```
```diff
@@ -50,7 +50,7 @@ GreptimeDB uses the [Apache 2.0 license](https://github.com/GreptimeTeam/greptim
 
 - To ensure that community is free and confident in its ability to use your contributions, please sign the Contributor License Agreement (CLA) which will be incorporated in the pull request process.
 - Make sure all files have proper license header (running `docker run --rm -v $(pwd):/github/workspace ghcr.io/korandoru/hawkeye-native:v3 format` from the project root).
-- Make sure all your codes are formatted and follow the [coding style](https://pingcap.github.io/style-guide/rust/) and [style guide](http://github.com/greptimeTeam/docs/style-guide.md).
+- Make sure all your codes are formatted and follow the [coding style](https://pingcap.github.io/style-guide/rust/) and [style guide](docs/style-guide.md).
 - Make sure all unit tests are passed (using `cargo test --workspace` or [nextest](https://nexte.st/index.html) `cargo nextest run`).
 - Make sure all clippy warnings are fixed (you can check it locally by running `cargo clippy --workspace --all-targets -- -D warnings`).
```
731 Cargo.lock generated (file diff suppressed because it is too large)
11 Cargo.toml

```diff
@@ -11,6 +11,7 @@ members = [
     "src/common/config",
     "src/common/datasource",
     "src/common/error",
+    "src/common/frontend",
     "src/common/function",
     "src/common/macro",
     "src/common/greptimedb-telemetry",
@@ -115,7 +116,7 @@ etcd-client = { git = "https://github.com/MichaelScofield/etcd-client.git", rev
 fst = "0.4.7"
 futures = "0.3"
 futures-util = "0.3"
-greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "73ac0207ab71dfea48f30259ffdb611501b5ecb8" }
+greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "219b2409bb701f75b43fc0ba64967d2ed8e75491" }
 humantime = "2.1"
 humantime-serde = "1.1"
 itertools = "0.10"
@@ -181,6 +182,7 @@ common-config = { path = "src/common/config" }
 common-datasource = { path = "src/common/datasource" }
 common-decimal = { path = "src/common/decimal" }
 common-error = { path = "src/common/error" }
+common-frontend = { path = "src/common/frontend" }
 common-function = { path = "src/common/function" }
 common-greptimedb-telemetry = { path = "src/common/greptimedb-telemetry" }
 common-grpc = { path = "src/common/grpc" }
@@ -202,6 +204,7 @@ common-wal = { path = "src/common/wal" }
 datanode = { path = "src/datanode" }
 datatypes = { path = "src/datatypes" }
 file-engine = { path = "src/file-engine" }
+flow = { path = "src/flow" }
 frontend = { path = "src/frontend" }
 index = { path = "src/index" }
 log-store = { path = "src/log-store" }
@@ -223,6 +226,8 @@ sql = { path = "src/sql" }
 store-api = { path = "src/store-api" }
 substrait = { path = "src/common/substrait" }
 table = { path = "src/table" }
+# TODO some code depends on this
+tests-integration = { path = "tests-integration" }
 
 [workspace.dependencies.meter-macros]
 git = "https://github.com/GreptimeTeam/greptime-meter.git"
@@ -237,3 +242,7 @@ strip = true
 lto = "thin"
 debug = false
 incremental = false
+
+[profile.dev.package.sqlness-runner]
+debug = false
+strip = true
```
```diff
@@ -33,6 +33,8 @@ rand.workspace = true
 rskafka.workspace = true
 serde.workspace = true
 store-api.workspace = true
+# TODO depend `Database` client
+tests-integration.workspace = true
 tokio.workspace = true
 toml.workspace = true
 uuid.workspace = true
```
@@ -1,513 +0,0 @@ (file removed)

```rust
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Use the taxi trip records from New York City dataset to bench. You can download the dataset from
//! [here](https://www1.nyc.gov/site/tlc/about/tlc-trip-record-data.page).

#![allow(clippy::print_stdout)]

use std::collections::HashMap;
use std::path::{Path, PathBuf};
use std::time::Instant;

use arrow::array::{ArrayRef, PrimitiveArray, StringArray, TimestampMicrosecondArray};
use arrow::datatypes::{DataType, Float64Type, Int64Type};
use arrow::record_batch::RecordBatch;
use clap::Parser;
use client::api::v1::column::Values;
use client::api::v1::{Column, ColumnDataType, ColumnDef, CreateTableExpr, InsertRequest, InsertRequests, SemanticType};
use client::{Client, Database, OutputData, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use futures_util::TryStreamExt;
use indicatif::{MultiProgress, ProgressBar, ProgressStyle};
use parquet::arrow::arrow_reader::ParquetRecordBatchReaderBuilder;
use tokio::task::JoinSet;

const CATALOG_NAME: &str = "greptime";
const SCHEMA_NAME: &str = "public";

#[derive(Parser)]
#[command(name = "NYC benchmark runner")]
struct Args {
    /// Path to the dataset
    #[arg(short, long)]
    path: Option<String>,

    /// Batch size of insert request.
    #[arg(short = 's', long = "batch-size", default_value_t = 4096)]
    batch_size: usize,

    /// Number of client threads on write (parallel on file level)
    #[arg(short = 't', long = "thread-num", default_value_t = 4)]
    thread_num: usize,

    /// Number of query iteration
    #[arg(short = 'i', long = "iter-num", default_value_t = 3)]
    iter_num: usize,

    #[arg(long = "skip-write")]
    skip_write: bool,

    #[arg(long = "skip-read")]
    skip_read: bool,

    #[arg(short, long, default_value_t = String::from("127.0.0.1:4001"))]
    endpoint: String,
}

fn get_file_list<P: AsRef<Path>>(path: P) -> Vec<PathBuf> {
    std::fs::read_dir(path)
        .unwrap()
        .map(|dir| dir.unwrap().path().canonicalize().unwrap())
        .collect()
}

fn new_table_name() -> String {
    format!("nyc_taxi_{}", chrono::Utc::now().timestamp())
}

async fn write_data(
    table_name: &str,
    batch_size: usize,
    db: &Database,
    path: PathBuf,
    mpb: MultiProgress,
    pb_style: ProgressStyle,
) -> u128 {
    let file = std::fs::File::open(&path).unwrap();
    let record_batch_reader_builder = ParquetRecordBatchReaderBuilder::try_new(file).unwrap();
    let row_num = record_batch_reader_builder
        .metadata()
        .file_metadata()
        .num_rows();
    let record_batch_reader = record_batch_reader_builder
        .with_batch_size(batch_size)
        .build()
        .unwrap();
    let progress_bar = mpb.add(ProgressBar::new(row_num as _));
    progress_bar.set_style(pb_style);
    progress_bar.set_message(format!("{path:?}"));

    let mut total_rpc_elapsed_ms = 0;

    for record_batch in record_batch_reader {
        let record_batch = record_batch.unwrap();
        if !is_record_batch_full(&record_batch) {
            continue;
        }
        let (columns, row_count) = convert_record_batch(record_batch);
        let request = InsertRequest {
            table_name: table_name.to_string(),
            columns,
            row_count,
        };
        let requests = InsertRequests {
            inserts: vec![request],
        };

        let now = Instant::now();
        db.insert(requests).await.unwrap();
        let elapsed = now.elapsed();
        total_rpc_elapsed_ms += elapsed.as_millis();
        progress_bar.inc(row_count as _);
    }

    progress_bar.finish_with_message(format!("file {path:?} done in {total_rpc_elapsed_ms}ms",));
    total_rpc_elapsed_ms
}

fn convert_record_batch(record_batch: RecordBatch) -> (Vec<Column>, u32) {
    let schema = record_batch.schema();
    let fields = schema.fields();
    let row_count = record_batch.num_rows();
    let mut columns = vec![];

    for (array, field) in record_batch.columns().iter().zip(fields.iter()) {
        let (values, datatype) = build_values(array);
        let semantic_type = match field.name().as_str() {
            "VendorID" => SemanticType::Tag,
            "tpep_pickup_datetime" => SemanticType::Timestamp,
            _ => SemanticType::Field,
        };

        let column = Column {
            column_name: field.name().clone(),
            values: Some(values),
            null_mask: array
                .to_data()
                .nulls()
                .map(|bitmap| bitmap.buffer().as_slice().to_vec())
                .unwrap_or_default(),
            datatype: datatype.into(),
            semantic_type: semantic_type as i32,
            ..Default::default()
        };
        columns.push(column);
    }

    (columns, row_count as _)
}

fn build_values(column: &ArrayRef) -> (Values, ColumnDataType) {
    match column.data_type() {
        DataType::Int64 => {
            let array = column.as_any().downcast_ref::<PrimitiveArray<Int64Type>>().unwrap();
            let values = array.values();
            (Values { i64_values: values.to_vec(), ..Default::default() }, ColumnDataType::Int64)
        }
        DataType::Float64 => {
            let array = column.as_any().downcast_ref::<PrimitiveArray<Float64Type>>().unwrap();
            let values = array.values();
            (Values { f64_values: values.to_vec(), ..Default::default() }, ColumnDataType::Float64)
        }
        DataType::Timestamp(_, _) => {
            let array = column.as_any().downcast_ref::<TimestampMicrosecondArray>().unwrap();
            let values = array.values();
            (
                Values { timestamp_microsecond_values: values.to_vec(), ..Default::default() },
                ColumnDataType::TimestampMicrosecond,
            )
        }
        DataType::Utf8 => {
            let array = column.as_any().downcast_ref::<StringArray>().unwrap();
            let values = array.iter().filter_map(|s| s.map(String::from)).collect();
            (Values { string_values: values, ..Default::default() }, ColumnDataType::String)
        }
        _ => unimplemented!(),
    }
}

fn is_record_batch_full(batch: &RecordBatch) -> bool {
    batch.columns().iter().all(|col| col.null_count() == 0)
}

fn create_table_expr(table_name: &str) -> CreateTableExpr {
    CreateTableExpr {
        catalog_name: CATALOG_NAME.to_string(),
        schema_name: SCHEMA_NAME.to_string(),
        table_name: table_name.to_string(),
        desc: String::default(),
        column_defs: vec![
            ColumnDef { name: "VendorID".to_string(), data_type: ColumnDataType::Int64 as i32, is_nullable: true, default_constraint: vec![], semantic_type: SemanticType::Tag as i32, comment: String::new(), ..Default::default() },
            ColumnDef { name: "tpep_pickup_datetime".to_string(), data_type: ColumnDataType::TimestampMicrosecond as i32, is_nullable: false, default_constraint: vec![], semantic_type: SemanticType::Timestamp as i32, comment: String::new(), ..Default::default() },
            ColumnDef { name: "tpep_dropoff_datetime".to_string(), data_type: ColumnDataType::TimestampMicrosecond as i32, is_nullable: true, default_constraint: vec![], semantic_type: SemanticType::Field as i32, comment: String::new(), ..Default::default() },
            ColumnDef { name: "passenger_count".to_string(), data_type: ColumnDataType::Float64 as i32, is_nullable: true, default_constraint: vec![], semantic_type: SemanticType::Field as i32, comment: String::new(), ..Default::default() },
            ColumnDef { name: "trip_distance".to_string(), data_type: ColumnDataType::Float64 as i32, is_nullable: true, default_constraint: vec![], semantic_type: SemanticType::Field as i32, comment: String::new(), ..Default::default() },
            ColumnDef { name: "RatecodeID".to_string(), data_type: ColumnDataType::Float64 as i32, is_nullable: true, default_constraint: vec![], semantic_type: SemanticType::Field as i32, comment: String::new(), ..Default::default() },
            ColumnDef { name: "store_and_fwd_flag".to_string(), data_type: ColumnDataType::String as i32, is_nullable: true, default_constraint: vec![], semantic_type: SemanticType::Field as i32, comment: String::new(), ..Default::default() },
            ColumnDef { name: "PULocationID".to_string(), data_type: ColumnDataType::Int64 as i32, is_nullable: true, default_constraint: vec![], semantic_type: SemanticType::Field as i32, comment: String::new(), ..Default::default() },
            ColumnDef { name: "DOLocationID".to_string(), data_type: ColumnDataType::Int64 as i32, is_nullable: true, default_constraint: vec![], semantic_type: SemanticType::Field as i32, comment: String::new(), ..Default::default() },
            ColumnDef { name: "payment_type".to_string(), data_type: ColumnDataType::Int64 as i32, is_nullable: true, default_constraint: vec![], semantic_type: SemanticType::Field as i32, comment: String::new(), ..Default::default() },
            ColumnDef { name: "fare_amount".to_string(), data_type: ColumnDataType::Float64 as i32, is_nullable: true, default_constraint: vec![], semantic_type: SemanticType::Field as i32, comment: String::new(), ..Default::default() },
            ColumnDef { name: "extra".to_string(), data_type: ColumnDataType::Float64 as i32, is_nullable: true, default_constraint: vec![], semantic_type: SemanticType::Field as i32, comment: String::new(), ..Default::default() },
            ColumnDef { name: "mta_tax".to_string(), data_type: ColumnDataType::Float64 as i32, is_nullable: true, default_constraint: vec![], semantic_type: SemanticType::Field as i32, comment: String::new(), ..Default::default() },
            ColumnDef { name: "tip_amount".to_string(), data_type: ColumnDataType::Float64 as i32, is_nullable: true, default_constraint: vec![], semantic_type: SemanticType::Field as i32, comment: String::new(), ..Default::default() },
            ColumnDef { name: "tolls_amount".to_string(), data_type: ColumnDataType::Float64 as i32, is_nullable: true, default_constraint: vec![], semantic_type: SemanticType::Field as i32, comment: String::new(), ..Default::default() },
            ColumnDef { name: "improvement_surcharge".to_string(), data_type: ColumnDataType::Float64 as i32, is_nullable: true, default_constraint: vec![], semantic_type: SemanticType::Field as i32, comment: String::new(), ..Default::default() },
            ColumnDef { name: "total_amount".to_string(), data_type: ColumnDataType::Float64 as i32, is_nullable: true, default_constraint: vec![], semantic_type: SemanticType::Field as i32, comment: String::new(), ..Default::default() },
            ColumnDef { name: "congestion_surcharge".to_string(), data_type: ColumnDataType::Float64 as i32, is_nullable: true, default_constraint: vec![], semantic_type: SemanticType::Field as i32, comment: String::new(), ..Default::default() },
            ColumnDef { name: "airport_fee".to_string(), data_type: ColumnDataType::Float64 as i32, is_nullable: true, default_constraint: vec![], semantic_type: SemanticType::Field as i32, comment: String::new(), ..Default::default() },
        ],
        time_index: "tpep_pickup_datetime".to_string(),
        primary_keys: vec!["VendorID".to_string()],
        create_if_not_exists: true,
        table_options: Default::default(),
        table_id: None,
        engine: "mito".to_string(),
    }
}

fn query_set(table_name: &str) -> HashMap<String, String> {
    HashMap::from([
        (
            "count_all".to_string(),
            format!("SELECT COUNT(*) FROM {table_name};"),
        ),
        (
            "fare_amt_by_passenger".to_string(),
            format!("SELECT passenger_count, MIN(fare_amount), MAX(fare_amount), SUM(fare_amount) FROM {table_name} GROUP BY passenger_count"),
        ),
    ])
}

async fn do_write(args: &Args, db: &Database, table_name: &str) {
    let mut file_list = get_file_list(args.path.clone().expect("Specify data path in argument"));
    let mut write_jobs = JoinSet::new();

    let create_table_result = db.create(create_table_expr(table_name)).await;
    println!("Create table result: {create_table_result:?}");

    let progress_bar_style = ProgressStyle::with_template(
        "[{elapsed_precise}] {bar:60.cyan/blue} {pos:>7}/{len:7} {msg}",
    )
    .unwrap()
    .progress_chars("##-");
    let multi_progress_bar = MultiProgress::new();
    let file_progress = multi_progress_bar.add(ProgressBar::new(file_list.len() as _));
    file_progress.inc(0);

    let batch_size = args.batch_size;
    for _ in 0..args.thread_num {
        if let Some(path) = file_list.pop() {
            let db = db.clone();
            let mpb = multi_progress_bar.clone();
            let pb_style = progress_bar_style.clone();
            let table_name = table_name.to_string();
            let _ = write_jobs.spawn(async move {
                write_data(&table_name, batch_size, &db, path, mpb, pb_style).await
            });
        }
    }
    while write_jobs.join_next().await.is_some() {
        file_progress.inc(1);
        if let Some(path) = file_list.pop() {
            let db = db.clone();
            let mpb = multi_progress_bar.clone();
            let pb_style = progress_bar_style.clone();
            let table_name = table_name.to_string();
            let _ = write_jobs.spawn(async move {
                write_data(&table_name, batch_size, &db, path, mpb, pb_style).await
            });
        }
    }
}

async fn do_query(num_iter: usize, db: &Database, table_name: &str) {
    for (query_name, query) in query_set(table_name) {
        println!("Running query: {query}");
        for i in 0..num_iter {
            let now = Instant::now();
            let res = db.sql(&query).await.unwrap();
            match res.data {
                OutputData::AffectedRows(_) | OutputData::RecordBatches(_) => (),
                OutputData::Stream(stream) => {
                    stream.try_collect::<Vec<_>>().await.unwrap();
                }
            }
            let elapsed = now.elapsed();
            println!(
                "query {}, iteration {}: {}ms",
                query_name,
                i,
                elapsed.as_millis(),
            );
        }
    }
}

fn main() {
    let args = Args::parse();

    tokio::runtime::Builder::new_multi_thread()
        .worker_threads(args.thread_num)
        .enable_all()
        .build()
        .unwrap()
        .block_on(async {
            let client = Client::with_urls(vec![&args.endpoint]);
            let db = Database::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, client);
            let table_name = new_table_name();

            if !args.skip_write {
                do_write(&args, &db, &table_name).await;
            }

            if !args.skip_read {
                do_query(args.iter_num, &db, &table_name).await;
            }
        })
}
```
```diff
@@ -33,9 +33,7 @@
 | `postgres.tls.key_path` | String | `None` | Private key file path. |
 | `postgres.tls.watch` | Bool | `false` | Watch for Certificate and key file change and auto reload |
 | `opentsdb` | -- | -- | OpenTSDB protocol options. |
-| `opentsdb.enable` | Bool | `true` | Whether to enable |
-| `opentsdb.addr` | String | `127.0.0.1:4242` | OpenTSDB telnet API server address. |
-| `opentsdb.runtime_size` | Integer | `2` | The number of server worker threads. |
+| `opentsdb.enable` | Bool | `true` | Whether to enable OpenTSDB put in HTTP API. |
 | `influxdb` | -- | -- | InfluxDB protocol options. |
 | `influxdb.enable` | Bool | `true` | Whether to enable InfluxDB protocol in HTTP API. |
 | `prom_store` | -- | -- | Prometheus remote storage options |
@@ -168,9 +166,7 @@
 | `postgres.tls.key_path` | String | `None` | Private key file path. |
 | `postgres.tls.watch` | Bool | `false` | Watch for Certificate and key file change and auto reload |
 | `opentsdb` | -- | -- | OpenTSDB protocol options. |
-| `opentsdb.enable` | Bool | `true` | Whether to enable |
-| `opentsdb.addr` | String | `127.0.0.1:4242` | OpenTSDB telnet API server address. |
-| `opentsdb.runtime_size` | Integer | `2` | The number of server worker threads. |
+| `opentsdb.enable` | Bool | `true` | Whether to enable OpenTSDB put in HTTP API. |
 | `influxdb` | -- | -- | InfluxDB protocol options. |
 | `influxdb.enable` | Bool | `true` | Whether to enable InfluxDB protocol in HTTP API. |
 | `prom_store` | -- | -- | Prometheus remote storage options |
```
```diff
@@ -88,12 +88,8 @@ watch = false
 
 ## OpenTSDB protocol options.
 [opentsdb]
-## Whether to enable
+## Whether to enable OpenTSDB put in HTTP API.
 enable = true
-## OpenTSDB telnet API server address.
-addr = "127.0.0.1:4242"
-## The number of server worker threads.
-runtime_size = 2
 
 ## InfluxDB protocol options.
 [influxdb]
@@ -83,12 +83,8 @@ watch = false
 
 ## OpenTSDB protocol options.
 [opentsdb]
-## Whether to enable
+## Whether to enable OpenTSDB put in HTTP API.
 enable = true
-## OpenTSDB telnet API server address.
-addr = "127.0.0.1:4242"
-## The number of server worker threads.
-runtime_size = 2
 
 ## InfluxDB protocol options.
 [influxdb]
```
2 cyborg/.gitignore vendored Normal file

@@ -0,0 +1,2 @@ (new file)

```
node_modules
.env
```
79 cyborg/bin/check-pull-request.ts Normal file

@@ -0,0 +1,79 @@ (new file)

```typescript
/*
 * Copyright 2023 Greptime Team
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import * as core from '@actions/core'
import {handleError, obtainClient} from "@/common";
import {context} from "@actions/github";
import {PullRequestEvent} from "@octokit/webhooks-types";
import {Options, sync as conventionalCommitsParser} from 'conventional-commits-parser';
import conventionalCommitTypes from 'conventional-commit-types';
import _ from "lodash";

const defaultTypes = Object.keys(conventionalCommitTypes.types)
const breakingChangeLabel = "breaking-change"

// These options are copied from [1].
// [1] https://github.com/conventional-changelog/conventional-changelog/blob/3f60b464/packages/conventional-changelog-conventionalcommits/src/parser.js
export const parserOpts: Options = {
    headerPattern: /^(\w*)(?:\((.*)\))?!?: (.*)$/,
    breakingHeaderPattern: /^(\w*)(?:\((.*)\))?!: (.*)$/,
    headerCorrespondence: [
        'type',
        'scope',
        'subject'
    ],
    noteKeywords: ['BREAKING CHANGE', 'BREAKING-CHANGE'],
    revertPattern: /^(?:Revert|revert:)\s"?([\s\S]+?)"?\s*This reverts commit (\w*)\./i,
    revertCorrespondence: ['header', 'hash'],
    issuePrefixes: ['#']
}

async function main() {
    if (!context.payload.pull_request) {
        throw new Error(`Only pull request event supported. ${context.eventName} is unsupported.`)
    }

    const client = obtainClient("GITHUB_TOKEN")
    const payload = context.payload as PullRequestEvent
    const { owner, repo, number } = {
        owner: payload.pull_request.base.user.login,
        repo: payload.pull_request.base.repo.name,
        number: payload.pull_request.number,
    }
    const { data: pull_request } = await client.rest.pulls.get({
        owner, repo, pull_number: number,
    })

    const commit = conventionalCommitsParser(pull_request.title, parserOpts)
    core.info(`Receive commit: ${JSON.stringify(commit)}`)

    if (!commit.type) {
        throw Error(`Malformed commit: ${JSON.stringify(commit)}`)
    }

    if (!defaultTypes.includes(commit.type)) {
        throw Error(`Unexpected type ${JSON.stringify(commit.type)} of commit: ${JSON.stringify(commit)}`)
    }

    const breakingChanges = _.filter(commit.notes, _.matches({ title: 'BREAKING CHANGE'}))
    if (breakingChanges.length > 0) {
        await client.rest.issues.addLabels({
            owner, repo, issue_number: number, labels: [breakingChangeLabel]
        })
    }
}

main().catch(handleError)
```
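For reference, a minimal sketch of what the parser above yields for a typical PR title, using the same `parserOpts` the script exports. The sample title and the relative import path are illustrative assumptions, not taken from the repository:

```typescript
// Sketch: run a made-up PR title through the same conventional-commits parser options.
import {sync as conventionalCommitsParser} from 'conventional-commits-parser';
import {parserOpts} from './check-pull-request'; // assumed relative path, for illustration only

const commit = conventionalCommitsParser('feat(flow): add a dataflow engine', parserOpts);
// commit.type    === 'feat'  -> accepted, because 'feat' is one of the conventional-commit-types keys
// commit.scope   === 'flow'
// commit.subject === 'add a dataflow engine'
// commit.notes   is empty    -> no BREAKING CHANGE note, so the breaking-change label is not added
console.log(commit.type, commit.scope, commit.subject, commit.notes);
```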
73 cyborg/bin/schedule.ts Normal file

@@ -0,0 +1,73 @@ (new file)

```typescript
/*
 * Copyright 2023 Greptime Team
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import * as core from '@actions/core'
import {GitHub} from "@actions/github/lib/utils"
import _ from "lodash";
import dayjs from "dayjs";
import {handleError, obtainClient} from "@/common";

async function main() {
    const client = obtainClient("GITHUB_TOKEN")
    await unassign(client)
}

async function unassign(client: InstanceType<typeof GitHub>) {
    const owner = "GreptimeTeam"
    const repo = "greptimedb"

    const dt = dayjs().subtract(14, 'days');
    core.info(`Open issues updated before ${dt.toISOString()} will be considered stale.`)

    const members = await client.paginate(client.rest.repos.listCollaborators, {
        owner,
        repo,
        permission: "push",
        per_page: 100
    }).then((members) => members.map((member) => member.login))
    core.info(`Members (${members.length}): ${members}`)

    const issues = await client.paginate(client.rest.issues.listForRepo, {
        owner,
        repo,
        state: "open",
        sort: "created",
        direction: "asc",
        per_page: 100
    })
    for (const issue of issues) {
        let assignees = [];
        if (issue.assignee) {
            assignees.push(issue.assignee.login)
        }
        for (const assignee of issue.assignees) {
            assignees.push(assignee.login)
        }
        assignees = _.uniq(assignees)
        assignees = _.difference(assignees, members)
        if (assignees.length > 0 && dayjs(issue.updated_at).isBefore(dt)) {
            core.info(`Assignees ${assignees} of issue ${issue.number} will be unassigned.`)
            await client.rest.issues.removeAssignees({
                owner,
                repo,
                issue_number: issue.number,
                assignees: assignees,
            })
        }
    }
}

main().catch(handleError)
```
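Both scripts import `obtainClient` and `handleError` from `@/common`, a module that is not part of this diff. A plausible sketch of that module, assuming it simply wraps an Octokit client built from a token environment variable and fails the workflow run on error; the actual implementation in the cyborg package may differ:

```typescript
// Hypothetical sketch of the "@/common" module referenced above; not shown in this diff.
import * as core from '@actions/core'
import {getOctokit} from '@actions/github'
import {GitHub} from '@actions/github/lib/utils'

// Build an authenticated Octokit client from a token stored in the named env var.
export function obtainClient(env: string): InstanceType<typeof GitHub> {
    const token = process.env[env]
    if (!token) {
        throw new Error(`Environment variable ${env} is not set.`)
    }
    return getOctokit(token)
}

// Mark the workflow run as failed when a script's promise rejects.
export function handleError(err: Error): void {
    core.error(err)
    core.setFailed(err.message)
}
```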
25 cyborg/package.json Normal file

@@ -0,0 +1,25 @@ (new file)

```json
{
  "name": "cyborg",
  "version": "1.0.0",
  "description": "Automator for GreptimeDB Repository Management",
  "private": true,
  "packageManager": "pnpm@8.15.5",
  "dependencies": {
    "@actions/core": "^1.10.1",
    "@actions/github": "^6.0.0",
    "@octokit/webhooks-types": "^7.5.1",
    "conventional-commit-types": "^3.0.0",
    "conventional-commits-parser": "^5.0.0",
    "dayjs": "^1.11.11",
    "dotenv": "^16.4.5",
    "lodash": "^4.17.21"
  },
  "devDependencies": {
    "@types/conventional-commits-parser": "^5.0.0",
    "@types/lodash": "^4.17.0",
    "@types/node": "^20.12.7",
    "tsconfig-paths": "^4.2.0",
    "tsx": "^4.8.2",
    "typescript": "^5.4.5"
  }
}
```
602 cyborg/pnpm-lock.yaml generated Normal file
@@ -0,0 +1,602 @@
lockfileVersion: '6.0'

settings:
  autoInstallPeers: true
  excludeLinksFromLockfile: false

dependencies:
  '@actions/core':
    specifier: ^1.10.1
    version: 1.10.1
  '@actions/github':
    specifier: ^6.0.0
    version: 6.0.0
  '@octokit/webhooks-types':
    specifier: ^7.5.1
    version: 7.5.1
  conventional-commit-types:
    specifier: ^3.0.0
    version: 3.0.0
  conventional-commits-parser:
    specifier: ^5.0.0
    version: 5.0.0
  dayjs:
    specifier: ^1.11.11
    version: 1.11.11
  dotenv:
    specifier: ^16.4.5
    version: 16.4.5
  lodash:
    specifier: ^4.17.21
    version: 4.17.21

devDependencies:
  '@types/conventional-commits-parser':
    specifier: ^5.0.0
    version: 5.0.0
  '@types/lodash':
    specifier: ^4.17.0
    version: 4.17.0
  '@types/node':
    specifier: ^20.12.7
    version: 20.12.7
  tsconfig-paths:
    specifier: ^4.2.0
    version: 4.2.0
  tsx:
    specifier: ^4.8.2
    version: 4.8.2
  typescript:
    specifier: ^5.4.5
    version: 5.4.5

packages: (remaining generated lockfile entries, listing per-package resolution, integrity, engines, and transitive dependency metadata for the packages above, are not reproduced here)
30 cyborg/src/common.ts Normal file
@@ -0,0 +1,30 @@
/*
 * Copyright 2023 Greptime Team
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import * as core from "@actions/core";
import {config} from "dotenv";
import {getOctokit} from "@actions/github";
import {GitHub} from "@actions/github/lib/utils";

export function handleError(err: any): void {
  console.error(err)
  core.setFailed(`Unhandled error: ${err}`)
}

export function obtainClient(token: string): InstanceType<typeof GitHub> {
  config()
  return getOctokit(process.env[token])
}
14 cyborg/tsconfig.json Normal file
@@ -0,0 +1,14 @@
{
  "ts-node": {
    "require": ["tsconfig-paths/register"]
  },
  "compilerOptions": {
    "module": "NodeNext",
    "moduleResolution": "NodeNext",
    "target": "ES6",
    "paths": {
      "@/*": ["./src/*"]
    },
    "resolveJsonModule": true,
  }
}
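The package manifest and TypeScript config above imply that the cyborg scripts are meant to be run directly with `tsx` under pnpm. As a rough, hypothetical illustration only (the script path `src/unassign.ts` and the token variable name are assumptions for this sketch, not part of this change), a CI step could invoke one of them like this:

```yml
# Hypothetical CI step; script path and env var name are assumptions.
steps:
  - run: pnpm install --frozen-lockfile
    working-directory: cyborg
  - run: pnpm exec tsx src/unassign.ts
    working-directory: cyborg
    env:
      GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
```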
@@ -7,4 +7,60 @@ Status notify: we are still working on this config. It's expected to change freq
 
 # How to use
 
+## `greptimedb.json`
+
 Open Grafana Dashboard page, choose `New` -> `Import`. And upload `greptimedb.json` file.
+
+## `greptimedb-cluster.json`
+
+This cluster dashboard provides a comprehensive view of incoming requests, response statuses, and internal activities such as flush and compaction, with a layered structure from frontend to datanode. Designed with a focus on alert functionality, its primary aim is to highlight any anomalies in metrics, allowing users to quickly pinpoint the cause of errors.
+
+We use Prometheus to scrape metrics from the nodes in a GreptimeDB cluster, and Grafana to visualize them. Any compatible stack should work too.
+
+__Note__: This dashboard is still in an early stage of development. Any issue or advice on improvement is welcome.
+
+### Configuration
+
+Please ensure the following configuration before importing the dashboard into Grafana.
+
+__1. Prometheus scrape config__
+
+Assign a `greptime_pod` label to each host target. We use this label to identify each node instance.
+
+```yml
+# example config
+# only to indicate how to assign labels to each target
+# modify yours accordingly
+scrape_configs:
+  - job_name: metasrv
+    static_configs:
+    - targets: ['<ip>:<port>']
+      labels:
+        greptime_pod: metasrv
+
+  - job_name: datanode
+    static_configs:
+    - targets: ['<ip>:<port>']
+      labels:
+        greptime_pod: datanode1
+    - targets: ['<ip>:<port>']
+      labels:
+        greptime_pod: datanode2
+    - targets: ['<ip>:<port>']
+      labels:
+        greptime_pod: datanode3
+
+  - job_name: frontend
+    static_configs:
+    - targets: ['<ip>:<port>']
+      labels:
+        greptime_pod: frontend
+```
+
+__2. Grafana config__
+
+Create a Prometheus data source in Grafana before using this dashboard. We use `datasource` as a variable in the Grafana dashboard so that multiple environments are supported.
+
+### Usage
+
+Use `datasource` or `greptime_pod` on the upper-left corner to filter data from a certain node.
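As a companion to the Grafana config step above, a minimal Prometheus data source provisioning file might look like the sketch below. The file location, data source name, and URL are illustrative assumptions, not part of this change; the dashboard only requires that a Prometheus data source exists and can be selected through the `datasource` variable.

```yml
# Hypothetical example: provision a Prometheus data source for Grafana.
# Typically placed under Grafana's provisioning/datasources directory; values are assumptions.
apiVersion: 1

datasources:
  - name: Prometheus            # shows up in the dashboard's `datasource` variable
    type: prometheus
    access: proxy
    url: http://localhost:9090  # your Prometheus server
    isDefault: true
```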
4862 grafana/greptimedb-cluster.json Normal file
File diff suppressed because it is too large
@@ -17,12 +17,15 @@ headerPath = "Apache-2.0.txt"
 includes = [
     "*.rs",
     "*.py",
+    "*.ts",
 ]
 
 excludes = [
     # copied sources
     "src/common/base/src/readable_size.rs",
+    "src/common/base/src/secrets.rs",
     "src/servers/src/repeated_field.rs",
+    "src/servers/src/http/test_helpers.rs",
 ]
 
 [properties]
@@ -20,21 +20,20 @@ use common_decimal::Decimal128;
 use common_time::interval::IntervalUnit;
 use common_time::time::Time;
 use common_time::timestamp::TimeUnit;
-use common_time::{Date, DateTime, Duration, Interval, Timestamp};
+use common_time::{Date, DateTime, Interval, Timestamp};
 use datatypes::prelude::{ConcreteDataType, ValueRef};
 use datatypes::scalars::ScalarVector;
 use datatypes::types::{
-    DurationType, Int16Type, Int8Type, IntervalType, TimeType, TimestampType, UInt16Type, UInt8Type,
+    Int16Type, Int8Type, IntervalType, TimeType, TimestampType, UInt16Type, UInt8Type,
 };
 use datatypes::value::{OrderedF32, OrderedF64, Value};
 use datatypes::vectors::{
-    BinaryVector, BooleanVector, DateTimeVector, DateVector, Decimal128Vector,
-    DurationMicrosecondVector, DurationMillisecondVector, DurationNanosecondVector,
-    DurationSecondVector, Float32Vector, Float64Vector, Int32Vector, Int64Vector,
-    IntervalDayTimeVector, IntervalMonthDayNanoVector, IntervalYearMonthVector, PrimitiveVector,
-    StringVector, TimeMicrosecondVector, TimeMillisecondVector, TimeNanosecondVector,
-    TimeSecondVector, TimestampMicrosecondVector, TimestampMillisecondVector,
-    TimestampNanosecondVector, TimestampSecondVector, UInt32Vector, UInt64Vector, VectorRef,
+    BinaryVector, BooleanVector, DateTimeVector, DateVector, Decimal128Vector, Float32Vector,
+    Float64Vector, Int32Vector, Int64Vector, IntervalDayTimeVector, IntervalMonthDayNanoVector,
+    IntervalYearMonthVector, PrimitiveVector, StringVector, TimeMicrosecondVector,
+    TimeMillisecondVector, TimeNanosecondVector, TimeSecondVector, TimestampMicrosecondVector,
+    TimestampMillisecondVector, TimestampNanosecondVector, TimestampSecondVector, UInt32Vector,
+    UInt64Vector, VectorRef,
 };
 use greptime_proto::v1;
 use greptime_proto::v1::column_data_type_extension::TypeExt;
@@ -127,14 +126,6 @@ impl From<ColumnDataTypeWrapper> for ConcreteDataType {
             ColumnDataType::IntervalMonthDayNano => {
                 ConcreteDataType::interval_month_day_nano_datatype()
             }
-            ColumnDataType::DurationSecond => ConcreteDataType::duration_second_datatype(),
-            ColumnDataType::DurationMillisecond => {
-                ConcreteDataType::duration_millisecond_datatype()
-            }
-            ColumnDataType::DurationMicrosecond => {
-                ConcreteDataType::duration_microsecond_datatype()
-            }
-            ColumnDataType::DurationNanosecond => ConcreteDataType::duration_nanosecond_datatype(),
             ColumnDataType::Decimal128 => {
                 if let Some(TypeExt::DecimalType(d)) = datatype_wrapper
                     .datatype_ext
@@ -212,11 +203,7 @@ impl_column_type_functions_with_snake!(
     TimeNanosecond,
     IntervalYearMonth,
     IntervalDayTime,
-    IntervalMonthDayNano,
-    DurationSecond,
-    DurationMillisecond,
-    DurationMicrosecond,
-    DurationNanosecond
+    IntervalMonthDayNano
 );
 
 impl ColumnDataTypeWrapper {
@@ -270,16 +257,11 @@ impl TryFrom<ConcreteDataType> for ColumnDataTypeWrapper {
                 IntervalType::DayTime(_) => ColumnDataType::IntervalDayTime,
                 IntervalType::MonthDayNano(_) => ColumnDataType::IntervalMonthDayNano,
             },
-            ConcreteDataType::Duration(d) => match d {
-                DurationType::Second(_) => ColumnDataType::DurationSecond,
-                DurationType::Millisecond(_) => ColumnDataType::DurationMillisecond,
-                DurationType::Microsecond(_) => ColumnDataType::DurationMicrosecond,
-                DurationType::Nanosecond(_) => ColumnDataType::DurationNanosecond,
-            },
             ConcreteDataType::Decimal128(_) => ColumnDataType::Decimal128,
             ConcreteDataType::Null(_)
             | ConcreteDataType::List(_)
-            | ConcreteDataType::Dictionary(_) => {
+            | ConcreteDataType::Dictionary(_)
+            | ConcreteDataType::Duration(_) => {
                 return error::IntoColumnDataTypeSnafu { from: datatype }.fail()
             }
         };
@@ -409,22 +391,6 @@ pub fn values_with_capacity(datatype: ColumnDataType, capacity: usize) -> Values
            interval_month_day_nano_values: Vec::with_capacity(capacity),
            ..Default::default()
        },
-        ColumnDataType::DurationSecond => Values {
-            duration_second_values: Vec::with_capacity(capacity),
-            ..Default::default()
-        },
-        ColumnDataType::DurationMillisecond => Values {
-            duration_millisecond_values: Vec::with_capacity(capacity),
-            ..Default::default()
-        },
-        ColumnDataType::DurationMicrosecond => Values {
-            duration_microsecond_values: Vec::with_capacity(capacity),
-            ..Default::default()
-        },
-        ColumnDataType::DurationNanosecond => Values {
-            duration_nanosecond_values: Vec::with_capacity(capacity),
-            ..Default::default()
-        },
         ColumnDataType::Decimal128 => Values {
             decimal128_values: Vec::with_capacity(capacity),
             ..Default::default()
@@ -476,14 +442,8 @@ pub fn push_vals(column: &mut Column, origin_count: usize, vector: VectorRef) {
                 .interval_month_day_nano_values
                 .push(convert_i128_to_interval(val.to_i128())),
         },
-        Value::Duration(val) => match val.unit() {
-            TimeUnit::Second => values.duration_second_values.push(val.value()),
-            TimeUnit::Millisecond => values.duration_millisecond_values.push(val.value()),
-            TimeUnit::Microsecond => values.duration_microsecond_values.push(val.value()),
-            TimeUnit::Nanosecond => values.duration_nanosecond_values.push(val.value()),
-        },
         Value::Decimal128(val) => values.decimal128_values.push(convert_to_pb_decimal128(val)),
-        Value::List(_) => unreachable!(),
+        Value::List(_) | Value::Duration(_) => unreachable!(),
     });
     column.null_mask = null_mask.into_vec();
 }
@@ -518,6 +478,8 @@ fn ddl_request_type(request: &DdlRequest) -> &'static str {
         Some(Expr::Alter(_)) => "ddl.alter",
         Some(Expr::DropTable(_)) => "ddl.drop_table",
         Some(Expr::TruncateTable(_)) => "ddl.truncate_table",
+        Some(Expr::CreateFlow(_)) => "ddl.create_flow",
+        Some(Expr::DropFlow(_)) => "ddl.drop_flow",
         None => "ddl.empty",
     }
 }
@@ -583,10 +545,6 @@ pub fn pb_value_to_value_ref<'a>(
             let interval = Interval::from_month_day_nano(v.months, v.days, v.nanoseconds);
             ValueRef::Interval(interval)
         }
-        ValueData::DurationSecondValue(v) => ValueRef::Duration(Duration::new_second(*v)),
-        ValueData::DurationMillisecondValue(v) => ValueRef::Duration(Duration::new_millisecond(*v)),
-        ValueData::DurationMicrosecondValue(v) => ValueRef::Duration(Duration::new_microsecond(*v)),
-        ValueData::DurationNanosecondValue(v) => ValueRef::Duration(Duration::new_nanosecond(*v)),
         ValueData::Decimal128Value(v) => {
             // get precision and scale from datatype_extension
             if let Some(TypeExt::DecimalType(d)) = datatype_ext
@@ -681,26 +639,15 @@ pub fn pb_values_to_vector_ref(data_type: &ConcreteDataType, values: Values) ->
                 ))
             }
         },
-        ConcreteDataType::Duration(unit) => match unit {
-            DurationType::Second(_) => Arc::new(DurationSecondVector::from_vec(
-                values.duration_second_values,
-            )),
-            DurationType::Millisecond(_) => Arc::new(DurationMillisecondVector::from_vec(
-                values.duration_millisecond_values,
-            )),
-            DurationType::Microsecond(_) => Arc::new(DurationMicrosecondVector::from_vec(
-                values.duration_microsecond_values,
-            )),
-            DurationType::Nanosecond(_) => Arc::new(DurationNanosecondVector::from_vec(
-                values.duration_nanosecond_values,
-            )),
-        },
         ConcreteDataType::Decimal128(d) => Arc::new(Decimal128Vector::from_values(
             values.decimal128_values.iter().map(|x| {
                 Decimal128::from_value_precision_scale(x.hi, x.lo, d.precision(), d.scale()).into()
             }),
         )),
-        ConcreteDataType::Null(_) | ConcreteDataType::List(_) | ConcreteDataType::Dictionary(_) => {
+        ConcreteDataType::Null(_)
+        | ConcreteDataType::List(_)
+        | ConcreteDataType::Dictionary(_)
+        | ConcreteDataType::Duration(_) => {
             unreachable!()
         }
     }
@@ -849,26 +796,6 @@ pub fn pb_values_to_values(data_type: &ConcreteDataType, values: Values) -> Vec<
                 ))
             })
             .collect(),
-        ConcreteDataType::Duration(DurationType::Second(_)) => values
-            .duration_second_values
-            .into_iter()
-            .map(|v| Value::Duration(Duration::new_second(v)))
-            .collect(),
-        ConcreteDataType::Duration(DurationType::Millisecond(_)) => values
-            .duration_millisecond_values
-            .into_iter()
-            .map(|v| Value::Duration(Duration::new_millisecond(v)))
-            .collect(),
-        ConcreteDataType::Duration(DurationType::Microsecond(_)) => values
-            .duration_microsecond_values
-            .into_iter()
-            .map(|v| Value::Duration(Duration::new_microsecond(v)))
-            .collect(),
-        ConcreteDataType::Duration(DurationType::Nanosecond(_)) => values
-            .duration_nanosecond_values
-            .into_iter()
-            .map(|v| Value::Duration(Duration::new_nanosecond(v)))
-            .collect(),
         ConcreteDataType::Decimal128(d) => values
             .decimal128_values
             .into_iter()
@@ -881,7 +808,10 @@ pub fn pb_values_to_values(data_type: &ConcreteDataType, values: Values) -> Vec<
                 ))
             })
             .collect(),
-        ConcreteDataType::Null(_) | ConcreteDataType::List(_) | ConcreteDataType::Dictionary(_) => {
+        ConcreteDataType::Null(_)
+        | ConcreteDataType::List(_)
+        | ConcreteDataType::Dictionary(_)
+        | ConcreteDataType::Duration(_) => {
             unreachable!()
         }
     }
@@ -993,24 +923,10 @@ pub fn to_proto_value(value: Value) -> Option<v1::Value> {
                 )),
             },
         },
-        Value::Duration(v) => match v.unit() {
-            TimeUnit::Second => v1::Value {
-                value_data: Some(ValueData::DurationSecondValue(v.value())),
-            },
-            TimeUnit::Millisecond => v1::Value {
-                value_data: Some(ValueData::DurationMillisecondValue(v.value())),
-            },
-            TimeUnit::Microsecond => v1::Value {
-                value_data: Some(ValueData::DurationMicrosecondValue(v.value())),
-            },
-            TimeUnit::Nanosecond => v1::Value {
-                value_data: Some(ValueData::DurationNanosecondValue(v.value())),
-            },
-        },
         Value::Decimal128(v) => v1::Value {
             value_data: Some(ValueData::Decimal128Value(convert_to_pb_decimal128(v))),
         },
-        Value::List(_) => return None,
+        Value::List(_) | Value::Duration(_) => return None,
     };
 
     Some(proto_value)
@@ -1047,10 +963,6 @@ pub fn proto_value_type(value: &v1::Value) -> Option<ColumnDataType> {
         ValueData::IntervalYearMonthValue(_) => ColumnDataType::IntervalYearMonth,
         ValueData::IntervalDayTimeValue(_) => ColumnDataType::IntervalDayTime,
         ValueData::IntervalMonthDayNanoValue(_) => ColumnDataType::IntervalMonthDayNano,
-        ValueData::DurationSecondValue(_) => ColumnDataType::DurationSecond,
-        ValueData::DurationMillisecondValue(_) => ColumnDataType::DurationMillisecond,
-        ValueData::DurationMicrosecondValue(_) => ColumnDataType::DurationMicrosecond,
-        ValueData::DurationNanosecondValue(_) => ColumnDataType::DurationNanosecond,
         ValueData::Decimal128Value(_) => ColumnDataType::Decimal128,
     };
     Some(value_type)
@@ -1108,14 +1020,8 @@ pub fn value_to_grpc_value(value: Value) -> GrpcValue {
                     ValueData::IntervalMonthDayNanoValue(convert_i128_to_interval(v.to_i128()))
                 }
             }),
-            Value::Duration(v) => Some(match v.unit() {
-                TimeUnit::Second => ValueData::DurationSecondValue(v.value()),
-                TimeUnit::Millisecond => ValueData::DurationMillisecondValue(v.value()),
-                TimeUnit::Microsecond => ValueData::DurationMicrosecondValue(v.value()),
-                TimeUnit::Nanosecond => ValueData::DurationNanosecondValue(v.value()),
-            }),
             Value::Decimal128(v) => Some(ValueData::Decimal128Value(convert_to_pb_decimal128(v))),
-            Value::List(_) => unreachable!(),
+            Value::List(_) | Value::Duration(_) => unreachable!(),
         },
     }
 }
@@ -1125,16 +1031,15 @@ mod tests {
     use std::sync::Arc;
 
     use datatypes::types::{
-        DurationMillisecondType, DurationSecondType, Int32Type, IntervalDayTimeType,
-        IntervalMonthDayNanoType, IntervalYearMonthType, TimeMillisecondType, TimeSecondType,
-        TimestampMillisecondType, TimestampSecondType, UInt32Type,
+        Int32Type, IntervalDayTimeType, IntervalMonthDayNanoType, IntervalYearMonthType,
+        TimeMillisecondType, TimeSecondType, TimestampMillisecondType, TimestampSecondType,
+        UInt32Type,
     };
     use datatypes::vectors::{
-        BooleanVector, DurationMicrosecondVector, DurationMillisecondVector,
-        DurationNanosecondVector, DurationSecondVector, IntervalDayTimeVector,
-        IntervalMonthDayNanoVector, IntervalYearMonthVector, TimeMicrosecondVector,
-        TimeMillisecondVector, TimeNanosecondVector, TimeSecondVector, TimestampMicrosecondVector,
-        TimestampMillisecondVector, TimestampNanosecondVector, TimestampSecondVector, Vector,
+        BooleanVector, IntervalDayTimeVector, IntervalMonthDayNanoVector, IntervalYearMonthVector,
+        TimeMicrosecondVector, TimeMillisecondVector, TimeNanosecondVector, TimeSecondVector,
+        TimestampMicrosecondVector, TimestampMillisecondVector, TimestampNanosecondVector,
+        TimestampSecondVector, Vector,
     };
     use paste::paste;
 
@@ -1210,10 +1115,6 @@ mod tests {
         let values = values.interval_month_day_nano_values;
         assert_eq!(2, values.capacity());
 
-        let values = values_with_capacity(ColumnDataType::DurationMillisecond, 2);
-        let values = values.duration_millisecond_values;
-        assert_eq!(2, values.capacity());
-
         let values = values_with_capacity(ColumnDataType::Decimal128, 2);
         let values = values.decimal128_values;
         assert_eq!(2, values.capacity());
@@ -1301,10 +1202,6 @@ mod tests {
            ConcreteDataType::interval_datatype(IntervalUnit::MonthDayNano),
            ColumnDataTypeWrapper::interval_month_day_nano_datatype().into()
        );
-        assert_eq!(
-            ConcreteDataType::duration_millisecond_datatype(),
-            ColumnDataTypeWrapper::duration_millisecond_datatype().into()
-        );
        assert_eq!(
            ConcreteDataType::decimal128_datatype(10, 2),
            ColumnDataTypeWrapper::decimal128_datatype(10, 2).into()
@@ -1397,12 +1294,6 @@ mod tests {
                .try_into()
                .unwrap()
        );
-        assert_eq!(
-            ColumnDataTypeWrapper::duration_millisecond_datatype(),
-            ConcreteDataType::duration_millisecond_datatype()
-                .try_into()
-                .unwrap()
-        );
 
        assert_eq!(
            ColumnDataTypeWrapper::decimal128_datatype(10, 2),
@@ -1556,48 +1447,6 @@ mod tests {
         });
     }
 
-    #[test]
-    fn test_column_put_duration_values() {
-        let mut column = Column {
-            column_name: "test".to_string(),
-            semantic_type: 0,
-            values: Some(Values {
-                ..Default::default()
-            }),
-            null_mask: vec![],
-            datatype: 0,
-            ..Default::default()
-        };
-
-        let vector = Arc::new(DurationNanosecondVector::from_vec(vec![1, 2, 3]));
-        push_vals(&mut column, 3, vector);
-        assert_eq!(
-            vec![1, 2, 3],
-            column.values.as_ref().unwrap().duration_nanosecond_values
-        );
-
-        let vector = Arc::new(DurationMicrosecondVector::from_vec(vec![7, 8, 9]));
-        push_vals(&mut column, 3, vector);
-        assert_eq!(
-            vec![7, 8, 9],
-            column.values.as_ref().unwrap().duration_microsecond_values
-        );
-
-        let vector = Arc::new(DurationMillisecondVector::from_vec(vec![4, 5, 6]));
-        push_vals(&mut column, 3, vector);
-        assert_eq!(
-            vec![4, 5, 6],
-            column.values.as_ref().unwrap().duration_millisecond_values
-        );
-
-        let vector = Arc::new(DurationSecondVector::from_vec(vec![10, 11, 12]));
-        push_vals(&mut column, 3, vector);
-        assert_eq!(
-            vec![10, 11, 12],
-            column.values.as_ref().unwrap().duration_second_values
-        );
-    }
-
     #[test]
     fn test_column_put_vector() {
         use crate::v1::SemanticType;
@@ -1699,39 +1548,6 @@ mod tests {
         assert_eq!(expect, actual);
     }
 
-    #[test]
-    fn test_convert_duration_values() {
-        // second
-        let actual = pb_values_to_values(
-            &ConcreteDataType::Duration(DurationType::Second(DurationSecondType)),
-            Values {
-                duration_second_values: vec![1_i64, 2_i64, 3_i64],
-                ..Default::default()
-            },
-        );
-        let expect = vec![
-            Value::Duration(Duration::new_second(1_i64)),
-            Value::Duration(Duration::new_second(2_i64)),
-            Value::Duration(Duration::new_second(3_i64)),
-        ];
-        assert_eq!(expect, actual);
-
-        // millisecond
-        let actual = pb_values_to_values(
-            &ConcreteDataType::Duration(DurationType::Millisecond(DurationMillisecondType)),
-            Values {
-                duration_millisecond_values: vec![1_i64, 2_i64, 3_i64],
-                ..Default::default()
-            },
-        );
-        let expect = vec![
-            Value::Duration(Duration::new_millisecond(1_i64)),
-            Value::Duration(Duration::new_millisecond(2_i64)),
-            Value::Duration(Duration::new_millisecond(3_i64)),
-        ];
-        assert_eq!(expect, actual);
-    }
-
     #[test]
     fn test_convert_interval_values() {
         // year_month
@@ -14,12 +14,12 @@ workspace = true
 [dependencies]
 api.workspace = true
 async-trait.workspace = true
+common-base.workspace = true
 common-error.workspace = true
 common-macro.workspace = true
 common-telemetry.workspace = true
 digest = "0.10"
 notify.workspace = true
-secrecy = { version = "0.8", features = ["serde", "alloc"] }
 sha1 = "0.10"
 snafu.workspace = true
 sql.workspace = true
@@ -14,8 +14,8 @@
 
 use std::sync::Arc;
 
+use common_base::secrets::SecretString;
 use digest::Digest;
-use secrecy::SecretString;
 use sha1::Sha1;
 use snafu::{ensure, OptionExt};
 
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-use secrecy::ExposeSecret;
+use common_base::secrets::ExposeSecret;
 
 use crate::error::{
     AccessDeniedSnafu, Result, UnsupportedPasswordTypeSnafu, UserNotFoundSnafu,
@@ -21,7 +21,7 @@ use std::io;
 use std::io::BufRead;
 use std::path::Path;
 
-use secrecy::ExposeSecret;
+use common_base::secrets::ExposeSecret;
 use snafu::{ensure, OptionExt, ResultExt};
 
 use crate::common::{Identity, Password};
@@ -17,6 +17,7 @@ arrow-schema.workspace = true
 async-stream.workspace = true
 async-trait = "0.1"
 common-catalog.workspace = true
+common-config.workspace = true
 common-error.workspace = true
 common-macro.workspace = true
 common-meta.workspace = true
@@ -30,6 +31,7 @@ datafusion.workspace = true
 datatypes.workspace = true
 futures = "0.3"
 futures-util.workspace = true
+humantime.workspace = true
 itertools.workspace = true
 lazy_static.workspace = true
 meta-client.workspace = true
@@ -49,6 +49,12 @@ pub enum Error {
         source: BoxedError,
     },
 
+    #[snafu(display("Failed to list nodes in cluster: {source}"))]
+    ListNodes {
+        location: Location,
+        source: BoxedError,
+    },
+
     #[snafu(display("Failed to re-compile script due to internal error"))]
     CompileScriptInternal {
         location: Location,
@@ -294,6 +300,7 @@ impl ErrorExt for Error {
             }
 
             Error::ListCatalogs { source, .. }
+            | Error::ListNodes { source, .. }
             | Error::ListSchemas { source, .. }
             | Error::ListTables { source, .. } => source.status_code(),
 
@@ -12,6 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+mod cluster_info;
 pub mod columns;
 pub mod key_column_usage;
 mod memory_table;
@@ -23,6 +24,7 @@ pub mod schemata;
 mod table_constraints;
 mod table_names;
 pub mod tables;
+pub(crate) mod utils;
 
 use std::collections::HashMap;
 use std::sync::{Arc, Weak};
@@ -47,6 +49,7 @@ pub use table_names::*;
 
 use self::columns::InformationSchemaColumns;
 use crate::error::Result;
+use crate::information_schema::cluster_info::InformationSchemaClusterInfo;
 use crate::information_schema::key_column_usage::InformationSchemaKeyColumnUsage;
 use crate::information_schema::memory_table::{get_schema_columns, MemoryTable};
 use crate::information_schema::partitions::InformationSchemaPartitions;
@@ -150,6 +153,7 @@ impl InformationSchemaProvider {
     fn build_tables(&mut self) {
         let mut tables = HashMap::new();
 
+        // SECURITY NOTE:
         // Carefully consider the tables that may expose sensitive cluster configurations,
         // authentication details, and other critical information.
         // Only put these tables under `greptime` catalog to prevent info leak.
@@ -166,6 +170,10 @@ impl InformationSchemaProvider {
                 REGION_PEERS.to_string(),
                 self.build_table(REGION_PEERS).unwrap(),
             );
+            tables.insert(
+                CLUSTER_INFO.to_string(),
+                self.build_table(CLUSTER_INFO).unwrap(),
+            );
         }
 
         tables.insert(TABLES.to_string(), self.build_table(TABLES).unwrap());
@@ -251,6 +259,9 @@ impl InformationSchemaProvider {
                 self.catalog_name.clone(),
                 self.catalog_manager.clone(),
             )) as _),
+            CLUSTER_INFO => Some(Arc::new(InformationSchemaClusterInfo::new(
+                self.catalog_manager.clone(),
+            )) as _),
             _ => None,
         }
     }
src/catalog/src/information_schema/cluster_info.rs (new file, 317 lines)
@@ -0,0 +1,317 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::sync::{Arc, Weak};
use std::time::Duration;

use arrow_schema::SchemaRef as ArrowSchemaRef;
use common_catalog::consts::INFORMATION_SCHEMA_CLUSTER_INFO_TABLE_ID;
use common_config::Mode;
use common_error::ext::BoxedError;
use common_meta::cluster::{ClusterInfo, NodeInfo, NodeStatus};
use common_meta::peer::Peer;
use common_query::physical_plan::TaskContext;
use common_recordbatch::adapter::RecordBatchStreamAdapter;
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
use common_telemetry::warn;
use common_time::timestamp::Timestamp;
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
use datatypes::prelude::{ConcreteDataType, ScalarVectorBuilder, VectorRef};
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
use datatypes::timestamp::TimestampMillisecond;
use datatypes::value::Value;
use datatypes::vectors::{
    Int64VectorBuilder, StringVectorBuilder, TimestampMillisecondVectorBuilder,
};
use snafu::ResultExt;
use store_api::storage::{ScanRequest, TableId};

use super::CLUSTER_INFO;
use crate::error::{CreateRecordBatchSnafu, InternalSnafu, ListNodesSnafu, Result};
use crate::information_schema::{utils, InformationTable, Predicates};
use crate::CatalogManager;

const PEER_ID: &str = "peer_id";
const PEER_TYPE: &str = "peer_type";
const PEER_ADDR: &str = "peer_addr";
const VERSION: &str = "version";
const GIT_COMMIT: &str = "git_commit";
const START_TIME: &str = "start_time";
const UPTIME: &str = "uptime";
const ACTIVE_TIME: &str = "active_time";

const INIT_CAPACITY: usize = 42;

/// The `CLUSTER_INFO` table provides information about the current topology information of the cluster.
///
/// - `peer_id`: the peer server id.
/// - `peer_type`: the peer type, such as `datanode`, `frontend`, `metasrv` etc.
/// - `peer_addr`: the peer gRPC address.
/// - `version`: the build package version of the peer.
/// - `git_commit`: the build git commit hash of the peer.
/// - `start_time`: the starting time of the peer.
/// - `uptime`: the uptime of the peer.
/// - `active_time`: the time since the last activity of the peer.
///
pub(super) struct InformationSchemaClusterInfo {
    schema: SchemaRef,
    catalog_manager: Weak<dyn CatalogManager>,
    start_time_ms: u64,
}

impl InformationSchemaClusterInfo {
    pub(super) fn new(catalog_manager: Weak<dyn CatalogManager>) -> Self {
        Self {
            schema: Self::schema(),
            catalog_manager,
            start_time_ms: common_time::util::current_time_millis() as u64,
        }
    }

    pub(crate) fn schema() -> SchemaRef {
        Arc::new(Schema::new(vec![
            ColumnSchema::new(PEER_ID, ConcreteDataType::int64_datatype(), false),
            ColumnSchema::new(PEER_TYPE, ConcreteDataType::string_datatype(), false),
            ColumnSchema::new(PEER_ADDR, ConcreteDataType::string_datatype(), true),
            ColumnSchema::new(VERSION, ConcreteDataType::string_datatype(), false),
            ColumnSchema::new(GIT_COMMIT, ConcreteDataType::string_datatype(), false),
            ColumnSchema::new(
                START_TIME,
                ConcreteDataType::timestamp_millisecond_datatype(),
                true,
            ),
            ColumnSchema::new(UPTIME, ConcreteDataType::string_datatype(), true),
            ColumnSchema::new(ACTIVE_TIME, ConcreteDataType::string_datatype(), true),
        ]))
    }

    fn builder(&self) -> InformationSchemaClusterInfoBuilder {
        InformationSchemaClusterInfoBuilder::new(
            self.schema.clone(),
            self.catalog_manager.clone(),
            self.start_time_ms,
        )
    }
}

impl InformationTable for InformationSchemaClusterInfo {
    fn table_id(&self) -> TableId {
        INFORMATION_SCHEMA_CLUSTER_INFO_TABLE_ID
    }

    fn table_name(&self) -> &'static str {
        CLUSTER_INFO
    }

    fn schema(&self) -> SchemaRef {
        self.schema.clone()
    }

    fn to_stream(&self, request: ScanRequest) -> Result<SendableRecordBatchStream> {
        let schema = self.schema.arrow_schema().clone();
        let mut builder = self.builder();
        let stream = Box::pin(DfRecordBatchStreamAdapter::new(
            schema,
            futures::stream::once(async move {
                builder
                    .make_cluster_info(Some(request))
                    .await
                    .map(|x| x.into_df_record_batch())
                    .map_err(Into::into)
            }),
        ));
        Ok(Box::pin(
            RecordBatchStreamAdapter::try_new(stream)
                .map_err(BoxedError::new)
                .context(InternalSnafu)?,
        ))
    }
}

struct InformationSchemaClusterInfoBuilder {
    schema: SchemaRef,
    start_time_ms: u64,
    catalog_manager: Weak<dyn CatalogManager>,

    peer_ids: Int64VectorBuilder,
    peer_types: StringVectorBuilder,
    peer_addrs: StringVectorBuilder,
    versions: StringVectorBuilder,
    git_commits: StringVectorBuilder,
    start_times: TimestampMillisecondVectorBuilder,
    uptimes: StringVectorBuilder,
    active_times: StringVectorBuilder,
}

impl InformationSchemaClusterInfoBuilder {
    fn new(
        schema: SchemaRef,
        catalog_manager: Weak<dyn CatalogManager>,
        start_time_ms: u64,
    ) -> Self {
        Self {
            schema,
            catalog_manager,
            peer_ids: Int64VectorBuilder::with_capacity(INIT_CAPACITY),
            peer_types: StringVectorBuilder::with_capacity(INIT_CAPACITY),
            peer_addrs: StringVectorBuilder::with_capacity(INIT_CAPACITY),
            versions: StringVectorBuilder::with_capacity(INIT_CAPACITY),
            git_commits: StringVectorBuilder::with_capacity(INIT_CAPACITY),
            start_times: TimestampMillisecondVectorBuilder::with_capacity(INIT_CAPACITY),
            uptimes: StringVectorBuilder::with_capacity(INIT_CAPACITY),
            active_times: StringVectorBuilder::with_capacity(INIT_CAPACITY),
            start_time_ms,
        }
    }

    /// Construct the `information_schema.cluster_info` virtual table
    async fn make_cluster_info(&mut self, request: Option<ScanRequest>) -> Result<RecordBatch> {
        let predicates = Predicates::from_scan_request(&request);
        let mode = utils::running_mode(&self.catalog_manager)?.unwrap_or(Mode::Standalone);

        match mode {
            Mode::Standalone => {
                let build_info = common_version::build_info();

                self.add_node_info(
                    &predicates,
                    NodeInfo {
                        // For the standalone:
                        // - id always 0
                        // - empty string for peer_addr
                        peer: Peer {
                            id: 0,
                            addr: "".to_string(),
                        },
                        last_activity_ts: -1,
                        status: NodeStatus::Standalone,
                        version: build_info.version.to_string(),
                        git_commit: build_info.commit_short.to_string(),
                        // Use `self.start_time_ms` instead.
                        // It's not precise but enough.
                        start_time_ms: self.start_time_ms,
                    },
                );
            }
            Mode::Distributed => {
                if let Some(meta_client) = utils::meta_client(&self.catalog_manager)? {
                    let node_infos = meta_client
                        .list_nodes(None)
                        .await
                        .map_err(BoxedError::new)
                        .context(ListNodesSnafu)?;

                    for node_info in node_infos {
                        self.add_node_info(&predicates, node_info);
                    }
                } else {
                    warn!("Could not find meta client in distributed mode.");
                }
            }
        }

        self.finish()
    }

    fn add_node_info(&mut self, predicates: &Predicates, node_info: NodeInfo) {
        let peer_type = node_info.status.role_name();

        let row = [
            (PEER_ID, &Value::from(node_info.peer.id)),
            (PEER_TYPE, &Value::from(peer_type)),
            (PEER_ADDR, &Value::from(node_info.peer.addr.as_str())),
            (VERSION, &Value::from(node_info.version.as_str())),
            (GIT_COMMIT, &Value::from(node_info.git_commit.as_str())),
        ];

        if !predicates.eval(&row) {
            return;
        }

        if peer_type == "FRONTEND" || peer_type == "METASRV" {
            // Always set peer_id to be -1 for frontends and metasrvs
            self.peer_ids.push(Some(-1));
        } else {
            self.peer_ids.push(Some(node_info.peer.id as i64));
        }

        self.peer_types.push(Some(peer_type));
        self.peer_addrs.push(Some(&node_info.peer.addr));
        self.versions.push(Some(&node_info.version));
        self.git_commits.push(Some(&node_info.git_commit));
        if node_info.start_time_ms > 0 {
            self.start_times
                .push(Some(TimestampMillisecond(Timestamp::new_millisecond(
                    node_info.start_time_ms as i64,
                ))));
            self.uptimes.push(Some(
                Self::format_duration_since(node_info.start_time_ms).as_str(),
            ));
        } else {
            self.start_times.push(None);
            self.uptimes.push(None);
        }

        if node_info.last_activity_ts > 0 {
            self.active_times.push(Some(
                Self::format_duration_since(node_info.last_activity_ts as u64).as_str(),
            ));
        } else {
            self.active_times.push(None);
        }
    }

    fn format_duration_since(ts: u64) -> String {
        let now = common_time::util::current_time_millis() as u64;
        let duration_since = now - ts;
        humantime::format_duration(Duration::from_millis(duration_since)).to_string()
    }

    fn finish(&mut self) -> Result<RecordBatch> {
        let columns: Vec<VectorRef> = vec![
            Arc::new(self.peer_ids.finish()),
            Arc::new(self.peer_types.finish()),
            Arc::new(self.peer_addrs.finish()),
            Arc::new(self.versions.finish()),
            Arc::new(self.git_commits.finish()),
            Arc::new(self.start_times.finish()),
            Arc::new(self.uptimes.finish()),
            Arc::new(self.active_times.finish()),
        ];
        RecordBatch::new(self.schema.clone(), columns).context(CreateRecordBatchSnafu)
    }
}

impl DfPartitionStream for InformationSchemaClusterInfo {
    fn schema(&self) -> &ArrowSchemaRef {
        self.schema.arrow_schema()
    }

    fn execute(&self, _: Arc<TaskContext>) -> DfSendableRecordBatchStream {
        let schema = self.schema.arrow_schema().clone();
        let mut builder = self.builder();
        Box::pin(DfRecordBatchStreamAdapter::new(
            schema,
            futures::stream::once(async move {
                builder
                    .make_cluster_info(None)
                    .await
                    .map(|x| x.into_df_record_batch())
                    .map_err(Into::into)
            }),
        ))
    }
}
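A minimal sketch of how the new table could be exercised in a test, mirroring the runtime_metrics test touched later in this diff; the collection helpers (`RecordBatches::try_collect`, `pretty_print`) and the `catalog_manager: Arc<dyn CatalogManager>` value are assumed rather than taken from this change:

    let cluster_info = InformationSchemaClusterInfo::new(Arc::downgrade(&catalog_manager));
    let stream = cluster_info.to_stream(ScanRequest::default())?;
    // Assumed helpers from common_recordbatch, as used by other information_schema tests.
    let batches = RecordBatches::try_collect(stream).await?;
    assert!(batches.pretty_print()?.contains("peer_type"));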
@@ -258,7 +258,7 @@ impl InformationSchemaColumnsBuilder {
         let predicates = Predicates::from_scan_request(&request);

         for schema_name in catalog_manager.schema_names(&catalog_name).await? {
-            let mut stream = catalog_manager.tables(&catalog_name, &schema_name).await;
+            let mut stream = catalog_manager.tables(&catalog_name, &schema_name);

             while let Some(table) = stream.try_next().await? {
                 let keys = &table.table_info().meta.primary_key_indices;
@@ -243,7 +243,6 @@ impl InformationSchemaPartitionsBuilder {
         for schema_name in catalog_manager.schema_names(&catalog_name).await? {
             let table_info_stream = catalog_manager
                 .tables(&catalog_name, &schema_name)
-                .await
                 .try_filter_map(|t| async move {
                     let table_info = t.table_info();
                     if table_info.table_type == TableType::Temporary {
@@ -55,7 +55,7 @@ const INIT_CAPACITY: usize = 42;
 ///
 /// - `region_id`: the region id
 /// - `peer_id`: the region storage datanode peer id
-/// - `peer_addr`: the region storage datanode peer address
+/// - `peer_addr`: the region storage datanode gRPC peer address
 /// - `is_leader`: whether the peer is the leader
 /// - `status`: the region status, `ALIVE` or `DOWNGRADED`.
 /// - `down_seconds`: the duration of being offline, in seconds.
@@ -179,7 +179,6 @@ impl InformationSchemaRegionPeersBuilder {
         for schema_name in catalog_manager.schema_names(&catalog_name).await? {
             let table_id_stream = catalog_manager
                 .tables(&catalog_name, &schema_name)
-                .await
                 .try_filter_map(|t| async move {
                     let table_info = t.table_info();
                     if table_info.table_type == TableType::Temporary {
@@ -28,8 +28,8 @@ use datatypes::prelude::{ConcreteDataType, MutableVector};
 use datatypes::scalars::ScalarVectorBuilder;
 use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
 use datatypes::vectors::{
-    ConstantVector, Float64VectorBuilder, StringVector, StringVectorBuilder,
-    TimestampMillisecondVector, VectorRef,
+    ConstantVector, Float64VectorBuilder, StringVectorBuilder, TimestampMillisecondVector,
+    VectorRef,
 };
 use itertools::Itertools;
 use snafu::ResultExt;
@@ -45,8 +45,8 @@ pub(super) struct InformationSchemaMetrics {
 const METRIC_NAME: &str = "metric_name";
 const METRIC_VALUE: &str = "value";
 const METRIC_LABELS: &str = "labels";
-const NODE: &str = "node";
-const NODE_TYPE: &str = "node_type";
+const PEER_ADDR: &str = "peer_addr";
+const PEER_TYPE: &str = "peer_type";
 const TIMESTAMP: &str = "timestamp";

 /// The `information_schema.runtime_metrics` virtual table.
@@ -63,8 +63,8 @@ impl InformationSchemaMetrics {
             ColumnSchema::new(METRIC_NAME, ConcreteDataType::string_datatype(), false),
             ColumnSchema::new(METRIC_VALUE, ConcreteDataType::float64_datatype(), false),
             ColumnSchema::new(METRIC_LABELS, ConcreteDataType::string_datatype(), true),
-            ColumnSchema::new(NODE, ConcreteDataType::string_datatype(), false),
-            ColumnSchema::new(NODE_TYPE, ConcreteDataType::string_datatype(), false),
+            ColumnSchema::new(PEER_ADDR, ConcreteDataType::string_datatype(), true),
+            ColumnSchema::new(PEER_TYPE, ConcreteDataType::string_datatype(), false),
             ColumnSchema::new(
                 TIMESTAMP,
                 ConcreteDataType::timestamp_millisecond_datatype(),
@@ -104,6 +104,7 @@ impl InformationTable for InformationSchemaMetrics {
                     .map_err(Into::into)
             }),
         ));
+
         Ok(Box::pin(
             RecordBatchStreamAdapter::try_new(stream)
                 .map_err(BoxedError::new)
@@ -118,6 +119,8 @@ struct InformationSchemaMetricsBuilder {
     metric_names: StringVectorBuilder,
     metric_values: Float64VectorBuilder,
     metric_labels: StringVectorBuilder,
+    peer_addrs: StringVectorBuilder,
+    peer_types: StringVectorBuilder,
 }

 impl InformationSchemaMetricsBuilder {
@@ -127,13 +130,24 @@ impl InformationSchemaMetricsBuilder {
             metric_names: StringVectorBuilder::with_capacity(42),
             metric_values: Float64VectorBuilder::with_capacity(42),
             metric_labels: StringVectorBuilder::with_capacity(42),
+            peer_addrs: StringVectorBuilder::with_capacity(42),
+            peer_types: StringVectorBuilder::with_capacity(42),
         }
     }

-    fn add_metric(&mut self, metric_name: &str, labels: String, metric_value: f64) {
+    fn add_metric(
+        &mut self,
+        metric_name: &str,
+        labels: String,
+        metric_value: f64,
+        peer: Option<&str>,
+        peer_type: &str,
+    ) {
         self.metric_names.push(Some(metric_name));
         self.metric_values.push(Some(metric_value));
         self.metric_labels.push(Some(&labels));
+        self.peer_addrs.push(peer);
+        self.peer_types.push(Some(peer_type));
     }

     async fn make_metrics(&mut self, _request: Option<ScanRequest>) -> Result<RecordBatch> {
@@ -170,18 +184,19 @@ impl InformationSchemaMetricsBuilder {
                     .join(", "),
                 // Safety: always has a sample
                 ts.samples[0].value,
+                // The peer column is always `None` for standalone
+                None,
+                "STANDALONE",
             );
         }

+        // FIXME(dennis): fetching other peers metrics
         self.finish()
     }

     fn finish(&mut self) -> Result<RecordBatch> {
         let rows_num = self.metric_names.len();
-        let unknowns = Arc::new(ConstantVector::new(
-            Arc::new(StringVector::from(vec!["unknown"])),
-            rows_num,
-        ));
         let timestamps = Arc::new(ConstantVector::new(
             Arc::new(TimestampMillisecondVector::from_slice([
                 current_time_millis(),
@@ -193,9 +208,8 @@ impl InformationSchemaMetricsBuilder {
             Arc::new(self.metric_names.finish()),
             Arc::new(self.metric_values.finish()),
             Arc::new(self.metric_labels.finish()),
-            // TODO(dennis): supports node and node_type for cluster
-            unknowns.clone(),
-            unknowns,
+            Arc::new(self.peer_addrs.finish()),
+            Arc::new(self.peer_types.finish()),
             timestamps,
         ];

@@ -243,8 +257,8 @@ mod tests {
         assert!(result_literal.contains(METRIC_NAME));
         assert!(result_literal.contains(METRIC_VALUE));
         assert!(result_literal.contains(METRIC_LABELS));
-        assert!(result_literal.contains(NODE));
-        assert!(result_literal.contains(NODE_TYPE));
+        assert!(result_literal.contains(PEER_ADDR));
+        assert!(result_literal.contains(PEER_TYPE));
         assert!(result_literal.contains(TIMESTAMP));
     }
 }
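With the widened signature, call sites pass the two peer columns explicitly. A sketch of the standalone-mode call shape (the metric name, labels, and value below are illustrative only; the last two arguments are what the diff's `make_metrics` passes):

    builder.add_metric(
        "example_metric",                  // illustrative metric name
        "instance=standalone".to_string(), // illustrative labels
        42.0,                              // illustrative value
        None,                              // peer_addr: always None for standalone
        "STANDALONE",                      // peer_type
    );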
@@ -177,7 +177,7 @@ impl InformationSchemaTableConstraintsBuilder {
         let predicates = Predicates::from_scan_request(&request);

         for schema_name in catalog_manager.schema_names(&catalog_name).await? {
-            let mut stream = catalog_manager.tables(&catalog_name, &schema_name).await;
+            let mut stream = catalog_manager.tables(&catalog_name, &schema_name);

             while let Some(table) = stream.try_next().await? {
                 let keys = &table.table_info().meta.primary_key_indices;
@@ -40,5 +40,6 @@ pub const GLOBAL_STATUS: &str = "global_status";
 pub const SESSION_STATUS: &str = "session_status";
 pub const RUNTIME_METRICS: &str = "runtime_metrics";
 pub const PARTITIONS: &str = "partitions";
-pub const REGION_PEERS: &str = "greptime_region_peers";
+pub const REGION_PEERS: &str = "region_peers";
 pub const TABLE_CONSTRAINTS: &str = "table_constraints";
+pub const CLUSTER_INFO: &str = "cluster_info";
@@ -161,7 +161,7 @@ impl InformationSchemaTablesBuilder {
         let predicates = Predicates::from_scan_request(&request);

         for schema_name in catalog_manager.schema_names(&catalog_name).await? {
-            let mut stream = catalog_manager.tables(&catalog_name, &schema_name).await;
+            let mut stream = catalog_manager.tables(&catalog_name, &schema_name);

             while let Some(table) = stream.try_next().await? {
                 let table_info = table.table_info();
src/catalog/src/information_schema/utils.rs (new file, 53 lines)
@@ -0,0 +1,53 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::sync::{Arc, Weak};

use common_config::Mode;
use meta_client::client::MetaClient;
use snafu::OptionExt;

use crate::error::{Result, UpgradeWeakCatalogManagerRefSnafu};
use crate::kvbackend::KvBackendCatalogManager;
use crate::CatalogManager;

/// Try to get the server running mode from `[CatalogManager]` weak reference.
pub fn running_mode(catalog_manager: &Weak<dyn CatalogManager>) -> Result<Option<Mode>> {
    let catalog_manager = catalog_manager
        .upgrade()
        .context(UpgradeWeakCatalogManagerRefSnafu)?;

    Ok(catalog_manager
        .as_any()
        .downcast_ref::<KvBackendCatalogManager>()
        .map(|manager| manager.running_mode())
        .copied())
}

/// Try to get the `[MetaClient]` from `[CatalogManager]` weak reference.
pub fn meta_client(catalog_manager: &Weak<dyn CatalogManager>) -> Result<Option<Arc<MetaClient>>> {
    let catalog_manager = catalog_manager
        .upgrade()
        .context(UpgradeWeakCatalogManagerRefSnafu)?;

    let meta_client = match catalog_manager
        .as_any()
        .downcast_ref::<KvBackendCatalogManager>()
    {
        None => None,
        Some(manager) => manager.meta_client(),
    };

    Ok(meta_client)
}
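A minimal usage sketch of the two helpers, following the pattern of the cluster_info builder above; it assumes `self.catalog_manager` is a `Weak<dyn CatalogManager>` field on an information_schema table builder:

    let mode = utils::running_mode(&self.catalog_manager)?.unwrap_or(Mode::Standalone);
    if matches!(mode, Mode::Distributed) {
        if let Some(meta_client) = utils::meta_client(&self.catalog_manager)? {
            // Ask the metasrv for cluster state through `meta_client` here.
        }
    }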
@@ -22,6 +22,7 @@ use common_catalog::consts::{
     DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, INFORMATION_SCHEMA_NAME, NUMBERS_TABLE_ID,
 };
 use common_catalog::format_full_table_name;
+use common_config::Mode;
 use common_error::ext::BoxedError;
 use common_meta::cache_invalidator::{CacheInvalidator, Context, MultiCacheInvalidator};
 use common_meta::instruction::CacheIdent;
@@ -33,6 +34,7 @@ use common_meta::key::{TableMetadataManager, TableMetadataManagerRef};
 use common_meta::kv_backend::KvBackendRef;
 use futures_util::stream::BoxStream;
 use futures_util::{StreamExt, TryStreamExt};
+use meta_client::client::MetaClient;
 use moka::future::{Cache as AsyncCache, CacheBuilder};
 use moka::sync::Cache;
 use partition::manager::{PartitionRuleManager, PartitionRuleManagerRef};
@@ -56,6 +58,8 @@ use crate::CatalogManager;
 /// comes from `SystemCatalog`, which is static and read-only.
 #[derive(Clone)]
 pub struct KvBackendCatalogManager {
+    mode: Mode,
+    meta_client: Option<Arc<MetaClient>>,
     partition_manager: PartitionRuleManagerRef,
     table_metadata_manager: TableMetadataManagerRef,
     /// A sub-CatalogManager that handles system tables
@@ -101,6 +105,8 @@ const TABLE_CACHE_TTI: Duration = Duration::from_secs(5 * 60);

 impl KvBackendCatalogManager {
     pub async fn new(
+        mode: Mode,
+        meta_client: Option<Arc<MetaClient>>,
         backend: KvBackendRef,
         multi_cache_invalidator: Arc<MultiCacheInvalidator>,
     ) -> Arc<Self> {
@@ -113,6 +119,8 @@ impl KvBackendCatalogManager {
             .await;

         Arc::new_cyclic(|me| Self {
+            mode,
+            meta_client,
             partition_manager: Arc::new(PartitionRuleManager::new(backend.clone())),
             table_metadata_manager: Arc::new(TableMetadataManager::new(backend)),
             system_catalog: SystemCatalog {
@@ -127,6 +135,16 @@ impl KvBackendCatalogManager {
         })
     }

+    /// Returns the server running mode.
+    pub fn running_mode(&self) -> &Mode {
+        &self.mode
+    }
+
+    /// Returns the `[MetaClient]`.
+    pub fn meta_client(&self) -> Option<Arc<MetaClient>> {
+        self.meta_client.clone()
+    }
+
     pub fn partition_manager(&self) -> PartitionRuleManagerRef {
         self.partition_manager.clone()
     }
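Callers of `KvBackendCatalogManager::new` now supply the running mode and an optional meta client up front. A hedged sketch of the updated construction; `meta_client`, `kv_backend`, and `multi_cache_invalidator` are assumed to be in scope:

    let catalog_manager = KvBackendCatalogManager::new(
        Mode::Distributed,
        Some(meta_client.clone()),
        kv_backend.clone(),
        multi_cache_invalidator.clone(),
    )
    .await;
    assert!(catalog_manager.meta_client().is_some());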
@@ -283,11 +301,7 @@ impl CatalogManager for KvBackendCatalogManager {
         })
     }

-    async fn tables<'a>(
-        &'a self,
-        catalog: &'a str,
-        schema: &'a str,
-    ) -> BoxStream<'a, Result<TableRef>> {
+    fn tables<'a>(&'a self, catalog: &'a str, schema: &'a str) -> BoxStream<'a, Result<TableRef>> {
         let sys_tables = try_stream!({
             // System tables
             let sys_table_names = self.system_catalog.table_names(schema);
@@ -59,11 +59,7 @@ pub trait CatalogManager: Send + Sync {
     ) -> Result<Option<TableRef>>;

     /// Returns all tables with a stream by catalog and schema.
-    async fn tables<'a>(
-        &'a self,
-        catalog: &'a str,
-        schema: &'a str,
-    ) -> BoxStream<'a, Result<TableRef>>;
+    fn tables<'a>(&'a self, catalog: &'a str, schema: &'a str) -> BoxStream<'a, Result<TableRef>>;
 }

 pub type CatalogManagerRef = Arc<dyn CatalogManager>;
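Because `tables` is no longer `async`, callers drop the `.await` on the call itself and only await the stream items. A sketch of the migration, mirroring the information_schema builders updated above (`catalog_manager`, `catalog`, and `schema` assumed in scope):

    // before: let mut stream = catalog_manager.tables(catalog, schema).await;
    let mut stream = catalog_manager.tables(catalog, schema);
    while let Some(table) = stream.try_next().await? {
        // consume `table` here
    }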
@@ -117,11 +117,7 @@ impl CatalogManager for MemoryCatalogManager {
         Ok(result)
     }

-    async fn tables<'a>(
-        &'a self,
-        catalog: &'a str,
-        schema: &'a str,
-    ) -> BoxStream<'a, Result<TableRef>> {
+    fn tables<'a>(&'a self, catalog: &'a str, schema: &'a str) -> BoxStream<'a, Result<TableRef>> {
         let catalogs = self.catalogs.read().unwrap();

         let Some(schemas) = catalogs.get(catalog) else {
@@ -141,11 +137,11 @@ impl CatalogManager for MemoryCatalogManager {

         let tables = tables.values().cloned().collect::<Vec<_>>();

-        return Box::pin(try_stream!({
+        Box::pin(try_stream!({
             for table in tables {
                 yield table;
             }
-        }));
+        }))
     }
 }

@@ -368,9 +364,7 @@ mod tests {
             .await
             .unwrap()
             .unwrap();
-        let stream = catalog_list
-            .tables(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME)
-            .await;
+        let stream = catalog_list.tables(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME);
         let tables = stream.try_collect::<Vec<_>>().await.unwrap();
         assert_eq!(tables.len(), 1);
         assert_eq!(
@@ -1,115 +0,0 @@ (entire file removed; the deleted content follows)
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use api::v1::{ColumnDataType, ColumnDef, CreateTableExpr, SemanticType, TableId};
use client::{Client, Database};
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, MITO_ENGINE};
use prost::Message;
use substrait_proto::proto::plan_rel::RelType as PlanRelType;
use substrait_proto::proto::read_rel::{NamedTable, ReadType};
use substrait_proto::proto::rel::RelType;
use substrait_proto::proto::{PlanRel, ReadRel, Rel};
use tracing::{event, Level};

fn main() {
    tracing::subscriber::set_global_default(tracing_subscriber::FmtSubscriber::builder().finish())
        .unwrap();

    run();
}

#[tokio::main]
async fn run() {
    let client = Client::with_urls(vec!["127.0.0.1:3001"]);

    let create_table_expr = CreateTableExpr {
        catalog_name: "greptime".to_string(),
        schema_name: "public".to_string(),
        table_name: "test_logical_dist_exec".to_string(),
        desc: String::default(),
        column_defs: vec![
            ColumnDef {
                name: "timestamp".to_string(),
                data_type: ColumnDataType::TimestampMillisecond as i32,
                is_nullable: false,
                default_constraint: vec![],
                semantic_type: SemanticType::Timestamp as i32,
                comment: String::new(),
                ..Default::default()
            },
            ColumnDef {
                name: "key".to_string(),
                data_type: ColumnDataType::Uint64 as i32,
                is_nullable: false,
                default_constraint: vec![],
                semantic_type: SemanticType::Tag as i32,
                comment: String::new(),
                ..Default::default()
            },
            ColumnDef {
                name: "value".to_string(),
                data_type: ColumnDataType::Uint64 as i32,
                is_nullable: false,
                default_constraint: vec![],
                semantic_type: SemanticType::Field as i32,
                comment: String::new(),
                ..Default::default()
            },
        ],
        time_index: "timestamp".to_string(),
        primary_keys: vec!["key".to_string()],
        create_if_not_exists: false,
        table_options: Default::default(),
        table_id: Some(TableId { id: 1024 }),
        engine: MITO_ENGINE.to_string(),
    };

    let db = Database::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, client);
    let result = db.create(create_table_expr).await.unwrap();
    event!(Level::INFO, "create table result: {:#?}", result);

    let logical = mock_logical_plan();
    event!(Level::INFO, "plan size: {:#?}", logical.len());
    let result = db.logical_plan(logical).await.unwrap();

    event!(Level::INFO, "result: {:#?}", result);
}

fn mock_logical_plan() -> Vec<u8> {
    let catalog_name = "greptime".to_string();
    let schema_name = "public".to_string();
    let table_name = "test_logical_dist_exec".to_string();

    let named_table = NamedTable {
        names: vec![catalog_name, schema_name, table_name],
        advanced_extension: None,
    };
    let read_type = ReadType::NamedTable(named_table);

    let read_rel = ReadRel {
        read_type: Some(read_type),
        ..Default::default()
    };

    let mut buf = vec![];
    let rel = Rel {
        rel_type: Some(RelType::Read(Box::new(read_rel))),
    };
    let plan_rel = PlanRel {
        rel_type: Some(PlanRelType::Rel(rel)),
    };
    plan_rel.encode(&mut buf).unwrap();

    buf
}
@@ -1,181 +0,0 @@ (entire file removed; the deleted content follows)
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use api::v1::*;
use client::{Client, Database, DEFAULT_SCHEMA_NAME};
use derive_new::new;
use tracing::{error, info};

fn main() {
    tracing::subscriber::set_global_default(tracing_subscriber::FmtSubscriber::builder().finish())
        .unwrap();

    run();
}

#[tokio::main]
async fn run() {
    let greptimedb_endpoint =
        std::env::var("GREPTIMEDB_ENDPOINT").unwrap_or_else(|_| "localhost:4001".to_owned());

    let greptimedb_dbname =
        std::env::var("GREPTIMEDB_DBNAME").unwrap_or_else(|_| DEFAULT_SCHEMA_NAME.to_owned());

    let grpc_client = Client::with_urls(vec![&greptimedb_endpoint]);

    let client = Database::new_with_dbname(greptimedb_dbname, grpc_client);

    let stream_inserter = client.streaming_inserter().unwrap();

    if let Err(e) = stream_inserter
        .insert(vec![to_insert_request(weather_records_1())])
        .await
    {
        error!("Error: {e:?}");
    }

    if let Err(e) = stream_inserter
        .insert(vec![to_insert_request(weather_records_2())])
        .await
    {
        error!("Error: {e:?}");
    }

    let result = stream_inserter.finish().await;

    match result {
        Ok(rows) => {
            info!("Rows written: {rows}");
        }
        Err(e) => {
            error!("Error: {e:?}");
        }
    };
}

#[derive(new)]
struct WeatherRecord {
    timestamp_millis: i64,
    collector: String,
    temperature: f32,
    humidity: i32,
}

fn weather_records_1() -> Vec<WeatherRecord> {
    vec![
        WeatherRecord::new(1686109527000, "c1".to_owned(), 26.4, 15),
        WeatherRecord::new(1686023127000, "c1".to_owned(), 29.3, 20),
        WeatherRecord::new(1685936727000, "c1".to_owned(), 31.8, 13),
        WeatherRecord::new(1686109527000, "c2".to_owned(), 20.4, 67),
        WeatherRecord::new(1686023127000, "c2".to_owned(), 18.0, 74),
        WeatherRecord::new(1685936727000, "c2".to_owned(), 19.2, 81),
    ]
}

fn weather_records_2() -> Vec<WeatherRecord> {
    vec![
        WeatherRecord::new(1686109527001, "c3".to_owned(), 26.4, 15),
        WeatherRecord::new(1686023127002, "c3".to_owned(), 29.3, 20),
        WeatherRecord::new(1685936727003, "c3".to_owned(), 31.8, 13),
        WeatherRecord::new(1686109527004, "c4".to_owned(), 20.4, 67),
        WeatherRecord::new(1686023127005, "c4".to_owned(), 18.0, 74),
        WeatherRecord::new(1685936727006, "c4".to_owned(), 19.2, 81),
    ]
}

/// This function generates some random data and bundle them into a
/// `InsertRequest`.
///
/// Data structure:
///
/// - `ts`: a timestamp column
/// - `collector`: a tag column
/// - `temperature`: a value field of f32
/// - `humidity`: a value field of i32
///
fn to_insert_request(records: Vec<WeatherRecord>) -> InsertRequest {
    // convert records into columns
    let rows = records.len();

    // transpose records into columns
    let (timestamp_millis, collectors, temp, humidity) = records.into_iter().fold(
        (
            Vec::with_capacity(rows),
            Vec::with_capacity(rows),
            Vec::with_capacity(rows),
            Vec::with_capacity(rows),
        ),
        |mut acc, rec| {
            acc.0.push(rec.timestamp_millis);
            acc.1.push(rec.collector);
            acc.2.push(rec.temperature);
            acc.3.push(rec.humidity);

            acc
        },
    );

    let columns = vec![
        // timestamp column: `ts`
        Column {
            column_name: "ts".to_owned(),
            values: Some(column::Values {
                timestamp_millisecond_values: timestamp_millis,
                ..Default::default()
            }),
            semantic_type: SemanticType::Timestamp as i32,
            datatype: ColumnDataType::TimestampMillisecond as i32,
            ..Default::default()
        },
        // tag column: collectors
        Column {
            column_name: "collector".to_owned(),
            values: Some(column::Values {
                string_values: collectors.into_iter().collect(),
                ..Default::default()
            }),
            semantic_type: SemanticType::Tag as i32,
            datatype: ColumnDataType::String as i32,
            ..Default::default()
        },
        // field column: temperature
        Column {
            column_name: "temperature".to_owned(),
            values: Some(column::Values {
                f32_values: temp,
                ..Default::default()
            }),
            semantic_type: SemanticType::Field as i32,
            datatype: ColumnDataType::Float32 as i32,
            ..Default::default()
        },
        // field column: humidity
        Column {
            column_name: "humidity".to_owned(),
            values: Some(column::Values {
                i32_values: humidity,
                ..Default::default()
            }),
            semantic_type: SemanticType::Field as i32,
            datatype: ColumnDataType::Int32 as i32,
            ..Default::default()
        },
    ];

    InsertRequest {
        table_name: "weather_demo".to_owned(),
        columns,
        row_count: rows as u32,
    }
}
@@ -14,7 +14,6 @@

 use std::sync::Arc;

-use api::v1::greptime_database_client::GreptimeDatabaseClient;
 use api::v1::health_check_client::HealthCheckClient;
 use api::v1::prometheus_gateway_client::PrometheusGatewayClient;
 use api::v1::region::region_client::RegionClient as PbRegionClient;
@@ -28,21 +27,17 @@ use tonic::transport::Channel;
 use crate::load_balance::{LoadBalance, Loadbalancer};
 use crate::{error, Result};

-pub(crate) struct DatabaseClient {
-    pub(crate) inner: GreptimeDatabaseClient<Channel>,
-}
-
-pub(crate) struct FlightClient {
+pub struct FlightClient {
     addr: String,
     client: FlightServiceClient<Channel>,
 }

 impl FlightClient {
-    pub(crate) fn addr(&self) -> &str {
+    pub fn addr(&self) -> &str {
         &self.addr
     }

-    pub(crate) fn mut_inner(&mut self) -> &mut FlightServiceClient<Channel> {
+    pub fn mut_inner(&mut self) -> &mut FlightServiceClient<Channel> {
         &mut self.client
     }
 }
@@ -138,7 +133,7 @@ impl Client {
         Ok((addr, channel))
     }

-    fn max_grpc_recv_message_size(&self) -> usize {
+    pub fn max_grpc_recv_message_size(&self) -> usize {
         self.inner
             .channel_manager
             .config()
@@ -146,7 +141,7 @@ impl Client {
             .as_bytes() as usize
     }

-    fn max_grpc_send_message_size(&self) -> usize {
+    pub fn max_grpc_send_message_size(&self) -> usize {
         self.inner
             .channel_manager
             .config()
@@ -154,7 +149,7 @@ impl Client {
             .as_bytes() as usize
     }

-    pub(crate) fn make_flight_client(&self) -> Result<FlightClient> {
+    pub fn make_flight_client(&self) -> Result<FlightClient> {
         let (addr, channel) = self.find_channel()?;
         Ok(FlightClient {
             addr,
@@ -164,15 +159,6 @@ impl Client {
         })
     }

-    pub(crate) fn make_database_client(&self) -> Result<DatabaseClient> {
-        let (_, channel) = self.find_channel()?;
-        Ok(DatabaseClient {
-            inner: GreptimeDatabaseClient::new(channel)
-                .max_decoding_message_size(self.max_grpc_recv_message_size())
-                .max_encoding_message_size(self.max_grpc_send_message_size()),
-        })
-    }
-
     pub(crate) fn raw_region_client(&self) -> Result<PbRegionClient<Channel>> {
         let (_, channel) = self.find_channel()?;
         Ok(PbRegionClient::new(channel)
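The visibility changes make the Flight path usable outside the crate. A minimal sketch of the newly public surface, assuming a server is reachable at the given address:

    let client = Client::with_urls(vec!["127.0.0.1:4001"]);
    let mut flight_client = client.make_flight_client()?;
    let _addr = flight_client.addr();
    let _raw = flight_client.mut_inner(); // &mut FlightServiceClient<Channel>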
@@ -17,7 +17,7 @@ use std::sync::Arc;
 use std::time::Duration;

 use common_grpc::channel_manager::{ChannelConfig, ChannelManager};
-use common_meta::datanode_manager::{Datanode, DatanodeManager};
+use common_meta::node_manager::{DatanodeRef, FlownodeRef, NodeManager};
 use common_meta::peer::Peer;
 use moka::future::{Cache, CacheBuilder};

@@ -44,12 +44,17 @@ impl Debug for DatanodeClients {
 }

 #[async_trait::async_trait]
-impl DatanodeManager for DatanodeClients {
-    async fn datanode(&self, datanode: &Peer) -> Arc<dyn Datanode> {
+impl NodeManager for DatanodeClients {
+    async fn datanode(&self, datanode: &Peer) -> DatanodeRef {
         let client = self.get_client(datanode).await;

         Arc::new(RegionRequester::new(client))
     }
+
+    async fn flownode(&self, _node: &Peer) -> FlownodeRef {
+        // TODO(weny): Support it.
+        unimplemented!()
+    }
 }

 impl DatanodeClients {
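After the rename, datanode clients are reached through the `NodeManager` trait. A sketch assuming `clients: Arc<DatanodeClients>` and a `peer: Peer` are in scope; note that `flownode` still panics via `unimplemented!()`:

    let node_manager: Arc<dyn NodeManager> = clients;
    let _datanode: DatanodeRef = node_manager.datanode(&peer).await;
    // node_manager.flownode(&peer).await would currently panic.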
@@ -14,12 +14,10 @@

 mod client;
 pub mod client_manager;
-mod database;
 pub mod error;
 pub mod load_balance;
 mod metrics;
 pub mod region;
-mod stream_insert;

 pub use api;
 use api::v1::greptime_response::Response;
@@ -31,9 +29,7 @@ pub use common_recordbatch::{RecordBatches, SendableRecordBatchStream};
 use snafu::OptionExt;

 pub use self::client::Client;
-pub use self::database::Database;
 pub use self::error::{Error, Result};
-pub use self::stream_insert::StreamInserter;
 use crate::error::{IllegalDatabaseResponseSnafu, ServerSnafu};

 pub fn from_grpc_response(response: GreptimeResponse) -> Result<u32> {
@@ -24,8 +24,8 @@ use async_trait::async_trait;
 use common_error::ext::{BoxedError, ErrorExt};
 use common_error::status_code::StatusCode;
 use common_grpc::flight::{FlightDecoder, FlightMessage};
-use common_meta::datanode_manager::Datanode;
 use common_meta::error::{self as meta_error, Result as MetaResult};
+use common_meta::node_manager::Datanode;
 use common_recordbatch::error::ExternalSnafu;
 use common_recordbatch::{RecordBatchStreamWrapper, SendableRecordBatchStream};
 use common_telemetry::error;
@@ -1,118 +0,0 @@ (entire file removed; the deleted content follows)
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use api::v1::greptime_database_client::GreptimeDatabaseClient;
use api::v1::greptime_request::Request;
use api::v1::{
    AuthHeader, GreptimeRequest, GreptimeResponse, InsertRequest, InsertRequests, RequestHeader,
    RowInsertRequest, RowInsertRequests,
};
use tokio::sync::mpsc;
use tokio::task::JoinHandle;
use tokio_stream::wrappers::ReceiverStream;
use tonic::transport::Channel;
use tonic::{Response, Status};

use crate::error::{self, Result};
use crate::from_grpc_response;

/// A structure that provides some methods for streaming data insert.
///
/// [`StreamInserter`] cannot be constructed via the `StreamInserter::new` method.
/// You can use the following way to obtain [`StreamInserter`].
///
/// ```ignore
/// let grpc_client = Client::with_urls(vec!["127.0.0.1:4002"]);
/// let client = Database::new_with_dbname("db_name", grpc_client);
/// let stream_inserter = client.streaming_inserter().unwrap();
/// ```
///
/// If you want to see a concrete usage example, please see
/// [stream_inserter.rs](https://github.com/GreptimeTeam/greptimedb/blob/main/src/client/examples/stream_ingest.rs).
pub struct StreamInserter {
    sender: mpsc::Sender<GreptimeRequest>,

    auth_header: Option<AuthHeader>,

    dbname: String,

    join: JoinHandle<std::result::Result<Response<GreptimeResponse>, Status>>,
}

impl StreamInserter {
    pub(crate) fn new(
        mut client: GreptimeDatabaseClient<Channel>,
        dbname: String,
        auth_header: Option<AuthHeader>,
        channel_size: usize,
    ) -> StreamInserter {
        let (send, recv) = tokio::sync::mpsc::channel(channel_size);

        let join: JoinHandle<std::result::Result<Response<GreptimeResponse>, Status>> =
            tokio::spawn(async move {
                let recv_stream = ReceiverStream::new(recv);
                client.handle_requests(recv_stream).await
            });

        StreamInserter {
            sender: send,
            auth_header,
            dbname,
            join,
        }
    }

    pub async fn insert(&self, requests: Vec<InsertRequest>) -> Result<()> {
        let inserts = InsertRequests { inserts: requests };
        let request = self.to_rpc_request(Request::Inserts(inserts));

        self.sender.send(request).await.map_err(|e| {
            error::ClientStreamingSnafu {
                err_msg: e.to_string(),
            }
            .build()
        })
    }

    pub async fn row_insert(&self, requests: Vec<RowInsertRequest>) -> Result<()> {
        let inserts = RowInsertRequests { inserts: requests };
        let request = self.to_rpc_request(Request::RowInserts(inserts));

        self.sender.send(request).await.map_err(|e| {
            error::ClientStreamingSnafu {
                err_msg: e.to_string(),
            }
            .build()
        })
    }

    pub async fn finish(self) -> Result<u32> {
        drop(self.sender);

        let response = self.join.await.unwrap()?;
        let response = response.into_inner();
        from_grpc_response(response)
    }

    fn to_rpc_request(&self, request: Request) -> GreptimeRequest {
        GreptimeRequest {
            header: Some(RequestHeader {
                authorization: self.auth_header.clone(),
                dbname: self.dbname.clone(),
                ..Default::default()
            }),
            request: Some(request),
        }
    }
}
@@ -18,6 +18,7 @@ workspace = true
 [dependencies]
 async-trait.workspace = true
 auth.workspace = true
+base64.workspace = true
 catalog.workspace = true
 chrono.workspace = true
 clap.workspace = true
@@ -44,6 +45,7 @@ datatypes.workspace = true
 either = "1.8"
 etcd-client.workspace = true
 file-engine.workspace = true
+flow.workspace = true
 frontend.workspace = true
 futures.workspace = true
 human-panic = "1.2.2"
@@ -58,6 +60,7 @@ prost.workspace = true
 query.workspace = true
 rand.workspace = true
 regex.workspace = true
+reqwest.workspace = true
 rustyline = "10.1"
 serde.workspace = true
 serde_json.workspace = true
@@ -16,24 +16,42 @@

 use std::fmt;

-use clap::{FromArgMatches, Parser, Subcommand};
+use clap::{Parser, Subcommand};
 use cmd::error::Result;
-use cmd::options::{CliOptions, Options};
-use cmd::{
-    cli, datanode, frontend, greptimedb_cli, log_versions, metasrv, standalone, start_app, App,
-};
+use cmd::options::{GlobalOptions, Options};
+use cmd::{cli, datanode, frontend, log_versions, metasrv, standalone, start_app, App};
 use common_version::{short_version, version};

 #[derive(Parser)]
+#[command(name = "greptime", author, version, long_version = version!(), about)]
+#[command(propagate_version = true)]
+pub(crate) struct Command {
+    #[clap(subcommand)]
+    pub(crate) subcmd: SubCommand,
+
+    #[clap(flatten)]
+    pub(crate) global_options: GlobalOptions,
+}
+
+#[derive(Subcommand)]
 enum SubCommand {
+    /// Start datanode service.
     #[clap(name = "datanode")]
     Datanode(datanode::Command),

+    /// Start frontend service.
     #[clap(name = "frontend")]
     Frontend(frontend::Command),

+    /// Start metasrv service.
     #[clap(name = "metasrv")]
     Metasrv(metasrv::Command),

+    /// Run greptimedb as a standalone service.
     #[clap(name = "standalone")]
     Standalone(standalone::Command),

+    /// Execute the cli tools for greptimedb.
     #[clap(name = "cli")]
     Cli(cli::Command),
 }
@@ -67,13 +85,13 @@ impl SubCommand {
         Ok(app)
     }

-    fn load_options(&self, cli_options: &CliOptions) -> Result<Options> {
+    fn load_options(&self, global_options: &GlobalOptions) -> Result<Options> {
         match self {
-            SubCommand::Datanode(cmd) => cmd.load_options(cli_options),
-            SubCommand::Frontend(cmd) => cmd.load_options(cli_options),
-            SubCommand::Metasrv(cmd) => cmd.load_options(cli_options),
-            SubCommand::Standalone(cmd) => cmd.load_options(cli_options),
-            SubCommand::Cli(cmd) => cmd.load_options(cli_options),
+            SubCommand::Datanode(cmd) => cmd.load_options(global_options),
+            SubCommand::Frontend(cmd) => cmd.load_options(global_options),
+            SubCommand::Metasrv(cmd) => cmd.load_options(global_options),
+            SubCommand::Standalone(cmd) => cmd.load_options(global_options),
+            SubCommand::Cli(cmd) => cmd.load_options(global_options),
         }
     }
 }
@@ -96,6 +114,32 @@ static ALLOC: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc;

 #[tokio::main]
 async fn main() -> Result<()> {
+    setup_human_panic();
+    start(Command::parse()).await
+}
+
+async fn start(cli: Command) -> Result<()> {
+    let subcmd = cli.subcmd;
+
+    let app_name = subcmd.to_string();
+
+    let opts = subcmd.load_options(&cli.global_options)?;
+
+    let _guard = common_telemetry::init_global_logging(
+        &app_name,
+        opts.logging_options(),
+        cli.global_options.tracing_options(),
+        opts.node_id(),
+    );
+
+    log_versions(version!(), short_version!());
+
+    let app = subcmd.build(opts).await?;
+
+    start_app(app).await
+}
+
+fn setup_human_panic() {
     let metadata = human_panic::Metadata {
         version: env!("CARGO_PKG_VERSION").into(),
         name: "GreptimeDB".into(),
@@ -105,35 +149,4 @@ async fn main() -> Result<()> {
     human_panic::setup_panic!(metadata);

     common_telemetry::set_panic_hook();
-
-    let version = version!();
-    let cli = greptimedb_cli().version(version);
-
-    let cli = SubCommand::augment_subcommands(cli);
-
-    let args = cli.get_matches();
-
-    let subcmd = match SubCommand::from_arg_matches(&args) {
-        Ok(subcmd) => subcmd,
-        Err(e) => e.exit(),
-    };
-
-    let app_name = subcmd.to_string();
-
-    let cli_options = CliOptions::new(&args);
-
-    let opts = subcmd.load_options(&cli_options)?;
-
-    let _guard = common_telemetry::init_global_logging(
-        &app_name,
-        opts.logging_options(),
-        cli_options.tracing_options(),
-        opts.node_id(),
-    );
-
-    log_versions(version, short_version!());
-
-    let app = subcmd.build(opts).await?;
-
-    start_app(app).await
 }
@@ -22,7 +22,7 @@ mod helper;

 // Wait for https://github.com/GreptimeTeam/greptimedb/issues/2373
 #[allow(unused)]
-mod repl;
+// mod repl;
 // TODO(weny): Removes it
 #[allow(deprecated)]
 mod upgrade;
@@ -31,12 +31,12 @@ use async_trait::async_trait;
 use bench::BenchTableMetadataCommand;
 use clap::Parser;
 use common_telemetry::logging::LoggingOptions;
-pub use repl::Repl;
+// pub use repl::Repl;
 use upgrade::UpgradeCommand;

 use self::export::ExportCommand;
 use crate::error::Result;
-use crate::options::{CliOptions, Options};
+use crate::options::{GlobalOptions, Options};
 use crate::App;

 #[async_trait]
@@ -80,14 +80,14 @@ impl Command {
         self.cmd.build().await
     }

-    pub fn load_options(&self, cli_options: &CliOptions) -> Result<Options> {
+    pub fn load_options(&self, global_options: &GlobalOptions) -> Result<Options> {
         let mut logging_opts = LoggingOptions::default();

-        if let Some(dir) = &cli_options.log_dir {
+        if let Some(dir) = &global_options.log_dir {
             logging_opts.dir.clone_from(dir);
         }

-        logging_opts.level.clone_from(&cli_options.log_level);
+        logging_opts.level.clone_from(&global_options.log_level);

         Ok(Options::Cli(Box::new(logging_opts)))
     }
@@ -16,14 +16,14 @@ use std::path::Path;
 use std::sync::Arc;

 use async_trait::async_trait;
+use base64::engine::general_purpose;
+use base64::Engine;
 use clap::{Parser, ValueEnum};
-use client::api::v1::auth_header::AuthScheme;
-use client::api::v1::Basic;
-use client::{Client, Database, OutputData, DEFAULT_SCHEMA_NAME};
-use common_recordbatch::util::collect;
+use client::DEFAULT_SCHEMA_NAME;
 use common_telemetry::{debug, error, info, warn};
-use datatypes::scalars::ScalarVector;
-use datatypes::vectors::{StringVector, Vector};
+use serde_json::Value;
+use servers::http::greptime_result_v1::GreptimedbV1Response;
+use servers::http::GreptimeQueryOutput;
 use snafu::{OptionExt, ResultExt};
 use tokio::fs::File;
 use tokio::io::{AsyncWriteExt, BufWriter};
@@ -31,9 +31,8 @@ use tokio::sync::Semaphore;

 use crate::cli::{Instance, Tool};
 use crate::error::{
-    CollectRecordBatchesSnafu, ConnectServerSnafu, EmptyResultSnafu, Error, FileIoSnafu,
-    IllegalConfigSnafu, InvalidDatabaseNameSnafu, NotDataFromOutputSnafu, RequestDatabaseSnafu,
-    Result,
+    EmptyResultSnafu, Error, FileIoSnafu, HttpQuerySqlSnafu, InvalidDatabaseNameSnafu, Result,
+    SerdeJsonSnafu,
 };

 type TableReference = (String, String, String);
@@ -80,51 +79,75 @@ pub struct ExportCommand {

 impl ExportCommand {
     pub async fn build(&self) -> Result<Instance> {
-        let client = Client::with_urls([self.addr.clone()]);
-        client
-            .health_check()
-            .await
-            .with_context(|_| ConnectServerSnafu {
-                addr: self.addr.clone(),
-            })?;
         let (catalog, schema) = split_database(&self.database)?;
-        let mut database_client = Database::new(
-            catalog.clone(),
-            schema.clone().unwrap_or(DEFAULT_SCHEMA_NAME.to_string()),
-            client,
-        );

-        if let Some(auth_basic) = &self.auth_basic {
-            let (username, password) = auth_basic.split_once(':').context(IllegalConfigSnafu {
-                msg: "auth_basic cannot be split by ':'".to_string(),
-            })?;
-            database_client.set_auth(AuthScheme::Basic(Basic {
-                username: username.to_string(),
-                password: password.to_string(),
-            }));
-        }
+        let auth_header = if let Some(basic) = &self.auth_basic {
+            let encoded = general_purpose::STANDARD.encode(basic);
+            Some(format!("basic {}", encoded))
+        } else {
+            None
+        };

         Ok(Instance::new(Box::new(Export {
-            client: database_client,
+            addr: self.addr.clone(),
             catalog,
             schema,
             output_dir: self.output_dir.clone(),
             parallelism: self.export_jobs,
             target: self.target.clone(),
+            auth_header,
         })))
     }
 }

 pub struct Export {
-    client: Database,
+    addr: String,
     catalog: String,
     schema: Option<String>,
     output_dir: String,
     parallelism: usize,
     target: ExportTarget,
+    auth_header: Option<String>,
 }

 impl Export {
+    /// Execute one single sql query.
+    async fn sql(&self, sql: &str) -> Result<Option<Vec<Vec<Value>>>> {
+        let url = format!(
+            "http://{}/v1/sql?db={}-{}&sql={}",
+            self.addr,
+            self.catalog,
+            self.schema.as_deref().unwrap_or(DEFAULT_SCHEMA_NAME),
+            sql
+        );
+
+        let mut request = reqwest::Client::new()
+            .get(&url)
+            .header("Content-Type", "application/x-www-form-urlencoded");
+        if let Some(ref auth) = self.auth_header {
+            request = request.header("Authorization", auth);
+        }
+
+        let response = request.send().await.with_context(|_| HttpQuerySqlSnafu {
+            reason: format!("bad url: {}", url),
+        })?;
+        let response = response
+            .error_for_status()
+            .with_context(|_| HttpQuerySqlSnafu {
+                reason: format!("query failed: {}", sql),
+            })?;
+
+        let text = response.text().await.with_context(|_| HttpQuerySqlSnafu {
+            reason: "cannot get response text".to_string(),
+        })?;
+
+        let body = serde_json::from_str::<GreptimedbV1Response>(&text).context(SerdeJsonSnafu)?;
+        Ok(body.output().first().and_then(|output| match output {
+            GreptimeQueryOutput::Records(records) => Some(records.rows().clone()),
+            GreptimeQueryOutput::AffectedRows(_) => None,
+        }))
+    }
+
     /// Iterate over all db names.
     ///
     /// Newbie: `db_name` is catalog + schema.
@@ -132,35 +155,19 @@ impl Export {
         if let Some(schema) = &self.schema {
             Ok(vec![(self.catalog.clone(), schema.clone())])
         } else {
-            let mut client = self.client.clone();
-            client.set_catalog(self.catalog.clone());
-            let result =
-                client
-                .sql("show databases")
-                .await
-                .with_context(|_| RequestDatabaseSnafu {
-                    sql: "show databases".to_string(),
-                })?;
-            let OutputData::Stream(stream) = result.data else {
-                NotDataFromOutputSnafu.fail()?
+            let result = self.sql("show databases").await?;
+            let Some(records) = result else {
+                EmptyResultSnafu.fail()?
             };
-            let record_batch = collect(stream)
-                .await
-                .context(CollectRecordBatchesSnafu)?
-                .pop()
-                .context(EmptyResultSnafu)?;
-            let schemas = record_batch
-                .column(0)
-                .as_any()
-                .downcast_ref::<StringVector>()
-                .unwrap();
-            let mut result = Vec::with_capacity(schemas.len());
-            for i in 0..schemas.len() {
-                let schema = schemas.get_data(i).unwrap().to_owned();
+            let mut result = Vec::with_capacity(records.len());
+            for value in records {
+                let serde_json::Value::String(schema) = &value[0] else {
+                    unreachable!()
+                };
                 if schema == common_catalog::consts::INFORMATION_SCHEMA_NAME {
                     continue;
                 }
-                result.push((self.catalog.clone(), schema));
+                result.push((self.catalog.clone(), schema.clone()));
             }
             Ok(result)
         }
@@ -172,54 +179,30 @@ impl Export {
         // TODO: SQL injection hurts
         let sql = format!(
             "select table_catalog, table_schema, table_name from \
-            information_schema.tables where table_type = \'BASE TABLE\'\
+            information_schema.tables where table_type = \'BASE TABLE\' \
             and table_catalog = \'{catalog}\' and table_schema = \'{schema}\'",
         );
-        let mut client = self.client.clone();
-        client.set_catalog(catalog);
-        client.set_schema(schema);
-        let result = client
-            .sql(&sql)
-            .await
-            .with_context(|_| RequestDatabaseSnafu { sql })?;
-        let OutputData::Stream(stream) = result.data else {
-            NotDataFromOutputSnafu.fail()?
+        let result = self.sql(&sql).await?;
+        let Some(records) = result else {
+            EmptyResultSnafu.fail()?
         };
-        let Some(record_batch) = collect(stream)
-            .await
-            .context(CollectRecordBatchesSnafu)?
-            .pop()
-        else {
-            return Ok(vec![]);
-        };

-        debug!("Fetched table list: {}", record_batch.pretty_print());
+        debug!("Fetched table list: {:?}", records);

-        if record_batch.num_rows() == 0 {
+        if records.is_empty() {
             return Ok(vec![]);
         }

-        let mut result = Vec::with_capacity(record_batch.num_rows());
-        let catalog_column = record_batch
-            .column(0)
-            .as_any()
-            .downcast_ref::<StringVector>()
-            .unwrap();
-        let schema_column = record_batch
-            .column(1)
-            .as_any()
-            .downcast_ref::<StringVector>()
-            .unwrap();
-        let table_column = record_batch
-            .column(2)
-            .as_any()
-            .downcast_ref::<StringVector>()
-            .unwrap();
-        for i in 0..record_batch.num_rows() {
-            let catalog = catalog_column.get_data(i).unwrap().to_owned();
-            let schema = schema_column.get_data(i).unwrap().to_owned();
-            let table = table_column.get_data(i).unwrap().to_owned();
-            result.push((catalog, schema, table));
+        let mut result = Vec::with_capacity(records.len());
+        for value in records {
+            let mut t = Vec::with_capacity(3);
+            for v in &value {
+                let serde_json::Value::String(value) = v else {
+                    unreachable!()
+                };
+                t.push(value);
+            }
+            result.push((t[0].clone(), t[1].clone(), t[2].clone()));
         }

         Ok(result)
@@ -230,30 +213,15 @@ impl Export {
             r#"show create table "{}"."{}"."{}""#,
             catalog, schema, table
         );
-        let mut client = self.client.clone();
-        client.set_catalog(catalog);
-        client.set_schema(schema);
-        let result = client
-            .sql(&sql)
-            .await
-            .with_context(|_| RequestDatabaseSnafu { sql })?;
-        let OutputData::Stream(stream) = result.data else {
-            NotDataFromOutputSnafu.fail()?
+        let result = self.sql(&sql).await?;
+        let Some(records) = result else {
+            EmptyResultSnafu.fail()?
         };
-        let record_batch = collect(stream)
-            .await
-            .context(CollectRecordBatchesSnafu)?
-            .pop()
-            .context(EmptyResultSnafu)?;
-        let create_table = record_batch
-            .column(1)
-            .as_any()
-            .downcast_ref::<StringVector>()
-            .unwrap()
-            .get_data(0)
-            .unwrap();
+        let serde_json::Value::String(create_table) = &records[0][1] else {
+            unreachable!()
+        };

-        Ok(format!("{create_table};\n"))
+        Ok(format!("{};\n", create_table))
     }

     async fn export_create_table(&self) -> Result<()> {
@@ -321,20 +289,13 @@ impl Export {
             .context(FileIoSnafu)?;
         let output_dir = Path::new(&self.output_dir).join(format!("{catalog}-{schema}/"));

-        let mut client = self.client.clone();
-        client.set_catalog(catalog.clone());
-        client.set_schema(schema.clone());
-
         // copy database to
         let sql = format!(
             "copy database {} to '{}' with (format='parquet');",
             schema,
             output_dir.to_str().unwrap()
         );
-        client
-            .sql(sql.clone())
-            .await
-            .context(RequestDatabaseSnafu { sql })?;
+        self.sql(&sql).await?;
         info!("finished exporting {catalog}.{schema} data");

         // export copy from sql
@@ -420,82 +381,3 @@ fn split_database(database: &str) -> Result<(String, Option<String>)> {
         Ok((catalog.to_string(), Some(schema.to_string())))
     }
 }
-
-#[cfg(test)]
-mod tests {
-    use clap::Parser;
-    use client::{Client, Database};
-    use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
-
-    use crate::error::Result;
-    use crate::options::{CliOptions, Options};
-    use crate::{cli, standalone, App};
-
-    #[tokio::test(flavor = "multi_thread")]
-    async fn test_export_create_table_with_quoted_names() -> Result<()> {
-        let output_dir = tempfile::tempdir().unwrap();
-
-        let standalone = standalone::Command::parse_from([
-            "standalone",
-            "start",
-            "--data-home",
-            &*output_dir.path().to_string_lossy(),
-        ]);
-        let Options::Standalone(standalone_opts) =
-            standalone.load_options(&CliOptions::default())?
-        else {
-            unreachable!()
-        };
-        let mut instance = standalone.build(*standalone_opts).await?;
-        instance.start().await?;
-
-        let client = Client::with_urls(["127.0.0.1:4001"]);
-        let database = Database::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, client);
-        database
-            .sql(r#"CREATE DATABASE "cli.export.create_table";"#)
-            .await
-            .unwrap();
-        database
-            .sql(
-                r#"CREATE TABLE "cli.export.create_table"."a.b.c"(
-                    ts TIMESTAMP,
-                    TIME INDEX (ts)
-                ) engine=mito;
-                "#,
-            )
-            .await
-            .unwrap();
-
-        let output_dir = tempfile::tempdir().unwrap();
-        let cli = cli::Command::parse_from([
-            "cli",
-            "export",
-            "--addr",
-            "127.0.0.1:4001",
-            "--output-dir",
-            &*output_dir.path().to_string_lossy(),
-            "--target",
-            "create-table",
-        ]);
-        let mut cli_app = cli.build().await?;
-        cli_app.start().await?;
-
-        instance.stop().await?;
-
-        let output_file = output_dir
-            .path()
-            .join("greptime-cli.export.create_table.sql");
-        let res = std::fs::read_to_string(output_file).unwrap();
-        let expect = r#"CREATE TABLE IF NOT EXISTS "a.b.c" (
-  "ts" TIMESTAMP(3) NOT NULL,
-  TIME INDEX ("ts")
-)
-
-ENGINE=mito
-;
-"#;
-        assert_eq!(res.trim(), expect.trim());
-
-        Ok(())
-    }
-}
@@ -19,13 +19,14 @@ use std::time::Instant;
 use catalog::kvbackend::{
     CachedMetaKvBackend, CachedMetaKvBackendBuilder, KvBackendCatalogManager,
 };
-use client::{Client, Database, OutputData, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
+use client::{Client, OutputData, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
 use common_base::Plugins;
+use common_config::Mode;
 use common_error::ext::ErrorExt;
 use common_meta::cache_invalidator::MultiCacheInvalidator;
 use common_query::Output;
 use common_recordbatch::RecordBatches;
-use common_telemetry::logging;
+use common_telemetry::debug;
 use either::Either;
 use meta_client::client::MetaClientBuilder;
 use query::datafusion::DatafusionQueryEngine;
@@ -77,7 +78,7 @@ impl Repl {

         let history_file = history_file();
         if let Err(e) = rl.load_history(&history_file) {
-            logging::debug!(
+            debug!(
                 "failed to load history file on {}, error: {e}",
                 history_file.display()
             );
@@ -160,7 +161,10 @@ impl Repl {
         let start = Instant::now();

         let output = if let Some(query_engine) = &self.query_engine {
-            let query_ctx = QueryContext::with(self.database.catalog(), self.database.schema());
+            let query_ctx = Arc::new(QueryContext::with(
+                self.database.catalog(),
+                self.database.schema(),
+            ));

             let stmt = QueryLanguageParser::parse_sql(&sql, &query_ctx)
                 .with_context(|_| ParseSqlSnafu { sql: sql.clone() })?;
@@ -224,7 +228,7 @@ impl Drop for Repl {
         if self.rl.helper().is_some() {
             let history_file = history_file();
             if let Err(e) = self.rl.save_history(&history_file) {
-                logging::debug!(
+                debug!(
                     "failed to save history file on {}, error: {e}",
                     history_file.display()
                 );
@@ -256,8 +260,13 @@ async fn create_query_engine(meta_addr: &str) -> Result<DatafusionQueryEngine> {
     let multi_cache_invalidator = Arc::new(MultiCacheInvalidator::with_invalidators(vec![
         cached_meta_backend.clone(),
     ]));
-    let catalog_list =
-        KvBackendCatalogManager::new(cached_meta_backend.clone(), multi_cache_invalidator).await;
+    let catalog_list = KvBackendCatalogManager::new(
+        Mode::Distributed,
+        Some(meta_client.clone()),
+        cached_meta_backend.clone(),
+        multi_cache_invalidator,
+    )
+    .await;
     let plugins: Plugins = Default::default();
     let state = Arc::new(QueryEngineState::new(
         catalog_list,
@@ -27,7 +27,7 @@ use common_meta::key::table_info::{TableInfoKey, TableInfoValue};
 use common_meta::key::table_name::{TableNameKey, TableNameValue};
 use common_meta::key::table_region::{TableRegionKey, TableRegionValue};
 use common_meta::key::table_route::{TableRouteKey, TableRouteValue as NextTableRouteValue};
-use common_meta::key::{RegionDistribution, TableMetaKey, TableMetaValue};
+use common_meta::key::{MetaKey, RegionDistribution, TableMetaValue};
 use common_meta::kv_backend::etcd::EtcdStore;
 use common_meta::kv_backend::KvBackendRef;
 use common_meta::range_stream::PaginationStream;
@@ -137,7 +137,7 @@ impl MigrateTableMetadata {
         while let Some((key, value)) = stream.try_next().await.context(error::IterStreamSnafu)? {
             let table_id = self.migrate_table_route_key(value).await?;
             keys.push(key);
-            keys.push(TableRegionKey::new(table_id).as_raw_key())
+            keys.push(TableRegionKey::new(table_id).to_bytes())
         }

         info!("Total migrated TableRouteKeys: {}", keys.len() / 2);
@@ -165,7 +165,7 @@ impl MigrateTableMetadata {
         self.etcd_store
             .put(
                 PutRequest::new()
-                    .with_key(new_key.as_raw_key())
+                    .with_key(new_key.to_bytes())
                     .with_value(new_table_value.try_as_raw_value().unwrap()),
             )
             .await
@@ -217,7 +217,7 @@ impl MigrateTableMetadata {
         self.etcd_store
             .put(
                 PutRequest::new()
-                    .with_key(new_key.as_raw_key())
+                    .with_key(new_key.to_bytes())
                     .with_value(schema_name_value.try_as_raw_value().unwrap()),
             )
             .await
@@ -269,7 +269,7 @@ impl MigrateTableMetadata {
         self.etcd_store
             .put(
                 PutRequest::new()
-                    .with_key(new_key.as_raw_key())
+                    .with_key(new_key.to_bytes())
                     .with_value(catalog_name_value.try_as_raw_value().unwrap()),
             )
             .await
@@ -346,11 +346,11 @@ impl MigrateTableMetadata {
             .batch_put(
                 BatchPutRequest::new()
                     .add_kv(
-                        table_info_key.as_raw_key(),
+                        table_info_key.to_bytes(),
                         table_info_value.try_as_raw_value().unwrap(),
                     )
                     .add_kv(
-                        table_region_key.as_raw_key(),
+                        table_region_key.to_bytes(),
                         table_region_value.try_as_raw_value().unwrap(),
                     ),
             )
@@ -378,7 +378,7 @@ impl MigrateTableMetadata {
         self.etcd_store
             .put(
                 PutRequest::new()
-                    .with_key(table_name_key.as_raw_key())
+                    .with_key(table_name_key.to_bytes())
                     .with_value(table_name_value.try_as_raw_value().unwrap()),
             )
             .await
@@ -425,7 +425,7 @@ impl MigrateTableMetadata {
         } else {
             let mut req = BatchPutRequest::new();
             for (key, value) in datanode_table_kvs {
-                req = req.add_kv(key.as_raw_key(), value.try_as_raw_value().unwrap());
+                req = req.add_kv(key.to_bytes(), value.try_as_raw_value().unwrap());
             }
             self.etcd_store.batch_put(req).await.unwrap();
         }
@@ -18,7 +18,7 @@ use std::time::Duration;
 use async_trait::async_trait;
 use catalog::kvbackend::MetaKvBackend;
 use clap::Parser;
-use common_telemetry::{info, logging};
+use common_telemetry::info;
 use common_wal::config::DatanodeWalConfig;
 use datanode::config::DatanodeOptions;
 use datanode::datanode::{Datanode, DatanodeBuilder};
@@ -28,7 +28,7 @@ use servers::Mode;
 use snafu::{OptionExt, ResultExt};

 use crate::error::{MissingConfigSnafu, Result, ShutdownDatanodeSnafu, StartDatanodeSnafu};
-use crate::options::{CliOptions, Options};
+use crate::options::{GlobalOptions, Options};
 use crate::App;

 pub struct Instance {
@@ -82,8 +82,8 @@ impl Command {
         self.subcmd.build(opts).await
     }

-    pub fn load_options(&self, cli_options: &CliOptions) -> Result<Options> {
-        self.subcmd.load_options(cli_options)
+    pub fn load_options(&self, global_options: &GlobalOptions) -> Result<Options> {
+        self.subcmd.load_options(global_options)
     }
 }

@@ -99,9 +99,9 @@ impl SubCommand {
         }
     }

-    fn load_options(&self, cli_options: &CliOptions) -> Result<Options> {
+    fn load_options(&self, global_options: &GlobalOptions) -> Result<Options> {
         match self {
-            SubCommand::Start(cmd) => cmd.load_options(cli_options),
+            SubCommand::Start(cmd) => cmd.load_options(global_options),
         }
     }
 }
@@ -131,19 +131,19 @@ struct StartCommand {
 }

 impl StartCommand {
-    fn load_options(&self, cli_options: &CliOptions) -> Result<Options> {
+    fn load_options(&self, global_options: &GlobalOptions) -> Result<Options> {
         let mut opts: DatanodeOptions = Options::load_layered_options(
             self.config_file.as_deref(),
             self.env_prefix.as_ref(),
             DatanodeOptions::env_list_keys(),
         )?;

-        if let Some(dir) = &cli_options.log_dir {
+        if let Some(dir) = &global_options.log_dir {
             opts.logging.dir.clone_from(dir);
         }

-        if cli_options.log_level.is_some() {
-            opts.logging.level.clone_from(&cli_options.log_level);
+        if global_options.log_level.is_some() {
+            opts.logging.level.clone_from(&global_options.log_level);
         }

         if let Some(addr) = &self.rpc_addr {
@@ -210,8 +210,8 @@ impl StartCommand {
             .await
             .context(StartDatanodeSnafu)?;

-        logging::info!("Datanode start command: {:#?}", self);
-        logging::info!("Datanode options: {:#?}", opts);
+        info!("Datanode start command: {:#?}", self);
+        info!("Datanode options: {:#?}", opts);

         let node_id = opts
             .node_id
@@ -259,7 +259,7 @@ mod tests {
     use servers::Mode;

     use super::*;
-    use crate::options::{CliOptions, ENV_VAR_SEP};
+    use crate::options::{GlobalOptions, ENV_VAR_SEP};

     #[test]
     fn test_read_from_config_file() {
@@ -315,7 +315,8 @@ mod tests {
             ..Default::default()
         };

-        let Options::Datanode(options) = cmd.load_options(&CliOptions::default()).unwrap() else {
+        let Options::Datanode(options) = cmd.load_options(&GlobalOptions::default()).unwrap()
+        else {
             unreachable!()
         };

@@ -377,7 +378,7 @@ mod tests {
     #[test]
     fn test_try_from_cmd() {
         if let Options::Datanode(opt) = StartCommand::default()
-            .load_options(&CliOptions::default())
+            .load_options(&GlobalOptions::default())
             .unwrap()
         {
             assert_eq!(Mode::Standalone, opt.mode)
@@ -388,7 +389,7 @@ mod tests {
             metasrv_addr: Some(vec!["127.0.0.1:3002".to_string()]),
             ..Default::default()
         })
-        .load_options(&CliOptions::default())
+        .load_options(&GlobalOptions::default())
         .unwrap()
         {
             assert_eq!(Mode::Distributed, opt.mode)
@@ -398,7 +399,7 @@ mod tests {
             metasrv_addr: Some(vec!["127.0.0.1:3002".to_string()]),
             ..Default::default()
         })
-        .load_options(&CliOptions::default())
+        .load_options(&GlobalOptions::default())
         .is_err());

         // Providing node_id but leave metasrv_addr absent is ok since metasrv_addr has default value
@@ -406,7 +407,7 @@ mod tests {
             node_id: Some(42),
             ..Default::default()
         })
-        .load_options(&CliOptions::default())
+        .load_options(&GlobalOptions::default())
         .is_ok());
     }

@@ -415,7 +416,7 @@ mod tests {
         let cmd = StartCommand::default();

         let options = cmd
-            .load_options(&CliOptions {
+            .load_options(&GlobalOptions {
                 log_dir: Some("/tmp/greptimedb/test/logs".to_string()),
                 log_level: Some("debug".to_string()),

@@ -504,7 +505,8 @@ mod tests {
             ..Default::default()
         };

-        let Options::Datanode(opts) = command.load_options(&CliOptions::default()).unwrap()
+        let Options::Datanode(opts) =
+            command.load_options(&GlobalOptions::default()).unwrap()
         else {
             unreachable!()
         };
@@ -139,13 +139,6 @@ pub enum Error {
         location: Location,
     },

-    #[snafu(display("Failed to request database, sql: {sql}"))]
-    RequestDatabase {
-        sql: String,
-        location: Location,
-        source: client::Error,
-    },
-
     #[snafu(display("Failed to collect RecordBatches"))]
     CollectRecordBatches {
         location: Location,
@@ -218,6 +211,14 @@ pub enum Error {
         location: Location,
     },

+    #[snafu(display("Failed to run http request: {reason}"))]
+    HttpQuerySql {
+        reason: String,
+        #[snafu(source)]
+        error: reqwest::Error,
+        location: Location,
+    },
+
     #[snafu(display("Expect data from output, but got another thing"))]
     NotDataFromOutput { location: Location },

@@ -290,8 +291,9 @@ impl ErrorExt for Error {
             Error::StartProcedureManager { source, .. }
             | Error::StopProcedureManager { source, .. } => source.status_code(),
             Error::StartWalOptionsAllocator { source, .. } => source.status_code(),
-            Error::ReplCreation { .. } | Error::Readline { .. } => StatusCode::Internal,
-            Error::RequestDatabase { source, .. } => source.status_code(),
+            Error::ReplCreation { .. } | Error::Readline { .. } | Error::HttpQuerySql { .. } => {
+                StatusCode::Internal
+            }
             Error::CollectRecordBatches { source, .. }
             | Error::PrettyPrintRecordBatches { source, .. } => source.status_code(),
             Error::StartMetaClient { source, .. } => source.status_code(),
@@ -22,7 +22,7 @@ use client::client_manager::DatanodeClients;
 use common_meta::cache_invalidator::MultiCacheInvalidator;
 use common_meta::heartbeat::handler::parse_mailbox_message::ParseMailboxMessageHandler;
 use common_meta::heartbeat::handler::HandlerGroupExecutor;
-use common_telemetry::logging;
+use common_telemetry::info;
 use common_time::timezone::set_default_timezone;
 use frontend::frontend::FrontendOptions;
 use frontend::heartbeat::handler::invalidate_table_cache::InvalidateTableCacheHandler;
@@ -36,7 +36,7 @@ use servers::Mode;
 use snafu::{OptionExt, ResultExt};

 use crate::error::{self, InitTimezoneSnafu, MissingConfigSnafu, Result, StartFrontendSnafu};
-use crate::options::{CliOptions, Options};
+use crate::options::{GlobalOptions, Options};
 use crate::App;

 pub struct Instance {
@@ -90,8 +90,8 @@ impl Command {
         self.subcmd.build(opts).await
     }

-    pub fn load_options(&self, cli_options: &CliOptions) -> Result<Options> {
-        self.subcmd.load_options(cli_options)
+    pub fn load_options(&self, global_options: &GlobalOptions) -> Result<Options> {
+        self.subcmd.load_options(global_options)
     }
 }

@@ -107,9 +107,9 @@ impl SubCommand {
         }
     }

-    fn load_options(&self, cli_options: &CliOptions) -> Result<Options> {
+    fn load_options(&self, global_options: &GlobalOptions) -> Result<Options> {
         match self {
-            SubCommand::Start(cmd) => cmd.load_options(cli_options),
+            SubCommand::Start(cmd) => cmd.load_options(global_options),
         }
     }
 }
@@ -126,8 +126,6 @@ pub struct StartCommand {
     mysql_addr: Option<String>,
     #[clap(long)]
     postgres_addr: Option<String>,
-    #[clap(long)]
-    opentsdb_addr: Option<String>,
     #[clap(short, long)]
     config_file: Option<String>,
     #[clap(short, long)]
@@ -149,19 +147,19 @@ pub struct StartCommand {
 }

 impl StartCommand {
-    fn load_options(&self, cli_options: &CliOptions) -> Result<Options> {
+    fn load_options(&self, global_options: &GlobalOptions) -> Result<Options> {
         let mut opts: FrontendOptions = Options::load_layered_options(
             self.config_file.as_deref(),
             self.env_prefix.as_ref(),
             FrontendOptions::env_list_keys(),
         )?;

-        if let Some(dir) = &cli_options.log_dir {
+        if let Some(dir) = &global_options.log_dir {
             opts.logging.dir.clone_from(dir);
         }

-        if cli_options.log_level.is_some() {
-            opts.logging.level.clone_from(&cli_options.log_level);
+        if global_options.log_level.is_some() {
+            opts.logging.level.clone_from(&global_options.log_level);
         }

         let tls_opts = TlsOption::new(
@@ -198,11 +196,6 @@ impl StartCommand {
             opts.postgres.tls = tls_opts;
         }

-        if let Some(addr) = &self.opentsdb_addr {
-            opts.opentsdb.enable = true;
-            opts.opentsdb.addr.clone_from(addr);
-        }
-
         if let Some(enable) = self.influxdb_enable {
             opts.influxdb.enable = enable;
         }
@@ -226,8 +219,8 @@ impl StartCommand {
             .await
             .context(StartFrontendSnafu)?;

-        logging::info!("Frontend start command: {:#?}", self);
-        logging::info!("Frontend options: {:#?}", opts);
+        info!("Frontend start command: {:#?}", self);
+        info!("Frontend options: {:#?}", opts);

         set_default_timezone(opts.default_timezone.as_deref()).context(InitTimezoneSnafu)?;

@@ -253,6 +246,8 @@ impl StartCommand {
             cached_meta_backend.clone(),
         ]));
         let catalog_manager = KvBackendCatalogManager::new(
+            opts.mode,
+            Some(meta_client.clone()),
             cached_meta_backend.clone(),
             multi_cache_invalidator.clone(),
         )
@@ -266,6 +261,7 @@ impl StartCommand {
         ]);

         let heartbeat_task = HeartbeatTask::new(
+            &opts,
             meta_client.clone(),
             opts.heartbeat.clone(),
             Arc::new(executor),
@@ -308,7 +304,7 @@ mod tests {
     use servers::http::HttpOptions;

     use super::*;
-    use crate::options::{CliOptions, ENV_VAR_SEP};
+    use crate::options::{GlobalOptions, ENV_VAR_SEP};

     #[test]
     fn test_try_from_start_command() {
@@ -316,13 +312,13 @@ mod tests {
             http_addr: Some("127.0.0.1:1234".to_string()),
             mysql_addr: Some("127.0.0.1:5678".to_string()),
             postgres_addr: Some("127.0.0.1:5432".to_string()),
-            opentsdb_addr: Some("127.0.0.1:4321".to_string()),
             influxdb_enable: Some(false),
             disable_dashboard: Some(false),
             ..Default::default()
         };

-        let Options::Frontend(opts) = command.load_options(&CliOptions::default()).unwrap() else {
+        let Options::Frontend(opts) = command.load_options(&GlobalOptions::default()).unwrap()
+        else {
             unreachable!()
         };

@@ -330,7 +326,6 @@ mod tests {
         assert_eq!(ReadableSize::mb(64), opts.http.body_limit);
         assert_eq!(opts.mysql.addr, "127.0.0.1:5678");
         assert_eq!(opts.postgres.addr, "127.0.0.1:5432");
-        assert_eq!(opts.opentsdb.addr, "127.0.0.1:4321");

         let default_opts = FrontendOptions::default();

@@ -343,10 +338,6 @@ mod tests {
             default_opts.postgres.runtime_size
         );
         assert!(opts.opentsdb.enable);
-        assert_eq!(
-            opts.opentsdb.runtime_size,
-            default_opts.opentsdb.runtime_size
-        );

         assert!(!opts.influxdb.enable);
     }
@@ -362,6 +353,9 @@ mod tests {
            timeout = "30s"
            body_limit = "2GB"

+           [opentsdb]
+           enable = false
+
            [logging]
            level = "debug"
            dir = "/tmp/greptimedb/test/logs"
@@ -374,7 +368,7 @@ mod tests {
             ..Default::default()
         };

-        let Options::Frontend(fe_opts) = command.load_options(&CliOptions::default()).unwrap()
+        let Options::Frontend(fe_opts) = command.load_options(&GlobalOptions::default()).unwrap()
         else {
             unreachable!()
         };
@@ -386,6 +380,7 @@ mod tests {

         assert_eq!("debug", fe_opts.logging.level.as_ref().unwrap());
         assert_eq!("/tmp/greptimedb/test/logs".to_string(), fe_opts.logging.dir);
+        assert!(!fe_opts.opentsdb.enable);
     }

     #[tokio::test]
@@ -420,7 +415,7 @@ mod tests {
         };

         let options = cmd
-            .load_options(&CliOptions {
+            .load_options(&GlobalOptions {
                 log_dir: Some("/tmp/greptimedb/test/logs".to_string()),
                 log_level: Some("debug".to_string()),

@@ -506,7 +501,7 @@ mod tests {
         };

         let Options::Frontend(fe_opts) =
-            command.load_options(&CliOptions::default()).unwrap()
+            command.load_options(&GlobalOptions::default()).unwrap()
         else {
             unreachable!()
         };
@@ -15,7 +15,6 @@
 #![feature(assert_matches, let_chains)]

 use async_trait::async_trait;
-use clap::arg;
 use common_telemetry::{error, info};

 pub mod cli;
@@ -79,15 +78,6 @@ pub fn log_versions(version_string: &str, app_version: &str) {
     log_env_flags();
 }

-pub fn greptimedb_cli() -> clap::Command {
-    let cmd = clap::Command::new("greptimedb").subcommand_required(true);
-
-    #[cfg(feature = "tokio-console")]
-    let cmd = cmd.arg(arg!(--"tokio-console-addr"[TOKIO_CONSOLE_ADDR]));
-
-    cmd.args([arg!(--"log-dir"[LOG_DIR]), arg!(--"log-level"[LOG_LEVEL])])
-}
-
 fn log_env_flags() {
     info!("command line arguments");
     for argument in std::env::args() {
@@ -16,13 +16,13 @@ use std::time::Duration;

 use async_trait::async_trait;
 use clap::Parser;
-use common_telemetry::logging;
+use common_telemetry::info;
 use meta_srv::bootstrap::MetasrvInstance;
 use meta_srv::metasrv::MetasrvOptions;
 use snafu::ResultExt;

 use crate::error::{self, Result, StartMetaServerSnafu};
-use crate::options::{CliOptions, Options};
+use crate::options::{GlobalOptions, Options};
 use crate::App;

 pub struct Instance {
@@ -68,8 +68,8 @@ impl Command {
         self.subcmd.build(opts).await
     }

-    pub fn load_options(&self, cli_options: &CliOptions) -> Result<Options> {
-        self.subcmd.load_options(cli_options)
+    pub fn load_options(&self, global_options: &GlobalOptions) -> Result<Options> {
+        self.subcmd.load_options(global_options)
     }
 }

@@ -85,9 +85,9 @@ impl SubCommand {
         }
     }

-    fn load_options(&self, cli_options: &CliOptions) -> Result<Options> {
+    fn load_options(&self, global_options: &GlobalOptions) -> Result<Options> {
         match self {
-            SubCommand::Start(cmd) => cmd.load_options(cli_options),
+            SubCommand::Start(cmd) => cmd.load_options(global_options),
         }
     }
 }
@@ -126,19 +126,19 @@ struct StartCommand {
 }

 impl StartCommand {
-    fn load_options(&self, cli_options: &CliOptions) -> Result<Options> {
+    fn load_options(&self, global_options: &GlobalOptions) -> Result<Options> {
         let mut opts: MetasrvOptions = Options::load_layered_options(
             self.config_file.as_deref(),
             self.env_prefix.as_ref(),
             MetasrvOptions::env_list_keys(),
         )?;

-        if let Some(dir) = &cli_options.log_dir {
+        if let Some(dir) = &global_options.log_dir {
             opts.logging.dir.clone_from(dir);
         }

-        if cli_options.log_level.is_some() {
-            opts.logging.level.clone_from(&cli_options.log_level);
+        if global_options.log_level.is_some() {
+            opts.logging.level.clone_from(&global_options.log_level);
         }

         if let Some(addr) = &self.bind_addr {
@@ -198,8 +198,8 @@ impl StartCommand {
             .await
             .context(StartMetaServerSnafu)?;

-        logging::info!("Metasrv start command: {:#?}", self);
-        logging::info!("Metasrv options: {:#?}", opts);
+        info!("Metasrv start command: {:#?}", self);
+        info!("Metasrv options: {:#?}", opts);

         let builder = meta_srv::bootstrap::metasrv_builder(&opts, plugins.clone(), None)
             .await
@@ -235,7 +235,7 @@ mod tests {
             ..Default::default()
         };

-        let Options::Metasrv(options) = cmd.load_options(&CliOptions::default()).unwrap() else {
+        let Options::Metasrv(options) = cmd.load_options(&GlobalOptions::default()).unwrap() else {
             unreachable!()
         };
         assert_eq!("127.0.0.1:3002".to_string(), options.bind_addr);
@@ -270,7 +270,7 @@ mod tests {
             ..Default::default()
         };

-        let Options::Metasrv(options) = cmd.load_options(&CliOptions::default()).unwrap() else {
+        let Options::Metasrv(options) = cmd.load_options(&GlobalOptions::default()).unwrap() else {
             unreachable!()
         };
         assert_eq!("127.0.0.1:3002".to_string(), options.bind_addr);
@@ -315,7 +315,7 @@ mod tests {
         };

         let options = cmd
-            .load_options(&CliOptions {
+            .load_options(&GlobalOptions {
                 log_dir: Some("/tmp/greptimedb/test/logs".to_string()),
                 log_level: Some("debug".to_string()),

@@ -379,7 +379,8 @@ mod tests {
             ..Default::default()
         };

-        let Options::Metasrv(opts) = command.load_options(&CliOptions::default()).unwrap()
+        let Options::Metasrv(opts) =
+            command.load_options(&GlobalOptions::default()).unwrap()
         else {
             unreachable!()
         };
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-use clap::ArgMatches;
+use clap::Parser;
 use common_config::KvBackendConfig;
 use common_telemetry::logging::{LoggingOptions, TracingOptions};
 use common_wal::config::MetasrvWalConfig;
@@ -61,26 +61,23 @@ pub enum Options {
     Cli(Box<LoggingOptions>),
 }

-#[derive(Default)]
-pub struct CliOptions {
+#[derive(Parser, Default, Debug, Clone)]
+pub struct GlobalOptions {
+    #[clap(long, value_name = "LOG_DIR")]
+    #[arg(global = true)]
     pub log_dir: Option<String>,

+    #[clap(long, value_name = "LOG_LEVEL")]
+    #[arg(global = true)]
     pub log_level: Option<String>,

     #[cfg(feature = "tokio-console")]
+    #[clap(long, value_name = "TOKIO_CONSOLE_ADDR")]
+    #[arg(global = true)]
     pub tokio_console_addr: Option<String>,
 }

-impl CliOptions {
-    pub fn new(args: &ArgMatches) -> Self {
-        Self {
-            log_dir: args.get_one::<String>("log-dir").cloned(),
-            log_level: args.get_one::<String>("log-level").cloned(),
-
-            #[cfg(feature = "tokio-console")]
-            tokio_console_addr: args.get_one::<String>("tokio-console-addr").cloned(),
-        }
-    }
-
+impl GlobalOptions {
     pub fn tracing_options(&self) -> TracingOptions {
         TracingOptions {
             #[cfg(feature = "tokio-console")]
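A minimal sketch (not part of this change set; all names besides `GlobalOptions` are illustrative) of the clap behavior that the hunk above relies on: once the struct derives `Parser` and its fields are marked `global = true`, the flags are accepted before or after any subcommand and still land in the flattened struct, which is what replaces the hand-written `ArgMatches` lookup.

```rust
use clap::{Parser, Subcommand};

#[derive(Parser, Default, Debug, Clone)]
struct GlobalOptions {
    // Same attribute shape as the diff above: a long flag that is global.
    #[clap(long, value_name = "LOG_DIR")]
    #[arg(global = true)]
    log_dir: Option<String>,
}

#[derive(Parser)]
struct Cli {
    // The shared options are flattened into the top-level command.
    #[clap(flatten)]
    global: GlobalOptions,

    #[command(subcommand)]
    cmd: Cmd,
}

#[derive(Subcommand)]
enum Cmd {
    Start,
}

fn main() {
    // Because the argument is global, it may follow the subcommand,
    // e.g. `app start --log-dir /tmp/logs`.
    let cli = Cli::parse_from(["app", "start", "--log-dir", "/tmp/logs"]);
    assert_eq!(cli.global.log_dir.as_deref(), Some("/tmp/logs"));
}
```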
@@ -18,15 +18,17 @@ use std::{fs, path};
 use async_trait::async_trait;
 use catalog::kvbackend::KvBackendCatalogManager;
 use clap::Parser;
-use common_catalog::consts::MIN_USER_TABLE_ID;
+use common_catalog::consts::{MIN_USER_FLOW_ID, MIN_USER_TABLE_ID};
 use common_config::{metadata_store_dir, KvBackendConfig};
 use common_meta::cache_invalidator::{CacheInvalidatorRef, MultiCacheInvalidator};
-use common_meta::datanode_manager::DatanodeManagerRef;
+use common_meta::ddl::flow_meta::{FlowMetadataAllocator, FlowMetadataAllocatorRef};
 use common_meta::ddl::table_meta::{TableMetadataAllocator, TableMetadataAllocatorRef};
-use common_meta::ddl::ProcedureExecutorRef;
+use common_meta::ddl::{DdlContext, ProcedureExecutorRef};
 use common_meta::ddl_manager::DdlManager;
+use common_meta::key::flow::{FlowMetadataManager, FlowMetadataManagerRef};
 use common_meta::key::{TableMetadataManager, TableMetadataManagerRef};
 use common_meta::kv_backend::KvBackendRef;
+use common_meta::node_manager::NodeManagerRef;
 use common_meta::region_keeper::MemoryRegionKeeper;
 use common_meta::sequence::SequenceBuilder;
 use common_meta::wal_options_allocator::{WalOptionsAllocator, WalOptionsAllocatorRef};
@@ -38,6 +40,7 @@ use common_wal::config::StandaloneWalConfig;
 use datanode::config::{DatanodeOptions, ProcedureConfig, RegionEngineConfig, StorageConfig};
 use datanode::datanode::{Datanode, DatanodeBuilder};
 use file_engine::config::EngineConfig as FileEngineConfig;
+use flow::FlownodeBuilder;
 use frontend::frontend::FrontendOptions;
 use frontend::instance::builder::FrontendBuilder;
 use frontend::instance::{FrontendInstance, Instance as FeInstance, StandaloneDatanodeManager};
@@ -45,6 +48,7 @@ use frontend::server::Services;
 use frontend::service_config::{
     GrpcOptions, InfluxdbOptions, MysqlOptions, OpentsdbOptions, PostgresOptions, PromStoreOptions,
 };
+use meta_srv::metasrv::{FLOW_ID_SEQ, TABLE_ID_SEQ};
 use mito2::config::MitoConfig;
 use serde::{Deserialize, Serialize};
 use servers::export_metrics::ExportMetricsOption;
@@ -58,7 +62,7 @@ use crate::error::{
     Result, ShutdownDatanodeSnafu, ShutdownFrontendSnafu, StartDatanodeSnafu, StartFrontendSnafu,
     StartProcedureManagerSnafu, StartWalOptionsAllocatorSnafu, StopProcedureManagerSnafu,
 };
-use crate::options::{CliOptions, MixOptions, Options};
+use crate::options::{GlobalOptions, MixOptions, Options};
 use crate::App;

 #[derive(Parser)]
@@ -72,8 +76,8 @@ impl Command {
         self.subcmd.build(opts).await
     }

-    pub fn load_options(&self, cli_options: &CliOptions) -> Result<Options> {
-        self.subcmd.load_options(cli_options)
+    pub fn load_options(&self, global_options: &GlobalOptions) -> Result<Options> {
+        self.subcmd.load_options(global_options)
     }
 }

@@ -89,9 +93,9 @@ impl SubCommand {
         }
     }

-    fn load_options(&self, cli_options: &CliOptions) -> Result<Options> {
+    fn load_options(&self, global_options: &GlobalOptions) -> Result<Options> {
         match self {
-            SubCommand::Start(cmd) => cmd.load_options(cli_options),
+            SubCommand::Start(cmd) => cmd.load_options(global_options),
         }
     }
 }
@@ -253,8 +257,6 @@ pub struct StartCommand {
     mysql_addr: Option<String>,
     #[clap(long)]
     postgres_addr: Option<String>,
-    #[clap(long)]
-    opentsdb_addr: Option<String>,
     #[clap(short, long)]
     influxdb_enable: bool,
     #[clap(short, long)]
@@ -275,29 +277,29 @@ pub struct StartCommand {
 }

 impl StartCommand {
-    fn load_options(&self, cli_options: &CliOptions) -> Result<Options> {
+    fn load_options(&self, global_options: &GlobalOptions) -> Result<Options> {
         let opts: StandaloneOptions = Options::load_layered_options(
             self.config_file.as_deref(),
             self.env_prefix.as_ref(),
             StandaloneOptions::env_list_keys(),
         )?;

-        self.convert_options(cli_options, opts)
+        self.convert_options(global_options, opts)
     }

     pub fn convert_options(
         &self,
-        cli_options: &CliOptions,
+        global_options: &GlobalOptions,
         mut opts: StandaloneOptions,
     ) -> Result<Options> {
         opts.mode = Mode::Standalone;

-        if let Some(dir) = &cli_options.log_dir {
+        if let Some(dir) = &global_options.log_dir {
             opts.logging.dir.clone_from(dir);
         }

-        if cli_options.log_level.is_some() {
-            opts.logging.level.clone_from(&cli_options.log_level);
+        if global_options.log_level.is_some() {
+            opts.logging.level.clone_from(&global_options.log_level);
         }

         let tls_opts = TlsOption::new(
@@ -340,11 +342,6 @@ impl StartCommand {
             opts.postgres.tls = tls_opts;
         }

-        if let Some(addr) = &self.opentsdb_addr {
-            opts.opentsdb.enable = true;
-            opts.opentsdb.addr.clone_from(addr);
-        }
-
         if self.influxdb_enable {
             opts.influxdb.enable = self.influxdb_enable;
         }
@@ -401,53 +398,83 @@ impl StartCommand {
             .context(StartFrontendSnafu)?;

         let multi_cache_invalidator = Arc::new(MultiCacheInvalidator::default());
-        let catalog_manager =
-            KvBackendCatalogManager::new(kv_backend.clone(), multi_cache_invalidator.clone()).await;
+        let catalog_manager = KvBackendCatalogManager::new(
+            dn_opts.mode,
+            None,
+            kv_backend.clone(),
+            multi_cache_invalidator.clone(),
+        )
+        .await;
+
+        let table_metadata_manager =
+            Self::create_table_metadata_manager(kv_backend.clone()).await?;
+
+        let flow_builder = FlownodeBuilder::new(
+            Default::default(),
+            fe_plugins.clone(),
+            table_metadata_manager.clone(),
+            catalog_manager.clone(),
+        )
+        .with_kv_backend(kv_backend.clone());
+        let flownode = Arc::new(flow_builder.build().await);

         let builder =
             DatanodeBuilder::new(dn_opts, fe_plugins.clone()).with_kv_backend(kv_backend.clone());
         let datanode = builder.build().await.context(StartDatanodeSnafu)?;

-        let datanode_manager = Arc::new(StandaloneDatanodeManager(datanode.region_server()));
+        let node_manager = Arc::new(StandaloneDatanodeManager {
+            region_server: datanode.region_server(),
+            flow_server: flownode.clone(),
+        });

         let table_id_sequence = Arc::new(
-            SequenceBuilder::new("table_id", kv_backend.clone())
+            SequenceBuilder::new(TABLE_ID_SEQ, kv_backend.clone())
                 .initial(MIN_USER_TABLE_ID as u64)
                 .step(10)
                 .build(),
         );
+        let flow_id_sequence = Arc::new(
+            SequenceBuilder::new(FLOW_ID_SEQ, kv_backend.clone())
+                .initial(MIN_USER_FLOW_ID as u64)
+                .step(10)
+                .build(),
+        );
         let wal_options_allocator = Arc::new(WalOptionsAllocator::new(
             opts.wal_meta.clone(),
             kv_backend.clone(),
         ));
-        let table_metadata_manager =
-            Self::create_table_metadata_manager(kv_backend.clone()).await?;
+        let flow_metadata_manager = Arc::new(FlowMetadataManager::new(kv_backend.clone()));

         let table_meta_allocator = Arc::new(TableMetadataAllocator::new(
             table_id_sequence,
             wal_options_allocator.clone(),
         ));
+        let flow_meta_allocator = Arc::new(FlowMetadataAllocator::with_noop_peer_allocator(
+            flow_id_sequence,
+        ));

         let ddl_task_executor = Self::create_ddl_task_executor(
-            table_metadata_manager,
             procedure_manager.clone(),
-            datanode_manager.clone(),
+            node_manager.clone(),
             multi_cache_invalidator,
+            table_metadata_manager,
             table_meta_allocator,
+            flow_metadata_manager,
+            flow_meta_allocator,
         )
         .await?;

-        let mut frontend = FrontendBuilder::new(
-            kv_backend,
-            catalog_manager,
-            datanode_manager,
-            ddl_task_executor,
-        )
-        .with_plugin(fe_plugins.clone())
-        .try_build()
-        .await
-        .context(StartFrontendSnafu)?;
+        let mut frontend =
+            FrontendBuilder::new(kv_backend, catalog_manager, node_manager, ddl_task_executor)
+                .with_plugin(fe_plugins.clone())
+                .try_build()
+                .await
+                .context(StartFrontendSnafu)?;
+
+        // flow server need to be able to use frontend to write insert requests back
+        flownode
+            .set_frontend_invoker(Box::new(frontend.clone()))
+            .await;
+        let _handle = flownode.clone().run_background();

         let servers = Services::new(fe_opts.clone(), Arc::new(frontend.clone()), fe_plugins)
             .build()
@@ -466,20 +493,26 @@ impl StartCommand {
     }

     pub async fn create_ddl_task_executor(
-        table_metadata_manager: TableMetadataManagerRef,
         procedure_manager: ProcedureManagerRef,
-        datanode_manager: DatanodeManagerRef,
+        node_manager: NodeManagerRef,
         cache_invalidator: CacheInvalidatorRef,
-        table_meta_allocator: TableMetadataAllocatorRef,
+        table_metadata_manager: TableMetadataManagerRef,
+        table_metadata_allocator: TableMetadataAllocatorRef,
+        flow_metadata_manager: FlowMetadataManagerRef,
+        flow_metadata_allocator: FlowMetadataAllocatorRef,
     ) -> Result<ProcedureExecutorRef> {
         let procedure_executor: ProcedureExecutorRef = Arc::new(
             DdlManager::try_new(
+                DdlContext {
+                    node_manager,
+                    cache_invalidator,
+                    memory_region_keeper: Arc::new(MemoryRegionKeeper::default()),
+                    table_metadata_manager,
+                    table_metadata_allocator,
+                    flow_metadata_manager,
+                    flow_metadata_allocator,
+                },
                 procedure_manager,
-                datanode_manager,
-                cache_invalidator,
-                table_metadata_manager,
-                table_meta_allocator,
-                Arc::new(MemoryRegionKeeper::default()),
                 true,
             )
             .context(InitDdlManagerSnafu)?,
@@ -516,7 +549,7 @@ mod tests {
     use servers::Mode;

     use super::*;
-    use crate::options::{CliOptions, ENV_VAR_SEP};
+    use crate::options::{GlobalOptions, ENV_VAR_SEP};

     #[tokio::test]
     async fn test_try_from_start_command_to_anymap() {
@@ -590,6 +623,9 @@ mod tests {
 timeout = "33s"
 body_limit = "128MB"

+[opentsdb]
+enable = true
+
 [logging]
 level = "debug"
 dir = "/tmp/greptimedb/test/logs"
@@ -601,7 +637,8 @@ mod tests {
             ..Default::default()
         };

-        let Options::Standalone(options) = cmd.load_options(&CliOptions::default()).unwrap() else {
+        let Options::Standalone(options) = cmd.load_options(&GlobalOptions::default()).unwrap()
+        else {
             unreachable!()
         };
         let fe_opts = options.frontend;
@@ -617,6 +654,7 @@ mod tests {
         assert_eq!(2, fe_opts.mysql.runtime_size);
         assert_eq!(None, fe_opts.mysql.reject_no_database);
         assert!(fe_opts.influxdb.enable);
+        assert!(fe_opts.opentsdb.enable);

         let DatanodeWalConfig::RaftEngine(raft_engine_config) = dn_opts.wal else {
             unreachable!()
@@ -635,7 +673,7 @@ mod tests {
         match &dn_opts.storage.providers[1] {
             datanode::config::ObjectStoreConfig::S3(s3_config) => {
                 assert_eq!(
-                    "Secret([REDACTED alloc::string::String])".to_string(),
+                    "SecretBox<alloc::string::String>([REDACTED])".to_string(),
                     format!("{:?}", s3_config.access_key_id)
                 );
             }
@@ -656,7 +694,7 @@ mod tests {
         };

         let Options::Standalone(opts) = cmd
-            .load_options(&CliOptions {
+            .load_options(&GlobalOptions {
                 log_dir: Some("/tmp/greptimedb/test/logs".to_string()),
                 log_level: Some("debug".to_string()),

@@ -729,7 +767,7 @@ mod tests {
         };

         let Options::Standalone(opts) =
-            command.load_options(&CliOptions::default()).unwrap()
+            command.load_options(&GlobalOptions::default()).unwrap()
         else {
             unreachable!()
         };
@@ -16,6 +16,7 @@ common-macro.workspace = true
 paste = "1.0"
 serde = { version = "1.0", features = ["derive"] }
 snafu.workspace = true
+zeroize = { version = "1.6", default-features = false, features = ["alloc"] }

 [dev-dependencies]
 toml.workspace = true
@@ -53,14 +53,6 @@ impl ErrorExt for Error {
     fn as_any(&self) -> &dyn Any {
         self
     }
-
-    fn location_opt(&self) -> Option<common_error::snafu::Location> {
-        match self {
-            Error::Overflow { location, .. } => Some(*location),
-            Error::Underflow { location, .. } => Some(*location),
-            Error::Eof { location, .. } => Some(*location),
-        }
-    }
 }

 macro_rules! impl_read_le {
@@ -15,67 +15,12 @@
 pub mod bit_vec;
 pub mod buffer;
 pub mod bytes;
+pub mod plugins;
 #[allow(clippy::all)]
 pub mod readable_size;
-use core::any::Any;
-use std::sync::{Arc, Mutex, MutexGuard};
+pub mod secrets;

 pub type AffectedRows = usize;

 pub use bit_vec::BitVec;
+pub use plugins::Plugins;
-/// [`Plugins`] is a wrapper of Arc contents.
-/// Make it Cloneable and we can treat it like an Arc struct.
-#[derive(Default, Clone)]
-pub struct Plugins {
-    inner: Arc<Mutex<anymap::Map<dyn Any + Send + Sync>>>,
-}
-
-impl Plugins {
-    pub fn new() -> Self {
-        Self {
-            inner: Arc::new(Mutex::new(anymap::Map::new())),
-        }
-    }
-
-    fn lock(&self) -> MutexGuard<anymap::Map<dyn Any + Send + Sync>> {
-        self.inner.lock().unwrap()
-    }
-
-    pub fn insert<T: 'static + Send + Sync>(&self, value: T) {
-        let _ = self.lock().insert(value);
-    }
-
-    pub fn get<T: 'static + Send + Sync + Clone>(&self) -> Option<T> {
-        let binding = self.lock();
-        binding.get::<T>().cloned()
-    }
-
-    pub fn map_mut<T: 'static + Send + Sync, F, R>(&self, mapper: F) -> R
-    where
-        F: FnOnce(Option<&mut T>) -> R,
-    {
-        let mut binding = self.lock();
-        let opt = binding.get_mut::<T>();
-        mapper(opt)
-    }
-
-    pub fn map<T: 'static + Send + Sync, F, R>(&self, mapper: F) -> Option<R>
-    where
-        F: FnOnce(&T) -> R,
-    {
-        let binding = self.lock();
-        binding.get::<T>().map(mapper)
-    }
-
-    pub fn len(&self) -> usize {
-        let binding = self.lock();
-        binding.len()
-    }
-
-    pub fn is_empty(&self) -> bool {
-        let binding = self.lock();
-        binding.is_empty()
-    }
-}
src/common/base/src/plugins.rs (new file, 127 lines)
@@ -0,0 +1,127 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::any::Any;
+use std::sync::{Arc, RwLock, RwLockReadGuard, RwLockWriteGuard};
+
+/// [`Plugins`] is a wrapper of [AnyMap](https://github.com/chris-morgan/anymap) and provides a thread-safe way to store and retrieve plugins.
+/// Make it Cloneable and we can treat it like an Arc struct.
+#[derive(Default, Clone)]
+pub struct Plugins {
+    inner: Arc<RwLock<anymap::Map<dyn Any + Send + Sync>>>,
+}
+
+impl Plugins {
+    pub fn new() -> Self {
+        Self {
+            inner: Arc::new(RwLock::new(anymap::Map::new())),
+        }
+    }
+
+    pub fn insert<T: 'static + Send + Sync>(&self, value: T) {
+        let _ = self.write().insert(value);
+    }
+
+    pub fn get<T: 'static + Send + Sync + Clone>(&self) -> Option<T> {
+        self.read().get::<T>().cloned()
+    }
+
+    pub fn map_mut<T: 'static + Send + Sync, F, R>(&self, mapper: F) -> R
+    where
+        F: FnOnce(Option<&mut T>) -> R,
+    {
+        let mut binding = self.write();
+        let opt = binding.get_mut::<T>();
+        mapper(opt)
+    }
+
+    pub fn map<T: 'static + Send + Sync, F, R>(&self, mapper: F) -> Option<R>
+    where
+        F: FnOnce(&T) -> R,
+    {
+        self.read().get::<T>().map(mapper)
+    }
+
+    pub fn len(&self) -> usize {
+        self.read().len()
+    }
+
+    pub fn is_empty(&self) -> bool {
+        self.read().is_empty()
+    }
+
+    fn read(&self) -> RwLockReadGuard<anymap::Map<dyn Any + Send + Sync>> {
+        self.inner.read().unwrap()
+    }
+
+    fn write(&self) -> RwLockWriteGuard<anymap::Map<dyn Any + Send + Sync>> {
+        self.inner.write().unwrap()
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_plugins() {
+        #[derive(Debug, Clone)]
+        struct FooPlugin {
+            x: i32,
+        }
+
+        #[derive(Debug, Clone)]
+        struct BarPlugin {
+            y: String,
+        }
+
+        let plugins = Plugins::new();
+
+        let m = plugins.clone();
+        let thread1 = std::thread::spawn(move || {
+            m.insert(FooPlugin { x: 42 });
+
+            if let Some(foo) = m.get::<FooPlugin>() {
+                assert_eq!(foo.x, 42);
+            }
+
+            assert_eq!(m.map::<FooPlugin, _, _>(|foo| foo.x * 2), Some(84));
+        });
+
+        let m = plugins.clone();
+        let thread2 = std::thread::spawn(move || {
+            m.clone().insert(BarPlugin {
+                y: "hello".to_string(),
+            });
+
+            if let Some(bar) = m.get::<BarPlugin>() {
+                assert_eq!(bar.y, "hello");
+            }
+
+            m.map_mut::<BarPlugin, _, _>(|bar| {
+                if let Some(bar) = bar {
+                    bar.y = "world".to_string();
+                }
+            });
+
+            assert_eq!(m.get::<BarPlugin>().unwrap().y, "world");
+        });
+
+        thread1.join().unwrap();
+        thread2.join().unwrap();
+
+        assert_eq!(plugins.len(), 2);
+        assert!(!plugins.is_empty());
+    }
+}
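A self-contained sketch (plain std types, not the `Plugins` API itself) of the locking change visible above: the anymap moved from a `Mutex` in `lib.rs` to an `RwLock` in the new `plugins.rs`, so concurrent readers no longer serialize while writers keep exclusive access.

```rust
use std::sync::{Arc, RwLock};

fn main() {
    let shared = Arc::new(RwLock::new(vec![1, 2, 3]));

    // Two read guards may coexist; a Mutex would have serialized these.
    let r1 = shared.read().unwrap();
    let r2 = shared.read().unwrap();
    assert_eq!(r1.len(), r2.len());
    drop((r1, r2));

    // A write guard is exclusive, mirroring Plugins::insert / map_mut.
    shared.write().unwrap().push(4);
    assert_eq!(shared.read().unwrap().len(), 4);
}
```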
src/common/base/src/secrets.rs (new file, 218 lines)
@@ -0,0 +1,218 @@
+// This file is copied from https://github.com/iqlusioninc/crates/blob/f98d4ccf/secrecy/src/lib.rs
+
+//! [`SecretBox`] wrapper type for more carefully handling secret values
+//! (e.g. passwords, cryptographic keys, access tokens or other credentials)
+//!
+//! # Goals
+//!
+//! - Make secret access explicit and easy-to-audit via the
+//!   [`ExposeSecret`] and [`ExposeSecretMut`] traits.
+//! - Prevent accidental leakage of secrets via channels like debug logging
+//! - Ensure secrets are wiped from memory on drop securely
+//!   (using the [`zeroize`] crate)
+//!
+//! Presently this crate favors a simple, `no_std`-friendly, safe i.e.
+//! `forbid(unsafe_code)`-based implementation and does not provide more advanced
+//! memory protection mechanisms e.g. ones based on `mlock(2)`/`mprotect(2)`.
+//! We may explore more advanced protection mechanisms in the future.
+//! Those who don't mind `std` and `libc` dependencies should consider using
+//! the [`secrets`](https://crates.io/crates/secrets) crate.
+//!
+//! # `serde` support
+//!
+//! When the `serde` feature of this crate is enabled, the [`SecretBox`] type will
+//! receive a [`Deserialize`] impl for all `SecretBox<T>` types where
+//! `T: DeserializeOwned`. This allows *loading* secret values from data
+//! deserialized from `serde` (be careful to clean up any intermediate secrets
+//! when doing this, e.g. the unparsed input!)
+//!
+//! To prevent exfiltration of secret values via `serde`, by default `SecretBox<T>`
+//! does *not* receive a corresponding [`Serialize`] impl. If you would like
+//! types of `SecretBox<T>` to be serializable with `serde`, you will need to impl
+//! the [`SerializableSecret`] marker trait on `T`
+
+use std::fmt::Debug;
+use std::{any, fmt};
+
+use serde::{de, ser, Deserialize, Serialize};
+use zeroize::{Zeroize, ZeroizeOnDrop};
+
+/// Wrapper type for strings that contains secrets. See also [SecretBox].
+pub type SecretString = SecretBox<String>;
+
+impl From<String> for SecretString {
+    fn from(value: String) -> Self {
+        SecretString::new(Box::new(value))
+    }
+}
+
+/// Wrapper type for values that contains secrets, which attempts to limit
+/// accidental exposure and ensure secrets are wiped from memory when dropped.
+/// (e.g. passwords, cryptographic keys, access tokens or other credentials)
+///
+/// Access to the secret inner value occurs through the [`ExposeSecret`]
+/// or [`ExposeSecretMut`] traits, which provide methods for accessing the inner secret value.
+pub struct SecretBox<S: Zeroize> {
+    inner_secret: Box<S>,
+}
+
+impl<S: Zeroize> Zeroize for SecretBox<S> {
+    fn zeroize(&mut self) {
+        self.inner_secret.as_mut().zeroize()
+    }
+}
+
+impl<S: Zeroize> Drop for SecretBox<S> {
+    fn drop(&mut self) {
+        self.zeroize()
+    }
+}
+
+impl<S: Zeroize> ZeroizeOnDrop for SecretBox<S> {}
+
+impl<S: Zeroize> From<Box<S>> for SecretBox<S> {
+    fn from(source: Box<S>) -> Self {
+        Self::new(source)
+    }
+}
+
+impl<S: Zeroize> SecretBox<S> {
+    /// Create a secret value using a pre-boxed value.
+    pub fn new(boxed_secret: Box<S>) -> Self {
+        Self {
+            inner_secret: boxed_secret,
+        }
+    }
+}
+
+impl<S: Zeroize + Default> SecretBox<S> {
+    /// Create a secret value using a function that can initialize the vale in-place.
+    pub fn new_with_mut(ctr: impl FnOnce(&mut S)) -> Self {
+        let mut secret = Self::default();
+        ctr(secret.expose_secret_mut());
+        secret
+    }
+}
+
+impl<S: Zeroize + Clone> SecretBox<S> {
+    /// Create a secret value using the provided function as a constructor.
+    ///
+    /// The implementation makes an effort to zeroize the locally constructed value
+    /// before it is copied to the heap, and constructing it inside the closure minimizes
+    /// the possibility of it being accidentally copied by other code.
+    ///
+    /// **Note:** using [`Self::new`] or [`Self::new_with_mut`] is preferable when possible,
+    /// since this method's safety relies on empyric evidence and may be violated on some targets.
+    pub fn new_with_ctr(ctr: impl FnOnce() -> S) -> Self {
+        let mut data = ctr();
+        let secret = Self {
+            inner_secret: Box::new(data.clone()),
+        };
+        data.zeroize();
+        secret
+    }
+
+    /// Same as [`Self::new_with_ctr`], but the constructor can be fallible.
+    ///
+    ///
+    /// **Note:** using [`Self::new`] or [`Self::new_with_mut`] is preferable when possible,
+    /// since this method's safety relies on empyric evidence and may be violated on some targets.
+    pub fn try_new_with_ctr<E>(ctr: impl FnOnce() -> Result<S, E>) -> Result<Self, E> {
+        let mut data = ctr()?;
+        let secret = Self {
+            inner_secret: Box::new(data.clone()),
+        };
+        data.zeroize();
+        Ok(secret)
+    }
+}
+
+impl<S: Zeroize + Default> Default for SecretBox<S> {
+    fn default() -> Self {
+        Self {
+            inner_secret: Box::<S>::default(),
+        }
+    }
+}
+
+impl<S: Zeroize> Debug for SecretBox<S> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "SecretBox<{}>([REDACTED])", any::type_name::<S>())
+    }
+}
+
+impl<S> Clone for SecretBox<S>
+where
+    S: Clone + Zeroize,
+{
+    fn clone(&self) -> Self {
+        SecretBox {
+            inner_secret: self.inner_secret.clone(),
+        }
+    }
+}
+
+impl<S: Zeroize> ExposeSecret<S> for SecretBox<S> {
+    fn expose_secret(&self) -> &S {
+        self.inner_secret.as_ref()
+    }
+}
+
+impl<S: Zeroize> ExposeSecretMut<S> for SecretBox<S> {
+    fn expose_secret_mut(&mut self) -> &mut S {
+        self.inner_secret.as_mut()
+    }
+}
+
+/// Expose a reference to an inner secret
+pub trait ExposeSecret<S> {
+    /// Expose secret: this is the only method providing access to a secret.
+    fn expose_secret(&self) -> &S;
+}
+
+/// Expose a mutable reference to an inner secret
+pub trait ExposeSecretMut<S> {
+    /// Expose secret: this is the only method providing access to a secret.
+    fn expose_secret_mut(&mut self) -> &mut S;
+}
+
+/// Marker trait for secret types which can be [`Serialize`]-d by [`serde`].
+///
+/// When the `serde` feature of this crate is enabled and types are marked with
+/// this trait, they receive a [`Serialize` impl][1] for `SecretBox<T>`.
+/// (NOTE: all types which impl `DeserializeOwned` receive a [`Deserialize`]
+/// impl)
+///
+/// This is done deliberately to prevent accidental exfiltration of secrets
+/// via `serde` serialization.
+///
+/// If you really want to have `serde` serialize those types, use the
+/// [`serialize_with`][2] attribute to specify a serializer that exposes the secret.
+///
+/// [1]: https://docs.rs/secrecy/latest/secrecy/struct.Secret.html#implementations
+/// [2]: https://serde.rs/field-attrs.html#serialize_with
+pub trait SerializableSecret: Serialize {}
+
+impl<'de, T> Deserialize<'de> for SecretBox<T>
+where
+    T: Zeroize + Clone + de::DeserializeOwned + Sized,
+{
+    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+    where
+        D: de::Deserializer<'de>,
+    {
+        Self::try_new_with_ctr(|| T::deserialize(deserializer))
+    }
+}
+
+impl<T> Serialize for SecretBox<T>
+where
+    T: Zeroize + SerializableSecret + Serialize + Sized,
+{
+    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+    where
+        S: ser::Serializer,
+    {
+        self.expose_secret().serialize(serializer)
+    }
+}
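A short usage sketch of the module above. It assumes the file is exported as `common_base::secrets` (the `pub mod secrets;` added to `lib.rs` earlier in this change set); the Debug output matches the redacted string that the standalone test now asserts on.

```rust
use common_base::secrets::{ExposeSecret, SecretString};

fn main() {
    // Assumed re-export path; the value itself is just an example.
    let password: SecretString = String::from("s3cr3t").into();

    // Debug formatting never prints the secret itself.
    assert_eq!(
        format!("{:?}", password),
        "SecretBox<alloc::string::String>([REDACTED])"
    );

    // Access has to go through ExposeSecret, so it is explicit and auditable.
    assert_eq!(password.expose_secret(), "s3cr3t");
}
```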
@@ -19,6 +19,9 @@ pub const DEFAULT_CATALOG_NAME: &str = "greptime";
 pub const DEFAULT_SCHEMA_NAME: &str = "public";
 pub const DEFAULT_PRIVATE_SCHEMA_NAME: &str = "greptime_private";

+/// Reserves [0,MIN_USER_FLOW_ID) for internal usage.
+/// User defined table id starts from this value.
+pub const MIN_USER_FLOW_ID: u32 = 1024;
 /// Reserves [0,MIN_USER_TABLE_ID) for internal usage.
 /// User defined table id starts from this value.
 pub const MIN_USER_TABLE_ID: u32 = 1024;
@@ -88,6 +91,8 @@ pub const INFORMATION_SCHEMA_PARTITIONS_TABLE_ID: u32 = 28;
 pub const INFORMATION_SCHEMA_REGION_PEERS_TABLE_ID: u32 = 29;
 /// id for information_schema.columns
 pub const INFORMATION_SCHEMA_TABLE_CONSTRAINTS_TABLE_ID: u32 = 30;
+/// id for information_schema.cluster_info
+pub const INFORMATION_SCHEMA_CLUSTER_INFO_TABLE_ID: u32 = 31;
 /// ----- End of information_schema tables -----

 pub const MITO_ENGINE: &str = "mito";
@@ -28,6 +28,12 @@ pub fn format_full_table_name(catalog: &str, schema: &str, table: &str) -> String {
     format!("{catalog}.{schema}.{table}")
 }

+/// Formats flow fully-qualified name
+#[inline]
+pub fn format_full_flow_name(catalog: &str, flow: &str) -> String {
+    format!("{catalog}.{flow}")
+}
+
 /// Build db name from catalog and schema string
 pub fn build_db_string(catalog: &str, schema: &str) -> String {
     if catalog == DEFAULT_CATALOG_NAME {
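A tiny usage sketch of the `format_full_flow_name` helper added above, written standalone so it can be run in isolation; unlike tables, a flow name is qualified by catalog only.

```rust
// Same body as the helper in the hunk above, duplicated here only so the
// example is self-contained.
fn format_full_flow_name(catalog: &str, flow: &str) -> String {
    format!("{catalog}.{flow}")
}

fn main() {
    assert_eq!(format_full_flow_name("greptime", "my_flow"), "greptime.my_flow");
}
```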
@@ -21,6 +21,16 @@ pub fn metadata_store_dir(store_dir: &str) -> String {
     format!("{store_dir}/metadata")
 }

+/// The Server running mode
+#[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq, Copy)]
+#[serde(rename_all = "lowercase")]
+pub enum Mode {
+    // The single process mode.
+    Standalone,
+    // The distributed cluster mode.
+    Distributed,
+}
+
 #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
 #[serde(default)]
 pub struct KvBackendConfig {
@@ -213,34 +213,4 @@ impl ErrorExt for Error {
     fn as_any(&self) -> &dyn Any {
         self
     }
-
-    fn location_opt(&self) -> Option<common_error::snafu::Location> {
-        use Error::*;
-        match self {
-            OrcReader { location, .. } => Some(*location),
-            BuildBackend { location, .. } => Some(*location),
-            ReadObject { location, .. } => Some(*location),
-            ListObjects { location, .. } => Some(*location),
-            InferSchema { location, .. } => Some(*location),
-            ReadParquetSnafu { location, .. } => Some(*location),
-            ParquetToSchema { location, .. } => Some(*location),
-            JoinHandle { location, .. } => Some(*location),
-            ParseFormat { location, .. } => Some(*location),
-            MergeSchema { location, .. } => Some(*location),
-            WriteObject { location, .. } => Some(*location),
-            ReadRecordBatch { location, .. } => Some(*location),
-            WriteRecordBatch { location, .. } => Some(*location),
-            AsyncWrite { location, .. } => Some(*location),
-            EncodeRecordBatch { location, .. } => Some(*location),
-            BufferedWriterClosed { location, .. } => Some(*location),
-
-            UnsupportedBackendProtocol { location, .. } => Some(*location),
-            EmptyHostPath { location, .. } => Some(*location),
-            InvalidUrl { location, .. } => Some(*location),
-            InvalidConnection { location, .. } => Some(*location),
-            UnsupportedCompressionType { location, .. } => Some(*location),
-            UnsupportedFormat { location, .. } => Some(*location),
-            WriteParquet { location, .. } => Some(*location),
-        }
-    }
 }
@@ -35,7 +35,7 @@ pub fn parse_url(url: &str) -> Result<(String, Option<String>, String)> {
     #[cfg(windows)]
     {
         // On Windows, the url may start with `C:/`.
-        if let Some(_) = handle_windows_path(url) {
+        if handle_windows_path(url).is_some() {
             return Ok((FS_SCHEMA.to_string(), None, url.to_string()));
         }
     }
@@ -56,14 +56,6 @@ impl ErrorExt for Error {
         }
     }

-    fn location_opt(&self) -> Option<common_error::snafu::Location> {
-        match self {
-            Error::BigDecimalOutOfRange { location, .. } => Some(*location),
-            Error::InvalidPrecisionOrScale { location, .. } => Some(*location),
-            Error::ParseRustDecimalStr { .. } | Error::ParseBigDecimalStr { .. } => None,
-        }
-    }
-
     fn as_any(&self) -> &dyn std::any::Any {
         self
     }
@@ -24,13 +24,6 @@ pub trait ErrorExt: StackError {
         StatusCode::Unknown
     }

-    // TODO(ruihang): remove this default implementation
-    /// Get the location of this error, None if the location is unavailable.
-    /// Add `_opt` suffix to avoid confusing with similar method in `std::error::Error`
-    fn location_opt(&self) -> Option<crate::snafu::Location> {
-        None
-    }
-
     /// Returns the error as [Any](std::any::Any) so that it can be
     /// downcast to a specific implementation.
     fn as_any(&self) -> &dyn Any;
@@ -116,9 +109,9 @@ impl BoxedError {

 impl std::fmt::Debug for BoxedError {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        // Use the pretty debug format of inner error for opaque error.
-        let debug_format = crate::format::DebugFormat::new(&*self.inner);
-        debug_format.fmt(f)
+        let mut buf = vec![];
+        self.debug_fmt(0, &mut buf);
+        write!(f, "{}", buf.join("\n"))
     }
 }

@@ -139,10 +132,6 @@ impl crate::ext::ErrorExt for BoxedError {
         self.inner.status_code()
     }

-    fn location_opt(&self) -> Option<crate::snafu::Location> {
-        self.inner.location_opt()
-    }
-
     fn as_any(&self) -> &dyn std::any::Any {
         self.inner.as_any()
     }
@@ -196,10 +185,6 @@ impl crate::ext::ErrorExt for PlainError {
         self.status_code
     }

-    fn location_opt(&self) -> Option<crate::snafu::Location> {
-        None
-    }
-
     fn as_any(&self) -> &dyn std::any::Any {
         self as _
     }
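A minimal, self-contained illustration (local trait, not the real `common_error::ext::StackError` API) of the stacked debug format that the `BoxedError` Debug impl above switches to: each error layer pushes one line into a buffer and the top-level formatter joins the lines with newlines, which is what makes the separate `DebugFormat` helper below removable.

```rust
// Hypothetical stand-in for the StackError::debug_fmt contract.
trait StackDebug {
    fn debug_fmt(&self, layer: usize, buf: &mut Vec<String>);
}

struct Leaf;
struct Wrapper(Leaf);

impl StackDebug for Leaf {
    fn debug_fmt(&self, layer: usize, buf: &mut Vec<String>) {
        buf.push(format!("{layer}: leaf error"));
    }
}

impl StackDebug for Wrapper {
    fn debug_fmt(&self, layer: usize, buf: &mut Vec<String>) {
        buf.push(format!("{layer}: wrapper error"));
        // Recurse into the source, one indentation "layer" deeper.
        self.0.debug_fmt(layer + 1, buf);
    }
}

fn main() {
    let mut buf = Vec::new();
    Wrapper(Leaf).debug_fmt(0, &mut buf);
    assert_eq!(buf.join("\n"), "0: wrapper error\n1: leaf error");
}
```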
@@ -1,154 +0,0 @@
-// Copyright 2023 Greptime Team
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-use std::fmt;
-
-use crate::ext::ErrorExt;
-
-/// Pretty debug format for error, also prints source and backtrace.
-pub struct DebugFormat<'a, E: ?Sized>(&'a E);
-
-impl<'a, E: ?Sized> DebugFormat<'a, E> {
-    /// Create a new format struct from `err`.
-    pub fn new(err: &'a E) -> Self {
-        Self(err)
-    }
-}
-
-impl<'a, E: ErrorExt + ?Sized> fmt::Debug for DebugFormat<'a, E> {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        write!(f, "{}.", self.0)?;
-        if let Some(source) = self.0.source() {
-            // Source error use debug format for more verbose info.
-            write!(f, " Caused by: {source:?}")?;
-        }
-        if let Some(location) = self.0.location_opt() {
-            // Add a newline to separate causes and backtrace.
-            write!(f, " at: {location}")?;
-        }
-
-        Ok(())
-    }
-}
-
-#[cfg(test)]
-mod tests {
-    use std::any::Any;
-
-    use snafu::prelude::*;
-    use snafu::{GenerateImplicitData, Location};
-
-    use super::*;
-    use crate::ext::StackError;
-
-    #[derive(Debug, Snafu)]
-    #[snafu(display("This is a leaf error"))]
-    struct Leaf;
-
-    impl ErrorExt for Leaf {
-        fn location_opt(&self) -> Option<Location> {
-            None
-        }
-
-        fn as_any(&self) -> &dyn Any {
-            self
-        }
-    }
-
-    impl StackError for Leaf {
-        fn debug_fmt(&self, _: usize, _: &mut Vec<String>) {}
-
-        fn next(&self) -> Option<&dyn StackError> {
-            None
-        }
-    }
-
-    #[derive(Debug, Snafu)]
-    #[snafu(display("This is a leaf with location"))]
-    struct LeafWithLocation {
-        location: Location,
-    }
-
-    impl ErrorExt for LeafWithLocation {
-        fn location_opt(&self) -> Option<Location> {
-            None
-        }
-
-        fn as_any(&self) -> &dyn Any {
-            self
-        }
-    }
-
-    impl StackError for LeafWithLocation {
-        fn debug_fmt(&self, _: usize, _: &mut Vec<String>) {}
-
-        fn next(&self) -> Option<&dyn StackError> {
-            None
-        }
-    }
-
-    #[derive(Debug, Snafu)]
-    #[snafu(display("Internal error"))]
-    struct Internal {
-        #[snafu(source)]
-        source: Leaf,
-        location: Location,
-    }
-
-    impl ErrorExt for Internal {
-        fn location_opt(&self) -> Option<Location> {
-            None
-        }
-
-        fn as_any(&self) -> &dyn Any {
-            self
-        }
-    }
-
-    impl StackError for Internal {
-        fn debug_fmt(&self, layer: usize, buf: &mut Vec<String>) {
-            buf.push(format!("{}: Internal error, at {}", layer, self.location));
-            self.source.debug_fmt(layer + 1, buf);
-        }
-
-        fn next(&self) -> Option<&dyn StackError> {
-            Some(&self.source)
-        }
-    }
-
-    #[test]
-    fn test_debug_format() {
-        let err = Leaf;
-
-        let msg = format!("{:?}", DebugFormat::new(&err));
-        assert_eq!("This is a leaf error.", msg);
-
-        let err = LeafWithLocation {
-            location: Location::generate(),
-        };
-
-        // TODO(ruihang): display location here
-        let msg = format!("{:?}", DebugFormat::new(&err));
-        assert!(msg.starts_with("This is a leaf with location."));
-
-        let err = Internal {
-            source: Leaf,
-            location: Location::generate(),
-        };
-
-        // TODO(ruihang): display location here
-        let msg = format!("{:?}", DebugFormat::new(&err));
-        assert!(msg.contains("Internal error. Caused by: Leaf"));
-    }
-}
@@ -15,7 +15,6 @@
 #![feature(error_iter)]

 pub mod ext;
-pub mod format;
 pub mod mock;
 pub mod status_code;

@@ -17,8 +17,6 @@
 use std::any::Any;
 use std::fmt;

-use snafu::Location;
-
 use crate::ext::{ErrorExt, StackError};
 use crate::status_code::StatusCode;

@@ -61,10 +59,6 @@ impl ErrorExt for MockError {
         self.code
     }

-    fn location_opt(&self) -> Option<Location> {
-        None
-    }
-
     fn as_any(&self) -> &dyn Any {
         self
     }
@@ -97,6 +97,11 @@ pub enum StatusCode {
     /// User is not authorized to perform the operation
     PermissionDenied = 7006,
     // ====== End of auth related status code =====
+
+    // ====== Begin of flow related status code =====
+    FlowAlreadyExists = 8000,
+    FlowNotFound = 8001,
+    // ====== End of flow related status code =====
 }

 impl StatusCode {

@@ -125,8 +130,10 @@ impl StatusCode {
             | StatusCode::EngineExecuteQuery
             | StatusCode::TableAlreadyExists
             | StatusCode::TableNotFound
-            | StatusCode::RegionNotFound
             | StatusCode::RegionAlreadyExists
+            | StatusCode::RegionNotFound
+            | StatusCode::FlowAlreadyExists
+            | StatusCode::FlowNotFound
             | StatusCode::RegionReadonly
             | StatusCode::TableColumnNotFound
             | StatusCode::TableColumnExists

@@ -161,10 +168,12 @@ impl StatusCode {
             | StatusCode::InvalidSyntax
             | StatusCode::TableAlreadyExists
             | StatusCode::TableNotFound
+            | StatusCode::RegionAlreadyExists
             | StatusCode::RegionNotFound
+            | StatusCode::FlowAlreadyExists
+            | StatusCode::FlowNotFound
             | StatusCode::RegionNotReady
             | StatusCode::RegionBusy
-            | StatusCode::RegionAlreadyExists
             | StatusCode::RegionReadonly
             | StatusCode::TableColumnNotFound
             | StatusCode::TableColumnExists
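The three hunks above introduce the flow-related status codes (`FlowAlreadyExists = 8000`, `FlowNotFound = 8001`) and register them in the existing `StatusCode` match arms. As a hedged illustration of consuming the new codes, a caller might branch on them like the sketch below (the helper function and its name are hypothetical, not code from this diff):

```rust
use common_error::status_code::StatusCode;

/// Hypothetical helper: treat "flow already there" / "flow already gone" as
/// acceptable outcomes when retrying idempotent flow DDL.
fn is_benign_flow_ddl_error(code: StatusCode) -> bool {
    matches!(
        code,
        StatusCode::FlowAlreadyExists | StatusCode::FlowNotFound
    )
}

fn main() {
    assert!(is_benign_flow_ddl_error(StatusCode::FlowNotFound));
    assert!(!is_benign_flow_ddl_error(StatusCode::TableNotFound));
}
```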
src/common/frontend/Cargo.toml (new file, 16 lines)

@@ -0,0 +1,16 @@
+[package]
+name = "common-frontend"
+version.workspace = true
+edition.workspace = true
+license.workspace = true
+
+[dependencies]
+api.workspace = true
+async-trait.workspace = true
+common-base.workspace = true
+common-error.workspace = true
+common-macro.workspace = true
+common-query.workspace = true
+session.workspace = true
+snafu.workspace = true
+sql.workspace = true
src/common/frontend/src/error.rs (new file, 44 lines)

@@ -0,0 +1,44 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use common_error::ext::{BoxedError, ErrorExt};
+use common_error::status_code::StatusCode;
+use common_macro::stack_trace_debug;
+use snafu::{Location, Snafu};
+
+#[derive(Snafu)]
+#[snafu(visibility(pub))]
+#[stack_trace_debug]
+pub enum Error {
+    #[snafu(display("External error"))]
+    External {
+        location: Location,
+        source: BoxedError,
+    },
+}
+
+pub type Result<T> = std::result::Result<T, Error>;
+
+impl ErrorExt for Error {
+    fn status_code(&self) -> StatusCode {
+        use Error::*;
+        match self {
+            External { source, .. } => source.status_code(),
+        }
+    }
+
+    fn as_any(&self) -> &dyn std::any::Any {
+        self
+    }
+}
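The new `common-frontend` error type has a single `External` variant that boxes any `ErrorExt` error and forwards its status code. A sketch of wrapping a downstream failure into it, assuming the snafu-generated `ExternalSnafu` selector and the existing `MockError` helper from `common-error` (the `invoke` function itself is illustrative, not part of this diff):

```rust
use common_error::ext::{BoxedError, ErrorExt};
use common_error::mock::MockError;
use common_error::status_code::StatusCode;
use common_frontend::error::{ExternalSnafu, Result};
use snafu::ResultExt;

fn invoke() -> Result<()> {
    // Pretend a downstream component failed with its own ErrorExt error.
    let downstream: std::result::Result<(), MockError> =
        Err(MockError::new(StatusCode::Internal));

    // Box it and wrap it in the single `External` variant; `status_code()`
    // on the wrapper forwards to the boxed inner error.
    downstream.map_err(BoxedError::new).context(ExternalSnafu)?;
    Ok(())
}

fn main() {
    let err = invoke().unwrap_err();
    assert_eq!(StatusCode::Internal, err.status_code());
}
```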
src/common/frontend/src/handler.rs (new file, 38 lines)

@@ -0,0 +1,38 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use api::v1::{RowDeleteRequests, RowInsertRequests};
+use async_trait::async_trait;
+use common_query::Output;
+use session::context::QueryContextRef;
+
+use crate::error::Result;
+
+/// [FrontendInvoker] provides the ability to:
+/// - Insert rows
+/// - Delete rows
+#[async_trait]
+pub trait FrontendInvoker {
+    async fn row_inserts(
+        &self,
+        requests: RowInsertRequests,
+        ctx: QueryContextRef,
+    ) -> Result<Output>;
+
+    async fn row_deletes(
+        &self,
+        requests: RowDeleteRequests,
+        ctx: QueryContextRef,
+    ) -> Result<Output>;
+}
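handler.rs defines the `FrontendInvoker` trait that flow code can call to write rows back through the frontend. A minimal illustrative implementor is sketched below; the `NoopInvoker` type is made up, and it assumes the `AffectedRows` variant of `common_query::Output`:

```rust
use api::v1::{RowDeleteRequests, RowInsertRequests};
use async_trait::async_trait;
use common_frontend::error::Result;
use common_frontend::handler::FrontendInvoker;
use common_query::Output;
use session::context::QueryContextRef;

/// Illustrative implementor that accepts the requests and reports zero affected rows.
struct NoopInvoker;

#[async_trait]
impl FrontendInvoker for NoopInvoker {
    async fn row_inserts(
        &self,
        _requests: RowInsertRequests,
        _ctx: QueryContextRef,
    ) -> Result<Output> {
        Ok(Output::AffectedRows(0))
    }

    async fn row_deletes(
        &self,
        _requests: RowDeleteRequests,
        _ctx: QueryContextRef,
    ) -> Result<Output> {
        Ok(Output::AffectedRows(0))
    }
}
```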
@@ -12,18 +12,5 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-use std::ops::ControlFlow;
-
-use sqlparser::ast::{Visit, Visitor};
-
-use crate::statements::OptionMap;
-
-impl Visit for OptionMap {
-    fn visit<V: Visitor>(&self, visitor: &mut V) -> ControlFlow<V::Break> {
-        for (k, v) in &self.map {
-            k.visit(visitor)?;
-            v.visit(visitor)?;
-        }
-        ControlFlow::Continue(())
-    }
-}
+pub mod error;
+pub mod handler;
@@ -35,7 +35,7 @@ impl FunctionContext {
     #[cfg(any(test, feature = "testing"))]
     pub fn mock() -> Self {
         Self {
-            query_ctx: QueryContextBuilder::default().build(),
+            query_ctx: QueryContextBuilder::default().build().into(),
             state: Arc::new(FunctionState::mock()),
         }
     }

@@ -44,7 +44,7 @@ impl FunctionContext {
 impl Default for FunctionContext {
     fn default() -> Self {
         Self {
-            query_ctx: QueryContextBuilder::default().build(),
+            query_ctx: QueryContextBuilder::default().build().into(),
             state: Arc::new(FunctionState::default()),
         }
     }
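The `.build()` → `.build().into()` edits above (and the similar ones in the test hunks that follow) exist because these call sites now hold a `QueryContextRef`, i.e. an `Arc<QueryContext>`, so the freshly built context must be converted; the standard `From<T> for Arc<T>` impl makes that a plain `.into()`. A tiny sketch, assuming `QueryContextBuilder` and `QueryContextRef` are exported from `session::context` as implied by these hunks:

```rust
use session::context::{QueryContextBuilder, QueryContextRef};

fn build_ctx() -> QueryContextRef {
    // `.build()` yields an owned QueryContext; `.into()` wraps it in an Arc
    // so it can be shared as a QueryContextRef.
    QueryContextBuilder::default().build().into()
}
```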
@@ -152,7 +152,7 @@ pub fn interp(args: &[VectorRef]) -> Result<VectorRef> {

     let res;
     if xp.len() == 1 {
-        let datas = x
+        let data = x
             .iter_data()
             .map(|x| {
                 if Value::from(x) < xp.get(0) {

@@ -164,7 +164,7 @@ pub fn interp(args: &[VectorRef]) -> Result<VectorRef> {
                 }
             })
             .collect::<Vec<_>>();
-        res = Float64Vector::from(datas);
+        res = Float64Vector::from(data);
     } else {
         let mut j = 0;
         /* only pre-calculate slopes if there are relatively few of them. */

@@ -191,7 +191,7 @@ pub fn interp(args: &[VectorRef]) -> Result<VectorRef> {
             }
             slopes = Some(slopes_tmp);
         }
-        let datas = x
+        let data = x
             .iter_data()
             .map(|x| match x {
                 Some(xi) => {

@@ -255,7 +255,7 @@ pub fn interp(args: &[VectorRef]) -> Result<VectorRef> {
                 _ => None,
             })
             .collect::<Vec<_>>();
-        res = Float64Vector::from(datas);
+        res = Float64Vector::from(data);
     }
     Ok(Arc::new(res) as _)
 }
@@ -89,7 +89,7 @@ mod tests {
     #[test]
     fn test_create_udf() {
         let f = Arc::new(TestAndFunction);
-        let query_ctx = QueryContextBuilder::default().build();
+        let query_ctx = QueryContextBuilder::default().build().into();

         let args: Vec<VectorRef> = vec![
             Arc::new(ConstantVector::new(

@@ -79,7 +79,8 @@ mod tests {

         let query_ctx = QueryContextBuilder::default()
             .current_schema("test_db".to_string())
-            .build();
+            .build()
+            .into();

         let func_ctx = FunctionContext {
             query_ctx,
@@ -77,7 +77,7 @@ mod tests {
             } if valid_types == vec![]
         ));

-        let query_ctx = QueryContextBuilder::default().build();
+        let query_ctx = QueryContextBuilder::default().build().into();

         let func_ctx = FunctionContext {
             query_ctx,

@@ -20,7 +20,7 @@ use common_meta::rpc::procedure::MigrateRegionRequest;
 use common_query::error::Error::ThreadJoin;
 use common_query::error::{InvalidFuncArgsSnafu, MissingProcedureServiceHandlerSnafu, Result};
 use common_query::prelude::{Signature, TypeSignature, Volatility};
-use common_telemetry::logging::error;
+use common_telemetry::error;
 use datatypes::data_type::DataType;
 use datatypes::prelude::ConcreteDataType;
 use datatypes::value::{Value, ValueRef};
@@ -288,7 +288,7 @@ mod tests {
     use common_test_util::ports;
     use hyper::service::{make_service_fn, service_fn};
     use hyper::Server;
-    use reqwest::Client;
+    use reqwest::{Client, Response};
     use tokio::spawn;

     use crate::{default_get_uuid, Collector, GreptimeDBTelemetry, Mode, StatisticData};

@@ -395,6 +395,21 @@ mod tests {
         }
     }

+    async fn get_telemetry_report(
+        mut report: GreptimeDBTelemetry,
+        url: &'static str,
+    ) -> Option<Response> {
+        report.telemetry_url = url;
+        report.report_telemetry_info().await
+    }
+
+    fn contravariance<'a>(x: &'a str) -> &'static str
+    where
+        'static: 'a,
+    {
+        unsafe { std::mem::transmute(x) }
+    }
+
        let working_home_temp = tempfile::Builder::new()
            .prefix("greptimedb_telemetry")
            .tempdir()

@@ -402,14 +417,16 @@ mod tests {
        let working_home = working_home_temp.path().to_str().unwrap().to_string();

        let test_statistic = Box::new(TestStatistic);
-       let mut test_report = GreptimeDBTelemetry::new(
+       let test_report = GreptimeDBTelemetry::new(
            Some(working_home.clone()),
            test_statistic,
            Arc::new(AtomicBool::new(true)),
        );
-       let url = Box::leak(format!("{}:{}", "http://localhost", port).into_boxed_str());
-       test_report.telemetry_url = url;
-       let response = test_report.report_telemetry_info().await.unwrap();
+       let url = format!("http://localhost:{}", port);
+       let response = {
+           let url = contravariance(url.as_str());
+           get_telemetry_report(test_report, url).await.unwrap()
+       };

        let body = response.json::<StatisticData>().await.unwrap();
        assert_eq!(env::consts::ARCH, body.arch);

@@ -420,13 +437,15 @@ mod tests {
        assert_eq!(1, body.nodes.unwrap());

        let failed_statistic = Box::new(FailedStatistic);
-       let mut failed_report = GreptimeDBTelemetry::new(
+       let failed_report = GreptimeDBTelemetry::new(
            Some(working_home),
            failed_statistic,
            Arc::new(AtomicBool::new(true)),
        );
-       failed_report.telemetry_url = url;
-       let response = failed_report.report_telemetry_info().await;
+       let response = {
+           let url = contravariance(url.as_str());
+           get_telemetry_report(failed_report, url).await
+       };
        assert!(response.is_none());

        let client = Client::builder()
@@ -12,17 +12,18 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

+use api::helper::ColumnDataTypeWrapper;
 use api::v1::add_column_location::LocationType;
 use api::v1::alter_expr::Kind;
 use api::v1::{
-    column_def, AddColumnLocation as Location, AlterExpr, CreateTableExpr, DropColumns,
-    RenameTable, SemanticType,
+    column_def, AddColumnLocation as Location, AlterExpr, ChangeColumnTypes, CreateTableExpr,
+    DropColumns, RenameTable, SemanticType,
 };
 use common_query::AddColumnLocation;
 use datatypes::schema::{ColumnSchema, RawSchema};
 use snafu::{ensure, OptionExt, ResultExt};
 use table::metadata::TableId;
-use table::requests::{AddColumnRequest, AlterKind, AlterTableRequest};
+use table::requests::{AddColumnRequest, AlterKind, AlterTableRequest, ChangeColumnTypeRequest};

 use crate::error::{
     InvalidColumnDefSnafu, MissingFieldSnafu, MissingTimestampColumnSnafu, Result,

@@ -64,6 +65,27 @@ pub fn alter_expr_to_request(table_id: TableId, expr: AlterExpr) -> Result<Alter
                 columns: add_column_requests,
             }
         }
+        Kind::ChangeColumnTypes(ChangeColumnTypes {
+            change_column_types,
+        }) => {
+            let change_column_type_requests = change_column_types
+                .into_iter()
+                .map(|cct| {
+                    let target_type =
+                        ColumnDataTypeWrapper::new(cct.target_type(), cct.target_type_extension)
+                            .into();
+
+                    Ok(ChangeColumnTypeRequest {
+                        column_name: cct.column_name,
+                        target_type,
+                    })
+                })
+                .collect::<Result<Vec<_>>>()?;
+
+            AlterKind::ChangeColumnTypes {
+                columns: change_column_type_requests,
+            }
+        }
         Kind::DropColumns(DropColumns { drop_columns }) => AlterKind::DropColumns {
             names: drop_columns.into_iter().map(|c| c.name).collect(),
         },

@@ -137,7 +159,10 @@ fn parse_location(location: Option<Location>) -> Result<Option<AddColumnLocation

 #[cfg(test)]
 mod tests {
-    use api::v1::{AddColumn, AddColumns, ColumnDataType, ColumnDef, DropColumn, SemanticType};
+    use api::v1::{
+        AddColumn, AddColumns, ChangeColumnType, ColumnDataType, ColumnDef, DropColumn,
+        SemanticType,
+    };
     use datatypes::prelude::ConcreteDataType;

     use super::*;

@@ -260,6 +285,40 @@ mod tests {
         assert_eq!(Some(AddColumnLocation::First), add_column.location);
     }

+    #[test]
+    fn test_change_column_type_expr() {
+        let expr = AlterExpr {
+            catalog_name: "test_catalog".to_string(),
+            schema_name: "test_schema".to_string(),
+            table_name: "monitor".to_string(),
+
+            kind: Some(Kind::ChangeColumnTypes(ChangeColumnTypes {
+                change_column_types: vec![ChangeColumnType {
+                    column_name: "mem_usage".to_string(),
+                    target_type: ColumnDataType::String as i32,
+                    target_type_extension: None,
+                }],
+            })),
+        };
+
+        let alter_request = alter_expr_to_request(1, expr).unwrap();
+        assert_eq!(alter_request.catalog_name, "test_catalog");
+        assert_eq!(alter_request.schema_name, "test_schema");
+        assert_eq!("monitor".to_string(), alter_request.table_name);
+
+        let mut change_column_types = match alter_request.alter_kind {
+            AlterKind::ChangeColumnTypes { columns } => columns,
+            _ => unreachable!(),
+        };
+
+        let change_column_type = change_column_types.pop().unwrap();
+        assert_eq!("mem_usage", change_column_type.column_name);
+        assert_eq!(
+            ConcreteDataType::string_datatype(),
+            change_column_type.target_type
+        );
+    }
+
     #[test]
     fn test_drop_column_expr() {
         let expr = AlterExpr {
Some files were not shown because too many files have changed in this diff.