Mirror of https://github.com/GreptimeTeam/greptimedb.git
Synced 2025-12-26 08:00:01 +00:00

Compare commits: poc_datafl ... v0.8.0 (64 commits)
Commit SHA1s (the Author and Date columns of the original table did not survive extraction):

258675b75e  11a08cb272  e9b178b8b9  3477fde0e5  9baa431656  e2a1cb5840
f696f41a02  0168d43d60  e372e25e30  ca409a732f  5c0a530ad1  4b030456f6
f93b5b19f0  669a6d84e9  a45017ad71  0d9e71b653  93f178f3ad  9f4a6c6fe2
c915916b62  dff7ba7598  fe34ebf770  a1c51a5885  63a8d293a1  6c621b7fcf
529e344450  2a169f9364  97eb196699  cfae276d37  09129a911e  15d7b9755e
72897a20e3  c04d02460f  4ca7ac7632  a260ba3ee7  efd3f04b7c  f16ce3ca27
6214180ecd  00e21e2021  494ce65729  e15294db41  be1eb4efb7  9d12496aaf
5d8084a32f  60eb5de3f1  a0be7198f9  6ab3aeb142  590aedd466  27e376e892
36c41d129c  89da42dbc1  04852aa27e  d0820bb26d  fa6c371380  9aa2182cb2
bca2e393bf  b1ef327bac  115c74791d  aec5cca2c7  06e1c43743  9d36c31209
c91132bd14  25e9076f5b  08945f128b  5a0629eaa0

@@ -1,7 +1,7 @@
 ---
 name: Bug report
 description: Is something not working? Help us fix it!
-labels: [ "bug" ]
+labels: [ "C-bug" ]
 body:
   - type: markdown
     attributes:

.github/ISSUE_TEMPLATE/config.yml (2 changes)

@@ -4,5 +4,5 @@ contact_links:
     url: https://greptime.com/slack
     about: Get free help from the Greptime community
   - name: Greptime Community Discussion
-    url: https://github.com/greptimeTeam/greptimedb/discussions
+    url: https://github.com/greptimeTeam/discussions
     about: Get free help from the Greptime community

.github/ISSUE_TEMPLATE/enhancement.yml (2 changes)

@@ -1,7 +1,7 @@
 ---
 name: Enhancement
 description: Suggest an enhancement to existing functionality
-labels: [ "enhancement" ]
+labels: [ "C-enhancement" ]
 body:
   - type: dropdown
     id: type

@@ -1,7 +1,7 @@
 ---
-name: Feature request
+name: New Feature
 description: Suggest a new feature for GreptimeDB
-labels: [ "feature request" ]
+labels: [ "C-feature" ]
 body:
   - type: markdown
     id: info

@@ -59,6 +59,9 @@ runs:
       if: ${{ inputs.disable-run-tests == 'false' }}
       shell: pwsh
       run: make test sqlness-test
+      env:
+        RUSTUP_WINDOWS_PATH_ADD_BIN: 1 # Workaround for https://github.com/nextest-rs/nextest/issues/1493
+        RUST_BACKTRACE: 1

     - name: Upload sqlness logs
       if: ${{ failure() }} # Only upload logs when the integration tests failed.

.github/actions/setup-cyborg/action.yml (new file, 16 lines)

@@ -0,0 +1,16 @@
+name: Setup cyborg environment
+description: Setup cyborg environment
+runs:
+  using: composite
+  steps:
+    - uses: actions/setup-node@v4
+      with:
+        node-version: 22
+    - uses: pnpm/action-setup@v3
+      with:
+        package_json_file: 'cyborg/package.json'
+        run_install: true
+    - name: Describe the Environment
+      working-directory: cyborg
+      shell: bash
+      run: pnpm tsx -v

.github/doc-label-config.yml (deleted, 4 lines)

@@ -1,4 +0,0 @@
-Doc not needed:
-  - '- \[x\] This PR does not require documentation updates.'
-Doc update required:
-  - '- \[ \] This PR does not require documentation updates.'

.github/pull_request_template.md (6 changes)

@@ -15,6 +15,6 @@ Please explain IN DETAIL what the changes are in this PR and why they are needed

 ## Checklist

-- [ ] I have written the necessary rustdoc comments.
-- [ ] I have added the necessary unit tests and integration tests.
-- [x] This PR does not require documentation updates.
+- [ ] I have written the necessary rustdoc comments.
+- [ ] I have added the necessary unit tests and integration tests.
+- [ ] This PR requires documentation updates.

.github/workflows/apidoc.yml (2 changes)

@@ -13,7 +13,7 @@ on:
 name: Build API docs

 env:
-  RUST_TOOLCHAIN: nightly-2024-04-18
+  RUST_TOOLCHAIN: nightly-2024-04-20

 jobs:
   apidoc:

.github/workflows/develop.yml (8 changes)

@@ -30,7 +30,7 @@ concurrency:
   cancel-in-progress: true

 env:
-  RUST_TOOLCHAIN: nightly-2024-04-18
+  RUST_TOOLCHAIN: nightly-2024-04-20

 jobs:
   check-typos-and-docs:
@@ -57,7 +57,7 @@ jobs:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        os: [ windows-latest, ubuntu-20.04 ]
+        os: [ windows-2022, ubuntu-20.04 ]
     timeout-minutes: 60
     steps:
       - uses: actions/checkout@v4
@@ -256,7 +256,7 @@ jobs:
         uses: actions/upload-artifact@v4
         with:
           name: sqlness-logs
-          path: /tmp/sqlness-*
+          path: /tmp/sqlness*
           retention-days: 3

   sqlness-kafka-wal:
@@ -286,7 +286,7 @@ jobs:
         uses: actions/upload-artifact@v4
         with:
           name: sqlness-logs-with-kafka-wal
-          path: /tmp/sqlness-*
+          path: /tmp/sqlness*
           retention-days: 3

   fmt:

.github/workflows/doc-issue.yml (deleted, 39 lines)

@@ -1,39 +0,0 @@
-name: Create Issue in downstream repos
-
-on:
-  issues:
-    types:
-      - labeled
-  pull_request_target:
-    types:
-      - labeled
-
-jobs:
-  doc_issue:
-    if: github.event.label.name == 'doc update required'
-    runs-on: ubuntu-20.04
-    steps:
-      - name: create an issue in doc repo
-        uses: dacbd/create-issue-action@v1.2.1
-        with:
-          owner: GreptimeTeam
-          repo: docs
-          token: ${{ secrets.DOCS_REPO_TOKEN }}
-          title: Update docs for ${{ github.event.issue.title || github.event.pull_request.title }}
-          body: |
-            A document change request is generated from
-            ${{ github.event.issue.html_url || github.event.pull_request.html_url }}
-  cloud_issue:
-    if: github.event.label.name == 'cloud followup required'
-    runs-on: ubuntu-20.04
-    steps:
-      - name: create an issue in cloud repo
-        uses: dacbd/create-issue-action@v1.2.1
-        with:
-          owner: GreptimeTeam
-          repo: greptimedb-cloud
-          token: ${{ secrets.DOCS_REPO_TOKEN }}
-          title: Followup changes in ${{ github.event.issue.title || github.event.pull_request.title }}
-          body: |
-            A followup request is generated from
-            ${{ github.event.issue.html_url || github.event.pull_request.html_url }}

.github/workflows/doc-label.yml (deleted, 36 lines)

@@ -1,36 +0,0 @@
-name: "PR Doc Labeler"
-on:
-  pull_request_target:
-    types: [opened, edited, synchronize, ready_for_review, auto_merge_enabled, labeled, unlabeled]
-
-permissions:
-  pull-requests: write
-  contents: read
-
-jobs:
-  triage:
-    if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
-    runs-on: ubuntu-latest
-    steps:
-      - uses: github/issue-labeler@v3.4
-        with:
-          configuration-path: .github/doc-label-config.yml
-          enable-versioned-regex: false
-          repo-token: ${{ secrets.GITHUB_TOKEN }}
-          sync-labels: 1
-      - name: create an issue in doc repo
-        uses: dacbd/create-issue-action@v1.2.1
-        if: ${{ github.event.action == 'opened' && contains(github.event.pull_request.body, '- [ ] This PR does not require documentation updates.') }}
-        with:
-          owner: GreptimeTeam
-          repo: docs
-          token: ${{ secrets.DOCS_REPO_TOKEN }}
-          title: Update docs for ${{ github.event.issue.title || github.event.pull_request.title }}
-          body: |
-            A document change request is generated from
-            ${{ github.event.issue.html_url || github.event.pull_request.html_url }}
-      - name: Check doc labels
-        uses: docker://agilepathway/pull-request-label-checker:latest
-        with:
-          one_of: Doc update required,Doc not needed
-          repo_token: ${{ secrets.GITHUB_TOKEN }}

.github/workflows/docbot.yml (new file, 22 lines)

@@ -0,0 +1,22 @@
+name: Follow Up Docs
+on:
+  pull_request_target:
+    types: [opened, edited]
+
+permissions:
+  pull-requests: write
+  contents: read
+
+jobs:
+  docbot:
+    runs-on: ubuntu-20.04
+    timeout-minutes: 10
+    steps:
+      - uses: actions/checkout@v4
+      - uses: ./.github/actions/setup-cyborg
+      - name: Maybe Follow Up Docs Issue
+        working-directory: cyborg
+        run: pnpm tsx bin/follow-up-docs-issue.ts
+        env:
+          DOCS_REPO_TOKEN: ${{ secrets.DOCS_REPO_TOKEN }}
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

.github/workflows/nightly-ci.yml (15 changes)

@@ -1,6 +1,6 @@
 on:
   schedule:
-    - cron: '0 23 * * 1-5'
+    - cron: "0 23 * * 1-5"
   workflow_dispatch:

 name: Nightly CI
@@ -10,7 +10,7 @@ concurrency:
   cancel-in-progress: true

 env:
-  RUST_TOOLCHAIN: nightly-2024-04-18
+  RUST_TOOLCHAIN: nightly-2024-04-20

 jobs:
   sqlness-test:
@@ -35,7 +35,7 @@ jobs:
   sqlness-windows:
     name: Sqlness tests on Windows
     if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
-    runs-on: windows-latest-8-cores
+    runs-on: windows-2022-8-cores
     timeout-minutes: 60
     steps:
      - uses: actions/checkout@v4
@@ -68,7 +68,7 @@ jobs:
   test-on-windows:
     name: Run tests on Windows
     if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
-    runs-on: windows-latest-8-cores
+    runs-on: windows-2022-8-cores
     timeout-minutes: 60
     steps:
       - run: git config --global core.autocrlf false
@@ -76,6 +76,9 @@ jobs:
       - uses: arduino/setup-protoc@v3
         with:
           repo-token: ${{ secrets.GITHUB_TOKEN }}
+      - uses: KyleMayes/install-llvm-action@v1
+        with:
+          version: "14.0"
       - name: Install Rust toolchain
         uses: dtolnay/rust-toolchain@master
         with:
@@ -88,7 +91,7 @@ jobs:
       - name: Install Python
         uses: actions/setup-python@v5
         with:
-          python-version: '3.10'
+          python-version: "3.10"
       - name: Install PyArrow Package
         run: pip install pyarrow
       - name: Install WSL distribution
@@ -98,8 +101,10 @@ jobs:
       - name: Running tests
         run: cargo nextest run -F pyo3_backend,dashboard
         env:
           CARGO_BUILD_RUSTFLAGS: "-C linker=lld-link"
           RUST_BACKTRACE: 1
+          CARGO_INCREMENTAL: 0
+          RUSTUP_WINDOWS_PATH_ADD_BIN: 1 # Workaround for https://github.com/nextest-rs/nextest/issues/1493
           GT_S3_BUCKET: ${{ vars.AWS_CI_TEST_BUCKET }}
           GT_S3_ACCESS_KEY_ID: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }}
           GT_S3_ACCESS_KEY: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }}

.github/workflows/release.yml (6 changes)

@@ -82,7 +82,7 @@ on:
 # Use env variables to control all the release process.
 env:
   # The arguments of building greptime.
-  RUST_TOOLCHAIN: nightly-2024-04-18
+  RUST_TOOLCHAIN: nightly-2024-04-20
   CARGO_PROFILE: nightly

   # Controls whether to run tests, include unit-test, integration-test and sqlness.
@@ -91,7 +91,7 @@ env:
   # The scheduled version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-YYYYMMDD', like v0.2.0-nigthly-20230313;
   NIGHTLY_RELEASE_PREFIX: nightly
   # Note: The NEXT_RELEASE_VERSION should be modified manually by every formal release.
-  NEXT_RELEASE_VERSION: v0.8.0
+  NEXT_RELEASE_VERSION: v0.9.0

 jobs:
   allocate-runners:
@@ -102,7 +102,7 @@ jobs:
       linux-amd64-runner: ${{ steps.start-linux-amd64-runner.outputs.label }}
       linux-arm64-runner: ${{ steps.start-linux-arm64-runner.outputs.label }}
       macos-runner: ${{ inputs.macos_runner || vars.DEFAULT_MACOS_RUNNER }}
-      windows-runner: windows-latest-8-cores
+      windows-runner: windows-2022-8-cores

       # The following EC2 resource id will be used for resource releasing.
       linux-amd64-ec2-runner-label: ${{ steps.start-linux-amd64-runner.outputs.label }}

.github/workflows/schedule.yml (11 changes)

@@ -16,16 +16,7 @@ jobs:
     if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
     steps:
      - uses: actions/checkout@v4
-      - uses: actions/setup-node@v4
-        with:
-          node-version: 22
-      - uses: pnpm/action-setup@v3
-        with:
-          package_json_file: 'cyborg/package.json'
-          run_install: true
-      - name: Describe the Environment
-        working-directory: cyborg
-        run: pnpm tsx -v
+      - uses: ./.github/actions/setup-cyborg
       - name: Do Maintenance
         working-directory: cyborg
         run: pnpm tsx bin/schedule.ts

.github/workflows/semantic-pull-request.yml (11 changes)

@@ -13,16 +13,7 @@ jobs:
     timeout-minutes: 10
     steps:
       - uses: actions/checkout@v4
-      - uses: actions/setup-node@v4
-        with:
-          node-version: 22
-      - uses: pnpm/action-setup@v3
-        with:
-          package_json_file: 'cyborg/package.json'
-          run_install: true
-      - name: Describe the Environment
-        working-directory: cyborg
-        run: pnpm tsx -v
+      - uses: ./.github/actions/setup-cyborg
       - name: Check Pull Request
         working-directory: cyborg
         run: pnpm tsx bin/check-pull-request.ts

Cargo.lock (generated; 992-line diff suppressed because it is too large)

Cargo.toml (12 changes)

@@ -4,6 +4,7 @@ members = [
     "src/api",
     "src/auth",
     "src/catalog",
+    "src/cache",
     "src/client",
     "src/cmd",
     "src/common/base",
@@ -63,7 +64,7 @@ members = [
 resolver = "2"

 [workspace.package]
-version = "0.7.2"
+version = "0.8.0"
 edition = "2021"
 license = "Apache-2.0"

@@ -99,6 +100,7 @@ bytemuck = "1.12"
 bytes = { version = "1.5", features = ["serde"] }
 chrono = { version = "0.4", features = ["serde"] }
 clap = { version = "4.4", features = ["derive"] }
+config = "0.13.0"
 crossbeam-utils = "0.8"
 dashmap = "5.4"
 datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
@@ -116,7 +118,7 @@ etcd-client = { git = "https://github.com/MichaelScofield/etcd-client.git", rev
 fst = "0.4.7"
 futures = "0.3"
 futures-util = "0.3"
-greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "219b2409bb701f75b43fc0ba64967d2ed8e75491" }
+greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "a11db14b8502f55ca5348917fd18e6fcf140f55e" }
 humantime = "2.1"
 humantime-serde = "1.1"
 itertools = "0.10"
@@ -154,10 +156,10 @@ serde = { version = "1.0", features = ["derive"] }
 serde_json = { version = "1.0", features = ["float_roundtrip"] }
 serde_with = "3"
 smallvec = { version = "1", features = ["serde"] }
-snafu = "0.7"
+snafu = "0.8"
 sysinfo = "0.30"
 # on branch v0.44.x
-sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "c919990bf62ad38d2b0c0a3bc90b26ad919d51b0", features = [
+sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "e4e496b8d62416ad50ce70a1b460c7313610cf5d", features = [
     "visitor",
 ] }
 strum = { version = "0.25", features = ["derive"] }
@@ -173,6 +175,7 @@ zstd = "0.13"
 ## workspaces members
 api = { path = "src/api" }
 auth = { path = "src/auth" }
+cache = { path = "src/cache" }
 catalog = { path = "src/catalog" }
 client = { path = "src/client" }
 cmd = { path = "src/cmd" }
@@ -204,6 +207,7 @@ common-wal = { path = "src/common/wal" }
 datanode = { path = "src/datanode" }
 datatypes = { path = "src/datatypes" }
 file-engine = { path = "src/file-engine" }
+flow = { path = "src/flow" }
 frontend = { path = "src/frontend" }
 index = { path = "src/index" }
 log-store = { path = "src/log-store" }

@@ -127,6 +127,8 @@
 | `export_metrics.remote_write` | -- | -- | -- |
 | `export_metrics.remote_write.url` | String | `""` | The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`. |
 | `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
+| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
+| `tracing.tokio_console_addr` | String | `None` | The tokio console address. |


 ## Cluster Mode
@@ -184,7 +186,6 @@
 | `meta_client.metadata_cache_tti` | String | `5m` | -- |
 | `datanode` | -- | -- | Datanode options. |
 | `datanode.client` | -- | -- | Datanode client options. |
-| `datanode.client.timeout` | String | `10s` | -- |
 | `datanode.client.connect_timeout` | String | `10s` | -- |
 | `datanode.client.tcp_nodelay` | Bool | `true` | -- |
 | `logging` | -- | -- | The logging options. |
@@ -203,6 +204,8 @@
 | `export_metrics.remote_write` | -- | -- | -- |
 | `export_metrics.remote_write.url` | String | `""` | The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`. |
 | `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
+| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
+| `tracing.tokio_console_addr` | String | `None` | The tokio console address. |


 ### Metasrv
@@ -259,6 +262,8 @@
 | `export_metrics.remote_write` | -- | -- | -- |
 | `export_metrics.remote_write.url` | String | `""` | The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`. |
 | `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
+| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
+| `tracing.tokio_console_addr` | String | `None` | The tokio console address. |


 ### Datanode
@@ -370,3 +375,5 @@
 | `export_metrics.remote_write` | -- | -- | -- |
 | `export_metrics.remote_write.url` | String | `""` | The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`. |
 | `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
+| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
+| `tracing.tokio_console_addr` | String | `None` | The tokio console address. |

@@ -428,3 +428,9 @@ url = ""

 ## HTTP headers of Prometheus remote-write carry.
 headers = { }
+
+## The tracing options. Only effect when compiled with `tokio-console` feature.
+[tracing]
+## The tokio console address.
+## +toml2docs:none-default
+tokio_console_addr = "127.0.0.1"
@@ -136,7 +136,6 @@ metadata_cache_tti = "5m"
 [datanode]
 ## Datanode client options.
 [datanode.client]
-timeout = "10s"
 connect_timeout = "10s"
 tcp_nodelay = true

@@ -186,3 +185,9 @@ url = ""

 ## HTTP headers of Prometheus remote-write carry.
 headers = { }
+
+## The tracing options. Only effect when compiled with `tokio-console` feature.
+[tracing]
+## The tokio console address.
+## +toml2docs:none-default
+tokio_console_addr = "127.0.0.1"
@@ -141,3 +141,9 @@ url = ""

 ## HTTP headers of Prometheus remote-write carry.
 headers = { }
+
+## The tracing options. Only effect when compiled with `tokio-console` feature.
+[tracing]
+## The tokio console address.
+## +toml2docs:none-default
+tokio_console_addr = "127.0.0.1"
@@ -471,3 +471,9 @@ url = ""

 ## HTTP headers of Prometheus remote-write carry.
 headers = { }
+
+## The tracing options. Only effect when compiled with `tokio-console` feature.
+[tracing]
+## The tokio console address.
+## +toml2docs:none-default
+tokio_console_addr = "127.0.0.1"

cyborg/bin/follow-up-docs-issue.ts (new file, 106 lines)

@@ -0,0 +1,106 @@
+/*
+ * Copyright 2023 Greptime Team
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import * as core from '@actions/core'
+import {handleError, obtainClient} from "@/common";
+import {context} from "@actions/github";
+import {PullRequestEditedEvent, PullRequestEvent, PullRequestOpenedEvent} from "@octokit/webhooks-types";
+// @ts-expect-error moduleResolution:nodenext issue 54523
+import {RequestError} from "@octokit/request-error";
+
+const needFollowUpDocs = "[x] This PR requires documentation updates."
+const labelDocsNotRequired = "docs-not-required"
+const labelDocsRequired = "docs-required"
+
+async function main() {
+  if (!context.payload.pull_request) {
+    throw new Error(`Only pull request event supported. ${context.eventName} is unsupported.`)
+  }
+
+  const client = obtainClient("GITHUB_TOKEN")
+  const docsClient = obtainClient("DOCS_REPO_TOKEN")
+  const payload = context.payload as PullRequestEvent
+  const { owner, repo, number, actor, title, html_url } = {
+    owner: payload.pull_request.base.user.login,
+    repo: payload.pull_request.base.repo.name,
+    number: payload.pull_request.number,
+    title: payload.pull_request.title,
+    html_url: payload.pull_request.html_url,
+    actor: payload.pull_request.user.login,
+  }
+  const followUpDocs = checkPullRequestEvent(payload)
+  if (followUpDocs) {
+    core.info("Follow up docs.")
+    await client.rest.issues.removeLabel({
+      owner, repo, issue_number: number, name: labelDocsNotRequired,
+    }).catch((e: RequestError) => {
+      if (e.status != 404) {
+        throw e;
+      }
+      core.debug(`Label ${labelDocsNotRequired} not exist.`)
+    })
+    await client.rest.issues.addLabels({
+      owner, repo, issue_number: number, labels: [labelDocsRequired],
+    })
+    await docsClient.rest.issues.create({
+      owner: 'GreptimeTeam',
+      repo: 'docs',
+      title: `Update docs for ${title}`,
+      body: `A document change request is generated from ${html_url}`,
+      assignee: actor,
+    }).then((res) => {
+      core.info(`Created issue ${res.data}`)
+    })
+  } else {
+    core.info("No need to follow up docs.")
+    await client.rest.issues.removeLabel({
+      owner, repo, issue_number: number, name: labelDocsRequired
+    }).catch((e: RequestError) => {
+      if (e.status != 404) {
+        throw e;
+      }
+      core.debug(`Label ${labelDocsRequired} not exist.`)
+    })
+    await client.rest.issues.addLabels({
+      owner, repo, issue_number: number, labels: [labelDocsNotRequired],
+    })
+  }
+}
+
+function checkPullRequestEvent(payload: PullRequestEvent) {
+  switch (payload.action) {
+    case "opened":
+      return checkPullRequestOpenedEvent(payload as PullRequestOpenedEvent)
+    case "edited":
+      return checkPullRequestEditedEvent(payload as PullRequestEditedEvent)
+    default:
+      throw new Error(`${payload.action} is unsupported.`)
+  }
+}
+
+function checkPullRequestOpenedEvent(event: PullRequestOpenedEvent): boolean {
+  // @ts-ignore
+  return event.pull_request.body?.includes(needFollowUpDocs)
+}
+
+function checkPullRequestEditedEvent(event: PullRequestEditedEvent): boolean {
+  const previous = event.changes.body?.from.includes(needFollowUpDocs)
+  const current = event.pull_request.body?.includes(needFollowUpDocs)
+  // from docs-not-need to docs-required
+  return (!previous) && current
+}
+
+main().catch(handleError)

@@ -7,6 +7,7 @@
   "dependencies": {
     "@actions/core": "^1.10.1",
     "@actions/github": "^6.0.0",
+    "@octokit/request-error": "^6.1.1",
     "@octokit/webhooks-types": "^7.5.1",
     "conventional-commit-types": "^3.0.0",
     "conventional-commits-parser": "^5.0.0",

cyborg/pnpm-lock.yaml (10 changes, generated)

@@ -11,6 +11,9 @@ dependencies:
   '@actions/github':
     specifier: ^6.0.0
     version: 6.0.0
+  '@octokit/request-error':
+    specifier: ^6.1.1
+    version: 6.1.1
   '@octokit/webhooks-types':
     specifier: ^7.5.1
     version: 7.5.1
@@ -359,6 +362,13 @@ packages:
       once: 1.4.0
     dev: false

+  /@octokit/request-error@6.1.1:
+    resolution: {integrity: sha512-1mw1gqT3fR/WFvnoVpY/zUM2o/XkMs/2AszUUG9I69xn0JFLv6PGkPhNk5lbfvROs79wiS0bqiJNxfCZcRJJdg==}
+    engines: {node: '>= 18'}
+    dependencies:
+      '@octokit/types': 13.5.0
+    dev: false
+
   /@octokit/request@8.4.0:
     resolution: {integrity: sha512-9Bb014e+m2TgBeEJGEbdplMVWwPmL1FPtggHQRkV+WVsMggPtEkLKPlcVYm/o8xKLkpJ7B+6N8WfQMtDLX2Dpw==}
     engines: {node: '>= 18'}

@@ -23,28 +23,28 @@

 ## Write performance

-| Environment        | Ingest rate (rows/s) |
-| ------------------ | --------------------- |
-| Local              | 3695814.64            |
-| EC2 c5d.2xlarge    | 2987166.64            |
+| Environment     | Ingest rate (rows/s) |
+| --------------- | -------------------- |
+| Local           | 369581.464           |
+| EC2 c5d.2xlarge | 298716.664           |


 ## Query performance

-| Query type            | Local (ms) | EC2 c5d.2xlarge (ms)   |
-| --------------------- | ---------- | ---------------------- |
-| cpu-max-all-1         | 30.56      | 54.74                  |
-| cpu-max-all-8         | 52.69      | 70.50                  |
-| double-groupby-1      | 664.30     | 1366.63                |
-| double-groupby-5      | 1391.26    | 2141.71                |
-| double-groupby-all    | 2828.94    | 3389.59                |
-| groupby-orderby-limit | 718.92     | 1213.90                |
-| high-cpu-1            | 29.21      | 52.98                  |
-| high-cpu-all          | 5514.12    | 7194.91                |
-| lastpoint             | 7571.40    | 9423.41                |
-| single-groupby-1-1-1  | 19.09      | 7.77                   |
-| single-groupby-1-1-12 | 27.28      | 51.64                  |
-| single-groupby-1-8-1  | 31.85      | 11.64                  |
-| single-groupby-5-1-1  | 16.14      | 9.67                   |
-| single-groupby-5-1-12 | 27.21      | 53.62                  |
-| single-groupby-5-8-1  | 39.62      | 14.96                  |
+| Query type            | Local (ms) | EC2 c5d.2xlarge (ms) |
+| --------------------- | ---------- | -------------------- |
+| cpu-max-all-1         | 30.56      | 54.74                |
+| cpu-max-all-8         | 52.69      | 70.50                |
+| double-groupby-1      | 664.30     | 1366.63              |
+| double-groupby-5      | 1391.26    | 2141.71              |
+| double-groupby-all    | 2828.94    | 3389.59              |
+| groupby-orderby-limit | 718.92     | 1213.90              |
+| high-cpu-1            | 29.21      | 52.98                |
+| high-cpu-all          | 5514.12    | 7194.91              |
+| lastpoint             | 7571.40    | 9423.41              |
+| single-groupby-1-1-1  | 19.09      | 7.77                 |
+| single-groupby-1-1-12 | 27.28      | 51.64                |
+| single-groupby-1-8-1  | 31.85      | 11.64                |
+| single-groupby-5-1-1  | 16.14      | 9.67                 |
+| single-groupby-5-1-12 | 27.21      | 53.62                |
+| single-groupby-5-8-1  | 39.62      | 14.96                |

rust-toolchain.toml:

@@ -1,2 +1,2 @@
 [toolchain]
-channel = "nightly-2024-04-18"
+channel = "nightly-2024-04-20"

@@ -30,6 +30,7 @@ pub enum Error {
     #[snafu(display("Unknown proto column datatype: {}", datatype))]
     UnknownColumnDataType {
         datatype: i32,
+        #[snafu(implicit)]
         location: Location,
         #[snafu(source)]
         error: prost::DecodeError,
@@ -38,12 +39,14 @@ pub enum Error {
     #[snafu(display("Failed to create column datatype from {:?}", from))]
     IntoColumnDataType {
         from: ConcreteDataType,
+        #[snafu(implicit)]
         location: Location,
     },

     #[snafu(display("Failed to convert column default constraint, column: {}", column))]
     ConvertColumnDefaultConstraint {
         column: String,
+        #[snafu(implicit)]
         location: Location,
         source: datatypes::error::Error,
     },
@@ -51,6 +54,7 @@ pub enum Error {
     #[snafu(display("Invalid column default constraint, column: {}", column))]
     InvalidColumnDefaultConstraint {
         column: String,
+        #[snafu(implicit)]
         location: Location,
         source: datatypes::error::Error,
     },
@@ -480,6 +480,8 @@ fn ddl_request_type(request: &DdlRequest) -> &'static str {
         Some(Expr::TruncateTable(_)) => "ddl.truncate_table",
         Some(Expr::CreateFlow(_)) => "ddl.create_flow",
         Some(Expr::DropFlow(_)) => "ddl.drop_flow",
+        Some(Expr::CreateView(_)) => "ddl.create_view",
+        Some(Expr::DropView(_)) => "ddl.drop_view",
         None => "ddl.empty",
     }
 }
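
A pattern worth calling out here: this compare bumps snafu from 0.7 to 0.8, and snafu 0.8 stops filling `Location` fields implicitly unless they are annotated. That is why nearly every error variant in the Rust hunks gains a `#[snafu(implicit)]` line, and why one-line variants are expanded into braced blocks to hold the attribute. A minimal sketch of the before/after shape (the enum and variant names are illustrative, not taken from the diff):

```rust
use snafu::{Location, Snafu};

#[derive(Debug, Snafu)]
pub enum Error {
    // Under snafu 0.7 a field named `location` of type `Location` was
    // populated automatically at the error construction site. snafu 0.8
    // requires marking the field `#[snafu(implicit)]` to keep that behavior.
    #[snafu(display("Failed to do something: {}", name))]
    DoSomething {
        name: String,
        #[snafu(implicit)]
        location: Location,
    },
}
```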

@@ -34,11 +34,13 @@ pub enum Error {
     Io {
         #[snafu(source)]
         error: std::io::Error,
+        #[snafu(implicit)]
         location: Location,
     },

     #[snafu(display("Auth failed"))]
     AuthBackend {
+        #[snafu(implicit)]
         location: Location,
         source: BoxedError,
     },
@@ -72,7 +74,10 @@ pub enum Error {
     },

     #[snafu(display("User is not authorized to perform this action"))]
-    PermissionDenied { location: Location },
+    PermissionDenied {
+        #[snafu(implicit)]
+        location: Location,
+    },
 }

 impl ErrorExt for Error {

src/cache/Cargo.toml (new file, 13 lines)

@@ -0,0 +1,13 @@
+[package]
+name = "cache"
+version.workspace = true
+edition.workspace = true
+license.workspace = true
+
+[dependencies]
+catalog.workspace = true
+common-error.workspace = true
+common-macro.workspace = true
+common-meta.workspace = true
+moka.workspace = true
+snafu.workspace = true

src/cache/src/error.rs (new file, 44 lines)

@@ -0,0 +1,44 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use common_error::ext::ErrorExt;
+use common_error::status_code::StatusCode;
+use common_macro::stack_trace_debug;
+use snafu::{Location, Snafu};
+
+#[derive(Snafu)]
+#[snafu(visibility(pub))]
+#[stack_trace_debug]
+pub enum Error {
+    #[snafu(display("Failed to get cache from cache registry: {}", name))]
+    CacheRequired {
+        #[snafu(implicit)]
+        location: Location,
+        name: String,
+    },
+}
+
+pub type Result<T> = std::result::Result<T, Error>;
+
+impl ErrorExt for Error {
+    fn status_code(&self) -> StatusCode {
+        match self {
+            Error::CacheRequired { .. } => StatusCode::Internal,
+        }
+    }
+
+    fn as_any(&self) -> &dyn std::any::Any {
+        self
+    }
+}

src/cache/src/lib.rs (new file, 122 lines)

@@ -0,0 +1,122 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+pub mod error;
+
+use std::sync::Arc;
+use std::time::Duration;
+
+use catalog::kvbackend::new_table_cache;
+use common_meta::cache::{
+    new_table_flownode_set_cache, new_table_info_cache, new_table_name_cache,
+    new_table_route_cache, CacheRegistry, CacheRegistryBuilder, LayeredCacheRegistryBuilder,
+};
+use common_meta::kv_backend::KvBackendRef;
+use moka::future::CacheBuilder;
+use snafu::OptionExt;
+
+use crate::error::Result;
+
+const DEFAULT_CACHE_MAX_CAPACITY: u64 = 65536;
+const DEFAULT_CACHE_TTL: Duration = Duration::from_secs(10 * 60);
+const DEFAULT_CACHE_TTI: Duration = Duration::from_secs(5 * 60);
+
+pub const TABLE_INFO_CACHE_NAME: &str = "table_info_cache";
+pub const TABLE_NAME_CACHE_NAME: &str = "table_name_cache";
+pub const TABLE_CACHE_NAME: &str = "table_cache";
+pub const TABLE_FLOWNODE_SET_CACHE_NAME: &str = "table_flownode_set_cache";
+pub const TABLE_ROUTE_CACHE_NAME: &str = "table_route_cache";
+
+pub fn build_fundamental_cache_registry(kv_backend: KvBackendRef) -> CacheRegistry {
+    // Builds table info cache
+    let cache = CacheBuilder::new(DEFAULT_CACHE_MAX_CAPACITY)
+        .time_to_live(DEFAULT_CACHE_TTL)
+        .time_to_idle(DEFAULT_CACHE_TTI)
+        .build();
+    let table_info_cache = Arc::new(new_table_info_cache(
+        TABLE_INFO_CACHE_NAME.to_string(),
+        cache,
+        kv_backend.clone(),
+    ));
+
+    // Builds table name cache
+    let cache = CacheBuilder::new(DEFAULT_CACHE_MAX_CAPACITY)
+        .time_to_live(DEFAULT_CACHE_TTL)
+        .time_to_idle(DEFAULT_CACHE_TTI)
+        .build();
+    let table_name_cache = Arc::new(new_table_name_cache(
+        TABLE_NAME_CACHE_NAME.to_string(),
+        cache,
+        kv_backend.clone(),
+    ));
+
+    // Builds table route cache
+    let cache = CacheBuilder::new(DEFAULT_CACHE_MAX_CAPACITY)
+        .time_to_live(DEFAULT_CACHE_TTL)
+        .time_to_idle(DEFAULT_CACHE_TTI)
+        .build();
+    let table_route_cache = Arc::new(new_table_route_cache(
+        TABLE_ROUTE_CACHE_NAME.to_string(),
+        cache,
+        kv_backend.clone(),
+    ));
+
+    // Builds table flownode set cache
+    let cache = CacheBuilder::new(DEFAULT_CACHE_MAX_CAPACITY)
+        .time_to_live(DEFAULT_CACHE_TTL)
+        .time_to_idle(DEFAULT_CACHE_TTI)
+        .build();
+    let table_flownode_set_cache = Arc::new(new_table_flownode_set_cache(
+        TABLE_FLOWNODE_SET_CACHE_NAME.to_string(),
+        cache,
+        kv_backend.clone(),
+    ));
+
+    CacheRegistryBuilder::default()
+        .add_cache(table_info_cache)
+        .add_cache(table_name_cache)
+        .add_cache(table_route_cache)
+        .add_cache(table_flownode_set_cache)
+        .build()
+}
+
+// TODO(weny): Make the cache configurable.
+pub fn with_default_composite_cache_registry(
+    builder: LayeredCacheRegistryBuilder,
+) -> Result<LayeredCacheRegistryBuilder> {
+    let table_info_cache = builder.get().context(error::CacheRequiredSnafu {
+        name: TABLE_INFO_CACHE_NAME,
+    })?;
+    let table_name_cache = builder.get().context(error::CacheRequiredSnafu {
+        name: TABLE_NAME_CACHE_NAME,
+    })?;
+
+    // Builds table cache
+    let cache = CacheBuilder::new(DEFAULT_CACHE_MAX_CAPACITY)
+        .time_to_live(DEFAULT_CACHE_TTL)
+        .time_to_idle(DEFAULT_CACHE_TTI)
+        .build();
+    let table_cache = Arc::new(new_table_cache(
+        TABLE_CACHE_NAME.to_string(),
+        cache,
+        table_info_cache,
+        table_name_cache,
+    ));
+
+    let registry = CacheRegistryBuilder::default()
+        .add_cache(table_cache)
+        .build();
+
+    Ok(builder.add_cache_registry(registry))
+}
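
The two constructors in this new crate split cache construction into layers: `build_fundamental_cache_registry` wires the caches that resolve misses straight against the kv-backend, while `with_default_composite_cache_registry` layers the composite table cache on top and fails with `CacheRequired` if its inputs are missing. A rough wiring sketch, assuming a `KvBackendRef` is already at hand; note that `LayeredCacheRegistryBuilder::default()` and the final `build()` call are assumptions inferred from the builder names, not shown in this diff:

```rust
use cache::{build_fundamental_cache_registry, with_default_composite_cache_registry};
use common_meta::cache::LayeredCacheRegistryBuilder;
use common_meta::kv_backend::KvBackendRef;

fn wire_caches(kv_backend: KvBackendRef) -> cache::error::Result<()> {
    // Layer 1: fundamental caches that read from the kv-backend directly.
    let fundamental = build_fundamental_cache_registry(kv_backend);

    // Layer 2: the composite table cache, which looks up the table-info and
    // table-name caches registered in the layer below, or errors out.
    let builder = LayeredCacheRegistryBuilder::default().add_cache_registry(fundamental);
    let _registry = with_default_composite_cache_registry(builder)?.build();
    Ok(())
}
```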

@@ -30,12 +30,14 @@ use tokio::task::JoinError;
 pub enum Error {
     #[snafu(display("Failed to list catalogs"))]
     ListCatalogs {
+        #[snafu(implicit)]
         location: Location,
         source: BoxedError,
     },

     #[snafu(display("Failed to list {}'s schemas", catalog))]
     ListSchemas {
+        #[snafu(implicit)]
         location: Location,
         catalog: String,
         source: BoxedError,
@@ -43,6 +45,7 @@ pub enum Error {

     #[snafu(display("Failed to list {}.{}'s tables", catalog, schema))]
     ListTables {
+        #[snafu(implicit)]
         location: Location,
         catalog: String,
         schema: String,
@@ -51,23 +54,27 @@ pub enum Error {

     #[snafu(display("Failed to list nodes in cluster: {source}"))]
     ListNodes {
+        #[snafu(implicit)]
         location: Location,
         source: BoxedError,
     },

     #[snafu(display("Failed to re-compile script due to internal error"))]
     CompileScriptInternal {
+        #[snafu(implicit)]
         location: Location,
         source: BoxedError,
     },
     #[snafu(display("Failed to open system catalog table"))]
     OpenSystemCatalog {
+        #[snafu(implicit)]
         location: Location,
         source: table::error::Error,
     },

     #[snafu(display("Failed to create system catalog table"))]
     CreateSystemCatalog {
+        #[snafu(implicit)]
         location: Location,
         source: table::error::Error,
     },
@@ -75,12 +82,17 @@ pub enum Error {
     #[snafu(display("Failed to create table, table info: {}", table_info))]
     CreateTable {
         table_info: String,
+        #[snafu(implicit)]
         location: Location,
         source: table::error::Error,
     },

     #[snafu(display("System catalog is not valid: {}", msg))]
-    SystemCatalog { msg: String, location: Location },
+    SystemCatalog {
+        msg: String,
+        #[snafu(implicit)]
+        location: Location,
+    },

     #[snafu(display(
         "System catalog table type mismatch, expected: binary, found: {:?}",
@@ -88,34 +100,42 @@ pub enum Error {
     ))]
     SystemCatalogTypeMismatch {
         data_type: ConcreteDataType,
+        #[snafu(implicit)]
         location: Location,
     },

     #[snafu(display("Invalid system catalog entry type: {:?}", entry_type))]
     InvalidEntryType {
         entry_type: Option<u8>,
+        #[snafu(implicit)]
         location: Location,
     },

     #[snafu(display("Invalid system catalog key: {:?}", key))]
     InvalidKey {
         key: Option<String>,
+        #[snafu(implicit)]
         location: Location,
     },

     #[snafu(display("Catalog value is not present"))]
-    EmptyValue { location: Location },
+    EmptyValue {
+        #[snafu(implicit)]
+        location: Location,
+    },

     #[snafu(display("Failed to deserialize value"))]
     ValueDeserialize {
         #[snafu(source)]
         error: serde_json::error::Error,
+        #[snafu(implicit)]
         location: Location,
     },

     #[snafu(display("Table engine not found: {}", engine_name))]
     TableEngineNotFound {
         engine_name: String,
+        #[snafu(implicit)]
         location: Location,
         source: table::error::Error,
     },
@@ -123,6 +143,7 @@ pub enum Error {
     #[snafu(display("Cannot find catalog by name: {}", catalog_name))]
     CatalogNotFound {
         catalog_name: String,
+        #[snafu(implicit)]
         location: Location,
     },

@@ -130,30 +151,49 @@ pub enum Error {
     SchemaNotFound {
         catalog: String,
         schema: String,
+        #[snafu(implicit)]
         location: Location,
     },

     #[snafu(display("Table `{}` already exists", table))]
-    TableExists { table: String, location: Location },
+    TableExists {
+        table: String,
+        #[snafu(implicit)]
+        location: Location,
+    },

     #[snafu(display("Table not found: {}", table))]
-    TableNotExist { table: String, location: Location },
+    TableNotExist {
+        table: String,
+        #[snafu(implicit)]
+        location: Location,
+    },

     #[snafu(display("Schema {} already exists", schema))]
-    SchemaExists { schema: String, location: Location },
+    SchemaExists {
+        schema: String,
+        #[snafu(implicit)]
+        location: Location,
+    },

     #[snafu(display("Operation {} not implemented yet", operation))]
     Unimplemented {
         operation: String,
+        #[snafu(implicit)]
         location: Location,
     },

     #[snafu(display("Operation {} not supported", op))]
-    NotSupported { op: String, location: Location },
+    NotSupported {
+        op: String,
+        #[snafu(implicit)]
+        location: Location,
+    },

     #[snafu(display("Failed to open table {table_id}"))]
     OpenTable {
         table_id: TableId,
+        #[snafu(implicit)]
         location: Location,
         source: table::error::Error,
     },
@@ -167,6 +207,7 @@ pub enum Error {
     #[snafu(display("Table not found while opening table, table info: {}", table_info))]
     TableNotFound {
         table_info: String,
+        #[snafu(implicit)]
         location: Location,
     },

@@ -178,57 +219,69 @@ pub enum Error {

     #[snafu(display("Failed to read system catalog table records"))]
     ReadSystemCatalog {
+        #[snafu(implicit)]
         location: Location,
         source: common_recordbatch::error::Error,
     },

     #[snafu(display("Failed to create recordbatch"))]
     CreateRecordBatch {
+        #[snafu(implicit)]
         location: Location,
         source: common_recordbatch::error::Error,
     },

     #[snafu(display("Failed to insert table creation record to system catalog"))]
     InsertCatalogRecord {
+        #[snafu(implicit)]
         location: Location,
         source: table::error::Error,
     },

     #[snafu(display("Failed to scan system catalog table"))]
     SystemCatalogTableScan {
+        #[snafu(implicit)]
         location: Location,
         source: table::error::Error,
     },

     #[snafu(display("Internal error"))]
     Internal {
+        #[snafu(implicit)]
         location: Location,
         source: BoxedError,
     },

     #[snafu(display("Failed to upgrade weak catalog manager reference"))]
-    UpgradeWeakCatalogManagerRef { location: Location },
+    UpgradeWeakCatalogManagerRef {
+        #[snafu(implicit)]
+        location: Location,
+    },

     #[snafu(display("Failed to execute system catalog table scan"))]
     SystemCatalogTableScanExec {
+        #[snafu(implicit)]
         location: Location,
         source: common_query::error::Error,
     },

     #[snafu(display("Cannot parse catalog value"))]
     InvalidCatalogValue {
+        #[snafu(implicit)]
         location: Location,
         source: common_catalog::error::Error,
     },

     #[snafu(display("Failed to perform metasrv operation"))]
     Metasrv {
+        #[snafu(implicit)]
         location: Location,
         source: meta_client::error::Error,
     },

     #[snafu(display("Invalid table info in catalog"))]
     InvalidTableInfoInCatalog {
+        #[snafu(implicit)]
         location: Location,
         source: datatypes::error::Error,
     },
@@ -240,29 +293,37 @@ pub enum Error {
     Datafusion {
         #[snafu(source)]
         error: DataFusionError,
+        #[snafu(implicit)]
         location: Location,
     },

     #[snafu(display("Table schema mismatch"))]
     TableSchemaMismatch {
+        #[snafu(implicit)]
         location: Location,
         source: table::error::Error,
     },

     #[snafu(display("A generic error has occurred, msg: {}", msg))]
-    Generic { msg: String, location: Location },
+    Generic {
+        msg: String,
+        #[snafu(implicit)]
+        location: Location,
+    },

     #[snafu(display("Table metadata manager error"))]
     TableMetadataManager {
         source: common_meta::error::Error,
+        #[snafu(implicit)]
         location: Location,
     },

-    #[snafu(display("Get null from table cache, key: {}", key))]
-    TableCacheNotGet { key: String, location: Location },
-
-    #[snafu(display("Failed to get table cache, err: {}", err_msg))]
-    GetTableCache { err_msg: String },
+    #[snafu(display("Failed to get table cache"))]
+    GetTableCache {
+        source: common_meta::error::Error,
+        #[snafu(implicit)]
+        location: Location,
+    },
 }

 pub type Result<T> = std::result::Result<T, Error>;
@@ -324,7 +385,7 @@ impl ErrorExt for Error {
             Error::QueryAccessDenied { .. } => StatusCode::AccessDenied,
             Error::Datafusion { .. } => StatusCode::EngineExecuteQuery,
             Error::TableMetadataManager { source, .. } => source.status_code(),
-            Error::TableCacheNotGet { .. } | Error::GetTableCache { .. } => StatusCode::Internal,
+            Error::GetTableCache { .. } => StatusCode::Internal,
         }
     }
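
The net effect of the last two hunks above: the stringly-typed `GetTableCache { err_msg }` and the separate `TableCacheNotGet` variant collapse into a single variant that carries the underlying `common_meta` error as its `source`, so call sites can convert failures with snafu's `context` instead of formatting messages by hand. A hypothetical call-site sketch; the `lookup` future is a stand-in for the real table-cache call, and only the snafu wiring is the point:

```rust
use snafu::ResultExt;

// Any lookup that returns `common_meta::error::Result<T>` converts into the
// catalog error with one combinator; the `#[snafu(implicit)]` location field
// is captured automatically at the call site.
async fn into_catalog_error<T>(
    lookup: impl std::future::Future<Output = common_meta::error::Result<T>>,
) -> crate::error::Result<T> {
    lookup.await.context(crate::error::GetTableCacheSnafu)
}
```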

@@ -21,11 +21,11 @@ use common_config::Mode;
 use common_error::ext::BoxedError;
 use common_meta::cluster::{ClusterInfo, NodeInfo, NodeStatus};
 use common_meta::peer::Peer;
-use common_query::physical_plan::TaskContext;
 use common_recordbatch::adapter::RecordBatchStreamAdapter;
 use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
 use common_telemetry::warn;
 use common_time::timestamp::Timestamp;
+use datafusion::execution::TaskContext;
 use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
 use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
 use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
@@ -20,9 +20,9 @@ use common_catalog::consts::{
     SEMANTIC_TYPE_TIME_INDEX,
 };
 use common_error::ext::BoxedError;
-use common_query::physical_plan::TaskContext;
 use common_recordbatch::adapter::RecordBatchStreamAdapter;
 use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
+use datafusion::execution::TaskContext;
 use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
 use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
 use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
@@ -17,9 +17,9 @@ use std::sync::{Arc, Weak};
 use arrow_schema::SchemaRef as ArrowSchemaRef;
 use common_catalog::consts::INFORMATION_SCHEMA_KEY_COLUMN_USAGE_TABLE_ID;
 use common_error::ext::BoxedError;
-use common_query::physical_plan::TaskContext;
 use common_recordbatch::adapter::RecordBatchStreamAdapter;
 use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
+use datafusion::execution::TaskContext;
 use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
 use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
 use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
@@ -17,9 +17,9 @@ use std::sync::Arc;

 use arrow_schema::SchemaRef as ArrowSchemaRef;
 use common_error::ext::BoxedError;
-use common_query::physical_plan::TaskContext;
 use common_recordbatch::adapter::RecordBatchStreamAdapter;
 use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
+use datafusion::execution::TaskContext;
 use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
 use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
 use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
@@ -18,10 +18,10 @@ use std::sync::{Arc, Weak};
 use arrow_schema::SchemaRef as ArrowSchemaRef;
 use common_catalog::consts::INFORMATION_SCHEMA_PARTITIONS_TABLE_ID;
 use common_error::ext::BoxedError;
-use common_query::physical_plan::TaskContext;
 use common_recordbatch::adapter::RecordBatchStreamAdapter;
 use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
 use common_time::datetime::DateTime;
+use datafusion::execution::TaskContext;
 use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
 use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
 use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
@@ -19,9 +19,9 @@ use arrow_schema::SchemaRef as ArrowSchemaRef;
 use common_catalog::consts::INFORMATION_SCHEMA_REGION_PEERS_TABLE_ID;
 use common_error::ext::BoxedError;
 use common_meta::rpc::router::RegionRoute;
-use common_query::physical_plan::TaskContext;
 use common_recordbatch::adapter::RecordBatchStreamAdapter;
 use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
+use datafusion::execution::TaskContext;
 use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
 use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
 use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
@@ -17,10 +17,10 @@ use std::sync::Arc;
 use arrow_schema::SchemaRef as ArrowSchemaRef;
 use common_catalog::consts::INFORMATION_SCHEMA_RUNTIME_METRICS_TABLE_ID;
 use common_error::ext::BoxedError;
-use common_query::physical_plan::TaskContext;
 use common_recordbatch::adapter::RecordBatchStreamAdapter;
 use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
 use common_time::util::current_time_millis;
+use datafusion::execution::TaskContext;
 use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
 use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
 use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
@@ -17,9 +17,9 @@ use std::sync::{Arc, Weak};
 use arrow_schema::SchemaRef as ArrowSchemaRef;
 use common_catalog::consts::INFORMATION_SCHEMA_SCHEMATA_TABLE_ID;
 use common_error::ext::BoxedError;
-use common_query::physical_plan::TaskContext;
 use common_recordbatch::adapter::RecordBatchStreamAdapter;
 use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
+use datafusion::execution::TaskContext;
 use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
 use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
 use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
@@ -17,9 +17,9 @@ use std::sync::{Arc, Weak};
 use arrow_schema::SchemaRef as ArrowSchemaRef;
 use common_catalog::consts::INFORMATION_SCHEMA_TABLE_CONSTRAINTS_TABLE_ID;
 use common_error::ext::BoxedError;
-use common_query::physical_plan::TaskContext;
 use common_recordbatch::adapter::RecordBatchStreamAdapter;
 use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
+use datafusion::execution::TaskContext;
 use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
 use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
 use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
@@ -17,9 +17,9 @@ use std::sync::{Arc, Weak};
 use arrow_schema::SchemaRef as ArrowSchemaRef;
 use common_catalog::consts::INFORMATION_SCHEMA_TABLES_TABLE_ID;
 use common_error::ext::BoxedError;
-use common_query::physical_plan::TaskContext;
 use common_recordbatch::adapter::RecordBatchStreamAdapter;
 use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
+use datafusion::execution::TaskContext;
 use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
 use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
 use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;

@@ -16,5 +16,7 @@ pub use client::{CachedMetaKvBackend, CachedMetaKvBackendBuilder, MetaKvBackend}
 mod client;
 mod manager;
+mod table_cache;

 pub use manager::KvBackendCatalogManager;
+pub use table_cache::{new_table_cache, TableCache, TableCacheRef};

@@ -350,6 +350,13 @@ pub struct MetaKvBackend {
     pub client: Arc<MetaClient>,
 }

+impl MetaKvBackend {
+    /// Constructs a [MetaKvBackend].
+    pub fn new(client: Arc<MetaClient>) -> MetaKvBackend {
+        MetaKvBackend { client }
+    }
+}
+
 impl TxnService for MetaKvBackend {
     type Error = Error;
 }
@@ -450,9 +457,8 @@ mod tests {
     use common_meta::kv_backend::{KvBackend, TxnService};
     use common_meta::rpc::store::{
         BatchDeleteRequest, BatchDeleteResponse, BatchGetRequest, BatchGetResponse,
-        BatchPutRequest, BatchPutResponse, CompareAndPutRequest, CompareAndPutResponse,
-        DeleteRangeRequest, DeleteRangeResponse, PutRequest, PutResponse, RangeRequest,
-        RangeResponse,
+        BatchPutRequest, BatchPutResponse, DeleteRangeRequest, DeleteRangeResponse, PutRequest,
+        PutResponse, RangeRequest, RangeResponse,
     };
     use common_meta::rpc::KeyValue;
     use dashmap::DashMap;
@@ -512,13 +518,6 @@ mod tests {
             unimplemented!()
         }

-        async fn compare_and_put(
-            &self,
-            _req: CompareAndPutRequest,
-        ) -> Result<CompareAndPutResponse, Self::Error> {
-            unimplemented!()
-        }
-
         async fn delete_range(
             &self,
             _req: DeleteRangeRequest,
@@ -15,27 +15,24 @@
 use std::any::Any;
 use std::collections::BTreeSet;
 use std::sync::{Arc, Weak};
-use std::time::Duration;
 use async_stream::try_stream;
 use common_catalog::consts::{
     DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, INFORMATION_SCHEMA_NAME, NUMBERS_TABLE_ID,
 };
 use common_catalog::format_full_table_name;
 use common_config::Mode;
 use common_error::ext::BoxedError;
-use common_meta::cache_invalidator::{CacheInvalidator, Context, MultiCacheInvalidator};
-use common_meta::instruction::CacheIdent;
+use common_meta::cache::TableRouteCacheRef;
 use common_meta::key::catalog_name::CatalogNameKey;
 use common_meta::key::schema_name::SchemaNameKey;
 use common_meta::key::table_info::TableInfoValue;
 use common_meta::key::table_name::TableNameKey;
 use common_meta::key::{TableMetadataManager, TableMetadataManagerRef};
 use common_meta::kv_backend::KvBackendRef;
 use common_meta::table_name::TableName;
 use futures_util::stream::BoxStream;
 use futures_util::{StreamExt, TryStreamExt};
 use meta_client::client::MetaClient;
-use moka::future::{Cache as AsyncCache, CacheBuilder};
 use moka::sync::Cache;
 use partition::manager::{PartitionRuleManager, PartitionRuleManagerRef};
 use snafu::prelude::*;
@@ -43,12 +40,12 @@ use table::dist_table::DistTable;
 use table::table::numbers::{NumbersTable, NUMBERS_TABLE_NAME};
 use table::TableRef;

-use crate::error::Error::{GetTableCache, TableCacheNotGet};
 use crate::error::{
-    InvalidTableInfoInCatalogSnafu, ListCatalogsSnafu, ListSchemasSnafu, ListTablesSnafu, Result,
-    TableCacheNotGetSnafu, TableMetadataManagerSnafu,
+    GetTableCacheSnafu, InvalidTableInfoInCatalogSnafu, ListCatalogsSnafu, ListSchemasSnafu,
+    ListTablesSnafu, Result, TableMetadataManagerSnafu,
 };
 use crate::information_schema::InformationSchemaProvider;
+use crate::kvbackend::TableCacheRef;
 use crate::CatalogManager;

 /// Access all existing catalog, schema and tables.
@@ -64,64 +61,26 @@ pub struct KvBackendCatalogManager {
     table_metadata_manager: TableMetadataManagerRef,
     /// A sub-CatalogManager that handles system tables
     system_catalog: SystemCatalog,
-    table_cache: AsyncCache<String, TableRef>,
 }

-struct TableCacheInvalidator {
-    table_cache: AsyncCache<String, TableRef>,
-}
-
-impl TableCacheInvalidator {
-    pub fn new(table_cache: AsyncCache<String, TableRef>) -> Self {
-        Self { table_cache }
-    }
-}
-
-#[async_trait::async_trait]
-impl CacheInvalidator for TableCacheInvalidator {
-    async fn invalidate(
-        &self,
-        _ctx: &Context,
-        caches: Vec<CacheIdent>,
-    ) -> common_meta::error::Result<()> {
-        for cache in caches {
-            if let CacheIdent::TableName(table_name) = cache {
-                let table_cache_key = format_full_table_name(
-                    &table_name.catalog_name,
-                    &table_name.schema_name,
-                    &table_name.table_name,
-                );
-                self.table_cache.invalidate(&table_cache_key).await;
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
table_cache: TableCacheRef,
|
||||
}
|
||||
|
||||
const CATALOG_CACHE_MAX_CAPACITY: u64 = 128;
|
||||
const TABLE_CACHE_MAX_CAPACITY: u64 = 65536;
|
||||
const TABLE_CACHE_TTL: Duration = Duration::from_secs(10 * 60);
|
||||
const TABLE_CACHE_TTI: Duration = Duration::from_secs(5 * 60);
|
||||
|
||||
impl KvBackendCatalogManager {
|
||||
pub async fn new(
|
||||
mode: Mode,
|
||||
meta_client: Option<Arc<MetaClient>>,
|
||||
backend: KvBackendRef,
|
||||
multi_cache_invalidator: Arc<MultiCacheInvalidator>,
|
||||
table_cache: TableCacheRef,
|
||||
table_route_cache: TableRouteCacheRef,
|
||||
) -> Arc<Self> {
|
||||
let table_cache: AsyncCache<String, TableRef> = CacheBuilder::new(TABLE_CACHE_MAX_CAPACITY)
|
||||
.time_to_live(TABLE_CACHE_TTL)
|
||||
.time_to_idle(TABLE_CACHE_TTI)
|
||||
.build();
|
||||
multi_cache_invalidator
|
||||
.add_invalidator(Arc::new(TableCacheInvalidator::new(table_cache.clone())))
|
||||
.await;
|
||||
|
||||
Arc::new_cyclic(|me| Self {
|
||||
mode,
|
||||
meta_client,
|
||||
partition_manager: Arc::new(PartitionRuleManager::new(backend.clone())),
|
||||
partition_manager: Arc::new(PartitionRuleManager::new(
|
||||
backend.clone(),
|
||||
table_route_cache,
|
||||
)),
|
||||
table_metadata_manager: Arc::new(TableMetadataManager::new(backend)),
|
||||
system_catalog: SystemCatalog {
|
||||
catalog_manager: me.clone(),
|
||||
@@ -218,7 +177,7 @@ impl CatalogManager for KvBackendCatalogManager {
|
||||
}
|
||||
|
||||
async fn schema_exists(&self, catalog: &str, schema: &str) -> Result<bool> {
|
||||
if self.system_catalog.schema_exist(schema) {
|
||||
if self.system_catalog.schema_exists(schema) {
|
||||
return Ok(true);
|
||||
}
|
||||
|
||||
@@ -230,7 +189,7 @@ impl CatalogManager for KvBackendCatalogManager {
|
||||
}
|
||||
|
||||
async fn table_exists(&self, catalog: &str, schema: &str, table: &str) -> Result<bool> {
|
||||
if self.system_catalog.table_exist(schema, table) {
|
||||
if self.system_catalog.table_exists(schema, table) {
|
||||
return Ok(true);
|
||||
}
|
||||
|
||||
@@ -245,60 +204,25 @@ impl CatalogManager for KvBackendCatalogManager {
|
||||
|
||||
async fn table(
|
||||
&self,
|
||||
catalog: &str,
|
||||
schema: &str,
|
||||
catalog_name: &str,
|
||||
schema_name: &str,
|
||||
table_name: &str,
|
||||
) -> Result<Option<TableRef>> {
|
||||
if let Some(table) = self.system_catalog.table(catalog, schema, table_name) {
|
||||
if let Some(table) = self
|
||||
.system_catalog
|
||||
.table(catalog_name, schema_name, table_name)
|
||||
{
|
||||
return Ok(Some(table));
|
||||
}
|
||||
|
||||
let init = async {
|
||||
let table_name_key = TableNameKey::new(catalog, schema, table_name);
|
||||
let Some(table_name_value) = self
|
||||
.table_metadata_manager
|
||||
.table_name_manager()
|
||||
.get(table_name_key)
|
||||
.await
|
||||
.context(TableMetadataManagerSnafu)?
|
||||
else {
|
||||
return TableCacheNotGetSnafu {
|
||||
key: table_name_key.to_string(),
|
||||
}
|
||||
.fail();
|
||||
};
|
||||
let table_id = table_name_value.table_id();
|
||||
|
||||
let Some(table_info_value) = self
|
||||
.table_metadata_manager
|
||||
.table_info_manager()
|
||||
.get(table_id)
|
||||
.await
|
||||
.context(TableMetadataManagerSnafu)?
|
||||
.map(|v| v.into_inner())
|
||||
else {
|
||||
return TableCacheNotGetSnafu {
|
||||
key: table_name_key.to_string(),
|
||||
}
|
||||
.fail();
|
||||
};
|
||||
build_table(table_info_value)
|
||||
};
|
||||
|
||||
match self
|
||||
.table_cache
|
||||
.try_get_with_by_ref(&format_full_table_name(catalog, schema, table_name), init)
|
||||
self.table_cache
|
||||
.get_by_ref(&TableName {
|
||||
catalog_name: catalog_name.to_string(),
|
||||
schema_name: schema_name.to_string(),
|
||||
table_name: table_name.to_string(),
|
||||
})
|
||||
.await
|
||||
{
|
||||
Ok(table) => Ok(Some(table)),
|
||||
Err(err) => match err.as_ref() {
|
||||
TableCacheNotGet { .. } => Ok(None),
|
||||
_ => Err(err),
|
||||
},
|
||||
}
|
||||
.map_err(|err| GetTableCache {
|
||||
err_msg: err.to_string(),
|
||||
})
|
||||
.context(GetTableCacheSnafu)
|
||||
}
|
||||
|
||||
fn tables<'a>(&'a self, catalog: &'a str, schema: &'a str) -> BoxStream<'a, Result<TableRef>> {
|
||||
@@ -382,11 +306,11 @@ impl SystemCatalog {
|
||||
}
|
||||
}
|
||||
|
||||
fn schema_exist(&self, schema: &str) -> bool {
|
||||
fn schema_exists(&self, schema: &str) -> bool {
|
||||
schema == INFORMATION_SCHEMA_NAME
|
||||
}
|
||||
|
||||
fn table_exist(&self, schema: &str, table: &str) -> bool {
|
||||
fn table_exists(&self, schema: &str, table: &str) -> bool {
|
||||
if schema == INFORMATION_SCHEMA_NAME {
|
||||
self.information_schema_provider.table(table).is_some()
|
||||
} else if schema == DEFAULT_SCHEMA_NAME {
|
||||
|
||||
80
src/catalog/src/kvbackend/table_cache.rs
Normal file
80
src/catalog/src/kvbackend/table_cache.rs
Normal file
@@ -0,0 +1,80 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_meta::cache::{CacheContainer, Initializer, TableInfoCacheRef, TableNameCacheRef};
|
||||
use common_meta::error::{Result as MetaResult, ValueNotExistSnafu};
|
||||
use common_meta::instruction::CacheIdent;
|
||||
use common_meta::table_name::TableName;
|
||||
use futures::future::BoxFuture;
|
||||
use moka::future::Cache;
|
||||
use snafu::OptionExt;
|
||||
use table::dist_table::DistTable;
|
||||
use table::TableRef;
|
||||
|
||||
pub type TableCacheRef = Arc<TableCache>;
|
||||
|
||||
/// [TableCache] caches the [TableName] to [TableRef] mapping.
|
||||
pub type TableCache = CacheContainer<TableName, TableRef, CacheIdent>;
|
||||
|
||||
/// Constructs a [TableCache].
|
||||
pub fn new_table_cache(
|
||||
name: String,
|
||||
cache: Cache<TableName, TableRef>,
|
||||
table_info_cache: TableInfoCacheRef,
|
||||
table_name_cache: TableNameCacheRef,
|
||||
) -> TableCache {
|
||||
let init = init_factory(table_info_cache, table_name_cache);
|
||||
|
||||
CacheContainer::new(name, cache, Box::new(invalidator), init, Box::new(filter))
|
||||
}
|
||||
|
||||
fn init_factory(
|
||||
table_info_cache: TableInfoCacheRef,
|
||||
table_name_cache: TableNameCacheRef,
|
||||
) -> Initializer<TableName, TableRef> {
|
||||
Arc::new(move |table_name| {
|
||||
let table_info_cache = table_info_cache.clone();
|
||||
let table_name_cache = table_name_cache.clone();
|
||||
Box::pin(async move {
|
||||
let table_id = table_name_cache
|
||||
.get_by_ref(table_name)
|
||||
.await?
|
||||
.context(ValueNotExistSnafu)?;
|
||||
let table_info = table_info_cache
|
||||
.get_by_ref(&table_id)
|
||||
.await?
|
||||
.context(ValueNotExistSnafu)?;
|
||||
|
||||
Ok(Some(DistTable::table(table_info)))
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
fn invalidator<'a>(
|
||||
cache: &'a Cache<TableName, TableRef>,
|
||||
ident: &'a CacheIdent,
|
||||
) -> BoxFuture<'a, MetaResult<()>> {
|
||||
Box::pin(async move {
|
||||
if let CacheIdent::TableName(table_name) = ident {
|
||||
cache.invalidate(table_name).await
|
||||
}
|
||||
Ok(())
|
||||
})
|
||||
}
|
||||
|
||||
fn filter(ident: &CacheIdent) -> bool {
|
||||
matches!(ident, CacheIdent::TableName(_))
|
||||
}
|
||||
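For readers unfamiliar with the CacheContainer shape introduced above (a moka cache paired with a miss initializer and an invalidation filter), here is a minimal self-contained sketch of the same idea. The GreptimeDB types (CacheContainer, TableInfoCacheRef, TableNameCacheRef) live in common_meta and are not shown here, so this stand-in maps a String key to a derived value; it assumes moka 0.12 with the "future" feature and tokio as dependencies.

use std::sync::Arc;
use moka::future::Cache;

/// Simplified stand-in for the TableName -> TableRef container built above:
/// a cache plus a loader that resolves misses via two lower-level lookups.
struct CacheContainerSketch {
    cache: Cache<String, Arc<String>>,
}

impl CacheContainerSketch {
    fn new(capacity: u64) -> Self {
        Self { cache: Cache::new(capacity) }
    }

    /// On a miss, resolve name -> id -> info, mirroring init_factory above.
    async fn get(&self, name: String) -> Arc<String> {
        self.cache
            .get_with(name.clone(), async move {
                let id = format!("id-of-{name}"); // stands in for table_name_cache
                Arc::new(format!("info-of-{id}")) // stands in for table_info_cache
            })
            .await
    }

    /// Mirrors the invalidator: drop a single entry when its ident matches.
    async fn invalidate(&self, name: &String) {
        self.cache.invalidate(name).await;
    }
}

#[tokio::main]
async fn main() {
    let c = CacheContainerSketch::new(1024);
    assert_eq!(*c.get("t1".into()).await, "info-of-id-of-t1");
    c.invalidate(&"t1".to_string()).await;
}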
@@ -18,7 +18,7 @@ use common_error::ext::{BoxedError, ErrorExt};
use common_error::status_code::StatusCode;
use common_error::{GREPTIME_DB_HEADER_ERROR_CODE, GREPTIME_DB_HEADER_ERROR_MSG};
use common_macro::stack_trace_debug;
use snafu::{Location, Snafu};
use snafu::{location, Location, Snafu};
use tonic::{Code, Status};

#[derive(Snafu)]
@@ -26,7 +26,11 @@ use tonic::{Code, Status};
#[stack_trace_debug]
pub enum Error {
#[snafu(display("Illegal Flight messages, reason: {}", reason))]
IllegalFlightMessages { reason: String, location: Location },
IllegalFlightMessages {
reason: String,
#[snafu(implicit)]
location: Location,
},

#[snafu(display("Failed to do Flight get, code: {}", tonic_code))]
FlightGet {
@@ -37,47 +41,77 @@ pub enum Error {

#[snafu(display("Failure occurs during handling request"))]
HandleRequest {
#[snafu(implicit)]
location: Location,
source: BoxedError,
},

#[snafu(display("Failed to convert FlightData"))]
ConvertFlightData {
#[snafu(implicit)]
location: Location,
source: common_grpc::Error,
},

#[snafu(display("Column datatype error"))]
ColumnDataType {
#[snafu(implicit)]
location: Location,
source: api::error::Error,
},

#[snafu(display("Illegal GRPC client state: {}", err_msg))]
IllegalGrpcClientState { err_msg: String, location: Location },
IllegalGrpcClientState {
err_msg: String,
#[snafu(implicit)]
location: Location,
},

#[snafu(display("Missing required field in protobuf, field: {}", field))]
MissingField { field: String, location: Location },
MissingField {
field: String,
#[snafu(implicit)]
location: Location,
},

#[snafu(display("Failed to create gRPC channel, peer address: {}", addr))]
CreateChannel {
addr: String,
#[snafu(implicit)]
location: Location,
source: common_grpc::error::Error,
},

#[snafu(display("Failed to request RegionServer, code: {}", code))]
RegionServer { code: Code, source: BoxedError },
RegionServer {
code: Code,
source: BoxedError,
#[snafu(implicit)]
location: Location,
},

// Server error carried in Tonic Status's metadata.
#[snafu(display("{}", msg))]
Server { code: StatusCode, msg: String },
Server {
code: StatusCode,
msg: String,
#[snafu(implicit)]
location: Location,
},

#[snafu(display("Illegal Database response: {err_msg}"))]
IllegalDatabaseResponse { err_msg: String },
IllegalDatabaseResponse {
err_msg: String,
#[snafu(implicit)]
location: Location,
},

#[snafu(display("Failed to send request with streaming: {}", err_msg))]
ClientStreaming { err_msg: String, location: Location },
ClientStreaming {
err_msg: String,
#[snafu(implicit)]
location: Location,
},
}

pub type Result<T> = std::result::Result<T, Error>;
@@ -128,7 +162,11 @@ impl From<Status> for Error {
let msg = get_metadata_value(&e, GREPTIME_DB_HEADER_ERROR_MSG)
.unwrap_or_else(|| e.message().to_string());

Self::Server { code, msg }
Self::Server {
code,
msg,
location: location!(),
}
}
}

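The mechanical change running through this enum, wrapping one-line variants so each gains a #[snafu(implicit)] location field, is how snafu 0.8 captures the error site automatically instead of threading a Location through every call. A minimal sketch under those assumptions (a hypothetical Demo variant; snafu = "0.8" as the dependency):

use snafu::{location, Location, Snafu};

#[derive(Debug, Snafu)]
enum Error {
    // With #[snafu(implicit)], the context selector fills in the Location
    // from the point where .fail()/.build()/.context() is invoked.
    #[snafu(display("demo failed: {msg}"))]
    Demo {
        msg: String,
        #[snafu(implicit)]
        location: Location,
    },
}

fn fail() -> Result<(), Error> {
    // No location argument needed: it is generated implicitly here.
    DemoSnafu { msg: "boom" }.fail()
}

fn main() {
    // When constructing a variant directly (as the From<Status> impl above
    // does for Server), the location! macro supplies the value explicitly.
    let e = Error::Demo { msg: "direct".into(), location: location!() };
    println!("{e}\n{:?}", fail().unwrap_err());
}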
@@ -189,6 +189,7 @@ impl RegionRequester {
error::Error::RegionServer {
code,
source: BoxedError::new(err),
location: location!(),
}
})?
.into_inner();
@@ -272,7 +273,7 @@ mod test {
err_msg: "blabla".to_string(),
}),
}));
let Server { code, msg } = result.unwrap_err() else {
let Server { code, msg, .. } = result.unwrap_err() else {
unreachable!()
};
assert_eq!(code, StatusCode::Internal);

@@ -19,6 +19,7 @@ workspace = true
async-trait.workspace = true
auth.workspace = true
base64.workspace = true
cache.workspace = true
catalog.workspace = true
chrono.workspace = true
clap.workspace = true
@@ -27,6 +28,7 @@ common-base.workspace = true
common-catalog.workspace = true
common-config.workspace = true
common-error.workspace = true
common-grpc.workspace = true
common-macro.workspace = true
common-meta.workspace = true
common-procedure.workspace = true
@@ -39,12 +41,12 @@ common-telemetry = { workspace = true, features = [
common-time.workspace = true
common-version.workspace = true
common-wal.workspace = true
config = "0.13"
datanode.workspace = true
datatypes.workspace = true
either = "1.8"
etcd-client.workspace = true
file-engine.workspace = true
flow.workspace = true
frontend.workspace = true
futures.workspace = true
human-panic = "1.2.2"
@@ -52,6 +54,7 @@ lazy_static.workspace = true
meta-client.workspace = true
meta-srv.workspace = true
mito2.workspace = true
moka.workspace = true
nu-ansi-term = "0.46"
plugins.workspace = true
prometheus.workspace = true

@@ -128,7 +128,7 @@ async fn start(cli: Command) -> Result<()> {
let _guard = common_telemetry::init_global_logging(
&app_name,
opts.logging_options(),
cli.global_options.tracing_options(),
&cli.global_options.tracing_options(),
opts.node_id(),
);

@@ -64,6 +64,10 @@ impl App for Instance {
self.tool.do_work().await
}

fn wait_signal(&self) -> bool {
false
}

async fn stop(&self) -> Result<()> {
Ok(())
}

@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use std::collections::HashSet;
use std::path::Path;
use std::sync::Arc;

@@ -28,6 +29,7 @@ use snafu::{OptionExt, ResultExt};
use tokio::fs::File;
use tokio::io::{AsyncWriteExt, BufWriter};
use tokio::sync::Semaphore;
use tokio::time::Instant;

use crate::cli::{Instance, Tool};
use crate::error::{
@@ -174,8 +176,34 @@ impl Export {
}

/// Return a list of [`TableReference`] to be exported.
/// Includes all tables under the given `catalog` and `schema`
async fn get_table_list(&self, catalog: &str, schema: &str) -> Result<Vec<TableReference>> {
/// Includes all tables under the given `catalog` and `schema`.
async fn get_table_list(
&self,
catalog: &str,
schema: &str,
) -> Result<(Vec<TableReference>, Vec<TableReference>)> {
// Puts all metric table first
let sql = format!(
"select table_catalog, table_schema, table_name from \
information_schema.columns where column_name = '__tsid' \
and table_catalog = \'{catalog}\' and table_schema = \'{schema}\'"
);
let result = self.sql(&sql).await?;
let Some(records) = result else {
EmptyResultSnafu.fail()?
};
let mut metric_physical_tables = HashSet::with_capacity(records.len());
for value in records {
let mut t = Vec::with_capacity(3);
for v in &value {
let serde_json::Value::String(value) = v else {
unreachable!()
};
t.push(value);
}
metric_physical_tables.insert((t[0].clone(), t[1].clone(), t[2].clone()));
}

// TODO: SQL injection hurts
let sql = format!(
"select table_catalog, table_schema, table_name from \
@@ -190,10 +218,10 @@ impl Export {
debug!("Fetched table list: {:?}", records);

if records.is_empty() {
return Ok(vec![]);
return Ok((vec![], vec![]));
}

let mut result = Vec::with_capacity(records.len());
let mut remaining_tables = Vec::with_capacity(records.len());
for value in records {
let mut t = Vec::with_capacity(3);
for v in &value {
@@ -202,10 +230,17 @@ impl Export {
};
t.push(value);
}
result.push((t[0].clone(), t[1].clone(), t[2].clone()));
let table = (t[0].clone(), t[1].clone(), t[2].clone());
// Ignores the physical table
if !metric_physical_tables.contains(&table) {
remaining_tables.push(table);
}
}

Ok(result)
Ok((
metric_physical_tables.into_iter().collect(),
remaining_tables,
))
}

async fn show_create_table(&self, catalog: &str, schema: &str, table: &str) -> Result<String> {
@@ -225,6 +260,7 @@ impl Export {
}

async fn export_create_table(&self) -> Result<()> {
let timer = Instant::now();
let semaphore = Arc::new(Semaphore::new(self.parallelism));
let db_names = self.iter_db_names().await?;
let db_count = db_names.len();
@@ -233,15 +269,16 @@ impl Export {
let semaphore_moved = semaphore.clone();
tasks.push(async move {
let _permit = semaphore_moved.acquire().await.unwrap();
let table_list = self.get_table_list(&catalog, &schema).await?;
let table_count = table_list.len();
let (metric_physical_tables, remaining_tables) =
self.get_table_list(&catalog, &schema).await?;
let table_count = metric_physical_tables.len() + remaining_tables.len();
tokio::fs::create_dir_all(&self.output_dir)
.await
.context(FileIoSnafu)?;
let output_file =
Path::new(&self.output_dir).join(format!("{catalog}-{schema}.sql"));
let mut file = File::create(output_file).await.context(FileIoSnafu)?;
for (c, s, t) in table_list {
for (c, s, t) in metric_physical_tables.into_iter().chain(remaining_tables) {
match self.show_create_table(&c, &s, &t).await {
Err(e) => {
error!(e; r#"Failed to export table "{}"."{}"."{}""#, c, s, t)
@@ -270,12 +307,14 @@ impl Export {
})
.count();

info!("success {success}/{db_count} jobs");
let elapsed = timer.elapsed();
info!("Success {success}/{db_count} jobs, cost: {:?}", elapsed);

Ok(())
}

async fn export_table_data(&self) -> Result<()> {
let timer = Instant::now();
let semaphore = Arc::new(Semaphore::new(self.parallelism));
let db_names = self.iter_db_names().await?;
let db_count = db_names.len();
@@ -288,15 +327,25 @@ impl Export {
.await
.context(FileIoSnafu)?;
let output_dir = Path::new(&self.output_dir).join(format!("{catalog}-{schema}/"));

// copy database to
let sql = format!(
"copy database {} to '{}' with (format='parquet');",
schema,
output_dir.to_str().unwrap()
);
self.sql(&sql).await?;
info!("finished exporting {catalog}.{schema} data");
// Ignores metric physical tables
let (metrics_tables, table_list) = self.get_table_list(&catalog, &schema).await?;
for (_, _, table_name) in metrics_tables {
warn!("Ignores metric physical table: {table_name}");
}
for (catalog_name, schema_name, table_name) in table_list {
// copy table to
let sql = format!(
r#"Copy "{}"."{}"."{}" TO '{}{}.parquet' WITH (format='parquet');"#,
catalog_name,
schema_name,
table_name,
output_dir.to_str().unwrap(),
table_name,
);
info!("Executing sql: {sql}");
self.sql(&sql).await?;
}
info!("Finished exporting {catalog}.{schema} data");

// export copy from sql
let dir_filenames = match output_dir.read_dir() {
@@ -351,8 +400,8 @@ impl Export {
}
})
.count();

info!("success {success}/{db_count} jobs");
let elapsed = timer.elapsed();
info!("Success {success}/{db_count} jobs, costs: {:?}", elapsed);

Ok(())
}

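The export paths above fan out one job per database while a semaphore caps concurrency at self.parallelism. A stripped-down sketch of that pattern (tokio and futures assumed as dependencies; export_one is a stub standing in for the real SHOW CREATE TABLE / COPY ... TO work):

use std::sync::Arc;
use tokio::sync::Semaphore;

async fn export_one(db: String) -> Result<(), String> {
    // Stub for the real export work done per database.
    println!("exporting {db}");
    Ok(())
}

#[tokio::main]
async fn main() {
    let parallelism = 4;
    let semaphore = Arc::new(Semaphore::new(parallelism));
    let dbs: Vec<String> = (0..16).map(|i| format!("db{i}")).collect();
    let db_count = dbs.len();

    let mut tasks = Vec::with_capacity(db_count);
    for db in dbs {
        let semaphore_moved = semaphore.clone();
        tasks.push(async move {
            // Each job holds a permit for its whole duration, capping the
            // number of concurrent exports at `parallelism`.
            let _permit = semaphore_moved.acquire().await.unwrap();
            export_one(db).await
        });
    }

    let success = futures::future::join_all(tasks)
        .await
        .into_iter()
        .filter(|r| r.is_ok())
        .count();
    println!("success {success}/{db_count} jobs");
}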
@@ -565,11 +565,16 @@ mod v1_helper {
#[snafu(visibility(pub))]
pub enum Error {
#[snafu(display("Invalid catalog info: {}", key))]
InvalidCatalog { key: String, location: Location },
InvalidCatalog {
key: String,
#[snafu(implicit)]
location: Location,
},

#[snafu(display("Failed to deserialize catalog entry value: {}", raw))]
DeserializeCatalogEntryValue {
raw: String,
#[snafu(implicit)]
location: Location,
source: serde_json::error::Error,
},

@@ -18,7 +18,9 @@ use std::time::Duration;
use async_trait::async_trait;
use catalog::kvbackend::MetaKvBackend;
use clap::Parser;
use common_config::Configurable;
use common_telemetry::info;
use common_telemetry::logging::TracingOptions;
use common_wal::config::DatanodeWalConfig;
use datanode::config::DatanodeOptions;
use datanode::datanode::{Datanode, DatanodeBuilder};
@@ -27,7 +29,9 @@ use meta_client::MetaClientOptions;
use servers::Mode;
use snafu::{OptionExt, ResultExt};

use crate::error::{MissingConfigSnafu, Result, ShutdownDatanodeSnafu, StartDatanodeSnafu};
use crate::error::{
LoadLayeredConfigSnafu, MissingConfigSnafu, Result, ShutdownDatanodeSnafu, StartDatanodeSnafu,
};
use crate::options::{GlobalOptions, Options};
use crate::App;

@@ -114,8 +118,8 @@ struct StartCommand {
rpc_addr: Option<String>,
#[clap(long)]
rpc_hostname: Option<String>,
#[clap(long, value_delimiter = ',', num_args = 1..)]
metasrv_addr: Option<Vec<String>>,
#[clap(long, aliases = ["metasrv-addr"], value_delimiter = ',', num_args = 1..)]
metasrv_addrs: Option<Vec<String>>,
#[clap(short, long)]
config_file: Option<String>,
#[clap(long)]
@@ -132,12 +136,24 @@ struct StartCommand {

impl StartCommand {
fn load_options(&self, global_options: &GlobalOptions) -> Result<Options> {
let mut opts: DatanodeOptions = Options::load_layered_options(
self.config_file.as_deref(),
self.env_prefix.as_ref(),
DatanodeOptions::env_list_keys(),
)?;
Ok(Options::Datanode(Box::new(
self.merge_with_cli_options(
global_options,
DatanodeOptions::load_layered_options(
self.config_file.as_deref(),
self.env_prefix.as_ref(),
)
.context(LoadLayeredConfigSnafu)?,
)?,
)))
}

// The precedence order is: cli > config file > environment variables > default values.
fn merge_with_cli_options(
&self,
global_options: &GlobalOptions,
mut opts: DatanodeOptions,
) -> Result<DatanodeOptions> {
if let Some(dir) = &global_options.log_dir {
opts.logging.dir.clone_from(dir);
}
@@ -146,6 +162,11 @@ impl StartCommand {
opts.logging.level.clone_from(&global_options.log_level);
}

opts.tracing = TracingOptions {
#[cfg(feature = "tokio-console")]
tokio_console_addr: global_options.tokio_console_addr.clone(),
};

if let Some(addr) = &self.rpc_addr {
opts.rpc_addr.clone_from(addr);
}
@@ -158,7 +179,7 @@ impl StartCommand {
opts.node_id = Some(node_id);
}

if let Some(metasrv_addrs) = &self.metasrv_addr {
if let Some(metasrv_addrs) = &self.metasrv_addrs {
opts.meta_client
.get_or_insert_with(MetaClientOptions::default)
.metasrv_addrs
@@ -202,7 +223,7 @@ impl StartCommand {
// Disable dashboard in datanode.
opts.http.disable_dashboard = true;

Ok(Options::Datanode(Box::new(opts)))
Ok(opts)
}

async fn build(self, mut opts: DatanodeOptions) -> Result<Instance> {
@@ -253,13 +274,14 @@ mod tests {
use std::io::Write;
use std::time::Duration;

use common_config::ENV_VAR_SEP;
use common_test_util::temp_dir::create_named_temp_file;
use datanode::config::{FileConfig, GcsConfig, ObjectStoreConfig, S3Config};
use servers::heartbeat_options::HeartbeatOptions;
use servers::Mode;

use super::*;
use crate::options::{GlobalOptions, ENV_VAR_SEP};
use crate::options::GlobalOptions;

#[test]
fn test_read_from_config_file() {
@@ -386,7 +408,7 @@ mod tests {

if let Options::Datanode(opt) = (StartCommand {
node_id: Some(42),
metasrv_addr: Some(vec!["127.0.0.1:3002".to_string()]),
metasrv_addrs: Some(vec!["127.0.0.1:3002".to_string()]),
..Default::default()
})
.load_options(&GlobalOptions::default())
@@ -396,7 +418,7 @@ mod tests {
}

assert!((StartCommand {
metasrv_addr: Some(vec!["127.0.0.1:3002".to_string()]),
metasrv_addrs: Some(vec!["127.0.0.1:3002".to_string()]),
..Default::default()
})
.load_options(&GlobalOptions::default())

@@ -17,7 +17,6 @@ use std::any::Any;
use common_error::ext::{BoxedError, ErrorExt};
use common_error::status_code::StatusCode;
use common_macro::stack_trace_debug;
use config::ConfigError;
use rustyline::error::ReadlineError;
use snafu::{Location, Snafu};

@@ -27,97 +26,120 @@ use snafu::{Location, Snafu};
pub enum Error {
#[snafu(display("Failed to create default catalog and schema"))]
InitMetadata {
#[snafu(implicit)]
location: Location,
source: common_meta::error::Error,
},

#[snafu(display("Failed to iter stream"))]
IterStream {
#[snafu(implicit)]
location: Location,
source: common_meta::error::Error,
},

#[snafu(display("Failed to init DDL manager"))]
InitDdlManager {
#[snafu(implicit)]
location: Location,
source: common_meta::error::Error,
},

#[snafu(display("Failed to init default timezone"))]
InitTimezone {
#[snafu(implicit)]
location: Location,
source: common_time::error::Error,
},

#[snafu(display("Failed to start procedure manager"))]
StartProcedureManager {
#[snafu(implicit)]
location: Location,
source: common_procedure::error::Error,
},

#[snafu(display("Failed to stop procedure manager"))]
StopProcedureManager {
#[snafu(implicit)]
location: Location,
source: common_procedure::error::Error,
},

#[snafu(display("Failed to start wal options allocator"))]
StartWalOptionsAllocator {
#[snafu(implicit)]
location: Location,
source: common_meta::error::Error,
},

#[snafu(display("Failed to start datanode"))]
StartDatanode {
#[snafu(implicit)]
location: Location,
source: datanode::error::Error,
},

#[snafu(display("Failed to shutdown datanode"))]
ShutdownDatanode {
#[snafu(implicit)]
location: Location,
source: datanode::error::Error,
},

#[snafu(display("Failed to start frontend"))]
StartFrontend {
#[snafu(implicit)]
location: Location,
source: frontend::error::Error,
},

#[snafu(display("Failed to shutdown frontend"))]
ShutdownFrontend {
#[snafu(implicit)]
location: Location,
source: frontend::error::Error,
},

#[snafu(display("Failed to build meta server"))]
BuildMetaServer {
#[snafu(implicit)]
location: Location,
source: meta_srv::error::Error,
},

#[snafu(display("Failed to start meta server"))]
StartMetaServer {
#[snafu(implicit)]
location: Location,
source: meta_srv::error::Error,
},

#[snafu(display("Failed to shutdown meta server"))]
ShutdownMetaServer {
#[snafu(implicit)]
location: Location,
source: meta_srv::error::Error,
},

#[snafu(display("Missing config, msg: {}", msg))]
MissingConfig { msg: String, location: Location },
MissingConfig {
msg: String,
#[snafu(implicit)]
location: Location,
},

#[snafu(display("Illegal config: {}", msg))]
IllegalConfig { msg: String, location: Location },
IllegalConfig {
msg: String,
#[snafu(implicit)]
location: Location,
},

#[snafu(display("Unsupported selector type: {}", selector_type))]
UnsupportedSelectorType {
selector_type: String,
#[snafu(implicit)]
location: Location,
source: meta_srv::error::Error,
},
@@ -129,6 +151,7 @@ pub enum Error {
ReplCreation {
#[snafu(source)]
error: ReadlineError,
#[snafu(implicit)]
location: Location,
},

@@ -136,23 +159,27 @@ pub enum Error {
Readline {
#[snafu(source)]
error: ReadlineError,
#[snafu(implicit)]
location: Location,
},

#[snafu(display("Failed to collect RecordBatches"))]
CollectRecordBatches {
#[snafu(implicit)]
location: Location,
source: common_recordbatch::error::Error,
},

#[snafu(display("Failed to pretty print Recordbatches"))]
PrettyPrintRecordBatches {
#[snafu(implicit)]
location: Location,
source: common_recordbatch::error::Error,
},

#[snafu(display("Failed to start Meta client"))]
StartMetaClient {
#[snafu(implicit)]
location: Location,
source: meta_client::error::Error,
},
@@ -160,31 +187,36 @@ pub enum Error {
#[snafu(display("Failed to parse SQL: {}", sql))]
ParseSql {
sql: String,
#[snafu(implicit)]
location: Location,
source: query::error::Error,
},

#[snafu(display("Failed to plan statement"))]
PlanStatement {
#[snafu(implicit)]
location: Location,
source: query::error::Error,
},

#[snafu(display("Failed to encode logical plan in substrait"))]
SubstraitEncodeLogicalPlan {
#[snafu(implicit)]
location: Location,
source: substrait::error::Error,
},

#[snafu(display("Failed to load layered config"))]
LoadLayeredConfig {
#[snafu(source)]
error: ConfigError,
#[snafu(source(from(common_config::error::Error, Box::new)))]
source: Box<common_config::error::Error>,
#[snafu(implicit)]
location: Location,
},

#[snafu(display("Failed to start catalog manager"))]
StartCatalogManager {
#[snafu(implicit)]
location: Location,
source: catalog::error::Error,
},
@@ -194,6 +226,7 @@ pub enum Error {
etcd_addr: String,
#[snafu(source)]
error: etcd_client::Error,
#[snafu(implicit)]
location: Location,
},

@@ -201,6 +234,7 @@ pub enum Error {
ConnectServer {
addr: String,
source: client::error::Error,
#[snafu(implicit)]
location: Location,
},

@@ -208,6 +242,7 @@ pub enum Error {
SerdeJson {
#[snafu(source)]
error: serde_json::error::Error,
#[snafu(implicit)]
location: Location,
},

@@ -216,17 +251,25 @@ pub enum Error {
reason: String,
#[snafu(source)]
error: reqwest::Error,
#[snafu(implicit)]
location: Location,
},

#[snafu(display("Expect data from output, but got another thing"))]
NotDataFromOutput { location: Location },
NotDataFromOutput {
#[snafu(implicit)]
location: Location,
},

#[snafu(display("Empty result from output"))]
EmptyResult { location: Location },
EmptyResult {
#[snafu(implicit)]
location: Location,
},

#[snafu(display("Failed to manipulate file"))]
FileIo {
#[snafu(implicit)]
location: Location,
#[snafu(source)]
error: std::io::Error,
@@ -234,6 +277,7 @@ pub enum Error {

#[snafu(display("Invalid database name: {}", database))]
InvalidDatabaseName {
#[snafu(implicit)]
location: Location,
database: String,
},
@@ -248,14 +292,30 @@ pub enum Error {
#[snafu(display("Other error"))]
Other {
source: BoxedError,
#[snafu(implicit)]
location: Location,
},

#[snafu(display("Failed to build runtime"))]
BuildRuntime {
#[snafu(implicit)]
location: Location,
source: common_runtime::error::Error,
},

#[snafu(display("Failed to get cache from cache registry: {}", name))]
CacheRequired {
#[snafu(implicit)]
location: Location,
name: String,
},

#[snafu(display("Failed to build cache registry"))]
BuildCacheRegistry {
#[snafu(implicit)]
location: Location,
source: cache::error::Error,
},
}

pub type Result<T> = std::result::Result<T, Error>;
@@ -305,6 +365,8 @@ impl ErrorExt for Error {

Error::SerdeJson { .. } | Error::FileIo { .. } => StatusCode::Unexpected,

Error::CacheRequired { .. } | Error::BuildCacheRegistry { .. } => StatusCode::Internal,

Error::Other { source, .. } => source.status_code(),

Error::BuildRuntime { source, .. } => source.status_code(),

@@ -16,13 +16,20 @@ use std::sync::Arc;
use std::time::Duration;

use async_trait::async_trait;
use catalog::kvbackend::{CachedMetaKvBackendBuilder, KvBackendCatalogManager};
use cache::{
build_fundamental_cache_registry, with_default_composite_cache_registry, TABLE_CACHE_NAME,
TABLE_ROUTE_CACHE_NAME,
};
use catalog::kvbackend::{CachedMetaKvBackendBuilder, KvBackendCatalogManager, MetaKvBackend};
use clap::Parser;
use client::client_manager::DatanodeClients;
use common_meta::cache_invalidator::MultiCacheInvalidator;
use common_config::Configurable;
use common_grpc::channel_manager::ChannelConfig;
use common_meta::cache::{CacheRegistryBuilder, LayeredCacheRegistryBuilder};
use common_meta::heartbeat::handler::parse_mailbox_message::ParseMailboxMessageHandler;
use common_meta::heartbeat::handler::HandlerGroupExecutor;
use common_telemetry::info;
use common_telemetry::logging::TracingOptions;
use common_time::timezone::set_default_timezone;
use frontend::frontend::FrontendOptions;
use frontend::heartbeat::handler::invalidate_table_cache::InvalidateTableCacheHandler;
@@ -35,7 +42,9 @@ use servers::tls::{TlsMode, TlsOption};
use servers::Mode;
use snafu::{OptionExt, ResultExt};

use crate::error::{self, InitTimezoneSnafu, MissingConfigSnafu, Result, StartFrontendSnafu};
use crate::error::{
self, InitTimezoneSnafu, LoadLayeredConfigSnafu, MissingConfigSnafu, Result, StartFrontendSnafu,
};
use crate::options::{GlobalOptions, Options};
use crate::App;

@@ -130,8 +139,8 @@ pub struct StartCommand {
config_file: Option<String>,
#[clap(short, long)]
influxdb_enable: Option<bool>,
#[clap(long, value_delimiter = ',', num_args = 1..)]
metasrv_addr: Option<Vec<String>>,
#[clap(long, aliases = ["metasrv-addr"], value_delimiter = ',', num_args = 1..)]
metasrv_addrs: Option<Vec<String>>,
#[clap(long)]
tls_mode: Option<TlsMode>,
#[clap(long)]
@@ -148,12 +157,24 @@ pub struct StartCommand {

impl StartCommand {
fn load_options(&self, global_options: &GlobalOptions) -> Result<Options> {
let mut opts: FrontendOptions = Options::load_layered_options(
self.config_file.as_deref(),
self.env_prefix.as_ref(),
FrontendOptions::env_list_keys(),
)?;
Ok(Options::Frontend(Box::new(
self.merge_with_cli_options(
global_options,
FrontendOptions::load_layered_options(
self.config_file.as_deref(),
self.env_prefix.as_ref(),
)
.context(LoadLayeredConfigSnafu)?,
)?,
)))
}

// The precedence order is: cli > config file > environment variables > default values.
fn merge_with_cli_options(
&self,
global_options: &GlobalOptions,
mut opts: FrontendOptions,
) -> Result<FrontendOptions> {
if let Some(dir) = &global_options.log_dir {
opts.logging.dir.clone_from(dir);
}
@@ -162,6 +183,11 @@ impl StartCommand {
opts.logging.level.clone_from(&global_options.log_level);
}

opts.tracing = TracingOptions {
#[cfg(feature = "tokio-console")]
tokio_console_addr: global_options.tokio_console_addr.clone(),
};

let tls_opts = TlsOption::new(
self.tls_mode.clone(),
self.tls_cert_path.clone(),
@@ -200,7 +226,7 @@ impl StartCommand {
opts.influxdb.enable = enable;
}

if let Some(metasrv_addrs) = &self.metasrv_addr {
if let Some(metasrv_addrs) = &self.metasrv_addrs {
opts.meta_client
.get_or_insert_with(MetaClientOptions::default)
.metasrv_addrs
@@ -210,7 +236,7 @@ impl StartCommand {

opts.user_provider.clone_from(&self.user_provider);

Ok(Options::Frontend(Box::new(opts)))
Ok(opts)
}

async fn build(self, mut opts: FrontendOptions) -> Result<Instance> {
@@ -242,21 +268,47 @@ impl StartCommand {
.cache_tti(cache_tti)
.build();
let cached_meta_backend = Arc::new(cached_meta_backend);
let multi_cache_invalidator = Arc::new(MultiCacheInvalidator::with_invalidators(vec![
cached_meta_backend.clone(),
]));

// Builds cache registry
let layered_cache_builder = LayeredCacheRegistryBuilder::default().add_cache_registry(
CacheRegistryBuilder::default()
.add_cache(cached_meta_backend.clone())
.build(),
);
let fundamental_cache_registry =
build_fundamental_cache_registry(Arc::new(MetaKvBackend::new(meta_client.clone())));
let layered_cache_registry = Arc::new(
with_default_composite_cache_registry(
layered_cache_builder.add_cache_registry(fundamental_cache_registry),
)
.context(error::BuildCacheRegistrySnafu)?
.build(),
);

let table_cache = layered_cache_registry
.get()
.context(error::CacheRequiredSnafu {
name: TABLE_CACHE_NAME,
})?;
let table_route_cache =
layered_cache_registry
.get()
.context(error::CacheRequiredSnafu {
name: TABLE_ROUTE_CACHE_NAME,
})?;
let catalog_manager = KvBackendCatalogManager::new(
opts.mode,
Some(meta_client.clone()),
cached_meta_backend.clone(),
multi_cache_invalidator.clone(),
table_cache,
table_route_cache,
)
.await;

let executor = HandlerGroupExecutor::new(vec![
Arc::new(ParseMailboxMessageHandler),
Arc::new(InvalidateTableCacheHandler::new(
multi_cache_invalidator.clone(),
layered_cache_registry.clone(),
)),
]);

@@ -267,14 +319,23 @@ impl StartCommand {
Arc::new(executor),
);

// frontend to datanode need not timeout.
// Some queries are expected to take long time.
let channel_config = ChannelConfig {
timeout: None,
..Default::default()
};
let client = DatanodeClients::new(channel_config);

let mut instance = FrontendBuilder::new(
cached_meta_backend.clone(),
layered_cache_registry.clone(),
catalog_manager,
Arc::new(DatanodeClients::default()),
Arc::new(client),
meta_client,
)
.with_plugin(plugins.clone())
.with_cache_invalidator(multi_cache_invalidator)
.with_local_cache_invalidator(layered_cache_registry)
.with_heartbeat_task(heartbeat_task)
.try_build()
.await
@@ -299,12 +360,13 @@ mod tests {

use auth::{Identity, Password, UserProviderRef};
use common_base::readable_size::ReadableSize;
use common_config::ENV_VAR_SEP;
use common_test_util::temp_dir::create_named_temp_file;
use frontend::service_config::GrpcOptions;
use servers::http::HttpOptions;

use super::*;
use crate::options::{GlobalOptions, ENV_VAR_SEP};
use crate::options::GlobalOptions;

#[test]
fn test_try_from_start_command() {

@@ -41,6 +41,11 @@ pub trait App: Send {

async fn start(&mut self) -> error::Result<()>;

/// Waits the quit signal by default.
fn wait_signal(&self) -> bool {
true
}

async fn stop(&self) -> error::Result<()>;
}

@@ -51,11 +56,13 @@ pub async fn start_app(mut app: Box<dyn App>) -> error::Result<()> {

app.start().await?;

if let Err(e) = tokio::signal::ctrl_c().await {
error!("Failed to listen for ctrl-c signal: {}", e);
// It's unusual to fail to listen for ctrl-c signal, maybe there's something unexpected in
// the underlying system. So we stop the app instead of running nonetheless to let people
// investigate the issue.
if app.wait_signal() {
if let Err(e) = tokio::signal::ctrl_c().await {
error!("Failed to listen for ctrl-c signal: {}", e);
// It's unusual to fail to listen for ctrl-c signal, maybe there's something unexpected in
// the underlying system. So we stop the app instead of running nonetheless to let people
// investigate the issue.
}
}

app.stop().await?;

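The start_app change just above gates the ctrl-c wait behind wait_signal(), so a one-shot tool (the cli Instance returns false) exits right after its work while long-running servers keep blocking. A reduced sketch of that control flow (async-trait and tokio assumed as dependencies; this App trait is a simplified stand-in for the crate's own):

use async_trait::async_trait;

#[async_trait]
trait App: Send {
    async fn start(&mut self) -> Result<(), String>;
    /// Servers keep the default (true); one-shot tools override to false.
    fn wait_signal(&self) -> bool {
        true
    }
    async fn stop(&self) -> Result<(), String>;
}

async fn start_app(mut app: Box<dyn App>) -> Result<(), String> {
    app.start().await?;
    if app.wait_signal() {
        // Block until ctrl-c; on listener failure, fall through and stop
        // the app rather than running on without a way to shut it down.
        if let Err(e) = tokio::signal::ctrl_c().await {
            eprintln!("Failed to listen for ctrl-c signal: {e}");
        }
    }
    app.stop().await
}

struct OneShot;

#[async_trait]
impl App for OneShot {
    async fn start(&mut self) -> Result<(), String> {
        println!("work done");
        Ok(())
    }
    fn wait_signal(&self) -> bool {
        false // exit immediately after start() finishes
    }
    async fn stop(&self) -> Result<(), String> {
        Ok(())
    }
}

#[tokio::main]
async fn main() {
    start_app(Box::new(OneShot)).await.unwrap();
}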
@@ -16,12 +16,14 @@ use std::time::Duration;

use async_trait::async_trait;
use clap::Parser;
use common_config::Configurable;
use common_telemetry::info;
use common_telemetry::logging::TracingOptions;
use meta_srv::bootstrap::MetasrvInstance;
use meta_srv::metasrv::MetasrvOptions;
use snafu::ResultExt;

use crate::error::{self, Result, StartMetaServerSnafu};
use crate::error::{self, LoadLayeredConfigSnafu, Result, StartMetaServerSnafu};
use crate::options::{GlobalOptions, Options};
use crate::App;

@@ -98,8 +100,8 @@ struct StartCommand {
bind_addr: Option<String>,
#[clap(long)]
server_addr: Option<String>,
#[clap(long)]
store_addr: Option<String>,
#[clap(long, aliases = ["store-addr"], value_delimiter = ',', num_args = 1..)]
store_addrs: Option<Vec<String>>,
#[clap(short, long)]
config_file: Option<String>,
#[clap(short, long)]
@@ -127,12 +129,24 @@ struct StartCommand {

impl StartCommand {
fn load_options(&self, global_options: &GlobalOptions) -> Result<Options> {
let mut opts: MetasrvOptions = Options::load_layered_options(
self.config_file.as_deref(),
self.env_prefix.as_ref(),
MetasrvOptions::env_list_keys(),
)?;
Ok(Options::Metasrv(Box::new(
self.merge_with_cli_options(
global_options,
MetasrvOptions::load_layered_options(
self.config_file.as_deref(),
self.env_prefix.as_ref(),
)
.context(LoadLayeredConfigSnafu)?,
)?,
)))
}

// The precedence order is: cli > config file > environment variables > default values.
fn merge_with_cli_options(
&self,
global_options: &GlobalOptions,
mut opts: MetasrvOptions,
) -> Result<MetasrvOptions> {
if let Some(dir) = &global_options.log_dir {
opts.logging.dir.clone_from(dir);
}
@@ -141,6 +155,11 @@ impl StartCommand {
opts.logging.level.clone_from(&global_options.log_level);
}

opts.tracing = TracingOptions {
#[cfg(feature = "tokio-console")]
tokio_console_addr: global_options.tokio_console_addr.clone(),
};

if let Some(addr) = &self.bind_addr {
opts.bind_addr.clone_from(addr);
}
@@ -149,8 +168,8 @@ impl StartCommand {
opts.server_addr.clone_from(addr);
}

if let Some(addr) = &self.store_addr {
opts.store_addr.clone_from(addr);
if let Some(addrs) = &self.store_addrs {
opts.store_addrs.clone_from(addrs);
}

if let Some(selector_type) = &self.selector {
@@ -190,7 +209,7 @@ impl StartCommand {
// Disable dashboard in metasrv.
opts.http.disable_dashboard = true;

Ok(Options::Metasrv(Box::new(opts)))
Ok(opts)
}

async fn build(self, mut opts: MetasrvOptions) -> Result<Instance> {
@@ -219,18 +238,18 @@ mod tests {
use std::io::Write;

use common_base::readable_size::ReadableSize;
use common_config::ENV_VAR_SEP;
use common_test_util::temp_dir::create_named_temp_file;
use meta_srv::selector::SelectorType;

use super::*;
use crate::options::ENV_VAR_SEP;

#[test]
fn test_read_from_cmd() {
let cmd = StartCommand {
bind_addr: Some("127.0.0.1:3002".to_string()),
server_addr: Some("127.0.0.1:3002".to_string()),
store_addr: Some("127.0.0.1:2380".to_string()),
store_addrs: Some(vec!["127.0.0.1:2380".to_string()]),
selector: Some("LoadBased".to_string()),
..Default::default()
};
@@ -239,7 +258,7 @@ mod tests {
unreachable!()
};
assert_eq!("127.0.0.1:3002".to_string(), options.bind_addr);
assert_eq!("127.0.0.1:2380".to_string(), options.store_addr);
assert_eq!(vec!["127.0.0.1:2380".to_string()], options.store_addrs);
assert_eq!(SelectorType::LoadBased, options.selector);
}

@@ -275,7 +294,7 @@ mod tests {
};
assert_eq!("127.0.0.1:3002".to_string(), options.bind_addr);
assert_eq!("127.0.0.1:3002".to_string(), options.server_addr);
assert_eq!("127.0.0.1:2379".to_string(), options.store_addr);
assert_eq!(vec!["127.0.0.1:2379".to_string()], options.store_addrs);
assert_eq!(SelectorType::LeaseBased, options.selector);
assert_eq!("debug", options.logging.level.as_ref().unwrap());
assert_eq!("/tmp/greptimedb/test/logs".to_string(), options.logging.dir);
@@ -309,7 +328,7 @@ mod tests {
let cmd = StartCommand {
bind_addr: Some("127.0.0.1:3002".to_string()),
server_addr: Some("127.0.0.1:3002".to_string()),
store_addr: Some("127.0.0.1:2380".to_string()),
store_addrs: Some(vec!["127.0.0.1:2380".to_string()]),
selector: Some("LoadBased".to_string()),
..Default::default()
};
@@ -395,7 +414,7 @@ mod tests {
assert_eq!(opts.http.addr, "127.0.0.1:14000");

// Should be default value.
assert_eq!(opts.store_addr, "127.0.0.1:2379");
assert_eq!(opts.store_addrs, vec!["127.0.0.1:2379".to_string()]);
},
);
}

@@ -13,51 +13,18 @@
// limitations under the License.

use clap::Parser;
use common_config::KvBackendConfig;
use common_telemetry::logging::{LoggingOptions, TracingOptions};
use common_wal::config::MetasrvWalConfig;
use config::{Config, Environment, File, FileFormat};
use datanode::config::{DatanodeOptions, ProcedureConfig};
use frontend::error::{Result as FeResult, TomlFormatSnafu};
use frontend::frontend::{FrontendOptions, TomlSerializable};
use datanode::config::DatanodeOptions;
use frontend::frontend::FrontendOptions;
use meta_srv::metasrv::MetasrvOptions;
use serde::{Deserialize, Serialize};
use snafu::ResultExt;

use crate::error::{LoadLayeredConfigSnafu, Result, SerdeJsonSnafu};

pub const ENV_VAR_SEP: &str = "__";
pub const ENV_LIST_SEP: &str = ",";

/// Options mixed up from datanode, frontend and metasrv.
#[derive(Serialize, Debug, Clone)]
pub struct MixOptions {
pub data_home: String,
pub procedure: ProcedureConfig,
pub metadata_store: KvBackendConfig,
pub frontend: FrontendOptions,
pub datanode: DatanodeOptions,
pub logging: LoggingOptions,
pub wal_meta: MetasrvWalConfig,
}

impl From<MixOptions> for FrontendOptions {
fn from(value: MixOptions) -> Self {
value.frontend
}
}

impl TomlSerializable for MixOptions {
fn to_toml(&self) -> FeResult<String> {
toml::to_string(self).context(TomlFormatSnafu)
}
}
use crate::standalone::StandaloneOptions;

pub enum Options {
Datanode(Box<DatanodeOptions>),
Frontend(Box<FrontendOptions>),
Metasrv(Box<MetasrvOptions>),
Standalone(Box<MixOptions>),
Standalone(Box<StandaloneOptions>),
Cli(Box<LoggingOptions>),
}

@@ -97,195 +64,11 @@ impl Options {
}
}

/// Load the configuration from multiple sources and merge them.
/// The precedence order is: config file > environment variables > default values.
/// `env_prefix` is the prefix of environment variables, e.g. "FRONTEND__xxx".
/// The function will use dunder(double underscore) `__` as the separator for environment variables, for example:
/// `DATANODE__STORAGE__MANIFEST__CHECKPOINT_MARGIN` will be mapped to `DatanodeOptions.storage.manifest.checkpoint_margin` field in the configuration.
/// `list_keys` is the list of keys that should be parsed as a list, for example, you can pass `Some(&["meta_client_options.metasrv_addrs"]` to parse `GREPTIMEDB_METASRV__META_CLIENT_OPTIONS__METASRV_ADDRS` as a list.
/// The function will use comma `,` as the separator for list values, for example: `127.0.0.1:3001,127.0.0.1:3002,127.0.0.1:3003`.
pub fn load_layered_options<'de, T: Serialize + Deserialize<'de> + Default>(
config_file: Option<&str>,
env_prefix: &str,
list_keys: Option<&[&str]>,
) -> Result<T> {
let default_opts = T::default();

let env_source = {
let mut env = Environment::default();

if !env_prefix.is_empty() {
env = env.prefix(env_prefix);
}

if let Some(list_keys) = list_keys {
env = env.list_separator(ENV_LIST_SEP);
for key in list_keys {
env = env.with_list_parse_key(key);
}
}

env.try_parsing(true)
.separator(ENV_VAR_SEP)
.ignore_empty(true)
};

// Workaround: Replacement for `Config::try_from(&default_opts)` due to
// `ConfigSerializer` cannot handle the case of an empty struct contained
// within an iterative structure.
// See: https://github.com/mehcode/config-rs/issues/461
let json_str = serde_json::to_string(&default_opts).context(SerdeJsonSnafu)?;
let default_config = File::from_str(&json_str, FileFormat::Json);

// Add default values and environment variables as the sources of the configuration.
let mut layered_config = Config::builder()
.add_source(default_config)
.add_source(env_source);

// Add config file as the source of the configuration if it is specified.
if let Some(config_file) = config_file {
layered_config = layered_config.add_source(File::new(config_file, FileFormat::Toml));
}

let opts = layered_config
.build()
.context(LoadLayeredConfigSnafu)?
.try_deserialize()
.context(LoadLayeredConfigSnafu)?;

Ok(opts)
}

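load_layered_options above layers three sources with the config crate: serialized defaults fed back in as JSON (the config-rs issue #461 workaround), then environment variables split on __, then the optional TOML file added last so it takes precedence. A compact sketch of the same layering (config 0.13, serde, and serde_json assumed as dependencies; Opts and the MYAPP prefix are illustrative):

use config::{Config, Environment, File, FileFormat};
use serde::{Deserialize, Serialize};

#[derive(Debug, Default, Serialize, Deserialize)]
struct Opts {
    rpc_addr: String,
    metasrv_addrs: Vec<String>,
}

fn load(config_file: Option<&str>, env_prefix: &str) -> Result<Opts, config::ConfigError> {
    // Serialize the defaults to JSON and feed them back in as the base layer.
    let defaults = serde_json::to_string(&Opts::default()).expect("serializable defaults");

    let env = Environment::default()
        .prefix(env_prefix)
        .separator("__")                      // MYAPP__RPC_ADDR -> rpc_addr
        .list_separator(",")                  // comma-separated list values
        .with_list_parse_key("metasrv_addrs") // parse this key as a list
        .try_parsing(true)
        .ignore_empty(true);

    let mut builder = Config::builder()
        .add_source(File::from_str(&defaults, FileFormat::Json))
        .add_source(env);

    // The config file is layered last, so it overrides the environment.
    if let Some(path) = config_file {
        builder = builder.add_source(File::new(path, FileFormat::Toml));
    }

    builder.build()?.try_deserialize()
}

fn main() {
    std::env::set_var("MYAPP__METASRV_ADDRS", "127.0.0.1:3001,127.0.0.1:3002");
    let opts = load(None, "MYAPP").unwrap();
    println!("{opts:?}");
}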
    pub fn node_id(&self) -> Option<String> {
        match self {
-            Options::Metasrv(_) | Options::Cli(_) => None,
+            Options::Metasrv(_) | Options::Cli(_) | Options::Standalone(_) => None,
            Options::Datanode(opt) => opt.node_id.map(|x| x.to_string()),
            Options::Frontend(opt) => opt.node_id.clone(),
-            Options::Standalone(opt) => opt.frontend.node_id.clone(),
        }
    }
}

#[cfg(test)]
mod tests {
    use std::io::Write;

    use common_test_util::temp_dir::create_named_temp_file;
    use common_wal::config::DatanodeWalConfig;
    use datanode::config::{DatanodeOptions, ObjectStoreConfig};

    use super::*;

    #[test]
    fn test_load_layered_options() {
        let mut file = create_named_temp_file();
        let toml_str = r#"
            mode = "distributed"
            enable_memory_catalog = false
            rpc_addr = "127.0.0.1:3001"
            rpc_hostname = "127.0.0.1"
            rpc_runtime_size = 8
            mysql_addr = "127.0.0.1:4406"
            mysql_runtime_size = 2

            [meta_client]
            timeout = "3s"
            connect_timeout = "5s"
            tcp_nodelay = true

            [wal]
            provider = "raft_engine"
            dir = "/tmp/greptimedb/wal"
            file_size = "1GB"
            purge_threshold = "50GB"
            purge_interval = "10m"
            read_batch_size = 128
            sync_write = false

            [logging]
            level = "debug"
            dir = "/tmp/greptimedb/test/logs"
        "#;
        write!(file, "{}", toml_str).unwrap();

        let env_prefix = "DATANODE_UT";
        temp_env::with_vars(
            // The following environment variables will be used to override the values in the config file.
            [
                (
                    // storage.type = S3
                    [
                        env_prefix.to_string(),
                        "storage".to_uppercase(),
                        "type".to_uppercase(),
                    ]
                    .join(ENV_VAR_SEP),
                    Some("S3"),
                ),
                (
                    // storage.bucket = mybucket
                    [
                        env_prefix.to_string(),
                        "storage".to_uppercase(),
                        "bucket".to_uppercase(),
                    ]
                    .join(ENV_VAR_SEP),
                    Some("mybucket"),
                ),
                (
                    // wal.dir = /other/wal/dir
                    [
                        env_prefix.to_string(),
                        "wal".to_uppercase(),
                        "dir".to_uppercase(),
                    ]
                    .join(ENV_VAR_SEP),
                    Some("/other/wal/dir"),
                ),
                (
                    // meta_client.metasrv_addrs = 127.0.0.1:3001,127.0.0.1:3002,127.0.0.1:3003
                    [
                        env_prefix.to_string(),
                        "meta_client".to_uppercase(),
                        "metasrv_addrs".to_uppercase(),
                    ]
                    .join(ENV_VAR_SEP),
                    Some("127.0.0.1:3001,127.0.0.1:3002,127.0.0.1:3003"),
                ),
            ],
            || {
                let opts: DatanodeOptions = Options::load_layered_options(
                    Some(file.path().to_str().unwrap()),
                    env_prefix,
                    DatanodeOptions::env_list_keys(),
                )
                .unwrap();

                // Check the configs from environment variables.
                match &opts.storage.store {
                    ObjectStoreConfig::S3(s3_config) => {
                        assert_eq!(s3_config.bucket, "mybucket".to_string());
                    }
                    _ => panic!("unexpected store type"),
                }
                assert_eq!(
                    opts.meta_client.unwrap().metasrv_addrs,
                    vec![
                        "127.0.0.1:3001".to_string(),
                        "127.0.0.1:3002".to_string(),
                        "127.0.0.1:3003".to_string()
                    ]
                );

                // Should be the values from config file, not environment variables.
                let DatanodeWalConfig::RaftEngine(raft_engine_config) = opts.wal else {
                    unreachable!()
                };
                assert_eq!(raft_engine_config.dir.unwrap(), "/tmp/greptimedb/wal");

                // Should be default values.
                assert_eq!(opts.node_id, None);
            },
        );
    }
}
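To make the env-variable layering concrete, here is a minimal sketch, not part of the diff, mirroring the test above; the `MYAPP` prefix and the exported variable are hypothetical:

fn load_from_env() -> DatanodeOptions {
    // `MYAPP__LOGGING__LEVEL=debug` maps to `opts.logging.level` via the `__` separator.
    std::env::set_var("MYAPP__LOGGING__LEVEL", "debug");
    let opts: DatanodeOptions =
        Options::load_layered_options(None, "MYAPP", DatanodeOptions::env_list_keys()).unwrap();
    opts
}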
@@ -16,11 +16,16 @@ use std::sync::Arc;
use std::{fs, path};

use async_trait::async_trait;
+use cache::{
+    build_fundamental_cache_registry, with_default_composite_cache_registry, TABLE_CACHE_NAME,
+    TABLE_ROUTE_CACHE_NAME,
+};
use catalog::kvbackend::KvBackendCatalogManager;
use clap::Parser;
use common_catalog::consts::{MIN_USER_FLOW_ID, MIN_USER_TABLE_ID};
-use common_config::{metadata_store_dir, KvBackendConfig};
-use common_meta::cache_invalidator::{CacheInvalidatorRef, MultiCacheInvalidator};
+use common_config::{metadata_store_dir, Configurable, KvBackendConfig};
+use common_meta::cache::LayeredCacheRegistryBuilder;
+use common_meta::cache_invalidator::CacheInvalidatorRef;
use common_meta::ddl::flow_meta::{FlowMetadataAllocator, FlowMetadataAllocatorRef};
use common_meta::ddl::table_meta::{TableMetadataAllocator, TableMetadataAllocatorRef};
use common_meta::ddl::{DdlContext, ProcedureExecutorRef};
@@ -34,12 +39,13 @@ use common_meta::sequence::SequenceBuilder;
use common_meta::wal_options_allocator::{WalOptionsAllocator, WalOptionsAllocatorRef};
use common_procedure::ProcedureManagerRef;
use common_telemetry::info;
-use common_telemetry::logging::LoggingOptions;
+use common_telemetry::logging::{LoggingOptions, TracingOptions};
use common_time::timezone::set_default_timezone;
use common_wal::config::StandaloneWalConfig;
use datanode::config::{DatanodeOptions, ProcedureConfig, RegionEngineConfig, StorageConfig};
use datanode::datanode::{Datanode, DatanodeBuilder};
use file_engine::config::EngineConfig as FileEngineConfig;
+use flow::FlownodeBuilder;
use frontend::frontend::FrontendOptions;
use frontend::instance::builder::FrontendBuilder;
use frontend::instance::{FrontendInstance, Instance as FeInstance, StandaloneDatanodeManager};
@@ -54,14 +60,15 @@ use servers::export_metrics::ExportMetricsOption;
use servers::http::HttpOptions;
use servers::tls::{TlsMode, TlsOption};
use servers::Mode;
-use snafu::ResultExt;
+use snafu::{OptionExt, ResultExt};

use crate::error::{
-    CreateDirSnafu, IllegalConfigSnafu, InitDdlManagerSnafu, InitMetadataSnafu, InitTimezoneSnafu,
-    Result, ShutdownDatanodeSnafu, ShutdownFrontendSnafu, StartDatanodeSnafu, StartFrontendSnafu,
+    BuildCacheRegistrySnafu, CacheRequiredSnafu, CreateDirSnafu, IllegalConfigSnafu,
+    InitDdlManagerSnafu, InitMetadataSnafu, InitTimezoneSnafu, LoadLayeredConfigSnafu, Result,
+    ShutdownDatanodeSnafu, ShutdownFrontendSnafu, StartDatanodeSnafu, StartFrontendSnafu,
    StartProcedureManagerSnafu, StartWalOptionsAllocatorSnafu, StopProcedureManagerSnafu,
};
-use crate::options::{GlobalOptions, MixOptions, Options};
+use crate::options::{GlobalOptions, Options};
use crate::App;

#[derive(Parser)]
@@ -71,7 +78,7 @@ pub struct Command {
}

impl Command {
-    pub async fn build(self, opts: MixOptions) -> Result<Instance> {
+    pub async fn build(self, opts: StandaloneOptions) -> Result<Instance> {
        self.subcmd.build(opts).await
    }

@@ -86,7 +93,7 @@ enum SubCommand {
}

impl SubCommand {
-    async fn build(self, opts: MixOptions) -> Result<Instance> {
+    async fn build(self, opts: StandaloneOptions) -> Result<Instance> {
        match self {
            SubCommand::Start(cmd) => cmd.build(opts).await,
        }
@@ -121,12 +128,7 @@ pub struct StandaloneOptions {
    /// Options for different store engines.
    pub region_engine: Vec<RegionEngineConfig>,
    pub export_metrics: ExportMetricsOption,
-}
-
-impl StandaloneOptions {
-    pub fn env_list_keys() -> Option<&'static [&'static str]> {
-        Some(&["wal.broker_endpoints"])
-    }
+    pub tracing: TracingOptions,
}

impl Default for StandaloneOptions {
@@ -153,39 +155,48 @@ impl Default for StandaloneOptions {
                RegionEngineConfig::Mito(MitoConfig::default()),
                RegionEngineConfig::File(FileEngineConfig::default()),
            ],
+            tracing: TracingOptions::default(),
        }
    }
}

+impl Configurable<'_> for StandaloneOptions {
+    fn env_list_keys() -> Option<&'static [&'static str]> {
+        Some(&["wal.broker_endpoints"])
+    }
+}

impl StandaloneOptions {
-    fn frontend_options(self) -> FrontendOptions {
+    pub fn frontend_options(&self) -> FrontendOptions {
+        let cloned_opts = self.clone();
        FrontendOptions {
-            mode: self.mode,
-            default_timezone: self.default_timezone,
-            http: self.http,
-            grpc: self.grpc,
-            mysql: self.mysql,
-            postgres: self.postgres,
-            opentsdb: self.opentsdb,
-            influxdb: self.influxdb,
-            prom_store: self.prom_store,
+            mode: cloned_opts.mode,
+            default_timezone: cloned_opts.default_timezone,
+            http: cloned_opts.http,
+            grpc: cloned_opts.grpc,
+            mysql: cloned_opts.mysql,
+            postgres: cloned_opts.postgres,
+            opentsdb: cloned_opts.opentsdb,
+            influxdb: cloned_opts.influxdb,
+            prom_store: cloned_opts.prom_store,
            meta_client: None,
-            logging: self.logging,
-            user_provider: self.user_provider,
+            logging: cloned_opts.logging,
+            user_provider: cloned_opts.user_provider,
            // Hand the export metrics task over to the frontend for execution in standalone mode
-            export_metrics: self.export_metrics,
+            export_metrics: cloned_opts.export_metrics,
            ..Default::default()
        }
    }

-    fn datanode_options(self) -> DatanodeOptions {
+    pub fn datanode_options(&self) -> DatanodeOptions {
+        let cloned_opts = self.clone();
        DatanodeOptions {
            node_id: Some(0),
-            enable_telemetry: self.enable_telemetry,
-            wal: self.wal.into(),
-            storage: self.storage,
-            region_engine: self.region_engine,
-            rpc_addr: self.grpc.addr,
+            enable_telemetry: cloned_opts.enable_telemetry,
+            wal: cloned_opts.wal.into(),
+            storage: cloned_opts.storage,
+            region_engine: cloned_opts.region_engine,
+            rpc_addr: cloned_opts.grpc.addr,
            ..Default::default()
        }
    }
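A sketch of how one standalone config now fans out into per-role option sets; `frontend_options` and `datanode_options` are the accessors above, while the wrapper function is hypothetical:

fn split_roles(opts: &StandaloneOptions) -> (FrontendOptions, DatanodeOptions) {
    // Both accessors take `&self` now, so the source options stay usable afterwards.
    let fe_opts = opts.frontend_options();
    let dn_opts = opts.datanode_options();
    (fe_opts, dn_opts)
}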
@@ -277,20 +288,25 @@ pub struct StartCommand {

impl StartCommand {
    fn load_options(&self, global_options: &GlobalOptions) -> Result<Options> {
-        let opts: StandaloneOptions = Options::load_layered_options(
-            self.config_file.as_deref(),
-            self.env_prefix.as_ref(),
-            StandaloneOptions::env_list_keys(),
-        )?;
-
-        self.convert_options(global_options, opts)
+        Ok(Options::Standalone(Box::new(
+            self.merge_with_cli_options(
+                global_options,
+                StandaloneOptions::load_layered_options(
+                    self.config_file.as_deref(),
+                    self.env_prefix.as_ref(),
+                )
+                .context(LoadLayeredConfigSnafu)?,
+            )?,
+        )))
    }

-    pub fn convert_options(
+    // The precedence order is: cli > config file > environment variables > default values.
+    fn merge_with_cli_options(
        &self,
        global_options: &GlobalOptions,
        mut opts: StandaloneOptions,
-    ) -> Result<Options> {
+    ) -> Result<StandaloneOptions> {
        // Should always be standalone mode.
        opts.mode = Mode::Standalone;

        if let Some(dir) = &global_options.log_dir {
@@ -301,6 +317,11 @@ impl StartCommand {
            opts.logging.level.clone_from(&global_options.log_level);
        }

+        opts.tracing = TracingOptions {
+            #[cfg(feature = "tokio-console")]
+            tokio_console_addr: global_options.tokio_console_addr.clone(),
+        };
+
        let tls_opts = TlsOption::new(
            self.tls_mode.clone(),
            self.tls_cert_path.clone(),
@@ -323,8 +344,7 @@ impl StartCommand {
                msg: format!(
                    "gRPC listen address conflicts with datanode reserved gRPC addr: {datanode_grpc_addr}",
                ),
-            }
-            .fail();
+            }.fail();
        }
        opts.grpc.addr.clone_from(addr)
    }
@@ -347,47 +367,32 @@ impl StartCommand {

        opts.user_provider.clone_from(&self.user_provider);

-        let metadata_store = opts.metadata_store.clone();
-        let procedure = opts.procedure.clone();
-        let frontend = opts.clone().frontend_options();
-        let logging = opts.logging.clone();
-        let wal_meta = opts.wal.clone().into();
-        let datanode = opts.datanode_options().clone();
-
-        Ok(Options::Standalone(Box::new(MixOptions {
-            procedure,
-            metadata_store,
-            data_home: datanode.storage.data_home.to_string(),
-            frontend,
-            datanode,
-            logging,
-            wal_meta,
-        })))
+        Ok(opts)
    }

    #[allow(unreachable_code)]
    #[allow(unused_variables)]
    #[allow(clippy::diverging_sub_expression)]
-    async fn build(self, opts: MixOptions) -> Result<Instance> {
-        info!("Standalone start command: {:#?}", self);
+    async fn build(self, opts: StandaloneOptions) -> Result<Instance> {
+        info!("Building standalone instance with {opts:#?}");

-        let mut fe_opts = opts.frontend;
+        let mut fe_opts = opts.frontend_options();
        #[allow(clippy::unnecessary_mut_passed)]
        let fe_plugins = plugins::setup_frontend_plugins(&mut fe_opts) // mut ref is a MUST, DO NOT change it
            .await
            .context(StartFrontendSnafu)?;

-        let dn_opts = opts.datanode;
+        let dn_opts = opts.datanode_options();

        set_default_timezone(fe_opts.default_timezone.as_deref()).context(InitTimezoneSnafu)?;

+        let data_home = &dn_opts.storage.data_home;
        // Ensure the data_home directory exists.
-        fs::create_dir_all(path::Path::new(&opts.data_home)).context(CreateDirSnafu {
-            dir: &opts.data_home,
-        })?;
+        fs::create_dir_all(path::Path::new(data_home))
+            .context(CreateDirSnafu { dir: data_home })?;

-        let metadata_dir = metadata_store_dir(&opts.data_home);
+        let metadata_dir = metadata_store_dir(data_home);
        let (kv_backend, procedure_manager) = FeInstance::try_build_standalone_components(
            metadata_dir,
            opts.metadata_store.clone(),
@@ -396,20 +401,52 @@ impl StartCommand {
        .await
        .context(StartFrontendSnafu)?;

-        let multi_cache_invalidator = Arc::new(MultiCacheInvalidator::default());
+        // Builds the cache registry
+        let layered_cache_builder = LayeredCacheRegistryBuilder::default();
+        let fundamental_cache_registry = build_fundamental_cache_registry(kv_backend.clone());
+        let layered_cache_registry = Arc::new(
+            with_default_composite_cache_registry(
+                layered_cache_builder.add_cache_registry(fundamental_cache_registry),
+            )
+            .context(BuildCacheRegistrySnafu)?
+            .build(),
+        );
+
+        let table_cache = layered_cache_registry.get().context(CacheRequiredSnafu {
+            name: TABLE_CACHE_NAME,
+        })?;
+        let table_route_cache = layered_cache_registry.get().context(CacheRequiredSnafu {
+            name: TABLE_ROUTE_CACHE_NAME,
+        })?;
        let catalog_manager = KvBackendCatalogManager::new(
            dn_opts.mode,
            None,
            kv_backend.clone(),
-            multi_cache_invalidator.clone(),
+            table_cache,
+            table_route_cache,
        )
        .await;

+        let table_metadata_manager =
+            Self::create_table_metadata_manager(kv_backend.clone()).await?;
+
+        let flow_builder = FlownodeBuilder::new(
+            1,
+            Default::default(),
+            fe_plugins.clone(),
+            table_metadata_manager.clone(),
+            catalog_manager.clone(),
+        );
+        let flownode = Arc::new(flow_builder.build().await);
+
        let builder =
            DatanodeBuilder::new(dn_opts, fe_plugins.clone()).with_kv_backend(kv_backend.clone());
        let datanode = builder.build().await.context(StartDatanodeSnafu)?;

-        let node_manager = Arc::new(StandaloneDatanodeManager(datanode.region_server()));
+        let node_manager = Arc::new(StandaloneDatanodeManager {
+            region_server: datanode.region_server(),
+            flow_server: flownode.clone(),
+        });

        let table_id_sequence = Arc::new(
            SequenceBuilder::new(TABLE_ID_SEQ, kv_backend.clone())
@@ -424,11 +461,9 @@ impl StartCommand {
                .build(),
        );
        let wal_options_allocator = Arc::new(WalOptionsAllocator::new(
-            opts.wal_meta.clone(),
+            opts.wal.into(),
            kv_backend.clone(),
        ));
-        let table_metadata_manager =
-            Self::create_table_metadata_manager(kv_backend.clone()).await?;
        let flow_metadata_manager = Arc::new(FlowMetadataManager::new(kv_backend.clone()));
        let table_meta_allocator = Arc::new(TableMetadataAllocator::new(
            table_id_sequence,
@@ -441,7 +476,7 @@ impl StartCommand {
        let ddl_task_executor = Self::create_ddl_task_executor(
            procedure_manager.clone(),
            node_manager.clone(),
-            multi_cache_invalidator,
+            layered_cache_registry.clone(),
            table_metadata_manager,
            table_meta_allocator,
            flow_metadata_manager,
@@ -449,12 +484,24 @@ impl StartCommand {
        )
        .await?;

-        let mut frontend =
-            FrontendBuilder::new(kv_backend, catalog_manager, node_manager, ddl_task_executor)
-                .with_plugin(fe_plugins.clone())
-                .try_build()
-                .await
-                .context(StartFrontendSnafu)?;
+        let mut frontend = FrontendBuilder::new(
+            kv_backend,
+            layered_cache_registry,
+            catalog_manager,
+            node_manager,
+            ddl_task_executor,
+        )
+        .with_plugin(fe_plugins.clone())
+        .try_build()
+        .await
+        .context(StartFrontendSnafu)?;
+
+        // The flow server needs to be able to use the frontend to write insert requests back.
+        flownode
+            .set_frontend_invoker(Box::new(frontend.clone()))
+            .await;
+        // TODO(discord9): unify with adding `start` and `shutdown` method to flownode too.
+        let _handle = flownode.clone().run_background();

        let servers = Services::new(fe_opts.clone(), Arc::new(frontend.clone()), fe_plugins)
            .build()
@@ -523,13 +570,14 @@ mod tests {

    use auth::{Identity, Password, UserProviderRef};
    use common_base::readable_size::ReadableSize;
+    use common_config::ENV_VAR_SEP;
    use common_test_util::temp_dir::create_named_temp_file;
    use common_wal::config::DatanodeWalConfig;
    use datanode::config::{FileConfig, GcsConfig};
    use servers::Mode;

    use super::*;
-    use crate::options::{GlobalOptions, ENV_VAR_SEP};
+    use crate::options::GlobalOptions;

    #[tokio::test]
    async fn test_try_from_start_command_to_anymap() {
@@ -621,8 +669,8 @@ mod tests {
        else {
            unreachable!()
        };
-        let fe_opts = options.frontend;
-        let dn_opts = options.datanode;
+        let fe_opts = options.frontend_options();
+        let dn_opts = options.datanode_options();
        let logging_opts = options.logging;
        assert_eq!(Mode::Standalone, fe_opts.mode);
        assert_eq!("127.0.0.1:4000".to_string(), fe_opts.http.addr);
@@ -759,19 +807,20 @@ mod tests {
                assert_eq!(opts.logging.level.as_ref().unwrap(), "debug");

                // Should be read from cli, cli > config file > env > default values.
-                assert_eq!(opts.frontend.http.addr, "127.0.0.1:14000");
-                assert_eq!(ReadableSize::mb(64), opts.frontend.http.body_limit);
+                let fe_opts = opts.frontend_options();
+                assert_eq!(fe_opts.http.addr, "127.0.0.1:14000");
+                assert_eq!(ReadableSize::mb(64), fe_opts.http.body_limit);

                // Should be default value.
-                assert_eq!(opts.frontend.grpc.addr, GrpcOptions::default().addr);
+                assert_eq!(fe_opts.grpc.addr, GrpcOptions::default().addr);
            },
        );
    }

    #[test]
    fn test_load_default_standalone_options() {
-        let options: StandaloneOptions =
-            Options::load_layered_options(None, "GREPTIMEDB_FRONTEND", None).unwrap();
+        let options =
+            StandaloneOptions::load_layered_options(None, "GREPTIMEDB_STANDALONE").unwrap();
        let default_options = StandaloneOptions::default();
        assert_eq!(options.mode, default_options.mode);
        assert_eq!(options.enable_telemetry, default_options.enable_telemetry);
@@ -33,16 +33,21 @@ pub enum Error {
    Overflow {
        src_len: usize,
        dst_len: usize,
+        #[snafu(implicit)]
        location: Location,
    },

    #[snafu(display("Buffer underflow"))]
-    Underflow { location: Location },
+    Underflow {
+        #[snafu(implicit)]
+        location: Location,
+    },

    #[snafu(display("IO operation reaches EOF"))]
    Eof {
        #[snafu(source)]
        error: std::io::Error,
+        #[snafu(implicit)]
        location: Location,
    },
}
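The recurring change in these error enums is the new `#[snafu(implicit)]` attribute, which lets snafu capture the `Location` automatically at the error-construction site instead of requiring callers to pass it. A standalone sketch, not part of the diff:

use snafu::{ensure, Location, Snafu};

#[derive(Debug, Snafu)]
enum DemoError {
    #[snafu(display("Buffer underflow"))]
    Underflow {
        // With `implicit`, snafu fills this in from the call site;
        // callers no longer supply a `location` by hand.
        #[snafu(implicit)]
        location: Location,
    },
}

fn take(len: usize, want: usize) -> Result<(), DemoError> {
    // The generated `UnderflowSnafu` selector needs no explicit location argument.
    ensure!(want <= len, UnderflowSnafu);
    Ok(())
}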
@@ -26,6 +26,7 @@ pub enum Error {
    #[snafu(display("Invalid full table name: {}", table_name))]
    InvalidFullTableName {
        table_name: String,
+        #[snafu(implicit)]
        location: Location,
    },
}
@@ -9,6 +9,22 @@ workspace = true

[dependencies]
common-base.workspace = true
common-error.workspace = true
common-macro.workspace = true
config.workspace = true
num_cpus.workspace = true
serde.workspace = true
serde_json.workspace = true
snafu.workspace = true
sysinfo.workspace = true
toml.workspace = true

[dev-dependencies]
common-telemetry.workspace = true
common-test-util.workspace = true
common-wal.workspace = true
datanode.workspace = true
meta-client.workspace = true
serde.workspace = true
temp-env = "0.3"
tempfile.workspace = true

src/common/config/src/config.rs (new file, 248 lines)
@@ -0,0 +1,248 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use config::{Environment, File, FileFormat};
use serde::{Deserialize, Serialize};
use snafu::ResultExt;

use crate::error::{LoadLayeredConfigSnafu, Result, SerdeJsonSnafu, TomlFormatSnafu};

/// Separator for environment variables. For example, `DATANODE__STORAGE__MANIFEST__CHECKPOINT_MARGIN`.
pub const ENV_VAR_SEP: &str = "__";

/// Separator for list values in environment variables. For example, `localhost:3001,localhost:3002,localhost:3003`.
pub const ENV_LIST_SEP: &str = ",";

/// Configuration trait that defines the common interface for configuration that can be loaded from multiple sources and serialized to TOML.
pub trait Configurable<'de>: Serialize + Deserialize<'de> + Default + Sized {
    /// Load the configuration from multiple sources and merge them.
    /// The precedence order is: config file > environment variables > default values.
    /// `env_prefix` is the prefix of environment variables, e.g. "FRONTEND__xxx".
    /// The function will use a dunder (double underscore) `__` as the separator for environment variables, for example:
    /// `DATANODE__STORAGE__MANIFEST__CHECKPOINT_MARGIN` will be mapped to the `DatanodeOptions.storage.manifest.checkpoint_margin` field in the configuration.
    /// `env_list_keys` returns the keys that should be parsed as a list; for example, return `Some(&["meta_client_options.metasrv_addrs"])` to parse `GREPTIMEDB_METASRV__META_CLIENT_OPTIONS__METASRV_ADDRS` as a list.
    /// The function will use a comma `,` as the separator for list values, for example: `127.0.0.1:3001,127.0.0.1:3002,127.0.0.1:3003`.
    fn load_layered_options(config_file: Option<&str>, env_prefix: &str) -> Result<Self> {
        let default_opts = Self::default();

        let env_source = {
            let mut env = Environment::default();

            if !env_prefix.is_empty() {
                env = env.prefix(env_prefix);
            }

            if let Some(list_keys) = Self::env_list_keys() {
                env = env.list_separator(ENV_LIST_SEP);
                for key in list_keys {
                    env = env.with_list_parse_key(key);
                }
            }

            env.try_parsing(true)
                .separator(ENV_VAR_SEP)
                .ignore_empty(true)
        };

        // Workaround: Replacement for `Config::try_from(&default_opts)` because
        // `ConfigSerializer` cannot handle the case of an empty struct contained
        // within an iterative structure.
        // See: https://github.com/mehcode/config-rs/issues/461
        let json_str = serde_json::to_string(&default_opts).context(SerdeJsonSnafu)?;
        let default_config = File::from_str(&json_str, FileFormat::Json);

        // Add default values and environment variables as the sources of the configuration.
        let mut layered_config = config::Config::builder()
            .add_source(default_config)
            .add_source(env_source);

        // Add the config file as a source of the configuration if it is specified.
        if let Some(config_file) = config_file {
            layered_config = layered_config.add_source(File::new(config_file, FileFormat::Toml));
        }

        let opts = layered_config
            .build()
            .and_then(|x| x.try_deserialize())
            .context(LoadLayeredConfigSnafu)?;

        Ok(opts)
    }

    /// List of toml keys that should be parsed as a list.
    fn env_list_keys() -> Option<&'static [&'static str]> {
        None
    }

    /// Serialize the configuration to a TOML string.
    fn to_toml(&self) -> Result<String> {
        toml::to_string(&self).context(TomlFormatSnafu)
    }
}

#[cfg(test)]
mod tests {
    use std::io::Write;

    use common_telemetry::logging::LoggingOptions;
    use common_test_util::temp_dir::create_named_temp_file;
    use common_wal::config::DatanodeWalConfig;
    use datanode::config::{ObjectStoreConfig, StorageConfig};
    use meta_client::MetaClientOptions;
    use serde::{Deserialize, Serialize};

    use super::*;
    use crate::Mode;

    #[derive(Debug, Serialize, Deserialize)]
    struct TestDatanodeConfig {
        mode: Mode,
        node_id: Option<u64>,
        logging: LoggingOptions,
        meta_client: Option<MetaClientOptions>,
        wal: DatanodeWalConfig,
        storage: StorageConfig,
    }

    impl Default for TestDatanodeConfig {
        fn default() -> Self {
            Self {
                mode: Mode::Distributed,
                node_id: None,
                logging: LoggingOptions::default(),
                meta_client: None,
                wal: DatanodeWalConfig::default(),
                storage: StorageConfig::default(),
            }
        }
    }

    impl Configurable<'_> for TestDatanodeConfig {
        fn env_list_keys() -> Option<&'static [&'static str]> {
            Some(&["meta_client.metasrv_addrs"])
        }
    }

    #[test]
    fn test_load_layered_options() {
        let mut file = create_named_temp_file();
        let toml_str = r#"
            mode = "distributed"
            enable_memory_catalog = false
            rpc_addr = "127.0.0.1:3001"
            rpc_hostname = "127.0.0.1"
            rpc_runtime_size = 8
            mysql_addr = "127.0.0.1:4406"
            mysql_runtime_size = 2

            [meta_client]
            timeout = "3s"
            connect_timeout = "5s"
            tcp_nodelay = true

            [wal]
            provider = "raft_engine"
            dir = "/tmp/greptimedb/wal"
            file_size = "1GB"
            purge_threshold = "50GB"
            purge_interval = "10m"
            read_batch_size = 128
            sync_write = false

            [logging]
            level = "debug"
            dir = "/tmp/greptimedb/test/logs"
        "#;
        write!(file, "{}", toml_str).unwrap();

        let env_prefix = "DATANODE_UT";
        temp_env::with_vars(
            // The following environment variables will be used to override the values in the config file.
            [
                (
                    // storage.type = S3
                    [
                        env_prefix.to_string(),
                        "storage".to_uppercase(),
                        "type".to_uppercase(),
                    ]
                    .join(ENV_VAR_SEP),
                    Some("S3"),
                ),
                (
                    // storage.bucket = mybucket
                    [
                        env_prefix.to_string(),
                        "storage".to_uppercase(),
                        "bucket".to_uppercase(),
                    ]
                    .join(ENV_VAR_SEP),
                    Some("mybucket"),
                ),
                (
                    // wal.dir = /other/wal/dir
                    [
                        env_prefix.to_string(),
                        "wal".to_uppercase(),
                        "dir".to_uppercase(),
                    ]
                    .join(ENV_VAR_SEP),
                    Some("/other/wal/dir"),
                ),
                (
                    // meta_client.metasrv_addrs = 127.0.0.1:3001,127.0.0.1:3002,127.0.0.1:3003
                    [
                        env_prefix.to_string(),
                        "meta_client".to_uppercase(),
                        "metasrv_addrs".to_uppercase(),
                    ]
                    .join(ENV_VAR_SEP),
                    Some("127.0.0.1:3001,127.0.0.1:3002,127.0.0.1:3003"),
                ),
            ],
            || {
                let opts = TestDatanodeConfig::load_layered_options(
                    Some(file.path().to_str().unwrap()),
                    env_prefix,
                )
                .unwrap();

                // Check the configs from environment variables.
                match &opts.storage.store {
                    ObjectStoreConfig::S3(s3_config) => {
                        assert_eq!(s3_config.bucket, "mybucket".to_string());
                    }
                    _ => panic!("unexpected store type"),
                }
                assert_eq!(
                    opts.meta_client.unwrap().metasrv_addrs,
                    vec![
                        "127.0.0.1:3001".to_string(),
                        "127.0.0.1:3002".to_string(),
                        "127.0.0.1:3003".to_string()
                    ]
                );

                // Should be the values from the config file, not environment variables.
                let DatanodeWalConfig::RaftEngine(raft_engine_config) = opts.wal else {
                    unreachable!()
                };
                assert_eq!(raft_engine_config.dir.unwrap(), "/tmp/greptimedb/wal");

                // Should be default values.
                assert_eq!(opts.node_id, None);
            },
        );
    }
}
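A minimal sketch of adopting the new trait for one's own options type, not part of the diff; the struct, its fields, and the import paths are assumptions based on the module layout above:

use common_config::{error::Result, Configurable};
use serde::{Deserialize, Serialize};

/// Hypothetical options struct; any `Serialize + Deserialize + Default` type qualifies.
#[derive(Debug, Default, Serialize, Deserialize)]
struct MyServiceOptions {
    addr: String,
    peers: Vec<String>,
}

impl Configurable<'_> for MyServiceOptions {
    // Only `peers` needs list parsing; `load_layered_options` and `to_toml`
    // come from the trait's default implementations.
    fn env_list_keys() -> Option<&'static [&'static str]> {
        Some(&["peers"])
    }
}

fn load() -> Result<MyServiceOptions> {
    // Precedence: my.toml > MYSERVICE__* env vars > Default::default().
    // e.g. MYSERVICE__PEERS="host1:3001,host2:3001" fills the `peers` list.
    MyServiceOptions::load_layered_options(Some("my.toml"), "MYSERVICE")
}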
src/common/config/src/error.rs (new file, 67 lines)
@@ -0,0 +1,67 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::any::Any;

use common_error::ext::ErrorExt;
use common_error::status_code::StatusCode;
use common_macro::stack_trace_debug;
use config::ConfigError;
use snafu::{Location, Snafu};

pub type Result<T> = std::result::Result<T, Error>;

#[derive(Snafu)]
#[snafu(visibility(pub))]
#[stack_trace_debug]
pub enum Error {
    #[snafu(display("Failed to load layered config"))]
    LoadLayeredConfig {
        #[snafu(source)]
        error: ConfigError,
        #[snafu(implicit)]
        location: Location,
    },

    #[snafu(display("Failed to serialize or deserialize JSON"))]
    SerdeJson {
        #[snafu(source)]
        error: serde_json::error::Error,
        #[snafu(implicit)]
        location: Location,
    },

    #[snafu(display("Failed to serialize options to TOML"))]
    TomlFormat {
        #[snafu(source)]
        error: toml::ser::Error,
        #[snafu(implicit)]
        location: Location,
    },
}

impl ErrorExt for Error {
    fn status_code(&self) -> StatusCode {
        match self {
            Error::TomlFormat { .. } | Error::LoadLayeredConfig { .. } => {
                StatusCode::InvalidArguments
            }
            Error::SerdeJson { .. } => StatusCode::Unexpected,
        }
    }

    fn as_any(&self) -> &dyn Any {
        self
    }
}
@@ -12,9 +12,12 @@
// See the License for the specific language governing permissions and
// limitations under the License.

+pub mod config;
+pub mod error;
pub mod utils;

use common_base::readable_size::ReadableSize;
+pub use config::*;
use serde::{Deserialize, Serialize};

pub fn metadata_store_dir(store_dir: &str) -> String {

@@ -30,7 +30,7 @@ derive_builder.workspace = true
futures.workspace = true
lazy_static.workspace = true
object-store.workspace = true
-orc-rust = { git = "https://github.com/MichaelScofield/orc-rs.git", rev = "17347f5f084ac937863317df882218055c4ea8c1" }
+orc-rust = { git = "https://github.com/datafusion-contrib/datafusion-orc.git", rev = "502217315726314c4008808fe169764529640599" }
parquet.workspace = true
paste = "1.0"
regex = "1.7"
@@ -29,27 +29,38 @@ pub enum Error {
    #[snafu(display("Unsupported compression type: {}", compression_type))]
    UnsupportedCompressionType {
        compression_type: String,
+        #[snafu(implicit)]
        location: Location,
    },

    #[snafu(display("Unsupported backend protocol: {}, url: {}", protocol, url))]
    UnsupportedBackendProtocol {
        protocol: String,
+        #[snafu(implicit)]
        location: Location,
        url: String,
    },

    #[snafu(display("Unsupported format protocol: {}", format))]
-    UnsupportedFormat { format: String, location: Location },
+    UnsupportedFormat {
+        format: String,
+        #[snafu(implicit)]
+        location: Location,
+    },

    #[snafu(display("Empty host: {}", url))]
-    EmptyHostPath { url: String, location: Location },
+    EmptyHostPath {
+        url: String,
+        #[snafu(implicit)]
+        location: Location,
+    },

    #[snafu(display("Invalid url: {}", url))]
    InvalidUrl {
        url: String,
        #[snafu(source)]
        error: ParseError,
+        #[snafu(implicit)]
        location: Location,
    },

@@ -57,19 +68,22 @@ pub enum Error {
    BuildBackend {
        #[snafu(source)]
        error: object_store::Error,
+        #[snafu(implicit)]
        location: Location,
    },

    #[snafu(display("Failed to build orc reader"))]
    OrcReader {
+        #[snafu(implicit)]
        location: Location,
        #[snafu(source)]
-        error: orc_rust::error::Error,
+        error: orc_rust::error::OrcError,
    },

    #[snafu(display("Failed to read object from path: {}", path))]
    ReadObject {
        path: String,
+        #[snafu(implicit)]
        location: Location,
        #[snafu(source)]
        error: object_store::Error,
@@ -78,6 +92,7 @@ pub enum Error {
    #[snafu(display("Failed to write object to path: {}", path))]
    WriteObject {
        path: String,
+        #[snafu(implicit)]
        location: Location,
        #[snafu(source)]
        error: object_store::Error,
@@ -87,11 +102,13 @@ pub enum Error {
    AsyncWrite {
        #[snafu(source)]
        error: std::io::Error,
+        #[snafu(implicit)]
        location: Location,
    },

    #[snafu(display("Failed to write record batch"))]
    WriteRecordBatch {
+        #[snafu(implicit)]
        location: Location,
        #[snafu(source)]
        error: ArrowError,
@@ -99,6 +116,7 @@ pub enum Error {

    #[snafu(display("Failed to encode record batch"))]
    EncodeRecordBatch {
+        #[snafu(implicit)]
        location: Location,
        #[snafu(source)]
        error: ParquetError,
@@ -106,6 +124,7 @@ pub enum Error {

    #[snafu(display("Failed to read record batch"))]
    ReadRecordBatch {
+        #[snafu(implicit)]
        location: Location,
        #[snafu(source)]
        error: datafusion::error::DataFusionError,
@@ -113,6 +132,7 @@ pub enum Error {

    #[snafu(display("Failed to read parquet"))]
    ReadParquetSnafu {
+        #[snafu(implicit)]
        location: Location,
        #[snafu(source)]
        error: datafusion::parquet::errors::ParquetError,
@@ -120,6 +140,7 @@ pub enum Error {

    #[snafu(display("Failed to convert parquet to schema"))]
    ParquetToSchema {
+        #[snafu(implicit)]
        location: Location,
        #[snafu(source)]
        error: datafusion::parquet::errors::ParquetError,
@@ -127,6 +148,7 @@ pub enum Error {

    #[snafu(display("Failed to infer schema from file"))]
    InferSchema {
+        #[snafu(implicit)]
        location: Location,
        #[snafu(source)]
        error: arrow_schema::ArrowError,
@@ -135,16 +157,22 @@ pub enum Error {
    #[snafu(display("Failed to list objects in path: {}", path))]
    ListObjects {
        path: String,
+        #[snafu(implicit)]
        location: Location,
        #[snafu(source)]
        error: object_store::Error,
    },

    #[snafu(display("Invalid connection: {}", msg))]
-    InvalidConnection { msg: String, location: Location },
+    InvalidConnection {
+        msg: String,
+        #[snafu(implicit)]
+        location: Location,
+    },

    #[snafu(display("Failed to join handle"))]
    JoinHandle {
+        #[snafu(implicit)]
        location: Location,
        #[snafu(source)]
        error: tokio::task::JoinError,
@@ -154,6 +182,7 @@ pub enum Error {
    ParseFormat {
        key: &'static str,
        value: String,
+        #[snafu(implicit)]
        location: Location,
    },

@@ -161,15 +190,20 @@ pub enum Error {
    MergeSchema {
        #[snafu(source)]
        error: arrow_schema::ArrowError,
+        #[snafu(implicit)]
        location: Location,
    },

    #[snafu(display("Buffered writer closed"))]
-    BufferedWriterClosed { location: Location },
+    BufferedWriterClosed {
+        #[snafu(implicit)]
+        location: Location,
+    },

    #[snafu(display("Failed to write parquet file, path: {}", path))]
    WriteParquet {
        path: String,
+        #[snafu(implicit)]
        location: Location,
        #[snafu(source)]
        error: parquet::errors::ParquetError,
@@ -21,9 +21,8 @@ use datafusion::datasource::physical_plan::{FileMeta, FileOpenFuture, FileOpener
use datafusion::error::{DataFusionError, Result as DfResult};
use futures::{StreamExt, TryStreamExt};
use object_store::ObjectStore;
-use orc_rust::arrow_reader::{create_arrow_schema, Cursor};
+use orc_rust::arrow_reader::ArrowReaderBuilder;
use orc_rust::async_arrow_reader::ArrowStreamReader;
-use orc_rust::reader::Reader;
use snafu::ResultExt;
use tokio::io::{AsyncRead, AsyncSeek};

@@ -33,28 +32,20 @@ use crate::file_format::FileFormat;
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub struct OrcFormat;

-pub async fn new_orc_cursor<R: AsyncRead + AsyncSeek + Unpin + Send + 'static>(
-    reader: R,
-) -> Result<Cursor<R>> {
-    let reader = Reader::new_async(reader)
-        .await
-        .context(error::OrcReaderSnafu)?;
-    let cursor = Cursor::root(reader).context(error::OrcReaderSnafu)?;
-    Ok(cursor)
-}
-
pub async fn new_orc_stream_reader<R: AsyncRead + AsyncSeek + Unpin + Send + 'static>(
    reader: R,
) -> Result<ArrowStreamReader<R>> {
-    let cursor = new_orc_cursor(reader).await?;
-    Ok(ArrowStreamReader::new(cursor, None))
+    let reader_build = ArrowReaderBuilder::try_new_async(reader)
+        .await
+        .context(error::OrcReaderSnafu)?;
+    Ok(reader_build.build_async())
}

pub async fn infer_orc_schema<R: AsyncRead + AsyncSeek + Unpin + Send + 'static>(
    reader: R,
) -> Result<Schema> {
-    let cursor = new_orc_cursor(reader).await?;
-    Ok(create_arrow_schema(&cursor))
+    let reader = new_orc_stream_reader(reader).await?;
+    Ok(reader.schema().as_ref().clone())
}

#[async_trait]
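A usage sketch of the two helpers above, not part of the diff; it assumes `ArrowStreamReader` yields Arrow record batches as a `futures` stream (as its name suggests), and the file path is hypothetical:

use futures::TryStreamExt;
use tokio::fs::File;

async fn dump_orc(path: &str) -> Result<()> {
    // `tokio::fs::File` satisfies the `AsyncRead + AsyncSeek + Unpin + Send` bound.
    let file = File::open(path).await.unwrap();
    let schema = infer_orc_schema(file).await?;
    println!("inferred schema: {schema:?}");

    // Re-open and stream the record batches.
    let file = File::open(path).await.unwrap();
    let mut reader = new_orc_stream_reader(file).await?;
    while let Some(batch) = reader.try_next().await.unwrap() {
        println!("read {} rows", batch.num_rows());
    }
    Ok(())
}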
@@ -25,6 +25,7 @@ pub enum Error {
    #[snafu(display("Decimal out of range, decimal value: {}", value))]
    BigDecimalOutOfRange {
        value: BigDecimal,
+        #[snafu(implicit)]
        location: Location,
    },

@@ -43,7 +44,11 @@ pub enum Error {
    },

    #[snafu(display("Invalid precision or scale, reason: {}", reason))]
-    InvalidPrecisionOrScale { reason: String, location: Location },
+    InvalidPrecisionOrScale {
+        reason: String,
+        #[snafu(implicit)]
+        location: Location,
+    },
}

impl ErrorExt for Error {
@@ -23,6 +23,7 @@ use snafu::{Location, Snafu};
pub enum Error {
    #[snafu(display("External error"))]
    External {
+        #[snafu(implicit)]
        location: Location,
        source: BoxedError,
    },

@@ -24,10 +24,15 @@ use snafu::{Location, Snafu};
#[stack_trace_debug]
pub enum Error {
    #[snafu(display("Illegal delete request, reason: {reason}"))]
-    IllegalDeleteRequest { reason: String, location: Location },
+    IllegalDeleteRequest {
+        reason: String,
+        #[snafu(implicit)]
+        location: Location,
+    },

    #[snafu(display("Column datatype error"))]
    ColumnDataType {
+        #[snafu(implicit)]
        location: Location,
        source: api::error::Error,
    },
@@ -40,39 +45,63 @@ pub enum Error {
    DuplicatedTimestampColumn {
        exists: String,
        duplicated: String,
+        #[snafu(implicit)]
        location: Location,
    },

    #[snafu(display("Duplicated column name in gRPC requests, name: {}", name,))]
-    DuplicatedColumnName { name: String, location: Location },
+    DuplicatedColumnName {
+        name: String,
+        #[snafu(implicit)]
+        location: Location,
+    },

    #[snafu(display("Missing timestamp column, msg: {}", msg))]
-    MissingTimestampColumn { msg: String, location: Location },
+    MissingTimestampColumn {
+        msg: String,
+        #[snafu(implicit)]
+        location: Location,
+    },

    #[snafu(display("Invalid column proto: {}", err_msg))]
-    InvalidColumnProto { err_msg: String, location: Location },
+    InvalidColumnProto {
+        err_msg: String,
+        #[snafu(implicit)]
+        location: Location,
+    },

    #[snafu(display("Failed to create vector"))]
    CreateVector {
+        #[snafu(implicit)]
        location: Location,
        source: datatypes::error::Error,
    },

    #[snafu(display("Missing required field in protobuf, field: {}", field))]
-    MissingField { field: String, location: Location },
+    MissingField {
+        field: String,
+        #[snafu(implicit)]
+        location: Location,
+    },

    #[snafu(display("Invalid column proto definition, column: {}", column))]
    InvalidColumnDef {
        column: String,
+        #[snafu(implicit)]
        location: Location,
        source: api::error::Error,
    },

    #[snafu(display("Unexpected values length, reason: {}", reason))]
-    UnexpectedValuesLength { reason: String, location: Location },
+    UnexpectedValuesLength {
+        reason: String,
+        #[snafu(implicit)]
+        location: Location,
+    },

    #[snafu(display("Unknown location type: {}", location_type))]
    UnknownLocationType {
        location_type: i32,
+        #[snafu(implicit)]
        location: Location,
    },
}
@@ -291,88 +291,68 @@ impl ChannelConfig {
    }

    /// A timeout to each request.
-    pub fn timeout(self, timeout: Duration) -> Self {
-        Self {
-            timeout: Some(timeout),
-            ..self
-        }
+    pub fn timeout(mut self, timeout: Duration) -> Self {
+        self.timeout = Some(timeout);
+        self
    }

    /// A timeout to connecting to the uri.
    ///
    /// Defaults to no timeout.
-    pub fn connect_timeout(self, timeout: Duration) -> Self {
-        Self {
-            connect_timeout: Some(timeout),
-            ..self
-        }
+    pub fn connect_timeout(mut self, timeout: Duration) -> Self {
+        self.connect_timeout = Some(timeout);
+        self
    }

    /// A concurrency limit to each request.
-    pub fn concurrency_limit(self, limit: usize) -> Self {
-        Self {
-            concurrency_limit: Some(limit),
-            ..self
-        }
+    pub fn concurrency_limit(mut self, limit: usize) -> Self {
+        self.concurrency_limit = Some(limit);
+        self
    }

    /// A rate limit to each request.
-    pub fn rate_limit(self, limit: u64, duration: Duration) -> Self {
-        Self {
-            rate_limit: Some((limit, duration)),
-            ..self
-        }
+    pub fn rate_limit(mut self, limit: u64, duration: Duration) -> Self {
+        self.rate_limit = Some((limit, duration));
+        self
    }

    /// Sets the SETTINGS_INITIAL_WINDOW_SIZE option for HTTP2 stream-level flow control.
    /// Default is 65,535
-    pub fn initial_stream_window_size(self, size: u32) -> Self {
-        Self {
-            initial_stream_window_size: Some(size),
-            ..self
-        }
+    pub fn initial_stream_window_size(mut self, size: u32) -> Self {
+        self.initial_stream_window_size = Some(size);
+        self
    }

    /// Sets the max connection-level flow control for HTTP2
    ///
    /// Default is 65,535
-    pub fn initial_connection_window_size(self, size: u32) -> Self {
-        Self {
-            initial_connection_window_size: Some(size),
-            ..self
-        }
+    pub fn initial_connection_window_size(mut self, size: u32) -> Self {
+        self.initial_connection_window_size = Some(size);
+        self
    }

    /// Set http2 KEEP_ALIVE_INTERVAL. Uses hyper’s default otherwise.
-    pub fn http2_keep_alive_interval(self, duration: Duration) -> Self {
-        Self {
-            http2_keep_alive_interval: Some(duration),
-            ..self
-        }
+    pub fn http2_keep_alive_interval(mut self, duration: Duration) -> Self {
+        self.http2_keep_alive_interval = Some(duration);
+        self
    }

    /// Set http2 KEEP_ALIVE_TIMEOUT. Uses hyper’s default otherwise.
-    pub fn http2_keep_alive_timeout(self, duration: Duration) -> Self {
-        Self {
-            http2_keep_alive_timeout: Some(duration),
-            ..self
-        }
+    pub fn http2_keep_alive_timeout(mut self, duration: Duration) -> Self {
+        self.http2_keep_alive_timeout = Some(duration);
+        self
    }

    /// Set http2 KEEP_ALIVE_WHILE_IDLE. Uses hyper’s default otherwise.
-    pub fn http2_keep_alive_while_idle(self, enabled: bool) -> Self {
-        Self {
-            http2_keep_alive_while_idle: Some(enabled),
-            ..self
-        }
+    pub fn http2_keep_alive_while_idle(mut self, enabled: bool) -> Self {
+        self.http2_keep_alive_while_idle = Some(enabled);
+        self
    }

    /// Sets whether to use an adaptive flow control. Uses hyper’s default otherwise.
-    pub fn http2_adaptive_window(self, enabled: bool) -> Self {
-        Self {
-            http2_adaptive_window: Some(enabled),
-            ..self
-        }
+    pub fn http2_adaptive_window(mut self, enabled: bool) -> Self {
+        self.http2_adaptive_window = Some(enabled);
+        self
    }

    /// Set whether TCP keepalive messages are enabled on accepted connections.
@@ -381,31 +361,25 @@ impl ChannelConfig {
    /// will be the time to remain idle before sending TCP keepalive probes.
    ///
    /// Default is no keepalive (None)
-    pub fn tcp_keepalive(self, duration: Duration) -> Self {
-        Self {
-            tcp_keepalive: Some(duration),
-            ..self
-        }
+    pub fn tcp_keepalive(mut self, duration: Duration) -> Self {
+        self.tcp_keepalive = Some(duration);
+        self
    }

    /// Set the value of TCP_NODELAY option for accepted connections.
    ///
    /// Enabled by default.
-    pub fn tcp_nodelay(self, enabled: bool) -> Self {
-        Self {
-            tcp_nodelay: enabled,
-            ..self
-        }
+    pub fn tcp_nodelay(mut self, enabled: bool) -> Self {
+        self.tcp_nodelay = enabled;
+        self
    }

    /// Set the value of tls client auth.
    ///
    /// Disabled by default.
-    pub fn client_tls_config(self, client_tls_option: ClientTlsOption) -> Self {
-        Self {
-            client_tls: Some(client_tls_option),
-            ..self
-        }
+    pub fn client_tls_config(mut self, client_tls_option: ClientTlsOption) -> Self {
+        self.client_tls = Some(client_tls_option);
+        self
    }
}
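The refactor above swaps the `Self { field: Some(x), ..self }` struct-update pattern for `mut self` setters, which mutate one field in place instead of moving every field into a fresh struct on each call; chained usage is unchanged. A sketch, not part of the diff, assuming `ChannelConfig` is constructible via `Default`:

use std::time::Duration;

fn build_config() -> ChannelConfig {
    // Each call consumes `self`, mutates one field, and returns it.
    ChannelConfig::default()
        .timeout(Duration::from_secs(3))
        .connect_timeout(Duration::from_secs(1))
        .tcp_nodelay(true)
}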
@@ -33,6 +33,7 @@ pub enum Error {
    InvalidConfigFilePath {
        #[snafu(source)]
        error: io::Error,
+        #[snafu(implicit)]
        location: Location,
    },

@@ -46,6 +47,7 @@ pub enum Error {
        column_name: String,
        expected: String,
        actual: String,
+        #[snafu(implicit)]
        location: Location,
    },

@@ -53,30 +55,42 @@ pub enum Error {
    CreateChannel {
        #[snafu(source)]
        error: tonic::transport::Error,
+        #[snafu(implicit)]
        location: Location,
    },

    #[snafu(display("Failed to create RecordBatch"))]
    CreateRecordBatch {
+        #[snafu(implicit)]
        location: Location,
        source: common_recordbatch::error::Error,
    },

    #[snafu(display("Failed to convert Arrow type: {}", from))]
-    Conversion { from: String, location: Location },
+    Conversion {
+        from: String,
+        #[snafu(implicit)]
+        location: Location,
+    },

    #[snafu(display("Failed to decode FlightData"))]
    DecodeFlightData {
        #[snafu(source)]
        error: api::DecodeError,
+        #[snafu(implicit)]
        location: Location,
    },

    #[snafu(display("Invalid FlightData, reason: {}", reason))]
-    InvalidFlightData { reason: String, location: Location },
+    InvalidFlightData {
+        reason: String,
+        #[snafu(implicit)]
+        location: Location,
+    },

    #[snafu(display("Failed to convert Arrow Schema"))]
    ConvertArrowSchema {
+        #[snafu(implicit)]
        location: Location,
        source: datatypes::error::Error,
    },

@@ -34,7 +34,11 @@ pub enum Error {
    ProfilingNotEnabled,

    #[snafu(display("Failed to build temp file from given path: {:?}", path))]
-    BuildTempPath { path: PathBuf, location: Location },
+    BuildTempPath {
+        path: PathBuf,
+        #[snafu(implicit)]
+        location: Location,
+    },

    #[snafu(display("Failed to open temp file: {}", path))]
    OpenTempFile {
@@ -11,6 +11,7 @@ testing = []
workspace = true

[dependencies]
+anymap2 = "0.13.0"
api.workspace = true
async-recursion = "1.0"
async-trait.workspace = true

@@ -14,6 +14,17 @@

mod container;
mod flow;
+mod registry;
mod table;

pub use container::{CacheContainer, Initializer, Invalidator, TokenFilter};
-pub use flow::{new_table_flownode_set_cache, TableFlownodeSetCache};
+pub use flow::{new_table_flownode_set_cache, TableFlownodeSetCache, TableFlownodeSetCacheRef};
+pub use registry::{
+    CacheRegistry, CacheRegistryBuilder, CacheRegistryRef, LayeredCacheRegistry,
+    LayeredCacheRegistryBuilder, LayeredCacheRegistryRef,
+};
pub use table::{
    new_table_info_cache, new_table_name_cache, new_table_route_cache, TableInfoCache,
    TableInfoCacheRef, TableNameCache, TableNameCacheRef, TableRoute, TableRouteCache,
    TableRouteCacheRef,
};
src/common/meta/src/cache/container.rs (53 lines changed)
@@ -16,7 +16,7 @@ use std::borrow::Borrow;
use std::hash::Hash;
use std::sync::Arc;

-use futures::future::BoxFuture;
+use futures::future::{join_all, BoxFuture};
use moka::future::Cache;
use snafu::{OptionExt, ResultExt};

@@ -68,6 +68,11 @@ where
            token_filter,
        }
    }

+    /// Returns the `name`.
+    pub fn name(&self) -> &str {
+        &self.name
+    }
}

#[async_trait::async_trait]
@@ -76,13 +81,15 @@ where
    K: Send + Sync,
    V: Send + Sync,
{
-    async fn invalidate(&self, _ctx: &Context, caches: Vec<CacheIdent>) -> Result<()> {
-        for token in caches
-            .into_iter()
-            .filter(|token| (self.token_filter)(token))
-        {
-            (self.invalidator)(&self.cache, &token).await?;
-        }
+    async fn invalidate(&self, _ctx: &Context, caches: &[CacheIdent]) -> Result<()> {
+        let tasks = caches
+            .iter()
+            .filter(|token| (self.token_filter)(token))
+            .map(|token| (self.invalidator)(&self.cache, token));
+        join_all(tasks)
+            .await
+            .into_iter()
+            .collect::<Result<Vec<_>>>()?;
        Ok(())
    }
}
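The refactor above fires all matching invalidator futures and awaits them together with `join_all`, rather than awaiting each one in turn; note that `join_all`, unlike `try_join_all`, runs every task to completion and only then surfaces the first error. A standalone sketch of the same pattern, not part of the diff:

use futures::future::join_all;

async fn check_all(values: Vec<i32>) -> Result<(), String> {
    // Build one fallible future per retained item, then await them together.
    let tasks = values
        .iter()
        .filter(|v| **v != 0)
        .map(|v| async move {
            if *v < 0 {
                Err(format!("negative value: {v}"))
            } else {
                Ok(())
            }
        });
    join_all(tasks)
        .await
        .into_iter()
        .collect::<Result<Vec<_>, _>>()?;
    Ok(())
}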
@@ -94,9 +101,18 @@ where
{
    /// Returns a _clone_ of the value corresponding to the key.
    pub async fn get(&self, key: K) -> Result<Option<V>> {
+        metrics::CACHE_CONTAINER_CACHE_GET
+            .with_label_values(&[&self.name])
+            .inc();
        let moved_init = self.initializer.clone();
        let moved_key = key;
        let init = async move {
+            metrics::CACHE_CONTAINER_CACHE_MISS
+                .with_label_values(&[&self.name])
+                .inc();
+            let _timer = metrics::CACHE_CONTAINER_LOAD_CACHE
+                .with_label_values(&[&self.name])
+                .start_timer();
            moved_init(&moved_key)
                .await
                .transpose()
@@ -120,12 +136,26 @@ where
{
    /// Invalidates cache by [CacheToken].
    pub async fn invalidate(&self, caches: &[CacheToken]) -> Result<()> {
-        for token in caches.iter().filter(|token| (self.token_filter)(token)) {
-            (self.invalidator)(&self.cache, token).await?;
-        }
+        let tasks = caches
+            .iter()
+            .filter(|token| (self.token_filter)(token))
+            .map(|token| (self.invalidator)(&self.cache, token));
+        join_all(tasks)
+            .await
+            .into_iter()
+            .collect::<Result<Vec<_>>>()?;
        Ok(())
    }

+    /// Returns true if the cache contains a value for the key.
+    pub fn contains_key<Q>(&self, key: &Q) -> bool
+    where
+        K: Borrow<Q>,
+        Q: Hash + Eq + ?Sized,
+    {
+        self.cache.contains_key(key)
+    }
+
    /// Returns a _clone_ of the value corresponding to the key.
    pub async fn get_by_ref<Q>(&self, key: &Q) -> Result<Option<V>>
    where
@@ -142,6 +172,9 @@ where
            metrics::CACHE_CONTAINER_CACHE_MISS
                .with_label_values(&[&self.name])
                .inc();
+            let _timer = metrics::CACHE_CONTAINER_LOAD_CACHE
+                .with_label_values(&[&self.name])
+                .start_timer();

            moved_init(&moved_key)
                .await
4
src/common/meta/src/cache/flow.rs
vendored
4
src/common/meta/src/cache/flow.rs
vendored
@@ -13,4 +13,6 @@
|
||||
// limitations under the License.
|
||||
|
||||
mod table_flownode;
|
||||
pub use table_flownode::{new_table_flownode_set_cache, TableFlownodeSetCache};
|
||||
pub use table_flownode::{
|
||||
new_table_flownode_set_cache, TableFlownodeSetCache, TableFlownodeSetCacheRef,
|
||||
};
|
||||
|
||||
@@ -30,6 +30,8 @@ use crate::FlownodeId;
|
||||
|
||||
type FlownodeSet = HashSet<FlownodeId>;
|
||||
|
||||
pub type TableFlownodeSetCacheRef = Arc<TableFlownodeSetCache>;
|
||||
|
||||
/// [TableFlownodeSetCache] caches the [TableId] to [FlownodeSet] mapping.
|
||||
pub type TableFlownodeSetCache = CacheContainer<TableId, FlownodeSet, CacheIdent>;
|
||||
|
||||
@@ -54,6 +56,10 @@ fn init_factory(table_flow_manager: TableFlowManagerRef) -> Initializer<TableId,
                 .map_ok(|key| key.flownode_id())
                 .try_collect::<HashSet<_>>()
                 .await
+                // We must cache the `HashSet` even if it's empty,
+                // so that future requests don't hit the remote storage again;
+                // if a value is later added to the remote storage,
+                // the corresponding cache invalidation mechanism invalidates the `(Key, EmptyHashSet)` entry.
                 .map(Some)
         })
     })
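The comment above describes negative caching: an empty result is stored as a real entry so that absence itself is cheap to answer. A small sketch of the idea, assuming moka 0.12's async API; the table id and values are made up:

```rust
use std::collections::HashSet;

use moka::future::{Cache, CacheBuilder};

#[tokio::main]
async fn main() {
    let cache: Cache<u32, HashSet<u64>> = CacheBuilder::new(128).build();

    // Simulated remote lookup that found no flownodes for this table id.
    let flownodes: HashSet<u64> = HashSet::new();

    // Store the empty set anyway; the next lookup is a cache hit instead of
    // another round trip to remote storage. Invalidation later replaces it.
    cache.insert(1024, flownodes).await;
    assert_eq!(cache.get(&1024).await, Some(HashSet::new()));
}
```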
248 src/common/meta/src/cache/registry.rs vendored Normal file
@@ -0,0 +1,248 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::sync::Arc;

use anymap2::SendSyncAnyMap;
use futures::future::join_all;

use crate::cache_invalidator::{CacheInvalidator, Context};
use crate::error::Result;
use crate::instruction::CacheIdent;

pub type CacheRegistryRef = Arc<CacheRegistry>;
pub type LayeredCacheRegistryRef = Arc<LayeredCacheRegistry>;

/// [LayeredCacheRegistry] builder.
#[derive(Default)]
pub struct LayeredCacheRegistryBuilder {
    registry: LayeredCacheRegistry,
}

impl LayeredCacheRegistryBuilder {
    /// Adds a [CacheRegistry] as the next layer.
    ///
    /// During cache invalidation, [LayeredCacheRegistry] ensures each layer is
    /// invalidated sequentially (after the previous layer).
    pub fn add_cache_registry(mut self, registry: CacheRegistry) -> Self {
        self.registry.layers.push(registry);

        self
    }

    /// Returns a __clone__ of the value stored in the collection for the type `T`, if it exists.
    pub fn get<T: Send + Sync + Clone + 'static>(&self) -> Option<T> {
        self.registry.get()
    }

    /// Builds the [LayeredCacheRegistry].
    pub fn build(self) -> LayeredCacheRegistry {
        self.registry
    }
}

/// [LayeredCacheRegistry] invalidates caches sequentially, starting from the first layer.
#[derive(Default)]
pub struct LayeredCacheRegistry {
    layers: Vec<CacheRegistry>,
}

#[async_trait::async_trait]
impl CacheInvalidator for LayeredCacheRegistry {
    async fn invalidate(&self, ctx: &Context, caches: &[CacheIdent]) -> Result<()> {
        let mut results = Vec::with_capacity(self.layers.len());
        for registry in &self.layers {
            results.push(registry.invalidate(ctx, caches).await);
        }
        results.into_iter().collect::<Result<Vec<_>>>().map(|_| ())
    }
}

impl LayeredCacheRegistry {
    /// Returns a __clone__ of the value stored in the collection for the type `T`, if it exists.
    pub fn get<T: Send + Sync + Clone + 'static>(&self) -> Option<T> {
        for registry in &self.layers {
            if let Some(cache) = registry.get::<T>() {
                return Some(cache);
            }
        }

        None
    }
}

/// [CacheRegistryBuilder] provides the ability to:
/// - Register a `cache` that implements the [CacheInvalidator] trait into the [CacheRegistry].
/// - Build a [CacheRegistry].
#[derive(Default)]
pub struct CacheRegistryBuilder {
    registry: CacheRegistry,
}

impl CacheRegistryBuilder {
    /// Adds the cache.
    pub fn add_cache<T: CacheInvalidator + 'static>(mut self, cache: Arc<T>) -> Self {
        self.registry.register(cache);
        self
    }

    /// Builds the [CacheRegistry].
    pub fn build(self) -> CacheRegistry {
        self.registry
    }
}

/// [CacheRegistry] provides the ability to:
/// - Get a specific `cache`.
#[derive(Default)]
pub struct CacheRegistry {
    indexes: Vec<Arc<dyn CacheInvalidator>>,
    registry: SendSyncAnyMap,
}

#[async_trait::async_trait]
impl CacheInvalidator for CacheRegistry {
    async fn invalidate(&self, ctx: &Context, caches: &[CacheIdent]) -> Result<()> {
        let tasks = self
            .indexes
            .iter()
            .map(|invalidator| invalidator.invalidate(ctx, caches));
        join_all(tasks)
            .await
            .into_iter()
            .collect::<Result<Vec<_>>>()?;
        Ok(())
    }
}

impl CacheRegistry {
    /// Sets the value stored in the collection for the type `T`.
    /// Returns true if the collection already had a value of type `T`.
    fn register<T: CacheInvalidator + 'static>(&mut self, cache: Arc<T>) -> bool {
        self.indexes.push(cache.clone());
        self.registry.insert(cache).is_some()
    }

    /// Returns a __clone__ of the value stored in the collection for the type `T`, if it exists.
    pub fn get<T: Send + Sync + Clone + 'static>(&self) -> Option<T> {
        self.registry.get().cloned()
    }
}

#[cfg(test)]
mod tests {
    use std::sync::atomic::{AtomicBool, AtomicI32, Ordering};
    use std::sync::Arc;

    use moka::future::{Cache, CacheBuilder};

    use crate::cache::registry::CacheRegistryBuilder;
    use crate::cache::*;
    use crate::instruction::CacheIdent;

    fn test_cache(
        name: &str,
        invalidator: Invalidator<String, String, CacheIdent>,
    ) -> CacheContainer<String, String, CacheIdent> {
        let cache: Cache<String, String> = CacheBuilder::new(128).build();
        let filter: TokenFilter<CacheIdent> = Box::new(|_| true);
        let counter = Arc::new(AtomicI32::new(0));
        let moved_counter = counter.clone();
        let init: Initializer<String, String> = Arc::new(move |_| {
            moved_counter.fetch_add(1, Ordering::Relaxed);
            Box::pin(async { Ok(Some("hi".to_string())) })
        });

        CacheContainer::new(name.to_string(), cache, invalidator, init, filter)
    }

    fn test_i32_cache(
        name: &str,
        invalidator: Invalidator<i32, String, CacheIdent>,
    ) -> CacheContainer<i32, String, CacheIdent> {
        let cache: Cache<i32, String> = CacheBuilder::new(128).build();
        let filter: TokenFilter<CacheIdent> = Box::new(|_| true);
        let counter = Arc::new(AtomicI32::new(0));
        let moved_counter = counter.clone();
        let init: Initializer<i32, String> = Arc::new(move |_| {
            moved_counter.fetch_add(1, Ordering::Relaxed);
            Box::pin(async { Ok(Some("foo".to_string())) })
        });

        CacheContainer::new(name.to_string(), cache, invalidator, init, filter)
    }

    #[tokio::test]
    async fn test_register() {
        let builder = CacheRegistryBuilder::default();
        let invalidator: Invalidator<_, String, CacheIdent> =
            Box::new(|_, _| Box::pin(async { Ok(()) }));
        let i32_cache = Arc::new(test_i32_cache("i32_cache", invalidator));
        let invalidator: Invalidator<_, String, CacheIdent> =
            Box::new(|_, _| Box::pin(async { Ok(()) }));
        let cache = Arc::new(test_cache("string_cache", invalidator));
        let registry = builder.add_cache(i32_cache).add_cache(cache).build();

        let cache = registry
            .get::<Arc<CacheContainer<i32, String, CacheIdent>>>()
            .unwrap();
        assert_eq!(cache.name(), "i32_cache");

        let cache = registry
            .get::<Arc<CacheContainer<String, String, CacheIdent>>>()
            .unwrap();
        assert_eq!(cache.name(), "string_cache");
    }

    #[tokio::test]
    async fn test_layered_registry() {
        let builder = LayeredCacheRegistryBuilder::default();
        // 1st layer
        let counter = Arc::new(AtomicBool::new(false));
        let moved_counter = counter.clone();
        let invalidator: Invalidator<String, String, CacheIdent> = Box::new(move |_, _| {
            let counter = moved_counter.clone();
            Box::pin(async move {
                assert!(!counter.load(Ordering::Relaxed));
                counter.store(true, Ordering::Relaxed);
                Ok(())
            })
        });
        let cache = Arc::new(test_cache("string_cache", invalidator));
        let builder =
            builder.add_cache_registry(CacheRegistryBuilder::default().add_cache(cache).build());
        // 2nd layer
        let moved_counter = counter.clone();
        let invalidator: Invalidator<i32, String, CacheIdent> = Box::new(move |_, _| {
            let counter = moved_counter.clone();
            Box::pin(async move {
                assert!(counter.load(Ordering::Relaxed));
                Ok(())
            })
        });
        let i32_cache = Arc::new(test_i32_cache("i32_cache", invalidator));
        let builder = builder
            .add_cache_registry(CacheRegistryBuilder::default().add_cache(i32_cache).build());

        let registry = builder.build();
        let cache = registry
            .get::<Arc<CacheContainer<i32, String, CacheIdent>>>()
            .unwrap();
        assert_eq!(cache.name(), "i32_cache");
        let cache = registry
            .get::<Arc<CacheContainer<String, String, CacheIdent>>>()
            .unwrap();
        assert_eq!(cache.name(), "string_cache");
    }
}
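A condensed usage sketch distilled from the module's own tests above (it reuses the `test_cache` helper defined there): caches are registered behind their concrete `Arc<...>` type and fetched back by that same type, courtesy of the anymap-backed storage; layers are invalidated in insertion order.

```rust
let invalidator: Invalidator<String, String, CacheIdent> =
    Box::new(|_, _| Box::pin(async { Ok(()) }));
let string_cache = Arc::new(test_cache("string_cache", invalidator));

let registry = CacheRegistryBuilder::default()
    .add_cache(string_cache)
    .build();
let layered = LayeredCacheRegistryBuilder::default()
    .add_cache_registry(registry)
    .build();

// Lookup is keyed purely by the concrete type.
let cache = layered
    .get::<Arc<CacheContainer<String, String, CacheIdent>>>()
    .unwrap();
assert_eq!(cache.name(), "string_cache");
```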
20 src/common/meta/src/cache/table.rs vendored Normal file
@@ -0,0 +1,20 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

mod table_info;
mod table_name;
mod table_route;
pub use table_info::{new_table_info_cache, TableInfoCache, TableInfoCacheRef};
pub use table_name::{new_table_name_cache, TableNameCache, TableNameCacheRef};
pub use table_route::{new_table_route_cache, TableRoute, TableRouteCache, TableRouteCacheRef};
121 src/common/meta/src/cache/table/table_info.rs vendored Normal file
@@ -0,0 +1,121 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::sync::Arc;

use futures::future::BoxFuture;
use moka::future::Cache;
use snafu::{OptionExt, ResultExt};
use store_api::storage::TableId;
use table::metadata::TableInfo;

use crate::cache::{CacheContainer, Initializer};
use crate::error;
use crate::error::Result;
use crate::instruction::CacheIdent;
use crate::key::table_info::{TableInfoManager, TableInfoManagerRef};
use crate::kv_backend::KvBackendRef;

/// [TableInfoCache] caches the [TableId] to [TableInfo] mapping.
pub type TableInfoCache = CacheContainer<TableId, Arc<TableInfo>, CacheIdent>;

pub type TableInfoCacheRef = Arc<TableInfoCache>;

/// Constructs a [TableInfoCache].
pub fn new_table_info_cache(
    name: String,
    cache: Cache<TableId, Arc<TableInfo>>,
    kv_backend: KvBackendRef,
) -> TableInfoCache {
    let table_info_manager = Arc::new(TableInfoManager::new(kv_backend));
    let init = init_factory(table_info_manager);

    CacheContainer::new(name, cache, Box::new(invalidator), init, Box::new(filter))
}

fn init_factory(table_info_manager: TableInfoManagerRef) -> Initializer<TableId, Arc<TableInfo>> {
    Arc::new(move |table_id| {
        let table_info_manager = table_info_manager.clone();
        Box::pin(async move {
            let raw_table_info = table_info_manager
                .get(*table_id)
                .await?
                .context(error::ValueNotExistSnafu {})?
                .into_inner()
                .table_info;
            Ok(Some(Arc::new(
                TableInfo::try_from(raw_table_info).context(error::ConvertRawTableInfoSnafu)?,
            )))
        })
    })
}

fn invalidator<'a>(
    cache: &'a Cache<TableId, Arc<TableInfo>>,
    ident: &'a CacheIdent,
) -> BoxFuture<'a, Result<()>> {
    Box::pin(async move {
        if let CacheIdent::TableId(table_id) = ident {
            cache.invalidate(table_id).await
        }
        Ok(())
    })
}

fn filter(ident: &CacheIdent) -> bool {
    matches!(ident, CacheIdent::TableId(_))
}

#[cfg(test)]
mod tests {
    use std::collections::HashMap;
    use std::sync::Arc;

    use moka::future::CacheBuilder;

    use super::*;
    use crate::ddl::test_util::create_table::test_create_table_task;
    use crate::key::table_route::TableRouteValue;
    use crate::key::TableMetadataManager;
    use crate::kv_backend::memory::MemoryKvBackend;

    #[tokio::test]
    async fn test_cache() {
        let mem_kv = Arc::new(MemoryKvBackend::default());
        let table_metadata_manager = TableMetadataManager::new(mem_kv.clone());
        let cache = CacheBuilder::new(128).build();
        let cache = new_table_info_cache("test".to_string(), cache, mem_kv.clone());

        let result = cache.get(1024).await.unwrap();
        assert!(result.is_none());
        let task = test_create_table_task("my_table", 1024);
        table_metadata_manager
            .create_table_metadata(
                task.table_info.clone(),
                TableRouteValue::physical(vec![]),
                HashMap::new(),
            )
            .await
            .unwrap();
        let table_info = cache.get(1024).await.unwrap().unwrap();
        assert_eq!(*table_info, TableInfo::try_from(task.table_info).unwrap());

        assert!(cache.contains_key(&1024));
        cache
            .invalidate(&[CacheIdent::TableId(1024)])
            .await
            .unwrap();
        assert!(!cache.contains_key(&1024));
    }
}
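The `init_factory` above follows a pattern repeated in the sibling modules: clone the captured manager into an owned async block so the boxed future can own its data. A simplified standalone sketch of the shape (types here are stand-ins, not the crate's real `Initializer`):

```rust
use std::sync::Arc;

use futures::future::BoxFuture;

type Result<T> = std::result::Result<T, String>;
type Initializer<K, V> =
    Arc<dyn Fn(&K) -> BoxFuture<'static, Result<Option<V>>> + Send + Sync>;

#[derive(Clone)]
struct Manager;

impl Manager {
    async fn load(&self, key: u32) -> Result<Option<String>> {
        Ok(Some(format!("value-for-{key}")))
    }
}

fn init_factory(manager: Manager) -> Initializer<u32, String> {
    Arc::new(move |key| {
        // Clone per call so the returned future owns everything it touches.
        let manager = manager.clone();
        let key = *key;
        Box::pin(async move { manager.load(key).await })
    })
}
```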
173 src/common/meta/src/cache/table/table_name.rs vendored Normal file
@@ -0,0 +1,173 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::sync::Arc;

use futures::future::BoxFuture;
use moka::future::Cache;
use snafu::OptionExt;
use table::metadata::TableId;

use crate::cache::{CacheContainer, Initializer};
use crate::error;
use crate::error::Result;
use crate::instruction::CacheIdent;
use crate::key::table_name::{TableNameKey, TableNameManager, TableNameManagerRef};
use crate::kv_backend::KvBackendRef;
use crate::table_name::TableName;

/// [TableNameCache] caches the [TableName] to [TableId] mapping.
pub type TableNameCache = CacheContainer<TableName, TableId, CacheIdent>;

pub type TableNameCacheRef = Arc<TableNameCache>;

/// Constructs a [TableNameCache].
pub fn new_table_name_cache(
    name: String,
    cache: Cache<TableName, TableId>,
    kv_backend: KvBackendRef,
) -> TableNameCache {
    let table_name_manager = Arc::new(TableNameManager::new(kv_backend));
    let init = init_factory(table_name_manager);

    CacheContainer::new(name, cache, Box::new(invalidator), init, Box::new(filter))
}

fn init_factory(table_name_manager: TableNameManagerRef) -> Initializer<TableName, TableId> {
    Arc::new(
        move |TableName {
                  catalog_name,
                  schema_name,
                  table_name,
              }| {
            let table_name_manager = table_name_manager.clone();
            Box::pin(async move {
                Ok(Some(
                    table_name_manager
                        .get(TableNameKey {
                            catalog: catalog_name,
                            schema: schema_name,
                            table: table_name,
                        })
                        .await?
                        .context(error::ValueNotExistSnafu {})?
                        .table_id(),
                ))
            })
        },
    )
}

fn invalidator<'a>(
    cache: &'a Cache<TableName, TableId>,
    ident: &'a CacheIdent,
) -> BoxFuture<'a, Result<()>> {
    Box::pin(async move {
        if let CacheIdent::TableName(table_name) = ident {
            cache.invalidate(table_name).await
        }
        Ok(())
    })
}

fn filter(ident: &CacheIdent) -> bool {
    matches!(ident, CacheIdent::TableName(_))
}

#[cfg(test)]
mod tests {
    use std::sync::Arc;

    use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
    use moka::future::CacheBuilder;

    use super::*;
    use crate::kv_backend::memory::MemoryKvBackend;
    use crate::kv_backend::txn::TxnService;

    #[tokio::test]
    async fn test_cache_get() {
        let mem_kv = Arc::new(MemoryKvBackend::default());
        let cache = CacheBuilder::new(128).build();
        let cache = new_table_name_cache("test".to_string(), cache, mem_kv.clone());
        let result = cache
            .get_by_ref(&TableName {
                catalog_name: DEFAULT_CATALOG_NAME.to_string(),
                schema_name: DEFAULT_SCHEMA_NAME.to_string(),
                table_name: "my_table".to_string(),
            })
            .await
            .unwrap();
        assert!(result.is_none());
        // Puts a new value.
        let table_name_manager = TableNameManager::new(mem_kv.clone());
        let table_id = 1024;
        let txn = table_name_manager
            .build_create_txn(
                &TableNameKey {
                    catalog: DEFAULT_CATALOG_NAME,
                    schema: DEFAULT_SCHEMA_NAME,
                    table: "my_table",
                },
                table_id,
            )
            .unwrap();
        mem_kv.txn(txn).await.unwrap();
        let got = cache
            .get_by_ref(&TableName {
                catalog_name: DEFAULT_CATALOG_NAME.to_string(),
                schema_name: DEFAULT_SCHEMA_NAME.to_string(),
                table_name: "my_table".to_string(),
            })
            .await
            .unwrap()
            .unwrap();
        assert_eq!(got, table_id);
    }

    #[tokio::test]
    async fn test_invalidate_cache() {
        let mem_kv = Arc::new(MemoryKvBackend::default());
        let cache = CacheBuilder::new(128).build();
        let cache = new_table_name_cache("test".to_string(), cache, mem_kv.clone());
        // Puts a new value.
        let table_name_manager = TableNameManager::new(mem_kv.clone());
        let table_id = 1024;
        let table_name = TableName {
            catalog_name: DEFAULT_CATALOG_NAME.to_string(),
            schema_name: DEFAULT_SCHEMA_NAME.to_string(),
            table_name: "my_table".to_string(),
        };
        let txn = table_name_manager
            .build_create_txn(
                &TableNameKey {
                    catalog: DEFAULT_CATALOG_NAME,
                    schema: DEFAULT_SCHEMA_NAME,
                    table: "my_table",
                },
                table_id,
            )
            .unwrap();
        mem_kv.txn(txn).await.unwrap();
        let got = cache.get_by_ref(&table_name).await.unwrap().unwrap();
        assert_eq!(got, table_id);

        assert!(cache.contains_key(&table_name));
        cache
            .invalidate(&[CacheIdent::TableName(table_name.clone())])
            .await
            .unwrap();
        assert!(!cache.contains_key(&table_name));
    }
}
176 src/common/meta/src/cache/table/table_route.rs vendored Normal file
@@ -0,0 +1,176 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::sync::Arc;

use futures::future::BoxFuture;
use moka::future::Cache;
use snafu::OptionExt;
use store_api::storage::TableId;

use crate::cache::{CacheContainer, Initializer};
use crate::error;
use crate::error::Result;
use crate::instruction::CacheIdent;
use crate::key::table_route::{
    LogicalTableRouteValue, PhysicalTableRouteValue, TableRouteManager, TableRouteManagerRef,
    TableRouteValue,
};
use crate::kv_backend::KvBackendRef;

/// [TableRoute] stores an `Arc`-wrapped table route.
#[derive(Clone)]
pub enum TableRoute {
    Physical(Arc<PhysicalTableRouteValue>),
    Logical(Arc<LogicalTableRouteValue>),
}

impl TableRoute {
    /// Returns true if it's a physical table.
    pub fn is_physical(&self) -> bool {
        matches!(self, TableRoute::Physical(_))
    }

    /// Returns a [PhysicalTableRouteValue] reference if it's [TableRoute::Physical]; otherwise it returns [None].
    pub fn as_physical_table_route_ref(&self) -> Option<&Arc<PhysicalTableRouteValue>> {
        match self {
            TableRoute::Physical(table_route) => Some(table_route),
            TableRoute::Logical(_) => None,
        }
    }

    /// Returns a [LogicalTableRouteValue] reference if it's [TableRoute::Logical]; otherwise it returns [None].
    pub fn as_logical_table_route_ref(&self) -> Option<&Arc<LogicalTableRouteValue>> {
        match self {
            TableRoute::Physical(_) => None,
            TableRoute::Logical(table_route) => Some(table_route),
        }
    }
}

/// [TableRouteCache] caches the [TableId] to [TableRoute] mapping.
pub type TableRouteCache = CacheContainer<TableId, Arc<TableRoute>, CacheIdent>;

pub type TableRouteCacheRef = Arc<TableRouteCache>;

/// Constructs a [TableRouteCache].
pub fn new_table_route_cache(
    name: String,
    cache: Cache<TableId, Arc<TableRoute>>,
    kv_backend: KvBackendRef,
) -> TableRouteCache {
    let table_info_manager = Arc::new(TableRouteManager::new(kv_backend));
    let init = init_factory(table_info_manager);

    CacheContainer::new(name, cache, Box::new(invalidator), init, Box::new(filter))
}

fn init_factory(
    table_route_manager: TableRouteManagerRef,
) -> Initializer<TableId, Arc<TableRoute>> {
    Arc::new(move |table_id| {
        let table_route_manager = table_route_manager.clone();
        Box::pin(async move {
            let table_route_value = table_route_manager
                .table_route_storage()
                .get(*table_id)
                .await?
                .context(error::ValueNotExistSnafu {})?;

            let table_route = match table_route_value {
                TableRouteValue::Physical(physical) => TableRoute::Physical(Arc::new(physical)),
                TableRouteValue::Logical(logical) => TableRoute::Logical(Arc::new(logical)),
            };

            Ok(Some(Arc::new(table_route)))
        })
    })
}

fn invalidator<'a>(
    cache: &'a Cache<TableId, Arc<TableRoute>>,
    ident: &'a CacheIdent,
) -> BoxFuture<'a, Result<()>> {
    Box::pin(async move {
        if let CacheIdent::TableId(table_id) = ident {
            cache.invalidate(table_id).await
        }
        Ok(())
    })
}

fn filter(ident: &CacheIdent) -> bool {
    matches!(ident, CacheIdent::TableId(_))
}

#[cfg(test)]
mod tests {
    use std::collections::HashMap;
    use std::sync::Arc;

    use moka::future::CacheBuilder;
    use store_api::storage::RegionId;

    use super::*;
    use crate::ddl::test_util::create_table::test_create_table_task;
    use crate::key::table_route::TableRouteValue;
    use crate::key::TableMetadataManager;
    use crate::kv_backend::memory::MemoryKvBackend;
    use crate::peer::Peer;
    use crate::rpc::router::{Region, RegionRoute};

    #[tokio::test]
    async fn test_cache() {
        let mem_kv = Arc::new(MemoryKvBackend::default());
        let table_metadata_manager = TableMetadataManager::new(mem_kv.clone());
        let cache = CacheBuilder::new(128).build();
        let cache = new_table_route_cache("test".to_string(), cache, mem_kv.clone());

        let result = cache.get(1024).await.unwrap();
        assert!(result.is_none());
        let task = test_create_table_task("my_table", 1024);
        let table_id = 10;
        let region_id = RegionId::new(table_id, 1);
        let peer = Peer::empty(1);
        let region_routes = vec![RegionRoute {
            region: Region::new_test(region_id),
            leader_peer: Some(peer.clone()),
            ..Default::default()
        }];
        table_metadata_manager
            .create_table_metadata(
                task.table_info.clone(),
                TableRouteValue::physical(region_routes.clone()),
                HashMap::new(),
            )
            .await
            .unwrap();
        let table_route = cache.get(1024).await.unwrap().unwrap();
        assert_eq!(
            (*table_route)
                .clone()
                .as_physical_table_route_ref()
                .unwrap()
                .region_routes,
            region_routes
        );

        assert!(cache.contains_key(&1024));
        cache
            .invalidate(&[CacheIdent::TableId(1024)])
            .await
            .unwrap();
        assert!(!cache.contains_key(&1024));
    }
}
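A short consumer-side sketch of the cached route type above: code that needs region routes must go through the physical variant (`region_routes` is the field exercised by the test above); logical tables yield `None` here and resolve through their physical table instead.

```rust
// Returns how many region routes the cached entry carries, if it is physical.
fn region_count(route: &TableRoute) -> Option<usize> {
    route
        .as_physical_table_route_ref()
        .map(|physical| physical.region_routes.len())
}
```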
@@ -14,10 +14,11 @@

 use std::sync::Arc;

-use tokio::sync::RwLock;
-
 use crate::error::Result;
+use crate::flow_name::FlowName;
 use crate::instruction::CacheIdent;
+use crate::key::flow::flow_info::FlowInfoKey;
+use crate::key::flow::flow_name::FlowNameKey;
 use crate::key::schema_name::SchemaNameKey;
 use crate::key::table_info::TableInfoKey;
 use crate::key::table_name::TableNameKey;
@@ -47,7 +48,7 @@ pub struct Context {

 #[async_trait::async_trait]
 pub trait CacheInvalidator: Send + Sync {
-    async fn invalidate(&self, ctx: &Context, caches: Vec<CacheIdent>) -> Result<()>;
+    async fn invalidate(&self, ctx: &Context, caches: &[CacheIdent]) -> Result<()>;
 }

 pub type CacheInvalidatorRef = Arc<dyn CacheInvalidator>;
@@ -56,35 +57,7 @@ pub struct DummyCacheInvalidator;

 #[async_trait::async_trait]
 impl CacheInvalidator for DummyCacheInvalidator {
-    async fn invalidate(&self, _ctx: &Context, _caches: Vec<CacheIdent>) -> Result<()> {
+    async fn invalidate(&self, _ctx: &Context, _caches: &[CacheIdent]) -> Result<()> {
         Ok(())
     }
 }
-
-#[derive(Default)]
-pub struct MultiCacheInvalidator {
-    invalidators: RwLock<Vec<CacheInvalidatorRef>>,
-}
-
-impl MultiCacheInvalidator {
-    pub fn with_invalidators(invalidators: Vec<CacheInvalidatorRef>) -> Self {
-        Self {
-            invalidators: RwLock::new(invalidators),
-        }
-    }
-
-    pub async fn add_invalidator(&self, invalidator: CacheInvalidatorRef) {
-        self.invalidators.write().await.push(invalidator);
-    }
-}
-
-#[async_trait::async_trait]
-impl CacheInvalidator for MultiCacheInvalidator {
-    async fn invalidate(&self, ctx: &Context, caches: Vec<CacheIdent>) -> Result<()> {
-        let invalidators = self.invalidators.read().await;
-        for invalidator in invalidators.iter() {
-            invalidator.invalidate(ctx, caches.clone()).await?;
-        }
-        Ok(())
-    }
-}
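Why the trait signature moved from `Vec<CacheIdent>` to `&[CacheIdent]`: the removed `MultiCacheInvalidator` had to `caches.clone()` the whole `Vec` for every registered invalidator, while a shared slice is simply borrowed by each callee. A toy sketch with simplified types:

```rust
// With an owned `Vec<u32>` parameter, each call in the fan-out would need its
// own clone; with `&[u32]`, every callee borrows the same slice for free.
fn fan_out(invalidators: &[Box<dyn Fn(&[u32])>], idents: &[u32]) {
    for inv in invalidators {
        inv(idents); // no per-call clone
    }
}
```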
@@ -94,27 +67,37 @@ impl<T> CacheInvalidator for T
 where
     T: KvCacheInvalidator,
 {
-    async fn invalidate(&self, _ctx: &Context, caches: Vec<CacheIdent>) -> Result<()> {
+    async fn invalidate(&self, _ctx: &Context, caches: &[CacheIdent]) -> Result<()> {
         for cache in caches {
             match cache {
                 CacheIdent::TableId(table_id) => {
-                    let key = TableInfoKey::new(table_id);
+                    let key = TableInfoKey::new(*table_id);
                     self.invalidate_key(&key.to_bytes()).await;

-                    let key = &TableRouteKey { table_id };
+                    let key = TableRouteKey::new(*table_id);
                     self.invalidate_key(&key.to_bytes()).await;
                 }
                 CacheIdent::TableName(table_name) => {
-                    let key: TableNameKey = (&table_name).into();
+                    let key: TableNameKey = table_name.into();
                     self.invalidate_key(&key.to_bytes()).await
                 }
                 CacheIdent::SchemaName(schema_name) => {
-                    let key: SchemaNameKey = (&schema_name).into();
+                    let key: SchemaNameKey = schema_name.into();
                     self.invalidate_key(&key.to_bytes()).await;
                 }
                 CacheIdent::CreateFlow(_) | CacheIdent::DropFlow(_) => {
-                    // TODO(weny): implements it
-                    unimplemented!()
+                    // Do nothing
                 }
+                CacheIdent::FlowName(FlowName {
+                    catalog_name,
+                    flow_name,
+                }) => {
+                    let key = FlowNameKey::new(catalog_name, flow_name);
+                    self.invalidate_key(&key.to_bytes()).await
+                }
+                CacheIdent::FlowId(flow_id) => {
+                    let key = FlowInfoKey::new(*flow_id);
+                    self.invalidate_key(&key.to_bytes()).await;
+                }
             }
         }
@@ -38,6 +38,7 @@ pub mod create_flow;
 pub mod create_logical_tables;
 pub mod create_table;
 mod create_table_template;
+pub mod create_view;
 pub mod drop_database;
 pub mod drop_flow;
 pub mod drop_table;
@@ -89,6 +90,7 @@ pub struct TableMetadataAllocatorContext {
 }

 /// Metadata allocated to a table.
+#[derive(Default)]
 pub struct TableMetadata {
     /// Table id.
     pub table_id: TableId,
@@ -175,7 +175,7 @@ impl AlterLogicalTablesProcedure {

         self.context
             .cache_invalidator
-            .invalidate(&ctx, to_invalidate)
+            .invalidate(&ctx, &to_invalidate)
             .await?;
         Ok(Status::done())
     }
@@ -172,20 +172,16 @@ impl AlterTableProcedure {

     /// Broadcasts the invalidating table cache instructions.
     async fn on_broadcast(&mut self) -> Result<Status> {
-        // Safety: Checked in `AlterTableProcedure::new`.
-        let alter_kind = self.data.task.alter_table.kind.as_ref().unwrap();
         let cache_invalidator = &self.context.cache_invalidator;
-        let cache_keys = if matches!(alter_kind, Kind::RenameTable { .. }) {
-            vec![CacheIdent::TableName(self.data.table_ref().into())]
-        } else {
-            vec![
-                CacheIdent::TableId(self.data.table_id()),
-                CacheIdent::TableName(self.data.table_ref().into()),
-            ]
-        };

         cache_invalidator
-            .invalidate(&Context::default(), cache_keys)
+            .invalidate(
+                &Context::default(),
+                &[
+                    CacheIdent::TableId(self.data.table_id()),
+                    CacheIdent::TableName(self.data.table_ref().into()),
+                ],
+            )
             .await?;

         Ok(Status::done())
@@ -18,6 +18,7 @@ use async_trait::async_trait;
 use common_procedure::error::{FromJsonSnafu, Result as ProcedureResult, ToJsonSnafu};
 use common_procedure::{Context as ProcedureContext, LockKey, Procedure, Status};
 use serde::{Deserialize, Serialize};
+use serde_with::{serde_as, DefaultOnNull};
 use snafu::{ensure, ResultExt};
 use strum::AsRefStr;
@@ -39,7 +40,7 @@ impl CreateDatabaseProcedure {
         catalog: String,
         schema: String,
         create_if_not_exists: bool,
-        options: Option<HashMap<String, String>>,
+        options: HashMap<String, String>,
         context: DdlContext,
     ) -> Self {
         Self {
@@ -85,19 +86,14 @@ impl CreateDatabaseProcedure {
     }

     pub async fn on_create_metadata(&mut self) -> Result<Status> {
-        let value: Option<SchemaNameValue> = self
-            .data
-            .options
-            .as_ref()
-            .map(|hash_map_ref| hash_map_ref.try_into())
-            .transpose()?;
+        let value: SchemaNameValue = (&self.data.options).try_into()?;

         self.context
             .table_metadata_manager
             .schema_manager()
             .create(
                 SchemaNameKey::new(&self.data.catalog, &self.data.schema),
-                value,
+                Some(value),
                 self.data.create_if_not_exists,
             )
             .await?;
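The `(&self.data.options).try_into()?` call above presumes a `TryFrom<&HashMap<String, String>>` impl for `SchemaNameValue`. A hedged sketch of that shape (the struct and field names here are illustrative, not GreptimeDB's):

```rust
use std::collections::HashMap;

#[derive(Debug, Default)]
struct SchemaOpts {
    ttl: Option<String>, // hypothetical option
}

impl TryFrom<&HashMap<String, String>> for SchemaOpts {
    type Error = String;

    fn try_from(map: &HashMap<String, String>) -> Result<Self, Self::Error> {
        // Real impls would validate and parse each recognized key.
        Ok(Self {
            ttl: map.get("ttl").cloned(),
        })
    }
}
```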
@@ -142,11 +138,13 @@ pub enum CreateDatabaseState {
     CreateMetadata,
 }

+#[serde_as]
 #[derive(Debug, Serialize, Deserialize)]
 pub struct CreateDatabaseData {
     pub state: CreateDatabaseState,
     pub catalog: String,
     pub schema: String,
     pub create_if_not_exists: bool,
-    pub options: Option<HashMap<String, String>>,
+    #[serde_as(deserialize_as = "DefaultOnNull")]
+    pub options: HashMap<String, String>,
 }
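A self-contained sketch of what `DefaultOnNull` buys here: procedure state serialized by an older version stored `"options": null`, and the field should now decode as an empty map rather than fail (that motivation is inferred from the change, not stated in the diff).

```rust
use std::collections::HashMap;

use serde::{Deserialize, Serialize};
use serde_with::{serde_as, DefaultOnNull};

#[serde_as]
#[derive(Debug, Serialize, Deserialize)]
struct Data {
    #[serde_as(deserialize_as = "DefaultOnNull")]
    options: HashMap<String, String>,
}

fn main() {
    // Old payloads carrying `null` now decode as `HashMap::new()`.
    let old: Data = serde_json::from_str(r#"{"options": null}"#).unwrap();
    assert!(old.options.is_empty());
}
```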
@@ -34,9 +34,11 @@ use strum::AsRefStr;
 use table::metadata::TableId;

 use super::utils::add_peer_context_if_needed;
+use crate::cache_invalidator::Context;
 use crate::ddl::utils::handle_retry_error;
 use crate::ddl::DdlContext;
 use crate::error::{self, Result};
+use crate::instruction::{CacheIdent, CreateFlow};
 use crate::key::flow::flow_info::FlowInfoValue;
 use crate::key::table_name::TableNameKey;
 use crate::key::FlowId;
@@ -117,12 +119,11 @@ impl CreateFlowProcedure {
                 &sink_table_name.table_name,
             ))
             .await?;
-        ensure!(
-            !exists,
-            error::TableAlreadyExistsSnafu {
-                table_name: sink_table_name.to_string(),
-            }
-        );
+        // TODO(discord9): due to undefined behavior in how flow's plan transforms types in mfp,
+        // flow sometimes can't deduce the correct schema and requires manually creating the sink table
+        if exists {
+            common_telemetry::warn!("Table already exists, table: {}", sink_table_name);
+        }

         self.collect_source_tables().await?;
         self.allocate_flow_id().await?;
@@ -173,6 +174,28 @@ impl CreateFlowProcedure {
             .create_flow_metadata(flow_id, (&self.data).into())
             .await?;
         info!("Created flow metadata for flow {flow_id}");
+        self.data.state = CreateFlowState::InvalidateFlowCache;
+        Ok(Status::executing(true))
+    }
+
+    async fn on_broadcast(&mut self) -> Result<Status> {
+        // Safety: The flow id must be allocated.
+        let flow_id = self.data.flow_id.unwrap();
+        let ctx = Context {
+            subject: Some("Invalidate flow cache by creating flow".to_string()),
+        };
+
+        self.context
+            .cache_invalidator
+            .invalidate(
+                &ctx,
+                &[CacheIdent::CreateFlow(CreateFlow {
+                    source_table_ids: self.data.source_table_ids.clone(),
+                    flownode_ids: self.data.peers.iter().map(|peer| peer.id).collect(),
+                })],
+            )
+            .await?;
+
         Ok(Status::done_with_output(flow_id))
     }
 }
@@ -194,6 +217,7 @@ impl Procedure for CreateFlowProcedure {
             CreateFlowState::Prepare => self.on_prepare().await,
             CreateFlowState::CreateFlows => self.on_flownode_create_flows().await,
             CreateFlowState::CreateMetadata => self.on_create_metadata().await,
+            CreateFlowState::InvalidateFlowCache => self.on_broadcast().await,
         }
         .map_err(handle_retry_error)
     }
@@ -227,6 +251,8 @@ pub enum CreateFlowState {
     Prepare,
     /// Creates flows on the flownode.
     CreateFlows,
+    /// Invalidates the flow cache.
+    InvalidateFlowCache,
     /// Creates metadata.
     CreateMetadata,
 }
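A reduced sketch of the procedure state machine these three hunks extend: `execute` handles one state per call and schedules the next, so the new `InvalidateFlowCache` step slots in after metadata creation (simplified types, no async or errors):

```rust
enum State {
    Prepare,
    CreateFlows,
    CreateMetadata,
    InvalidateFlowCache,
}

enum Status {
    Executing,
    Done,
}

// One `execute` tick: do the work for the current state, move to the next.
fn step(state: &mut State) -> Status {
    match state {
        State::Prepare => { *state = State::CreateFlows; Status::Executing }
        State::CreateFlows => { *state = State::CreateMetadata; Status::Executing }
        State::CreateMetadata => { *state = State::InvalidateFlowCache; Status::Executing }
        State::InvalidateFlowCache => Status::Done,
    }
}
```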
@@ -43,7 +43,7 @@ impl CreateLogicalTablesProcedure {
         } else {
             self.context
                 .table_metadata_allocator
-                .allocate_table_id(task)
+                .allocate_table_id(&task.create_table.table_id)
                 .await?
         };
         task.set_table_id(table_id);
@@ -69,7 +69,7 @@ impl CreateLogicalTablesProcedure {
             .cache_invalidator
             .invalidate(
                 &Context::default(),
-                vec![
+                &[
                     CacheIdent::TableId(self.data.physical_table_id),
                     CacheIdent::TableName(physical_table_name),
                 ],
276 src/common/meta/src/ddl/create_view.rs Normal file
@@ -0,0 +1,276 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use async_trait::async_trait;
use common_procedure::error::{FromJsonSnafu, Result as ProcedureResult, ToJsonSnafu};
use common_procedure::{Context as ProcedureContext, LockKey, Procedure, Status};
use common_telemetry::info;
use serde::{Deserialize, Serialize};
use snafu::{ensure, OptionExt, ResultExt};
use strum::AsRefStr;
use table::metadata::{RawTableInfo, TableId, TableType};
use table::table_reference::TableReference;

use crate::ddl::utils::handle_retry_error;
use crate::ddl::{DdlContext, TableMetadata, TableMetadataAllocatorContext};
use crate::error::{self, Result};
use crate::key::table_name::TableNameKey;
use crate::lock_key::{CatalogLock, SchemaLock, TableNameLock};
use crate::rpc::ddl::CreateViewTask;
use crate::{metrics, ClusterId};

// The procedure to execute a `[CreateViewTask]`.
pub struct CreateViewProcedure {
    pub context: DdlContext,
    pub creator: ViewCreator,
}

impl CreateViewProcedure {
    pub const TYPE_NAME: &'static str = "metasrv-procedure::CreateView";

    pub fn new(cluster_id: ClusterId, task: CreateViewTask, context: DdlContext) -> Self {
        Self {
            context,
            creator: ViewCreator::new(cluster_id, task),
        }
    }

    pub fn from_json(json: &str, context: DdlContext) -> ProcedureResult<Self> {
        let data = serde_json::from_str(json).context(FromJsonSnafu)?;

        let creator = ViewCreator { data };

        Ok(CreateViewProcedure { context, creator })
    }

    fn view_info(&self) -> &RawTableInfo {
        &self.creator.data.task.view_info
    }

    fn need_update(&self) -> bool {
        self.creator.data.need_update
    }

    pub(crate) fn view_id(&self) -> TableId {
        self.view_info().ident.table_id
    }

    #[cfg(any(test, feature = "testing"))]
    pub fn set_allocated_metadata(&mut self, view_id: TableId) {
        self.creator.set_allocated_metadata(view_id, false)
    }

    /// On the prepare step, it performs:
    /// - Checks whether the view exists.
    /// - Allocates the view id.
    ///
    /// Abort(non-retry):
    /// - ViewName exists and `create_if_not_exists` is false.
    /// - Failed to allocate [ViewMetadata].
    pub(crate) async fn on_prepare(&mut self) -> Result<Status> {
        let expr = &self.creator.data.task.create_view;
        let view_name_value = self
            .context
            .table_metadata_manager
            .table_name_manager()
            .get(TableNameKey::new(
                &expr.catalog_name,
                &expr.schema_name,
                &expr.view_name,
            ))
            .await?;

        // If `view_id` is None, creates a new view;
        // otherwise:
        // - replaces the existing one when `or_replace` is true.
        // - returns the existing one when `create_if_not_exists` is true.
        // - throws the `[ViewAlreadyExistsSnafu]` error.
        let mut view_id = None;

        if let Some(value) = view_name_value {
            ensure!(
                expr.create_if_not_exists || expr.or_replace,
                error::ViewAlreadyExistsSnafu {
                    view_name: self.creator.data.table_ref().to_string(),
                }
            );

            let exists_view_id = value.table_id();

            if !expr.or_replace {
                return Ok(Status::done_with_output(exists_view_id));
            }
            view_id = Some(exists_view_id);
        }

        if let Some(view_id) = view_id {
            let view_info_value = self
                .context
                .table_metadata_manager
                .table_info_manager()
                .get(view_id)
                .await?
                .with_context(|| error::TableInfoNotFoundSnafu {
                    table: self.creator.data.table_ref().to_string(),
                })?;

            // Ensure the existing one is a view; we can't replace a table.
            ensure!(
                view_info_value.table_info.table_type == TableType::View,
                error::TableAlreadyExistsSnafu {
                    table_name: self.creator.data.table_ref().to_string(),
                }
            );

            self.creator.set_allocated_metadata(view_id, true);
        } else {
            // Allocate the new `view_id`.
            let TableMetadata { table_id, .. } = self
                .context
                .table_metadata_allocator
                .create_view(
                    &TableMetadataAllocatorContext {
                        cluster_id: self.creator.data.cluster_id,
                    },
                    &None,
                )
                .await?;
            self.creator.set_allocated_metadata(table_id, false);
        }

        self.creator.data.state = CreateViewState::CreateMetadata;

        Ok(Status::executing(true))
    }

    /// Creates view metadata.
    ///
    /// Abort(non-retry):
    /// - Failed to create view metadata.
    async fn on_create_metadata(&mut self, ctx: &ProcedureContext) -> Result<Status> {
        let view_id = self.view_id();
        let manager = &self.context.table_metadata_manager;

        if self.need_update() {
            // Retrieve the current view info and try to update it.
            let current_view_info = manager
                .view_info_manager()
                .get(view_id)
                .await?
                .with_context(|| error::ViewNotFoundSnafu {
                    view_name: self.creator.data.table_ref().to_string(),
                })?;
            let new_logical_plan = self.creator.data.task.raw_logical_plan().clone();
            manager
                .update_view_info(view_id, &current_view_info, new_logical_plan)
                .await?;

            info!("Updated view metadata for view {view_id}");
        } else {
            let raw_view_info = self.view_info().clone();
            manager
                .create_view_metadata(raw_view_info, self.creator.data.task.raw_logical_plan())
                .await?;

            info!(
                "Created view metadata for view {view_id} with procedure: {}",
                ctx.procedure_id
            );
        }

        Ok(Status::done_with_output(view_id))
    }
}

#[async_trait]
impl Procedure for CreateViewProcedure {
    fn type_name(&self) -> &str {
        Self::TYPE_NAME
    }

    async fn execute(&mut self, ctx: &ProcedureContext) -> ProcedureResult<Status> {
        let state = &self.creator.data.state;

        let _timer = metrics::METRIC_META_PROCEDURE_CREATE_VIEW
            .with_label_values(&[state.as_ref()])
            .start_timer();

        match state {
            CreateViewState::Prepare => self.on_prepare().await,
            CreateViewState::CreateMetadata => self.on_create_metadata(ctx).await,
        }
        .map_err(handle_retry_error)
    }

    fn dump(&self) -> ProcedureResult<String> {
        serde_json::to_string(&self.creator.data).context(ToJsonSnafu)
    }

    fn lock_key(&self) -> LockKey {
        let table_ref = &self.creator.data.table_ref();

        LockKey::new(vec![
            CatalogLock::Read(table_ref.catalog).into(),
            SchemaLock::read(table_ref.catalog, table_ref.schema).into(),
            TableNameLock::new(table_ref.catalog, table_ref.schema, table_ref.table).into(),
        ])
    }
}

/// The VIEW creator.
pub struct ViewCreator {
    /// The serializable data.
    pub data: CreateViewData,
}

impl ViewCreator {
    pub fn new(cluster_id: u64, task: CreateViewTask) -> Self {
        Self {
            data: CreateViewData {
                state: CreateViewState::Prepare,
                cluster_id,
                task,
                need_update: false,
            },
        }
    }

    fn set_allocated_metadata(&mut self, view_id: TableId, need_update: bool) {
        self.data.task.view_info.ident.table_id = view_id;
        self.data.need_update = need_update;
    }
}

#[derive(Debug, Clone, Serialize, Deserialize, AsRefStr, PartialEq)]
pub enum CreateViewState {
    /// Prepares to create the view.
    Prepare,
    /// Creates metadata.
    CreateMetadata,
}

#[derive(Debug, Serialize, Deserialize)]
pub struct CreateViewData {
    pub state: CreateViewState,
    pub task: CreateViewTask,
    pub cluster_id: ClusterId,
    /// Whether to update the view info.
    pub need_update: bool,
}

impl CreateViewData {
    fn table_ref(&self) -> TableReference<'_> {
        self.task.table_ref()
    }
}
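The prepare step's handling of an existing view, extracted as a standalone decision-table sketch (`u32` stands in for `TableId`); note that `or_replace` takes precedence over `create_if_not_exists`, matching the control flow above:

```rust
enum Outcome {
    CreateNew,
    ReturnExisting(u32),
    ReplaceExisting(u32),
    Error, // ViewAlreadyExists
}

fn decide(existing: Option<u32>, create_if_not_exists: bool, or_replace: bool) -> Outcome {
    match existing {
        None => Outcome::CreateNew,
        Some(id) if or_replace => Outcome::ReplaceExisting(id),
        Some(id) if create_if_not_exists => Outcome::ReturnExisting(id),
        Some(_) => Outcome::Error,
    }
}
```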
@@ -68,7 +68,7 @@ impl DropMetadataBroadcast {
         cache_invalidator
             .invalidate(
                 &ctx,
-                vec![CacheIdent::SchemaName(SchemaName {
+                &[CacheIdent::SchemaName(SchemaName {
                     catalog_name: db_ctx.catalog.clone(),
                     schema_name: db_ctx.schema.clone(),
                 })],
@@ -29,8 +29,11 @@ use snafu::{ensure, ResultExt};
 use strum::AsRefStr;

 use super::utils::{add_peer_context_if_needed, handle_retry_error};
+use crate::cache_invalidator::Context;
 use crate::ddl::DdlContext;
 use crate::error::{self, Result};
+use crate::flow_name::FlowName;
+use crate::instruction::{CacheIdent, DropFlow};
 use crate::key::flow::flow_info::FlowInfoValue;
 use crate::lock_key::{CatalogLock, FlowLock};
 use crate::peer::Peer;
@@ -145,7 +148,29 @@ impl DropFlowProcedure {
     }

     async fn on_broadcast(&mut self) -> Result<Status> {
-        // TODO(weny): invalidates cache.
+        let flow_id = self.data.task.flow_id;
+        let ctx = Context {
+            subject: Some("Invalidate flow cache by dropping flow".to_string()),
+        };
+        let flow_info_value = self.data.flow_info_value.as_ref().unwrap();
+
+        self.context
+            .cache_invalidator
+            .invalidate(
+                &ctx,
+                &[
+                    CacheIdent::FlowId(flow_id),
+                    CacheIdent::FlowName(FlowName {
+                        catalog_name: flow_info_value.catalog_name.to_string(),
+                        flow_name: flow_info_value.flow_name.to_string(),
+                    }),
+                    CacheIdent::DropFlow(DropFlow {
+                        source_table_ids: flow_info_value.source_table_ids.clone(),
+                        flownode_ids: flow_info_value.flownode_ids.values().cloned().collect(),
+                    }),
+                ],
+            )
+            .await?;
         self.data.state = DropFlowState::DropFlows;
         Ok(Status::executing(true))
     }
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user