Mirror of https://github.com/GreptimeTeam/greptimedb.git, synced 2025-12-22 22:20:02 +00:00

Compare commits: v0.9.0-nig... v0.9.0 (111 commits)
Commits (SHA1): c595a56ac8, d6c7b848da, 2010a2a33d, be3ea0fae7, 7b28da277d, b2c5f8eefa, 072d7c2022, 7900367433, 9fbc4ba649, 2e7b12c344, 2b912d93fb, 04ac0c8da0, 64cad4e891, 20d9c0a345, 9501318ce5, b8bd8456f0, 4b8b04ffa2, 15ac8116ea, 377a513690, 5a1732279b, 16075ada67, 67dfdd6c61, 9f2d53c3df, 05c7d3eb42, 63acc30ce7, 285ffc5850, ab22bbac84, 7ad248d6f6, 50e4539667, da1ea253ba, da0c840261, 20417e646a, 9271b3b7bd, 374cfe74bf, 52a9a748a1, 33ed745049, 458e5d7e66, 1ddf19d886, 185953e586, 7fe3f496ac, 1a9314a581, 23bb9d92cb, f1d17a8ba5, d1f1fad440, 00308218b3, 81308b9063, aa4d10eef7, 4811fe83f5, 96861137b2, 8e69543704, e5730a3745, c0e9b3dbe2, 59afa70311, bb32230f00, fe0be1583a, 08c415c729, 58f991b864, a710676d06, 3f4928effc, bc398cf197, 09fff24ac4, 30b65ca99e, b1219fa456, 4f0984c1d7, 0b624dc337, 60f599c3ef, f71b7b997d, 8a119aa0b2, d2f6daf7b7, d9efa564ee, 849e0b9249, c21e969329, 9393a1c51e, 69bb7ded6a, b5c6c72b02, 8399dcada3, 6e2c21dd3f, 70f7baffda, 4ec247f34d, 22f4d43b10, d9175213fd, 03c933c006, 65c9fbbd2f, ee9a5d7611, 8e306f3f51, 76fac359cd, 705b22411b, c9177cceeb, ddf2e6a3c0, 967b2cada6, 0f4b9e576d, c4db9e8aa7, 11cf9c827e, be29e48a60, 226136011e, fd4a928521, ef5d1a6a65, e64379d4f7, f2c08b8ddd, db5d1162f0, ea081c95bf, 6276e006b9, 2665616f72, e5313260d0, b69b24a237, f035a7c79c, a4e99f5666, 5d396bd6d7, fe2c5c3735, 6a634f8e5d, 214fd38f69
@@ -28,3 +28,8 @@ GT_MYSQL_ADDR = localhost:4002

 # Setting for unstable fuzz tests
 GT_FUZZ_BINARY_PATH=/path/to/
 GT_FUZZ_INSTANCE_ROOT_DIR=/tmp/unstable_greptime
 GT_FUZZ_INPUT_MAX_ROWS=2048
 GT_FUZZ_INPUT_MAX_TABLES=32
 GT_FUZZ_INPUT_MAX_COLUMNS=32
 GT_FUZZ_INPUT_MAX_ALTER_ACTIONS=256
 GT_FUZZ_INPUT_MAX_INSERT_ACTIONS=8
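These `GT_FUZZ_*` variables bound the inputs generated by the unstable fuzz targets. As a hedged illustration (the helper below is hypothetical, not the repository's actual fuzz-support code), such knobs are typically read from the environment with a fallback default:

```rust
use std::env;

/// Illustrative only: read a `GT_FUZZ_*` knob with a fallback default.
/// The real fuzz support code may differ in naming and error handling.
fn env_or_default(key: &str, default: usize) -> usize {
    env::var(key)
        .ok()
        .and_then(|v| v.parse::<usize>().ok())
        .unwrap_or(default)
}

fn main() {
    let max_rows = env_or_default("GT_FUZZ_INPUT_MAX_ROWS", 2048);
    let max_tables = env_or_default("GT_FUZZ_INPUT_MAX_TABLES", 32);
    println!("fuzz input bounds: rows={max_rows}, tables={max_tables}");
}
```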
@@ -123,10 +123,10 @@ runs:
       DST_REGISTRY_PASSWORD: ${{ inputs.dst-image-registry-password }}
     run: |
       ./.github/scripts/copy-image.sh \
-        ${{ inputs.src-image-registry }}/${{ inputs.src-image-namespace }}/${{ inputs.src-image-name }}-centos:latest \
+        ${{ inputs.src-image-registry }}/${{ inputs.src-image-namespace }}/${{ inputs.src-image-name }}-centos:${{ inputs.version }} \
         ${{ inputs.dst-image-registry }}/${{ inputs.dst-image-namespace }}

-  - name: Push greptimedb-centos image from DockerHub to ACR
+  - name: Push latest greptimedb-centos image from DockerHub to ACR
     shell: bash
     if: ${{ inputs.dev-mode == 'false' && inputs.push-latest-tag == 'true' }}
     env:
@@ -31,17 +31,21 @@ runs:
   using: composite
   steps:
     - name: Install GreptimeDB operator
-      shell: bash
-      run: |
-        helm repo add greptime https://greptimeteam.github.io/helm-charts/
-        helm repo update
-        helm upgrade \
-          --install \
-          --create-namespace \
-          greptimedb-operator greptime/greptimedb-operator \
-          -n greptimedb-admin \
-          --wait \
-          --wait-for-jobs
+      uses: nick-fields/retry@v3
+      with:
+        timeout_minutes: 3
+        max_attempts: 3
+        shell: bash
+        command: |
+          helm repo add greptime https://greptimeteam.github.io/helm-charts/
+          helm repo update
+          helm upgrade \
+            --install \
+            --create-namespace \
+            greptimedb-operator greptime/greptimedb-operator \
+            -n greptimedb-admin \
+            --wait \
+            --wait-for-jobs
     - name: Install GreptimeDB cluster
       shell: bash
       run: |
6 .github/workflows/develop.yml (vendored)
@@ -139,6 +139,7 @@ jobs:
     name: Fuzz Test
     needs: build
     runs-on: ubuntu-latest
+    timeout-minutes: 60
     strategy:
       matrix:
         target: [ "fuzz_create_table", "fuzz_alter_table", "fuzz_create_database", "fuzz_create_logical_table", "fuzz_alter_logical_table", "fuzz_insert", "fuzz_insert_logical_table" ]
@@ -186,6 +187,7 @@ jobs:
     name: Unstable Fuzz Test
     needs: build-greptime-ci
     runs-on: ubuntu-latest
+    timeout-minutes: 60
     strategy:
       matrix:
         target: [ "unstable_fuzz_create_table_standalone" ]
@@ -278,6 +280,7 @@ jobs:
     name: Fuzz Test (Distributed, ${{ matrix.mode.name }}, ${{ matrix.target }})
     runs-on: ubuntu-latest
     needs: build-greptime-ci
+    timeout-minutes: 60
     strategy:
       matrix:
         target: [ "fuzz_create_table", "fuzz_alter_table", "fuzz_create_database", "fuzz_create_logical_table", "fuzz_alter_logical_table", "fuzz_insert", "fuzz_insert_logical_table" ]
@@ -413,9 +416,10 @@ jobs:
     name: Fuzz Test with Chaos (Distributed, ${{ matrix.mode.name }}, ${{ matrix.target }})
     runs-on: ubuntu-latest
     needs: build-greptime-ci
+    timeout-minutes: 60
     strategy:
       matrix:
-        target: ["fuzz_failover_mito_regions"]
+        target: ["fuzz_migrate_mito_regions", "fuzz_failover_mito_regions", "fuzz_failover_metric_regions"]
         mode:
           - name: "Remote WAL"
             minio: true
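The `fuzz_*` targets in these matrices follow the usual `cargo fuzz` harness shape. Below is a generic sketch of such a harness; the `FuzzInput` type and its fields are hypothetical stand-ins, not GreptimeDB's actual fuzz input types:

```rust
// Generic cargo-fuzz harness shape; types here are illustrative stand-ins.
#![no_main]

use arbitrary::Arbitrary;
use libfuzzer_sys::fuzz_target;

/// Hypothetical input: bounded table/row counts, akin to the GT_FUZZ_* knobs.
#[derive(Debug, Arbitrary)]
struct FuzzInput {
    tables: u8,
    rows: u16,
}

fuzz_target!(|input: FuzzInput| {
    // A real target would build SQL (CREATE TABLE / INSERT / ALTER) from
    // `input` and run it against a test instance, asserting no crash.
    let _ = (input.tables, input.rows);
});
```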
4 .github/workflows/nightly-build.yml (vendored)
@@ -199,7 +199,7 @@ jobs:
       image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
       image-registry-password: ${{ secrets.DOCKERHUB_TOKEN }}
       version: ${{ needs.allocate-runners.outputs.version }}
-      push-latest-tag: false # Don't push the latest tag to registry.
+      push-latest-tag: true

   - name: Set nightly build result
     id: set-nightly-build-result
@@ -240,7 +240,7 @@ jobs:
       aws-cn-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
       dev-mode: false
       update-version-info: false # Don't update version info in S3.
-      push-latest-tag: false # Don't push the latest tag to registry.
+      push-latest-tag: true

   stop-linux-amd64-runner: # It's always run as the last job in the workflow to make sure that the runner is released.
     name: Stop linux-amd64 runner
2 .github/workflows/release.yml (vendored)
@@ -91,7 +91,7 @@ env:
   # The scheduled version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-YYYYMMDD', like v0.2.0-nigthly-20230313;
   NIGHTLY_RELEASE_PREFIX: nightly
   # Note: The NEXT_RELEASE_VERSION should be modified manually by every formal release.
-  NEXT_RELEASE_VERSION: v0.9.0
+  NEXT_RELEASE_VERSION: v0.10.0

 # Permission reference: https://docs.github.com/en/actions/using-jobs/assigning-permissions-to-jobs
 permissions:
@@ -16,6 +16,7 @@ repos:
     hooks:
       - id: fmt
       - id: clippy
-        args: ["--workspace", "--all-targets", "--", "-D", "warnings", "-D", "clippy::print_stdout", "-D", "clippy::print_stderr"]
+        args: ["--workspace", "--all-targets", "--all-features", "--", "-D", "warnings"]
         stages: [push]
       - id: cargo-check
+        args: ["--workspace", "--all-targets", "--all-features"]
43 AUTHOR.md (new file)
@@ -0,0 +1,43 @@
# GreptimeDB Authors

## Individual Committers (in alphabetical order)

* [CookiePieWw](https://github.com/CookiePieWw)
* [KKould](https://github.com/KKould)
* [NiwakaDev](https://github.com/NiwakaDev)
* [etolbakov](https://github.com/etolbakov)
* [irenjj](https://github.com/irenjj)

## Team Members (in alphabetical order)

* [Breeze-P](https://github.com/Breeze-P)
* [GrepTime](https://github.com/GrepTime)
* [MichaelScofield](https://github.com/MichaelScofield)
* [Wenjie0329](https://github.com/Wenjie0329)
* [WenyXu](https://github.com/WenyXu)
* [ZonaHex](https://github.com/ZonaHex)
* [apdong2022](https://github.com/apdong2022)
* [beryl678](https://github.com/beryl678)
* [daviderli614](https://github.com/daviderli614)
* [discord9](https://github.com/discord9)
* [evenyag](https://github.com/evenyag)
* [fengjiachun](https://github.com/fengjiachun)
* [fengys1996](https://github.com/fengys1996)
* [holalengyu](https://github.com/holalengyu)
* [killme2008](https://github.com/killme2008)
* [nicecui](https://github.com/nicecui)
* [paomian](https://github.com/paomian)
* [shuiyisong](https://github.com/shuiyisong)
* [sunchanglong](https://github.com/sunchanglong)
* [sunng87](https://github.com/sunng87)
* [tisonkun](https://github.com/tisonkun)
* [v0y4g3r](https://github.com/v0y4g3r)
* [waynexia](https://github.com/waynexia)
* [xtang](https://github.com/xtang)
* [zhaoyingnan01](https://github.com/zhaoyingnan01)
* [zhongzc](https://github.com/zhongzc)
* [zyy17](https://github.com/zyy17)

## All Contributors

[](https://github.com/GreptimeTeam/greptimedb/graphs/contributors)
@@ -4,10 +4,7 @@ Thanks a lot for considering contributing to GreptimeDB. We believe people like

 You can find our contributors at https://github.com/GreptimeTeam/greptimedb/graphs/contributors. When you dedicate to GreptimeDB for a few months and keep bringing high-quality contributions (code, docs, advocate, etc.), you will be a candidate of a committer.

-A committer will be granted both read & write access to GreptimeDB repos. Here is a list of current committers except GreptimeDB team members:
-
-* [Eugene Tolbakov](https://github.com/etolbakov): PromQL support, SQL engine, InfluxDB APIs, and more.
-* [@NiwakaDev](https://github.com/NiwakaDev): SQL engine and storage layer.
+A committer will be granted both read & write access to GreptimeDB repos. Check the [AUTHOR.md](AUTHOR.md) file for all current individual committers.

 Please read the guidelines, and they can help you get started. Communicate respectfully with the developers maintaining and developing the project. In return, they should reciprocate that respect by addressing your issue, reviewing changes, as well as helping finalize and merge your pull requests.

@@ -58,7 +55,7 @@ GreptimeDB uses the [Apache 2.0 license](https://github.com/GreptimeTeam/greptim
 - To ensure that community is free and confident in its ability to use your contributions, please sign the Contributor License Agreement (CLA) which will be incorporated in the pull request process.
 - Make sure all files have proper license header (running `docker run --rm -v $(pwd):/github/workspace ghcr.io/korandoru/hawkeye-native:v3 format` from the project root).
 - Make sure all your codes are formatted and follow the [coding style](https://pingcap.github.io/style-guide/rust/) and [style guide](docs/style-guide.md).
-- Make sure all unit tests are passed (using `cargo test --workspace` or [nextest](https://nexte.st/index.html) `cargo nextest run`).
+- Make sure all unit tests are passed using [nextest](https://nexte.st/index.html) `cargo nextest run`.
 - Make sure all clippy warnings are fixed (you can check it locally by running `cargo clippy --workspace --all-targets -- -D warnings`).

 #### `pre-commit` Hooks
1374 Cargo.lock (generated): file diff suppressed because it is too large.
25 Cargo.toml
@@ -64,7 +64,7 @@ members = [
 resolver = "2"

 [workspace.package]
-version = "0.8.2"
+version = "0.9.0"
 edition = "2021"
 license = "Apache-2.0"

@@ -104,23 +104,22 @@ clap = { version = "4.4", features = ["derive"] }
 config = "0.13.0"
 crossbeam-utils = "0.8"
 dashmap = "5.4"
-datafusion = { git = "https://github.com/apache/datafusion.git", rev = "729b356ef543ffcda6813c7b5373507a04ae0109" }
-datafusion-common = { git = "https://github.com/apache/datafusion.git", rev = "729b356ef543ffcda6813c7b5373507a04ae0109" }
-datafusion-expr = { git = "https://github.com/apache/datafusion.git", rev = "729b356ef543ffcda6813c7b5373507a04ae0109" }
-datafusion-functions = { git = "https://github.com/apache/datafusion.git", rev = "729b356ef543ffcda6813c7b5373507a04ae0109" }
-datafusion-optimizer = { git = "https://github.com/apache/datafusion.git", rev = "729b356ef543ffcda6813c7b5373507a04ae0109" }
-datafusion-physical-expr = { git = "https://github.com/apache/datafusion.git", rev = "729b356ef543ffcda6813c7b5373507a04ae0109" }
-datafusion-physical-plan = { git = "https://github.com/apache/datafusion.git", rev = "729b356ef543ffcda6813c7b5373507a04ae0109" }
-datafusion-sql = { git = "https://github.com/apache/datafusion.git", rev = "729b356ef543ffcda6813c7b5373507a04ae0109" }
-datafusion-substrait = { git = "https://github.com/apache/datafusion.git", rev = "729b356ef543ffcda6813c7b5373507a04ae0109" }
+datafusion = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "d7bda5c9b762426e81f144296deadc87e5f4a0b8" }
+datafusion-common = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "d7bda5c9b762426e81f144296deadc87e5f4a0b8" }
+datafusion-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "d7bda5c9b762426e81f144296deadc87e5f4a0b8" }
+datafusion-functions = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "d7bda5c9b762426e81f144296deadc87e5f4a0b8" }
+datafusion-optimizer = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "d7bda5c9b762426e81f144296deadc87e5f4a0b8" }
+datafusion-physical-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "d7bda5c9b762426e81f144296deadc87e5f4a0b8" }
+datafusion-physical-plan = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "d7bda5c9b762426e81f144296deadc87e5f4a0b8" }
+datafusion-sql = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "d7bda5c9b762426e81f144296deadc87e5f4a0b8" }
+datafusion-substrait = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "d7bda5c9b762426e81f144296deadc87e5f4a0b8" }
 derive_builder = "0.12"
 dotenv = "0.15"
-# TODO(LFC): Wait for https://github.com/etcdv3/etcd-client/pull/76
-etcd-client = { git = "https://github.com/MichaelScofield/etcd-client.git", rev = "4c371e9b3ea8e0a8ee2f9cbd7ded26e54a45df3b" }
+etcd-client = { version = "0.13" }
 fst = "0.4.7"
 futures = "0.3"
 futures-util = "0.3"
-greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "a70a6af9c69e40f9a918936a48717343402b4393" }
+greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "5c801650435d464891114502539b701c77a1b914" }
 humantime = "2.1"
 humantime-serde = "1.1"
 itertools = "0.10"
8 Makefile
@@ -205,10 +205,14 @@ run-it-in-container: start-etcd ## Run integration tests in dev-builder.
 		-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:latest \
 		make test sqlness-test BUILD_JOBS=${BUILD_JOBS}

-.PHONY: run-cluster-with-etcd
-run-cluster-with-etcd: ## Run greptime cluster with etcd in docker-compose.
+.PHONY: start-cluster
+start-cluster: ## Start the greptimedb cluster with etcd by using docker compose.
 	docker compose -f ./docker/docker-compose/cluster-with-etcd.yaml up

+.PHONY: stop-cluster
+stop-cluster: ## Stop the greptimedb cluster that created by docker compose.
+	docker compose -f ./docker/docker-compose/cluster-with-etcd.yaml stop

 ##@ Docs
 config-docs: ## Generate configuration documentation from toml files.
 	docker run --rm \
19 README.md
@@ -6,12 +6,12 @@
 </picture>
 </p>

-<h1 align="center">Cloud-scale, Fast and Efficient Time Series Database</h1>
+<h2 align="center">Unified Time Series Database for Metrics, Events, and Logs</h2>

 <div align="center">
 <h3 align="center">
   <a href="https://greptime.com/product/cloud">GreptimeCloud</a> |
-  <a href="https://docs.greptime.com/">User guide</a> |
+  <a href="https://docs.greptime.com/">User Guide</a> |
   <a href="https://greptimedb.rs/">API Docs</a> |
   <a href="https://github.com/GreptimeTeam/greptimedb/issues/3412">Roadmap 2024</a>
 </h4>
@@ -50,24 +50,23 @@

 ## Introduction

-**GreptimeDB** is an open-source time-series database focusing on efficiency, scalability, and analytical capabilities.
-Designed to work on infrastructure of the cloud era, GreptimeDB benefits users with its elasticity and commodity storage, offering a fast and cost-effective **alternative to InfluxDB** and a **long-term storage for Prometheus**.
+**GreptimeDB** is an open-source unified time-series database for **Metrics**, **Events**, and **Logs** (also **Traces** in plan). You can gain real-time insights from Edge to Cloud at any scale.

 ## Why GreptimeDB

 Our core developers have been building time-series data platforms for years. Based on our best-practices, GreptimeDB is born to give you:

-* **Easy horizontal scaling**
+* **Unified all kinds of time series**

-  Seamless scalability from a standalone binary at edge to a robust, highly available distributed cluster in cloud, with a transparent experience for both developers and administrators.
+  GreptimeDB treats all time series as contextual events with timestamp, and thus unifies the processing of metrics and events. It supports analyzing metrics and events with SQL and PromQL, and doing streaming with continuous aggregation.

-* **Analyzing time-series data**
+* **Cloud-Edge collaboration**

-  Query your time-series data with SQL and PromQL. Use Python scripts to facilitate complex analytical tasks.
+  GreptimeDB can be deployed on ARM architecture-compatible Android/Linux systems as well as cloud environments from various vendors. Both sides run the same software, providing identical APIs and control planes, so your application can run at the edge or on the cloud without modification, and data synchronization also becomes extremely easy and efficient.

 * **Cloud-native distributed database**

-  Fully open-source distributed cluster architecture that harnesses the power of cloud-native elastic computing resources.
+  By leveraging object storage (S3 and others), separating compute and storage, scaling stateless compute nodes arbitrarily, GreptimeDB implements seamless scalability. It also supports cross-cloud deployment with a built-in unified data access layer over different object storages.

 * **Performance and Cost-effective**

@@ -183,6 +182,8 @@ Please refer to [contribution guidelines](CONTRIBUTING.md) and [internal concept

 ## Acknowledgement

+Special thanks to all the contributors who have propelled GreptimeDB forward. For a complete list of contributors, please refer to [AUTHOR.md](AUTHOR.md).
+
 - GreptimeDB uses [Apache Arrow™](https://arrow.apache.org/) as the memory model and [Apache Parquet™](https://parquet.apache.org/) as the persistent file format.
 - GreptimeDB's query engine is powered by [Apache Arrow DataFusion™](https://arrow.apache.org/datafusion/).
 - [Apache OpenDAL™](https://opendal.apache.org) gives GreptimeDB a very general and elegant data access abstraction layer.
@@ -1,10 +1,12 @@
 # Configurations

-- [Standalone Mode](#standalone-mode)
-- [Distributed Mode](#distributed-mode)
+- [Configurations](#configurations)
+  - [Standalone Mode](#standalone-mode)
+  - [Distributed Mode](#distributed-mode)
   - [Frontend](#frontend)
   - [Metasrv](#metasrv)
   - [Datanode](#datanode)
+  - [Flownode](#flownode)

 ## Standalone Mode

@@ -23,3 +25,7 @@
 ### Datanode

 {{ toml2docs "./datanode.example.toml" }}
+
+### Flownode
+
+{{ toml2docs "./flownode.example.toml"}}
@@ -1,10 +1,12 @@
 # Configurations

-- [Standalone Mode](#standalone-mode)
-- [Distributed Mode](#distributed-mode)
+- [Configurations](#configurations)
+  - [Standalone Mode](#standalone-mode)
+  - [Distributed Mode](#distributed-mode)
   - [Frontend](#frontend)
   - [Metasrv](#metasrv)
   - [Datanode](#datanode)
+  - [Flownode](#flownode)

 ## Standalone Mode

@@ -118,17 +120,26 @@
 | `region_engine.mito.scan_parallelism` | Integer | `0` | Parallelism to scan a region (default: 1/4 of cpu cores).<br/>- `0`: using the default value (1/4 of cpu cores).<br/>- `1`: scan in current thread.<br/>- `n`: scan in parallelism n. |
 | `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
 | `region_engine.mito.allow_stale_entries` | Bool | `false` | Whether to allow stale WAL entries read during replay. |
+| `region_engine.mito.index` | -- | -- | The options for index in Mito engine. |
+| `region_engine.mito.index.aux_path` | String | `""` | Auxiliary directory path for the index in filesystem, used to store intermediate files for<br/>creating the index and staging files for searching the index, defaults to `{data_home}/index_intermediate`.<br/>The default name for this directory is `index_intermediate` for backward compatibility.<br/><br/>This path contains two subdirectories:<br/>- `__intm`: for storing intermediate files used during creating index.<br/>- `staging`: for storing staging files used during searching index. |
+| `region_engine.mito.index.staging_size` | String | `2GB` | The max capacity of the staging directory. |
 | `region_engine.mito.inverted_index` | -- | -- | The options for inverted index in Mito engine. |
-| `region_engine.mito.inverted_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically<br/>- `disable`: never |
-| `region_engine.mito.inverted_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically<br/>- `disable`: never |
-| `region_engine.mito.inverted_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically<br/>- `disable`: never |
-| `region_engine.mito.inverted_index.mem_threshold_on_create` | String | `64M` | Memory threshold for performing an external sort during index creation.<br/>Setting to empty will disable external sorting, forcing all sorting operations to happen in memory. |
-| `region_engine.mito.inverted_index.intermediate_path` | String | `""` | File system path to store intermediate files for external sorting (default `{data_home}/index_intermediate`). |
+| `region_engine.mito.inverted_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
+| `region_engine.mito.inverted_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
+| `region_engine.mito.inverted_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically (default)<br/>- `disable`: never |
+| `region_engine.mito.inverted_index.mem_threshold_on_create` | String | `auto` | Memory threshold for performing an external sort during index creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold |
+| `region_engine.mito.inverted_index.intermediate_path` | String | `""` | Deprecated, use `region_engine.mito.index.aux_path` instead. |
+| `region_engine.mito.fulltext_index` | -- | -- | The options for full-text index in Mito engine. |
+| `region_engine.mito.fulltext_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
+| `region_engine.mito.fulltext_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
+| `region_engine.mito.fulltext_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically (default)<br/>- `disable`: never |
+| `region_engine.mito.fulltext_index.mem_threshold_on_create` | String | `auto` | Memory threshold for index creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold |
 | `region_engine.mito.memtable` | -- | -- | -- |
 | `region_engine.mito.memtable.type` | String | `time_series` | Memtable type.<br/>- `time_series`: time-series memtable<br/>- `partition_tree`: partition tree memtable (experimental) |
 | `region_engine.mito.memtable.index_max_keys_per_shard` | Integer | `8192` | The max number of keys in one shard.<br/>Only available for `partition_tree` memtable. |
 | `region_engine.mito.memtable.data_freeze_threshold` | Integer | `32768` | The max rows of data inside the actively writing buffer in one shard.<br/>Only available for `partition_tree` memtable. |
 | `region_engine.mito.memtable.fork_dictionary_bytes` | String | `1GiB` | Max dictionary bytes.<br/>Only available for `partition_tree` memtable. |
 | `region_engine.file` | -- | -- | Enable the file engine. |
 | `logging` | -- | -- | The logging options. |
 | `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. |
 | `logging.level` | String | `None` | The log level. Can be `info`/`debug`/`warn`/`error`. |
@@ -248,6 +259,7 @@
 | `use_memory_store` | Bool | `false` | Store data in memory. |
 | `enable_telemetry` | Bool | `true` | Whether to enable greptimedb telemetry. |
 | `store_key_prefix` | String | `""` | If it's not empty, the metasrv will store all data with this key prefix. |
+| `enable_region_failover` | Bool | `false` | Whether to enable region failover.<br/>This feature is only available on GreptimeDB running on cluster mode and<br/>- Using Remote WAL<br/>- Using shared storage (e.g., s3). |
 | `runtime` | -- | -- | The runtime options. |
 | `runtime.read_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
 | `runtime.write_rt_size` | Integer | `8` | The number of threads to execute the runtime for global write operations. |
@@ -259,7 +271,7 @@
 | `failure_detector` | -- | -- | -- |
 | `failure_detector.threshold` | Float | `8.0` | -- |
 | `failure_detector.min_std_deviation` | String | `100ms` | -- |
-| `failure_detector.acceptable_heartbeat_pause` | String | `3000ms` | -- |
+| `failure_detector.acceptable_heartbeat_pause` | String | `10000ms` | -- |
 | `failure_detector.first_heartbeat_estimate` | String | `1000ms` | -- |
 | `datanode` | -- | -- | Datanode options. |
 | `datanode.client` | -- | -- | Datanode client options. |
@@ -399,17 +411,26 @@
 | `region_engine.mito.scan_parallelism` | Integer | `0` | Parallelism to scan a region (default: 1/4 of cpu cores).<br/>- `0`: using the default value (1/4 of cpu cores).<br/>- `1`: scan in current thread.<br/>- `n`: scan in parallelism n. |
 | `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
 | `region_engine.mito.allow_stale_entries` | Bool | `false` | Whether to allow stale WAL entries read during replay. |
+| `region_engine.mito.index` | -- | -- | The options for index in Mito engine. |
+| `region_engine.mito.index.aux_path` | String | `""` | Auxiliary directory path for the index in filesystem, used to store intermediate files for<br/>creating the index and staging files for searching the index, defaults to `{data_home}/index_intermediate`.<br/>The default name for this directory is `index_intermediate` for backward compatibility.<br/><br/>This path contains two subdirectories:<br/>- `__intm`: for storing intermediate files used during creating index.<br/>- `staging`: for storing staging files used during searching index. |
+| `region_engine.mito.index.staging_size` | String | `2GB` | The max capacity of the staging directory. |
 | `region_engine.mito.inverted_index` | -- | -- | The options for inverted index in Mito engine. |
-| `region_engine.mito.inverted_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically<br/>- `disable`: never |
-| `region_engine.mito.inverted_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically<br/>- `disable`: never |
-| `region_engine.mito.inverted_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically<br/>- `disable`: never |
-| `region_engine.mito.inverted_index.mem_threshold_on_create` | String | `64M` | Memory threshold for performing an external sort during index creation.<br/>Setting to empty will disable external sorting, forcing all sorting operations to happen in memory. |
-| `region_engine.mito.inverted_index.intermediate_path` | String | `""` | File system path to store intermediate files for external sorting (default `{data_home}/index_intermediate`). |
+| `region_engine.mito.inverted_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
+| `region_engine.mito.inverted_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
+| `region_engine.mito.inverted_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically (default)<br/>- `disable`: never |
+| `region_engine.mito.inverted_index.mem_threshold_on_create` | String | `auto` | Memory threshold for performing an external sort during index creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold |
+| `region_engine.mito.inverted_index.intermediate_path` | String | `""` | Deprecated, use `region_engine.mito.index.aux_path` instead. |
+| `region_engine.mito.fulltext_index` | -- | -- | The options for full-text index in Mito engine. |
+| `region_engine.mito.fulltext_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
+| `region_engine.mito.fulltext_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
+| `region_engine.mito.fulltext_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically (default)<br/>- `disable`: never |
+| `region_engine.mito.fulltext_index.mem_threshold_on_create` | String | `auto` | Memory threshold for index creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold |
 | `region_engine.mito.memtable` | -- | -- | -- |
 | `region_engine.mito.memtable.type` | String | `time_series` | Memtable type.<br/>- `time_series`: time-series memtable<br/>- `partition_tree`: partition tree memtable (experimental) |
 | `region_engine.mito.memtable.index_max_keys_per_shard` | Integer | `8192` | The max number of keys in one shard.<br/>Only available for `partition_tree` memtable. |
 | `region_engine.mito.memtable.data_freeze_threshold` | Integer | `32768` | The max rows of data inside the actively writing buffer in one shard.<br/>Only available for `partition_tree` memtable. |
 | `region_engine.mito.memtable.fork_dictionary_bytes` | String | `1GiB` | Max dictionary bytes.<br/>Only available for `partition_tree` memtable. |
 | `region_engine.file` | -- | -- | Enable the file engine. |
 | `logging` | -- | -- | The logging options. |
 | `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. |
 | `logging.level` | String | `None` | The log level. Can be `info`/`debug`/`warn`/`error`. |
@@ -428,3 +449,40 @@
 | `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
 | `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
 | `tracing.tokio_console_addr` | String | `None` | The tokio console address. |
+
+### Flownode
+
+| Key | Type | Default | Descriptions |
+| --- | -----| ------- | ----------- |
+| `mode` | String | `distributed` | The running mode of the flownode. It can be `standalone` or `distributed`. |
+| `node_id` | Integer | `None` | The flownode identifier and should be unique in the cluster. |
+| `grpc` | -- | -- | The gRPC server options. |
+| `grpc.addr` | String | `127.0.0.1:6800` | The address to bind the gRPC server. |
+| `grpc.hostname` | String | `127.0.0.1` | The hostname advertised to the metasrv,<br/>and used for connections from outside the host |
+| `grpc.runtime_size` | Integer | `2` | The number of server worker threads. |
+| `grpc.max_recv_message_size` | String | `512MB` | The maximum receive message size for gRPC server. |
+| `grpc.max_send_message_size` | String | `512MB` | The maximum send message size for gRPC server. |
+| `meta_client` | -- | -- | The metasrv client options. |
+| `meta_client.metasrv_addrs` | Array | -- | The addresses of the metasrv. |
+| `meta_client.timeout` | String | `3s` | Operation timeout. |
+| `meta_client.heartbeat_timeout` | String | `500ms` | Heartbeat timeout. |
+| `meta_client.ddl_timeout` | String | `10s` | DDL timeout. |
+| `meta_client.connect_timeout` | String | `1s` | Connect server timeout. |
+| `meta_client.tcp_nodelay` | Bool | `true` | `TCP_NODELAY` option for accepted connections. |
+| `meta_client.metadata_cache_max_capacity` | Integer | `100000` | The configuration about the cache of the metadata. |
+| `meta_client.metadata_cache_ttl` | String | `10m` | TTL of the metadata cache. |
+| `meta_client.metadata_cache_tti` | String | `5m` | -- |
+| `heartbeat` | -- | -- | The heartbeat options. |
+| `heartbeat.interval` | String | `3s` | Interval for sending heartbeat messages to the metasrv. |
+| `heartbeat.retry_interval` | String | `3s` | Interval for retrying to send heartbeat messages to the metasrv. |
+| `logging` | -- | -- | The logging options. |
+| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. |
+| `logging.level` | String | `None` | The log level. Can be `info`/`debug`/`warn`/`error`. |
+| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
+| `logging.otlp_endpoint` | String | `None` | The OTLP tracing endpoint. |
+| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
+| `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing will be sampled and exported.<br/>Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.<br/>ratio > 1 are treated as 1. Fractions < 0 are treated as 0 |
+| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
+| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
+| `tracing.tokio_console_addr` | String | `None` | The tokio console address. |
@@ -394,31 +394,72 @@ parallel_scan_channel_size = 32
 ## Whether to allow stale WAL entries read during replay.
 allow_stale_entries = false

+## The options for index in Mito engine.
+[region_engine.mito.index]
+
+## Auxiliary directory path for the index in filesystem, used to store intermediate files for
+## creating the index and staging files for searching the index, defaults to `{data_home}/index_intermediate`.
+## The default name for this directory is `index_intermediate` for backward compatibility.
+##
+## This path contains two subdirectories:
+## - `__intm`: for storing intermediate files used during creating index.
+## - `staging`: for storing staging files used during searching index.
+aux_path = ""
+
+## The max capacity of the staging directory.
+staging_size = "2GB"
+
 ## The options for inverted index in Mito engine.
 [region_engine.mito.inverted_index]

 ## Whether to create the index on flush.
-## - `auto`: automatically
+## - `auto`: automatically (default)
 ## - `disable`: never
 create_on_flush = "auto"

 ## Whether to create the index on compaction.
-## - `auto`: automatically
+## - `auto`: automatically (default)
 ## - `disable`: never
 create_on_compaction = "auto"

 ## Whether to apply the index on query
-## - `auto`: automatically
+## - `auto`: automatically (default)
 ## - `disable`: never
 apply_on_query = "auto"

 ## Memory threshold for performing an external sort during index creation.
-## Setting to empty will disable external sorting, forcing all sorting operations to happen in memory.
-mem_threshold_on_create = "64M"
+## - `auto`: automatically determine the threshold based on the system memory size (default)
+## - `unlimited`: no memory limit
+## - `[size]` e.g. `64MB`: fixed memory threshold
+mem_threshold_on_create = "auto"

-## File system path to store intermediate files for external sorting (default `{data_home}/index_intermediate`).
+## Deprecated, use `region_engine.mito.index.aux_path` instead.
 intermediate_path = ""

+## The options for full-text index in Mito engine.
+[region_engine.mito.fulltext_index]
+
+## Whether to create the index on flush.
+## - `auto`: automatically (default)
+## - `disable`: never
+create_on_flush = "auto"
+
+## Whether to create the index on compaction.
+## - `auto`: automatically (default)
+## - `disable`: never
+create_on_compaction = "auto"
+
+## Whether to apply the index on query
+## - `auto`: automatically (default)
+## - `disable`: never
+apply_on_query = "auto"
+
+## Memory threshold for index creation.
+## - `auto`: automatically determine the threshold based on the system memory size (default)
+## - `unlimited`: no memory limit
+## - `[size]` e.g. `64MB`: fixed memory threshold
+mem_threshold_on_create = "auto"
+
 [region_engine.mito.memtable]
 ## Memtable type.
 ## - `time_series`: time-series memtable
@@ -437,6 +478,10 @@ data_freeze_threshold = 32768
 ## Only available for `partition_tree` memtable.
 fork_dictionary_bytes = "1GiB"

+[[region_engine]]
+## Enable the file engine.
+[region_engine.file]
+
 ## The logging options.
 [logging]
 ## The directory to store the log files.
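The new `mem_threshold_on_create = "auto"` setting accepts three forms: `auto`, `unlimited`, or a fixed size such as `64MB`. A hypothetical parser for this tri-state value (illustrative only, not the engine's actual code) could look like:

```rust
/// Illustrative tri-state for `mem_threshold_on_create`; the engine's real
/// representation and size parser may differ.
#[derive(Debug, PartialEq)]
enum MemThreshold {
    Auto,
    Unlimited,
    Fixed(u64), // bytes
}

fn parse_mem_threshold(s: &str) -> Option<MemThreshold> {
    match s {
        "auto" => Some(MemThreshold::Auto),
        "unlimited" => Some(MemThreshold::Unlimited),
        other => {
            // Minimal size parsing for the sketch: only the `MB` suffix is handled.
            let mb = other.strip_suffix("MB")?.trim().parse::<u64>().ok()?;
            Some(MemThreshold::Fixed(mb * 1024 * 1024))
        }
    }
}

fn main() {
    assert_eq!(parse_mem_threshold("auto"), Some(MemThreshold::Auto));
    assert_eq!(
        parse_mem_threshold("64MB"),
        Some(MemThreshold::Fixed(64 * 1024 * 1024))
    );
}
```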
90 config/flownode.example.toml (new file)
@@ -0,0 +1,90 @@
## The running mode of the flownode. It can be `standalone` or `distributed`.
mode = "distributed"

## The flownode identifier and should be unique in the cluster.
## +toml2docs:none-default
node_id = 14

## The gRPC server options.
[grpc]
## The address to bind the gRPC server.
addr = "127.0.0.1:6800"
## The hostname advertised to the metasrv,
## and used for connections from outside the host
hostname = "127.0.0.1"
## The number of server worker threads.
runtime_size = 2
## The maximum receive message size for gRPC server.
max_recv_message_size = "512MB"
## The maximum send message size for gRPC server.
max_send_message_size = "512MB"

## The metasrv client options.
[meta_client]
## The addresses of the metasrv.
metasrv_addrs = ["127.0.0.1:3002"]

## Operation timeout.
timeout = "3s"

## Heartbeat timeout.
heartbeat_timeout = "500ms"

## DDL timeout.
ddl_timeout = "10s"

## Connect server timeout.
connect_timeout = "1s"

## `TCP_NODELAY` option for accepted connections.
tcp_nodelay = true

## The configuration about the cache of the metadata.
metadata_cache_max_capacity = 100000

## TTL of the metadata cache.
metadata_cache_ttl = "10m"

# TTI of the metadata cache.
metadata_cache_tti = "5m"

## The heartbeat options.
[heartbeat]
## Interval for sending heartbeat messages to the metasrv.
interval = "3s"

## Interval for retrying to send heartbeat messages to the metasrv.
retry_interval = "3s"

## The logging options.
[logging]
## The directory to store the log files.
dir = "/tmp/greptimedb/logs"

## The log level. Can be `info`/`debug`/`warn`/`error`.
## +toml2docs:none-default
level = "info"

## Enable OTLP tracing.
enable_otlp_tracing = false

## The OTLP tracing endpoint.
## +toml2docs:none-default
otlp_endpoint = ""

## Whether to append logs to stdout.
append_stdout = true

## The percentage of tracing will be sampled and exported.
## Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.
## ratio > 1 are treated as 1. Fractions < 0 are treated as 0
[logging.tracing_sample_ratio]
default_ratio = 1.0

## The tracing options. Only effect when compiled with `tokio-console` feature.
[tracing]
## The tokio console address.
## +toml2docs:none-default
tokio_console_addr = "127.0.0.1"
@@ -25,6 +25,12 @@ enable_telemetry = true
 ## If it's not empty, the metasrv will store all data with this key prefix.
 store_key_prefix = ""

+## Whether to enable region failover.
+## This feature is only available on GreptimeDB running on cluster mode and
+## - Using Remote WAL
+## - Using shared storage (e.g., s3).
+enable_region_failover = false
+
 ## The runtime options.
 [runtime]
 ## The number of threads to execute the runtime for global read operations.
@@ -54,7 +60,7 @@ max_metadata_value_size = "1500KiB"
 [failure_detector]
 threshold = 8.0
 min_std_deviation = "100ms"
-acceptable_heartbeat_pause = "3000ms"
+acceptable_heartbeat_pause = "10000ms"
 first_heartbeat_estimate = "1000ms"

 ## Datanode options.
@@ -417,31 +417,72 @@ parallel_scan_channel_size = 32
 ## Whether to allow stale WAL entries read during replay.
 allow_stale_entries = false

+## The options for index in Mito engine.
+[region_engine.mito.index]
+
+## Auxiliary directory path for the index in filesystem, used to store intermediate files for
+## creating the index and staging files for searching the index, defaults to `{data_home}/index_intermediate`.
+## The default name for this directory is `index_intermediate` for backward compatibility.
+##
+## This path contains two subdirectories:
+## - `__intm`: for storing intermediate files used during creating index.
+## - `staging`: for storing staging files used during searching index.
+aux_path = ""
+
+## The max capacity of the staging directory.
+staging_size = "2GB"
+
 ## The options for inverted index in Mito engine.
 [region_engine.mito.inverted_index]

 ## Whether to create the index on flush.
-## - `auto`: automatically
+## - `auto`: automatically (default)
 ## - `disable`: never
 create_on_flush = "auto"

 ## Whether to create the index on compaction.
-## - `auto`: automatically
+## - `auto`: automatically (default)
 ## - `disable`: never
 create_on_compaction = "auto"

 ## Whether to apply the index on query
-## - `auto`: automatically
+## - `auto`: automatically (default)
 ## - `disable`: never
 apply_on_query = "auto"

 ## Memory threshold for performing an external sort during index creation.
-## Setting to empty will disable external sorting, forcing all sorting operations to happen in memory.
-mem_threshold_on_create = "64M"
+## - `auto`: automatically determine the threshold based on the system memory size (default)
+## - `unlimited`: no memory limit
+## - `[size]` e.g. `64MB`: fixed memory threshold
+mem_threshold_on_create = "auto"

-## File system path to store intermediate files for external sorting (default `{data_home}/index_intermediate`).
+## Deprecated, use `region_engine.mito.index.aux_path` instead.
 intermediate_path = ""

+## The options for full-text index in Mito engine.
+[region_engine.mito.fulltext_index]
+
+## Whether to create the index on flush.
+## - `auto`: automatically (default)
+## - `disable`: never
+create_on_flush = "auto"
+
+## Whether to create the index on compaction.
+## - `auto`: automatically (default)
+## - `disable`: never
+create_on_compaction = "auto"
+
+## Whether to apply the index on query
+## - `auto`: automatically (default)
+## - `disable`: never
+apply_on_query = "auto"
+
+## Memory threshold for index creation.
+## - `auto`: automatically determine the threshold based on the system memory size (default)
+## - `unlimited`: no memory limit
+## - `[size]` e.g. `64MB`: fixed memory threshold
+mem_threshold_on_create = "auto"
+
 [region_engine.mito.memtable]
 ## Memtable type.
 ## - `time_series`: time-series memtable
@@ -460,6 +501,10 @@ data_freeze_threshold = 32768
 ## Only available for `partition_tree` memtable.
 fork_dictionary_bytes = "1GiB"

+[[region_engine]]
+## Enable the file engine.
+[region_engine.file]
+
 ## The logging options.
 [logging]
 ## The directory to store the log files.
@@ -1,5 +1,9 @@
 FROM centos:7

+# Note: CentOS 7 has reached EOL since 2024-07-01 thus `mirror.centos.org` is no longer available and we need to use `vault.centos.org` instead.
+RUN sed -i s/mirror.centos.org/vault.centos.org/g /etc/yum.repos.d/*.repo
+RUN sed -i s/^#.*baseurl=http/baseurl=http/g /etc/yum.repos.d/*.repo
+
 RUN yum install -y epel-release \
     openssl \
     openssl-devel \
@@ -2,6 +2,10 @@ FROM centos:7 as builder

 ENV LANG en_US.utf8

+# Note: CentOS 7 has reached EOL since 2024-07-01 thus `mirror.centos.org` is no longer available and we need to use `vault.centos.org` instead.
+RUN sed -i s/mirror.centos.org/vault.centos.org/g /etc/yum.repos.d/*.repo
+RUN sed -i s/^#.*baseurl=http/baseurl=http/g /etc/yum.repos.d/*.repo
+
 # Install dependencies
 RUN ulimit -n 1024000 && yum groupinstall -y 'Development Tools'
 RUN yum install -y epel-release \
@@ -25,6 +29,10 @@ ENV PATH /opt/rh/rh-python38/root/usr/bin:/usr/local/bin:/root/.cargo/bin/:$PATH
 ARG RUST_TOOLCHAIN
 RUN rustup toolchain install ${RUST_TOOLCHAIN}

+# Install cargo-binstall with a specific version to adapt the current rust toolchain.
+# Note: if we use the latest version, we may encounter the following `use of unstable library feature 'io_error_downcast'` error.
+RUN cargo install cargo-binstall --version 1.6.6 --locked
+
 # Install nextest.
-RUN cargo install cargo-binstall --locked
 RUN cargo binstall cargo-nextest --no-confirm
@@ -55,6 +55,9 @@ ENV PATH /root/.cargo/bin/:$PATH
 ARG RUST_TOOLCHAIN
 RUN rustup toolchain install ${RUST_TOOLCHAIN}

+# Install cargo-binstall with a specific version to adapt the current rust toolchain.
+# Note: if we use the latest version, we may encounter the following `use of unstable library feature 'io_error_downcast'` error.
+RUN cargo install cargo-binstall --version 1.6.6 --locked
+
 # Install nextest.
-RUN cargo install cargo-binstall --locked
 RUN cargo binstall cargo-nextest --no-confirm
@@ -43,6 +43,9 @@ ENV PATH /root/.cargo/bin/:$PATH
 ARG RUST_TOOLCHAIN
 RUN rustup toolchain install ${RUST_TOOLCHAIN}

+# Install cargo-binstall with a specific version to adapt the current rust toolchain.
+# Note: if we use the latest version, we may encounter the following `use of unstable library feature 'io_error_downcast'` error.
+RUN cargo install cargo-binstall --version 1.6.6 --locked
+
 # Install nextest.
-RUN cargo install cargo-binstall --locked
 RUN cargo binstall cargo-nextest --no-confirm
@@ -1,12 +1,13 @@
 x-custom:
-  initial_cluster_token: &initial_cluster_token "--initial-cluster-token=etcd-cluster"
-  common_settings: &common_settings
+  etcd_initial_cluster_token: &etcd_initial_cluster_token "--initial-cluster-token=etcd-cluster"
+  etcd_common_settings: &etcd_common_settings
     image: quay.io/coreos/etcd:v3.5.10
     entrypoint: /usr/local/bin/etcd
+  greptimedb_image: &greptimedb_image docker.io/greptimedb/greptimedb:latest

 services:
   etcd0:
-    <<: *common_settings
+    <<: *etcd_common_settings
     container_name: etcd0
     ports:
       - 2379:2379
@@ -22,7 +23,7 @@ services:
       - --election-timeout=1250
       - --initial-cluster=etcd0=http://etcd0:2380
       - --initial-cluster-state=new
-      - *initial_cluster_token
+      - *etcd_initial_cluster_token
     volumes:
       - /tmp/greptimedb-cluster-docker-compose/etcd0:/var/lib/etcd
     healthcheck:
@@ -34,7 +35,7 @@ services:
       - greptimedb

   metasrv:
-    image: docker.io/greptime/greptimedb:latest
+    image: *greptimedb_image
     container_name: metasrv
     ports:
       - 3002:3002
@@ -56,19 +57,26 @@ services:
       - greptimedb

   datanode0:
-    image: docker.io/greptime/greptimedb:latest
+    image: *greptimedb_image
     container_name: datanode0
     ports:
       - 3001:3001
+      - 5000:5000
     command:
       - datanode
       - start
       - --node-id=0
       - --rpc-addr=0.0.0.0:3001
       - --rpc-hostname=datanode0:3001
-      - --metasrv-addr=metasrv:3002
+      - --metasrv-addrs=metasrv:3002
+      - --http-addr=0.0.0.0:5000
     volumes:
       - /tmp/greptimedb-cluster-docker-compose/datanode0:/tmp/greptimedb
+    healthcheck:
+      test: [ "CMD", "curl", "-f", "http://datanode0:5000/health" ]
+      interval: 5s
+      timeout: 3s
+      retries: 5
     depends_on:
       metasrv:
         condition: service_healthy
@@ -76,7 +84,7 @@ services:
       - greptimedb

   frontend0:
-    image: docker.io/greptime/greptimedb:latest
+    image: *greptimedb_image
     container_name: frontend0
     ports:
       - 4000:4000
@@ -91,8 +99,31 @@ services:
       - --rpc-addr=0.0.0.0:4001
       - --mysql-addr=0.0.0.0:4002
       - --postgres-addr=0.0.0.0:4003
+    healthcheck:
+      test: [ "CMD", "curl", "-f", "http://frontend0:4000/health" ]
+      interval: 5s
+      timeout: 3s
+      retries: 5
     depends_on:
-      metasrv:
+      datanode0:
         condition: service_healthy
     networks:
       - greptimedb
+
+  flownode0:
+    image: *greptimedb_image
+    container_name: flownode0
+    ports:
+      - 4004:4004
+    command:
+      - flownode
+      - start
+      - --node-id=0
+      - --metasrv-addrs=metasrv:3002
+      - --rpc-addr=0.0.0.0:4004
+      - --rpc-hostname=flownode0:4004
+    depends_on:
+      frontend0:
+        condition: service_healthy
+    networks:
+      - greptimedb
@@ -17,10 +17,11 @@ datatypes.workspace = true
 greptime-proto.workspace = true
 paste = "1.0"
 prost.workspace = true
+serde_json.workspace = true
 snafu.workspace = true

 [build-dependencies]
-tonic-build = "0.9"
+tonic-build = "0.11"

 [dev-dependencies]
 paste = "1.0"
@@ -58,13 +58,23 @@ pub enum Error {
         location: Location,
         source: datatypes::error::Error,
     },
+
+    #[snafu(display("Failed to serialize JSON"))]
+    SerializeJson {
+        #[snafu(source)]
+        error: serde_json::Error,
+        #[snafu(implicit)]
+        location: Location,
+    },
 }

 impl ErrorExt for Error {
     fn status_code(&self) -> StatusCode {
         match self {
             Error::UnknownColumnDataType { .. } => StatusCode::InvalidArguments,
-            Error::IntoColumnDataType { .. } => StatusCode::Unexpected,
+            Error::IntoColumnDataType { .. } | Error::SerializeJson { .. } => {
+                StatusCode::Unexpected
+            }
             Error::ConvertColumnDefaultConstraint { source, .. }
             | Error::InvalidColumnDefaultConstraint { source, .. } => source.status_code(),
         }
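The new `SerializeJson` variant follows the usual snafu pattern: `#[snafu(source)]` wraps the underlying `serde_json::Error`, and `.context(SerializeJsonSnafu)` attaches it at the call site, as the `options_from_fulltext` change further below does. A minimal self-contained sketch of the same pattern:

```rust
use snafu::{ResultExt, Snafu};

#[derive(Debug, Snafu)]
enum Error {
    #[snafu(display("Failed to serialize JSON"))]
    SerializeJson { source: serde_json::Error },
}

fn to_json(v: &serde_json::Value) -> Result<String, Error> {
    // `.context(...)` converts the serde_json error into our variant.
    serde_json::to_string(v).context(SerializeJsonSnafu)
}

fn main() {
    let v = serde_json::json!({ "analyzer": "English" });
    println!("{}", to_json(&v).unwrap());
}
```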
@@ -1843,6 +1843,7 @@ mod tests {
             null_mask: vec![2],
             datatype: ColumnDataType::Boolean as i32,
             datatype_extension: None,
+            options: None,
         };
         assert!(is_column_type_value_eq(
             column1.datatype,
@@ -12,6 +12,8 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

+#![feature(let_chains)]
+
 pub mod error;
 pub mod helper;
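`let_chains` is a nightly feature that allows chaining `let` bindings with `&&` inside a single `if`; the `try_as_column_schema` change below relies on it. A minimal sketch of the syntax:

```rust
#![feature(let_chains)] // nightly-only, mirrors the crate-level gate above

fn first_even(a: Option<i32>, b: Option<i32>) -> Option<i32> {
    // Two fallible bindings plus a boolean guard in a single `if`.
    if let Some(x) = a
        && let Some(y) = b
        && (x + y) % 2 == 0
    {
        return Some(x + y);
    }
    None
}

fn main() {
    assert_eq!(first_even(Some(1), Some(3)), Some(4));
    assert_eq!(first_even(Some(1), None), None);
}
```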
@@ -14,13 +14,19 @@

 use std::collections::HashMap;

-use datatypes::schema::{ColumnDefaultConstraint, ColumnSchema, COMMENT_KEY};
+use datatypes::schema::{
+    ColumnDefaultConstraint, ColumnSchema, FulltextOptions, COMMENT_KEY, FULLTEXT_KEY,
+};
 use snafu::ResultExt;

 use crate::error::{self, Result};
 use crate::helper::ColumnDataTypeWrapper;
-use crate::v1::ColumnDef;
+use crate::v1::{ColumnDef, ColumnOptions, SemanticType};
+
+/// Key used to store fulltext options in gRPC column options.
+const FULLTEXT_GRPC_KEY: &str = "fulltext";

 /// Tries to construct a `ColumnSchema` from the given `ColumnDef`.
 pub fn try_as_column_schema(column_def: &ColumnDef) -> Result<ColumnSchema> {
     let data_type = ColumnDataTypeWrapper::try_new(
         column_def.data_type,
@@ -43,13 +49,147 @@ pub fn try_as_column_schema(column_def: &ColumnDef) -> Result<ColumnSchema> {
     if !column_def.comment.is_empty() {
         metadata.insert(COMMENT_KEY.to_string(), column_def.comment.clone());
     }
+    if let Some(options) = column_def.options.as_ref()
+        && let Some(fulltext) = options.options.get(FULLTEXT_GRPC_KEY)
+    {
+        metadata.insert(FULLTEXT_KEY.to_string(), fulltext.to_string());
+    }

-    Ok(
-        ColumnSchema::new(&column_def.name, data_type.into(), column_def.is_nullable)
-            .with_default_constraint(constraint)
-            .context(error::InvalidColumnDefaultConstraintSnafu {
-                column: &column_def.name,
-            })?
-            .with_metadata(metadata),
-    )
+    ColumnSchema::new(&column_def.name, data_type.into(), column_def.is_nullable)
+        .with_metadata(metadata)
+        .with_time_index(column_def.semantic_type() == SemanticType::Timestamp)
+        .with_default_constraint(constraint)
+        .context(error::InvalidColumnDefaultConstraintSnafu {
+            column: &column_def.name,
+        })
 }

+/// Constructs a `ColumnOptions` from the given `ColumnSchema`.
+pub fn options_from_column_schema(column_schema: &ColumnSchema) -> Option<ColumnOptions> {
+    let mut options = ColumnOptions::default();
+    if let Some(fulltext) = column_schema.metadata().get(FULLTEXT_KEY) {
+        options
+            .options
+            .insert(FULLTEXT_GRPC_KEY.to_string(), fulltext.to_string());
+    }
+
+    (!options.options.is_empty()).then_some(options)
+}
+
+/// Checks if the `ColumnOptions` contains fulltext options.
+pub fn contains_fulltext(options: &Option<ColumnOptions>) -> bool {
+    options
+        .as_ref()
+        .map_or(false, |o| o.options.contains_key(FULLTEXT_GRPC_KEY))
+}
+
+/// Tries to construct a `ColumnOptions` from the given `FulltextOptions`.
+pub fn options_from_fulltext(fulltext: &FulltextOptions) -> Result<Option<ColumnOptions>> {
+    let mut options = ColumnOptions::default();
+
+    let v = serde_json::to_string(fulltext).context(error::SerializeJsonSnafu)?;
+    options.options.insert(FULLTEXT_GRPC_KEY.to_string(), v);
+
+    Ok((!options.options.is_empty()).then_some(options))
+}
#[cfg(test)]
mod tests {

use datatypes::data_type::ConcreteDataType;
use datatypes::schema::FulltextAnalyzer;

use super::*;
use crate::v1::ColumnDataType;

#[test]
fn test_try_as_column_schema() {
let column_def = ColumnDef {
name: "test".to_string(),
data_type: ColumnDataType::String as i32,
is_nullable: true,
default_constraint: ColumnDefaultConstraint::Value("test_default".into())
.try_into()
.unwrap(),
semantic_type: SemanticType::Field as i32,
comment: "test_comment".to_string(),
datatype_extension: None,
options: Some(ColumnOptions {
options: HashMap::from([(
FULLTEXT_GRPC_KEY.to_string(),
"{\"enable\":true}".to_string(),
)]),
}),
};

let schema = try_as_column_schema(&column_def).unwrap();
assert_eq!(schema.name, "test");
assert_eq!(schema.data_type, ConcreteDataType::string_datatype());
assert!(!schema.is_time_index());
assert!(schema.is_nullable());
assert_eq!(
schema.default_constraint().unwrap(),
&ColumnDefaultConstraint::Value("test_default".into())
);
assert_eq!(schema.metadata().get(COMMENT_KEY).unwrap(), "test_comment");
assert_eq!(
schema.fulltext_options().unwrap().unwrap(),
FulltextOptions {
enable: true,
..Default::default()
}
);
}

#[test]
fn test_options_from_column_schema() {
let schema = ColumnSchema::new("test", ConcreteDataType::string_datatype(), true);
let options = options_from_column_schema(&schema);
assert!(options.is_none());

let schema = ColumnSchema::new("test", ConcreteDataType::string_datatype(), true)
.with_fulltext_options(FulltextOptions {
enable: true,
analyzer: FulltextAnalyzer::English,
case_sensitive: false,
})
.unwrap();
let options = options_from_column_schema(&schema).unwrap();
assert_eq!(
options.options.get(FULLTEXT_GRPC_KEY).unwrap(),
"{\"enable\":true,\"analyzer\":\"English\",\"case-sensitive\":false}"
);
}

#[test]
fn test_options_with_fulltext() {
let fulltext = FulltextOptions {
enable: true,
analyzer: FulltextAnalyzer::English,
case_sensitive: false,
};
let options = options_from_fulltext(&fulltext).unwrap().unwrap();
assert_eq!(
options.options.get(FULLTEXT_GRPC_KEY).unwrap(),
"{\"enable\":true,\"analyzer\":\"English\",\"case-sensitive\":false}"
);
}

#[test]
fn test_contains_fulltext() {
let options = ColumnOptions {
options: HashMap::from([(
FULLTEXT_GRPC_KEY.to_string(),
"{\"enable\":true}".to_string(),
)]),
};
assert!(contains_fulltext(&Some(options)));

let options = ColumnOptions {
options: HashMap::new(),
};
assert!(!contains_fulltext(&Some(options)));

assert!(!contains_fulltext(&None));
}
}

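The JSON strings asserted in these tests pin down the wire format stored under the "fulltext" option key. A hedged reproduction of that shape with a stand-in struct (the real type is `datatypes::schema::FulltextOptions`; note the `case-sensitive` field rename):

use serde::{Deserialize, Serialize};

#[derive(Debug, PartialEq, Serialize, Deserialize)]
struct FulltextOptions {
    enable: bool,
    analyzer: String,
    #[serde(rename = "case-sensitive")]
    case_sensitive: bool,
}

fn main() {
    let opts = FulltextOptions {
        enable: true,
        analyzer: "English".to_string(),
        case_sensitive: false,
    };
    // Serializes to the exact string the tests above expect.
    let json = serde_json::to_string(&opts).unwrap();
    assert_eq!(
        json,
        r#"{"enable":true,"analyzer":"English","case-sensitive":false}"#
    );
}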
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use common_error::ext::{BoxedError, ErrorExt};
use common_error::ext::ErrorExt;
use common_error::status_code::StatusCode;
use common_macro::stack_trace_debug;
use snafu::{Location, Snafu};
@@ -38,13 +38,6 @@ pub enum Error {
location: Location,
},

#[snafu(display("Auth failed"))]
AuthBackend {
#[snafu(implicit)]
location: Location,
source: BoxedError,
},

#[snafu(display("User not found, username: {}", username))]
UserNotFound { username: String },

@@ -87,8 +80,7 @@ impl ErrorExt for Error {
Error::IllegalParam { .. } => StatusCode::InvalidArguments,
Error::FileWatch { .. } => StatusCode::InvalidArguments,
Error::InternalState { .. } => StatusCode::Unexpected,
Error::Io { .. } => StatusCode::Internal,
Error::AuthBackend { .. } => StatusCode::Internal,
Error::Io { .. } => StatusCode::StorageUnavailable,

Error::UserNotFound { .. } => StatusCode::UserNotFound,
Error::UnsupportedPasswordType { .. } => StatusCode::UnsupportedPasswordType,

@@ -42,7 +42,7 @@ pub enum PermissionResp {
pub trait PermissionChecker: Send + Sync {
fn check_permission(
&self,
user_info: Option<UserInfoRef>,
user_info: UserInfoRef,
req: PermissionReq,
) -> Result<PermissionResp>;
}
@@ -50,7 +50,7 @@ pub trait PermissionChecker: Send + Sync {
impl PermissionChecker for Option<&PermissionCheckerRef> {
fn check_permission(
&self,
user_info: Option<UserInfoRef>,
user_info: UserInfoRef,
req: PermissionReq,
) -> Result<PermissionResp> {
match self {

@@ -27,7 +27,7 @@ struct DummyPermissionChecker;
impl PermissionChecker for DummyPermissionChecker {
fn check_permission(
&self,
_user_info: Option<UserInfoRef>,
_user_info: UserInfoRef,
req: PermissionReq,
) -> auth::error::Result<PermissionResp> {
match req {
@@ -45,13 +45,13 @@ fn test_permission_checker() {
let checker: PermissionCheckerRef = Arc::new(DummyPermissionChecker);

let grpc_result = checker.check_permission(
None,
auth::userinfo_by_name(None),
PermissionReq::GrpcRequest(&Request::Query(Default::default())),
);
assert_matches!(grpc_result, Ok(PermissionResp::Allow));

let sql_result = checker.check_permission(
None,
auth::userinfo_by_name(None),
PermissionReq::SqlStatement(&Statement::ShowDatabases(ShowDatabases::new(
ShowKind::All,
false,
@@ -59,6 +59,7 @@ fn test_permission_checker() {
);
assert_matches!(sql_result, Ok(PermissionResp::Reject));

let err_result = checker.check_permission(None, PermissionReq::Opentsdb);
let err_result =
checker.check_permission(auth::userinfo_by_name(None), PermissionReq::Opentsdb);
assert_matches!(err_result, Err(InternalState { msg }) if msg == "testing");
}

src/cache/src/error.rs (vendored)
@@ -34,7 +34,7 @@ pub type Result<T> = std::result::Result<T, Error>;
impl ErrorExt for Error {
fn status_code(&self) -> StatusCode {
match self {
Error::CacheRequired { .. } => StatusCode::Internal,
Error::CacheRequired { .. } => StatusCode::Unexpected,
}
}

@@ -18,6 +18,7 @@ use std::fmt::Debug;
use common_error::ext::{BoxedError, ErrorExt};
use common_error::status_code::StatusCode;
use common_macro::stack_trace_debug;
use common_query::error::datafusion_status_code;
use datafusion::error::DataFusionError;
use snafu::{Location, Snafu};

@@ -114,6 +115,18 @@ pub enum Error {
location: Location,
},

#[snafu(display(
"View plan columns changed from: {} to: {}",
origin_names,
actual_names
))]
ViewPlanColumnsChanged {
origin_names: String,
actual_names: String,
#[snafu(implicit)]
location: Location,
},

#[snafu(display("Failed to find table partitions"))]
FindPartitions { source: partition::error::Error },

@@ -173,6 +186,14 @@ pub enum Error {
location: Location,
},

#[snafu(display("Failed to project view columns"))]
ProjectViewColumns {
#[snafu(source)]
error: DataFusionError,
#[snafu(implicit)]
location: Location,
},

#[snafu(display("Table metadata manager error"))]
TableMetadataManager {
source: common_meta::error::Error,
@@ -208,6 +229,21 @@ pub enum Error {
},
}

impl Error {
pub fn should_fail(&self) -> bool {
use Error::*;

matches!(
self,
GetViewCache { .. }
| ViewInfoNotFound { .. }
| DecodePlan { .. }
| ViewPlanColumnsChanged { .. }
| ProjectViewColumns { .. }
)
}
}

pub type Result<T> = std::result::Result<T, Error>;

impl ErrorExt for Error {
@@ -220,6 +256,8 @@ impl ErrorExt for Error {
| Error::CacheNotFound { .. }
| Error::CastManager { .. } => StatusCode::Unexpected,

Error::ViewPlanColumnsChanged { .. } => StatusCode::InvalidArguments,

Error::ViewInfoNotFound { .. } => StatusCode::TableNotFound,

Error::SystemCatalog { .. } => StatusCode::StorageUnavailable,
@@ -245,7 +283,8 @@ impl ErrorExt for Error {
}

Error::QueryAccessDenied { .. } => StatusCode::AccessDenied,
Error::Datafusion { .. } => StatusCode::EngineExecuteQuery,
Error::Datafusion { error, .. } => datafusion_status_code::<Self>(error, None),
Error::ProjectViewColumns { .. } => StatusCode::EngineExecuteQuery,
Error::TableMetadataManager { source, .. } => source.status_code(),
Error::GetViewCache { source, .. } | Error::GetTableCache { source, .. } => {
source.status_code()
@@ -260,7 +299,7 @@ impl ErrorExt for Error {

impl From<Error> for DataFusionError {
fn from(e: Error) -> Self {
DataFusionError::Internal(e.to_string())
DataFusionError::External(Box::new(e))
}
}

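Switching the conversion from `DataFusionError::Internal(e.to_string())` to `DataFusionError::External(Box::new(e))` keeps the original error object alive instead of flattening it to a string, so callers can downcast it back. A hedged sketch of why that matters, using a toy error type rather than the catalog crate's:

use std::error::Error as StdError;
use std::fmt;

#[derive(Debug)]
struct CatalogError(&'static str);

impl fmt::Display for CatalogError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "catalog error: {}", self.0)
    }
}

impl StdError for CatalogError {}

fn main() {
    // External-style wrapping preserves the concrete type...
    let boxed: Box<dyn StdError + Send + Sync> = Box::new(CatalogError("table not found"));
    // ...so it can be recovered later by downcasting.
    assert!(boxed.downcast_ref::<CatalogError>().is_some());

    // Internal-style wrapping keeps only the rendered message.
    let flattened = CatalogError("table not found").to_string();
    assert_eq!(flattened, "catalog error: table not found");
}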
@@ -299,7 +338,7 @@ mod tests {
}
.into();
match e {
DataFusionError::Internal(_) => {}
DataFusionError::External(_) => {}
_ => {
panic!("catalog error should be converted to DataFusionError::Internal")
}

@@ -19,6 +19,7 @@ use std::sync::{Arc, Weak};
use async_stream::try_stream;
use common_catalog::consts::{
DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, INFORMATION_SCHEMA_NAME, NUMBERS_TABLE_ID,
PG_CATALOG_NAME,
};
use common_config::Mode;
use common_error::ext::BoxedError;
@@ -46,6 +47,8 @@ use crate::error::{
};
use crate::information_schema::InformationSchemaProvider;
use crate::kvbackend::TableCacheRef;
use crate::system_schema::pg_catalog::PGCatalogProvider;
use crate::system_schema::SystemSchemaProvider;
use crate::CatalogManager;

/// Access all existing catalog, schema and tables.
@@ -86,10 +89,15 @@ impl KvBackendCatalogManager {
system_catalog: SystemCatalog {
catalog_manager: me.clone(),
catalog_cache: Cache::new(CATALOG_CACHE_MAX_CAPACITY),
pg_catalog_cache: Cache::new(CATALOG_CACHE_MAX_CAPACITY),
information_schema_provider: Arc::new(InformationSchemaProvider::new(
DEFAULT_CATALOG_NAME.to_string(),
me.clone(),
)),
pg_catalog_provider: Arc::new(PGCatalogProvider::new(
DEFAULT_CATALOG_NAME.to_string(),
me.clone(),
)),
},
cache_registry,
})
@@ -295,30 +303,40 @@ fn build_table(table_info_value: TableInfoValue) -> Result<TableRef> {
/// Existing system tables:
/// - public.numbers
/// - information_schema.{tables}
/// - pg_catalog.{tables}
#[derive(Clone)]
struct SystemCatalog {
catalog_manager: Weak<KvBackendCatalogManager>,
catalog_cache: Cache<String, Arc<InformationSchemaProvider>>,
pg_catalog_cache: Cache<String, Arc<PGCatalogProvider>>,

// system_schema_provier for default catalog
information_schema_provider: Arc<InformationSchemaProvider>,
pg_catalog_provider: Arc<PGCatalogProvider>,
}

impl SystemCatalog {
// TODO(j0hn50n133): remove the duplicated hard-coded table names logic
fn schema_names(&self) -> Vec<String> {
vec![INFORMATION_SCHEMA_NAME.to_string()]
vec![
INFORMATION_SCHEMA_NAME.to_string(),
PG_CATALOG_NAME.to_string(),
]
}

fn table_names(&self, schema: &str) -> Vec<String> {
if schema == INFORMATION_SCHEMA_NAME {
self.information_schema_provider.table_names()
} else if schema == DEFAULT_SCHEMA_NAME {
vec![NUMBERS_TABLE_NAME.to_string()]
} else {
vec![]
match schema {
INFORMATION_SCHEMA_NAME => self.information_schema_provider.table_names(),
PG_CATALOG_NAME => self.pg_catalog_provider.table_names(),
DEFAULT_SCHEMA_NAME => {
vec![NUMBERS_TABLE_NAME.to_string()]
}
_ => vec![],
}
}

fn schema_exists(&self, schema: &str) -> bool {
schema == INFORMATION_SCHEMA_NAME
schema == INFORMATION_SCHEMA_NAME || schema == PG_CATALOG_NAME
}

fn table_exists(&self, schema: &str, table: &str) -> bool {
@@ -326,6 +344,8 @@ impl SystemCatalog {
self.information_schema_provider.table(table).is_some()
} else if schema == DEFAULT_SCHEMA_NAME {
table == NUMBERS_TABLE_NAME
} else if schema == PG_CATALOG_NAME {
self.pg_catalog_provider.table(table).is_some()
} else {
false
}
@@ -341,6 +361,19 @@ impl SystemCatalog {
))
});
information_schema_provider.table(table_name)
} else if schema == PG_CATALOG_NAME {
if catalog == DEFAULT_CATALOG_NAME {
self.pg_catalog_provider.table(table_name)
} else {
let pg_catalog_provider =
self.pg_catalog_cache.get_with_by_ref(catalog, move || {
Arc::new(PGCatalogProvider::new(
catalog.to_string(),
self.catalog_manager.clone(),
))
});
pg_catalog_provider.table(table_name)
}
} else if schema == DEFAULT_SCHEMA_NAME && table_name == NUMBERS_TABLE_NAME {
Some(NumbersTable::table(NUMBERS_TABLE_ID))
} else {

@@ -28,12 +28,16 @@ use table::TableRef;
use crate::error::Result;

pub mod error;
pub mod information_schema;
pub mod kvbackend;
pub mod memory;
mod metrics;
pub mod table_source;
pub mod system_schema;
pub mod information_schema {
// TODO(j0hn50n133): re-export to make it compatible with the legacy code, migrate to the new path later
pub use crate::system_schema::information_schema::*;
}

pub mod table_source;
#[async_trait::async_trait]
pub trait CatalogManager: Send + Sync {
fn as_any(&self) -> &dyn Any;

@@ -20,7 +20,8 @@ use std::sync::{Arc, RwLock, Weak};
use async_stream::{stream, try_stream};
use common_catalog::build_db_string;
use common_catalog::consts::{
DEFAULT_CATALOG_NAME, DEFAULT_PRIVATE_SCHEMA_NAME, DEFAULT_SCHEMA_NAME, INFORMATION_SCHEMA_NAME,
DEFAULT_CATALOG_NAME, DEFAULT_PRIVATE_SCHEMA_NAME, DEFAULT_SCHEMA_NAME,
INFORMATION_SCHEMA_NAME, PG_CATALOG_NAME,
};
use futures_util::stream::BoxStream;
use snafu::OptionExt;
@@ -28,6 +29,7 @@ use table::TableRef;

use crate::error::{CatalogNotFoundSnafu, Result, SchemaNotFoundSnafu, TableExistsSnafu};
use crate::information_schema::InformationSchemaProvider;
use crate::system_schema::SystemSchemaProvider;
use crate::{CatalogManager, DeregisterTableRequest, RegisterSchemaRequest, RegisterTableRequest};

type SchemaEntries = HashMap<String, HashMap<String, TableRef>>;
@@ -173,6 +175,12 @@ impl MemoryCatalogManager {
schema: DEFAULT_PRIVATE_SCHEMA_NAME.to_string(),
})
.unwrap();
manager
.register_schema_sync(RegisterSchemaRequest {
catalog: DEFAULT_CATALOG_NAME.to_string(),
schema: PG_CATALOG_NAME.to_string(),
})
.unwrap();
manager
.register_schema_sync(RegisterSchemaRequest {
catalog: DEFAULT_CATALOG_NAME.to_string(),
@@ -196,7 +204,7 @@ impl MemoryCatalogManager {
}

fn catalog_exist_sync(&self, catalog: &str) -> Result<bool> {
Ok(self.catalogs.read().unwrap().get(catalog).is_some())
Ok(self.catalogs.read().unwrap().contains_key(catalog))
}

/// Registers a catalog if it does not exist and returns false if the schema exists.

src/catalog/src/system_schema.rs (new file)
@@ -0,0 +1,164 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

pub mod information_schema;
mod memory_table;
pub mod pg_catalog;

use std::collections::HashMap;
use std::sync::Arc;

use common_error::ext::BoxedError;
use common_recordbatch::{RecordBatchStreamWrapper, SendableRecordBatchStream};
use datatypes::schema::SchemaRef;
use futures_util::StreamExt;
use snafu::ResultExt;
use store_api::data_source::DataSource;
use store_api::storage::ScanRequest;
use table::error::{SchemaConversionSnafu, TablesRecordBatchSnafu};
use table::metadata::{
FilterPushDownType, TableId, TableInfoBuilder, TableInfoRef, TableMetaBuilder, TableType,
};
use table::{Table, TableRef};

use crate::error::Result;

pub trait SystemSchemaProvider {
/// Returns a map of [TableRef] in information schema.
fn tables(&self) -> &HashMap<String, TableRef>;

/// Returns the [TableRef] by table name.
fn table(&self, name: &str) -> Option<TableRef> {
self.tables().get(name).cloned()
}

/// Returns table names in the order of table id.
fn table_names(&self) -> Vec<String> {
let mut tables = self.tables().values().clone().collect::<Vec<_>>();

tables.sort_by(|t1, t2| {
t1.table_info()
.table_id()
.partial_cmp(&t2.table_info().table_id())
.unwrap()
});
tables
.into_iter()
.map(|t| t.table_info().name.clone())
.collect()
}
}

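The default `table_names` above defines an ordering contract: names come back sorted by table id, not by map iteration order. A stand-in sketch of the same logic with plain types (the real trait works over `TableRef`s, which are not reproduced here):

use std::collections::HashMap;

// Hypothetical stand-in: table name -> table id.
fn table_names(tables: &HashMap<String, u32>) -> Vec<String> {
    let mut entries: Vec<_> = tables.iter().collect();
    // Sort by table id, mirroring the trait's default method.
    entries.sort_by_key(|(_, id)| **id);
    entries.into_iter().map(|(name, _)| name.clone()).collect()
}

fn main() {
    let tables = HashMap::from([("b".to_string(), 2), ("a".to_string(), 1)]);
    assert_eq!(table_names(&tables), vec!["a".to_string(), "b".to_string()]);
}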
trait SystemSchemaProviderInner {
fn catalog_name(&self) -> &str;
fn schema_name() -> &'static str;
fn build_table(&self, name: &str) -> Option<TableRef> {
self.system_table(name).map(|table| {
let table_info = Self::table_info(self.catalog_name().to_string(), &table);
let filter_pushdown = FilterPushDownType::Inexact;
let data_source = Arc::new(SystemTableDataSource::new(table));
let table = Table::new(table_info, filter_pushdown, data_source);
Arc::new(table)
})
}
fn system_table(&self, name: &str) -> Option<SystemTableRef>;

fn table_info(catalog_name: String, table: &SystemTableRef) -> TableInfoRef {
let table_meta = TableMetaBuilder::default()
.schema(table.schema())
.primary_key_indices(vec![])
.next_column_id(0)
.build()
.unwrap();
let table_info = TableInfoBuilder::default()
.table_id(table.table_id())
.name(table.table_name().to_string())
.catalog_name(catalog_name)
.schema_name(Self::schema_name().to_string())
.meta(table_meta)
.table_type(table.table_type())
.build()
.unwrap();
Arc::new(table_info)
}
}

pub(crate) trait SystemTable {
fn table_id(&self) -> TableId;

fn table_name(&self) -> &'static str;

fn schema(&self) -> SchemaRef;

fn to_stream(&self, request: ScanRequest) -> Result<SendableRecordBatchStream>;

fn table_type(&self) -> TableType {
TableType::Temporary
}
}

pub(crate) type SystemTableRef = Arc<dyn SystemTable + Send + Sync>;

struct SystemTableDataSource {
table: SystemTableRef,
}

impl SystemTableDataSource {
fn new(table: SystemTableRef) -> Self {
Self { table }
}

fn try_project(&self, projection: &[usize]) -> std::result::Result<SchemaRef, BoxedError> {
let schema = self
.table
.schema()
.try_project(projection)
.context(SchemaConversionSnafu)
.map_err(BoxedError::new)?;
Ok(Arc::new(schema))
}
}

impl DataSource for SystemTableDataSource {
fn get_stream(
&self,
request: ScanRequest,
) -> std::result::Result<SendableRecordBatchStream, BoxedError> {
let projection = request.projection.clone();
let projected_schema = match &projection {
Some(projection) => self.try_project(projection)?,
None => self.table.schema(),
};

let stream = self
.table
.to_stream(request)
.map_err(BoxedError::new)
.context(TablesRecordBatchSnafu)
.map_err(BoxedError::new)?
.map(move |batch| match &projection {
Some(p) => batch.and_then(|b| b.try_project(p)),
None => batch,
});

let stream = RecordBatchStreamWrapper {
schema: projected_schema,
stream: Box::pin(stream),
output_ordering: None,
metrics: Default::default(),
};

Ok(Box::pin(stream))
}
}
@@ -14,8 +14,8 @@

mod cluster_info;
pub mod columns;
mod information_memory_table;
pub mod key_column_usage;
mod memory_table;
mod partitions;
mod predicate;
mod region_peers;
@@ -25,39 +25,37 @@ mod table_constraints;
mod table_names;
pub mod tables;
pub(crate) mod utils;
mod views;

use std::collections::HashMap;
use std::sync::{Arc, Weak};

use common_catalog::consts::{self, DEFAULT_CATALOG_NAME, INFORMATION_SCHEMA_NAME};
use common_error::ext::BoxedError;
use common_recordbatch::{RecordBatchStreamWrapper, SendableRecordBatchStream};
use common_recordbatch::SendableRecordBatchStream;
use datatypes::schema::SchemaRef;
use futures_util::StreamExt;
use lazy_static::lazy_static;
use paste::paste;
pub(crate) use predicate::Predicates;
use snafu::ResultExt;
use store_api::data_source::DataSource;
use store_api::storage::{ScanRequest, TableId};
use table::error::{SchemaConversionSnafu, TablesRecordBatchSnafu};
use table::metadata::{
FilterPushDownType, TableInfoBuilder, TableInfoRef, TableMetaBuilder, TableType,
};
use table::{Table, TableRef};
use table::metadata::TableType;
use table::TableRef;
pub use table_names::*;
use views::InformationSchemaViews;

use self::columns::InformationSchemaColumns;
use super::{SystemSchemaProviderInner, SystemTable, SystemTableRef};
use crate::error::Result;
use crate::information_schema::cluster_info::InformationSchemaClusterInfo;
use crate::information_schema::key_column_usage::InformationSchemaKeyColumnUsage;
use crate::information_schema::memory_table::{get_schema_columns, MemoryTable};
use crate::information_schema::partitions::InformationSchemaPartitions;
use crate::information_schema::region_peers::InformationSchemaRegionPeers;
use crate::information_schema::runtime_metrics::InformationSchemaMetrics;
use crate::information_schema::schemata::InformationSchemaSchemata;
use crate::information_schema::table_constraints::InformationSchemaTableConstraints;
use crate::information_schema::tables::InformationSchemaTables;
use crate::system_schema::information_schema::cluster_info::InformationSchemaClusterInfo;
use crate::system_schema::information_schema::information_memory_table::get_schema_columns;
use crate::system_schema::information_schema::key_column_usage::InformationSchemaKeyColumnUsage;
use crate::system_schema::information_schema::partitions::InformationSchemaPartitions;
use crate::system_schema::information_schema::region_peers::InformationSchemaRegionPeers;
use crate::system_schema::information_schema::runtime_metrics::InformationSchemaMetrics;
use crate::system_schema::information_schema::schemata::InformationSchemaSchemata;
use crate::system_schema::information_schema::table_constraints::InformationSchemaTableConstraints;
use crate::system_schema::information_schema::tables::InformationSchemaTables;
use crate::system_schema::memory_table::MemoryTable;
use crate::system_schema::SystemSchemaProvider;
use crate::CatalogManager;

lazy_static! {
@@ -109,104 +107,22 @@ pub struct InformationSchemaProvider {
tables: HashMap<String, TableRef>,
}

impl InformationSchemaProvider {
pub fn new(catalog_name: String, catalog_manager: Weak<dyn CatalogManager>) -> Self {
let mut provider = Self {
catalog_name,
catalog_manager,
tables: HashMap::new(),
};

provider.build_tables();

provider
}

/// Returns table names in the order of table id.
pub fn table_names(&self) -> Vec<String> {
let mut tables = self.tables.values().clone().collect::<Vec<_>>();

tables.sort_by(|t1, t2| {
t1.table_info()
.table_id()
.partial_cmp(&t2.table_info().table_id())
.unwrap()
});
tables
.into_iter()
.map(|t| t.table_info().name.clone())
.collect()
}

/// Returns a map of [TableRef] in information schema.
pub fn tables(&self) -> &HashMap<String, TableRef> {
impl SystemSchemaProvider for InformationSchemaProvider {
fn tables(&self) -> &HashMap<String, TableRef> {
assert!(!self.tables.is_empty());

&self.tables
}

/// Returns the [TableRef] by table name.
pub fn table(&self, name: &str) -> Option<TableRef> {
self.tables.get(name).cloned()
}
impl SystemSchemaProviderInner for InformationSchemaProvider {
fn catalog_name(&self) -> &str {
&self.catalog_name
}
fn schema_name() -> &'static str {
INFORMATION_SCHEMA_NAME
}

fn build_tables(&mut self) {
let mut tables = HashMap::new();

// SECURITY NOTE:
// Carefully consider the tables that may expose sensitive cluster configurations,
// authentication details, and other critical information.
// Only put these tables under `greptime` catalog to prevent info leak.
if self.catalog_name == DEFAULT_CATALOG_NAME {
tables.insert(
RUNTIME_METRICS.to_string(),
self.build_table(RUNTIME_METRICS).unwrap(),
);
tables.insert(
BUILD_INFO.to_string(),
self.build_table(BUILD_INFO).unwrap(),
);
tables.insert(
REGION_PEERS.to_string(),
self.build_table(REGION_PEERS).unwrap(),
);
tables.insert(
CLUSTER_INFO.to_string(),
self.build_table(CLUSTER_INFO).unwrap(),
);
}

tables.insert(TABLES.to_string(), self.build_table(TABLES).unwrap());
tables.insert(SCHEMATA.to_string(), self.build_table(SCHEMATA).unwrap());
tables.insert(COLUMNS.to_string(), self.build_table(COLUMNS).unwrap());
tables.insert(
KEY_COLUMN_USAGE.to_string(),
self.build_table(KEY_COLUMN_USAGE).unwrap(),
);
tables.insert(
TABLE_CONSTRAINTS.to_string(),
self.build_table(TABLE_CONSTRAINTS).unwrap(),
);

// Add memory tables
for name in MEMORY_TABLES.iter() {
tables.insert((*name).to_string(), self.build_table(name).expect(name));
}

self.tables = tables;
}

fn build_table(&self, name: &str) -> Option<TableRef> {
self.information_table(name).map(|table| {
let table_info = Self::table_info(self.catalog_name.clone(), &table);
let filter_pushdown = FilterPushDownType::Inexact;
let data_source = Arc::new(InformationTableDataSource::new(table));
let table = Table::new(table_info, filter_pushdown, data_source);
Arc::new(table)
})
}

fn information_table(&self, name: &str) -> Option<InformationTableRef> {
fn system_table(&self, name: &str) -> Option<SystemTableRef> {
match name.to_ascii_lowercase().as_str() {
TABLES => Some(Arc::new(InformationSchemaTables::new(
self.catalog_name.clone(),
@@ -262,27 +178,73 @@ impl InformationSchemaProvider {
CLUSTER_INFO => Some(Arc::new(InformationSchemaClusterInfo::new(
self.catalog_manager.clone(),
)) as _),
VIEWS => Some(Arc::new(InformationSchemaViews::new(
self.catalog_name.clone(),
self.catalog_manager.clone(),
)) as _),
_ => None,
}
}
}

fn table_info(catalog_name: String, table: &InformationTableRef) -> TableInfoRef {
let table_meta = TableMetaBuilder::default()
.schema(table.schema())
.primary_key_indices(vec![])
.next_column_id(0)
.build()
.unwrap();
let table_info = TableInfoBuilder::default()
.table_id(table.table_id())
.name(table.table_name().to_string())
.catalog_name(catalog_name)
.schema_name(INFORMATION_SCHEMA_NAME.to_string())
.meta(table_meta)
.table_type(table.table_type())
.build()
.unwrap();
Arc::new(table_info)
impl InformationSchemaProvider {
pub fn new(catalog_name: String, catalog_manager: Weak<dyn CatalogManager>) -> Self {
let mut provider = Self {
catalog_name,
catalog_manager,
tables: HashMap::new(),
};

provider.build_tables();

provider
}

fn build_tables(&mut self) {
let mut tables = HashMap::new();

// SECURITY NOTE:
// Carefully consider the tables that may expose sensitive cluster configurations,
// authentication details, and other critical information.
// Only put these tables under `greptime` catalog to prevent info leak.
if self.catalog_name == DEFAULT_CATALOG_NAME {
tables.insert(
RUNTIME_METRICS.to_string(),
self.build_table(RUNTIME_METRICS).unwrap(),
);
tables.insert(
BUILD_INFO.to_string(),
self.build_table(BUILD_INFO).unwrap(),
);
tables.insert(
REGION_PEERS.to_string(),
self.build_table(REGION_PEERS).unwrap(),
);
tables.insert(
CLUSTER_INFO.to_string(),
self.build_table(CLUSTER_INFO).unwrap(),
);
}

tables.insert(TABLES.to_string(), self.build_table(TABLES).unwrap());
tables.insert(VIEWS.to_string(), self.build_table(VIEWS).unwrap());
tables.insert(SCHEMATA.to_string(), self.build_table(SCHEMATA).unwrap());
tables.insert(COLUMNS.to_string(), self.build_table(COLUMNS).unwrap());
tables.insert(
KEY_COLUMN_USAGE.to_string(),
self.build_table(KEY_COLUMN_USAGE).unwrap(),
);
tables.insert(
TABLE_CONSTRAINTS.to_string(),
self.build_table(TABLE_CONSTRAINTS).unwrap(),
);

// Add memory tables
for name in MEMORY_TABLES.iter() {
tables.insert((*name).to_string(), self.build_table(name).expect(name));
}

self.tables = tables;
}
}

@@ -300,57 +262,28 @@ trait InformationTable {
}
}

type InformationTableRef = Arc<dyn InformationTable + Send + Sync>;

struct InformationTableDataSource {
table: InformationTableRef,
}

impl InformationTableDataSource {
fn new(table: InformationTableRef) -> Self {
Self { table }
// Provide compatibility for legacy `information_schema` code.
impl<T> SystemTable for T
where
T: InformationTable,
{
fn table_id(&self) -> TableId {
InformationTable::table_id(self)
}

fn try_project(&self, projection: &[usize]) -> std::result::Result<SchemaRef, BoxedError> {
let schema = self
.table
.schema()
.try_project(projection)
.context(SchemaConversionSnafu)
.map_err(BoxedError::new)?;
Ok(Arc::new(schema))
}
}

impl DataSource for InformationTableDataSource {
fn get_stream(
&self,
request: ScanRequest,
) -> std::result::Result<SendableRecordBatchStream, BoxedError> {
let projection = request.projection.clone();
let projected_schema = match &projection {
Some(projection) => self.try_project(projection)?,
None => self.table.schema(),
};

let stream = self
.table
.to_stream(request)
.map_err(BoxedError::new)
.context(TablesRecordBatchSnafu)
.map_err(BoxedError::new)?
.map(move |batch| match &projection {
Some(p) => batch.and_then(|b| b.try_project(p)),
None => batch,
});

let stream = RecordBatchStreamWrapper {
schema: projected_schema,
stream: Box::pin(stream),
output_ordering: None,
metrics: Default::default(),
};

Ok(Box::pin(stream))
fn table_name(&self) -> &'static str {
InformationTable::table_name(self)
}

fn schema(&self) -> SchemaRef {
InformationTable::schema(self)
}

fn table_type(&self) -> TableType {
InformationTable::table_type(self)
}

fn to_stream(&self, request: ScanRequest) -> Result<SendableRecordBatchStream> {
InformationTable::to_stream(self, request)
}
}
@@ -41,7 +41,7 @@ use store_api::storage::{ScanRequest, TableId};

use super::CLUSTER_INFO;
use crate::error::{CreateRecordBatchSnafu, InternalSnafu, ListNodesSnafu, Result};
use crate::information_schema::{utils, InformationTable, Predicates};
use crate::system_schema::information_schema::{utils, InformationTable, Predicates};
use crate::CatalogManager;

const PEER_ID: &str = "peer_id";
@@ -15,17 +15,19 @@
use std::sync::Arc;

use common_catalog::consts::{METRIC_ENGINE, MITO_ENGINE};
use datatypes::prelude::{ConcreteDataType, VectorRef};
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
use datatypes::vectors::{Int64Vector, StringVector};
use datatypes::schema::{Schema, SchemaRef};
use datatypes::vectors::{Int64Vector, StringVector, VectorRef};

use crate::information_schema::table_names::*;
use super::table_names::*;
use crate::system_schema::memory_table::tables::{
bigint_column, datetime_column, string_column, string_columns,
};

const NO_VALUE: &str = "NO";

/// Find the schema and columns by the table_name, only valid for memory tables.
/// Safety: the user MUST ensure the table schema exists, panic otherwise.
pub fn get_schema_columns(table_name: &str) -> (SchemaRef, Vec<VectorRef>) {
pub(super) fn get_schema_columns(table_name: &str) -> (SchemaRef, Vec<VectorRef>) {
let (column_schemas, columns): (_, Vec<VectorRef>) = match table_name {
COLUMN_PRIVILEGES => (
string_columns(&[
@@ -80,7 +82,7 @@ pub fn get_schema_columns(table_name: &str) -> (SchemaRef, Vec<VectorRef>) {
"GIT_BRANCH",
"GIT_COMMIT",
"GIT_COMMIT_SHORT",
"GIT_DIRTY",
"GIT_CLEAN",
"PKG_VERSION",
]),
vec![
@@ -89,7 +91,7 @@ pub fn get_schema_columns(table_name: &str) -> (SchemaRef, Vec<VectorRef>) {
Arc::new(StringVector::from(vec![build_info
.commit_short
.to_string()])),
Arc::new(StringVector::from(vec![build_info.dirty.to_string()])),
Arc::new(StringVector::from(vec![build_info.clean.to_string()])),
Arc::new(StringVector::from(vec![build_info.version.to_string()])),
],
)
@@ -414,50 +416,3 @@ pub fn get_schema_columns(table_name: &str) -> (SchemaRef, Vec<VectorRef>) {

(Arc::new(Schema::new(column_schemas)), columns)
}

fn string_columns(names: &[&'static str]) -> Vec<ColumnSchema> {
names.iter().map(|name| string_column(name)).collect()
}

fn string_column(name: &str) -> ColumnSchema {
ColumnSchema::new(
str::to_lowercase(name),
ConcreteDataType::string_datatype(),
false,
)
}

fn bigint_column(name: &str) -> ColumnSchema {
ColumnSchema::new(
str::to_lowercase(name),
ConcreteDataType::int64_datatype(),
false,
)
}

fn datetime_column(name: &str) -> ColumnSchema {
ColumnSchema::new(
str::to_lowercase(name),
ConcreteDataType::datetime_datatype(),
false,
)
}

#[cfg(test)]
mod tests {
use super::*;

#[test]
fn test_string_columns() {
let columns = ["a", "b", "c"];
let column_schemas = string_columns(&columns);

assert_eq!(3, column_schemas.len());
for (i, name) in columns.iter().enumerate() {
let cs = column_schemas.get(i).unwrap();

assert_eq!(*name, cs.name);
assert_eq!(ConcreteDataType::string_datatype(), cs.data_type);
}
}
}
@@ -27,6 +27,7 @@ use datatypes::prelude::{ConcreteDataType, MutableVector, ScalarVectorBuilder, V
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
use datatypes::value::Value;
use datatypes::vectors::{ConstantVector, StringVector, StringVectorBuilder, UInt32VectorBuilder};
use futures_util::TryStreamExt;
use snafu::{OptionExt, ResultExt};
use store_api::storage::{ScanRequest, TableId};

@@ -34,7 +35,7 @@ use super::KEY_COLUMN_USAGE;
use crate::error::{
CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
};
use crate::information_schema::{InformationTable, Predicates};
use crate::system_schema::information_schema::{InformationTable, Predicates};
use crate::CatalogManager;

pub const CONSTRAINT_SCHEMA: &str = "constraint_schema";
@@ -211,71 +212,58 @@ impl InformationSchemaKeyColumnUsageBuilder {
.context(UpgradeWeakCatalogManagerRefSnafu)?;
let predicates = Predicates::from_scan_request(&request);

let mut primary_constraints = vec![];

for schema_name in catalog_manager.schema_names(&catalog_name).await? {
if !catalog_manager
.schema_exists(&catalog_name, &schema_name)
.await?
{
continue;
}
let mut stream = catalog_manager.tables(&catalog_name, &schema_name);

for table_name in catalog_manager
.table_names(&catalog_name, &schema_name)
.await?
{
if let Some(table) = catalog_manager
.table(&catalog_name, &schema_name, &table_name)
.await?
{
let keys = &table.table_info().meta.primary_key_indices;
let schema = table.schema();
while let Some(table) = stream.try_next().await? {
let mut primary_constraints = vec![];

for (idx, column) in schema.column_schemas().iter().enumerate() {
if column.is_time_index() {
self.add_key_column_usage(
&predicates,
&schema_name,
TIME_INDEX_CONSTRAINT_NAME,
&catalog_name,
&schema_name,
&table_name,
&column.name,
1, //always 1 for time index
);
}
if keys.contains(&idx) {
primary_constraints.push((
catalog_name.clone(),
schema_name.clone(),
table_name.clone(),
column.name.clone(),
));
}
// TODO(dimbtp): foreign key constraint not supported yet
let table_info = table.table_info();
let table_name = &table_info.name;
let keys = &table_info.meta.primary_key_indices;
let schema = table.schema();

for (idx, column) in schema.column_schemas().iter().enumerate() {
if column.is_time_index() {
self.add_key_column_usage(
&predicates,
&schema_name,
TIME_INDEX_CONSTRAINT_NAME,
&catalog_name,
&schema_name,
table_name,
&column.name,
1, //always 1 for time index
);
}
} else {
unreachable!();
if keys.contains(&idx) {
primary_constraints.push((
catalog_name.clone(),
schema_name.clone(),
table_name.to_string(),
column.name.clone(),
));
}
// TODO(dimbtp): foreign key constraint not supported yet
}

for (i, (catalog_name, schema_name, table_name, column_name)) in
primary_constraints.into_iter().enumerate()
{
self.add_key_column_usage(
&predicates,
&schema_name,
PRI_CONSTRAINT_NAME,
&catalog_name,
&schema_name,
&table_name,
&column_name,
i as u32 + 1,
);
}
}
}

for (i, (catalog_name, schema_name, table_name, column_name)) in
primary_constraints.into_iter().enumerate()
{
self.add_key_column_usage(
&predicates,
&schema_name,
PRI_CONSTRAINT_NAME,
&catalog_name,
&schema_name,
&table_name,
&column_name,
i as u32 + 1,
);
}

self.finish()
}

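The rewrite above replaces the per-name `table(...)` lookups with the `tables(...)` stream, consumed with `TryStreamExt::try_next`. A minimal runnable sketch of that consumption pattern, with a plain stream of integers standing in for `TableRef`s:

use futures_util::stream::{self, TryStreamExt};

#[tokio::main]
async fn main() -> Result<(), std::convert::Infallible> {
    // Stand-in for `catalog_manager.tables(catalog, schema)`.
    let mut tables = stream::iter([Ok::<_, std::convert::Infallible>(1u32), Ok(2), Ok(3)]);

    // `try_next` yields Ok(Some(item)) until the stream ends and
    // propagates the first Err via `?`, just like the builder loop above.
    while let Some(table_id) = tables.try_next().await? {
        println!("visiting table {table_id}");
    }
    Ok(())
}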
@@ -44,8 +44,8 @@ use crate::error::{
CreateRecordBatchSnafu, FindPartitionsSnafu, InternalSnafu, Result,
UpgradeWeakCatalogManagerRefSnafu,
};
use crate::information_schema::{InformationTable, Predicates};
use crate::kvbackend::KvBackendCatalogManager;
use crate::system_schema::information_schema::{InformationTable, Predicates};
use crate::CatalogManager;

const TABLE_CATALOG: &str = "table_catalog";
@@ -39,8 +39,8 @@ use crate::error::{
CreateRecordBatchSnafu, FindRegionRoutesSnafu, InternalSnafu, Result,
UpgradeWeakCatalogManagerRefSnafu,
};
use crate::information_schema::{InformationTable, Predicates};
use crate::kvbackend::KvBackendCatalogManager;
use crate::system_schema::information_schema::{InformationTable, Predicates};
use crate::CatalogManager;

const REGION_ID: &str = "region_id";
@@ -33,10 +33,10 @@ use store_api::storage::{ScanRequest, TableId};

use super::SCHEMATA;
use crate::error::{
CreateRecordBatchSnafu, InternalSnafu, Result, SchemaNotFoundSnafu, TableMetadataManagerSnafu,
CreateRecordBatchSnafu, InternalSnafu, Result, TableMetadataManagerSnafu,
UpgradeWeakCatalogManagerRefSnafu,
};
use crate::information_schema::{utils, InformationTable, Predicates};
use crate::system_schema::information_schema::{utils, InformationTable, Predicates};
use crate::CatalogManager;

pub const CATALOG_NAME: &str = "catalog_name";
@@ -172,17 +172,14 @@ impl InformationSchemaSchemataBuilder {

for schema_name in catalog_manager.schema_names(&catalog_name).await? {
let opts = if let Some(table_metadata_manager) = &table_metadata_manager {
let schema_opts = table_metadata_manager
table_metadata_manager
.schema_manager()
.get(SchemaNameKey::new(&catalog_name, &schema_name))
.await
.context(TableMetadataManagerSnafu)?
.context(SchemaNotFoundSnafu {
catalog: &catalog_name,
schema: &schema_name,
})?;

Some(format!("{schema_opts}"))
// information_schema is not available from this
// table_metadata_manager and we return None
.map(|schema_opts| format!("{schema_opts}"))
} else {
None
};
@@ -43,3 +43,4 @@ pub const PARTITIONS: &str = "partitions";
pub const REGION_PEERS: &str = "region_peers";
pub const TABLE_CONSTRAINTS: &str = "table_constraints";
pub const CLUSTER_INFO: &str = "cluster_info";
pub const VIEWS: &str = "views";
src/catalog/src/system_schema/information_schema/tables.rs (new file)
@@ -0,0 +1,372 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::sync::{Arc, Weak};

use arrow_schema::SchemaRef as ArrowSchemaRef;
use common_catalog::consts::INFORMATION_SCHEMA_TABLES_TABLE_ID;
use common_error::ext::BoxedError;
use common_recordbatch::adapter::RecordBatchStreamAdapter;
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
use datafusion::execution::TaskContext;
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
use datatypes::prelude::{ConcreteDataType, ScalarVectorBuilder, VectorRef};
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
use datatypes::value::Value;
use datatypes::vectors::{
DateTimeVectorBuilder, StringVectorBuilder, UInt32VectorBuilder, UInt64VectorBuilder,
};
use futures::TryStreamExt;
use snafu::{OptionExt, ResultExt};
use store_api::storage::{ScanRequest, TableId};
use table::metadata::{TableInfo, TableType};

use super::TABLES;
use crate::error::{
CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
};
use crate::system_schema::information_schema::{InformationTable, Predicates};
use crate::CatalogManager;

pub const TABLE_CATALOG: &str = "table_catalog";
pub const TABLE_SCHEMA: &str = "table_schema";
pub const TABLE_NAME: &str = "table_name";
pub const TABLE_TYPE: &str = "table_type";
pub const VERSION: &str = "version";
pub const ROW_FORMAT: &str = "row_format";
pub const TABLE_ROWS: &str = "table_rows";
pub const DATA_LENGTH: &str = "data_length";
pub const INDEX_LENGTH: &str = "index_length";
pub const MAX_DATA_LENGTH: &str = "max_data_length";
pub const AVG_ROW_LENGTH: &str = "avg_row_length";
pub const DATA_FREE: &str = "data_free";
pub const AUTO_INCREMENT: &str = "auto_increment";
pub const CREATE_TIME: &str = "create_time";
pub const UPDATE_TIME: &str = "update_time";
pub const CHECK_TIME: &str = "check_time";
pub const TABLE_COLLATION: &str = "table_collation";
pub const CHECKSUM: &str = "checksum";
pub const CREATE_OPTIONS: &str = "create_options";
pub const TABLE_COMMENT: &str = "table_comment";
pub const MAX_INDEX_LENGTH: &str = "max_index_length";
pub const TEMPORARY: &str = "temporary";
const TABLE_ID: &str = "table_id";
pub const ENGINE: &str = "engine";
const INIT_CAPACITY: usize = 42;

pub(super) struct InformationSchemaTables {
schema: SchemaRef,
catalog_name: String,
catalog_manager: Weak<dyn CatalogManager>,
}

impl InformationSchemaTables {
pub(super) fn new(catalog_name: String, catalog_manager: Weak<dyn CatalogManager>) -> Self {
Self {
schema: Self::schema(),
catalog_name,
catalog_manager,
}
}

pub(crate) fn schema() -> SchemaRef {
Arc::new(Schema::new(vec![
ColumnSchema::new(TABLE_CATALOG, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(TABLE_SCHEMA, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(TABLE_NAME, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(TABLE_TYPE, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(TABLE_ID, ConcreteDataType::uint32_datatype(), true),
ColumnSchema::new(DATA_LENGTH, ConcreteDataType::uint64_datatype(), true),
ColumnSchema::new(MAX_DATA_LENGTH, ConcreteDataType::uint64_datatype(), true),
ColumnSchema::new(INDEX_LENGTH, ConcreteDataType::uint64_datatype(), true),
ColumnSchema::new(MAX_INDEX_LENGTH, ConcreteDataType::uint64_datatype(), true),
ColumnSchema::new(AVG_ROW_LENGTH, ConcreteDataType::uint64_datatype(), true),
ColumnSchema::new(ENGINE, ConcreteDataType::string_datatype(), true),
ColumnSchema::new(VERSION, ConcreteDataType::uint64_datatype(), true),
ColumnSchema::new(ROW_FORMAT, ConcreteDataType::string_datatype(), true),
ColumnSchema::new(TABLE_ROWS, ConcreteDataType::uint64_datatype(), true),
ColumnSchema::new(DATA_FREE, ConcreteDataType::uint64_datatype(), true),
ColumnSchema::new(AUTO_INCREMENT, ConcreteDataType::uint64_datatype(), true),
ColumnSchema::new(CREATE_TIME, ConcreteDataType::datetime_datatype(), true),
ColumnSchema::new(UPDATE_TIME, ConcreteDataType::datetime_datatype(), true),
ColumnSchema::new(CHECK_TIME, ConcreteDataType::datetime_datatype(), true),
ColumnSchema::new(TABLE_COLLATION, ConcreteDataType::string_datatype(), true),
ColumnSchema::new(CHECKSUM, ConcreteDataType::uint64_datatype(), true),
ColumnSchema::new(CREATE_OPTIONS, ConcreteDataType::string_datatype(), true),
ColumnSchema::new(TABLE_COMMENT, ConcreteDataType::string_datatype(), true),
ColumnSchema::new(TEMPORARY, ConcreteDataType::string_datatype(), true),
]))
}

fn builder(&self) -> InformationSchemaTablesBuilder {
InformationSchemaTablesBuilder::new(
self.schema.clone(),
self.catalog_name.clone(),
self.catalog_manager.clone(),
)
}
}

impl InformationTable for InformationSchemaTables {
fn table_id(&self) -> TableId {
INFORMATION_SCHEMA_TABLES_TABLE_ID
}

fn table_name(&self) -> &'static str {
TABLES
}

fn schema(&self) -> SchemaRef {
self.schema.clone()
}

fn to_stream(&self, request: ScanRequest) -> Result<SendableRecordBatchStream> {
let schema = self.schema.arrow_schema().clone();
let mut builder = self.builder();
let stream = Box::pin(DfRecordBatchStreamAdapter::new(
schema,
futures::stream::once(async move {
builder
.make_tables(Some(request))
.await
.map(|x| x.into_df_record_batch())
.map_err(|err| datafusion::error::DataFusionError::External(Box::new(err)))
}),
));
Ok(Box::pin(
RecordBatchStreamAdapter::try_new(stream)
.map_err(BoxedError::new)
.context(InternalSnafu)?,
))
}
}

/// Builds the `information_schema.TABLE` table row by row
///
/// Columns are based on <https://www.postgresql.org/docs/current/infoschema-columns.html>
struct InformationSchemaTablesBuilder {
schema: SchemaRef,
catalog_name: String,
catalog_manager: Weak<dyn CatalogManager>,

catalog_names: StringVectorBuilder,
schema_names: StringVectorBuilder,
table_names: StringVectorBuilder,
table_types: StringVectorBuilder,
table_ids: UInt32VectorBuilder,
version: UInt64VectorBuilder,
row_format: StringVectorBuilder,
table_rows: UInt64VectorBuilder,
data_length: UInt64VectorBuilder,
max_data_length: UInt64VectorBuilder,
index_length: UInt64VectorBuilder,
avg_row_length: UInt64VectorBuilder,
max_index_length: UInt64VectorBuilder,
data_free: UInt64VectorBuilder,
auto_increment: UInt64VectorBuilder,
create_time: DateTimeVectorBuilder,
update_time: DateTimeVectorBuilder,
check_time: DateTimeVectorBuilder,
table_collation: StringVectorBuilder,
checksum: UInt64VectorBuilder,
create_options: StringVectorBuilder,
table_comment: StringVectorBuilder,
engines: StringVectorBuilder,
temporary: StringVectorBuilder,
}

impl InformationSchemaTablesBuilder {
fn new(
schema: SchemaRef,
catalog_name: String,
catalog_manager: Weak<dyn CatalogManager>,
) -> Self {
Self {
schema,
catalog_name,
catalog_manager,
catalog_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
schema_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
table_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
table_types: StringVectorBuilder::with_capacity(INIT_CAPACITY),
table_ids: UInt32VectorBuilder::with_capacity(INIT_CAPACITY),
data_length: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
max_data_length: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
index_length: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
avg_row_length: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
engines: StringVectorBuilder::with_capacity(INIT_CAPACITY),
version: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
row_format: StringVectorBuilder::with_capacity(INIT_CAPACITY),
table_rows: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
max_index_length: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
data_free: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
auto_increment: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
create_time: DateTimeVectorBuilder::with_capacity(INIT_CAPACITY),
update_time: DateTimeVectorBuilder::with_capacity(INIT_CAPACITY),
check_time: DateTimeVectorBuilder::with_capacity(INIT_CAPACITY),
table_collation: StringVectorBuilder::with_capacity(INIT_CAPACITY),
checksum: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
create_options: StringVectorBuilder::with_capacity(INIT_CAPACITY),
table_comment: StringVectorBuilder::with_capacity(INIT_CAPACITY),
temporary: StringVectorBuilder::with_capacity(INIT_CAPACITY),
}
}

/// Construct the `information_schema.tables` virtual table
async fn make_tables(&mut self, request: Option<ScanRequest>) -> Result<RecordBatch> {
let catalog_name = self.catalog_name.clone();
let catalog_manager = self
.catalog_manager
.upgrade()
.context(UpgradeWeakCatalogManagerRefSnafu)?;
let predicates = Predicates::from_scan_request(&request);

for schema_name in catalog_manager.schema_names(&catalog_name).await? {
let mut stream = catalog_manager.tables(&catalog_name, &schema_name);

while let Some(table) = stream.try_next().await? {
let table_info = table.table_info();
self.add_table(
&predicates,
&catalog_name,
&schema_name,
table_info,
table.table_type(),
);
}
}

self.finish()
}

#[allow(clippy::too_many_arguments)]
fn add_table(
&mut self,
predicates: &Predicates,
catalog_name: &str,
schema_name: &str,
table_info: Arc<TableInfo>,
table_type: TableType,
) {
let table_name = table_info.name.as_ref();
let table_id = table_info.table_id();
let engine = table_info.meta.engine.as_ref();

let table_type_text = match table_type {
TableType::Base => "BASE TABLE",
|
||||
TableType::View => "VIEW",
|
||||
TableType::Temporary => "LOCAL TEMPORARY",
|
||||
};
|
||||
|
||||
let row = [
|
||||
(TABLE_CATALOG, &Value::from(catalog_name)),
|
||||
(TABLE_SCHEMA, &Value::from(schema_name)),
|
||||
(TABLE_NAME, &Value::from(table_name)),
|
||||
(TABLE_TYPE, &Value::from(table_type_text)),
|
||||
];
|
||||
|
||||
if !predicates.eval(&row) {
|
||||
return;
|
||||
}
|
||||
|
||||
self.catalog_names.push(Some(catalog_name));
|
||||
self.schema_names.push(Some(schema_name));
|
||||
self.table_names.push(Some(table_name));
|
||||
self.table_types.push(Some(table_type_text));
|
||||
self.table_ids.push(Some(table_id));
|
||||
// TODO(sunng87): use real data for these fields
|
||||
self.data_length.push(Some(0));
|
||||
self.max_data_length.push(Some(0));
|
||||
self.index_length.push(Some(0));
|
||||
self.avg_row_length.push(Some(0));
|
||||
self.max_index_length.push(Some(0));
|
||||
self.checksum.push(Some(0));
|
||||
self.table_rows.push(Some(0));
|
||||
self.data_free.push(Some(0));
|
||||
self.auto_increment.push(Some(0));
|
||||
self.row_format.push(Some("Fixed"));
|
||||
self.table_collation.push(None);
|
||||
self.update_time.push(None);
|
||||
self.check_time.push(None);
|
||||
|
||||
// use mariadb default table version number here
|
||||
self.version.push(Some(11));
|
||||
self.table_comment.push(table_info.desc.as_deref());
|
||||
self.create_options
|
||||
.push(Some(table_info.meta.options.to_string().as_ref()));
|
||||
self.create_time
|
||||
.push(Some(table_info.meta.created_on.timestamp_millis().into()));
|
||||
|
||||
self.temporary
|
||||
.push(if matches!(table_type, TableType::Temporary) {
|
||||
Some("Y")
|
||||
} else {
|
||||
Some("N")
|
||||
});
|
||||
self.engines.push(Some(engine));
|
||||
}
|
||||
|
||||
fn finish(&mut self) -> Result<RecordBatch> {
|
||||
let columns: Vec<VectorRef> = vec![
|
||||
Arc::new(self.catalog_names.finish()),
|
||||
Arc::new(self.schema_names.finish()),
|
||||
Arc::new(self.table_names.finish()),
|
||||
Arc::new(self.table_types.finish()),
|
||||
Arc::new(self.table_ids.finish()),
|
||||
Arc::new(self.data_length.finish()),
|
||||
Arc::new(self.max_data_length.finish()),
|
||||
Arc::new(self.index_length.finish()),
|
||||
Arc::new(self.max_index_length.finish()),
|
||||
Arc::new(self.avg_row_length.finish()),
|
||||
Arc::new(self.engines.finish()),
|
||||
Arc::new(self.version.finish()),
|
||||
Arc::new(self.row_format.finish()),
|
||||
Arc::new(self.table_rows.finish()),
|
||||
Arc::new(self.data_free.finish()),
|
||||
Arc::new(self.auto_increment.finish()),
|
||||
Arc::new(self.create_time.finish()),
|
||||
Arc::new(self.update_time.finish()),
|
||||
Arc::new(self.check_time.finish()),
|
||||
Arc::new(self.table_collation.finish()),
|
||||
Arc::new(self.checksum.finish()),
|
||||
Arc::new(self.create_options.finish()),
|
||||
Arc::new(self.table_comment.finish()),
|
||||
Arc::new(self.temporary.finish()),
|
||||
];
|
||||
RecordBatch::new(self.schema.clone(), columns).context(CreateRecordBatchSnafu)
|
||||
}
|
||||
}
|
||||
|
||||
impl DfPartitionStream for InformationSchemaTables {
|
||||
fn schema(&self) -> &ArrowSchemaRef {
|
||||
self.schema.arrow_schema()
|
||||
}
|
||||
|
||||
fn execute(&self, _: Arc<TaskContext>) -> DfSendableRecordBatchStream {
|
||||
let schema = self.schema.arrow_schema().clone();
|
||||
let mut builder = self.builder();
|
||||
Box::pin(DfRecordBatchStreamAdapter::new(
|
||||
schema,
|
||||
futures::stream::once(async move {
|
||||
builder
|
||||
.make_tables(None)
|
||||
.await
|
||||
.map(|x| x.into_df_record_batch())
|
||||
.map_err(Into::into)
|
||||
}),
|
||||
))
|
||||
}
|
||||
}
|
||||
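Editorial note: the `predicates.eval(&row)` check above is the entire pushdown path for these virtual tables, rows that fail the filters extracted from the `ScanRequest` are never pushed into the column builders. A minimal, self-contained sketch of that shape (`Predicate` and `eval` here are simplified stand-ins, not the real `Predicates` type in the catalog crate):

// Simplified stand-in for the catalog crate's predicate pushdown:
// each predicate is an equality check on a named column.
struct Predicate {
    column: &'static str,
    expected: String,
}

struct Predicates(Vec<Predicate>);

impl Predicates {
    // A row matches only if every predicate whose column appears in the
    // row carries the expected value; absent columns are treated as a pass.
    fn eval(&self, row: &[(&str, &str)]) -> bool {
        self.0.iter().all(|p| {
            row.iter()
                .find(|(col, _)| *col == p.column)
                .map(|(_, v)| *v == p.expected)
                .unwrap_or(true)
        })
    }
}

fn main() {
    let predicates = Predicates(vec![Predicate {
        column: "table_schema",
        expected: "public".to_string(),
    }]);
    // Mirrors the row array built in `add_table` before vectors are pushed.
    let row = [("table_catalog", "greptime"), ("table_schema", "public")];
    assert!(predicates.eval(&row));
}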
@@ -15,7 +15,7 @@
use std::sync::{Arc, Weak};

use arrow_schema::SchemaRef as ArrowSchemaRef;
use common_catalog::consts::INFORMATION_SCHEMA_TABLES_TABLE_ID;
use common_catalog::consts::INFORMATION_SCHEMA_VIEW_TABLE_ID;
use common_error::ext::BoxedError;
use common_recordbatch::adapter::RecordBatchStreamAdapter;
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
@@ -26,34 +26,41 @@ use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
use datatypes::prelude::{ConcreteDataType, ScalarVectorBuilder, VectorRef};
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
use datatypes::value::Value;
use datatypes::vectors::{StringVectorBuilder, UInt32VectorBuilder};
use datatypes::vectors::{BooleanVectorBuilder, StringVectorBuilder};
use futures::TryStreamExt;
use snafu::{OptionExt, ResultExt};
use store_api::storage::{ScanRequest, TableId};
use table::metadata::TableType;

use super::TABLES;
use super::VIEWS;
use crate::error::{
    CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
    CastManagerSnafu, CreateRecordBatchSnafu, GetViewCacheSnafu, InternalSnafu, Result,
    UpgradeWeakCatalogManagerRefSnafu, ViewInfoNotFoundSnafu,
};
use crate::information_schema::{InformationTable, Predicates};
use crate::kvbackend::KvBackendCatalogManager;
use crate::system_schema::information_schema::{InformationTable, Predicates};
use crate::CatalogManager;
const INIT_CAPACITY: usize = 42;

pub const TABLE_CATALOG: &str = "table_catalog";
pub const TABLE_SCHEMA: &str = "table_schema";
pub const TABLE_NAME: &str = "table_name";
pub const TABLE_TYPE: &str = "table_type";
const TABLE_ID: &str = "table_id";
const ENGINE: &str = "engine";
const INIT_CAPACITY: usize = 42;
pub const VIEW_DEFINITION: &str = "view_definition";
pub const CHECK_OPTION: &str = "check_option";
pub const IS_UPDATABLE: &str = "is_updatable";
pub const DEFINER: &str = "definer";
pub const SECURITY_TYPE: &str = "security_type";
pub const CHARACTER_SET_CLIENT: &str = "character_set_client";
pub const COLLATION_CONNECTION: &str = "collation_connection";

pub(super) struct InformationSchemaTables {
/// The `information_schema.views` table provides information about views in databases.
pub(super) struct InformationSchemaViews {
    schema: SchemaRef,
    catalog_name: String,
    catalog_manager: Weak<dyn CatalogManager>,
}

impl InformationSchemaTables {
impl InformationSchemaViews {
    pub(super) fn new(catalog_name: String, catalog_manager: Weak<dyn CatalogManager>) -> Self {
        Self {
            schema: Self::schema(),
@@ -67,14 +74,26 @@ impl InformationSchemaTables {
            ColumnSchema::new(TABLE_CATALOG, ConcreteDataType::string_datatype(), false),
            ColumnSchema::new(TABLE_SCHEMA, ConcreteDataType::string_datatype(), false),
            ColumnSchema::new(TABLE_NAME, ConcreteDataType::string_datatype(), false),
            ColumnSchema::new(TABLE_TYPE, ConcreteDataType::string_datatype(), false),
            ColumnSchema::new(TABLE_ID, ConcreteDataType::uint32_datatype(), true),
            ColumnSchema::new(ENGINE, ConcreteDataType::string_datatype(), true),
            ColumnSchema::new(VIEW_DEFINITION, ConcreteDataType::string_datatype(), false),
            ColumnSchema::new(CHECK_OPTION, ConcreteDataType::string_datatype(), true),
            ColumnSchema::new(IS_UPDATABLE, ConcreteDataType::boolean_datatype(), true),
            ColumnSchema::new(DEFINER, ConcreteDataType::string_datatype(), true),
            ColumnSchema::new(SECURITY_TYPE, ConcreteDataType::string_datatype(), true),
            ColumnSchema::new(
                CHARACTER_SET_CLIENT,
                ConcreteDataType::string_datatype(),
                true,
            ),
            ColumnSchema::new(
                COLLATION_CONNECTION,
                ConcreteDataType::string_datatype(),
                true,
            ),
        ]))
    }

    fn builder(&self) -> InformationSchemaTablesBuilder {
        InformationSchemaTablesBuilder::new(
    fn builder(&self) -> InformationSchemaViewsBuilder {
        InformationSchemaViewsBuilder::new(
            self.schema.clone(),
            self.catalog_name.clone(),
            self.catalog_manager.clone(),
@@ -82,13 +101,13 @@ impl InformationSchemaTables {
    }
}

impl InformationTable for InformationSchemaTables {
impl InformationTable for InformationSchemaViews {
    fn table_id(&self) -> TableId {
        INFORMATION_SCHEMA_TABLES_TABLE_ID
        INFORMATION_SCHEMA_VIEW_TABLE_ID
    }

    fn table_name(&self) -> &'static str {
        TABLES
        VIEWS
    }

    fn schema(&self) -> SchemaRef {
@@ -102,12 +121,10 @@ impl InformationTable for InformationSchemaTables {
            schema,
            futures::stream::once(async move {
                builder
                    .make_tables(Some(request))
                    .make_views(Some(request))
                    .await
                    .map(|x| x.into_df_record_batch())
                    .map_err(|err| {
                        datafusion::error::DataFusionError::External(format!("{err:?}").into())
                    })
                    .map_err(|err| datafusion::error::DataFusionError::External(Box::new(err)))
            }),
        ));
        Ok(Box::pin(
@@ -118,10 +135,10 @@ impl InformationTable for InformationSchemaTables {
    }
}

/// Builds the `information_schema.TABLE` table row by row
/// Builds the `information_schema.VIEWS` table row by row
///
/// Columns are based on <https://www.postgresql.org/docs/current/infoschema-columns.html>
struct InformationSchemaTablesBuilder {
/// Columns are based on <https://dev.mysql.com/doc/refman/8.4/en/information-schema-views-table.html>
struct InformationSchemaViewsBuilder {
    schema: SchemaRef,
    catalog_name: String,
    catalog_manager: Weak<dyn CatalogManager>,
@@ -129,12 +146,16 @@ struct InformationSchemaTablesBuilder {
    catalog_names: StringVectorBuilder,
    schema_names: StringVectorBuilder,
    table_names: StringVectorBuilder,
    table_types: StringVectorBuilder,
    table_ids: UInt32VectorBuilder,
    engines: StringVectorBuilder,
    view_definitions: StringVectorBuilder,
    check_options: StringVectorBuilder,
    is_updatable: BooleanVectorBuilder,
    definer: StringVectorBuilder,
    security_type: StringVectorBuilder,
    character_set_client: StringVectorBuilder,
    collation_connection: StringVectorBuilder,
}

impl InformationSchemaTablesBuilder {
impl InformationSchemaViewsBuilder {
    fn new(
        schema: SchemaRef,
        catalog_name: String,
@@ -147,75 +168,84 @@ impl InformationSchemaTablesBuilder {
            catalog_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
            schema_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
            table_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
            table_types: StringVectorBuilder::with_capacity(INIT_CAPACITY),
            table_ids: UInt32VectorBuilder::with_capacity(INIT_CAPACITY),
            engines: StringVectorBuilder::with_capacity(INIT_CAPACITY),
            view_definitions: StringVectorBuilder::with_capacity(INIT_CAPACITY),
            check_options: StringVectorBuilder::with_capacity(INIT_CAPACITY),
            is_updatable: BooleanVectorBuilder::with_capacity(INIT_CAPACITY),
            definer: StringVectorBuilder::with_capacity(INIT_CAPACITY),
            security_type: StringVectorBuilder::with_capacity(INIT_CAPACITY),
            character_set_client: StringVectorBuilder::with_capacity(INIT_CAPACITY),
            collation_connection: StringVectorBuilder::with_capacity(INIT_CAPACITY),
        }
    }

    /// Construct the `information_schema.tables` virtual table
    async fn make_tables(&mut self, request: Option<ScanRequest>) -> Result<RecordBatch> {
    /// Construct the `information_schema.views` virtual table
    async fn make_views(&mut self, request: Option<ScanRequest>) -> Result<RecordBatch> {
        let catalog_name = self.catalog_name.clone();
        let catalog_manager = self
            .catalog_manager
            .upgrade()
            .context(UpgradeWeakCatalogManagerRefSnafu)?;
        let predicates = Predicates::from_scan_request(&request);
        let view_info_cache = catalog_manager
            .as_any()
            .downcast_ref::<KvBackendCatalogManager>()
            .context(CastManagerSnafu)?
            .view_info_cache()?;

        for schema_name in catalog_manager.schema_names(&catalog_name).await? {
            let mut stream = catalog_manager.tables(&catalog_name, &schema_name);

            while let Some(table) = stream.try_next().await? {
                let table_info = table.table_info();
                self.add_table(
                    &predicates,
                    &catalog_name,
                    &schema_name,
                    &table_info.name,
                    table.table_type(),
                    Some(table_info.ident.table_id),
                    Some(&table_info.meta.engine),
                );
                if table_info.table_type == TableType::View {
                    let view_info = view_info_cache
                        .get(table_info.ident.table_id)
                        .await
                        .context(GetViewCacheSnafu)?
                        .context(ViewInfoNotFoundSnafu {
                            name: &table_info.name,
                        })?;
                    self.add_view(
                        &predicates,
                        &catalog_name,
                        &schema_name,
                        &table_info.name,
                        &view_info.definition,
                    )
                }
            }
        }

        self.finish()
    }

    #[allow(clippy::too_many_arguments)]
    fn add_table(
    fn add_view(
        &mut self,
        predicates: &Predicates,
        catalog_name: &str,
        schema_name: &str,
        table_name: &str,
        table_type: TableType,
        table_id: Option<u32>,
        engine: Option<&str>,
        definition: &str,
    ) {
        let table_type = match table_type {
            TableType::Base => "BASE TABLE",
            TableType::View => "VIEW",
            TableType::Temporary => "LOCAL TEMPORARY",
        };

        let row = [
            (TABLE_CATALOG, &Value::from(catalog_name)),
            (TABLE_SCHEMA, &Value::from(schema_name)),
            (TABLE_NAME, &Value::from(table_name)),
            (TABLE_TYPE, &Value::from(table_type)),
        ];

        if !predicates.eval(&row) {
            return;
        }

        self.catalog_names.push(Some(catalog_name));
        self.schema_names.push(Some(schema_name));
        self.table_names.push(Some(table_name));
        self.table_types.push(Some(table_type));
        self.table_ids.push(table_id);
        self.engines.push(engine);
        self.view_definitions.push(Some(definition));
        self.check_options.push(None);
        self.is_updatable.push(Some(true));
        self.definer.push(None);
        self.security_type.push(None);
        self.character_set_client.push(Some("utf8"));
        self.collation_connection.push(Some("utf8_bin"));
    }

    fn finish(&mut self) -> Result<RecordBatch> {
@@ -223,15 +253,19 @@ impl InformationSchemaTablesBuilder {
            Arc::new(self.catalog_names.finish()),
            Arc::new(self.schema_names.finish()),
            Arc::new(self.table_names.finish()),
            Arc::new(self.table_types.finish()),
            Arc::new(self.table_ids.finish()),
            Arc::new(self.engines.finish()),
            Arc::new(self.view_definitions.finish()),
            Arc::new(self.check_options.finish()),
            Arc::new(self.is_updatable.finish()),
            Arc::new(self.definer.finish()),
            Arc::new(self.security_type.finish()),
            Arc::new(self.character_set_client.finish()),
            Arc::new(self.collation_connection.finish()),
        ];
        RecordBatch::new(self.schema.clone(), columns).context(CreateRecordBatchSnafu)
    }
}

impl DfPartitionStream for InformationSchemaTables {
impl DfPartitionStream for InformationSchemaViews {
    fn schema(&self) -> &ArrowSchemaRef {
        self.schema.arrow_schema()
    }
@@ -243,7 +277,7 @@ impl DfPartitionStream for InformationSchemaTables {
            schema,
            futures::stream::once(async move {
                builder
                    .make_tables(None)
                    .make_views(None)
                    .await
                    .map(|x| x.into_df_record_batch())
                    .map_err(Into::into)
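Editorial note: `make_views` resolves a view's definition through the catalog's view-info cache rather than from the table object itself, which is why a cache miss for a known view surfaces as an error instead of being skipped. A self-contained sketch of that lookup shape (a plain `HashMap` stands in for the real async cache, and `ViewInfo` here is a hypothetical stand-in):

use std::collections::HashMap;

// Hypothetical stand-in for the cached view metadata; the real type also
// carries the encoded logical plan and column lists.
struct ViewInfo {
    definition: String,
}

// A plain map standing in for the async view-info cache keyed by table id.
struct ViewInfoCache(HashMap<u32, ViewInfo>);

impl ViewInfoCache {
    // Mirrors the hit-or-not-found split in `make_views`.
    fn get(&self, table_id: u32) -> Result<&ViewInfo, String> {
        self.0
            .get(&table_id)
            .ok_or_else(|| format!("view info not found for table {table_id}"))
    }
}

fn main() {
    let mut cache = ViewInfoCache(HashMap::new());
    cache.0.insert(
        1024,
        ViewInfo {
            definition: "SELECT id, name FROM person".to_string(),
        },
    );
    println!("{}", cache.get(1024).unwrap().definition);
    assert!(cache.get(9999).is_err());
}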
@@ -12,7 +12,9 @@
// See the License for the specific language governing permissions and
// limitations under the License.

mod tables;
mod table_columns;
pub mod tables;

use std::sync::Arc;

use arrow_schema::SchemaRef as ArrowSchemaRef;
@@ -27,22 +29,21 @@ use datatypes::schema::SchemaRef;
use datatypes::vectors::VectorRef;
use snafu::ResultExt;
use store_api::storage::{ScanRequest, TableId};
pub use tables::get_schema_columns;

use super::SystemTable;
use crate::error::{CreateRecordBatchSnafu, InternalSnafu, Result};
use crate::information_schema::InformationTable;

/// A memory table with specified schema and columns.
pub(super) struct MemoryTable {
    table_id: TableId,
    table_name: &'static str,
    schema: SchemaRef,
    columns: Vec<VectorRef>,
pub(crate) struct MemoryTable {
    pub(crate) table_id: TableId,
    pub(crate) table_name: &'static str,
    pub(crate) schema: SchemaRef,
    pub(crate) columns: Vec<VectorRef>,
}

impl MemoryTable {
    /// Creates a memory table with table id, name, schema and columns.
    pub(super) fn new(
    pub fn new(
        table_id: TableId,
        table_name: &'static str,
        schema: SchemaRef,
@@ -56,12 +57,54 @@ impl MemoryTable {
        }
    }

    fn builder(&self) -> MemoryTableBuilder {
    pub fn builder(&self) -> MemoryTableBuilder {
        MemoryTableBuilder::new(self.schema.clone(), self.columns.clone())
    }
}

impl InformationTable for MemoryTable {
pub(crate) struct MemoryTableBuilder {
    schema: SchemaRef,
    columns: Vec<VectorRef>,
}

impl MemoryTableBuilder {
    fn new(schema: SchemaRef, columns: Vec<VectorRef>) -> Self {
        Self { schema, columns }
    }

    /// Construct the `information_schema.{table_name}` virtual table
    pub async fn memory_records(&mut self) -> Result<RecordBatch> {
        if self.columns.is_empty() {
            RecordBatch::new_empty(self.schema.clone()).context(CreateRecordBatchSnafu)
        } else {
            RecordBatch::new(self.schema.clone(), std::mem::take(&mut self.columns))
                .context(CreateRecordBatchSnafu)
        }
    }
}

impl DfPartitionStream for MemoryTable {
    fn schema(&self) -> &ArrowSchemaRef {
        self.schema.arrow_schema()
    }

    fn execute(&self, _: Arc<TaskContext>) -> DfSendableRecordBatchStream {
        let schema = self.schema.arrow_schema().clone();
        let mut builder = self.builder();
        Box::pin(DfRecordBatchStreamAdapter::new(
            schema,
            futures::stream::once(async move {
                builder
                    .memory_records()
                    .await
                    .map(|x| x.into_df_record_batch())
                    .map_err(Into::into)
            }),
        ))
    }
}

impl SystemTable for MemoryTable {
    fn table_id(&self) -> TableId {
        self.table_id
    }
@@ -95,48 +138,6 @@ impl InformationTable for MemoryTable {
    }
}

struct MemoryTableBuilder {
    schema: SchemaRef,
    columns: Vec<VectorRef>,
}

impl MemoryTableBuilder {
    fn new(schema: SchemaRef, columns: Vec<VectorRef>) -> Self {
        Self { schema, columns }
    }

    /// Construct the `information_schema.{table_name}` virtual table
    async fn memory_records(&mut self) -> Result<RecordBatch> {
        if self.columns.is_empty() {
            RecordBatch::new_empty(self.schema.clone()).context(CreateRecordBatchSnafu)
        } else {
            RecordBatch::new(self.schema.clone(), std::mem::take(&mut self.columns))
                .context(CreateRecordBatchSnafu)
        }
    }
}

impl DfPartitionStream for MemoryTable {
    fn schema(&self) -> &ArrowSchemaRef {
        self.schema.arrow_schema()
    }

    fn execute(&self, _: Arc<TaskContext>) -> DfSendableRecordBatchStream {
        let schema = self.schema.arrow_schema().clone();
        let mut builder = self.builder();
        Box::pin(DfRecordBatchStreamAdapter::new(
            schema,
            futures::stream::once(async move {
                builder
                    .memory_records()
                    .await
                    .map(|x| x.into_df_record_batch())
                    .map_err(Into::into)
            }),
        ))
    }
}

#[cfg(test)]
mod tests {
    use std::sync::Arc;
@@ -147,6 +148,7 @@ mod tests {
    use datatypes::vectors::StringVector;

    use super::*;
    use crate::system_schema::SystemTable;

    #[tokio::test]
    async fn test_memory_table() {
@@ -166,8 +168,8 @@ mod tests {
        );

        assert_eq!(42, table.table_id());
        assert_eq!("test", table.table_name());
        assert_eq!(schema, InformationTable::schema(&table));
        assert_eq!("test", table.table_name);
        assert_eq!(schema, SystemTable::schema(&table));

        let stream = table.to_stream(ScanRequest::default()).unwrap();

@@ -196,7 +198,7 @@ mod tests {

        assert_eq!(42, table.table_id());
        assert_eq!("test", table.table_name());
        assert_eq!(schema, InformationTable::schema(&table));
        assert_eq!(schema, SystemTable::schema(&table));

        let stream = table.to_stream(ScanRequest::default()).unwrap();

src/catalog/src/system_schema/memory_table/table_columns.rs (new file, 50 lines)
@@ -0,0 +1,50 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#[macro_export]
macro_rules! memory_table_cols {
    ([$($colname:ident),*], $t:expr) => {
        let t = &$t;
        $(
            let mut $colname = Vec::with_capacity(t.len());
        )*
        paste::paste! {
            for &($([<r_ $colname>]),*) in t {
                $(
                    $colname.push([<r_ $colname>]);
                )*
            }
        }
    };
}

#[cfg(test)]
mod tests {

    #[test]
    fn test_memory_table_columns() {
        memory_table_cols!(
            [oid, typname, typlen],
            [
                (1, "String", -1),
                (2, "Binary", -1),
                (3, "Time", 8),
                (4, "Datetime", 8)
            ]
        );
        assert_eq!(&oid[..], &[1, 2, 3, 4]);
        assert_eq!(&typname[..], &["String", "Binary", "Time", "Datetime"]);
        assert_eq!(&typlen[..], &[-1, -1, 8, 8]);
    }
}
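Editorial note: to make the `paste::paste!` trick in this macro concrete, each tuple column gets a fresh binding `r_<colname>` in the destructuring pattern, so one row-major table fans out into per-column vectors. A hand-expanded, runnable form of a two-column invocation, following the macro definition above:

// Hand-expanded form of
// `memory_table_cols!([oid, typname], [(1, "String"), (2, "Binary")])`.
fn main() {
    let t = &[(1, "String"), (2, "Binary")];
    let mut oid = Vec::with_capacity(t.len());
    let mut typname = Vec::with_capacity(t.len());
    // `paste!` turns `[<r_ oid>]` into the identifier `r_oid`, and so on.
    for &(r_oid, r_typname) in t {
        oid.push(r_oid);
        typname.push(r_typname);
    }
    assert_eq!(oid, vec![1, 2]);
    assert_eq!(typname, vec!["String", "Binary"]);
}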
src/catalog/src/system_schema/memory_table/tables.rs (new file, 79 lines)
@@ -0,0 +1,79 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use datatypes::prelude::ConcreteDataType;
use datatypes::schema::ColumnSchema;

pub fn string_columns(names: &[&'static str]) -> Vec<ColumnSchema> {
    names.iter().map(|name| string_column(name)).collect()
}

pub fn string_column(name: &str) -> ColumnSchema {
    ColumnSchema::new(
        str::to_lowercase(name),
        ConcreteDataType::string_datatype(),
        false,
    )
}

pub fn u32_column(name: &str) -> ColumnSchema {
    ColumnSchema::new(
        str::to_lowercase(name),
        ConcreteDataType::uint32_datatype(),
        false,
    )
}

pub fn i16_column(name: &str) -> ColumnSchema {
    ColumnSchema::new(
        str::to_lowercase(name),
        ConcreteDataType::int16_datatype(),
        false,
    )
}

pub fn bigint_column(name: &str) -> ColumnSchema {
    ColumnSchema::new(
        str::to_lowercase(name),
        ConcreteDataType::int64_datatype(),
        false,
    )
}

pub fn datetime_column(name: &str) -> ColumnSchema {
    ColumnSchema::new(
        str::to_lowercase(name),
        ConcreteDataType::datetime_datatype(),
        false,
    )
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_string_columns() {
        let columns = ["a", "b", "c"];
        let column_schemas = string_columns(&columns);

        assert_eq!(3, column_schemas.len());
        for (i, name) in columns.iter().enumerate() {
            let cs = column_schemas.get(i).unwrap();

            assert_eq!(*name, cs.name);
            assert_eq!(ConcreteDataType::string_datatype(), cs.data_type);
        }
    }
}
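Editorial note: these helpers lowercase the given name and mark the column non-nullable, so a whole memory-table schema can be assembled in a few lines. A hypothetical composition meant to sit alongside the helpers in this module (the column names are invented for illustration; `Schema` is the same `datatypes::schema::Schema` used throughout this diff):

use datatypes::schema::Schema;

// Hypothetical schema built from the helpers above; note that the helpers
// lowercase names, so "TYPNAME" becomes the column "typname".
fn example_schema() -> Schema {
    let mut column_schemas = string_columns(&["TYPNAME", "DESCRIPTION"]);
    column_schemas.push(u32_column("OID"));
    column_schemas.push(i16_column("TYPLEN"));
    Schema::new(column_schemas)
}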
src/catalog/src/system_schema/pg_catalog.rs (new file, 115 lines)
@@ -0,0 +1,115 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

mod pg_catalog_memory_table;
mod table_names;

use std::collections::HashMap;
use std::sync::{Arc, Weak};

use common_catalog::consts::{self, PG_CATALOG_NAME};
use datatypes::schema::ColumnSchema;
use lazy_static::lazy_static;
use paste::paste;
use pg_catalog_memory_table::get_schema_columns;
use table::TableRef;
pub use table_names::*;

use super::memory_table::tables::u32_column;
use super::memory_table::MemoryTable;
use super::{SystemSchemaProvider, SystemSchemaProviderInner, SystemTableRef};
use crate::CatalogManager;

lazy_static! {
    static ref MEMORY_TABLES: &'static [&'static str] = &[table_names::PG_TYPE];
}

/// The column name for the OID column.
/// The OID column is a unique identifier of type u32 for each object in the database.
const OID_COLUMN_NAME: &str = "oid";

fn oid_column() -> ColumnSchema {
    u32_column(OID_COLUMN_NAME)
}

/// [`PGCatalogProvider`] is the provider for a schema named `pg_catalog`, it is not a catalog.
pub struct PGCatalogProvider {
    catalog_name: String,
    _catalog_manager: Weak<dyn CatalogManager>,
    tables: HashMap<String, TableRef>,
}

impl SystemSchemaProvider for PGCatalogProvider {
    fn tables(&self) -> &HashMap<String, TableRef> {
        assert!(!self.tables.is_empty());

        &self.tables
    }
}

// TODO(j0hn50n133): Not sure whether to avoid duplication with `information_schema` or not.
macro_rules! setup_memory_table {
    ($name: expr) => {
        paste! {
            {
                let (schema, columns) = get_schema_columns($name);
                Some(Arc::new(MemoryTable::new(
                    consts::[<PG_CATALOG_ $name _TABLE_ID>],
                    $name,
                    schema,
                    columns
                )) as _)
            }
        }
    };
}

impl PGCatalogProvider {
    pub fn new(catalog_name: String, catalog_manager: Weak<dyn CatalogManager>) -> Self {
        let mut provider = Self {
            catalog_name,
            _catalog_manager: catalog_manager,
            tables: HashMap::new(),
        };
        provider.build_tables();
        provider
    }

    fn build_tables(&mut self) {
        // SECURITY NOTE:
        // Must follow the same security rules as [`InformationSchemaProvider::build_tables`].
        let mut tables = HashMap::new();
        for name in MEMORY_TABLES.iter() {
            tables.insert(name.to_string(), self.build_table(name).expect(name));
        }
        self.tables = tables;
    }
}

impl SystemSchemaProviderInner for PGCatalogProvider {
    fn schema_name() -> &'static str {
        PG_CATALOG_NAME
    }

    fn system_table(&self, name: &str) -> Option<SystemTableRef> {
        match name {
            table_names::PG_TYPE => setup_memory_table!(PG_TYPE),
            _ => None,
        }
    }

    fn catalog_name(&self) -> &str {
        &self.catalog_name
    }
}
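Editorial note: `setup_memory_table!` leans on `paste!` identifier concatenation, so for `$name = PG_TYPE` the fragment `[<PG_CATALOG_ $name _TABLE_ID>]` becomes the constant name `PG_CATALOG_PG_TYPE_TABLE_ID`. A self-contained, runnable demonstration of just that mechanism (the constant and its value here are made up for illustration; the real one lives in `common_catalog::consts`):

use paste::paste;

// Hypothetical constant standing in for the real table id.
const PG_CATALOG_PG_TYPE_TABLE_ID: u32 = 0;

macro_rules! table_id_of {
    ($name:ident) => {
        paste! { [<PG_CATALOG_ $name _TABLE_ID>] }
    };
}

fn main() {
    // `[<PG_CATALOG_ PG_TYPE _TABLE_ID>]` pastes into the identifier
    // `PG_CATALOG_PG_TYPE_TABLE_ID`, which is how `setup_memory_table!`
    // resolves the right constant for each table name.
    assert_eq!(table_id_of!(PG_TYPE), 0);
}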
@@ -0,0 +1,69 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::sync::Arc;

use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
use datatypes::vectors::{Int16Vector, StringVector, UInt32Vector, VectorRef};

use super::oid_column;
use super::table_names::PG_TYPE;
use crate::memory_table_cols;
use crate::system_schema::memory_table::tables::{i16_column, string_column};

fn pg_type_schema_columns() -> (Vec<ColumnSchema>, Vec<VectorRef>) {
    // TODO(j0hn50n133): acquire this information from `DataType` instead of hardcoding it to avoid regression.
    memory_table_cols!(
        [oid, typname, typlen],
        [
            (1, "String", -1),
            (2, "Binary", -1),
            (3, "Int8", 1),
            (4, "Int16", 2),
            (5, "Int32", 4),
            (6, "Int64", 8),
            (7, "UInt8", 1),
            (8, "UInt16", 2),
            (9, "UInt32", 4),
            (10, "UInt64", 8),
            (11, "Float32", 4),
            (12, "Float64", 8),
            (13, "Decimal", 16),
            (14, "Date", 4),
            (15, "DateTime", 8),
            (16, "Timestamp", 8),
            (17, "Time", 8),
            (18, "Duration", 8),
            (19, "Interval", 16),
            (20, "List", -1),
        ]
    );
    (
        // not quite identical with pg, we only follow the definition in pg
        vec![oid_column(), string_column("typname"), i16_column("typlen")],
        vec![
            Arc::new(UInt32Vector::from_vec(oid)), // oid
            Arc::new(StringVector::from(typname)),
            Arc::new(Int16Vector::from_vec(typlen)), // typlen in bytes
        ],
    )
}

pub(super) fn get_schema_columns(table_name: &str) -> (SchemaRef, Vec<VectorRef>) {
    let (column_schemas, columns): (_, Vec<VectorRef>) = match table_name {
        PG_TYPE => pg_type_schema_columns(),
        _ => unreachable!("Unknown table in pg_catalog: {}", table_name),
    };
    (Arc::new(Schema::new(column_schemas)), columns)
}
@@ -12,9 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

fn main() {
    // Trigger this script if the git branch/commit changes
    println!("cargo:rerun-if-changed=.git/refs/heads");

    common_version::setup_build_info();
}

pub const PG_DATABASE: &str = "pg_databases";
pub const PG_NAMESPACE: &str = "pg_namespace";
pub const PG_CLASS: &str = "pg_class";
pub const PG_TYPE: &str = "pg_type";
@@ -17,21 +17,24 @@ use std::sync::Arc;

use bytes::Bytes;
use common_catalog::format_full_table_name;
use common_query::logical_plan::SubstraitPlanDecoderRef;
use common_query::logical_plan::{rename_logical_plan_columns, SubstraitPlanDecoderRef};
use datafusion::common::{ResolvedTableReference, TableReference};
use datafusion::datasource::view::ViewTable;
use datafusion::datasource::{provider_as_source, TableProvider};
use datafusion::logical_expr::TableSource;
use itertools::Itertools;
use session::context::QueryContext;
use snafu::{ensure, OptionExt, ResultExt};
use table::metadata::TableType;
use table::table::adapter::DfTableProviderAdapter;
mod dummy_catalog;
use dummy_catalog::DummyCatalogList;
use table::TableRef;

use crate::error::{
    CastManagerSnafu, DatafusionSnafu, DecodePlanSnafu, GetViewCacheSnafu, QueryAccessDeniedSnafu,
    Result, TableNotExistSnafu, ViewInfoNotFoundSnafu,
    CastManagerSnafu, DatafusionSnafu, DecodePlanSnafu, GetViewCacheSnafu, ProjectViewColumnsSnafu,
    QueryAccessDeniedSnafu, Result, TableNotExistSnafu, ViewInfoNotFoundSnafu,
    ViewPlanColumnsChangedSnafu,
};
use crate::kvbackend::KvBackendCatalogManager;
use crate::CatalogManagerRef;
@@ -43,6 +46,7 @@ pub struct DfTableSourceProvider {
    default_catalog: String,
    default_schema: String,
    plan_decoder: SubstraitPlanDecoderRef,
    enable_ident_normalization: bool,
}

impl DfTableSourceProvider {
@@ -51,14 +55,16 @@ impl DfTableSourceProvider {
        disallow_cross_catalog_query: bool,
        query_ctx: &QueryContext,
        plan_decoder: SubstraitPlanDecoderRef,
        enable_ident_normalization: bool,
    ) -> Self {
        Self {
            catalog_manager,
            disallow_cross_catalog_query,
            resolved_tables: HashMap::new(),
            default_catalog: query_ctx.current_catalog().to_owned(),
            default_schema: query_ctx.current_schema().to_owned(),
            default_schema: query_ctx.current_schema(),
            plan_decoder,
            enable_ident_normalization,
        }
    }

@@ -108,32 +114,7 @@ impl DfTableSourceProvider {
        })?;

        let provider: Arc<dyn TableProvider> = if table.table_info().table_type == TableType::View {
            let catalog_manager = self
                .catalog_manager
                .as_any()
                .downcast_ref::<KvBackendCatalogManager>()
                .context(CastManagerSnafu)?;

            let view_info = catalog_manager
                .view_info_cache()?
                .get(table.table_info().ident.table_id)
                .await
                .context(GetViewCacheSnafu)?
                .context(ViewInfoNotFoundSnafu {
                    name: &table.table_info().name,
                })?;

            // Build the catalog list provider for deserialization.
            let catalog_list = Arc::new(DummyCatalogList::new(self.catalog_manager.clone()));
            let logical_plan = self
                .plan_decoder
                .decode(Bytes::from(view_info.view_info.clone()), catalog_list, true)
                .await
                .context(DecodePlanSnafu {
                    name: &table.table_info().name,
                })?;

            Arc::new(ViewTable::try_new(logical_plan, None).context(DatafusionSnafu)?)
            self.create_view_provider(&table).await?
        } else {
            Arc::new(DfTableProviderAdapter::new(table))
        };
@@ -143,6 +124,80 @@ impl DfTableSourceProvider {
        let _ = self.resolved_tables.insert(resolved_name, source.clone());
        Ok(source)
    }

    async fn create_view_provider(&self, table: &TableRef) -> Result<Arc<dyn TableProvider>> {
        let catalog_manager = self
            .catalog_manager
            .as_any()
            .downcast_ref::<KvBackendCatalogManager>()
            .context(CastManagerSnafu)?;

        let view_info = catalog_manager
            .view_info_cache()?
            .get(table.table_info().ident.table_id)
            .await
            .context(GetViewCacheSnafu)?
            .context(ViewInfoNotFoundSnafu {
                name: &table.table_info().name,
            })?;

        // Build the catalog list provider for deserialization.
        let catalog_list = Arc::new(DummyCatalogList::new(self.catalog_manager.clone()));
        let logical_plan = self
            .plan_decoder
            .decode(Bytes::from(view_info.view_info.clone()), catalog_list, true)
            .await
            .context(DecodePlanSnafu {
                name: &table.table_info().name,
            })?;

        let columns: Vec<_> = view_info.columns.iter().map(|c| c.as_str()).collect();

        let original_plan_columns: Vec<_> =
            view_info.plan_columns.iter().map(|c| c.as_str()).collect();

        let plan_columns: Vec<_> = logical_plan
            .schema()
            .columns()
            .into_iter()
            .map(|c| c.name)
            .collect();

        // Only check columns number, because substrait doesn't include aliases currently.
        // See https://github.com/apache/datafusion/issues/10815#issuecomment-2158666881
        // and https://github.com/apache/datafusion/issues/6489
        // TODO(dennis): check column names
        ensure!(
            original_plan_columns.len() == plan_columns.len(),
            ViewPlanColumnsChangedSnafu {
                origin_names: original_plan_columns.iter().join(","),
                actual_names: plan_columns.iter().join(","),
            }
        );

        // We have to do `columns` projection here, because
        // substrait doesn't include aliases neither for tables nor for columns:
        // https://github.com/apache/datafusion/issues/10815#issuecomment-2158666881
        let logical_plan = if !columns.is_empty() {
            rename_logical_plan_columns(
                self.enable_ident_normalization,
                logical_plan,
                plan_columns
                    .iter()
                    .map(|c| c.as_str())
                    .zip(columns.into_iter())
                    .collect(),
            )
            .context(ProjectViewColumnsSnafu)?
        } else {
            logical_plan
        };

        Ok(Arc::new(
            ViewTable::try_new(logical_plan, Some(view_info.definition.to_string()))
                .context(DatafusionSnafu)?,
        ))
    }
}

#[cfg(test)]
@@ -162,6 +217,7 @@ mod tests {
            true,
            query_ctx,
            DummyDecoder::arc(),
            true,
        );

        let table_ref = TableReference::bare("table_name");
@@ -277,12 +333,19 @@ mod tests {
        let logical_plan = vec![1, 2, 3];
        // Create view metadata
        table_metadata_manager
            .create_view_metadata(view_info.clone().into(), logical_plan, HashSet::new())
            .create_view_metadata(
                view_info.clone().into(),
                logical_plan,
                HashSet::new(),
                vec!["a".to_string(), "b".to_string()],
                vec!["id".to_string(), "name".to_string()],
                "definition".to_string(),
            )
            .await
            .unwrap();

        let mut table_provider =
            DfTableSourceProvider::new(catalog_manager, true, query_ctx, MockDecoder::arc());
            DfTableSourceProvider::new(catalog_manager, true, query_ctx, MockDecoder::arc(), true);

        // View not found
        let table_ref = TableReference::bare("not_exists_view");
@@ -290,6 +353,12 @@ mod tests {

        let table_ref = TableReference::bare(view_info.name);
        let source = table_provider.resolve_table(table_ref).await.unwrap();
        assert_eq!(*source.get_logical_plan().unwrap(), mock_plan());
        assert_eq!(
            r#"
Projection: person.id AS a, person.name AS b
  Filter: person.id > Int32(500)
    TableScan: person"#,
            format!("\n{:?}", source.get_logical_plan().unwrap())
        );
    }
}

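Editorial note: the projection step in `create_view_provider` exists because the decoded substrait plan comes back with the view's underlying column names, so the stored output columns are re-applied by pairing the two lists positionally; that is exactly what the test's expected `Projection: person.id AS a, person.name AS b` reflects. A tiny self-contained sketch of the mapping handed to `rename_logical_plan_columns`:

// Decoded plan columns are zipped positionally with the view's stored
// output columns; each pair becomes `<plan column> AS <view column>`.
fn main() {
    let plan_columns = ["id", "name"]; // columns of the decoded plan
    let view_columns = ["a", "b"]; // columns stored with the view
    let mapping: Vec<(&str, &str)> = plan_columns
        .iter()
        .copied()
        .zip(view_columns.iter().copied())
        .collect();
    assert_eq!(mapping, [("id", "a"), ("name", "b")]);
}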
@@ -49,4 +49,4 @@ tracing-subscriber = { version = "0.3", features = ["env-filter"] }

[dev-dependencies.substrait_proto]
package = "substrait"
version = "0.17"
version = "0.37"
@@ -53,13 +53,6 @@ pub enum Error {
        source: common_grpc::Error,
    },

    #[snafu(display("Column datatype error"))]
    ColumnDataType {
        #[snafu(implicit)]
        location: Location,
        source: api::error::Error,
    },

    #[snafu(display("Illegal GRPC client state: {}", err_msg))]
    IllegalGrpcClientState {
        err_msg: String,
@@ -137,7 +130,6 @@ impl ErrorExt for Error {
    fn status_code(&self) -> StatusCode {
        match self {
            Error::IllegalFlightMessages { .. }
            | Error::ColumnDataType { .. }
            | Error::MissingField { .. }
            | Error::IllegalDatabaseResponse { .. }
            | Error::ClientStreaming { .. } => StatusCode::Internal,

@@ -74,6 +74,7 @@ substrait.workspace = true
table.workspace = true
tokio.workspace = true
toml.workspace = true
tonic.workspace = true
tracing-appender = "0.2"

[target.'cfg(not(windows))'.dependencies]
@@ -82,12 +83,10 @@ tikv-jemallocator = "0.5"
[dev-dependencies]
client = { workspace = true, features = ["testing"] }
common-test-util.workspace = true
common-version.workspace = true
serde.workspace = true
temp-env = "0.3"
tempfile.workspace = true

[target.'cfg(not(windows))'.dev-dependencies]
rexpect = "0.5"

[build-dependencies]
common-version.workspace = true

@@ -17,11 +17,11 @@
use clap::{Parser, Subcommand};
use cmd::error::Result;
use cmd::options::GlobalOptions;
use cmd::{cli, datanode, frontend, metasrv, standalone, App};
use cmd::{cli, datanode, flownode, frontend, metasrv, standalone, App};
use common_version::version;

#[derive(Parser)]
#[command(name = "greptime", author, version, long_version = version!(), about)]
#[command(name = "greptime", author, version, long_version = version(), about)]
#[command(propagate_version = true)]
pub(crate) struct Command {
    #[clap(subcommand)]
@@ -37,6 +37,10 @@ enum SubCommand {
    #[clap(name = "datanode")]
    Datanode(datanode::Command),

    /// Start flownode service.
    #[clap(name = "flownode")]
    Flownode(flownode::Command),

    /// Start frontend service.
    #[clap(name = "frontend")]
    Frontend(frontend::Command),
@@ -72,6 +76,12 @@ async fn start(cli: Command) -> Result<()> {
                .run()
                .await
        }
        SubCommand::Flownode(cmd) => {
            cmd.build(cmd.load_options(&cli.global_options)?)
                .await?
                .run()
                .await
        }
        SubCommand::Frontend(cmd) => {
            cmd.build(cmd.load_options(&cli.global_options)?)
                .await?

@@ -25,13 +25,14 @@ use common_version::{short_version, version};
use common_wal::config::DatanodeWalConfig;
use datanode::datanode::{Datanode, DatanodeBuilder};
use datanode::service::DatanodeServiceBuilder;
use meta_client::MetaClientOptions;
use meta_client::{MetaClientOptions, MetaClientType};
use servers::Mode;
use snafu::{OptionExt, ResultExt};
use tracing_appender::non_blocking::WorkerGuard;

use crate::error::{
    LoadLayeredConfigSnafu, MissingConfigSnafu, Result, ShutdownDatanodeSnafu, StartDatanodeSnafu,
    LoadLayeredConfigSnafu, MetaClientInitSnafu, MissingConfigSnafu, Result, ShutdownDatanodeSnafu,
    StartDatanodeSnafu,
};
use crate::options::{GlobalOptions, GreptimeOptions};
use crate::{log_versions, App};
@@ -125,7 +126,7 @@ struct StartCommand {
    rpc_addr: Option<String>,
    #[clap(long)]
    rpc_hostname: Option<String>,
    #[clap(long, aliases = ["metasrv-addr"], value_delimiter = ',', num_args = 1..)]
    #[clap(long, value_delimiter = ',', num_args = 1..)]
    metasrv_addrs: Option<Vec<String>>,
    #[clap(short, long)]
    config_file: Option<String>,
@@ -265,7 +266,7 @@
            &opts.component.tracing,
            opts.component.node_id.map(|x| x.to_string()),
        );
        log_versions(version!(), short_version!());
        log_versions(version(), short_version());

        info!("Datanode start command: {:#?}", self);
        info!("Datanode options: {:#?}", opts);
@@ -275,7 +276,8 @@
            .await
            .context(StartDatanodeSnafu)?;

        let node_id = opts
        let cluster_id = 0; // TODO(hl): read from config
        let member_id = opts
            .node_id
            .context(MissingConfigSnafu { msg: "'node_id'" })?;

@@ -283,12 +285,16 @@
            msg: "'meta_client_options'",
        })?;

        let meta_client = datanode::heartbeat::new_metasrv_client(node_id, meta_config)
            .await
            .context(StartDatanodeSnafu)?;
        let meta_client = meta_client::create_meta_client(
            cluster_id,
            MetaClientType::Datanode { member_id },
            meta_config,
        )
        .await
        .context(MetaClientInitSnafu)?;

        let meta_backend = Arc::new(MetaKvBackend {
            client: Arc::new(meta_client.clone()),
            client: meta_client.clone(),
        });

        let mut datanode = DatanodeBuilder::new(opts.clone(), plugins)
@@ -332,7 +338,7 @@ mod tests {
        mode = "distributed"
        enable_memory_catalog = false
        node_id = 42


        rpc_addr = "127.0.0.1:4001"
        rpc_hostname = "192.168.0.1"
        [grpc]
@@ -359,7 +365,7 @@ mod tests {
        mode = "distributed"
        enable_memory_catalog = false
        node_id = 42


        [grpc]
        addr = "127.0.0.1:3001"
        hostname = "127.0.0.1"

@@ -87,6 +87,20 @@ pub enum Error {
        source: datanode::error::Error,
    },

    #[snafu(display("Failed to start flownode"))]
    StartFlownode {
        #[snafu(implicit)]
        location: Location,
        source: flow::Error,
    },

    #[snafu(display("Failed to shutdown flownode"))]
    ShutdownFlownode {
        #[snafu(implicit)]
        location: Location,
        source: flow::Error,
    },

    #[snafu(display("Failed to start frontend"))]
    StartFrontend {
        #[snafu(implicit)]
@@ -325,6 +339,22 @@ pub enum Error {
        location: Location,
        source: cache::error::Error,
    },

    #[snafu(display("Failed to initialize meta client"))]
    MetaClientInit {
        #[snafu(implicit)]
        location: Location,
        source: meta_client::error::Error,
    },

    #[snafu(display("Tonic transport error: {error:?} with msg: {msg:?}"))]
    TonicTransport {
        #[snafu(implicit)]
        location: Location,
        #[snafu(source)]
        error: tonic::transport::Error,
        msg: Option<String>,
    },
}

pub type Result<T> = std::result::Result<T, Error>;
@@ -380,6 +410,11 @@ impl ErrorExt for Error {
            Error::BuildRuntime { source, .. } => source.status_code(),

            Error::CacheRequired { .. } | Error::BuildCacheRegistry { .. } => StatusCode::Internal,
            Self::StartFlownode { source, .. } | Self::ShutdownFlownode { source, .. } => {
                source.status_code()
            }
            Error::MetaClientInit { source, .. } => source.status_code(),
            Error::TonicTransport { .. } => StatusCode::Internal,
        }
    }

src/cmd/src/flownode.rs (new file, 334 lines)
@@ -0,0 +1,334 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::sync::Arc;

use cache::{build_fundamental_cache_registry, with_default_composite_cache_registry};
use catalog::kvbackend::{CachedMetaKvBackendBuilder, KvBackendCatalogManager, MetaKvBackend};
use clap::Parser;
use client::client_manager::NodeClients;
use common_base::Plugins;
use common_config::Configurable;
use common_grpc::channel_manager::ChannelConfig;
use common_meta::cache::{CacheRegistryBuilder, LayeredCacheRegistryBuilder};
use common_meta::heartbeat::handler::parse_mailbox_message::ParseMailboxMessageHandler;
use common_meta::heartbeat::handler::HandlerGroupExecutor;
use common_meta::key::TableMetadataManager;
use common_telemetry::info;
use common_telemetry::logging::TracingOptions;
use common_version::{short_version, version};
use flow::{FlownodeBuilder, FlownodeInstance, FrontendInvoker};
use frontend::heartbeat::handler::invalidate_table_cache::InvalidateTableCacheHandler;
use meta_client::{MetaClientOptions, MetaClientType};
use servers::Mode;
use snafu::{OptionExt, ResultExt};
use tracing_appender::non_blocking::WorkerGuard;

use crate::error::{
    BuildCacheRegistrySnafu, InitMetadataSnafu, LoadLayeredConfigSnafu, MetaClientInitSnafu,
    MissingConfigSnafu, Result, ShutdownFlownodeSnafu, StartFlownodeSnafu,
};
use crate::options::{GlobalOptions, GreptimeOptions};
use crate::{log_versions, App};

pub const APP_NAME: &str = "greptime-flownode";

type FlownodeOptions = GreptimeOptions<flow::FlownodeOptions>;

pub struct Instance {
    flownode: FlownodeInstance,

    // Keep the logging guard to prevent the worker from being dropped.
    _guard: Vec<WorkerGuard>,
}

impl Instance {
    pub fn new(flownode: FlownodeInstance, guard: Vec<WorkerGuard>) -> Self {
        Self {
            flownode,
            _guard: guard,
        }
    }

    pub fn flownode_mut(&mut self) -> &mut FlownodeInstance {
        &mut self.flownode
    }

    pub fn flownode(&self) -> &FlownodeInstance {
        &self.flownode
    }
}

#[async_trait::async_trait]
impl App for Instance {
    fn name(&self) -> &str {
        APP_NAME
    }

    async fn start(&mut self) -> Result<()> {
        self.flownode.start().await.context(StartFlownodeSnafu)
    }

    async fn stop(&self) -> Result<()> {
        self.flownode
            .shutdown()
            .await
            .context(ShutdownFlownodeSnafu)
    }
}

#[derive(Parser)]
pub struct Command {
    #[clap(subcommand)]
    subcmd: SubCommand,
}

impl Command {
    pub async fn build(&self, opts: FlownodeOptions) -> Result<Instance> {
        self.subcmd.build(opts).await
    }

    pub fn load_options(&self, global_options: &GlobalOptions) -> Result<FlownodeOptions> {
        match &self.subcmd {
            SubCommand::Start(cmd) => cmd.load_options(global_options),
        }
    }
}

#[derive(Parser)]
enum SubCommand {
    Start(StartCommand),
}

impl SubCommand {
    async fn build(&self, opts: FlownodeOptions) -> Result<Instance> {
        match self {
            SubCommand::Start(cmd) => cmd.build(opts).await,
        }
    }
}

#[derive(Debug, Parser, Default)]
struct StartCommand {
    /// Flownode's id
    #[clap(long)]
    node_id: Option<u64>,
    /// Bind address for the gRPC server.
    #[clap(long)]
    rpc_addr: Option<String>,
    /// Hostname for the gRPC server.
    #[clap(long)]
    rpc_hostname: Option<String>,
    /// Metasrv address list;
    #[clap(long, value_delimiter = ',', num_args = 1..)]
    metasrv_addrs: Option<Vec<String>>,
    /// The configuration file for flownode
    #[clap(short, long)]
    config_file: Option<String>,
    /// The prefix of environment variables, default is `GREPTIMEDB_FLOWNODE`;
    #[clap(long, default_value = "GREPTIMEDB_FLOWNODE")]
    env_prefix: String,
}

impl StartCommand {
    fn load_options(&self, global_options: &GlobalOptions) -> Result<FlownodeOptions> {
        let mut opts = FlownodeOptions::load_layered_options(
            self.config_file.as_deref(),
            self.env_prefix.as_ref(),
        )
        .context(LoadLayeredConfigSnafu)?;

        self.merge_with_cli_options(global_options, &mut opts)?;

        Ok(opts)
    }

    // The precedence order is: cli > config file > environment variables > default values.
    fn merge_with_cli_options(
        &self,
        global_options: &GlobalOptions,
        opts: &mut FlownodeOptions,
    ) -> Result<()> {
        let opts = &mut opts.component;

        if let Some(dir) = &global_options.log_dir {
            opts.logging.dir.clone_from(dir);
        }

        if global_options.log_level.is_some() {
            opts.logging.level.clone_from(&global_options.log_level);
        }

        opts.tracing = TracingOptions {
            #[cfg(feature = "tokio-console")]
            tokio_console_addr: global_options.tokio_console_addr.clone(),
        };

        if let Some(addr) = &self.rpc_addr {
            opts.grpc.addr.clone_from(addr);
        }

        if let Some(hostname) = &self.rpc_hostname {
            opts.grpc.hostname.clone_from(hostname);
        }

        if let Some(node_id) = self.node_id {
            opts.node_id = Some(node_id);
        }

        if let Some(metasrv_addrs) = &self.metasrv_addrs {
            opts.meta_client
                .get_or_insert_with(MetaClientOptions::default)
                .metasrv_addrs
                .clone_from(metasrv_addrs);
            opts.mode = Mode::Distributed;
        }

        if let (Mode::Distributed, None) = (&opts.mode, &opts.node_id) {
            return MissingConfigSnafu {
                msg: "Missing node id option",
            }
            .fail();
        }

        Ok(())
    }

    async fn build(&self, opts: FlownodeOptions) -> Result<Instance> {
        common_runtime::init_global_runtimes(&opts.runtime);

        let guard = common_telemetry::init_global_logging(
            APP_NAME,
            &opts.component.logging,
            &opts.component.tracing,
            opts.component.node_id.map(|x| x.to_string()),
        );
        log_versions(version(), short_version());

        info!("Flownode start command: {:#?}", self);
        info!("Flownode options: {:#?}", opts);

        let opts = opts.component;

        // TODO(discord9): make it not optional after cluster id is required
        let cluster_id = opts.cluster_id.unwrap_or(0);

        let member_id = opts
            .node_id
            .context(MissingConfigSnafu { msg: "'node_id'" })?;

        let meta_config = opts.meta_client.as_ref().context(MissingConfigSnafu {
            msg: "'meta_client_options'",
        })?;

        let meta_client = meta_client::create_meta_client(
            cluster_id,
            MetaClientType::Flownode { member_id },
            meta_config,
        )
        .await
        .context(MetaClientInitSnafu)?;

        let cache_max_capacity = meta_config.metadata_cache_max_capacity;
        let cache_ttl = meta_config.metadata_cache_ttl;
        let cache_tti = meta_config.metadata_cache_tti;

        // TODO(discord9): add helper function to ease the creation of cache registry&such
||||
let cached_meta_backend = CachedMetaKvBackendBuilder::new(meta_client.clone())
|
||||
.cache_max_capacity(cache_max_capacity)
|
||||
.cache_ttl(cache_ttl)
|
||||
.cache_tti(cache_tti)
|
||||
.build();
|
||||
let cached_meta_backend = Arc::new(cached_meta_backend);
|
||||
|
||||
// Builds cache registry
|
||||
let layered_cache_builder = LayeredCacheRegistryBuilder::default().add_cache_registry(
|
||||
CacheRegistryBuilder::default()
|
||||
.add_cache(cached_meta_backend.clone())
|
||||
.build(),
|
||||
);
|
||||
let fundamental_cache_registry =
|
||||
build_fundamental_cache_registry(Arc::new(MetaKvBackend::new(meta_client.clone())));
|
||||
let layered_cache_registry = Arc::new(
|
||||
with_default_composite_cache_registry(
|
||||
layered_cache_builder.add_cache_registry(fundamental_cache_registry),
|
||||
)
|
||||
.context(BuildCacheRegistrySnafu)?
|
||||
.build(),
|
||||
);
|
||||
|
||||
let catalog_manager = KvBackendCatalogManager::new(
|
||||
opts.mode,
|
||||
Some(meta_client.clone()),
|
||||
cached_meta_backend.clone(),
|
||||
layered_cache_registry.clone(),
|
||||
);
|
||||
|
||||
let table_metadata_manager =
|
||||
Arc::new(TableMetadataManager::new(cached_meta_backend.clone()));
|
||||
table_metadata_manager
|
||||
.init()
|
||||
.await
|
||||
.context(InitMetadataSnafu)?;
|
||||
|
||||
let executor = HandlerGroupExecutor::new(vec![
|
||||
Arc::new(ParseMailboxMessageHandler),
|
||||
Arc::new(InvalidateTableCacheHandler::new(
|
||||
layered_cache_registry.clone(),
|
||||
)),
|
||||
]);
|
||||
|
||||
let heartbeat_task = flow::heartbeat::HeartbeatTask::new(
|
||||
&opts,
|
||||
meta_client.clone(),
|
||||
opts.heartbeat.clone(),
|
||||
Arc::new(executor),
|
||||
);
|
||||
|
||||
let flownode_builder = FlownodeBuilder::new(
|
||||
opts,
|
||||
Plugins::new(),
|
||||
table_metadata_manager,
|
||||
catalog_manager.clone(),
|
||||
)
|
||||
.with_heartbeat_task(heartbeat_task);
|
||||
|
||||
let flownode = flownode_builder.build().await.context(StartFlownodeSnafu)?;
|
||||
|
||||
// flownode's frontend to datanode need not timeout.
|
||||
// Some queries are expected to take long time.
|
||||
let channel_config = ChannelConfig {
|
||||
timeout: None,
|
||||
..Default::default()
|
||||
};
|
||||
let client = Arc::new(NodeClients::new(channel_config));
|
||||
|
||||
let invoker = FrontendInvoker::build_from(
|
||||
flownode.flow_worker_manager().clone(),
|
||||
catalog_manager.clone(),
|
||||
cached_meta_backend.clone(),
|
||||
layered_cache_registry.clone(),
|
||||
meta_client.clone(),
|
||||
client,
|
||||
)
|
||||
.await
|
||||
.context(StartFlownodeSnafu)?;
|
||||
flownode
|
||||
.flow_worker_manager()
|
||||
.set_frontend_invoker(invoker)
|
||||
.await;
|
||||
|
||||
Ok(Instance::new(flownode, guard))
|
||||
}
|
||||
}
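
Note: `load_options` and `merge_with_cli_options` above implement layered configuration. A minimal sketch of the precedence they encode (cli > config file > environment variables > default values); the helper and its parameter names are illustrative, not part of the diff:

// Hypothetical helper restating the option-merge precedence above.
fn resolve_setting(
    cli: Option<&str>,         // e.g. --rpc-addr on the command line
    config_file: Option<&str>, // e.g. grpc.addr in the TOML file
    env: Option<&str>,         // e.g. GREPTIMEDB_FLOWNODE_GRPC__ADDR
    default: &str,
) -> String {
    cli.or(config_file).or(env).unwrap_or(default).to_string()
}
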
@@ -34,14 +34,15 @@ use frontend::heartbeat::HeartbeatTask;
use frontend::instance::builder::FrontendBuilder;
use frontend::instance::{FrontendInstance, Instance as FeInstance};
use frontend::server::Services;
use meta_client::MetaClientOptions;
use meta_client::{MetaClientOptions, MetaClientType};
use servers::tls::{TlsMode, TlsOption};
use servers::Mode;
use snafu::{OptionExt, ResultExt};
use tracing_appender::non_blocking::WorkerGuard;

use crate::error::{
    self, InitTimezoneSnafu, LoadLayeredConfigSnafu, MissingConfigSnafu, Result, StartFrontendSnafu,
    self, InitTimezoneSnafu, LoadLayeredConfigSnafu, MetaClientInitSnafu, MissingConfigSnafu,
    Result, StartFrontendSnafu,
};
use crate::options::{GlobalOptions, GreptimeOptions};
use crate::{log_versions, App};
@@ -147,7 +148,7 @@ pub struct StartCommand {
    config_file: Option<String>,
    #[clap(short, long)]
    influxdb_enable: Option<bool>,
    #[clap(long, aliases = ["metasrv-addr"], value_delimiter = ',', num_args = 1..)]
    #[clap(long, value_delimiter = ',', num_args = 1..)]
    metasrv_addrs: Option<Vec<String>>,
    #[clap(long)]
    tls_mode: Option<TlsMode>,
@@ -258,7 +259,7 @@ impl StartCommand {
            &opts.component.tracing,
            opts.component.node_id.clone(),
        );
        log_versions(version!(), short_version!());
        log_versions(version(), short_version());

        info!("Frontend start command: {:#?}", self);
        info!("Frontend options: {:#?}", opts);
@@ -279,10 +280,16 @@ impl StartCommand {
        let cache_ttl = meta_client_options.metadata_cache_ttl;
        let cache_tti = meta_client_options.metadata_cache_tti;

        let meta_client = FeInstance::create_meta_client(meta_client_options)
            .await
            .context(StartFrontendSnafu)?;
        let cluster_id = 0; // TODO(jeremy): it is currently a reserved field and has not been enabled.
        let meta_client = meta_client::create_meta_client(
            cluster_id,
            MetaClientType::Frontend,
            meta_client_options,
        )
        .await
        .context(MetaClientInitSnafu)?;

        // TODO(discord9): add a helper function to ease the creation of cache registries and such
        let cached_meta_backend = CachedMetaKvBackendBuilder::new(meta_client.clone())
            .cache_max_capacity(cache_max_capacity)
            .cache_ttl(cache_ttl)
@@ -336,6 +343,7 @@ impl StartCommand {
        let client = NodeClients::new(channel_config);

        let mut instance = FrontendBuilder::new(
            opts.clone(),
            cached_meta_backend.clone(),
            layered_cache_registry.clone(),
            catalog_manager,
@@ -349,12 +357,12 @@ impl StartCommand {
        .await
        .context(StartFrontendSnafu)?;

        let servers = Services::new(opts.clone(), Arc::new(instance.clone()), plugins)
        let servers = Services::new(opts, Arc::new(instance.clone()), plugins)
            .build()
            .await
            .context(StartFrontendSnafu)?;
        instance
            .build_servers(opts, servers)
            .build_servers(servers)
            .context(StartFrontendSnafu)?;

        Ok(Instance::new(instance, guard))
@@ -22,6 +22,7 @@ use crate::error::Result;
pub mod cli;
pub mod datanode;
pub mod error;
pub mod flownode;
pub mod frontend;
pub mod metasrv;
pub mod options;
@@ -233,7 +233,7 @@ impl StartCommand {
            &opts.component.tracing,
            None,
        );
        log_versions(version!(), short_version!());
        log_versions(version(), short_version());

        info!("Metasrv start command: {:#?}", self);
        info!("Metasrv options: {:#?}", opts);
@@ -296,7 +296,7 @@ mod tests {
        [logging]
        level = "debug"
        dir = "/tmp/greptimedb/test/logs"

        [failure_detector]
        threshold = 8.0
        min_std_deviation = "100ms"
@@ -21,17 +21,17 @@ use catalog::kvbackend::KvBackendCatalogManager;
use clap::Parser;
use common_catalog::consts::{MIN_USER_FLOW_ID, MIN_USER_TABLE_ID};
use common_config::{metadata_store_dir, Configurable, KvBackendConfig};
use common_error::ext::BoxedError;
use common_meta::cache::LayeredCacheRegistryBuilder;
use common_meta::cache_invalidator::CacheInvalidatorRef;
use common_meta::ddl::flow_meta::{FlowMetadataAllocator, FlowMetadataAllocatorRef};
use common_meta::ddl::table_meta::{TableMetadataAllocator, TableMetadataAllocatorRef};
use common_meta::ddl::{DdlContext, ProcedureExecutorRef};
use common_meta::ddl::{DdlContext, NoopRegionFailureDetectorControl, ProcedureExecutorRef};
use common_meta::ddl_manager::DdlManager;
use common_meta::key::flow::{FlowMetadataManager, FlowMetadataManagerRef};
use common_meta::key::{TableMetadataManager, TableMetadataManagerRef};
use common_meta::kv_backend::KvBackendRef;
use common_meta::node_manager::NodeManagerRef;
use common_meta::peer::StandalonePeerLookupService;
use common_meta::region_keeper::MemoryRegionKeeper;
use common_meta::sequence::SequenceBuilder;
use common_meta::wal_options_allocator::{WalOptionsAllocator, WalOptionsAllocatorRef};
@@ -40,11 +40,11 @@ use common_telemetry::info;
use common_telemetry::logging::{LoggingOptions, TracingOptions};
use common_time::timezone::set_default_timezone;
use common_version::{short_version, version};
use common_wal::config::StandaloneWalConfig;
use common_wal::config::DatanodeWalConfig;
use datanode::config::{DatanodeOptions, ProcedureConfig, RegionEngineConfig, StorageConfig};
use datanode::datanode::{Datanode, DatanodeBuilder};
use file_engine::config::EngineConfig as FileEngineConfig;
use flow::FlownodeBuilder;
use flow::{FlowWorkerManager, FlownodeBuilder, FrontendInvoker};
use frontend::frontend::FrontendOptions;
use frontend::instance::builder::FrontendBuilder;
use frontend::instance::{FrontendInstance, Instance as FeInstance, StandaloneDatanodeManager};
@@ -61,12 +61,14 @@ use servers::http::HttpOptions;
use servers::tls::{TlsMode, TlsOption};
use servers::Mode;
use snafu::ResultExt;
use tokio::sync::broadcast;
use tracing_appender::non_blocking::WorkerGuard;

use crate::error::{
    BuildCacheRegistrySnafu, CreateDirSnafu, IllegalConfigSnafu, InitDdlManagerSnafu,
    InitMetadataSnafu, InitTimezoneSnafu, LoadLayeredConfigSnafu, Result, ShutdownDatanodeSnafu,
    ShutdownFrontendSnafu, StartDatanodeSnafu, StartFrontendSnafu, StartProcedureManagerSnafu,
    InitMetadataSnafu, InitTimezoneSnafu, LoadLayeredConfigSnafu, OtherSnafu, Result,
    ShutdownDatanodeSnafu, ShutdownFlownodeSnafu, ShutdownFrontendSnafu, StartDatanodeSnafu,
    StartFlownodeSnafu, StartFrontendSnafu, StartProcedureManagerSnafu,
    StartWalOptionsAllocatorSnafu, StopProcedureManagerSnafu,
};
use crate::options::{GlobalOptions, GreptimeOptions};
@@ -128,7 +130,7 @@ pub struct StandaloneOptions {
    pub opentsdb: OpentsdbOptions,
    pub influxdb: InfluxdbOptions,
    pub prom_store: PromStoreOptions,
    pub wal: StandaloneWalConfig,
    pub wal: DatanodeWalConfig,
    pub storage: StorageConfig,
    pub metadata_store: KvBackendConfig,
    pub procedure: ProcedureConfig,
@@ -153,7 +155,7 @@ impl Default for StandaloneOptions {
            opentsdb: OpentsdbOptions::default(),
            influxdb: InfluxdbOptions::default(),
            prom_store: PromStoreOptions::default(),
            wal: StandaloneWalConfig::default(),
            wal: DatanodeWalConfig::default(),
            storage: StorageConfig::default(),
            metadata_store: KvBackendConfig::default(),
            procedure: ProcedureConfig::default(),
@@ -202,7 +204,7 @@ impl StandaloneOptions {
        DatanodeOptions {
            node_id: Some(0),
            enable_telemetry: cloned_opts.enable_telemetry,
            wal: cloned_opts.wal.into(),
            wal: cloned_opts.wal,
            storage: cloned_opts.storage,
            region_engine: cloned_opts.region_engine,
            grpc: cloned_opts.grpc,
@@ -214,6 +216,9 @@ impl StandaloneOptions {
pub struct Instance {
    datanode: Datanode,
    frontend: FeInstance,
    // TODO(discord9): wrap this in a flownode instance instead
    flow_worker_manager: Arc<FlowWorkerManager>,
    flow_shutdown: broadcast::Sender<()>,
    procedure_manager: ProcedureManagerRef,
    wal_options_allocator: WalOptionsAllocatorRef,

@@ -245,6 +250,9 @@ impl App for Instance {
            .context(StartFrontendSnafu)?;

        self.frontend.start().await.context(StartFrontendSnafu)?;
        self.flow_worker_manager
            .clone()
            .run_background(Some(self.flow_shutdown.subscribe()));
        Ok(())
    }

@@ -263,6 +271,15 @@ impl App for Instance {
            .shutdown()
            .await
            .context(ShutdownDatanodeSnafu)?;
        self.flow_shutdown
            .send(())
            .map_err(|_e| {
                flow::error::InternalSnafu {
                    reason: "Failed to send shutdown signal to flow worker manager, all receiver end already closed".to_string(),
                }
                .build()
            })
            .context(ShutdownFlownodeSnafu)?;
        info!("Datanode instance stopped.");

        Ok(())
@@ -396,7 +413,7 @@ impl StartCommand {
            &opts.component.tracing,
            None,
        );
        log_versions(version!(), short_version!());
        log_versions(version(), short_version());

        info!("Standalone start command: {:#?}", self);
        info!("Standalone options: {opts:#?}");
@@ -447,24 +464,29 @@ impl StartCommand {
        let table_metadata_manager =
            Self::create_table_metadata_manager(kv_backend.clone()).await?;

        let flow_builder = FlownodeBuilder::new(
            1,
            Default::default(),
            fe_plugins.clone(),
            table_metadata_manager.clone(),
            catalog_manager.clone(),
        );
        let flownode = Arc::new(flow_builder.build().await);

        let datanode = DatanodeBuilder::new(dn_opts, fe_plugins.clone())
            .with_kv_backend(kv_backend.clone())
            .build()
            .await
            .context(StartDatanodeSnafu)?;

        let flow_builder = FlownodeBuilder::new(
            Default::default(),
            fe_plugins.clone(),
            table_metadata_manager.clone(),
            catalog_manager.clone(),
        );
        let flownode = Arc::new(
            flow_builder
                .build()
                .await
                .map_err(BoxedError::new)
                .context(OtherSnafu)?,
        );

        let node_manager = Arc::new(StandaloneDatanodeManager {
            region_server: datanode.region_server(),
            flow_server: flownode.clone(),
            flow_server: flownode.flow_worker_manager(),
        });

        let table_id_sequence = Arc::new(
@@ -504,35 +526,47 @@ impl StartCommand {
            .await?;

        let mut frontend = FrontendBuilder::new(
            kv_backend,
            layered_cache_registry,
            catalog_manager,
            node_manager,
            ddl_task_executor,
            fe_opts.clone(),
            kv_backend.clone(),
            layered_cache_registry.clone(),
            catalog_manager.clone(),
            node_manager.clone(),
            ddl_task_executor.clone(),
        )
        .with_plugin(fe_plugins.clone())
        .try_build()
        .await
        .context(StartFrontendSnafu)?;

        let flow_worker_manager = flownode.flow_worker_manager();
        // The flow server needs to be able to use the frontend to write insert requests back.
        flownode
            .set_frontend_invoker(Box::new(frontend.clone()))
            .await;
        // TODO(discord9): unify this by adding `start` and `shutdown` methods to the flownode too.
        let _handle = flownode.clone().run_background();
        let invoker = FrontendInvoker::build_from(
            flow_worker_manager.clone(),
            catalog_manager.clone(),
            kv_backend.clone(),
            layered_cache_registry.clone(),
            ddl_task_executor.clone(),
            node_manager,
        )
        .await
        .context(StartFlownodeSnafu)?;
        flow_worker_manager.set_frontend_invoker(invoker).await;

        let servers = Services::new(fe_opts.clone(), Arc::new(frontend.clone()), fe_plugins)
        let (tx, _rx) = broadcast::channel(1);

        let servers = Services::new(fe_opts, Arc::new(frontend.clone()), fe_plugins)
            .build()
            .await
            .context(StartFrontendSnafu)?;
        frontend
            .build_servers(fe_opts, servers)
            .build_servers(servers)
            .context(StartFrontendSnafu)?;

        Ok(Instance {
            datanode,
            frontend,
            flow_worker_manager,
            flow_shutdown: tx,
            procedure_manager,
            wal_options_allocator,
            _guard: guard,
@@ -558,7 +592,7 @@ impl StartCommand {
                table_metadata_allocator,
                flow_metadata_manager,
                flow_metadata_allocator,
                peer_lookup_service: Arc::new(StandalonePeerLookupService::new()),
                region_failure_detector_controller: Arc::new(NoopRegionFailureDetectorControl),
            },
            procedure_manager,
            true,
@@ -24,8 +24,9 @@ use common_grpc::channel_manager::{
use common_runtime::global::RuntimeOptions;
use common_telemetry::logging::LoggingOptions;
use common_wal::config::raft_engine::RaftEngineConfig;
use common_wal::config::{DatanodeWalConfig, StandaloneWalConfig};
use common_wal::config::DatanodeWalConfig;
use datanode::config::{DatanodeOptions, RegionEngineConfig, StorageConfig};
use file_engine::config::EngineConfig;
use frontend::frontend::FrontendOptions;
use frontend::service_config::datanode::DatanodeClientOptions;
use meta_client::MetaClientOptions;
@@ -71,18 +72,21 @@ fn test_load_datanode_example_config() {
            data_home: "/tmp/greptimedb/".to_string(),
            ..Default::default()
        },
        region_engine: vec![RegionEngineConfig::Mito(MitoConfig {
            num_workers: 8,
            auto_flush_interval: Duration::from_secs(3600),
            scan_parallelism: 0,
            global_write_buffer_size: ReadableSize::gb(1),
            global_write_buffer_reject_size: ReadableSize::gb(2),
            sst_meta_cache_size: ReadableSize::mb(128),
            vector_cache_size: ReadableSize::mb(512),
            page_cache_size: ReadableSize::mb(512),
            max_background_jobs: 4,
            ..Default::default()
        })],
        region_engine: vec![
            RegionEngineConfig::Mito(MitoConfig {
                num_workers: 8,
                auto_flush_interval: Duration::from_secs(3600),
                scan_parallelism: 0,
                global_write_buffer_size: ReadableSize::gb(1),
                global_write_buffer_reject_size: ReadableSize::gb(2),
                sst_meta_cache_size: ReadableSize::mb(128),
                vector_cache_size: ReadableSize::mb(512),
                page_cache_size: ReadableSize::mb(512),
                max_background_jobs: 4,
                ..Default::default()
            }),
            RegionEngineConfig::File(EngineConfig {}),
        ],
        logging: LoggingOptions {
            level: Some("info".to_string()),
            otlp_endpoint: Some("".to_string()),
@@ -202,23 +206,26 @@ fn test_load_standalone_example_config() {
        },
        component: StandaloneOptions {
            default_timezone: Some("UTC".to_string()),
            wal: StandaloneWalConfig::RaftEngine(RaftEngineConfig {
            wal: DatanodeWalConfig::RaftEngine(RaftEngineConfig {
                dir: Some("/tmp/greptimedb/wal".to_string()),
                sync_period: Some(Duration::from_secs(10)),
                ..Default::default()
            }),
            region_engine: vec![RegionEngineConfig::Mito(MitoConfig {
                num_workers: 8,
                auto_flush_interval: Duration::from_secs(3600),
                scan_parallelism: 0,
                global_write_buffer_size: ReadableSize::gb(1),
                global_write_buffer_reject_size: ReadableSize::gb(2),
                sst_meta_cache_size: ReadableSize::mb(128),
                vector_cache_size: ReadableSize::mb(512),
                page_cache_size: ReadableSize::mb(512),
                max_background_jobs: 4,
                ..Default::default()
            })],
            region_engine: vec![
                RegionEngineConfig::Mito(MitoConfig {
                    num_workers: 8,
                    auto_flush_interval: Duration::from_secs(3600),
                    scan_parallelism: 0,
                    global_write_buffer_size: ReadableSize::gb(1),
                    global_write_buffer_reject_size: ReadableSize::gb(2),
                    sst_meta_cache_size: ReadableSize::mb(128),
                    vector_cache_size: ReadableSize::mb(512),
                    page_cache_size: ReadableSize::mb(512),
                    max_background_jobs: 4,
                    ..Default::default()
                }),
                RegionEngineConfig::File(EngineConfig {}),
            ],
            storage: StorageConfig {
                data_home: "/tmp/greptimedb/".to_string(),
                ..Default::default()
@@ -14,6 +14,7 @@

pub const SYSTEM_CATALOG_NAME: &str = "system";
pub const INFORMATION_SCHEMA_NAME: &str = "information_schema";
pub const PG_CATALOG_NAME: &str = "pg_catalog";
pub const SYSTEM_CATALOG_TABLE_NAME: &str = "system_catalog";
pub const DEFAULT_CATALOG_NAME: &str = "greptime";
pub const DEFAULT_SCHEMA_NAME: &str = "public";
@@ -93,8 +94,15 @@ pub const INFORMATION_SCHEMA_REGION_PEERS_TABLE_ID: u32 = 29;
pub const INFORMATION_SCHEMA_TABLE_CONSTRAINTS_TABLE_ID: u32 = 30;
/// id for information_schema.cluster_info
pub const INFORMATION_SCHEMA_CLUSTER_INFO_TABLE_ID: u32 = 31;
/// id for information_schema.VIEWS
pub const INFORMATION_SCHEMA_VIEW_TABLE_ID: u32 = 32;
/// ----- End of information_schema tables -----

/// ----- Begin of pg_catalog tables -----
pub const PG_CATALOG_PG_CLASS_TABLE_ID: u32 = 256;
pub const PG_CATALOG_PG_TYPE_TABLE_ID: u32 = 257;

/// ----- End of pg_catalog tables -----
pub const MITO_ENGINE: &str = "mito";
pub const MITO2_ENGINE: &str = "mito2";
pub const METRIC_ENGINE: &str = "metric";
@@ -108,3 +116,7 @@ pub const FILE_ENGINE: &str = "file";
pub const SEMANTIC_TYPE_PRIMARY_KEY: &str = "TAG";
pub const SEMANTIC_TYPE_FIELD: &str = "FIELD";
pub const SEMANTIC_TYPE_TIME_INDEX: &str = "TIMESTAMP";

pub fn is_readonly_schema(schema: &str) -> bool {
    matches!(schema, INFORMATION_SCHEMA_NAME)
}
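
A hedged usage sketch of the new `is_readonly_schema` helper; the caller and its error type are invented for illustration:

fn ensure_writable(schema: &str) -> std::result::Result<(), String> {
    // information_schema is virtual and read-only; writes must be rejected.
    if is_readonly_schema(schema) {
        return Err(format!("schema `{schema}` is read-only"));
    }
    Ok(())
}
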
@@ -31,7 +31,9 @@ derive_builder.workspace = true
futures.workspace = true
lazy_static.workspace = true
object-store.workspace = true
orc-rust = { git = "https://github.com/datafusion-contrib/datafusion-orc.git", rev = "502217315726314c4008808fe169764529640599" }
orc-rust = { git = "https://github.com/datafusion-contrib/datafusion-orc.git", rev = "502217315726314c4008808fe169764529640599", default-features = false, features = [
    "async",
] }
parquet.workspace = true
paste = "1.0"
rand.workspace = true
@@ -149,7 +149,9 @@ pub fn open_with_decoder<T: ArrowDecoder, F: Fn() -> DataFusionResult<T>>(
            .reader(&path)
            .await
            .map_err(|e| DataFusionError::External(Box::new(e)))?
            .into_bytes_stream(..);
            .into_bytes_stream(..)
            .await
            .map_err(|e| DataFusionError::External(Box::new(e)))?;

        let mut upstream = compression_type.convert_stream(reader).fuse();
@@ -169,11 +169,14 @@ impl FileFormat for CsvFormat {
            .stat(path)
            .await
            .context(error::ReadObjectSnafu { path })?;

        let reader = store
            .reader(path)
            .await
            .context(error::ReadObjectSnafu { path })?
            .into_futures_async_read(0..meta.content_length())
            .await
            .context(error::ReadObjectSnafu { path })?
            .compat();

        let decoded = self.compression_type.convert_async_read(reader);
@@ -87,11 +87,14 @@ impl FileFormat for JsonFormat {
            .stat(path)
            .await
            .context(error::ReadObjectSnafu { path })?;

        let reader = store
            .reader(path)
            .await
            .context(error::ReadObjectSnafu { path })?
            .into_futures_async_read(0..meta.content_length())
            .await
            .context(error::ReadObjectSnafu { path })?
            .compat();

        let decoded = self.compression_type.convert_async_read(reader);
@@ -27,12 +27,14 @@ use datafusion::parquet::file::metadata::ParquetMetaData;
use datafusion::parquet::format::FileMetaData;
use datafusion::physical_plan::metrics::ExecutionPlanMetricsSet;
use datafusion::physical_plan::SendableRecordBatchStream;
use datatypes::schema::SchemaRef;
use futures::future::BoxFuture;
use futures::StreamExt;
use object_store::{FuturesAsyncReader, ObjectStore};
use parquet::arrow::AsyncArrowWriter;
use parquet::basic::{Compression, ZstdLevel};
use parquet::file::properties::WriterProperties;
use parquet::basic::{Compression, Encoding, ZstdLevel};
use parquet::file::properties::{WriterProperties, WriterPropertiesBuilder};
use parquet::schema::types::ColumnPath;
use snafu::ResultExt;
use tokio_util::compat::{Compat, FuturesAsyncReadCompatExt, FuturesAsyncWriteCompatExt};

@@ -52,11 +54,14 @@ impl FileFormat for ParquetFormat {
            .stat(path)
            .await
            .context(error::ReadObjectSnafu { path })?;

        let mut reader = store
            .reader(path)
            .await
            .context(error::ReadObjectSnafu { path })?
            .into_futures_async_read(0..meta.content_length())
            .await
            .context(error::ReadObjectSnafu { path })?
            .compat();

        let metadata = reader
@@ -129,6 +134,7 @@ impl LazyParquetFileReader {
                .reader(&self.path)
                .await?
                .into_futures_async_read(0..meta.content_length())
                .await?
                .compat();
            self.reader = Some(reader);
        }
@@ -180,14 +186,16 @@ impl ArrowWriterCloser for ArrowWriter<SharedBuffer> {
/// Returns number of rows written.
pub async fn stream_to_parquet(
    mut stream: SendableRecordBatchStream,
    schema: datatypes::schema::SchemaRef,
    store: ObjectStore,
    path: &str,
    concurrency: usize,
) -> Result<usize> {
    let write_props = WriterProperties::builder()
        .set_compression(Compression::ZSTD(ZstdLevel::default()))
        .build();
    let schema = stream.schema();
    let write_props = column_wise_config(
        WriterProperties::builder().set_compression(Compression::ZSTD(ZstdLevel::default())),
        schema,
    )
    .build();
    let inner_writer = store
        .writer_with(path)
        .concurrent(concurrency)
@@ -196,7 +204,7 @@ pub async fn stream_to_parquet(
        .map(|w| w.into_futures_async_write().compat_write())
        .context(WriteObjectSnafu { path })?;

    let mut writer = AsyncArrowWriter::try_new(inner_writer, schema, Some(write_props))
    let mut writer = AsyncArrowWriter::try_new(inner_writer, stream.schema(), Some(write_props))
        .context(WriteParquetSnafu { path })?;
    let mut rows_written = 0;

@@ -212,6 +220,24 @@ pub async fn stream_to_parquet(
    Ok(rows_written)
}

/// Customizes per-column properties.
fn column_wise_config(
    mut props: WriterPropertiesBuilder,
    schema: SchemaRef,
) -> WriterPropertiesBuilder {
    // Disable dictionary encoding for timestamp columns: for an increasing
    // timestamp column, the dictionary pages will be larger than the data pages.
    for col in schema.column_schemas() {
        if col.data_type.is_timestamp() {
            let path = ColumnPath::new(vec![col.name.clone()]);
            props = props
                .set_column_dictionary_enabled(path.clone(), false)
                .set_column_encoding(path, Encoding::DELTA_BINARY_PACKED)
        }
    }
    props
}
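
A minimal sketch of the writer properties this produces for a timestamp column named "ts", using only the parquet crate (the schema walk is elided; the column name is an assumption):

use parquet::basic::{Compression, Encoding, ZstdLevel};
use parquet::file::properties::WriterProperties;
use parquet::schema::types::ColumnPath;

fn example_props() -> WriterProperties {
    let ts = ColumnPath::new(vec!["ts".to_string()]);
    WriterProperties::builder()
        .set_compression(Compression::ZSTD(ZstdLevel::default()))
        // Monotonic timestamps: dictionary pages would outgrow data pages,
        // while delta encoding stores the small differences compactly.
        .set_column_dictionary_enabled(ts.clone(), false)
        .set_column_encoding(ts, Encoding::DELTA_BINARY_PACKED)
        .build()
}
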
#[cfg(test)]
mod tests {
    use common_test_util::find_workspace_path;
@@ -31,7 +31,7 @@ pub fn build_fs_backend(root: &str) -> Result<ObjectStore> {
                .expect("input error level must be valid"),
        )
        .layer(object_store::layers::TracingLayer)
        .layer(object_store::layers::PrometheusMetricsLayer)
        .layer(object_store::layers::PrometheusMetricsLayer::new(true))
        .finish();
    Ok(object_store)
}

@@ -94,7 +94,7 @@ pub fn build_s3_backend(
                .expect("input error level must be valid"),
        )
        .layer(object_store::layers::TracingLayer)
        .layer(object_store::layers::PrometheusMetricsLayer)
        .layer(object_store::layers::PrometheusMetricsLayer::new(true))
        .finish())
}
@@ -121,6 +121,11 @@ impl Decimal128 {
        let value = (hi | lo) as i128;
        Self::new(value, precision, scale)
    }

    pub fn negative(mut self) -> Self {
        self.value = -self.value;
        self
    }
}
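
A brief usage sketch of the new `negative` helper (values shown in the comments): it flips the sign of the underlying i128 while precision and scale stay unchanged.

let d = Decimal128::new(12345, 5, 2); // 123.45
let neg = d.negative();               // -123.45, still precision 5, scale 2
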
/// The default value of Decimal128 is 0, and its precision is 1 and scale is 0.

@@ -54,8 +54,8 @@ pub enum Error {
impl ErrorExt for Error {
    fn status_code(&self) -> StatusCode {
        match self {
            Error::BigDecimalOutOfRange { .. } => StatusCode::Internal,
            Error::ParseRustDecimalStr { .. }
            Error::BigDecimalOutOfRange { .. }
            | Error::ParseRustDecimalStr { .. }
            | Error::InvalidPrecisionOrScale { .. }
            | Error::ParseBigDecimalStr { .. } => StatusCode::InvalidArguments,
        }
@@ -36,6 +36,8 @@ pub enum StatusCode {
    InvalidArguments = 1004,
    /// The task is cancelled.
    Cancelled = 1005,
    /// Illegal state, can be exposed to users
    IllegalState = 1006,
    // ====== End of common status code ================

    // ====== Begin of SQL related status code =========
@@ -53,18 +55,27 @@ pub enum StatusCode {
    // ====== Begin of catalog related status code =====
    /// Table already exists.
    TableAlreadyExists = 4000,
    /// Table not found
    TableNotFound = 4001,
    /// Table column not found
    TableColumnNotFound = 4002,
    /// Table column already exists
    TableColumnExists = 4003,
    /// Database not found
    DatabaseNotFound = 4004,
    /// Region not found
    RegionNotFound = 4005,
    /// Region already exists
    RegionAlreadyExists = 4006,
    RegionReadonly = 4007,
    /// Region is not in a proper state to handle specific request.
    RegionNotReady = 4008,
    // If mutually exclusive operations are reached at the same time,
    // only one can be executed, another one will get region busy.
    /// Region is temporarily in busy state
    RegionBusy = 4009,
    /// Table is temporarily unable to handle the request
    TableUnavailable = 4010,
    /// Database already exists
    DatabaseAlreadyExists = 4011,
    // ====== End of catalog related status code =======

    // ====== Begin of storage related status code =====
@@ -118,15 +129,18 @@ impl StatusCode {
            | StatusCode::RuntimeResourcesExhausted
            | StatusCode::Internal
            | StatusCode::RegionNotReady
            | StatusCode::TableUnavailable
            | StatusCode::RegionBusy => true,

            StatusCode::Success
            | StatusCode::Unknown
            | StatusCode::Unsupported
            | StatusCode::IllegalState
            | StatusCode::Unexpected
            | StatusCode::InvalidArguments
            | StatusCode::Cancelled
            | StatusCode::InvalidSyntax
            | StatusCode::DatabaseAlreadyExists
            | StatusCode::PlanQuery
            | StatusCode::EngineExecuteQuery
            | StatusCode::TableAlreadyExists
@@ -159,7 +173,7 @@ impl StatusCode {
            | StatusCode::Unexpected
            | StatusCode::Internal
            | StatusCode::Cancelled
            | StatusCode::PlanQuery
            | StatusCode::IllegalState
            | StatusCode::EngineExecuteQuery
            | StatusCode::StorageUnavailable
            | StatusCode::RuntimeResourcesExhausted => true,
@@ -171,6 +185,7 @@ impl StatusCode {
            | StatusCode::TableNotFound
            | StatusCode::RegionAlreadyExists
            | StatusCode::RegionNotFound
            | StatusCode::PlanQuery
            | StatusCode::FlowAlreadyExists
            | StatusCode::FlowNotFound
            | StatusCode::RegionNotReady
@@ -181,6 +196,8 @@ impl StatusCode {
            | StatusCode::DatabaseNotFound
            | StatusCode::RateLimited
            | StatusCode::UserNotFound
            | StatusCode::TableUnavailable
            | StatusCode::DatabaseAlreadyExists
            | StatusCode::UnsupportedPasswordType
            | StatusCode::UserPasswordMismatch
            | StatusCode::AuthHeaderNotFound
@@ -241,6 +258,7 @@ pub fn status_to_tonic_code(status_code: StatusCode) -> Code {
        StatusCode::Unknown => Code::Unknown,
        StatusCode::Unsupported => Code::Unimplemented,
        StatusCode::Unexpected
        | StatusCode::IllegalState
        | StatusCode::Internal
        | StatusCode::PlanQuery
        | StatusCode::EngineExecuteQuery => Code::Internal,
@@ -251,6 +269,7 @@ pub fn status_to_tonic_code(status_code: StatusCode) -> Code {
        StatusCode::TableAlreadyExists
        | StatusCode::TableColumnExists
        | StatusCode::RegionAlreadyExists
        | StatusCode::DatabaseAlreadyExists
        | StatusCode::FlowAlreadyExists => Code::AlreadyExists,
        StatusCode::TableNotFound
        | StatusCode::RegionNotFound
@@ -258,7 +277,9 @@ pub fn status_to_tonic_code(status_code: StatusCode) -> Code {
        | StatusCode::DatabaseNotFound
        | StatusCode::UserNotFound
        | StatusCode::FlowNotFound => Code::NotFound,
        StatusCode::StorageUnavailable | StatusCode::RegionNotReady => Code::Unavailable,
        StatusCode::TableUnavailable
        | StatusCode::StorageUnavailable
        | StatusCode::RegionNotReady => Code::Unavailable,
        StatusCode::RuntimeResourcesExhausted
        | StatusCode::RateLimited
        | StatusCode::RegionBusy => Code::ResourceExhausted,

@@ -13,4 +13,3 @@
// limitations under the License.

pub mod error;
pub mod handler;
@@ -22,6 +22,7 @@ use crate::function::FunctionRef;
use crate::scalars::aggregate::{AggregateFunctionMetaRef, AggregateFunctions};
use crate::scalars::date::DateFunction;
use crate::scalars::expression::ExpressionFunction;
use crate::scalars::matches::MatchesFunction;
use crate::scalars::math::MathFunction;
use crate::scalars::numpy::NumpyFunction;
use crate::scalars::timestamp::TimestampFunction;
@@ -86,6 +87,9 @@ pub static FUNCTION_REGISTRY: Lazy<Arc<FunctionRegistry>> = Lazy::new(|| {
    // Aggregate functions
    AggregateFunctions::register(&function_registry);

    // Full text search function
    MatchesFunction::register(&function_registry);

    // System and administration functions
    SystemFunction::register(&function_registry);
    TableFunction::register(&function_registry);

@@ -12,6 +12,9 @@
// See the License for the specific language governing permissions and
// limitations under the License.

#![feature(let_chains)]
#![feature(try_blocks)]

mod macros;
pub mod scalars;
mod system;

@@ -15,6 +15,7 @@
pub mod aggregate;
pub(crate) mod date;
pub mod expression;
pub mod matches;
pub mod math;
pub mod numpy;
#[cfg(test)]
src/common/function/src/scalars/matches.rs: new file, 1343 lines (diff suppressed because it is too large)
@@ -44,7 +44,7 @@ impl Function for DatabaseFunction {
    fn eval(&self, func_ctx: FunctionContext, _columns: &[VectorRef]) -> Result<VectorRef> {
        let db = func_ctx.query_ctx.current_schema();

        Ok(Arc::new(StringVector::from_slice(&[db])) as _)
        Ok(Arc::new(StringVector::from_slice(&[&db])) as _)
    }
}

@@ -11,6 +11,7 @@ workspace = true
async-trait.workspace = true
common-runtime.workspace = true
common-telemetry.workspace = true
common-version.workspace = true
reqwest.workspace = true
serde.workspace = true
tokio.workspace = true
@@ -20,6 +21,3 @@ uuid.workspace = true
common-test-util.workspace = true
hyper = { version = "0.14", features = ["full"] }
tempfile.workspace = true

[build-dependencies]
common-version.workspace = true
@@ -22,6 +22,7 @@ use std::time::Duration;
use common_runtime::error::{Error, Result};
use common_runtime::{BoxedTaskFunction, RepeatedTask, TaskFunction};
use common_telemetry::{debug, info};
use common_version::build_info;
use reqwest::{Client, Response};
use serde::{Deserialize, Serialize};

@@ -114,11 +115,11 @@ pub enum Mode {
#[async_trait::async_trait]
pub trait Collector {
    fn get_version(&self) -> String {
        env!("CARGO_PKG_VERSION").to_string()
        build_info().version.to_string()
    }

    fn get_git_hash(&self) -> String {
        env!("GIT_COMMIT").to_string()
        build_info().commit.to_string()
    }

    fn get_os(&self) -> String {
@@ -286,6 +287,7 @@ mod tests {
    use std::time::Duration;

    use common_test_util::ports;
    use common_version::build_info;
    use hyper::service::{make_service_fn, service_fn};
    use hyper::Server;
    use reqwest::{Client, Response};
@@ -431,8 +433,8 @@ mod tests {
        let body = response.json::<StatisticData>().await.unwrap();
        assert_eq!(env::consts::ARCH, body.arch);
        assert_eq!(env::consts::OS, body.os);
        assert_eq!(env!("CARGO_PKG_VERSION"), body.version);
        assert_eq!(env!("GIT_COMMIT"), body.git_commit);
        assert_eq!(build_info().version, body.version);
        assert_eq!(build_info().commit, body.git_commit);
        assert_eq!(Mode::Standalone, body.mode);
        assert_eq!(1, body.nodes.unwrap());

@@ -16,6 +16,7 @@ common-macro.workspace = true
common-query.workspace = true
common-time.workspace = true
datatypes.workspace = true
prost.workspace = true
snafu.workspace = true
table.workspace = true
@@ -14,6 +14,7 @@

use std::any::Any;

use api::v1::ColumnDataType;
use common_error::ext::ErrorExt;
use common_error::status_code::StatusCode;
use common_macro::stack_trace_debug;
@@ -104,6 +105,25 @@ pub enum Error {
        #[snafu(implicit)]
        location: Location,
    },

    #[snafu(display("Unknown proto column datatype: {}", datatype))]
    UnknownColumnDataType {
        datatype: i32,
        #[snafu(implicit)]
        location: Location,
        #[snafu(source)]
        error: prost::DecodeError,
    },

    #[snafu(display(
        "Fulltext index only supports string type, column: {column_name}, unexpected type: {column_type:?}"
    ))]
    InvalidFulltextColumnType {
        column_name: String,
        column_type: ColumnDataType,
        #[snafu(implicit)]
        location: Location,
    },
}

pub type Result<T> = std::result::Result<T, Error>;
@@ -124,6 +144,10 @@ impl ErrorExt for Error {
            Error::UnexpectedValuesLength { .. } | Error::UnknownLocationType { .. } => {
                StatusCode::InvalidArguments
            }

            Error::UnknownColumnDataType { .. } | Error::InvalidFulltextColumnType { .. } => {
                StatusCode::InvalidArguments
            }
        }
    }

@@ -474,6 +474,7 @@ mod tests {
                scale: 10,
            })),
        }),
        options: None,
    };

    (
@@ -14,24 +14,26 @@

use std::collections::HashSet;

use api::v1::column_def::contains_fulltext;
use api::v1::{
    AddColumn, AddColumns, Column, ColumnDataTypeExtension, ColumnDef, ColumnSchema,
    CreateTableExpr, SemanticType,
    AddColumn, AddColumns, Column, ColumnDataType, ColumnDataTypeExtension, ColumnDef,
    ColumnOptions, ColumnSchema, CreateTableExpr, SemanticType,
};
use datatypes::schema::Schema;
use snafu::{ensure, OptionExt};
use snafu::{ensure, OptionExt, ResultExt};
use table::metadata::TableId;
use table::table_reference::TableReference;

use crate::error::{
    DuplicatedColumnNameSnafu, DuplicatedTimestampColumnSnafu, MissingTimestampColumnSnafu, Result,
    DuplicatedColumnNameSnafu, DuplicatedTimestampColumnSnafu, InvalidFulltextColumnTypeSnafu,
    MissingTimestampColumnSnafu, Result, UnknownColumnDataTypeSnafu,
};

pub struct ColumnExpr<'a> {
    pub column_name: &'a str,
    pub datatype: i32,
    pub semantic_type: i32,
    pub datatype_extension: &'a Option<ColumnDataTypeExtension>,
    pub options: &'a Option<ColumnOptions>,
}

impl<'a> ColumnExpr<'a> {
@@ -53,6 +55,7 @@ impl<'a> From<&'a Column> for ColumnExpr<'a> {
            datatype: column.datatype,
            semantic_type: column.semantic_type,
            datatype_extension: &column.datatype_extension,
            options: &column.options,
        }
    }
}
@@ -64,6 +67,7 @@ impl<'a> From<&'a ColumnSchema> for ColumnExpr<'a> {
            datatype: schema.datatype,
            semantic_type: schema.semantic_type,
            datatype_extension: &schema.datatype_extension,
            options: &schema.options,
        }
    }
}
@@ -99,6 +103,7 @@ pub fn build_create_table_expr(
        datatype,
        semantic_type,
        datatype_extension,
        options,
    } in column_exprs
    {
        let mut is_nullable = true;
@@ -119,6 +124,17 @@ pub fn build_create_table_expr(
            _ => {}
        }

        let column_type =
            ColumnDataType::try_from(datatype).context(UnknownColumnDataTypeSnafu { datatype })?;

        ensure!(
            !contains_fulltext(options) || column_type == ColumnDataType::String,
            InvalidFulltextColumnTypeSnafu {
                column_name,
                column_type,
            }
        );

        let column_def = ColumnDef {
            name: column_name.to_string(),
            data_type: datatype,
@@ -127,6 +143,7 @@ pub fn build_create_table_expr(
            semantic_type,
            comment: String::new(),
            datatype_extension: datatype_extension.clone(),
            options: options.clone(),
        };
        column_defs.push(column_def);
}
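
The new `ensure!` above rejects fulltext options on non-string columns. A standalone restatement of that invariant, using the same `api::v1` types imported in this file:

// Fulltext options are only accepted when the column's data type is String.
fn fulltext_ok(options: &Option<ColumnOptions>, column_type: ColumnDataType) -> bool {
    !contains_fulltext(options) || column_type == ColumnDataType::String
}
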
@@ -168,6 +185,7 @@ pub fn extract_new_columns(
            semantic_type: expr.semantic_type,
            comment: String::new(),
            datatype_extension: expr.datatype_extension.clone(),
            options: expr.options.clone(),
        });
        AddColumn {
            column_def,
src/common/meta/src/cache/flow/table_flownode.rs (vendored): 70 lines changed
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use std::collections::HashSet;
use std::collections::HashMap;
use std::sync::Arc;

use futures::future::BoxFuture;
@@ -26,9 +26,10 @@ use crate::error::Result;
use crate::instruction::{CacheIdent, CreateFlow, DropFlow};
use crate::key::flow::{TableFlowManager, TableFlowManagerRef};
use crate::kv_backend::KvBackendRef;
use crate::peer::Peer;
use crate::FlownodeId;

type FlownodeSet = HashSet<FlownodeId>;
type FlownodeSet = Arc<HashMap<FlownodeId, Peer>>;

pub type TableFlownodeSetCacheRef = Arc<TableFlownodeSetCache>;

@@ -53,13 +54,14 @@ fn init_factory(table_flow_manager: TableFlowManagerRef) -> Initializer<TableId,
        Box::pin(async move {
            table_flow_manager
                .flows(table_id)
                .map_ok(|key| key.flownode_id())
                .try_collect::<HashSet<_>>()
                .map_ok(|(key, value)| (key.flownode_id(), value.peer))
                .try_collect::<HashMap<_, _>>()
                .await
                // We must cache the value even when it is empty,
                // to avoid hitting the remote storage again on future requests;
                // if the value is later added to the remote storage,
                // a corresponding cache invalidation mechanism invalidates `(Key, EmptyHashSet)`.
                .map(Arc::new)
                .map(Some)
        })
})
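
The initializer above deliberately caches even an empty result (negative caching). A minimal sketch of the same idea with a bare moka cache; the loader body and the value type are stand-ins, not the real metadata fetch:

use std::collections::HashMap;
use std::sync::Arc;

// Toy version of the negative-caching pattern: an empty map is still stored,
// so later lookups for a table with no flows skip the remote fetch.
async fn flownodes_of(
    cache: &moka::future::Cache<u32, Arc<HashMap<u64, String>>>,
    table_id: u32,
) -> Arc<HashMap<u64, String>> {
    cache
        .get_with(table_id, async {
            // Remote metadata fetch elided; pretend it returned nothing.
            Arc::new(HashMap::new())
        })
        .await
}
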
@@ -69,21 +71,23 @@ async fn handle_create_flow(
    cache: &Cache<TableId, FlownodeSet>,
    CreateFlow {
        source_table_ids,
        flownode_ids,
        flownodes: flownode_peers,
    }: &CreateFlow,
) {
    for table_id in source_table_ids {
        let entry = cache.entry(*table_id);
        entry
            .and_compute_with(
                async |entry: Option<moka::Entry<u32, HashSet<u64>>>| match entry {
                async |entry: Option<moka::Entry<u32, Arc<HashMap<u64, _>>>>| match entry {
                    Some(entry) => {
                        let mut set = entry.into_value();
                        set.extend(flownode_ids.iter().cloned());
                        let mut map = entry.into_value().as_ref().clone();
                        map.extend(flownode_peers.iter().map(|peer| (peer.id, peer.clone())));

                        Op::Put(set)
                        Op::Put(Arc::new(map))
                    }
                    None => Op::Put(HashSet::from_iter(flownode_ids.iter().cloned())),
                    None => Op::Put(Arc::new(HashMap::from_iter(
                        flownode_peers.iter().map(|peer| (peer.id, peer.clone())),
                    ))),
                },
            )
            .await;
@@ -101,14 +105,14 @@ async fn handle_drop_flow(
        let entry = cache.entry(*table_id);
        entry
            .and_compute_with(
                async |entry: Option<moka::Entry<u32, HashSet<u64>>>| match entry {
                async |entry: Option<moka::Entry<u32, Arc<HashMap<u64, _>>>>| match entry {
                    Some(entry) => {
                        let mut set = entry.into_value();
                        let mut set = entry.into_value().as_ref().clone();
                        for flownode_id in flownode_ids {
                            set.remove(flownode_id);
                        }

                        Op::Put(set)
                        Op::Put(Arc::new(set))
                    }
                    None => {
                        // Do nothing
@@ -140,7 +144,7 @@ fn filter(ident: &CacheIdent) -> bool {

#[cfg(test)]
mod tests {
    use std::collections::{BTreeMap, HashSet};
    use std::collections::{BTreeMap, HashMap};
    use std::sync::Arc;

    use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
@@ -150,8 +154,10 @@ mod tests {
    use crate::cache::flow::table_flownode::new_table_flownode_set_cache;
    use crate::instruction::{CacheIdent, CreateFlow, DropFlow};
    use crate::key::flow::flow_info::FlowInfoValue;
    use crate::key::flow::flow_route::FlowRouteValue;
    use crate::key::flow::FlowMetadataManager;
    use crate::kv_backend::memory::MemoryKvBackend;
    use crate::peer::Peer;

    #[tokio::test]
    async fn test_cache_empty_set() {
@@ -184,15 +190,31 @@ mod tests {
                comment: "comment".to_string(),
                options: Default::default(),
            },
            (1..=3)
                .map(|i| {
                    (
                        (i - 1) as u32,
                        FlowRouteValue {
                            peer: Peer::empty(i),
                        },
                    )
                })
                .collect::<Vec<_>>(),
        )
        .await
        .unwrap();
        let cache = CacheBuilder::new(128).build();
        let cache = new_table_flownode_set_cache("test".to_string(), cache, mem_kv);
        let set = cache.get(1024).await.unwrap().unwrap();
        assert_eq!(set, HashSet::from([1, 2, 3]));
        assert_eq!(
            set.as_ref().clone(),
            HashMap::from_iter((1..=3).map(|i| { (i, Peer::empty(i),) }))
        );
        let set = cache.get(1025).await.unwrap().unwrap();
        assert_eq!(set, HashSet::from([1, 2, 3]));
        assert_eq!(
            set.as_ref().clone(),
            HashMap::from_iter((1..=3).map(|i| { (i, Peer::empty(i),) }))
        );
        let result = cache.get(1026).await.unwrap().unwrap();
        assert_eq!(result.len(), 0);
    }
@@ -204,7 +226,7 @@ mod tests {
        let cache = new_table_flownode_set_cache("test".to_string(), cache, mem_kv);
        let ident = vec![CacheIdent::CreateFlow(CreateFlow {
            source_table_ids: vec![1024, 1025],
            flownode_ids: vec![1, 2, 3, 4, 5],
            flownodes: (1..=5).map(Peer::empty).collect(),
        })];
        cache.invalidate(&ident).await.unwrap();
        let set = cache.get(1024).await.unwrap().unwrap();
@@ -221,11 +243,11 @@ mod tests {
        let ident = vec![
            CacheIdent::CreateFlow(CreateFlow {
                source_table_ids: vec![1024, 1025],
                flownode_ids: vec![1, 2, 3, 4, 5],
                flownodes: (1..=5).map(Peer::empty).collect(),
            }),
            CacheIdent::CreateFlow(CreateFlow {
                source_table_ids: vec![1024, 1025],
                flownode_ids: vec![11, 12],
                flownodes: (11..=12).map(Peer::empty).collect(),
            }),
        ];
        cache.invalidate(&ident).await.unwrap();
@@ -240,8 +262,14 @@ mod tests {
        })];
        cache.invalidate(&ident).await.unwrap();
        let set = cache.get(1024).await.unwrap().unwrap();
        assert_eq!(set, HashSet::from([11, 12]));
        assert_eq!(
            set.as_ref().clone(),
            HashMap::from_iter((11..=12).map(|i| { (i, Peer::empty(i),) }))
        );
        let set = cache.get(1025).await.unwrap().unwrap();
        assert_eq!(set, HashSet::from([11, 12]));
        assert_eq!(
            set.as_ref().clone(),
            HashMap::from_iter((11..=12).map(|i| { (i, Peer::empty(i),) }))
        );
    }
}
src/common/meta/src/cache/table/view_info.rs (vendored): 11 lines changed
@@ -63,8 +63,8 @@ fn invalidator<'a>(
    ident: &'a CacheIdent,
) -> BoxFuture<'a, Result<()>> {
    Box::pin(async move {
        if let CacheIdent::TableId(table_id) = ident {
            cache.invalidate(table_id).await
        if let CacheIdent::TableId(view_id) = ident {
            cache.invalidate(view_id).await
        }
        Ok(())
    })
@@ -111,6 +111,7 @@ mod tests {
        });
        set
    };
    let definition = "CREATE VIEW test AS SELECT * FROM numbers";

    task.view_info.ident.table_id = 1024;
    table_metadata_manager
@@ -118,6 +119,9 @@ mod tests {
            task.view_info.clone(),
            task.create_view.logical_plan.clone(),
            table_names,
            vec!["a".to_string()],
            vec!["number".to_string()],
            definition.to_string(),
        )
        .await
        .unwrap();
@@ -132,6 +136,9 @@ mod tests {
            .map(|t| t.clone().into())
            .collect::<HashSet<_>>()
    );
    assert_eq!(view_info.definition, task.create_view.definition);
    assert_eq!(view_info.columns, task.create_view.columns);
    assert_eq!(view_info.plan_columns, task.create_view.plan_columns);

    assert!(cache.contains_key(&1024));
    cache

@@ -23,6 +23,7 @@ use crate::key::schema_name::SchemaNameKey;
use crate::key::table_info::TableInfoKey;
use crate::key::table_name::TableNameKey;
use crate::key::table_route::TableRouteKey;
use crate::key::view_info::ViewInfoKey;
use crate::key::MetaKey;

/// KvBackend cache invalidator
@@ -76,6 +77,9 @@

                let key = TableRouteKey::new(*table_id);
                self.invalidate_key(&key.to_bytes()).await;

                let key = ViewInfoKey::new(*table_id);
                self.invalidate_key(&key.to_bytes()).await;
            }
            CacheIdent::TableName(table_name) => {
                let key: TableNameKey = table_name.into();
@@ -16,7 +16,7 @@ use std::collections::HashMap;
use std::sync::Arc;

use common_telemetry::tracing_context::W3cTrace;
use store_api::storage::{RegionNumber, TableId};
use store_api::storage::{RegionId, RegionNumber, TableId};

use crate::cache_invalidator::CacheInvalidatorRef;
use crate::ddl::flow_meta::FlowMetadataAllocatorRef;
@@ -26,11 +26,10 @@ use crate::key::flow::FlowMetadataManagerRef;
use crate::key::table_route::PhysicalTableRouteValue;
use crate::key::TableMetadataManagerRef;
use crate::node_manager::NodeManagerRef;
use crate::peer::PeerLookupServiceRef;
use crate::region_keeper::MemoryRegionKeeperRef;
use crate::rpc::ddl::{SubmitDdlTaskRequest, SubmitDdlTaskResponse};
use crate::rpc::procedure::{MigrateRegionRequest, MigrateRegionResponse, ProcedureStateResponse};
use crate::ClusterId;
use crate::{ClusterId, DatanodeId};

pub mod alter_logical_tables;
pub mod alter_table;
@@ -43,6 +42,7 @@ pub mod create_view;
pub mod drop_database;
pub mod drop_flow;
pub mod drop_table;
pub mod drop_view;
pub mod flow_meta;
mod physical_table_metadata;
pub mod table_meta;
@@ -102,6 +102,33 @@ pub struct TableMetadata {
    pub region_wal_options: HashMap<RegionNumber, String>,
}

pub type RegionFailureDetectorControllerRef = Arc<dyn RegionFailureDetectorController>;

pub type DetectingRegion = (ClusterId, DatanodeId, RegionId);

/// Used for actively registering Region failure detectors.
///
/// This ensures the Region Supervisor can detect Region failures without relying on the first heartbeat from the datanode.
#[async_trait::async_trait]
pub trait RegionFailureDetectorController: Send + Sync {
    /// Registers failure detectors for the given identifiers.
    async fn register_failure_detectors(&self, detecting_regions: Vec<DetectingRegion>);

    /// Deregisters failure detectors for the given identifiers.
    async fn deregister_failure_detectors(&self, detecting_regions: Vec<DetectingRegion>);
}

/// A noop implementation of [`RegionFailureDetectorController`].
#[derive(Debug, Clone)]
pub struct NoopRegionFailureDetectorControl;

#[async_trait::async_trait]
impl RegionFailureDetectorController for NoopRegionFailureDetectorControl {
    async fn register_failure_detectors(&self, _detecting_regions: Vec<DetectingRegion>) {}

    async fn deregister_failure_detectors(&self, _detecting_regions: Vec<DetectingRegion>) {}
}
|
||||
|
||||
/// The context of ddl.
|
||||
#[derive(Clone)]
|
||||
pub struct DdlContext {
|
||||
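Besides the noop above, a concrete controller only needs to implement the two trait methods. A sketch of a test-oriented implementation, assuming nothing beyond the trait and type aliases shown in this hunk:

use std::sync::Mutex;

#[derive(Default)]
struct RecordingController {
    registered: Mutex<Vec<DetectingRegion>>,
}

#[async_trait::async_trait]
impl RegionFailureDetectorController for RecordingController {
    async fn register_failure_detectors(&self, detecting_regions: Vec<DetectingRegion>) {
        // Record every (cluster, datanode, region) triple we are asked to track.
        self.registered.lock().unwrap().extend(detecting_regions);
    }

    async fn deregister_failure_detectors(&self, detecting_regions: Vec<DetectingRegion>) {
        // Forget exactly the triples that were deregistered.
        self.registered
            .lock()
            .unwrap()
            .retain(|region| !detecting_regions.contains(region));
    }
}

Such a double makes it easy to assert that DDL procedures register and deregister exactly the regions they create and drop.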
@@ -119,6 +146,28 @@ pub struct DdlContext {
    pub flow_metadata_manager: FlowMetadataManagerRef,
    /// Allocator for flow metadata.
    pub flow_metadata_allocator: FlowMetadataAllocatorRef,
    /// Looks up a peer by id.
    pub peer_lookup_service: PeerLookupServiceRef,
    /// The controller of region failure detectors.
    pub region_failure_detector_controller: RegionFailureDetectorControllerRef,
}

impl DdlContext {
    /// Notifies the RegionSupervisor to register failure detectors for newly created regions.
    ///
    /// The datanode may crash without sending a heartbeat that contains information about newly created regions,
    /// which may prevent the RegionSupervisor from detecting failures in these newly created regions.
    pub async fn register_failure_detectors(&self, detecting_regions: Vec<DetectingRegion>) {
        self.region_failure_detector_controller
            .register_failure_detectors(detecting_regions)
            .await;
    }

    /// Notifies the RegionSupervisor to remove failure detectors.
    ///
    /// Once the regions are dropped, subsequent heartbeats no longer include them.
    /// Therefore, the failure detectors for these dropped regions should be removed.
    async fn deregister_failure_detectors(&self, detecting_regions: Vec<DetectingRegion>) {
        self.region_failure_detector_controller
            .deregister_failure_detectors(detecting_regions)
            .await;
    }
}
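To illustrate how these helpers might be driven, here is a hedged sketch of a create-table path building DetectingRegion triples for its freshly allocated regions. register_new_regions and its parameters are illustrative only, assuming the usual RegionId::new(table_id, region_number) constructor from store_api imported in the first hunk:

async fn register_new_regions(
    ctx: &DdlContext,
    cluster_id: ClusterId,
    datanode_id: DatanodeId,
    table_id: TableId,
    region_numbers: &[RegionNumber],
) {
    // One detecting region per (cluster, datanode, region) triple.
    let detecting_regions: Vec<DetectingRegion> = region_numbers
        .iter()
        .map(|number| (cluster_id, datanode_id, RegionId::new(table_id, *number)))
        .collect();
    ctx.register_failure_detectors(detecting_regions).await;
}

This closes exactly the window the doc comment describes: if the datanode crashes before its first heartbeat mentions the new regions, the supervisor already holds detectors for them.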
@@ -41,8 +41,9 @@ use crate::ddl::DdlContext;
use crate::error::{self, Result};
use crate::instruction::{CacheIdent, CreateFlow};
use crate::key::flow::flow_info::FlowInfoValue;
use crate::key::flow::flow_route::FlowRouteValue;
use crate::key::table_name::TableNameKey;
use crate::key::FlowId;
use crate::key::{FlowId, FlowPartitionId};
use crate::lock_key::{CatalogLock, FlowNameLock, TableNameLock};
use crate::peer::Peer;
use crate::rpc::ddl::{CreateFlowTask, QueryContext};

@@ -170,9 +171,10 @@ impl CreateFlowProcedure {
// Safety: The flow id must be allocated.
let flow_id = self.data.flow_id.unwrap();
// TODO(weny): Support `or_replace`.
let (flow_info, flow_routes) = (&self.data).into();
self.context
    .flow_metadata_manager
    .create_flow_metadata(flow_id, (&self.data).into())
    .create_flow_metadata(flow_id, flow_info, flow_routes)
    .await?;
info!("Created flow metadata for flow {flow_id}");
self.data.state = CreateFlowState::InvalidateFlowCache;

@@ -192,7 +194,7 @@ impl CreateFlowProcedure {
&ctx,
&[CacheIdent::CreateFlow(CreateFlow {
    source_table_ids: self.data.source_table_ids.clone(),
    flownode_ids: self.data.peers.iter().map(|peer| peer.id).collect(),
    flownodes: self.data.peers.clone(),
})],
)
.await?;

@@ -292,7 +294,7 @@ impl From<&CreateFlowData> for CreateRequest {
    }
}

impl From<&CreateFlowData> for FlowInfoValue {
impl From<&CreateFlowData> for (FlowInfoValue, Vec<(FlowPartitionId, FlowRouteValue)>) {
    fn from(value: &CreateFlowData) -> Self {
        let CreateFlowTask {
            catalog_name,

@@ -311,17 +313,26 @@ impl From<&CreateFlowData> for FlowInfoValue {
.enumerate()
.map(|(idx, peer)| (idx as u32, peer.id))
.collect::<BTreeMap<_, _>>();
let flow_routes = value
    .peers
    .iter()
    .enumerate()
    .map(|(idx, peer)| (idx as u32, FlowRouteValue { peer: peer.clone() }))
    .collect::<Vec<_>>();

FlowInfoValue {
    source_table_ids: value.source_table_ids.clone(),
    sink_table_name,
    flownode_ids,
    catalog_name,
    flow_name,
    raw_sql: sql,
    expire_after,
    comment,
    options,
}
(
    FlowInfoValue {
        source_table_ids: value.source_table_ids.clone(),
        sink_table_name,
        flownode_ids,
        catalog_name,
        flow_name,
        raw_sql: sql,
        expire_after,
        comment,
        options,
    },
    flow_routes,
)
    }
}
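The conversion now yields the flow metadata together with one route per participating flownode, keyed by partition index, so create_flow_metadata can persist both in a single call (see the second hunk above). A self-contained sketch of the route construction, with placeholder Peer and value types standing in for the real meta structs:

type FlowPartitionId = u32;

#[derive(Clone, Debug, PartialEq)]
struct Peer {
    id: u64,
    addr: String,
}

#[derive(Clone, Debug, PartialEq)]
struct FlowRouteValue {
    peer: Peer,
}

fn routes_for(peers: &[Peer]) -> Vec<(FlowPartitionId, FlowRouteValue)> {
    // Partition ids are simply the peer's index, mirroring how `flownode_ids` is built.
    peers
        .iter()
        .enumerate()
        .map(|(idx, peer)| (idx as u32, FlowRouteValue { peer: peer.clone() }))
        .collect()
}

fn main() {
    let peers = vec![
        Peer { id: 1, addr: "flownode-1:3002".into() },
        Peer { id: 2, addr: "flownode-2:3002".into() },
    ];
    assert_eq!(routes_for(&peers).len(), 2);
}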
Some files were not shown because too many files have changed in this diff.