Mirror of https://github.com/GreptimeTeam/greptimedb.git (synced 2025-12-27 08:29:59 +00:00)
Compare commits: v0.9.0-nig...v0.9.1 (101 Commits)
SHA1:
fe1cfbf2b3, ded874da04, fe2d29a2a0, b388829a96, 8e7c027bf5, 9d5d7c1f9a, efe5eeef14, ca54b05be3, d67314789c, 6c4b8b63a5,
62a0defd63, 291d9d55a4, 90301a6250, c66d3090b6, 656050722c, b741a7181b, dd23d47743, 80aaa7725e, c24de8b908, f382a7695f,
1ea43da9ea, 6113f46284, 6d8a502430, 2d992f4f12, 7daf24c47f, 567f5105bf, 78962015dd, 1138f32af9, 53fc14a50b, 1895a5478b,
f0c953f84a, 1a38f36d2d, cb94bd45d3, b298b35b3b, 164232e073, 9a5fa49955, 92d6d4e64a, 021ec7b6ac, 0710e6ff36, db3a07804e,
bdd3d2d9ce, b81d3a28e6, 89b86c87a2, 0b0ed03ee6, ea4a71b387, 4cd5ec7769, c8f4a85720, 024dac8171, 918be099cd, 91dbac4141,
e935bf7574, f7872654cc, 547730a467, 49f22f0fc5, 2ae2a6674e, c8cf3b1677, 7aae19aa8b, b90267dd80, 9fa9156bde, ce900e850a,
5274c5a407, 0b13ac6e16, 8ab6136d1c, e39f49fe56, c595a56ac8, d6c7b848da, 2010a2a33d, be3ea0fae7, 7b28da277d, b2c5f8eefa,
072d7c2022, 7900367433, 9fbc4ba649, 2e7b12c344, 2b912d93fb, 04ac0c8da0, 64cad4e891, 20d9c0a345, 9501318ce5, b8bd8456f0,
4b8b04ffa2, 15ac8116ea, 377a513690, 5a1732279b, 16075ada67, 67dfdd6c61, 9f2d53c3df, 05c7d3eb42, 63acc30ce7, 285ffc5850,
ab22bbac84, 7ad248d6f6, 50e4539667, da1ea253ba, da0c840261, 20417e646a, 9271b3b7bd, 374cfe74bf, 52a9a748a1, 33ed745049,
458e5d7e66
15
.coderabbit.yaml
Normal file
@@ -0,0 +1,15 @@
# yaml-language-server: $schema=https://coderabbit.ai/integrations/schema.v2.json
language: "en-US"
early_access: false
reviews:
profile: "chill"
request_changes_workflow: false
high_level_summary: true
poem: true
review_status: true
collapse_walkthrough: false
auto_review:
enabled: false
drafts: false
chat:
auto_reply: true
@@ -62,12 +62,13 @@ runs:
# Get proper backtraces in mac Sonoma. Currently there's an issue with the new
# linker that prevents backtraces from getting printed correctly.
#
# <https://github.com/rust-lang/rust/issues/113783>
# <https://github.com/rust-lang/rust/issues/113783>
- name: Run integration tests
if: ${{ inputs.disable-run-tests == 'false' }}
shell: bash
env:
env:
CARGO_BUILD_RUSTFLAGS: "-Clink-arg=-Wl,-ld_classic"
SQLNESS_OPTS: "--preserve-state"
run: |
make test sqlness-test

@@ -81,7 +82,7 @@ runs:

- name: Build greptime binary
shell: bash
env:
env:
CARGO_BUILD_RUSTFLAGS: "-Clink-arg=-Wl,-ld_classic"
run: |
make build \
@@ -40,7 +40,7 @@ runs:
- name: Install Python
uses: actions/setup-python@v5
with:
python-version: '3.10'
python-version: "3.10"

- name: Install PyArrow Package
shell: pwsh
@@ -62,13 +62,14 @@ runs:
env:
RUSTUP_WINDOWS_PATH_ADD_BIN: 1 # Workaround for https://github.com/nextest-rs/nextest/issues/1493
RUST_BACKTRACE: 1
SQLNESS_OPTS: "--preserve-state"

- name: Upload sqlness logs
if: ${{ failure() }} # Only upload logs when the integration tests failed.
uses: actions/upload-artifact@v4
with:
name: sqlness-logs
path: /tmp/greptime-*.log
path: C:\tmp\greptime-*.log
retention-days: 3

- name: Build greptime binary
@@ -2,7 +2,7 @@ name: Setup Etcd cluster
description: Deploy Etcd cluster on Kubernetes
inputs:
etcd-replicas:
default: 3
default: 1
description: "Etcd replicas"
namespace:
default: "etcd-cluster"
@@ -1,18 +1,13 @@
|
||||
meta:
|
||||
config: |-
|
||||
[runtime]
|
||||
read_rt_size = 8
|
||||
write_rt_size = 8
|
||||
bg_rt_size = 8
|
||||
global_rt_size = 4
|
||||
datanode:
|
||||
config: |-
|
||||
[runtime]
|
||||
read_rt_size = 8
|
||||
write_rt_size = 8
|
||||
bg_rt_size = 8
|
||||
global_rt_size = 4
|
||||
compact_rt_size = 2
|
||||
frontend:
|
||||
config: |-
|
||||
[runtime]
|
||||
read_rt_size = 8
|
||||
write_rt_size = 8
|
||||
bg_rt_size = 8
|
||||
global_rt_size = 4
|
||||
@@ -1,19 +1,16 @@
|
||||
meta:
|
||||
config: |-
|
||||
[runtime]
|
||||
read_rt_size = 8
|
||||
write_rt_size = 8
|
||||
bg_rt_size = 8
|
||||
|
||||
global_rt_size = 4
|
||||
|
||||
[datanode]
|
||||
[datanode.client]
|
||||
timeout = "60s"
|
||||
datanode:
|
||||
config: |-
|
||||
[runtime]
|
||||
read_rt_size = 8
|
||||
write_rt_size = 8
|
||||
bg_rt_size = 8
|
||||
global_rt_size = 4
|
||||
compact_rt_size = 2
|
||||
|
||||
[storage]
|
||||
cache_path = "/data/greptimedb/s3cache"
|
||||
@@ -21,9 +18,7 @@ datanode:
|
||||
frontend:
|
||||
config: |-
|
||||
[runtime]
|
||||
read_rt_size = 8
|
||||
write_rt_size = 8
|
||||
bg_rt_size = 8
|
||||
global_rt_size = 4
|
||||
|
||||
[meta_client]
|
||||
ddl_timeout = "60s"
|
||||
|
||||
@@ -1,9 +1,7 @@
|
||||
meta:
|
||||
config: |-
|
||||
[runtime]
|
||||
read_rt_size = 8
|
||||
write_rt_size = 8
|
||||
bg_rt_size = 8
|
||||
global_rt_size = 4
|
||||
|
||||
[datanode]
|
||||
[datanode.client]
|
||||
@@ -11,15 +9,12 @@ meta:
|
||||
datanode:
|
||||
config: |-
|
||||
[runtime]
|
||||
read_rt_size = 8
|
||||
write_rt_size = 8
|
||||
bg_rt_size = 8
|
||||
global_rt_size = 4
|
||||
compact_rt_size = 2
|
||||
frontend:
|
||||
config: |-
|
||||
[runtime]
|
||||
read_rt_size = 8
|
||||
write_rt_size = 8
|
||||
bg_rt_size = 8
|
||||
global_rt_size = 4
|
||||
|
||||
[meta_client]
|
||||
ddl_timeout = "60s"
|
||||
|
||||
@@ -1,9 +1,7 @@
|
||||
meta:
|
||||
config: |-
|
||||
[runtime]
|
||||
read_rt_size = 8
|
||||
write_rt_size = 8
|
||||
bg_rt_size = 8
|
||||
global_rt_size = 4
|
||||
|
||||
[wal]
|
||||
provider = "kafka"
|
||||
@@ -17,9 +15,8 @@ meta:
|
||||
datanode:
|
||||
config: |-
|
||||
[runtime]
|
||||
read_rt_size = 8
|
||||
write_rt_size = 8
|
||||
bg_rt_size = 8
|
||||
global_rt_size = 4
|
||||
compact_rt_size = 2
|
||||
|
||||
[wal]
|
||||
provider = "kafka"
|
||||
@@ -28,9 +25,7 @@ datanode:
|
||||
frontend:
|
||||
config: |-
|
||||
[runtime]
|
||||
read_rt_size = 8
|
||||
write_rt_size = 8
|
||||
bg_rt_size = 8
|
||||
global_rt_size = 4
|
||||
|
||||
[meta_client]
|
||||
ddl_timeout = "60s"
|
||||
@@ -43,3 +38,8 @@ objectStorage:
|
||||
credentials:
|
||||
accessKeyId: rootuser
|
||||
secretAccessKey: rootpass123
|
||||
remoteWal:
|
||||
enabled: true
|
||||
kafka:
|
||||
brokerEndpoints:
|
||||
- "kafka.kafka-cluster.svc.cluster.local:9092"
|
||||
|
||||
1
.github/workflows/develop.yml
vendored
@@ -141,6 +141,7 @@ jobs:
runs-on: ubuntu-latest
timeout-minutes: 60
strategy:
fail-fast: false
matrix:
target: [ "fuzz_create_table", "fuzz_alter_table", "fuzz_create_database", "fuzz_create_logical_table", "fuzz_alter_logical_table", "fuzz_insert", "fuzz_insert_logical_table" ]
steps:
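The fuzz targets named in the matrix above can also be exercised locally. A minimal sketch, assuming the Makefile's `fuzz` target accepts `FUZZ_TARGET` and `RUNS` overrides as suggested by the Makefile diff further down (the exact recipe is not shown on this page):

```bash
# List the available fuzz targets (same command as the Makefile's fuzz-ls target).
cargo fuzz list --fuzz-dir tests-fuzz

# Hypothetical local run of one target from the CI matrix.
make fuzz FUZZ_TARGET=fuzz_create_table RUNS=1
```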
16
.github/workflows/nightly-ci.yml
vendored
@@ -51,13 +51,15 @@ jobs:
- name: Rust Cache
uses: Swatinem/rust-cache@v2
- name: Run sqlness
run: cargo sqlness
run: make sqlness-test
env:
SQLNESS_OPTS: "--preserve-state"
- name: Upload sqlness logs
if: always()
uses: actions/upload-artifact@v4
with:
name: sqlness-logs
path: /tmp/greptime-*.log
path: C:\tmp\greptime-*.log
retention-days: 3

test-on-windows:
@@ -109,11 +111,7 @@ jobs:

check-status:
name: Check status
needs: [
sqlness-test,
sqlness-windows,
test-on-windows,
]
needs: [sqlness-test, sqlness-windows, test-on-windows]
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
runs-on: ubuntu-20.04
outputs:
@@ -127,9 +125,7 @@
notification:
if: ${{ github.repository == 'GreptimeTeam/greptimedb' && always() }} # Not requiring successful dependent jobs, always run.
name: Send notification to Greptime team
needs: [
check-status
]
needs: [check-status]
runs-on: ubuntu-20.04
env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
2
.github/workflows/release.yml
vendored
@@ -91,7 +91,7 @@ env:
# The scheduled version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-YYYYMMDD', like v0.2.0-nigthly-20230313;
NIGHTLY_RELEASE_PREFIX: nightly
# Note: The NEXT_RELEASE_VERSION should be modified manually by every formal release.
NEXT_RELEASE_VERSION: v0.9.0
NEXT_RELEASE_VERSION: v0.10.0

# Permission reference: https://docs.github.com/en/actions/using-jobs/assigning-permissions-to-jobs
permissions:
@@ -14,7 +14,7 @@ Follow our [README](https://github.com/GreptimeTeam/greptimedb#readme) to get th

It can feel intimidating to contribute to a complex project, but it can also be exciting and fun. These general notes will help everyone participate in this communal activity.

- Follow the [Code of Conduct](https://github.com/GreptimeTeam/greptimedb/blob/main/CODE_OF_CONDUCT.md)
- Follow the [Code of Conduct](https://github.com/GreptimeTeam/.github/blob/main/.github/CODE_OF_CONDUCT.md)
- Small changes make huge differences. We will happily accept a PR making a single character change if it helps move forward. Don't wait to have everything working.
- Check the closed issues before opening your issue.
- Try to follow the existing style of the code.
@@ -30,7 +30,7 @@ Pull requests are great, but we accept all kinds of other help if you like. Such

## Code of Conduct

Also, there are things that we are not looking for because they don't match the goals of the product or benefit the community. Please read [Code of Conduct](https://github.com/GreptimeTeam/greptimedb/blob/main/CODE_OF_CONDUCT.md); we hope everyone can keep good manners and become an honored member.
Also, there are things that we are not looking for because they don't match the goals of the product or benefit the community. Please read [Code of Conduct](https://github.com/GreptimeTeam/.github/blob/main/.github/CODE_OF_CONDUCT.md); we hope everyone can keep good manners and become an honored member.

## License

@@ -55,7 +55,7 @@ GreptimeDB uses the [Apache 2.0 license](https://github.com/GreptimeTeam/greptim
- To ensure that community is free and confident in its ability to use your contributions, please sign the Contributor License Agreement (CLA) which will be incorporated in the pull request process.
- Make sure all files have proper license header (running `docker run --rm -v $(pwd):/github/workspace ghcr.io/korandoru/hawkeye-native:v3 format` from the project root).
- Make sure all your codes are formatted and follow the [coding style](https://pingcap.github.io/style-guide/rust/) and [style guide](docs/style-guide.md).
- Make sure all unit tests are passed (using `cargo test --workspace` or [nextest](https://nexte.st/index.html) `cargo nextest run`).
- Make sure all unit tests are passed using [nextest](https://nexte.st/index.html) `cargo nextest run`.
- Make sure all clippy warnings are fixed (you can check it locally by running `cargo clippy --workspace --all-targets -- -D warnings`).

#### `pre-commit` Hooks
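The checklist items above map directly onto commands quoted in this diff; collected here with paths and flags exactly as they appear in the contribution guide:

```bash
# License headers, run from the project root.
docker run --rm -v $(pwd):/github/workspace ghcr.io/korandoru/hawkeye-native:v3 format

# Unit tests via nextest.
cargo nextest run

# Clippy with warnings denied.
cargo clippy --workspace --all-targets -- -D warnings
```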
485
Cargo.lock
generated
File diff suppressed because it is too large
30
Cargo.toml
@@ -64,7 +64,7 @@ members = [
resolver = "2"

[workspace.package]
version = "0.8.2"
version = "0.9.1"
edition = "2021"
license = "Apache-2.0"
@@ -104,27 +104,27 @@ clap = { version = "4.4", features = ["derive"] }
|
||||
config = "0.13.0"
|
||||
crossbeam-utils = "0.8"
|
||||
dashmap = "5.4"
|
||||
datafusion = { git = "https://github.com/apache/datafusion.git", rev = "729b356ef543ffcda6813c7b5373507a04ae0109" }
|
||||
datafusion-common = { git = "https://github.com/apache/datafusion.git", rev = "729b356ef543ffcda6813c7b5373507a04ae0109" }
|
||||
datafusion-expr = { git = "https://github.com/apache/datafusion.git", rev = "729b356ef543ffcda6813c7b5373507a04ae0109" }
|
||||
datafusion-functions = { git = "https://github.com/apache/datafusion.git", rev = "729b356ef543ffcda6813c7b5373507a04ae0109" }
|
||||
datafusion-optimizer = { git = "https://github.com/apache/datafusion.git", rev = "729b356ef543ffcda6813c7b5373507a04ae0109" }
|
||||
datafusion-physical-expr = { git = "https://github.com/apache/datafusion.git", rev = "729b356ef543ffcda6813c7b5373507a04ae0109" }
|
||||
datafusion-physical-plan = { git = "https://github.com/apache/datafusion.git", rev = "729b356ef543ffcda6813c7b5373507a04ae0109" }
|
||||
datafusion-sql = { git = "https://github.com/apache/datafusion.git", rev = "729b356ef543ffcda6813c7b5373507a04ae0109" }
|
||||
datafusion-substrait = { git = "https://github.com/apache/datafusion.git", rev = "729b356ef543ffcda6813c7b5373507a04ae0109" }
|
||||
datafusion = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "d7bda5c9b762426e81f144296deadc87e5f4a0b8" }
|
||||
datafusion-common = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "d7bda5c9b762426e81f144296deadc87e5f4a0b8" }
|
||||
datafusion-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "d7bda5c9b762426e81f144296deadc87e5f4a0b8" }
|
||||
datafusion-functions = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "d7bda5c9b762426e81f144296deadc87e5f4a0b8" }
|
||||
datafusion-optimizer = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "d7bda5c9b762426e81f144296deadc87e5f4a0b8" }
|
||||
datafusion-physical-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "d7bda5c9b762426e81f144296deadc87e5f4a0b8" }
|
||||
datafusion-physical-plan = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "d7bda5c9b762426e81f144296deadc87e5f4a0b8" }
|
||||
datafusion-sql = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "d7bda5c9b762426e81f144296deadc87e5f4a0b8" }
|
||||
datafusion-substrait = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "d7bda5c9b762426e81f144296deadc87e5f4a0b8" }
|
||||
derive_builder = "0.12"
|
||||
dotenv = "0.15"
|
||||
etcd-client = { version = "0.13" }
|
||||
fst = "0.4.7"
|
||||
futures = "0.3"
|
||||
futures-util = "0.3"
|
||||
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "b83f00958fe4cbc77b85b7407bca206e98bdc845" }
|
||||
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "c437b55725b7f5224fe9d46db21072b4a682ee4b" }
|
||||
humantime = "2.1"
|
||||
humantime-serde = "1.1"
|
||||
itertools = "0.10"
|
||||
lazy_static = "1.4"
|
||||
meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "80b72716dcde47ec4161478416a5c6c21343364d" }
|
||||
meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "049171eb16cb4249d8099751a0c46750d1fe88e7" }
|
||||
mockall = "0.11.4"
|
||||
moka = "0.12"
|
||||
notify = "6.1"
|
||||
@@ -183,7 +183,7 @@ auth = { path = "src/auth" }
|
||||
cache = { path = "src/cache" }
|
||||
catalog = { path = "src/catalog" }
|
||||
client = { path = "src/client" }
|
||||
cmd = { path = "src/cmd" }
|
||||
cmd = { path = "src/cmd", default-features = false }
|
||||
common-base = { path = "src/common/base" }
|
||||
common-catalog = { path = "src/common/catalog" }
|
||||
common-config = { path = "src/common/config" }
|
||||
@@ -213,7 +213,7 @@ datanode = { path = "src/datanode" }
|
||||
datatypes = { path = "src/datatypes" }
|
||||
file-engine = { path = "src/file-engine" }
|
||||
flow = { path = "src/flow" }
|
||||
frontend = { path = "src/frontend" }
|
||||
frontend = { path = "src/frontend", default-features = false }
|
||||
index = { path = "src/index" }
|
||||
log-store = { path = "src/log-store" }
|
||||
meta-client = { path = "src/meta-client" }
|
||||
@@ -238,7 +238,7 @@ table = { path = "src/table" }
|
||||
|
||||
[workspace.dependencies.meter-macros]
|
||||
git = "https://github.com/GreptimeTeam/greptime-meter.git"
|
||||
rev = "80b72716dcde47ec4161478416a5c6c21343364d"
|
||||
rev = "049171eb16cb4249d8099751a0c46750d1fe88e7"
|
||||
|
||||
[profile.release]
|
||||
debug = 1
|
||||
|
||||
13
Makefile
@@ -15,6 +15,7 @@ RUST_TOOLCHAIN ?= $(shell cat rust-toolchain.toml | grep channel | cut -d'"' -f2
CARGO_REGISTRY_CACHE ?= ${HOME}/.cargo/registry
ARCH := $(shell uname -m | sed 's/x86_64/amd64/' | sed 's/aarch64/arm64/')
OUTPUT_DIR := $(shell if [ "$(RELEASE)" = "true" ]; then echo "release"; elif [ ! -z "$(CARGO_PROFILE)" ]; then echo "$(CARGO_PROFILE)" ; else echo "debug"; fi)
SQLNESS_OPTS ?=

# The arguments for running integration tests.
ETCD_VERSION ?= v3.5.9
@@ -161,7 +162,7 @@ nextest: ## Install nextest tools.

.PHONY: sqlness-test
sqlness-test: ## Run sqlness test.
cargo sqlness
cargo sqlness ${SQLNESS_OPTS}
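With the `SQLNESS_OPTS ?=` variable and the updated `sqlness-test` recipe above, the preserve-state behaviour used by CI can be reproduced locally. A minimal sketch; the option value is the one set in the workflow files in this diff:

```bash
# Forward extra options to `cargo sqlness` through the Makefile variable.
make sqlness-test SQLNESS_OPTS="--preserve-state"
```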
# Run fuzz test ${FUZZ_TARGET}.
RUNS ?= 1
@@ -172,7 +173,7 @@ fuzz:

.PHONY: fuzz-ls
fuzz-ls:
cargo fuzz list --fuzz-dir tests-fuzz
cargo fuzz list --fuzz-dir tests-fuzz

.PHONY: check
check: ## Cargo check all the targets.
@@ -205,10 +206,14 @@ run-it-in-container: start-etcd ## Run integration tests in dev-builder.
-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:latest \
make test sqlness-test BUILD_JOBS=${BUILD_JOBS}

.PHONY: run-cluster-with-etcd
run-cluster-with-etcd: ## Run greptime cluster with etcd in docker-compose.
.PHONY: start-cluster
start-cluster: ## Start the greptimedb cluster with etcd by using docker compose.
docker compose -f ./docker/docker-compose/cluster-with-etcd.yaml up

.PHONY: stop-cluster
stop-cluster: ## Stop the greptimedb cluster that created by docker compose.
docker compose -f ./docker/docker-compose/cluster-with-etcd.yaml stop
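The renamed cluster targets above are thin wrappers around docker compose, so usage is simply:

```bash
make start-cluster   # runs: docker compose -f ./docker/docker-compose/cluster-with-etcd.yaml up
make stop-cluster    # runs: docker compose -f ./docker/docker-compose/cluster-with-etcd.yaml stop
```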
##@ Docs
config-docs: ## Generate configuration documentation from toml files.
docker run --rm \
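The documentation target shown above regenerates the configuration tables from the TOML files inside a Docker container; a minimal invocation:

```bash
make config-docs
```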
32
README.md
@@ -6,12 +6,12 @@
|
||||
</picture>
|
||||
</p>
|
||||
|
||||
<h1 align="center">Cloud-scale, Fast and Efficient Time Series Database</h1>
|
||||
<h2 align="center">Unified Time Series Database for Metrics, Logs, and Events</h2>
|
||||
|
||||
<div align="center">
|
||||
<h3 align="center">
|
||||
<a href="https://greptime.com/product/cloud">GreptimeCloud</a> |
|
||||
<a href="https://docs.greptime.com/">User guide</a> |
|
||||
<a href="https://docs.greptime.com/">User Guide</a> |
|
||||
<a href="https://greptimedb.rs/">API Docs</a> |
|
||||
<a href="https://github.com/GreptimeTeam/greptimedb/issues/3412">Roadmap 2024</a>
|
||||
</h4>
|
||||
@@ -50,24 +50,23 @@
|
||||
|
||||
## Introduction
|
||||
|
||||
**GreptimeDB** is an open-source time-series database focusing on efficiency, scalability, and analytical capabilities.
|
||||
Designed to work on infrastructure of the cloud era, GreptimeDB benefits users with its elasticity and commodity storage, offering a fast and cost-effective **alternative to InfluxDB** and a **long-term storage for Prometheus**.
|
||||
**GreptimeDB** is an open-source unified time-series database for **Metrics**, **Logs**, and **Events** (also **Traces** in plan). You can gain real-time insights from Edge to Cloud at any scale.
|
||||
|
||||
## Why GreptimeDB
|
||||
|
||||
Our core developers have been building time-series data platforms for years. Based on our best-practices, GreptimeDB is born to give you:
|
||||
|
||||
* **Easy horizontal scaling**
|
||||
* **Unified all kinds of time series**
|
||||
|
||||
Seamless scalability from a standalone binary at edge to a robust, highly available distributed cluster in cloud, with a transparent experience for both developers and administrators.
|
||||
GreptimeDB treats all time series as contextual events with timestamp, and thus unifies the processing of metrics, logs, and events. It supports analyzing metrics, logs, and events with SQL and PromQL, and doing streaming with continuous aggregation.
|
||||
|
||||
* **Analyzing time-series data**
|
||||
* **Cloud-Edge collaboration**
|
||||
|
||||
Query your time-series data with SQL and PromQL. Use Python scripts to facilitate complex analytical tasks.
|
||||
GreptimeDB can be deployed on ARM architecture-compatible Android/Linux systems as well as cloud environments from various vendors. Both sides run the same software, providing identical APIs and control planes, so your application can run at the edge or on the cloud without modification, and data synchronization also becomes extremely easy and efficient.
|
||||
|
||||
* **Cloud-native distributed database**
|
||||
|
||||
Fully open-source distributed cluster architecture that harnesses the power of cloud-native elastic computing resources.
|
||||
By leveraging object storage (S3 and others), separating compute and storage, scaling stateless compute nodes arbitrarily, GreptimeDB implements seamless scalability. It also supports cross-cloud deployment with a built-in unified data access layer over different object storages.
|
||||
|
||||
* **Performance and Cost-effective**
|
||||
|
||||
@@ -105,10 +104,10 @@ Read more about [Installation](https://docs.greptime.com/getting-started/install
|
||||
|
||||
## Getting Started
|
||||
|
||||
* [Quickstart](https://docs.greptime.com/getting-started/quick-start/overview)
|
||||
* [Write Data](https://docs.greptime.com/user-guide/clients/overview)
|
||||
* [Query Data](https://docs.greptime.com/user-guide/query-data/overview)
|
||||
* [Operations](https://docs.greptime.com/user-guide/operations/overview)
|
||||
* [Quickstart](https://docs.greptime.com/getting-started/quick-start)
|
||||
* [User Guide](https://docs.greptime.com/user-guide/overview)
|
||||
* [Demos](https://github.com/GreptimeTeam/demo-scene)
|
||||
* [FAQ](https://docs.greptime.com/faq-and-others/faq)
|
||||
|
||||
## Build
|
||||
|
||||
@@ -151,9 +150,10 @@ Our official Grafana dashboard is available at [grafana](grafana/README.md) dire
|
||||
|
||||
## Project Status
|
||||
|
||||
The current version has not yet reached General Availability version standards.
|
||||
In line with our Greptime 2024 Roadmap, we plan to achieve a production-level
|
||||
version with the update to v1.0 in August. [[Join Force]](https://github.com/GreptimeTeam/greptimedb/issues/3412)
|
||||
The current version has not yet reached the standards for General Availability.
|
||||
According to our Greptime 2024 Roadmap, we aim to achieve a production-level version with the release of v1.0 by the end of 2024. [Join Us](https://github.com/GreptimeTeam/greptimedb/issues/3412)
|
||||
|
||||
We welcome you to test and use GreptimeDB. Some users have already adopted it in their production environments. If you're interested in trying it out, please use the latest stable release available.
|
||||
|
||||
## Community
|
||||
|
||||
|
||||
@@ -16,9 +16,8 @@
|
||||
| `enable_telemetry` | Bool | `true` | Enable telemetry to collect anonymous usage data. |
|
||||
| `default_timezone` | String | `None` | The default timezone of the server. |
|
||||
| `runtime` | -- | -- | The runtime options. |
|
||||
| `runtime.read_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
|
||||
| `runtime.write_rt_size` | Integer | `8` | The number of threads to execute the runtime for global write operations. |
|
||||
| `runtime.bg_rt_size` | Integer | `4` | The number of threads to execute the runtime for global background operations. |
|
||||
| `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
|
||||
| `runtime.compact_rt_size` | Integer | `4` | The number of threads to execute the runtime for global write operations. |
|
||||
| `http` | -- | -- | The HTTP server options. |
|
||||
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
|
||||
| `http.timeout` | String | `30s` | HTTP request timeout. Set to 0 to disable timeout. |
|
||||
@@ -129,6 +128,8 @@
|
||||
| `region_engine.mito.inverted_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||
| `region_engine.mito.inverted_index.mem_threshold_on_create` | String | `auto` | Memory threshold for performing an external sort during index creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold |
|
||||
| `region_engine.mito.inverted_index.intermediate_path` | String | `""` | Deprecated, use `region_engine.mito.index.aux_path` instead. |
|
||||
| `region_engine.mito.inverted_index.metadata_cache_size` | String | `64MiB` | Cache size for inverted index metadata. |
|
||||
| `region_engine.mito.inverted_index.content_cache_size` | String | `128MiB` | Cache size for inverted index content. |
|
||||
| `region_engine.mito.fulltext_index` | -- | -- | The options for full-text index in Mito engine. |
|
||||
| `region_engine.mito.fulltext_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||
| `region_engine.mito.fulltext_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||
@@ -139,6 +140,7 @@
|
||||
| `region_engine.mito.memtable.index_max_keys_per_shard` | Integer | `8192` | The max number of keys in one shard.<br/>Only available for `partition_tree` memtable. |
|
||||
| `region_engine.mito.memtable.data_freeze_threshold` | Integer | `32768` | The max rows of data inside the actively writing buffer in one shard.<br/>Only available for `partition_tree` memtable. |
|
||||
| `region_engine.mito.memtable.fork_dictionary_bytes` | String | `1GiB` | Max dictionary bytes.<br/>Only available for `partition_tree` memtable. |
|
||||
| `region_engine.file` | -- | -- | Enable the file engine. |
|
||||
| `logging` | -- | -- | The logging options. |
|
||||
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. |
|
||||
| `logging.level` | String | `None` | The log level. Can be `info`/`debug`/`warn`/`error`. |
|
||||
@@ -165,12 +167,10 @@
|
||||
|
||||
| Key | Type | Default | Descriptions |
|
||||
| --- | -----| ------- | ----------- |
|
||||
| `mode` | String | `standalone` | The running mode of the datanode. It can be `standalone` or `distributed`. |
|
||||
| `default_timezone` | String | `None` | The default timezone of the server. |
|
||||
| `runtime` | -- | -- | The runtime options. |
|
||||
| `runtime.read_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
|
||||
| `runtime.write_rt_size` | Integer | `8` | The number of threads to execute the runtime for global write operations. |
|
||||
| `runtime.bg_rt_size` | Integer | `4` | The number of threads to execute the runtime for global background operations. |
|
||||
| `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
|
||||
| `runtime.compact_rt_size` | Integer | `4` | The number of threads to execute the runtime for global write operations. |
|
||||
| `heartbeat` | -- | -- | The heartbeat options. |
|
||||
| `heartbeat.interval` | String | `18s` | Interval for sending heartbeat messages to the metasrv. |
|
||||
| `heartbeat.retry_interval` | String | `3s` | Interval for retrying to send heartbeat messages to the metasrv. |
|
||||
@@ -258,24 +258,24 @@
|
||||
| `use_memory_store` | Bool | `false` | Store data in memory. |
|
||||
| `enable_telemetry` | Bool | `true` | Whether to enable greptimedb telemetry. |
|
||||
| `store_key_prefix` | String | `""` | If it's not empty, the metasrv will store all data with this key prefix. |
|
||||
| `enable_region_failover` | Bool | `false` | Whether to enable region failover.<br/>This feature is only available on GreptimeDB running on cluster mode and<br/>- Using Remote WAL<br/>- Using shared storage (e.g., s3). |
|
||||
| `runtime` | -- | -- | The runtime options. |
|
||||
| `runtime.read_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
|
||||
| `runtime.write_rt_size` | Integer | `8` | The number of threads to execute the runtime for global write operations. |
|
||||
| `runtime.bg_rt_size` | Integer | `4` | The number of threads to execute the runtime for global background operations. |
|
||||
| `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
|
||||
| `runtime.compact_rt_size` | Integer | `4` | The number of threads to execute the runtime for global write operations. |
|
||||
| `procedure` | -- | -- | Procedure storage options. |
|
||||
| `procedure.max_retry_times` | Integer | `12` | Procedure max retry time. |
|
||||
| `procedure.retry_delay` | String | `500ms` | Initial retry delay of procedures, increases exponentially |
|
||||
| `procedure.max_metadata_value_size` | String | `1500KiB` | Auto split large value<br/>GreptimeDB procedure uses etcd as the default metadata storage backend.<br/>The etcd the maximum size of any request is 1.5 MiB<br/>1500KiB = 1536KiB (1.5MiB) - 36KiB (reserved size of key)<br/>Comments out the `max_metadata_value_size`, for don't split large value (no limit). |
|
||||
| `failure_detector` | -- | -- | -- |
|
||||
| `failure_detector.threshold` | Float | `8.0` | -- |
|
||||
| `failure_detector.min_std_deviation` | String | `100ms` | -- |
|
||||
| `failure_detector.acceptable_heartbeat_pause` | String | `10000ms` | -- |
|
||||
| `failure_detector.first_heartbeat_estimate` | String | `1000ms` | -- |
|
||||
| `failure_detector.threshold` | Float | `8.0` | The threshold value used by the failure detector to determine failure conditions. |
|
||||
| `failure_detector.min_std_deviation` | String | `100ms` | The minimum standard deviation of the heartbeat intervals, used to calculate acceptable variations. |
|
||||
| `failure_detector.acceptable_heartbeat_pause` | String | `10000ms` | The acceptable pause duration between heartbeats, used to determine if a heartbeat interval is acceptable. |
|
||||
| `failure_detector.first_heartbeat_estimate` | String | `1000ms` | The initial estimate of the heartbeat interval used by the failure detector. |
|
||||
| `datanode` | -- | -- | Datanode options. |
|
||||
| `datanode.client` | -- | -- | Datanode client options. |
|
||||
| `datanode.client.timeout` | String | `10s` | -- |
|
||||
| `datanode.client.connect_timeout` | String | `10s` | -- |
|
||||
| `datanode.client.tcp_nodelay` | Bool | `true` | -- |
|
||||
| `datanode.client.timeout` | String | `10s` | Operation timeout. |
|
||||
| `datanode.client.connect_timeout` | String | `10s` | Connect server timeout. |
|
||||
| `datanode.client.tcp_nodelay` | Bool | `true` | `TCP_NODELAY` option for accepted connections. |
|
||||
| `wal` | -- | -- | -- |
|
||||
| `wal.provider` | String | `raft_engine` | -- |
|
||||
| `wal.broker_endpoints` | Array | -- | The broker endpoints of the Kafka cluster. |
|
||||
@@ -335,9 +335,8 @@
|
||||
| `grpc.tls.key_path` | String | `None` | Private key file path. |
|
||||
| `grpc.tls.watch` | Bool | `false` | Watch for Certificate and key file change and auto reload.<br/>For now, gRPC tls config does not support auto reload. |
|
||||
| `runtime` | -- | -- | The runtime options. |
|
||||
| `runtime.read_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
|
||||
| `runtime.write_rt_size` | Integer | `8` | The number of threads to execute the runtime for global write operations. |
|
||||
| `runtime.bg_rt_size` | Integer | `4` | The number of threads to execute the runtime for global background operations. |
|
||||
| `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
|
||||
| `runtime.compact_rt_size` | Integer | `4` | The number of threads to execute the runtime for global write operations. |
|
||||
| `heartbeat` | -- | -- | The heartbeat options. |
|
||||
| `heartbeat.interval` | String | `3s` | Interval for sending heartbeat messages to the metasrv. |
|
||||
| `heartbeat.retry_interval` | String | `3s` | Interval for retrying to send heartbeat messages to the metasrv. |
|
||||
@@ -428,6 +427,7 @@
|
||||
| `region_engine.mito.memtable.index_max_keys_per_shard` | Integer | `8192` | The max number of keys in one shard.<br/>Only available for `partition_tree` memtable. |
|
||||
| `region_engine.mito.memtable.data_freeze_threshold` | Integer | `32768` | The max rows of data inside the actively writing buffer in one shard.<br/>Only available for `partition_tree` memtable. |
|
||||
| `region_engine.mito.memtable.fork_dictionary_bytes` | String | `1GiB` | Max dictionary bytes.<br/>Only available for `partition_tree` memtable. |
|
||||
| `region_engine.file` | -- | -- | Enable the file engine. |
|
||||
| `logging` | -- | -- | The logging options. |
|
||||
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. |
|
||||
| `logging.level` | String | `None` | The log level. Can be `info`/`debug`/`warn`/`error`. |
|
||||
|
||||
@@ -73,11 +73,9 @@ watch = false
|
||||
## The runtime options.
|
||||
[runtime]
|
||||
## The number of threads to execute the runtime for global read operations.
|
||||
read_rt_size = 8
|
||||
global_rt_size = 8
|
||||
## The number of threads to execute the runtime for global write operations.
|
||||
write_rt_size = 8
|
||||
## The number of threads to execute the runtime for global background operations.
|
||||
bg_rt_size = 4
|
||||
compact_rt_size = 4
|
||||
|
||||
## The heartbeat options.
|
||||
[heartbeat]
|
||||
@@ -478,6 +476,10 @@ data_freeze_threshold = 32768
|
||||
## Only available for `partition_tree` memtable.
|
||||
fork_dictionary_bytes = "1GiB"
|
||||
|
||||
[[region_engine]]
|
||||
## Enable the file engine.
|
||||
[region_engine.file]
|
||||
|
||||
## The logging options.
|
||||
[logging]
|
||||
## The directory to store the log files.
|
||||
|
||||
@@ -1,6 +1,3 @@
|
||||
## The running mode of the datanode. It can be `standalone` or `distributed`.
|
||||
mode = "standalone"
|
||||
|
||||
## The default timezone of the server.
|
||||
## +toml2docs:none-default
|
||||
default_timezone = "UTC"
|
||||
@@ -8,11 +5,9 @@ default_timezone = "UTC"
|
||||
## The runtime options.
|
||||
[runtime]
|
||||
## The number of threads to execute the runtime for global read operations.
|
||||
read_rt_size = 8
|
||||
global_rt_size = 8
|
||||
## The number of threads to execute the runtime for global write operations.
|
||||
write_rt_size = 8
|
||||
## The number of threads to execute the runtime for global background operations.
|
||||
bg_rt_size = 4
|
||||
compact_rt_size = 4
|
||||
|
||||
## The heartbeat options.
|
||||
[heartbeat]
|
||||
|
||||
@@ -25,14 +25,18 @@ enable_telemetry = true
|
||||
## If it's not empty, the metasrv will store all data with this key prefix.
|
||||
store_key_prefix = ""
|
||||
|
||||
## Whether to enable region failover.
|
||||
## This feature is only available on GreptimeDB running on cluster mode and
|
||||
## - Using Remote WAL
|
||||
## - Using shared storage (e.g., s3).
|
||||
enable_region_failover = false
|
||||
|
||||
## The runtime options.
|
||||
[runtime]
|
||||
## The number of threads to execute the runtime for global read operations.
|
||||
read_rt_size = 8
|
||||
global_rt_size = 8
|
||||
## The number of threads to execute the runtime for global write operations.
|
||||
write_rt_size = 8
|
||||
## The number of threads to execute the runtime for global background operations.
|
||||
bg_rt_size = 4
|
||||
compact_rt_size = 4
|
||||
|
||||
## Procedure storage options.
|
||||
[procedure]
|
||||
@@ -52,17 +56,32 @@ max_metadata_value_size = "1500KiB"
|
||||
|
||||
# Failure detectors options.
|
||||
[failure_detector]
|
||||
|
||||
## The threshold value used by the failure detector to determine failure conditions.
|
||||
threshold = 8.0
|
||||
|
||||
## The minimum standard deviation of the heartbeat intervals, used to calculate acceptable variations.
|
||||
min_std_deviation = "100ms"
|
||||
|
||||
## The acceptable pause duration between heartbeats, used to determine if a heartbeat interval is acceptable.
|
||||
acceptable_heartbeat_pause = "10000ms"
|
||||
|
||||
## The initial estimate of the heartbeat interval used by the failure detector.
|
||||
first_heartbeat_estimate = "1000ms"
|
||||
|
||||
## Datanode options.
|
||||
[datanode]
|
||||
|
||||
## Datanode client options.
|
||||
[datanode.client]
|
||||
|
||||
## Operation timeout.
|
||||
timeout = "10s"
|
||||
|
||||
## Connect server timeout.
|
||||
connect_timeout = "10s"
|
||||
|
||||
## `TCP_NODELAY` option for accepted connections.
|
||||
tcp_nodelay = true
|
||||
|
||||
[wal]
|
||||
|
||||
@@ -11,11 +11,9 @@ default_timezone = "UTC"
|
||||
## The runtime options.
|
||||
[runtime]
|
||||
## The number of threads to execute the runtime for global read operations.
|
||||
read_rt_size = 8
|
||||
global_rt_size = 8
|
||||
## The number of threads to execute the runtime for global write operations.
|
||||
write_rt_size = 8
|
||||
## The number of threads to execute the runtime for global background operations.
|
||||
bg_rt_size = 4
|
||||
compact_rt_size = 4
|
||||
|
||||
## The HTTP server options.
|
||||
[http]
|
||||
@@ -459,6 +457,12 @@ mem_threshold_on_create = "auto"
|
||||
## Deprecated, use `region_engine.mito.index.aux_path` instead.
|
||||
intermediate_path = ""
|
||||
|
||||
## Cache size for inverted index metadata.
|
||||
metadata_cache_size = "64MiB"
|
||||
|
||||
## Cache size for inverted index content.
|
||||
content_cache_size = "128MiB"
|
||||
|
||||
## The options for full-text index in Mito engine.
|
||||
[region_engine.mito.fulltext_index]
|
||||
|
||||
@@ -501,6 +505,10 @@ data_freeze_threshold = 32768
|
||||
## Only available for `partition_tree` memtable.
|
||||
fork_dictionary_bytes = "1GiB"
|
||||
|
||||
[[region_engine]]
|
||||
## Enable the file engine.
|
||||
[region_engine.file]
|
||||
|
||||
## The logging options.
|
||||
[logging]
|
||||
## The directory to store the log files.
|
||||
|
||||
@@ -1,12 +1,13 @@
x-custom:
initial_cluster_token: &initial_cluster_token "--initial-cluster-token=etcd-cluster"
common_settings: &common_settings
image: quay.io/coreos/etcd:v3.5.10
etcd_initial_cluster_token: &etcd_initial_cluster_token "--initial-cluster-token=etcd-cluster"
etcd_common_settings: &etcd_common_settings
image: "${ETCD_REGISTRY:-quay.io}/${ETCD_NAMESPACE:-coreos}/etcd:${ETCD_VERSION:-v3.5.10}"
entrypoint: /usr/local/bin/etcd
greptimedb_image: &greptimedb_image "${GREPTIMEDB_REGISTRY:-docker.io}/${GREPTIMEDB_NAMESPACE:-greptime}/greptimedb:${GREPTIMEDB_VERSION:-latest}"

services:
etcd0:
<<: *common_settings
<<: *etcd_common_settings
container_name: etcd0
ports:
- 2379:2379
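The anchors introduced above parametrize the etcd and GreptimeDB images through environment variables with defaults. A hedged sketch of overriding them when bringing the cluster up; the variable names are the ones defined in the compose file, and the tag values here are only examples:

```bash
# Pin specific image versions instead of the ':latest' / 'v3.5.10' defaults.
GREPTIMEDB_VERSION=v0.9.1 ETCD_VERSION=v3.5.10 \
  docker compose -f ./docker/docker-compose/cluster-with-etcd.yaml up
```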
@@ -22,7 +23,7 @@ services:
|
||||
- --election-timeout=1250
|
||||
- --initial-cluster=etcd0=http://etcd0:2380
|
||||
- --initial-cluster-state=new
|
||||
- *initial_cluster_token
|
||||
- *etcd_initial_cluster_token
|
||||
volumes:
|
||||
- /tmp/greptimedb-cluster-docker-compose/etcd0:/var/lib/etcd
|
||||
healthcheck:
|
||||
@@ -34,7 +35,7 @@ services:
|
||||
- greptimedb
|
||||
|
||||
metasrv:
|
||||
image: docker.io/greptime/greptimedb:latest
|
||||
image: *greptimedb_image
|
||||
container_name: metasrv
|
||||
ports:
|
||||
- 3002:3002
|
||||
@@ -56,10 +57,11 @@ services:
|
||||
- greptimedb
|
||||
|
||||
datanode0:
|
||||
image: docker.io/greptime/greptimedb:latest
|
||||
image: *greptimedb_image
|
||||
container_name: datanode0
|
||||
ports:
|
||||
- 3001:3001
|
||||
- 5000:5000
|
||||
command:
|
||||
- datanode
|
||||
- start
|
||||
@@ -67,8 +69,14 @@ services:
|
||||
- --rpc-addr=0.0.0.0:3001
|
||||
- --rpc-hostname=datanode0:3001
|
||||
- --metasrv-addrs=metasrv:3002
|
||||
- --http-addr=0.0.0.0:5000
|
||||
volumes:
|
||||
- /tmp/greptimedb-cluster-docker-compose/datanode0:/tmp/greptimedb
|
||||
healthcheck:
|
||||
test: [ "CMD", "curl", "-f", "http://datanode0:5000/health" ]
|
||||
interval: 5s
|
||||
timeout: 3s
|
||||
retries: 5
|
||||
depends_on:
|
||||
metasrv:
|
||||
condition: service_healthy
|
||||
@@ -76,7 +84,7 @@ services:
|
||||
- greptimedb
|
||||
|
||||
frontend0:
|
||||
image: docker.io/greptime/greptimedb:latest
|
||||
image: *greptimedb_image
|
||||
container_name: frontend0
|
||||
ports:
|
||||
- 4000:4000
|
||||
@@ -91,8 +99,31 @@ services:
|
||||
- --rpc-addr=0.0.0.0:4001
|
||||
- --mysql-addr=0.0.0.0:4002
|
||||
- --postgres-addr=0.0.0.0:4003
|
||||
healthcheck:
|
||||
test: [ "CMD", "curl", "-f", "http://frontend0:4000/health" ]
|
||||
interval: 5s
|
||||
timeout: 3s
|
||||
retries: 5
|
||||
depends_on:
|
||||
metasrv:
|
||||
datanode0:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- greptimedb
|
||||
|
||||
flownode0:
|
||||
image: *greptimedb_image
|
||||
container_name: flownode0
|
||||
ports:
|
||||
- 4004:4004
|
||||
command:
|
||||
- flownode
|
||||
- start
|
||||
- --node-id=0
|
||||
- --metasrv-addrs=metasrv:3002
|
||||
- --rpc-addr=0.0.0.0:4004
|
||||
- --rpc-hostname=flownode0:4004
|
||||
depends_on:
|
||||
frontend0:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- greptimedb
|
||||
|
||||
@@ -105,7 +105,7 @@ use tests_fuzz::utils::{init_greptime_connections, Connections};

fuzz_target!(|input: FuzzInput| {
common_telemetry::init_default_ut_logging();
common_runtime::block_on_write(async {
common_runtime::block_on_global(async {
let Connections { mysql } = init_greptime_connections().await;
let mut rng = ChaChaRng::seed_from_u64(input.seed);
let columns = rng.gen_range(2..30);
File diff suppressed because it is too large
@@ -38,10 +38,11 @@ pub enum Error {
location: Location,
},

#[snafu(display("Auth failed"))]
#[snafu(display("Authentication source failure"))]
AuthBackend {
#[snafu(implicit)]
location: Location,
#[snafu(source)]
source: BoxedError,
},

@@ -87,7 +88,7 @@ impl ErrorExt for Error {
Error::IllegalParam { .. } => StatusCode::InvalidArguments,
Error::FileWatch { .. } => StatusCode::InvalidArguments,
Error::InternalState { .. } => StatusCode::Unexpected,
Error::Io { .. } => StatusCode::Internal,
Error::Io { .. } => StatusCode::StorageUnavailable,
Error::AuthBackend { .. } => StatusCode::Internal,

Error::UserNotFound { .. } => StatusCode::UserNotFound,
2
src/cache/src/error.rs
vendored
@@ -34,7 +34,7 @@ pub type Result<T> = std::result::Result<T, Error>;
impl ErrorExt for Error {
fn status_code(&self) -> StatusCode {
match self {
Error::CacheRequired { .. } => StatusCode::Internal,
Error::CacheRequired { .. } => StatusCode::Unexpected,
}
}
@@ -18,6 +18,7 @@ use std::fmt::Debug;
|
||||
use common_error::ext::{BoxedError, ErrorExt};
|
||||
use common_error::status_code::StatusCode;
|
||||
use common_macro::stack_trace_debug;
|
||||
use common_query::error::datafusion_status_code;
|
||||
use datafusion::error::DataFusionError;
|
||||
use snafu::{Location, Snafu};
|
||||
|
||||
@@ -56,6 +57,31 @@ pub enum Error {
|
||||
source: BoxedError,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to list flows in catalog {catalog}"))]
|
||||
ListFlows {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
catalog: String,
|
||||
source: BoxedError,
|
||||
},
|
||||
|
||||
#[snafu(display("Flow info not found: {flow_name} in catalog {catalog_name}"))]
|
||||
FlowInfoNotFound {
|
||||
flow_name: String,
|
||||
catalog_name: String,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Can't convert value to json, input={input}"))]
|
||||
Json {
|
||||
input: String,
|
||||
#[snafu(source)]
|
||||
error: serde_json::error::Error,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to re-compile script due to internal error"))]
|
||||
CompileScriptInternal {
|
||||
#[snafu(implicit)]
|
||||
@@ -114,6 +140,18 @@ pub enum Error {
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display(
|
||||
"View plan columns changed from: {} to: {}",
|
||||
origin_names,
|
||||
actual_names
|
||||
))]
|
||||
ViewPlanColumnsChanged {
|
||||
origin_names: String,
|
||||
actual_names: String,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to find table partitions"))]
|
||||
FindPartitions { source: partition::error::Error },
|
||||
|
||||
@@ -173,6 +211,14 @@ pub enum Error {
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to project view columns"))]
|
||||
ProjectViewColumns {
|
||||
#[snafu(source)]
|
||||
error: DataFusionError,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Table metadata manager error"))]
|
||||
TableMetadataManager {
|
||||
source: common_meta::error::Error,
|
||||
@@ -208,6 +254,21 @@ pub enum Error {
|
||||
},
|
||||
}
|
||||
|
||||
impl Error {
|
||||
pub fn should_fail(&self) -> bool {
|
||||
use Error::*;
|
||||
|
||||
matches!(
|
||||
self,
|
||||
GetViewCache { .. }
|
||||
| ViewInfoNotFound { .. }
|
||||
| DecodePlan { .. }
|
||||
| ViewPlanColumnsChanged { .. }
|
||||
| ProjectViewColumns { .. }
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
pub type Result<T> = std::result::Result<T, Error>;
|
||||
|
||||
impl ErrorExt for Error {
|
||||
@@ -218,10 +279,15 @@ impl ErrorExt for Error {
|
||||
| Error::FindPartitions { .. }
|
||||
| Error::FindRegionRoutes { .. }
|
||||
| Error::CacheNotFound { .. }
|
||||
| Error::CastManager { .. } => StatusCode::Unexpected,
|
||||
| Error::CastManager { .. }
|
||||
| Error::Json { .. } => StatusCode::Unexpected,
|
||||
|
||||
Error::ViewPlanColumnsChanged { .. } => StatusCode::InvalidArguments,
|
||||
|
||||
Error::ViewInfoNotFound { .. } => StatusCode::TableNotFound,
|
||||
|
||||
Error::FlowInfoNotFound { .. } => StatusCode::FlowNotFound,
|
||||
|
||||
Error::SystemCatalog { .. } => StatusCode::StorageUnavailable,
|
||||
|
||||
Error::UpgradeWeakCatalogManagerRef { .. } => StatusCode::Internal,
|
||||
@@ -232,7 +298,8 @@ impl ErrorExt for Error {
|
||||
Error::ListCatalogs { source, .. }
|
||||
| Error::ListNodes { source, .. }
|
||||
| Error::ListSchemas { source, .. }
|
||||
| Error::ListTables { source, .. } => source.status_code(),
|
||||
| Error::ListTables { source, .. }
|
||||
| Error::ListFlows { source, .. } => source.status_code(),
|
||||
|
||||
Error::CreateTable { source, .. } => source.status_code(),
|
||||
|
||||
@@ -245,7 +312,8 @@ impl ErrorExt for Error {
|
||||
}
|
||||
|
||||
Error::QueryAccessDenied { .. } => StatusCode::AccessDenied,
|
||||
Error::Datafusion { .. } => StatusCode::EngineExecuteQuery,
|
||||
Error::Datafusion { error, .. } => datafusion_status_code::<Self>(error, None),
|
||||
Error::ProjectViewColumns { .. } => StatusCode::EngineExecuteQuery,
|
||||
Error::TableMetadataManager { source, .. } => source.status_code(),
|
||||
Error::GetViewCache { source, .. } | Error::GetTableCache { source, .. } => {
|
||||
source.status_code()
|
||||
@@ -260,7 +328,7 @@ impl ErrorExt for Error {
|
||||
|
||||
impl From<Error> for DataFusionError {
|
||||
fn from(e: Error) -> Self {
|
||||
DataFusionError::Internal(e.to_string())
|
||||
DataFusionError::External(Box::new(e))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -299,7 +367,7 @@ mod tests {
|
||||
}
|
||||
.into();
|
||||
match e {
|
||||
DataFusionError::Internal(_) => {}
|
||||
DataFusionError::External(_) => {}
|
||||
_ => {
|
||||
panic!("catalog error should be converted to DataFusionError::Internal")
|
||||
}
|
||||
|
||||
@@ -19,11 +19,13 @@ use std::sync::{Arc, Weak};
|
||||
use async_stream::try_stream;
|
||||
use common_catalog::consts::{
|
||||
DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, INFORMATION_SCHEMA_NAME, NUMBERS_TABLE_ID,
|
||||
PG_CATALOG_NAME,
|
||||
};
|
||||
use common_config::Mode;
|
||||
use common_error::ext::BoxedError;
|
||||
use common_meta::cache::{LayeredCacheRegistryRef, ViewInfoCacheRef};
|
||||
use common_meta::key::catalog_name::CatalogNameKey;
|
||||
use common_meta::key::flow::FlowMetadataManager;
|
||||
use common_meta::key::schema_name::SchemaNameKey;
|
||||
use common_meta::key::table_info::TableInfoValue;
|
||||
use common_meta::key::table_name::TableNameKey;
|
||||
@@ -46,6 +48,8 @@ use crate::error::{
|
||||
};
|
||||
use crate::information_schema::InformationSchemaProvider;
|
||||
use crate::kvbackend::TableCacheRef;
|
||||
use crate::system_schema::pg_catalog::PGCatalogProvider;
|
||||
use crate::system_schema::SystemSchemaProvider;
|
||||
use crate::CatalogManager;
|
||||
|
||||
/// Access all existing catalog, schema and tables.
|
||||
@@ -82,14 +86,21 @@ impl KvBackendCatalogManager {
|
||||
.get()
|
||||
.expect("Failed to get table_route_cache"),
|
||||
)),
|
||||
table_metadata_manager: Arc::new(TableMetadataManager::new(backend)),
|
||||
table_metadata_manager: Arc::new(TableMetadataManager::new(backend.clone())),
|
||||
system_catalog: SystemCatalog {
|
||||
catalog_manager: me.clone(),
|
||||
catalog_cache: Cache::new(CATALOG_CACHE_MAX_CAPACITY),
|
||||
pg_catalog_cache: Cache::new(CATALOG_CACHE_MAX_CAPACITY),
|
||||
information_schema_provider: Arc::new(InformationSchemaProvider::new(
|
||||
DEFAULT_CATALOG_NAME.to_string(),
|
||||
me.clone(),
|
||||
Arc::new(FlowMetadataManager::new(backend.clone())),
|
||||
)),
|
||||
pg_catalog_provider: Arc::new(PGCatalogProvider::new(
|
||||
DEFAULT_CATALOG_NAME.to_string(),
|
||||
me.clone(),
|
||||
)),
|
||||
backend,
|
||||
},
|
||||
cache_registry,
|
||||
})
|
||||
@@ -295,30 +306,41 @@ fn build_table(table_info_value: TableInfoValue) -> Result<TableRef> {
|
||||
/// Existing system tables:
|
||||
/// - public.numbers
|
||||
/// - information_schema.{tables}
|
||||
/// - pg_catalog.{tables}
|
||||
#[derive(Clone)]
|
||||
struct SystemCatalog {
|
||||
catalog_manager: Weak<KvBackendCatalogManager>,
|
||||
catalog_cache: Cache<String, Arc<InformationSchemaProvider>>,
|
||||
pg_catalog_cache: Cache<String, Arc<PGCatalogProvider>>,
|
||||
|
||||
// system_schema_provier for default catalog
|
||||
information_schema_provider: Arc<InformationSchemaProvider>,
|
||||
pg_catalog_provider: Arc<PGCatalogProvider>,
|
||||
backend: KvBackendRef,
|
||||
}
|
||||
|
||||
impl SystemCatalog {
|
||||
// TODO(j0hn50n133): remove the duplicated hard-coded table names logic
|
||||
fn schema_names(&self) -> Vec<String> {
|
||||
vec![INFORMATION_SCHEMA_NAME.to_string()]
|
||||
vec![
|
||||
INFORMATION_SCHEMA_NAME.to_string(),
|
||||
PG_CATALOG_NAME.to_string(),
|
||||
]
|
||||
}
|
||||
|
||||
fn table_names(&self, schema: &str) -> Vec<String> {
|
||||
if schema == INFORMATION_SCHEMA_NAME {
|
||||
self.information_schema_provider.table_names()
|
||||
} else if schema == DEFAULT_SCHEMA_NAME {
|
||||
vec![NUMBERS_TABLE_NAME.to_string()]
|
||||
} else {
|
||||
vec![]
|
||||
match schema {
|
||||
INFORMATION_SCHEMA_NAME => self.information_schema_provider.table_names(),
|
||||
PG_CATALOG_NAME => self.pg_catalog_provider.table_names(),
|
||||
DEFAULT_SCHEMA_NAME => {
|
||||
vec![NUMBERS_TABLE_NAME.to_string()]
|
||||
}
|
||||
_ => vec![],
}
}

fn schema_exists(&self, schema: &str) -> bool {
schema == INFORMATION_SCHEMA_NAME
schema == INFORMATION_SCHEMA_NAME || schema == PG_CATALOG_NAME
}

fn table_exists(&self, schema: &str, table: &str) -> bool {
@@ -326,6 +348,8 @@ impl SystemCatalog {
self.information_schema_provider.table(table).is_some()
} else if schema == DEFAULT_SCHEMA_NAME {
table == NUMBERS_TABLE_NAME
} else if schema == PG_CATALOG_NAME {
self.pg_catalog_provider.table(table).is_some()
} else {
false
}
@@ -338,9 +362,23 @@ impl SystemCatalog {
Arc::new(InformationSchemaProvider::new(
catalog.to_string(),
self.catalog_manager.clone(),
Arc::new(FlowMetadataManager::new(self.backend.clone())),
))
});
information_schema_provider.table(table_name)
} else if schema == PG_CATALOG_NAME {
if catalog == DEFAULT_CATALOG_NAME {
self.pg_catalog_provider.table(table_name)
} else {
let pg_catalog_provider =
self.pg_catalog_cache.get_with_by_ref(catalog, move || {
Arc::new(PGCatalogProvider::new(
catalog.to_string(),
self.catalog_manager.clone(),
))
});
pg_catalog_provider.table(table_name)
}
} else if schema == DEFAULT_SCHEMA_NAME && table_name == NUMBERS_TABLE_NAME {
Some(NumbersTable::table(NUMBERS_TABLE_ID))
} else {

@@ -28,12 +28,16 @@ use table::TableRef;
use crate::error::Result;

pub mod error;
pub mod information_schema;
pub mod kvbackend;
pub mod memory;
mod metrics;
pub mod table_source;
pub mod system_schema;
pub mod information_schema {
// TODO(j0hn50n133): re-export to make it compatible with the legacy code, migrate to the new path later
pub use crate::system_schema::information_schema::*;
}

pub mod table_source;
#[async_trait::async_trait]
pub trait CatalogManager: Send + Sync {
fn as_any(&self) -> &dyn Any;

@@ -20,14 +20,18 @@ use std::sync::{Arc, RwLock, Weak};
use async_stream::{stream, try_stream};
use common_catalog::build_db_string;
use common_catalog::consts::{
DEFAULT_CATALOG_NAME, DEFAULT_PRIVATE_SCHEMA_NAME, DEFAULT_SCHEMA_NAME, INFORMATION_SCHEMA_NAME,
DEFAULT_CATALOG_NAME, DEFAULT_PRIVATE_SCHEMA_NAME, DEFAULT_SCHEMA_NAME,
INFORMATION_SCHEMA_NAME, PG_CATALOG_NAME,
};
use common_meta::key::flow::FlowMetadataManager;
use common_meta::kv_backend::memory::MemoryKvBackend;
use futures_util::stream::BoxStream;
use snafu::OptionExt;
use table::TableRef;

use crate::error::{CatalogNotFoundSnafu, Result, SchemaNotFoundSnafu, TableExistsSnafu};
use crate::information_schema::InformationSchemaProvider;
use crate::system_schema::SystemSchemaProvider;
use crate::{CatalogManager, DeregisterTableRequest, RegisterSchemaRequest, RegisterTableRequest};

type SchemaEntries = HashMap<String, HashMap<String, TableRef>>;
@@ -173,6 +177,12 @@ impl MemoryCatalogManager {
schema: DEFAULT_PRIVATE_SCHEMA_NAME.to_string(),
})
.unwrap();
manager
.register_schema_sync(RegisterSchemaRequest {
catalog: DEFAULT_CATALOG_NAME.to_string(),
schema: PG_CATALOG_NAME.to_string(),
})
.unwrap();
manager
.register_schema_sync(RegisterSchemaRequest {
catalog: DEFAULT_CATALOG_NAME.to_string(),
@@ -196,7 +206,7 @@ impl MemoryCatalogManager {
}

fn catalog_exist_sync(&self, catalog: &str) -> Result<bool> {
Ok(self.catalogs.read().unwrap().get(catalog).is_some())
Ok(self.catalogs.read().unwrap().contains_key(catalog))
}

/// Registers a catalog if it does not exist and returns false if the schema exists.
@@ -290,6 +300,7 @@ impl MemoryCatalogManager {
let information_schema_provider = InformationSchemaProvider::new(
catalog,
Arc::downgrade(self) as Weak<dyn CatalogManager>,
Arc::new(FlowMetadataManager::new(Arc::new(MemoryKvBackend::new()))),
);
let information_schema = information_schema_provider.tables().clone();
166
src/catalog/src/system_schema.rs
Normal file
@@ -0,0 +1,166 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

pub mod information_schema;
mod memory_table;
pub mod pg_catalog;
mod predicate;
mod utils;

use std::collections::HashMap;
use std::sync::Arc;

use common_error::ext::BoxedError;
use common_recordbatch::{RecordBatchStreamWrapper, SendableRecordBatchStream};
use datatypes::schema::SchemaRef;
use futures_util::StreamExt;
use snafu::ResultExt;
use store_api::data_source::DataSource;
use store_api::storage::ScanRequest;
use table::error::{SchemaConversionSnafu, TablesRecordBatchSnafu};
use table::metadata::{
FilterPushDownType, TableId, TableInfoBuilder, TableInfoRef, TableMetaBuilder, TableType,
};
use table::{Table, TableRef};

use crate::error::Result;

pub trait SystemSchemaProvider {
/// Returns a map of [TableRef] in information schema.
fn tables(&self) -> &HashMap<String, TableRef>;

/// Returns the [TableRef] by table name.
fn table(&self, name: &str) -> Option<TableRef> {
self.tables().get(name).cloned()
}

/// Returns table names in the order of table id.
fn table_names(&self) -> Vec<String> {
let mut tables = self.tables().values().clone().collect::<Vec<_>>();

tables.sort_by(|t1, t2| {
t1.table_info()
.table_id()
.partial_cmp(&t2.table_info().table_id())
.unwrap()
});
tables
.into_iter()
.map(|t| t.table_info().name.clone())
.collect()
}
}

trait SystemSchemaProviderInner {
fn catalog_name(&self) -> &str;
fn schema_name() -> &'static str;
fn build_table(&self, name: &str) -> Option<TableRef> {
self.system_table(name).map(|table| {
let table_info = Self::table_info(self.catalog_name().to_string(), &table);
let filter_pushdown = FilterPushDownType::Inexact;
let data_source = Arc::new(SystemTableDataSource::new(table));
let table = Table::new(table_info, filter_pushdown, data_source);
Arc::new(table)
})
}
fn system_table(&self, name: &str) -> Option<SystemTableRef>;

fn table_info(catalog_name: String, table: &SystemTableRef) -> TableInfoRef {
let table_meta = TableMetaBuilder::default()
.schema(table.schema())
.primary_key_indices(vec![])
.next_column_id(0)
.build()
.unwrap();
let table_info = TableInfoBuilder::default()
.table_id(table.table_id())
.name(table.table_name().to_string())
.catalog_name(catalog_name)
.schema_name(Self::schema_name().to_string())
.meta(table_meta)
.table_type(table.table_type())
.build()
.unwrap();
Arc::new(table_info)
}
}

pub(crate) trait SystemTable {
fn table_id(&self) -> TableId;

fn table_name(&self) -> &'static str;

fn schema(&self) -> SchemaRef;

fn to_stream(&self, request: ScanRequest) -> Result<SendableRecordBatchStream>;

fn table_type(&self) -> TableType {
TableType::Temporary
}
}

pub(crate) type SystemTableRef = Arc<dyn SystemTable + Send + Sync>;

struct SystemTableDataSource {
table: SystemTableRef,
}

impl SystemTableDataSource {
fn new(table: SystemTableRef) -> Self {
Self { table }
}

fn try_project(&self, projection: &[usize]) -> std::result::Result<SchemaRef, BoxedError> {
let schema = self
.table
.schema()
.try_project(projection)
.context(SchemaConversionSnafu)
.map_err(BoxedError::new)?;
Ok(Arc::new(schema))
}
}

impl DataSource for SystemTableDataSource {
fn get_stream(
&self,
request: ScanRequest,
) -> std::result::Result<SendableRecordBatchStream, BoxedError> {
let projection = request.projection.clone();
let projected_schema = match &projection {
Some(projection) => self.try_project(projection)?,
None => self.table.schema(),
};

let stream = self
.table
.to_stream(request)
.map_err(BoxedError::new)
.context(TablesRecordBatchSnafu)
.map_err(BoxedError::new)?
.map(move |batch| match &projection {
Some(p) => batch.and_then(|b| b.try_project(p)),
None => batch,
});

let stream = RecordBatchStreamWrapper {
schema: projected_schema,
stream: Box::pin(stream),
output_ordering: None,
metrics: Default::default(),
};

Ok(Box::pin(stream))
}
}
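For orientation (not part of the diff): the new file above splits the shared plumbing so that a system schema only has to describe its tables, while `build_table` and `table_info` are supplied as default methods. A minimal, hedged sketch of a provider using these traits; `ExampleSchemaProvider` and `example_table()` are hypothetical names used purely for illustration.

    // Hypothetical sketch, not part of the repository.
    struct ExampleSchemaProvider {
        catalog_name: String,
        tables: HashMap<String, TableRef>,
    }

    impl SystemSchemaProvider for ExampleSchemaProvider {
        fn tables(&self) -> &HashMap<String, TableRef> {
            &self.tables
        }
    }

    impl SystemSchemaProviderInner for ExampleSchemaProvider {
        fn catalog_name(&self) -> &str {
            &self.catalog_name
        }
        fn schema_name() -> &'static str {
            "example_schema"
        }
        fn system_table(&self, name: &str) -> Option<SystemTableRef> {
            match name {
                // example_table() is assumed to return something implementing SystemTable.
                "example" => Some(example_table()),
                _ => None,
            }
        }
    }

With only these three methods implemented, the provider inherits `table`, `table_names`, `build_table` and `table_info` from the traits above.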
@@ -14,50 +14,49 @@
|
||||
|
||||
mod cluster_info;
|
||||
pub mod columns;
|
||||
pub mod flows;
|
||||
mod information_memory_table;
|
||||
pub mod key_column_usage;
|
||||
mod memory_table;
|
||||
mod partitions;
|
||||
mod predicate;
|
||||
mod region_peers;
|
||||
mod runtime_metrics;
|
||||
pub mod schemata;
|
||||
mod table_constraints;
|
||||
mod table_names;
|
||||
pub mod tables;
|
||||
pub(crate) mod utils;
|
||||
mod views;
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::sync::{Arc, Weak};
|
||||
|
||||
use common_catalog::consts::{self, DEFAULT_CATALOG_NAME, INFORMATION_SCHEMA_NAME};
|
||||
use common_error::ext::BoxedError;
|
||||
use common_recordbatch::{RecordBatchStreamWrapper, SendableRecordBatchStream};
|
||||
use common_meta::key::flow::FlowMetadataManager;
|
||||
use common_recordbatch::SendableRecordBatchStream;
|
||||
use datatypes::schema::SchemaRef;
|
||||
use futures_util::StreamExt;
|
||||
use lazy_static::lazy_static;
|
||||
use paste::paste;
|
||||
pub(crate) use predicate::Predicates;
|
||||
use snafu::ResultExt;
|
||||
use store_api::data_source::DataSource;
|
||||
use store_api::storage::{ScanRequest, TableId};
|
||||
use table::error::{SchemaConversionSnafu, TablesRecordBatchSnafu};
|
||||
use table::metadata::{
|
||||
FilterPushDownType, TableInfoBuilder, TableInfoRef, TableMetaBuilder, TableType,
|
||||
};
|
||||
use table::{Table, TableRef};
|
||||
use table::metadata::TableType;
|
||||
use table::TableRef;
|
||||
pub use table_names::*;
|
||||
use views::InformationSchemaViews;
|
||||
|
||||
use self::columns::InformationSchemaColumns;
|
||||
use super::{SystemSchemaProviderInner, SystemTable, SystemTableRef};
|
||||
use crate::error::Result;
|
||||
use crate::information_schema::cluster_info::InformationSchemaClusterInfo;
|
||||
use crate::information_schema::key_column_usage::InformationSchemaKeyColumnUsage;
|
||||
use crate::information_schema::memory_table::{get_schema_columns, MemoryTable};
|
||||
use crate::information_schema::partitions::InformationSchemaPartitions;
|
||||
use crate::information_schema::region_peers::InformationSchemaRegionPeers;
|
||||
use crate::information_schema::runtime_metrics::InformationSchemaMetrics;
|
||||
use crate::information_schema::schemata::InformationSchemaSchemata;
|
||||
use crate::information_schema::table_constraints::InformationSchemaTableConstraints;
|
||||
use crate::information_schema::tables::InformationSchemaTables;
|
||||
use crate::system_schema::information_schema::cluster_info::InformationSchemaClusterInfo;
|
||||
use crate::system_schema::information_schema::flows::InformationSchemaFlows;
|
||||
use crate::system_schema::information_schema::information_memory_table::get_schema_columns;
|
||||
use crate::system_schema::information_schema::key_column_usage::InformationSchemaKeyColumnUsage;
|
||||
use crate::system_schema::information_schema::partitions::InformationSchemaPartitions;
|
||||
use crate::system_schema::information_schema::region_peers::InformationSchemaRegionPeers;
|
||||
use crate::system_schema::information_schema::runtime_metrics::InformationSchemaMetrics;
|
||||
use crate::system_schema::information_schema::schemata::InformationSchemaSchemata;
|
||||
use crate::system_schema::information_schema::table_constraints::InformationSchemaTableConstraints;
|
||||
use crate::system_schema::information_schema::tables::InformationSchemaTables;
|
||||
use crate::system_schema::memory_table::MemoryTable;
|
||||
pub(crate) use crate::system_schema::predicate::Predicates;
|
||||
use crate::system_schema::SystemSchemaProvider;
|
||||
use crate::CatalogManager;
|
||||
|
||||
lazy_static! {
|
||||
@@ -106,107 +105,26 @@ macro_rules! setup_memory_table {
|
||||
pub struct InformationSchemaProvider {
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
flow_metadata_manager: Arc<FlowMetadataManager>,
|
||||
tables: HashMap<String, TableRef>,
|
||||
}
|
||||
|
||||
impl InformationSchemaProvider {
|
||||
pub fn new(catalog_name: String, catalog_manager: Weak<dyn CatalogManager>) -> Self {
|
||||
let mut provider = Self {
|
||||
catalog_name,
|
||||
catalog_manager,
|
||||
tables: HashMap::new(),
|
||||
};
|
||||
|
||||
provider.build_tables();
|
||||
|
||||
provider
|
||||
}
|
||||
|
||||
/// Returns table names in the order of table id.
|
||||
pub fn table_names(&self) -> Vec<String> {
|
||||
let mut tables = self.tables.values().clone().collect::<Vec<_>>();
|
||||
|
||||
tables.sort_by(|t1, t2| {
|
||||
t1.table_info()
|
||||
.table_id()
|
||||
.partial_cmp(&t2.table_info().table_id())
|
||||
.unwrap()
|
||||
});
|
||||
tables
|
||||
.into_iter()
|
||||
.map(|t| t.table_info().name.clone())
|
||||
.collect()
|
||||
}
|
||||
|
||||
/// Returns a map of [TableRef] in information schema.
|
||||
pub fn tables(&self) -> &HashMap<String, TableRef> {
|
||||
impl SystemSchemaProvider for InformationSchemaProvider {
|
||||
fn tables(&self) -> &HashMap<String, TableRef> {
|
||||
assert!(!self.tables.is_empty());
|
||||
|
||||
&self.tables
|
||||
}
|
||||
|
||||
/// Returns the [TableRef] by table name.
|
||||
pub fn table(&self, name: &str) -> Option<TableRef> {
|
||||
self.tables.get(name).cloned()
|
||||
}
|
||||
impl SystemSchemaProviderInner for InformationSchemaProvider {
|
||||
fn catalog_name(&self) -> &str {
|
||||
&self.catalog_name
|
||||
}
|
||||
fn schema_name() -> &'static str {
|
||||
INFORMATION_SCHEMA_NAME
|
||||
}
|
||||
|
||||
fn build_tables(&mut self) {
|
||||
let mut tables = HashMap::new();
|
||||
|
||||
// SECURITY NOTE:
|
||||
// Carefully consider the tables that may expose sensitive cluster configurations,
|
||||
// authentication details, and other critical information.
|
||||
// Only put these tables under `greptime` catalog to prevent info leak.
|
||||
if self.catalog_name == DEFAULT_CATALOG_NAME {
|
||||
tables.insert(
|
||||
RUNTIME_METRICS.to_string(),
|
||||
self.build_table(RUNTIME_METRICS).unwrap(),
|
||||
);
|
||||
tables.insert(
|
||||
BUILD_INFO.to_string(),
|
||||
self.build_table(BUILD_INFO).unwrap(),
|
||||
);
|
||||
tables.insert(
|
||||
REGION_PEERS.to_string(),
|
||||
self.build_table(REGION_PEERS).unwrap(),
|
||||
);
|
||||
tables.insert(
|
||||
CLUSTER_INFO.to_string(),
|
||||
self.build_table(CLUSTER_INFO).unwrap(),
|
||||
);
|
||||
}
|
||||
|
||||
tables.insert(TABLES.to_string(), self.build_table(TABLES).unwrap());
|
||||
tables.insert(SCHEMATA.to_string(), self.build_table(SCHEMATA).unwrap());
|
||||
tables.insert(COLUMNS.to_string(), self.build_table(COLUMNS).unwrap());
|
||||
tables.insert(
|
||||
KEY_COLUMN_USAGE.to_string(),
|
||||
self.build_table(KEY_COLUMN_USAGE).unwrap(),
|
||||
);
|
||||
tables.insert(
|
||||
TABLE_CONSTRAINTS.to_string(),
|
||||
self.build_table(TABLE_CONSTRAINTS).unwrap(),
|
||||
);
|
||||
|
||||
// Add memory tables
|
||||
for name in MEMORY_TABLES.iter() {
|
||||
tables.insert((*name).to_string(), self.build_table(name).expect(name));
|
||||
}
|
||||
|
||||
self.tables = tables;
|
||||
}
|
||||
|
||||
fn build_table(&self, name: &str) -> Option<TableRef> {
|
||||
self.information_table(name).map(|table| {
|
||||
let table_info = Self::table_info(self.catalog_name.clone(), &table);
|
||||
let filter_pushdown = FilterPushDownType::Inexact;
|
||||
let data_source = Arc::new(InformationTableDataSource::new(table));
|
||||
let table = Table::new(table_info, filter_pushdown, data_source);
|
||||
Arc::new(table)
|
||||
})
|
||||
}
|
||||
|
||||
fn information_table(&self, name: &str) -> Option<InformationTableRef> {
|
||||
fn system_table(&self, name: &str) -> Option<SystemTableRef> {
|
||||
match name.to_ascii_lowercase().as_str() {
|
||||
TABLES => Some(Arc::new(InformationSchemaTables::new(
|
||||
self.catalog_name.clone(),
|
||||
@@ -262,27 +180,83 @@ impl InformationSchemaProvider {
|
||||
CLUSTER_INFO => Some(Arc::new(InformationSchemaClusterInfo::new(
|
||||
self.catalog_manager.clone(),
|
||||
)) as _),
|
||||
VIEWS => Some(Arc::new(InformationSchemaViews::new(
|
||||
self.catalog_name.clone(),
|
||||
self.catalog_manager.clone(),
|
||||
)) as _),
|
||||
FLOWS => Some(Arc::new(InformationSchemaFlows::new(
|
||||
self.catalog_name.clone(),
|
||||
self.flow_metadata_manager.clone(),
|
||||
)) as _),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn table_info(catalog_name: String, table: &InformationTableRef) -> TableInfoRef {
|
||||
let table_meta = TableMetaBuilder::default()
|
||||
.schema(table.schema())
|
||||
.primary_key_indices(vec![])
|
||||
.next_column_id(0)
|
||||
.build()
|
||||
.unwrap();
|
||||
let table_info = TableInfoBuilder::default()
|
||||
.table_id(table.table_id())
|
||||
.name(table.table_name().to_string())
|
||||
.catalog_name(catalog_name)
|
||||
.schema_name(INFORMATION_SCHEMA_NAME.to_string())
|
||||
.meta(table_meta)
|
||||
.table_type(table.table_type())
|
||||
.build()
|
||||
.unwrap();
|
||||
Arc::new(table_info)
|
||||
impl InformationSchemaProvider {
|
||||
pub fn new(
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
flow_metadata_manager: Arc<FlowMetadataManager>,
|
||||
) -> Self {
|
||||
let mut provider = Self {
|
||||
catalog_name,
|
||||
catalog_manager,
|
||||
flow_metadata_manager,
|
||||
tables: HashMap::new(),
|
||||
};
|
||||
|
||||
provider.build_tables();
|
||||
|
||||
provider
|
||||
}
|
||||
|
||||
fn build_tables(&mut self) {
|
||||
let mut tables = HashMap::new();
|
||||
|
||||
// SECURITY NOTE:
|
||||
// Carefully consider the tables that may expose sensitive cluster configurations,
|
||||
// authentication details, and other critical information.
|
||||
// Only put these tables under `greptime` catalog to prevent info leak.
|
||||
if self.catalog_name == DEFAULT_CATALOG_NAME {
|
||||
tables.insert(
|
||||
RUNTIME_METRICS.to_string(),
|
||||
self.build_table(RUNTIME_METRICS).unwrap(),
|
||||
);
|
||||
tables.insert(
|
||||
BUILD_INFO.to_string(),
|
||||
self.build_table(BUILD_INFO).unwrap(),
|
||||
);
|
||||
tables.insert(
|
||||
REGION_PEERS.to_string(),
|
||||
self.build_table(REGION_PEERS).unwrap(),
|
||||
);
|
||||
tables.insert(
|
||||
CLUSTER_INFO.to_string(),
|
||||
self.build_table(CLUSTER_INFO).unwrap(),
|
||||
);
|
||||
}
|
||||
|
||||
tables.insert(TABLES.to_string(), self.build_table(TABLES).unwrap());
|
||||
tables.insert(VIEWS.to_string(), self.build_table(VIEWS).unwrap());
|
||||
tables.insert(SCHEMATA.to_string(), self.build_table(SCHEMATA).unwrap());
|
||||
tables.insert(COLUMNS.to_string(), self.build_table(COLUMNS).unwrap());
|
||||
tables.insert(
|
||||
KEY_COLUMN_USAGE.to_string(),
|
||||
self.build_table(KEY_COLUMN_USAGE).unwrap(),
|
||||
);
|
||||
tables.insert(
|
||||
TABLE_CONSTRAINTS.to_string(),
|
||||
self.build_table(TABLE_CONSTRAINTS).unwrap(),
|
||||
);
|
||||
tables.insert(FLOWS.to_string(), self.build_table(FLOWS).unwrap());
|
||||
|
||||
// Add memory tables
|
||||
for name in MEMORY_TABLES.iter() {
|
||||
tables.insert((*name).to_string(), self.build_table(name).expect(name));
|
||||
}
|
||||
|
||||
self.tables = tables;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -300,57 +274,28 @@ trait InformationTable {
|
||||
}
|
||||
}
|
||||
|
||||
type InformationTableRef = Arc<dyn InformationTable + Send + Sync>;
|
||||
|
||||
struct InformationTableDataSource {
|
||||
table: InformationTableRef,
|
||||
}
|
||||
|
||||
impl InformationTableDataSource {
|
||||
fn new(table: InformationTableRef) -> Self {
|
||||
Self { table }
|
||||
// Provide compatibility for legacy `information_schema` code.
|
||||
impl<T> SystemTable for T
|
||||
where
|
||||
T: InformationTable,
|
||||
{
|
||||
fn table_id(&self) -> TableId {
|
||||
InformationTable::table_id(self)
|
||||
}
|
||||
|
||||
fn try_project(&self, projection: &[usize]) -> std::result::Result<SchemaRef, BoxedError> {
|
||||
let schema = self
|
||||
.table
|
||||
.schema()
|
||||
.try_project(projection)
|
||||
.context(SchemaConversionSnafu)
|
||||
.map_err(BoxedError::new)?;
|
||||
Ok(Arc::new(schema))
|
||||
}
|
||||
}
|
||||
|
||||
impl DataSource for InformationTableDataSource {
|
||||
fn get_stream(
|
||||
&self,
|
||||
request: ScanRequest,
|
||||
) -> std::result::Result<SendableRecordBatchStream, BoxedError> {
|
||||
let projection = request.projection.clone();
|
||||
let projected_schema = match &projection {
|
||||
Some(projection) => self.try_project(projection)?,
|
||||
None => self.table.schema(),
|
||||
};
|
||||
|
||||
let stream = self
|
||||
.table
|
||||
.to_stream(request)
|
||||
.map_err(BoxedError::new)
|
||||
.context(TablesRecordBatchSnafu)
|
||||
.map_err(BoxedError::new)?
|
||||
.map(move |batch| match &projection {
|
||||
Some(p) => batch.and_then(|b| b.try_project(p)),
|
||||
None => batch,
|
||||
});
|
||||
|
||||
let stream = RecordBatchStreamWrapper {
|
||||
schema: projected_schema,
|
||||
stream: Box::pin(stream),
|
||||
output_ordering: None,
|
||||
metrics: Default::default(),
|
||||
};
|
||||
|
||||
Ok(Box::pin(stream))
|
||||
fn table_name(&self) -> &'static str {
|
||||
InformationTable::table_name(self)
|
||||
}
|
||||
|
||||
fn schema(&self) -> SchemaRef {
|
||||
InformationTable::schema(self)
|
||||
}
|
||||
|
||||
fn table_type(&self) -> TableType {
|
||||
InformationTable::table_type(self)
|
||||
}
|
||||
|
||||
fn to_stream(&self, request: ScanRequest) -> Result<SendableRecordBatchStream> {
|
||||
InformationTable::to_stream(self, request)
|
||||
}
|
||||
}
|
||||
@@ -41,7 +41,8 @@ use store_api::storage::{ScanRequest, TableId};

use super::CLUSTER_INFO;
use crate::error::{CreateRecordBatchSnafu, InternalSnafu, ListNodesSnafu, Result};
use crate::information_schema::{utils, InformationTable, Predicates};
use crate::system_schema::information_schema::{InformationTable, Predicates};
use crate::system_schema::utils;
use crate::CatalogManager;

const PEER_ID: &str = "peer_id";
305
src/catalog/src/system_schema/information_schema/flows.rs
Normal file
@@ -0,0 +1,305 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::sync::Arc;

use common_catalog::consts::INFORMATION_SCHEMA_FLOW_TABLE_ID;
use common_error::ext::BoxedError;
use common_meta::key::flow::flow_info::FlowInfoValue;
use common_meta::key::flow::FlowMetadataManager;
use common_meta::key::FlowId;
use common_recordbatch::adapter::RecordBatchStreamAdapter;
use common_recordbatch::{DfSendableRecordBatchStream, RecordBatch, SendableRecordBatchStream};
use datafusion::execution::TaskContext;
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
use datatypes::prelude::ConcreteDataType as CDT;
use datatypes::scalars::ScalarVectorBuilder;
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
use datatypes::value::Value;
use datatypes::vectors::{Int64VectorBuilder, StringVectorBuilder, UInt32VectorBuilder, VectorRef};
use futures::TryStreamExt;
use snafu::{OptionExt, ResultExt};
use store_api::storage::{ScanRequest, TableId};

use crate::error::{
CreateRecordBatchSnafu, FlowInfoNotFoundSnafu, InternalSnafu, JsonSnafu, ListFlowsSnafu, Result,
};
use crate::information_schema::{Predicates, FLOWS};
use crate::system_schema::information_schema::InformationTable;

const INIT_CAPACITY: usize = 42;

// rows of information_schema.flows
// pk is (flow_name, flow_id, table_catalog)
pub const FLOW_NAME: &str = "flow_name";
pub const FLOW_ID: &str = "flow_id";
pub const TABLE_CATALOG: &str = "table_catalog";
pub const FLOW_DEFINITION: &str = "flow_definition";
pub const COMMENT: &str = "comment";
pub const EXPIRE_AFTER: &str = "expire_after";
pub const SOURCE_TABLE_IDS: &str = "source_table_ids";
pub const SINK_TABLE_NAME: &str = "sink_table_name";
pub const FLOWNODE_IDS: &str = "flownode_ids";
pub const OPTIONS: &str = "options";

/// The `information_schema.flows` table provides information about flows in databases.
pub(super) struct InformationSchemaFlows {
schema: SchemaRef,
catalog_name: String,
flow_metadata_manager: Arc<FlowMetadataManager>,
}

impl InformationSchemaFlows {
pub(super) fn new(
catalog_name: String,
flow_metadata_manager: Arc<FlowMetadataManager>,
) -> Self {
Self {
schema: Self::schema(),
catalog_name,
flow_metadata_manager,
}
}

/// For complex fields (including [`SOURCE_TABLE_IDS`], [`FLOWNODE_IDS`] and [`OPTIONS`]), the values are serialized to JSON strings for now.
/// TODO(discord9): use a better way to store complex fields like json type
pub(crate) fn schema() -> SchemaRef {
Arc::new(Schema::new(
vec![
(FLOW_NAME, CDT::string_datatype(), false),
(FLOW_ID, CDT::uint32_datatype(), false),
(TABLE_CATALOG, CDT::string_datatype(), false),
(FLOW_DEFINITION, CDT::string_datatype(), false),
(COMMENT, CDT::string_datatype(), true),
(EXPIRE_AFTER, CDT::int64_datatype(), true),
(SOURCE_TABLE_IDS, CDT::string_datatype(), true),
(SINK_TABLE_NAME, CDT::string_datatype(), false),
(FLOWNODE_IDS, CDT::string_datatype(), true),
(OPTIONS, CDT::string_datatype(), true),
]
.into_iter()
.map(|(name, ty, nullable)| ColumnSchema::new(name, ty, nullable))
.collect(),
))
}

fn builder(&self) -> InformationSchemaFlowsBuilder {
InformationSchemaFlowsBuilder::new(
self.schema.clone(),
self.catalog_name.clone(),
&self.flow_metadata_manager,
)
}
}

impl InformationTable for InformationSchemaFlows {
fn table_id(&self) -> TableId {
INFORMATION_SCHEMA_FLOW_TABLE_ID
}

fn table_name(&self) -> &'static str {
FLOWS
}

fn schema(&self) -> SchemaRef {
self.schema.clone()
}

fn to_stream(&self, request: ScanRequest) -> Result<SendableRecordBatchStream> {
let schema = self.schema.arrow_schema().clone();
let mut builder = self.builder();
let stream = Box::pin(DfRecordBatchStreamAdapter::new(
schema,
futures::stream::once(async move {
builder
.make_flows(Some(request))
.await
.map(|x| x.into_df_record_batch())
.map_err(|err| datafusion::error::DataFusionError::External(Box::new(err)))
}),
));
Ok(Box::pin(
RecordBatchStreamAdapter::try_new(stream)
.map_err(BoxedError::new)
.context(InternalSnafu)?,
))
}
}

/// Builds the `information_schema.FLOWS` table row by row
///
/// Columns are based on [`FlowInfoValue`]
struct InformationSchemaFlowsBuilder {
schema: SchemaRef,
catalog_name: String,
flow_metadata_manager: Arc<FlowMetadataManager>,

flow_names: StringVectorBuilder,
flow_ids: UInt32VectorBuilder,
table_catalogs: StringVectorBuilder,
raw_sqls: StringVectorBuilder,
comments: StringVectorBuilder,
expire_afters: Int64VectorBuilder,
source_table_id_groups: StringVectorBuilder,
sink_table_names: StringVectorBuilder,
flownode_id_groups: StringVectorBuilder,
option_groups: StringVectorBuilder,
}

impl InformationSchemaFlowsBuilder {
fn new(
schema: SchemaRef,
catalog_name: String,
flow_metadata_manager: &Arc<FlowMetadataManager>,
) -> Self {
Self {
schema,
catalog_name,
flow_metadata_manager: flow_metadata_manager.clone(),

flow_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
flow_ids: UInt32VectorBuilder::with_capacity(INIT_CAPACITY),
table_catalogs: StringVectorBuilder::with_capacity(INIT_CAPACITY),
raw_sqls: StringVectorBuilder::with_capacity(INIT_CAPACITY),
comments: StringVectorBuilder::with_capacity(INIT_CAPACITY),
expire_afters: Int64VectorBuilder::with_capacity(INIT_CAPACITY),
source_table_id_groups: StringVectorBuilder::with_capacity(INIT_CAPACITY),
sink_table_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
flownode_id_groups: StringVectorBuilder::with_capacity(INIT_CAPACITY),
option_groups: StringVectorBuilder::with_capacity(INIT_CAPACITY),
}
}

/// Construct the `information_schema.flows` virtual table
async fn make_flows(&mut self, request: Option<ScanRequest>) -> Result<RecordBatch> {
let catalog_name = self.catalog_name.clone();
let predicates = Predicates::from_scan_request(&request);

let flow_info_manager = self.flow_metadata_manager.clone();

// TODO(discord9): use `AsyncIterator` once it's stable-ish
let mut stream = flow_info_manager
.flow_name_manager()
.flow_names(&catalog_name)
.await;

while let Some((flow_name, flow_id)) = stream
.try_next()
.await
.map_err(BoxedError::new)
.context(ListFlowsSnafu {
catalog: &catalog_name,
})?
{
let flow_info = flow_info_manager
.flow_info_manager()
.get(flow_id.flow_id())
.await
.map_err(BoxedError::new)
.context(InternalSnafu)?
.context(FlowInfoNotFoundSnafu {
catalog_name: catalog_name.to_string(),
flow_name: flow_name.to_string(),
})?;
self.add_flow(&predicates, flow_id.flow_id(), flow_info)?;
}

self.finish()
}

fn add_flow(
&mut self,
predicates: &Predicates,
flow_id: FlowId,
flow_info: FlowInfoValue,
) -> Result<()> {
let row = [
(FLOW_NAME, &Value::from(flow_info.flow_name().to_string())),
(FLOW_ID, &Value::from(flow_id)),
(
TABLE_CATALOG,
&Value::from(flow_info.catalog_name().to_string()),
),
];
if !predicates.eval(&row) {
return Ok(());
}
self.flow_names.push(Some(flow_info.flow_name()));
self.flow_ids.push(Some(flow_id));
self.table_catalogs.push(Some(flow_info.catalog_name()));
self.raw_sqls.push(Some(flow_info.raw_sql()));
self.comments.push(Some(flow_info.comment()));
self.expire_afters.push(flow_info.expire_after());
self.source_table_id_groups.push(Some(
&serde_json::to_string(flow_info.source_table_ids()).context(JsonSnafu {
input: format!("{:?}", flow_info.source_table_ids()),
})?,
));
self.sink_table_names
.push(Some(&flow_info.sink_table_name().to_string()));
self.flownode_id_groups.push(Some(
&serde_json::to_string(flow_info.flownode_ids()).context({
JsonSnafu {
input: format!("{:?}", flow_info.flownode_ids()),
}
})?,
));
self.option_groups
.push(Some(&serde_json::to_string(flow_info.options()).context(
JsonSnafu {
input: format!("{:?}", flow_info.options()),
},
)?));

Ok(())
}

fn finish(&mut self) -> Result<RecordBatch> {
let columns: Vec<VectorRef> = vec![
Arc::new(self.flow_names.finish()),
Arc::new(self.flow_ids.finish()),
Arc::new(self.table_catalogs.finish()),
Arc::new(self.raw_sqls.finish()),
Arc::new(self.comments.finish()),
Arc::new(self.expire_afters.finish()),
Arc::new(self.source_table_id_groups.finish()),
Arc::new(self.sink_table_names.finish()),
Arc::new(self.flownode_id_groups.finish()),
Arc::new(self.option_groups.finish()),
];
RecordBatch::new(self.schema.clone(), columns).context(CreateRecordBatchSnafu)
}
}

impl DfPartitionStream for InformationSchemaFlows {
fn schema(&self) -> &arrow_schema::SchemaRef {
self.schema.arrow_schema()
}

fn execute(&self, _: Arc<TaskContext>) -> DfSendableRecordBatchStream {
let schema: Arc<arrow_schema::Schema> = self.schema.arrow_schema().clone();
let mut builder = self.builder();
Box::pin(DfRecordBatchStreamAdapter::new(
schema,
futures::stream::once(async move {
builder
.make_flows(None)
.await
.map(|x| x.into_df_record_batch())
.map_err(Into::into)
}),
))
}
}
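Aside (not part of the diff): the complex flow columns are stored as JSON-encoded strings, using serde_json exactly as `add_flow` does above. A small, hedged sketch of what those strings end up looking like; the values below are hypothetical.

    use std::collections::BTreeMap;

    // Hypothetical flow metadata, serialized the same way add_flow serializes it.
    let source_table_ids: Vec<u32> = vec![1024, 1025];
    let flownode_ids: BTreeMap<u32, u64> = BTreeMap::from([(0, 1)]);
    assert_eq!(serde_json::to_string(&source_table_ids).unwrap(), "[1024,1025]");
    assert_eq!(serde_json::to_string(&flownode_ids).unwrap(), r#"{"0":1}"#);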
@@ -15,17 +15,19 @@
use std::sync::Arc;

use common_catalog::consts::{METRIC_ENGINE, MITO_ENGINE};
use datatypes::prelude::{ConcreteDataType, VectorRef};
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
use datatypes::vectors::{Int64Vector, StringVector};
use datatypes::schema::{Schema, SchemaRef};
use datatypes::vectors::{Int64Vector, StringVector, VectorRef};

use crate::information_schema::table_names::*;
use super::table_names::*;
use crate::system_schema::utils::tables::{
bigint_column, datetime_column, string_column, string_columns,
};

const NO_VALUE: &str = "NO";

/// Find the schema and columns by the table_name, only valid for memory tables.
/// Safety: the user MUST ensure the table schema exists, panic otherwise.
pub fn get_schema_columns(table_name: &str) -> (SchemaRef, Vec<VectorRef>) {
pub(super) fn get_schema_columns(table_name: &str) -> (SchemaRef, Vec<VectorRef>) {
let (column_schemas, columns): (_, Vec<VectorRef>) = match table_name {
COLUMN_PRIVILEGES => (
string_columns(&[
@@ -80,7 +82,7 @@ pub fn get_schema_columns(table_name: &str) -> (SchemaRef, Vec<VectorRef>) {
"GIT_BRANCH",
"GIT_COMMIT",
"GIT_COMMIT_SHORT",
"GIT_DIRTY",
"GIT_CLEAN",
"PKG_VERSION",
]),
vec![
@@ -89,7 +91,7 @@ pub fn get_schema_columns(table_name: &str) -> (SchemaRef, Vec<VectorRef>) {
Arc::new(StringVector::from(vec![build_info
.commit_short
.to_string()])),
Arc::new(StringVector::from(vec![build_info.dirty.to_string()])),
Arc::new(StringVector::from(vec![build_info.clean.to_string()])),
Arc::new(StringVector::from(vec![build_info.version.to_string()])),
],
)
@@ -414,50 +416,3 @@ pub fn get_schema_columns(table_name: &str) -> (SchemaRef, Vec<VectorRef>) {

(Arc::new(Schema::new(column_schemas)), columns)
}

fn string_columns(names: &[&'static str]) -> Vec<ColumnSchema> {
names.iter().map(|name| string_column(name)).collect()
}

fn string_column(name: &str) -> ColumnSchema {
ColumnSchema::new(
str::to_lowercase(name),
ConcreteDataType::string_datatype(),
false,
)
}

fn bigint_column(name: &str) -> ColumnSchema {
ColumnSchema::new(
str::to_lowercase(name),
ConcreteDataType::int64_datatype(),
false,
)
}

fn datetime_column(name: &str) -> ColumnSchema {
ColumnSchema::new(
str::to_lowercase(name),
ConcreteDataType::datetime_datatype(),
false,
)
}

#[cfg(test)]
mod tests {
use super::*;

#[test]
fn test_string_columns() {
let columns = ["a", "b", "c"];
let column_schemas = string_columns(&columns);

assert_eq!(3, column_schemas.len());
for (i, name) in columns.iter().enumerate() {
let cs = column_schemas.get(i).unwrap();

assert_eq!(*name, cs.name);
assert_eq!(ConcreteDataType::string_datatype(), cs.data_type);
}
}
}
@@ -35,7 +35,7 @@ use super::KEY_COLUMN_USAGE;
use crate::error::{
CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
};
use crate::information_schema::{InformationTable, Predicates};
use crate::system_schema::information_schema::{InformationTable, Predicates};
use crate::CatalogManager;

pub const CONSTRAINT_SCHEMA: &str = "constraint_schema";
@@ -44,8 +44,8 @@ use crate::error::{
CreateRecordBatchSnafu, FindPartitionsSnafu, InternalSnafu, Result,
UpgradeWeakCatalogManagerRefSnafu,
};
use crate::information_schema::{InformationTable, Predicates};
use crate::kvbackend::KvBackendCatalogManager;
use crate::system_schema::information_schema::{InformationTable, Predicates};
use crate::CatalogManager;

const TABLE_CATALOG: &str = "table_catalog";
@@ -39,8 +39,8 @@ use crate::error::{
CreateRecordBatchSnafu, FindRegionRoutesSnafu, InternalSnafu, Result,
UpgradeWeakCatalogManagerRefSnafu,
};
use crate::information_schema::{InformationTable, Predicates};
use crate::kvbackend::KvBackendCatalogManager;
use crate::system_schema::information_schema::{InformationTable, Predicates};
use crate::CatalogManager;

const REGION_ID: &str = "region_id";
@@ -36,7 +36,8 @@ use crate::error::{
CreateRecordBatchSnafu, InternalSnafu, Result, TableMetadataManagerSnafu,
UpgradeWeakCatalogManagerRefSnafu,
};
use crate::information_schema::{utils, InformationTable, Predicates};
use crate::system_schema::information_schema::{InformationTable, Predicates};
use crate::system_schema::utils;
use crate::CatalogManager;

pub const CATALOG_NAME: &str = "catalog_name";
@@ -43,3 +43,5 @@ pub const PARTITIONS: &str = "partitions";
pub const REGION_PEERS: &str = "region_peers";
pub const TABLE_CONSTRAINTS: &str = "table_constraints";
pub const CLUSTER_INFO: &str = "cluster_info";
pub const VIEWS: &str = "views";
pub const FLOWS: &str = "flows";
@@ -38,7 +38,7 @@ use super::TABLES;
use crate::error::{
CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
};
use crate::information_schema::{InformationTable, Predicates};
use crate::system_schema::information_schema::{InformationTable, Predicates};
use crate::CatalogManager;

pub const TABLE_CATALOG: &str = "table_catalog";
@@ -143,9 +143,7 @@ impl InformationTable for InformationSchemaTables {
.make_tables(Some(request))
.await
.map(|x| x.into_df_record_batch())
.map_err(|err| {
datafusion::error::DataFusionError::External(format!("{err:?}").into())
})
.map_err(|err| datafusion::error::DataFusionError::External(Box::new(err)))
}),
));
Ok(Box::pin(
@@ -300,7 +298,7 @@ impl InformationSchemaTablesBuilder {
self.data_free.push(Some(0));
self.auto_increment.push(Some(0));
self.row_format.push(Some("Fixed"));
self.table_collation.push(None);
self.table_collation.push(Some("utf8_bin"));
self.update_time.push(None);
self.check_time.push(None);
288
src/catalog/src/system_schema/information_schema/views.rs
Normal file
288
src/catalog/src/system_schema/information_schema/views.rs
Normal file
@@ -0,0 +1,288 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::{Arc, Weak};
|
||||
|
||||
use arrow_schema::SchemaRef as ArrowSchemaRef;
|
||||
use common_catalog::consts::INFORMATION_SCHEMA_VIEW_TABLE_ID;
|
||||
use common_error::ext::BoxedError;
|
||||
use common_recordbatch::adapter::RecordBatchStreamAdapter;
|
||||
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
|
||||
use datafusion::execution::TaskContext;
|
||||
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
|
||||
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
|
||||
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
|
||||
use datatypes::prelude::{ConcreteDataType, ScalarVectorBuilder, VectorRef};
|
||||
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
|
||||
use datatypes::value::Value;
|
||||
use datatypes::vectors::StringVectorBuilder;
|
||||
use futures::TryStreamExt;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use store_api::storage::{ScanRequest, TableId};
|
||||
use table::metadata::TableType;
|
||||
|
||||
use super::VIEWS;
|
||||
use crate::error::{
|
||||
CastManagerSnafu, CreateRecordBatchSnafu, GetViewCacheSnafu, InternalSnafu, Result,
|
||||
UpgradeWeakCatalogManagerRefSnafu, ViewInfoNotFoundSnafu,
|
||||
};
|
||||
use crate::kvbackend::KvBackendCatalogManager;
|
||||
use crate::system_schema::information_schema::{InformationTable, Predicates};
|
||||
use crate::CatalogManager;
|
||||
const INIT_CAPACITY: usize = 42;
|
||||
|
||||
pub const TABLE_CATALOG: &str = "table_catalog";
|
||||
pub const TABLE_SCHEMA: &str = "table_schema";
|
||||
pub const TABLE_NAME: &str = "table_name";
|
||||
pub const VIEW_DEFINITION: &str = "view_definition";
|
||||
pub const CHECK_OPTION: &str = "check_option";
|
||||
pub const IS_UPDATABLE: &str = "is_updatable";
|
||||
pub const DEFINER: &str = "definer";
|
||||
pub const SECURITY_TYPE: &str = "security_type";
|
||||
pub const CHARACTER_SET_CLIENT: &str = "character_set_client";
|
||||
pub const COLLATION_CONNECTION: &str = "collation_connection";
|
||||
|
||||
/// The `information_schema.views` to provides information about views in databases.
|
||||
pub(super) struct InformationSchemaViews {
|
||||
schema: SchemaRef,
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
}
|
||||
|
||||
impl InformationSchemaViews {
|
||||
pub(super) fn new(catalog_name: String, catalog_manager: Weak<dyn CatalogManager>) -> Self {
|
||||
Self {
|
||||
schema: Self::schema(),
|
||||
catalog_name,
|
||||
catalog_manager,
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn schema() -> SchemaRef {
|
||||
Arc::new(Schema::new(vec![
|
||||
ColumnSchema::new(TABLE_CATALOG, ConcreteDataType::string_datatype(), false),
|
||||
ColumnSchema::new(TABLE_SCHEMA, ConcreteDataType::string_datatype(), false),
|
||||
ColumnSchema::new(TABLE_NAME, ConcreteDataType::string_datatype(), false),
|
||||
ColumnSchema::new(VIEW_DEFINITION, ConcreteDataType::string_datatype(), false),
|
||||
ColumnSchema::new(CHECK_OPTION, ConcreteDataType::string_datatype(), true),
|
||||
ColumnSchema::new(IS_UPDATABLE, ConcreteDataType::string_datatype(), true),
|
||||
ColumnSchema::new(DEFINER, ConcreteDataType::string_datatype(), true),
|
||||
ColumnSchema::new(SECURITY_TYPE, ConcreteDataType::string_datatype(), true),
|
||||
ColumnSchema::new(
|
||||
CHARACTER_SET_CLIENT,
|
||||
ConcreteDataType::string_datatype(),
|
||||
true,
|
||||
),
|
||||
ColumnSchema::new(
|
||||
COLLATION_CONNECTION,
|
||||
ConcreteDataType::string_datatype(),
|
||||
true,
|
||||
),
|
||||
]))
|
||||
}
|
||||
|
||||
fn builder(&self) -> InformationSchemaViewsBuilder {
|
||||
InformationSchemaViewsBuilder::new(
|
||||
self.schema.clone(),
|
||||
self.catalog_name.clone(),
|
||||
self.catalog_manager.clone(),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl InformationTable for InformationSchemaViews {
|
||||
fn table_id(&self) -> TableId {
|
||||
INFORMATION_SCHEMA_VIEW_TABLE_ID
|
||||
}
|
||||
|
||||
fn table_name(&self) -> &'static str {
|
||||
VIEWS
|
||||
}
|
||||
|
||||
fn schema(&self) -> SchemaRef {
|
||||
self.schema.clone()
|
||||
}
|
||||
|
||||
fn to_stream(&self, request: ScanRequest) -> Result<SendableRecordBatchStream> {
|
||||
let schema = self.schema.arrow_schema().clone();
|
||||
let mut builder = self.builder();
|
||||
let stream = Box::pin(DfRecordBatchStreamAdapter::new(
|
||||
schema,
|
||||
futures::stream::once(async move {
|
||||
builder
|
||||
.make_views(Some(request))
|
||||
.await
|
||||
.map(|x| x.into_df_record_batch())
|
||||
.map_err(|err| datafusion::error::DataFusionError::External(Box::new(err)))
|
||||
}),
|
||||
));
|
||||
Ok(Box::pin(
|
||||
RecordBatchStreamAdapter::try_new(stream)
|
||||
.map_err(BoxedError::new)
|
||||
.context(InternalSnafu)?,
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
/// Builds the `information_schema.VIEWS` table row by row
|
||||
///
|
||||
/// Columns are based on <https://dev.mysql.com/doc/refman/8.4/en/information-schema-views-table.html>
|
||||
struct InformationSchemaViewsBuilder {
|
||||
schema: SchemaRef,
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
|
||||
catalog_names: StringVectorBuilder,
|
||||
schema_names: StringVectorBuilder,
|
||||
table_names: StringVectorBuilder,
|
||||
view_definitions: StringVectorBuilder,
|
||||
check_options: StringVectorBuilder,
|
||||
is_updatable: StringVectorBuilder,
|
||||
definer: StringVectorBuilder,
|
||||
security_type: StringVectorBuilder,
|
||||
character_set_client: StringVectorBuilder,
|
||||
collation_connection: StringVectorBuilder,
|
||||
}
|
||||
|
||||
impl InformationSchemaViewsBuilder {
|
||||
fn new(
|
||||
schema: SchemaRef,
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
) -> Self {
|
||||
Self {
|
||||
schema,
|
||||
catalog_name,
|
||||
catalog_manager,
|
||||
catalog_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
schema_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
table_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
view_definitions: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
check_options: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
is_updatable: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
definer: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
security_type: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
character_set_client: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
collation_connection: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
}
|
||||
}
|
||||
|
||||
/// Construct the `information_schema.views` virtual table
|
||||
async fn make_views(&mut self, request: Option<ScanRequest>) -> Result<RecordBatch> {
|
||||
let catalog_name = self.catalog_name.clone();
|
||||
let catalog_manager = self
|
||||
.catalog_manager
|
||||
.upgrade()
|
||||
.context(UpgradeWeakCatalogManagerRefSnafu)?;
|
||||
let predicates = Predicates::from_scan_request(&request);
|
||||
let view_info_cache = catalog_manager
|
||||
.as_any()
|
||||
.downcast_ref::<KvBackendCatalogManager>()
|
||||
.context(CastManagerSnafu)?
|
||||
.view_info_cache()?;
|
||||
|
||||
for schema_name in catalog_manager.schema_names(&catalog_name).await? {
|
||||
let mut stream = catalog_manager.tables(&catalog_name, &schema_name);
|
||||
|
||||
while let Some(table) = stream.try_next().await? {
|
||||
let table_info = table.table_info();
|
||||
if table_info.table_type == TableType::View {
|
||||
let view_info = view_info_cache
|
||||
.get(table_info.ident.table_id)
|
||||
.await
|
||||
.context(GetViewCacheSnafu)?
|
||||
.context(ViewInfoNotFoundSnafu {
|
||||
name: &table_info.name,
|
||||
})?;
|
||||
self.add_view(
|
||||
&predicates,
|
||||
&catalog_name,
|
||||
&schema_name,
|
||||
&table_info.name,
|
||||
&view_info.definition,
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
self.finish()
|
||||
}
|
||||
|
||||
fn add_view(
|
||||
&mut self,
|
||||
predicates: &Predicates,
|
||||
catalog_name: &str,
|
||||
schema_name: &str,
|
||||
table_name: &str,
|
||||
definition: &str,
|
||||
) {
|
||||
let row = [
|
||||
(TABLE_CATALOG, &Value::from(catalog_name)),
|
||||
(TABLE_SCHEMA, &Value::from(schema_name)),
|
||||
(TABLE_NAME, &Value::from(table_name)),
|
||||
];
|
||||
|
||||
if !predicates.eval(&row) {
|
||||
return;
|
||||
}
|
||||
self.catalog_names.push(Some(catalog_name));
|
||||
self.schema_names.push(Some(schema_name));
|
||||
self.table_names.push(Some(table_name));
|
||||
self.view_definitions.push(Some(definition));
|
||||
self.check_options.push(None);
|
||||
// View is not updatable, statements such UPDATE , DELETE , and INSERT are illegal and are rejected.
|
||||
self.is_updatable.push(Some("NO"));
|
||||
self.definer.push(None);
|
||||
self.security_type.push(None);
|
||||
self.character_set_client.push(Some("utf8"));
|
||||
self.collation_connection.push(Some("utf8_bin"));
|
||||
}
|
||||
|
||||
fn finish(&mut self) -> Result<RecordBatch> {
|
||||
let columns: Vec<VectorRef> = vec![
|
||||
Arc::new(self.catalog_names.finish()),
|
||||
Arc::new(self.schema_names.finish()),
|
||||
Arc::new(self.table_names.finish()),
|
||||
Arc::new(self.view_definitions.finish()),
|
||||
Arc::new(self.check_options.finish()),
|
||||
Arc::new(self.is_updatable.finish()),
|
||||
Arc::new(self.definer.finish()),
|
||||
Arc::new(self.security_type.finish()),
|
||||
Arc::new(self.character_set_client.finish()),
|
||||
Arc::new(self.collation_connection.finish()),
|
||||
];
|
||||
RecordBatch::new(self.schema.clone(), columns).context(CreateRecordBatchSnafu)
|
||||
}
|
||||
}
|
||||
|
||||
impl DfPartitionStream for InformationSchemaViews {
|
||||
fn schema(&self) -> &ArrowSchemaRef {
|
||||
self.schema.arrow_schema()
|
||||
}
|
||||
|
||||
fn execute(&self, _: Arc<TaskContext>) -> DfSendableRecordBatchStream {
|
||||
let schema = self.schema.arrow_schema().clone();
|
||||
let mut builder = self.builder();
|
||||
Box::pin(DfRecordBatchStreamAdapter::new(
|
||||
schema,
|
||||
futures::stream::once(async move {
|
||||
builder
|
||||
.make_views(None)
|
||||
.await
|
||||
.map(|x| x.into_df_record_batch())
|
||||
.map_err(Into::into)
|
||||
}),
|
||||
))
|
||||
}
|
||||
}
|
||||
@@ -12,7 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.

mod tables;
mod table_columns;

use std::sync::Arc;

use arrow_schema::SchemaRef as ArrowSchemaRef;
@@ -27,22 +28,21 @@ use datatypes::schema::SchemaRef;
use datatypes::vectors::VectorRef;
use snafu::ResultExt;
use store_api::storage::{ScanRequest, TableId};
pub use tables::get_schema_columns;

use super::SystemTable;
use crate::error::{CreateRecordBatchSnafu, InternalSnafu, Result};
use crate::information_schema::InformationTable;

/// A memory table with specified schema and columns.
pub(super) struct MemoryTable {
table_id: TableId,
table_name: &'static str,
schema: SchemaRef,
columns: Vec<VectorRef>,
pub(crate) struct MemoryTable {
pub(crate) table_id: TableId,
pub(crate) table_name: &'static str,
pub(crate) schema: SchemaRef,
pub(crate) columns: Vec<VectorRef>,
}

impl MemoryTable {
/// Creates a memory table with table id, name, schema and columns.
pub(super) fn new(
pub fn new(
table_id: TableId,
table_name: &'static str,
schema: SchemaRef,
@@ -56,12 +56,54 @@ impl MemoryTable {
}
}

fn builder(&self) -> MemoryTableBuilder {
pub fn builder(&self) -> MemoryTableBuilder {
MemoryTableBuilder::new(self.schema.clone(), self.columns.clone())
}
}

impl InformationTable for MemoryTable {
pub(crate) struct MemoryTableBuilder {
schema: SchemaRef,
columns: Vec<VectorRef>,
}

impl MemoryTableBuilder {
fn new(schema: SchemaRef, columns: Vec<VectorRef>) -> Self {
Self { schema, columns }
}

/// Construct the `information_schema.{table_name}` virtual table
pub async fn memory_records(&mut self) -> Result<RecordBatch> {
if self.columns.is_empty() {
RecordBatch::new_empty(self.schema.clone()).context(CreateRecordBatchSnafu)
} else {
RecordBatch::new(self.schema.clone(), std::mem::take(&mut self.columns))
.context(CreateRecordBatchSnafu)
}
}
}

impl DfPartitionStream for MemoryTable {
fn schema(&self) -> &ArrowSchemaRef {
self.schema.arrow_schema()
}

fn execute(&self, _: Arc<TaskContext>) -> DfSendableRecordBatchStream {
let schema = self.schema.arrow_schema().clone();
let mut builder = self.builder();
Box::pin(DfRecordBatchStreamAdapter::new(
schema,
futures::stream::once(async move {
builder
.memory_records()
.await
.map(|x| x.into_df_record_batch())
.map_err(Into::into)
}),
))
}
}

impl SystemTable for MemoryTable {
fn table_id(&self) -> TableId {
self.table_id
}
@@ -95,48 +137,6 @@ impl InformationTable for MemoryTable {
}
}

struct MemoryTableBuilder {
schema: SchemaRef,
columns: Vec<VectorRef>,
}

impl MemoryTableBuilder {
fn new(schema: SchemaRef, columns: Vec<VectorRef>) -> Self {
Self { schema, columns }
}

/// Construct the `information_schema.{table_name}` virtual table
async fn memory_records(&mut self) -> Result<RecordBatch> {
if self.columns.is_empty() {
RecordBatch::new_empty(self.schema.clone()).context(CreateRecordBatchSnafu)
} else {
RecordBatch::new(self.schema.clone(), std::mem::take(&mut self.columns))
.context(CreateRecordBatchSnafu)
}
}
}

impl DfPartitionStream for MemoryTable {
fn schema(&self) -> &ArrowSchemaRef {
self.schema.arrow_schema()
}

fn execute(&self, _: Arc<TaskContext>) -> DfSendableRecordBatchStream {
let schema = self.schema.arrow_schema().clone();
let mut builder = self.builder();
Box::pin(DfRecordBatchStreamAdapter::new(
schema,
futures::stream::once(async move {
builder
.memory_records()
.await
.map(|x| x.into_df_record_batch())
.map_err(Into::into)
}),
))
}
}

#[cfg(test)]
mod tests {
use std::sync::Arc;
@@ -147,6 +147,7 @@ mod tests {
use datatypes::vectors::StringVector;

use super::*;
use crate::system_schema::SystemTable;

#[tokio::test]
async fn test_memory_table() {
@@ -166,8 +167,8 @@ mod tests {
);

assert_eq!(42, table.table_id());
assert_eq!("test", table.table_name());
assert_eq!(schema, InformationTable::schema(&table));
assert_eq!("test", table.table_name);
assert_eq!(schema, SystemTable::schema(&table));

let stream = table.to_stream(ScanRequest::default()).unwrap();

@@ -196,7 +197,7 @@ mod tests {

assert_eq!(42, table.table_id());
assert_eq!("test", table.table_name());
assert_eq!(schema, InformationTable::schema(&table));
assert_eq!(schema, SystemTable::schema(&table));

let stream = table.to_stream(ScanRequest::default()).unwrap();
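Side note on the test changes above (not part of the diff): `MemoryTable` now implements the new `SystemTable` trait instead of `InformationTable`, and since both traits declare a `schema` method, the assertions switch to fully qualified syntax to name the intended one. A minimal hedged sketch of that pattern; `table` is assumed to be a `MemoryTable` built as in the test.

    // Both forms call the same SystemTable::schema method on the MemoryTable.
    let s1 = SystemTable::schema(&table);
    let s2 = <MemoryTable as SystemTable>::schema(&table);
    assert_eq!(s1, s2);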
50  src/catalog/src/system_schema/memory_table/table_columns.rs  Normal file
@@ -0,0 +1,50 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#[macro_export]
macro_rules! memory_table_cols {
    ([$($colname:ident),*], $t:expr) => {
        let t = &$t;
        $(
            let mut $colname = Vec::with_capacity(t.len());
        )*
        paste::paste! {
            for &($([<r_ $colname>]),*) in t {
                $(
                    $colname.push([<r_ $colname>]);
                )*
            }
        }
    };
}

#[cfg(test)]
mod tests {

    #[test]
    fn test_memory_table_columns() {
        memory_table_cols!(
            [oid, typname, typlen],
            [
                (1, "String", -1),
                (2, "Binary", -1),
                (3, "Time", 8),
                (4, "Datetime", 8)
            ]
        );
        assert_eq!(&oid[..], &[1, 2, 3, 4]);
        assert_eq!(&typname[..], &["String", "Binary", "Time", "Datetime"]);
        assert_eq!(&typlen[..], &[-1, -1, 8, 8]);
    }
}
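For readers unfamiliar with the `paste` trick above, the test invocation expands roughly as follows (a hand-written sketch of the macro expansion, not compiler output):

    // Rough expansion of memory_table_cols!([oid, typname, typlen], [...]):
    let t = &[(1, "String", -1), (2, "Binary", -1), (3, "Time", 8), (4, "Datetime", 8)];
    let mut oid = Vec::with_capacity(t.len());
    let mut typname = Vec::with_capacity(t.len());
    let mut typlen = Vec::with_capacity(t.len());
    // `paste!` glues the `r_` prefix onto each column identifier, so the
    // destructuring pattern binds one loop variable per declared column.
    for &(r_oid, r_typname, r_typlen) in t {
        oid.push(r_oid);
        typname.push(r_typname);
        typlen.push(r_typlen);
    }
    // After expansion: oid == [1, 2, 3, 4], typname == ["String", ...], typlen == [-1, -1, 8, 8].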
137  src/catalog/src/system_schema/pg_catalog.rs  Normal file
@@ -0,0 +1,137 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

mod pg_catalog_memory_table;
mod pg_class;
mod pg_namespace;
mod table_names;

use std::collections::HashMap;
use std::sync::{Arc, Weak};

use common_catalog::consts::{self, PG_CATALOG_NAME};
use datatypes::schema::ColumnSchema;
use lazy_static::lazy_static;
use paste::paste;
use pg_catalog_memory_table::get_schema_columns;
use pg_class::PGClass;
use pg_namespace::PGNamespace;
use table::TableRef;
pub use table_names::*;

use super::memory_table::MemoryTable;
use super::utils::tables::u32_column;
use super::{SystemSchemaProvider, SystemSchemaProviderInner, SystemTableRef};
use crate::CatalogManager;

lazy_static! {
    static ref MEMORY_TABLES: &'static [&'static str] = &[table_names::PG_TYPE];
}

/// The column name for the OID column.
/// The OID column is a unique identifier of type u32 for each object in the database.
const OID_COLUMN_NAME: &str = "oid";

fn oid_column() -> ColumnSchema {
    u32_column(OID_COLUMN_NAME)
}

/// [`PGCatalogProvider`] is the provider for a schema named `pg_catalog`, it is not a catalog.
pub struct PGCatalogProvider {
    catalog_name: String,
    catalog_manager: Weak<dyn CatalogManager>,
    tables: HashMap<String, TableRef>,
}

impl SystemSchemaProvider for PGCatalogProvider {
    fn tables(&self) -> &HashMap<String, TableRef> {
        assert!(!self.tables.is_empty());

        &self.tables
    }
}

// TODO(j0hn50n133): Not sure whether to avoid duplication with `information_schema` or not.
macro_rules! setup_memory_table {
    ($name: expr) => {
        paste! {
            {
                let (schema, columns) = get_schema_columns($name);
                Some(Arc::new(MemoryTable::new(
                    consts::[<PG_CATALOG_ $name _TABLE_ID>],
                    $name,
                    schema,
                    columns
                )) as _)
            }
        }
    };
}
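As a concrete illustration (a sketch of what the `paste!`-based macro produces, not generated output), `setup_memory_table!(PG_TYPE)` expands to roughly:

    // Hypothetical expansion for $name == PG_TYPE:
    {
        let (schema, columns) = get_schema_columns(PG_TYPE);
        Some(Arc::new(MemoryTable::new(
            consts::PG_CATALOG_PG_TYPE_TABLE_ID, // identifier assembled by paste!
            PG_TYPE,
            schema,
            columns,
        )) as _)
    }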

impl PGCatalogProvider {
    pub fn new(catalog_name: String, catalog_manager: Weak<dyn CatalogManager>) -> Self {
        let mut provider = Self {
            catalog_name,
            catalog_manager,
            tables: HashMap::new(),
        };
        provider.build_tables();
        provider
    }

    fn build_tables(&mut self) {
        // SECURITY NOTE:
        // Must follow the same security rules as [`InformationSchemaProvider::build_tables`].
        let mut tables = HashMap::new();
        // TODO(J0HN50N133): model the table_name as an enum type to get rid of expect/unwrap here.
        // It's safe to unwrap here because we are sure that the constants have been handled correctly inside system_table.
        for name in MEMORY_TABLES.iter() {
            tables.insert(name.to_string(), self.build_table(name).expect(name));
        }
        tables.insert(
            PG_NAMESPACE.to_string(),
            self.build_table(PG_NAMESPACE).expect(PG_NAMESPACE),
        );
        tables.insert(
            PG_CLASS.to_string(),
            self.build_table(PG_CLASS).expect(PG_CLASS),
        );
        self.tables = tables;
    }
}

impl SystemSchemaProviderInner for PGCatalogProvider {
    fn schema_name() -> &'static str {
        PG_CATALOG_NAME
    }

    fn system_table(&self, name: &str) -> Option<SystemTableRef> {
        match name {
            table_names::PG_TYPE => setup_memory_table!(PG_TYPE),
            table_names::PG_NAMESPACE => Some(Arc::new(PGNamespace::new(
                self.catalog_name.clone(),
                self.catalog_manager.clone(),
            ))),
            table_names::PG_CLASS => Some(Arc::new(PGClass::new(
                self.catalog_name.clone(),
                self.catalog_manager.clone(),
            ))),
            _ => None,
        }
    }

    fn catalog_name(&self) -> &str {
        &self.catalog_name
    }
}
@@ -0,0 +1,69 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::sync::Arc;

use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
use datatypes::vectors::{Int16Vector, StringVector, UInt32Vector, VectorRef};

use super::oid_column;
use super::table_names::PG_TYPE;
use crate::memory_table_cols;
use crate::system_schema::utils::tables::{i16_column, string_column};

fn pg_type_schema_columns() -> (Vec<ColumnSchema>, Vec<VectorRef>) {
    // TODO(j0hn50n133): acquire this information from `DataType` instead of hardcoding it to avoid regression.
    memory_table_cols!(
        [oid, typname, typlen],
        [
            (1, "String", -1),
            (2, "Binary", -1),
            (3, "Int8", 1),
            (4, "Int16", 2),
            (5, "Int32", 4),
            (6, "Int64", 8),
            (7, "UInt8", 1),
            (8, "UInt16", 2),
            (9, "UInt32", 4),
            (10, "UInt64", 8),
            (11, "Float32", 4),
            (12, "Float64", 8),
            (13, "Decimal", 16),
            (14, "Date", 4),
            (15, "DateTime", 8),
            (16, "Timestamp", 8),
            (17, "Time", 8),
            (18, "Duration", 8),
            (19, "Interval", 16),
            (20, "List", -1),
        ]
    );
    (
        // Not quite identical with pg; we only follow the definition in pg.
        vec![oid_column(), string_column("typname"), i16_column("typlen")],
        vec![
            Arc::new(UInt32Vector::from_vec(oid)), // oid
            Arc::new(StringVector::from(typname)),
            Arc::new(Int16Vector::from_vec(typlen)), // typlen in bytes
        ],
    )
}

pub(super) fn get_schema_columns(table_name: &str) -> (SchemaRef, Vec<VectorRef>) {
    let (column_schemas, columns): (_, Vec<VectorRef>) = match table_name {
        PG_TYPE => pg_type_schema_columns(),
        _ => unreachable!("Unknown table in pg_catalog: {}", table_name),
    };
    (Arc::new(Schema::new(column_schemas)), columns)
}
249  src/catalog/src/system_schema/pg_catalog/pg_class.rs  Normal file
@@ -0,0 +1,249 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::sync::{Arc, Weak};

use arrow_schema::SchemaRef as ArrowSchemaRef;
use common_catalog::consts::PG_CATALOG_PG_CLASS_TABLE_ID;
use common_error::ext::BoxedError;
use common_recordbatch::adapter::RecordBatchStreamAdapter;
use common_recordbatch::{DfSendableRecordBatchStream, RecordBatch};
use datafusion::execution::TaskContext;
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
use datatypes::scalars::ScalarVectorBuilder;
use datatypes::schema::{Schema, SchemaRef};
use datatypes::value::Value;
use datatypes::vectors::{StringVectorBuilder, UInt32VectorBuilder, VectorRef};
use futures::TryStreamExt;
use snafu::{OptionExt, ResultExt};
use store_api::storage::ScanRequest;
use table::metadata::TableType;

use super::{OID_COLUMN_NAME, PG_CLASS};
use crate::error::{
    CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
};
use crate::information_schema::Predicates;
use crate::system_schema::utils::tables::{string_column, u32_column};
use crate::system_schema::SystemTable;
use crate::CatalogManager;

// === column name ===
pub const RELNAME: &str = "relname";
pub const RELNAMESPACE: &str = "relnamespace";
pub const RELKIND: &str = "relkind";
pub const RELOWNER: &str = "relowner";

// === enum value of relkind ===
pub const RELKIND_TABLE: &str = "r";
pub const RELKIND_VIEW: &str = "v";

/// The initial capacity of the vector builders.
const INIT_CAPACITY: usize = 42;
/// The dummy owner id for the namespace.
const DUMMY_OWNER_ID: u32 = 0;

/// The `pg_catalog.pg_class` table implementation.
pub(super) struct PGClass {
    schema: SchemaRef,
    catalog_name: String,
    catalog_manager: Weak<dyn CatalogManager>,
}

impl PGClass {
    pub(super) fn new(catalog_name: String, catalog_manager: Weak<dyn CatalogManager>) -> Self {
        Self {
            schema: Self::schema(),
            catalog_name,
            catalog_manager,
        }
    }

    fn schema() -> SchemaRef {
        Arc::new(Schema::new(vec![
            u32_column(OID_COLUMN_NAME),
            string_column(RELNAME),
            string_column(RELNAMESPACE),
            string_column(RELKIND),
            u32_column(RELOWNER),
        ]))
    }

    fn builder(&self) -> PGClassBuilder {
        PGClassBuilder::new(
            self.schema.clone(),
            self.catalog_name.clone(),
            self.catalog_manager.clone(),
        )
    }
}

impl SystemTable for PGClass {
    fn table_id(&self) -> table::metadata::TableId {
        PG_CATALOG_PG_CLASS_TABLE_ID
    }

    fn table_name(&self) -> &'static str {
        PG_CLASS
    }

    fn schema(&self) -> SchemaRef {
        self.schema.clone()
    }

    fn to_stream(
        &self,
        request: ScanRequest,
    ) -> Result<common_recordbatch::SendableRecordBatchStream> {
        let schema = self.schema.arrow_schema().clone();
        let mut builder = self.builder();
        let stream = Box::pin(DfRecordBatchStreamAdapter::new(
            schema,
            futures::stream::once(async move {
                builder
                    .make_class(Some(request))
                    .await
                    .map(|x| x.into_df_record_batch())
                    .map_err(Into::into)
            }),
        ));
        Ok(Box::pin(
            RecordBatchStreamAdapter::try_new(stream)
                .map_err(BoxedError::new)
                .context(InternalSnafu)?,
        ))
    }
}

impl DfPartitionStream for PGClass {
    fn schema(&self) -> &ArrowSchemaRef {
        self.schema.arrow_schema()
    }

    fn execute(&self, _: Arc<TaskContext>) -> DfSendableRecordBatchStream {
        let schema = self.schema.arrow_schema().clone();
        let mut builder = self.builder();
        Box::pin(DfRecordBatchStreamAdapter::new(
            schema,
            futures::stream::once(async move {
                builder
                    .make_class(None)
                    .await
                    .map(|x| x.into_df_record_batch())
                    .map_err(Into::into)
            }),
        ))
    }
}

/// Builds the `pg_catalog.pg_class` table row by row.
/// TODO(J0HN50N133): `relowner` is always the [`DUMMY_OWNER_ID`] because there is no user system yet.
/// Once we have a user system, make it the actual owner of the table.
struct PGClassBuilder {
    schema: SchemaRef,
    catalog_name: String,
    catalog_manager: Weak<dyn CatalogManager>,

    oid: UInt32VectorBuilder,
    relname: StringVectorBuilder,
    relnamespace: StringVectorBuilder,
    relkind: StringVectorBuilder,
    relowner: UInt32VectorBuilder,
}

impl PGClassBuilder {
    fn new(
        schema: SchemaRef,
        catalog_name: String,
        catalog_manager: Weak<dyn CatalogManager>,
    ) -> Self {
        Self {
            schema,
            catalog_name,
            catalog_manager,

            oid: UInt32VectorBuilder::with_capacity(INIT_CAPACITY),
            relname: StringVectorBuilder::with_capacity(INIT_CAPACITY),
            relnamespace: StringVectorBuilder::with_capacity(INIT_CAPACITY),
            relkind: StringVectorBuilder::with_capacity(INIT_CAPACITY),
            relowner: UInt32VectorBuilder::with_capacity(INIT_CAPACITY),
        }
    }

    async fn make_class(&mut self, request: Option<ScanRequest>) -> Result<RecordBatch> {
        let catalog_name = self.catalog_name.clone();
        let catalog_manager = self
            .catalog_manager
            .upgrade()
            .context(UpgradeWeakCatalogManagerRefSnafu)?;
        let predicates = Predicates::from_scan_request(&request);
        for schema_name in catalog_manager.schema_names(&catalog_name).await? {
            let mut stream = catalog_manager.tables(&catalog_name, &schema_name);
            while let Some(table) = stream.try_next().await? {
                let table_info = table.table_info();
                self.add_class(
                    &predicates,
                    table_info.table_id(),
                    &schema_name,
                    &table_info.name,
                    if table_info.table_type == TableType::View {
                        RELKIND_VIEW
                    } else {
                        RELKIND_TABLE
                    },
                );
            }
        }
        self.finish()
    }

    fn add_class(
        &mut self,
        predicates: &Predicates,
        oid: u32,
        schema: &str,
        table: &str,
        kind: &str,
    ) {
        let row = [
            (OID_COLUMN_NAME, &Value::from(oid)),
            (RELNAMESPACE, &Value::from(schema)),
            (RELNAME, &Value::from(table)),
            (RELKIND, &Value::from(kind)),
            (RELOWNER, &Value::from(DUMMY_OWNER_ID)),
        ];

        if !predicates.eval(&row) {
            return;
        }

        self.oid.push(Some(oid));
        self.relnamespace.push(Some(schema));
        self.relname.push(Some(table));
        self.relkind.push(Some(kind));
        self.relowner.push(Some(DUMMY_OWNER_ID));
    }

    fn finish(&mut self) -> Result<RecordBatch> {
        let columns: Vec<VectorRef> = vec![
            Arc::new(self.oid.finish()),
            Arc::new(self.relname.finish()),
            Arc::new(self.relnamespace.finish()),
            Arc::new(self.relkind.finish()),
            Arc::new(self.relowner.finish()),
        ];
        RecordBatch::new(self.schema.clone(), columns).context(CreateRecordBatchSnafu)
    }
}
191  src/catalog/src/system_schema/pg_catalog/pg_namespace.rs  Normal file
@@ -0,0 +1,191 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::sync::{Arc, Weak};

use arrow_schema::SchemaRef as ArrowSchemaRef;
use common_catalog::consts::PG_CATALOG_PG_NAMESPACE_TABLE_ID;
use common_error::ext::BoxedError;
use common_recordbatch::adapter::RecordBatchStreamAdapter;
use common_recordbatch::{DfSendableRecordBatchStream, RecordBatch, SendableRecordBatchStream};
use datafusion::execution::TaskContext;
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
use datatypes::scalars::ScalarVectorBuilder;
use datatypes::schema::{Schema, SchemaRef};
use datatypes::value::Value;
use datatypes::vectors::{StringVectorBuilder, VectorRef};
use snafu::{OptionExt, ResultExt};
use store_api::storage::ScanRequest;

use super::{OID_COLUMN_NAME, PG_NAMESPACE};
use crate::error::{
    CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
};
use crate::information_schema::Predicates;
use crate::system_schema::utils::tables::string_column;
use crate::system_schema::SystemTable;
use crate::CatalogManager;

/// The `pg_catalog.pg_namespace` table implementation.
/// A namespace here corresponds to a schema in GreptimeDB.

const NSPNAME: &str = "nspname";
const INIT_CAPACITY: usize = 42;

pub(super) struct PGNamespace {
    schema: SchemaRef,
    catalog_name: String,
    catalog_manager: Weak<dyn CatalogManager>,
}

impl PGNamespace {
    pub(super) fn new(catalog_name: String, catalog_manager: Weak<dyn CatalogManager>) -> Self {
        Self {
            schema: Self::schema(),
            catalog_name,
            catalog_manager,
        }
    }

    fn schema() -> SchemaRef {
        Arc::new(Schema::new(vec![
            // TODO(J0HN50N133): we do not have a numeric schema id, use schema name as a workaround. Use a proper schema id once we have it.
            string_column(OID_COLUMN_NAME),
            string_column(NSPNAME),
        ]))
    }

    fn builder(&self) -> PGNamespaceBuilder {
        PGNamespaceBuilder::new(
            self.schema.clone(),
            self.catalog_name.clone(),
            self.catalog_manager.clone(),
        )
    }
}

impl SystemTable for PGNamespace {
    fn schema(&self) -> SchemaRef {
        self.schema.clone()
    }

    fn table_id(&self) -> table::metadata::TableId {
        PG_CATALOG_PG_NAMESPACE_TABLE_ID
    }

    fn table_name(&self) -> &'static str {
        PG_NAMESPACE
    }

    fn to_stream(&self, request: ScanRequest) -> Result<SendableRecordBatchStream> {
        let schema = self.schema.arrow_schema().clone();
        let mut builder = self.builder();
        let stream = Box::pin(DfRecordBatchStreamAdapter::new(
            schema,
            futures::stream::once(async move {
                builder
                    .make_namespace(Some(request))
                    .await
                    .map(|x| x.into_df_record_batch())
                    .map_err(Into::into)
            }),
        ));
        Ok(Box::pin(
            RecordBatchStreamAdapter::try_new(stream)
                .map_err(BoxedError::new)
                .context(InternalSnafu)?,
        ))
    }
}

impl DfPartitionStream for PGNamespace {
    fn schema(&self) -> &ArrowSchemaRef {
        self.schema.arrow_schema()
    }

    fn execute(&self, _: Arc<TaskContext>) -> DfSendableRecordBatchStream {
        let schema = self.schema.arrow_schema().clone();
        let mut builder = self.builder();
        Box::pin(DfRecordBatchStreamAdapter::new(
            schema,
            futures::stream::once(async move {
                builder
                    .make_namespace(None)
                    .await
                    .map(|x| x.into_df_record_batch())
                    .map_err(Into::into)
            }),
        ))
    }
}

/// Builds the `pg_catalog.pg_namespace` table row by row.
/// `oid` uses the schema name as a workaround since we don't have a numeric schema id.
/// `nspname` is the schema name.
struct PGNamespaceBuilder {
    schema: SchemaRef,
    catalog_name: String,
    catalog_manager: Weak<dyn CatalogManager>,

    oid: StringVectorBuilder,
    nspname: StringVectorBuilder,
}

impl PGNamespaceBuilder {
    fn new(
        schema: SchemaRef,
        catalog_name: String,
        catalog_manager: Weak<dyn CatalogManager>,
    ) -> Self {
        Self {
            schema,
            catalog_name,
            catalog_manager,
            oid: StringVectorBuilder::with_capacity(INIT_CAPACITY),
            nspname: StringVectorBuilder::with_capacity(INIT_CAPACITY),
        }
    }

    /// Construct the `pg_catalog.pg_namespace` virtual table.
    async fn make_namespace(&mut self, request: Option<ScanRequest>) -> Result<RecordBatch> {
        let catalog_name = self.catalog_name.clone();
        let catalog_manager = self
            .catalog_manager
            .upgrade()
            .context(UpgradeWeakCatalogManagerRefSnafu)?;
        let predicates = Predicates::from_scan_request(&request);
        for schema_name in catalog_manager.schema_names(&catalog_name).await? {
            self.add_namespace(&predicates, &schema_name);
        }
        self.finish()
    }

    fn finish(&mut self) -> Result<RecordBatch> {
        let columns: Vec<VectorRef> =
            vec![Arc::new(self.oid.finish()), Arc::new(self.nspname.finish())];
        RecordBatch::new(self.schema.clone(), columns).context(CreateRecordBatchSnafu)
    }

    fn add_namespace(&mut self, predicates: &Predicates, schema_name: &str) {
        let row = [
            (OID_COLUMN_NAME, &Value::from(schema_name)),
            (NSPNAME, &Value::from(schema_name)),
        ];
        if !predicates.eval(&row) {
            return;
        }
        self.oid.push(Some(schema_name));
        self.nspname.push(Some(schema_name));
    }
}
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

fn main() {
    common_version::setup_build_info();
}
pub const PG_DATABASE: &str = "pg_databases";
pub const PG_NAMESPACE: &str = "pg_namespace";
pub const PG_CLASS: &str = "pg_class";
pub const PG_TYPE: &str = "pg_type";
@@ -25,7 +25,7 @@ type ColumnName = String;
/// we only support these simple predicates currently.
/// TODO(dennis): supports more predicate types.
#[derive(Clone, PartialEq, Eq, Debug)]
enum Predicate {
pub(crate) enum Predicate {
    Eq(ColumnName, Value),
    Like(ColumnName, String, bool),
    NotEq(ColumnName, Value),
@@ -12,6 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.

pub mod tables;

use std::sync::{Arc, Weak};

use common_config::Mode;
79  src/catalog/src/system_schema/utils/tables.rs  Normal file
@@ -0,0 +1,79 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use datatypes::prelude::ConcreteDataType;
use datatypes::schema::ColumnSchema;

pub fn string_columns(names: &[&'static str]) -> Vec<ColumnSchema> {
    names.iter().map(|name| string_column(name)).collect()
}

pub fn string_column(name: &str) -> ColumnSchema {
    ColumnSchema::new(
        str::to_lowercase(name),
        ConcreteDataType::string_datatype(),
        false,
    )
}

pub fn u32_column(name: &str) -> ColumnSchema {
    ColumnSchema::new(
        str::to_lowercase(name),
        ConcreteDataType::uint32_datatype(),
        false,
    )
}

pub fn i16_column(name: &str) -> ColumnSchema {
    ColumnSchema::new(
        str::to_lowercase(name),
        ConcreteDataType::int16_datatype(),
        false,
    )
}

pub fn bigint_column(name: &str) -> ColumnSchema {
    ColumnSchema::new(
        str::to_lowercase(name),
        ConcreteDataType::int64_datatype(),
        false,
    )
}

pub fn datetime_column(name: &str) -> ColumnSchema {
    ColumnSchema::new(
        str::to_lowercase(name),
        ConcreteDataType::datetime_datatype(),
        false,
    )
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_string_columns() {
        let columns = ["a", "b", "c"];
        let column_schemas = string_columns(&columns);

        assert_eq!(3, column_schemas.len());
        for (i, name) in columns.iter().enumerate() {
            let cs = column_schemas.get(i).unwrap();

            assert_eq!(*name, cs.name);
            assert_eq!(ConcreteDataType::string_datatype(), cs.data_type);
        }
    }
}
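These helpers are what the `pg_catalog` tables above use to declare their schemas. A minimal sketch of composing them (column names borrowed from `pg_class` above):

    use datatypes::schema::Schema;

    // Every column produced by the helpers is lower-cased and non-nullable.
    let schema = Schema::new(vec![
        u32_column("oid"),
        string_column("RELNAME"), // stored as "relname" after to_lowercase
        u32_column("relowner"),
    ]);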
@@ -17,21 +17,24 @@ use std::sync::Arc;

use bytes::Bytes;
use common_catalog::format_full_table_name;
use common_query::logical_plan::SubstraitPlanDecoderRef;
use common_query::logical_plan::{rename_logical_plan_columns, SubstraitPlanDecoderRef};
use datafusion::common::{ResolvedTableReference, TableReference};
use datafusion::datasource::view::ViewTable;
use datafusion::datasource::{provider_as_source, TableProvider};
use datafusion::logical_expr::TableSource;
use itertools::Itertools;
use session::context::QueryContext;
use snafu::{ensure, OptionExt, ResultExt};
use table::metadata::TableType;
use table::table::adapter::DfTableProviderAdapter;
mod dummy_catalog;
use dummy_catalog::DummyCatalogList;
use table::TableRef;

use crate::error::{
    CastManagerSnafu, DatafusionSnafu, DecodePlanSnafu, GetViewCacheSnafu, QueryAccessDeniedSnafu,
    Result, TableNotExistSnafu, ViewInfoNotFoundSnafu,
    CastManagerSnafu, DatafusionSnafu, DecodePlanSnafu, GetViewCacheSnafu, ProjectViewColumnsSnafu,
    QueryAccessDeniedSnafu, Result, TableNotExistSnafu, ViewInfoNotFoundSnafu,
    ViewPlanColumnsChangedSnafu,
};
use crate::kvbackend::KvBackendCatalogManager;
use crate::CatalogManagerRef;
@@ -43,6 +46,7 @@ pub struct DfTableSourceProvider {
    default_catalog: String,
    default_schema: String,
    plan_decoder: SubstraitPlanDecoderRef,
    enable_ident_normalization: bool,
}

impl DfTableSourceProvider {
@@ -51,6 +55,7 @@ impl DfTableSourceProvider {
        disallow_cross_catalog_query: bool,
        query_ctx: &QueryContext,
        plan_decoder: SubstraitPlanDecoderRef,
        enable_ident_normalization: bool,
    ) -> Self {
        Self {
            catalog_manager,
@@ -59,6 +64,7 @@ impl DfTableSourceProvider {
            default_catalog: query_ctx.current_catalog().to_owned(),
            default_schema: query_ctx.current_schema(),
            plan_decoder,
            enable_ident_normalization,
        }
    }

@@ -108,32 +114,7 @@ impl DfTableSourceProvider {
        })?;

        let provider: Arc<dyn TableProvider> = if table.table_info().table_type == TableType::View {
            let catalog_manager = self
                .catalog_manager
                .as_any()
                .downcast_ref::<KvBackendCatalogManager>()
                .context(CastManagerSnafu)?;

            let view_info = catalog_manager
                .view_info_cache()?
                .get(table.table_info().ident.table_id)
                .await
                .context(GetViewCacheSnafu)?
                .context(ViewInfoNotFoundSnafu {
                    name: &table.table_info().name,
                })?;

            // Build the catalog list provider for deserialization.
            let catalog_list = Arc::new(DummyCatalogList::new(self.catalog_manager.clone()));
            let logical_plan = self
                .plan_decoder
                .decode(Bytes::from(view_info.view_info.clone()), catalog_list, true)
                .await
                .context(DecodePlanSnafu {
                    name: &table.table_info().name,
                })?;

            Arc::new(ViewTable::try_new(logical_plan, None).context(DatafusionSnafu)?)
            self.create_view_provider(&table).await?
        } else {
            Arc::new(DfTableProviderAdapter::new(table))
        };
@@ -143,6 +124,80 @@ impl DfTableSourceProvider {
        let _ = self.resolved_tables.insert(resolved_name, source.clone());
        Ok(source)
    }

    async fn create_view_provider(&self, table: &TableRef) -> Result<Arc<dyn TableProvider>> {
        let catalog_manager = self
            .catalog_manager
            .as_any()
            .downcast_ref::<KvBackendCatalogManager>()
            .context(CastManagerSnafu)?;

        let view_info = catalog_manager
            .view_info_cache()?
            .get(table.table_info().ident.table_id)
            .await
            .context(GetViewCacheSnafu)?
            .context(ViewInfoNotFoundSnafu {
                name: &table.table_info().name,
            })?;

        // Build the catalog list provider for deserialization.
        let catalog_list = Arc::new(DummyCatalogList::new(self.catalog_manager.clone()));
        let logical_plan = self
            .plan_decoder
            .decode(Bytes::from(view_info.view_info.clone()), catalog_list, true)
            .await
            .context(DecodePlanSnafu {
                name: &table.table_info().name,
            })?;

        let columns: Vec<_> = view_info.columns.iter().map(|c| c.as_str()).collect();

        let original_plan_columns: Vec<_> =
            view_info.plan_columns.iter().map(|c| c.as_str()).collect();

        let plan_columns: Vec<_> = logical_plan
            .schema()
            .columns()
            .into_iter()
            .map(|c| c.name)
            .collect();

        // Only check the column count, because substrait doesn't carry aliases currently.
        // See https://github.com/apache/datafusion/issues/10815#issuecomment-2158666881
        // and https://github.com/apache/datafusion/issues/6489
        // TODO(dennis): check column names
        ensure!(
            original_plan_columns.len() == plan_columns.len(),
            ViewPlanColumnsChangedSnafu {
                origin_names: original_plan_columns.iter().join(","),
                actual_names: plan_columns.iter().join(","),
            }
        );

        // We have to do the `columns` projection here, because substrait doesn't
        // carry aliases for tables or columns:
        // https://github.com/apache/datafusion/issues/10815#issuecomment-2158666881
        let logical_plan = if !columns.is_empty() {
            rename_logical_plan_columns(
                self.enable_ident_normalization,
                logical_plan,
                plan_columns
                    .iter()
                    .map(|c| c.as_str())
                    .zip(columns.into_iter())
                    .collect(),
            )
            .context(ProjectViewColumnsSnafu)?
        } else {
            logical_plan
        };

        Ok(Arc::new(
            ViewTable::try_new(logical_plan, Some(view_info.definition.to_string()))
                .context(DatafusionSnafu)?,
        ))
    }
}
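To make the projection step concrete: with the view metadata used in the test below (view columns `["a", "b"]`, plan columns `["id", "name"]`), the zip above produces a rename map roughly like this (an illustrative sketch, not actual API output):

    // plan_columns come from the decoded logical plan, columns from the view metadata.
    let plan_columns = vec!["id", "name"];
    let columns = vec!["a", "b"];

    // The same pairing built by `plan_columns.iter().zip(columns)` above:
    let renames: Vec<(&str, &str)> = plan_columns.into_iter().zip(columns).collect();
    assert_eq!(renames, vec![("id", "a"), ("name", "b")]);
    // rename_logical_plan_columns then rewrites the plan into
    // `Projection: person.id AS a, person.name AS b`, as the test asserts.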
#[cfg(test)]
@@ -162,6 +217,7 @@ mod tests {
            true,
            query_ctx,
            DummyDecoder::arc(),
            true,
        );

        let table_ref = TableReference::bare("table_name");
@@ -277,12 +333,19 @@ mod tests {
        let logical_plan = vec![1, 2, 3];
        // Create view metadata
        table_metadata_manager
            .create_view_metadata(view_info.clone().into(), logical_plan, HashSet::new())
            .create_view_metadata(
                view_info.clone().into(),
                logical_plan,
                HashSet::new(),
                vec!["a".to_string(), "b".to_string()],
                vec!["id".to_string(), "name".to_string()],
                "definition".to_string(),
            )
            .await
            .unwrap();

        let mut table_provider =
            DfTableSourceProvider::new(catalog_manager, true, query_ctx, MockDecoder::arc());
            DfTableSourceProvider::new(catalog_manager, true, query_ctx, MockDecoder::arc(), true);

        // View not found
        let table_ref = TableReference::bare("not_exists_view");
@@ -290,6 +353,12 @@ mod tests {

        let table_ref = TableReference::bare(view_info.name);
        let source = table_provider.resolve_table(table_ref).await.unwrap();
        assert_eq!(*source.get_logical_plan().unwrap(), mock_plan());
        assert_eq!(
            r#"
Projection: person.id AS a, person.name AS b
  Filter: person.id > Int32(500)
    TableScan: person"#,
            format!("\n{:?}", source.get_logical_plan().unwrap())
        );
    }
}

@@ -49,4 +49,4 @@ tracing-subscriber = { version = "0.3", features = ["env-filter"] }

[dev-dependencies.substrait_proto]
package = "substrait"
version = "0.17"
version = "0.37"
@@ -33,9 +33,12 @@ use common_telemetry::tracing_context::W3cTrace;
use futures_util::StreamExt;
use prost::Message;
use snafu::{ensure, ResultExt};
use tonic::metadata::AsciiMetadataKey;
use tonic::transport::Channel;

use crate::error::{ConvertFlightDataSnafu, Error, IllegalFlightMessagesSnafu, ServerSnafu};
use crate::error::{
    ConvertFlightDataSnafu, Error, IllegalFlightMessagesSnafu, InvalidAsciiSnafu, ServerSnafu,
};
use crate::{from_grpc_response, Client, Result};

#[derive(Clone, Debug, Default)]
@@ -130,6 +133,36 @@ impl Database {
        self.handle(Request::Inserts(requests)).await
    }

    pub async fn insert_with_hints(
        &self,
        requests: InsertRequests,
        hints: &[(&str, &str)],
    ) -> Result<u32> {
        let mut client = make_database_client(&self.client)?.inner;
        let request = self.to_rpc_request(Request::Inserts(requests));

        let mut request = tonic::Request::new(request);
        let metadata = request.metadata_mut();
        for (key, value) in hints {
            let key = AsciiMetadataKey::from_bytes(format!("x-greptime-hint-{}", key).as_bytes())
                .map_err(|_| {
                    InvalidAsciiSnafu {
                        value: key.to_string(),
                    }
                    .build()
                })?;
            let value = value.parse().map_err(|_| {
                InvalidAsciiSnafu {
                    value: value.to_string(),
                }
                .build()
            })?;
            metadata.insert(key, value);
        }
        let response = client.handle(request).await?.into_inner();
        from_grpc_response(response)
    }
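A minimal usage sketch of the new hint mechanism (the hint name `ttl` here is hypothetical; only the `x-greptime-hint-` metadata prefix comes from the code above):

    // Assuming `db: Database` and `requests: InsertRequests` are already built,
    // each (key, value) pair is attached to the gRPC request as ASCII metadata
    // named "x-greptime-hint-<key>" before the inserts are sent.
    let hints = [("ttl", "7d")]; // hypothetical hint name/value
    let affected_rows = db.insert_with_hints(requests, &hints).await?;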

    async fn handle(&self, request: Request) -> Result<u32> {
        let mut client = make_database_client(&self.client)?.inner;
        let request = self.to_rpc_request(request);

@@ -53,13 +53,6 @@ pub enum Error {
        source: common_grpc::Error,
    },

    #[snafu(display("Column datatype error"))]
    ColumnDataType {
        #[snafu(implicit)]
        location: Location,
        source: api::error::Error,
    },

    #[snafu(display("Illegal GRPC client state: {}", err_msg))]
    IllegalGrpcClientState {
        err_msg: String,
@@ -129,6 +122,13 @@ pub enum Error {
        #[snafu(implicit)]
        location: Location,
    },

    #[snafu(display("Failed to parse ascii string: {}", value))]
    InvalidAscii {
        value: String,
        #[snafu(implicit)]
        location: Location,
    },
}

pub type Result<T> = std::result::Result<T, Error>;
@@ -137,7 +137,6 @@ impl ErrorExt for Error {
    fn status_code(&self) -> StatusCode {
        match self {
            Error::IllegalFlightMessages { .. }
            | Error::ColumnDataType { .. }
            | Error::MissingField { .. }
            | Error::IllegalDatabaseResponse { .. }
            | Error::ClientStreaming { .. } => StatusCode::Internal,
@@ -151,6 +150,8 @@ impl ErrorExt for Error {
            | Error::ConvertFlightData { source, .. }
            | Error::CreateTlsChannel { source, .. } => source.status_code(),
            Error::IllegalGrpcClientState { .. } => StatusCode::Unexpected,

            Error::InvalidAscii { .. } => StatusCode::InvalidArguments,
        }
    }

@@ -10,7 +10,9 @@ name = "greptime"
path = "src/bin/greptime.rs"

[features]
default = ["python"]
tokio-console = ["common-telemetry/tokio-console"]
python = ["frontend/python"]

[lints]
workspace = true
@@ -47,7 +49,7 @@ either = "1.8"
etcd-client.workspace = true
file-engine.workspace = true
flow.workspace = true
frontend.workspace = true
frontend = { workspace = true, default-features = false }
futures.workspace = true
human-panic = "1.2.2"
lazy_static.workspace = true
@@ -83,12 +85,10 @@ tikv-jemallocator = "0.5"
[dev-dependencies]
client = { workspace = true, features = ["testing"] }
common-test-util.workspace = true
common-version.workspace = true
serde.workspace = true
temp-env = "0.3"
tempfile.workspace = true

[target.'cfg(not(windows))'.dev-dependencies]
rexpect = "0.5"

[build-dependencies]
common-version.workspace = true

@@ -21,7 +21,7 @@ use cmd::{cli, datanode, flownode, frontend, metasrv, standalone, App};
use common_version::version;

#[derive(Parser)]
#[command(name = "greptime", author, version, long_version = version!(), about)]
#[command(name = "greptime", author, version, long_version = version(), about)]
#[command(propagate_version = true)]
pub(crate) struct Command {
    #[clap(subcommand)]

@@ -21,11 +21,12 @@ use base64::engine::general_purpose;
use base64::Engine;
use clap::{Parser, ValueEnum};
use client::DEFAULT_SCHEMA_NAME;
use common_telemetry::{debug, error, info, warn};
use common_catalog::consts::DEFAULT_CATALOG_NAME;
use common_telemetry::{debug, error, info};
use serde_json::Value;
use servers::http::greptime_result_v1::GreptimedbV1Response;
use servers::http::GreptimeQueryOutput;
use snafu::{OptionExt, ResultExt};
use snafu::ResultExt;
use tokio::fs::File;
use tokio::io::{AsyncWriteExt, BufWriter};
use tokio::sync::Semaphore;
@@ -34,19 +35,20 @@ use tracing_appender::non_blocking::WorkerGuard;

use crate::cli::{Instance, Tool};
use crate::error::{
    EmptyResultSnafu, Error, FileIoSnafu, HttpQuerySqlSnafu, InvalidDatabaseNameSnafu, Result,
    SerdeJsonSnafu,
    EmptyResultSnafu, Error, FileIoSnafu, HttpQuerySqlSnafu, Result, SerdeJsonSnafu,
};

type TableReference = (String, String, String);

#[derive(Debug, Default, Clone, ValueEnum)]
enum ExportTarget {
    /// Corresponding to `SHOW CREATE TABLE`
    /// Export all table schemas, corresponding to `SHOW CREATE TABLE`.
    Schema,
    /// Export all table data, corresponding to `COPY DATABASE TO`.
    Data,
    /// Export all table schemas and data at once.
    #[default]
    CreateTable,
    /// Corresponding to `EXPORT TABLE`
    TableData,
    All,
}

#[derive(Debug, Default, Parser)]
@@ -72,10 +74,20 @@ pub struct ExportCommand {
    max_retry: usize,

    /// Things to export
    #[clap(long, short = 't', value_enum)]
    #[clap(long, short = 't', value_enum, default_value = "all")]
    target: ExportTarget,

    /// basic authentication for connecting to the server
    /// A half-open time range: [start_time, end_time).
    /// The start of the time range (time-index column) for data export.
    #[clap(long)]
    start_time: Option<String>,

    /// A half-open time range: [start_time, end_time).
    /// The end of the time range (time-index column) for data export.
    #[clap(long)]
    end_time: Option<String>,

    /// The basic authentication for connecting to the server
    #[clap(long)]
    auth_basic: Option<String>,
}
@@ -99,6 +111,8 @@ impl ExportCommand {
                output_dir: self.output_dir.clone(),
                parallelism: self.export_jobs,
                target: self.target.clone(),
                start_time: self.start_time.clone(),
                end_time: self.end_time.clone(),
                auth_header,
            }),
            guard,
@@ -113,6 +127,8 @@ pub struct Export {
    output_dir: String,
    parallelism: usize,
    target: ExportTarget,
    start_time: Option<String>,
    end_time: Option<String>,
    auth_header: Option<String>,
}

@@ -161,13 +177,13 @@ impl Export {
        if let Some(schema) = &self.schema {
            Ok(vec![(self.catalog.clone(), schema.clone())])
        } else {
            let result = self.sql("show databases").await?;
            let result = self.sql("SHOW DATABASES").await?;
            let Some(records) = result else {
                EmptyResultSnafu.fail()?
            };
            let mut result = Vec::with_capacity(records.len());
            for value in records {
                let serde_json::Value::String(schema) = &value[0] else {
                let Value::String(schema) = &value[0] else {
                    unreachable!()
                };
                if schema == common_catalog::consts::INFORMATION_SCHEMA_NAME {
@@ -188,9 +204,11 @@
    ) -> Result<(Vec<TableReference>, Vec<TableReference>)> {
        // Puts all metric table first
        let sql = format!(
            "select table_catalog, table_schema, table_name from \
             information_schema.columns where column_name = '__tsid' \
             and table_catalog = \'{catalog}\' and table_schema = \'{schema}\'"
            "SELECT table_catalog, table_schema, table_name \
             FROM information_schema.columns \
             WHERE column_name = '__tsid' \
             and table_catalog = \'{catalog}\' \
             and table_schema = \'{schema}\'"
        );
        let result = self.sql(&sql).await?;
        let Some(records) = result else {
@@ -210,9 +228,11 @@

        // TODO: SQL injection hurts
        let sql = format!(
            "select table_catalog, table_schema, table_name from \
             information_schema.tables where table_type = \'BASE TABLE\' \
             and table_catalog = \'{catalog}\' and table_schema = \'{schema}\'",
            "SELECT table_catalog, table_schema, table_name \
             FROM information_schema.tables \
             WHERE table_type = \'BASE TABLE\' \
             and table_catalog = \'{catalog}\' \
             and table_schema = \'{schema}\'",
        );
        let result = self.sql(&sql).await?;
        let Some(records) = result else {
@@ -249,14 +269,14 @@

    async fn show_create_table(&self, catalog: &str, schema: &str, table: &str) -> Result<String> {
        let sql = format!(
            r#"show create table "{}"."{}"."{}""#,
            r#"SHOW CREATE TABLE "{}"."{}"."{}""#,
            catalog, schema, table
        );
        let result = self.sql(&sql).await?;
        let Some(records) = result else {
            EmptyResultSnafu.fail()?
        };
        let serde_json::Value::String(create_table) = &records[0][1] else {
        let Value::String(create_table) = &records[0][1] else {
            unreachable!()
        };

@@ -276,11 +296,13 @@ impl Export {
                let (metric_physical_tables, remaining_tables) =
                    self.get_table_list(&catalog, &schema).await?;
                let table_count = metric_physical_tables.len() + remaining_tables.len();
                tokio::fs::create_dir_all(&self.output_dir)
                let output_dir = Path::new(&self.output_dir)
                    .join(&catalog)
                    .join(format!("{schema}/"));
                tokio::fs::create_dir_all(&output_dir)
                    .await
                    .context(FileIoSnafu)?;
                let output_file =
                    Path::new(&self.output_dir).join(format!("{catalog}-{schema}.sql"));
                let output_file = Path::new(&output_dir).join("create_tables.sql");
                let mut file = File::create(output_file).await.context(FileIoSnafu)?;
                for (c, s, t) in metric_physical_tables.into_iter().chain(remaining_tables) {
                    match self.show_create_table(&c, &s, &t).await {
@@ -294,7 +316,12 @@ impl Export {
                        }
                    }
                }
                info!("finished exporting {catalog}.{schema} with {table_count} tables",);

                info!(
                    "Finished exporting {catalog}.{schema} with {table_count} table schemas to path: {}",
                    output_dir.to_string_lossy()
                );

                Ok::<(), Error>(())
            });
        }
@@ -317,7 +344,7 @@ impl Export {
        Ok(())
    }

    async fn export_table_data(&self) -> Result<()> {
    async fn export_database_data(&self) -> Result<()> {
        let timer = Instant::now();
        let semaphore = Arc::new(Semaphore::new(self.parallelism));
        let db_names = self.iter_db_names().await?;
@@ -327,70 +354,66 @@ impl Export {
            let semaphore_moved = semaphore.clone();
            tasks.push(async move {
                let _permit = semaphore_moved.acquire().await.unwrap();
                tokio::fs::create_dir_all(&self.output_dir)
                let output_dir = Path::new(&self.output_dir)
                    .join(&catalog)
                    .join(format!("{schema}/"));
                tokio::fs::create_dir_all(&output_dir)
                    .await
                    .context(FileIoSnafu)?;
                let output_dir = Path::new(&self.output_dir).join(format!("{catalog}-{schema}/"));
                // Ignores metric physical tables
                let (metrics_tables, table_list) = self.get_table_list(&catalog, &schema).await?;
                for (_, _, table_name) in metrics_tables {
                    warn!("Ignores metric physical table: {table_name}");
                }
                for (catalog_name, schema_name, table_name) in table_list {
                    // copy table to
                    let sql = format!(
                        r#"Copy "{}"."{}"."{}" TO '{}{}.parquet' WITH (format='parquet');"#,
                        catalog_name,
                        schema_name,
                        table_name,
                        output_dir.to_str().unwrap(),
                        table_name,
                    );
                    info!("Executing sql: {sql}");
                    self.sql(&sql).await?;
                }
                info!("Finished exporting {catalog}.{schema} data");

                // export copy from sql
                let dir_filenames = match output_dir.read_dir() {
                    Ok(dir) => dir,
                    Err(_) => {
                        warn!("empty database {catalog}.{schema}");
                        return Ok(());
                let with_options = match (&self.start_time, &self.end_time) {
                    (Some(start_time), Some(end_time)) => {
                        format!(
                            "WITH (FORMAT='parquet', start_time='{}', end_time='{}')",
                            start_time, end_time
                        )
                    }
                    (Some(start_time), None) => {
                        format!("WITH (FORMAT='parquet', start_time='{}')", start_time)
                    }
                    (None, Some(end_time)) => {
                        format!("WITH (FORMAT='parquet', end_time='{}')", end_time)
                    }
                    (None, None) => "WITH (FORMAT='parquet')".to_string(),
                };

                let copy_from_file =
                    Path::new(&self.output_dir).join(format!("{catalog}-{schema}_copy_from.sql"));
                let sql = format!(
                    r#"COPY DATABASE "{}"."{}" TO '{}' {};"#,
                    catalog,
                    schema,
                    output_dir.to_str().unwrap(),
                    with_options
                );

                info!("Executing sql: {sql}");

                self.sql(&sql).await?;

                info!(
                    "Finished exporting {catalog}.{schema} data into path: {}",
                    output_dir.to_string_lossy()
                );

                // The export copy from sql
                let copy_from_file = output_dir.join("copy_from.sql");
                let mut writer =
                    BufWriter::new(File::create(copy_from_file).await.context(FileIoSnafu)?);

                for table_file in dir_filenames {
                    let table_file = table_file.unwrap();
                    let table_name = table_file
                        .file_name()
                        .into_string()
                        .unwrap()
                        .replace(".parquet", "");

                    writer
                        .write(
                            format!(
                                "copy {} from '{}' with (format='parquet');\n",
                                table_name,
                                table_file.path().to_str().unwrap()
                            )
                            .as_bytes(),
                        )
                        .await
                        .context(FileIoSnafu)?;
                }
                let copy_database_from_sql = format!(
                    r#"COPY DATABASE "{}"."{}" FROM '{}' WITH (FORMAT='parquet');"#,
                    catalog,
                    schema,
                    output_dir.to_str().unwrap()
                );
                writer
                    .write(copy_database_from_sql.as_bytes())
                    .await
                    .context(FileIoSnafu)?;
                writer.flush().await.context(FileIoSnafu)?;

                info!("finished exporting {catalog}.{schema} copy_from.sql");
                info!("Finished exporting {catalog}.{schema} copy_from.sql");

                Ok::<(), Error>(())
            });
            })
        }

        let success = futures::future::join_all(tasks)
@@ -399,35 +422,41 @@ impl Export {
            .filter(|r| match r {
                Ok(_) => true,
                Err(e) => {
                    error!(e; "export job failed");
                    error!(e; "export database job failed");
                    false
                }
            })
            .count();
        let elapsed = timer.elapsed();

        info!("Success {success}/{db_count} jobs, costs: {:?}", elapsed);

        Ok(())
    }
}

#[allow(deprecated)]
#[async_trait]
impl Tool for Export {
    async fn do_work(&self) -> Result<()> {
        match self.target {
            ExportTarget::CreateTable => self.export_create_table().await,
            ExportTarget::TableData => self.export_table_data().await,
            ExportTarget::Schema => self.export_create_table().await,
            ExportTarget::Data => self.export_database_data().await,
            ExportTarget::All => {
                self.export_create_table().await?;
                self.export_database_data().await
            }
        }
    }
}
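To make the generated statements concrete, here is an illustrative sketch (hypothetical catalog, schema, output directory, and time range) of what the data export above produces:

    // Hypothetical values for illustration only.
    let (catalog, schema) = ("greptime", "public");
    let output_dir = "/tmp/dump/greptime/public/";
    let with_options = "WITH (FORMAT='parquet', start_time='2024-01-01 00:00:00')";

    // The statement executed against the server:
    let copy_to = format!(
        r#"COPY DATABASE "{}"."{}" TO '{}' {};"#,
        catalog, schema, output_dir, with_options
    );
    // And the restore statement written into <output_dir>/copy_from.sql:
    let copy_from = format!(
        r#"COPY DATABASE "{}"."{}" FROM '{}' WITH (FORMAT='parquet');"#,
        catalog, schema, output_dir
    );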

/// Split at `-`.
fn split_database(database: &str) -> Result<(String, Option<String>)> {
    let (catalog, schema) = database
        .split_once('-')
        .with_context(|| InvalidDatabaseNameSnafu {
            database: database.to_string(),
        })?;
    let (catalog, schema) = match database.split_once('-') {
        Some((catalog, schema)) => (catalog, schema),
        None => (DEFAULT_CATALOG_NAME, database),
    };

    if schema == "*" {
        Ok((catalog.to_string(), None))
    } else {
@@ -442,10 +471,26 @@ mod tests {
    use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
    use common_telemetry::logging::LoggingOptions;

    use crate::cli::export::split_database;
    use crate::error::Result as CmdResult;
    use crate::options::GlobalOptions;
    use crate::{cli, standalone, App};

    #[test]
    fn test_split_database() {
        let result = split_database("catalog-schema").unwrap();
        assert_eq!(result, ("catalog".to_string(), Some("schema".to_string())));

        let result = split_database("schema").unwrap();
        assert_eq!(result, ("greptime".to_string(), Some("schema".to_string())));

        let result = split_database("catalog-*").unwrap();
        assert_eq!(result, ("catalog".to_string(), None));

        let result = split_database("*").unwrap();
        assert_eq!(result, ("greptime".to_string(), None));
    }

    #[tokio::test(flavor = "multi_thread")]
    async fn test_export_create_table_with_quoted_names() -> CmdResult<()> {
        let output_dir = tempfile::tempdir().unwrap();
@@ -487,7 +532,7 @@ mod tests {
            "--output-dir",
            &*output_dir.path().to_string_lossy(),
            "--target",
            "create-table",
            "schema",
        ]);
        let mut cli_app = cli.build(LoggingOptions::default()).await?;
        cli_app.start().await?;
@@ -496,7 +541,9 @@ mod tests {

        let output_file = output_dir
            .path()
            .join("greptime-cli.export.create_table.sql");
            .join("greptime")
            .join("cli.export.create_table")
            .join("create_tables.sql");
        let res = std::fs::read_to_string(output_file).unwrap();
        let expect = r#"CREATE TABLE IF NOT EXISTS "a.b.c" (
  "ts" TIMESTAMP(3) NOT NULL,

@@ -34,7 +34,6 @@ use common_telemetry::debug;
use either::Either;
use meta_client::client::MetaClientBuilder;
use query::datafusion::DatafusionQueryEngine;
use query::logical_optimizer::LogicalOptimizer;
use query::parser::QueryLanguageParser;
use query::plan::LogicalPlan;
use query::query_engine::{DefaultSerializer, QueryEngineState};
@@ -289,6 +288,7 @@ async fn create_query_engine(meta_addr: &str) -> Result<DatafusionQueryEngine> {
        None,
        None,
        None,
        None,
        false,
        plugins.clone(),
    ));

@@ -18,6 +18,7 @@ use std::time::Duration;
use async_trait::async_trait;
use catalog::kvbackend::MetaKvBackend;
use clap::Parser;
use common_base::Plugins;
use common_config::Configurable;
use common_telemetry::logging::TracingOptions;
use common_telemetry::{info, warn};
@@ -266,13 +267,14 @@ impl StartCommand {
            &opts.component.tracing,
            opts.component.node_id.map(|x| x.to_string()),
        );
        log_versions(version!(), short_version!());
        log_versions(version(), short_version());

        info!("Datanode start command: {:#?}", self);
        info!("Datanode options: {:#?}", opts);

        let mut opts = opts.component;
        let plugins = plugins::setup_datanode_plugins(&mut opts)
        let opts = opts.component;
        let mut plugins = Plugins::new();
        plugins::setup_datanode_plugins(&mut plugins, &opts)
            .await
            .context(StartDatanodeSnafu)?;

@@ -338,7 +340,7 @@ mod tests {
            mode = "distributed"
            enable_memory_catalog = false
            node_id = 42

            rpc_addr = "127.0.0.1:4001"
            rpc_hostname = "192.168.0.1"
            [grpc]
@@ -365,7 +367,7 @@ mod tests {
            mode = "distributed"
            enable_memory_catalog = false
            node_id = 42

            [grpc]
            addr = "127.0.0.1:3001"
            hostname = "127.0.0.1"

@@ -298,13 +298,6 @@ pub enum Error {
        error: std::io::Error,
    },

    #[snafu(display("Invalid database name: {}", database))]
    InvalidDatabaseName {
        #[snafu(implicit)]
        location: Location,
        database: String,
    },

    #[snafu(display("Failed to create directory {}", dir))]
    CreateDir {
        dir: String,
@@ -384,8 +377,7 @@ impl ErrorExt for Error {
            | Error::ConnectEtcd { .. }
            | Error::NotDataFromOutput { .. }
            | Error::CreateDir { .. }
            | Error::EmptyResult { .. }
            | Error::InvalidDatabaseName { .. } => StatusCode::InvalidArguments,
            | Error::EmptyResult { .. } => StatusCode::InvalidArguments,

            Error::StartProcedureManager { source, .. }
            | Error::StopProcedureManager { source, .. } => source.status_code(),

@@ -214,7 +214,7 @@ impl StartCommand {
            &opts.component.tracing,
            opts.component.node_id.map(|x| x.to_string()),
        );
        log_versions(version!(), short_version!());
        log_versions(version(), short_version());

        info!("Flownode start command: {:#?}", self);
        info!("Flownode options: {:#?}", opts);

@@ -20,6 +20,7 @@ use cache::{build_fundamental_cache_registry, with_default_composite_cache_regis
use catalog::kvbackend::{CachedMetaKvBackendBuilder, KvBackendCatalogManager, MetaKvBackend};
use clap::Parser;
use client::client_manager::NodeClients;
use common_base::Plugins;
use common_config::Configurable;
use common_grpc::channel_manager::ChannelConfig;
use common_meta::cache::{CacheRegistryBuilder, LayeredCacheRegistryBuilder};
@@ -242,10 +243,11 @@ impl StartCommand {
                .get_or_insert_with(MetaClientOptions::default)
                .metasrv_addrs
                .clone_from(metasrv_addrs);
            opts.mode = Mode::Distributed;
        }

        opts.user_provider.clone_from(&self.user_provider);
        if let Some(user_provider) = &self.user_provider {
            opts.user_provider = Some(user_provider.clone());
        }

        Ok(())
    }
@@ -259,14 +261,14 @@ impl StartCommand {
            &opts.component.tracing,
            opts.component.node_id.clone(),
        );
        log_versions(version!(), short_version!());
        log_versions(version(), short_version());

        info!("Frontend start command: {:#?}", self);
        info!("Frontend options: {:#?}", opts);

        let mut opts = opts.component;
        #[allow(clippy::unnecessary_mut_passed)]
        let plugins = plugins::setup_frontend_plugins(&mut opts)
        let opts = opts.component;
        let mut plugins = Plugins::new();
        plugins::setup_frontend_plugins(&mut plugins, &opts)
            .await
            .context(StartFrontendSnafu)?;

@@ -314,7 +316,7 @@ impl StartCommand {
        );

        let catalog_manager = KvBackendCatalogManager::new(
            opts.mode,
            Mode::Distributed,
            Some(meta_client.clone()),
            cached_meta_backend.clone(),
            layered_cache_registry.clone(),
@@ -445,7 +447,6 @@ mod tests {

        let fe_opts = command.load_options(&Default::default()).unwrap().component;

        assert_eq!(Mode::Distributed, fe_opts.mode);
        assert_eq!("127.0.0.1:4000".to_string(), fe_opts.http.addr);
        assert_eq!(Duration::from_secs(30), fe_opts.http.timeout);

@@ -458,7 +459,7 @@ mod tests {

    #[tokio::test]
    async fn test_try_from_start_command_to_anymap() {
        let mut fe_opts = frontend::frontend::FrontendOptions {
        let fe_opts = frontend::frontend::FrontendOptions {
            http: HttpOptions {
                disable_dashboard: false,
                ..Default::default()
@@ -467,8 +468,10 @@ mod tests {
            ..Default::default()
        };

        #[allow(clippy::unnecessary_mut_passed)]
        let plugins = plugins::setup_frontend_plugins(&mut fe_opts).await.unwrap();
        let mut plugins = Plugins::new();
        plugins::setup_frontend_plugins(&mut plugins, &fe_opts)
            .await
            .unwrap();

        let provider = plugins.get::<UserProviderRef>().unwrap();
        let result = provider

@@ -16,6 +16,7 @@ use std::time::Duration;
|
||||
|
||||
use async_trait::async_trait;
|
||||
use clap::Parser;
|
||||
use common_base::Plugins;
|
||||
use common_config::Configurable;
|
||||
use common_telemetry::info;
|
||||
use common_telemetry::logging::TracingOptions;
|
||||
@@ -233,13 +234,14 @@ impl StartCommand {
|
||||
&opts.component.tracing,
|
||||
None,
|
||||
);
|
||||
log_versions(version!(), short_version!());
|
||||
log_versions(version(), short_version());
|
||||
|
||||
info!("Metasrv start command: {:#?}", self);
|
||||
info!("Metasrv options: {:#?}", opts);
|
||||
|
||||
let mut opts = opts.component;
|
||||
let plugins = plugins::setup_metasrv_plugins(&mut opts)
|
||||
let opts = opts.component;
|
||||
let mut plugins = Plugins::new();
|
||||
plugins::setup_metasrv_plugins(&mut plugins, &opts)
|
||||
.await
|
||||
.context(StartMetaServerSnafu)?;
|
||||
|
||||
@@ -296,7 +298,7 @@ mod tests {
|
||||
[logging]
|
||||
level = "debug"
|
||||
dir = "/tmp/greptimedb/test/logs"
|
||||
|
||||
|
||||
[failure_detector]
|
||||
threshold = 8.0
|
||||
min_std_deviation = "100ms"
|
||||
|
||||
@@ -19,6 +19,7 @@ use async_trait::async_trait;
|
||||
use cache::{build_fundamental_cache_registry, with_default_composite_cache_registry};
|
||||
use catalog::kvbackend::KvBackendCatalogManager;
|
||||
use clap::Parser;
|
||||
use common_base::Plugins;
|
||||
use common_catalog::consts::{MIN_USER_FLOW_ID, MIN_USER_TABLE_ID};
|
||||
use common_config::{metadata_store_dir, Configurable, KvBackendConfig};
|
||||
use common_error::ext::BoxedError;
|
||||
@@ -40,7 +41,7 @@ use common_telemetry::info;
|
||||
use common_telemetry::logging::{LoggingOptions, TracingOptions};
|
||||
use common_time::timezone::set_default_timezone;
|
||||
use common_version::{short_version, version};
|
||||
use common_wal::config::StandaloneWalConfig;
|
||||
use common_wal::config::DatanodeWalConfig;
|
||||
use datanode::config::{DatanodeOptions, ProcedureConfig, RegionEngineConfig, StorageConfig};
|
||||
use datanode::datanode::{Datanode, DatanodeBuilder};
|
||||
use file_engine::config::EngineConfig as FileEngineConfig;
|
||||
@@ -130,7 +131,7 @@ pub struct StandaloneOptions {
|
||||
pub opentsdb: OpentsdbOptions,
|
||||
pub influxdb: InfluxdbOptions,
|
||||
pub prom_store: PromStoreOptions,
|
||||
pub wal: StandaloneWalConfig,
|
||||
pub wal: DatanodeWalConfig,
|
||||
pub storage: StorageConfig,
|
||||
pub metadata_store: KvBackendConfig,
|
||||
pub procedure: ProcedureConfig,
|
||||
@@ -155,7 +156,7 @@ impl Default for StandaloneOptions {
|
||||
opentsdb: OpentsdbOptions::default(),
|
||||
influxdb: InfluxdbOptions::default(),
|
||||
prom_store: PromStoreOptions::default(),
|
||||
wal: StandaloneWalConfig::default(),
|
||||
wal: DatanodeWalConfig::default(),
|
||||
storage: StorageConfig::default(),
|
||||
metadata_store: KvBackendConfig::default(),
|
||||
procedure: ProcedureConfig::default(),
|
||||
@@ -181,7 +182,6 @@ impl StandaloneOptions {
|
||||
pub fn frontend_options(&self) -> FrontendOptions {
|
||||
let cloned_opts = self.clone();
|
||||
FrontendOptions {
|
||||
mode: cloned_opts.mode,
|
||||
default_timezone: cloned_opts.default_timezone,
|
||||
http: cloned_opts.http,
|
||||
grpc: cloned_opts.grpc,
|
||||
@@ -204,7 +204,7 @@ impl StandaloneOptions {
|
||||
DatanodeOptions {
|
||||
node_id: Some(0),
|
||||
enable_telemetry: cloned_opts.enable_telemetry,
|
||||
wal: cloned_opts.wal.into(),
|
||||
wal: cloned_opts.wal,
|
||||
storage: cloned_opts.storage,
|
||||
region_engine: cloned_opts.region_engine,
|
||||
grpc: cloned_opts.grpc,
|
||||
@@ -396,7 +396,9 @@ impl StartCommand {
|
||||
opts.influxdb.enable = self.influxdb_enable;
|
||||
}
|
||||
|
||||
opts.user_provider.clone_from(&self.user_provider);
|
||||
if let Some(user_provider) = &self.user_provider {
|
||||
opts.user_provider = Some(user_provider.clone());
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@@ -413,19 +415,23 @@ impl StartCommand {
&opts.component.tracing,
None,
);
log_versions(version!(), short_version!());
log_versions(version(), short_version());

info!("Standalone start command: {:#?}", self);
info!("Standalone options: {opts:#?}");

let mut plugins = Plugins::new();
let opts = opts.component;
let mut fe_opts = opts.frontend_options();
#[allow(clippy::unnecessary_mut_passed)]
let fe_plugins = plugins::setup_frontend_plugins(&mut fe_opts) // mut ref is MUST, DO NOT change it
let fe_opts = opts.frontend_options();
let dn_opts = opts.datanode_options();

plugins::setup_frontend_plugins(&mut plugins, &fe_opts)
.await
.context(StartFrontendSnafu)?;

let dn_opts = opts.datanode_options();
plugins::setup_datanode_plugins(&mut plugins, &dn_opts)
.await
.context(StartDatanodeSnafu)?;

set_default_timezone(fe_opts.default_timezone.as_deref()).context(InitTimezoneSnafu)?;
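In standalone mode one `Plugins` container is now created first and threaded through both plugin setup calls, instead of each call taking `&mut` options and returning its own plugins. A rough sketch of that flow; the option and error types are placeholders, only the `setup_*_plugins(&mut Plugins, &Opts)` shape is taken from the hunk:

    // Placeholder types; only the call pattern is meant to match the diff.
    #[derive(Default)]
    struct Plugins;
    struct FrontendOptions;
    struct DatanodeOptions;

    async fn setup_frontend_plugins(_p: &mut Plugins, _o: &FrontendOptions) -> Result<(), String> {
        Ok(())
    }

    async fn setup_datanode_plugins(_p: &mut Plugins, _o: &DatanodeOptions) -> Result<(), String> {
        Ok(())
    }

    async fn start_standalone(fe_opts: FrontendOptions, dn_opts: DatanodeOptions) -> Result<(), String> {
        // A single Plugins instance is shared by every component in standalone mode,
        // then cloned into the datanode, flownode and frontend builders below.
        let mut plugins = Plugins::default();
        setup_frontend_plugins(&mut plugins, &fe_opts).await?;
        setup_datanode_plugins(&mut plugins, &dn_opts).await?;
        Ok(())
    }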
@@ -464,7 +470,7 @@ impl StartCommand {
|
||||
let table_metadata_manager =
|
||||
Self::create_table_metadata_manager(kv_backend.clone()).await?;
|
||||
|
||||
let datanode = DatanodeBuilder::new(dn_opts, fe_plugins.clone())
|
||||
let datanode = DatanodeBuilder::new(dn_opts, plugins.clone())
|
||||
.with_kv_backend(kv_backend.clone())
|
||||
.build()
|
||||
.await
|
||||
@@ -472,7 +478,7 @@ impl StartCommand {
|
||||
|
||||
let flow_builder = FlownodeBuilder::new(
|
||||
Default::default(),
|
||||
fe_plugins.clone(),
|
||||
plugins.clone(),
|
||||
table_metadata_manager.clone(),
|
||||
catalog_manager.clone(),
|
||||
);
|
||||
@@ -533,7 +539,7 @@ impl StartCommand {
|
||||
node_manager.clone(),
|
||||
ddl_task_executor.clone(),
|
||||
)
|
||||
.with_plugin(fe_plugins.clone())
|
||||
.with_plugin(plugins.clone())
|
||||
.try_build()
|
||||
.await
|
||||
.context(StartFrontendSnafu)?;
|
||||
@@ -554,7 +560,7 @@ impl StartCommand {
|
||||
|
||||
let (tx, _rx) = broadcast::channel(1);
|
||||
|
||||
let servers = Services::new(fe_opts, Arc::new(frontend.clone()), fe_plugins)
|
||||
let servers = Services::new(fe_opts, Arc::new(frontend.clone()), plugins)
|
||||
.build()
|
||||
.await
|
||||
.context(StartFrontendSnafu)?;
|
||||
@@ -629,20 +635,21 @@ mod tests {
|
||||
use common_test_util::temp_dir::create_named_temp_file;
|
||||
use common_wal::config::DatanodeWalConfig;
|
||||
use datanode::config::{FileConfig, GcsConfig};
|
||||
use servers::Mode;
|
||||
|
||||
use super::*;
|
||||
use crate::options::GlobalOptions;
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_try_from_start_command_to_anymap() {
|
||||
let mut fe_opts = FrontendOptions {
|
||||
let fe_opts = FrontendOptions {
|
||||
user_provider: Some("static_user_provider:cmd:test=test".to_string()),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
#[allow(clippy::unnecessary_mut_passed)]
|
||||
let plugins = plugins::setup_frontend_plugins(&mut fe_opts).await.unwrap();
|
||||
let mut plugins = Plugins::new();
|
||||
plugins::setup_frontend_plugins(&mut plugins, &fe_opts)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let provider = plugins.get::<UserProviderRef>().unwrap();
|
||||
let result = provider
|
||||
@@ -727,7 +734,6 @@ mod tests {
|
||||
let fe_opts = options.frontend_options();
|
||||
let dn_opts = options.datanode_options();
|
||||
let logging_opts = options.logging;
|
||||
assert_eq!(Mode::Standalone, fe_opts.mode);
|
||||
assert_eq!("127.0.0.1:4000".to_string(), fe_opts.http.addr);
|
||||
assert_eq!(Duration::from_secs(33), fe_opts.http.timeout);
|
||||
assert_eq!(ReadableSize::mb(128), fe_opts.http.body_limit);
|
||||
|
||||
@@ -24,8 +24,9 @@ use common_grpc::channel_manager::{
|
||||
use common_runtime::global::RuntimeOptions;
|
||||
use common_telemetry::logging::LoggingOptions;
|
||||
use common_wal::config::raft_engine::RaftEngineConfig;
|
||||
use common_wal::config::{DatanodeWalConfig, StandaloneWalConfig};
|
||||
use common_wal::config::DatanodeWalConfig;
|
||||
use datanode::config::{DatanodeOptions, RegionEngineConfig, StorageConfig};
|
||||
use file_engine::config::EngineConfig;
|
||||
use frontend::frontend::FrontendOptions;
|
||||
use frontend::service_config::datanode::DatanodeClientOptions;
|
||||
use meta_client::MetaClientOptions;
|
||||
@@ -45,9 +46,8 @@ fn test_load_datanode_example_config() {
|
||||
|
||||
let expected = GreptimeOptions::<DatanodeOptions> {
|
||||
runtime: RuntimeOptions {
|
||||
read_rt_size: 8,
|
||||
write_rt_size: 8,
|
||||
bg_rt_size: 4,
|
||||
global_rt_size: 8,
|
||||
compact_rt_size: 4,
|
||||
},
|
||||
component: DatanodeOptions {
|
||||
node_id: Some(42),
|
||||
@@ -71,18 +71,21 @@ fn test_load_datanode_example_config() {
|
||||
data_home: "/tmp/greptimedb/".to_string(),
|
||||
..Default::default()
|
||||
},
|
||||
region_engine: vec![RegionEngineConfig::Mito(MitoConfig {
|
||||
num_workers: 8,
|
||||
auto_flush_interval: Duration::from_secs(3600),
|
||||
scan_parallelism: 0,
|
||||
global_write_buffer_size: ReadableSize::gb(1),
|
||||
global_write_buffer_reject_size: ReadableSize::gb(2),
|
||||
sst_meta_cache_size: ReadableSize::mb(128),
|
||||
vector_cache_size: ReadableSize::mb(512),
|
||||
page_cache_size: ReadableSize::mb(512),
|
||||
max_background_jobs: 4,
|
||||
..Default::default()
|
||||
})],
|
||||
region_engine: vec![
|
||||
RegionEngineConfig::Mito(MitoConfig {
|
||||
num_workers: 8,
|
||||
auto_flush_interval: Duration::from_secs(3600),
|
||||
scan_parallelism: 0,
|
||||
global_write_buffer_size: ReadableSize::gb(1),
|
||||
global_write_buffer_reject_size: ReadableSize::gb(2),
|
||||
sst_meta_cache_size: ReadableSize::mb(128),
|
||||
vector_cache_size: ReadableSize::mb(512),
|
||||
page_cache_size: ReadableSize::mb(512),
|
||||
max_background_jobs: 4,
|
||||
..Default::default()
|
||||
}),
|
||||
RegionEngineConfig::File(EngineConfig {}),
|
||||
],
|
||||
logging: LoggingOptions {
|
||||
level: Some("info".to_string()),
|
||||
otlp_endpoint: Some("".to_string()),
|
||||
@@ -115,9 +118,8 @@ fn test_load_frontend_example_config() {
|
||||
.unwrap();
|
||||
let expected = GreptimeOptions::<FrontendOptions> {
|
||||
runtime: RuntimeOptions {
|
||||
read_rt_size: 8,
|
||||
write_rt_size: 8,
|
||||
bg_rt_size: 4,
|
||||
global_rt_size: 8,
|
||||
compact_rt_size: 4,
|
||||
},
|
||||
component: FrontendOptions {
|
||||
default_timezone: Some("UTC".to_string()),
|
||||
@@ -163,9 +165,8 @@ fn test_load_metasrv_example_config() {
|
||||
.unwrap();
|
||||
let expected = GreptimeOptions::<MetasrvOptions> {
|
||||
runtime: RuntimeOptions {
|
||||
read_rt_size: 8,
|
||||
write_rt_size: 8,
|
||||
bg_rt_size: 4,
|
||||
global_rt_size: 8,
|
||||
compact_rt_size: 4,
|
||||
},
|
||||
component: MetasrvOptions {
|
||||
selector: SelectorType::LeaseBased,
|
||||
@@ -196,29 +197,31 @@ fn test_load_standalone_example_config() {
|
||||
.unwrap();
|
||||
let expected = GreptimeOptions::<StandaloneOptions> {
|
||||
runtime: RuntimeOptions {
|
||||
read_rt_size: 8,
|
||||
write_rt_size: 8,
|
||||
bg_rt_size: 4,
|
||||
global_rt_size: 8,
|
||||
compact_rt_size: 4,
|
||||
},
|
||||
component: StandaloneOptions {
|
||||
default_timezone: Some("UTC".to_string()),
|
||||
wal: StandaloneWalConfig::RaftEngine(RaftEngineConfig {
|
||||
wal: DatanodeWalConfig::RaftEngine(RaftEngineConfig {
|
||||
dir: Some("/tmp/greptimedb/wal".to_string()),
|
||||
sync_period: Some(Duration::from_secs(10)),
|
||||
..Default::default()
|
||||
}),
|
||||
region_engine: vec![RegionEngineConfig::Mito(MitoConfig {
|
||||
num_workers: 8,
|
||||
auto_flush_interval: Duration::from_secs(3600),
|
||||
scan_parallelism: 0,
|
||||
global_write_buffer_size: ReadableSize::gb(1),
|
||||
global_write_buffer_reject_size: ReadableSize::gb(2),
|
||||
sst_meta_cache_size: ReadableSize::mb(128),
|
||||
vector_cache_size: ReadableSize::mb(512),
|
||||
page_cache_size: ReadableSize::mb(512),
|
||||
max_background_jobs: 4,
|
||||
..Default::default()
|
||||
})],
|
||||
region_engine: vec![
|
||||
RegionEngineConfig::Mito(MitoConfig {
|
||||
num_workers: 8,
|
||||
auto_flush_interval: Duration::from_secs(3600),
|
||||
scan_parallelism: 0,
|
||||
global_write_buffer_size: ReadableSize::gb(1),
|
||||
global_write_buffer_reject_size: ReadableSize::gb(2),
|
||||
sst_meta_cache_size: ReadableSize::mb(128),
|
||||
vector_cache_size: ReadableSize::mb(512),
|
||||
page_cache_size: ReadableSize::mb(512),
|
||||
max_background_jobs: 4,
|
||||
..Default::default()
|
||||
}),
|
||||
RegionEngineConfig::File(EngineConfig {}),
|
||||
],
|
||||
storage: StorageConfig {
|
||||
data_home: "/tmp/greptimedb/".to_string(),
|
||||
..Default::default()
|
||||
|
||||
@@ -14,6 +14,7 @@

pub const SYSTEM_CATALOG_NAME: &str = "system";
pub const INFORMATION_SCHEMA_NAME: &str = "information_schema";
pub const PG_CATALOG_NAME: &str = "pg_catalog";
pub const SYSTEM_CATALOG_TABLE_NAME: &str = "system_catalog";
pub const DEFAULT_CATALOG_NAME: &str = "greptime";
pub const DEFAULT_SCHEMA_NAME: &str = "public";
@@ -93,8 +94,18 @@ pub const INFORMATION_SCHEMA_REGION_PEERS_TABLE_ID: u32 = 29;
pub const INFORMATION_SCHEMA_TABLE_CONSTRAINTS_TABLE_ID: u32 = 30;
/// id for information_schema.cluster_info
pub const INFORMATION_SCHEMA_CLUSTER_INFO_TABLE_ID: u32 = 31;
/// id for information_schema.VIEWS
pub const INFORMATION_SCHEMA_VIEW_TABLE_ID: u32 = 32;
/// id for information_schema.FLOWS
pub const INFORMATION_SCHEMA_FLOW_TABLE_ID: u32 = 33;
/// ----- End of information_schema tables -----

/// ----- Begin of pg_catalog tables -----
pub const PG_CATALOG_PG_CLASS_TABLE_ID: u32 = 256;
pub const PG_CATALOG_PG_TYPE_TABLE_ID: u32 = 257;
pub const PG_CATALOG_PG_NAMESPACE_TABLE_ID: u32 = 258;

/// ----- End of pg_catalog tables -----
pub const MITO_ENGINE: &str = "mito";
pub const MITO2_ENGINE: &str = "mito2";
pub const METRIC_ENGINE: &str = "metric";
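The VIEWS and FLOWS ids above extend the reserved information_schema range. For illustration, a hypothetical helper (not in the codebase) that maps the new ids back to table names, using the constant values from the hunk:

    // Constants copied from the hunk; the lookup helper itself is illustrative only.
    pub const INFORMATION_SCHEMA_CLUSTER_INFO_TABLE_ID: u32 = 31;
    pub const INFORMATION_SCHEMA_VIEW_TABLE_ID: u32 = 32;
    pub const INFORMATION_SCHEMA_FLOW_TABLE_ID: u32 = 33;

    pub fn information_schema_table_name(table_id: u32) -> Option<&'static str> {
        match table_id {
            INFORMATION_SCHEMA_CLUSTER_INFO_TABLE_ID => Some("cluster_info"),
            INFORMATION_SCHEMA_VIEW_TABLE_ID => Some("views"),
            INFORMATION_SCHEMA_FLOW_TABLE_ID => Some("flows"),
            _ => None,
        }
    }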
@@ -185,7 +185,7 @@ impl FileFormat for CsvFormat {
let schema_infer_max_record = self.schema_infer_max_record;
let has_header = self.has_header;

common_runtime::spawn_blocking_read(move || {
common_runtime::spawn_blocking_global(move || {
let reader = SyncIoBridge::new(decoded);

let (schema, _records_read) =

@@ -101,7 +101,7 @@ impl FileFormat for JsonFormat {

let schema_infer_max_record = self.schema_infer_max_record;

common_runtime::spawn_blocking_read(move || {
common_runtime::spawn_blocking_global(move || {
let mut reader = BufReader::new(SyncIoBridge::new(decoded));

let iter = ValueIter::new(&mut reader, schema_infer_max_record);
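Schema inference for CSV and JSON now runs on the general-purpose blocking pool via `spawn_blocking_global`. The sketch below shows the same pattern with Tokio's `spawn_blocking` as a stand-in, since only the closure-plus-await shape is visible in the hunk:

    use std::io::{BufReader, Read};

    // Stand-in for common_runtime::spawn_blocking_global: run synchronous,
    // potentially slow I/O on the blocking pool so executor threads are not stalled.
    async fn read_all_blocking<R>(reader: R) -> std::io::Result<usize>
    where
        R: Read + Send + 'static,
    {
        tokio::task::spawn_blocking(move || {
            let mut buf = Vec::new();
            BufReader::new(reader).read_to_end(&mut buf)?;
            Ok(buf.len())
        })
        .await
        .expect("blocking task panicked")
    }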
@@ -27,12 +27,14 @@ use datafusion::parquet::file::metadata::ParquetMetaData;
use datafusion::parquet::format::FileMetaData;
use datafusion::physical_plan::metrics::ExecutionPlanMetricsSet;
use datafusion::physical_plan::SendableRecordBatchStream;
use datatypes::schema::SchemaRef;
use futures::future::BoxFuture;
use futures::StreamExt;
use object_store::{FuturesAsyncReader, ObjectStore};
use parquet::arrow::AsyncArrowWriter;
use parquet::basic::{Compression, ZstdLevel};
use parquet::file::properties::WriterProperties;
use parquet::basic::{Compression, Encoding, ZstdLevel};
use parquet::file::properties::{WriterProperties, WriterPropertiesBuilder};
use parquet::schema::types::ColumnPath;
use snafu::ResultExt;
use tokio_util::compat::{Compat, FuturesAsyncReadCompatExt, FuturesAsyncWriteCompatExt};
@@ -184,14 +186,16 @@ impl ArrowWriterCloser for ArrowWriter<SharedBuffer> {
/// Returns number of rows written.
pub async fn stream_to_parquet(
mut stream: SendableRecordBatchStream,
schema: datatypes::schema::SchemaRef,
store: ObjectStore,
path: &str,
concurrency: usize,
) -> Result<usize> {
let write_props = WriterProperties::builder()
.set_compression(Compression::ZSTD(ZstdLevel::default()))
.build();
let schema = stream.schema();
let write_props = column_wise_config(
WriterProperties::builder().set_compression(Compression::ZSTD(ZstdLevel::default())),
schema,
)
.build();
let inner_writer = store
.writer_with(path)
.concurrent(concurrency)
@@ -200,7 +204,7 @@ pub async fn stream_to_parquet(
.map(|w| w.into_futures_async_write().compat_write())
.context(WriteObjectSnafu { path })?;

let mut writer = AsyncArrowWriter::try_new(inner_writer, schema, Some(write_props))
let mut writer = AsyncArrowWriter::try_new(inner_writer, stream.schema(), Some(write_props))
.context(WriteParquetSnafu { path })?;
let mut rows_written = 0;
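After this change the Parquet writer takes its schema from the stream itself. A hedged caller sketch, written as if inside the same module and assuming the `schema` parameter shown above was removed while `store`, `path` and `concurrency` keep their meaning:

    use datafusion::physical_plan::SendableRecordBatchStream;
    use object_store::ObjectStore;

    // Assumed post-change signature: stream_to_parquet(stream, store, path, concurrency).
    async fn export_to_parquet(stream: SendableRecordBatchStream, store: ObjectStore) -> Result<usize> {
        // Eight concurrent upload parts; the schema now comes from stream.schema().
        stream_to_parquet(stream, store, "exports/data.parquet", 8).await
    }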
@@ -216,6 +220,24 @@ pub async fn stream_to_parquet(
Ok(rows_written)
}

/// Customizes per-column properties.
fn column_wise_config(
mut props: WriterPropertiesBuilder,
schema: SchemaRef,
) -> WriterPropertiesBuilder {
// Disable dictionary for timestamp column, since for increasing timestamp column,
// the dictionary pages will be larger than data pages.
for col in schema.column_schemas() {
if col.data_type.is_timestamp() {
let path = ColumnPath::new(vec![col.name.clone()]);
props = props
.set_column_dictionary_enabled(path.clone(), false)
.set_column_encoding(path, Encoding::DELTA_BINARY_PACKED)
}
}
props
}
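`column_wise_config` above disables dictionary encoding and switches to `DELTA_BINARY_PACKED` for timestamp columns: with a monotonically increasing timestamp, the dictionary ends up larger than the data pages it was meant to shrink. A standalone sketch of the same parquet-rs builder calls; the `"ts"` column name is made up for the example:

    use parquet::basic::{Compression, Encoding, ZstdLevel};
    use parquet::file::properties::WriterProperties;
    use parquet::schema::types::ColumnPath;

    fn timestamp_friendly_props() -> WriterProperties {
        let ts_col = ColumnPath::new(vec!["ts".to_string()]);
        WriterProperties::builder()
            .set_compression(Compression::ZSTD(ZstdLevel::default()))
            // Dictionary pages only pay off for repeated values; delta-encode the
            // ever-increasing timestamps instead.
            .set_column_dictionary_enabled(ts_col.clone(), false)
            .set_column_encoding(ts_col, Encoding::DELTA_BINARY_PACKED)
            .build()
    }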
#[cfg(test)]
mod tests {
use common_test_util::find_workspace_path;
@@ -31,7 +31,7 @@ pub fn build_fs_backend(root: &str) -> Result<ObjectStore> {
.expect("input error level must be valid"),
)
.layer(object_store::layers::TracingLayer)
.layer(object_store::layers::PrometheusMetricsLayer)
.layer(object_store::layers::PrometheusMetricsLayer::new(true))
.finish();
Ok(object_store)
}

@@ -94,7 +94,7 @@ pub fn build_s3_backend(
.expect("input error level must be valid"),
)
.layer(object_store::layers::TracingLayer)
.layer(object_store::layers::PrometheusMetricsLayer)
.layer(object_store::layers::PrometheusMetricsLayer::new(true))
.finish())
}
@@ -54,8 +54,8 @@ pub enum Error {
impl ErrorExt for Error {
fn status_code(&self) -> StatusCode {
match self {
Error::BigDecimalOutOfRange { .. } => StatusCode::Internal,
Error::ParseRustDecimalStr { .. }
Error::BigDecimalOutOfRange { .. }
| Error::ParseRustDecimalStr { .. }
| Error::InvalidPrecisionOrScale { .. }
| Error::ParseBigDecimalStr { .. } => StatusCode::InvalidArguments,
}
@@ -53,6 +53,20 @@ pub trait ErrorExt: StackError {
}
}
}

/// Find out root level error for nested error
fn root_cause(&self) -> Option<&dyn std::error::Error>
where
Self: Sized,
{
let error = self.last();
if let Some(external_error) = error.source() {
let external_root = external_error.sources().last().unwrap();
Some(external_root)
} else {
None
}
}
}

pub trait StackError: std::error::Error {
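`root_cause` follows the wrapped error's source chain to its deepest external error (via the unstable `Error::sources` iterator, which the crate already relies on). The idea, shown with plain `std::error::Error` and a hypothetical wrapper type:

    use std::error::Error;
    use std::fmt;

    // Hypothetical wrapper error for the illustration.
    #[derive(Debug)]
    struct AppError(std::io::Error);

    impl fmt::Display for AppError {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            write!(f, "app layer failed")
        }
    }

    impl Error for AppError {
        fn source(&self) -> Option<&(dyn Error + 'static)> {
            Some(&self.0)
        }
    }

    // Stable-Rust equivalent of walking `sources()` to the last entry.
    fn root_cause(err: &dyn Error) -> &dyn Error {
        let mut current = err;
        while let Some(next) = current.source() {
            current = next;
        }
        current
    }

    fn main() {
        let err = AppError(std::io::Error::other("disk full"));
        println!("root cause: {}", root_cause(&err)); // prints "disk full"
    }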
@@ -36,6 +36,8 @@ pub enum StatusCode {
|
||||
InvalidArguments = 1004,
|
||||
/// The task is cancelled.
|
||||
Cancelled = 1005,
|
||||
/// Illegal state, can be exposed to users.
|
||||
IllegalState = 1006,
|
||||
// ====== End of common status code ================
|
||||
|
||||
// ====== Begin of SQL related status code =========
|
||||
@@ -53,24 +55,34 @@ pub enum StatusCode {
|
||||
// ====== Begin of catalog related status code =====
|
||||
/// Table already exists.
|
||||
TableAlreadyExists = 4000,
|
||||
/// Table not found.
|
||||
TableNotFound = 4001,
|
||||
/// Table column not found.
|
||||
TableColumnNotFound = 4002,
|
||||
/// Table column already exists.
|
||||
TableColumnExists = 4003,
|
||||
/// Database not found.
|
||||
DatabaseNotFound = 4004,
|
||||
/// Region not found.
|
||||
RegionNotFound = 4005,
|
||||
/// Region already exists.
|
||||
RegionAlreadyExists = 4006,
|
||||
/// Region is read-only in current state.
|
||||
RegionReadonly = 4007,
|
||||
/// Region is not in a proper state to handle specific request.
|
||||
RegionNotReady = 4008,
|
||||
// If mutually exclusive operations are reached at the same time,
|
||||
// only one can be executed, another one will get region busy.
|
||||
/// Region is temporarily in busy state.
|
||||
RegionBusy = 4009,
|
||||
/// Table is temporarily unable to handle the request.
|
||||
TableUnavailable = 4010,
|
||||
/// Database already exists.
|
||||
DatabaseAlreadyExists = 4011,
|
||||
// ====== End of catalog related status code =======
|
||||
|
||||
// ====== Begin of storage related status code =====
|
||||
/// Storage is temporarily unable to handle the request
|
||||
/// Storage is temporarily unable to handle the request.
|
||||
StorageUnavailable = 5000,
|
||||
/// Request is outdated, e.g., version mismatch
|
||||
/// Request is outdated, e.g., version mismatch.
|
||||
RequestOutdated = 5001,
|
||||
// ====== End of storage related status code =======
|
||||
|
||||
@@ -78,24 +90,24 @@ pub enum StatusCode {
|
||||
/// Runtime resources exhausted, like creating threads failed.
|
||||
RuntimeResourcesExhausted = 6000,
|
||||
|
||||
/// Rate limit exceeded
|
||||
/// Rate limit exceeded.
|
||||
RateLimited = 6001,
|
||||
// ====== End of server related status code =======
|
||||
|
||||
// ====== Begin of auth related status code =====
|
||||
/// User not exist
|
||||
/// User not exist.
|
||||
UserNotFound = 7000,
|
||||
/// Unsupported password type
|
||||
/// Unsupported password type.
|
||||
UnsupportedPasswordType = 7001,
|
||||
/// Username and password does not match
|
||||
/// Username and password does not match.
|
||||
UserPasswordMismatch = 7002,
|
||||
/// Not found http authorization header
|
||||
/// Not found http authorization header.
|
||||
AuthHeaderNotFound = 7003,
|
||||
/// Invalid http authorization header
|
||||
/// Invalid http authorization header.
|
||||
InvalidAuthHeader = 7004,
|
||||
/// Illegal request to connect catalog-schema
|
||||
/// Illegal request to connect catalog-schema.
|
||||
AccessDenied = 7005,
|
||||
/// User is not authorized to perform the operation
|
||||
/// User is not authorized to perform the operation.
|
||||
PermissionDenied = 7006,
|
||||
// ====== End of auth related status code =====
|
||||
|
||||
@@ -118,15 +130,18 @@ impl StatusCode {
|
||||
| StatusCode::RuntimeResourcesExhausted
|
||||
| StatusCode::Internal
|
||||
| StatusCode::RegionNotReady
|
||||
| StatusCode::TableUnavailable
|
||||
| StatusCode::RegionBusy => true,
|
||||
|
||||
StatusCode::Success
|
||||
| StatusCode::Unknown
|
||||
| StatusCode::Unsupported
|
||||
| StatusCode::IllegalState
|
||||
| StatusCode::Unexpected
|
||||
| StatusCode::InvalidArguments
|
||||
| StatusCode::Cancelled
|
||||
| StatusCode::InvalidSyntax
|
||||
| StatusCode::DatabaseAlreadyExists
|
||||
| StatusCode::PlanQuery
|
||||
| StatusCode::EngineExecuteQuery
|
||||
| StatusCode::TableAlreadyExists
|
||||
@@ -159,6 +174,7 @@ impl StatusCode {
|
||||
| StatusCode::Unexpected
|
||||
| StatusCode::Internal
|
||||
| StatusCode::Cancelled
|
||||
| StatusCode::IllegalState
|
||||
| StatusCode::EngineExecuteQuery
|
||||
| StatusCode::StorageUnavailable
|
||||
| StatusCode::RuntimeResourcesExhausted => true,
|
||||
@@ -181,6 +197,8 @@ impl StatusCode {
|
||||
| StatusCode::DatabaseNotFound
|
||||
| StatusCode::RateLimited
|
||||
| StatusCode::UserNotFound
|
||||
| StatusCode::TableUnavailable
|
||||
| StatusCode::DatabaseAlreadyExists
|
||||
| StatusCode::UnsupportedPasswordType
|
||||
| StatusCode::UserPasswordMismatch
|
||||
| StatusCode::AuthHeaderNotFound
|
||||
@@ -241,6 +259,7 @@ pub fn status_to_tonic_code(status_code: StatusCode) -> Code {
StatusCode::Unknown => Code::Unknown,
StatusCode::Unsupported => Code::Unimplemented,
StatusCode::Unexpected
| StatusCode::IllegalState
| StatusCode::Internal
| StatusCode::PlanQuery
| StatusCode::EngineExecuteQuery => Code::Internal,
@@ -251,6 +270,7 @@ pub fn status_to_tonic_code(status_code: StatusCode) -> Code {
StatusCode::TableAlreadyExists
| StatusCode::TableColumnExists
| StatusCode::RegionAlreadyExists
| StatusCode::DatabaseAlreadyExists
| StatusCode::FlowAlreadyExists => Code::AlreadyExists,
StatusCode::TableNotFound
| StatusCode::RegionNotFound
@@ -258,7 +278,9 @@ pub fn status_to_tonic_code(status_code: StatusCode) -> Code {
| StatusCode::DatabaseNotFound
| StatusCode::UserNotFound
| StatusCode::FlowNotFound => Code::NotFound,
StatusCode::StorageUnavailable | StatusCode::RegionNotReady => Code::Unavailable,
StatusCode::TableUnavailable
| StatusCode::StorageUnavailable
| StatusCode::RegionNotReady => Code::Unavailable,
StatusCode::RuntimeResourcesExhausted
| StatusCode::RateLimited
| StatusCode::RegionBusy => Code::ResourceExhausted,
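With these additions, `TableUnavailable` now lands in the gRPC `Unavailable` bucket and `DatabaseAlreadyExists` in `AlreadyExists`. A test-style sketch of the mapping (imports of `tonic::Code` and the crate-local items are assumed):

    #[test]
    fn tonic_code_mapping_examples() {
        assert_eq!(Code::Internal, status_to_tonic_code(StatusCode::IllegalState));
        assert_eq!(Code::AlreadyExists, status_to_tonic_code(StatusCode::DatabaseAlreadyExists));
        assert_eq!(Code::Unavailable, status_to_tonic_code(StatusCode::TableUnavailable));
        assert_eq!(Code::ResourceExhausted, status_to_tonic_code(StatusCode::RegionBusy));
    }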
@@ -31,6 +31,7 @@ serde.workspace = true
serde_json.workspace = true
session.workspace = true
snafu.workspace = true
sql.workspace = true
statrs = "0.16"
store-api.workspace = true
table.workspace = true

src/common/function/src/flush_flow.rs (new file, 164 lines)
@@ -0,0 +1,164 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use common_error::ext::BoxedError;
|
||||
use common_macro::admin_fn;
|
||||
use common_query::error::{
|
||||
ExecuteSnafu, InvalidFuncArgsSnafu, MissingFlowServiceHandlerSnafu, Result,
|
||||
UnsupportedInputDataTypeSnafu,
|
||||
};
|
||||
use common_query::prelude::Signature;
|
||||
use datafusion::logical_expr::Volatility;
|
||||
use datatypes::value::{Value, ValueRef};
|
||||
use session::context::QueryContextRef;
|
||||
use snafu::{ensure, ResultExt};
|
||||
use sql::parser::ParserContext;
|
||||
use store_api::storage::ConcreteDataType;
|
||||
|
||||
use crate::handlers::FlowServiceHandlerRef;
|
||||
|
||||
fn flush_signature() -> Signature {
|
||||
Signature::uniform(
|
||||
1,
|
||||
vec![ConcreteDataType::string_datatype()],
|
||||
Volatility::Immutable,
|
||||
)
|
||||
}
|
||||
|
||||
#[admin_fn(
|
||||
name = FlushFlowFunction,
|
||||
display_name = flush_flow,
|
||||
sig_fn = flush_signature,
|
||||
ret = uint64
|
||||
)]
|
||||
pub(crate) async fn flush_flow(
|
||||
flow_service_handler: &FlowServiceHandlerRef,
|
||||
query_ctx: &QueryContextRef,
|
||||
params: &[ValueRef<'_>],
|
||||
) -> Result<Value> {
|
||||
let (catalog_name, flow_name) = parse_flush_flow(params, query_ctx)?;
|
||||
|
||||
let res = flow_service_handler
|
||||
.flush(&catalog_name, &flow_name, query_ctx.clone())
|
||||
.await?;
|
||||
let affected_rows = res.affected_rows;
|
||||
|
||||
Ok(Value::from(affected_rows))
|
||||
}
|
||||
|
||||
fn parse_flush_flow(
|
||||
params: &[ValueRef<'_>],
|
||||
query_ctx: &QueryContextRef,
|
||||
) -> Result<(String, String)> {
|
||||
ensure!(
|
||||
params.len() == 1,
|
||||
InvalidFuncArgsSnafu {
|
||||
err_msg: format!(
|
||||
"The length of the args is not correct, expect 1, have: {}",
|
||||
params.len()
|
||||
),
|
||||
}
|
||||
);
|
||||
|
||||
let ValueRef::String(flow_name) = params[0] else {
|
||||
return UnsupportedInputDataTypeSnafu {
|
||||
function: "flush_flow",
|
||||
datatypes: params.iter().map(|v| v.data_type()).collect::<Vec<_>>(),
|
||||
}
|
||||
.fail();
|
||||
};
|
||||
let obj_name = ParserContext::parse_table_name(flow_name, query_ctx.sql_dialect())
|
||||
.map_err(BoxedError::new)
|
||||
.context(ExecuteSnafu)?;
|
||||
|
||||
let (catalog_name, flow_name) = match &obj_name.0[..] {
|
||||
[flow_name] => (
|
||||
query_ctx.current_catalog().to_string(),
|
||||
flow_name.value.clone(),
|
||||
),
|
||||
[catalog, flow_name] => (catalog.value.clone(), flow_name.value.clone()),
|
||||
_ => {
|
||||
return InvalidFuncArgsSnafu {
|
||||
err_msg: format!(
|
||||
"expect flow name to be <catalog>.<flow-name> or <flow-name>, actual: {}",
|
||||
obj_name
|
||||
),
|
||||
}
|
||||
.fail()
|
||||
}
|
||||
};
|
||||
Ok((catalog_name, flow_name))
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use std::sync::Arc;
|
||||
|
||||
use datatypes::scalars::ScalarVector;
|
||||
use datatypes::vectors::StringVector;
|
||||
use session::context::QueryContext;
|
||||
|
||||
use super::*;
|
||||
use crate::function::{Function, FunctionContext};
|
||||
|
||||
#[test]
|
||||
fn test_flush_flow_metadata() {
|
||||
let f = FlushFlowFunction;
|
||||
assert_eq!("flush_flow", f.name());
|
||||
assert_eq!(
|
||||
ConcreteDataType::uint64_datatype(),
|
||||
f.return_type(&[]).unwrap()
|
||||
);
|
||||
assert_eq!(
|
||||
f.signature(),
|
||||
Signature::uniform(
|
||||
1,
|
||||
vec![ConcreteDataType::string_datatype()],
|
||||
Volatility::Immutable,
|
||||
)
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_missing_flow_service() {
|
||||
let f = FlushFlowFunction;
|
||||
|
||||
let args = vec!["flow_name"];
|
||||
let args = args
|
||||
.into_iter()
|
||||
.map(|arg| Arc::new(StringVector::from_slice(&[arg])) as _)
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
let result = f.eval(FunctionContext::default(), &args).unwrap_err();
|
||||
assert_eq!(
|
||||
"Missing FlowServiceHandler, not expected",
|
||||
result.to_string()
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_flow_args() {
|
||||
let testcases = [
|
||||
("flow_name", ("greptime", "flow_name")),
|
||||
("catalog.flow_name", ("catalog", "flow_name")),
|
||||
];
|
||||
for (input, expected) in testcases.iter() {
|
||||
let args = vec![*input];
|
||||
let args = args.into_iter().map(ValueRef::String).collect::<Vec<_>>();
|
||||
|
||||
let result = parse_flush_flow(&args, &QueryContext::arc()).unwrap();
|
||||
assert_eq!(*expected, (result.0.as_str(), result.1.as_str()));
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -65,6 +65,19 @@ pub trait ProcedureServiceHandler: Send + Sync {
async fn query_procedure_state(&self, pid: &str) -> Result<ProcedureStateResponse>;
}

/// This flow service handler is only use for flush flow for now.
#[async_trait]
pub trait FlowServiceHandler: Send + Sync {
async fn flush(
&self,
catalog: &str,
flow: &str,
ctx: QueryContextRef,
) -> Result<api::v1::flow::FlowResponse>;
}

pub type TableMutationHandlerRef = Arc<dyn TableMutationHandler>;

pub type ProcedureServiceHandlerRef = Arc<dyn ProcedureServiceHandler>;

pub type FlowServiceHandlerRef = Arc<dyn FlowServiceHandler>;
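Implementing the new trait takes a single `flush` method; the sketch below shows a no-op handler and how it would be plugged into `FunctionState` (field name as in the later hunk). The `FlowResponse::default()` call assumes the usual prost-derived `Default`:

    use std::sync::Arc;

    use async_trait::async_trait;
    use session::context::QueryContextRef;

    struct NoopFlowServiceHandler;

    #[async_trait]
    impl FlowServiceHandler for NoopFlowServiceHandler {
        async fn flush(
            &self,
            _catalog: &str,
            _flow: &str,
            _ctx: QueryContextRef,
        ) -> Result<api::v1::flow::FlowResponse> {
            // A real handler forwards this to the flownode; an empty response means
            // flush_flow(...) reports zero affected rows.
            Ok(api::v1::flow::FlowResponse::default())
        }
    }

    fn install(state: &mut FunctionState) {
        state.flow_service_handler = Some(Arc::new(NoopFlowServiceHandler));
    }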
@@ -15,6 +15,7 @@
|
||||
#![feature(let_chains)]
|
||||
#![feature(try_blocks)]
|
||||
|
||||
mod flush_flow;
|
||||
mod macros;
|
||||
pub mod scalars;
|
||||
mod system;
|
||||
|
||||
@@ -19,8 +19,6 @@ use snafu::ResultExt;
|
||||
|
||||
use crate::scalars::expression::ctx::EvalContext;
|
||||
|
||||
/// TODO: remove the allow_unused when it's used.
|
||||
#[allow(unused)]
|
||||
pub fn scalar_unary_op<L: Scalar, O: Scalar, F>(
|
||||
l: &VectorRef,
|
||||
f: F,
|
||||
|
||||
@@ -14,11 +14,9 @@
|
||||
|
||||
use std::sync::Arc;
|
||||
mod greatest;
|
||||
mod to_timezone;
|
||||
mod to_unixtime;
|
||||
|
||||
use greatest::GreatestFunction;
|
||||
use to_timezone::ToTimezoneFunction;
|
||||
use to_unixtime::ToUnixtimeFunction;
|
||||
|
||||
use crate::function_registry::FunctionRegistry;
|
||||
@@ -27,7 +25,6 @@ pub(crate) struct TimestampFunction;
|
||||
|
||||
impl TimestampFunction {
|
||||
pub fn register(registry: &FunctionRegistry) {
|
||||
registry.register(Arc::new(ToTimezoneFunction));
|
||||
registry.register(Arc::new(ToUnixtimeFunction));
|
||||
registry.register(Arc::new(GreatestFunction));
|
||||
}
|
||||
|
||||
@@ -1,313 +0,0 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::fmt;
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_query::error::{InvalidFuncArgsSnafu, Result, UnsupportedInputDataTypeSnafu};
|
||||
use common_query::prelude::Signature;
|
||||
use common_time::{Timestamp, Timezone};
|
||||
use datatypes::data_type::ConcreteDataType;
|
||||
use datatypes::prelude::VectorRef;
|
||||
use datatypes::types::TimestampType;
|
||||
use datatypes::value::Value;
|
||||
use datatypes::vectors::{
|
||||
Int64Vector, StringVector, TimestampMicrosecondVector, TimestampMillisecondVector,
|
||||
TimestampNanosecondVector, TimestampSecondVector, Vector,
|
||||
};
|
||||
use snafu::{ensure, OptionExt};
|
||||
|
||||
use crate::function::{Function, FunctionContext};
|
||||
use crate::helper;
|
||||
|
||||
#[derive(Clone, Debug, Default)]
|
||||
pub struct ToTimezoneFunction;
|
||||
|
||||
const NAME: &str = "to_timezone";
|
||||
|
||||
fn convert_to_timezone(arg: &str) -> Option<Timezone> {
|
||||
Timezone::from_tz_string(arg).ok()
|
||||
}
|
||||
|
||||
fn convert_to_timestamp(arg: &Value) -> Option<Timestamp> {
|
||||
match arg {
|
||||
Value::Timestamp(ts) => Some(*ts),
|
||||
Value::Int64(i) => Some(Timestamp::new_millisecond(*i)),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for ToTimezoneFunction {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(f, "TO_TIMEZONE")
|
||||
}
|
||||
}
|
||||
|
||||
impl Function for ToTimezoneFunction {
|
||||
fn name(&self) -> &str {
|
||||
NAME
|
||||
}
|
||||
|
||||
fn return_type(&self, input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
|
||||
// type checked by signature - MUST BE timestamp
|
||||
Ok(input_types[0].clone())
|
||||
}
|
||||
|
||||
fn signature(&self) -> Signature {
|
||||
helper::one_of_sigs2(
|
||||
vec![
|
||||
ConcreteDataType::int32_datatype(),
|
||||
ConcreteDataType::int64_datatype(),
|
||||
ConcreteDataType::timestamp_second_datatype(),
|
||||
ConcreteDataType::timestamp_millisecond_datatype(),
|
||||
ConcreteDataType::timestamp_microsecond_datatype(),
|
||||
ConcreteDataType::timestamp_nanosecond_datatype(),
|
||||
],
|
||||
vec![ConcreteDataType::string_datatype()],
|
||||
)
|
||||
}
|
||||
|
||||
fn eval(&self, _ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
|
||||
ensure!(
|
||||
columns.len() == 2,
|
||||
InvalidFuncArgsSnafu {
|
||||
err_msg: format!(
|
||||
"The length of the args is not correct, expect exactly 2, have: {}",
|
||||
columns.len()
|
||||
),
|
||||
}
|
||||
);
|
||||
|
||||
let array = columns[0].to_arrow_array();
|
||||
let times = match columns[0].data_type() {
|
||||
ConcreteDataType::Int64(_) | ConcreteDataType::Int32(_) => {
|
||||
let vector = Int64Vector::try_from_arrow_array(array).unwrap();
|
||||
(0..vector.len())
|
||||
.map(|i| convert_to_timestamp(&vector.get(i)))
|
||||
.collect::<Vec<_>>()
|
||||
}
|
||||
ConcreteDataType::Timestamp(ts) => match ts {
|
||||
TimestampType::Second(_) => {
|
||||
let vector = TimestampSecondVector::try_from_arrow_array(array).unwrap();
|
||||
(0..vector.len())
|
||||
.map(|i| convert_to_timestamp(&vector.get(i)))
|
||||
.collect::<Vec<_>>()
|
||||
}
|
||||
TimestampType::Millisecond(_) => {
|
||||
let vector = TimestampMillisecondVector::try_from_arrow_array(array).unwrap();
|
||||
(0..vector.len())
|
||||
.map(|i| convert_to_timestamp(&vector.get(i)))
|
||||
.collect::<Vec<_>>()
|
||||
}
|
||||
TimestampType::Microsecond(_) => {
|
||||
let vector = TimestampMicrosecondVector::try_from_arrow_array(array).unwrap();
|
||||
(0..vector.len())
|
||||
.map(|i| convert_to_timestamp(&vector.get(i)))
|
||||
.collect::<Vec<_>>()
|
||||
}
|
||||
TimestampType::Nanosecond(_) => {
|
||||
let vector = TimestampNanosecondVector::try_from_arrow_array(array).unwrap();
|
||||
(0..vector.len())
|
||||
.map(|i| convert_to_timestamp(&vector.get(i)))
|
||||
.collect::<Vec<_>>()
|
||||
}
|
||||
},
|
||||
_ => UnsupportedInputDataTypeSnafu {
|
||||
function: NAME,
|
||||
datatypes: columns.iter().map(|c| c.data_type()).collect::<Vec<_>>(),
|
||||
}
|
||||
.fail()?,
|
||||
};
|
||||
|
||||
let tzs = {
|
||||
let array = columns[1].to_arrow_array();
|
||||
let vector = StringVector::try_from_arrow_array(&array)
|
||||
.ok()
|
||||
.with_context(|| UnsupportedInputDataTypeSnafu {
|
||||
function: NAME,
|
||||
datatypes: columns.iter().map(|c| c.data_type()).collect::<Vec<_>>(),
|
||||
})?;
|
||||
(0..vector.len())
|
||||
.map(|i| convert_to_timezone(&vector.get(i).to_string()))
|
||||
.collect::<Vec<_>>()
|
||||
};
|
||||
|
||||
let result = times
|
||||
.iter()
|
||||
.zip(tzs.iter())
|
||||
.map(|(time, tz)| match (time, tz) {
|
||||
(Some(time), _) => Some(time.to_timezone_aware_string(tz.as_ref())),
|
||||
_ => None,
|
||||
})
|
||||
.collect::<Vec<Option<String>>>();
|
||||
Ok(Arc::new(StringVector::from(result)))
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
|
||||
use datatypes::scalars::ScalarVector;
|
||||
use datatypes::timestamp::{
|
||||
TimestampMicrosecond, TimestampMillisecond, TimestampNanosecond, TimestampSecond,
|
||||
};
|
||||
use datatypes::vectors::{Int64Vector, StringVector};
|
||||
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_timestamp_to_timezone() {
|
||||
let f = ToTimezoneFunction;
|
||||
assert_eq!("to_timezone", f.name());
|
||||
|
||||
let results = vec![
|
||||
Some("1969-12-31 19:00:01"),
|
||||
None,
|
||||
Some("1970-01-01 03:00:01"),
|
||||
None,
|
||||
];
|
||||
let times: Vec<Option<TimestampSecond>> = vec![
|
||||
Some(TimestampSecond::new(1)),
|
||||
None,
|
||||
Some(TimestampSecond::new(1)),
|
||||
None,
|
||||
];
|
||||
let ts_vector: TimestampSecondVector =
|
||||
TimestampSecondVector::from_owned_iterator(times.into_iter());
|
||||
let tzs = vec![Some("America/New_York"), None, Some("Europe/Moscow"), None];
|
||||
let args: Vec<VectorRef> = vec![
|
||||
Arc::new(ts_vector),
|
||||
Arc::new(StringVector::from(tzs.clone())),
|
||||
];
|
||||
let vector = f.eval(FunctionContext::default(), &args).unwrap();
|
||||
assert_eq!(4, vector.len());
|
||||
let expect_times: VectorRef = Arc::new(StringVector::from(results));
|
||||
assert_eq!(expect_times, vector);
|
||||
|
||||
let results = vec![
|
||||
Some("1969-12-31 19:00:00.001"),
|
||||
None,
|
||||
Some("1970-01-01 03:00:00.001"),
|
||||
None,
|
||||
];
|
||||
let times: Vec<Option<TimestampMillisecond>> = vec![
|
||||
Some(TimestampMillisecond::new(1)),
|
||||
None,
|
||||
Some(TimestampMillisecond::new(1)),
|
||||
None,
|
||||
];
|
||||
let ts_vector: TimestampMillisecondVector =
|
||||
TimestampMillisecondVector::from_owned_iterator(times.into_iter());
|
||||
let args: Vec<VectorRef> = vec![
|
||||
Arc::new(ts_vector),
|
||||
Arc::new(StringVector::from(tzs.clone())),
|
||||
];
|
||||
let vector = f.eval(FunctionContext::default(), &args).unwrap();
|
||||
assert_eq!(4, vector.len());
|
||||
let expect_times: VectorRef = Arc::new(StringVector::from(results));
|
||||
assert_eq!(expect_times, vector);
|
||||
|
||||
let results = vec![
|
||||
Some("1969-12-31 19:00:00.000001"),
|
||||
None,
|
||||
Some("1970-01-01 03:00:00.000001"),
|
||||
None,
|
||||
];
|
||||
let times: Vec<Option<TimestampMicrosecond>> = vec![
|
||||
Some(TimestampMicrosecond::new(1)),
|
||||
None,
|
||||
Some(TimestampMicrosecond::new(1)),
|
||||
None,
|
||||
];
|
||||
let ts_vector: TimestampMicrosecondVector =
|
||||
TimestampMicrosecondVector::from_owned_iterator(times.into_iter());
|
||||
|
||||
let args: Vec<VectorRef> = vec![
|
||||
Arc::new(ts_vector),
|
||||
Arc::new(StringVector::from(tzs.clone())),
|
||||
];
|
||||
let vector = f.eval(FunctionContext::default(), &args).unwrap();
|
||||
assert_eq!(4, vector.len());
|
||||
let expect_times: VectorRef = Arc::new(StringVector::from(results));
|
||||
assert_eq!(expect_times, vector);
|
||||
|
||||
let results = vec![
|
||||
Some("1969-12-31 19:00:00.000000001"),
|
||||
None,
|
||||
Some("1970-01-01 03:00:00.000000001"),
|
||||
None,
|
||||
];
|
||||
let times: Vec<Option<TimestampNanosecond>> = vec![
|
||||
Some(TimestampNanosecond::new(1)),
|
||||
None,
|
||||
Some(TimestampNanosecond::new(1)),
|
||||
None,
|
||||
];
|
||||
let ts_vector: TimestampNanosecondVector =
|
||||
TimestampNanosecondVector::from_owned_iterator(times.into_iter());
|
||||
|
||||
let args: Vec<VectorRef> = vec![
|
||||
Arc::new(ts_vector),
|
||||
Arc::new(StringVector::from(tzs.clone())),
|
||||
];
|
||||
let vector = f.eval(FunctionContext::default(), &args).unwrap();
|
||||
assert_eq!(4, vector.len());
|
||||
let expect_times: VectorRef = Arc::new(StringVector::from(results));
|
||||
assert_eq!(expect_times, vector);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_numerical_to_timezone() {
|
||||
let f = ToTimezoneFunction;
|
||||
let results = vec![
|
||||
Some("1969-12-31 19:00:00.001"),
|
||||
None,
|
||||
Some("1970-01-01 03:00:00.001"),
|
||||
None,
|
||||
Some("2024-03-26 23:01:50"),
|
||||
None,
|
||||
Some("2024-03-27 06:02:00"),
|
||||
None,
|
||||
];
|
||||
let times: Vec<Option<i64>> = vec![
|
||||
Some(1),
|
||||
None,
|
||||
Some(1),
|
||||
None,
|
||||
Some(1711508510000),
|
||||
None,
|
||||
Some(1711508520000),
|
||||
None,
|
||||
];
|
||||
let ts_vector: Int64Vector = Int64Vector::from_owned_iterator(times.into_iter());
|
||||
let tzs = vec![
|
||||
Some("America/New_York"),
|
||||
None,
|
||||
Some("Europe/Moscow"),
|
||||
None,
|
||||
Some("America/New_York"),
|
||||
None,
|
||||
Some("Europe/Moscow"),
|
||||
None,
|
||||
];
|
||||
let args: Vec<VectorRef> = vec![
|
||||
Arc::new(ts_vector),
|
||||
Arc::new(StringVector::from(tzs.clone())),
|
||||
];
|
||||
let vector = f.eval(FunctionContext::default(), &args).unwrap();
|
||||
assert_eq!(8, vector.len());
|
||||
let expect_times: VectorRef = Arc::new(StringVector::from(results));
|
||||
assert_eq!(expect_times, vector);
|
||||
}
|
||||
}
|
||||
@@ -12,7 +12,7 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::handlers::{ProcedureServiceHandlerRef, TableMutationHandlerRef};
|
||||
use crate::handlers::{FlowServiceHandlerRef, ProcedureServiceHandlerRef, TableMutationHandlerRef};
|
||||
|
||||
/// Shared state for SQL functions.
|
||||
/// The handlers in state may be `None` in cli command-line or test cases.
|
||||
@@ -22,6 +22,8 @@ pub struct FunctionState {
|
||||
pub table_mutation_handler: Option<TableMutationHandlerRef>,
|
||||
// The procedure service handler
|
||||
pub procedure_service_handler: Option<ProcedureServiceHandlerRef>,
|
||||
// The flownode handler
|
||||
pub flow_service_handler: Option<FlowServiceHandlerRef>,
|
||||
}
|
||||
|
||||
impl FunctionState {
|
||||
@@ -42,9 +44,10 @@ impl FunctionState {
|
||||
CompactTableRequest, DeleteRequest, FlushTableRequest, InsertRequest,
|
||||
};
|
||||
|
||||
use crate::handlers::{ProcedureServiceHandler, TableMutationHandler};
|
||||
use crate::handlers::{FlowServiceHandler, ProcedureServiceHandler, TableMutationHandler};
|
||||
struct MockProcedureServiceHandler;
|
||||
struct MockTableMutationHandler;
|
||||
struct MockFlowServiceHandler;
|
||||
const ROWS: usize = 42;
|
||||
|
||||
#[async_trait]
|
||||
@@ -116,9 +119,22 @@ impl FunctionState {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl FlowServiceHandler for MockFlowServiceHandler {
|
||||
async fn flush(
|
||||
&self,
|
||||
_catalog: &str,
|
||||
_flow: &str,
|
||||
_ctx: QueryContextRef,
|
||||
) -> Result<api::v1::flow::FlowResponse> {
|
||||
todo!()
|
||||
}
|
||||
}
|
||||
|
||||
Self {
|
||||
table_mutation_handler: Some(Arc::new(MockTableMutationHandler)),
|
||||
procedure_service_handler: Some(Arc::new(MockProcedureServiceHandler)),
|
||||
flow_service_handler: Some(Arc::new(MockFlowServiceHandler)),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -14,6 +14,7 @@
|
||||
|
||||
mod build;
|
||||
mod database;
|
||||
mod pg_catalog;
|
||||
mod procedure_state;
|
||||
mod timezone;
|
||||
mod version;
|
||||
@@ -22,6 +23,7 @@ use std::sync::Arc;
|
||||
|
||||
use build::BuildFunction;
|
||||
use database::DatabaseFunction;
|
||||
use pg_catalog::PGCatalogFunction;
|
||||
use procedure_state::ProcedureStateFunction;
|
||||
use timezone::TimezoneFunction;
|
||||
use version::VersionFunction;
|
||||
@@ -37,5 +39,6 @@ impl SystemFunction {
|
||||
registry.register(Arc::new(DatabaseFunction));
|
||||
registry.register(Arc::new(TimezoneFunction));
|
||||
registry.register(Arc::new(ProcedureStateFunction));
|
||||
PGCatalogFunction::register(registry);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -12,22 +12,28 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
mod pg_get_userbyid;
|
||||
mod table_is_visible;
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use datafusion::physical_plan::ExecutionPlan;
|
||||
use pg_get_userbyid::PGGetUserByIdFunction;
|
||||
use table_is_visible::PGTableIsVisibleFunction;
|
||||
|
||||
use crate::error::Result;
|
||||
use crate::plan::LogicalPlan;
|
||||
use crate::query_engine::QueryEngineContext;
|
||||
use crate::function_registry::FunctionRegistry;
|
||||
|
||||
/// Physical query planner that converts a `LogicalPlan` to an
|
||||
/// `ExecutionPlan` suitable for execution.
|
||||
#[async_trait::async_trait]
|
||||
pub trait PhysicalPlanner {
|
||||
/// Create a physical plan from a logical plan
|
||||
async fn create_physical_plan(
|
||||
&self,
|
||||
ctx: &mut QueryEngineContext,
|
||||
logical_plan: &LogicalPlan,
|
||||
) -> Result<Arc<dyn ExecutionPlan>>;
|
||||
#[macro_export]
|
||||
macro_rules! pg_catalog_func_fullname {
|
||||
($name:literal) => {
|
||||
concat!("pg_catalog.", $name)
|
||||
};
|
||||
}
|
||||
|
||||
pub(super) struct PGCatalogFunction;
|
||||
|
||||
impl PGCatalogFunction {
|
||||
pub fn register(registry: &FunctionRegistry) {
|
||||
registry.register(Arc::new(PGTableIsVisibleFunction));
|
||||
registry.register(Arc::new(PGGetUserByIdFunction));
|
||||
}
|
||||
}
|
||||
src/common/function/src/system/pg_catalog/pg_get_userbyid.rs (new file, 72 lines)
@@ -0,0 +1,72 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::fmt::{self};
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_query::error::Result;
|
||||
use common_query::prelude::{Signature, Volatility};
|
||||
use datatypes::prelude::{ConcreteDataType, DataType, VectorRef};
|
||||
use datatypes::types::LogicalPrimitiveType;
|
||||
use datatypes::with_match_primitive_type_id;
|
||||
use num_traits::AsPrimitive;
|
||||
|
||||
use crate::function::{Function, FunctionContext};
|
||||
use crate::scalars::expression::{scalar_unary_op, EvalContext};
|
||||
|
||||
#[derive(Clone, Debug, Default)]
|
||||
pub struct PGGetUserByIdFunction;
|
||||
|
||||
const NAME: &str = crate::pg_catalog_func_fullname!("pg_get_userbyid");
|
||||
|
||||
impl fmt::Display for PGGetUserByIdFunction {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(f, crate::pg_catalog_func_fullname!("PG_GET_USERBYID"))
|
||||
}
|
||||
}
|
||||
|
||||
impl Function for PGGetUserByIdFunction {
|
||||
fn name(&self) -> &str {
|
||||
NAME
|
||||
}
|
||||
|
||||
fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
|
||||
Ok(ConcreteDataType::string_datatype())
|
||||
}
|
||||
|
||||
fn signature(&self) -> Signature {
|
||||
Signature::uniform(
|
||||
1,
|
||||
vec![ConcreteDataType::uint32_datatype()],
|
||||
Volatility::Immutable,
|
||||
)
|
||||
}
|
||||
|
||||
fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
|
||||
with_match_primitive_type_id!(columns[0].data_type().logical_type_id(), |$T| {
|
||||
let col = scalar_unary_op::<<$T as LogicalPrimitiveType>::Native, String, _>(&columns[0], pg_get_user_by_id, &mut EvalContext::default())?;
|
||||
Ok(Arc::new(col))
|
||||
}, {
|
||||
unreachable!()
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
fn pg_get_user_by_id<I>(table_oid: Option<I>, _ctx: &mut EvalContext) -> Option<String>
|
||||
where
|
||||
I: AsPrimitive<u32>,
|
||||
{
|
||||
// TODO(J0HN50N133): We lack way to get the user_info by a numeric value. Once we have it, we can implement this function.
|
||||
table_oid.map(|_| "".to_string())
|
||||
}
|
||||
@@ -0,0 +1,72 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::fmt::{self};
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_query::error::Result;
|
||||
use common_query::prelude::{Signature, Volatility};
|
||||
use datatypes::prelude::{ConcreteDataType, DataType, VectorRef};
|
||||
use datatypes::types::LogicalPrimitiveType;
|
||||
use datatypes::with_match_primitive_type_id;
|
||||
use num_traits::AsPrimitive;
|
||||
|
||||
use crate::function::{Function, FunctionContext};
|
||||
use crate::scalars::expression::{scalar_unary_op, EvalContext};
|
||||
|
||||
#[derive(Clone, Debug, Default)]
|
||||
pub struct PGTableIsVisibleFunction;
|
||||
|
||||
const NAME: &str = crate::pg_catalog_func_fullname!("pg_table_is_visible");
|
||||
|
||||
impl fmt::Display for PGTableIsVisibleFunction {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(f, crate::pg_catalog_func_fullname!("PG_TABLE_IS_VISIBLE"))
|
||||
}
|
||||
}
|
||||
|
||||
impl Function for PGTableIsVisibleFunction {
|
||||
fn name(&self) -> &str {
|
||||
NAME
|
||||
}
|
||||
|
||||
fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
|
||||
Ok(ConcreteDataType::boolean_datatype())
|
||||
}
|
||||
|
||||
fn signature(&self) -> Signature {
|
||||
Signature::uniform(
|
||||
1,
|
||||
vec![ConcreteDataType::uint32_datatype()],
|
||||
Volatility::Immutable,
|
||||
)
|
||||
}
|
||||
|
||||
fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
|
||||
with_match_primitive_type_id!(columns[0].data_type().logical_type_id(), |$T| {
|
||||
let col = scalar_unary_op::<<$T as LogicalPrimitiveType>::Native, bool, _>(&columns[0], pg_table_is_visible, &mut EvalContext::default())?;
|
||||
Ok(Arc::new(col))
|
||||
}, {
|
||||
unreachable!()
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
fn pg_table_is_visible<I>(table_oid: Option<I>, _ctx: &mut EvalContext) -> Option<bool>
|
||||
where
|
||||
I: AsPrimitive<u32>,
|
||||
{
|
||||
// There is no table visibility in greptime, so we always return true
|
||||
table_oid.map(|_| true)
|
||||
}
|
||||
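Both pg_catalog shims take a `UInt32` oid column and return a constant answer; `pg_table_is_visible` always says `true` since GreptimeDB has no per-table visibility. A test-style usage sketch (module paths assumed, written as if inside the crate's tests):

    use std::sync::Arc;

    use datatypes::prelude::VectorRef;
    use datatypes::vectors::UInt32Vector;

    use crate::function::{Function, FunctionContext};

    #[test]
    fn pg_table_is_visible_always_true() {
        let f = PGTableIsVisibleFunction;
        let args: Vec<VectorRef> = vec![Arc::new(UInt32Vector::from_slice(&[1u32, 2, 3]))];
        let result = f.eval(FunctionContext::default(), &args).unwrap();
        // Every oid is reported as visible.
        assert_eq!(3, result.len());
    }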
@@ -12,26 +12,19 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use std::fmt;

use api::v1::meta::ProcedureStatus;
use common_macro::admin_fn;
use common_meta::rpc::procedure::ProcedureStateResponse;
use common_query::error::Error::ThreadJoin;
use common_query::error::{
    InvalidFuncArgsSnafu, MissingProcedureServiceHandlerSnafu, Result,
    UnsupportedInputDataTypeSnafu,
};
use common_query::prelude::{Signature, Volatility};
use common_telemetry::error;
use datatypes::prelude::*;
use datatypes::vectors::VectorRef;
use serde::Serialize;
use session::context::QueryContextRef;
use snafu::{ensure, Location, OptionExt};
use snafu::ensure;

use crate::ensure_greptime;
use crate::function::{Function, FunctionContext};
use crate::handlers::ProcedureServiceHandlerRef;

#[derive(Serialize)]
@@ -103,6 +96,7 @@ mod tests {
    use datatypes::vectors::StringVector;

    use super::*;
    use crate::function::{Function, FunctionContext};

    #[test]
    fn test_procedure_state_misc() {
@@ -22,6 +22,7 @@ use flush_compact_region::{CompactRegionFunction, FlushRegionFunction};
use flush_compact_table::{CompactTableFunction, FlushTableFunction};
use migrate_region::MigrateRegionFunction;

use crate::flush_flow::FlushFlowFunction;
use crate::function_registry::FunctionRegistry;

/// Table functions
@@ -35,5 +36,6 @@ impl TableFunction {
        registry.register(Arc::new(CompactRegionFunction));
        registry.register(Arc::new(FlushTableFunction));
        registry.register(Arc::new(CompactTableFunction));
        registry.register(Arc::new(FlushFlowFunction));
    }
}
@@ -12,23 +12,16 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use std::fmt;

use common_macro::admin_fn;
use common_query::error::Error::ThreadJoin;
use common_query::error::{
    InvalidFuncArgsSnafu, MissingTableMutationHandlerSnafu, Result, UnsupportedInputDataTypeSnafu,
};
use common_query::prelude::{Signature, Volatility};
use common_telemetry::error;
use datatypes::prelude::*;
use datatypes::vectors::VectorRef;
use session::context::QueryContextRef;
use snafu::{ensure, Location, OptionExt};
use snafu::ensure;
use store_api::storage::RegionId;

use crate::ensure_greptime;
use crate::function::{Function, FunctionContext};
use crate::handlers::TableMutationHandlerRef;
use crate::helper::cast_u64;
@@ -84,6 +77,7 @@ mod tests {
    use datatypes::vectors::UInt64Vector;

    use super::*;
    use crate::function::{Function, FunctionContext};

    macro_rules! define_region_function_test {
        ($name: ident, $func: ident) => {
@@ -12,28 +12,23 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use std::fmt;
use std::str::FromStr;

use api::v1::region::{compact_request, StrictWindow};
use common_error::ext::BoxedError;
use common_macro::admin_fn;
use common_query::error::Error::ThreadJoin;
use common_query::error::{
    InvalidFuncArgsSnafu, MissingTableMutationHandlerSnafu, Result, TableMutationSnafu,
    UnsupportedInputDataTypeSnafu,
};
use common_query::prelude::{Signature, Volatility};
use common_telemetry::{error, info};
use common_telemetry::info;
use datatypes::prelude::*;
use datatypes::vectors::VectorRef;
use session::context::QueryContextRef;
use session::table_name::table_name_to_full_name;
use snafu::{ensure, Location, OptionExt, ResultExt};
use snafu::{ensure, ResultExt};
use table::requests::{CompactTableRequest, FlushTableRequest};

use crate::ensure_greptime;
use crate::function::{Function, FunctionContext};
use crate::handlers::TableMutationHandlerRef;

/// Compact type: strict window.
@@ -209,6 +204,7 @@ mod tests {
    use session::context::QueryContext;

    use super::*;
    use crate::function::{Function, FunctionContext};

    macro_rules! define_table_function_test {
        ($name: ident, $func: ident) => {
@@ -12,24 +12,16 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use std::fmt::{self};
use std::time::Duration;

use common_macro::admin_fn;
use common_meta::rpc::procedure::MigrateRegionRequest;
use common_query::error::Error::ThreadJoin;
use common_query::error::{InvalidFuncArgsSnafu, MissingProcedureServiceHandlerSnafu, Result};
use common_query::prelude::{Signature, TypeSignature, Volatility};
use common_telemetry::error;
use datatypes::data_type::DataType;
use datatypes::prelude::ConcreteDataType;
use datatypes::value::{Value, ValueRef};
use datatypes::vectors::VectorRef;
use session::context::QueryContextRef;
use snafu::{Location, OptionExt};

use crate::ensure_greptime;
use crate::function::{Function, FunctionContext};
use crate::handlers::ProcedureServiceHandlerRef;
use crate::helper::cast_u64;
@@ -128,9 +120,10 @@ mod tests {
    use std::sync::Arc;

    use common_query::prelude::TypeSignature;
    use datatypes::vectors::{StringVector, UInt64Vector};
    use datatypes::vectors::{StringVector, UInt64Vector, VectorRef};

    use super::*;
    use crate::function::{Function, FunctionContext};

    #[test]
    fn test_migrate_region_misc() {
@@ -11,6 +11,7 @@ workspace = true
async-trait.workspace = true
common-runtime.workspace = true
common-telemetry.workspace = true
common-version.workspace = true
reqwest.workspace = true
serde.workspace = true
tokio.workspace = true
@@ -20,6 +21,3 @@ uuid.workspace = true
common-test-util.workspace = true
hyper = { version = "0.14", features = ["full"] }
tempfile.workspace = true

[build-dependencies]
common-version.workspace = true
@@ -22,6 +22,7 @@ use std::time::Duration;
use common_runtime::error::{Error, Result};
use common_runtime::{BoxedTaskFunction, RepeatedTask, TaskFunction};
use common_telemetry::{debug, info};
use common_version::build_info;
use reqwest::{Client, Response};
use serde::{Deserialize, Serialize};
@@ -71,7 +72,7 @@ impl GreptimeDBTelemetryTask {
        match self {
            GreptimeDBTelemetryTask::Enable((task, _)) => {
                print_anonymous_usage_data_disclaimer();
                task.start(common_runtime::bg_runtime())
                task.start(common_runtime::global_runtime())
            }
            GreptimeDBTelemetryTask::Disable => Ok(()),
        }
@@ -114,11 +115,11 @@ pub enum Mode {
#[async_trait::async_trait]
pub trait Collector {
    fn get_version(&self) -> String {
        env!("CARGO_PKG_VERSION").to_string()
        build_info().version.to_string()
    }

    fn get_git_hash(&self) -> String {
        env!("GIT_COMMIT").to_string()
        build_info().commit.to_string()
    }

    fn get_os(&self) -> String {
@@ -286,6 +287,7 @@ mod tests {
    use std::time::Duration;

    use common_test_util::ports;
    use common_version::build_info;
    use hyper::service::{make_service_fn, service_fn};
    use hyper::Server;
    use reqwest::{Client, Response};
@@ -431,8 +433,8 @@ mod tests {
        let body = response.json::<StatisticData>().await.unwrap();
        assert_eq!(env::consts::ARCH, body.arch);
        assert_eq!(env::consts::OS, body.os);
        assert_eq!(env!("CARGO_PKG_VERSION"), body.version);
        assert_eq!(env!("GIT_COMMIT"), body.git_commit);
        assert_eq!(build_info().version, body.version);
        assert_eq!(build_info().commit, body.git_commit);
        assert_eq!(Mode::Standalone, body.mode);
        assert_eq!(1, body.nodes.unwrap());
@@ -225,7 +225,7 @@ impl ChannelManager {
        }

        let pool = self.pool.clone();
        let _handle = common_runtime::spawn_bg(async {
        let _handle = common_runtime::spawn_global(async {
            recycle_channel_in_loop(pool, RECYCLE_CHANNEL_INTERVAL_SECS).await;
        });
        info!(
@@ -153,6 +153,7 @@ fn build_struct(
    let ret = Ident::new(&format!("{ret}_datatype"), ret.span());
    let uppcase_display_name = display_name.to_uppercase();
    // Get the handler name in function state by the argument ident
    // TODO(discord9): consider simple depend injection if more handlers are needed
    let (handler, snafu_type) = match handler_type.to_string().as_str() {
        "ProcedureServiceHandlerRef" => (
            Ident::new("procedure_service_handler", handler_type.span()),
@@ -163,6 +164,11 @@
            Ident::new("table_mutation_handler", handler_type.span()),
            Ident::new("MissingTableMutationHandlerSnafu", handler_type.span()),
        ),

        "FlowServiceHandlerRef" => (
            Ident::new("flow_service_handler", handler_type.span()),
            Ident::new("MissingFlowServiceHandlerSnafu", handler_type.span()),
        ),
        handler => ok!(error!(
            handler_type.span(),
            format!("Unknown handler type: {handler}")
@@ -174,29 +180,29 @@
        #[derive(Debug)]
        #vis struct #name;

        impl fmt::Display for #name {
            fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        impl std::fmt::Display for #name {
            fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
                write!(f, #uppcase_display_name)
            }
        }

        impl Function for #name {
        impl crate::function::Function for #name {
            fn name(&self) -> &'static str {
                #display_name
            }

            fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
                Ok(ConcreteDataType::#ret())
            fn return_type(&self, _input_types: &[store_api::storage::ConcreteDataType]) -> common_query::error::Result<store_api::storage::ConcreteDataType> {
                Ok(store_api::storage::ConcreteDataType::#ret())
            }

            fn signature(&self) -> Signature {
                #sig_fn()
            }

            fn eval(&self, func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
            fn eval(&self, func_ctx: crate::function::FunctionContext, columns: &[datatypes::vectors::VectorRef]) -> common_query::error::Result<datatypes::vectors::VectorRef> {
                // Ensure under the `greptime` catalog for security
                ensure_greptime!(func_ctx);
                crate::ensure_greptime!(func_ctx);

                let columns_num = columns.len();
                let rows_num = if columns.is_empty() {
@@ -208,6 +214,9 @@ fn build_struct(

                // TODO(dennis): DataFusion doesn't support async UDF currently
                std::thread::spawn(move || {
                    use snafu::OptionExt;
                    use datatypes::data_type::DataType;

                    let query_ctx = &func_ctx.query_ctx;
                    let handler = func_ctx
                        .state
@@ -215,11 +224,11 @@ fn build_struct(
                        .as_ref()
                        .context(#snafu_type)?;

                    let mut builder = ConcreteDataType::#ret()
                    let mut builder = store_api::storage::ConcreteDataType::#ret()
                        .create_mutable_vector(rows_num);

                    if columns_num == 0 {
                        let result = common_runtime::block_on_read(async move {
                        let result = common_runtime::block_on_global(async move {
                            #fn_name(handler, query_ctx, &[]).await
                        })?;
@@ -230,7 +239,7 @@ fn build_struct(
                            .map(|vector| vector.get_ref(i))
                            .collect();

                        let result = common_runtime::block_on_read(async move {
                        let result = common_runtime::block_on_global(async move {
                            #fn_name(handler, query_ctx, &args).await
                        })?;
@@ -242,9 +251,9 @@ fn build_struct(
                })
                .join()
                .map_err(|e| {
                    error!(e; "Join thread error");
                    ThreadJoin {
                        location: Location::default(),
                    common_telemetry::error!(e; "Join thread error");
                    common_query::error::Error::ThreadJoin {
                        location: snafu::Location::default(),
                    }
                })?
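For context, the eval body generated by the admin_fn macro above wraps an async admin handler in a synchronous Function: it spawns a plain std::thread, blocks that thread on the future with common_runtime::block_on_global, then joins the thread and maps a join failure to Error::ThreadJoin. A minimal sketch of that pattern, using only the calls visible in this diff (the u64 payload and the handler body are placeholders, not the macro's actual output):

fn call_handler_blocking() -> common_query::error::Result<u64> {
    std::thread::spawn(move || {
        // Block the helper thread, not the caller's runtime thread, on the async work.
        common_runtime::block_on_global(async move {
            // ... await the real admin handler here ...
            Ok(42u64)
        })
    })
    .join()
    .map_err(|e| {
        common_telemetry::error!(e; "Join thread error");
        common_query::error::Error::ThreadJoin {
            location: snafu::Location::default(),
        }
    })?
}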
Some files were not shown because too many files have changed in this diff.