Mirror of https://github.com/GreptimeTeam/greptimedb.git, synced 2026-01-04 12:22:55 +00:00

Compare commits: 15 commits, v0.1.0-alp ... v0.1.0-alp
| SHA1 |
|---|
| 4085fc7899 |
| 30940e692a |
| b371ce0f48 |
| ac7f52d303 |
| 051768b735 |
| c5b0d2431f |
| 4038dd4067 |
| 8be0f05570 |
| 69f06eec8b |
| 7b37e99a45 |
| c09775d17f |
| 4a9cf49637 |
| 9f865b50ab |
| b407ebf6bb |
| c144a1b20e |
.github/workflows/develop.yml (vendored): 7 lines changed

@@ -26,6 +26,13 @@ env:
   RUST_TOOLCHAIN: nightly-2022-07-14
 
 jobs:
+  typos:
+    name: Spell Check with Typos
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v2
+      - uses: crate-ci/typos@v1.0.4
+
   check:
     name: Check
     if: github.event.pull_request.draft == false
.github/workflows/release.yml (vendored): 32 lines changed

@@ -3,9 +3,8 @@ on:
   tags:
     - "v*.*.*"
   schedule:
-    # At 00:00 Everyday
-    # https://crontab.guru/every-day-at-midnight
-    - cron: '0 0 * * *'
+    # At 00:00 on Monday.
+    - cron: '0 0 * * 1'
   workflow_dispatch:
 
 name: Release
@@ -14,7 +13,10 @@ env:
   RUST_TOOLCHAIN: nightly-2022-07-14
 
   # FIXME(zyy17): Would be better to use `gh release list -L 1 | cut -f 3` to get the latest release version tag, but for a long time, we will stay at 'v0.1.0-alpha-*'.
-  NIGHTLY_BUILD_VERSION_PREFIX: v0.1.0-alpha
+  SCHEDULED_BUILD_VERSION_PREFIX: v0.1.0-alpha
+
+  # In the future, we can change SCHEDULED_PERIOD to nightly.
+  SCHEDULED_PERIOD: weekly
 
 jobs:
   build:
@@ -113,25 +115,25 @@ jobs:
       - name: Download artifacts
         uses: actions/download-artifact@v3
 
-      - name: Configure nightly build version # the version would be ${NIGHTLY_BUILD_VERSION_PREFIX}-YYYYMMDD-nightly, like v0.1.0-alpha-20221119-nightly.
+      - name: Configure scheduled build version # the version would be ${SCHEDULED_BUILD_VERSION_PREFIX}-YYYYMMDD-${SCHEDULED_PERIOD}, like v0.1.0-alpha-20221119-weekly.
        shell: bash
        if: github.event_name == 'schedule'
        run: |
          buildTime=`date "+%Y%m%d"`
-          NIGHTLY_VERSION=${{ env.NIGHTLY_BUILD_VERSION_PREFIX }}-$buildTime-nightly
-          echo "NIGHTLY_VERSION=${NIGHTLY_VERSION}" >> $GITHUB_ENV
+          SCHEDULED_BUILD_VERSION=${{ env.SCHEDULED_BUILD_VERSION_PREFIX }}-$buildTime-${{ env.SCHEDULED_PERIOD }}
+          echo "SCHEDULED_BUILD_VERSION=${SCHEDULED_BUILD_VERSION}" >> $GITHUB_ENV
 
-      - name: Create nightly git tag
+      - name: Create scheduled build git tag
        if: github.event_name == 'schedule'
        run: |
-          git tag ${{ env.NIGHTLY_VERSION }}
+          git tag ${{ env.SCHEDULED_BUILD_VERSION }}
 
-      - name: Publish nightly release # configure the different release title and tags.
+      - name: Publish scheduled release # configure the different release title and tags.
        uses: softprops/action-gh-release@v1
        if: github.event_name == 'schedule'
        with:
-          name: "Release ${{ env.NIGHTLY_VERSION }}"
-          tag_name: ${{ env.NIGHTLY_VERSION }}
+          name: "Release ${{ env.SCHEDULED_BUILD_VERSION }}"
+          tag_name: ${{ env.SCHEDULED_BUILD_VERSION }}
          generate_release_notes: true
          files: |
            **/greptime-*
@@ -189,13 +191,13 @@ jobs:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
 
-      - name: Configure nightly build image tag # the tag would be ${NIGHTLY_BUILD_VERSION_PREFIX}-YYYYMMDD-nightly
+      - name: Configure scheduled build image tag # the tag would be ${SCHEDULED_BUILD_VERSION_PREFIX}-YYYYMMDD-${SCHEDULED_PERIOD}
        shell: bash
        if: github.event_name == 'schedule'
        run: |
          buildTime=`date "+%Y%m%d"`
-          NIGHTLY_VERSION=${{ env.NIGHTLY_BUILD_VERSION_PREFIX }}-$buildTime-nightly
-          echo "IMAGE_TAG=${NIGHTLY_VERSION:1}" >> $GITHUB_ENV
+          SCHEDULED_BUILD_VERSION=${{ env.SCHEDULED_BUILD_VERSION_PREFIX }}-$buildTime-${{ env.SCHEDULED_PERIOD }}
+          echo "IMAGE_TAG=${SCHEDULED_BUILD_VERSION:1}" >> $GITHUB_ENV
 
      - name: Configure tag # If the release tag is v0.1.0, then the image version tag will be 0.1.0.
        shell: bash
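For reference, the version string assembled by the `run:` steps above follows `${SCHEDULED_BUILD_VERSION_PREFIX}-YYYYMMDD-${SCHEDULED_PERIOD}`. Below is a minimal Rust sketch of the same computation; the `chrono` dependency and the helper name are assumptions for illustration, not part of the workflow:

```rust
use chrono::Utc; // assumed dependency, used only for date formatting

/// Hypothetical helper mirroring the workflow's `${PREFIX}-$(date +%Y%m%d)-${PERIOD}`.
fn scheduled_tag(prefix: &str, period: &str) -> String {
    format!("{}-{}-{}", prefix, Utc::now().format("%Y%m%d"), period)
}

fn main() {
    let tag = scheduled_tag("v0.1.0-alpha", "weekly"); // e.g. v0.1.0-alpha-20221119-weekly
    // The Docker image tag drops the leading 'v', matching `${SCHEDULED_BUILD_VERSION:1}`.
    println!("release: {tag}, image: {}", &tag[1..]);
}
```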
.gitignore (vendored): 1 line changed

@@ -18,6 +18,7 @@ debug/
 
 # JetBrains IDE config directory
 .idea/
 *.iml
 
 # VSCode IDE config directory
+.vscode/
Cargo.lock (generated): 285 lines changed

@@ -35,7 +35,7 @@ version = "0.7.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47"
 dependencies = [
- "getrandom",
+ "getrandom 0.2.7",
  "once_cell",
  "version_check",
 ]
@@ -161,6 +161,12 @@ version = "0.3.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "a4c527152e37cf757a3f78aae5a06fbeefdb07ccc535c980a3208ee3060dd544"
 
+[[package]]
+name = "arrayvec"
+version = "0.5.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b"
+
 [[package]]
 name = "arrayvec"
 version = "0.7.2"
@@ -498,7 +504,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "b62ddb9cb1ec0a098ad4bbf9344d0713fa193ae1a80af55febcff2627b6a00c1"
 dependencies = [
  "futures-core",
- "getrandom",
+ "getrandom 0.2.7",
  "instant",
  "pin-project-lite",
  "rand 0.8.5",
@@ -645,6 +651,17 @@ dependencies = [
  "digest",
 ]
 
+[[package]]
+name = "blake2b_simd"
+version = "0.5.11"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "afa748e348ad3be8263be728124b24a24f268266f6f5d58af9d75f6a40b5c587"
+dependencies = [
+ "arrayref",
+ "arrayvec 0.5.2",
+ "constant_time_eq",
+]
+
 [[package]]
 name = "blake3"
 version = "1.3.1"
@@ -652,7 +669,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "a08e53fc5a564bb15bfe6fae56bd71522205f1f91893f9c0116edad6496c183f"
 dependencies = [
  "arrayref",
- "arrayvec",
+ "arrayvec 0.7.2",
  "cc",
  "cfg-if",
  "constant_time_eq",
@@ -1045,7 +1062,7 @@ dependencies = [
  "common-base",
  "common-error",
  "common-grpc",
- "common-insert",
+ "common-grpc-expr",
  "common-query",
  "common-recordbatch",
  "common-time",
@@ -1116,6 +1133,18 @@ dependencies = [
  "unicode-width",
 ]
 
+[[package]]
+name = "comfy-table"
+version = "6.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1090f39f45786ec6dc6286f8ea9c75d0a7ef0a0d3cda674cef0c3af7b307fbc2"
+dependencies = [
+ "crossterm",
+ "strum 0.24.1",
+ "strum_macros 0.24.3",
+ "unicode-width",
+]
+
 [[package]]
 name = "common-base"
 version = "0.1.0"
@@ -1213,13 +1242,15 @@ dependencies = [
 ]
 
 [[package]]
-name = "common-insert"
+name = "common-grpc-expr"
 version = "0.1.0"
 dependencies = [
  "api",
+ "async-trait",
  "common-base",
+ "common-catalog",
  "common-error",
  "common-grpc",
  "common-query",
  "common-telemetry",
  "common-time",
@@ -1563,6 +1594,31 @@ dependencies = [
  "once_cell",
 ]
 
+[[package]]
+name = "crossterm"
+version = "0.25.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e64e6c0fbe2c17357405f7c758c1ef960fce08bdfb2c03d88d2a18d7e09c4b67"
+dependencies = [
+ "bitflags",
+ "crossterm_winapi",
+ "libc",
+ "mio",
+ "parking_lot",
+ "signal-hook",
+ "signal-hook-mio",
+ "winapi",
+]
+
+[[package]]
+name = "crossterm_winapi"
+version = "0.9.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2ae1b35a484aa10e07fe0638d02301c5ad24de82d310ccbd2f3693da5f09bf1c"
+dependencies = [
+ "winapi",
+]
+
 [[package]]
 name = "crunchy"
 version = "0.2.2"
@@ -1658,7 +1714,7 @@ dependencies = [
  "arrow2",
  "async-trait",
  "chrono",
- "comfy-table",
+ "comfy-table 5.0.1",
  "datafusion-common",
  "datafusion-expr",
  "datafusion-physical-expr",
@@ -1734,13 +1790,14 @@ dependencies = [
  "axum 0.6.0-rc.2",
  "axum-macros",
  "axum-test-helper",
  "backon",
  "catalog",
  "client",
  "common-base",
  "common-catalog",
  "common-error",
  "common-grpc",
- "common-insert",
+ "common-grpc-expr",
  "common-query",
  "common-recordbatch",
  "common-runtime",
@@ -1849,6 +1906,17 @@ dependencies = [
  "subtle",
 ]
 
+[[package]]
+name = "dirs"
+version = "1.0.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3fd78930633bd1c6e35c4b42b1df7b0cbc6bc191146e512bb3bedf243fcc3901"
+dependencies = [
+ "libc",
+ "redox_users 0.3.5",
+ "winapi",
+]
+
 [[package]]
 name = "dirs"
 version = "4.0.0"
@@ -1875,7 +1943,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "1b1d1d91c932ef41c0f2663aa8b0ca0342d444d842c06914aa0a7e352d0bada6"
 dependencies = [
  "libc",
- "redox_users",
+ "redox_users 0.4.3",
  "winapi",
 ]
@@ -1886,7 +1954,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "4ebda144c4fe02d1f7ea1a7d9641b6fc6b580adcfa024ae48797ecdeb6825b4d"
 dependencies = [
  "libc",
- "redox_users",
+ "redox_users 0.4.3",
  "winapi",
 ]
@@ -2119,7 +2187,7 @@ dependencies = [
  "common-catalog",
  "common-error",
  "common-grpc",
- "common-insert",
+ "common-grpc-expr",
  "common-query",
  "common-recordbatch",
  "common-runtime",
@@ -2353,6 +2421,17 @@ dependencies = [
  "winapi",
 ]
 
+[[package]]
+name = "getrandom"
+version = "0.1.16"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce"
+dependencies = [
+ "cfg-if",
+ "libc",
+ "wasi 0.9.0+wasi-snapshot-preview1",
+]
+
 [[package]]
 name = "getrandom"
 version = "0.2.7"
@@ -2534,6 +2613,16 @@ version = "2.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4"
 
+[[package]]
+name = "humantime-serde"
+version = "1.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "57a3db5ea5923d99402c94e9feb261dc5ee9b4efa158b0315f788cf549cc200c"
+dependencies = [
+ "humantime",
+ "serde",
+]
+
 [[package]]
 name = "hyper"
 version = "0.14.20"
@@ -3572,9 +3661,9 @@ checksum = "0ab1bc2a289d34bd04a330323ac98a1b4bc82c9d9fcb1e66b63caa84da26b575"
 [[package]]
 name = "opendal"
-version = "0.20.1"
+version = "0.21.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "63b17b778cf11d10fbaaae4a5a0f82d5c6f527f96a9e4843f4e2dd6cd0dbe580"
+checksum = "8c9be1e30ca12b989107a5ee5bb75468a7f538059e43255ccd4743089b42aeeb"
 dependencies = [
  "anyhow",
  "async-compat",
@@ -3588,6 +3677,7 @@ dependencies = [
  "http",
  "log",
  "md-5",
+ "metrics",
  "once_cell",
  "parking_lot",
  "percent-encoding",
@@ -3597,9 +3687,9 @@ dependencies = [
  "reqwest",
  "serde",
  "serde_json",
  "thiserror",
- "time 0.3.14",
  "tokio",
  "tracing",
+ "ureq",
 ]
@@ -3755,7 +3845,7 @@ dependencies = [
  "cfg-if",
  "libc",
  "petgraph",
- "redox_syscall",
+ "redox_syscall 0.2.16",
  "smallvec",
  "thread-id",
  "windows-sys",
@@ -4152,6 +4242,17 @@ version = "0.2.16"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "eb9f9e6e233e5c4a35559a617bf40a4ec447db2e84c20b55a6f83167b7e57872"
 
+[[package]]
+name = "prettydiff"
+version = "0.6.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0b6176190f1637d46034820b82fbe758727ccb40da9c9fc2255d695eb05ea29c"
+dependencies = [
+ "ansi_term",
+ "prettytable-rs",
+ "structopt",
+]
+
 [[package]]
 name = "prettyplease"
 version = "0.1.19"
@@ -4162,6 +4263,20 @@ dependencies = [
  "syn",
 ]
 
+[[package]]
+name = "prettytable-rs"
+version = "0.8.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0fd04b170004fa2daccf418a7f8253aaf033c27760b5f225889024cf66d7ac2e"
+dependencies = [
+ "atty",
+ "csv",
+ "encode_unicode",
+ "lazy_static",
+ "term",
+ "unicode-width",
+]
+
 [[package]]
 name = "proc-macro-crate"
 version = "1.2.1"
@@ -4476,7 +4591,7 @@ version = "0.6.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c"
 dependencies = [
- "getrandom",
+ "getrandom 0.2.7",
 ]
@@ -4537,6 +4652,12 @@ dependencies = [
  "rand_core 0.3.1",
 ]
 
+[[package]]
+name = "redox_syscall"
+version = "0.1.57"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "41cc0f7e4d5d4544e8861606a285bb08d3e70712ccc7d2b84d7c0ccfaf4b05ce"
+
 [[package]]
 name = "redox_syscall"
 version = "0.2.16"
@@ -4546,14 +4667,25 @@ dependencies = [
  "bitflags",
 ]
 
+[[package]]
+name = "redox_users"
+version = "0.3.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "de0737333e7a9502c789a36d7c7fa6092a49895d4faa31ca5df163857ded2e9d"
+dependencies = [
+ "getrandom 0.1.16",
+ "redox_syscall 0.1.57",
+ "rust-argon2",
+]
+
 [[package]]
 name = "redox_users"
 version = "0.4.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b"
 dependencies = [
- "getrandom",
- "redox_syscall",
+ "getrandom 0.2.7",
+ "redox_syscall 0.2.16",
  "thiserror",
 ]
@@ -4594,15 +4726,15 @@ dependencies = [
 [[package]]
 name = "reqsign"
-version = "0.6.4"
+version = "0.6.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e22524be78041476bf8673f2720fa1000f34432b384d9ad5846b024569a4b150"
+checksum = "d34ea360414ee77ddab3a8360a0c241fc77ab5e27892dcde1d2cfcc29d4e0f55"
 dependencies = [
  "anyhow",
  "backon",
  "base64",
  "bytes",
- "dirs",
+ "dirs 4.0.0",
  "form_urlencoded",
  "hex",
  "hmac",
@@ -4710,6 +4842,18 @@ dependencies = [
  "serde",
 ]
 
+[[package]]
+name = "rust-argon2"
+version = "0.8.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4b18820d944b33caa75a71378964ac46f58517c92b6ae5f762636247c09e78fb"
+dependencies = [
+ "base64",
+ "blake2b_simd",
+ "constant_time_eq",
+ "crossbeam-utils",
+]
+
 [[package]]
 name = "rust-ini"
 version = "0.18.0"
@@ -4726,7 +4870,7 @@ version = "1.26.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "ee9164faf726e4f3ece4978b25ca877ddc6802fa77f38cdccb32c7f805ecd70c"
 dependencies = [
- "arrayvec",
+ "arrayvec 0.7.2",
  "num-traits",
  "serde",
 ]
@@ -4954,7 +5098,7 @@ dependencies = [
  "crossbeam-utils",
  "exitcode",
  "flate2",
- "getrandom",
+ "getrandom 0.2.7",
  "half",
  "hex",
  "hexf-parse",
@@ -5285,6 +5429,7 @@ dependencies = [
  "datatypes",
  "futures",
  "hex",
+ "humantime-serde",
  "hyper",
  "influxdb_line_protocol",
  "metrics",
@@ -5363,6 +5508,27 @@ version = "1.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "43b2853a4d09f215c24cc5489c992ce46052d359b5109343cbafbf26bc62f8a3"
 
+[[package]]
+name = "signal-hook"
+version = "0.3.14"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a253b5e89e2698464fc26b545c9edceb338e18a89effeeecfea192c3025be29d"
+dependencies = [
+ "libc",
+ "signal-hook-registry",
+]
+
+[[package]]
+name = "signal-hook-mio"
+version = "0.2.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "29ad2e15f37ec9a6cc544097b78a1ec90001e9f71b81338ca39f430adaca99af"
+dependencies = [
+ "libc",
+ "mio",
+ "signal-hook",
+]
+
 [[package]]
 name = "signal-hook-registry"
 version = "1.4.0"
@@ -5517,6 +5683,32 @@ dependencies = [
  "sqlparser",
 ]
 
+[[package]]
+name = "sqlness"
+version = "0.1.0"
+source = "git+https://github.com/ceresdb/sqlness.git#c077b17d73ab25460c152dc34e8f80f904522a57"
+dependencies = [
+ "async-trait",
+ "derive_builder",
+ "prettydiff",
+ "serde",
+ "thiserror",
+ "tokio",
+ "toml",
+ "walkdir",
+]
+
+[[package]]
+name = "sqlness-runner"
+version = "0.1.0"
+dependencies = [
+ "async-trait",
+ "client",
+ "comfy-table 6.1.2",
+ "sqlness",
+ "tokio",
+]
+
 [[package]]
 name = "sqlparser"
 version = "0.15.0"
@@ -5684,6 +5876,30 @@ version = "0.10.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623"
 
+[[package]]
+name = "structopt"
+version = "0.3.26"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0c6b5c64445ba8094a6ab0c3cd2ad323e07171012d9c98b0b15651daf1787a10"
+dependencies = [
+ "clap 2.34.0",
+ "lazy_static",
+ "structopt-derive",
+]
+
+[[package]]
+name = "structopt-derive"
+version = "0.4.18"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dcb5ae327f9cc13b68763b5749770cb9e048a99bd9dfdfa58d0cf05d5f64afe0"
+dependencies = [
+ "heck 0.3.3",
+ "proc-macro-error",
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
 [[package]]
 name = "strum"
 version = "0.23.0"
@@ -5851,11 +6067,22 @@ dependencies = [
  "cfg-if",
  "fastrand",
  "libc",
- "redox_syscall",
+ "redox_syscall 0.2.16",
  "remove_dir_all",
  "winapi",
 ]
 
+[[package]]
+name = "term"
+version = "0.5.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "edd106a334b7657c10b7c540a0106114feadeb4dc314513e97df481d5d966f42"
+dependencies = [
+ "byteorder",
+ "dirs 1.0.5",
+ "winapi",
+]
+
 [[package]]
 name = "termcolor"
 version = "1.1.3"
@@ -5917,7 +6144,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "5fdfe0627923f7411a43ec9ec9c39c3a9b4151be313e0922042581fb6c9b717f"
 dependencies = [
  "libc",
- "redox_syscall",
+ "redox_syscall 0.2.16",
  "winapi",
 ]
@@ -6419,7 +6646,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "97fee6b57c6a41524a810daee9286c02d7752c4253064d0b05472833a438f675"
 dependencies = [
  "cfg-if",
- "rand 0.4.6",
+ "rand 0.8.5",
  "static_assertions",
 ]
@@ -6628,7 +6855,7 @@ version = "1.1.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "dd6469f4314d5f1ffec476e05f17cc9a78bc7a27a6a857842170bdf8d6f98d2f"
 dependencies = [
- "getrandom",
+ "getrandom 0.2.7",
 ]
@@ -6694,6 +6921,12 @@ dependencies = [
  "try-lock",
 ]
 
+[[package]]
+name = "wasi"
+version = "0.9.0+wasi-snapshot-preview1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519"
+
 [[package]]
 name = "wasi"
 version = "0.10.2+wasi-snapshot-preview1"
@@ -15,7 +15,7 @@ members = [
     "src/common/recordbatch",
     "src/common/runtime",
     "src/common/substrait",
-    "src/common/insert",
+    "src/common/grpc-expr",
     "src/common/telemetry",
     "src/common/time",
     "src/datanode",
@@ -33,6 +33,7 @@ members = [
     "src/store-api",
     "src/table",
     "src/mito",
+    "tests/runner",
 ]
 
 [profile.release]
@@ -28,9 +28,8 @@ use arrow::datatypes::{DataType, Float64Type, Int64Type};
 use arrow::record_batch::RecordBatch;
 use clap::Parser;
 use client::admin::Admin;
-use client::api::v1::codec::InsertBatch;
 use client::api::v1::column::Values;
-use client::api::v1::{insert_expr, Column, ColumnDataType, ColumnDef, CreateExpr, InsertExpr};
+use client::api::v1::{Column, ColumnDataType, ColumnDef, CreateExpr, InsertExpr};
 use client::{Client, Database, Select};
 use indicatif::{MultiProgress, ProgressBar, ProgressStyle};
 use parquet::arrow::{ArrowReader, ParquetFileArrowReader};
@@ -100,16 +99,13 @@ async fn write_data(
 
     for record_batch in record_batch_reader {
         let record_batch = record_batch.unwrap();
-        let row_count = record_batch.num_rows();
-        let insert_batch = convert_record_batch(record_batch).into();
+        let (columns, row_count) = convert_record_batch(record_batch);
         let insert_expr = InsertExpr {
            schema_name: "public".to_string(),
            table_name: TABLE_NAME.to_string(),
-            expr: Some(insert_expr::Expr::Values(insert_expr::Values {
-                values: vec![insert_batch],
-            })),
            options: HashMap::default(),
            region_number: 0,
+            columns,
+            row_count,
        };
        let now = Instant::now();
        db.insert(insert_expr).await.unwrap();
@@ -125,7 +121,7 @@
     total_rpc_elapsed_ms
 }
 
-fn convert_record_batch(record_batch: RecordBatch) -> InsertBatch {
+fn convert_record_batch(record_batch: RecordBatch) -> (Vec<Column>, u32) {
     let schema = record_batch.schema();
     let fields = schema.fields();
     let row_count = record_batch.num_rows();
@@ -143,10 +139,7 @@ fn convert_record_batch(record_batch: RecordBatch) -> (Vec<Column>, u32) {
         columns.push(column);
     }
 
-    InsertBatch {
-        columns,
-        row_count: row_count as _,
-    }
+    (columns, row_count as _)
 }
 
 fn build_values(column: &ArrayRef) -> Values {
@@ -7,3 +7,4 @@ coverage:
     patch: off
 ignore:
   - "**/error*.rs" # ignore all error.rs files
+  - "tests/runner/*.rs" # ignore integration test runner
@@ -5,6 +5,7 @@ wal_dir = '/tmp/greptimedb/wal'
 rpc_runtime_size = 8
 mysql_addr = '127.0.0.1:4406'
 mysql_runtime_size = 4
+enable_memory_catalog = false
 
 [storage]
 type = 'File'
@@ -1,6 +1,9 @@
 mode = 'distributed'
 datanode_rpc_addr = '127.0.0.1:3001'
-http_addr = '127.0.0.1:4000'
+
+[http_options]
+addr = '127.0.0.1:4000'
+timeout = "30s"
 
 [meta_client_opts]
 metasrv_addrs = ['127.0.0.1:3002']
@@ -1,7 +1,11 @@
 node_id = 0
 mode = 'standalone'
-http_addr = '127.0.0.1:4000'
 wal_dir = '/tmp/greptimedb/wal/'
+enable_memory_catalog = false
+
+[http_options]
+addr = '127.0.0.1:4000'
+timeout = "30s"
 
 [storage]
 type = 'File'
@@ -20,7 +20,6 @@ fn main() {
         .file_descriptor_set_path(default_out_dir.join("greptime_fd.bin"))
         .compile(
             &[
-                "greptime/v1/insert.proto",
                 "greptime/v1/select.proto",
                 "greptime/v1/physical_plan.proto",
                 "greptime/v1/greptime.proto",
@@ -20,6 +20,7 @@ message AdminExpr {
     CreateExpr create = 2;
     AlterExpr alter = 3;
     CreateDatabaseExpr create_database = 4;
+    DropTableExpr drop_table = 5;
   }
 }
 
@@ -55,6 +56,12 @@ message AlterExpr {
   }
 }
 
+message DropTableExpr {
+  string catalog_name = 1;
+  string schema_name = 2;
+  string table_name = 3;
+}
+
 message AddColumns {
   repeated AddColumn add_columns = 1;
 }
@@ -2,6 +2,7 @@ syntax = "proto3";
 
 package greptime.v1;
 
+import "greptime/v1/column.proto";
 import "greptime/v1/common.proto";
 
 message DatabaseRequest {
@@ -41,26 +42,16 @@ message InsertExpr {
   string schema_name = 1;
   string table_name = 2;
 
-  message Values {
-    repeated bytes values = 1;
-  }
+  // Data is represented here.
+  repeated Column columns = 3;
 
-  oneof expr {
-    Values values = 3;
+  // The row_count of all columns, which include null and non-null values.
+  //
+  // Note: the row_count of all columns in a InsertExpr must be same.
+  uint32 row_count = 4;
 
-    // TODO(LFC): Remove field "sql" in InsertExpr.
-    // When Frontend instance received an insertion SQL (`insert into ...`), it's anticipated to parse the SQL and
-    // assemble the values to insert to feed Datanode. In other words, inserting data through Datanode instance's GRPC
-    // interface shouldn't use SQL directly.
-    // Then why the "sql" field exists here? It's because the Frontend needs table schema to create the values to insert,
-    // which is currently not able to find anywhere. (Maybe the table schema is suppose to be fetched from Meta?)
-    // The "sql" field is meant to be removed in the future.
-    string sql = 4;
-  }
-
-  /// The region number of current insert request.
+  // The region number of current insert request.
   uint32 region_number = 5;
   map<string, bytes> options = 6;
 }
 
 // TODO(jiachun)
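With the `Values`/`sql` oneof gone, the data for an insert now rides directly on the expression. A minimal client-side sketch of the flattened message, using the prost-generated types implied by the proto above (module paths are assumptions for illustration):

```rust
use std::collections::HashMap;

use api::v1::{column, Column, InsertExpr};

// Build a one-column, three-row insert under the flattened schema.
fn example_insert() -> InsertExpr {
    let values = column::Values {
        i32_values: vec![1, 2, 3],
        ..Default::default()
    };
    let column = Column {
        column_name: "foo".to_string(),
        values: Some(values),
        ..Default::default()
    };
    InsertExpr {
        schema_name: "public".to_string(),
        table_name: "demo".to_string(),
        columns: vec![column], // data is carried inline now
        row_count: 3,          // must equal the row count of every column
        region_number: 0,
        options: HashMap::default(),
    }
}
```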
@@ -1,14 +0,0 @@
-syntax = "proto3";
-
-package greptime.v1.codec;
-
-import "greptime/v1/column.proto";
-
-message InsertBatch {
-  repeated Column columns = 1;
-  uint32 row_count = 2;
-}
-
-message RegionNumber {
-  uint32 id = 1;
-}
@@ -12,7 +12,6 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-mod column_def;
 pub mod error;
 pub mod helper;
 pub mod prometheus;
@@ -15,7 +15,7 @@
 pub use prost::DecodeError;
 use prost::Message;
 
-use crate::v1::codec::{InsertBatch, PhysicalPlanNode, RegionNumber, SelectResult};
+use crate::v1::codec::{PhysicalPlanNode, SelectResult};
 use crate::v1::meta::TableRouteValue;
 
 macro_rules! impl_convert_with_bytes {
@@ -36,10 +36,8 @@ macro_rules! impl_convert_with_bytes {
     };
 }
 
-impl_convert_with_bytes!(InsertBatch);
 impl_convert_with_bytes!(SelectResult);
 impl_convert_with_bytes!(PhysicalPlanNode);
-impl_convert_with_bytes!(RegionNumber);
 impl_convert_with_bytes!(TableRouteValue);
 
 #[cfg(test)]
@@ -51,52 +49,6 @@ mod tests {
 
     const SEMANTIC_TAG: i32 = 0;
 
-    #[test]
-    fn test_convert_insert_batch() {
-        let insert_batch = mock_insert_batch();
-
-        let bytes: Vec<u8> = insert_batch.into();
-        let insert: InsertBatch = bytes.deref().try_into().unwrap();
-
-        assert_eq!(8, insert.row_count);
-        assert_eq!(1, insert.columns.len());
-
-        let column = &insert.columns[0];
-        assert_eq!("foo", column.column_name);
-        assert_eq!(SEMANTIC_TAG, column.semantic_type);
-        assert_eq!(vec![1], column.null_mask);
-        assert_eq!(
-            vec![2, 3, 4, 5, 6, 7, 8],
-            column.values.as_ref().unwrap().i32_values
-        );
-    }
-
-    #[should_panic]
-    #[test]
-    fn test_convert_insert_batch_wrong() {
-        let insert_batch = mock_insert_batch();
-
-        let mut bytes: Vec<u8> = insert_batch.into();
-
-        // modify some bytes
-        bytes[0] = 0b1;
-        bytes[1] = 0b1;
-
-        let insert: InsertBatch = bytes.deref().try_into().unwrap();
-
-        assert_eq!(8, insert.row_count);
-        assert_eq!(1, insert.columns.len());
-
-        let column = &insert.columns[0];
-        assert_eq!("foo", column.column_name);
-        assert_eq!(SEMANTIC_TAG, column.semantic_type);
-        assert_eq!(vec![1], column.null_mask);
-        assert_eq!(
-            vec![2, 3, 4, 5, 6, 7, 8],
-            column.values.as_ref().unwrap().i32_values
-        );
-    }
-
     #[test]
     fn test_convert_select_result() {
         let select_result = mock_select_result();
@@ -143,35 +95,6 @@ mod tests {
         );
     }
 
-    #[test]
-    fn test_convert_region_id() {
-        let region_id = RegionNumber { id: 12 };
-
-        let bytes: Vec<u8> = region_id.into();
-        let region_id: RegionNumber = bytes.deref().try_into().unwrap();
-
-        assert_eq!(12, region_id.id);
-    }
-
-    fn mock_insert_batch() -> InsertBatch {
-        let values = column::Values {
-            i32_values: vec![2, 3, 4, 5, 6, 7, 8],
-            ..Default::default()
-        };
-        let null_mask = vec![1];
-        let column = Column {
-            column_name: "foo".to_string(),
-            semantic_type: SEMANTIC_TAG,
-            values: Some(values),
-            null_mask,
-            ..Default::default()
-        };
-        InsertBatch {
-            columns: vec![column],
-            row_count: 8,
-        }
-    }
-
     fn mock_select_result() -> SelectResult {
         let values = column::Values {
             i32_values: vec![2, 3, 4, 5, 6, 7, 8],
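The conversions that remain (`SelectResult`, `PhysicalPlanNode`, `TableRouteValue`) still follow the same round-trip pattern the deleted tests exercised. A small sketch, assuming the same `api::v1::codec` types and the macro-generated `Into<Vec<u8>>` / `TryFrom<&[u8]>` impls:

```rust
use std::ops::Deref;

use api::v1::codec::SelectResult;

// Round-trip a SelectResult through the byte conversions that
// `impl_convert_with_bytes!` generates.
fn round_trip(select_result: SelectResult) -> SelectResult {
    let bytes: Vec<u8> = select_result.into();
    bytes.deref().try_into().expect("valid protobuf bytes")
}
```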
@@ -21,4 +21,5 @@ pub mod codec {
     tonic::include_proto!("greptime.v1.codec");
 }
 
+mod column_def;
 pub mod meta;
@@ -27,7 +27,7 @@ futures = "0.3"
 futures-util = "0.3"
 lazy_static = "1.4"
 meta-client = { path = "../meta-client" }
-opendal = "0.20"
+opendal = "0.21"
 regex = "1.6"
 serde = "1.0"
 serde_json = "1.0"
@@ -40,8 +40,8 @@ tokio = { version = "1.18", features = ["full"] }
 chrono = "0.4"
 log-store = { path = "../log-store" }
 object-store = { path = "../object-store" }
-opendal = "0.20"
+opendal = "0.21"
 storage = { path = "../storage" }
-mito = { path = "../mito" }
+mito = { path = "../mito", features = ["test"] }
 tempdir = "0.3"
 tokio = { version = "1.0", features = ["full"] }
@@ -94,7 +94,7 @@ pub enum Error {
         backtrace: Backtrace,
     },
 
-    #[snafu(display("Table {} already exists", table))]
+    #[snafu(display("Table `{}` already exists", table))]
     TableExists { table: String, backtrace: Backtrace },
 
     #[snafu(display("Schema {} already exists", schema))]
@@ -109,6 +109,12 @@ pub enum Error {
         source: BoxedError,
     },
 
+    #[snafu(display("Operation {} not implemented yet", operation))]
+    Unimplemented {
+        operation: String,
+        backtrace: Backtrace,
+    },
+
     #[snafu(display("Failed to open table, table info: {}, source: {}", table_info, source))]
     OpenTable {
         table_info: String,
@@ -216,11 +222,12 @@ impl ErrorExt for Error {
             | Error::ValueDeserialize { .. }
             | Error::Io { .. } => StatusCode::StorageUnavailable,
 
-            Error::RegisterTable { .. } => StatusCode::Internal,
-
             Error::ReadSystemCatalog { source, .. } => source.status_code(),
             Error::SystemCatalogTypeMismatch { source, .. } => source.status_code(),
             Error::InvalidCatalogValue { source, .. } => source.status_code(),
 
+            Error::RegisterTable { .. } => StatusCode::Internal,
             Error::TableExists { .. } => StatusCode::TableAlreadyExists,
             Error::SchemaExists { .. } => StatusCode::InvalidArguments,
 
@@ -235,6 +242,8 @@ impl ErrorExt for Error {
             Error::InvalidTableSchema { source, .. } => source.status_code(),
             Error::InvalidTableInfoInCatalog { .. } => StatusCode::Unexpected,
             Error::Internal { source, .. } => source.status_code(),
+
+            Error::Unimplemented { .. } => StatusCode::Unsupported,
         }
     }
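With the new variant in place, an unsupported catalog operation surfaces to callers as `StatusCode::Unsupported`. A hedged sketch of that mapping as a test (the `common_error` prelude path is an assumption):

```rust
use catalog::error::{Error, UnimplementedSnafu};
use common_error::prelude::*; // assumed: brings ErrorExt and StatusCode into scope

#[test]
fn unimplemented_maps_to_unsupported() {
    // `build()` on a snafu context selector constructs the error value directly.
    let err: Error = UnimplementedSnafu {
        operation: "deregister table".to_string(),
    }
    .build();
    assert_eq!(StatusCode::Unsupported, err.status_code());
}
```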
@@ -15,6 +15,7 @@
 #![feature(assert_matches)]
 
 use std::any::Any;
+use std::fmt::{Debug, Formatter};
 use std::sync::Arc;
 
 use common_telemetry::info;
@@ -83,12 +84,17 @@ pub trait CatalogManager: CatalogList {
     /// Starts a catalog manager.
     async fn start(&self) -> Result<()>;
 
-    /// Registers a table given given catalog/schema to catalog manager,
-    /// returns table registered.
-    async fn register_table(&self, request: RegisterTableRequest) -> Result<usize>;
+    /// Registers a table within given catalog/schema to catalog manager,
+    /// returns whether the table registered.
+    async fn register_table(&self, request: RegisterTableRequest) -> Result<bool>;
 
-    /// Register a schema with catalog name and schema name.
-    async fn register_schema(&self, request: RegisterSchemaRequest) -> Result<usize>;
+    /// Deregisters a table within given catalog/schema to catalog manager,
+    /// returns whether the table deregistered.
+    async fn deregister_table(&self, request: DeregisterTableRequest) -> Result<bool>;
+
+    /// Register a schema with catalog name and schema name. Returns whether the
+    /// schema registered.
+    async fn register_schema(&self, request: RegisterSchemaRequest) -> Result<bool>;
 
     /// Register a system table, should be called before starting the manager.
     async fn register_system_table(&self, request: RegisterSystemTableRequest)
@@ -123,6 +129,25 @@ pub struct RegisterTableRequest {
     pub table: TableRef,
 }
 
+impl Debug for RegisterTableRequest {
+    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+        f.debug_struct("RegisterTableRequest")
+            .field("catalog", &self.catalog)
+            .field("schema", &self.schema)
+            .field("table_name", &self.table_name)
+            .field("table_id", &self.table_id)
+            .field("table", &self.table.table_info())
+            .finish()
+    }
+}
+
+#[derive(Clone)]
+pub struct DeregisterTableRequest {
+    pub catalog: String,
+    pub schema: String,
+    pub table_name: String,
+}
+
 #[derive(Debug, Clone)]
 pub struct RegisterSchemaRequest {
     pub catalog: String,
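Registration is now idempotent at the API level: `Ok(true)` means the table was newly registered, `Ok(false)` means an identical table (same name and table id) was already present. A caller sketch under those semantics, using the names from the trait above (the helper itself is hypothetical):

```rust
use catalog::error::Result;
use catalog::{CatalogManager, RegisterTableRequest};

// Sketch: interpret the boolean returned by the reworked trait method.
async fn ensure_registered(
    manager: &dyn CatalogManager,
    request: RegisterTableRequest,
) -> Result<()> {
    if manager.register_table(request).await? {
        // Newly registered under the requested catalog/schema.
    } else {
        // A table with the same name *and* table id already existed;
        // the call was an idempotent no-op.
    }
    Ok(())
}
```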
@@ -21,7 +21,7 @@ use common_catalog::consts::{
     SYSTEM_CATALOG_NAME, SYSTEM_CATALOG_TABLE_NAME,
 };
 use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
-use common_telemetry::info;
+use common_telemetry::{error, info};
 use datatypes::prelude::ScalarVector;
 use datatypes::vectors::{BinaryVector, UInt8Vector};
 use futures_util::lock::Mutex;
@@ -36,7 +36,7 @@ use table::TableRef;
 use crate::error::{
     CatalogNotFoundSnafu, IllegalManagerStateSnafu, OpenTableSnafu, ReadSystemCatalogSnafu, Result,
     SchemaExistsSnafu, SchemaNotFoundSnafu, SystemCatalogSnafu, SystemCatalogTypeMismatchSnafu,
-    TableExistsSnafu, TableNotFoundSnafu,
+    TableExistsSnafu, TableNotFoundSnafu, UnimplementedSnafu,
 };
 use crate::local::memory::{MemoryCatalogManager, MemoryCatalogProvider, MemorySchemaProvider};
 use crate::system::{
@@ -46,8 +46,8 @@ use crate::system::{
 use crate::tables::SystemCatalog;
 use crate::{
     format_full_table_name, handle_system_table_request, CatalogList, CatalogManager,
-    CatalogProvider, CatalogProviderRef, RegisterSchemaRequest, RegisterSystemTableRequest,
-    RegisterTableRequest, SchemaProvider, SchemaProviderRef,
+    CatalogProvider, CatalogProviderRef, DeregisterTableRequest, RegisterSchemaRequest,
+    RegisterSystemTableRequest, RegisterTableRequest, SchemaProvider, SchemaProviderRef,
 };
 
 /// A `CatalogManager` consists of a system catalog and a bunch of user catalogs.
@@ -57,6 +57,7 @@ pub struct LocalCatalogManager {
     engine: TableEngineRef,
     next_table_id: AtomicU32,
     init_lock: Mutex<bool>,
+    register_lock: Mutex<()>,
     system_table_requests: Mutex<Vec<RegisterSystemTableRequest>>,
 }
 
@@ -76,6 +77,7 @@ impl LocalCatalogManager {
             engine,
             next_table_id: AtomicU32::new(MIN_USER_TABLE_ID),
             init_lock: Mutex::new(false),
+            register_lock: Mutex::new(()),
             system_table_requests: Mutex::new(Vec::default()),
         })
     }
@@ -309,7 +311,7 @@ impl CatalogManager for LocalCatalogManager {
         self.init().await
     }
 
-    async fn register_table(&self, request: RegisterTableRequest) -> Result<usize> {
+    async fn register_table(&self, request: RegisterTableRequest) -> Result<bool> {
         let started = self.init_lock.lock().await;
 
         ensure!(
@@ -332,27 +334,50 @@ impl CatalogManager for LocalCatalogManager {
                 schema_info: format!("{}.{}", catalog_name, schema_name),
             })?;
 
-        if schema.table_exist(&request.table_name)? {
-            return TableExistsSnafu {
-                table: format_full_table_name(catalog_name, schema_name, &request.table_name),
-            }
-            .fail();
-        }
-
-        self.system
-            .register_table(
-                catalog_name.clone(),
-                schema_name.clone(),
-                request.table_name.clone(),
-                request.table_id,
-            )
-            .await?;
-
-        schema.register_table(request.table_name, request.table)?;
-        Ok(1)
+        {
+            let _lock = self.register_lock.lock().await;
+            if let Some(existing) = schema.table(&request.table_name)? {
+                if existing.table_info().ident.table_id != request.table_id {
+                    error!(
+                        "Unexpected table register request: {:?}, existing: {:?}",
+                        request,
+                        existing.table_info()
+                    );
+                    return TableExistsSnafu {
+                        table: format_full_table_name(
+                            catalog_name,
+                            schema_name,
+                            &request.table_name,
+                        ),
+                    }
+                    .fail();
+                }
+                // Try to register table with same table id, just ignore.
+                Ok(false)
+            } else {
+                // table does not exist
+                self.system
+                    .register_table(
+                        catalog_name.clone(),
+                        schema_name.clone(),
+                        request.table_name.clone(),
+                        request.table_id,
+                    )
+                    .await?;
+                schema.register_table(request.table_name, request.table)?;
+                Ok(true)
+            }
+        }
     }
 
-    async fn register_schema(&self, request: RegisterSchemaRequest) -> Result<usize> {
+    async fn deregister_table(&self, _request: DeregisterTableRequest) -> Result<bool> {
+        UnimplementedSnafu {
+            operation: "deregister table",
+        }
+        .fail()
+    }
+
+    async fn register_schema(&self, request: RegisterSchemaRequest) -> Result<bool> {
         let started = self.init_lock.lock().await;
         ensure!(
             *started,
@@ -367,17 +392,21 @@ impl CatalogManager for LocalCatalogManager {
             .catalogs
             .catalog(catalog_name)?
             .context(CatalogNotFoundSnafu { catalog_name })?;
-        if catalog.schema(schema_name)?.is_some() {
-            return SchemaExistsSnafu {
-                schema: schema_name,
-            }
-            .fail();
-        }
-        self.system
-            .register_schema(request.catalog, schema_name.clone())
-            .await?;
-        catalog.register_schema(request.schema, Arc::new(MemorySchemaProvider::new()))?;
-        Ok(1)
+
+        {
+            let _lock = self.register_lock.lock().await;
+            ensure!(
+                catalog.schema(schema_name)?.is_none(),
+                SchemaExistsSnafu {
+                    schema: schema_name,
+                }
+            );
+            self.system
+                .register_schema(request.catalog, schema_name.clone())
+                .await?;
+            catalog.register_schema(request.schema, Arc::new(MemorySchemaProvider::new()))?;
+            Ok(true)
+        }
     }
 
     async fn register_system_table(&self, request: RegisterSystemTableRequest) -> Result<()> {
@@ -19,6 +19,7 @@ use std::sync::atomic::{AtomicU32, Ordering};
 use std::sync::{Arc, RwLock};
 
 use common_catalog::consts::MIN_USER_TABLE_ID;
+use common_telemetry::error;
 use snafu::OptionExt;
 use table::metadata::TableId;
 use table::table::TableIdProvider;
@@ -27,8 +28,8 @@ use table::TableRef;
 use crate::error::{CatalogNotFoundSnafu, Result, SchemaNotFoundSnafu, TableExistsSnafu};
 use crate::schema::SchemaProvider;
 use crate::{
-    CatalogList, CatalogManager, CatalogProvider, CatalogProviderRef, RegisterSchemaRequest,
-    RegisterSystemTableRequest, RegisterTableRequest, SchemaProviderRef,
+    CatalogList, CatalogManager, CatalogProvider, CatalogProviderRef, DeregisterTableRequest,
+    RegisterSchemaRequest, RegisterSystemTableRequest, RegisterTableRequest, SchemaProviderRef,
 };
 
 /// Simple in-memory list of catalogs
@@ -69,7 +70,7 @@ impl CatalogManager for MemoryCatalogManager {
         Ok(())
     }
 
-    async fn register_table(&self, request: RegisterTableRequest) -> Result<usize> {
+    async fn register_table(&self, request: RegisterTableRequest) -> Result<bool> {
         let catalogs = self.catalogs.write().unwrap();
         let catalog = catalogs
             .get(&request.catalog)
@@ -84,10 +85,28 @@ impl CatalogManager for MemoryCatalogManager {
             })?;
         schema
             .register_table(request.table_name, request.table)
-            .map(|v| if v.is_some() { 0 } else { 1 })
+            .map(|v| v.is_none())
     }
 
-    async fn register_schema(&self, request: RegisterSchemaRequest) -> Result<usize> {
+    async fn deregister_table(&self, request: DeregisterTableRequest) -> Result<bool> {
+        let catalogs = self.catalogs.write().unwrap();
+        let catalog = catalogs
+            .get(&request.catalog)
+            .context(CatalogNotFoundSnafu {
+                catalog_name: &request.catalog,
+            })?
+            .clone();
+        let schema = catalog
+            .schema(&request.schema)?
+            .with_context(|| SchemaNotFoundSnafu {
+                schema_info: format!("{}.{}", &request.catalog, &request.schema),
+            })?;
+        schema
+            .deregister_table(&request.table_name)
+            .map(|v| v.is_some())
+    }
+
+    async fn register_schema(&self, request: RegisterSchemaRequest) -> Result<bool> {
         let catalogs = self.catalogs.write().unwrap();
         let catalog = catalogs
             .get(&request.catalog)
@@ -95,11 +114,12 @@ impl CatalogManager for MemoryCatalogManager {
                 catalog_name: &request.catalog,
             })?;
         catalog.register_schema(request.schema, Arc::new(MemorySchemaProvider::new()))?;
-        Ok(1)
+        Ok(true)
     }
 
     async fn register_system_table(&self, _request: RegisterSystemTableRequest) -> Result<()> {
-        unimplemented!()
+        // TODO(ruihang): support register system table request
+        Ok(())
     }
 
     fn schema(&self, catalog: &str, schema: &str) -> Result<Option<SchemaProviderRef>> {
@@ -251,11 +271,21 @@ impl SchemaProvider for MemorySchemaProvider {
     }
 
     fn register_table(&self, name: String, table: TableRef) -> Result<Option<TableRef>> {
-        if self.table_exist(name.as_str())? {
-            return TableExistsSnafu { table: name }.fail()?;
-        }
         let mut tables = self.tables.write().unwrap();
-        Ok(tables.insert(name, table))
+        if let Some(existing) = tables.get(name.as_str()) {
+            // if table with the same name but different table id exists, then it's a fatal bug
+            if existing.table_info().ident.table_id != table.table_info().ident.table_id {
+                error!(
+                    "Unexpected table register: {:?}, existing: {:?}",
+                    table.table_info(),
+                    existing.table_info()
+                );
+                return TableExistsSnafu { table: name }.fail()?;
+            }
+            Ok(Some(existing.clone()))
+        } else {
+            Ok(tables.insert(name, table))
+        }
     }
 
     fn deregister_table(&self, name: &str) -> Result<Option<TableRef>> {
@@ -315,7 +345,7 @@ mod tests {
             .unwrap()
             .is_none());
         assert!(provider.table_exist(table_name).unwrap());
-        let other_table = NumbersTable::default();
+        let other_table = NumbersTable::new(12);
         let result = provider.register_table(table_name.to_string(), Arc::new(other_table));
         let err = result.err().unwrap();
         assert!(err.backtrace_opt().is_some());
@@ -340,4 +370,34 @@ mod tests {
             .downcast_ref::<MemoryCatalogManager>()
             .unwrap();
     }
+
+    #[tokio::test]
+    pub async fn test_catalog_deregister_table() {
+        let catalog = MemoryCatalogManager::default();
+        let schema = catalog
+            .schema(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME)
+            .unwrap()
+            .unwrap();
+
+        let register_table_req = RegisterTableRequest {
+            catalog: DEFAULT_CATALOG_NAME.to_string(),
+            schema: DEFAULT_SCHEMA_NAME.to_string(),
+            table_name: "numbers".to_string(),
+            table_id: 2333,
+            table: Arc::new(NumbersTable::default()),
+        };
+        catalog.register_table(register_table_req).await.unwrap();
+        assert!(schema.table_exist("numbers").unwrap());
+
+        let deregister_table_req = DeregisterTableRequest {
+            catalog: DEFAULT_CATALOG_NAME.to_string(),
+            schema: DEFAULT_SCHEMA_NAME.to_string(),
+            table_name: "numbers".to_string(),
+        };
+        catalog
+            .deregister_table(deregister_table_req)
+            .await
+            .unwrap();
+        assert!(!schema.table_exist("numbers").unwrap());
+    }
 }
@@ -37,13 +37,13 @@ use tokio::sync::Mutex;
 
 use crate::error::{
     CatalogNotFoundSnafu, CreateTableSnafu, InvalidCatalogValueSnafu, InvalidTableSchemaSnafu,
-    OpenTableSnafu, Result, SchemaNotFoundSnafu, TableExistsSnafu,
+    OpenTableSnafu, Result, SchemaNotFoundSnafu, TableExistsSnafu, UnimplementedSnafu,
 };
 use crate::remote::{Kv, KvBackendRef};
 use crate::{
     handle_system_table_request, CatalogList, CatalogManager, CatalogProvider, CatalogProviderRef,
-    RegisterSchemaRequest, RegisterSystemTableRequest, RegisterTableRequest, SchemaProvider,
-    SchemaProviderRef,
+    DeregisterTableRequest, RegisterSchemaRequest, RegisterSystemTableRequest,
+    RegisterTableRequest, SchemaProvider, SchemaProviderRef,
 };
 
 /// Catalog manager based on metasrv.
@@ -154,8 +154,8 @@ impl RemoteCatalogManager {
         }
         let table_key = TableGlobalKey::parse(&String::from_utf8_lossy(&k))
             .context(InvalidCatalogValueSnafu)?;
-        let table_value = TableGlobalValue::parse(&String::from_utf8_lossy(&v))
-            .context(InvalidCatalogValueSnafu)?;
+        let table_value =
+            TableGlobalValue::from_bytes(&v).context(InvalidCatalogValueSnafu)?;
 
         info!(
             "Found catalog table entry, key: {}, value: {:?}",
@@ -411,7 +411,7 @@ impl CatalogManager for RemoteCatalogManager {
         Ok(())
     }
 
-    async fn register_table(&self, request: RegisterTableRequest) -> Result<usize> {
+    async fn register_table(&self, request: RegisterTableRequest) -> Result<bool> {
         let catalog_name = request.catalog;
         let schema_name = request.schema;
         let catalog_provider = self.catalog(&catalog_name)?.context(CatalogNotFoundSnafu {
@@ -430,10 +430,17 @@ impl CatalogManager for RemoteCatalogManager {
             .fail();
         }
         schema_provider.register_table(request.table_name, request.table)?;
-        Ok(1)
+        Ok(true)
     }
 
-    async fn register_schema(&self, request: RegisterSchemaRequest) -> Result<usize> {
+    async fn deregister_table(&self, _request: DeregisterTableRequest) -> Result<bool> {
+        UnimplementedSnafu {
+            operation: "deregister table",
+        }
+        .fail()
+    }
+
+    async fn register_schema(&self, request: RegisterSchemaRequest) -> Result<bool> {
         let catalog_name = request.catalog;
         let schema_name = request.schema;
         let catalog_provider = self.catalog(&catalog_name)?.context(CatalogNotFoundSnafu {
@@ -441,7 +448,7 @@ impl CatalogManager for RemoteCatalogManager {
         })?;
         let schema_provider = self.new_schema_provider(&catalog_name, &schema_name);
         catalog_provider.register_schema(schema_name, schema_provider)?;
-        Ok(1)
+        Ok(true)
     }
 
     async fn register_system_table(&self, request: RegisterSystemTableRequest) -> Result<()> {
@@ -456,7 +456,7 @@ mod tests {
     pub async fn prepare_table_engine() -> (TempDir, TableEngineRef) {
         let dir = TempDir::new("system-table-test").unwrap();
         let store_dir = dir.path().to_string_lossy();
-        let accessor = opendal::services::fs::Builder::default()
+        let accessor = object_store::backend::fs::Builder::default()
            .root(&store_dir)
            .build()
            .unwrap();
src/catalog/tests/local_catalog_tests.rs (new file): 132 lines

// Copyright 2022 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#[cfg(test)]
mod tests {
    use std::sync::Arc;

    use catalog::local::LocalCatalogManager;
    use catalog::{CatalogManager, RegisterTableRequest};
    use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
    use common_telemetry::{error, info};
    use mito::config::EngineConfig;
    use table::table::numbers::NumbersTable;
    use table::TableRef;
    use tokio::sync::Mutex;

    async fn create_local_catalog_manager() -> Result<LocalCatalogManager, catalog::error::Error> {
        let (_dir, object_store) =
            mito::table::test_util::new_test_object_store("setup_mock_engine_and_table").await;
        let mock_engine = Arc::new(mito::table::test_util::MockMitoEngine::new(
            EngineConfig::default(),
            mito::table::test_util::MockEngine::default(),
            object_store,
        ));
        let catalog_manager = LocalCatalogManager::try_new(mock_engine).await.unwrap();
        catalog_manager.start().await?;
        Ok(catalog_manager)
    }

    #[tokio::test]
    async fn test_duplicate_register() {
        let catalog_manager = create_local_catalog_manager().await.unwrap();
        let request = RegisterTableRequest {
            catalog: DEFAULT_CATALOG_NAME.to_string(),
            schema: DEFAULT_SCHEMA_NAME.to_string(),
            table_name: "test_table".to_string(),
            table_id: 42,
            table: Arc::new(NumbersTable::new(42)),
        };
        assert!(catalog_manager
            .register_table(request.clone())
            .await
            .unwrap());

        // registering a table with the same table id succeeds and returns false.
        assert!(!catalog_manager.register_table(request).await.unwrap());

        let err = catalog_manager
            .register_table(RegisterTableRequest {
                catalog: DEFAULT_CATALOG_NAME.to_string(),
                schema: DEFAULT_SCHEMA_NAME.to_string(),
                table_name: "test_table".to_string(),
                table_id: 43,
                table: Arc::new(NumbersTable::new(43)),
            })
            .await
            .unwrap_err();
        assert!(
            err.to_string()
                .contains("Table `greptime.public.test_table` already exists"),
            "Actual error message: {}",
            err
        );
    }

    #[test]
    fn test_concurrent_register() {
        common_telemetry::init_default_ut_logging();
        let rt = Arc::new(tokio::runtime::Builder::new_multi_thread().build().unwrap());
        let catalog_manager =
            Arc::new(rt.block_on(async { create_local_catalog_manager().await.unwrap() }));

        let succeed: Arc<Mutex<Option<TableRef>>> = Arc::new(Mutex::new(None));

        let mut handles = Vec::with_capacity(8);
        for i in 0..8 {
            let catalog = catalog_manager.clone();
            let succeed = succeed.clone();
            let handle = rt.spawn(async move {
                let table_id = 42 + i;
                let table = Arc::new(NumbersTable::new(table_id));
                let req = RegisterTableRequest {
                    catalog: DEFAULT_CATALOG_NAME.to_string(),
                    schema: DEFAULT_SCHEMA_NAME.to_string(),
                    table_name: "test_table".to_string(),
                    table_id,
                    table: table.clone(),
                };
                match catalog.register_table(req).await {
                    Ok(res) => {
                        if res {
                            let mut succeed = succeed.lock().await;
                            info!("Successfully registered table: {}", table_id);
                            *succeed = Some(table);
                        }
                    }
                    Err(_) => {
                        error!("Failed to register table {}", table_id);
                    }
                }
            });
            handles.push(handle);
        }

        rt.block_on(async move {
            for handle in handles {
                handle.await.unwrap();
            }
            let guard = succeed.lock().await;
            let table = guard.as_ref().unwrap();
            let table_registered = catalog_manager
                .table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, "test_table")
                .unwrap()
                .unwrap();
            assert_eq!(
                table_registered.table_info().ident.table_id,
                table.table_info().ident.table_id
            );
        });
    }
}
@@ -217,7 +217,7 @@ impl TableEngine for MockTableEngine {
         &self,
         _ctx: &EngineContext,
         _request: DropTableRequest,
-    ) -> table::Result<()> {
+    ) -> table::Result<bool> {
         unimplemented!()
     }
 }
@@ -202,7 +202,7 @@ mod tests {
             table_id,
             table,
         };
-        assert_eq!(1, catalog_manager.register_table(reg_req).await.unwrap());
+        assert!(catalog_manager.register_table(reg_req).await.unwrap());
         assert_eq!(
             HashSet::from([table_name, "numbers".to_string()]),
             default_schema
@@ -287,7 +287,7 @@ mod tests {
             .register_schema(schema_name.clone(), schema.clone())
             .expect("Register schema should not fail");
         assert!(prev.is_none());
-        assert_eq!(1, catalog_manager.register_table(reg_req).await.unwrap());
+        assert!(catalog_manager.register_table(reg_req).await.unwrap());
 
         assert_eq!(
             HashSet::from([schema_name.clone()]),
@@ -13,7 +13,7 @@ common-error = { path = "../common/error" }
|
||||
common-grpc = { path = "../common/grpc" }
|
||||
common-query = { path = "../common/query" }
|
||||
common-recordbatch = { path = "../common/recordbatch" }
|
||||
common-insert = { path = "../common/insert" }
|
||||
common-grpc-expr = { path = "../common/grpc-expr" }
|
||||
common-time = { path = "../common/time" }
|
||||
datafusion = { git = "https://github.com/apache/arrow-datafusion.git", branch = "arrow2", features = [
|
||||
"simd",
|
||||
|
||||
@@ -12,11 +12,9 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::collections::HashMap;
|
||||
|
||||
use api::v1::codec::InsertBatch;
|
||||
use api::v1::*;
|
||||
use client::{Client, Database};
|
||||
|
||||
fn main() {
|
||||
tracing::subscriber::set_global_default(tracing_subscriber::FmtSubscriber::builder().finish())
|
||||
.unwrap();
|
||||
@@ -29,19 +27,19 @@ async fn run() {
|
||||
let client = Client::with_urls(vec!["127.0.0.1:3001"]);
|
||||
let db = Database::new("greptime", client);
|
||||
|
||||
let (columns, row_count) = insert_data();
|
||||
|
||||
let expr = InsertExpr {
|
||||
schema_name: "public".to_string(),
|
||||
table_name: "demo".to_string(),
|
||||
expr: Some(insert_expr::Expr::Values(insert_expr::Values {
|
||||
values: insert_batches(),
|
||||
})),
|
||||
options: HashMap::default(),
|
||||
region_number: 0,
|
||||
columns,
|
||||
row_count,
|
||||
};
|
||||
db.insert(expr).await.unwrap();
|
||||
}
|
||||
|
||||
fn insert_batches() -> Vec<Vec<u8>> {
|
||||
fn insert_data() -> (Vec<Column>, u32) {
|
||||
const SEMANTIC_TAG: i32 = 0;
|
||||
const SEMANTIC_FIELD: i32 = 1;
|
||||
const SEMANTIC_TS: i32 = 2;
|
||||
@@ -101,9 +99,8 @@ fn insert_batches() -> Vec<Vec<u8>> {
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let insert_batch = InsertBatch {
|
||||
columns: vec![host_column, cpu_column, mem_column, ts_column],
|
||||
(
|
||||
vec![host_column, cpu_column, mem_column, ts_column],
|
||||
row_count,
|
||||
};
|
||||
vec![insert_batch.into()]
|
||||
)
|
||||
}
|
||||
|
||||
@@ -58,7 +58,19 @@ impl Admin {
|
||||
header: Some(header),
|
||||
expr: Some(admin_expr::Expr::Alter(expr)),
|
||||
};
|
||||
Ok(self.do_requests(vec![expr]).await?.remove(0))
|
||||
self.do_request(expr).await
|
||||
}
|
||||
|
||||
pub async fn drop_table(&self, expr: DropTableExpr) -> Result<AdminResult> {
|
||||
let header = ExprHeader {
|
||||
version: PROTOCOL_VERSION,
|
||||
};
|
||||
let expr = AdminExpr {
|
||||
header: Some(header),
|
||||
expr: Some(admin_expr::Expr::DropTable(expr)),
|
||||
};
|
||||
|
||||
self.do_request(expr).await
|
||||
}
|
||||
|
||||
/// Invariants: the lengths of input vec (`Vec<AdminExpr>`) and output vec (`Vec<AdminResult>`) are equal.
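
Note: with `drop_table` delegating to the shared `do_request` helper, dropping a table over gRPC becomes a single admin call. A hedged usage sketch (the way an `Admin` instance is obtained and the exact `DropTableExpr` field types are assumed from the surrounding diff, not verified against the crate):

    async fn drop_demo_table(admin: &Admin) -> Result<AdminResult> {
        let expr = DropTableExpr {
            catalog_name: "greptime".to_string(),
            schema_name: "public".to_string(),
            table_name: "demo".to_string(),
        };
        admin.drop_table(expr).await
    }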

@@ -23,7 +23,7 @@ use api::v1::{
};
use common_error::status_code::StatusCode;
use common_grpc::{AsExecutionPlan, DefaultAsPlanImpl};
use common_insert::column_to_vector;
use common_grpc_expr::column_to_vector;
use common_query::Output;
use common_recordbatch::{RecordBatch, RecordBatches};
use datafusion::physical_plan::ExecutionPlan;

@@ -103,7 +103,7 @@ pub enum Error {
    #[snafu(display("Failed to convert column to vector, source: {}", source))]
    ColumnToVector {
        #[snafu(backtrace)]
        source: common_insert::error::Error,
        source: common_grpc_expr::error::Error,
    },
}

@@ -97,10 +97,7 @@ mod tests {
    #[test]
    fn test_start_node_error() {
        fn throw_datanode_error() -> StdResult<datanode::error::Error> {
            datanode::error::MissingFieldSnafu {
                field: "test_field",
            }
            .fail()
            datanode::error::MissingNodeIdSnafu {}.fail()
        }

        let e = throw_datanode_error()

@@ -21,6 +21,7 @@ use frontend::mysql::MysqlOptions;
use frontend::opentsdb::OpentsdbOptions;
use frontend::postgres::PostgresOptions;
use meta_client::MetaClientOpts;
use servers::http::HttpOptions;
use servers::Mode;
use snafu::ResultExt;

@@ -96,7 +97,10 @@ impl TryFrom<StartCommand> for FrontendOptions {
        };

        if let Some(addr) = cmd.http_addr {
            opts.http_addr = Some(addr);
            opts.http_options = Some(HttpOptions {
                addr,
                ..Default::default()
            });
        }
        if let Some(addr) = cmd.grpc_addr {
            opts.grpc_options = Some(GrpcOptions {
@@ -141,6 +145,8 @@ impl TryFrom<StartCommand> for FrontendOptions {

#[cfg(test)]
mod tests {
    use std::time::Duration;

    use super::*;

    #[test]
@@ -157,7 +163,7 @@ mod tests {
        };

        let opts: FrontendOptions = command.try_into().unwrap();
        assert_eq!(opts.http_addr, Some("127.0.0.1:1234".to_string()));
        assert_eq!(opts.http_options.as_ref().unwrap().addr, "127.0.0.1:1234");
        assert_eq!(opts.mysql_options.as_ref().unwrap().addr, "127.0.0.1:5678");
        assert_eq!(
            opts.postgres_options.as_ref().unwrap().addr,
@@ -188,4 +194,33 @@ mod tests {

        assert!(!opts.influxdb_options.unwrap().enable);
    }

    #[test]
    fn test_read_from_config_file() {
        let command = StartCommand {
            http_addr: None,
            grpc_addr: None,
            mysql_addr: None,
            postgres_addr: None,
            opentsdb_addr: None,
            influxdb_enable: None,
            config_file: Some(format!(
                "{}/../../config/frontend.example.toml",
                std::env::current_dir().unwrap().as_path().to_str().unwrap()
            )),
            metasrv_addr: None,
        };

        let fe_opts = FrontendOptions::try_from(command).unwrap();
        assert_eq!(Mode::Distributed, fe_opts.mode);
        assert_eq!("127.0.0.1:3001".to_string(), fe_opts.datanode_rpc_addr);
        assert_eq!(
            "127.0.0.1:4000".to_string(),
            fe_opts.http_options.as_ref().unwrap().addr
        );
        assert_eq!(
            Duration::from_secs(30),
            fe_opts.http_options.as_ref().unwrap().timeout
        );
    }
}

@@ -25,6 +25,7 @@ use frontend::opentsdb::OpentsdbOptions;
use frontend::postgres::PostgresOptions;
use frontend::prometheus::PrometheusOptions;
use serde::{Deserialize, Serialize};
use servers::http::HttpOptions;
use servers::Mode;
use snafu::ResultExt;
use tokio::try_join;
@@ -61,7 +62,7 @@ impl SubCommand {

#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct StandaloneOptions {
    pub http_addr: Option<String>,
    pub http_options: Option<HttpOptions>,
    pub grpc_options: Option<GrpcOptions>,
    pub mysql_options: Option<MysqlOptions>,
    pub postgres_options: Option<PostgresOptions>,
@@ -71,12 +72,13 @@ pub struct StandaloneOptions {
    pub mode: Mode,
    pub wal_dir: String,
    pub storage: ObjectStoreConfig,
    pub enable_memory_catalog: bool,
}

impl Default for StandaloneOptions {
    fn default() -> Self {
        Self {
            http_addr: Some("127.0.0.1:4000".to_string()),
            http_options: Some(HttpOptions::default()),
            grpc_options: Some(GrpcOptions::default()),
            mysql_options: Some(MysqlOptions::default()),
            postgres_options: Some(PostgresOptions::default()),
@@ -86,6 +88,7 @@ impl Default for StandaloneOptions {
            mode: Mode::Standalone,
            wal_dir: "/tmp/greptimedb/wal".to_string(),
            storage: ObjectStoreConfig::default(),
            enable_memory_catalog: false,
        }
    }
}
@@ -93,7 +96,7 @@ impl Default for StandaloneOptions {
impl StandaloneOptions {
    fn frontend_options(self) -> FrontendOptions {
        FrontendOptions {
            http_addr: self.http_addr,
            http_options: self.http_options,
            grpc_options: self.grpc_options,
            mysql_options: self.mysql_options,
            postgres_options: self.postgres_options,
@@ -110,6 +113,7 @@ impl StandaloneOptions {
        DatanodeOptions {
            wal_dir: self.wal_dir,
            storage: self.storage,
            enable_memory_catalog: self.enable_memory_catalog,
            ..Default::default()
        }
    }
@@ -131,18 +135,22 @@ struct StartCommand {
    influxdb_enable: bool,
    #[clap(short, long)]
    config_file: Option<String>,
    #[clap(short = 'm', long = "memory-catalog")]
    enable_memory_catalog: bool,
}

impl StartCommand {
    async fn run(self) -> Result<()> {
        let enable_memory_catalog = self.enable_memory_catalog;
        let config_file = self.config_file.clone();
        let fe_opts = FrontendOptions::try_from(self)?;
        let dn_opts: DatanodeOptions = {
            let opts: StandaloneOptions = if let Some(path) = config_file {
            let mut opts: StandaloneOptions = if let Some(path) = config_file {
                toml_loader::from_file!(&path)?
            } else {
                StandaloneOptions::default()
            };
            opts.enable_memory_catalog = enable_memory_catalog;
            opts.datanode_options()
        };

@@ -156,8 +164,15 @@ impl StartCommand {
            .context(StartDatanodeSnafu)?;
        let mut frontend = build_frontend(fe_opts, &dn_opts, datanode.get_instance()).await?;

        // Start the datanode instance before starting services, to avoid requests coming in before internal components are started.
        datanode
            .start_instance()
            .await
            .context(StartDatanodeSnafu)?;
        info!("Datanode instance started");

        try_join!(
            async { datanode.start().await.context(StartDatanodeSnafu) },
            async { datanode.start_services().await.context(StartDatanodeSnafu) },
            async { frontend.start().await.context(StartFrontendSnafu) }
        )?;
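
Note: the ordering in this hunk is the point of the change: `start_instance` must finish before `start_services` begins accepting traffic, otherwise a request could reach the datanode while its internal components are still initializing. Schematically (the constructor name is illustrative, not the actual API):

    // 1. Build the datanode and frontend without serving anything yet.
    let mut datanode = Datanode::new(dn_opts).await?;
    // 2. Bring up internal components only.
    datanode.start_instance().await?;
    // 3. Only now serve datanode services and the frontend concurrently.
    try_join!(
        async { datanode.start_services().await },
        async { frontend.start().await },
    )?;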

@@ -199,7 +214,10 @@ impl TryFrom<StartCommand> for FrontendOptions {
        opts.mode = Mode::Standalone;

        if let Some(addr) = cmd.http_addr {
            opts.http_addr = Some(addr);
            opts.http_options = Some(HttpOptions {
                addr,
                ..Default::default()
            });
        }
        if let Some(addr) = cmd.rpc_addr {
            // The frontend gRPC addr conflicts with the datanode's default gRPC addr.
@@ -249,6 +267,8 @@ impl TryFrom<StartCommand> for FrontendOptions {

#[cfg(test)]
mod tests {
    use std::time::Duration;

    use super::*;

    #[test]
@@ -264,12 +284,20 @@ mod tests {
            std::env::current_dir().unwrap().as_path().to_str().unwrap()
        )),
        influxdb_enable: false,
        enable_memory_catalog: false,
    };

    let fe_opts = FrontendOptions::try_from(cmd).unwrap();
    assert_eq!(Mode::Standalone, fe_opts.mode);
    assert_eq!("127.0.0.1:3001".to_string(), fe_opts.datanode_rpc_addr);
    assert_eq!(Some("127.0.0.1:4000".to_string()), fe_opts.http_addr);
    assert_eq!(
        "127.0.0.1:4000".to_string(),
        fe_opts.http_options.as_ref().unwrap().addr
    );
    assert_eq!(
        Duration::from_secs(30),
        fe_opts.http_options.as_ref().unwrap().timeout
    );
    assert_eq!(
        "127.0.0.1:4001".to_string(),
        fe_opts.grpc_options.unwrap().addr

@@ -261,6 +261,10 @@ macro_rules! define_catalog_value {
            .context(DeserializeCatalogEntryValueSnafu { raw: s.as_ref() })
        }

        pub fn from_bytes(bytes: impl AsRef<[u8]>) -> Result<Self, Error> {
            Self::parse(&String::from_utf8_lossy(bytes.as_ref()))
        }

        pub fn as_bytes(&self) -> Result<Vec<u8>, Error> {
            Ok(serde_json::to_string(self)
                .context(SerializeCatalogEntryValueSnafu)?

@@ -1,5 +1,5 @@
[package]
name = "common-insert"
name = "common-grpc-expr"
version = "0.1.0"
edition = "2021"
license = "Apache-2.0"
@@ -9,8 +9,10 @@ api = { path = "../../api" }
async-trait = "0.1"
common-base = { path = "../base" }
common-error = { path = "../error" }
common-grpc = { path = "../grpc" }
common-telemetry = { path = "../telemetry" }
common-time = { path = "../time" }
common-catalog = { path = "../catalog" }
common-query = { path = "../query" }
datatypes = { path = "../../datatypes" }
snafu = { version = "0.7", features = ["backtraces"] }

234 src/common/grpc-expr/src/alter.rs Normal file
@@ -0,0 +1,234 @@
// Copyright 2022 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::sync::Arc;

use api::v1::alter_expr::Kind;
use api::v1::{AlterExpr, CreateExpr, DropColumns};
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use datatypes::schema::{ColumnSchema, SchemaBuilder, SchemaRef};
use snafu::{ensure, OptionExt, ResultExt};
use table::metadata::TableId;
use table::requests::{AddColumnRequest, AlterKind, AlterTableRequest, CreateTableRequest};

use crate::error::{
    ColumnNotFoundSnafu, CreateSchemaSnafu, InvalidColumnDefSnafu, MissingFieldSnafu,
    MissingTimestampColumnSnafu, Result,
};

/// Convert an [`AlterExpr`] to an optional [`AlterTableRequest`]
pub fn alter_expr_to_request(expr: AlterExpr) -> Result<Option<AlterTableRequest>> {
    match expr.kind {
        Some(Kind::AddColumns(add_columns)) => {
            let add_column_requests = add_columns
                .add_columns
                .into_iter()
                .map(|ac| {
                    let column_def = ac.column_def.context(MissingFieldSnafu {
                        field: "column_def",
                    })?;

                    let schema =
                        column_def
                            .try_as_column_schema()
                            .context(InvalidColumnDefSnafu {
                                column: &column_def.name,
                            })?;
                    Ok(AddColumnRequest {
                        column_schema: schema,
                        is_key: ac.is_key,
                    })
                })
                .collect::<Result<Vec<_>>>()?;

            let alter_kind = AlterKind::AddColumns {
                columns: add_column_requests,
            };

            let request = AlterTableRequest {
                catalog_name: expr.catalog_name,
                schema_name: expr.schema_name,
                table_name: expr.table_name,
                alter_kind,
            };
            Ok(Some(request))
        }
        Some(Kind::DropColumns(DropColumns { drop_columns })) => {
            let alter_kind = AlterKind::DropColumns {
                names: drop_columns.into_iter().map(|c| c.name).collect(),
            };

            let request = AlterTableRequest {
                catalog_name: expr.catalog_name,
                schema_name: expr.schema_name,
                table_name: expr.table_name,
                alter_kind,
            };
            Ok(Some(request))
        }
        None => Ok(None),
    }
}

pub fn create_table_schema(expr: &CreateExpr) -> Result<SchemaRef> {
    let column_schemas = expr
        .column_defs
        .iter()
        .map(|x| {
            x.try_as_column_schema()
                .context(InvalidColumnDefSnafu { column: &x.name })
        })
        .collect::<Result<Vec<ColumnSchema>>>()?;

    ensure!(
        column_schemas
            .iter()
            .any(|column| column.name == expr.time_index),
        MissingTimestampColumnSnafu {
            msg: format!("CreateExpr: {:?}", expr)
        }
    );

    let column_schemas = column_schemas
        .into_iter()
        .map(|column_schema| {
            if column_schema.name == expr.time_index {
                column_schema.with_time_index(true)
            } else {
                column_schema
            }
        })
        .collect::<Vec<_>>();

    Ok(Arc::new(
        SchemaBuilder::try_from(column_schemas)
            .context(CreateSchemaSnafu)?
            .build()
            .context(CreateSchemaSnafu)?,
    ))
}

pub fn create_expr_to_request(table_id: TableId, expr: CreateExpr) -> Result<CreateTableRequest> {
    let schema = create_table_schema(&expr)?;
    let primary_key_indices = expr
        .primary_keys
        .iter()
        .map(|key| {
            schema
                .column_index_by_name(key)
                .context(ColumnNotFoundSnafu {
                    column_name: key,
                    table_name: &expr.table_name,
                })
        })
        .collect::<Result<Vec<usize>>>()?;

    let catalog_name = expr
        .catalog_name
        .unwrap_or_else(|| DEFAULT_CATALOG_NAME.to_string());
    let schema_name = expr
        .schema_name
        .unwrap_or_else(|| DEFAULT_SCHEMA_NAME.to_string());

    let region_ids = if expr.region_ids.is_empty() {
        vec![0]
    } else {
        expr.region_ids
    };

    Ok(CreateTableRequest {
        id: table_id,
        catalog_name,
        schema_name,
        table_name: expr.table_name,
        desc: expr.desc,
        schema,
        region_numbers: region_ids,
        primary_key_indices,
        create_if_not_exists: expr.create_if_not_exists,
        table_options: expr.table_options,
    })
}

#[cfg(test)]
mod tests {
    use api::v1::{AddColumn, AddColumns, ColumnDataType, ColumnDef, DropColumn};
    use datatypes::prelude::ConcreteDataType;

    use super::*;

    #[test]
    fn test_alter_expr_to_request() {
        let expr = AlterExpr {
            catalog_name: None,
            schema_name: None,
            table_name: "monitor".to_string(),

            kind: Some(Kind::AddColumns(AddColumns {
                add_columns: vec![AddColumn {
                    column_def: Some(ColumnDef {
                        name: "mem_usage".to_string(),
                        datatype: ColumnDataType::Float64 as i32,
                        is_nullable: false,
                        default_constraint: None,
                    }),
                    is_key: false,
                }],
            })),
        };

        let alter_request = alter_expr_to_request(expr).unwrap().unwrap();
        assert_eq!(None, alter_request.catalog_name);
        assert_eq!(None, alter_request.schema_name);
        assert_eq!("monitor".to_string(), alter_request.table_name);
        let add_column = match alter_request.alter_kind {
            AlterKind::AddColumns { mut columns } => columns.pop().unwrap(),
            _ => unreachable!(),
        };

        assert!(!add_column.is_key);
        assert_eq!("mem_usage", add_column.column_schema.name);
        assert_eq!(
            ConcreteDataType::float64_datatype(),
            add_column.column_schema.data_type
        );
    }

    #[test]
    fn test_drop_column_expr() {
        let expr = AlterExpr {
            catalog_name: Some("test_catalog".to_string()),
            schema_name: Some("test_schema".to_string()),
            table_name: "monitor".to_string(),

            kind: Some(Kind::DropColumns(DropColumns {
                drop_columns: vec![DropColumn {
                    name: "mem_usage".to_string(),
                }],
            })),
        };

        let alter_request = alter_expr_to_request(expr).unwrap().unwrap();
        assert_eq!(Some("test_catalog".to_string()), alter_request.catalog_name);
        assert_eq!(Some("test_schema".to_string()), alter_request.schema_name);
        assert_eq!("monitor".to_string(), alter_request.table_name);

        let mut drop_names = match alter_request.alter_kind {
            AlterKind::DropColumns { names } => names,
            _ => unreachable!(),
        };
        assert_eq!(1, drop_names.len());
        assert_eq!("mem_usage".to_string(), drop_names.pop().unwrap());
    }
}
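
Note: `alter_expr_to_request` deliberately returns `Ok(None)` when the `AlterExpr` carries no `kind`; callers are expected to treat that as a successful no-op, which is exactly what the datanode's `handle_alter` does further below. A hedged caller sketch (`execute_alter` is a hypothetical executor, not part of this crate):

    fn apply_alter(expr: AlterExpr) -> Result<()> {
        match alter_expr_to_request(expr)? {
            // A concrete AddColumns/DropColumns request to run.
            Some(request) => execute_alter(request),
            // No kind set: nothing to alter, report success.
            None => Ok(()),
        }
    }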

@@ -22,7 +22,7 @@ use snafu::{Backtrace, ErrorCompat};
#[derive(Debug, Snafu)]
#[snafu(visibility(pub))]
pub enum Error {
    #[snafu(display("Column {} not found in table {}", column_name, table_name))]
    #[snafu(display("Column `{}` not found in table `{}`", column_name, table_name))]
    ColumnNotFound {
        column_name: String,
        table_name: String,
@@ -57,8 +57,8 @@ pub enum Error {
        backtrace: Backtrace,
    },

    #[snafu(display("Missing timestamp column in request"))]
    MissingTimestampColumn { backtrace: Backtrace },
    #[snafu(display("Missing timestamp column, msg: {}", msg))]
    MissingTimestampColumn { msg: String, backtrace: Backtrace },

    #[snafu(display("Invalid column proto: {}", err_msg))]
    InvalidColumnProto {
@@ -70,6 +70,26 @@ pub enum Error {
        #[snafu(backtrace)]
        source: datatypes::error::Error,
    },

    #[snafu(display("Missing required field in protobuf, field: {}", field))]
    MissingField { field: String, backtrace: Backtrace },

    #[snafu(display("Invalid column default constraint, source: {}", source))]
    ColumnDefaultConstraint {
        #[snafu(backtrace)]
        source: datatypes::error::Error,
    },

    #[snafu(display(
        "Invalid column proto definition, column: {}, source: {}",
        column,
        source
    ))]
    InvalidColumnDef {
        column: String,
        #[snafu(backtrace)]
        source: api::error::Error,
    },
}

pub type Result<T> = std::result::Result<T, Error>;
@@ -87,6 +107,9 @@ impl ErrorExt for Error {
            | Error::MissingTimestampColumn { .. } => StatusCode::InvalidArguments,
            Error::InvalidColumnProto { .. } => StatusCode::InvalidArguments,
            Error::CreateVector { .. } => StatusCode::InvalidArguments,
            Error::MissingField { .. } => StatusCode::InvalidArguments,
            Error::ColumnDefaultConstraint { source, .. } => source.status_code(),
            Error::InvalidColumnDef { source, .. } => source.status_code(),
        }
    }
    fn backtrace_opt(&self) -> Option<&Backtrace> {

@@ -14,11 +14,9 @@

use std::collections::hash_map::Entry;
use std::collections::{HashMap, HashSet};
use std::ops::Deref;
use std::sync::Arc;

use api::helper::ColumnDataTypeWrapper;
use api::v1::codec::InsertBatch;
use api::v1::column::{SemanticType, Values};
use api::v1::{AddColumn, AddColumns, Column, ColumnDataType, ColumnDef, CreateExpr};
use common_base::BitVec;
@@ -35,9 +33,8 @@ use table::requests::{AddColumnRequest, AlterKind, AlterTableRequest, InsertRequ
use table::Table;

use crate::error::{
    ColumnDataTypeSnafu, ColumnNotFoundSnafu, CreateVectorSnafu, DecodeInsertSnafu,
    DuplicatedTimestampColumnSnafu, IllegalInsertDataSnafu, InvalidColumnProtoSnafu,
    MissingTimestampColumnSnafu, Result,
    ColumnDataTypeSnafu, ColumnNotFoundSnafu, CreateVectorSnafu, DuplicatedTimestampColumnSnafu,
    IllegalInsertDataSnafu, InvalidColumnProtoSnafu, MissingTimestampColumnSnafu, Result,
};
const TAG_SEMANTIC_TYPE: i32 = SemanticType::Tag as i32;
const TIMESTAMP_SEMANTIC_TYPE: i32 = SemanticType::Timestamp as i32;
@@ -52,35 +49,25 @@ fn build_column_def(column_name: &str, datatype: i32, nullable: bool) -> ColumnD
    }
}

pub fn find_new_columns(
    schema: &SchemaRef,
    insert_batches: &[InsertBatch],
) -> Result<Option<AddColumns>> {
pub fn find_new_columns(schema: &SchemaRef, columns: &[Column]) -> Result<Option<AddColumns>> {
    let mut columns_to_add = Vec::default();
    let mut new_columns: HashSet<String> = HashSet::default();

    for InsertBatch { columns, row_count } in insert_batches {
        if *row_count == 0 || columns.is_empty() {
            continue;
        }

        for Column {
            column_name,
            semantic_type,
            datatype,
            ..
        } in columns
    for Column {
        column_name,
        semantic_type,
        datatype,
        ..
    } in columns
        {
            if schema.column_schema_by_name(column_name).is_none() && !new_columns.contains(column_name)
            {
    {
        if schema.column_schema_by_name(column_name).is_none()
            && !new_columns.contains(column_name)
        {
                let column_def = Some(build_column_def(column_name, *datatype, true));
                columns_to_add.push(AddColumn {
                    column_def,
                    is_key: *semantic_type == TAG_SEMANTIC_TYPE,
                });
                new_columns.insert(column_name.to_string());
            }
            let column_def = Some(build_column_def(column_name, *datatype, true));
            columns_to_add.push(AddColumn {
                column_def,
                is_key: *semantic_type == TAG_SEMANTIC_TYPE,
            });
            new_columns.insert(column_name.to_string());
        }
    }

@@ -201,89 +188,84 @@ pub fn build_create_expr_from_insertion(
    schema_name: &str,
    table_id: Option<TableId>,
    table_name: &str,
    insert_batches: &[InsertBatch],
    columns: &[Column],
) -> Result<CreateExpr> {
    let mut new_columns: HashSet<String> = HashSet::default();
    let mut column_defs = Vec::default();
    let mut primary_key_indices = Vec::default();
    let mut timestamp_index = usize::MAX;

    for InsertBatch { columns, row_count } in insert_batches {
        if *row_count == 0 || columns.is_empty() {
            continue;
        }

        for Column {
            column_name,
            semantic_type,
            datatype,
            ..
        } in columns
        {
            if !new_columns.contains(column_name) {
                let mut is_nullable = true;
                match *semantic_type {
                    TAG_SEMANTIC_TYPE => primary_key_indices.push(column_defs.len()),
                    TIMESTAMP_SEMANTIC_TYPE => {
                        ensure!(
                            timestamp_index == usize::MAX,
                            DuplicatedTimestampColumnSnafu {
                                exists: &columns[timestamp_index].column_name,
                                duplicated: column_name,
                            }
                        );
                        timestamp_index = column_defs.len();
                        // Timestamp column must not be null.
                        is_nullable = false;
                    }
                    _ => {}
    for Column {
        column_name,
        semantic_type,
        datatype,
        ..
    } in columns
    {
        if !new_columns.contains(column_name) {
            let mut is_nullable = true;
            match *semantic_type {
                TAG_SEMANTIC_TYPE => primary_key_indices.push(column_defs.len()),
                TIMESTAMP_SEMANTIC_TYPE => {
                    ensure!(
                        timestamp_index == usize::MAX,
                        DuplicatedTimestampColumnSnafu {
                            exists: &columns[timestamp_index].column_name,
                            duplicated: column_name,
                        }
                    );
                    timestamp_index = column_defs.len();
                    // Timestamp column must not be null.
                    is_nullable = false;
                }

                let column_def = build_column_def(column_name, *datatype, is_nullable);
                column_defs.push(column_def);
                new_columns.insert(column_name.to_string());
                _ => {}
            }

            let column_def = build_column_def(column_name, *datatype, is_nullable);
            column_defs.push(column_def);
            new_columns.insert(column_name.to_string());
        }

            ensure!(timestamp_index != usize::MAX, MissingTimestampColumnSnafu);
            let timestamp_field_name = columns[timestamp_index].column_name.clone();

            let primary_keys = primary_key_indices
                .iter()
                .map(|idx| columns[*idx].column_name.clone())
                .collect::<Vec<_>>();

            let expr = CreateExpr {
                catalog_name: Some(catalog_name.to_string()),
                schema_name: Some(schema_name.to_string()),
                table_name: table_name.to_string(),
                desc: Some("Created on insertion".to_string()),
                column_defs,
                time_index: timestamp_field_name,
                primary_keys,
                create_if_not_exists: true,
                table_options: Default::default(),
                table_id,
                region_ids: vec![0], // TODO:(hl): region id should be allocated by frontend
            };

            return Ok(expr);
        }

        IllegalInsertDataSnafu.fail()
    ensure!(
        timestamp_index != usize::MAX,
        MissingTimestampColumnSnafu { msg: table_name }
    );
    let timestamp_field_name = columns[timestamp_index].column_name.clone();

    let primary_keys = primary_key_indices
        .iter()
        .map(|idx| columns[*idx].column_name.clone())
        .collect::<Vec<_>>();

    let expr = CreateExpr {
        catalog_name: Some(catalog_name.to_string()),
        schema_name: Some(schema_name.to_string()),
        table_name: table_name.to_string(),
        desc: Some("Created on insertion".to_string()),
        column_defs,
        time_index: timestamp_field_name,
        primary_keys,
        create_if_not_exists: true,
        table_options: Default::default(),
        table_id,
        region_ids: vec![0], // TODO:(hl): region id should be allocated by frontend
    };

    Ok(expr)
}

pub fn insertion_expr_to_request(
    catalog_name: &str,
    schema_name: &str,
    table_name: &str,
    insert_batches: Vec<InsertBatch>,
    insert_batches: Vec<(Vec<Column>, u32)>,
    table: Arc<dyn Table>,
) -> Result<InsertRequest> {
    let schema = table.schema();
    let mut columns_builders = HashMap::with_capacity(schema.column_schemas().len());

    for InsertBatch { columns, row_count } in insert_batches {
    for (columns, row_count) in insert_batches {
        for Column {
            column_name,
            values,
@@ -329,14 +311,6 @@ pub fn insertion_expr_to_request(
    })
}

#[inline]
pub fn insert_batches(bytes_vec: &[Vec<u8>]) -> Result<Vec<InsertBatch>> {
    bytes_vec
        .iter()
        .map(|bytes| bytes.deref().try_into().context(DecodeInsertSnafu))
        .collect()
}

fn add_values_to_builder(
    builder: &mut VectorBuilder,
    values: Values,
@@ -463,9 +437,8 @@ mod tests {
    use std::sync::Arc;

    use api::helper::ColumnDataTypeWrapper;
    use api::v1::codec::InsertBatch;
    use api::v1::column::{self, SemanticType, Values};
    use api::v1::{insert_expr, Column, ColumnDataType};
    use api::v1::{Column, ColumnDataType};
    use common_base::BitVec;
    use common_query::physical_plan::PhysicalPlanRef;
    use common_query::prelude::Expr;
@@ -479,11 +452,12 @@ mod tests {
    use table::Table;

    use super::{
        build_create_expr_from_insertion, convert_values, find_new_columns, insert_batches,
        insertion_expr_to_request, is_null, TAG_SEMANTIC_TYPE, TIMESTAMP_SEMANTIC_TYPE,
        build_create_expr_from_insertion, convert_values, insertion_expr_to_request, is_null,
        TAG_SEMANTIC_TYPE, TIMESTAMP_SEMANTIC_TYPE,
    };
    use crate::error;
    use crate::error::ColumnDataTypeSnafu;
    use crate::insert::find_new_columns;

    #[inline]
    fn build_column_schema(
@@ -508,11 +482,10 @@ mod tests {

        assert!(build_create_expr_from_insertion("", "", table_id, table_name, &[]).is_err());

        let mock_batch_bytes = mock_insert_batches();
        let insert_batches = insert_batches(&mock_batch_bytes).unwrap();
        let insert_batch = mock_insert_batch();

        let create_expr =
            build_create_expr_from_insertion("", "", table_id, table_name, &insert_batches)
            build_create_expr_from_insertion("", "", table_id, table_name, &insert_batch.0)
                .unwrap();

        assert_eq!(table_id, create_expr.table_id);
@@ -598,9 +571,9 @@ mod tests {

        assert!(find_new_columns(&schema, &[]).unwrap().is_none());

        let mock_insert_bytes = mock_insert_batches();
        let insert_batches = insert_batches(&mock_insert_bytes).unwrap();
        let add_columns = find_new_columns(&schema, &insert_batches).unwrap().unwrap();
        let insert_batch = mock_insert_batch();

        let add_columns = find_new_columns(&schema, &insert_batch.0).unwrap().unwrap();

        assert_eq!(2, add_columns.add_columns.len());
        let host_column = &add_columns.add_columns[0];
@@ -630,10 +603,7 @@ mod tests {
    fn test_insertion_expr_to_request() {
        let table: Arc<dyn Table> = Arc::new(DemoTable {});

        let values = insert_expr::Values {
            values: mock_insert_batches(),
        };
        let insert_batches = insert_batches(&values.values).unwrap();
        let insert_batches = vec![mock_insert_batch()];
        let insert_req =
            insertion_expr_to_request("greptime", "public", "demo", insert_batches, table).unwrap();

@@ -731,7 +701,7 @@ mod tests {
        }
    }

    fn mock_insert_batches() -> Vec<Vec<u8>> {
    fn mock_insert_batch() -> (Vec<Column>, u32) {
        let row_count = 2;

        let host_vals = column::Values {
@@ -782,10 +752,9 @@ mod tests {
            datatype: ColumnDataType::Timestamp as i32,
        };

        let insert_batch = InsertBatch {
            columns: vec![host_column, cpu_column, mem_column, ts_column],
        (
            vec![host_column, cpu_column, mem_column, ts_column],
            row_count,
        };
        vec![insert_batch.into()]
        )
    }
}

@@ -1,3 +1,4 @@
#![feature(assert_matches)]
// Copyright 2022 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
@@ -12,9 +13,12 @@
// See the License for the specific language governing permissions and
// limitations under the License.

mod alter;
pub mod error;
mod insert;

pub use alter::{alter_expr_to_request, create_expr_to_request, create_table_schema};
pub use insert::{
    build_alter_table_request, build_create_expr_from_insertion, column_to_vector,
    find_new_columns, insert_batches, insertion_expr_to_request,
    find_new_columns, insertion_expr_to_request,
};

@@ -14,7 +14,6 @@

use std::collections::HashMap;

use api::v1::codec::InsertBatch;
use api::v1::column::{SemanticType, Values};
use api::v1::{Column, ColumnDataType};
use common_base::BitVec;
@@ -24,12 +23,14 @@ use crate::error::{Result, TypeMismatchSnafu};

type ColumnName = String;

type RowCount = u32;

// TODO(fys): will remove in the future.
#[derive(Default)]
pub struct LinesWriter {
    column_name_index: HashMap<ColumnName, usize>,
    null_masks: Vec<BitVec>,
    batch: InsertBatch,
    batch: (Vec<Column>, RowCount),
    lines: usize,
}

@@ -171,20 +172,20 @@ impl LinesWriter {

    pub fn commit(&mut self) {
        let batch = &mut self.batch;
        batch.row_count += 1;
        batch.1 += 1;

        for i in 0..batch.columns.len() {
        for i in 0..batch.0.len() {
            let null_mask = &mut self.null_masks[i];
            if batch.row_count as usize > null_mask.len() {
            if batch.1 as usize > null_mask.len() {
                null_mask.push(true);
            }
        }
    }

    pub fn finish(mut self) -> InsertBatch {
    pub fn finish(mut self) -> (Vec<Column>, RowCount) {
        let null_masks = self.null_masks;
        for (i, null_mask) in null_masks.into_iter().enumerate() {
            let columns = &mut self.batch.columns;
            let columns = &mut self.batch.0;
            columns[i].null_mask = null_mask.into_vec();
        }
        self.batch
@@ -204,9 +205,9 @@ impl LinesWriter {
        let batch = &mut self.batch;
        let to_insert = self.lines;
        let mut null_mask = BitVec::with_capacity(to_insert);
        null_mask.extend(BitVec::repeat(true, batch.row_count as usize));
        null_mask.extend(BitVec::repeat(true, batch.1 as usize));
        self.null_masks.push(null_mask);
        batch.columns.push(Column {
        batch.0.push(Column {
            column_name: column_name.to_string(),
            semantic_type: semantic_type.into(),
            values: Some(Values::with_capacity(datatype, to_insert)),
@@ -217,7 +218,7 @@ impl LinesWriter {
            new_idx
        }
    };
    (column_idx, &mut self.batch.columns[column_idx])
    (column_idx, &mut self.batch.0[column_idx])
    }
}

@@ -282,9 +283,9 @@ mod tests {
        writer.commit();

        let insert_batch = writer.finish();
        assert_eq!(3, insert_batch.row_count);
        assert_eq!(3, insert_batch.1);

        let columns = insert_batch.columns;
        let columns = insert_batch.0;
        assert_eq!(9, columns.len());

        let column = &columns[0];
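
Note: after this change a finished line-protocol batch is a plain `(Vec<Column>, RowCount)` tuple rather than an `InsertBatch` protobuf message, so its parts can be moved straight into the new `InsertExpr` fields without an encode/decode round trip. A hedged sketch (assumes `InsertExpr` implements `Default` for the remaining fields, as prost-generated types typically do):

    let (columns, row_count) = writer.finish();
    let expr = InsertExpr {
        schema_name: "public".to_string(),
        table_name: "demo".to_string(),
        region_number: 0,
        columns,
        row_count,
        ..Default::default()
    };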

@@ -13,6 +13,7 @@ api = { path = "../api" }
async-trait = "0.1"
axum = "0.6.0-rc.2"
axum-macros = "0.3.0-rc.1"
backon = "0.2"
catalog = { path = "../catalog" }
common-base = { path = "../common/base" }
common-catalog = { path = "../common/catalog" }
@@ -23,7 +24,7 @@ common-recordbatch = { path = "../common/recordbatch" }
common-runtime = { path = "../common/runtime" }
common-telemetry = { path = "../common/telemetry" }
common-time = { path = "../common/time" }
common-insert = { path = "../common/insert" }
common-grpc-expr = { path = "../common/grpc-expr" }
datafusion = { git = "https://github.com/apache/arrow-datafusion.git", branch = "arrow2", features = [
    "simd",
] }

@@ -47,6 +47,7 @@ pub struct DatanodeOptions {
    pub meta_client_opts: Option<MetaClientOpts>,
    pub wal_dir: String,
    pub storage: ObjectStoreConfig,
    pub enable_memory_catalog: bool,
    pub mode: Mode,
}

@@ -61,6 +62,7 @@ impl Default for DatanodeOptions {
            meta_client_opts: None,
            wal_dir: "/tmp/greptimedb/wal".to_string(),
            storage: ObjectStoreConfig::default(),
            enable_memory_catalog: false,
            mode: Mode::Standalone,
        }
    }
@@ -86,9 +88,18 @@ impl Datanode {

    pub async fn start(&mut self) -> Result<()> {
        info!("Starting datanode instance...");
        self.instance.start().await?;
        self.services.start(&self.opts).await?;
        Ok(())
        self.start_instance().await?;
        self.start_services().await
    }

    /// Start only the internal component of datanode.
    pub async fn start_instance(&mut self) -> Result<()> {
        self.instance.start().await
    }

    /// Start services of datanode. This method call will block until services are shut down.
    pub async fn start_services(&mut self) -> Result<()> {
        self.services.start(&self.opts).await
    }

    pub fn get_instance(&self) -> InstanceRef {

@@ -73,6 +73,13 @@ pub enum Error {
        source: TableError,
    },

    #[snafu(display("Failed to drop table {}, source: {}", table_name, source))]
    DropTable {
        table_name: String,
        #[snafu(backtrace)]
        source: BoxedError,
    },

    #[snafu(display("Table not found: {}", table_name))]
    TableNotFound { table_name: String },

@@ -82,9 +89,6 @@ pub enum Error {
        table_name: String,
    },

    #[snafu(display("Missing required field in protobuf, field: {}", field))]
    MissingField { field: String, backtrace: Backtrace },

    #[snafu(display("Missing timestamp column in request"))]
    MissingTimestampColumn { backtrace: Backtrace },

@@ -141,7 +145,7 @@ pub enum Error {
    #[snafu(display("Failed to init backend, dir: {}, source: {}", dir, source))]
    InitBackend {
        dir: String,
        source: std::io::Error,
        source: object_store::Error,
        backtrace: Backtrace,
    },

@@ -202,21 +206,16 @@ pub enum Error {
        source: common_grpc::Error,
    },

    #[snafu(display("Column datatype error, source: {}", source))]
    ColumnDataType {
    #[snafu(display("Failed to convert alter expr to request: {}", source))]
    AlterExprToRequest {
        #[snafu(backtrace)]
        source: api::error::Error,
        source: common_grpc_expr::error::Error,
    },

    #[snafu(display(
        "Invalid column proto definition, column: {}, source: {}",
        column,
        source
    ))]
    InvalidColumnDef {
        column: String,
    #[snafu(display("Failed to convert create expr to request: {}", source))]
    CreateExprToRequest {
        #[snafu(backtrace)]
        source: api::error::Error,
        source: common_grpc_expr::error::Error,
    },

    #[snafu(display("Failed to parse SQL, source: {}", source))]
@@ -263,7 +262,7 @@ pub enum Error {
    #[snafu(display("Failed to insert data, source: {}", source))]
    InsertData {
        #[snafu(backtrace)]
        source: common_insert::error::Error,
        source: common_grpc_expr::error::Error,
    },

    #[snafu(display("Insert batch is empty"))]
@@ -306,6 +305,7 @@ impl ErrorExt for Error {
            Error::CreateTable { source, .. }
            | Error::GetTable { source, .. }
            | Error::AlterTable { source, .. } => source.status_code(),
            Error::DropTable { source, .. } => source.status_code(),

            Error::Insert { source, .. } => source.status_code(),

@@ -316,6 +316,8 @@ impl ErrorExt for Error {
                source.status_code()
            }

            Error::AlterExprToRequest { source, .. }
            | Error::CreateExprToRequest { source, .. } => source.status_code(),
            Error::CreateSchema { source, .. }
            | Error::ConvertSchema { source, .. }
            | Error::VectorComputation { source } => source.status_code(),
@@ -324,7 +326,6 @@ impl ErrorExt for Error {
            | Error::InvalidSql { .. }
            | Error::KeyColumnNotFound { .. }
            | Error::InvalidPrimaryKey { .. }
            | Error::MissingField { .. }
            | Error::MissingTimestampColumn { .. }
            | Error::CatalogNotFound { .. }
            | Error::SchemaNotFound { .. }
@@ -343,10 +344,6 @@ impl ErrorExt for Error {
            | Error::UnsupportedExpr { .. }
            | Error::Catalog { .. } => StatusCode::Internal,

            Error::ColumnDataType { source } | Error::InvalidColumnDef { source, .. } => {
                source.status_code()
            }

            Error::InitBackend { .. } => StatusCode::StorageUnavailable,
            Error::OpenLogStore { source } => source.status_code(),
            Error::StartScriptManager { source } => source.status_code(),

@@ -16,6 +16,7 @@ use std::sync::Arc;
use std::time::Duration;
use std::{fs, path};

use backon::ExponentialBackoff;
use catalog::remote::MetaKvBackend;
use catalog::CatalogManagerRef;
use common_grpc::channel_manager::{ChannelConfig, ChannelManager};
@@ -26,7 +27,7 @@ use meta_client::client::{MetaClient, MetaClientBuilder};
use meta_client::MetaClientOpts;
use mito::config::EngineConfig as TableEngineConfig;
use mito::engine::MitoEngine;
use object_store::layers::LoggingLayer;
use object_store::layers::{LoggingLayer, MetricsLayer, RetryLayer, TracingLayer};
use object_store::services::fs::Builder;
use object_store::{util, ObjectStore};
use query::query_engine::{QueryEngineFactory, QueryEngineRef};
@@ -99,17 +100,29 @@ impl Instance {
        // create remote catalog manager
        let (catalog_manager, factory, table_id_provider) = match opts.mode {
            Mode::Standalone => {
                let catalog = Arc::new(
                    catalog::local::LocalCatalogManager::try_new(table_engine.clone())
                        .await
                        .context(CatalogSnafu)?,
                );
                let factory = QueryEngineFactory::new(catalog.clone());
                (
                    catalog.clone() as CatalogManagerRef,
                    factory,
                    Some(catalog as TableIdProviderRef),
                )
                if opts.enable_memory_catalog {
                    let catalog = Arc::new(catalog::local::MemoryCatalogManager::default());
                    let factory = QueryEngineFactory::new(catalog.clone());

                    (
                        catalog.clone() as CatalogManagerRef,
                        factory,
                        Some(catalog as TableIdProviderRef),
                    )
                } else {
                    let catalog = Arc::new(
                        catalog::local::LocalCatalogManager::try_new(table_engine.clone())
                            .await
                            .context(CatalogSnafu)?,
                    );
                    let factory = QueryEngineFactory::new(catalog.clone());

                    (
                        catalog.clone() as CatalogManagerRef,
                        factory,
                        Some(catalog as TableIdProviderRef),
                    )
                }
            }

            Mode::Distributed => {
@@ -139,7 +152,11 @@ impl Instance {
        };
        Ok(Self {
            query_engine: query_engine.clone(),
            sql_handler: SqlHandler::new(table_engine, catalog_manager.clone()),
            sql_handler: SqlHandler::new(
                table_engine,
                catalog_manager.clone(),
                query_engine.clone(),
            ),
            catalog_manager,
            physical_planner: PhysicalPlanner::new(query_engine),
            script_executor,
@@ -185,7 +202,15 @@ pub(crate) async fn new_object_store(store_config: &ObjectStoreConfig) -> Result
        .build()
        .context(error::InitBackendSnafu { dir: &data_dir })?;

    let object_store = ObjectStore::new(accessor).layer(LoggingLayer); // Add logging
    let object_store = ObjectStore::new(accessor)
        // Add retry
        .layer(RetryLayer::new(ExponentialBackoff::default().with_jitter()))
        // Add metrics
        .layer(MetricsLayer)
        // Add logging
        .layer(LoggingLayer)
        // Add tracing
        .layer(TracingLayer);

    Ok(object_store)
}
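
Note: each `layer` call wraps the stack built so far, so the order above determines how retry, metrics, logging, and tracing nest around one another; whether a retried request is re-logged per attempt depends on that nesting and is worth double-checking against the object_store crate's layer semantics. The backoff policy itself is pluggable; a hedged restatement with the policy factored out for tuning (only builder methods visible in this diff are used):

    let backoff = ExponentialBackoff::default().with_jitter();
    let object_store = ObjectStore::new(accessor)
        .layer(RetryLayer::new(backoff))
        .layer(MetricsLayer)
        .layer(LoggingLayer)
        .layer(TracingLayer);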
|
||||
|
||||
@@ -14,7 +14,7 @@
|
||||
|
||||
use api::result::{build_err_result, AdminResultBuilder, ObjectResultBuilder};
|
||||
use api::v1::{
|
||||
admin_expr, insert_expr, object_expr, select_expr, AdminExpr, AdminResult, CreateDatabaseExpr,
|
||||
admin_expr, object_expr, select_expr, AdminExpr, AdminResult, Column, CreateDatabaseExpr,
|
||||
ObjectExpr, ObjectResult, SelectExpr,
|
||||
};
|
||||
use async_trait::async_trait;
|
||||
@@ -22,7 +22,7 @@ use common_catalog::consts::DEFAULT_CATALOG_NAME;
|
||||
use common_error::ext::ErrorExt;
|
||||
use common_error::status_code::StatusCode;
|
||||
use common_grpc::select::to_object_result;
|
||||
use common_insert::insertion_expr_to_request;
|
||||
use common_grpc_expr::insertion_expr_to_request;
|
||||
use common_query::Output;
|
||||
use query::plan::LogicalPlan;
|
||||
use servers::query_handler::{GrpcAdminHandler, GrpcQueryHandler};
|
||||
@@ -44,7 +44,7 @@ impl Instance {
|
||||
catalog_name: &str,
|
||||
schema_name: &str,
|
||||
table_name: &str,
|
||||
values: insert_expr::Values,
|
||||
insert_batches: Vec<(Vec<Column>, u32)>,
|
||||
) -> Result<Output> {
|
||||
let schema_provider = self
|
||||
.catalog_manager
|
||||
@@ -55,11 +55,7 @@ impl Instance {
|
||||
.context(CatalogSnafu)?
|
||||
.context(SchemaNotFoundSnafu { name: schema_name })?;
|
||||
|
||||
let insert_batches =
|
||||
common_insert::insert_batches(&values.values).context(InsertDataSnafu)?;
|
||||
|
||||
ensure!(!insert_batches.is_empty(), EmptyInsertBatchSnafu);
|
||||
|
||||
let table = schema_provider
|
||||
.table(table_name)
|
||||
.context(CatalogSnafu)?
|
||||
@@ -87,10 +83,10 @@ impl Instance {
|
||||
catalog_name: &str,
|
||||
schema_name: &str,
|
||||
table_name: &str,
|
||||
values: insert_expr::Values,
|
||||
insert_batches: Vec<(Vec<Column>, u32)>,
|
||||
) -> ObjectResult {
|
||||
match self
|
||||
.execute_grpc_insert(catalog_name, schema_name, table_name, values)
|
||||
.execute_grpc_insert(catalog_name, schema_name, table_name, insert_batches)
|
||||
.await
|
||||
{
|
||||
Ok(Output::AffectedRows(rows)) => ObjectResultBuilder::new()
|
||||
@@ -170,25 +166,13 @@ impl GrpcQueryHandler for Instance {
|
||||
let catalog_name = DEFAULT_CATALOG_NAME;
|
||||
let schema_name = &insert_expr.schema_name;
|
||||
let table_name = &insert_expr.table_name;
|
||||
let expr = insert_expr
|
||||
.expr
|
||||
.context(servers::error::InvalidQuerySnafu {
|
||||
reason: "missing `expr` in `InsertExpr`",
|
||||
})?;
|
||||
|
||||
// TODO(fys): _region_number is for later use.
|
||||
let _region_number: u32 = insert_expr.region_number;
|
||||
|
||||
match expr {
|
||||
insert_expr::Expr::Values(values) => {
|
||||
self.handle_insert(catalog_name, schema_name, table_name, values)
|
||||
.await
|
||||
}
|
||||
insert_expr::Expr::Sql(sql) => {
|
||||
let output = self.execute_sql(&sql).await;
|
||||
to_object_result(output).await
|
||||
}
|
||||
}
|
||||
let insert_batches = vec![(insert_expr.columns, insert_expr.row_count)];
|
||||
self.handle_insert(catalog_name, schema_name, table_name, insert_batches)
|
||||
.await
|
||||
}
|
||||
Some(object_expr::Expr::Select(select_expr)) => self.handle_select(select_expr).await,
|
||||
other => {
|
||||
@@ -211,6 +195,9 @@ impl GrpcAdminHandler for Instance {
|
||||
Some(admin_expr::Expr::CreateDatabase(create_database_expr)) => {
|
||||
self.execute_create_database(create_database_expr).await
|
||||
}
|
||||
Some(admin_expr::Expr::DropTable(drop_table_expr)) => {
|
||||
self.handle_drop_table(drop_table_expr).await
|
||||
}
|
||||
other => {
|
||||
return servers::error::NotSupportedSnafu {
|
||||
feat: format!("{:?}", other),
|
||||
|
||||
@@ -107,6 +107,10 @@ impl Instance {
|
||||
let req = self.sql_handler.alter_to_request(alter_table)?;
|
||||
self.sql_handler.execute(SqlRequest::Alter(req)).await
|
||||
}
|
||||
Statement::DropTable(drop_table) => {
|
||||
let req = self.sql_handler.drop_table_to_request(drop_table);
|
||||
self.sql_handler.execute(SqlRequest::DropTable(req)).await
|
||||
}
|
||||
Statement::ShowDatabases(stmt) => {
|
||||
self.sql_handler
|
||||
.execute(SqlRequest::ShowDatabases(stmt))
|
||||
@@ -115,6 +119,11 @@ impl Instance {
|
||||
Statement::ShowTables(stmt) => {
|
||||
self.sql_handler.execute(SqlRequest::ShowTables(stmt)).await
|
||||
}
|
||||
Statement::Explain(stmt) => {
|
||||
self.sql_handler
|
||||
.execute(SqlRequest::Explain(Box::new(stmt)))
|
||||
.await
|
||||
}
|
||||
Statement::DescribeTable(stmt) => {
|
||||
self.sql_handler
|
||||
.execute(SqlRequest::DescribeTable(stmt))
|
||||
|
||||
@@ -58,7 +58,11 @@ impl Instance {
|
||||
let factory = QueryEngineFactory::new(catalog_manager.clone());
|
||||
let query_engine = factory.query_engine();
|
||||
|
||||
let sql_handler = SqlHandler::new(mock_engine.clone(), catalog_manager.clone());
|
||||
let sql_handler = SqlHandler::new(
|
||||
mock_engine.clone(),
|
||||
catalog_manager.clone(),
|
||||
query_engine.clone(),
|
||||
);
|
||||
let physical_planner = PhysicalPlanner::new(query_engine.clone());
|
||||
let script_executor = ScriptExecutor::new(catalog_manager.clone(), query_engine.clone())
|
||||
.await
|
||||
@@ -123,7 +127,11 @@ impl Instance {
|
||||
);
|
||||
Ok(Self {
|
||||
query_engine: query_engine.clone(),
|
||||
sql_handler: SqlHandler::new(table_engine, catalog_manager.clone()),
|
||||
sql_handler: SqlHandler::new(
|
||||
table_engine,
|
||||
catalog_manager.clone(),
|
||||
query_engine.clone(),
|
||||
),
|
||||
catalog_manager,
|
||||
physical_planner: PhysicalPlanner::new(query_engine),
|
||||
script_executor,
|
||||
|
||||
@@ -12,22 +12,17 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use api::result::AdminResultBuilder;
|
||||
use api::v1::alter_expr::Kind;
|
||||
use api::v1::{AdminResult, AlterExpr, CreateExpr, DropColumns};
|
||||
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
|
||||
use api::v1::{AdminResult, AlterExpr, CreateExpr, DropTableExpr};
|
||||
use common_error::prelude::{ErrorExt, StatusCode};
|
||||
use common_grpc_expr::{alter_expr_to_request, create_expr_to_request};
|
||||
use common_query::Output;
|
||||
use common_telemetry::{error, info};
|
||||
use datatypes::schema::{ColumnSchema, SchemaBuilder, SchemaRef};
|
||||
use futures::TryFutureExt;
|
||||
use snafu::prelude::*;
|
||||
use table::metadata::TableId;
|
||||
use table::requests::{AddColumnRequest, AlterKind, AlterTableRequest, CreateTableRequest};
|
||||
use table::requests::DropTableRequest;
|
||||
|
||||
use crate::error::{self, BumpTableIdSnafu, MissingFieldSnafu, Result};
|
||||
use crate::error::{AlterExprToRequestSnafu, BumpTableIdSnafu, CreateExprToRequestSnafu};
|
||||
use crate::instance::Instance;
|
||||
use crate::sql::SqlRequest;
|
||||
|
||||
@@ -75,7 +70,7 @@ impl Instance {
|
||||
}
|
||||
};
|
||||
|
||||
let request = create_expr_to_request(table_id, expr).await;
|
||||
let request = create_expr_to_request(table_id, expr).context(CreateExprToRequestSnafu);
|
||||
let result = futures::future::ready(request)
|
||||
.and_then(|request| self.sql_handler().execute(SqlRequest::CreateTable(request)))
|
||||
.await;
|
||||
@@ -94,14 +89,17 @@ impl Instance {
|
||||
}
|
||||
|
||||
pub(crate) async fn handle_alter(&self, expr: AlterExpr) -> AdminResult {
|
||||
let request = match alter_expr_to_request(expr).transpose() {
|
||||
Some(req) => req,
|
||||
let request = match alter_expr_to_request(expr)
|
||||
.context(AlterExprToRequestSnafu)
|
||||
.transpose()
|
||||
{
|
||||
None => {
|
||||
return AdminResultBuilder::default()
|
||||
.status_code(StatusCode::Success as u32)
|
||||
.mutate_result(0, 0)
|
||||
.build()
|
||||
}
|
||||
Some(req) => req,
|
||||
};
|
||||
|
||||
let result = futures::future::ready(request)
|
||||
@@ -119,156 +117,47 @@ impl Instance {
|
||||
.build(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn create_expr_to_request(table_id: TableId, expr: CreateExpr) -> Result<CreateTableRequest> {
|
||||
let schema = create_table_schema(&expr)?;
|
||||
let primary_key_indices = expr
|
||||
.primary_keys
|
||||
.iter()
|
||||
.map(|key| {
|
||||
schema
|
||||
.column_index_by_name(key)
|
||||
.context(error::KeyColumnNotFoundSnafu { name: key })
|
||||
})
|
||||
.collect::<Result<Vec<usize>>>()?;
|
||||
|
||||
let catalog_name = expr
|
||||
.catalog_name
|
||||
.unwrap_or_else(|| DEFAULT_CATALOG_NAME.to_string());
|
||||
let schema_name = expr
|
||||
.schema_name
|
||||
.unwrap_or_else(|| DEFAULT_SCHEMA_NAME.to_string());
|
||||
|
||||
let region_ids = if expr.region_ids.is_empty() {
|
||||
vec![0]
|
||||
} else {
|
||||
expr.region_ids
|
||||
};
|
||||
|
||||
Ok(CreateTableRequest {
|
||||
id: table_id,
|
||||
catalog_name,
|
||||
schema_name,
|
||||
table_name: expr.table_name,
|
||||
desc: expr.desc,
|
||||
schema,
|
||||
region_numbers: region_ids,
|
||||
primary_key_indices,
|
||||
create_if_not_exists: expr.create_if_not_exists,
|
||||
table_options: expr.table_options,
|
||||
})
|
||||
}
|
||||
|
||||
fn alter_expr_to_request(expr: AlterExpr) -> Result<Option<AlterTableRequest>> {
    match expr.kind {
        Some(Kind::AddColumns(add_columns)) => {
            let mut add_column_requests = vec![];
            for add_column_expr in add_columns.add_columns {
                let column_def = add_column_expr.column_def.context(MissingFieldSnafu {
                    field: "column_def",
                })?;

                let schema =
                    column_def
                        .try_as_column_schema()
                        .context(error::InvalidColumnDefSnafu {
                            column: &column_def.name,
                        })?;
                add_column_requests.push(AddColumnRequest {
                    column_schema: schema,
                    is_key: add_column_expr.is_key,
                })
            }

            let alter_kind = AlterKind::AddColumns {
                columns: add_column_requests,
            };

            let request = AlterTableRequest {
                catalog_name: expr.catalog_name,
                schema_name: expr.schema_name,
                table_name: expr.table_name,
                alter_kind,
            };
            Ok(Some(request))
    pub(crate) async fn handle_drop_table(&self, expr: DropTableExpr) -> AdminResult {
        let req = DropTableRequest {
            catalog_name: expr.catalog_name,
            schema_name: expr.schema_name,
            table_name: expr.table_name,
        };
        let result = self.sql_handler().execute(SqlRequest::DropTable(req)).await;
        match result {
            Ok(Output::AffectedRows(rows)) => AdminResultBuilder::default()
                .status_code(StatusCode::Success as u32)
                .mutate_result(rows as _, 0)
                .build(),
            Ok(Output::Stream(_)) | Ok(Output::RecordBatches(_)) => unreachable!(),
            Err(err) => AdminResultBuilder::default()
                .status_code(err.status_code() as u32)
                .err_msg(err.to_string())
                .build(),
        }
        Some(Kind::DropColumns(DropColumns { drop_columns })) => {
            let alter_kind = AlterKind::DropColumns {
                names: drop_columns.into_iter().map(|c| c.name).collect(),
            };

            let request = AlterTableRequest {
                catalog_name: expr.catalog_name,
                schema_name: expr.schema_name,
                table_name: expr.table_name,
                alter_kind,
            };
            Ok(Some(request))
        }
        None => Ok(None),
    }
}

fn create_table_schema(expr: &CreateExpr) -> Result<SchemaRef> {
    let column_schemas = expr
        .column_defs
        .iter()
        .map(|x| {
            x.try_as_column_schema()
                .context(error::InvalidColumnDefSnafu { column: &x.name })
        })
        .collect::<Result<Vec<ColumnSchema>>>()?;

    ensure!(
        column_schemas
            .iter()
            .any(|column| column.name == expr.time_index),
        error::KeyColumnNotFoundSnafu {
            name: &expr.time_index,
        }
    );

    let column_schemas = column_schemas
        .into_iter()
        .map(|column_schema| {
            if column_schema.name == expr.time_index {
                column_schema.with_time_index(true)
            } else {
                column_schema
            }
        })
        .collect::<Vec<_>>();

    Ok(Arc::new(
        SchemaBuilder::try_from(column_schemas)
            .context(error::CreateSchemaSnafu)?
            .build()
            .context(error::CreateSchemaSnafu)?,
    ))
}

#[cfg(test)]
mod tests {
    use api::v1::ColumnDef;
    use std::sync::Arc;

    use api::v1::{ColumnDataType, ColumnDef};
    use common_catalog::consts::MIN_USER_TABLE_ID;
    use common_grpc_expr::create_table_schema;
    use datatypes::prelude::ConcreteDataType;
    use datatypes::schema::ColumnDefaultConstraint;
    use datatypes::schema::{ColumnDefaultConstraint, ColumnSchema, SchemaBuilder, SchemaRef};
    use datatypes::value::Value;

    use super::*;
    use crate::tests::test_util;

    #[tokio::test(flavor = "multi_thread")]
    async fn test_create_expr_to_request() {
        common_telemetry::init_default_ut_logging();
        let (opts, _guard) = test_util::create_tmp_dir_and_datanode_opts("create_expr_to_request");
        let instance = Instance::with_mock_meta_client(&opts).await.unwrap();
        instance.start().await.unwrap();

        let expr = testing_create_expr();
        let request = create_expr_to_request(1024, expr).await.unwrap();
        assert_eq!(request.id, common_catalog::consts::MIN_USER_TABLE_ID);
        let request = create_expr_to_request(1024, expr).unwrap();
        assert_eq!(request.id, MIN_USER_TABLE_ID);
        assert_eq!(request.catalog_name, "greptime".to_string());
        assert_eq!(request.schema_name, "public".to_string());
        assert_eq!(request.table_name, "my-metrics");
@@ -279,12 +168,13 @@ mod tests {

        let mut expr = testing_create_expr();
        expr.primary_keys = vec!["host".to_string(), "not-exist-column".to_string()];
        let result = create_expr_to_request(1025, expr).await;
        assert!(result.is_err());
        assert!(result
            .unwrap_err()
            .to_string()
            .contains("Specified timestamp key or primary key column not found: not-exist-column"));
        let result = create_expr_to_request(1025, expr);
        let err_msg = result.unwrap_err().to_string();
        assert!(
            err_msg.contains("Column `not-exist-column` not found in table `my-metrics`"),
            "{}",
            err_msg
        );
    }

    #[test]
@@ -295,14 +185,16 @@ mod tests {

        expr.time_index = "not-exist-column".to_string();
        let result = create_table_schema(&expr);
        assert!(result.is_err());
        assert!(result
            .unwrap_err()
            .to_string()
            .contains("Specified timestamp key or primary key column not found: not-exist-column"));
        let err_msg = result.unwrap_err().to_string();
        assert!(
            err_msg.contains("Missing timestamp column"),
            "actual: {}",
            err_msg
        );
    }

    #[test]

    fn test_create_column_schema() {
        let column_def = ColumnDef {
            name: "a".to_string(),
@@ -318,7 +210,7 @@ mod tests {

        let column_def = ColumnDef {
            name: "a".to_string(),
            datatype: 12, // string
            datatype: ColumnDataType::String as i32,
            is_nullable: true,
            default_constraint: None,
        };
@@ -330,7 +222,7 @@ mod tests {
        let default_constraint = ColumnDefaultConstraint::Value(Value::from("default value"));
        let column_def = ColumnDef {
            name: "a".to_string(),
            datatype: 12, // string
            datatype: ColumnDataType::String as i32,
            is_nullable: true,
            default_constraint: Some(default_constraint.clone().try_into().unwrap()),
        };
@@ -348,25 +240,25 @@ mod tests {
        let column_defs = vec![
            ColumnDef {
                name: "host".to_string(),
                datatype: 12, // string
                datatype: ColumnDataType::String as i32,
                is_nullable: false,
                default_constraint: None,
            },
            ColumnDef {
                name: "ts".to_string(),
                datatype: 15, // timestamp
                datatype: ColumnDataType::Timestamp as i32,
                is_nullable: false,
                default_constraint: None,
            },
            ColumnDef {
                name: "cpu".to_string(),
                datatype: 9, // float32
                datatype: ColumnDataType::Float32 as i32,
                is_nullable: true,
                default_constraint: None,
            },
            ColumnDef {
                name: "memory".to_string(),
                datatype: 10, // float64
                datatype: ColumnDataType::Float64 as i32,
                is_nullable: true,
                default_constraint: None,
            },

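Replacing the bare protobuf tag numbers (12, 15, ...) with ColumnDataType variants keeps the tests readable and lets the compiler catch a renumbered enum. A minimal sketch of the same idiom (the enum here is illustrative, not the real api::v1 definition):

enum ColumnDataType {
    Float32 = 9,
    Float64 = 10,
    String = 12,
    Timestamp = 15,
}

fn main() {
    // The cast yields the same wire value as the old magic number,
    // but the intent is now visible at the call site.
    assert_eq!(ColumnDataType::String as i32, 12);
    assert_eq!(ColumnDataType::Timestamp as i32, 15);
}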
@@ -16,18 +16,22 @@

use catalog::CatalogManagerRef;
use common_query::Output;
use query::sql::{describe_table, show_databases, show_tables};
use common_telemetry::error;
use query::query_engine::QueryEngineRef;
use query::sql::{describe_table, explain, show_databases, show_tables};
use snafu::{OptionExt, ResultExt};
use sql::statements::describe::DescribeTable;
use sql::statements::explain::Explain;
use sql::statements::show::{ShowDatabases, ShowTables};
use table::engine::{EngineContext, TableEngineRef, TableReference};
use table::requests::*;
use table::TableRef;

use crate::error::{self, GetTableSnafu, Result, TableNotFoundSnafu};
use crate::error::{ExecuteSqlSnafu, GetTableSnafu, Result, TableNotFoundSnafu};

mod alter;
mod create;
mod drop_table;
mod insert;

#[derive(Debug)]
@@ -36,41 +40,57 @@ pub enum SqlRequest {
    CreateTable(CreateTableRequest),
    CreateDatabase(CreateDatabaseRequest),
    Alter(AlterTableRequest),
    DropTable(DropTableRequest),
    ShowDatabases(ShowDatabases),
    ShowTables(ShowTables),
    DescribeTable(DescribeTable),
    Explain(Box<Explain>),
}

// Handler to execute SQL except query
pub struct SqlHandler {
    table_engine: TableEngineRef,
    catalog_manager: CatalogManagerRef,
    query_engine: QueryEngineRef,
}

impl SqlHandler {
    pub fn new(table_engine: TableEngineRef, catalog_manager: CatalogManagerRef) -> Self {
    pub fn new(
        table_engine: TableEngineRef,
        catalog_manager: CatalogManagerRef,
        query_engine: QueryEngineRef,
    ) -> Self {
        Self {
            table_engine,
            catalog_manager,
            query_engine,
        }
    }

    pub async fn execute(&self, request: SqlRequest) -> Result<Output> {
        match request {
        let result = match request {
            SqlRequest::Insert(req) => self.insert(req).await,
            SqlRequest::CreateTable(req) => self.create_table(req).await,
            SqlRequest::CreateDatabase(req) => self.create_database(req).await,
            SqlRequest::Alter(req) => self.alter(req).await,
            SqlRequest::DropTable(req) => self.drop_table(req).await,
            SqlRequest::ShowDatabases(stmt) => {
                show_databases(stmt, self.catalog_manager.clone()).context(error::ExecuteSqlSnafu)
                show_databases(stmt, self.catalog_manager.clone()).context(ExecuteSqlSnafu)
            }
            SqlRequest::ShowTables(stmt) => {
                show_tables(stmt, self.catalog_manager.clone()).context(error::ExecuteSqlSnafu)
                show_tables(stmt, self.catalog_manager.clone()).context(ExecuteSqlSnafu)
            }
            SqlRequest::DescribeTable(stmt) => {
                describe_table(stmt, self.catalog_manager.clone()).context(error::ExecuteSqlSnafu)
                describe_table(stmt, self.catalog_manager.clone()).context(ExecuteSqlSnafu)
            }
            SqlRequest::Explain(stmt) => explain(stmt, self.query_engine.clone())
                .await
                .context(ExecuteSqlSnafu),
        };
        if let Err(e) = &result {
            error!("Datanode execution error: {:?}", e);
        }
        result
    }

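With the extra query_engine argument, every non-query statement funnels through a single execute call and failures are logged centrally. A hedged usage sketch (handler construction as in the tests further down; the request values are illustrative):

async fn drop_demo_table(handler: &SqlHandler) -> Result<Output> {
    let req = DropTableRequest {
        catalog_name: "greptime".to_string(),
        schema_name: "public".to_string(),
        table_name: "demo".to_string(),
    };
    // Errors are already logged inside execute(); the caller only
    // decides how to surface them.
    handler.execute(SqlRequest::DropTable(req)).await
}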
    pub(crate) fn get_table<'a>(&self, table_ref: &'a TableReference) -> Result<TableRef> {
@@ -216,7 +236,7 @@ mod tests {
        );
        let factory = QueryEngineFactory::new(catalog_list.clone());
        let query_engine = factory.query_engine();
        let sql_handler = SqlHandler::new(table_engine, catalog_list);
        let sql_handler = SqlHandler::new(table_engine, catalog_list, query_engine.clone());

        let stmt = match query_engine.sql_to_statement(sql).unwrap() {
            Statement::Insert(i) => i,

@@ -84,7 +84,6 @@ impl SqlHandler {

        // determine catalog and schema from the very beginning
        let table_name = req.table_name.clone();
        let table_id = req.id;
        let table = self
            .table_engine
            .create_table(&ctx, req)
@@ -97,7 +96,7 @@ impl SqlHandler {
            catalog: table.table_info().catalog_name.clone(),
            schema: table.table_info().schema_name.clone(),
            table_name: table_name.clone(),
            table_id,
            table_id: table.table_info().ident.table_id,
            table,
        };

@@ -172,7 +171,7 @@ impl SqlHandler {
                return ConstraintNotSupportedSnafu {
                    constraint: format!("{:?}", c),
                }
                .fail()
                .fail();
            }
        }
    }

71
src/datanode/src/sql/drop_table.rs
Normal file
@@ -0,0 +1,71 @@
// Copyright 2022 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use catalog::DeregisterTableRequest;
use common_error::prelude::BoxedError;
use common_query::Output;
use common_telemetry::info;
use snafu::ResultExt;
use sql::statements::drop::DropTable;
use table::engine::{EngineContext, TableReference};
use table::requests::DropTableRequest;

use crate::error::{self, Result};
use crate::sql::SqlHandler;

impl SqlHandler {
    pub async fn drop_table(&self, req: DropTableRequest) -> Result<Output> {
        let deregister_table_req = DeregisterTableRequest {
            catalog: req.catalog_name.clone(),
            schema: req.schema_name.clone(),
            table_name: req.table_name.clone(),
        };

        let table_reference = TableReference {
            catalog: &req.catalog_name,
            schema: &req.schema_name,
            table: &req.table_name,
        };
        let table_full_name = table_reference.to_string();

        self.catalog_manager
            .deregister_table(deregister_table_req)
            .await
            .map_err(BoxedError::new)
            .context(error::DropTableSnafu {
                table_name: table_full_name.clone(),
            })?;

        let ctx = EngineContext {};
        self.table_engine()
            .drop_table(&ctx, req)
            .await
            .map_err(BoxedError::new)
            .context(error::DropTableSnafu {
                table_name: table_full_name.clone(),
            })?;

        info!("Successfully dropped table: {}", table_full_name);

        Ok(Output::AffectedRows(1))
    }

    pub fn drop_table_to_request(&self, drop_table: DropTable) -> DropTableRequest {
        DropTableRequest {
            catalog_name: drop_table.catalog_name,
            schema_name: drop_table.schema_name,
            table_name: drop_table.table_name,
        }
    }
}
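The new handler deregisters the table from the catalog before asking the engine to drop it, so readers stop resolving the name even while the engine-side drop is in flight. A hedged end-to-end sketch, from a parsed DROP TABLE statement to execution (statement value is illustrative):

async fn run_drop(handler: &SqlHandler, stmt: DropTable) -> Result<Output> {
    // Convert the parsed statement into an engine-level request,
    // then go through the common execute() dispatch.
    let req = handler.drop_table_to_request(stmt);
    handler.execute(SqlRequest::DropTable(req)).await
}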
@@ -13,17 +13,15 @@
// limitations under the License.

use std::assert_matches::assert_matches;
use std::collections::HashMap;
use std::net::SocketAddr;
use std::sync::Arc;
use std::time::Duration;

use api::v1::alter_expr::Kind;
use api::v1::codec::InsertBatch;
use api::v1::column::SemanticType;
use api::v1::{
    admin_result, column, insert_expr, AddColumn, AddColumns, AlterExpr, Column, ColumnDataType,
    ColumnDef, CreateExpr, InsertExpr, MutateResult,
    admin_result, column, AddColumn, AddColumns, AlterExpr, Column, ColumnDataType, ColumnDef,
    CreateExpr, InsertExpr, MutateResult,
};
use client::admin::Admin;
use client::{Client, Database, ObjectResult};
@@ -230,7 +228,10 @@ async fn insert_and_assert(db: &Database) {
    // testing data:
    let (expected_host_col, expected_cpu_col, expected_mem_col, expected_ts_col) = expect_data();

    let values = vec![InsertBatch {
    let expr = InsertExpr {
        schema_name: "public".to_string(),
        table_name: "demo".to_string(),
        region_number: 0,
        columns: vec![
            expected_host_col.clone(),
            expected_cpu_col.clone(),
@@ -238,14 +239,6 @@ async fn insert_and_assert(db: &Database) {
            expected_ts_col.clone(),
        ],
        row_count: 4,
    }
    .into()];
    let expr = InsertExpr {
        schema_name: "public".to_string(),
        table_name: "demo".to_string(),
        expr: Some(insert_expr::Expr::Values(insert_expr::Values { values })),
        options: HashMap::default(),
        region_number: 0,
    };
    let result = db.insert(expr).await;
    result.unwrap();

@@ -21,7 +21,7 @@ use datatypes::prelude::ConcreteDataType;
use frontend::frontend::FrontendOptions;
use frontend::instance::{FrontendInstance, Instance as FeInstance};
use serde_json::json;
use servers::http::{ColumnSchema, HttpServer, JsonOutput, JsonResponse, Schema};
use servers::http::{ColumnSchema, HttpOptions, HttpServer, JsonOutput, JsonResponse, Schema};
use test_util::TestGuard;

use crate::instance::{Instance, InstanceRef};
@@ -46,7 +46,7 @@ async fn make_test_app(name: &str) -> (Router, TestGuard) {
    )
    .await
    .unwrap();
    let http_server = HttpServer::new(instance);
    let http_server = HttpServer::new(instance, HttpOptions::default());
    (http_server.make_app(), guard)
}

@@ -63,7 +63,7 @@ async fn make_test_app_with_frontend(name: &str) -> (Router, TestGuard) {
    .await
    .unwrap();
    frontend.start().await.unwrap();
    let mut http_server = HttpServer::new(Arc::new(frontend));
    let mut http_server = HttpServer::new(Arc::new(frontend), HttpOptions::default());
    http_server.set_script_handler(instance.clone());
    let app = http_server.make_app();
    (app, guard)

@@ -21,6 +21,7 @@ use datatypes::data_type::ConcreteDataType;
use datatypes::schema::{ColumnSchema, SchemaBuilder};
use mito::config::EngineConfig;
use mito::table::test_util::{new_test_object_store, MockEngine, MockMitoEngine};
use query::QueryEngineFactory;
use servers::Mode;
use snafu::ResultExt;
use table::engine::{EngineContext, TableEngineRef};
@@ -121,5 +122,9 @@ pub async fn create_mock_sql_handler() -> SqlHandler {
        .await
        .unwrap(),
    );
    SqlHandler::new(mock_engine, catalog_manager)

    let catalog_list = catalog::local::new_memory_catalog_list().unwrap();
    let factory = QueryEngineFactory::new(catalog_list);

    SqlHandler::new(mock_engine, catalog_manager, factory.query_engine())
}

@@ -20,7 +20,7 @@ common-catalog = { path = "../common/catalog" }
common-runtime = { path = "../common/runtime" }
common-telemetry = { path = "../common/telemetry" }
common-time = { path = "../common/time" }
common-insert = { path = "../common/insert" }
common-grpc-expr = { path = "../common/grpc-expr" }
datafusion = { git = "https://github.com/apache/arrow-datafusion.git", branch = "arrow2", features = [
    "simd",
] }

@@ -19,8 +19,9 @@ use std::sync::Arc;
use catalog::error::{self as catalog_err, InvalidCatalogValueSnafu};
use catalog::remote::{Kv, KvBackendRef};
use catalog::{
    CatalogList, CatalogManager, CatalogProvider, CatalogProviderRef, RegisterSchemaRequest,
    RegisterSystemTableRequest, RegisterTableRequest, SchemaProvider, SchemaProviderRef,
    CatalogList, CatalogManager, CatalogProvider, CatalogProviderRef, DeregisterTableRequest,
    RegisterSchemaRequest, RegisterSystemTableRequest, RegisterTableRequest, SchemaProvider,
    SchemaProviderRef,
};
use common_catalog::{CatalogKey, SchemaKey, TableGlobalKey, TableGlobalValue};
use futures::StreamExt;
@@ -65,17 +66,21 @@ impl CatalogManager for FrontendCatalogManager {
        Ok(())
    }

    async fn register_table(
    async fn register_table(&self, _request: RegisterTableRequest) -> catalog::error::Result<bool> {
        unimplemented!()
    }

    async fn deregister_table(
        &self,
        _request: RegisterTableRequest,
    ) -> catalog::error::Result<usize> {
        _request: DeregisterTableRequest,
    ) -> catalog::error::Result<bool> {
        unimplemented!()
    }

    async fn register_schema(
        &self,
        _request: RegisterSchemaRequest,
    ) -> catalog::error::Result<usize> {
    ) -> catalog::error::Result<bool> {
        unimplemented!()
    }

@@ -273,8 +278,7 @@ impl SchemaProvider for FrontendSchemaProvider {
            }
            Some(r) => r,
        };
        let val = TableGlobalValue::parse(String::from_utf8_lossy(&res.1))
            .context(InvalidCatalogValueSnafu)?;
        let val = TableGlobalValue::from_bytes(&res.1).context(InvalidCatalogValueSnafu)?;

        let table = Arc::new(DistTable::new(
            table_name,

@@ -250,6 +250,12 @@ pub enum Error {
        source: client::Error,
    },

    #[snafu(display("Failed to drop table, source: {}", source))]
    DropTable {
        #[snafu(backtrace)]
        source: client::Error,
    },

    #[snafu(display("Failed to insert values to table, source: {}", source))]
    Insert {
        #[snafu(backtrace)]
@@ -277,25 +283,25 @@ pub enum Error {
    #[snafu(display("Failed to build CreateExpr on insertion: {}", source))]
    BuildCreateExprOnInsertion {
        #[snafu(backtrace)]
        source: common_insert::error::Error,
        source: common_grpc_expr::error::Error,
    },

    #[snafu(display("Failed to find new columns on insertion: {}", source))]
    FindNewColumnsOnInsertion {
        #[snafu(backtrace)]
        source: common_insert::error::Error,
        source: common_grpc_expr::error::Error,
    },

    #[snafu(display("Failed to deserialize insert batching: {}", source))]
    DeserializeInsertBatch {
        #[snafu(backtrace)]
        source: common_insert::error::Error,
        source: common_grpc_expr::error::Error,
    },

    #[snafu(display("Failed to deserialize insert batching: {}", source))]
    InsertBatchToRequest {
        #[snafu(backtrace)]
        source: common_insert::error::Error,
        source: common_grpc_expr::error::Error,
    },

    #[snafu(display("Failed to find catalog by name: {}", catalog_name))]
@@ -427,6 +433,18 @@ pub enum Error {

    #[snafu(display("Missing meta_client_opts section in config"))]
    MissingMetasrvOpts { backtrace: Backtrace },

    #[snafu(display("Failed to convert AlterExpr to AlterRequest, source: {}", source))]
    AlterExprToRequest {
        #[snafu(backtrace)]
        source: common_grpc_expr::error::Error,
    },

    #[snafu(display("Failed to find leaders when altering table, table: {}", table))]
    LeaderNotFound { table: String, backtrace: Backtrace },

    #[snafu(display("Table already exists: `{}`", table))]
    TableAlreadyExist { table: String, backtrace: Backtrace },
}

pub type Result<T> = std::result::Result<T, Error>;
@@ -497,23 +515,27 @@ impl ErrorExt for Error {
            Error::BumpTableId { source, .. } => source.status_code(),
            Error::SchemaNotFound { .. } => StatusCode::InvalidArguments,
            Error::CatalogNotFound { .. } => StatusCode::InvalidArguments,
            Error::CreateTable { source, .. } => source.status_code(),
            Error::AlterTable { source, .. } => source.status_code(),
            Error::Insert { source, .. } => source.status_code(),
            Error::CreateTable { source, .. }
            | Error::AlterTable { source, .. }
            | Error::DropTable { source }
            | Error::Select { source, .. }
            | Error::CreateDatabase { source, .. }
            | Error::CreateTableOnInsertion { source, .. }
            | Error::AlterTableOnInsertion { source, .. }
            | Error::Insert { source, .. } => source.status_code(),
            Error::BuildCreateExprOnInsertion { source, .. } => source.status_code(),
            Error::CreateTableOnInsertion { source, .. } => source.status_code(),
            Error::AlterTableOnInsertion { source, .. } => source.status_code(),
            Error::Select { source, .. } => source.status_code(),
            Error::FindNewColumnsOnInsertion { source, .. } => source.status_code(),
            Error::DeserializeInsertBatch { source, .. } => source.status_code(),
            Error::PrimaryKeyNotFound { .. } => StatusCode::InvalidArguments,
            Error::ExecuteSql { source, .. } => source.status_code(),
            Error::InsertBatchToRequest { source, .. } => source.status_code(),
            Error::CreateDatabase { source, .. } => source.status_code(),
            Error::CollectRecordbatchStream { source } | Error::CreateRecordbatches { source } => {
                source.status_code()
            }
            Error::MissingMetasrvOpts { .. } => StatusCode::InvalidArguments,
            Error::AlterExprToRequest { source, .. } => source.status_code(),
            Error::LeaderNotFound { .. } => StatusCode::StorageUnavailable,
            Error::TableAlreadyExist { .. } => StatusCode::TableAlreadyExists,
        }
    }


@@ -16,8 +16,7 @@ use std::collections::HashMap;
use std::sync::Arc;

use api::helper::ColumnDataTypeWrapper;
use api::v1::codec::InsertBatch;
use api::v1::{ColumnDataType, CreateExpr};
use api::v1::{Column, ColumnDataType, CreateExpr};
use datatypes::schema::ColumnSchema;
use snafu::{ensure, ResultExt};
use sql::statements::create::{CreateTable, TIME_INDEX};
@@ -35,12 +34,12 @@ pub type CreateExprFactoryRef = Arc<dyn CreateExprFactory + Send + Sync>;
pub trait CreateExprFactory {
    async fn create_expr_by_stmt(&self, stmt: &CreateTable) -> Result<CreateExpr>;

    async fn create_expr_by_insert_batch(
    async fn create_expr_by_columns(
        &self,
        catalog_name: &str,
        schema_name: &str,
        table_name: &str,
        batch: &[InsertBatch],
        columns: &[Column],
    ) -> crate::error::Result<CreateExpr>;
}

@@ -53,20 +52,20 @@ impl CreateExprFactory for DefaultCreateExprFactory {
        create_to_expr(None, vec![0], stmt)
    }

    async fn create_expr_by_insert_batch(
    async fn create_expr_by_columns(
        &self,
        catalog_name: &str,
        schema_name: &str,
        table_name: &str,
        batch: &[InsertBatch],
        columns: &[Column],
    ) -> Result<CreateExpr> {
        let table_id = None;
        let create_expr = common_insert::build_create_expr_from_insertion(
        let create_expr = common_grpc_expr::build_create_expr_from_insertion(
            catalog_name,
            schema_name,
            table_id,
            table_name,
            batch,
            columns,
        )
        .context(BuildCreateExprOnInsertionSnafu)?;


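After the refactor the factory derives a CreateExpr straight from the gRPC Column list instead of first decoding encoded InsertBatch values. A hedged usage sketch (the column slice is illustrative; names follow the api::v1 types used above):

async fn infer_create(
    factory: &DefaultCreateExprFactory,
    columns: &[Column],
) -> Result<CreateExpr> {
    // The schema is inferred from the inserted columns; the table id is
    // assigned later, when the table is actually created.
    factory
        .create_expr_by_columns("greptime", "public", "demo", columns)
        .await
}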
@@ -16,6 +16,7 @@ use std::sync::Arc;

use meta_client::MetaClientOpts;
use serde::{Deserialize, Serialize};
use servers::http::HttpOptions;
use servers::Mode;
use snafu::prelude::*;

@@ -31,7 +32,7 @@ use crate::server::Services;

#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct FrontendOptions {
    pub http_addr: Option<String>,
    pub http_options: Option<HttpOptions>,
    pub grpc_options: Option<GrpcOptions>,
    pub mysql_options: Option<MysqlOptions>,
    pub postgres_options: Option<PostgresOptions>,
@@ -46,7 +47,7 @@ pub struct FrontendOptions {
impl Default for FrontendOptions {
    fn default() -> Self {
        Self {
            http_addr: Some("127.0.0.1:4000".to_string()),
            http_options: Some(HttpOptions::default()),
            grpc_options: Some(GrpcOptions::default()),
            mysql_options: Some(MysqlOptions::default()),
            postgres_options: Some(PostgresOptions::default()),

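The plain http_addr string becomes a structured HttpOptions, which leaves room for more HTTP settings than just the bind address. A hedged sketch of overriding the address; HttpOptions is assumed to expose an addr field (as used by parse_addr(&http_options.addr) later in this change) plus a Default impl:

let opts = FrontendOptions {
    http_options: Some(HttpOptions {
        // Bind on all interfaces instead of the default 127.0.0.1:4000.
        addr: "0.0.0.0:4000".to_string(),
        ..Default::default()
    }),
    ..Default::default()
};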
@@ -17,17 +17,16 @@ mod influxdb;
mod opentsdb;
mod prometheus;

use std::collections::HashMap;
use std::sync::Arc;
use std::time::Duration;

use api::result::ObjectResultBuilder;
use api::v1::alter_expr::Kind;
use api::v1::codec::InsertBatch;
use api::v1::object_expr::Expr;
use api::v1::{
    admin_expr, insert_expr, select_expr, AddColumns, AdminExpr, AdminResult, AlterExpr,
    CreateDatabaseExpr, CreateExpr, InsertExpr, ObjectExpr, ObjectResult as GrpcObjectResult,
    admin_expr, select_expr, AddColumns, AdminExpr, AdminResult, AlterExpr, Column,
    CreateDatabaseExpr, CreateExpr, DropTableExpr, InsertExpr, ObjectExpr,
    ObjectResult as GrpcObjectResult,
};
use async_trait::async_trait;
use catalog::remote::MetaKvBackend;
@@ -52,6 +51,7 @@ use snafu::prelude::*;
use sql::dialect::GenericDialect;
use sql::parser::ParserContext;
use sql::statements::create::Partitions;
use sql::statements::explain::Explain;
use sql::statements::insert::Insert;
use sql::statements::statement::Statement;

@@ -59,13 +59,14 @@ use crate::catalog::FrontendCatalogManager;
use crate::datanode::DatanodeClients;
use crate::error::{
    self, AlterTableOnInsertionSnafu, AlterTableSnafu, CatalogNotFoundSnafu, CatalogSnafu,
    CreateDatabaseSnafu, CreateTableSnafu, DeserializeInsertBatchSnafu,
    FindNewColumnsOnInsertionSnafu, InsertSnafu, MissingMetasrvOptsSnafu, Result,
    SchemaNotFoundSnafu, SelectSnafu,
    CreateDatabaseSnafu, CreateTableSnafu, DropTableSnafu, FindNewColumnsOnInsertionSnafu,
    InsertSnafu, MissingMetasrvOptsSnafu, Result, SchemaNotFoundSnafu, SelectSnafu,
    UnsupportedExprSnafu,
};
use crate::expr_factory::{CreateExprFactoryRef, DefaultCreateExprFactory};
use crate::frontend::FrontendOptions;
use crate::sql::insert_to_request;
use crate::table::insert::insert_request_to_insert_batch;
use crate::table::route::TableRoutes;

#[async_trait]
@@ -268,15 +269,47 @@ impl Instance {

    /// Handle alter expr
    pub async fn handle_alter(&self, expr: AlterExpr) -> Result<Output> {
        self.admin(expr.schema_name.as_deref().unwrap_or(DEFAULT_SCHEMA_NAME))
            .alter(expr)
            .await
            .and_then(admin_result_to_output)
            .context(AlterTableSnafu)
        match &self.dist_instance {
            Some(dist_instance) => dist_instance.handle_alter_table(expr).await,
            None => self
                .admin(expr.schema_name.as_deref().unwrap_or(DEFAULT_SCHEMA_NAME))
                .alter(expr)
                .await
                .and_then(admin_result_to_output)
                .context(AlterTableSnafu),
        }
    }

    /// Handle drop table expr
    pub async fn handle_drop_table(&self, expr: DropTableExpr) -> Result<Output> {
        match self.mode {
            Mode::Standalone => self
                .admin(&expr.schema_name)
                .drop_table(expr)
                .await
                .and_then(admin_result_to_output)
                .context(DropTableSnafu),
            // TODO(ruihang): support drop table in distributed mode
            Mode::Distributed => UnsupportedExprSnafu {
                name: "Distributed DROP TABLE",
            }
            .fail(),
        }
    }

    /// Handle explain expr
    pub async fn handle_explain(&self, sql: &str, explain_stmt: Explain) -> Result<Output> {
        if let Some(dist_instance) = &self.dist_instance {
            dist_instance
                .handle_sql(sql, Statement::Explain(explain_stmt))
                .await
        } else {
            Ok(Output::AffectedRows(0))
        }
    }

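DROP TABLE is only wired up for standalone mode here; distributed mode deliberately fails fast with UnsupportedExpr. A hedged caller-side sketch (expression values are illustrative):

async fn drop_demo(instance: &Instance) -> Result<Output> {
    let expr = DropTableExpr {
        catalog_name: "greptime".to_string(),
        schema_name: "public".to_string(),
        table_name: "demo".to_string(),
    };
    // In Mode::Distributed this returns the UnsupportedExpr error
    // instead of silently doing nothing.
    instance.handle_drop_table(expr).await
}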
    /// Handle batch inserts
    pub async fn handle_inserts(&self, insert_expr: &[InsertExpr]) -> Result<Output> {
    pub async fn handle_inserts(&self, insert_expr: Vec<InsertExpr>) -> Result<Output> {
        let mut success = 0;
        for expr in insert_expr {
            match self.handle_insert(expr).await? {
@@ -288,68 +321,20 @@ impl Instance {
    }

    /// Handle insert. For 'values' insertion, create/alter the destination table on demand.
    pub async fn handle_insert(&self, insert_expr: &InsertExpr) -> Result<Output> {
    pub async fn handle_insert(&self, mut insert_expr: InsertExpr) -> Result<Output> {
        let table_name = &insert_expr.table_name;
        let catalog_name = DEFAULT_CATALOG_NAME;
        let schema_name = &insert_expr.schema_name;

        if let Some(expr) = &insert_expr.expr {
            match expr {
                api::v1::insert_expr::Expr::Values(values) => {
                    // TODO(hl): gRPC should also support partitioning.
                    let region_number = 0;
                    self.handle_insert_values(
                        catalog_name,
                        schema_name,
                        table_name,
                        region_number,
                        values,
                    )
                    .await
                }
                api::v1::insert_expr::Expr::Sql(_) => {
                    // Frontend does not comprehend insert request that is raw SQL string
                    self.database(schema_name)
                        .insert(insert_expr.clone())
                        .await
                        .and_then(Output::try_from)
                        .context(InsertSnafu)
                }
            }
        } else {
            // expr is empty
            Ok(Output::AffectedRows(0))
        }
    }
        let columns = &insert_expr.columns;

        self.create_or_alter_table_on_demand(catalog_name, schema_name, table_name, columns)
            .await?;

        insert_expr.region_number = 0;

    /// Handle insert requests in frontend
    /// If insert is SQL string flavor, just forward to datanode
    /// If insert is parsed InsertExpr, frontend should comprehend the schema and create/alter table on demand.
    pub async fn handle_insert_values(
        &self,
        catalog_name: &str,
        schema_name: &str,
        table_name: &str,
        region_number: u32,
        values: &insert_expr::Values,
    ) -> Result<Output> {
        let insert_batches =
            common_insert::insert_batches(&values.values).context(DeserializeInsertBatchSnafu)?;
        self.create_or_alter_table_on_demand(
            catalog_name,
            schema_name,
            table_name,
            &insert_batches,
        )
        .await?;
        self.database(schema_name)
            .insert(InsertExpr {
                schema_name: schema_name.to_string(),
                table_name: table_name.to_string(),
                region_number,
                options: Default::default(),
                expr: Some(insert_expr::Expr::Values(values.clone())),
            })
            .insert(insert_expr)
            .await
            .and_then(Output::try_from)
            .context(InsertSnafu)
@@ -363,7 +348,7 @@ impl Instance {
        catalog_name: &str,
        schema_name: &str,
        table_name: &str,
        insert_batches: &[InsertBatch],
        columns: &[Column],
    ) -> Result<()> {
        match self
            .catalog_manager
@@ -385,13 +370,8 @@ impl Instance {
                    "Table {}.{}.{} does not exist, try create table",
                    catalog_name, schema_name, table_name,
                );
                self.create_table_by_insert_batches(
                    catalog_name,
                    schema_name,
                    table_name,
                    insert_batches,
                )
                .await?;
                self.create_table_by_columns(catalog_name, schema_name, table_name, columns)
                    .await?;
                info!(
                    "Successfully created table on insertion: {}.{}.{}",
                    catalog_name, schema_name, table_name
@@ -399,7 +379,8 @@ impl Instance {
            }
            Some(table) => {
                let schema = table.schema();
                if let Some(add_columns) = common_insert::find_new_columns(&schema, insert_batches)

                if let Some(add_columns) = common_grpc_expr::find_new_columns(&schema, columns)
                    .context(FindNewColumnsOnInsertionSnafu)?
                {
                    info!(
@@ -424,17 +405,17 @@ impl Instance {
    }

    /// Infer create table expr from inserting data
    async fn create_table_by_insert_batches(
    async fn create_table_by_columns(
        &self,
        catalog_name: &str,
        schema_name: &str,
        table_name: &str,
        insert_batches: &[InsertBatch],
        columns: &[Column],
    ) -> Result<Output> {
        // Create table automatically, build schema from data.
        let create_expr = self
            .create_expr_factory
            .create_expr_by_insert_batch(catalog_name, schema_name, table_name, insert_batches)
            .create_expr_by_columns(catalog_name, schema_name, table_name, columns)
            .await?;

        info!(
@@ -495,9 +476,10 @@ impl Instance {

        let insert_request = insert_to_request(&schema_provider, *insert)?;

        let batch = crate::table::insert::insert_request_to_insert_batch(&insert_request)?;
        let (columns, _row_count) =
            crate::table::insert::insert_request_to_insert_batch(&insert_request)?;

        self.create_or_alter_table_on_demand(&catalog, &schema, &table, &[batch])
        self.create_or_alter_table_on_demand(&catalog, &schema, &table, &columns)
            .await?;

        let table = schema_provider
@@ -510,6 +492,19 @@ impl Instance {
            .await
            .context(error::TableSnafu)
    }

    fn stmt_to_insert_batch(
        &self,
        catalog: &str,
        schema: &str,
        insert: Box<Insert>,
    ) -> Result<(Vec<Column>, u32)> {
        let catalog_provider = self.get_catalog(catalog)?;
        let schema_provider = Self::get_schema(catalog_provider, schema)?;

        let insert_request = insert_to_request(&schema_provider, *insert)?;
        insert_request_to_insert_batch(&insert_request)
    }
}

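Taken together, the insert path is now: columns in, table created or widened on demand, then the insert forwarded. A hedged sketch of the decision logic in isolation (types simplified to plain strings and sets, not the real catalog API):

use std::collections::HashSet;

fn plan_ddl(existing: Option<&HashSet<String>>, incoming: &[String]) -> &'static str {
    match existing {
        // Table missing: infer a schema from the inserted columns.
        None => "create table from columns",
        // Table present: only add the columns the schema lacks.
        Some(schema) if incoming.iter().any(|c| !schema.contains(c)) => {
            "alter table, add new columns"
        }
        Some(_) => "no ddl needed",
    }
}

fn main() {
    let schema: HashSet<String> = ["host".to_string(), "ts".to_string()].into();
    assert_eq!(plan_ddl(None, &["host".to_string()]), "create table from columns");
    assert_eq!(
        plan_ddl(Some(&schema), &["host".to_string(), "cpu".to_string()]),
        "alter table, add new columns"
    );
}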
#[async_trait]
@@ -563,7 +558,7 @@ impl SqlQueryHandler for Instance {
                .context(server_error::ExecuteQuerySnafu { query }),
            Statement::Insert(insert) => match self.mode {
                Mode::Standalone => {
                    let (_, schema_name, table_name) = insert
                    let (catalog_name, schema_name, table_name) = insert
                        .full_table_name()
                        .context(error::ParseSqlSnafu)
                        .map_err(BoxedError::new)
@@ -571,14 +566,19 @@ impl SqlQueryHandler for Instance {
                            msg: "Failed to get table name",
                        })?;

                    let (columns, row_count) = self
                        .stmt_to_insert_batch(&catalog_name, &schema_name, insert)
                        .map_err(BoxedError::new)
                        .context(server_error::ExecuteQuerySnafu { query })?;

                    let expr = InsertExpr {
                        schema_name,
                        table_name,
                        expr: Some(insert_expr::Expr::Sql(query.to_string())),
                        region_number: 0,
                        options: HashMap::default(),
                        columns,
                        row_count,
                    };
                    self.handle_insert(&expr)
                    self.handle_insert(expr)
                        .await
                        .map_err(BoxedError::new)
                        .context(server_error::ExecuteQuerySnafu { query })
@@ -634,8 +634,24 @@ impl SqlQueryHandler for Instance {
                .await
                .map_err(BoxedError::new)
                .context(server_error::ExecuteQuerySnafu { query }),
            Statement::DropTable(drop_stmt) => {
                let expr = DropTableExpr {
                    catalog_name: drop_stmt.catalog_name,
                    schema_name: drop_stmt.schema_name,
                    table_name: drop_stmt.table_name,
                };
                self.handle_drop_table(expr)
                    .await
                    .map_err(BoxedError::new)
                    .context(server_error::ExecuteQuerySnafu { query })
            }
            Statement::Explain(explain_stmt) => self
                .handle_explain(query, explain_stmt)
                .await
                .map_err(BoxedError::new)
                .context(server_error::ExecuteQuerySnafu { query }),
            Statement::ShowCreateTable(_) => {
                return server_error::NotSupportedSnafu { feat: query }.fail()
                return server_error::NotSupportedSnafu { feat: query }.fail();
            }
        }
        .map_err(BoxedError::new)
@@ -674,7 +690,8 @@ impl GrpcQueryHandler for Instance {
        if let Some(expr) = &query.expr {
            match expr {
                Expr::Insert(insert) => {
                    let result = self.handle_insert(insert).await;
                    // TODO(fys): refactor, avoid clone
                    let result = self.handle_insert(insert.clone()).await;
                    result
                        .map(|o| match o {
                            Output::AffectedRows(rows) => ObjectResultBuilder::new()
@@ -739,6 +756,7 @@ fn get_schema_name(expr: &AdminExpr) -> &str {
        Some(admin_expr::Expr::Create(expr)) => expr.schema_name.as_deref(),
        Some(admin_expr::Expr::Alter(expr)) => expr.schema_name.as_deref(),
        Some(admin_expr::Expr::CreateDatabase(_)) | None => Some(DEFAULT_SCHEMA_NAME),
        Some(admin_expr::Expr::DropTable(expr)) => Some(expr.schema_name.as_ref()),
    };
    schema_name.unwrap_or(DEFAULT_SCHEMA_NAME)
}
@@ -765,7 +783,7 @@ impl GrpcAdminHandler for Instance {
mod tests {
    use std::assert_matches::assert_matches;

    use api::v1::codec::{InsertBatch, SelectResult};
    use api::v1::codec::SelectResult;
    use api::v1::column::SemanticType;
    use api::v1::{
        admin_expr, admin_result, column, object_expr, object_result, select_expr, Column,
@@ -924,22 +942,19 @@ mod tests {
    );

    // insert
    let values = vec![InsertBatch {
        columns: vec![
            expected_host_col.clone(),
            expected_cpu_col.clone(),
            expected_mem_col.clone(),
            expected_ts_col.clone(),
        ],
        row_count: 4,
    }
    .into()];
    let columns = vec![
        expected_host_col.clone(),
        expected_cpu_col.clone(),
        expected_mem_col.clone(),
        expected_ts_col.clone(),
    ];
    let row_count = 4;
    let insert_expr = InsertExpr {
        schema_name: "public".to_string(),
        table_name: "demo".to_string(),
        expr: Some(insert_expr::Expr::Values(insert_expr::Values { values })),
        options: HashMap::default(),
        region_number: 0,
        columns,
        row_count,
    };
    let object_expr = ObjectExpr {
        header: Some(ExprHeader::default()),

@@ -16,13 +16,14 @@ use std::collections::HashMap;
use std::sync::Arc;

use api::helper::ColumnDataTypeWrapper;
use api::v1::{CreateDatabaseExpr, CreateExpr};
use api::v1::{AlterExpr, CreateDatabaseExpr, CreateExpr};
use catalog::CatalogList;
use chrono::DateTime;
use client::admin::{admin_result_to_output, Admin};
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_catalog::{SchemaKey, SchemaValue, TableGlobalKey, TableGlobalValue};
use common_query::Output;
use common_telemetry::{debug, info};
use common_telemetry::{debug, error, info};
use datatypes::prelude::ConcreteDataType;
use datatypes::schema::RawSchema;
use meta_client::client::MetaClient;
@@ -30,7 +31,7 @@ use meta_client::rpc::{
    CreateRequest as MetaCreateRequest, Partition as MetaPartition, PutRequest, RouteResponse,
    TableName, TableRoute,
};
use query::sql::{describe_table, show_databases, show_tables};
use query::sql::{describe_table, explain, show_databases, show_tables};
use query::{QueryEngineFactory, QueryEngineRef};
use snafu::{ensure, OptionExt, ResultExt};
use sql::statements::create::Partitions;
@@ -42,10 +43,12 @@ use table::metadata::{RawTableInfo, RawTableMeta, TableIdent, TableType};
use crate::catalog::FrontendCatalogManager;
use crate::datanode::DatanodeClients;
use crate::error::{
    self, CatalogEntrySerdeSnafu, ColumnDataTypeSnafu, PrimaryKeyNotFoundSnafu, RequestMetaSnafu,
    Result, StartMetaClientSnafu,
    self, CatalogEntrySerdeSnafu, CatalogNotFoundSnafu, CatalogSnafu, ColumnDataTypeSnafu,
    PrimaryKeyNotFoundSnafu, RequestMetaSnafu, Result, SchemaNotFoundSnafu, StartMetaClientSnafu,
    TableNotFoundSnafu,
};
use crate::partitioning::{PartitionBound, PartitionDef};
use crate::table::DistTable;

#[derive(Clone)]
pub(crate) struct DistInstance {
@@ -143,6 +146,9 @@ impl DistInstance {
                .context(error::ExecuteSqlSnafu { sql }),
            Statement::DescribeTable(stmt) => describe_table(stmt, self.catalog_manager.clone())
                .context(error::ExecuteSqlSnafu { sql }),
            Statement::Explain(stmt) => explain(Box::new(stmt), self.query_engine.clone())
                .await
                .context(error::ExecuteSqlSnafu { sql }),
            _ => unreachable!(),
        }
    }
@@ -166,6 +172,34 @@ impl DistInstance {
        Ok(Output::AffectedRows(1))
    }

    pub async fn handle_alter_table(&self, expr: AlterExpr) -> Result<Output> {
        let catalog_name = expr.catalog_name.as_deref().unwrap_or(DEFAULT_CATALOG_NAME);
        let schema_name = expr.schema_name.as_deref().unwrap_or(DEFAULT_SCHEMA_NAME);
        let table_name = expr.table_name.as_str();
        let table = self
            .catalog_manager
            .catalog(catalog_name)
            .context(CatalogSnafu)?
            .context(CatalogNotFoundSnafu { catalog_name })?
            .schema(schema_name)
            .context(CatalogSnafu)?
            .context(SchemaNotFoundSnafu {
                schema_info: format!("{}.{}", catalog_name, schema_name),
            })?
            .table(table_name)
            .context(CatalogSnafu)?
            .context(TableNotFoundSnafu {
                table_name: format!("{}.{}.{}", catalog_name, schema_name, table_name),
            })?;

        let dist_table = table
            .as_any()
            .downcast_ref::<DistTable>()
            .expect("Table impl must be DistTable in distributed mode");
        dist_table.alter_by_expr(expr).await?;
        Ok(Output::AffectedRows(0))
    }

    async fn create_table_in_meta(
        &self,
        create_table: &CreateExpr,
@@ -205,17 +239,32 @@ impl DistInstance {
            catalog_name: table_name.catalog_name.clone(),
            schema_name: table_name.schema_name.clone(),
            table_name: table_name.table_name.clone(),
        };
        }
        .to_string();

        let value = create_table_global_value(create_table, table_route)?
            .as_bytes()
            .context(error::CatalogEntrySerdeSnafu)?;

        self.catalog_manager
        if let Err(existing) = self
            .catalog_manager
            .backend()
            .set(key.to_string().as_bytes(), &value)
            .compare_and_set(key.as_bytes(), &[], &value)
            .await
            .context(error::CatalogSnafu)
            .context(CatalogSnafu)?
        {
            // This unwrap is safe: we compared against empty bytes, so a failed
            // compare-and-set always carries the existing value.
            let existing_bytes = existing.unwrap();
            let existing_value =
                TableGlobalValue::from_bytes(&existing_bytes).context(CatalogEntrySerdeSnafu)?;
            if existing_value.table_info.ident.table_id != create_table.table_id.unwrap() {
                error!(
                    "Table with name {} already exists, value in catalog: {:?}",
                    key, existing_bytes
                );
                return error::TableAlreadyExistSnafu { table: key }.fail();
            }
        }
        Ok(())
    }
}


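Switching from a blind set to compare_and_set makes table registration idempotent: the write only succeeds when the key is absent, and a losing writer gets the current value back to inspect. A hedged sketch of the same pattern over a plain in-memory map (not the real KvBackend trait):

use std::collections::HashMap;

/// Insert only if `key` is absent; on conflict return the existing value,
/// so the caller can decide whether it is a benign retry or a real clash.
fn compare_and_set(
    kv: &mut HashMap<String, Vec<u8>>,
    key: &str,
    value: Vec<u8>,
) -> Result<(), Vec<u8>> {
    if let Some(existing) = kv.get(key) {
        return Err(existing.clone());
    }
    kv.insert(key.to_string(), value);
    Ok(())
}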
@@ -14,13 +14,11 @@

use std::collections::HashMap;

use api::v1::codec::InsertBatch;
use api::v1::insert_expr::Expr;
use api::v1::InsertExpr;
use api::v1::{Column, InsertExpr};
use async_trait::async_trait;
use common_catalog::consts::DEFAULT_CATALOG_NAME;
use common_error::prelude::BoxedError;
use common_insert::column_to_vector;
use common_grpc_expr::column_to_vector;
use servers::influxdb::InfluxdbRequest;
use servers::query_handler::InfluxdbLineProtocolHandler;
use servers::{error as server_error, Mode};
@@ -28,7 +26,7 @@ use snafu::{OptionExt, ResultExt};
use table::requests::InsertRequest;

use crate::error;
use crate::error::{DeserializeInsertBatchSnafu, InsertBatchToRequestSnafu, Result};
use crate::error::{InsertBatchToRequestSnafu, Result};
use crate::instance::Instance;

#[async_trait]
@@ -37,7 +35,7 @@ impl InfluxdbLineProtocolHandler for Instance {
        match self.mode {
            Mode::Standalone => {
                let exprs: Vec<InsertExpr> = request.try_into()?;
                self.handle_inserts(&exprs)
                self.handle_inserts(exprs)
                    .await
                    .map_err(BoxedError::new)
                    .context(server_error::ExecuteQuerySnafu {
@@ -61,53 +59,41 @@ impl InfluxdbLineProtocolHandler for Instance {
impl Instance {
    pub(crate) async fn dist_insert(&self, inserts: Vec<InsertExpr>) -> Result<usize> {
        let mut joins = Vec::with_capacity(inserts.len());
        let catalog_name = DEFAULT_CATALOG_NAME.to_string();
        let catalog_name = DEFAULT_CATALOG_NAME;

        for insert in inserts {
            let self_clone = self.clone();
            let insert_batches = match &insert.expr.unwrap() {
                Expr::Values(values) => common_insert::insert_batches(&values.values)
                    .context(DeserializeInsertBatchSnafu)?,
                Expr::Sql(_) => unreachable!(),
            };

            self.create_or_alter_table_on_demand(
                DEFAULT_CATALOG_NAME,
                &insert.schema_name,
                &insert.table_name,
                &insert_batches,
            )
            .await?;
            let schema_name = insert.schema_name.to_string();
            let table_name = insert.table_name.to_string();

            let schema_name = insert.schema_name.clone();
            let table_name = insert.table_name.clone();
            let columns = &insert.columns;
            let row_count = insert.row_count;

            for insert_batch in &insert_batches {
                let catalog_name = catalog_name.clone();
                let schema_name = schema_name.clone();
                let table_name = table_name.clone();
                let request = Self::insert_batch_to_request(
                    DEFAULT_CATALOG_NAME,
                    &schema_name,
                    &table_name,
                    insert_batch,
                )?;
                // TODO(fys): need a separate runtime here
                let self_clone = self_clone.clone();
                let join = tokio::spawn(async move {
                    let catalog = self_clone.get_catalog(&catalog_name)?;
                    let schema = Self::get_schema(catalog, &schema_name)?;
                    let table = schema
                        .table(&table_name)
                        .context(error::CatalogSnafu)?
                        .context(error::TableNotFoundSnafu {
                            table_name: &table_name,
                        })?;
            self.create_or_alter_table_on_demand(catalog_name, &schema_name, &table_name, columns)
                .await?;

                    table.insert(request).await.context(error::TableSnafu)
                });
                joins.push(join);
            }
            let request = Self::columns_to_request(
                catalog_name,
                &schema_name,
                &table_name,
                columns,
                row_count,
            )?;

            // TODO(fys): need a separate runtime here
            let self_clone = self_clone.clone();
            let join = tokio::spawn(async move {
                let catalog = self_clone.get_catalog(catalog_name)?;
                let schema = Self::get_schema(catalog, &schema_name)?;
                let table = schema
                    .table(&table_name)
                    .context(error::CatalogSnafu)?
                    .context(error::TableNotFoundSnafu { table_name })?;

                table.insert(request).await.context(error::TableSnafu)
            });
            joins.push(join);
        }

        let mut affected = 0;
@@ -119,16 +105,16 @@ impl Instance {
        Ok(affected)
    }

    fn insert_batch_to_request(
    fn columns_to_request(
        catalog_name: &str,
        schema_name: &str,
        table_name: &str,
        batches: &InsertBatch,
        columns: &[Column],
        row_count: u32,
    ) -> Result<InsertRequest> {
        let mut vectors = HashMap::with_capacity(batches.columns.len());
        for col in &batches.columns {
            let vector =
                column_to_vector(col, batches.row_count).context(InsertBatchToRequestSnafu)?;
        let mut vectors = HashMap::with_capacity(columns.len());
        for col in columns {
            let vector = column_to_vector(col, row_count).context(InsertBatchToRequestSnafu)?;
            vectors.insert(col.column_name.clone(), vector);
        }
        Ok(InsertRequest {

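dist_insert spawns one task per InsertExpr and then awaits all joins, so independent tables are written concurrently. A hedged sketch of that fan-out/join pattern in isolation (plain tokio, no GreptimeDB types):

async fn fan_out(inserts: Vec<u64>) -> usize {
    let mut joins = Vec::with_capacity(inserts.len());
    for n in inserts {
        // Each insert becomes an independent task; a panicked task
        // surfaces when its join handle is awaited.
        joins.push(tokio::spawn(async move { n as usize }));
    }
    let mut affected = 0;
    for join in joins {
        affected += join.await.expect("task panicked");
    }
    affected
}

#[tokio::main]
async fn main() {
    assert_eq!(fan_out(vec![1, 2, 3]).await, 6);
}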
@@ -53,7 +53,7 @@ impl OpentsdbProtocolHandler for Instance {
impl Instance {
    async fn insert_opentsdb_metric(&self, data_point: &DataPoint) -> Result<()> {
        let expr = data_point.as_grpc_insert();
        self.handle_insert(&expr).await?;
        self.handle_insert(expr).await?;
        Ok(())
    }
}

@@ -115,7 +115,7 @@ impl PrometheusProtocolHandler for Instance {
            Mode::Standalone => {
                let exprs = prometheus::write_request_to_insert_exprs(database, request)?;
                let futures = exprs
                    .iter()
                    .into_iter()
                    .map(|e| self.handle_insert(e))
                    .collect::<Vec<_>>();
                let res = futures_util::future::join_all(futures)

@@ -116,10 +116,10 @@ impl Services {
            None
        };

        let http_server_and_addr = if let Some(http_addr) = &opts.http_addr {
            let http_addr = parse_addr(http_addr)?;
        let http_server_and_addr = if let Some(http_options) = &opts.http_options {
            let http_addr = parse_addr(&http_options.addr)?;

            let mut http_server = HttpServer::new(instance.clone());
            let mut http_server = HttpServer::new(instance.clone(), http_options.clone());
            if opentsdb_server_and_addr.is_some() {
                http_server.set_opentsdb_handler(instance.clone());
            }

@@ -18,13 +18,17 @@ use std::any::Any;
use std::collections::{HashMap, HashSet};
use std::sync::Arc;

use api::v1::AlterExpr;
use async_trait::async_trait;
use client::admin::Admin;
use client::Database;
use common_catalog::consts::DEFAULT_CATALOG_NAME;
use common_query::error::Result as QueryResult;
use common_query::logical_plan::Expr;
use common_query::physical_plan::{PhysicalPlan, PhysicalPlanRef};
use common_recordbatch::adapter::AsyncRecordBatchStreamAdapter;
use common_recordbatch::{RecordBatches, SendableRecordBatchStream};
use common_telemetry::debug;
use datafusion::execution::runtime_env::RuntimeEnv;
use datafusion::logical_plan::Expr as DfExpr;
use datafusion::physical_plan::{
@@ -43,7 +47,7 @@ use table::Table;
use tokio::sync::RwLock;

use crate::datanode::DatanodeClients;
use crate::error::{self, Error, Result};
use crate::error::{self, Error, LeaderNotFoundSnafu, RequestDatanodeSnafu, Result};
use crate::partitioning::columns::RangeColumnsPartitionRule;
use crate::partitioning::range::RangePartitionRule;
use crate::partitioning::{
@@ -348,6 +352,36 @@ impl DistTable {
        };
        Ok(partition_rule)
    }

    /// Define an `alter_by_expr` instead of impl [`Table::alter`] to avoid redundant conversion between
    /// [`table::requests::AlterTableRequest`] and [`AlterExpr`].
    pub(crate) async fn alter_by_expr(&self, expr: AlterExpr) -> Result<()> {
        let table_routes = self.table_routes.get_route(&self.table_name).await?;
        let leaders = table_routes.find_leaders();
        ensure!(
            !leaders.is_empty(),
            LeaderNotFoundSnafu {
                table: format!(
                    "{:?}.{:?}.{}",
                    expr.catalog_name, expr.schema_name, expr.table_name
                )
            }
        );
        for datanode in leaders {
            let admin = Admin::new(
                DEFAULT_CATALOG_NAME,
                self.datanode_clients.get_client(&datanode).await,
            );
            debug!("Sent alter table {:?} to {:?}", expr, admin);
            let result = admin
                .alter(expr.clone())
                .await
                .context(RequestDatanodeSnafu)?;
            debug!("Alter table result: {:?}", result);
            // TODO(hl): We should further check and track alter result in some global DDL task tracker
        }
        Ok(())
    }
}

fn project_schema(table_schema: SchemaRef, projection: &Option<Vec<usize>>) -> SchemaRef {
@@ -477,9 +511,8 @@ impl PartitionExec {
mod test {
    use std::time::Duration;

    use api::v1::codec::InsertBatch;
    use api::v1::column::SemanticType;
    use api::v1::{column, insert_expr, Column, ColumnDataType};
    use api::v1::{column, Column, ColumnDataType};
    use catalog::remote::MetaKvBackend;
    use common_recordbatch::util;
    use datafusion::arrow_print;
@@ -936,8 +969,8 @@ mod test {
        start_ts: i64,
    ) {
        let rows = data.len() as u32;
        let values = vec![InsertBatch {
            columns: vec![
        let values = vec![(
            vec![
                Column {
                    column_name: "ts".to_string(),
                    values: Some(column::Values {
@@ -967,10 +1000,8 @@ mod test {
                    ..Default::default()
                },
            ],
            row_count: rows,
        }
        .into()];
        let values = insert_expr::Values { values };
            rows,
        )];
        dn_instance
            .execute_grpc_insert(
                &table_name.catalog_name,

@@ -16,10 +16,8 @@ use std::collections::HashMap;
use std::sync::Arc;

use api::helper::ColumnDataTypeWrapper;
use api::v1::codec::InsertBatch;
use api::v1::column::SemanticType;
use api::v1::insert_expr::Expr;
use api::v1::{codec, insert_expr, Column, InsertExpr, MutateResult};
use api::v1::{Column, InsertExpr, MutateResult};
use client::{Database, ObjectResult};
use datatypes::prelude::ConcreteDataType;
use snafu::{ensure, OptionExt, ResultExt};
@@ -84,7 +82,7 @@ impl DistTable {
    }
}

pub fn insert_request_to_insert_batch(insert: &InsertRequest) -> Result<InsertBatch> {
pub fn insert_request_to_insert_batch(insert: &InsertRequest) -> Result<(Vec<Column>, u32)> {
    let mut row_count = None;

    let columns = insert
@@ -127,24 +125,20 @@ pub fn insert_request_to_insert_batch(insert: &InsertRequest) -> Result<InsertBa
        })
        .collect::<Result<Vec<_>>>()?;

    let insert_batch = codec::InsertBatch {
        columns,
        row_count: row_count.map(|rows| rows as u32).unwrap_or(0),
    };
    Ok(insert_batch)
    let row_count = row_count.unwrap_or(0) as u32;

    Ok((columns, row_count))
}

fn to_insert_expr(region_number: RegionNumber, insert: InsertRequest) -> Result<InsertExpr> {
    let table_name = insert.table_name.clone();
    let insert_batch = insert_request_to_insert_batch(&insert)?;
    let (columns, row_count) = insert_request_to_insert_batch(&insert)?;
    Ok(InsertExpr {
        schema_name: insert.schema_name,
        table_name,
        expr: Some(Expr::Values(insert_expr::Values {
            values: vec![insert_batch.into()],
        })),
        region_number,
        options: Default::default(),
        columns,
        row_count,
    })
}
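The change above flattens the insert payload: `columns` and `row_count` become first-class fields of `InsertExpr` instead of being serialized into an `InsertBatch` blob. A hedged sketch of a consumer side, where `insert_request` stands in for any `InsertRequest` built elsewhere:

    // Sketch: no InsertBatch decoding step is needed anymore; the columns
    // on the expression can be inspected directly.
    let expr = to_insert_expr(0, insert_request)?;
    for column in &expr.columns {
        println!("column {} carries {} rows", column.column_name, expr.row_count);
    }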
@@ -152,8 +146,6 @@ fn to_insert_expr(region_number: RegionNumber, insert: InsertRequest) -> Result<
mod tests {
    use std::collections::HashMap;

    use api::v1::codec::InsertBatch;
    use api::v1::insert_expr::Expr;
    use api::v1::{ColumnDataType, InsertExpr};
    use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
    use datatypes::prelude::ConcreteDataType;
@@ -199,16 +191,7 @@ mod tests {
        let table_name = insert_expr.table_name;
        assert_eq!("demo", table_name);

        let expr = insert_expr.expr.as_ref().unwrap();
        let vals = match expr {
            Expr::Values(vals) => vals,
            Expr::Sql(_) => unreachable!(),
        };

        let batch: &[u8] = vals.values[0].as_ref();
        let vals: InsertBatch = batch.try_into().unwrap();

        for column in vals.columns {
        for column in insert_expr.columns {
            let name = column.column_name;
            if name == "id" {
                assert_eq!(0, column.null_mask[0]);

@@ -216,8 +216,7 @@ async fn get_table_global_value(
    let tv = get_from_store(kv_store, tg_key).await?;
    match tv {
        Some(tv) => {
            let tv = TableGlobalValue::parse(&String::from_utf8_lossy(&tv))
                .context(error::InvalidCatalogValueSnafu)?;
            let tv = TableGlobalValue::from_bytes(&tv).context(error::InvalidCatalogValueSnafu)?;
            Ok(Some(tv))
        }
        None => Ok(None),
@@ -123,14 +123,14 @@ impl<S: StorageEngine> TableEngine for MitoEngine<S> {
    async fn drop_table(
        &self,
        _ctx: &EngineContext,
        _request: DropTableRequest,
    ) -> TableResult<()> {
        unimplemented!();
        request: DropTableRequest,
    ) -> TableResult<bool> {
        Ok(self.inner.drop_table(request).await?)
    }
}

struct MitoEngineInner<S: StorageEngine> {
    /// All tables opened by the engine.
    /// All tables opened by the engine. Map key is formatted [TableReference].
    ///
    /// Writing to `tables` should also hold the `table_mutex`.
    tables: RwLock<HashMap<String, TableRef>>,
@@ -464,6 +464,22 @@ impl<S: StorageEngine> MitoEngineInner<S> {
            .context(error::AlterTableSnafu { table_name })?;
        Ok(table)
    }

    /// Drop table. Returns whether the table was dropped (true) or did not exist (false).
    async fn drop_table(&self, req: DropTableRequest) -> Result<bool> {
        let table_reference = TableReference {
            catalog: &req.catalog_name,
            schema: &req.schema_name,
            table: &req.table_name,
        };
        // todo(ruihang): reclaim persisted data
        Ok(self
            .tables
            .write()
            .unwrap()
            .remove(&table_reference.to_string())
            .is_some())
    }
}
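A sketch of the key scheme behind the `tables` map; the exact rendering comes from `TableReference`'s `Display` impl, which is assumed here to join catalog, schema, and table with dots:

    // Hypothetical illustration of the lookup that drop_table performs.
    let key = TableReference {
        catalog: "greptime",
        schema: "public",
        table: "demo",
    }
    .to_string(); // assumed to render like "greptime.public.demo"
    let dropped = tables.write().unwrap().remove(&key).is_some();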
impl<S: StorageEngine> MitoEngineInner<S> {
@@ -961,4 +977,69 @@ mod tests {
        assert_eq!(new_schema.timestamp_column(), old_schema.timestamp_column());
        assert_eq!(new_schema.version(), old_schema.version() + 1);
    }

    #[tokio::test]
    async fn test_drop_table() {
        common_telemetry::init_default_ut_logging();
        let ctx = EngineContext::default();

        let (_engine, table_engine, table, _object_store, _dir) =
            test_util::setup_mock_engine_and_table().await;
        let engine_ctx = EngineContext {};

        let table_info = table.table_info();
        let table_reference = TableReference {
            catalog: DEFAULT_CATALOG_NAME,
            schema: DEFAULT_SCHEMA_NAME,
            table: &table_info.name,
        };

        let create_table_request = CreateTableRequest {
            id: 1,
            catalog_name: DEFAULT_CATALOG_NAME.to_string(),
            schema_name: DEFAULT_SCHEMA_NAME.to_string(),
            table_name: table_info.name.to_string(),
            schema: table_info.meta.schema.clone(),
            create_if_not_exists: true,
            desc: None,
            primary_key_indices: Vec::default(),
            table_options: HashMap::new(),
            region_numbers: vec![0],
        };

        let created_table = table_engine
            .create_table(&ctx, create_table_request)
            .await
            .unwrap();
        assert_eq!(table_info, created_table.table_info());
        assert!(table_engine.table_exists(&engine_ctx, &table_reference));

        let drop_table_request = DropTableRequest {
            catalog_name: table_reference.catalog.to_string(),
            schema_name: table_reference.schema.to_string(),
            table_name: table_reference.table.to_string(),
        };
        let table_dropped = table_engine
            .drop_table(&engine_ctx, drop_table_request)
            .await
            .unwrap();
        assert!(table_dropped);
        assert!(!table_engine.table_exists(&engine_ctx, &table_reference));

        // should be able to re-create
        let request = CreateTableRequest {
            id: 2,
            catalog_name: DEFAULT_CATALOG_NAME.to_string(),
            schema_name: DEFAULT_SCHEMA_NAME.to_string(),
            table_name: table_info.name.to_string(),
            schema: table_info.meta.schema.clone(),
            create_if_not_exists: false,
            desc: None,
            primary_key_indices: Vec::default(),
            table_options: HashMap::new(),
            region_numbers: vec![0],
        };
        table_engine.create_table(&ctx, request).await.unwrap();
        assert!(table_engine.table_exists(&engine_ctx, &table_reference));
    }
}
@@ -6,7 +6,7 @@ license = "Apache-2.0"

[dependencies]
futures = { version = "0.3" }
opendal = "0.20"
opendal = { version = "0.21", features = ["layers-tracing", "layers-metrics"] }
tokio = { version = "1.0", features = ["full"] }

[dev-dependencies]
@@ -12,10 +12,10 @@
// See the License for the specific language governing permissions and
// limitations under the License.

pub use opendal::io_util::SeekableReader;
pub use opendal::raw::SeekableReader;
pub use opendal::{
    layers, services, Accessor, Layer, Object, ObjectEntry, ObjectMetadata, ObjectMode,
    ObjectStreamer, Operator as ObjectStore,
    layers, services, Error, ErrorKind, Layer, Object, ObjectLister, ObjectMetadata, ObjectMode,
    Operator as ObjectStore,
};
pub mod backend;
pub mod util;
@@ -14,9 +14,9 @@

use futures::TryStreamExt;

use crate::{ObjectEntry, ObjectStreamer};
use crate::{Object, ObjectLister};

pub async fn collect(stream: ObjectStreamer) -> Result<Vec<ObjectEntry>, std::io::Error> {
pub async fn collect(stream: ObjectLister) -> Result<Vec<Object>, opendal::Error> {
    stream.try_collect::<Vec<_>>().await
}

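A minimal usage sketch of the 0.21-style listing path, using only calls that appear in this diff (`object`, `list`, `read`, `path`, and `util::collect`); the `store` handle is assumed to be built elsewhere:

    // List a directory, collect the lister into Vec<Object>, then read each entry.
    let lister = store.object("/").list().await?;
    for object in util::collect(lister).await? {
        let bytes = object.read().await?;
        println!("{}: {} bytes", object.path(), bytes.len());
    }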
@@ -17,7 +17,7 @@ use std::env;
use anyhow::Result;
use common_telemetry::logging;
use object_store::backend::{fs, s3};
use object_store::{util, Object, ObjectMode, ObjectStore, ObjectStreamer};
use object_store::{util, Object, ObjectLister, ObjectMode, ObjectStore};
use tempdir::TempDir;

async fn test_object_crud(store: &ObjectStore) -> Result<()> {
@@ -61,7 +61,7 @@ async fn test_object_list(store: &ObjectStore) -> Result<()> {

    // List objects
    let o: Object = store.object("/");
    let obs: ObjectStreamer = o.list().await?;
    let obs: ObjectLister = o.list().await?;
    let objects = util::collect(obs).await?;
    assert_eq!(3, objects.len());

@@ -74,7 +74,7 @@ async fn test_object_list(store: &ObjectStore) -> Result<()> {
    assert_eq!(1, objects.len());

    // Only o2 exists
    let o2 = &objects[0].clone().into_object();
    let o2 = &objects[0].clone();
    let bs = o2.read().await?;
    assert_eq!("Hello, object2!", String::from_utf8(bs)?);
    // Delete o2
@@ -22,6 +22,7 @@ use datafusion::physical_plan::udf::ScalarUDF;
use datafusion::sql::planner::{ContextProvider, SqlToRel};
use datatypes::arrow::datatypes::DataType;
use snafu::ResultExt;
use sql::statements::explain::Explain;
use sql::statements::query::Query;
use sql::statements::statement::Statement;

@@ -53,6 +54,18 @@ impl<'a, S: ContextProvider + Send + Sync> DfPlanner<'a, S> {

        Ok(LogicalPlan::DfPlan(result))
    }

    /// Converts EXPLAIN statement to logical plan.
    pub fn explain_to_plan(&self, explain: Explain) -> Result<LogicalPlan> {
        let result = self
            .sql_to_rel
            .sql_statement_to_plan(explain.inner.clone())
            .context(error::PlanSqlSnafu {
                sql: explain.to_string(),
            })?;

        Ok(LogicalPlan::DfPlan(result))
    }
}

impl<'a, S> Planner for DfPlanner<'a, S>
@@ -63,6 +76,7 @@ where
    fn statement_to_plan(&self, statement: Statement) -> Result<LogicalPlan> {
        match statement {
            Statement::Query(qb) => self.query_to_plan(qb),
            Statement::Explain(explain) => self.explain_to_plan(explain),
            Statement::ShowTables(_)
            | Statement::ShowDatabases(_)
            | Statement::ShowCreateTable(_)
@@ -70,7 +84,8 @@ where
            | Statement::CreateTable(_)
            | Statement::CreateDatabase(_)
            | Statement::Alter(_)
            | Statement::Insert(_) => unreachable!(),
            | Statement::Insert(_)
            | Statement::DropTable(_) => unreachable!(),
        }
    }
}
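With this change, EXPLAIN flows through the same DataFusion planning path as ordinary queries. A hedged sketch of the call, assuming a `planner` built like the surrounding code and an `explain` statement already parsed from SQL:

    // EXPLAIN is planned like any other statement; DataFusion then renders
    // the plan description as the query result.
    let plan = planner.statement_to_plan(Statement::Explain(explain))?;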
@@ -24,9 +24,12 @@ use datatypes::vectors::{Helper, StringVector};
use once_cell::sync::Lazy;
use snafu::{ensure, OptionExt, ResultExt};
use sql::statements::describe::DescribeTable;
use sql::statements::explain::Explain;
use sql::statements::show::{ShowDatabases, ShowKind, ShowTables};
use sql::statements::statement::Statement;

use crate::error::{self, Result};
use crate::QueryEngineRef;

const SCHEMAS_COLUMN: &str = "Schemas";
const TABLES_COLUMN: &str = "Tables";
@@ -138,6 +141,11 @@ pub fn show_tables(stmt: ShowTables, catalog_manager: CatalogManagerRef) -> Resu
    Ok(Output::RecordBatches(records))
}

pub async fn explain(stmt: Box<Explain>, query_engine: QueryEngineRef) -> Result<Output> {
    let plan = query_engine.statement_to_plan(Statement::Explain(*stmt))?;
    query_engine.execute(&plan).await
}

pub fn describe_table(stmt: DescribeTable, catalog_manager: CatalogManagerRef) -> Result<Output> {
    let catalog = stmt.catalog_name.as_str();
    let schema = stmt.schema_name.as_str();
@@ -24,6 +24,7 @@ datatypes = { path = "../datatypes" }
futures = "0.3"
hex = { version = "0.4" }
hyper = { version = "0.14", features = ["full"] }
humantime-serde = "1.1"
influxdb_line_protocol = { git = "https://github.com/evenyag/influxdb_iox", branch = "feat/line-protocol" }
metrics = "0.20"
num_cpus = "1.13"
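`humantime-serde` is pulled in so that durations in config files can be written as strings like "30s". A sketch against the `HttpOptions` struct introduced below, assuming the `toml` crate is available for deserialization:

    use std::time::Duration;

    // Hypothetical config snippet; `timeout` parses via humantime_serde.
    let opts: HttpOptions = toml::from_str(
        r#"
        addr = "127.0.0.1:4000"
        timeout = "30s"
        "#,
    )
    .unwrap();
    assert_eq!(Duration::from_secs(30), opts.timeout);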
@@ -59,6 +59,7 @@ const HTTP_API_VERSION: &str = "v1";

pub struct HttpServer {
    sql_handler: SqlQueryHandlerRef,
    options: HttpOptions,
    influxdb_handler: Option<InfluxdbLineProtocolHandlerRef>,
    opentsdb_handler: Option<OpentsdbProtocolHandlerRef>,
    prom_handler: Option<PrometheusProtocolHandlerRef>,
@@ -66,6 +67,22 @@ pub struct HttpServer {
    shutdown_tx: Mutex<Option<Sender<()>>>,
}

#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct HttpOptions {
    pub addr: String,
    #[serde(with = "humantime_serde")]
    pub timeout: Duration,
}

impl Default for HttpOptions {
    fn default() -> Self {
        Self {
            addr: "127.0.0.1:4000".to_string(),
            timeout: Duration::from_secs(30),
        }
    }
}
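Callers now pass options explicitly. A short sketch of constructing the server with a non-default timeout; `sql_handler` is assumed to implement `SqlQueryHandler` elsewhere:

    // Bind on all interfaces and allow slower queries a 60-second budget.
    let options = HttpOptions {
        addr: "0.0.0.0:4000".to_string(),
        timeout: Duration::from_secs(60),
    };
    let server = HttpServer::new(sql_handler, options);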
#[derive(Debug, Serialize, Deserialize, JsonSchema, Eq, PartialEq)]
pub struct ColumnSchema {
    name: String,
@@ -271,9 +288,10 @@ pub struct ApiState {
}

impl HttpServer {
    pub fn new(sql_handler: SqlQueryHandlerRef) -> Self {
    pub fn new(sql_handler: SqlQueryHandlerRef, options: HttpOptions) -> Self {
        Self {
            sql_handler,
            options,
            opentsdb_handler: None,
            influxdb_handler: None,
            prom_handler: None,
@@ -385,8 +403,7 @@ impl HttpServer {
                ServiceBuilder::new()
                    .layer(HandleErrorLayer::new(handle_error))
                    .layer(TraceLayer::new_for_http())
                    // TODO(LFC): make timeout configurable
                    .layer(TimeoutLayer::new(Duration::from_secs(30)))
                    .layer(TimeoutLayer::new(self.options.timeout))
                    // custom layer
                    .layer(middleware::from_fn(context::build_ctx)),
            )
@@ -443,14 +460,71 @@ async fn handle_error(err: BoxError) -> Json<JsonResponse> {

#[cfg(test)]
mod test {
    use std::future::pending;
    use std::sync::Arc;

    use axum::handler::Handler;
    use axum::http::StatusCode;
    use axum::routing::get;
    use axum_test_helper::TestClient;
    use common_recordbatch::RecordBatches;
    use datatypes::prelude::*;
    use datatypes::schema::{ColumnSchema, Schema};
    use datatypes::vectors::{StringVector, UInt32Vector};
    use tokio::sync::mpsc;

    use super::*;
    use crate::query_handler::SqlQueryHandler;

    struct DummyInstance {
        _tx: mpsc::Sender<(String, Vec<u8>)>,
    }

    #[async_trait]
    impl SqlQueryHandler for DummyInstance {
        async fn do_query(&self, _query: &str) -> Result<Output> {
            unimplemented!()
        }
    }

    fn timeout() -> TimeoutLayer {
        TimeoutLayer::new(Duration::from_millis(10))
    }

    async fn forever() {
        pending().await
    }

    fn make_test_app(tx: mpsc::Sender<(String, Vec<u8>)>) -> Router {
        let instance = Arc::new(DummyInstance { _tx: tx });
        let server = HttpServer::new(instance, HttpOptions::default());
        server.make_app().route(
            "/test/timeout",
            get(forever.layer(
                ServiceBuilder::new()
                    .layer(HandleErrorLayer::new(|_: BoxError| async {
                        StatusCode::REQUEST_TIMEOUT
                    }))
                    .layer(timeout()),
            )),
        )
    }

    #[test]
    fn test_http_options_default() {
        let default = HttpOptions::default();
        assert_eq!("127.0.0.1:4000".to_string(), default.addr);
        assert_eq!(Duration::from_secs(30), default.timeout)
    }

    #[tokio::test]
    async fn test_http_server_request_timeout() {
        let (tx, _rx) = mpsc::channel(100);
        let app = make_test_app(tx);
        let client = TestClient::new(app);
        let res = client.get("/test/timeout").send().await;
        assert_eq!(res.status(), StatusCode::REQUEST_TIMEOUT);
    }

    #[tokio::test]
    async fn test_recordbatches_conversion() {
@@ -14,7 +14,6 @@

use std::collections::HashMap;

use api::v1::insert_expr::{self, Expr};
use api::v1::InsertExpr;
use common_grpc::writer::{LinesWriter, Precision};
use influxdb_line_protocol::{parse_lines, FieldValue};
@@ -165,14 +164,15 @@ impl TryFrom<&InfluxdbRequest> for Vec<InsertExpr> {

        Ok(writers
            .into_iter()
            .map(|(table_name, writer)| InsertExpr {
                schema_name: schema_name.clone(),
                table_name,
                expr: Some(Expr::Values(insert_expr::Values {
                    values: vec![writer.finish().into()],
                })),
                options: HashMap::default(),
                region_number: 0,
            .map(|(table_name, writer)| {
                let (columns, row_count) = writer.finish();
                InsertExpr {
                    schema_name: schema_name.clone(),
                    table_name,
                    region_number: 0,
                    columns,
                    row_count,
                }
            })
            .collect())
    }
@@ -180,12 +180,9 @@ impl TryFrom<&InfluxdbRequest> for Vec<InsertExpr> {

#[cfg(test)]
mod tests {
    use std::ops::Deref;
    use std::sync::Arc;

    use api::v1::codec::InsertBatch;
    use api::v1::column::{SemanticType, Values};
    use api::v1::insert_expr::Expr;
    use api::v1::{Column, ColumnDataType, InsertExpr};
    use common_base::BitVec;
    use common_time::timestamp::TimeUnit;
@@ -242,15 +239,9 @@ monitor2,host=host4 cpu=66.3,memory=1029 1663840496400340003";

        for expr in insert_exprs {
            assert_eq!("public", expr.schema_name);
            let values = match expr.expr.unwrap() {
                Expr::Values(vals) => vals,
                Expr::Sql(_) => panic!(),
            };
            let raw_batch = values.values.get(0).unwrap();
            let batch: InsertBatch = raw_batch.deref().try_into().unwrap();
            match &expr.table_name[..] {
                "monitor1" => assert_monitor_1(&batch),
                "monitor2" => assert_monitor_2(&batch),
                "monitor1" => assert_monitor_1(&expr.columns),
                "monitor2" => assert_monitor_2(&expr.columns),
                _ => panic!(),
            }
        }
@@ -327,8 +318,7 @@ monitor2,host=host4 cpu=66.3,memory=1029 1663840496400340003";
        }
    }

    fn assert_monitor_1(insert_batch: &InsertBatch) {
        let columns = &insert_batch.columns;
    fn assert_monitor_1(columns: &[Column]) {
        assert_eq!(4, columns.len());
        verify_column(
            &columns[0],
@@ -379,8 +369,7 @@ monitor2,host=host4 cpu=66.3,memory=1029 1663840496400340003";
        );
    }

    fn assert_monitor_2(insert_batch: &InsertBatch) {
        let columns = &insert_batch.columns;
    fn assert_monitor_2(columns: &[Column]) {
        assert_eq!(4, columns.len());
        verify_column(
            &columns[0],
@@ -12,11 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use std::collections::HashMap;

use api::v1::codec::InsertBatch;
use api::v1::column::SemanticType;
use api::v1::{column, insert_expr, Column, ColumnDataType, InsertExpr};
use api::v1::{column, Column, ColumnDataType, InsertExpr};
use common_catalog::consts::DEFAULT_SCHEMA_NAME;
use common_grpc::writer::Precision;
use table::requests::InsertRequest;
@@ -189,18 +186,12 @@ impl DataPoint {
            });
        }

        let batch = InsertBatch {
            columns,
            row_count: 1,
        };
        InsertExpr {
            schema_name,
            table_name: self.metric.clone(),
            expr: Some(insert_expr::Expr::Values(insert_expr::Values {
                values: vec![batch.into()],
            })),
            options: HashMap::default(),
            region_number: 0,
            columns,
            row_count: 1,
        }
    }

@@ -337,36 +328,31 @@ mod test {
        let grpc_insert = data_point.as_grpc_insert();
        assert_eq!(grpc_insert.table_name, "my_metric_1");

        match grpc_insert.expr {
            Some(insert_expr::Expr::Values(insert_expr::Values { values })) => {
                assert_eq!(values.len(), 1);
                let insert_batch = InsertBatch::try_from(values[0].as_slice()).unwrap();
                assert_eq!(insert_batch.row_count, 1);
                let columns = insert_batch.columns;
                assert_eq!(columns.len(), 4);
        let columns = &grpc_insert.columns;
        let row_count = grpc_insert.row_count;

                assert_eq!(columns[0].column_name, OPENTSDB_TIMESTAMP_COLUMN_NAME);
                assert_eq!(
                    columns[0].values.as_ref().unwrap().ts_millis_values,
                    vec![1000]
                );
        assert_eq!(row_count, 1);
        assert_eq!(columns.len(), 4);

                assert_eq!(columns[1].column_name, OPENTSDB_VALUE_COLUMN_NAME);
                assert_eq!(columns[1].values.as_ref().unwrap().f64_values, vec![1.0]);
        assert_eq!(columns[0].column_name, OPENTSDB_TIMESTAMP_COLUMN_NAME);
        assert_eq!(
            columns[0].values.as_ref().unwrap().ts_millis_values,
            vec![1000]
        );

                assert_eq!(columns[2].column_name, "tagk1");
                assert_eq!(
                    columns[2].values.as_ref().unwrap().string_values,
                    vec!["tagv1"]
                );
        assert_eq!(columns[1].column_name, OPENTSDB_VALUE_COLUMN_NAME);
        assert_eq!(columns[1].values.as_ref().unwrap().f64_values, vec![1.0]);

                assert_eq!(columns[3].column_name, "tagk2");
                assert_eq!(
                    columns[3].values.as_ref().unwrap().string_values,
                    vec!["tagv2"]
                );
            }
            _ => unreachable!(),
        }
        assert_eq!(columns[2].column_name, "tagk1");
        assert_eq!(
            columns[2].values.as_ref().unwrap().string_values,
            vec!["tagv1"]
        );

        assert_eq!(columns[3].column_name, "tagk2");
        assert_eq!(
            columns[3].values.as_ref().unwrap().string_values,
            vec!["tagv2"]
        );
    }
}
@@ -14,14 +14,14 @@

//! prometheus protocol support
use std::cmp::Ordering;
use std::collections::{BTreeMap, HashMap};
use std::collections::BTreeMap;
use std::hash::{Hash, Hasher};

use api::prometheus::remote::label_matcher::Type as MatcherType;
use api::prometheus::remote::{Label, Query, Sample, TimeSeries, WriteRequest};
use api::v1::codec::{InsertBatch, SelectResult};
use api::v1::codec::SelectResult;
use api::v1::column::SemanticType;
use api::v1::{column, insert_expr, Column, ColumnDataType, InsertExpr};
use api::v1::{column, Column, ColumnDataType, InsertExpr};
use common_grpc::writer::Precision::MILLISECOND;
use openmetrics_parser::{MetricsExposition, PrometheusType, PrometheusValue};
use snafu::{OptionExt, ResultExt};
@@ -413,21 +413,14 @@ fn timeseries_to_insert_expr(database: &str, mut timeseries: TimeSeries) -> Resu
        });
    }

    let batch = InsertBatch {
        columns,
        row_count: row_count as u32,
    };
    Ok(InsertExpr {
        schema_name,
        table_name: table_name.context(error::InvalidPromRemoteRequestSnafu {
            msg: "missing '__name__' label in timeseries",
        })?,

        expr: Some(insert_expr::Expr::Values(insert_expr::Values {
            values: vec![batch.into()],
        })),
        options: HashMap::default(),
        region_number: 0,
        columns,
        row_count: row_count as u32,
    })
}

@@ -683,105 +676,93 @@ mod tests {
        assert_eq!("metric2", exprs[1].table_name);
        assert_eq!("metric3", exprs[2].table_name);

        let values = exprs[0].clone().expr.unwrap();
        match values {
            insert_expr::Expr::Values(insert_expr::Values { values }) => {
                assert_eq!(1, values.len());
                let batch = InsertBatch::try_from(values[0].as_slice()).unwrap();
                assert_eq!(2, batch.row_count);
                let columns = batch.columns;
                assert_eq!(columns.len(), 3);
        let expr = exprs.get(0).unwrap();

                assert_eq!(columns[0].column_name, TIMESTAMP_COLUMN_NAME);
                assert_eq!(
                    columns[0].values.as_ref().unwrap().ts_millis_values,
                    vec![1000, 2000]
                );
        let columns = &expr.columns;
        let row_count = expr.row_count;

                assert_eq!(columns[1].column_name, VALUE_COLUMN_NAME);
                assert_eq!(
                    columns[1].values.as_ref().unwrap().f64_values,
                    vec![1.0, 2.0]
                );
        assert_eq!(2, row_count);
        assert_eq!(columns.len(), 3);

                assert_eq!(columns[2].column_name, "job");
                assert_eq!(
                    columns[2].values.as_ref().unwrap().string_values,
                    vec!["spark", "spark"]
                );
            }
            _ => unreachable!(),
        }
        assert_eq!(columns[0].column_name, TIMESTAMP_COLUMN_NAME);
        assert_eq!(
            columns[0].values.as_ref().unwrap().ts_millis_values,
            vec![1000, 2000]
        );

        let values = exprs[1].clone().expr.unwrap();
        match values {
            insert_expr::Expr::Values(insert_expr::Values { values }) => {
                assert_eq!(1, values.len());
                let batch = InsertBatch::try_from(values[0].as_slice()).unwrap();
                assert_eq!(2, batch.row_count);
                let columns = batch.columns;
                assert_eq!(columns.len(), 4);
        assert_eq!(columns[1].column_name, VALUE_COLUMN_NAME);
        assert_eq!(
            columns[1].values.as_ref().unwrap().f64_values,
            vec![1.0, 2.0]
        );

                assert_eq!(columns[0].column_name, TIMESTAMP_COLUMN_NAME);
                assert_eq!(
                    columns[0].values.as_ref().unwrap().ts_millis_values,
                    vec![1000, 2000]
                );
        assert_eq!(columns[2].column_name, "job");
        assert_eq!(
            columns[2].values.as_ref().unwrap().string_values,
            vec!["spark", "spark"]
        );

                assert_eq!(columns[1].column_name, VALUE_COLUMN_NAME);
                assert_eq!(
                    columns[1].values.as_ref().unwrap().f64_values,
                    vec![3.0, 4.0]
                );
        let expr = exprs.get(1).unwrap();

                assert_eq!(columns[2].column_name, "instance");
                assert_eq!(
                    columns[2].values.as_ref().unwrap().string_values,
                    vec!["test_host1", "test_host1"]
                );
                assert_eq!(columns[3].column_name, "idc");
                assert_eq!(
                    columns[3].values.as_ref().unwrap().string_values,
                    vec!["z001", "z001"]
                );
            }
            _ => unreachable!(),
        }
        let columns = &expr.columns;
        let row_count = expr.row_count;

        let values = exprs[2].clone().expr.unwrap();
        match values {
            insert_expr::Expr::Values(insert_expr::Values { values }) => {
                assert_eq!(1, values.len());
                let batch = InsertBatch::try_from(values[0].as_slice()).unwrap();
                assert_eq!(3, batch.row_count);
                let columns = batch.columns;
                assert_eq!(columns.len(), 4);
        assert_eq!(2, row_count);
        assert_eq!(columns.len(), 4);

                assert_eq!(columns[0].column_name, TIMESTAMP_COLUMN_NAME);
                assert_eq!(
                    columns[0].values.as_ref().unwrap().ts_millis_values,
                    vec![1000, 2000, 3000]
                );
        assert_eq!(columns[0].column_name, TIMESTAMP_COLUMN_NAME);
        assert_eq!(
            columns[0].values.as_ref().unwrap().ts_millis_values,
            vec![1000, 2000]
        );

                assert_eq!(columns[1].column_name, VALUE_COLUMN_NAME);
                assert_eq!(
                    columns[1].values.as_ref().unwrap().f64_values,
                    vec![5.0, 6.0, 7.0]
                );
        assert_eq!(columns[1].column_name, VALUE_COLUMN_NAME);
        assert_eq!(
            columns[1].values.as_ref().unwrap().f64_values,
            vec![3.0, 4.0]
        );

                assert_eq!(columns[2].column_name, "idc");
                assert_eq!(
                    columns[2].values.as_ref().unwrap().string_values,
                    vec!["z002", "z002", "z002"]
                );
                assert_eq!(columns[3].column_name, "app");
                assert_eq!(
                    columns[3].values.as_ref().unwrap().string_values,
                    vec!["biz", "biz", "biz"]
                );
            }
            _ => unreachable!(),
        }
        assert_eq!(columns[2].column_name, "instance");
        assert_eq!(
            columns[2].values.as_ref().unwrap().string_values,
            vec!["test_host1", "test_host1"]
        );
        assert_eq!(columns[3].column_name, "idc");
        assert_eq!(
            columns[3].values.as_ref().unwrap().string_values,
            vec!["z001", "z001"]
        );

        let expr = exprs.get(2).unwrap();

        let columns = &expr.columns;
        let row_count = expr.row_count;

        assert_eq!(3, row_count);
        assert_eq!(columns.len(), 4);

        assert_eq!(columns[0].column_name, TIMESTAMP_COLUMN_NAME);
        assert_eq!(
            columns[0].values.as_ref().unwrap().ts_millis_values,
            vec![1000, 2000, 3000]
        );

        assert_eq!(columns[1].column_name, VALUE_COLUMN_NAME);
        assert_eq!(
            columns[1].values.as_ref().unwrap().f64_values,
            vec![5.0, 6.0, 7.0]
        );

        assert_eq!(columns[2].column_name, "idc");
        assert_eq!(
            columns[2].values.as_ref().unwrap().string_values,
            vec!["z002", "z002", "z002"]
        );
        assert_eq!(columns[3].column_name, "app");
        assert_eq!(
            columns[3].values.as_ref().unwrap().string_values,
            vec!["biz", "biz", "biz"]
        );
    }

    #[test]
@@ -20,7 +20,7 @@ use axum::Router;
use axum_test_helper::TestClient;
use common_query::Output;
use servers::error::Result;
use servers::http::HttpServer;
use servers::http::{HttpOptions, HttpServer};
use servers::influxdb::InfluxdbRequest;
use servers::query_handler::{InfluxdbLineProtocolHandler, SqlQueryHandler};
use tokio::sync::mpsc;
@@ -51,7 +51,7 @@ impl SqlQueryHandler for DummyInstance {

fn make_test_app(tx: mpsc::Sender<(String, String)>) -> Router {
    let instance = Arc::new(DummyInstance { tx });
    let mut server = HttpServer::new(instance.clone());
    let mut server = HttpServer::new(instance.clone(), HttpOptions::default());
    server.set_influxdb_handler(instance);
    server.make_app()
}

@@ -19,7 +19,7 @@ use axum::Router;
use axum_test_helper::TestClient;
use common_query::Output;
use servers::error::{self, Result};
use servers::http::HttpServer;
use servers::http::{HttpOptions, HttpServer};
use servers::opentsdb::codec::DataPoint;
use servers::query_handler::{OpentsdbProtocolHandler, SqlQueryHandler};
use tokio::sync::mpsc;
@@ -51,7 +51,7 @@ impl SqlQueryHandler for DummyInstance {

fn make_test_app(tx: mpsc::Sender<String>) -> Router {
    let instance = Arc::new(DummyInstance { tx });
    let mut server = HttpServer::new(instance.clone());
    let mut server = HttpServer::new(instance.clone(), HttpOptions::default());
    server.set_opentsdb_handler(instance);
    server.make_app()
}

@@ -23,7 +23,7 @@ use axum_test_helper::TestClient;
use common_query::Output;
use prost::Message;
use servers::error::Result;
use servers::http::HttpServer;
use servers::http::{HttpOptions, HttpServer};
use servers::prometheus;
use servers::prometheus::{snappy_compress, Metrics};
use servers::query_handler::{PrometheusProtocolHandler, PrometheusResponse, SqlQueryHandler};
@@ -76,7 +76,7 @@ impl SqlQueryHandler for DummyInstance {

fn make_test_app(tx: mpsc::Sender<(String, Vec<u8>)>) -> Router {
    let instance = Arc::new(DummyInstance { tx });
    let mut server = HttpServer::new(instance.clone());
    let mut server = HttpServer::new(instance.clone(), HttpOptions::default());
    server.set_prom_handler(instance);
    server.make_app()
}
@@ -22,6 +22,8 @@ use crate::error::{
    self, InvalidDatabaseNameSnafu, InvalidTableNameSnafu, Result, SyntaxSnafu, TokenizerSnafu,
};
use crate::statements::describe::DescribeTable;
use crate::statements::drop::DropTable;
use crate::statements::explain::Explain;
use crate::statements::show::{ShowCreateTable, ShowDatabases, ShowKind, ShowTables};
use crate::statements::statement::Statement;
use crate::statements::table_idents_to_full_name;
@@ -98,6 +100,8 @@ impl<'a> ParserContext<'a> {

            Keyword::ALTER => self.parse_alter(),

            Keyword::DROP => self.parse_drop(),

            // todo(hl) support more statements.
            _ => self.unsupported(self.peek_token_as_string()),
        }
@@ -258,7 +262,46 @@ impl<'a> ParserContext<'a> {
    }

    fn parse_explain(&mut self) -> Result<Statement> {
        todo!()
        let explain_statement =
            self.parser
                .parse_explain(false)
                .with_context(|_| error::UnexpectedSnafu {
                    sql: self.sql,
                    expected: "a query statement",
                    actual: self.peek_token_as_string(),
                })?;

        Ok(Statement::Explain(Explain::try_from(explain_statement)?))
    }

    fn parse_drop(&mut self) -> Result<Statement> {
        self.parser.next_token();
        if !self.matches_keyword(Keyword::TABLE) {
            return self.unsupported(self.peek_token_as_string());
        }
        self.parser.next_token();

        let table_ident =
            self.parser
                .parse_object_name()
                .with_context(|_| error::UnexpectedSnafu {
                    sql: self.sql,
                    expected: "a table name",
                    actual: self.peek_token_as_string(),
                })?;
        ensure!(
            !table_ident.0.is_empty(),
            InvalidTableNameSnafu {
                name: table_ident.to_string()
            }
        );

        let (catalog_name, schema_name, table_name) = table_idents_to_full_name(&table_ident)?;
        Ok(Statement::DropTable(DropTable {
            catalog_name,
            schema_name,
            table_name,
        }))
    }
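`parse_drop` resolves partially qualified names through `table_idents_to_full_name`, so one-, two-, and three-part identifiers all land in a fully qualified `DropTable`. A small sketch mirroring the tests below:

    // "foo" gets the default catalog and schema; "my_schema.foo" keeps the
    // default catalog; "my_catalog.my_schema.foo" is taken verbatim.
    let stmts =
        ParserContext::create_with_dialect("DROP TABLE my_schema.foo", &GenericDialect {})
            .unwrap();
    assert!(matches!(stmts[0], Statement::DropTable(_)));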
    // Report unexpected token
@@ -328,6 +371,8 @@ impl<'a> ParserContext<'a> {
mod tests {
    use std::assert_matches::assert_matches;

    use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
    use sqlparser::ast::{Query as SpQuery, Statement as SpStatement};
    use sqlparser::dialect::GenericDialect;

    use super::*;
@@ -471,4 +516,93 @@ mod tests {
            })
        );
    }

    #[test]
    pub fn test_explain() {
        let sql = "EXPLAIN select * from foo";
        let result = ParserContext::create_with_dialect(sql, &GenericDialect {});
        let stmts = result.unwrap();
        assert_eq!(1, stmts.len());

        let select = sqlparser::ast::Select {
            distinct: false,
            top: None,
            projection: vec![sqlparser::ast::SelectItem::Wildcard],
            from: vec![sqlparser::ast::TableWithJoins {
                relation: sqlparser::ast::TableFactor::Table {
                    name: sqlparser::ast::ObjectName(vec![sqlparser::ast::Ident::new("foo")]),
                    alias: None,
                    args: vec![],
                    with_hints: vec![],
                },
                joins: vec![],
            }],
            lateral_views: vec![],
            selection: None,
            group_by: vec![],
            cluster_by: vec![],
            distribute_by: vec![],
            sort_by: vec![],
            having: None,
        };

        let sp_statement = SpStatement::Query(Box::new(SpQuery {
            with: None,
            body: sqlparser::ast::SetExpr::Select(Box::new(select)),
            order_by: vec![],
            limit: None,
            offset: None,
            fetch: None,
            lock: None,
        }));

        let explain = Explain::try_from(SpStatement::Explain {
            describe_alias: false,
            analyze: false,
            verbose: false,
            statement: Box::new(sp_statement),
        })
        .unwrap();

        assert_eq!(stmts[0], Statement::Explain(explain))
    }

    #[test]
    pub fn test_drop_table() {
        let sql = "DROP TABLE foo";
        let result = ParserContext::create_with_dialect(sql, &GenericDialect {});
        let mut stmts = result.unwrap();
        assert_eq!(
            stmts.pop().unwrap(),
            Statement::DropTable(DropTable {
                catalog_name: DEFAULT_CATALOG_NAME.to_string(),
                schema_name: DEFAULT_SCHEMA_NAME.to_string(),
                table_name: "foo".to_string()
            })
        );

        let sql = "DROP TABLE my_schema.foo";
        let result = ParserContext::create_with_dialect(sql, &GenericDialect {});
        let mut stmts = result.unwrap();
        assert_eq!(
            stmts.pop().unwrap(),
            Statement::DropTable(DropTable {
                catalog_name: DEFAULT_CATALOG_NAME.to_string(),
                schema_name: "my_schema".to_string(),
                table_name: "foo".to_string()
            })
        );

        let sql = "DROP TABLE my_catalog.my_schema.foo";
        let result = ParserContext::create_with_dialect(sql, &GenericDialect {});
        let mut stmts = result.unwrap();
        assert_eq!(
            stmts.pop().unwrap(),
            Statement::DropTable(DropTable {
                catalog_name: "my_catalog".to_string(),
                schema_name: "my_schema".to_string(),
                table_name: "foo".to_string()
            })
        )
    }
}
@@ -15,6 +15,8 @@
pub mod alter;
pub mod create;
pub mod describe;
pub mod drop;
pub mod explain;
pub mod insert;
pub mod query;
pub mod show;
@@ -321,11 +323,16 @@ pub fn sql_data_type_to_concrete_data_type(data_type: &SqlDataType) -> Result<Co

#[cfg(test)]
mod tests {
    use std::assert_matches::assert_matches;

    use api::v1::ColumnDataType;
    use common_time::timestamp::TimeUnit;
    use datatypes::types::BooleanType;
    use datatypes::value::OrderedFloat;

    use super::*;
    use crate::ast::Ident;
    use crate::ast::{DataType, Ident};
    use crate::statements::ColumnOption;

    fn check_type(sql_type: SqlDataType, data_type: ConcreteDataType) {
        assert_eq!(
@@ -530,4 +537,61 @@ mod tests {
            )
            .is_err());
    }

    #[test]
    pub fn test_parse_column_default_constraint() {
        let bool_value = sqlparser::ast::Value::Boolean(true);

        let opts = vec![
            ColumnOptionDef {
                name: None,
                option: ColumnOption::Default(Expr::Value(bool_value)),
            },
            ColumnOptionDef {
                name: None,
                option: ColumnOption::NotNull,
            },
        ];

        let constraint =
            parse_column_default_constraint("coll", &ConcreteDataType::Boolean(BooleanType), &opts)
                .unwrap();

        assert_matches!(
            constraint,
            Some(ColumnDefaultConstraint::Value(Value::Boolean(true)))
        );
    }

    #[test]
    pub fn test_sql_column_def_to_grpc_column_def() {
        // test basic
        let column_def = ColumnDef {
            name: "col".into(),
            data_type: DataType::Double,
            collation: None,
            options: vec![],
        };

        let grpc_column_def = sql_column_def_to_grpc_column_def(column_def).unwrap();

        assert_eq!("col", grpc_column_def.name);
        assert!(grpc_column_def.is_nullable); // nullable when options are empty
        assert_eq!(ColumnDataType::Float64 as i32, grpc_column_def.datatype);
        assert_eq!(None, grpc_column_def.default_constraint);

        // test not null
        let column_def = ColumnDef {
            name: "col".into(),
            data_type: DataType::Double,
            collation: None,
            options: vec![ColumnOptionDef {
                name: None,
                option: ColumnOption::NotNull,
            }],
        };

        let grpc_column_def = sql_column_def_to_grpc_column_def(column_def).unwrap();
        assert!(!grpc_column_def.is_nullable);
    }
}
32
src/sql/src/statements/drop.rs
Normal file
@@ -0,0 +1,32 @@
// Copyright 2022 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

/// DROP TABLE statement.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct DropTable {
    pub catalog_name: String,
    pub schema_name: String,
    pub table_name: String,
}

impl DropTable {
    /// Creates a statement for `DROP TABLE`
    pub fn new(catalog_name: String, schema_name: String, table_name: String) -> Self {
        DropTable {
            catalog_name,
            schema_name,
            table_name,
        }
    }
}
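A usage sketch: building the statement directly yields the same value that `parse_drop` returns for a fully qualified name.

    let stmt = Statement::DropTable(DropTable::new(
        "my_catalog".to_string(),
        "my_schema".to_string(),
        "foo".to_string(),
    ));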
37
src/sql/src/statements/explain.rs
Normal file
@@ -0,0 +1,37 @@
// Copyright 2022 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use sqlparser::ast::Statement as SpStatement;

use crate::error::Error;

/// Explain statement.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Explain {
    pub inner: SpStatement,
}

impl TryFrom<SpStatement> for Explain {
    type Error = Error;

    fn try_from(value: SpStatement) -> Result<Self, Self::Error> {
        Ok(Explain { inner: value })
    }
}

impl ToString for Explain {
    fn to_string(&self) -> String {
        self.inner.to_string()
    }
}
@@ -18,11 +18,14 @@ use sqlparser::parser::ParserError;
use crate::statements::alter::AlterTable;
use crate::statements::create::{CreateDatabase, CreateTable};
use crate::statements::describe::DescribeTable;
use crate::statements::drop::DropTable;
use crate::statements::explain::Explain;
use crate::statements::insert::Insert;
use crate::statements::query::Query;
use crate::statements::show::{ShowCreateTable, ShowDatabases, ShowTables};

/// Tokens parsed by `DFParser` are converted into these values.
#[allow(clippy::large_enum_variant)]
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum Statement {
    // Query
@@ -31,6 +34,8 @@ pub enum Statement {
    Insert(Box<Insert>),
    /// CREATE TABLE
    CreateTable(CreateTable),
    // DROP TABLE
    DropTable(DropTable),
    // CREATE DATABASE
    CreateDatabase(CreateDatabase),
    /// ALTER TABLE
@@ -43,6 +48,8 @@ pub enum Statement {
    ShowCreateTable(ShowCreateTable),
    // DESCRIBE TABLE
    DescribeTable(DescribeTable),
    // EXPLAIN QUERY
    Explain(Explain),
}

/// Converts Statement to sqlparser statement
@@ -63,11 +70,15 @@ impl TryFrom<Statement> for SpStatement {
            Statement::DescribeTable(_) => Err(ParserError::ParserError(
                "sqlparser does not support DESCRIBE TABLE query.".to_string(),
            )),
            Statement::DropTable(_) => Err(ParserError::ParserError(
                "sqlparser does not support DROP TABLE query.".to_string(),
            )),
            Statement::Query(s) => Ok(SpStatement::Query(Box::new(s.inner))),
            Statement::Insert(i) => Ok(i.inner),
            Statement::CreateDatabase(_) | Statement::CreateTable(_) | Statement::Alter(_) => {
                unimplemented!()
            }
            Statement::Explain(e) => Ok(e.inner),
        }
    }
}
@@ -48,7 +48,7 @@ pub enum Error {

    #[snafu(display("Failed to write columns, source: {}", source))]
    FlushIo {
        source: std::io::Error,
        source: object_store::Error,
        backtrace: Backtrace,
    },

@@ -62,28 +62,28 @@ pub enum Error {
    ReadObject {
        path: String,
        backtrace: Backtrace,
        source: IoError,
        source: object_store::Error,
    },

    #[snafu(display("Fail to write object into path: {}, source: {}", path, source))]
    WriteObject {
        path: String,
        backtrace: Backtrace,
        source: IoError,
        source: object_store::Error,
    },

    #[snafu(display("Fail to delete object from path: {}, source: {}", path, source))]
    DeleteObject {
        path: String,
        backtrace: Backtrace,
        source: IoError,
        source: object_store::Error,
    },

    #[snafu(display("Fail to list objects in path: {}, source: {}", path, source))]
    ListObjects {
        path: String,
        backtrace: Backtrace,
        source: IoError,
        source: object_store::Error,
    },

    #[snafu(display("Fail to create str from bytes, source: {}", source))]
@@ -457,7 +457,14 @@ mod tests {
            ))
        }

        let error = throw_io_error().context(FlushIoSnafu).err().unwrap();
        let error = throw_io_error()
            .map_err(|err| {
                object_store::Error::new(object_store::ErrorKind::Unexpected, "writer close failed")
                    .set_source(err)
            })
            .context(FlushIoSnafu)
            .err()
            .unwrap();
        assert_eq!(StatusCode::StorageUnavailable, error.status_code());
        assert!(error.backtrace_opt().is_some());
    }
@@ -19,7 +19,7 @@ use async_trait::async_trait;
use common_telemetry::logging;
use futures::TryStreamExt;
use lazy_static::lazy_static;
use object_store::{util, ObjectEntry, ObjectStore};
use object_store::{util, Object, ObjectStore};
use regex::Regex;
use serde::{Deserialize, Serialize};
use snafu::{ensure, ResultExt};
@@ -63,7 +63,7 @@ pub fn is_delta_file(file_name: &str) -> bool {
}

pub struct ObjectStoreLogIterator {
    iter: Box<dyn Iterator<Item = (ManifestVersion, ObjectEntry)> + Send + Sync>,
    iter: Box<dyn Iterator<Item = (ManifestVersion, Object)> + Send + Sync>,
}

#[async_trait]
@@ -72,8 +72,7 @@ impl LogIterator for ObjectStoreLogIterator {

    async fn next_log(&mut self) -> Result<Option<(ManifestVersion, Vec<u8>)>> {
        match self.iter.next() {
            Some((v, e)) => {
                let object = e.into_object();
            Some((v, object)) => {
                let bytes = object.read().await.context(ReadObjectSnafu {
                    path: object.path(),
                })?;
@@ -156,7 +155,7 @@ impl ManifestLogStorage for ManifestObjectStore {
            .await
            .context(ListObjectsSnafu { path: &self.path })?;

        let mut entries: Vec<(ManifestVersion, ObjectEntry)> = streamer
        let mut entries: Vec<(ManifestVersion, Object)> = streamer
            .try_filter_map(|e| async move {
                let file_name = e.name();
                if is_delta_file(file_name) {
@@ -122,9 +122,19 @@ impl<'a> ParquetWriter<'a> {
        sink.close().await.context(error::WriteParquetSnafu)?;
        drop(sink);

        writer.close().await.context(error::WriteObjectSnafu {
            path: self.file_path,
        })
        writer
            .close()
            .await
            .map_err(|err| {
                object_store::Error::new(
                    object_store::ErrorKind::Unexpected,
                    "writer close failed",
                )
                .set_source(err)
            })
            .context(error::WriteObjectSnafu {
                path: self.file_path,
            })
    }
        )
        .map(|_| ())
@@ -74,8 +74,8 @@ pub trait TableEngine: Send + Sync {
    /// Returns true when the given table exists.
    fn table_exists<'a>(&self, ctx: &EngineContext, table_ref: &'a TableReference) -> bool;

    /// Drops the given table.
    async fn drop_table(&self, ctx: &EngineContext, request: DropTableRequest) -> Result<()>;
    /// Drops the given table. Returns true if the table is dropped, or false if the table doesn't exist.
    async fn drop_table(&self, ctx: &EngineContext, request: DropTableRequest) -> Result<bool>;
}

pub type TableEngineRef = Arc<dyn TableEngine>;
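The boolean return makes the drop's outcome observable without an error path: dropping a missing table is `Ok(false)` rather than a failure. A hedged sketch, with the hypothetical `make_request()` standing in for building a `DropTableRequest`:

    // First drop removes the table; a second drop reports "not there".
    assert!(engine.drop_table(&ctx, make_request()).await?); // dropped: true
    assert!(!engine.drop_table(&ctx, make_request()).await?); // already gone: false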
@@ -84,4 +84,8 @@ pub enum AlterKind {

/// Drop table request
#[derive(Debug)]
pub struct DropTableRequest {}
pub struct DropTableRequest {
    pub catalog_name: String,
    pub schema_name: String,
    pub table_name: String,
}
@@ -69,7 +69,8 @@ pub trait Table: Send + Sync {
        Ok(FilterPushDownType::Unsupported)
    }

    async fn alter(&self, _request: AlterTableRequest) -> Result<()> {
    async fn alter(&self, request: AlterTableRequest) -> Result<()> {
        let _ = request;
        unimplemented!()
    }
}
@@ -27,24 +27,26 @@ use futures::task::{Context, Poll};
use futures::Stream;

use crate::error::Result;
use crate::metadata::{TableInfoBuilder, TableInfoRef, TableMetaBuilder, TableType};
use crate::metadata::{TableId, TableInfoBuilder, TableInfoRef, TableMetaBuilder, TableType};
use crate::table::scan::SimpleTableScan;
use crate::table::{Expr, Table};

/// numbers table for test
#[derive(Debug, Clone)]
pub struct NumbersTable {
    table_id: TableId,
    schema: SchemaRef,
}

impl Default for NumbersTable {
    fn default() -> Self {
impl NumbersTable {
    pub fn new(table_id: TableId) -> Self {
        let column_schemas = vec![ColumnSchema::new(
            "number",
            ConcreteDataType::uint32_datatype(),
            false,
        )];
        Self {
            table_id,
            schema: Arc::new(
                SchemaBuilder::try_from_columns(column_schemas)
                    .unwrap()
@@ -55,6 +57,12 @@ impl Default for NumbersTable {
    }
}

impl Default for NumbersTable {
    fn default() -> Self {
        NumbersTable::new(1)
    }
}

#[async_trait::async_trait]
impl Table for NumbersTable {
    fn as_any(&self) -> &dyn Any {
@@ -68,7 +76,7 @@ impl Table for NumbersTable {
    fn table_info(&self) -> TableInfoRef {
        Arc::new(
            TableInfoBuilder::default()
                .table_id(1)
                .table_id(self.table_id)
                .name("numbers")
                .catalog_name("greptime")
                .schema_name("public")

@@ -97,7 +97,7 @@ impl TableEngine for MockTableEngine {
        unimplemented!()
    }

    async fn drop_table(&self, _ctx: &EngineContext, _request: DropTableRequest) -> Result<()> {
    async fn drop_table(&self, _ctx: &EngineContext, _request: DropTableRequest) -> Result<bool> {
        unimplemented!()
    }
}
60
tests/cases/standalone/basic.result
Normal file
@@ -0,0 +1,60 @@
CREATE TABLE system_metrics (
    host STRING,
    idc STRING,
    cpu_util DOUBLE,
    memory_util DOUBLE,
    disk_util DOUBLE,
    ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
    PRIMARY KEY(host, idc),
    TIME INDEX(ts)
);

MutateResult { success: 1, failure: 0 }

INSERT INTO system_metrics
VALUES
    ("host1", "idc_a", 11.8, 10.3, 10.3, 1667446797450),
    ("host2", "idc_a", 80.1, 70.3, 90.0, 1667446797450),
    ("host1", "idc_b", 50.0, 66.7, 40.6, 1667446797450);

MutateResult { success: 3, failure: 0 }

SELECT * FROM system_metrics;

+-----------------------+----------------------+----------------------------+-------------------------------+-----------------------------+----------------------------+
| host, #Field, #String | idc, #Field, #String | cpu_util, #Field, #Float64 | memory_util, #Field, #Float64 | disk_util, #Field, #Float64 | ts, #Timestamp, #Timestamp |
+-----------------------+----------------------+----------------------------+-------------------------------+-----------------------------+----------------------------+
| host1                 | idc_a                | 11.8                       | 10.3                          | 10.3                        | 1667446797450              |
| host1                 | idc_b                | 50                         | 66.7                          | 40.6                        | 1667446797450              |
| host2                 | idc_a                | 80.1                       | 70.3                          | 90                          | 1667446797450              |
+-----------------------+----------------------+----------------------------+-------------------------------+-----------------------------+----------------------------+

SELECT count(*) FROM system_metrics;

+----------------------------------+
| COUNT(UInt8(1)), #Field, #Uint64 |
+----------------------------------+
| 3                                |
+----------------------------------+

SELECT avg(cpu_util) FROM system_metrics;

+------------------------------------------------+
| AVG(system_metrics.cpu_util), #Field, #Float64 |
+------------------------------------------------+
| 47.29999999999999                              |
+------------------------------------------------+

SELECT idc, avg(memory_util) FROM system_metrics GROUP BY idc ORDER BY idc;

+----------------------+---------------------------------------------------+
| idc, #Field, #String | AVG(system_metrics.memory_util), #Field, #Float64 |
+----------------------+---------------------------------------------------+
| idc_a                | 40.3                                              |
| idc_b                | 66.7                                              |
+----------------------+---------------------------------------------------+

DROP TABLE system_metrics;

MutateResult { success: 1, failure: 0 }
Some files were not shown because too many files have changed in this diff.