Mirror of https://github.com/GreptimeTeam/greptimedb.git, synced 2025-12-26 08:00:01 +00:00
Compare commits: v0.3.0...v0.4.0-nightly (170 commits)
Commits (SHA1):

d9751268aa, 8f1241912c, 97cfa3d6c9, ef7c5dd311, ce43896a0b, c9cce0225d, 5bfd0d9857, e4fd5d0fd3, 132668bcd1, 8b4145b634,
735c6390ca, 9ff7670adf, 16be56a743, 2bfe25157f, 4fdb6d2f21, 39091421a4, 674bfd85c7, 4fa8340572, 5422224530, 077785cf1e,
a751aa5ba0, 264c5ea720, fa12392d2c, 421103c336, 41e856eb9e, e1ca454992, 2d30f4c373, a7ea3bbc16, fc850c9988, f293126315,
c615fb2a93, 65f5349767, ed756288b3, 04ddeffd2a, c8ed1bbfae, 207d3d23a1, 63173f63a1, 4ea8a78817, 553530cff4, c3db99513a,
8e256b317d, b31fad5d52, 00181885cc, 195dfdc5d3, f20b5695b8, f731193ddc, 963e468286, f19498f73e, 4cc42e2ba6, cd5afc8cb7,
6dd24f4dc4, 55500b7711, 64acfd3802, ad165c1c64, 8dcb12e317, 03e30652c8, 61c793796c, dc085442d7, 9153191819, 979400ac58,
28748edb0d, 66e5ed5483, af2fb2acbd, eb2654b89a, 3d0d082c56, 4073fceea5, 8a00424468, 4b580f4037, ee16262b45, f37b394f1a,
ccee60f37d, bee8323bae, 000df8cf1e, 884731a2c8, 2922c25a16, 4dec06ec86, 3b6f70cde3, b8e92292d2, 746fe8b4fe, 20f2fc4a2a,
2ef84f64f1, 451cc02d8d, b466ef6cb6, 5b42e15105, e1bb7acfe5, 2c0c4672b4, e54415e723, 783a794060, 563f6e05e2, 25cb667470,
c77b94650c, 605776f49c, d45e7b7480, 2b3ca1309a, acfa229641, 7e23dd7714, 559d1f73a2, bc33fdc8ef, f287d3115b, b737a240de,
99f0479bd2, 313121f2ae, fcff66e039, 03057cab6c, dcfce49cff, 78b07996b1, 034564fd27, a95f8767a8, 964d26e415, fd412b7b07,
223cf31409, 62f660e439, 0fb18245b8, caed6879e6, 5ab0747092, b1ccc7ef5d, d1b5ce0d35, a314993ab4, fa522bc579, 5335203360,
23bf55a265, 3b91fc2c64, 6205616301, e47ef1f0d2, 16c1ee2618, 323e2aed07, cbc2620a59, 4fdee5ea3c, 30472cebae, 903f02bf10,
1703e93e15, 2dd86b686f, 128c6ec98c, 960b84262b, 69854c07c5, 1eeb5b4330, 9b3037fe97, 09747ea206, fb35e09072, 803940cfa4,
420ae054b3, 0f1e061f24, 7961de25ad, f7d98e533b, b540d640cf, 51a4d660b7, 1b2381502e, 0e937be3f5, 564c183607, 8c78368374,
67c16dd631, ddcee052b2, 7efcf868d5, f08f726bec, 7437820bdc, 910c950717, f91cd250f8, 115d9eea8d, bc8f236806, fdbda51c25,
e184826353, 5b8e54e60e, 8cda1635cc, f63ddb57c3, d2a8fd9890, 91026a6820, 7a60bfec2a, a103614fd2, 1b4976b077, 166fb8871e
@@ -20,6 +20,3 @@ out/

# Rust
target/

# Git
.git
@@ -14,4 +14,8 @@ GT_AZBLOB_CONTAINER=AZBLOB container
 GT_AZBLOB_ACCOUNT_NAME=AZBLOB account name
 GT_AZBLOB_ACCOUNT_KEY=AZBLOB account key
 GT_AZBLOB_ENDPOINT=AZBLOB endpoint
+
+# Settings for gcs test
+GT_GCS_BUCKET = GCS bucket
+GT_GCS_SCOPE = GCS scope
+GT_GCS_CREDENTIAL_PATH = GCS credential path
+GT_GCS_ENDPOINT = GCS end point
.github/workflows/docs.yml (vendored, 14 lines changed)
@@ -27,6 +27,13 @@ name: CI
 # https://docs.github.com/en/repositories/configuring-branches-and-merges-in-your-repository/defining-the-mergeability-of-pull-requests/troubleshooting-required-status-checks#handling-skipped-but-required-checks

 jobs:
+  typos:
+    name: Spell Check with Typos
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+      - uses: crate-ci/typos@v1.13.10
+
   check:
     name: Check
     if: github.event.pull_request.draft == false
@@ -53,3 +60,10 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - run: 'echo "No action required"'
+
+  sqlness:
+    name: Sqlness Test
+    if: github.event.pull_request.draft == false
+    runs-on: ubuntu-latest
+    steps:
+      - run: 'echo "No action required"'
.github/workflows/release.yml (vendored, 78 lines changed)
@@ -5,27 +5,37 @@ on:
   schedule:
     # At 00:00 on Monday.
     - cron: '0 0 * * 1'
-  # Mannually trigger only builds binaries.
+  # Manually trigger only builds binaries.
   workflow_dispatch:
+    inputs:
+      dry_run:
+        description: 'Skip docker push and release steps'
+        type: boolean
+        default: true
+      skip_test:
+        description: 'Do not run tests during build'
+        type: boolean
+        default: false

 name: Release

 env:
   RUST_TOOLCHAIN: nightly-2023-05-03

-  SCHEDULED_BUILD_VERSION_PREFIX: v0.3.0
+  SCHEDULED_BUILD_VERSION_PREFIX: v0.4.0

   SCHEDULED_PERIOD: nightly

+  CARGO_PROFILE: nightly
+
   # Controls whether to run tests, include unit-test, integration-test and sqlness.
-  DISABLE_RUN_TESTS: false
+  DISABLE_RUN_TESTS: ${{ inputs.skip_test || false }}

 jobs:
   build-macos:
     name: Build macOS binary
     strategy:
       fail-fast: false
       matrix:
         # The file format is greptime-<os>-<arch>
         include:
@@ -117,9 +127,25 @@ jobs:
           name: ${{ matrix.file }}.sha256sum
           path: target/${{ matrix.arch }}/${{ env.CARGO_PROFILE }}/${{ matrix.file }}.sha256sum

+      - name: Configure tag
+        shell: bash
+        if: github.event_name == 'push'
+        run: |
+          VERSION=${{ github.ref_name }}
+          echo "TAG=${VERSION:1}" >> $GITHUB_ENV
+
+      - name: Upload to S3
+        run: |
+          aws s3 cp target/${{ matrix.arch }}/${{ env.CARGO_PROFILE }} s3://${{ secrets.GREPTIMEDB_RELEASE_BUCKET_NAME }}/releases/${TAG} --recursive --exclude "*" --include "*.tgz"
+        env:
+          AWS_ACCESS_KEY_ID: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
+          AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
+          AWS_DEFAULT_REGION: ${{ secrets.AWS_CN_REGION }}
+
   build-linux:
     name: Build linux binary
     strategy:
       fail-fast: false
       matrix:
         # The file format is greptime-<os>-<arch>
         include:
@@ -277,11 +303,26 @@ jobs:
           name: ${{ matrix.file }}.sha256sum
           path: target/${{ matrix.arch }}/${{ env.CARGO_PROFILE }}/${{ matrix.file }}.sha256sum

+      - name: Configure tag
+        shell: bash
+        if: github.event_name == 'push'
+        run: |
+          VERSION=${{ github.ref_name }}
+          echo "TAG=${VERSION:1}" >> $GITHUB_ENV
+
+      - name: Upload to S3
+        run: |
+          aws s3 cp target/${{ matrix.arch }}/${{ env.CARGO_PROFILE }} s3://${{ secrets.GREPTIMEDB_RELEASE_BUCKET_NAME }}/releases/${TAG} --recursive --exclude "*" --include "*.tgz"
+        env:
+          AWS_ACCESS_KEY_ID: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
+          AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
+          AWS_DEFAULT_REGION: ${{ secrets.AWS_CN_REGION }}
+
   docker:
     name: Build docker image
     needs: [build-linux, build-macos]
     runs-on: ubuntu-latest
-    if: github.repository == 'GreptimeTeam/greptimedb' && github.event_name != 'workflow_dispatch'
+    if: github.repository == 'GreptimeTeam/greptimedb' && !(inputs.dry_run || false)
     steps:
       - name: Checkout sources
         uses: actions/checkout@v3
@@ -294,7 +335,7 @@ jobs:

       - name: Configure scheduled build image tag # the tag would be ${SCHEDULED_BUILD_VERSION_PREFIX}-YYYYMMDD-${SCHEDULED_PERIOD}
         shell: bash
-        if: github.event_name == 'schedule'
+        if: github.event_name != 'push'
         run: |
           buildTime=`date "+%Y%m%d"`
           SCHEDULED_BUILD_VERSION=${{ env.SCHEDULED_BUILD_VERSION_PREFIX }}-$buildTime-${{ env.SCHEDULED_PERIOD }}
@@ -302,7 +343,7 @@ jobs:

       - name: Configure tag # If the release tag is v0.1.0, then the image version tag will be 0.1.0.
         shell: bash
-        if: github.event_name != 'schedule'
+        if: github.event_name == 'push'
         run: |
           VERSION=${{ github.ref_name }}
           echo "IMAGE_TAG=${VERSION:1}" >> $GITHUB_ENV
@@ -367,7 +408,7 @@ jobs:
     # Release artifacts only when all the artifacts are built successfully.
     needs: [build-linux, build-macos, docker]
     runs-on: ubuntu-latest
-    if: github.repository == 'GreptimeTeam/greptimedb' && github.event_name != 'workflow_dispatch'
+    if: github.repository == 'GreptimeTeam/greptimedb' && !(inputs.dry_run || false)
     steps:
       - name: Checkout sources
         uses: actions/checkout@v3
@@ -377,7 +418,7 @@ jobs:

       - name: Configure scheduled build version # the version would be ${SCHEDULED_BUILD_VERSION_PREFIX}-${SCHEDULED_PERIOD}-YYYYMMDD, like v0.2.0-nigthly-20230313.
         shell: bash
-        if: github.event_name == 'schedule'
+        if: github.event_name != 'push'
         run: |
           buildTime=`date "+%Y%m%d"`
           SCHEDULED_BUILD_VERSION=${{ env.SCHEDULED_BUILD_VERSION_PREFIX }}-${{ env.SCHEDULED_PERIOD }}-$buildTime
@@ -395,13 +436,13 @@ jobs:
           fi

       - name: Create scheduled build git tag
-        if: github.event_name == 'schedule'
+        if: github.event_name != 'push'
         run: |
           git tag ${{ env.SCHEDULED_BUILD_VERSION }}

       - name: Publish scheduled release # configure the different release title and tags.
         uses: ncipollo/release-action@v1
-        if: github.event_name == 'schedule'
+        if: github.event_name != 'push'
         with:
           name: "Release ${{ env.SCHEDULED_BUILD_VERSION }}"
           prerelease: ${{ env.prerelease }}
@@ -413,12 +454,13 @@ jobs:

       - name: Publish release
         uses: ncipollo/release-action@v1
-        if: github.event_name != 'schedule'
+        if: github.event_name == 'push'
         with:
           name: "${{ github.ref_name }}"
           prerelease: ${{ env.prerelease }}
           makeLatest: ${{ env.makeLatest }}
-          generateReleaseNotes: true
+          generateReleaseNotes: false
+          allowUpdates: true
           artifacts: |
             **/greptime-*

@@ -426,7 +468,7 @@ jobs:
     name: Push docker image to alibaba cloud container registry
     needs: [docker]
     runs-on: ubuntu-latest
-    if: github.repository == 'GreptimeTeam/greptimedb' && github.event_name != 'workflow_dispatch'
+    if: github.repository == 'GreptimeTeam/greptimedb' && !(inputs.dry_run || false)
     continue-on-error: true
     steps:
       - name: Checkout sources
@@ -441,13 +483,13 @@ jobs:
       - name: Login to alibaba cloud container registry
         uses: docker/login-action@v2
         with:
-          registry: registry.cn-hangzhou.aliyuncs.com
+          registry: greptime-registry.cn-hangzhou.cr.aliyuncs.com
           username: ${{ secrets.ALICLOUD_USERNAME }}
           password: ${{ secrets.ALICLOUD_PASSWORD }}

       - name: Configure scheduled build image tag # the tag would be ${SCHEDULED_BUILD_VERSION_PREFIX}-YYYYMMDD-${SCHEDULED_PERIOD}
         shell: bash
-        if: github.event_name == 'schedule'
+        if: github.event_name != 'push'
         run: |
           buildTime=`date "+%Y%m%d"`
           SCHEDULED_BUILD_VERSION=${{ env.SCHEDULED_BUILD_VERSION_PREFIX }}-$buildTime-${{ env.SCHEDULED_PERIOD }}
@@ -455,7 +497,7 @@ jobs:

       - name: Configure tag # If the release tag is v0.1.0, then the image version tag will be 0.1.0.
         shell: bash
-        if: github.event_name != 'schedule'
+        if: github.event_name == 'push'
         run: |
           VERSION=${{ github.ref_name }}
           echo "IMAGE_TAG=${VERSION:1}" >> $GITHUB_ENV
@@ -463,6 +505,6 @@ jobs:
       - name: Push image to alibaba cloud container registry # Use 'docker buildx imagetools create' to create a new image base on source image.
         run: |
           docker buildx imagetools create \
-            --tag registry.cn-hangzhou.aliyuncs.com/greptime/greptimedb:latest \
-            --tag registry.cn-hangzhou.aliyuncs.com/greptime/greptimedb:${{ env.IMAGE_TAG }} \
+            --tag greptime-registry.cn-hangzhou.cr.aliyuncs.com/greptime/greptimedb:latest \
+            --tag greptime-registry.cn-hangzhou.cr.aliyuncs.com/greptime/greptimedb:${{ env.IMAGE_TAG }} \
             greptime/greptimedb:${{ env.IMAGE_TAG }}
.gitignore (vendored, 2 lines changed)
@@ -44,3 +44,5 @@ benchmarks/data

 # Vscode workspace
 *.code-workspace
+
+venv/
Cargo.lock (generated, 701 lines changed): file diff suppressed because it is too large.
Cargo.toml (14 lines changed)
@@ -17,6 +17,7 @@ members = [
     "src/common/meta",
     "src/common/procedure",
     "src/common/procedure-test",
+    "src/common/pprof",
     "src/common/query",
     "src/common/recordbatch",
     "src/common/runtime",
@@ -32,6 +33,7 @@ members = [
     "src/meta-client",
     "src/meta-srv",
     "src/mito",
+    "src/mito2",
     "src/object-store",
     "src/partition",
     "src/promql",
@@ -49,7 +51,7 @@ members = [
 ]

 [workspace.package]
-version = "0.3.0"
+version = "0.3.2"
 edition = "2021"
 license = "Apache-2.0"

@@ -69,14 +71,17 @@ datafusion-optimizer = { git = "https://github.com/waynexia/arrow-datafusion.git
 datafusion-physical-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "63e52dde9e44cac4b1f6c6e6b6bf6368ba3bd323" }
 datafusion-sql = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "63e52dde9e44cac4b1f6c6e6b6bf6368ba3bd323" }
 datafusion-substrait = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "63e52dde9e44cac4b1f6c6e6b6bf6368ba3bd323" }
+etcd-client = "0.11"
 futures = "0.3"
 futures-util = "0.3"
-greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "4398d20c56d5f7939cc2960789cb1fa7dd18e6fe" }
+greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "9b26cca2132df83916692cc6fc092a26a1a76f0b" }
 itertools = "0.10"
 lazy_static = "1.4"
+parquet = "40.0"
 paste = "1.0"
 prost = "0.11"
 rand = "0.8"
+regex = "1.8"
 serde = { version = "1.0", features = ["derive"] }
 serde_json = "1.0"
 snafu = { version = "0.7", features = ["backtraces"] }
@@ -84,14 +89,15 @@ sqlparser = "0.34"
 tempfile = "3"
 tokio = { version = "1.28", features = ["full"] }
 tokio-util = { version = "0.7", features = ["io-util", "compat"] }
+toml = "0.7"
 tonic = { version = "0.9", features = ["tls"] }
 uuid = { version = "1", features = ["serde", "v4", "fast-rng"] }
 metrics = "0.20"
-meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "f0798c4c648d89f51abe63e870919c75dd463199" }
+meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "abbd357c1e193cd270ea65ee7652334a150b628f" }

 [workspace.dependencies.meter-macros]
 git = "https://github.com/GreptimeTeam/greptime-meter.git"
-rev = "f0798c4c648d89f51abe63e870919c75dd463199"
+rev = "abbd357c1e193cd270ea65ee7652334a150b628f"

 [profile.release]
 debug = true
Cross.toml (new file, 7 lines)
@@ -0,0 +1,7 @@
[build]
pre-build = [
    "dpkg --add-architecture $CROSS_DEB_ARCH",
    "apt update && apt install -y unzip zlib1g-dev zlib1g-dev:$CROSS_DEB_ARCH",
    "curl -LO https://github.com/protocolbuffers/protobuf/releases/download/v3.15.8/protoc-3.15.8-linux-x86_64.zip && unzip protoc-3.15.8-linux-x86_64.zip -d /usr/",
    "chmod a+x /usr/bin/protoc && chmod -R a+rx /usr/include/google",
]
Makefile (2 lines changed)
@@ -34,7 +34,7 @@ docker-image: ## Build docker image.
 ##@ Test

 test: nextest ## Run unit and integration tests.
-	cargo nextest run
+	cargo nextest run --retries 3

 .PHONY: nextest ## Install nextest tools.
 nextest:
@@ -106,7 +106,7 @@ Please see [the online document site](https://docs.greptime.com/getting-started/

 Read the [complete getting started guide](https://docs.greptime.com/getting-started/overview#connect) on our [official document site](https://docs.greptime.com/).

-To write and query data, GreptimeDB is compatible with multiple [protocols and clients](https://docs.greptime.com/user-guide/clients).
+To write and query data, GreptimeDB is compatible with multiple [protocols and clients](https://docs.greptime.com/user-guide/client/overview).

 ## Resources

@@ -123,7 +123,7 @@ To write and query data, GreptimeDB is compatible with multiple [protocols and c

 ### Documentation

-- GreptimeDB [User Guide](https://docs.greptime.com/user-guide/concepts.html)
+- GreptimeDB [User Guide](https://docs.greptime.com/user-guide/concepts/overview)
 - GreptimeDB [Developer Guide](https://docs.greptime.com/developer-guide/overview.html)
 - GreptimeDB [internal code document](https://greptimedb.rs)
@@ -114,7 +114,7 @@ async fn write_data(
     };

     let now = Instant::now();
-    db.insert(requests).await.unwrap();
+    let _ = db.insert(requests).await.unwrap();
     let elapsed = now.elapsed();
     total_rpc_elapsed_ms += elapsed.as_millis();
     progress_bar.inc(row_count as _);
@@ -377,19 +377,16 @@ fn create_table_expr() -> CreateTableExpr {
 }

 fn query_set() -> HashMap<String, String> {
-    let mut ret = HashMap::new();
-
-    ret.insert(
-        "count_all".to_string(),
-        format!("SELECT COUNT(*) FROM {TABLE_NAME};"),
-    );
-
-    ret.insert(
-        "fare_amt_by_passenger".to_string(),
-        format!("SELECT passenger_count, MIN(fare_amount), MAX(fare_amount), SUM(fare_amount) FROM {TABLE_NAME} GROUP BY passenger_count")
-    );
-
-    ret
+    HashMap::from([
+        (
+            "count_all".to_string(),
+            format!("SELECT COUNT(*) FROM {TABLE_NAME};"),
+        ),
+        (
+            "fare_amt_by_passenger".to_string(),
+            format!("SELECT passenger_count, MIN(fare_amount), MAX(fare_amount), SUM(fare_amount) FROM {TABLE_NAME} GROUP BY passenger_count"),
+        )
+    ])
 }

 async fn do_write(args: &Args, db: &Database) {
@@ -414,7 +411,8 @@ async fn do_write(args: &Args, db: &Database) {
             let db = db.clone();
             let mpb = multi_progress_bar.clone();
             let pb_style = progress_bar_style.clone();
-            write_jobs.spawn(async move { write_data(batch_size, &db, path, mpb, pb_style).await });
+            let _ = write_jobs
+                .spawn(async move { write_data(batch_size, &db, path, mpb, pb_style).await });
         }
     }
     while write_jobs.join_next().await.is_some() {
@@ -423,7 +421,8 @@ async fn do_write(args: &Args, db: &Database) {
             let db = db.clone();
             let mpb = multi_progress_bar.clone();
             let pb_style = progress_bar_style.clone();
-            write_jobs.spawn(async move { write_data(batch_size, &db, path, mpb, pb_style).await });
+            let _ = write_jobs
+                .spawn(async move { write_data(batch_size, &db, path, mpb, pb_style).await });
         }
     }
 }
@@ -10,6 +10,8 @@ rpc_addr = "127.0.0.1:3001"
 rpc_hostname = "127.0.0.1"
 # The number of gRPC server worker threads, 8 by default.
 rpc_runtime_size = 8
+# Interval for sending heartbeat messages to the Metasrv in milliseconds, 5000 by default.
+heartbeat_interval_millis = 5000

 # Metasrv client options.
 [meta_client_options]
@@ -26,8 +28,8 @@ tcp_nodelay = true
 [wal]
 # WAL data directory
 # dir = "/tmp/greptimedb/wal"
-file_size = "1GB"
-purge_threshold = "50GB"
+file_size = "256MB"
+purge_threshold = "4GB"
 purge_interval = "10m"
 read_batch_size = 128
 sync_write = false
@@ -51,7 +53,7 @@ max_purge_tasks = 32
 # Create a checkpoint every <checkpoint_margin> actions.
 checkpoint_margin = 10
 # Region manifest logs and checkpoints gc execution duration
-gc_duration = '30s'
+gc_duration = '10m'
 # Whether to try creating a manifest checkpoint on region opening
 checkpoint_on_startup = false
@@ -1,10 +1,15 @@
 # Node running mode, see `standalone.example.toml`.
 mode = "distributed"
+# Interval for sending heartbeat task to the Metasrv in milliseconds, 5000 by default.
+heartbeat_interval_millis = 5000
+# Interval for retry sending heartbeat task in milliseconds, 5000 by default.
+retry_interval_millis = 5000

 # HTTP server options, see `standalone.example.toml`.
 [http_options]
 addr = "127.0.0.1:4000"
 timeout = "30s"
+body_limit = "64MB"

 # gRPC server options, see `standalone.example.toml`.
 [grpc_options]
@@ -42,18 +47,20 @@ runtime_size = 2
 [influxdb_options]
 enable = true

-# Prometheus protocol options, see `standalone.example.toml`.
-[prometheus_options]
+# Prometheus remote storage options, see `standalone.example.toml`.
+[prom_store_options]
 enable = true

 # Prometheus protocol options, see `standalone.example.toml`.
-[prom_options]
+[prometheus_options]
 addr = "127.0.0.1:4004"

 # Metasrv client options, see `datanode.example.toml`.
 [meta_client_options]
 metasrv_addrs = ["127.0.0.1:3002"]
 timeout_millis = 3000
+# DDL timeouts options.
+ddl_timeout_millis = 10000
 connect_timeout_millis = 5000
 tcp_nodelay = true
||||
@@ -18,3 +18,18 @@ use_memory_store = false
|
||||
# [logging]
|
||||
# dir = "/tmp/greptimedb/logs"
|
||||
# level = "info"
|
||||
|
||||
# Procedure storage options.
|
||||
[procedure]
|
||||
# Procedure max retry time.
|
||||
max_retry_times = 3
|
||||
# Initial retry delay of procedures, increases exponentially
|
||||
retry_delay = "500ms"
|
||||
|
||||
# # Datanode options.
|
||||
# [datanode]
|
||||
# # Datanode client options.
|
||||
# [datanode.client_options]
|
||||
# timeout_millis = 10000
|
||||
# connect_timeout_millis = 10000
|
||||
# tcp_nodelay = true
|
||||
|
||||
@@ -9,6 +9,9 @@ enable_memory_catalog = false
 addr = "127.0.0.1:4000"
 # HTTP request timeout, 30s by default.
 timeout = "30s"
+# HTTP request body limit, 64Mb by default.
+# the following units are supported: B, KB, KiB, MB, MiB, GB, GiB, TB, TiB, PB, PiB
+body_limit = "64MB"

 # gRPC server options.
 [grpc_options]
@@ -66,13 +69,13 @@ runtime_size = 2
 # Whether to enable InfluxDB protocol in HTTP API, true by default.
 enable = true

-# Prometheus protocol options.
-[prometheus_options]
+# Prometheus remote storage options
+[prom_store_options]
 # Whether to enable Prometheus remote write and read in HTTP API, true by default.
 enable = true

-# Prom protocol options.
-[prom_options]
+# Prometheus protocol options
+[prometheus_options]
 # Prometheus API server address, "127.0.0.1:4004" by default.
 addr = "127.0.0.1:4004"

@@ -81,9 +84,9 @@ addr = "127.0.0.1:4004"
 # WAL data directory
 # dir = "/tmp/greptimedb/wal"
 # WAL file size in bytes.
-file_size = "1GB"
-# WAL purge threshold in bytes.
-purge_threshold = "50GB"
+file_size = "256MB"
+# WAL purge threshold.
+purge_threshold = "4GB"
 # WAL purge interval in seconds.
 purge_interval = "10m"
 # WAL read batch size.
@@ -115,7 +118,7 @@ max_purge_tasks = 32
 # Create a checkpoint every <checkpoint_margin> actions.
 checkpoint_margin = 10
 # Region manifest logs and checkpoints gc execution duration
-gc_duration = '30s'
+gc_duration = '10m'
 # Whether to try creating a manifest checkpoint on region opening
 checkpoint_on_startup = false
@@ -8,6 +8,7 @@ RUN apt-get update && apt-get install -y \
     libssl-dev \
     protobuf-compiler \
     curl \
+    git \
     build-essential \
     pkg-config \
     python3 \
@@ -29,7 +30,9 @@ RUN cargo build --release
 # TODO(zyy17): Maybe should use the more secure container image.
 FROM ubuntu:22.04 as base

-RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get -y install ca-certificates
+RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get \
+    -y install ca-certificates \
+    curl

 WORKDIR /greptime
 COPY --from=builder /greptimedb/target/release/greptime /greptime/bin/
docker/Dockerfile-centos7-builder (new file, 24 lines)
@@ -0,0 +1,24 @@
FROM centos:7

ENV LANG en_US.utf8
WORKDIR /greptimedb

# Install dependencies
RUN ulimit -n 1024000 && yum groupinstall -y 'Development Tools'
RUN yum install -y epel-release \
    openssl \
    openssl-devel \
    centos-release-scl \
    rh-python38 \
    rh-python38-python-devel

# Install protoc
RUN curl -LO https://github.com/protocolbuffers/protobuf/releases/download/v3.15.8/protoc-3.15.8-linux-x86_64.zip
RUN unzip protoc-3.15.8-linux-x86_64.zip -d /usr/local/

# Install Rust
SHELL ["/bin/bash", "-c"]
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
ENV PATH /opt/rh/rh-python38/root/usr/bin:/usr/local/bin:/root/.cargo/bin/:$PATH

CMD ["cargo", "build", "--release"]
@@ -8,6 +8,7 @@ RUN apt-get update && apt-get install -y \
    libssl-dev \
    protobuf-compiler \
    curl \
+    git \
    build-essential \
    pkg-config \
    wget
@@ -48,7 +49,9 @@ RUN export PYO3_CROSS_LIB_DIR=$PY_INSTALL_PATH/lib && \
 # Exporting the binary to the clean image
 FROM ubuntu:22.04 as base

-RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get -y install ca-certificates
+RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get \
+    -y install ca-certificates \
+    curl

 WORKDIR /greptime
 COPY --from=builder /greptimedb/target/aarch64-unknown-linux-gnu/release/greptime /greptime/bin/
@@ -4,7 +4,8 @@ RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
     ca-certificates \
     python3.10 \
     python3.10-dev \
-    python3-pip
+    python3-pip \
+    curl

 COPY requirements.txt /etc/greptime/requirements.txt
docs/benchmarks/tsbs/v0.3.2.md (new file, 39 lines)
@@ -0,0 +1,39 @@
# TSBS benchmark - v0.3.2

## Environment

| | |
| --- | --- |
| CPU | AMD Ryzen 7 7735HS (8 core 3.2GHz) |
| Memory | 32GB |
| Disk | SOLIDIGM SSDPFKNU010TZ |
| OS | Ubuntu 22.04.2 LTS |

## Write performance

| Write buffer size | Ingest rate (rows/s) |
| --- | --- |
| 512M | 139583.04 |
| 32M | 279250.52 |

## Query performance

| Query type | v0.3.2 write buffer 32M (ms) | v0.3.2 write buffer 512M (ms) | v0.3.1 write buffer 32M (ms) |
| --- | --- | --- | --- |
| cpu-max-all-1 | 921.12 | 241.23 | 553.63 |
| cpu-max-all-8 | 2657.66 | 502.78 | 3308.41 |
| double-groupby-1 | 28238.85 | 27367.42 | 52148.22 |
| double-groupby-5 | 33094.65 | 32421.89 | 56762.37 |
| double-groupby-all | 38565.89 | 38635.52 | 59596.80 |
| groupby-orderby-limit | 23321.60 | 22423.55 | 53983.23 |
| high-cpu-1 | 1167.04 | 254.15 | 832.41 |
| high-cpu-all | 32814.08 | 29906.94 | 62853.12 |
| lastpoint | 192045.05 | 153575.42 | NA |
| single-groupby-1-1-1 | 63.97 | 87.35 | 92.66 |
| single-groupby-1-1-12 | 666.24 | 326.98 | 781.50 |
| single-groupby-1-8-1 | 225.29 | 137.97 | 281.95 |
| single-groupby-5-1-1 | 70.40 | 81.64 | 86.15 |
| single-groupby-5-1-12 | 722.75 | 356.01 | 805.18 |
| single-groupby-5-8-1 | 285.60 | 115.88 | 326.29 |
docs/rfcs/2023-07-06-table-engine-refactor.md (new file, 303 lines)
@@ -0,0 +1,303 @@
---
Feature Name: table-engine-refactor
Tracking Issue: https://github.com/GreptimeTeam/greptimedb/issues/1869
Date: 2023-07-06
Author: "Yingwen <realevenyag@gmail.com>"
---

Refactor Table Engine
----------------------

# Summary
Refactor table engines to address several historical tech debts.

# Motivation
Both `Frontend` and `Datanode` have to deal with multiple regions in a table. This results in code duplication and an additional burden on the `Datanode`.

Before:

```mermaid
graph TB

subgraph Frontend["Frontend"]
    subgraph MyTable
        A("region 0, 2 -> Datanode0")
        B("region 1, 3 -> Datanode1")
    end
end

MyTable --> MetaSrv
MetaSrv --> ETCD

MyTable-->TableEngine0
MyTable-->TableEngine1

subgraph Datanode0
    Procedure0("procedure")
    TableEngine0("table engine")
    region0
    region2
    mytable0("my_table")

    Procedure0-->mytable0
    TableEngine0-->mytable0
    mytable0-->region0
    mytable0-->region2
end

subgraph Datanode1
    Procedure1("procedure")
    TableEngine1("table engine")
    region1
    region3
    mytable1("my_table")

    Procedure1-->mytable1
    TableEngine1-->mytable1
    mytable1-->region1
    mytable1-->region3
end

subgraph manifest["table manifest"]
    M0("my_table")
    M1("regions: [0, 1, 2, 3]")
end

mytable1-->manifest
mytable0-->manifest

RegionManifest0("region manifest 0")
RegionManifest1("region manifest 1")
RegionManifest2("region manifest 2")
RegionManifest3("region manifest 3")
region0-->RegionManifest0
region1-->RegionManifest1
region2-->RegionManifest2
region3-->RegionManifest3
```

`Datanodes` can update the same manifest file for a table, since a table's regions are assigned to different nodes in the cluster. We also have to run procedures on the `Datanode` to ensure the table manifest stays consistent with the region manifests. A "table" in a `Datanode` is only a subset of the table's regions, so the `Datanode` is much closer to the `RegionServer` in `HBase`, which deals only with regions.

In cluster mode, we store table metadata both in etcd and in the table manifest, so the table manifest is redundant. We can remove it if we refactor the table engines into region engines that only care about regions. What's more, we then don't need to run those procedures on the `Datanode`.

After:
```mermaid
graph TB

subgraph Frontend["Frontend"]
    direction LR
    subgraph MyTable
        A("region 0, 2 -> Datanode0")
        B("region 1, 3 -> Datanode1")
    end
end

MyTable --> MetaSrv
MetaSrv --> ETCD

MyTable-->RegionEngine
MyTable-->RegionEngine1

subgraph Datanode0
    RegionEngine("region engine")
    region0
    region2
    RegionEngine-->region0
    RegionEngine-->region2
end

subgraph Datanode1
    RegionEngine1("region engine")
    region1
    region3
    RegionEngine1-->region1
    RegionEngine1-->region3
end

RegionManifest0("region manifest 0")
RegionManifest1("region manifest 1")
RegionManifest2("region manifest 2")
RegionManifest3("region manifest 3")
region0-->RegionManifest0
region1-->RegionManifest1
region2-->RegionManifest2
region3-->RegionManifest3
```
This RFC proposes to refactor table engines into region engines as a first step to make the `Datanode` act like a `RegionServer`.

# Details
## Overview

We plan to refactor the `TableEngine` trait into `RegionEngine` gradually. This RFC focuses on the `mito` engine, as it is the default table engine and the most complicated one.

Currently, `MitoEngine` is built upon `StorageEngine`, which manages the regions of the `mito` engine. Since `MitoEngine` becomes a region engine, we could combine `StorageEngine` with `MitoEngine` to simplify our code structure.
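For illustration only, here is a minimal sketch of what a region-oriented engine interface could look like. The trait and type names below are hypothetical (the RFC does not spell out the trait), and the stubs exist only to keep the sketch self-contained:

```rust
use async_trait::async_trait;

// Placeholder types: the real request, stream, and error types would live in
// crates such as `store-api` and `common-recordbatch`.
pub struct RegionCreateRequest;
pub struct WriteRequest;
pub struct ScanRequest;
pub struct RecordBatchStream;
#[derive(Debug)]
pub struct Error;

/// Identifier of a single region.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct RegionId(pub u64);

/// A region-oriented engine: every method addresses one region by id, so the
/// engine never needs to know how regions group into tables, and the table
/// manifest plus the table-level procedures can disappear from the Datanode.
#[async_trait]
pub trait RegionEngine: Send + Sync {
    async fn create_region(&self, id: RegionId, req: RegionCreateRequest) -> Result<(), Error>;
    async fn write(&self, id: RegionId, req: WriteRequest) -> Result<(), Error>;
    async fn scan(&self, id: RegionId, req: ScanRequest) -> Result<RecordBatchStream, Error>;
}
```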
The chart below shows the overall architecture of the `MitoEngine`.

```mermaid
classDiagram
class MitoEngine~LogStore~ {
    -WorkerGroup workers
}
class MitoRegion {
    +VersionControlRef version_control
    -RegionId region_id
    -String manifest_dir
    -AtomicI64 last_flush_millis
    +region_id() RegionId
    +scan() ChunkReaderImpl
}
class RegionMap {
    -HashMap<RegionId, MitoRegionRef> regions
}
class ChunkReaderImpl

class WorkerGroup {
    -Vec~RegionWorker~ workers
}
class RegionWorker {
    -RegionMap regions
    -Sender sender
    -JoinHandle handle
}
class RegionWorkerThread~LogStore~ {
    -RegionMap regions
    -Receiver receiver
    -Wal~LogStore~ wal
    -ObjectStore object_store
    -MemtableBuilderRef memtable_builder
    -FlushSchedulerRef~LogStore~ flush_scheduler
    -FlushStrategy flush_strategy
    -CompactionSchedulerRef~LogStore~ compaction_scheduler
    -FilePurgerRef file_purger
}
class Wal~LogStore~ {
    -LogStore log_store
}
class MitoConfig

MitoEngine~LogStore~ o-- MitoConfig
MitoEngine~LogStore~ o-- MitoRegion
MitoEngine~LogStore~ o-- WorkerGroup
MitoRegion o-- VersionControl
MitoRegion -- ChunkReaderImpl
WorkerGroup o-- RegionWorker
RegionWorker o-- RegionMap
RegionWorker -- RegionWorkerThread~LogStore~
RegionWorkerThread~LogStore~ o-- RegionMap
RegionWorkerThread~LogStore~ o-- Wal~LogStore~
```

We replace the `RegionWriter` with a `RegionWorker` that processes write requests and DDL requests, as sketched below.
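A rough sketch of that worker model, assuming one Tokio mpsc channel per worker (the names and signatures here are illustrative, not the actual implementation):

```rust
use tokio::sync::mpsc;

// Illustrative request type; a real worker would also carry write payloads,
// DDL details, and reply channels.
enum WorkerRequest {
    Write { region_id: u64 },
    Ddl { region_id: u64 },
}

// A worker owns a subset of regions and drains its channel in a background
// task, so each region's mutations are applied by exactly one task at a time.
struct RegionWorker {
    sender: mpsc::Sender<WorkerRequest>,
    handle: tokio::task::JoinHandle<()>,
}

impl RegionWorker {
    fn start() -> Self {
        let (sender, mut receiver) = mpsc::channel::<WorkerRequest>(128);
        let handle = tokio::spawn(async move {
            // The loop ends once every sender has been dropped.
            while let Some(req) = receiver.recv().await {
                match req {
                    WorkerRequest::Write { region_id } => {
                        // Apply the mutation to the region's WAL and memtable.
                        println!("write to region {region_id}");
                    }
                    WorkerRequest::Ddl { region_id } => {
                        // Open, alter, or drop the region.
                        println!("ddl on region {region_id}");
                    }
                }
            }
        });
        Self { sender, handle }
    }
}
```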
## Metadata
We also merge the region's metadata with the table's metadata, which should make the metadata much easier to maintain.
```mermaid
classDiagram
class VersionControl {
    -CowCell~Version~ version
    -AtomicU64 committed_sequence
}
class Version {
    -RegionMetadataRef metadata
    -MemtableVersionRef memtables
    -LevelMetasRef ssts
    -SequenceNumber flushed_sequence
    -ManifestVersion manifest_version
}
class MemtableVersion {
    -MemtableRef mutable
    -Vec~MemtableRef~ immutables
    +mutable_memtable() MemtableRef
    +immutable_memtables() &[MemtableRef]
    +freeze_mutable(MemtableRef new_mutable) MemtableVersion
}
class LevelMetas {
    -LevelMetaVec levels
    -AccessLayerRef sst_layer
    -FilePurgerRef file_purger
    -Option~i64~ compaction_time_window
}
class LevelMeta {
    -Level level
    -HashMap<FileId, FileHandle> files
}
class FileHandle {
    -FileMeta meta
    -bool compacting
    -AtomicBool deleted
    -AccessLayerRef sst_layer
    -FilePurgerRef file_purger
}
class FileMeta {
    +RegionId region_id
    +FileId file_id
    +Option<Timestamp, Timestamp> time_range
    +Level level
    +u64 file_size
}

VersionControl o-- Version
Version o-- RegionMetadata
Version o-- MemtableVersion
Version o-- LevelMetas
LevelMetas o-- LevelMeta
LevelMeta o-- FileHandle
FileHandle o-- FileMeta

class RegionMetadata {
    +RegionId region_id
    +VersionNumber version
    +SchemaRef table_schema
    +Vec~usize~ primary_key_indices
    +Vec~usize~ value_indices
    +ColumnId next_column_id
    +TableOptions region_options
    +DateTime~Utc~ created_on
    +RegionSchemaRef region_schema
}
class RegionSchema {
    -SchemaRef user_schema
    -StoreSchemaRef store_schema
    -ColumnsMetadataRef columns
}
class Schema
class StoreSchema {
    -Vec~ColumnMetadata~ columns
    -SchemaRef schema
    -usize row_key_end
    -usize user_column_end
}
class ColumnsMetadata {
    -Vec~ColumnMetadata~ columns
    -HashMap<String, usize> name_to_col_index
    -usize row_key_end
    -usize timestamp_key_index
    -usize user_column_end
}
class ColumnMetadata

RegionMetadata o-- RegionSchema
RegionMetadata o-- Schema
RegionSchema o-- StoreSchema
RegionSchema o-- Schema
RegionSchema o-- ColumnsMetadata
StoreSchema o-- ColumnsMetadata
StoreSchema o-- Schema
StoreSchema o-- ColumnMetadata
ColumnsMetadata o-- ColumnMetadata
```

# Drawback
This is a breaking change.

# Future Work
- Rename `TableEngine` to `RegionEngine`
- Simplify the schema relationship in the `mito` engine
- Refactor the `Datanode` into a `RegionServer`
@@ -15,7 +15,7 @@
 use std::any::Any;

 use common_error::ext::ErrorExt;
-use common_error::prelude::StatusCode;
+use common_error::status_code::StatusCode;
 use datatypes::prelude::ConcreteDataType;
 use snafu::prelude::*;
 use snafu::Location;
@@ -41,7 +41,7 @@ pub enum Error {
     ))]
     ConvertColumnDefaultConstraint {
         column: String,
-        #[snafu(backtrace)]
+        location: Location,
         source: datatypes::error::Error,
     },

@@ -52,7 +52,7 @@ pub enum Error {
     ))]
     InvalidColumnDefaultConstraint {
         column: String,
-        #[snafu(backtrace)]
+        location: Location,
         source: datatypes::error::Error,
     },
 }

@@ -256,6 +256,7 @@ fn ddl_request_type(request: &DdlRequest) -> &'static str {
         Some(Expr::Alter(_)) => "ddl.alter",
         Some(Expr::DropTable(_)) => "ddl.drop_table",
         Some(Expr::FlushTable(_)) => "ddl.flush_table",
+        Some(Expr::CompactTable(_)) => "ddl.compact_table",
         None => "ddl.empty",
     }
 }
@@ -15,7 +15,7 @@
 pub mod error;
 pub mod helper;

-pub mod prometheus {
+pub mod prom_store {
     pub mod remote {
         pub use greptime_proto::prometheus::remote::*;
     }
@@ -29,12 +29,12 @@ datatypes = { path = "../datatypes" }
 futures = "0.3"
 futures-util.workspace = true
 key-lock = "0.1"
-lazy_static = "1.4"
+lazy_static.workspace = true
 meta-client = { path = "../meta-client" }
 metrics.workspace = true
 moka = { version = "0.11", features = ["future"] }
 parking_lot = "0.12"
-regex = "1.6"
+regex.workspace = true
 serde = "1.0"
 serde_json = "1.0"
 session = { path = "../session" }
@@ -16,10 +16,10 @@ use std::any::Any;
 use std::fmt::Debug;

 use common_error::ext::{BoxedError, ErrorExt};
-use common_error::prelude::{Snafu, StatusCode};
+use common_error::status_code::StatusCode;
 use datafusion::error::DataFusionError;
 use datatypes::prelude::ConcreteDataType;
-use snafu::Location;
+use snafu::{Location, Snafu};
 use tokio::task::JoinError;

 use crate::DeregisterTableRequest;
@@ -32,18 +32,18 @@ pub enum Error {
         source
     ))]
     CompileScriptInternal {
-        #[snafu(backtrace)]
+        location: Location,
         source: BoxedError,
     },
     #[snafu(display("Failed to open system catalog table, source: {}", source))]
     OpenSystemCatalog {
-        #[snafu(backtrace)]
+        location: Location,
         source: table::error::Error,
     },

     #[snafu(display("Failed to create system catalog table, source: {}", source))]
     CreateSystemCatalog {
-        #[snafu(backtrace)]
+        location: Location,
         source: table::error::Error,
     },

@@ -54,7 +54,7 @@ pub enum Error {
     ))]
     CreateTable {
         table_info: String,
-        #[snafu(backtrace)]
+        location: Location,
         source: table::error::Error,
     },

@@ -94,7 +94,7 @@ pub enum Error {
     #[snafu(display("Table engine not found: {}, source: {}", engine_name, source))]
     TableEngineNotFound {
         engine_name: String,
-        #[snafu(backtrace)]
+        location: Location,
         source: table::error::Error,
     },

@@ -132,7 +132,7 @@ pub enum Error {
     #[snafu(display("Failed to open table, table info: {}, source: {}", table_info, source))]
     OpenTable {
         table_info: String,
-        #[snafu(backtrace)]
+        location: Location,
         source: table::error::Error,
     },

@@ -147,13 +147,13 @@ pub enum Error {

     #[snafu(display("Failed to read system catalog table records"))]
     ReadSystemCatalog {
-        #[snafu(backtrace)]
+        location: Location,
         source: common_recordbatch::error::Error,
     },

     #[snafu(display("Failed to create recordbatch, source: {}", source))]
     CreateRecordBatch {
-        #[snafu(backtrace)]
+        location: Location,
         source: common_recordbatch::error::Error,
     },

@@ -162,7 +162,7 @@ pub enum Error {
         source
     ))]
     InsertCatalogRecord {
-        #[snafu(backtrace)]
+        location: Location,
         source: table::error::Error,
     },

@@ -173,7 +173,7 @@ pub enum Error {
     ))]
     DeregisterTable {
         request: DeregisterTableRequest,
-        #[snafu(backtrace)]
+        location: Location,
         source: table::error::Error,
     },

@@ -182,36 +182,43 @@ pub enum Error {

     #[snafu(display("Failed to scan system catalog table, source: {}", source))]
     SystemCatalogTableScan {
-        #[snafu(backtrace)]
+        location: Location,
         source: table::error::Error,
     },

     #[snafu(display("{source}"))]
     Internal {
-        #[snafu(backtrace)]
+        location: Location,
         source: BoxedError,
     },

+    #[snafu(display(
+        "Failed to upgrade weak catalog manager reference. location: {}",
+        location
+    ))]
+    UpgradeWeakCatalogManagerRef { location: Location },
+
     #[snafu(display("Failed to execute system catalog table scan, source: {}", source))]
     SystemCatalogTableScanExec {
-        #[snafu(backtrace)]
+        location: Location,
         source: common_query::error::Error,
     },

     #[snafu(display("Cannot parse catalog value, source: {}", source))]
     InvalidCatalogValue {
-        #[snafu(backtrace)]
+        location: Location,
         source: common_catalog::error::Error,
     },

     #[snafu(display("Failed to perform metasrv operation, source: {}", source))]
     MetaSrv {
-        #[snafu(backtrace)]
+        location: Location,
         source: meta_client::error::Error,
     },

     #[snafu(display("Invalid table info in catalog, source: {}", source))]
     InvalidTableInfoInCatalog {
-        #[snafu(backtrace)]
+        location: Location,
         source: datatypes::error::Error,
     },

@@ -230,12 +237,18 @@ pub enum Error {

     #[snafu(display("Table schema mismatch, source: {}", source))]
     TableSchemaMismatch {
-        #[snafu(backtrace)]
+        location: Location,
         source: table::error::Error,
     },

     #[snafu(display("A generic error has occurred, msg: {}", msg))]
     Generic { msg: String, location: Location },
+
+    #[snafu(display("Table metadata manager error: {}", source))]
+    TableMetadataManager {
+        source: common_meta::error::Error,
+        location: Location,
+    },
 }

 pub type Result<T> = std::result::Result<T, Error>;
@@ -256,9 +269,11 @@ impl ErrorExt for Error {
             | Error::EmptyValue { .. }
             | Error::ValueDeserialize { .. } => StatusCode::StorageUnavailable,

-            Error::Generic { .. } | Error::SystemCatalogTypeMismatch { .. } => StatusCode::Internal,
+            Error::Generic { .. }
+            | Error::SystemCatalogTypeMismatch { .. }
+            | Error::UpgradeWeakCatalogManagerRef { .. } => StatusCode::Internal,

-            Error::ReadSystemCatalog { source, .. } | Error::CreateRecordBatch { source } => {
+            Error::ReadSystemCatalog { source, .. } | Error::CreateRecordBatch { source, .. } => {
                 source.status_code()
             }
             Error::InvalidCatalogValue { source, .. } => source.status_code(),
@@ -275,20 +290,21 @@ impl ErrorExt for Error {
             | Error::OpenTable { source, .. }
             | Error::CreateTable { source, .. }
             | Error::DeregisterTable { source, .. }
-            | Error::TableSchemaMismatch { source } => source.status_code(),
+            | Error::TableSchemaMismatch { source, .. } => source.status_code(),

             Error::MetaSrv { source, .. } => source.status_code(),
-            Error::SystemCatalogTableScan { source } => source.status_code(),
-            Error::SystemCatalogTableScanExec { source } => source.status_code(),
-            Error::InvalidTableInfoInCatalog { source } => source.status_code(),
+            Error::SystemCatalogTableScan { source, .. } => source.status_code(),
+            Error::SystemCatalogTableScanExec { source, .. } => source.status_code(),
+            Error::InvalidTableInfoInCatalog { source, .. } => source.status_code(),

-            Error::CompileScriptInternal { source } | Error::Internal { source } => {
+            Error::CompileScriptInternal { source, .. } | Error::Internal { source, .. } => {
                 source.status_code()
             }

             Error::Unimplemented { .. } | Error::NotSupported { .. } => StatusCode::Unsupported,
             Error::QueryAccessDenied { .. } => StatusCode::AccessDenied,
             Error::Datafusion { .. } => StatusCode::EngineExecuteQuery,
+            Error::TableMetadataManager { source, .. } => source.status_code(),
         }
     }
@@ -30,6 +30,7 @@ pub const TABLE_GLOBAL_KEY_PREFIX: &str = "__tg";
 pub const TABLE_REGIONAL_KEY_PREFIX: &str = "__tr";

 const ALPHANUMERICS_NAME_PATTERN: &str = "[a-zA-Z_][a-zA-Z0-9_]*";
+const TABLE_NAME_PATTERN: &str = "[a-zA-Z_:][a-zA-Z0-9_:]*";

 lazy_static! {
     static ref CATALOG_KEY_PATTERN: Regex = Regex::new(&format!(
@@ -47,14 +48,14 @@ lazy_static! {

 lazy_static! {
     static ref TABLE_GLOBAL_KEY_PATTERN: Regex = Regex::new(&format!(
-        "^{TABLE_GLOBAL_KEY_PREFIX}-({ALPHANUMERICS_NAME_PATTERN})-({ALPHANUMERICS_NAME_PATTERN})-({ALPHANUMERICS_NAME_PATTERN})$"
+        "^{TABLE_GLOBAL_KEY_PREFIX}-({ALPHANUMERICS_NAME_PATTERN})-({ALPHANUMERICS_NAME_PATTERN})-({TABLE_NAME_PATTERN})$"
     ))
     .unwrap();
 }

 lazy_static! {
     static ref TABLE_REGIONAL_KEY_PATTERN: Regex = Regex::new(&format!(
-        "^{TABLE_REGIONAL_KEY_PREFIX}-({ALPHANUMERICS_NAME_PATTERN})-({ALPHANUMERICS_NAME_PATTERN})-({ALPHANUMERICS_NAME_PATTERN})-([0-9]+)$"
+        "^{TABLE_REGIONAL_KEY_PREFIX}-({ALPHANUMERICS_NAME_PATTERN})-({ALPHANUMERICS_NAME_PATTERN})-({TABLE_NAME_PATTERN})-([0-9]+)$"
     ))
     .unwrap();
 }
@@ -67,6 +68,7 @@ pub fn build_schema_prefix(catalog_name: impl AsRef<str>) -> String {
     format!("{SCHEMA_KEY_PREFIX}-{}-", catalog_name.as_ref())
 }

+/// Global table info has only one key across all datanodes so it does not have `node_id` field.
 pub fn build_table_global_prefix(
     catalog_name: impl AsRef<str>,
     schema_name: impl AsRef<str>,
@@ -78,6 +80,7 @@ pub fn build_table_global_prefix(
     )
 }

+/// Regional table info varies between datanode, so it contains a `node_id` field.
 pub fn build_table_regional_prefix(
     catalog_name: impl AsRef<str>,
     schema_name: impl AsRef<str>,
@@ -91,7 +94,7 @@ pub fn build_table_regional_prefix(
 }

 /// Table global info has only one key across all datanodes so it does not have `node_id` field.
-#[derive(Clone)]
+#[derive(Clone, Hash, Eq, PartialEq)]
 pub struct TableGlobalKey {
     pub catalog_name: String,
     pub schema_name: String,
@@ -124,6 +127,14 @@ impl TableGlobalKey {
             table_name: captures[3].to_string(),
         })
     }
+
+    pub fn to_raw_key(&self) -> Vec<u8> {
+        self.to_string().into_bytes()
+    }
+
+    pub fn try_from_raw_key(key: &[u8]) -> Result<Self, Error> {
+        Self::parse(String::from_utf8_lossy(key))
+    }
 }

 /// Table global info contains necessary info for a datanode to create table regions, including
@@ -141,6 +152,10 @@ impl TableGlobalValue {
     pub fn table_id(&self) -> TableId {
         self.table_info.ident.table_id
     }
+
+    pub fn engine(&self) -> &str {
+        &self.table_info.meta.engine
+    }
 }

 /// Table regional info that varies between datanode, so it contains a `node_id` field.
@@ -189,6 +204,9 @@ impl TableRegionalKey {
 /// region ids allocated by metasrv.
 #[derive(Debug, Serialize, Deserialize, Clone)]
 pub struct TableRegionalValue {
+    // We can remove the `Option` from the table id once all regional values
+    // stored in meta have table ids.
+    pub table_id: Option<TableId>,
     pub version: TableVersion,
     pub regions_ids: Vec<u32>,
     pub engine_name: Option<String>,
@@ -375,6 +393,28 @@ mod tests {
     #[test]
     fn test_table_global_value_compatibility() {
         let s = r#"{"node_id":1,"regions_id_map":{"1":[0]},"table_info":{"ident":{"table_id":1098,"version":1},"name":"container_cpu_limit","desc":"Created on insertion","catalog_name":"greptime","schema_name":"dd","meta":{"schema":{"column_schemas":[{"name":"container_id","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"container_name","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"docker_image","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"host","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"image_name","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"image_tag","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"interval","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"runtime","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"short_image","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"type","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"dd_value","data_type":{"Float64":{}},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"ts","data_type":{"Timestamp":{"Millisecond":null}},"is_nullable":false,"is_time_index":true,"default_constraint":null,"metadata":{"greptime:time_index":"true"}},{"name":"git.repository_url","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}}],"timestamp_index":11,"version":1},"primary_key_indices":[0,1,2,3,4,5,6,7,8,9,12],"value_indices":[10,11],"engine":"mito","next_column_id":12,"region_numbers":[],"engine_options":{},"options":{},"created_on":"1970-01-01T00:00:00Z"},"table_type":"Base"}}"#;
-        TableGlobalValue::parse(s).unwrap();
+        let _ = TableGlobalValue::parse(s).unwrap();
     }
+
+    fn test_valid_table_patterns(table_name: &str) {
+        assert_eq!(
+            table_name,
+            TableGlobalKey::parse(format!("__tg-catalog-schema-{}", table_name))
+                .unwrap()
+                .table_name
+        );
+
+        assert_eq!(
+            table_name,
+            TableRegionalKey::parse(format!("__tr-catalog-schema-{}-0", table_name))
+                .unwrap()
+                .table_name
+        );
+    }
+
+    #[test]
+    fn test_table_name_pattern() {
+        test_valid_table_patterns("cpu:metrics");
+        test_valid_table_patterns(":cpu:metrics");
+    }
 }
@@ -16,63 +16,51 @@ mod columns;
mod tables;

use std::any::Any;
use std::sync::Arc;
use std::sync::{Arc, Weak};

use async_trait::async_trait;
use common_error::prelude::BoxedError;
use common_query::physical_plan::PhysicalPlanRef;
use common_query::prelude::Expr;
use common_error::ext::BoxedError;
use common_recordbatch::{RecordBatchStreamAdaptor, SendableRecordBatchStream};
use datatypes::schema::SchemaRef;
use futures_util::StreamExt;
use snafu::ResultExt;
use store_api::storage::ScanRequest;
use table::error::{SchemaConversionSnafu, TablesRecordBatchSnafu};
use table::metadata::TableType;
use table::{Result as TableResult, Table, TableRef};

use self::columns::InformationSchemaColumns;
use crate::error::Result;
use crate::information_schema::tables::InformationSchemaTables;
use crate::{CatalogProviderRef, SchemaProvider};
use crate::CatalogManager;

const TABLES: &str = "tables";
const COLUMNS: &str = "columns";

pub(crate) struct InformationSchemaProvider {
pub struct InformationSchemaProvider {
    catalog_name: String,
    catalog_provider: CatalogProviderRef,
    tables: Vec<String>,
    catalog_manager: Weak<dyn CatalogManager>,
}

impl InformationSchemaProvider {
    pub(crate) fn new(catalog_name: String, catalog_provider: CatalogProviderRef) -> Self {
    pub fn new(catalog_name: String, catalog_manager: Weak<dyn CatalogManager>) -> Self {
        Self {
            catalog_name,
            catalog_provider,
            tables: vec![TABLES.to_string(), COLUMNS.to_string()],
            catalog_manager,
        }
    }
}

#[async_trait]
impl SchemaProvider for InformationSchemaProvider {
    fn as_any(&self) -> &dyn Any {
        self
    }

    async fn table_names(&self) -> Result<Vec<String>> {
        Ok(self.tables.clone())
    }

    async fn table(&self, name: &str) -> Result<Option<TableRef>> {
impl InformationSchemaProvider {
    pub fn table(&self, name: &str) -> Result<Option<TableRef>> {
        let stream_builder = match name.to_ascii_lowercase().as_ref() {
            TABLES => Arc::new(InformationSchemaTables::new(
                self.catalog_name.clone(),
                self.catalog_provider.clone(),
                self.catalog_manager.clone(),
            )) as _,
            COLUMNS => Arc::new(InformationSchemaColumns::new(
                self.catalog_name.clone(),
                self.catalog_provider.clone(),
                self.catalog_manager.clone(),
            )) as _,
            _ => {
                return Ok(None);
@@ -81,11 +69,6 @@ impl SchemaProvider for InformationSchemaProvider {

        Ok(Some(Arc::new(InformationTable::new(stream_builder))))
    }

    async fn table_exist(&self, name: &str) -> Result<bool> {
        let normalized_name = name.to_ascii_lowercase();
        Ok(self.tables.contains(&normalized_name))
    }
}

// TODO(ruihang): make it a more generic trait:
@@ -120,18 +103,8 @@ impl Table for InformationTable {
        unreachable!("Should not call table_info() of InformationTable directly")
    }

    /// Scan the table and returns a SendableRecordBatchStream.
    async fn scan(
        &self,
        _projection: Option<&Vec<usize>>,
        _filters: &[Expr],
        // limit can be used to reduce the amount scanned
        // from the datasource as a performance optimization.
        // If set, it contains the amount of rows needed by the `LogicalPlan`,
        // The datasource should return *at least* this number of rows if available.
        _limit: Option<usize>,
    ) -> TableResult<PhysicalPlanRef> {
        unimplemented!()
    fn table_type(&self) -> table::metadata::TableType {
        TableType::View
    }

    async fn scan_to_stream(&self, request: ScanRequest) -> TableResult<SendableRecordBatchStream> {
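The hunks above replace the owned CatalogProviderRef with a Weak<dyn CatalogManager>, presumably so that a provider reachable from the manager cannot keep the manager alive through an Arc reference cycle. A minimal sketch of that ownership pattern, under the assumption that cycle-breaking is the motivation (Mgr and Provider are illustrative stand-ins, not crate types):

use std::sync::{Arc, Weak};

trait CatalogManager: Send + Sync {
    fn table_names(&self) -> Vec<String>;
}

// Holding `Weak` instead of `Arc` lets the manager own the provider
// (transitively) without the two leaking each other.
struct Provider {
    manager: Weak<dyn CatalogManager>,
}

impl Provider {
    fn tables(&self) -> Option<Vec<String>> {
        // `upgrade()` fails once the manager is dropped.
        self.manager.upgrade().map(|m| m.table_names())
    }
}

struct Mgr;

impl CatalogManager for Mgr {
    fn table_names(&self) -> Vec<String> {
        vec!["tables".to_string(), "columns".to_string()]
    }
}

fn main() {
    let mgr: Arc<dyn CatalogManager> = Arc::new(Mgr);
    let provider = Provider {
        manager: Arc::downgrade(&mgr),
    };
    assert_eq!(provider.tables().unwrap().len(), 2);
    drop(mgr);
    assert!(provider.tables().is_none()); // manager gone, upgrade fails
}

In the real code the failed upgrade is surfaced as a typed error rather than an Option, as the columns builder below shows.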
@@ -12,13 +12,13 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use std::sync::Arc;
use std::sync::{Arc, Weak};

use arrow_schema::SchemaRef as ArrowSchemaRef;
use common_catalog::consts::{
    SEMANTIC_TYPE_FIELD, SEMANTIC_TYPE_PRIMARY_KEY, SEMANTIC_TYPE_TIME_INDEX,
};
use common_error::prelude::BoxedError;
use common_error::ext::BoxedError;
use common_query::physical_plan::TaskContext;
use common_recordbatch::adapter::RecordBatchStreamAdapter;
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
@@ -29,16 +29,18 @@ use datatypes::prelude::{ConcreteDataType, DataType};
use datatypes::scalars::ScalarVectorBuilder;
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
use datatypes::vectors::{StringVectorBuilder, VectorRef};
use snafu::ResultExt;
use snafu::{OptionExt, ResultExt};

use super::InformationStreamBuilder;
use crate::error::{CreateRecordBatchSnafu, InternalSnafu, Result};
use crate::CatalogProviderRef;
use crate::error::{
    CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
};
use crate::CatalogManager;

pub(super) struct InformationSchemaColumns {
    schema: SchemaRef,
    catalog_name: String,
    catalog_provider: CatalogProviderRef,
    catalog_manager: Weak<dyn CatalogManager>,
}

const TABLE_CATALOG: &str = "table_catalog";
@@ -49,7 +51,7 @@ const DATA_TYPE: &str = "data_type";
const SEMANTIC_TYPE: &str = "semantic_type";

impl InformationSchemaColumns {
    pub(super) fn new(catalog_name: String, catalog_provider: CatalogProviderRef) -> Self {
    pub(super) fn new(catalog_name: String, catalog_manager: Weak<dyn CatalogManager>) -> Self {
        let schema = Arc::new(Schema::new(vec![
            ColumnSchema::new(TABLE_CATALOG, ConcreteDataType::string_datatype(), false),
            ColumnSchema::new(TABLE_SCHEMA, ConcreteDataType::string_datatype(), false),
@@ -61,7 +63,7 @@ impl InformationSchemaColumns {
        Self {
            schema,
            catalog_name,
            catalog_provider,
            catalog_manager,
        }
    }

@@ -69,7 +71,7 @@ impl InformationSchemaColumns {
        InformationSchemaColumnsBuilder::new(
            self.schema.clone(),
            self.catalog_name.clone(),
            self.catalog_provider.clone(),
            self.catalog_manager.clone(),
        )
    }
}
@@ -103,7 +105,7 @@ impl InformationStreamBuilder for InformationSchemaColumns {
struct InformationSchemaColumnsBuilder {
    schema: SchemaRef,
    catalog_name: String,
    catalog_provider: CatalogProviderRef,
    catalog_manager: Weak<dyn CatalogManager>,

    catalog_names: StringVectorBuilder,
    schema_names: StringVectorBuilder,
@@ -114,11 +116,15 @@ struct InformationSchemaColumnsBuilder {
}

impl InformationSchemaColumnsBuilder {
    fn new(schema: SchemaRef, catalog_name: String, catalog_provider: CatalogProviderRef) -> Self {
    fn new(
        schema: SchemaRef,
        catalog_name: String,
        catalog_manager: Weak<dyn CatalogManager>,
    ) -> Self {
        Self {
            schema,
            catalog_name,
            catalog_provider,
            catalog_manager,
            catalog_names: StringVectorBuilder::with_capacity(42),
            schema_names: StringVectorBuilder::with_capacity(42),
            table_names: StringVectorBuilder::with_capacity(42),
@@ -131,11 +137,23 @@ impl InformationSchemaColumnsBuilder {
    /// Construct the `information_schema.tables` virtual table
    async fn make_tables(&mut self) -> Result<RecordBatch> {
        let catalog_name = self.catalog_name.clone();
        let catalog_manager = self
            .catalog_manager
            .upgrade()
            .context(UpgradeWeakCatalogManagerRefSnafu)?;

        for schema_name in self.catalog_provider.schema_names().await? {
            let Some(schema) = self.catalog_provider.schema(&schema_name).await? else { continue };
            for table_name in schema.table_names().await? {
                let Some(table) = schema.table(&table_name).await? else { continue };
        for schema_name in catalog_manager.schema_names(&catalog_name).await? {
            if !catalog_manager
                .schema_exist(&catalog_name, &schema_name)
                .await?
            {
                continue;
            }
            for table_name in catalog_manager
                .table_names(&catalog_name, &schema_name)
                .await?
            {
                let Some(table) = catalog_manager.table(&catalog_name, &schema_name, &table_name).await? else { continue };
                let keys = &table.table_info().meta.primary_key_indices;
                let schema = table.schema();
                for (idx, column) in schema.column_schemas().iter().enumerate() {
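The builder above turns a failed Weak::upgrade into a typed error via snafu's OptionExt::context and the UpgradeWeakCatalogManagerRefSnafu selector. A self-contained sketch of that conversion, assuming snafu 0.7 on the manifest (ManagerGone is an illustrative error, not the crate's):

use std::sync::{Arc, Weak};

use snafu::{OptionExt, Snafu};

#[derive(Debug, Snafu)]
#[snafu(display("catalog manager has already been dropped"))]
struct ManagerGone;

fn upgrade_manager(weak: &Weak<String>) -> Result<Arc<String>, ManagerGone> {
    // `context` maps `None` onto the given error selector.
    weak.upgrade().context(ManagerGoneSnafu)
}

fn main() {
    let strong = Arc::new("greptime".to_string());
    let weak = Arc::downgrade(&strong);
    assert!(upgrade_manager(&weak).is_ok());
    drop(strong);
    assert!(upgrade_manager(&weak).is_err());
}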
@@ -12,11 +12,11 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use std::sync::Arc;
use std::sync::{Arc, Weak};

use arrow_schema::SchemaRef as ArrowSchemaRef;
use common_catalog::consts::INFORMATION_SCHEMA_NAME;
use common_error::prelude::BoxedError;
use common_error::ext::BoxedError;
use common_query::physical_plan::TaskContext;
use common_recordbatch::adapter::RecordBatchStreamAdapter;
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
@@ -26,21 +26,23 @@ use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatc
use datatypes::prelude::{ConcreteDataType, ScalarVectorBuilder, VectorRef};
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
use datatypes::vectors::{StringVectorBuilder, UInt32VectorBuilder};
use snafu::ResultExt;
use snafu::{OptionExt, ResultExt};
use table::metadata::TableType;

use crate::error::{CreateRecordBatchSnafu, InternalSnafu, Result};
use crate::error::{
    CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
};
use crate::information_schema::InformationStreamBuilder;
use crate::CatalogProviderRef;
use crate::CatalogManager;

pub(super) struct InformationSchemaTables {
    schema: SchemaRef,
    catalog_name: String,
    catalog_provider: CatalogProviderRef,
    catalog_manager: Weak<dyn CatalogManager>,
}

impl InformationSchemaTables {
    pub(super) fn new(catalog_name: String, catalog_provider: CatalogProviderRef) -> Self {
    pub(super) fn new(catalog_name: String, catalog_manager: Weak<dyn CatalogManager>) -> Self {
        let schema = Arc::new(Schema::new(vec![
            ColumnSchema::new("table_catalog", ConcreteDataType::string_datatype(), false),
            ColumnSchema::new("table_schema", ConcreteDataType::string_datatype(), false),
@@ -52,7 +54,7 @@ impl InformationSchemaTables {
        Self {
            schema,
            catalog_name,
            catalog_provider,
            catalog_manager,
        }
    }

@@ -60,7 +62,7 @@ impl InformationSchemaTables {
        InformationSchemaTablesBuilder::new(
            self.schema.clone(),
            self.catalog_name.clone(),
            self.catalog_provider.clone(),
            self.catalog_manager.clone(),
        )
    }
}
@@ -97,7 +99,7 @@ impl InformationStreamBuilder for InformationSchemaTables {
struct InformationSchemaTablesBuilder {
    schema: SchemaRef,
    catalog_name: String,
    catalog_provider: CatalogProviderRef,
    catalog_manager: Weak<dyn CatalogManager>,

    catalog_names: StringVectorBuilder,
    schema_names: StringVectorBuilder,
@@ -108,11 +110,15 @@ struct InformationSchemaTablesBuilder {
}

impl InformationSchemaTablesBuilder {
    fn new(schema: SchemaRef, catalog_name: String, catalog_provider: CatalogProviderRef) -> Self {
    fn new(
        schema: SchemaRef,
        catalog_name: String,
        catalog_manager: Weak<dyn CatalogManager>,
    ) -> Self {
        Self {
            schema,
            catalog_name,
            catalog_provider,
            catalog_manager,
            catalog_names: StringVectorBuilder::with_capacity(42),
            schema_names: StringVectorBuilder::with_capacity(42),
            table_names: StringVectorBuilder::with_capacity(42),
@@ -125,15 +131,27 @@ impl InformationSchemaTablesBuilder {
    /// Construct the `information_schema.tables` virtual table
    async fn make_tables(&mut self) -> Result<RecordBatch> {
        let catalog_name = self.catalog_name.clone();
        let catalog_manager = self
            .catalog_manager
            .upgrade()
            .context(UpgradeWeakCatalogManagerRefSnafu)?;

        for schema_name in self.catalog_provider.schema_names().await? {
        for schema_name in catalog_manager.schema_names(&catalog_name).await? {
            if schema_name == INFORMATION_SCHEMA_NAME {
                continue;
            }
            if !catalog_manager
                .schema_exist(&catalog_name, &schema_name)
                .await?
            {
                continue;
            }

            let Some(schema) = self.catalog_provider.schema(&schema_name).await? else { continue };
            for table_name in schema.table_names().await? {
                let Some(table) = schema.table(&table_name).await? else { continue };
            for table_name in catalog_manager
                .table_names(&catalog_name, &schema_name)
                .await?
            {
                let Some(table) = catalog_manager.table(&catalog_name, &schema_name, &table_name).await? else { continue };
                let table_info = table.table_info();
                self.add_table(
                    &catalog_name,
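Both builders now walk names level by level through the flattened CatalogManager API instead of materializing provider objects, and the tables builder additionally skips information_schema so the virtual schema does not list itself. A sketch of that traversal against a stand-in manager (MockManager is hypothetical, with synchronous methods for brevity):

use std::collections::HashMap;

struct MockManager {
    // catalog -> schema -> table names
    data: HashMap<String, HashMap<String, Vec<String>>>,
}

impl MockManager {
    fn schema_names(&self, catalog: &str) -> Vec<String> {
        self.data
            .get(catalog)
            .map(|schemas| schemas.keys().cloned().collect())
            .unwrap_or_default()
    }

    fn table_names(&self, catalog: &str, schema: &str) -> Vec<String> {
        self.data
            .get(catalog)
            .and_then(|schemas| schemas.get(schema))
            .cloned()
            .unwrap_or_default()
    }
}

fn main() {
    let mut schemas = HashMap::new();
    schemas.insert("public".to_string(), vec!["numbers".to_string()]);
    schemas.insert("information_schema".to_string(), vec!["tables".to_string()]);
    let mut data = HashMap::new();
    data.insert("greptime".to_string(), schemas);
    let manager = MockManager { data };

    for schema in manager.schema_names("greptime") {
        // skip the virtual schema, as the tables builder does above
        if schema == "information_schema" {
            continue;
        }
        for table in manager.table_names("greptime", &schema) {
            println!("greptime.{schema}.{table}");
        }
    }
}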
@@ -14,13 +14,14 @@

#![feature(trait_upcasting)]
#![feature(assert_matches)]
#![feature(try_blocks)]

use std::any::Any;
use std::collections::HashMap;
use std::fmt::{Debug, Formatter};
use std::sync::Arc;

use api::v1::meta::{RegionStat, TableName};
use api::v1::meta::{RegionStat, TableIdent, TableName};
use common_telemetry::{info, warn};
use snafu::ResultExt;
use table::engine::{EngineContext, TableEngineRef};
@@ -29,65 +30,49 @@ use table::requests::CreateTableRequest;
use table::TableRef;

use crate::error::{CreateTableSnafu, Result};
pub use crate::schema::{SchemaProvider, SchemaProviderRef};

pub mod error;
pub mod helper;
pub(crate) mod information_schema;
pub mod information_schema;
pub mod local;
mod metrics;
pub mod remote;
pub mod schema;
pub mod system;
pub mod table_source;
pub mod tables;

/// Represents a catalog, comprising a number of named schemas.
#[async_trait::async_trait]
pub trait CatalogProvider: Sync + Send {
    /// Returns the catalog provider as [`Any`](std::any::Any)
    /// so that it can be downcast to a specific implementation.
    fn as_any(&self) -> &dyn Any;

    /// Retrieves the list of available schema names in this catalog.
    async fn schema_names(&self) -> Result<Vec<String>>;

    /// Registers schema to this catalog.
    async fn register_schema(
        &self,
        name: String,
        schema: SchemaProviderRef,
    ) -> Result<Option<SchemaProviderRef>>;

    /// Retrieves a specific schema from the catalog by name, provided it exists.
    async fn schema(&self, name: &str) -> Result<Option<SchemaProviderRef>>;
}

pub type CatalogProviderRef = Arc<dyn CatalogProvider>;

#[async_trait::async_trait]
pub trait CatalogManager: Send + Sync {
    fn as_any(&self) -> &dyn Any;

    /// Starts a catalog manager.
    async fn start(&self) -> Result<()>;

    async fn register_catalog(
        &self,
        name: String,
        catalog: CatalogProviderRef,
    ) -> Result<Option<CatalogProviderRef>>;

    /// Registers a table within given catalog/schema to catalog manager,
    /// returns whether the table registered.
    async fn register_table(&self, request: RegisterTableRequest) -> Result<bool>;

    /// Deregisters a table within given catalog/schema to catalog manager,
    /// returns whether the table deregistered.
    async fn deregister_table(&self, request: DeregisterTableRequest) -> Result<bool>;
    /// Registers a catalog to catalog manager, returns whether the catalog existed before.
    async fn register_catalog(&self, name: String) -> Result<bool>;

    /// Registers a schema with catalog name and schema name. Returns whether the
    /// schema is registered.
    ///
    /// # Errors
    ///
    /// This method will/should fail if the catalog does not exist
    async fn register_schema(&self, request: RegisterSchemaRequest) -> Result<bool>;

    /// Deregisters a database within given catalog/schema to catalog manager
    async fn deregister_schema(&self, request: DeregisterSchemaRequest) -> Result<bool>;

    /// Registers a table within given catalog/schema to catalog manager,
    /// returns whether the table registered.
    ///
    /// # Errors
    ///
    /// This method will/should fail if the catalog or schema does not exist
    async fn register_table(&self, request: RegisterTableRequest) -> Result<bool>;

    /// Deregisters a table within given catalog/schema to catalog manager
    async fn deregister_table(&self, request: DeregisterTableRequest) -> Result<()>;

    /// Rename a table to [RenameTableRequest::new_table_name], returns whether the table is renamed.
    async fn rename_table(&self, request: RenameTableRequest) -> Result<bool>;

@@ -97,9 +82,15 @@ pub trait CatalogManager: Send + Sync {

    async fn catalog_names(&self) -> Result<Vec<String>>;

    async fn catalog(&self, catalog: &str) -> Result<Option<CatalogProviderRef>>;
    async fn schema_names(&self, catalog: &str) -> Result<Vec<String>>;

    async fn schema(&self, catalog: &str, schema: &str) -> Result<Option<SchemaProviderRef>>;
    async fn table_names(&self, catalog: &str, schema: &str) -> Result<Vec<String>>;

    async fn catalog_exist(&self, catalog: &str) -> Result<bool>;

    async fn schema_exist(&self, catalog: &str, schema: &str) -> Result<bool>;

    async fn table_exist(&self, catalog: &str, schema: &str, table: &str) -> Result<bool>;

    /// Returns the table by catalog, schema and table name.
    async fn table(
@@ -108,8 +99,6 @@ pub trait CatalogManager: Send + Sync {
        schema: &str,
        table_name: &str,
    ) -> Result<Option<TableRef>>;

    fn as_any(&self) -> &dyn Any;
}

pub type CatalogManagerRef = Arc<dyn CatalogManager>;
@@ -164,17 +153,15 @@ pub struct DeregisterTableRequest {
}

#[derive(Debug, Clone)]
pub struct RegisterSchemaRequest {
pub struct DeregisterSchemaRequest {
    pub catalog: String,
    pub schema: String,
}

pub trait CatalogProviderFactory {
    fn create(&self, catalog_name: String) -> CatalogProviderRef;
}

pub trait SchemaProviderFactory {
    fn create(&self, catalog_name: String, schema_name: String) -> SchemaProviderRef;
#[derive(Debug, Clone)]
pub struct RegisterSchemaRequest {
    pub catalog: String,
    pub schema: String,
}

pub(crate) async fn handle_system_table_request<'a, M: CatalogManager>(
@@ -202,7 +189,7 @@ pub(crate) async fn handle_system_table_request<'a, M: CatalogManager>(
                table_name,
            ),
        })?;
        manager
        let _ = manager
            .register_table(RegisterTableRequest {
                catalog: catalog_name.clone(),
                schema: schema_name.clone(),
@@ -233,29 +220,31 @@ pub async fn datanode_stat(catalog_manager: &CatalogManagerRef) -> (u64, Vec<Reg

    let Ok(catalog_names) = catalog_manager.catalog_names().await else { return (region_number, region_stats) };
    for catalog_name in catalog_names {
        let Ok(Some(catalog)) = catalog_manager.catalog(&catalog_name).await else { continue };

        let Ok(schema_names) = catalog.schema_names().await else { continue };
        let Ok(schema_names) = catalog_manager.schema_names(&catalog_name).await else { continue };
        for schema_name in schema_names {
            let Ok(Some(schema)) = catalog.schema(&schema_name).await else { continue };

            let Ok(table_names) = schema.table_names().await else { continue };
            let Ok(table_names) = catalog_manager.table_names(&catalog_name, &schema_name).await else { continue };
            for table_name in table_names {
                let Ok(Some(table)) = schema.table(&table_name).await else { continue };
                let Ok(Some(table)) = catalog_manager.table(&catalog_name, &schema_name, &table_name).await else { continue };

                let region_numbers = &table.table_info().meta.region_numbers;
                let table_info = table.table_info();
                let region_numbers = &table_info.meta.region_numbers;
                region_number += region_numbers.len() as u64;

                let engine = &table.table_info().meta.engine;
                let engine = &table_info.meta.engine;
                let table_id = table_info.ident.table_id;

                match table.region_stats() {
                    Ok(stats) => {
                        let stats = stats.into_iter().map(|stat| RegionStat {
                            region_id: stat.region_id,
                            table_name: Some(TableName {
                                catalog_name: catalog_name.clone(),
                                schema_name: schema_name.clone(),
                                table_name: table_name.clone(),
                            table_ident: Some(TableIdent {
                                table_id,
                                table_name: Some(TableName {
                                    catalog_name: catalog_name.clone(),
                                    schema_name: schema_name.clone(),
                                    table_name: table_name.clone(),
                                }),
                                engine: engine.clone(),
                            }),
                            approximate_bytes: stat.disk_usage_bytes as i64,
                            attrs: HashMap::from([("engine_name".to_owned(), engine.clone())]),
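The reworked trait above is purely name-based: callers register catalogs and schemas by name and probe existence, instead of fetching and holding provider handles. A trimmed, illustrative model of that contract (TinyManager is a sketch, not a crate type; the real trait is async and returns the crate's Result):

use std::collections::hash_map::Entry;
use std::collections::{HashMap, HashSet};

#[derive(Default)]
struct TinyManager {
    // catalog name -> set of schema names
    catalogs: HashMap<String, HashSet<String>>,
}

impl TinyManager {
    /// Returns whether the catalog was newly registered.
    fn register_catalog(&mut self, name: String) -> bool {
        match self.catalogs.entry(name) {
            Entry::Vacant(e) => {
                e.insert(HashSet::new());
                true
            }
            Entry::Occupied(_) => false,
        }
    }

    /// Fails if the catalog does not exist, mirroring the contract above.
    fn register_schema(&mut self, catalog: &str, schema: String) -> Result<bool, String> {
        let schemas = self
            .catalogs
            .get_mut(catalog)
            .ok_or_else(|| format!("catalog {catalog} not found"))?;
        Ok(schemas.insert(schema))
    }

    fn schema_exist(&self, catalog: &str, schema: &str) -> bool {
        self.catalogs
            .get(catalog)
            .map_or(false, |schemas| schemas.contains(schema))
    }
}

fn main() {
    let mut manager = TinyManager::default();
    assert!(manager.register_catalog("greptime".to_string()));
    assert!(!manager.register_catalog("greptime".to_string()));
    assert!(manager.register_schema("greptime", "public".to_string()).unwrap());
    assert!(manager.schema_exist("greptime", "public"));
    assert!(manager.register_schema("nope", "public".to_string()).is_err());
}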
@@ -16,6 +16,4 @@ pub mod manager;
pub mod memory;

pub use manager::LocalCatalogManager;
pub use memory::{
    new_memory_catalog_list, MemoryCatalogManager, MemoryCatalogProvider, MemorySchemaProvider,
};
pub use memory::{new_memory_catalog_manager, MemoryCatalogManager};
@@ -18,7 +18,8 @@ use std::sync::Arc;

use common_catalog::consts::{
    DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, INFORMATION_SCHEMA_NAME, MIN_USER_TABLE_ID,
    MITO_ENGINE, SYSTEM_CATALOG_NAME, SYSTEM_CATALOG_TABLE_NAME,
    MITO_ENGINE, NUMBERS_TABLE_ID, SYSTEM_CATALOG_NAME, SYSTEM_CATALOG_TABLE_ID,
    SYSTEM_CATALOG_TABLE_NAME,
};
use common_catalog::format_full_table_name;
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
@@ -32,7 +33,7 @@ use table::engine::manager::TableEngineManagerRef;
use table::engine::EngineContext;
use table::metadata::TableId;
use table::requests::OpenTableRequest;
use table::table::numbers::NumbersTable;
use table::table::numbers::{NumbersTable, NUMBERS_TABLE_NAME};
use table::table::TableIdProvider;
use table::TableRef;

@@ -40,18 +41,19 @@ use crate::error::{
    self, CatalogNotFoundSnafu, IllegalManagerStateSnafu, OpenTableSnafu, ReadSystemCatalogSnafu,
    Result, SchemaExistsSnafu, SchemaNotFoundSnafu, SystemCatalogSnafu,
    SystemCatalogTypeMismatchSnafu, TableEngineNotFoundSnafu, TableExistsSnafu, TableNotExistSnafu,
    TableNotFoundSnafu,
    TableNotFoundSnafu, UnimplementedSnafu,
};
use crate::local::memory::{MemoryCatalogManager, MemoryCatalogProvider, MemorySchemaProvider};
use crate::information_schema::InformationSchemaProvider;
use crate::local::memory::MemoryCatalogManager;
use crate::system::{
    decode_system_catalog, Entry, SystemCatalogTable, TableEntry, ENTRY_TYPE_INDEX, KEY_INDEX,
    VALUE_INDEX,
};
use crate::tables::SystemCatalog;
use crate::{
    handle_system_table_request, CatalogManager, CatalogProviderRef, DeregisterTableRequest,
    RegisterSchemaRequest, RegisterSystemTableRequest, RegisterTableRequest, RenameTableRequest,
    SchemaProviderRef,
    handle_system_table_request, CatalogManager, CatalogManagerRef, DeregisterSchemaRequest,
    DeregisterTableRequest, RegisterSchemaRequest, RegisterSystemTableRequest,
    RegisterTableRequest, RenameTableRequest,
};

/// A `CatalogManager` consists of a system catalog and a bunch of user catalogs.
@@ -74,11 +76,11 @@ impl LocalCatalogManager {
            engine_name: MITO_ENGINE,
        })?;
        let table = SystemCatalogTable::new(engine.clone()).await?;
        let memory_catalog_list = crate::local::memory::new_memory_catalog_list()?;
        let memory_catalog_manager = crate::local::memory::new_memory_catalog_manager()?;
        let system_catalog = Arc::new(SystemCatalog::new(table));
        Ok(Self {
            system: system_catalog,
            catalogs: memory_catalog_list,
            catalogs: memory_catalog_manager,
            engine_manager,
            next_table_id: AtomicU32::new(MIN_USER_TABLE_ID),
            init_lock: Mutex::new(false),
@@ -116,26 +118,47 @@ impl LocalCatalogManager {
    }

    async fn init_system_catalog(&self) -> Result<()> {
        let system_schema = Arc::new(MemorySchemaProvider::new());
        system_schema.register_table_sync(
            SYSTEM_CATALOG_TABLE_NAME.to_string(),
            self.system.information_schema.system.clone(),
        )?;
        let system_catalog = Arc::new(MemoryCatalogProvider::new());
        system_catalog.register_schema_sync(INFORMATION_SCHEMA_NAME.to_string(), system_schema)?;
        self.catalogs
            .register_catalog_sync(SYSTEM_CATALOG_NAME.to_string(), system_catalog)?;
        // register SystemCatalogTable
        let _ = self
            .catalogs
            .register_catalog_sync(SYSTEM_CATALOG_NAME.to_string())?;
        let _ = self.catalogs.register_schema_sync(RegisterSchemaRequest {
            catalog: SYSTEM_CATALOG_NAME.to_string(),
            schema: INFORMATION_SCHEMA_NAME.to_string(),
        })?;
        let register_table_req = RegisterTableRequest {
            catalog: SYSTEM_CATALOG_NAME.to_string(),
            schema: INFORMATION_SCHEMA_NAME.to_string(),
            table_name: SYSTEM_CATALOG_TABLE_NAME.to_string(),
            table_id: SYSTEM_CATALOG_TABLE_ID,
            table: self.system.information_schema.system.clone(),
        };
        let _ = self.catalogs.register_table(register_table_req).await?;

        let default_catalog = Arc::new(MemoryCatalogProvider::new());
        let default_schema = Arc::new(MemorySchemaProvider::new());
        // register default catalog and default schema
        let _ = self
            .catalogs
            .register_catalog_sync(DEFAULT_CATALOG_NAME.to_string())?;
        let _ = self.catalogs.register_schema_sync(RegisterSchemaRequest {
            catalog: DEFAULT_CATALOG_NAME.to_string(),
            schema: DEFAULT_SCHEMA_NAME.to_string(),
        })?;

        // Add numbers table for test
        let table = Arc::new(NumbersTable::default());
        default_schema.register_table_sync("numbers".to_string(), table)?;
        let numbers_table = Arc::new(NumbersTable::default());
        let register_number_table_req = RegisterTableRequest {
            catalog: DEFAULT_CATALOG_NAME.to_string(),
            schema: DEFAULT_SCHEMA_NAME.to_string(),
            table_name: NUMBERS_TABLE_NAME.to_string(),
            table_id: NUMBERS_TABLE_ID,
            table: numbers_table,
        };

        let _ = self
            .catalogs
            .register_table(register_number_table_req)
            .await?;

        default_catalog.register_schema_sync(DEFAULT_SCHEMA_NAME.to_string(), default_schema)?;
        self.catalogs
            .register_catalog_sync(DEFAULT_CATALOG_NAME.to_string(), default_catalog)?;
        Ok(())
    }

@@ -207,30 +230,26 @@ impl LocalCatalogManager {
        for entry in entries {
            match entry {
                Entry::Catalog(c) => {
                    self.catalogs.register_catalog_if_absent(
                        c.catalog_name.clone(),
                        Arc::new(MemoryCatalogProvider::new()),
                    );
                    let _ = self
                        .catalogs
                        .register_catalog_if_absent(c.catalog_name.clone());
                    info!("Register catalog: {}", c.catalog_name);
                }
                Entry::Schema(s) => {
                    self.catalogs
                        .catalog(&s.catalog_name)
                        .await?
                        .context(CatalogNotFoundSnafu {
                            catalog_name: &s.catalog_name,
                        })?
                        .register_schema(
                            s.schema_name.clone(),
                            Arc::new(MemorySchemaProvider::new()),
                        )
                        .await?;
                    let req = RegisterSchemaRequest {
                        catalog: s.catalog_name.clone(),
                        schema: s.schema_name.clone(),
                    };
                    let _ = self.catalogs.register_schema_sync(req)?;
                    info!("Registered schema: {:?}", s);
                }
                Entry::Table(t) => {
                    max_table_id = max_table_id.max(t.table_id);
                    if t.is_deleted {
                        continue;
                    }
                    self.open_and_register_table(&t).await?;
                    info!("Registered table: {:?}", t);
                    max_table_id = max_table_id.max(t.table_id);
                }
            }
        }
@@ -245,23 +264,11 @@ impl LocalCatalogManager {
    }

    async fn open_and_register_table(&self, t: &TableEntry) -> Result<()> {
        let catalog =
            self.catalogs
                .catalog(&t.catalog_name)
                .await?
                .context(CatalogNotFoundSnafu {
                    catalog_name: &t.catalog_name,
                })?;
        let schema = catalog
            .schema(&t.schema_name)
            .await?
            .context(SchemaNotFoundSnafu {
                catalog: &t.catalog_name,
                schema: &t.schema_name,
            })?;
        self.check_catalog_schema_exist(&t.catalog_name, &t.schema_name)
            .await?;

        let context = EngineContext {};
        let request = OpenTableRequest {
        let open_request = OpenTableRequest {
            catalog_name: t.catalog_name.clone(),
            schema_name: t.schema_name.clone(),
            table_name: t.table_name.clone(),
@@ -275,8 +282,8 @@ impl LocalCatalogManager {
                engine_name: &t.engine,
            })?;

        let option = engine
            .open_table(&context, request)
        let table_ref = engine
            .open_table(&context, open_request)
            .await
            .with_context(|_| OpenTableSnafu {
                table_info: format!(
@@ -291,7 +298,48 @@ impl LocalCatalogManager {
                ),
            })?;

        schema.register_table(t.table_name.clone(), option).await?;
        let register_request = RegisterTableRequest {
            catalog: t.catalog_name.clone(),
            schema: t.schema_name.clone(),
            table_name: t.table_name.clone(),
            table_id: t.table_id,
            table: table_ref,
        };
        let _ = self.catalogs.register_table(register_request).await?;

        Ok(())
    }

    async fn check_state(&self) -> Result<()> {
        let started = self.init_lock.lock().await;
        ensure!(
            *started,
            IllegalManagerStateSnafu {
                msg: "Catalog manager not started",
            }
        );
        Ok(())
    }

    async fn check_catalog_schema_exist(
        &self,
        catalog_name: &str,
        schema_name: &str,
    ) -> Result<()> {
        if !self.catalogs.catalog_exist(catalog_name).await? {
            return CatalogNotFoundSnafu { catalog_name }.fail()?;
        }
        if !self
            .catalogs
            .schema_exist(catalog_name, schema_name)
            .await?
        {
            return SchemaNotFoundSnafu {
                catalog: catalog_name,
                schema: schema_name,
            }
            .fail()?;
        }
        Ok(())
    }
}
@@ -312,34 +360,21 @@ impl CatalogManager for LocalCatalogManager {
    }

    async fn register_table(&self, request: RegisterTableRequest) -> Result<bool> {
        let started = self.init_lock.lock().await;
        self.check_state().await?;

        ensure!(
            *started,
            IllegalManagerStateSnafu {
                msg: "Catalog manager not started",
            }
        );
        let catalog_name = request.catalog.clone();
        let schema_name = request.schema.clone();

        let catalog_name = &request.catalog;
        let schema_name = &request.schema;

        let catalog = self
            .catalogs
            .catalog(catalog_name)
            .await?
            .context(CatalogNotFoundSnafu { catalog_name })?;
        let schema = catalog
            .schema(schema_name)
            .await?
            .with_context(|| SchemaNotFoundSnafu {
                catalog: catalog_name,
                schema: schema_name,
            })?;
        self.check_catalog_schema_exist(&catalog_name, &schema_name)
            .await?;

        {
            let _lock = self.register_lock.lock().await;
            if let Some(existing) = schema.table(&request.table_name).await? {
            if let Some(existing) = self
                .catalogs
                .table(&request.catalog, &request.schema, &request.table_name)
                .await?
            {
                if existing.table_info().ident.table_id != request.table_id {
                    error!(
                        "Unexpected table register request: {:?}, existing: {:?}",
@@ -348,8 +383,8 @@ impl CatalogManager for LocalCatalogManager {
                    );
                    return TableExistsSnafu {
                        table: format_full_table_name(
                            catalog_name,
                            schema_name,
                            &catalog_name,
                            &schema_name,
                            &request.table_name,
                        ),
                    }
@@ -358,24 +393,25 @@ impl CatalogManager for LocalCatalogManager {
                // Try to register table with same table id, just ignore.
                Ok(false)
            } else {
                let engine = request.table.table_info().meta.engine.to_string();
                // table does not exist
                self.system
                let engine = request.table.table_info().meta.engine.to_string();
                let table_name = request.table_name.clone();
                let table_id = request.table_id;
                let _ = self.catalogs.register_table(request).await?;
                let _ = self
                    .system
                    .register_table(
                        catalog_name.clone(),
                        schema_name.clone(),
                        request.table_name.clone(),
                        request.table_id,
                        table_name,
                        table_id,
                        engine,
                    )
                    .await?;
                schema
                    .register_table(request.table_name, request.table)
                    .await?;
                increment_gauge!(
                    crate::metrics::METRIC_CATALOG_MANAGER_TABLE_COUNT,
                    1.0,
                    &[crate::metrics::db_label(catalog_name, schema_name)],
                    &[crate::metrics::db_label(&catalog_name, &schema_name)],
                );
                Ok(true)
            }
@@ -383,41 +419,27 @@ impl CatalogManager for LocalCatalogManager {
    }

    async fn rename_table(&self, request: RenameTableRequest) -> Result<bool> {
        let started = self.init_lock.lock().await;

        ensure!(
            *started,
            IllegalManagerStateSnafu {
                msg: "Catalog manager not started",
            }
        );
        self.check_state().await?;

        let catalog_name = &request.catalog;
        let schema_name = &request.schema;

        let catalog = self
            .catalogs
            .catalog(catalog_name)
            .await?
            .context(CatalogNotFoundSnafu { catalog_name })?;

        let schema = catalog
            .schema(schema_name)
            .await?
            .with_context(|| SchemaNotFoundSnafu {
                catalog: catalog_name,
                schema: schema_name,
            })?;

        let _lock = self.register_lock.lock().await;
        self.check_catalog_schema_exist(catalog_name, schema_name)
            .await?;
        ensure!(
            !schema.table_exist(&request.new_table_name).await?,
            self.catalogs
                .table(catalog_name, schema_name, &request.new_table_name)
                .await?
                .is_none(),
            TableExistsSnafu {
                table: &request.new_table_name
            }
        );
        let old_table = schema
            .table(&request.table_name)

        let _lock = self.register_lock.lock().await;
        let old_table = self
            .catalogs
            .table(catalog_name, schema_name, &request.table_name)
            .await?
            .context(TableNotExistSnafu {
                table: &request.table_name,
@@ -425,7 +447,8 @@ impl CatalogManager for LocalCatalogManager {

        let engine = old_table.table_info().meta.engine.to_string();
        // rename table in system catalog
        self.system
        let _ = self
            .system
            .register_table(
                catalog_name.clone(),
                schema_name.clone(),
@@ -435,18 +458,11 @@ impl CatalogManager for LocalCatalogManager {
            )
            .await?;

        let renamed = schema
            .rename_table(&request.table_name, request.new_table_name.clone())
            .await
            .is_ok();
        Ok(renamed)
        self.catalogs.rename_table(request).await
    }

    async fn deregister_table(&self, request: DeregisterTableRequest) -> Result<bool> {
        {
            let started = *self.init_lock.lock().await;
            ensure!(started, IllegalManagerStateSnafu { msg: "not started" });
        }
    async fn deregister_table(&self, request: DeregisterTableRequest) -> Result<()> {
        self.check_state().await?;

        {
            let _ = self.register_lock.lock().await;
@@ -467,61 +483,51 @@ impl CatalogManager for LocalCatalogManager {
                .ident
                .table_id;

            if !self.system.deregister_table(&request, table_id).await? {
                return Ok(false);
            }

            self.system.deregister_table(&request, table_id).await?;
            self.catalogs.deregister_table(request).await
        }
    }

    async fn register_schema(&self, request: RegisterSchemaRequest) -> Result<bool> {
        let started = self.init_lock.lock().await;
        ensure!(
            *started,
            IllegalManagerStateSnafu {
                msg: "Catalog manager not started",
            }
        );
        self.check_state().await?;

        let catalog_name = &request.catalog;
        let schema_name = &request.schema;

        let catalog = self
            .catalogs
            .catalog(catalog_name)
            .await?
            .context(CatalogNotFoundSnafu { catalog_name })?;
        if !self.catalogs.catalog_exist(catalog_name).await? {
            return CatalogNotFoundSnafu { catalog_name }.fail()?;
        }

        {
            let _lock = self.register_lock.lock().await;
            ensure!(
                catalog.schema(schema_name).await?.is_none(),
                !self
                    .catalogs
                    .schema_exist(catalog_name, schema_name)
                    .await?,
                SchemaExistsSnafu {
                    schema: schema_name,
                }
            );
            self.system
                .register_schema(request.catalog, schema_name.clone())
            let _ = self
                .system
                .register_schema(request.catalog.clone(), schema_name.clone())
                .await?;
            catalog
                .register_schema(request.schema, Arc::new(MemorySchemaProvider::new()))
                .await?;

            Ok(true)
            self.catalogs.register_schema_sync(request)
        }
    }

    async fn deregister_schema(&self, _request: DeregisterSchemaRequest) -> Result<bool> {
        UnimplementedSnafu {
            operation: "deregister schema",
        }
        .fail()
    }

    async fn register_system_table(&self, request: RegisterSystemTableRequest) -> Result<()> {
        let catalog_name = request.create_table_request.catalog_name.clone();
        let schema_name = request.create_table_request.schema_name.clone();

        ensure!(
            !*self.init_lock.lock().await,
            IllegalManagerStateSnafu {
                msg: "Catalog manager already started",
            }
        );

        let mut sys_table_requests = self.system_table_requests.lock().await;
        sys_table_requests.push(request);
        increment_gauge!(
@@ -532,15 +538,8 @@ impl CatalogManager for LocalCatalogManager {
        Ok(())
    }

    async fn schema(&self, catalog: &str, schema: &str) -> Result<Option<SchemaProviderRef>> {
        self.catalogs
            .catalog(catalog)
            .await?
            .context(CatalogNotFoundSnafu {
                catalog_name: catalog,
            })?
            .schema(schema)
            .await
    async fn schema_exist(&self, catalog: &str, schema: &str) -> Result<bool> {
        self.catalogs.schema_exist(catalog, schema).await
    }

    async fn table(
@@ -549,39 +548,44 @@ impl CatalogManager for LocalCatalogManager {
        schema_name: &str,
        table_name: &str,
    ) -> Result<Option<TableRef>> {
        let catalog = self
            .catalogs
            .catalog(catalog_name)
            .await?
            .context(CatalogNotFoundSnafu { catalog_name })?;
        let schema = catalog
            .schema(schema_name)
            .await?
            .with_context(|| SchemaNotFoundSnafu {
                catalog: catalog_name,
                schema: schema_name,
            })?;
        schema.table(table_name).await
        if schema_name == INFORMATION_SCHEMA_NAME {
            let manager: CatalogManagerRef = self.catalogs.clone() as _;
            let provider =
                InformationSchemaProvider::new(catalog_name.to_string(), Arc::downgrade(&manager));
            return provider.table(table_name);
        }

        self.catalogs
            .table(catalog_name, schema_name, table_name)
            .await
    }

    async fn catalog(&self, catalog: &str) -> Result<Option<CatalogProviderRef>> {
    async fn catalog_exist(&self, catalog: &str) -> Result<bool> {
        if catalog.eq_ignore_ascii_case(SYSTEM_CATALOG_NAME) {
            Ok(Some(self.system.clone()))
            Ok(true)
        } else {
            self.catalogs.catalog(catalog).await
            self.catalogs.catalog_exist(catalog).await
        }
    }

    async fn table_exist(&self, catalog: &str, schema: &str, table: &str) -> Result<bool> {
        self.catalogs.table_exist(catalog, schema, table).await
    }

    async fn catalog_names(&self) -> Result<Vec<String>> {
        self.catalogs.catalog_names().await
    }

    async fn register_catalog(
        &self,
        name: String,
        catalog: CatalogProviderRef,
    ) -> Result<Option<CatalogProviderRef>> {
        self.catalogs.register_catalog(name, catalog).await
    async fn schema_names(&self, catalog_name: &str) -> Result<Vec<String>> {
        self.catalogs.schema_names(catalog_name).await
    }

    async fn table_names(&self, catalog_name: &str, schema_name: &str) -> Result<Vec<String>> {
        self.catalogs.table_names(catalog_name, schema_name).await
    }

    async fn register_catalog(&self, name: String) -> Result<bool> {
        self.catalogs.register_catalog(name).await
    }

    fn as_any(&self) -> &dyn Any {
@@ -607,6 +611,7 @@ mod tests {
                table_name: "T1".to_string(),
                table_id: 1,
                engine: MITO_ENGINE.to_string(),
                is_deleted: false,
            }),
            Entry::Catalog(CatalogEntry {
                catalog_name: "C2".to_string(),
@@ -628,6 +633,7 @@ mod tests {
                table_name: "T2".to_string(),
                table_id: 2,
                engine: MITO_ENGINE.to_string(),
                is_deleted: false,
            }),
        ];
        let res = LocalCatalogManager::sort_entries(vec);
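The reworked LocalCatalogManager::table above routes lookups against the virtual information_schema to a provider built on the fly from a downgraded manager reference, while everything else goes to the backing store. A minimal sketch of that dispatch shape (Store and Router are illustrative stand-ins, synchronous for brevity):

use std::collections::HashMap;

struct Store {
    tables: HashMap<(String, String, String), &'static str>,
}

struct Router {
    store: Store,
}

impl Router {
    fn table(&self, catalog: &str, schema: &str, table: &str) -> Option<&'static str> {
        if schema == "information_schema" {
            // virtual tables are synthesized per request rather than stored
            return match table {
                "tables" | "columns" => Some("virtual"),
                _ => None,
            };
        }
        self.store
            .tables
            .get(&(catalog.to_string(), schema.to_string(), table.to_string()))
            .copied()
    }
}

fn main() {
    let mut tables = HashMap::new();
    tables.insert(
        ("greptime".to_string(), "public".to_string(), "numbers".to_string()),
        "physical",
    );
    let router = Router { store: Store { tables } };
    assert_eq!(router.table("greptime", "public", "numbers"), Some("physical"));
    assert_eq!(router.table("greptime", "information_schema", "tables"), Some("virtual"));
    assert_eq!(router.table("greptime", "information_schema", "nope"), None);
}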
@@ -18,29 +18,27 @@ use std::collections::HashMap;
|
||||
use std::sync::atomic::{AtomicU32, Ordering};
|
||||
use std::sync::{Arc, RwLock};
|
||||
|
||||
use async_trait::async_trait;
|
||||
use common_catalog::consts::MIN_USER_TABLE_ID;
|
||||
use common_telemetry::error;
|
||||
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, MIN_USER_TABLE_ID};
|
||||
use metrics::{decrement_gauge, increment_gauge};
|
||||
use snafu::{ensure, OptionExt};
|
||||
use snafu::OptionExt;
|
||||
use table::metadata::TableId;
|
||||
use table::table::TableIdProvider;
|
||||
use table::TableRef;
|
||||
|
||||
use crate::error::{
|
||||
self, CatalogNotFoundSnafu, Result, SchemaNotFoundSnafu, TableExistsSnafu, TableNotFoundSnafu,
|
||||
CatalogNotFoundSnafu, Result, SchemaNotFoundSnafu, TableExistsSnafu, TableNotFoundSnafu,
|
||||
};
|
||||
use crate::schema::SchemaProvider;
|
||||
use crate::{
|
||||
CatalogManager, CatalogProvider, CatalogProviderRef, DeregisterTableRequest,
|
||||
RegisterSchemaRequest, RegisterSystemTableRequest, RegisterTableRequest, RenameTableRequest,
|
||||
SchemaProviderRef,
|
||||
CatalogManager, DeregisterSchemaRequest, DeregisterTableRequest, RegisterSchemaRequest,
|
||||
RegisterSystemTableRequest, RegisterTableRequest, RenameTableRequest,
|
||||
};
|
||||
|
||||
type SchemaEntries = HashMap<String, HashMap<String, TableRef>>;
|
||||
|
||||
/// Simple in-memory list of catalogs
|
||||
pub struct MemoryCatalogManager {
|
||||
/// Collection of catalogs containing schemas and ultimately Tables
|
||||
pub catalogs: RwLock<HashMap<String, CatalogProviderRef>>,
|
||||
pub catalogs: RwLock<HashMap<String, SchemaEntries>>,
|
||||
pub table_id: AtomicU32,
|
||||
}
|
||||
|
||||
@@ -50,13 +48,14 @@ impl Default for MemoryCatalogManager {
|
||||
table_id: AtomicU32::new(MIN_USER_TABLE_ID),
|
||||
catalogs: Default::default(),
|
||||
};
|
||||
let default_catalog = Arc::new(MemoryCatalogProvider::new());
|
||||
manager
|
||||
.register_catalog_sync("greptime".to_string(), default_catalog.clone())
|
||||
.unwrap();
|
||||
default_catalog
|
||||
.register_schema_sync("public".to_string(), Arc::new(MemorySchemaProvider::new()))
|
||||
.unwrap();
|
||||
|
||||
let catalog = HashMap::from([(DEFAULT_SCHEMA_NAME.to_string(), HashMap::new())]);
|
||||
let _ = manager
|
||||
.catalogs
|
||||
.write()
|
||||
.unwrap()
|
||||
.insert(DEFAULT_CATALOG_NAME.to_string(), catalog);
|
||||
|
||||
manager
|
||||
}
|
||||
}
|
||||
@@ -76,81 +75,94 @@ impl CatalogManager for MemoryCatalogManager {
|
||||
}
|
||||
|
||||
async fn register_table(&self, request: RegisterTableRequest) -> Result<bool> {
|
||||
let schema = self
|
||||
.catalog(&request.catalog)
|
||||
.context(CatalogNotFoundSnafu {
|
||||
catalog_name: &request.catalog,
|
||||
})?
|
||||
.schema(&request.schema)
|
||||
.await?
|
||||
.context(SchemaNotFoundSnafu {
|
||||
catalog: &request.catalog,
|
||||
schema: &request.schema,
|
||||
})?;
|
||||
increment_gauge!(
|
||||
crate::metrics::METRIC_CATALOG_MANAGER_TABLE_COUNT,
|
||||
1.0,
|
||||
&[crate::metrics::db_label(&request.catalog, &request.schema)],
|
||||
);
|
||||
schema
|
||||
.register_table(request.table_name, request.table)
|
||||
.await
|
||||
.map(|v| v.is_none())
|
||||
self.register_table_sync(request)
|
||||
}
|
||||
|
||||
async fn rename_table(&self, request: RenameTableRequest) -> Result<bool> {
|
||||
let catalog = self
|
||||
.catalog(&request.catalog)
|
||||
.context(CatalogNotFoundSnafu {
|
||||
catalog_name: &request.catalog,
|
||||
})?;
|
||||
let schema =
|
||||
catalog
|
||||
.schema(&request.schema)
|
||||
.await?
|
||||
.with_context(|| SchemaNotFoundSnafu {
|
||||
catalog: &request.catalog,
|
||||
schema: &request.schema,
|
||||
})?;
|
||||
Ok(schema
|
||||
.rename_table(&request.table_name, request.new_table_name)
|
||||
.await
|
||||
.is_ok())
|
||||
}
|
||||
|
||||
async fn deregister_table(&self, request: DeregisterTableRequest) -> Result<bool> {
|
||||
let schema = self
|
||||
.catalog(&request.catalog)
|
||||
.context(CatalogNotFoundSnafu {
|
||||
let mut catalogs = self.catalogs.write().unwrap();
|
||||
let schema = catalogs
|
||||
.get_mut(&request.catalog)
|
||||
.with_context(|| CatalogNotFoundSnafu {
|
||||
catalog_name: &request.catalog,
|
||||
})?
|
||||
.schema(&request.schema)
|
||||
.await?
|
||||
.get_mut(&request.schema)
|
||||
.with_context(|| SchemaNotFoundSnafu {
|
||||
catalog: &request.catalog,
|
||||
schema: &request.schema,
|
||||
})?;
|
||||
decrement_gauge!(
|
||||
crate::metrics::METRIC_CATALOG_MANAGER_TABLE_COUNT,
|
||||
1.0,
|
||||
&[crate::metrics::db_label(&request.catalog, &request.schema)],
|
||||
);
|
||||
schema
|
||||
.deregister_table(&request.table_name)
|
||||
.await
|
||||
.map(|v| v.is_some())
|
||||
|
||||
// check old and new table names
|
||||
if !schema.contains_key(&request.table_name) {
|
||||
return TableNotFoundSnafu {
|
||||
table_info: request.table_name.to_string(),
|
||||
}
|
||||
.fail()?;
|
||||
}
|
||||
if schema.contains_key(&request.new_table_name) {
|
||||
return TableExistsSnafu {
|
||||
table: &request.new_table_name,
|
||||
}
|
||||
.fail();
|
||||
}
|
||||
|
||||
let table = schema.remove(&request.table_name).unwrap();
|
||||
let _ = schema.insert(request.new_table_name, table);
|
||||
|
||||
Ok(true)
|
||||
}
|
||||
|
||||
async fn deregister_table(&self, request: DeregisterTableRequest) -> Result<()> {
|
||||
let mut catalogs = self.catalogs.write().unwrap();
|
||||
let schema = catalogs
|
||||
.get_mut(&request.catalog)
|
||||
.with_context(|| CatalogNotFoundSnafu {
|
||||
catalog_name: &request.catalog,
|
||||
})?
|
||||
.get_mut(&request.schema)
|
||||
.with_context(|| SchemaNotFoundSnafu {
|
||||
catalog: &request.catalog,
|
||||
schema: &request.schema,
|
||||
})?;
|
||||
let result = schema.remove(&request.table_name);
|
||||
if result.is_some() {
|
||||
decrement_gauge!(
|
||||
crate::metrics::METRIC_CATALOG_MANAGER_TABLE_COUNT,
|
||||
1.0,
|
||||
&[crate::metrics::db_label(&request.catalog, &request.schema)],
|
||||
);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn register_schema(&self, request: RegisterSchemaRequest) -> Result<bool> {
|
||||
let catalog = self
|
||||
.catalog(&request.catalog)
|
||||
.context(CatalogNotFoundSnafu {
|
||||
self.register_schema_sync(request)
|
||||
}
|
||||
|
||||
async fn deregister_schema(&self, request: DeregisterSchemaRequest) -> Result<bool> {
|
||||
let mut catalogs = self.catalogs.write().unwrap();
|
||||
let schemas = catalogs
|
||||
.get_mut(&request.catalog)
|
||||
.with_context(|| CatalogNotFoundSnafu {
|
||||
catalog_name: &request.catalog,
|
||||
})?;
|
||||
catalog
|
||||
.register_schema(request.schema, Arc::new(MemorySchemaProvider::new()))
|
||||
.await?;
|
||||
increment_gauge!(crate::metrics::METRIC_CATALOG_MANAGER_SCHEMA_COUNT, 1.0);
|
||||
let table_count = schemas
|
||||
.remove(&request.schema)
|
||||
.with_context(|| SchemaNotFoundSnafu {
|
||||
catalog: &request.catalog,
|
||||
schema: &request.schema,
|
||||
})?
|
||||
.len();
|
||||
decrement_gauge!(
|
||||
crate::metrics::METRIC_CATALOG_MANAGER_TABLE_COUNT,
|
||||
table_count as f64,
|
||||
&[crate::metrics::db_label(&request.catalog, &request.schema)],
|
||||
);
|
||||
|
||||
decrement_gauge!(
|
||||
crate::metrics::METRIC_CATALOG_MANAGER_SCHEMA_COUNT,
|
||||
1.0,
|
||||
&[crate::metrics::db_label(&request.catalog, &request.schema)],
|
||||
);
|
||||
Ok(true)
|
||||
}
|
||||
|
||||
@@ -159,12 +171,16 @@ impl CatalogManager for MemoryCatalogManager {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn schema(&self, catalog: &str, schema: &str) -> Result<Option<SchemaProviderRef>> {
|
||||
if let Some(c) = self.catalog(catalog) {
|
||||
c.schema(schema).await
|
||||
} else {
|
||||
Ok(None)
|
||||
}
|
||||
async fn schema_exist(&self, catalog: &str, schema: &str) -> Result<bool> {
|
||||
Ok(self
|
||||
.catalogs
|
||||
.read()
|
||||
.unwrap()
|
||||
.get(catalog)
|
||||
.with_context(|| CatalogNotFoundSnafu {
|
||||
catalog_name: catalog,
|
||||
})?
|
||||
.contains_key(schema))
|
||||
}
|
||||
|
||||
async fn table(
|
||||
@@ -173,27 +189,69 @@ impl CatalogManager for MemoryCatalogManager {
|
||||
schema: &str,
|
||||
table_name: &str,
|
||||
) -> Result<Option<TableRef>> {
|
||||
let Some(catalog) = self
|
||||
.catalog(catalog) else { return Ok(None)};
|
||||
let Some(s) = catalog.schema(schema).await? else { return Ok(None) };
|
||||
s.table(table_name).await
|
||||
let result = try {
|
||||
self.catalogs
|
||||
.read()
|
||||
.unwrap()
|
||||
.get(catalog)?
|
||||
.get(schema)?
|
||||
.get(table_name)
|
||||
.cloned()?
|
||||
};
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
async fn catalog(&self, catalog: &str) -> Result<Option<CatalogProviderRef>> {
|
||||
Ok(self.catalogs.read().unwrap().get(catalog).cloned())
|
||||
async fn catalog_exist(&self, catalog: &str) -> Result<bool> {
|
||||
Ok(self.catalogs.read().unwrap().get(catalog).is_some())
|
||||
}
|
||||
|
||||
async fn table_exist(&self, catalog: &str, schema: &str, table: &str) -> Result<bool> {
|
||||
let catalogs = self.catalogs.read().unwrap();
|
||||
Ok(catalogs
|
||||
.get(catalog)
|
||||
.with_context(|| CatalogNotFoundSnafu {
|
||||
catalog_name: catalog,
|
||||
})?
|
||||
.get(schema)
|
||||
.with_context(|| SchemaNotFoundSnafu { catalog, schema })?
|
||||
.contains_key(table))
|
||||
}
|
||||
|
||||
async fn catalog_names(&self) -> Result<Vec<String>> {
|
||||
Ok(self.catalogs.read().unwrap().keys().cloned().collect())
|
||||
}
|
||||
|
||||
async fn register_catalog(
|
||||
&self,
|
||||
name: String,
|
||||
catalog: CatalogProviderRef,
|
||||
) -> Result<Option<CatalogProviderRef>> {
|
||||
increment_gauge!(crate::metrics::METRIC_CATALOG_MANAGER_CATALOG_COUNT, 1.0);
|
||||
self.register_catalog_sync(name, catalog)
|
||||
async fn schema_names(&self, catalog_name: &str) -> Result<Vec<String>> {
|
||||
Ok(self
|
||||
.catalogs
|
||||
.read()
|
||||
.unwrap()
|
||||
.get(catalog_name)
|
||||
.with_context(|| CatalogNotFoundSnafu { catalog_name })?
|
||||
.keys()
|
||||
.cloned()
|
||||
.collect())
|
||||
}
|
||||
|
||||
async fn table_names(&self, catalog_name: &str, schema_name: &str) -> Result<Vec<String>> {
|
||||
Ok(self
|
||||
.catalogs
|
||||
.read()
|
||||
.unwrap()
|
||||
.get(catalog_name)
|
||||
.with_context(|| CatalogNotFoundSnafu { catalog_name })?
|
||||
.get(schema_name)
|
||||
.with_context(|| SchemaNotFoundSnafu {
|
||||
catalog: catalog_name,
|
||||
schema: schema_name,
|
||||
})?
|
||||
.keys()
|
||||
.cloned()
|
||||
.collect())
|
||||
}
|
||||
|
||||
async fn register_catalog(&self, name: String) -> Result<bool> {
|
||||
self.register_catalog_sync(name)
|
||||
}
|
||||
|
||||
fn as_any(&self) -> &dyn Any {
|
||||
@@ -202,206 +260,95 @@ impl CatalogManager for MemoryCatalogManager {
|
||||
}
|
||||
|
||||
impl MemoryCatalogManager {
|
||||
/// Registers a catalog and return `None` if no catalog with the same name was already
|
||||
/// registered, or `Some` with the previously registered catalog.
|
||||
pub fn register_catalog_if_absent(
|
||||
&self,
|
||||
name: String,
|
||||
catalog: CatalogProviderRef,
|
||||
) -> Option<CatalogProviderRef> {
|
||||
/// Registers a catalog and return the catalog already exist
|
||||
pub fn register_catalog_if_absent(&self, name: String) -> bool {
|
||||
let mut catalogs = self.catalogs.write().unwrap();
|
||||
let entry = catalogs.entry(name);
|
||||
match entry {
|
||||
Entry::Occupied(v) => Some(v.get().clone()),
|
||||
Entry::Occupied(_) => true,
|
||||
Entry::Vacant(v) => {
|
||||
v.insert(catalog);
|
||||
None
|
||||
let _ = v.insert(HashMap::new());
|
||||
false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn register_catalog_sync(
|
||||
&self,
|
||||
name: String,
|
||||
catalog: CatalogProviderRef,
|
||||
) -> Result<Option<CatalogProviderRef>> {
|
||||
pub fn register_catalog_sync(&self, name: String) -> Result<bool> {
|
||||
let mut catalogs = self.catalogs.write().unwrap();
|
||||
Ok(catalogs.insert(name, catalog))
|
||||
}
|
||||
|
||||
fn catalog(&self, catalog_name: &str) -> Option<CatalogProviderRef> {
|
||||
self.catalogs.read().unwrap().get(catalog_name).cloned()
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for MemoryCatalogProvider {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
|
||||
/// Simple in-memory implementation of a catalog.
|
||||
pub struct MemoryCatalogProvider {
|
||||
schemas: RwLock<HashMap<String, Arc<dyn SchemaProvider>>>,
|
||||
}
|
||||
|
||||
impl MemoryCatalogProvider {
|
||||
/// Instantiates a new MemoryCatalogProvider with an empty collection of schemas.
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
schemas: RwLock::new(HashMap::new()),
|
||||
match catalogs.entry(name) {
|
||||
Entry::Vacant(e) => {
|
||||
e.insert(HashMap::new());
|
||||
increment_gauge!(crate::metrics::METRIC_CATALOG_MANAGER_CATALOG_COUNT, 1.0);
|
||||
Ok(true)
|
||||
}
|
||||
Entry::Occupied(_) => Ok(false),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn schema_names_sync(&self) -> Result<Vec<String>> {
|
||||
let schemas = self.schemas.read().unwrap();
|
||||
Ok(schemas.keys().cloned().collect())
|
||||
pub fn register_schema_sync(&self, request: RegisterSchemaRequest) -> Result<bool> {
|
||||
let mut catalogs = self.catalogs.write().unwrap();
|
||||
let catalog = catalogs
|
||||
.get_mut(&request.catalog)
|
||||
.with_context(|| CatalogNotFoundSnafu {
|
||||
catalog_name: &request.catalog,
|
||||
})?;
|
||||
|
||||
match catalog.entry(request.schema) {
|
||||
Entry::Vacant(e) => {
|
||||
e.insert(HashMap::new());
|
||||
increment_gauge!(crate::metrics::METRIC_CATALOG_MANAGER_SCHEMA_COUNT, 1.0);
|
||||
Ok(true)
|
||||
}
|
||||
Entry::Occupied(_) => Ok(false),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn register_schema_sync(
|
||||
&self,
|
||||
name: String,
|
||||
schema: SchemaProviderRef,
|
||||
) -> Result<Option<SchemaProviderRef>> {
|
||||
let mut schemas = self.schemas.write().unwrap();
|
||||
ensure!(
|
||||
!schemas.contains_key(&name),
|
||||
error::SchemaExistsSnafu { schema: &name }
|
||||
pub fn register_table_sync(&self, request: RegisterTableRequest) -> Result<bool> {
|
||||
let mut catalogs = self.catalogs.write().unwrap();
|
||||
let schema = catalogs
|
||||
.get_mut(&request.catalog)
|
||||
.with_context(|| CatalogNotFoundSnafu {
|
||||
catalog_name: &request.catalog,
|
||||
})?
|
||||
.get_mut(&request.schema)
|
||||
.with_context(|| SchemaNotFoundSnafu {
|
||||
catalog: &request.catalog,
|
||||
schema: &request.schema,
|
||||
})?;
|
||||
|
||||
if schema.contains_key(&request.table_name) {
|
||||
return TableExistsSnafu {
|
||||
table: &request.table_name,
|
||||
}
|
||||
.fail();
|
||||
}
|
||||
schema.insert(request.table_name, request.table);
|
||||
increment_gauge!(
|
||||
crate::metrics::METRIC_CATALOG_MANAGER_TABLE_COUNT,
|
||||
1.0,
|
||||
&[crate::metrics::db_label(&request.catalog, &request.schema)],
|
||||
);
|
||||
increment_gauge!(crate::metrics::METRIC_CATALOG_MANAGER_SCHEMA_COUNT, 1.0);
|
||||
Ok(schemas.insert(name, schema))
|
||||
Ok(true)
|
||||
}
|
||||
|
||||
pub fn schema_sync(&self, name: &str) -> Result<Option<Arc<dyn SchemaProvider>>> {
|
||||
let schemas = self.schemas.read().unwrap();
|
||||
Ok(schemas.get(name).cloned())
|
||||
}
|
||||
}
#[async_trait::async_trait]
impl CatalogProvider for MemoryCatalogProvider {
    fn as_any(&self) -> &dyn Any {
        self
    }

    async fn schema_names(&self) -> Result<Vec<String>> {
        self.schema_names_sync()
    }

    async fn register_schema(
        &self,
        name: String,
        schema: SchemaProviderRef,
    ) -> Result<Option<SchemaProviderRef>> {
        self.register_schema_sync(name, schema)
    }

    async fn schema(&self, name: &str) -> Result<Option<Arc<dyn SchemaProvider>>> {
        self.schema_sync(name)
    }
}

/// Simple in-memory implementation of a schema.
pub struct MemorySchemaProvider {
    tables: RwLock<HashMap<String, TableRef>>,
}

impl MemorySchemaProvider {
    /// Instantiates a new MemorySchemaProvider with an empty collection of tables.
    pub fn new() -> Self {
        Self {
            tables: RwLock::new(HashMap::new()),
        }
    }

    pub fn register_table_sync(&self, name: String, table: TableRef) -> Result<Option<TableRef>> {
        let mut tables = self.tables.write().unwrap();
        if let Some(existing) = tables.get(name.as_str()) {
            // If a table with the same name but a different table id exists, it's a fatal bug.
            if existing.table_info().ident.table_id != table.table_info().ident.table_id {
                error!(
                    "Unexpected table register: {:?}, existing: {:?}",
                    table.table_info(),
                    existing.table_info()
                );
                return TableExistsSnafu { table: name }.fail()?;
            }
            Ok(Some(existing.clone()))
        } else {
            Ok(tables.insert(name, table))
        }
    }

    pub fn rename_table_sync(&self, name: &str, new_name: String) -> Result<TableRef> {
        let mut tables = self.tables.write().unwrap();
        let Some(table) = tables.remove(name) else {
            return TableNotFoundSnafu {
                table_info: name.to_string(),
            }
            .fail()?;

    #[cfg(any(test, feature = "testing"))]
    pub fn new_with_table(table: TableRef) -> Self {
        let manager = Self::default();
        let request = RegisterTableRequest {
            catalog: DEFAULT_CATALOG_NAME.to_string(),
            schema: DEFAULT_SCHEMA_NAME.to_string(),
            table_name: table.table_info().name.clone(),
            table_id: table.table_info().ident.table_id,
            table,
        };
        let e = match tables.entry(new_name) {
            Entry::Vacant(e) => e,
            Entry::Occupied(e) => {
                return TableExistsSnafu { table: e.key() }.fail();
            }
        };
        e.insert(table.clone());
        Ok(table)
    }

    pub fn table_exist_sync(&self, name: &str) -> Result<bool> {
        let tables = self.tables.read().unwrap();
        Ok(tables.contains_key(name))
    }

    pub fn deregister_table_sync(&self, name: &str) -> Result<Option<TableRef>> {
        let mut tables = self.tables.write().unwrap();
        Ok(tables.remove(name))
    }
}

impl Default for MemorySchemaProvider {
    fn default() -> Self {
        Self::new()
    }
}

#[async_trait]
impl SchemaProvider for MemorySchemaProvider {
    fn as_any(&self) -> &dyn Any {
        self
    }

    async fn table_names(&self) -> Result<Vec<String>> {
        let tables = self.tables.read().unwrap();
        Ok(tables.keys().cloned().collect())
    }

    async fn table(&self, name: &str) -> Result<Option<TableRef>> {
        let tables = self.tables.read().unwrap();
        Ok(tables.get(name).cloned())
    }

    async fn register_table(&self, name: String, table: TableRef) -> Result<Option<TableRef>> {
        self.register_table_sync(name, table)
    }

    async fn rename_table(&self, name: &str, new_name: String) -> Result<TableRef> {
        self.rename_table_sync(name, new_name)
    }

    async fn deregister_table(&self, name: &str) -> Result<Option<TableRef>> {
        self.deregister_table_sync(name)
    }

    async fn table_exist(&self, name: &str) -> Result<bool> {
        self.table_exist_sync(name)
        let _ = manager.register_table_sync(request).unwrap();
        manager
    }
}

/// Creates a memory catalog list that contains a numbers table, for tests.
pub fn new_memory_catalog_list() -> Result<Arc<MemoryCatalogManager>> {
pub fn new_memory_catalog_manager() -> Result<Arc<MemoryCatalogManager>> {
    Ok(Arc::new(MemoryCatalogManager::default()))
}

@@ -409,89 +356,100 @@ pub fn new_memory_catalog_list() -> Result<Arc<MemoryCatalogManager>> {
mod tests {
    use common_catalog::consts::*;
    use common_error::ext::ErrorExt;
    use common_error::prelude::StatusCode;
    use table::table::numbers::NumbersTable;
    use common_error::status_code::StatusCode;
    use table::table::numbers::{NumbersTable, NUMBERS_TABLE_NAME};

    use super::*;

    #[tokio::test]
    async fn test_new_memory_catalog_list() {
        let catalog_list = new_memory_catalog_list().unwrap();
        let default_catalog = CatalogManager::catalog(&*catalog_list, DEFAULT_CATALOG_NAME)
            .await
            .unwrap()
            .unwrap();
        let catalog_list = new_memory_catalog_manager().unwrap();

        let default_schema = default_catalog
            .schema(DEFAULT_SCHEMA_NAME)
            .await
            .unwrap()
            .unwrap();
        let register_request = RegisterTableRequest {
            catalog: DEFAULT_CATALOG_NAME.to_string(),
            schema: DEFAULT_SCHEMA_NAME.to_string(),
            table_name: NUMBERS_TABLE_NAME.to_string(),
            table_id: NUMBERS_TABLE_ID,
            table: Arc::new(NumbersTable::default()),
        };

        default_schema
            .register_table("numbers".to_string(), Arc::new(NumbersTable::default()))
        let _ = catalog_list.register_table(register_request).await.unwrap();
        let table = catalog_list
            .table(
                DEFAULT_CATALOG_NAME,
                DEFAULT_SCHEMA_NAME,
                NUMBERS_TABLE_NAME,
            )
            .await
            .unwrap();

        let table = default_schema.table("numbers").await.unwrap();
        assert!(table.is_some());
        assert!(default_schema.table("not_exists").await.unwrap().is_none());
    }

    #[tokio::test]
    async fn test_mem_provider() {
        let provider = MemorySchemaProvider::new();
        let table_name = "numbers";
        assert!(!provider.table_exist_sync(table_name).unwrap());
        provider.deregister_table_sync(table_name).unwrap();
        let test_table = NumbersTable::default();
        // register table successfully
        assert!(provider
            .register_table_sync(table_name.to_string(), Arc::new(test_table))
        let _ = table.unwrap();
        assert!(catalog_list
            .table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, "not_exists")
            .await
            .unwrap()
            .is_none());
        assert!(provider.table_exist_sync(table_name).unwrap());
        let other_table = NumbersTable::new(12);
        let result = provider.register_table_sync(table_name.to_string(), Arc::new(other_table));
        let err = result.err().unwrap();
        assert_eq!(StatusCode::TableAlreadyExists, err.status_code());
    }

    #[tokio::test]
    async fn test_mem_provider_rename_table() {
        let provider = MemorySchemaProvider::new();
        let table_name = "num";
        assert!(!provider.table_exist_sync(table_name).unwrap());
        let test_table: TableRef = Arc::new(NumbersTable::default());
    async fn test_mem_manager_rename_table() {
        let catalog = MemoryCatalogManager::default();
        let table_name = "test_table";
        assert!(!catalog
            .table_exist(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, table_name)
            .await
            .unwrap());
        // register test table
        assert!(provider
            .register_table_sync(table_name.to_string(), test_table.clone())
            .unwrap()
            .is_none());
        assert!(provider.table_exist_sync(table_name).unwrap());
        let table_id = 2333;
        let register_request = RegisterTableRequest {
            catalog: DEFAULT_CATALOG_NAME.to_string(),
            schema: DEFAULT_SCHEMA_NAME.to_string(),
            table_name: table_name.to_string(),
            table_id,
            table: Arc::new(NumbersTable::new(table_id)),
        };
        assert!(catalog.register_table(register_request).await.unwrap());
        assert!(catalog
            .table_exist(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, table_name)
            .await
            .unwrap());

        // rename test table
        let new_table_name = "numbers";
        provider
            .rename_table_sync(table_name, new_table_name.to_string())
            .unwrap();
        let new_table_name = "test_table_renamed";
        let rename_request = RenameTableRequest {
            catalog: DEFAULT_CATALOG_NAME.to_string(),
            schema: DEFAULT_SCHEMA_NAME.to_string(),
            table_name: table_name.to_string(),
            new_table_name: new_table_name.to_string(),
            table_id,
        };
        let _ = catalog.rename_table(rename_request).await.unwrap();

        // test old table name not exist
        assert!(!provider.table_exist_sync(table_name).unwrap());
        provider.deregister_table_sync(table_name).unwrap();
        assert!(!catalog
            .table_exist(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, table_name)
            .await
            .unwrap());

        // test new table name exists
        assert!(provider.table_exist_sync(new_table_name).unwrap());
        let registered_table = provider.table(new_table_name).await.unwrap().unwrap();
        assert_eq!(
            registered_table.table_info().ident.table_id,
            test_table.table_info().ident.table_id
        );
        assert!(catalog
            .table_exist(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, new_table_name)
            .await
            .unwrap());
        let registered_table = catalog
            .table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, new_table_name)
            .await
            .unwrap()
            .unwrap();
        assert_eq!(registered_table.table_info().ident.table_id, table_id);

        let other_table = Arc::new(NumbersTable::new(2));
        let result = provider
            .register_table(new_table_name.to_string(), other_table)
            .await;
        let dup_register_request = RegisterTableRequest {
            catalog: DEFAULT_CATALOG_NAME.to_string(),
            schema: DEFAULT_SCHEMA_NAME.to_string(),
            table_name: new_table_name.to_string(),
            table_id: table_id + 1,
            table: Arc::new(NumbersTable::new(table_id + 1)),
        };
        let result = catalog.register_table(dup_register_request).await;
        let err = result.err().unwrap();
        assert_eq!(StatusCode::TableAlreadyExists, err.status_code());
    }

@@ -499,16 +457,11 @@ mod tests {
    #[tokio::test]
    async fn test_catalog_rename_table() {
        let catalog = MemoryCatalogManager::default();
        let schema = catalog
            .schema(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME)
            .await
            .unwrap()
            .unwrap();

        // register table
        let table_name = "num";
        let table_id = 2333;
        let table: TableRef = Arc::new(NumbersTable::new(table_id));

        // register table
        let register_table_req = RegisterTableRequest {
            catalog: DEFAULT_CATALOG_NAME.to_string(),
            schema: DEFAULT_SCHEMA_NAME.to_string(),
@@ -517,7 +470,11 @@ mod tests {
            table,
        };
        assert!(catalog.register_table(register_table_req).await.unwrap());
        assert!(schema.table_exist(table_name).await.unwrap());
        assert!(catalog
            .table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, table_name)
            .await
            .unwrap()
            .is_some());

        // rename table
        let new_table_name = "numbers_new";
@@ -529,8 +486,16 @@ mod tests {
            table_id,
        };
        assert!(catalog.rename_table(rename_table_req).await.unwrap());
        assert!(!schema.table_exist(table_name).await.unwrap());
        assert!(schema.table_exist(new_table_name).await.unwrap());
        assert!(catalog
            .table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, table_name)
            .await
            .unwrap()
            .is_none());
        assert!(catalog
            .table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, new_table_name)
            .await
            .unwrap()
            .is_some());

        let registered_table = catalog
            .table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, new_table_name)
@@ -543,50 +508,80 @@ mod tests {
    #[test]
    pub fn test_register_if_absent() {
        let list = MemoryCatalogManager::default();
        assert!(list
            .register_catalog_if_absent(
                "test_catalog".to_string(),
                Arc::new(MemoryCatalogProvider::new())
            )
            .is_none());
        list.register_catalog_if_absent(
            "test_catalog".to_string(),
            Arc::new(MemoryCatalogProvider::new()),
        )
        .unwrap();
        list.as_any()
            .downcast_ref::<MemoryCatalogManager>()
            .unwrap();
        assert!(!list.register_catalog_if_absent("test_catalog".to_string(),));
        assert!(list.register_catalog_if_absent("test_catalog".to_string()));
    }

    #[tokio::test]
    pub async fn test_catalog_deregister_table() {
        let catalog = MemoryCatalogManager::default();
        let schema = catalog
            .schema(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME)
            .await
            .unwrap()
            .unwrap();
        let table_name = "foo_table";

        let register_table_req = RegisterTableRequest {
            catalog: DEFAULT_CATALOG_NAME.to_string(),
            schema: DEFAULT_SCHEMA_NAME.to_string(),
            table_name: "numbers".to_string(),
            table_name: table_name.to_string(),
            table_id: 2333,
            table: Arc::new(NumbersTable::default()),
        };
        catalog.register_table(register_table_req).await.unwrap();
        assert!(schema.table_exist("numbers").await.unwrap());
        let _ = catalog.register_table(register_table_req).await.unwrap();
        assert!(catalog
            .table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, table_name)
            .await
            .unwrap()
            .is_some());

        let deregister_table_req = DeregisterTableRequest {
            catalog: DEFAULT_CATALOG_NAME.to_string(),
            schema: DEFAULT_SCHEMA_NAME.to_string(),
            table_name: "numbers".to_string(),
            table_name: table_name.to_string(),
        };
        catalog
            .deregister_table(deregister_table_req)
            .await
            .unwrap();
        assert!(!schema.table_exist("numbers").await.unwrap());
        assert!(catalog
            .table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, table_name)
            .await
            .unwrap()
            .is_none());
    }

    #[tokio::test]
    async fn test_catalog_deregister_schema() {
        let catalog = MemoryCatalogManager::default();

        // Registers a catalog, a schema, and a table.
        let catalog_name = "foo_catalog".to_string();
        let schema_name = "foo_schema".to_string();
        let table_name = "foo_table".to_string();
        let schema = RegisterSchemaRequest {
            catalog: catalog_name.clone(),
            schema: schema_name.clone(),
        };
        let table = RegisterTableRequest {
            catalog: catalog_name.clone(),
            schema: schema_name.clone(),
            table_name,
            table_id: 0,
            table: Arc::new(NumbersTable::default()),
        };
        catalog
            .register_catalog(catalog_name.clone())
            .await
            .unwrap();
        catalog.register_schema(schema).await.unwrap();
        catalog.register_table(table).await.unwrap();

        let request = DeregisterSchemaRequest {
            catalog: catalog_name.clone(),
            schema: schema_name.clone(),
        };

        assert!(catalog.deregister_schema(request).await.unwrap());
        assert!(!catalog
            .schema_exist(&catalog_name, &schema_name)
            .await
            .unwrap());
    }
}
@@ -12,76 +12,17 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use std::any::Any;
use std::fmt::Debug;
use std::pin::Pin;
use std::sync::Arc;

pub use client::CachedMetaKvBackend;
use futures::Stream;
use futures_util::StreamExt;
pub use manager::{RemoteCatalogManager, RemoteCatalogProvider, RemoteSchemaProvider};

use crate::error::Error;
pub use client::{CachedMetaKvBackend, MetaKvBackend};
pub use manager::RemoteCatalogManager;

mod client;
mod manager;

#[cfg(feature = "testing")]
pub mod mock;

#[derive(Debug, Clone)]
pub struct Kv(pub Vec<u8>, pub Vec<u8>);

pub type ValueIter<'a, E> = Pin<Box<dyn Stream<Item = Result<Kv, E>> + Send + 'a>>;

#[async_trait::async_trait]
pub trait KvBackend: Send + Sync {
    fn range<'a, 'b>(&'a self, key: &[u8]) -> ValueIter<'b, Error>
    where
        'a: 'b;

    async fn set(&self, key: &[u8], val: &[u8]) -> Result<(), Error>;

    /// Compares and sets the value of a key. `expect` is the expected value; if the backend's
    /// current value associated with the key is the same as `expect`, the value will be updated
    /// to `val`.
    ///
    /// - If the compare-and-set operation successfully updated the value, this method returns `Ok(Ok(()))`.
    /// - If the associated value is not the same as `expect`, no value is updated and
    ///   `Ok(Err(Option<Vec<u8>>))` is returned; the inner `Option<Vec<u8>>` carries the current
    ///   value of the key (`None` if the key is absent).
    /// - If any error happens during the operation, an `Err(Error)` is returned.
    async fn compare_and_set(
        &self,
        key: &[u8],
        expect: &[u8],
        val: &[u8],
    ) -> Result<Result<(), Option<Vec<u8>>>, Error>;

    async fn delete_range(&self, key: &[u8], end: &[u8]) -> Result<(), Error>;

    async fn delete(&self, key: &[u8]) -> Result<(), Error> {
        self.delete_range(key, &[]).await
    }

    /// Default `get` is implemented based on the `range` method.
    async fn get(&self, key: &[u8]) -> Result<Option<Kv>, Error> {
        let mut iter = self.range(key);
        while let Some(r) = iter.next().await {
            let kv = r?;
            if kv.0 == key {
                return Ok(Some(kv));
            }
        }
        Ok(None)
    }

    /// MoveValue atomically renames the key to the given updated key.
    async fn move_value(&self, from_key: &[u8], to_key: &[u8]) -> Result<(), Error>;

    fn as_any(&self) -> &dyn Any;
}

pub type KvBackendRef = Arc<dyn KvBackend>;
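To illustrate the contract documented on `compare_and_set` above: the outer `Result` carries backend errors, while the inner one carries the CAS outcome. A hedged usage sketch against any implementor of the `KvBackend` trait defined above (the helper name is hypothetical):

// Sketch only: attempts one CAS and reports whether it won.
async fn set_if_unchanged(
    backend: &dyn KvBackend,
    key: &[u8],
    old: &[u8],
    new: &[u8],
) -> Result<bool, Error> {
    match backend.compare_and_set(key, old, new).await? {
        // The stored value was `old`; it is now `new`.
        Ok(()) => Ok(true),
        // CAS lost: `current` is the actual stored value (None if absent).
        Err(current) => {
            let _ = current;
            Ok(false)
        }
    }
}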
pub mod region_alive_keeper;

#[async_trait::async_trait]
pub trait KvCacheInvalidator: Send + Sync {
@@ -89,71 +30,3 @@ pub trait KvCacheInvalidator: Send + Sync {
}

pub type KvCacheInvalidatorRef = Arc<dyn KvCacheInvalidator>;

#[cfg(test)]
mod tests {
    use async_stream::stream;

    use super::*;

    struct MockKvBackend {}

    #[async_trait::async_trait]
    impl KvBackend for MockKvBackend {
        fn range<'a, 'b>(&'a self, _key: &[u8]) -> ValueIter<'b, Error>
        where
            'a: 'b,
        {
            Box::pin(stream!({
                for i in 0..3 {
                    yield Ok(Kv(
                        i.to_string().as_bytes().to_vec(),
                        i.to_string().as_bytes().to_vec(),
                    ))
                }
            }))
        }

        async fn set(&self, _key: &[u8], _val: &[u8]) -> Result<(), Error> {
            unimplemented!()
        }

        async fn compare_and_set(
            &self,
            _key: &[u8],
            _expect: &[u8],
            _val: &[u8],
        ) -> Result<Result<(), Option<Vec<u8>>>, Error> {
            unimplemented!()
        }

        async fn delete_range(&self, _key: &[u8], _end: &[u8]) -> Result<(), Error> {
            unimplemented!()
        }

        async fn move_value(&self, _from_key: &[u8], _to_key: &[u8]) -> Result<(), Error> {
            unimplemented!()
        }

        fn as_any(&self) -> &dyn Any {
            self
        }
    }

    #[tokio::test]
    async fn test_get() {
        let backend = MockKvBackend {};

        let result = backend.get(0.to_string().as_bytes()).await;
        assert_eq!(0.to_string().as_bytes(), result.unwrap().unwrap().0);

        let result = backend.get(1.to_string().as_bytes()).await;
        assert_eq!(1.to_string().as_bytes(), result.unwrap().unwrap().0);

        let result = backend.get(2.to_string().as_bytes()).await;
        assert_eq!(2.to_string().as_bytes(), result.unwrap().unwrap().0);

        let result = backend.get(3.to_string().as_bytes()).await;
        assert!(result.unwrap().is_none());
    }
}

@@ -17,54 +17,82 @@ use std::fmt::Debug;
use std::sync::Arc;
use std::time::Duration;

use async_stream::stream;
use common_error::ext::BoxedError;
use common_meta::error::Error::{CacheNotGet, GetKvCache};
use common_meta::error::{CacheNotGetSnafu, Error, MetaSrvSnafu, Result};
use common_meta::kv_backend::{KvBackend, KvBackendRef, TxnService};
use common_meta::rpc::store::{
    CompareAndPutRequest, DeleteRangeRequest, MoveValueRequest, PutRequest, RangeRequest,
    BatchDeleteRequest, BatchDeleteResponse, BatchGetRequest, BatchGetResponse, BatchPutRequest,
    BatchPutResponse, CompareAndPutRequest, CompareAndPutResponse, DeleteRangeRequest,
    DeleteRangeResponse, MoveValueRequest, MoveValueResponse, PutRequest, PutResponse,
    RangeRequest, RangeResponse,
};
use common_telemetry::{info, timer};
use common_meta::rpc::KeyValue;
use common_telemetry::timer;
use meta_client::client::MetaClient;
use moka::future::{Cache, CacheBuilder};
use snafu::ResultExt;
use snafu::{OptionExt, ResultExt};

use super::KvCacheInvalidator;
use crate::error::{Error, GenericSnafu, MetaSrvSnafu, Result};
use crate::metrics::{METRIC_CATALOG_KV_GET, METRIC_CATALOG_KV_REMOTE_GET};
use crate::remote::{Kv, KvBackend, KvBackendRef, ValueIter};

const CACHE_MAX_CAPACITY: u64 = 10000;
const CACHE_TTL_SECOND: u64 = 10 * 60;
const CACHE_TTI_SECOND: u64 = 5 * 60;

pub type CacheBackendRef = Arc<Cache<Vec<u8>, Option<Kv>>>;
pub type CacheBackendRef = Arc<Cache<Vec<u8>, KeyValue>>;

pub struct CachedMetaKvBackend {
    kv_backend: KvBackendRef,
    cache: CacheBackendRef,
    name: String,
}

impl TxnService for CachedMetaKvBackend {
    type Error = Error;
}

#[async_trait::async_trait]
impl KvBackend for CachedMetaKvBackend {
    fn range<'a, 'b>(&'a self, key: &[u8]) -> ValueIter<'b, Error>
    where
        'a: 'b,
    {
        self.kv_backend.range(key)
    fn name(&self) -> &str {
        &self.name
    }

    async fn get(&self, key: &[u8]) -> Result<Option<Kv>> {
    async fn range(&self, req: RangeRequest) -> Result<RangeResponse> {
        self.kv_backend.range(req).await
    }

    async fn get(&self, key: &[u8]) -> Result<Option<KeyValue>> {
        let _timer = timer!(METRIC_CATALOG_KV_GET);

        let init = async {
            let _timer = timer!(METRIC_CATALOG_KV_REMOTE_GET);

            self.kv_backend.get(key).await
            self.kv_backend.get(key).await.map(|val| {
                val.with_context(|| CacheNotGetSnafu {
                    key: String::from_utf8_lossy(key),
                })
            })?
        };

        let schema_provider = self.cache.try_get_with_by_ref(key, init).await;
        schema_provider.map_err(|e| GenericSnafu { msg: e.to_string() }.build())
        // currently moka doesn't have `optionally_try_get_with_by_ref`
        // TODO(fys): change to moka method when available
        // https://github.com/moka-rs/moka/issues/254
        match self.cache.try_get_with_by_ref(key, init).await {
            Ok(val) => Ok(Some(val)),
            Err(e) => match e.as_ref() {
                CacheNotGet { .. } => Ok(None),
                _ => Err(e),
            },
        }
        .map_err(|e| GetKvCache {
            err_msg: e.to_string(),
        })
    }
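The cached `get` above works around moka's lack of an optional loader by surfacing a missing key as a dedicated `CacheNotGet` error, so the absence is never cached, then translating it back into `None` for callers. A standalone sketch of the same pattern under stated assumptions (a hypothetical error type; moka's `Cache::try_get_with` API, which returns `Result<V, Arc<E>>`):

use moka::future::Cache;

// Hypothetical stand-ins for CacheNotGet / GetKvCache.
#[derive(Debug, Clone)]
enum LoadError {
    NotFound,
    Other(String),
}

// Only successfully loaded values are cached; absence is reported as an
// error so `try_get_with` will not cache it, then mapped back to None.
async fn cached_lookup(
    cache: &Cache<String, i32>,
    key: String,
    load: impl std::future::Future<Output = Option<i32>>,
) -> Result<Option<i32>, LoadError> {
    let init = async { load.await.ok_or(LoadError::NotFound) };
    match cache.try_get_with(key, init).await {
        Ok(v) => Ok(Some(v)),
        Err(e) => match e.as_ref() {
            LoadError::NotFound => Ok(None),
            other => Err(other.clone()),
        },
    }
}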
    async fn set(&self, key: &[u8], val: &[u8]) -> Result<()> {
        let ret = self.kv_backend.set(key, val).await;
    async fn put(&self, req: PutRequest) -> Result<PutResponse> {
        let key = &req.key.clone();

        let ret = self.kv_backend.put(req).await;

        if ret.is_ok() {
            self.invalidate_key(key).await;
@@ -73,8 +101,72 @@ impl KvBackend for CachedMetaKvBackend {
        ret
    }

    async fn delete(&self, key: &[u8]) -> Result<()> {
        let ret = self.kv_backend.delete_range(key, &[]).await;
    async fn batch_put(&self, req: BatchPutRequest) -> Result<BatchPutResponse> {
        let keys = req
            .kvs
            .iter()
            .map(|kv| kv.key().to_vec())
            .collect::<Vec<_>>();

        let resp = self.kv_backend.batch_put(req).await;

        if resp.is_ok() {
            for key in keys {
                self.invalidate_key(&key).await;
            }
        }

        resp
    }

    async fn delete_range(&self, mut req: DeleteRangeRequest) -> Result<DeleteRangeResponse> {
        let prev_kv = req.prev_kv;

        req.prev_kv = true;
        let resp = self.kv_backend.delete_range(req).await;
        match resp {
            Ok(mut resp) => {
                for prev_kv in resp.prev_kvs.iter() {
                    self.invalidate_key(prev_kv.key()).await;
                }

                if !prev_kv {
                    resp.prev_kvs = vec![];
                }
                Ok(resp)
            }
            Err(e) => Err(e),
        }
    }

    async fn batch_delete(&self, mut req: BatchDeleteRequest) -> Result<BatchDeleteResponse> {
        let prev_kv = req.prev_kv;

        req.prev_kv = true;
        let resp = self.kv_backend.batch_delete(req).await;
        match resp {
            Ok(mut resp) => {
                for prev_kv in resp.prev_kvs.iter() {
                    self.invalidate_key(prev_kv.key()).await;
                }

                if !prev_kv {
                    resp.prev_kvs = vec![];
                }
                Ok(resp)
            }
            Err(e) => Err(e),
        }
    }

    async fn batch_get(&self, req: BatchGetRequest) -> Result<BatchGetResponse> {
        self.kv_backend.batch_get(req).await
    }

    async fn compare_and_put(&self, req: CompareAndPutRequest) -> Result<CompareAndPutResponse> {
        let key = &req.key.clone();

        let ret = self.kv_backend.compare_and_put(req).await;

        if ret.is_ok() {
            self.invalidate_key(key).await;
@@ -83,28 +175,11 @@ impl KvBackend for CachedMetaKvBackend {
        ret
    }

    async fn delete_range(&self, _key: &[u8], _end: &[u8]) -> Result<()> {
        // TODO(fys): implement it
        unimplemented!()
    }
    async fn move_value(&self, req: MoveValueRequest) -> Result<MoveValueResponse> {
        let from_key = &req.from_key.clone();
        let to_key = &req.to_key.clone();

    async fn compare_and_set(
        &self,
        key: &[u8],
        expect: &[u8],
        val: &[u8],
    ) -> Result<std::result::Result<(), Option<Vec<u8>>>> {
        let ret = self.kv_backend.compare_and_set(key, expect, val).await;

        if ret.is_ok() {
            self.invalidate_key(key).await;
        }

        ret
    }

    async fn move_value(&self, from_key: &[u8], to_key: &[u8]) -> Result<()> {
        let ret = self.kv_backend.move_value(from_key, to_key).await;
        let ret = self.kv_backend.move_value(req).await;

        if ret.is_ok() {
            self.invalidate_key(from_key).await;
@@ -128,15 +203,8 @@ impl KvCacheInvalidator for CachedMetaKvBackend {

impl CachedMetaKvBackend {
    pub fn new(client: Arc<MetaClient>) -> Self {
        let cache = Arc::new(
            CacheBuilder::new(CACHE_MAX_CAPACITY)
                .time_to_live(Duration::from_secs(CACHE_TTL_SECOND))
                .time_to_idle(Duration::from_secs(CACHE_TTI_SECOND))
                .build(),
        );
        let kv_backend = Arc::new(MetaKvBackend { client });

        Self { kv_backend, cache }
        Self::wrap(kv_backend)
    }

    pub fn wrap(kv_backend: KvBackendRef) -> Self {
@@ -147,7 +215,12 @@ impl CachedMetaKvBackend {
                .build(),
        );

        Self { kv_backend, cache }
        let name = format!("CachedKvBackend({})", kv_backend.name());
        Self {
            kv_backend,
            cache,
            name,
        }
    }

    pub fn cache(&self) -> &CacheBackendRef {
@@ -160,88 +233,97 @@ pub struct MetaKvBackend {
    pub client: Arc<MetaClient>,
}

impl TxnService for MetaKvBackend {
    type Error = Error;
}

/// Implements the `KvBackend` trait for `MetaKvBackend` instead of opendal's `Accessor`, since
/// `MetaClient`'s range method can return both keys and values, which reduces IO overhead
/// compared to `Accessor`'s list and get methods.
#[async_trait::async_trait]
impl KvBackend for MetaKvBackend {
    fn range<'a, 'b>(&'a self, key: &[u8]) -> ValueIter<'b, Error>
    where
        'a: 'b,
    {
        let key = key.to_vec();
        Box::pin(stream!({
            let mut resp = self
                .client
                .range(RangeRequest::new().with_prefix(key))
                .await
                .context(MetaSrvSnafu)?;
            let kvs = resp.take_kvs();
            for mut kv in kvs.into_iter() {
                yield Ok(Kv(kv.take_key(), kv.take_value()))
            }
        }))
    fn name(&self) -> &str {
        "MetaKvBackend"
    }

    async fn get(&self, key: &[u8]) -> Result<Option<Kv>> {
    async fn range(&self, req: RangeRequest) -> Result<RangeResponse> {
        self.client
            .range(req)
            .await
            .map_err(BoxedError::new)
            .context(MetaSrvSnafu)
    }

    async fn get(&self, key: &[u8]) -> Result<Option<KeyValue>> {
        let mut response = self
            .client
            .range(RangeRequest::new().with_key(key))
            .await
            .map_err(BoxedError::new)
            .context(MetaSrvSnafu)?;
        Ok(response
            .take_kvs()
            .get_mut(0)
            .map(|kv| Kv(kv.take_key(), kv.take_value())))
        Ok(response.take_kvs().get_mut(0).map(|kv| KeyValue {
            key: kv.take_key(),
            value: kv.take_value(),
        }))
    }

    async fn set(&self, key: &[u8], val: &[u8]) -> Result<()> {
        let req = PutRequest::new()
            .with_key(key.to_vec())
            .with_value(val.to_vec());
        let _ = self.client.put(req).await.context(MetaSrvSnafu)?;
        Ok(())
    async fn batch_put(&self, req: BatchPutRequest) -> Result<BatchPutResponse> {
        self.client
            .batch_put(req)
            .await
            .map_err(BoxedError::new)
            .context(MetaSrvSnafu)
    }

    async fn delete_range(&self, key: &[u8], end: &[u8]) -> Result<()> {
        let req = DeleteRangeRequest::new().with_range(key.to_vec(), end.to_vec());
        let resp = self.client.delete_range(req).await.context(MetaSrvSnafu)?;
        info!(
            "Delete range, key: {}, end: {}, deleted: {}",
            String::from_utf8_lossy(key),
            String::from_utf8_lossy(end),
            resp.deleted()
        );

        Ok(())
    async fn put(&self, req: PutRequest) -> Result<PutResponse> {
        self.client
            .put(req)
            .await
            .map_err(BoxedError::new)
            .context(MetaSrvSnafu)
    }

    async fn compare_and_set(
    async fn delete_range(&self, req: DeleteRangeRequest) -> Result<DeleteRangeResponse> {
        self.client
            .delete_range(req)
            .await
            .map_err(BoxedError::new)
            .context(MetaSrvSnafu)
    }

    async fn batch_delete(&self, req: BatchDeleteRequest) -> Result<BatchDeleteResponse> {
        self.client
            .batch_delete(req)
            .await
            .map_err(BoxedError::new)
            .context(MetaSrvSnafu)
    }

    async fn batch_get(&self, req: BatchGetRequest) -> Result<BatchGetResponse> {
        self.client
            .batch_get(req)
            .await
            .map_err(BoxedError::new)
            .context(MetaSrvSnafu)
    }

    async fn compare_and_put(
        &self,
        key: &[u8],
        expect: &[u8],
        val: &[u8],
    ) -> Result<std::result::Result<(), Option<Vec<u8>>>> {
        let request = CompareAndPutRequest::new()
            .with_key(key.to_vec())
            .with_expect(expect.to_vec())
            .with_value(val.to_vec());
        let mut response = self
            .client
        request: CompareAndPutRequest,
    ) -> Result<CompareAndPutResponse> {
        self.client
            .compare_and_put(request)
            .await
            .context(MetaSrvSnafu)?;
        if response.is_success() {
            Ok(Ok(()))
        } else {
            Ok(Err(response.take_prev_kv().map(|v| v.value().to_vec())))
        }
            .map_err(BoxedError::new)
            .context(MetaSrvSnafu)
    }

    async fn move_value(&self, from_key: &[u8], to_key: &[u8]) -> Result<()> {
        let req = MoveValueRequest::new(from_key, to_key);
        self.client.move_value(req).await.context(MetaSrvSnafu)?;
        Ok(())
    async fn move_value(&self, req: MoveValueRequest) -> Result<MoveValueResponse> {
        self.client
            .move_value(req)
            .await
            .map_err(BoxedError::new)
            .context(MetaSrvSnafu)
    }

    fn as_any(&self) -> &dyn Any {

(File diff suppressed because it is too large.)
@@ -12,160 +12,24 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use std::any::Any;
use std::collections::btree_map::Entry;
use std::collections::{BTreeMap, HashMap};
use std::fmt::{Display, Formatter};
use std::str::FromStr;
use std::sync::Arc;
use std::collections::HashMap;
use std::sync::{Arc, RwLock as StdRwLock};

use async_stream::stream;
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_recordbatch::RecordBatch;
use common_telemetry::logging::info;
use datatypes::data_type::ConcreteDataType;
use datatypes::schema::{ColumnSchema, Schema};
use datatypes::vectors::StringVector;
use serde::Serializer;
use table::engine::{EngineContext, TableEngine, TableReference};
use table::engine::{CloseTableResult, EngineContext, TableEngine};
use table::metadata::TableId;
use table::requests::{AlterTableRequest, CreateTableRequest, DropTableRequest, OpenTableRequest};
use table::requests::{
    AlterTableRequest, CloseTableRequest, CreateTableRequest, DropTableRequest, OpenTableRequest,
};
use table::test_util::MemTable;
use table::TableRef;
use tokio::sync::RwLock;

use crate::error::Error;
use crate::helper::{CatalogKey, CatalogValue, SchemaKey, SchemaValue};
use crate::remote::{Kv, KvBackend, ValueIter};

pub struct MockKvBackend {
    map: RwLock<BTreeMap<Vec<u8>, Vec<u8>>>,
}

impl Default for MockKvBackend {
    fn default() -> Self {
        let mut map = BTreeMap::default();
        let catalog_value = CatalogValue {}.as_bytes().unwrap();
        let schema_value = SchemaValue {}.as_bytes().unwrap();

        let default_catalog_key = CatalogKey {
            catalog_name: DEFAULT_CATALOG_NAME.to_string(),
        }
        .to_string();

        let default_schema_key = SchemaKey {
            catalog_name: DEFAULT_CATALOG_NAME.to_string(),
            schema_name: DEFAULT_SCHEMA_NAME.to_string(),
        }
        .to_string();

        // create default catalog and schema
        map.insert(default_catalog_key.into(), catalog_value);
        map.insert(default_schema_key.into(), schema_value);

        let map = RwLock::new(map);
        Self { map }
    }
}

impl Display for MockKvBackend {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        futures::executor::block_on(async {
            let map = self.map.read().await;
            for (k, v) in map.iter() {
                f.serialize_str(&String::from_utf8_lossy(k))?;
                f.serialize_str(" -> ")?;
                f.serialize_str(&String::from_utf8_lossy(v))?;
                f.serialize_str("\n")?;
            }
            Ok(())
        })
    }
}

#[async_trait::async_trait]
impl KvBackend for MockKvBackend {
    fn range<'a, 'b>(&'a self, key: &[u8]) -> ValueIter<'b, Error>
    where
        'a: 'b,
    {
        let prefix = key.to_vec();
        let prefix_string = String::from_utf8_lossy(&prefix).to_string();
        Box::pin(stream!({
            let maps = self.map.read().await.clone();
            for (k, v) in maps.range(prefix.clone()..) {
                let key_string = String::from_utf8_lossy(k).to_string();
                let matches = key_string.starts_with(&prefix_string);
                if matches {
                    yield Ok(Kv(k.clone(), v.clone()))
                } else {
                    info!("Stream finished");
                    return;
                }
            }
        }))
    }

    async fn set(&self, key: &[u8], val: &[u8]) -> Result<(), Error> {
        let mut map = self.map.write().await;
        map.insert(key.to_vec(), val.to_vec());
        Ok(())
    }

    async fn compare_and_set(
        &self,
        key: &[u8],
        expect: &[u8],
        val: &[u8],
    ) -> Result<Result<(), Option<Vec<u8>>>, Error> {
        let mut map = self.map.write().await;
        let existing = map.entry(key.to_vec());
        match existing {
            Entry::Vacant(e) => {
                if expect.is_empty() {
                    e.insert(val.to_vec());
                    Ok(Ok(()))
                } else {
                    Ok(Err(None))
                }
            }
            Entry::Occupied(mut existing) => {
                if existing.get() == expect {
                    existing.insert(val.to_vec());
                    Ok(Ok(()))
                } else {
                    Ok(Err(Some(existing.get().clone())))
                }
            }
        }
    }

    async fn delete_range(&self, key: &[u8], end: &[u8]) -> Result<(), Error> {
        let mut map = self.map.write().await;
        if end.is_empty() {
            let _ = map.remove(key);
        } else {
            let start = key.to_vec();
            let end = end.to_vec();
            let range = start..end;

            map.retain(|k, _| !range.contains(k));
        }
        Ok(())
    }

    async fn move_value(&self, _from_key: &[u8], _to_key: &[u8]) -> Result<(), Error> {
        unimplemented!()
    }

    fn as_any(&self) -> &dyn Any {
        self
    }
}

#[derive(Default)]
pub struct MockTableEngine {
    tables: RwLock<HashMap<String, TableRef>>,
    tables: StdRwLock<HashMap<TableId, TableRef>>,
}

#[async_trait::async_trait]
@@ -180,19 +44,8 @@ impl TableEngine for MockTableEngine {
        _ctx: &EngineContext,
        request: CreateTableRequest,
    ) -> table::Result<TableRef> {
        let table_name = request.table_name.clone();
        let catalog_name = request.catalog_name.clone();
        let schema_name = request.schema_name.clone();
        let table_id = request.id;

        let default_table_id = "0".to_owned();
        let table_id = TableId::from_str(
            request
                .table_options
                .extra_options
                .get("table_id")
                .unwrap_or(&default_table_id),
        )
        .unwrap();
        let schema = Arc::new(Schema::new(vec![ColumnSchema::new(
            "name",
            ConcreteDataType::string_datatype(),
@@ -202,16 +55,16 @@ impl TableEngine for MockTableEngine {
        let data = vec![Arc::new(StringVector::from(vec!["a", "b", "c"])) as _];
        let record_batch = RecordBatch::new(schema, data).unwrap();
        let table: TableRef = Arc::new(MemTable::new_with_catalog(
            &table_name,
            &request.table_name,
            record_batch,
            table_id,
            catalog_name,
            schema_name,
            request.catalog_name,
            request.schema_name,
            vec![0],
        )) as Arc<_>;

        let mut tables = self.tables.write().await;
        tables.insert(table_name, table.clone() as TableRef);
        let mut tables = self.tables.write().unwrap();
        let _ = tables.insert(table_id, table.clone() as TableRef);
        Ok(table)
    }

@@ -220,7 +73,7 @@ impl TableEngine for MockTableEngine {
        _ctx: &EngineContext,
        request: OpenTableRequest,
    ) -> table::Result<Option<TableRef>> {
        Ok(self.tables.read().await.get(&request.table_name).cloned())
        Ok(self.tables.read().unwrap().get(&request.table_id).cloned())
    }

    async fn alter_table(
@@ -234,25 +87,13 @@ impl TableEngine for MockTableEngine {
    fn get_table(
        &self,
        _ctx: &EngineContext,
        table_ref: &TableReference,
        table_id: TableId,
    ) -> table::Result<Option<TableRef>> {
        futures::executor::block_on(async {
            Ok(self
                .tables
                .read()
                .await
                .get(&table_ref.to_string())
                .cloned())
        })
        Ok(self.tables.read().unwrap().get(&table_id).cloned())
    }

    fn table_exists(&self, _ctx: &EngineContext, table_ref: &TableReference) -> bool {
        futures::executor::block_on(async {
            self.tables
                .read()
                .await
                .contains_key(&table_ref.to_string())
        })
    fn table_exists(&self, _ctx: &EngineContext, table_id: TableId) -> bool {
        self.tables.read().unwrap().contains_key(&table_id)
    }

    async fn drop_table(
@@ -263,6 +104,15 @@ impl TableEngine for MockTableEngine {
        unimplemented!()
    }

    async fn close_table(
        &self,
        _ctx: &EngineContext,
        request: CloseTableRequest,
    ) -> table::Result<CloseTableResult> {
        let _ = self.tables.write().unwrap().remove(&request.table_id);
        Ok(CloseTableResult::Released(vec![]))
    }

    async fn close(&self) -> table::Result<()> {
        Ok(())
    }
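One theme in this mock-engine rewrite: the table map moved from `tokio::sync::RwLock` to `std::sync::RwLock`, which lets the synchronous trait methods (`get_table`, `table_exists`) drop the `futures::executor::block_on` workaround. A minimal sketch of the idea, using an illustrative type rather than the crate's own:

use std::collections::HashMap;
use std::sync::RwLock;

// With a std RwLock, a synchronous accessor needs no async runtime:
// lock, read, unlock. It is usable from both sync and async call sites,
// as long as the guard is never held across an .await point.
struct Tables {
    inner: RwLock<HashMap<u32, String>>,
}

impl Tables {
    fn exists(&self, id: u32) -> bool {
        self.inner.read().unwrap().contains_key(&id)
    }
}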
src/catalog/src/remote/region_alive_keeper.rs (new file, 826 lines)
@@ -0,0 +1,826 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::collections::HashMap;
use std::future::Future;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;

use async_trait::async_trait;
use common_meta::error::InvalidProtoMsgSnafu;
use common_meta::heartbeat::handler::{
    HandleControl, HeartbeatResponseHandler, HeartbeatResponseHandlerContext,
};
use common_meta::ident::TableIdent;
use common_meta::RegionIdent;
use common_telemetry::{debug, error, info, warn};
use snafu::{OptionExt, ResultExt};
use store_api::storage::RegionNumber;
use table::engine::manager::TableEngineManagerRef;
use table::engine::{CloseTableResult, EngineContext, TableEngineRef};
use table::requests::CloseTableRequest;
use table::TableRef;
use tokio::sync::{mpsc, oneshot, Mutex};
use tokio::task::JoinHandle;
use tokio::time::{Duration, Instant};

use crate::error::{Result, TableEngineNotFoundSnafu};

/// [RegionAliveKeepers] manages all [RegionAliveKeeper] in a scope of tables.
pub struct RegionAliveKeepers {
    table_engine_manager: TableEngineManagerRef,
    keepers: Arc<Mutex<HashMap<TableIdent, Arc<RegionAliveKeeper>>>>,
    heartbeat_interval_millis: u64,
    started: AtomicBool,

    /// The epoch when [RegionAliveKeepers] is created. It's used to get a monotonically non-decreasing
    /// elapsed time when submitting heartbeats to Metasrv (because [Instant] is monotonically
    /// non-decreasing). The heartbeat request will carry the duration since this epoch, and the
    /// duration acts like an "invariant point" for region's keep alive lease.
    epoch: Instant,
}

impl RegionAliveKeepers {
    pub fn new(
        table_engine_manager: TableEngineManagerRef,
        heartbeat_interval_millis: u64,
    ) -> Self {
        Self {
            table_engine_manager,
            keepers: Arc::new(Mutex::new(HashMap::new())),
            heartbeat_interval_millis,
            started: AtomicBool::new(false),
            epoch: Instant::now(),
        }
    }

    pub async fn find_keeper(&self, table_ident: &TableIdent) -> Option<Arc<RegionAliveKeeper>> {
        self.keepers.lock().await.get(table_ident).cloned()
    }

    pub async fn register_table(&self, table_ident: TableIdent, table: TableRef) -> Result<()> {
        let keeper = self.find_keeper(&table_ident).await;
        if keeper.is_some() {
            return Ok(());
        }

        let table_engine = self
            .table_engine_manager
            .engine(&table_ident.engine)
            .context(TableEngineNotFoundSnafu {
                engine_name: &table_ident.engine,
            })?;

        let keeper = Arc::new(RegionAliveKeeper::new(
            table_engine,
            table_ident.clone(),
            self.heartbeat_interval_millis,
        ));
        for r in table.table_info().meta.region_numbers.iter() {
            keeper.register_region(*r).await;
        }

        let mut keepers = self.keepers.lock().await;
        let _ = keepers.insert(table_ident.clone(), keeper.clone());

        if self.started.load(Ordering::Relaxed) {
            keeper.start().await;

            info!("RegionAliveKeeper for table {table_ident} is started!");
        } else {
            info!("RegionAliveKeeper for table {table_ident} is registered but not started yet!");
        }
        Ok(())
    }

    pub async fn deregister_table(
        &self,
        table_ident: &TableIdent,
    ) -> Option<Arc<RegionAliveKeeper>> {
        self.keepers.lock().await.remove(table_ident).map(|x| {
            info!("Deregister RegionAliveKeeper for table {table_ident}");
            x
        })
    }

    pub async fn register_region(&self, region_ident: &RegionIdent) {
        let table_ident = &region_ident.table_ident;
        let Some(keeper) = self.find_keeper(table_ident).await else {
            // Alive keeper could be affected by lagging msg, just warn and ignore.
            warn!("Alive keeper for region {region_ident} is not found!");
            return;
        };
        keeper.register_region(region_ident.region_number).await
    }

    pub async fn deregister_region(&self, region_ident: &RegionIdent) {
        let table_ident = &region_ident.table_ident;
        let Some(keeper) = self.find_keeper(table_ident).await else {
            // Alive keeper could be affected by lagging msg, just warn and ignore.
            warn!("Alive keeper for region {region_ident} is not found!");
            return;
        };
        let _ = keeper.deregister_region(region_ident.region_number).await;
    }

    pub async fn start(&self) {
        let keepers = self.keepers.lock().await;
        for keeper in keepers.values() {
            keeper.start().await;
        }
        self.started.store(true, Ordering::Relaxed);

        info!(
            "RegionAliveKeepers for tables {:?} are started!",
            keepers.keys().map(|x| x.to_string()).collect::<Vec<_>>(),
        );
    }

    pub fn epoch(&self) -> Instant {
        self.epoch
    }
}

#[async_trait]
impl HeartbeatResponseHandler for RegionAliveKeepers {
    fn is_acceptable(&self, ctx: &HeartbeatResponseHandlerContext) -> bool {
        !ctx.response.region_leases.is_empty()
    }

    async fn handle(
        &self,
        ctx: &mut HeartbeatResponseHandlerContext,
    ) -> common_meta::error::Result<HandleControl> {
        let leases = ctx.response.region_leases.drain(..).collect::<Vec<_>>();
        for lease in leases {
            let table_ident: TableIdent = match lease
                .table_ident
                .context(InvalidProtoMsgSnafu {
                    err_msg: "'table_ident' is missing in RegionLease",
                })
                .and_then(|x| x.try_into())
            {
                Ok(x) => x,
                Err(e) => {
                    error!(e; "");
                    continue;
                }
            };

            let Some(keeper) = self.keepers.lock().await.get(&table_ident).cloned() else {
                // Alive keeper could be affected by lagging msg, just warn and ignore.
                warn!("Alive keeper for table {table_ident} is not found!");
                continue;
            };

            let start_instant = self.epoch + Duration::from_millis(lease.duration_since_epoch);
            let deadline = start_instant + Duration::from_secs(lease.lease_seconds);
            keeper.keep_lived(lease.regions, deadline).await;
        }
        Ok(HandleControl::Continue)
    }
}
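The deadline arithmetic in `handle` above is the heart of the lease: Metasrv returns a duration measured from the Datanode's own epoch, so the Datanode never has to compare wall clocks with Metasrv. A small standalone sketch of that computation, using only tokio's time types (the function name is illustrative):

use tokio::time::{Duration, Instant};

// Given the keeper's fixed epoch, a lease that says "the region lease
// starts `duration_since_epoch` after your epoch and lasts `lease_seconds`"
// translates to a local Instant deadline, immune to clock drift.
fn lease_deadline(epoch: Instant, duration_since_epoch_millis: u64, lease_seconds: u64) -> Instant {
    let start = epoch + Duration::from_millis(duration_since_epoch_millis);
    start + Duration::from_secs(lease_seconds)
}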
|
||||
|
||||
/// [RegionAliveKeeper] starts a countdown for each region in a table. When deadline is reached,
|
||||
/// the region will be closed.
|
||||
/// The deadline is controlled by Metasrv. It works like "lease" for regions: a Datanode submits its
|
||||
/// opened regions to Metasrv, in heartbeats. If Metasrv decides some region could be resided in this
|
||||
/// Datanode, it will "extend" the region's "lease", with a deadline for [RegionAliveKeeper] to
|
||||
/// countdown.
|
||||
pub struct RegionAliveKeeper {
|
||||
table_engine: TableEngineRef,
|
||||
table_ident: TableIdent,
|
||||
countdown_task_handles: Arc<Mutex<HashMap<RegionNumber, Arc<CountdownTaskHandle>>>>,
|
||||
heartbeat_interval_millis: u64,
|
||||
started: AtomicBool,
|
||||
}
|
||||
|
||||
impl RegionAliveKeeper {
|
||||
fn new(
|
||||
table_engine: TableEngineRef,
|
||||
table_ident: TableIdent,
|
||||
heartbeat_interval_millis: u64,
|
||||
) -> Self {
|
||||
Self {
|
||||
table_engine,
|
||||
table_ident,
|
||||
countdown_task_handles: Arc::new(Mutex::new(HashMap::new())),
|
||||
heartbeat_interval_millis,
|
||||
started: AtomicBool::new(false),
|
||||
}
|
||||
}
|
||||
|
||||
async fn find_handle(&self, region: &RegionNumber) -> Option<Arc<CountdownTaskHandle>> {
|
||||
self.countdown_task_handles
|
||||
.lock()
|
||||
.await
|
||||
.get(region)
|
||||
.cloned()
|
||||
}
|
||||
|
||||
async fn register_region(&self, region: RegionNumber) {
|
||||
if self.find_handle(®ion).await.is_some() {
|
||||
return;
|
||||
}
|
||||
|
||||
let countdown_task_handles = Arc::downgrade(&self.countdown_task_handles);
|
||||
let on_task_finished = async move {
|
||||
if let Some(x) = countdown_task_handles.upgrade() {
|
||||
let _ = x.lock().await.remove(®ion);
|
||||
} // Else the countdown task handles map could be dropped because the keeper is dropped.
|
||||
};
|
||||
let handle = Arc::new(CountdownTaskHandle::new(
|
||||
self.table_engine.clone(),
|
||||
self.table_ident.clone(),
|
||||
region,
|
||||
|| on_task_finished,
|
||||
));
|
||||
|
||||
let mut handles = self.countdown_task_handles.lock().await;
|
||||
let _ = handles.insert(region, handle.clone());
|
||||
|
||||
if self.started.load(Ordering::Relaxed) {
|
||||
handle.start(self.heartbeat_interval_millis).await;
|
||||
|
||||
info!(
|
||||
"Region alive countdown for region {region} in table {} is started!",
|
||||
self.table_ident
|
||||
);
|
||||
} else {
|
||||
info!(
|
||||
"Region alive countdown for region {region} in table {} is registered but not started yet!",
|
||||
self.table_ident
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
async fn deregister_region(&self, region: RegionNumber) -> Option<Arc<CountdownTaskHandle>> {
|
||||
self.countdown_task_handles
|
||||
.lock()
|
||||
.await
|
||||
.remove(®ion)
|
||||
.map(|x| {
|
||||
info!(
|
||||
"Deregister alive countdown for region {region} in table {}",
|
||||
self.table_ident
|
||||
);
|
||||
x
|
||||
})
|
||||
}
|
||||
|
||||
async fn start(&self) {
|
||||
let handles = self.countdown_task_handles.lock().await;
|
||||
for handle in handles.values() {
|
||||
handle.start(self.heartbeat_interval_millis).await;
|
||||
}
|
||||
|
||||
self.started.store(true, Ordering::Relaxed);
|
||||
info!(
|
||||
"Region alive countdowns for regions {:?} in table {} are started!",
|
||||
handles.keys().copied().collect::<Vec<_>>(),
|
||||
self.table_ident
|
||||
);
|
||||
}
|
||||
|
||||
async fn keep_lived(&self, designated_regions: Vec<RegionNumber>, deadline: Instant) {
|
||||
for region in designated_regions {
|
||||
if let Some(handle) = self.find_handle(®ion).await {
|
||||
handle.reset_deadline(deadline).await;
|
||||
}
|
||||
// Else the region alive keeper might be triggered by lagging messages, we can safely ignore it.
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn deadline(&self, region: RegionNumber) -> Option<Instant> {
|
||||
let mut deadline = None;
|
||||
if let Some(handle) = self.find_handle(®ion).await {
|
||||
let (s, r) = oneshot::channel();
|
||||
if handle.tx.send(CountdownCommand::Deadline(s)).await.is_ok() {
|
||||
deadline = r.await.ok()
|
||||
}
|
||||
}
|
||||
deadline
|
||||
}
|
||||
|
||||
pub fn table_ident(&self) -> &TableIdent {
|
||||
&self.table_ident
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
enum CountdownCommand {
|
||||
Start(u64),
|
||||
Reset(Instant),
|
||||
Deadline(oneshot::Sender<Instant>),
|
||||
}
|
||||
|
||||
struct CountdownTaskHandle {
|
||||
tx: mpsc::Sender<CountdownCommand>,
|
||||
handler: JoinHandle<()>,
|
||||
table_ident: TableIdent,
|
||||
region: RegionNumber,
|
||||
}
|
||||
|
||||
impl CountdownTaskHandle {
|
||||
/// Creates a new [CountdownTaskHandle] and starts the countdown task.
|
||||
/// # Params
|
||||
/// - `on_task_finished`: a callback to be invoked when the task is finished. Note that it will not
|
||||
/// be invoked if the task is cancelled (by dropping the handle). This is because we want something
|
||||
/// meaningful to be done when the task is finished, e.g. deregister the handle from the map.
|
||||
/// While dropping the handle does not necessarily mean the task is finished.
|
||||
fn new<Fut>(
|
||||
table_engine: TableEngineRef,
|
||||
table_ident: TableIdent,
|
||||
region: RegionNumber,
|
||||
on_task_finished: impl FnOnce() -> Fut + Send + 'static,
|
||||
) -> Self
|
||||
where
|
||||
Fut: Future<Output = ()> + Send,
|
||||
{
|
||||
let (tx, rx) = mpsc::channel(1024);
|
||||
|
||||
let mut countdown_task = CountdownTask {
|
||||
table_engine,
|
||||
table_ident: table_ident.clone(),
|
||||
region,
|
||||
rx,
|
||||
};
|
||||
let handler = common_runtime::spawn_bg(async move {
|
||||
countdown_task.run().await;
|
||||
on_task_finished().await;
|
||||
});
|
||||
|
||||
Self {
|
||||
tx,
|
||||
handler,
|
||||
table_ident,
|
||||
region,
|
||||
}
|
||||
}
|
||||
|
||||
async fn start(&self, heartbeat_interval_millis: u64) {
|
||||
if let Err(e) = self
|
||||
.tx
|
||||
.send(CountdownCommand::Start(heartbeat_interval_millis))
|
||||
.await
|
||||
{
|
||||
warn!(
|
||||
"Failed to start region alive keeper countdown: {e}. \
|
||||
Maybe the task is stopped due to region been closed."
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
async fn reset_deadline(&self, deadline: Instant) {
|
||||
if let Err(e) = self.tx.send(CountdownCommand::Reset(deadline)).await {
|
||||
warn!(
|
||||
"Failed to reset region alive keeper deadline: {e}. \
|
||||
Maybe the task is stopped due to region been closed."
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for CountdownTaskHandle {
|
||||
fn drop(&mut self) {
|
||||
debug!(
|
||||
"Aborting region alive countdown task for region {} in table {}",
|
||||
self.region, self.table_ident,
|
||||
);
|
||||
self.handler.abort();
|
||||
}
|
||||
}
|
||||
|
||||
struct CountdownTask {
|
||||
table_engine: TableEngineRef,
|
||||
table_ident: TableIdent,
|
||||
region: RegionNumber,
|
||||
rx: mpsc::Receiver<CountdownCommand>,
|
||||
}

impl CountdownTask {
    async fn run(&mut self) {
        // 30 years. See `Instant::far_future`.
        let far_future = Instant::now() + Duration::from_secs(86400 * 365 * 30);

        // Make sure the alive countdown does not start before the heartbeat task does (the
        // "start countdown" command is sent from the heartbeat task).
        let countdown = tokio::time::sleep_until(far_future);
        tokio::pin!(countdown);

        let region = &self.region;
        let table_ident = &self.table_ident;
        loop {
            tokio::select! {
                command = self.rx.recv() => {
                    match command {
                        Some(CountdownCommand::Start(heartbeat_interval_millis)) => {
                            // Set the first deadline 4 heartbeats from now (roughly 20 seconds if the
                            // heartbeat interval is the default 5 seconds), to make Datanode and Metasrv
                            // more tolerant of network or other jitter during startup.
                            let first_deadline = Instant::now() + Duration::from_millis(heartbeat_interval_millis) * 4;
                            countdown.set(tokio::time::sleep_until(first_deadline));
                        },
                        Some(CountdownCommand::Reset(deadline)) => {
                            if countdown.deadline() < deadline {
                                debug!(
                                    "Reset deadline of region {region} of table {table_ident} to approximately {} seconds later",
                                    (deadline - Instant::now()).as_secs_f32(),
                                );
                                countdown.set(tokio::time::sleep_until(deadline));
                            }
                            // Else the countdown could be either:
                            // - not started yet;
                            // - during startup protection;
                            // - received a lagging heartbeat message.
                            // All can be safely ignored.
                        },
                        None => {
                            info!(
                                "The handle of countdown task for region {region} of table {table_ident} \
                                is dropped, RegionAliveKeeper out."
                            );
                            break;
                        },
                        Some(CountdownCommand::Deadline(tx)) => {
                            let _ = tx.send(countdown.deadline());
                        }
                    }
                }
                () = &mut countdown => {
                    let result = self.close_region().await;
                    warn!(
                        "Region {region} of table {table_ident} is closed, result: {result:?}. \
                        RegionAliveKeeper out.",
                    );
                    break;
                }
            }
        }
    }

    async fn close_region(&self) -> CloseTableResult {
        let ctx = EngineContext::default();
        let region = self.region;
        let table_ident = &self.table_ident;
        loop {
            let request = CloseTableRequest {
                catalog_name: table_ident.catalog.clone(),
                schema_name: table_ident.schema.clone(),
                table_name: table_ident.table.clone(),
                table_id: table_ident.table_id,
                region_numbers: vec![region],
                flush: true,
            };
            match self.table_engine.close_table(&ctx, request).await {
                Ok(result) => return result,
                // If the region failed to close, retry immediately. Maybe we should panic instead?
                Err(e) => error!(e;
                    "Failed to close region {region} of table {table_ident}. \
                    For the integrity of data, retry closing immediately without waiting.",
                ),
            }
        }
    }
}
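
// Editor's note: a minimal, self-contained sketch (an assumption, not original
// code) of the resettable-countdown pattern used by `CountdownTask::run` above:
// park the timer at a far-future instant so the `select!` arm stays inert, then
// move the deadline whenever a command arrives.
#[cfg(any())] // illustrative only, never compiled
async fn countdown_pattern_sketch(mut rx: tokio::sync::mpsc::Receiver<std::time::Duration>) {
    use tokio::time::{sleep_until, Instant};

    let far_future = Instant::now() + std::time::Duration::from_secs(86400 * 365 * 30);
    let countdown = sleep_until(far_future);
    tokio::pin!(countdown);
    loop {
        tokio::select! {
            Some(extension) = rx.recv() => {
                // Each message pushes the deadline further out, like `Reset` above.
                countdown.set(sleep_until(Instant::now() + extension));
            }
            () = &mut countdown => {
                // The deadline passed without renewal: fire the expiry action
                // (closing the region, in the real task) and exit.
                break;
            }
        }
    }
}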

#[cfg(test)]
mod test {
    use std::sync::atomic::{AtomicBool, Ordering};
    use std::sync::Arc;

    use api::v1::meta::{HeartbeatResponse, RegionLease};
    use common_meta::heartbeat::mailbox::HeartbeatMailbox;
    use datatypes::schema::RawSchema;
    use table::engine::manager::MemoryTableEngineManager;
    use table::engine::TableEngine;
    use table::requests::{CreateTableRequest, TableOptions};
    use table::test_util::EmptyTable;

    use super::*;
    use crate::remote::mock::MockTableEngine;

    async fn prepare_keepers() -> (TableIdent, RegionAliveKeepers) {
        let table_engine = Arc::new(MockTableEngine::default());
        let table_engine_manager = Arc::new(MemoryTableEngineManager::new(table_engine));
        let keepers = RegionAliveKeepers::new(table_engine_manager, 5000);

        let catalog = "my_catalog";
        let schema = "my_schema";
        let table = "my_table";
        let table_ident = TableIdent {
            catalog: catalog.to_string(),
            schema: schema.to_string(),
            table: table.to_string(),
            table_id: 1,
            engine: "MockTableEngine".to_string(),
        };
        let table = Arc::new(EmptyTable::new(CreateTableRequest {
            id: 1,
            catalog_name: catalog.to_string(),
            schema_name: schema.to_string(),
            table_name: table.to_string(),
            desc: None,
            schema: RawSchema {
                column_schemas: vec![],
                timestamp_index: None,
                version: 0,
            },
            region_numbers: vec![1, 2, 3],
            primary_key_indices: vec![],
            create_if_not_exists: false,
            table_options: TableOptions::default(),
            engine: "MockTableEngine".to_string(),
        }));
        keepers
            .register_table(table_ident.clone(), table)
            .await
            .unwrap();
        assert!(keepers.keepers.lock().await.contains_key(&table_ident));

        (table_ident, keepers)
    }

    #[tokio::test(flavor = "multi_thread")]
    async fn test_handle_heartbeat_response() {
        let (table_ident, keepers) = prepare_keepers().await;

        keepers.start().await;
        let startup_protection_until = Instant::now() + Duration::from_secs(21);

        let duration_since_epoch = (Instant::now() - keepers.epoch).as_millis() as _;
        let lease_seconds = 100;
        let response = HeartbeatResponse {
            region_leases: vec![RegionLease {
                table_ident: Some(table_ident.clone().into()),
                regions: vec![1, 3], // Not extending region 2's lease time.
                duration_since_epoch,
                lease_seconds,
            }],
            ..Default::default()
        };
        let keep_alive_until = keepers.epoch
            + Duration::from_millis(duration_since_epoch)
            + Duration::from_secs(lease_seconds);

        let (tx, _) = mpsc::channel(8);
        let mailbox = Arc::new(HeartbeatMailbox::new(tx));
        let mut ctx = HeartbeatResponseHandlerContext::new(mailbox, response);

        assert!(keepers.handle(&mut ctx).await.unwrap() == HandleControl::Continue);

        // sleep to wait for the background task spawned in `handle`
        tokio::time::sleep(Duration::from_secs(1)).await;

        async fn test(
            keeper: &Arc<RegionAliveKeeper>,
            region_number: RegionNumber,
            startup_protection_until: Instant,
            keep_alive_until: Instant,
            is_kept_live: bool,
        ) {
            let deadline = keeper.deadline(region_number).await.unwrap();
            if is_kept_live {
                assert!(deadline > startup_protection_until && deadline == keep_alive_until);
            } else {
                assert!(deadline <= startup_protection_until);
            }
        }

        let keeper = &keepers
            .keepers
            .lock()
            .await
            .get(&table_ident)
            .cloned()
            .unwrap();

        // Test that regions 1 and 3 are kept alive: their deadlines are updated to the desired instant.
        test(keeper, 1, startup_protection_until, keep_alive_until, true).await;
        test(keeper, 3, startup_protection_until, keep_alive_until, true).await;

        // Test that region 2 is not kept alive: its deadline is not updated, still within the
        // startup protection period.
        test(keeper, 2, startup_protection_until, keep_alive_until, false).await;
    }

    #[tokio::test(flavor = "multi_thread")]
    async fn test_region_alive_keepers() {
        let (table_ident, keepers) = prepare_keepers().await;

        keepers
            .register_region(&RegionIdent {
                cluster_id: 1,
                datanode_id: 1,
                table_ident: table_ident.clone(),
                region_number: 4,
            })
            .await;

        keepers.start().await;
        for keeper in keepers.keepers.lock().await.values() {
            let regions = {
                let handles = keeper.countdown_task_handles.lock().await;
                handles.keys().copied().collect::<Vec<_>>()
            };
            for region in regions {
                // assert countdown tasks are started
                let deadline = keeper.deadline(region).await.unwrap();
                assert!(deadline <= Instant::now() + Duration::from_secs(20));
            }
        }

        keepers
            .deregister_region(&RegionIdent {
                cluster_id: 1,
                datanode_id: 1,
                table_ident: table_ident.clone(),
                region_number: 1,
            })
            .await;
        let mut regions = keepers
            .find_keeper(&table_ident)
            .await
            .unwrap()
            .countdown_task_handles
            .lock()
            .await
            .keys()
            .copied()
            .collect::<Vec<_>>();
        regions.sort();
        assert_eq!(regions, vec![2, 3, 4]);

        let keeper = keepers.deregister_table(&table_ident).await.unwrap();
        assert!(Arc::try_unwrap(keeper).is_ok(), "keeper is not dropped");
        assert!(keepers.keepers.lock().await.is_empty());
    }

    #[tokio::test(flavor = "multi_thread")]
    async fn test_region_alive_keeper() {
        let table_engine = Arc::new(MockTableEngine::default());
        let table_ident = TableIdent {
            catalog: "my_catalog".to_string(),
            schema: "my_schema".to_string(),
            table: "my_table".to_string(),
            table_id: 1024,
            engine: "mito".to_string(),
        };
        let keeper = RegionAliveKeeper::new(table_engine, table_ident, 1000);

        let region = 1;
        assert!(keeper.find_handle(&region).await.is_none());
        keeper.register_region(region).await;
        let _ = keeper.find_handle(&region).await.unwrap();

        let ten_seconds_later = || Instant::now() + Duration::from_secs(10);

        keeper.keep_lived(vec![1, 2, 3], ten_seconds_later()).await;
        assert!(keeper.find_handle(&2).await.is_none());
        assert!(keeper.find_handle(&3).await.is_none());

        let far_future = Instant::now() + Duration::from_secs(86400 * 365 * 29);
        // assert that if the keeper is not started, `keep_lived` has no effect
        assert!(keeper.deadline(region).await.unwrap() > far_future);

        keeper.start().await;
        keeper.keep_lived(vec![1, 2, 3], ten_seconds_later()).await;
        // assert `keep_lived` works once the keeper is started
        assert!(keeper.deadline(region).await.unwrap() <= ten_seconds_later());

        let handle = keeper.deregister_region(region).await.unwrap();
        assert!(Arc::try_unwrap(handle).is_ok(), "handle is not dropped");
        assert!(keeper.find_handle(&region).await.is_none());
    }

    #[tokio::test(flavor = "multi_thread")]
    async fn test_countdown_task_handle() {
        let table_engine = Arc::new(MockTableEngine::default());
        let table_ident = TableIdent {
            catalog: "my_catalog".to_string(),
            schema: "my_schema".to_string(),
            table: "my_table".to_string(),
            table_id: 1024,
            engine: "mito".to_string(),
        };
        let finished = Arc::new(AtomicBool::new(false));
        let finished_clone = finished.clone();
        let handle = CountdownTaskHandle::new(
            table_engine.clone(),
            table_ident.clone(),
            1,
            || async move { finished_clone.store(true, Ordering::Relaxed) },
        );
        let tx = handle.tx.clone();

        // assert countdown task is running
        tx.send(CountdownCommand::Start(5000)).await.unwrap();
        assert!(!finished.load(Ordering::Relaxed));

        drop(handle);
        tokio::time::sleep(Duration::from_secs(1)).await;

        // assert countdown task is stopped
        assert!(tx
            .try_send(CountdownCommand::Reset(
                Instant::now() + Duration::from_secs(10)
            ))
            .is_err());
        // assert `on_task_finished` is not called (because the task is aborted by the handle's drop)
        assert!(!finished.load(Ordering::Relaxed));

        let finished = Arc::new(AtomicBool::new(false));
        let finished_clone = finished.clone();
        let handle = CountdownTaskHandle::new(table_engine, table_ident, 1, || async move {
            finished_clone.store(true, Ordering::Relaxed)
        });
        handle.tx.send(CountdownCommand::Start(100)).await.unwrap();
        tokio::time::sleep(Duration::from_secs(1)).await;
        // assert `on_task_finished` is called when task is finished normally
        assert!(finished.load(Ordering::Relaxed));
    }

    #[tokio::test(flavor = "multi_thread")]
    async fn test_countdown_task_run() {
        let ctx = &EngineContext::default();
        let catalog = "my_catalog";
        let schema = "my_schema";
        let table = "my_table";
        let table_id = 1;
        let request = CreateTableRequest {
            id: table_id,
            catalog_name: catalog.to_string(),
            schema_name: schema.to_string(),
            table_name: table.to_string(),
            desc: None,
            schema: RawSchema {
                column_schemas: vec![],
                timestamp_index: None,
                version: 0,
            },
            region_numbers: vec![],
            primary_key_indices: vec![],
            create_if_not_exists: false,
            table_options: TableOptions::default(),
            engine: "mito".to_string(),
        };

        let table_engine = Arc::new(MockTableEngine::default());
        let _ = table_engine.create_table(ctx, request).await.unwrap();

        let table_ident = TableIdent {
            catalog: catalog.to_string(),
            schema: schema.to_string(),
            table: table.to_string(),
            table_id,
            engine: "mito".to_string(),
        };
        let (tx, rx) = mpsc::channel(10);
        let mut task = CountdownTask {
            table_engine: table_engine.clone(),
            table_ident,
            region: 1,
            rx,
        };
        let _handle = common_runtime::spawn_bg(async move {
            task.run().await;
        });

        async fn deadline(tx: &mpsc::Sender<CountdownCommand>) -> Instant {
            let (s, r) = oneshot::channel();
            tx.send(CountdownCommand::Deadline(s)).await.unwrap();
            r.await.unwrap()
        }

        // if the countdown task is not started, its deadline is set to the far future
        assert!(deadline(&tx).await > Instant::now() + Duration::from_secs(86400 * 365 * 29));

        // start countdown in 250ms * 4 = 1s
        tx.send(CountdownCommand::Start(250)).await.unwrap();
        // assert deadline is correctly set
        assert!(deadline(&tx).await <= Instant::now() + Duration::from_secs(1));

        // reset countdown to 1.5s from now
        tx.send(CountdownCommand::Reset(
            Instant::now() + Duration::from_millis(1500),
        ))
        .await
        .unwrap();

        // assert the table is closed after the deadline is reached
        assert!(table_engine.table_exists(ctx, table_id));
        // sleep past the 1.5s deadline, sparing 500ms for the task to close the table
        tokio::time::sleep(Duration::from_millis(2000)).await;
        assert!(!table_engine.table_exists(ctx, table_id));
    }
}
@@ -1,69 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::any::Any;
use std::sync::Arc;

use async_trait::async_trait;
use table::TableRef;

use crate::error::{NotSupportedSnafu, Result};

/// Represents a schema, comprising a number of named tables.
#[async_trait]
pub trait SchemaProvider: Sync + Send {
    /// Returns the schema provider as [`Any`](std::any::Any)
    /// so that it can be downcast to a specific implementation.
    fn as_any(&self) -> &dyn Any;

    /// Retrieves the list of available table names in this schema.
    async fn table_names(&self) -> Result<Vec<String>>;

    /// Retrieves a specific table from the schema by name, provided it exists.
    async fn table(&self, name: &str) -> Result<Option<TableRef>>;

    /// If supported by the implementation, adds a new table to this schema.
    /// If a table of the same name existed before, it returns a "Table already exists" error.
    async fn register_table(&self, name: String, _table: TableRef) -> Result<Option<TableRef>> {
        NotSupportedSnafu {
            op: format!("register_table({name}, <table>)"),
        }
        .fail()
    }

    /// If supported by the implementation, renames an existing table in this schema and returns it.
    /// If no table of that name exists, returns a "Table not found" error.
    async fn rename_table(&self, name: &str, new_name: String) -> Result<TableRef> {
        NotSupportedSnafu {
            op: format!("rename_table({name}, {new_name})"),
        }
        .fail()
    }

    /// If supported by the implementation, removes an existing table from this schema and returns it.
    /// If no table of that name exists, returns Ok(None).
    async fn deregister_table(&self, name: &str) -> Result<Option<TableRef>> {
        NotSupportedSnafu {
            op: format!("deregister_table({name})"),
        }
        .fail()
    }

    /// If supported by the implementation, checks whether a table exists in this schema provider:
    /// returns true if a matching table is found, false otherwise.
    async fn table_exist(&self, name: &str) -> Result<bool>;
}

pub type SchemaProviderRef = Arc<dyn SchemaProvider>;
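
// Editor's sketch (an assumption, not part of the diff): a read-only
// implementation of the (now removed) `SchemaProvider` trait above only needs
// the required methods; the mutating ones fall back to the "not supported"
// defaults. `StaticSchema` is a hypothetical type for illustration.
#[cfg(any())] // illustrative only, never compiled
mod schema_provider_sketch {
    use std::collections::HashMap;

    use super::*;

    struct StaticSchema {
        tables: HashMap<String, TableRef>,
    }

    #[async_trait]
    impl SchemaProvider for StaticSchema {
        fn as_any(&self) -> &dyn Any {
            self
        }

        async fn table_names(&self) -> Result<Vec<String>> {
            Ok(self.tables.keys().cloned().collect())
        }

        async fn table(&self, name: &str) -> Result<Option<TableRef>> {
            Ok(self.tables.get(name).cloned())
        }

        async fn table_exist(&self, name: &str) -> Result<bool> {
            Ok(self.tables.contains_key(name))
        }
    }
}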
@@ -20,8 +20,6 @@ use common_catalog::consts::{
    DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, INFORMATION_SCHEMA_NAME, MITO_ENGINE,
    SYSTEM_CATALOG_NAME, SYSTEM_CATALOG_TABLE_ID, SYSTEM_CATALOG_TABLE_NAME,
};
use common_query::logical_plan::Expr;
use common_query::physical_plan::{PhysicalPlanRef, SessionContext};
use common_recordbatch::SendableRecordBatchStream;
use common_telemetry::debug;
use common_time::util;
@@ -60,15 +58,6 @@ impl Table for SystemCatalogTable {
        self.0.schema()
    }

    async fn scan(
        &self,
        projection: Option<&Vec<usize>>,
        filters: &[Expr],
        limit: Option<usize>,
    ) -> table::Result<PhysicalPlanRef> {
        self.0.scan(projection, filters, limit).await
    }

    async fn scan_to_stream(&self, request: ScanRequest) -> TableResult<SendableRecordBatchStream> {
        self.0.scan_to_stream(request).await
    }
@@ -136,14 +125,17 @@ impl SystemCatalogTable {
    /// Create a stream of all entries inside the system catalog table
    pub async fn records(&self) -> Result<SendableRecordBatchStream> {
        let full_projection = None;
        let ctx = SessionContext::new();
        let scan = self
            .scan(full_projection, &[], None)
        let scan_req = ScanRequest {
            sequence: None,
            projection: full_projection,
            filters: vec![],
            output_ordering: None,
            limit: None,
        };
        let stream = self
            .scan_to_stream(scan_req)
            .await
            .context(error::SystemCatalogTableScanSnafu)?;
        let stream = scan
            .execute(0, ctx.task_ctx())
            .context(error::SystemCatalogTableScanExecSnafu)?;
        Ok(stream)
    }
}
@@ -211,38 +203,50 @@ pub fn build_table_insert_request(
    build_insert_request(
        EntryType::Table,
        entry_key.as_bytes(),
        serde_json::to_string(&TableEntryValue { table_name, engine })
            .unwrap()
            .as_bytes(),
        serde_json::to_string(&TableEntryValue {
            table_name,
            engine,
            is_deleted: false,
        })
        .unwrap()
        .as_bytes(),
    )
}

pub(crate) fn build_table_deletion_request(
    request: &DeregisterTableRequest,
    table_id: TableId,
) -> DeleteRequest {
    let table_key = format_table_entry_key(&request.catalog, &request.schema, table_id);
    DeleteRequest {
        key_column_values: build_primary_key_columns(EntryType::Table, table_key.as_bytes()),
    }
) -> InsertRequest {
    let entry_key = format_table_entry_key(&request.catalog, &request.schema, table_id);
    build_insert_request(
        EntryType::Table,
        entry_key.as_bytes(),
        serde_json::to_string(&TableEntryValue {
            table_name: "".to_string(),
            engine: "".to_string(),
            is_deleted: true,
        })
        .unwrap()
        .as_bytes(),
    )
}

fn build_primary_key_columns(entry_type: EntryType, key: &[u8]) -> HashMap<String, VectorRef> {
    let mut m = HashMap::with_capacity(3);
    m.insert(
        "entry_type".to_string(),
        Arc::new(UInt8Vector::from_slice([entry_type as u8])) as _,
    );
    m.insert(
        "key".to_string(),
        Arc::new(BinaryVector::from_slice(&[key])) as _,
    );
    // Timestamp in key part is intentionally left as 0
    m.insert(
        "timestamp".to_string(),
        Arc::new(TimestampMillisecondVector::from_slice([0])) as _,
    );
    m
    HashMap::from([
        (
            "entry_type".to_string(),
            Arc::new(UInt8Vector::from_slice([entry_type as u8])) as VectorRef,
        ),
        (
            "key".to_string(),
            Arc::new(BinaryVector::from_slice(&[key])) as VectorRef,
        ),
        (
            "timestamp".to_string(),
            // Timestamp in key part is intentionally left as 0
            Arc::new(TimestampMillisecondVector::from_slice([0])) as VectorRef,
        ),
    ])
}

pub fn build_schema_insert_request(catalog_name: String, schema_name: String) -> InsertRequest {
@@ -262,18 +266,18 @@ pub fn build_insert_request(entry_type: EntryType, key: &[u8], value: &[u8]) ->
    let mut columns_values = HashMap::with_capacity(6);
    columns_values.extend(primary_key_columns.into_iter());

    columns_values.insert(
    let _ = columns_values.insert(
        "value".to_string(),
        Arc::new(BinaryVector::from_slice(&[value])) as _,
    );

    let now = util::current_time_millis();
    columns_values.insert(
    let _ = columns_values.insert(
        "gmt_created".to_string(),
        Arc::new(TimestampMillisecondVector::from_slice([now])) as _,
    );

    columns_values.insert(
    let _ = columns_values.insert(
        "gmt_modified".to_string(),
        Arc::new(TimestampMillisecondVector::from_slice([now])) as _,
    );
@@ -343,6 +347,7 @@ pub fn decode_system_catalog(
            table_name: table_meta.table_name,
            table_id,
            engine: table_meta.engine,
            is_deleted: table_meta.is_deleted,
        }))
    }
}
@@ -399,6 +404,7 @@ pub struct TableEntry {
    pub table_name: String,
    pub table_id: TableId,
    pub engine: String,
    pub is_deleted: bool,
}

#[derive(Debug, Serialize, Deserialize, PartialEq, Eq)]
@@ -407,12 +413,19 @@ pub struct TableEntryValue {

    #[serde(default = "mito_engine")]
    pub engine: String,

    #[serde(default = "not_deleted")]
    pub is_deleted: bool,
}

fn mito_engine() -> String {
    MITO_ENGINE.to_string()
}

fn not_deleted() -> bool {
    false
}
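
// Editor's note: with the `#[serde(default = ...)]` attributes above, a legacy
// JSON entry written before the `engine` or `is_deleted` fields existed still
// deserializes. A hedged illustration (not taken from the diff):
//
//     let v: TableEntryValue = serde_json::from_str(r#"{"table_name":"t"}"#).unwrap();
//     assert_eq!(v.engine, MITO_ENGINE.to_string());
//     assert!(!v.is_deleted);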

#[cfg(test)]
mod tests {
    use common_recordbatch::RecordBatches;
@@ -482,14 +495,13 @@ mod tests {
    }

    #[test]
    #[should_panic]
    pub fn test_decode_mismatch() {
        decode_system_catalog(
        assert!(decode_system_catalog(
            Some(EntryType::Table as u8),
            Some("some_catalog.some_schema.42".as_bytes()),
            None,
        )
        .unwrap();
        .is_err());
    }

    #[test]
@@ -504,7 +516,7 @@ mod tests {
        let dir = create_temp_dir("system-table-test");
        let store_dir = dir.path().to_string_lossy();
        let mut builder = object_store::services::Fs::default();
        builder.root(&store_dir);
        let _ = builder.root(&store_dir);
        let object_store = ObjectStore::new(builder).unwrap().finish();
        let noop_compaction_scheduler = Arc::new(NoopCompactionScheduler::default());
        let table_engine = Arc::new(MitoEngine::new(
@@ -572,6 +584,7 @@ mod tests {
            table_name: "my_table".to_string(),
            table_id: 1,
            engine: MITO_ENGINE.to_string(),
            is_deleted: false,
        });
        assert_eq!(entry, expected);

@@ -583,11 +596,11 @@ mod tests {
            },
            1,
        );
        let result = catalog_table.delete(table_deletion).await.unwrap();
        let result = catalog_table.insert(table_deletion).await.unwrap();
        assert_eq!(result, 1);

        let records = catalog_table.records().await.unwrap();
        let batches = RecordBatches::try_collect(records).await.unwrap().take();
        assert_eq!(batches.len(), 0);
        assert_eq!(batches.len(), 1);
    }
}

@@ -24,10 +24,7 @@ use session::context::QueryContext;
use snafu::{ensure, OptionExt};
use table::table::adapter::DfTableProviderAdapter;

use crate::error::{
    CatalogNotFoundSnafu, QueryAccessDeniedSnafu, Result, SchemaNotFoundSnafu, TableNotExistSnafu,
};
use crate::information_schema::InformationSchemaProvider;
use crate::error::{QueryAccessDeniedSnafu, Result, TableNotExistSnafu};
use crate::CatalogManagerRef;

pub struct DfTableSourceProvider {
@@ -104,41 +101,18 @@ impl DfTableSourceProvider {
        let schema_name = table_ref.schema.as_ref();
        let table_name = table_ref.table.as_ref();

        let schema = if schema_name != INFORMATION_SCHEMA_NAME {
            let catalog = self
                .catalog_manager
                .catalog(catalog_name)
                .await?
                .context(CatalogNotFoundSnafu { catalog_name })?;
            catalog
                .schema(schema_name)
                .await?
                .context(SchemaNotFoundSnafu {
                    catalog: catalog_name,
                    schema: schema_name,
                })?
        } else {
            let catalog_provider = self
                .catalog_manager
                .catalog(catalog_name)
                .await?
                .context(CatalogNotFoundSnafu { catalog_name })?;
            Arc::new(InformationSchemaProvider::new(
                catalog_name.to_string(),
                catalog_provider,
            ))
        };
        let table = schema
            .table(table_name)
        let table = self
            .catalog_manager
            .table(catalog_name, schema_name, table_name)
            .await?
            .with_context(|| TableNotExistSnafu {
                table: format_full_table_name(catalog_name, schema_name, table_name),
            })?;

        let table = DfTableProviderAdapter::new(table);
        let table = provider_as_source(Arc::new(table));
        self.resolved_tables.insert(resolved_name, table.clone());
        Ok(table)
        let provider = DfTableProviderAdapter::new(table);
        let source = provider_as_source(Arc::new(provider));
        let _ = self.resolved_tables.insert(resolved_name, source.clone());
        Ok(source)
    }
}

@@ -162,14 +136,14 @@ mod tests {
            table: Cow::Borrowed("table_name"),
        };
        let result = table_provider.resolve_table_ref(table_ref);
        assert!(result.is_ok());
        let _ = result.unwrap();

        let table_ref = TableReference::Partial {
            schema: Cow::Borrowed("public"),
            table: Cow::Borrowed("table_name"),
        };
        let result = table_provider.resolve_table_ref(table_ref);
        assert!(result.is_ok());
        let _ = result.unwrap();

        let table_ref = TableReference::Partial {
            schema: Cow::Borrowed("wrong_schema"),
@@ -184,7 +158,7 @@ mod tests {
            table: Cow::Borrowed("table_name"),
        };
        let result = table_provider.resolve_table_ref(table_ref);
        assert!(result.is_ok());
        let _ = result.unwrap();

        let table_ref = TableReference::Full {
            catalog: Cow::Borrowed("wrong_catalog"),
@@ -198,14 +172,14 @@ mod tests {
            schema: Cow::Borrowed("information_schema"),
            table: Cow::Borrowed("columns"),
        };
        assert!(table_provider.resolve_table_ref(table_ref).is_ok());
        let _ = table_provider.resolve_table_ref(table_ref).unwrap();

        let table_ref = TableReference::Full {
            catalog: Cow::Borrowed("greptime"),
            schema: Cow::Borrowed("information_schema"),
            table: Cow::Borrowed("columns"),
        };
        assert!(table_provider.resolve_table_ref(table_ref).is_ok());
        let _ = table_provider.resolve_table_ref(table_ref).unwrap();

        let table_ref = TableReference::Full {
            catalog: Cow::Borrowed("dummy"),

@@ -14,49 +14,24 @@

// The `tables` table in the system catalog keeps a record of all tables created by users.

use std::any::Any;
use std::sync::Arc;

use async_trait::async_trait;
use common_catalog::consts::{INFORMATION_SCHEMA_NAME, SYSTEM_CATALOG_TABLE_NAME};
use common_telemetry::logging;
use snafu::ResultExt;
use table::metadata::TableId;
use table::{Table, TableRef};
use table::Table;

use crate::error::{self, Error, InsertCatalogRecordSnafu, Result as CatalogResult};
use crate::error::{self, InsertCatalogRecordSnafu, Result as CatalogResult};
use crate::system::{
    build_schema_insert_request, build_table_deletion_request, build_table_insert_request,
    SystemCatalogTable,
};
use crate::{CatalogProvider, DeregisterTableRequest, SchemaProvider, SchemaProviderRef};
use crate::DeregisterTableRequest;

pub struct InformationSchema {
    pub system: Arc<SystemCatalogTable>,
}

#[async_trait]
impl SchemaProvider for InformationSchema {
    fn as_any(&self) -> &dyn Any {
        self
    }

    async fn table_names(&self) -> Result<Vec<String>, Error> {
        Ok(vec![SYSTEM_CATALOG_TABLE_NAME.to_string()])
    }

    async fn table(&self, name: &str) -> Result<Option<TableRef>, Error> {
        if name.eq_ignore_ascii_case(SYSTEM_CATALOG_TABLE_NAME) {
            Ok(Some(self.system.clone()))
        } else {
            Ok(None)
        }
    }

    async fn table_exist(&self, name: &str) -> Result<bool, Error> {
        Ok(name.eq_ignore_ascii_case(SYSTEM_CATALOG_TABLE_NAME))
    }
}

pub struct SystemCatalog {
    pub information_schema: Arc<InformationSchema>,
}
@@ -91,12 +66,21 @@ impl SystemCatalog {
        &self,
        request: &DeregisterTableRequest,
        table_id: TableId,
    ) -> CatalogResult<bool> {
    ) -> CatalogResult<()> {
        self.information_schema
            .system
            .delete(build_table_deletion_request(request, table_id))
            .insert(build_table_deletion_request(request, table_id))
            .await
            .map(|x| x == 1)
            .map(|x| {
                if x != 1 {
                    let table = common_catalog::format_full_table_name(
                        &request.catalog,
                        &request.schema,
                        &request.table_name
                    );
                    logging::warn!("Failed to delete table record from information_schema, unexpected returned result: {x}, table: {table}");
                }
            })
            .with_context(|_| error::DeregisterTableSnafu {
                request: request.clone(),
            })
@@ -115,30 +99,3 @@ impl SystemCatalog {
            .context(InsertCatalogRecordSnafu)
    }
}

#[async_trait::async_trait]
impl CatalogProvider for SystemCatalog {
    fn as_any(&self) -> &dyn Any {
        self
    }

    async fn schema_names(&self) -> Result<Vec<String>, Error> {
        Ok(vec![INFORMATION_SCHEMA_NAME.to_string()])
    }

    async fn register_schema(
        &self,
        _name: String,
        _schema: SchemaProviderRef,
    ) -> Result<Option<SchemaProviderRef>, Error> {
        panic!("System catalog does not support registering schema!")
    }

    async fn schema(&self, name: &str) -> Result<Option<Arc<dyn SchemaProvider>>, Error> {
        if name.eq_ignore_ascii_case(INFORMATION_SCHEMA_NAME) {
            Ok(Some(self.information_schema.clone()))
        } else {
            Ok(None)
        }
    }
}

@@ -19,55 +19,70 @@ mod tests {
    use std::assert_matches::assert_matches;
    use std::collections::HashSet;
    use std::sync::Arc;
    use std::time::Duration;

    use catalog::error::Error;
    use catalog::helper::{CatalogKey, CatalogValue, SchemaKey, SchemaValue};
    use catalog::remote::mock::{MockKvBackend, MockTableEngine};
    use catalog::remote::{
        CachedMetaKvBackend, KvBackend, KvBackendRef, RemoteCatalogManager, RemoteCatalogProvider,
        RemoteSchemaProvider,
    };
    use catalog::{CatalogManager, RegisterTableRequest};
    use catalog::remote::mock::MockTableEngine;
    use catalog::remote::region_alive_keeper::RegionAliveKeepers;
    use catalog::remote::{CachedMetaKvBackend, RemoteCatalogManager};
    use catalog::{CatalogManager, RegisterSchemaRequest, RegisterTableRequest};
    use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, MITO_ENGINE};
    use common_meta::ident::TableIdent;
    use common_meta::key::TableMetadataManager;
    use common_meta::kv_backend::memory::MemoryKvBackend;
    use common_meta::kv_backend::KvBackend;
    use common_meta::rpc::store::{CompareAndPutRequest, PutRequest, RangeRequest};
    use datatypes::schema::RawSchema;
    use futures_util::StreamExt;
    use table::engine::manager::{MemoryTableEngineManager, TableEngineManagerRef};
    use table::engine::{EngineContext, TableEngineRef};
    use table::requests::CreateTableRequest;
    use table::test_util::EmptyTable;
    use tokio::time::Instant;

    struct TestingComponents {
        catalog_manager: Arc<RemoteCatalogManager>,
        table_engine_manager: TableEngineManagerRef,
        region_alive_keepers: Arc<RegionAliveKeepers>,
    }

    impl TestingComponents {
        fn table_engine(&self) -> TableEngineRef {
            self.table_engine_manager.engine(MITO_ENGINE).unwrap()
        }
    }

    #[tokio::test]
    async fn test_backend() {
        common_telemetry::init_default_ut_logging();
        let backend = MockKvBackend::default();
        let backend = MemoryKvBackend::<Error>::default();

        let default_catalog_key = CatalogKey {
            catalog_name: DEFAULT_CATALOG_NAME.to_string(),
        }
        .to_string();

        backend
            .set(
                default_catalog_key.as_bytes(),
                &CatalogValue {}.as_bytes().unwrap(),
            )
            .await
            .unwrap();
        let req = PutRequest::new()
            .with_key(default_catalog_key.as_bytes())
            .with_value(CatalogValue.as_bytes().unwrap());
        backend.put(req).await.unwrap();

        let schema_key = SchemaKey {
            catalog_name: DEFAULT_CATALOG_NAME.to_string(),
            schema_name: DEFAULT_SCHEMA_NAME.to_string(),
        }
        .to_string();
        backend
            .set(schema_key.as_bytes(), &SchemaValue {}.as_bytes().unwrap())
            .await
            .unwrap();
        let req = PutRequest::new()
            .with_key(schema_key.as_bytes())
            .with_value(SchemaValue.as_bytes().unwrap());
        backend.put(req).await.unwrap();

        let mut iter = backend.range("__c-".as_bytes());
        let mut res = HashSet::new();
        while let Some(r) = iter.next().await {
            let kv = r.unwrap();
            res.insert(String::from_utf8_lossy(&kv.0).to_string());
        }
        let req = RangeRequest::new().with_prefix(b"__c-".to_vec());
        let res = backend
            .range(req)
            .await
            .unwrap()
            .kvs
            .into_iter()
            .map(|kv| String::from_utf8_lossy(kv.key()).to_string());
        assert_eq!(
            vec!["__c-greptime".to_string()],
            res.into_iter().collect::<Vec<_>>()
@@ -76,98 +91,100 @@ mod tests {

    #[tokio::test]
    async fn test_cached_backend() {
        common_telemetry::init_default_ut_logging();
        let backend = CachedMetaKvBackend::wrap(Arc::new(MockKvBackend::default()));
        let backend = CachedMetaKvBackend::wrap(Arc::new(MemoryKvBackend::default()));

        let default_catalog_key = CatalogKey {
            catalog_name: DEFAULT_CATALOG_NAME.to_string(),
        }
        .to_string();

        backend
            .set(
                default_catalog_key.as_bytes(),
                &CatalogValue {}.as_bytes().unwrap(),
            )
            .await
            .unwrap();
        let req = PutRequest::new()
            .with_key(default_catalog_key.as_bytes())
            .with_value(CatalogValue.as_bytes().unwrap());
        backend.put(req).await.unwrap();

        let ret = backend.get(b"__c-greptime").await.unwrap();
        assert!(ret.is_some());
        let _ = ret.unwrap();

        let _ = backend
            .compare_and_set(
                b"__c-greptime",
                &CatalogValue {}.as_bytes().unwrap(),
                b"123",
            )
            .await
            .unwrap();
        let req = CompareAndPutRequest::new()
            .with_key(b"__c-greptime".to_vec())
            .with_expect(CatalogValue.as_bytes().unwrap())
            .with_value(b"123".to_vec());
        let _ = backend.compare_and_put(req).await.unwrap();

        let ret = backend.get(b"__c-greptime").await.unwrap();
        assert!(ret.is_some());
        assert_eq!(&b"123"[..], &(ret.as_ref().unwrap().1));
        assert_eq!(b"123", ret.as_ref().unwrap().value.as_slice());

        let _ = backend.set(b"__c-greptime", b"1234").await;
        let req = PutRequest::new()
            .with_key(b"__c-greptime".to_vec())
            .with_value(b"1234".to_vec());
        let _ = backend.put(req).await;

        let ret = backend.get(b"__c-greptime").await.unwrap();
        assert!(ret.is_some());
        assert_eq!(&b"1234"[..], &(ret.as_ref().unwrap().1));
        assert_eq!(b"1234", ret.unwrap().value.as_slice());

        backend.delete(b"__c-greptime").await.unwrap();
        backend.delete(b"__c-greptime", false).await.unwrap();

        let ret = backend.get(b"__c-greptime").await.unwrap();
        assert!(ret.is_none());
    }

    async fn prepare_components(
        node_id: u64,
    ) -> (
        KvBackendRef,
        TableEngineRef,
        Arc<RemoteCatalogManager>,
        TableEngineManagerRef,
    ) {
        let cached_backend = Arc::new(CachedMetaKvBackend::wrap(
            Arc::new(MockKvBackend::default()),
        ));
    async fn prepare_components(node_id: u64) -> TestingComponents {
        let backend = Arc::new(MemoryKvBackend::default());

        let req = PutRequest::new()
            .with_key(b"__c-greptime".to_vec())
            .with_value(b"".to_vec());
        backend.put(req).await.unwrap();

        let req = PutRequest::new()
            .with_key(b"__s-greptime-public".to_vec())
            .with_value(b"".to_vec());
        backend.put(req).await.unwrap();

        let cached_backend = Arc::new(CachedMetaKvBackend::wrap(backend));

        let table_engine = Arc::new(MockTableEngine::default());
        let engine_manager = Arc::new(MemoryTableEngineManager::alias(
            MITO_ENGINE.to_string(),
            table_engine.clone(),
            table_engine,
        ));

        let catalog_manager =
            RemoteCatalogManager::new(engine_manager.clone(), node_id, cached_backend.clone());
        let region_alive_keepers = Arc::new(RegionAliveKeepers::new(engine_manager.clone(), 5000));

        let catalog_manager = RemoteCatalogManager::new(
            engine_manager.clone(),
            node_id,
            cached_backend.clone(),
            region_alive_keepers.clone(),
            Arc::new(TableMetadataManager::new(cached_backend)),
        );
        catalog_manager.start().await.unwrap();

        (
            cached_backend,
            table_engine,
            Arc::new(catalog_manager),
            engine_manager as Arc<_>,
        )
        TestingComponents {
            catalog_manager: Arc::new(catalog_manager),
            table_engine_manager: engine_manager,
            region_alive_keepers,
        }
    }

    #[tokio::test]
    async fn test_remote_catalog_default() {
        common_telemetry::init_default_ut_logging();
        let node_id = 42;
        let (_, _, catalog_manager, _) = prepare_components(node_id).await;
        let TestingComponents {
            catalog_manager, ..
        } = prepare_components(node_id).await;
        assert_eq!(
            vec![DEFAULT_CATALOG_NAME.to_string()],
            catalog_manager.catalog_names().await.unwrap()
        );

        let default_catalog = catalog_manager
            .catalog(DEFAULT_CATALOG_NAME)
            .await
            .unwrap()
            .unwrap();
        assert_eq!(
            vec![DEFAULT_SCHEMA_NAME.to_string()],
            default_catalog.schema_names().await.unwrap()
            catalog_manager
                .schema_names(DEFAULT_CATALOG_NAME)
                .await
                .unwrap()
        );
    }

@@ -175,14 +192,16 @@ mod tests {
    async fn test_remote_catalog_register_nonexistent() {
        common_telemetry::init_default_ut_logging();
        let node_id = 42;
        let (_, table_engine, catalog_manager, _) = prepare_components(node_id).await;
        let components = prepare_components(node_id).await;

        // register a new table with a nonexistent catalog
        let catalog_name = "nonexistent_catalog".to_string();
        let schema_name = "nonexistent_schema".to_string();
        let table_name = "fail_table".to_string();
        // this schema has no effect
        let table_schema = RawSchema::new(vec![]);
        let table = table_engine
        let table = components
            .table_engine()
            .create_table(
                &EngineContext {},
                CreateTableRequest {
@@ -208,7 +227,7 @@ mod tests {
                table_id: 1,
                table,
            };
        let res = catalog_manager.register_table(reg_req).await;
        let res = components.catalog_manager.register_table(reg_req).await;

        // because nonexistent_catalog does not exist yet.
        assert_matches!(
@@ -220,23 +239,16 @@ mod tests {
    #[tokio::test]
    async fn test_register_table() {
        let node_id = 42;
        let (_, table_engine, catalog_manager, _) = prepare_components(node_id).await;
        let default_catalog = catalog_manager
            .catalog(DEFAULT_CATALOG_NAME)
            .await
            .unwrap()
            .unwrap();
        let components = prepare_components(node_id).await;
        assert_eq!(
            vec![DEFAULT_SCHEMA_NAME.to_string()],
            default_catalog.schema_names().await.unwrap()
            components
                .catalog_manager
                .schema_names(DEFAULT_CATALOG_NAME)
                .await
                .unwrap()
        );

        let default_schema = default_catalog
            .schema(DEFAULT_SCHEMA_NAME)
            .await
            .unwrap()
            .unwrap();

        // register a new table in the default catalog
        let catalog_name = DEFAULT_CATALOG_NAME.to_string();
        let schema_name = DEFAULT_SCHEMA_NAME.to_string();
@@ -244,7 +256,8 @@ mod tests {
        let table_id = 1;
        // this schema has no effect
        let table_schema = RawSchema::new(vec![]);
        let table = table_engine
        let table = components
            .table_engine()
            .create_table(
                &EngineContext {},
                CreateTableRequest {
@@ -270,40 +283,51 @@ mod tests {
            table_id,
            table,
        };
        assert!(catalog_manager.register_table(reg_req).await.unwrap());
        assert!(components
            .catalog_manager
            .register_table(reg_req)
            .await
            .unwrap());
        assert_eq!(
            vec![table_name],
            default_schema.table_names().await.unwrap()
            components
                .catalog_manager
                .table_names(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME)
                .await
                .unwrap()
        );
    }

    #[tokio::test]
    async fn test_register_catalog_schema_table() {
        let node_id = 42;
        let (backend, table_engine, catalog_manager, engine_manager) =
            prepare_components(node_id).await;
        let components = prepare_components(node_id).await;

        let catalog_name = "test_catalog".to_string();
        let schema_name = "nonexistent_schema".to_string();
        let catalog = Arc::new(RemoteCatalogProvider::new(
            catalog_name.clone(),
            backend.clone(),
            engine_manager.clone(),
            node_id,
        ));

        // register catalog to catalog manager
        CatalogManager::register_catalog(&*catalog_manager, catalog_name.clone(), catalog)
        assert!(components
            .catalog_manager
            .register_catalog(catalog_name.clone())
            .await
            .unwrap();
            .is_ok());
        assert_eq!(
            HashSet::<String>::from_iter(
                vec![DEFAULT_CATALOG_NAME.to_string(), catalog_name.clone()].into_iter()
            ),
            HashSet::from_iter(catalog_manager.catalog_names().await.unwrap().into_iter())
            HashSet::from_iter(
                components
                    .catalog_manager
                    .catalog_names()
                    .await
                    .unwrap()
                    .into_iter()
            )
        );

        let table_to_register = table_engine
        let table_to_register = components
            .table_engine()
            .create_table(
                &EngineContext {},
                CreateTableRequest {
@@ -332,41 +356,128 @@ mod tests {
        };
        // this register will fail since schema does not exist yet
        assert_matches!(
            catalog_manager
            components
                .catalog_manager
                .register_table(reg_req.clone())
                .await
                .unwrap_err(),
            catalog::error::Error::SchemaNotFound { .. }
        );

        let new_catalog = catalog_manager
            .catalog(&catalog_name)
        let register_schema_request = RegisterSchemaRequest {
            catalog: catalog_name.to_string(),
            schema: schema_name.to_string(),
        };
        assert!(components
            .catalog_manager
            .register_schema(register_schema_request)
            .await
            .unwrap()
            .expect("catalog should exist since it's already registered");
        let schema = Arc::new(RemoteSchemaProvider::new(
            catalog_name.clone(),
            schema_name.clone(),
            node_id,
            engine_manager,
            backend.clone(),
        ));

        let prev = new_catalog
            .register_schema(schema_name.clone(), schema.clone())
            .expect("Register schema should not fail"));
        assert!(components
            .catalog_manager
            .register_table(reg_req)
            .await
            .expect("Register schema should not fail");
        assert!(prev.is_none());
        assert!(catalog_manager.register_table(reg_req).await.unwrap());
            .unwrap());

        assert_eq!(
            HashSet::from([schema_name.clone()]),
            new_catalog
                .schema_names()
            components
                .catalog_manager
                .schema_names(&catalog_name)
                .await
                .unwrap()
                .into_iter()
                .collect()
        )
    }

    #[tokio::test]
    async fn test_register_table_before_and_after_region_alive_keeper_started() {
        let components = prepare_components(42).await;
        let catalog_manager = &components.catalog_manager;
        let region_alive_keepers = &components.region_alive_keepers;

        let table_before = TableIdent {
            catalog: DEFAULT_CATALOG_NAME.to_string(),
            schema: DEFAULT_SCHEMA_NAME.to_string(),
            table: "table_before".to_string(),
            table_id: 1,
            engine: MITO_ENGINE.to_string(),
        };
        let request = RegisterTableRequest {
            catalog: table_before.catalog.clone(),
            schema: table_before.schema.clone(),
            table_name: table_before.table.clone(),
            table_id: table_before.table_id,
            table: Arc::new(EmptyTable::new(CreateTableRequest {
                id: table_before.table_id,
                catalog_name: table_before.catalog.clone(),
                schema_name: table_before.schema.clone(),
                table_name: table_before.table.clone(),
                desc: None,
                schema: RawSchema::new(vec![]),
                region_numbers: vec![0],
                primary_key_indices: vec![],
                create_if_not_exists: false,
                table_options: Default::default(),
                engine: MITO_ENGINE.to_string(),
            })),
        };
        assert!(catalog_manager.register_table(request).await.unwrap());

        let keeper = region_alive_keepers
            .find_keeper(&table_before)
            .await
            .unwrap();
        let deadline = keeper.deadline(0).await.unwrap();
        let far_future = Instant::now() + Duration::from_secs(86400 * 365 * 29);
        // assert region alive countdown is not started
        assert!(deadline > far_future);

        region_alive_keepers.start().await;

        let table_after = TableIdent {
            catalog: DEFAULT_CATALOG_NAME.to_string(),
            schema: DEFAULT_SCHEMA_NAME.to_string(),
            table: "table_after".to_string(),
            table_id: 2,
            engine: MITO_ENGINE.to_string(),
        };
        let request = RegisterTableRequest {
            catalog: table_after.catalog.clone(),
            schema: table_after.schema.clone(),
            table_name: table_after.table.clone(),
            table_id: table_after.table_id,
            table: Arc::new(EmptyTable::new(CreateTableRequest {
                id: table_after.table_id,
                catalog_name: table_after.catalog.clone(),
                schema_name: table_after.schema.clone(),
                table_name: table_after.table.clone(),
                desc: None,
                schema: RawSchema::new(vec![]),
                region_numbers: vec![0],
                primary_key_indices: vec![],
                create_if_not_exists: false,
                table_options: Default::default(),
                engine: MITO_ENGINE.to_string(),
            })),
        };
        assert!(catalog_manager.register_table(request).await.unwrap());

        let keeper = region_alive_keepers
            .find_keeper(&table_after)
            .await
            .unwrap();
        let deadline = keeper.deadline(0).await.unwrap();
        // assert countdown is started for the table registered after [RegionAliveKeepers] started
        assert!(deadline <= Instant::now() + Duration::from_secs(20));

        let keeper = region_alive_keepers
            .find_keeper(&table_before)
            .await
            .unwrap();
        let deadline = keeper.deadline(0).await.unwrap();
        // assert countdown is started for the table registered before [RegionAliveKeepers] started, too
        assert!(deadline <= Instant::now() + Duration::from_secs(20));
    }
}

@@ -36,6 +36,7 @@ tonic.workspace = true

[dev-dependencies]
datanode = { path = "../datanode" }
derive-new = "0.5"
substrait = { path = "../common/substrait" }
tracing = "0.1"
tracing-subscriber = { version = "0.3", features = ["env-filter"] }

src/client/examples/stream_ingest.rs (new file, 183 lines)
@@ -0,0 +1,183 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use api::v1::column::*;
use api::v1::*;
use client::{Client, Database, DEFAULT_SCHEMA_NAME};
use derive_new::new;
use tracing::{error, info};

fn main() {
    tracing::subscriber::set_global_default(tracing_subscriber::FmtSubscriber::builder().finish())
        .unwrap();

    run();
}

#[tokio::main]
async fn run() {
    let greptimedb_endpoint =
        std::env::var("GREPTIMEDB_ENDPOINT").unwrap_or_else(|_| "localhost:4001".to_owned());

    let greptimedb_dbname =
        std::env::var("GREPTIMEDB_DBNAME").unwrap_or_else(|_| DEFAULT_SCHEMA_NAME.to_owned());

    let grpc_client = Client::with_urls(vec![&greptimedb_endpoint]);

    let client = Database::new_with_dbname(greptimedb_dbname, grpc_client);

    let stream_inserter = client.streaming_inserter().unwrap();

    if let Err(e) = stream_inserter
        .insert(vec![to_insert_request(weather_records_1())])
        .await
    {
        error!("Error: {e}");
    }

    if let Err(e) = stream_inserter
        .insert(vec![to_insert_request(weather_records_2())])
        .await
    {
        error!("Error: {e}");
    }

    let result = stream_inserter.finish().await;

    match result {
        Ok(rows) => {
            info!("Rows written: {rows}");
        }
        Err(e) => {
            error!("Error: {e}");
        }
    };
}

#[derive(new)]
struct WeatherRecord {
    timestamp_millis: i64,
    collector: String,
    temperature: f32,
    humidity: i32,
}

fn weather_records_1() -> Vec<WeatherRecord> {
    vec![
        WeatherRecord::new(1686109527000, "c1".to_owned(), 26.4, 15),
        WeatherRecord::new(1686023127000, "c1".to_owned(), 29.3, 20),
        WeatherRecord::new(1685936727000, "c1".to_owned(), 31.8, 13),
        WeatherRecord::new(1686109527000, "c2".to_owned(), 20.4, 67),
        WeatherRecord::new(1686023127000, "c2".to_owned(), 18.0, 74),
        WeatherRecord::new(1685936727000, "c2".to_owned(), 19.2, 81),
    ]
}

fn weather_records_2() -> Vec<WeatherRecord> {
    vec![
        WeatherRecord::new(1686109527001, "c3".to_owned(), 26.4, 15),
        WeatherRecord::new(1686023127002, "c3".to_owned(), 29.3, 20),
        WeatherRecord::new(1685936727003, "c3".to_owned(), 31.8, 13),
        WeatherRecord::new(1686109527004, "c4".to_owned(), 20.4, 67),
        WeatherRecord::new(1686023127005, "c4".to_owned(), 18.0, 74),
        WeatherRecord::new(1685936727006, "c4".to_owned(), 19.2, 81),
    ]
}

/// This function generates some sample data and bundles it into an
/// `InsertRequest`.
///
/// Data structure:
///
/// - `ts`: a timestamp column
/// - `collector`: a tag column
/// - `temperature`: a value field of f32
/// - `humidity`: a value field of i32
///
fn to_insert_request(records: Vec<WeatherRecord>) -> InsertRequest {
    // count the rows before consuming the records
    let rows = records.len();

    // transpose records into columns
    let (timestamp_millis, collectors, temp, humidity) = records.into_iter().fold(
        (
            Vec::with_capacity(rows),
            Vec::with_capacity(rows),
            Vec::with_capacity(rows),
            Vec::with_capacity(rows),
        ),
        |mut acc, rec| {
            acc.0.push(rec.timestamp_millis);
            acc.1.push(rec.collector);
            acc.2.push(rec.temperature);
            acc.3.push(rec.humidity);

            acc
        },
    );

    let columns = vec![
        // timestamp column: `ts`
        Column {
            column_name: "ts".to_owned(),
            values: Some(column::Values {
                ts_millisecond_values: timestamp_millis,
                ..Default::default()
            }),
            semantic_type: SemanticType::Timestamp as i32,
            datatype: ColumnDataType::TimestampMillisecond as i32,
            ..Default::default()
        },
        // tag column: collectors
        Column {
            column_name: "collector".to_owned(),
            values: Some(column::Values {
                string_values: collectors.into_iter().collect(),
                ..Default::default()
            }),
            semantic_type: SemanticType::Tag as i32,
            datatype: ColumnDataType::String as i32,
            ..Default::default()
        },
        // field column: temperature
        Column {
            column_name: "temperature".to_owned(),
            values: Some(column::Values {
                f32_values: temp,
                ..Default::default()
            }),
            semantic_type: SemanticType::Field as i32,
            datatype: ColumnDataType::Float32 as i32,
            ..Default::default()
        },
        // field column: humidity
        Column {
            column_name: "humidity".to_owned(),
            values: Some(column::Values {
                i32_values: humidity,
                ..Default::default()
            }),
            semantic_type: SemanticType::Field as i32,
            datatype: ColumnDataType::Int32 as i32,
            ..Default::default()
        },
    ];

    InsertRequest {
        table_name: "weather_demo".to_owned(),
        columns,
        row_count: rows as u32,
        ..Default::default()
    }
}
|
||||
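For context, a hedged sketch of how this request might be submitted through the client crate. `WeatherRecord::new` is assumed to be a plain field constructor, the frontend address is hypothetical, and `Database::insert` is assumed (per the diff further below) to return the affected-row count:

    use api::v1::InsertRequests;
    use client::{Client, Database, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};

    #[tokio::main]
    async fn main() {
        // Connect to a (hypothetical) frontend gRPC endpoint.
        let grpc_client = Client::with_urls(vec!["127.0.0.1:4001"]);
        let db = Database::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, grpc_client);

        // Bundle the demo rows and submit them in one call.
        let requests = InsertRequests {
            inserts: vec![to_insert_request(weather_records_1())],
        };
        let affected_rows = db.insert(requests).await.expect("insert failed");
        println!("affected rows: {affected_rows}");
    }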
@@ -165,7 +165,7 @@ impl Client {
    pub async fn health_check(&self) -> Result<()> {
        let (_, channel) = self.find_channel()?;
        let mut client = HealthCheckClient::new(channel);
        client.health_check(HealthCheckRequest {}).await?;
        let _ = client.health_check(HealthCheckRequest {}).await?;
        Ok(())
    }
}

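A minimal usage sketch of this API, inside some async fn returning `client::Result<()>` (the address is illustrative):

    // Probe the frontend before issuing real traffic; transport and
    // server-side failures both surface here as Err.
    let client = Client::with_urls(vec!["127.0.0.1:4001"]);
    client.health_check().await?;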
@@ -13,12 +13,10 @@
// limitations under the License.

use std::fmt::{Debug, Formatter};
use std::sync::{Arc, Mutex};
use std::time::Duration;

use common_grpc::channel_manager::{ChannelConfig, ChannelManager};
use common_meta::peer::Peer;
use common_telemetry::info;
use moka::future::{Cache, CacheBuilder};

use crate::Client;
@@ -26,22 +24,11 @@ use crate::Client;
pub struct DatanodeClients {
    channel_manager: ChannelManager,
    clients: Cache<Peer, Client>,
    started: Arc<Mutex<bool>>,
}

impl Default for DatanodeClients {
    fn default() -> Self {
        // TODO(LFC): Make this channel config configurable.
        let config = ChannelConfig::new().timeout(Duration::from_secs(8));

        Self {
            channel_manager: ChannelManager::with_config(config),
            clients: CacheBuilder::new(1024)
                .time_to_live(Duration::from_secs(30 * 60))
                .time_to_idle(Duration::from_secs(5 * 60))
                .build(),
            started: Arc::new(Mutex::new(false)),
        }
        Self::new(ChannelConfig::new())
    }
}

@@ -54,16 +41,14 @@ impl Debug for DatanodeClients {
}

impl DatanodeClients {
    pub fn start(&self) {
        let mut started = self.started.lock().unwrap();
        if *started {
            return;
    pub fn new(config: ChannelConfig) -> Self {
        Self {
            channel_manager: ChannelManager::with_config(config),
            clients: CacheBuilder::new(1024)
                .time_to_live(Duration::from_secs(30 * 60))
                .time_to_idle(Duration::from_secs(5 * 60))
                .build(),
        }

        self.channel_manager.start_channel_recycle();

        info!("Datanode clients manager is started!");
        *started = true;
    }

    pub async fn get_client(&self, datanode: &Peer) -> Client {

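A short sketch of the new constructor; the 8-second timeout below mirrors the value the old `Default` hard-coded:

    use std::time::Duration;

    use client::client_manager::DatanodeClients;
    use common_grpc::channel_manager::ChannelConfig;

    // The channel config is now supplied by the caller; Default simply
    // delegates to DatanodeClients::new(ChannelConfig::new()).
    let clients = DatanodeClients::new(ChannelConfig::new().timeout(Duration::from_secs(8)));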
@@ -17,26 +17,23 @@ use api::v1::ddl_request::Expr as DdlExpr;
use api::v1::greptime_request::Request;
use api::v1::query_request::Query;
use api::v1::{
    greptime_response, AffectedRows, AlterExpr, AuthHeader, CreateTableExpr, DdlRequest,
    DeleteRequest, DropTableExpr, FlushTableExpr, GreptimeRequest, InsertRequests, PromRangeQuery,
    QueryRequest, RequestHeader,
    greptime_response, AffectedRows, AlterExpr, AuthHeader, CompactTableExpr, CreateTableExpr,
    DdlRequest, DeleteRequest, DropTableExpr, FlushTableExpr, GreptimeRequest, InsertRequests,
    PromRangeQuery, QueryRequest, RequestHeader,
};
use arrow_flight::{FlightData, Ticket};
use common_error::prelude::*;
use common_error::ext::{BoxedError, ErrorExt};
use common_grpc::flight::{flight_messages_to_recordbatches, FlightDecoder, FlightMessage};
use common_query::Output;
use common_telemetry::{logging, timer};
use futures_util::{TryFutureExt, TryStreamExt};
use prost::Message;
use snafu::{ensure, ResultExt};
use tokio::sync::mpsc::Sender;
use tokio::sync::{mpsc, OnceCell};
use tokio_stream::wrappers::ReceiverStream;
use snafu::{ensure, OptionExt, ResultExt};

use crate::error::{
    ConvertFlightDataSnafu, IllegalDatabaseResponseSnafu, IllegalFlightMessagesSnafu,
};
use crate::{error, metrics, Client, Result};
use crate::{error, metrics, Client, Result, StreamInserter};

#[derive(Clone, Debug, Default)]
pub struct Database {
@@ -50,7 +47,6 @@ pub struct Database {
    dbname: String,

    client: Client,
    streaming_client: OnceCell<Sender<GreptimeRequest>>,
    ctx: FlightContext,
}

@@ -62,7 +58,6 @@ impl Database {
            schema: schema.into(),
            dbname: "".to_string(),
            client,
            streaming_client: OnceCell::new(),
            ctx: FlightContext::default(),
        }
    }
@@ -80,7 +75,6 @@ impl Database {
            schema: "".to_string(),
            dbname: dbname.into(),
            client,
            streaming_client: OnceCell::new(),
            ctx: FlightContext::default(),
        }
    }
@@ -120,20 +114,24 @@ impl Database {
        self.handle(Request::Inserts(requests)).await
    }

    pub async fn insert_to_stream(&self, requests: InsertRequests) -> Result<()> {
        let streaming_client = self
            .streaming_client
            .get_or_try_init(|| self.client_stream())
            .await?;
    pub fn streaming_inserter(&self) -> Result<StreamInserter> {
        self.streaming_inserter_with_channel_size(65536)
    }

        let request = self.to_rpc_request(Request::Inserts(requests));
    pub fn streaming_inserter_with_channel_size(
        &self,
        channel_size: usize,
    ) -> Result<StreamInserter> {
        let client = self.client.make_database_client()?.inner;

        streaming_client.send(request).await.map_err(|e| {
            error::ClientStreamingSnafu {
                err_msg: e.to_string(),
            }
            .build()
        })
        let stream_inserter = StreamInserter::new(
            client,
            self.dbname().to_string(),
            self.ctx.auth_header.clone(),
            channel_size,
        );

        Ok(stream_inserter)
    }

    pub async fn delete(&self, request: DeleteRequest) -> Result<u32> {
@@ -169,14 +167,6 @@ impl Database {
        }
    }

    async fn client_stream(&self) -> Result<Sender<GreptimeRequest>> {
        let mut client = self.client.make_database_client()?.inner;
        let (sender, receiver) = mpsc::channel::<GreptimeRequest>(65536);
        let receiver = ReceiverStream::new(receiver);
        client.handle_requests(receiver).await?;
        Ok(sender)
    }

    pub async fn sql(&self, sql: &str) -> Result<Output> {
        let _timer = timer!(metrics::METRIC_GRPC_SQL);
        self.do_get(Request::Query(QueryRequest {
@@ -244,6 +234,14 @@ impl Database {
        .await
    }

    pub async fn compact_table(&self, expr: CompactTableExpr) -> Result<Output> {
        let _timer = timer!(metrics::METRIC_GRPC_COMPACT_TABLE);
        self.do_get(Request::Ddl(DdlRequest {
            expr: Some(DdlExpr::CompactTable(expr)),
        }))
        .await
    }

    async fn do_get(&self, request: Request) -> Result<Output> {
        // FIXME(paomian): some labels should be added for metrics
        let _timer = timer!(metrics::METRIC_GRPC_DO_GET);
@@ -254,7 +252,6 @@ impl Database {

        let mut client = self.client.make_flight_client()?;

        // TODO(LFC): Streaming get flight data.
        let flight_data: Vec<FlightData> = client
            .mut_inner()
            .do_get(request)

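A hedged sketch of invoking the new DDL entry point; the `table_name` field on `CompactTableExpr` is an assumption inferred from the surrounding table expressions:

    use api::v1::CompactTableExpr;

    // Trigger a manual compaction of one table; unspecified fields keep
    // their protobuf defaults.
    let output = db
        .compact_table(CompactTableExpr {
            table_name: "weather_demo".to_string(),
            ..Default::default()
        })
        .await?;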
@@ -15,8 +15,10 @@
use std::any::Any;
use std::str::FromStr;

use common_error::prelude::*;
use snafu::Location;
use common_error::ext::{BoxedError, ErrorExt};
use common_error::status_code::StatusCode;
use common_error::{INNER_ERROR_CODE, INNER_ERROR_MSG};
use snafu::{Location, Snafu};
use tonic::{Code, Status};

#[derive(Debug, Snafu)]
@@ -34,13 +36,13 @@ pub enum Error {

    #[snafu(display("Failed to convert FlightData, source: {}", source))]
    ConvertFlightData {
        #[snafu(backtrace)]
        location: Location,
        source: common_grpc::Error,
    },

    #[snafu(display("Column datatype error, source: {}", source))]
    ColumnDataType {
        #[snafu(backtrace)]
        location: Location,
        source: api::error::Error,
    },

@@ -57,7 +59,7 @@ pub enum Error {
    ))]
    CreateChannel {
        addr: String,
        #[snafu(backtrace)]
        location: Location,
        source: common_grpc::error::Error,
    },

@@ -85,7 +87,7 @@ impl ErrorExt for Error {

            Error::Server { code, .. } => *code,
            Error::FlightGet { source, .. } => source.status_code(),
            Error::CreateChannel { source, .. } | Error::ConvertFlightData { source } => {
            Error::CreateChannel { source, .. } | Error::ConvertFlightData { source, .. } => {
                source.status_code()
            }
            Error::IllegalGrpcClientState { .. } => StatusCode::Unexpected,

@@ -15,9 +15,10 @@
mod client;
pub mod client_manager;
mod database;
mod error;
pub mod error;
pub mod load_balance;
mod metrics;
mod stream_insert;

pub use api;
pub use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
@@ -25,3 +26,4 @@ pub use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
pub use self::client::Client;
pub use self::database::Database;
pub use self::error::{Error, Result};
pub use self::stream_insert::StreamInserter;

@@ -60,7 +60,7 @@ mod tests {
        let random = Random;
        for _ in 0..100 {
            let peer = random.get_peer(&peers).unwrap();
            all.contains(peer);
            assert!(all.contains(peer));
        }
    }
}

@@ -22,4 +22,5 @@ pub const METRIC_GRPC_LOGICAL_PLAN: &str = "grpc.logical_plan";
pub const METRIC_GRPC_ALTER: &str = "grpc.alter";
pub const METRIC_GRPC_DROP_TABLE: &str = "grpc.drop_table";
pub const METRIC_GRPC_FLUSH_TABLE: &str = "grpc.flush_table";
pub const METRIC_GRPC_COMPACT_TABLE: &str = "grpc.compact_table";
pub const METRIC_GRPC_DO_GET: &str = "grpc.do_get";

src/client/src/stream_insert.rs (new file, 115 lines)
@@ -0,0 +1,115 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use api::v1::greptime_database_client::GreptimeDatabaseClient;
use api::v1::greptime_request::Request;
use api::v1::{
    greptime_response, AffectedRows, AuthHeader, GreptimeRequest, GreptimeResponse, InsertRequest,
    InsertRequests, RequestHeader,
};
use snafu::OptionExt;
use tokio::sync::mpsc;
use tokio::task::JoinHandle;
use tokio_stream::wrappers::ReceiverStream;
use tonic::transport::Channel;
use tonic::{Response, Status};

use crate::error::{self, IllegalDatabaseResponseSnafu, Result};

/// A structure that provides methods for streaming data inserts.
///
/// A [`StreamInserter`] cannot be constructed directly (its `new` is
/// crate-private). Obtain one from a [`Database`] instead:
///
/// ```ignore
/// let grpc_client = Client::with_urls(vec!["127.0.0.1:4002"]);
/// let client = Database::new_with_dbname("db_name", grpc_client);
/// let stream_inserter = client.streaming_inserter().unwrap();
/// ```
///
/// If you want to see a concrete usage example, please see
/// [stream_inserter.rs](https://github.com/GreptimeTeam/greptimedb/blob/develop/src/client/examples/stream_ingest.rs).
pub struct StreamInserter {
    sender: mpsc::Sender<GreptimeRequest>,

    auth_header: Option<AuthHeader>,

    dbname: String,

    join: JoinHandle<std::result::Result<Response<GreptimeResponse>, Status>>,
}

impl StreamInserter {
    pub(crate) fn new(
        mut client: GreptimeDatabaseClient<Channel>,
        dbname: String,
        auth_header: Option<AuthHeader>,
        channel_size: usize,
    ) -> StreamInserter {
        let (send, recv) = tokio::sync::mpsc::channel(channel_size);

        let join: JoinHandle<std::result::Result<Response<GreptimeResponse>, Status>> =
            tokio::spawn(async move {
                let recv_stream = ReceiverStream::new(recv);
                client.handle_requests(recv_stream).await
            });

        StreamInserter {
            sender: send,
            auth_header,
            dbname,
            join,
        }
    }

    pub async fn insert(&self, requests: Vec<InsertRequest>) -> Result<()> {
        let inserts = InsertRequests { inserts: requests };
        let request = self.to_rpc_request(Request::Inserts(inserts));

        self.sender.send(request).await.map_err(|e| {
            error::ClientStreamingSnafu {
                err_msg: e.to_string(),
            }
            .build()
        })
    }

    pub async fn finish(self) -> Result<u32> {
        drop(self.sender);

        let response = self.join.await.unwrap()?;

        let response = response
            .into_inner()
            .response
            .context(IllegalDatabaseResponseSnafu {
                err_msg: "GreptimeResponse is empty",
            })?;

        let greptime_response::Response::AffectedRows(AffectedRows { value }) = response;

        Ok(value)
    }

    fn to_rpc_request(&self, request: Request) -> GreptimeRequest {
        GreptimeRequest {
            header: Some(RequestHeader {
                authorization: self.auth_header.clone(),
                dbname: self.dbname.clone(),
                ..Default::default()
            }),
            request: Some(request),
        }
    }
}
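Putting the pieces together, a hedged end-to-end sketch of the streaming path (addresses, database name, and table data are illustrative):

    let grpc_client = Client::with_urls(vec!["127.0.0.1:4002"]);
    let db = Database::new_with_dbname("public", grpc_client);

    // Open the stream once, push any number of batches, then close it to
    // collect the total affected-row count from the server.
    let inserter = db.streaming_inserter()?;
    inserter.insert(vec![to_insert_request(weather_records_1())]).await?;
    inserter.insert(vec![to_insert_request(weather_records_2())]).await?;
    let affected_rows = inserter.finish().await?;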
@@ -10,8 +10,9 @@ name = "greptime"
path = "src/bin/greptime.rs"

[features]
mem-prof = ["tikv-jemallocator", "tikv-jemalloc-ctl"]
default = ["metrics-process"]
tokio-console = ["common-telemetry/tokio-console"]
metrics-process = ["servers/metrics-process"]

[dependencies]
anymap = "1.0.0-beta.2"
@@ -21,6 +22,7 @@ client = { path = "../client" }
common-base = { path = "../common/base" }
common-error = { path = "../common/error" }
common-query = { path = "../common/query" }
common-meta = { path = "../common/meta" }
common-recordbatch = { path = "../common/recordbatch" }
common-telemetry = { path = "../common/telemetry", features = [
    "deadlock_detection",
@@ -42,8 +44,7 @@ servers = { path = "../servers" }
session = { path = "../session" }
snafu.workspace = true
substrait = { path = "../common/substrait" }
tikv-jemalloc-ctl = { version = "0.5", optional = true }
tikv-jemallocator = { version = "0.5", optional = true }
tikv-jemallocator = "0.5"
tokio.workspace = true

[dev-dependencies]
@@ -51,7 +52,7 @@ common-test-util = { path = "../common/test-util" }
rexpect = "0.5"
temp-env = "0.3"
serde.workspace = true
toml = "0.5"
toml.workspace = true

[build-dependencies]
build-data = "0.1.3"
build-data = "0.1.4"

@@ -180,15 +180,19 @@ fn full_version() -> &'static str {
    )
}

#[cfg(feature = "mem-prof")]
fn log_env_flags() {
    info!("command line arguments");
    for argument in std::env::args() {
        info!("argument: {}", argument);
    }
}

#[global_allocator]
static ALLOC: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc;

#[tokio::main]
async fn main() -> Result<()> {
    let cmd = Command::parse();
    // TODO(dennis):
    // 1. adds ip/port to app
    let app_name = &cmd.subcmd.to_string();

    let opts = cmd.load_options()?;
@@ -205,6 +209,14 @@ async fn main() -> Result<()> {
    // Report app version as gauge.
    gauge!("app_version", 1.0, "short_version" => short_version(), "version" => full_version());

    // Log version and argument flags.
    info!(
        "short_version: {}, full_version: {}",
        short_version(),
        full_version()
    );
    log_env_flags();

    let mut app = cmd.build(opts).await?;

    tokio::select! {

@@ -19,7 +19,9 @@ use std::time::Instant;
use catalog::remote::CachedMetaKvBackend;
use client::client_manager::DatanodeClients;
use client::{Client, Database, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_error::prelude::ErrorExt;
use common_base::Plugins;
use common_error::ext::ErrorExt;
use common_meta::key::TableMetadataManager;
use common_query::Output;
use common_recordbatch::RecordBatches;
use common_telemetry::logging;
@@ -107,7 +109,7 @@ impl Repl {
            Ok(ref line) => {
                let request = line.trim();

                self.rl.add_history_entry(request.to_string());
                let _ = self.rl.add_history_entry(request.to_string());

                request.try_into()
            }
@@ -136,7 +138,7 @@ impl Repl {
                }
            }
            ReplCommand::Sql { sql } => {
                self.execute_sql(sql).await;
                let _ = self.execute_sql(sql).await;
            }
            ReplCommand::Exit => {
                return Ok(());
@@ -262,17 +264,19 @@ async fn create_query_engine(meta_addr: &str) -> Result<DatafusionQueryEngine> {

    let catalog_list = Arc::new(FrontendCatalogManager::new(
        cached_meta_backend.clone(),
        cached_meta_backend,
        cached_meta_backend.clone(),
        partition_manager,
        datanode_clients,
        Arc::new(TableMetadataManager::new(cached_meta_backend)),
    ));
    let plugins: Arc<Plugins> = Default::default();
    let state = Arc::new(QueryEngineState::new(
        catalog_list,
        false,
        None,
        None,
        Default::default(),
        plugins.clone(),
    ));

    Ok(DatafusionQueryEngine::new(state))
    Ok(DatafusionQueryEngine::new(state, plugins))
}

@@ -84,8 +84,6 @@ struct StartCommand {
    rpc_addr: Option<String>,
    #[clap(long)]
    rpc_hostname: Option<String>,
    #[clap(long)]
    mysql_addr: Option<String>,
    #[clap(long, multiple = true, value_delimiter = ',')]
    metasrv_addr: Option<Vec<String>>,
    #[clap(short, long)]
@@ -126,10 +124,6 @@ impl StartCommand {
            opts.rpc_hostname = self.rpc_hostname.clone();
        }

        if let Some(addr) = &self.mysql_addr {
            opts.mysql_addr = addr.clone();
        }

        if let Some(node_id) = self.node_id {
            opts.node_id = Some(node_id);
        }
@@ -176,7 +170,9 @@ impl StartCommand {
        logging::info!("Datanode start command: {:#?}", self);
        logging::info!("Datanode options: {:#?}", opts);

        let datanode = Datanode::new(opts).await.context(StartDatanodeSnafu)?;
        let datanode = Datanode::new(opts, Default::default())
            .await
            .context(StartDatanodeSnafu)?;

        Ok(Instance { datanode })
    }
@@ -205,13 +201,12 @@ mod tests {
        rpc_addr = "127.0.0.1:3001"
        rpc_hostname = "127.0.0.1"
        rpc_runtime_size = 8
        mysql_addr = "127.0.0.1:4406"
        mysql_runtime_size = 2

        [meta_client_options]
        metasrv_addrs = ["127.0.0.1:3002"]
        timeout_millis = 3000
        connect_timeout_millis = 5000
        ddl_timeout_millis = 10000
        tcp_nodelay = true

        [wal]
@@ -252,8 +247,6 @@ mod tests {
            cmd.load_options(TopLevelOptions::default()).unwrap() else { unreachable!() };

        assert_eq!("127.0.0.1:3001".to_string(), options.rpc_addr);
        assert_eq!("127.0.0.1:4406".to_string(), options.mysql_addr);
        assert_eq!(2, options.mysql_runtime_size);
        assert_eq!(Some(42), options.node_id);

        assert_eq!("/other/wal", options.wal.dir.unwrap());
@@ -267,10 +260,12 @@ mod tests {
            timeout_millis,
            connect_timeout_millis,
            tcp_nodelay,
            ddl_timeout_millis,
        } = options.meta_client_options.unwrap();

        assert_eq!(vec!["127.0.0.1:3002".to_string()], metasrv_addr);
        assert_eq!(5000, connect_timeout_millis);
        assert_eq!(10000, ddl_timeout_millis);
        assert_eq!(3000, timeout_millis);
        assert!(tcp_nodelay);

@@ -281,6 +276,7 @@ mod tests {
            ObjectStoreConfig::S3 { .. } => unreachable!(),
            ObjectStoreConfig::Oss { .. } => unreachable!(),
            ObjectStoreConfig::Azblob { .. } => unreachable!(),
            ObjectStoreConfig::Gcs { .. } => unreachable!(),
        };

        assert_eq!(
@@ -334,12 +330,12 @@ mod tests {
            .is_err());

        // Providing node_id but leaving metasrv_addr absent is ok since metasrv_addr has a default value
        (StartCommand {
        assert!((StartCommand {
            node_id: Some(42),
            ..Default::default()
        })
        .load_options(TopLevelOptions::default())
        .unwrap();
        .is_ok());
    }

    #[test]
@@ -368,8 +364,6 @@ mod tests {
        rpc_addr = "127.0.0.1:3001"
        rpc_hostname = "127.0.0.1"
        rpc_runtime_size = 8
        mysql_addr = "127.0.0.1:4406"
        mysql_runtime_size = 2

        [meta_client_options]
        timeout_millis = 3000

@@ -14,53 +14,54 @@

use std::any::Any;

use common_error::prelude::*;
use common_error::ext::ErrorExt;
use common_error::status_code::StatusCode;
use config::ConfigError;
use rustyline::error::ReadlineError;
use snafu::Location;
use snafu::{Location, Snafu};

#[derive(Debug, Snafu)]
#[snafu(visibility(pub))]
pub enum Error {
    #[snafu(display("Failed to start datanode, source: {}", source))]
    StartDatanode {
        #[snafu(backtrace)]
        location: Location,
        source: datanode::error::Error,
    },

    #[snafu(display("Failed to shutdown datanode, source: {}", source))]
    ShutdownDatanode {
        #[snafu(backtrace)]
        location: Location,
        source: datanode::error::Error,
    },

    #[snafu(display("Failed to start frontend, source: {}", source))]
    StartFrontend {
        #[snafu(backtrace)]
        location: Location,
        source: frontend::error::Error,
    },

    #[snafu(display("Failed to shutdown frontend, source: {}", source))]
    ShutdownFrontend {
        #[snafu(backtrace)]
        location: Location,
        source: frontend::error::Error,
    },

    #[snafu(display("Failed to build meta server, source: {}", source))]
    BuildMetaServer {
        #[snafu(backtrace)]
        location: Location,
        source: meta_srv::error::Error,
    },

    #[snafu(display("Failed to start meta server, source: {}", source))]
    StartMetaServer {
        #[snafu(backtrace)]
        location: Location,
        source: meta_srv::error::Error,
    },

    #[snafu(display("Failed to shutdown meta server, source: {}", source))]
    ShutdownMetaServer {
        #[snafu(backtrace)]
        location: Location,
        source: meta_srv::error::Error,
    },

@@ -72,14 +73,14 @@ pub enum Error {

    #[snafu(display("Illegal auth config: {}", source))]
    IllegalAuthConfig {
        #[snafu(backtrace)]
        location: Location,
        source: servers::auth::Error,
    },

    #[snafu(display("Unsupported selector type, {} source: {}", selector_type, source))]
    UnsupportedSelectorType {
        selector_type: String,
        #[snafu(backtrace)]
        location: Location,
        source: meta_srv::error::Error,
    },

@@ -101,44 +102,44 @@ pub enum Error {
    #[snafu(display("Failed to request database, sql: {sql}, source: {source}"))]
    RequestDatabase {
        sql: String,
        #[snafu(backtrace)]
        location: Location,
        source: client::Error,
    },

    #[snafu(display("Failed to collect RecordBatches, source: {source}"))]
    CollectRecordBatches {
        #[snafu(backtrace)]
        location: Location,
        source: common_recordbatch::error::Error,
    },

    #[snafu(display("Failed to pretty print Recordbatches, source: {source}"))]
    PrettyPrintRecordBatches {
        #[snafu(backtrace)]
        location: Location,
        source: common_recordbatch::error::Error,
    },

    #[snafu(display("Failed to start Meta client, source: {}", source))]
    StartMetaClient {
        #[snafu(backtrace)]
        location: Location,
        source: meta_client::error::Error,
    },

    #[snafu(display("Failed to parse SQL: {}, source: {}", sql, source))]
    ParseSql {
        sql: String,
        #[snafu(backtrace)]
        location: Location,
        source: query::error::Error,
    },

    #[snafu(display("Failed to plan statement, source: {}", source))]
    PlanStatement {
        #[snafu(backtrace)]
        location: Location,
        source: query::error::Error,
    },

    #[snafu(display("Failed to encode logical plan in substrait, source: {}", source))]
    SubstraitEncodeLogicalPlan {
        #[snafu(backtrace)]
        location: Location,
        source: substrait::error::Error,
    },

@@ -150,7 +151,7 @@ pub enum Error {

    #[snafu(display("Failed to start catalog manager, source: {}", source))]
    StartCatalogManager {
        #[snafu(backtrace)]
        location: Location,
        source: catalog::error::Error,
    },
}
@@ -160,13 +161,13 @@ pub type Result<T> = std::result::Result<T, Error>;
impl ErrorExt for Error {
    fn status_code(&self) -> StatusCode {
        match self {
            Error::StartDatanode { source } => source.status_code(),
            Error::StartFrontend { source } => source.status_code(),
            Error::ShutdownDatanode { source } => source.status_code(),
            Error::ShutdownFrontend { source } => source.status_code(),
            Error::StartMetaServer { source } => source.status_code(),
            Error::ShutdownMetaServer { source } => source.status_code(),
            Error::BuildMetaServer { source } => source.status_code(),
            Error::StartDatanode { source, .. } => source.status_code(),
            Error::StartFrontend { source, .. } => source.status_code(),
            Error::ShutdownDatanode { source, .. } => source.status_code(),
            Error::ShutdownFrontend { source, .. } => source.status_code(),
            Error::StartMetaServer { source, .. } => source.status_code(),
            Error::ShutdownMetaServer { source, .. } => source.status_code(),
            Error::BuildMetaServer { source, .. } => source.status_code(),
            Error::UnsupportedSelectorType { source, .. } => source.status_code(),
            Error::MissingConfig { .. }
            | Error::LoadLayeredConfig { .. }
@@ -175,15 +176,14 @@ impl ErrorExt for Error {
            | Error::IllegalAuthConfig { .. } => StatusCode::InvalidArguments,
            Error::ReplCreation { .. } | Error::Readline { .. } => StatusCode::Internal,
            Error::RequestDatabase { source, .. } => source.status_code(),
            Error::CollectRecordBatches { source } | Error::PrettyPrintRecordBatches { source } => {
            Error::CollectRecordBatches { source, .. }
            | Error::PrettyPrintRecordBatches { source, .. } => source.status_code(),
            Error::StartMetaClient { source, .. } => source.status_code(),
            Error::ParseSql { source, .. } | Error::PlanStatement { source, .. } => {
                source.status_code()
            }
            Error::StartMetaClient { source } => source.status_code(),
            Error::ParseSql { source, .. } | Error::PlanStatement { source } => {
                source.status_code()
            }
            Error::SubstraitEncodeLogicalPlan { source } => source.status_code(),
            Error::StartCatalogManager { source } => source.status_code(),
            Error::SubstraitEncodeLogicalPlan { source, .. } => source.status_code(),
            Error::StartCatalogManager { source, .. } => source.status_code(),
        }
    }

@@ -19,7 +19,7 @@ use common_base::Plugins;
use common_telemetry::logging;
use frontend::frontend::FrontendOptions;
use frontend::instance::{FrontendInstance, Instance as FeInstance};
use frontend::service_config::{InfluxdbOptions, PromOptions};
use frontend::service_config::{InfluxdbOptions, PrometheusOptions};
use meta_client::MetaClientOptions;
use servers::auth::UserProviderRef;
use servers::tls::{TlsMode, TlsOption};
@@ -172,7 +172,7 @@ impl StartCommand {
        }

        if let Some(addr) = &self.prom_addr {
            opts.prom_options = Some(PromOptions { addr: addr.clone() });
            opts.prometheus_options = Some(PrometheusOptions { addr: addr.clone() });
        }

        if let Some(addr) = &self.postgres_addr {
@@ -236,6 +236,7 @@ mod tests {
    use std::io::Write;
    use std::time::Duration;

    use common_base::readable_size::ReadableSize;
    use common_test_util::temp_dir::create_named_temp_file;
    use frontend::service_config::GrpcOptions;
    use servers::auth::{Identity, Password, UserProviderRef};
@@ -260,6 +261,10 @@ mod tests {
            command.load_options(TopLevelOptions::default()).unwrap() else { unreachable!() };

        assert_eq!(opts.http_options.as_ref().unwrap().addr, "127.0.0.1:1234");
        assert_eq!(
            ReadableSize::mb(64),
            opts.http_options.as_ref().unwrap().body_limit
        );
        assert_eq!(opts.mysql_options.as_ref().unwrap().addr, "127.0.0.1:5678");
        assert_eq!(
            opts.postgres_options.as_ref().unwrap().addr,
@@ -269,7 +274,10 @@ mod tests {
            opts.opentsdb_options.as_ref().unwrap().addr,
            "127.0.0.1:4321"
        );
        assert_eq!(opts.prom_options.as_ref().unwrap().addr, "127.0.0.1:4444");
        assert_eq!(
            opts.prometheus_options.as_ref().unwrap().addr,
            "127.0.0.1:4444"
        );

        let default_opts = FrontendOptions::default();
        assert_eq!(
@@ -301,6 +309,7 @@ mod tests {
        [http_options]
        addr = "127.0.0.1:4000"
        timeout = "30s"
        body_limit = "2GB"

        [logging]
        level = "debug"
@@ -326,6 +335,11 @@ mod tests {
            fe_opts.http_options.as_ref().unwrap().timeout
        );

        assert_eq!(
            ReadableSize::gb(2),
            fe_opts.http_options.as_ref().unwrap().body_limit
        );

        assert_eq!("debug", fe_opts.logging.level.as_ref().unwrap());
        assert_eq!("/tmp/greptimedb/test/logs".to_string(), fe_opts.logging.dir);
    }
@@ -339,19 +353,15 @@ mod tests {
        };

        let plugins = load_frontend_plugins(&command.user_provider);
        assert!(plugins.is_ok());
        let plugins = plugins.unwrap();
        let provider = plugins.get::<UserProviderRef>();
        assert!(provider.is_some());

        let provider = provider.unwrap();
        let provider = plugins.get::<UserProviderRef>().unwrap();
        let result = provider
            .authenticate(
                Identity::UserId("test", None),
                Password::PlainText("test".to_string().into()),
            )
            .await;
        assert!(result.is_ok());
        let _ = result.unwrap();
    }

    #[test]

@@ -93,6 +93,8 @@ struct StartCommand {
    #[clap(long)]
    use_memory_store: bool,
    #[clap(long)]
    disable_region_failover: bool,
    #[clap(long)]
    http_addr: Option<String>,
    #[clap(long)]
    http_timeout: Option<u64>,
@@ -134,9 +136,9 @@ impl StartCommand {
            .context(error::UnsupportedSelectorTypeSnafu { selector_type })?;
        }

        if self.use_memory_store {
            opts.use_memory_store = true;
        }
        opts.use_memory_store = self.use_memory_store;

        opts.disable_region_failover = self.disable_region_failover;

        if let Some(http_addr) = &self.http_addr {
            opts.http_opts.addr = http_addr.clone();

@@ -23,7 +23,7 @@ use datanode::instance::InstanceRef;
use frontend::frontend::FrontendOptions;
use frontend::instance::{FrontendInstance, Instance as FeInstance};
use frontend::service_config::{
    GrpcOptions, InfluxdbOptions, MysqlOptions, OpentsdbOptions, PostgresOptions, PromOptions,
    GrpcOptions, InfluxdbOptions, MysqlOptions, OpentsdbOptions, PostgresOptions, PromStoreOptions,
    PrometheusOptions,
};
use serde::{Deserialize, Serialize};
@@ -89,8 +89,8 @@ pub struct StandaloneOptions {
    pub postgres_options: Option<PostgresOptions>,
    pub opentsdb_options: Option<OpentsdbOptions>,
    pub influxdb_options: Option<InfluxdbOptions>,
    pub prom_store_options: Option<PromStoreOptions>,
    pub prometheus_options: Option<PrometheusOptions>,
    pub prom_options: Option<PromOptions>,
    pub wal: WalConfig,
    pub storage: StorageConfig,
    pub procedure: ProcedureConfig,
@@ -108,8 +108,8 @@ impl Default for StandaloneOptions {
            postgres_options: Some(PostgresOptions::default()),
            opentsdb_options: Some(OpentsdbOptions::default()),
            influxdb_options: Some(InfluxdbOptions::default()),
            prom_store_options: Some(PromStoreOptions::default()),
            prometheus_options: Some(PrometheusOptions::default()),
            prom_options: Some(PromOptions::default()),
            wal: WalConfig::default(),
            storage: StorageConfig::default(),
            procedure: ProcedureConfig::default(),
@@ -128,10 +128,11 @@ impl StandaloneOptions {
            postgres_options: self.postgres_options,
            opentsdb_options: self.opentsdb_options,
            influxdb_options: self.influxdb_options,
            prom_store_options: self.prom_store_options,
            prometheus_options: self.prometheus_options,
            prom_options: self.prom_options,
            meta_client_options: None,
            logging: self.logging,
            ..Default::default()
        }
    }

@@ -268,7 +269,7 @@ impl StartCommand {
        }

        if let Some(addr) = &self.prom_addr {
            opts.prom_options = Some(PromOptions { addr: addr.clone() })
            opts.prometheus_options = Some(PrometheusOptions { addr: addr.clone() })
        }

        if let Some(addr) = &self.postgres_addr {
@@ -308,7 +309,7 @@ impl StartCommand {
            fe_opts, dn_opts
        );

        let datanode = Datanode::new(dn_opts.clone())
        let datanode = Datanode::new(dn_opts.clone(), Default::default())
            .await
            .context(StartDatanodeSnafu)?;

@@ -341,6 +342,7 @@ mod tests {
    use std::io::Write;
    use std::time::Duration;

    use common_base::readable_size::ReadableSize;
    use common_test_util::temp_dir::create_named_temp_file;
    use servers::auth::{Identity, Password, UserProviderRef};
    use servers::Mode;
@@ -356,18 +358,15 @@ mod tests {
        };

        let plugins = load_frontend_plugins(&command.user_provider);
        assert!(plugins.is_ok());
        let plugins = plugins.unwrap();
        let provider = plugins.get::<UserProviderRef>();
        assert!(provider.is_some());
        let provider = provider.unwrap();
        let provider = plugins.get::<UserProviderRef>().unwrap();
        let result = provider
            .authenticate(
                Identity::UserId("test", None),
                Password::PlainText("test".to_string().into()),
            )
            .await;
        assert!(result.is_ok());
        let _ = result.unwrap();
    }

    #[test]
@@ -411,6 +410,7 @@ mod tests {
        [http_options]
        addr = "127.0.0.1:4000"
        timeout = "30s"
        body_limit = "128MB"

        [logging]
        level = "debug"
@@ -436,6 +436,10 @@ mod tests {
            Duration::from_secs(30),
            fe_opts.http_options.as_ref().unwrap().timeout
        );
        assert_eq!(
            ReadableSize::mb(128),
            fe_opts.http_options.as_ref().unwrap().body_limit
        );
        assert_eq!(
            "127.0.0.1:4001".to_string(),
            fe_opts.grpc_options.unwrap().addr
@@ -562,6 +566,10 @@ mod tests {
            opts.fe_opts.http_options.as_ref().unwrap().addr,
            "127.0.0.1:14000"
        );
        assert_eq!(
            ReadableSize::mb(64),
            opts.fe_opts.http_options.as_ref().unwrap().body_limit
        );

        // Should be default value.
        assert_eq!(

@@ -27,10 +27,10 @@ mod tests {

    impl Repl {
        fn send_line(&mut self, line: &str) {
            self.repl.send_line(line).unwrap();
            let _ = self.repl.send_line(line).unwrap();

            // read a line to consume the prompt
            self.read_line();
            let _ = self.read_line();
        }

        fn read_line(&mut self) -> String {
@@ -76,7 +76,7 @@ mod tests {
        std::thread::sleep(Duration::from_secs(3));

        let mut repl_cmd = Command::new("./greptime");
        repl_cmd.current_dir(bin_path).args([
        let _ = repl_cmd.current_dir(bin_path).args([
            "--log-level=off",
            "cli",
            "attach",
@@ -105,7 +105,7 @@ mod tests {
        test_select(repl);

        datanode.kill().unwrap();
        datanode.wait().unwrap();
        let _ = datanode.wait().unwrap();
    }

    fn test_create_database(repl: &mut Repl) {

@@ -14,4 +14,4 @@ serde = { version = "1.0", features = ["derive"] }
snafu.workspace = true

[dev-dependencies]
toml = "0.5"
toml.workspace = true

@@ -16,7 +16,7 @@ use std::any::Any;
use std::io::{Read, Write};

use bytes::{Buf, BufMut, BytesMut};
use common_error::prelude::ErrorExt;
use common_error::ext::ErrorExt;
use paste::paste;
use snafu::{ensure, Location, ResultExt, Snafu};

@@ -41,7 +41,7 @@ impl Plugins {
    }

    pub fn insert<T: 'static + Send + Sync>(&self, value: T) {
        self.lock().insert(value);
        let _ = self.lock().insert(value);
    }

    pub fn get<T: 'static + Send + Sync + Clone>(&self) -> Option<T> {

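A brief sketch of the type-keyed `Plugins` map in use; the stored type is illustrative:

    use common_base::Plugins;

    let plugins = Plugins::default();
    // insert() now explicitly discards any previous value stored under
    // the same type, which is what the `let _ =` above silences.
    plugins.insert(42u32);
    assert_eq!(plugins.get::<u32>(), Some(42));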
@@ -119,7 +119,7 @@ mod tests {
            #[test]
            fn [<test_read_write_ $num_ty _from_vec_buffer>]() {
                let mut buf = vec![];
                assert!(buf.[<write_ $num_ty _le>]($num_ty::MAX).is_ok());
                let _ = buf.[<write_ $num_ty _le>]($num_ty::MAX).unwrap();
                assert_eq!($num_ty::MAX, buf.as_slice().[<read_ $num_ty _le>]().unwrap());
            }
        }
@@ -132,7 +132,7 @@ mod tests {
    #[test]
    pub fn test_peek_write_from_vec_buffer() {
        let mut buf: Vec<u8> = vec![];
        assert!(buf.write_from_slice("hello".as_bytes()).is_ok());
        buf.write_from_slice("hello".as_bytes()).unwrap();
        let mut slice = buf.as_slice();
        assert_eq!(104, slice.peek_u8_le().unwrap());
        slice.advance_by(1);

@@ -27,6 +27,8 @@ pub const MAX_SYS_TABLE_ID: u32 = MIN_USER_TABLE_ID - 1;
pub const SYSTEM_CATALOG_TABLE_ID: u32 = 0;
/// scripts table id
pub const SCRIPTS_TABLE_ID: u32 = 1;
/// numbers table id
pub const NUMBERS_TABLE_ID: u32 = 2;

pub const MITO_ENGINE: &str = "mito";
pub const IMMUTABLE_FILE_ENGINE: &str = "file";

@@ -15,8 +15,8 @@
use std::any::Any;

use common_error::ext::ErrorExt;
use common_error::prelude::{Snafu, StatusCode};
use snafu::Location;
use common_error::status_code::StatusCode;
use snafu::{Location, Snafu};

#[derive(Debug, Snafu)]
#[snafu(visibility(pub))]

@@ -24,6 +24,7 @@ datafusion.workspace = true
derive_builder = "0.12"
futures.workspace = true
object-store = { path = "../../object-store" }
orc-rust = "0.2"
regex = "1.7"
snafu.workspace = true
tokio.workspace = true

@@ -12,24 +12,26 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use std::future::Future;

use arrow::record_batch::RecordBatch;
use async_trait::async_trait;
use datafusion::parquet::format::FileMetaData;
use object_store::Writer;
use snafu::{OptionExt, ResultExt};
use tokio::io::{AsyncWrite, AsyncWriteExt};
use tokio_util::compat::Compat;

use crate::error::{self, Result};
use crate::share_buffer::SharedBuffer;

pub struct BufferedWriter<T, U> {
    writer: T,
    /// None stands for [`BufferedWriter`] closed.
pub struct LazyBufferedWriter<T, U, F> {
    path: String,
    writer_factory: F,
    writer: Option<T>,
    /// None stands for [`LazyBufferedWriter`] closed.
    encoder: Option<U>,
    buffer: SharedBuffer,
    rows_written: usize,
    bytes_written: u64,
    flushed: bool,
    threshold: usize,
}

@@ -42,58 +44,79 @@ pub trait ArrowWriterCloser {
    async fn close(mut self) -> Result<FileMetaData>;
}

pub type DefaultBufferedWriter<E> = BufferedWriter<Compat<Writer>, E>;

impl<T: AsyncWrite + Send + Unpin, U: DfRecordBatchEncoder + ArrowWriterCloser>
    BufferedWriter<T, U>
impl<
        T: AsyncWrite + Send + Unpin,
        U: DfRecordBatchEncoder + ArrowWriterCloser,
        F: FnMut(String) -> Fut,
        Fut: Future<Output = Result<T>>,
    > LazyBufferedWriter<T, U, F>
{
    /// Closes the `LazyBufferedWriter` and flushes all data to the underlying
    /// storage, but only if any rows have been written.
    pub async fn close_with_arrow_writer(mut self) -> Result<(FileMetaData, u64)> {
        let encoder = self
            .encoder
            .take()
            .context(error::BufferedWriterClosedSnafu)?;
        let metadata = encoder.close().await?;
        let written = self.try_flush(true).await?;

        // Use `rows_written` to keep track of whether any rows have been written.
        // If no rows have been written, we can simply close the underlying
        // writer without flushing, so that no file is actually created.
        if self.rows_written != 0 {
            self.bytes_written += self.try_flush(true).await?;
        }
        // It's important to shut down! flushes all pending writes
        self.close().await?;
        Ok((metadata, written))
        self.close_inner_writer().await?;
        Ok((metadata, self.bytes_written))
    }
}

impl<T: AsyncWrite + Send + Unpin, U: DfRecordBatchEncoder> BufferedWriter<T, U> {
    pub async fn close(&mut self) -> Result<()> {
        self.writer.shutdown().await.context(error::AsyncWriteSnafu)
impl<
        T: AsyncWrite + Send + Unpin,
        U: DfRecordBatchEncoder,
        F: FnMut(String) -> Fut,
        Fut: Future<Output = Result<T>>,
    > LazyBufferedWriter<T, U, F>
{
    /// Closes the writer without flushing the buffered data.
    pub async fn close_inner_writer(&mut self) -> Result<()> {
        if let Some(writer) = &mut self.writer {
            writer.shutdown().await.context(error::AsyncWriteSnafu)?;
        }
        Ok(())
    }

    pub fn new(threshold: usize, buffer: SharedBuffer, encoder: U, writer: T) -> Self {
    pub fn new(
        threshold: usize,
        buffer: SharedBuffer,
        encoder: U,
        path: impl AsRef<str>,
        writer_factory: F,
    ) -> Self {
        Self {
            path: path.as_ref().to_string(),
            threshold,
            writer,
            encoder: Some(encoder),
            buffer,
            rows_written: 0,
            bytes_written: 0,
            flushed: false,
            writer_factory,
            writer: None,
        }
    }

    pub fn bytes_written(&self) -> u64 {
        self.bytes_written
    }

    pub async fn write(&mut self, batch: &RecordBatch) -> Result<()> {
        let encoder = self
            .encoder
            .as_mut()
            .context(error::BufferedWriterClosedSnafu)?;
        encoder.write(batch)?;
        self.try_flush(false).await?;
        self.rows_written += batch.num_rows();
        self.bytes_written += self.try_flush(false).await?;
        Ok(())
    }

    pub fn flushed(&self) -> bool {
        self.flushed
    }

    pub async fn try_flush(&mut self, all: bool) -> Result<u64> {
        let mut bytes_written: u64 = 0;

@@ -106,7 +129,8 @@ impl<T: AsyncWrite + Send + Unpin, U: DfRecordBatchEncoder> BufferedWriter<T, U>
        };
        let size = chunk.len();

        self.writer
        self.maybe_init_writer()
            .await?
            .write_all(&chunk)
            .await
            .context(error::AsyncWriteSnafu)?;
@@ -117,22 +141,27 @@
        if all {
            bytes_written += self.try_flush_all().await?;
        }

        self.flushed = bytes_written > 0;
        self.bytes_written += bytes_written;

        Ok(bytes_written)
    }

    /// Only initiates the underlying file writer once rows have been written.
    async fn maybe_init_writer(&mut self) -> Result<&mut T> {
        if let Some(ref mut writer) = self.writer {
            Ok(writer)
        } else {
            let writer = (self.writer_factory)(self.path.clone()).await?;
            Ok(self.writer.insert(writer))
        }
    }

    async fn try_flush_all(&mut self) -> Result<u64> {
        let remain = self.buffer.buffer.lock().unwrap().split();
        let size = remain.len();

        self.writer
        self.maybe_init_writer()
            .await?
            .write_all(&remain)
            .await
            .context(error::AsyncWriteSnafu)?;

        Ok(size as u64)
    }
}

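A hypothetical reduced form of the lazy-initialization pattern introduced here, stripped of the encoding and flushing concerns; the key idea is that the factory runs only on first use, so no writer (and hence no file) is ever created for an empty output:

    use std::future::Future;

    struct Lazy<T, F> {
        factory: F,
        inner: Option<T>,
    }

    impl<T, F, Fut> Lazy<T, F>
    where
        F: FnMut() -> Fut,
        Fut: Future<Output = T>,
    {
        async fn get_or_init(&mut self) -> &mut T {
            if self.inner.is_none() {
                // First use: run the factory and cache its result.
                self.inner = Some((self.factory)().await);
            }
            // Safe: just initialized above if it was None.
            self.inner.as_mut().unwrap()
        }
    }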
@@ -15,9 +15,10 @@
use std::any::Any;

use arrow_schema::ArrowError;
use common_error::prelude::*;
use common_error::ext::ErrorExt;
use common_error::status_code::StatusCode;
use datafusion::parquet::errors::ParquetError;
use snafu::Location;
use snafu::{Location, Snafu};
use url::ParseError;

#[derive(Debug, Snafu)]
@@ -54,6 +55,12 @@ pub enum Error {
        location: Location,
    },

    #[snafu(display("Failed to build orc reader, source: {}", source))]
    OrcReader {
        location: Location,
        source: orc_rust::error::Error,
    },

    #[snafu(display("Failed to read object from path: {}, source: {}", path, source))]
    ReadObject {
        path: String,
@@ -171,7 +178,8 @@ impl ErrorExt for Error {
            | ReadRecordBatch { .. }
            | WriteRecordBatch { .. }
            | EncodeRecordBatch { .. }
            | BufferedWriterClosed { .. } => StatusCode::Unexpected,
            | BufferedWriterClosed { .. }
            | OrcReader { .. } => StatusCode::Unexpected,
        }
    }

@@ -182,6 +190,7 @@ impl ErrorExt for Error {
    fn location_opt(&self) -> Option<common_error::snafu::Location> {
        use Error::*;
        match self {
            OrcReader { location, .. } => Some(*location),
            BuildBackend { location, .. } => Some(*location),
            ReadObject { location, .. } => Some(*location),
            ListObjects { location, .. } => Some(*location),

@@ -14,6 +14,7 @@

pub mod csv;
pub mod json;
pub mod orc;
pub mod parquet;
#[cfg(test)]
pub mod tests;
@@ -35,12 +36,12 @@ use datafusion::physical_plan::SendableRecordBatchStream;
use futures::StreamExt;
use object_store::ObjectStore;
use snafu::ResultExt;
use tokio_util::compat::FuturesAsyncWriteCompatExt;

use self::csv::CsvFormat;
use self::json::JsonFormat;
use self::orc::OrcFormat;
use self::parquet::ParquetFormat;
use crate::buffered_writer::{BufferedWriter, DfRecordBatchEncoder};
use crate::buffered_writer::{DfRecordBatchEncoder, LazyBufferedWriter};
use crate::compression::CompressionType;
use crate::error::{self, Result};
use crate::share_buffer::SharedBuffer;
@@ -57,6 +58,18 @@ pub enum Format {
    Csv(CsvFormat),
    Json(JsonFormat),
    Parquet(ParquetFormat),
    Orc(OrcFormat),
}

impl Format {
    pub fn suffix(&self) -> &'static str {
        match self {
            Format::Csv(_) => ".csv",
            Format::Json(_) => ".json",
            Format::Parquet(_) => ".parquet",
            &Format::Orc(_) => ".orc",
        }
    }
}

impl TryFrom<&HashMap<String, String>> for Format {
@@ -72,6 +85,7 @@ impl TryFrom<&HashMap<String, String>> for Format {
            "CSV" => Ok(Self::Csv(CsvFormat::try_from(options)?)),
            "JSON" => Ok(Self::Json(JsonFormat::try_from(options)?)),
            "PARQUET" => Ok(Self::Parquet(ParquetFormat::default())),
            "ORC" => Ok(Self::Orc(OrcFormat)),
            _ => error::UnsupportedFormatSnafu { format: &format }.fail(),
        }
    }
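A hedged sketch of selecting the new variant from table options. The option key name and the upstream case normalization are assumptions; the diff only shows the match on the value "ORC":

    use std::collections::HashMap;

    let mut options = HashMap::new();
    let _ = options.insert("format".to_string(), "ORC".to_string());
    let format = Format::try_from(&options)?;
    assert_eq!(format.suffix(), ".orc");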
@@ -181,15 +195,14 @@ pub async fn stream_to_file<T: DfRecordBatchEncoder, U: Fn(SharedBuffer) -> T>(
    threshold: usize,
    encoder_factory: U,
) -> Result<usize> {
    let writer = store
        .writer(path)
        .await
        .context(error::WriteObjectSnafu { path })?
        .compat_write();

    let buffer = SharedBuffer::with_capacity(threshold);
    let encoder = encoder_factory(buffer.clone());
    let mut writer = BufferedWriter::new(threshold, buffer, encoder, writer);
    let mut writer = LazyBufferedWriter::new(threshold, buffer, encoder, path, |path| async {
        store
            .writer(&path)
            .await
            .context(error::WriteObjectSnafu { path })
    });

    let mut rows = 0;

@@ -200,9 +213,8 @@
    }

    // Flushes all pending writes
    writer.try_flush(true).await?;

    writer.close().await?;
    let _ = writer.try_flush(true).await?;
    writer.close_inner_writer().await?;

    Ok(rows)
}

@@ -291,20 +291,20 @@ mod tests {

    #[test]
    fn test_try_from() {
        let mut map = HashMap::new();
        let map = HashMap::new();
        let format: CsvFormat = CsvFormat::try_from(&map).unwrap();

        assert_eq!(format, CsvFormat::default());

        map.insert(
            FORMAT_SCHEMA_INFER_MAX_RECORD.to_string(),
            "2000".to_string(),
        );

        map.insert(FORMAT_COMPRESSION_TYPE.to_string(), "zstd".to_string());
        map.insert(FORMAT_DELIMITER.to_string(), b'\t'.to_string());
        map.insert(FORMAT_HAS_HEADER.to_string(), "false".to_string());

        let map = HashMap::from([
            (
                FORMAT_SCHEMA_INFER_MAX_RECORD.to_string(),
                "2000".to_string(),
            ),
            (FORMAT_COMPRESSION_TYPE.to_string(), "zstd".to_string()),
            (FORMAT_DELIMITER.to_string(), b'\t'.to_string()),
            (FORMAT_HAS_HEADER.to_string(), "false".to_string()),
        ]);
        let format = CsvFormat::try_from(&map).unwrap();

        assert_eq!(

@@ -214,18 +214,18 @@ mod tests {

    #[test]
    fn test_try_from() {
        let mut map = HashMap::new();
        let map = HashMap::new();
        let format = JsonFormat::try_from(&map).unwrap();

        assert_eq!(format, JsonFormat::default());

        map.insert(
            FORMAT_SCHEMA_INFER_MAX_RECORD.to_string(),
            "2000".to_string(),
        );

        map.insert(FORMAT_COMPRESSION_TYPE.to_string(), "zstd".to_string());

        let map = HashMap::from([
            (
                FORMAT_SCHEMA_INFER_MAX_RECORD.to_string(),
                "2000".to_string(),
            ),
            (FORMAT_COMPRESSION_TYPE.to_string(), "zstd".to_string()),
        ]);
        let format = JsonFormat::try_from(&map).unwrap();

        assert_eq!(

src/common/datasource/src/file_format/orc.rs (new file, 125 lines)
@@ -0,0 +1,125 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::pin::Pin;
use std::task::{Context, Poll};

use arrow::compute::cast;
use arrow_schema::{Schema, SchemaRef};
use async_trait::async_trait;
use datafusion::arrow::record_batch::RecordBatch as DfRecordBatch;
use datafusion::error::{DataFusionError, Result as DfResult};
use datafusion::physical_plan::RecordBatchStream;
use futures::Stream;
use object_store::ObjectStore;
use orc_rust::arrow_reader::{create_arrow_schema, Cursor};
use orc_rust::async_arrow_reader::ArrowStreamReader;
pub use orc_rust::error::Error as OrcError;
use orc_rust::reader::Reader;
use snafu::ResultExt;
use tokio::io::{AsyncRead, AsyncSeek};

use crate::error::{self, Result};
use crate::file_format::FileFormat;

#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub struct OrcFormat;

pub async fn new_orc_cursor<R: AsyncRead + AsyncSeek + Unpin + Send + 'static>(
    reader: R,
) -> Result<Cursor<R>> {
    let reader = Reader::new_async(reader)
        .await
        .context(error::OrcReaderSnafu)?;
    let cursor = Cursor::root(reader).context(error::OrcReaderSnafu)?;
    Ok(cursor)
}

pub async fn new_orc_stream_reader<R: AsyncRead + AsyncSeek + Unpin + Send + 'static>(
    reader: R,
) -> Result<ArrowStreamReader<R>> {
    let cursor = new_orc_cursor(reader).await?;
    Ok(ArrowStreamReader::new(cursor, None))
}

pub async fn infer_orc_schema<R: AsyncRead + AsyncSeek + Unpin + Send + 'static>(
    reader: R,
) -> Result<Schema> {
    let cursor = new_orc_cursor(reader).await?;
    Ok(create_arrow_schema(&cursor))
}

pub struct OrcArrowStreamReaderAdapter<T: AsyncRead + AsyncSeek + Unpin + Send + 'static> {
    output_schema: SchemaRef,
    stream: ArrowStreamReader<T>,
}

impl<T: AsyncRead + AsyncSeek + Unpin + Send + 'static> OrcArrowStreamReaderAdapter<T> {
    pub fn new(output_schema: SchemaRef, stream: ArrowStreamReader<T>) -> Self {
        Self {
            stream,
            output_schema,
        }
    }
}

impl<T: AsyncRead + AsyncSeek + Unpin + Send + 'static> RecordBatchStream
    for OrcArrowStreamReaderAdapter<T>
{
    fn schema(&self) -> SchemaRef {
        self.output_schema.clone()
    }
}

impl<T: AsyncRead + AsyncSeek + Unpin + Send + 'static> Stream for OrcArrowStreamReaderAdapter<T> {
    type Item = DfResult<DfRecordBatch>;

    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        let batch = futures::ready!(Pin::new(&mut self.stream).poll_next(cx))
            .map(|r| r.map_err(|e| DataFusionError::External(Box::new(e))));

        let batch = batch.map(|b| {
            b.and_then(|b| {
                let mut columns = Vec::with_capacity(b.num_columns());
                for (idx, column) in b.columns().iter().enumerate() {
                    if column.data_type() != self.output_schema.field(idx).data_type() {
                        let output = cast(&column, self.output_schema.field(idx).data_type())?;
                        columns.push(output)
                    } else {
                        columns.push(column.clone())
                    }
                }
                let record_batch = DfRecordBatch::try_new(self.output_schema.clone(), columns)?;

                Ok(record_batch)
            })
        });

        Poll::Ready(batch)
    }
}

#[async_trait]
impl FileFormat for OrcFormat {
    async fn infer_schema(&self, store: &ObjectStore, path: &str) -> Result<Schema> {
        let reader = store
            .reader(path)
            .await
            .context(error::ReadObjectSnafu { path })?;

        let schema = infer_orc_schema(reader).await?;

        Ok(schema)
    }
}
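Not part of the diff: a minimal sketch of how the pieces above compose, assuming a hypothetical `collect_orc` helper. The object is opened twice (once for schema inference, once for streaming), mirroring how the `FileFormat` impl obtains its own reader; error adaptation is shown loosely via `DataFusionError::External`.

```rust
use std::sync::Arc;

use futures::TryStreamExt;

async fn collect_orc(store: &ObjectStore, path: &str) -> DfResult<Vec<DfRecordBatch>> {
    // First pass: infer the Arrow schema from the ORC footer.
    let reader = store
        .reader(path)
        .await
        .map_err(|e| DataFusionError::External(Box::new(e)))?;
    let schema = Arc::new(
        infer_orc_schema(reader)
            .await
            .map_err(|e| DataFusionError::External(Box::new(e)))?,
    );

    // Second pass: stream record batches; the adapter casts columns that do
    // not already match the output schema.
    let reader = store
        .reader(path)
        .await
        .map_err(|e| DataFusionError::External(Box::new(e)))?;
    let stream = new_orc_stream_reader(reader)
        .await
        .map_err(|e| DataFusionError::External(Box::new(e)))?;

    OrcArrowStreamReaderAdapter::new(schema, stream)
        .try_collect()
        .await
}
```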
@@ -20,7 +20,7 @@ use crate::error::{BuildBackendSnafu, Result};
 
 pub fn build_fs_backend(root: &str) -> Result<ObjectStore> {
     let mut builder = Fs::default();
-    builder.root(root);
+    let _ = builder.root(root);
     let object_store = ObjectStore::new(builder)
        .context(BuildBackendSnafu)?
        .finish();
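A recurring pattern in this release: fluent builder and map calls whose return values were previously ignored are now discarded explicitly with `let _ =`. This reads like preparation for denying unused results crate-wide (e.g. the `unused_results` lint), though the lint configuration change itself is not shown in this diff.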
@@ -20,7 +20,7 @@ use snafu::ResultExt;
 
 use crate::error::{self, Result};
 
-const ENDPOINT_URL: &str = "endpoint_url";
+const ENDPOINT: &str = "endpoint";
 const ACCESS_KEY_ID: &str = "access_key_id";
 const SECRET_ACCESS_KEY: &str = "secret_access_key";
 const SESSION_TOKEN: &str = "session_token";
@@ -34,28 +34,26 @@ pub fn build_s3_backend(
 ) -> Result<ObjectStore> {
     let mut builder = S3::default();
 
-    builder.root(path);
+    let _ = builder.root(path).bucket(host);
 
-    builder.bucket(host);
-
-    if let Some(endpoint) = connection.get(ENDPOINT_URL) {
-        builder.endpoint(endpoint);
+    if let Some(endpoint) = connection.get(ENDPOINT) {
+        let _ = builder.endpoint(endpoint);
     }
 
     if let Some(region) = connection.get(REGION) {
-        builder.region(region);
+        let _ = builder.region(region);
     }
 
     if let Some(key_id) = connection.get(ACCESS_KEY_ID) {
-        builder.access_key_id(key_id);
+        let _ = builder.access_key_id(key_id);
     }
 
     if let Some(key) = connection.get(SECRET_ACCESS_KEY) {
-        builder.secret_access_key(key);
+        let _ = builder.secret_access_key(key);
     }
 
     if let Some(session_token) = connection.get(SESSION_TOKEN) {
-        builder.security_token(session_token);
+        let _ = builder.security_token(session_token);
     }
 
     if let Some(enable_str) = connection.get(ENABLE_VIRTUAL_HOST_STYLE) {
@@ -69,7 +67,7 @@ pub fn build_s3_backend(
             .build()
         })?;
         if enable {
-            builder.enable_virtual_host_style();
+            let _ = builder.enable_virtual_host_style();
         }
     }
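For orientation (not in the diff), a hypothetical call site; the parameter order of `build_s3_backend` is an assumption, since its signature is truncated in the hunk above.

```rust
use std::collections::HashMap;

fn open_s3_store() -> Result<ObjectStore> {
    let mut connection = HashMap::new();
    // Keys match the constants above; the values are placeholders.
    let _ = connection.insert("endpoint".to_string(), "http://127.0.0.1:9000".to_string());
    let _ = connection.insert("access_key_id".to_string(), "<access key>".to_string());
    let _ = connection.insert("secret_access_key".to_string(), "<secret key>".to_string());

    // Assumed order: bucket host first, then the object path prefix.
    build_s3_backend("my-bucket", "/data/", &connection)
}
```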
@@ -55,7 +55,7 @@ pub fn format_schema(schema: Schema) -> Vec<String> {
 
 pub fn test_store(root: &str) -> ObjectStore {
     let mut builder = Fs::default();
-    builder.root(root);
+    let _ = builder.root(root);
 
     ObjectStore::new(builder).unwrap().finish()
 }
@@ -64,7 +64,7 @@ pub fn test_tmp_store(root: &str) -> (ObjectStore, TempDir) {
     let dir = create_temp_dir(root);
 
     let mut builder = Fs::default();
-    builder.root("/");
+    let _ = builder.root("/");
 
     (ObjectStore::new(builder).unwrap().finish(), dir)
 }
@@ -113,14 +113,14 @@ pub async fn setup_stream_to_json_test(origin_path: &str, threshold: impl Fn(usi
 
     let output_path = format!("{}/{}", dir.path().display(), "output");
 
-    stream_to_json(
+    assert!(stream_to_json(
         Box::pin(stream),
         tmp_store.clone(),
         &output_path,
         threshold(size),
     )
     .await
-    .unwrap();
+    .is_ok());
 
     let written = tmp_store.read(&output_path).await.unwrap();
     let origin = store.read(origin_path).await.unwrap();
@@ -155,14 +155,14 @@ pub async fn setup_stream_to_csv_test(origin_path: &str, threshold: impl Fn(usiz
 
     let output_path = format!("{}/{}", dir.path().display(), "output");
 
-    stream_to_csv(
+    assert!(stream_to_csv(
         Box::pin(stream),
         tmp_store.clone(),
         &output_path,
         threshold(size),
     )
     .await
-    .unwrap();
+    .is_ok());
 
     let written = tmp_store.read(&output_path).await.unwrap();
     let origin = store.read(origin_path).await.unwrap();
src/common/datasource/tests/orc/README.md (new file, 11 lines)
@@ -0,0 +1,11 @@
## Generate ORC data

```bash
python3 -m venv venv
venv/bin/pip install -U pip
venv/bin/pip install -U pyorc

./venv/bin/python write.py

cargo test
```
src/common/datasource/tests/orc/test.orc (new binary file, not shown)
src/common/datasource/tests/orc/write.py (new file, 103 lines)
@@ -0,0 +1,103 @@
# Copyright 2023 Greptime Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import random
import datetime
import pyorc

data = {
    "double_a": [1.0, 2.0, 3.0, 4.0, 5.0],
    "a": [1.0, 2.0, None, 4.0, 5.0],
    "b": [True, False, None, True, False],
    "str_direct": ["a", "cccccc", None, "ddd", "ee"],
    "d": ["a", "bb", None, "ccc", "ddd"],
    "e": ["ddd", "cc", None, "bb", "a"],
    "f": ["aaaaa", "bbbbb", None, "ccccc", "ddddd"],
    "int_short_repeated": [5, 5, None, 5, 5],
    "int_neg_short_repeated": [-5, -5, None, -5, -5],
    "int_delta": [1, 2, None, 4, 5],
    "int_neg_delta": [5, 4, None, 2, 1],
    "int_direct": [1, 6, None, 3, 2],
    "int_neg_direct": [-1, -6, None, -3, -2],
    "bigint_direct": [1, 6, None, 3, 2],
    "bigint_neg_direct": [-1, -6, None, -3, -2],
    "bigint_other": [5, -5, 1, 5, 5],
    "utf8_increase": ["a", "bb", "ccc", "dddd", "eeeee"],
    "utf8_decrease": ["eeeee", "dddd", "ccc", "bb", "a"],
    "timestamp_simple": [datetime.datetime(2023, 4, 1, 20, 15, 30, 2000), datetime.datetime.fromtimestamp(int('1629617204525777000')/1000000000), datetime.datetime(2023, 1, 1), datetime.datetime(2023, 2, 1), datetime.datetime(2023, 3, 1)],
    "date_simple": [datetime.date(2023, 4, 1), datetime.date(2023, 3, 1), datetime.date(2023, 1, 1), datetime.date(2023, 2, 1), datetime.date(2023, 3, 1)]
}

def infer_schema(data):
    schema = "struct<"
    for key, value in data.items():
        dt = type(value[0])
        if dt == float:
            dt = "float"
        elif dt == int:
            dt = "int"
        elif dt == bool:
            dt = "boolean"
        elif dt == str:
            dt = "string"
        elif key.startswith("timestamp"):
            dt = "timestamp"
        elif key.startswith("date"):
            dt = "date"
        else:
            print(key, value, dt)
            raise NotImplementedError
        if key.startswith("double"):
            dt = "double"
        if key.startswith("bigint"):
            dt = "bigint"
        schema += key + ":" + dt + ","

    schema = schema[:-1] + ">"
    return schema


def _write(
    schema: str,
    data,
    file_name: str,
    compression=pyorc.CompressionKind.NONE,
    dict_key_size_threshold=0.0,
):
    output = open(file_name, "wb")
    writer = pyorc.Writer(
        output,
        schema,
        dict_key_size_threshold=dict_key_size_threshold,
        # use a small number to ensure that compression crosses value boundaries
        compression_block_size=32,
        compression=compression,
    )
    num_rows = len(list(data.values())[0])
    for x in range(num_rows):
        row = tuple(values[x] for values in data.values())
        writer.write(row)
    writer.close()

    with open(file_name, "rb") as f:
        reader = pyorc.Reader(f)
        list(reader)


_write(
    infer_schema(data),
    data,
    "test.orc",
)
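As a worked example (derived from the script above, not stated in the diff): for this `data`, `infer_schema(data)` evaluates to `struct<double_a:double,a:float,b:boolean,str_direct:string,…,bigint_other:bigint,utf8_increase:string,utf8_decrease:string,timestamp_simple:timestamp,date_simple:date>`. Python `float` and `int` values map to ORC `float`/`int` unless the key prefix forces `double` or `bigint`, and the timestamp/date columns fall through to the key-prefix checks because their first values are `datetime` objects.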
@@ -17,16 +17,7 @@ pub mod format;
 pub mod mock;
 pub mod status_code;
 
-pub mod prelude {
-    pub use snafu::prelude::*;
-    pub use snafu::{Backtrace, ErrorCompat};
-
-    pub use crate::ext::{BoxedError, ErrorExt};
-    pub use crate::format::DebugFormat;
-    pub use crate::status_code::StatusCode;
-
-    pub const INNER_ERROR_CODE: &str = "INNER_ERROR_CODE";
-    pub const INNER_ERROR_MSG: &str = "INNER_ERROR_MSG";
-}
+pub const INNER_ERROR_CODE: &str = "INNER_ERROR_CODE";
+pub const INNER_ERROR_MSG: &str = "INNER_ERROR_MSG";
 
 pub use snafu;
@@ -19,7 +19,8 @@ use std::fmt;
 
 use snafu::Location;
 
-use crate::prelude::*;
+use crate::ext::ErrorExt;
+use crate::status_code::StatusCode;
 
 /// A mock error mainly for test.
 #[derive(Debug)]
@@ -66,6 +66,9 @@ pub enum StatusCode {
     // ====== Begin of server related status code =====
     /// Runtime resources exhausted, like creating threads failed.
     RuntimeResourcesExhausted = 6000,
+
+    /// Rate limit exceeded
+    RateLimited = 6001,
     // ====== End of server related status code =======
 
     // ====== Begin of auth related status code =====
@@ -111,6 +114,7 @@ impl StatusCode {
             | StatusCode::TableColumnNotFound
             | StatusCode::TableColumnExists
             | StatusCode::DatabaseNotFound
+            | StatusCode::RateLimited
             | StatusCode::UserNotFound
             | StatusCode::UnsupportedPasswordType
             | StatusCode::UserPasswordMismatch
@@ -141,6 +145,7 @@ impl StatusCode {
             | StatusCode::TableColumnNotFound
             | StatusCode::TableColumnExists
             | StatusCode::DatabaseNotFound
+            | StatusCode::RateLimited
             | StatusCode::UserNotFound
             | StatusCode::UnsupportedPasswordType
             | StatusCode::UserPasswordMismatch
@@ -22,7 +22,7 @@ struct Foo {}
 #[test]
 #[allow(clippy::extra_unused_type_parameters)]
 fn test_derive() {
-    Foo::default();
+    let _ = Foo::default();
     assert_fields!(Foo: input_types);
     assert_impl_all!(Foo: std::fmt::Debug, Default, AggrFuncTypeStore);
 }
@@ -158,19 +158,19 @@ mod test {
     fn test_update_batch() {
         // test update empty batch, expect not updating anything
         let mut argmax = Argmax::<i32>::default();
-        assert!(argmax.update_batch(&[]).is_ok());
+        argmax.update_batch(&[]).unwrap();
         assert_eq!(Value::Null, argmax.evaluate().unwrap());
 
         // test update one not-null value
         let mut argmax = Argmax::<i32>::default();
         let v: Vec<VectorRef> = vec![Arc::new(Int32Vector::from(vec![Some(42)]))];
-        assert!(argmax.update_batch(&v).is_ok());
+        argmax.update_batch(&v).unwrap();
         assert_eq!(Value::from(0_u64), argmax.evaluate().unwrap());
 
         // test update one null value
         let mut argmax = Argmax::<i32>::default();
         let v: Vec<VectorRef> = vec![Arc::new(Int32Vector::from(vec![Option::<i32>::None]))];
-        assert!(argmax.update_batch(&v).is_ok());
+        argmax.update_batch(&v).unwrap();
         assert_eq!(Value::Null, argmax.evaluate().unwrap());
 
         // test update no null-value batch
@@ -180,7 +180,7 @@ mod test {
             Some(1),
             Some(3),
         ]))];
-        assert!(argmax.update_batch(&v).is_ok());
+        argmax.update_batch(&v).unwrap();
         assert_eq!(Value::from(2_u64), argmax.evaluate().unwrap());
 
         // test update null-value batch
@@ -190,7 +190,7 @@ mod test {
             None,
             Some(4),
         ]))];
-        assert!(argmax.update_batch(&v).is_ok());
+        argmax.update_batch(&v).unwrap();
         assert_eq!(Value::from(2_u64), argmax.evaluate().unwrap());
 
         // test update with constant vector
@@ -199,7 +199,7 @@ mod test {
             Arc::new(Int32Vector::from_vec(vec![4])),
             10,
         ))];
-        assert!(argmax.update_batch(&v).is_ok());
+        argmax.update_batch(&v).unwrap();
         assert_eq!(Value::from(0_u64), argmax.evaluate().unwrap());
     }
 }
@@ -166,19 +166,19 @@ mod test {
     fn test_update_batch() {
         // test update empty batch, expect not updating anything
         let mut argmin = Argmin::<i32>::default();
-        assert!(argmin.update_batch(&[]).is_ok());
+        argmin.update_batch(&[]).unwrap();
         assert_eq!(Value::Null, argmin.evaluate().unwrap());
 
         // test update one not-null value
         let mut argmin = Argmin::<i32>::default();
         let v: Vec<VectorRef> = vec![Arc::new(Int32Vector::from(vec![Some(42)]))];
-        assert!(argmin.update_batch(&v).is_ok());
+        argmin.update_batch(&v).unwrap();
         assert_eq!(Value::from(0_u32), argmin.evaluate().unwrap());
 
         // test update one null value
         let mut argmin = Argmin::<i32>::default();
         let v: Vec<VectorRef> = vec![Arc::new(Int32Vector::from(vec![Option::<i32>::None]))];
-        assert!(argmin.update_batch(&v).is_ok());
+        argmin.update_batch(&v).unwrap();
         assert_eq!(Value::Null, argmin.evaluate().unwrap());
 
         // test update no null-value batch
@@ -188,7 +188,7 @@ mod test {
             Some(1),
             Some(3),
         ]))];
-        assert!(argmin.update_batch(&v).is_ok());
+        argmin.update_batch(&v).unwrap();
         assert_eq!(Value::from(0_u32), argmin.evaluate().unwrap());
 
         // test update null-value batch
@@ -198,7 +198,7 @@ mod test {
             None,
             Some(4),
         ]))];
-        assert!(argmin.update_batch(&v).is_ok());
+        argmin.update_batch(&v).unwrap();
         assert_eq!(Value::from(0_u32), argmin.evaluate().unwrap());
 
         // test update with constant vector
@@ -207,7 +207,7 @@ mod test {
             Arc::new(Int32Vector::from_vec(vec![4])),
             10,
         ))];
-        assert!(argmin.update_batch(&v).is_ok());
+        argmin.update_batch(&v).unwrap();
         assert_eq!(Value::from(0_u32), argmin.evaluate().unwrap());
     }
 }
@@ -192,20 +192,20 @@ mod test {
     fn test_update_batch() {
         // test update empty batch, expect not updating anything
         let mut diff = Diff::<i32, i64>::default();
-        assert!(diff.update_batch(&[]).is_ok());
+        diff.update_batch(&[]).unwrap();
         assert!(diff.values.is_empty());
         assert_eq!(Value::Null, diff.evaluate().unwrap());
 
         // test update one not-null value
         let mut diff = Diff::<i32, i64>::default();
         let v: Vec<VectorRef> = vec![Arc::new(Int32Vector::from(vec![Some(42)]))];
-        assert!(diff.update_batch(&v).is_ok());
+        diff.update_batch(&v).unwrap();
         assert_eq!(Value::Null, diff.evaluate().unwrap());
 
         // test update one null value
         let mut diff = Diff::<i32, i64>::default();
         let v: Vec<VectorRef> = vec![Arc::new(Int32Vector::from(vec![Option::<i32>::None]))];
-        assert!(diff.update_batch(&v).is_ok());
+        diff.update_batch(&v).unwrap();
         assert_eq!(Value::Null, diff.evaluate().unwrap());
 
         // test update no null-value batch
@@ -216,7 +216,7 @@ mod test {
             Some(2),
         ]))];
         let values = vec![Value::from(2_i64), Value::from(1_i64)];
-        assert!(diff.update_batch(&v).is_ok());
+        diff.update_batch(&v).unwrap();
         assert_eq!(
             Value::List(ListValue::new(
                 Some(Box::new(values)),
@@ -234,7 +234,7 @@ mod test {
             Some(4),
         ]))];
         let values = vec![Value::from(5_i64), Value::from(1_i64)];
-        assert!(diff.update_batch(&v).is_ok());
+        diff.update_batch(&v).unwrap();
         assert_eq!(
             Value::List(ListValue::new(
                 Some(Box::new(values)),
@@ -250,7 +250,7 @@ mod test {
             4,
         ))];
         let values = vec![Value::from(0_i64), Value::from(0_i64), Value::from(0_i64)];
-        assert!(diff.update_batch(&v).is_ok());
+        diff.update_batch(&v).unwrap();
         assert_eq!(
             Value::List(ListValue::new(
                 Some(Box::new(values)),
@@ -188,19 +188,19 @@ mod test {
     fn test_update_batch() {
         // test update empty batch, expect not updating anything
         let mut mean = Mean::<i32>::default();
-        assert!(mean.update_batch(&[]).is_ok());
+        mean.update_batch(&[]).unwrap();
         assert_eq!(Value::Null, mean.evaluate().unwrap());
 
         // test update one not-null value
         let mut mean = Mean::<i32>::default();
         let v: Vec<VectorRef> = vec![Arc::new(Int32Vector::from(vec![Some(42)]))];
-        assert!(mean.update_batch(&v).is_ok());
+        mean.update_batch(&v).unwrap();
         assert_eq!(Value::from(42.0_f64), mean.evaluate().unwrap());
 
         // test update one null value
         let mut mean = Mean::<i32>::default();
         let v: Vec<VectorRef> = vec![Arc::new(Int32Vector::from(vec![Option::<i32>::None]))];
-        assert!(mean.update_batch(&v).is_ok());
+        mean.update_batch(&v).unwrap();
         assert_eq!(Value::Null, mean.evaluate().unwrap());
 
         // test update no null-value batch
@@ -210,7 +210,7 @@ mod test {
             Some(1),
             Some(2),
         ]))];
-        assert!(mean.update_batch(&v).is_ok());
+        mean.update_batch(&v).unwrap();
         assert_eq!(Value::from(0.6666666666666666), mean.evaluate().unwrap());
 
         // test update null-value batch
@@ -221,7 +221,7 @@ mod test {
             Some(3),
             Some(4),
         ]))];
-        assert!(mean.update_batch(&v).is_ok());
+        mean.update_batch(&v).unwrap();
         assert_eq!(Value::from(1.6666666666666667), mean.evaluate().unwrap());
 
         // test update with constant vector
@@ -230,7 +230,7 @@ mod test {
             Arc::new(Int32Vector::from_vec(vec![4])),
             10,
         ))];
-        assert!(mean.update_batch(&v).is_ok());
+        mean.update_batch(&v).unwrap();
         assert_eq!(Value::from(4.0), mean.evaluate().unwrap());
     }
 }
@@ -299,7 +299,7 @@ mod test {
     fn test_update_batch() {
         // test update empty batch, expect not updating anything
         let mut percentile = Percentile::<i32>::default();
-        assert!(percentile.update_batch(&[]).is_ok());
+        percentile.update_batch(&[]).unwrap();
         assert!(percentile.not_greater.is_empty());
         assert!(percentile.greater.is_empty());
         assert_eq!(Value::Null, percentile.evaluate().unwrap());
@@ -310,7 +310,7 @@ mod test {
             Arc::new(Int32Vector::from(vec![Some(42)])),
             Arc::new(Float64Vector::from(vec![Some(100.0_f64)])),
         ];
-        assert!(percentile.update_batch(&v).is_ok());
+        percentile.update_batch(&v).unwrap();
         assert_eq!(Value::from(42.0_f64), percentile.evaluate().unwrap());
 
         // test update one null value
@@ -319,7 +319,7 @@ mod test {
             Arc::new(Int32Vector::from(vec![Option::<i32>::None])),
             Arc::new(Float64Vector::from(vec![Some(100.0_f64)])),
         ];
-        assert!(percentile.update_batch(&v).is_ok());
+        percentile.update_batch(&v).unwrap();
         assert_eq!(Value::Null, percentile.evaluate().unwrap());
 
         // test update no null-value batch
@@ -332,7 +332,7 @@ mod test {
                 Some(100.0_f64),
             ])),
         ];
-        assert!(percentile.update_batch(&v).is_ok());
+        percentile.update_batch(&v).unwrap();
         assert_eq!(Value::from(2_f64), percentile.evaluate().unwrap());
 
         // test update null-value batch
@@ -346,7 +346,7 @@ mod test {
                 Some(100.0_f64),
             ])),
         ];
-        assert!(percentile.update_batch(&v).is_ok());
+        percentile.update_batch(&v).unwrap();
         assert_eq!(Value::from(4_f64), percentile.evaluate().unwrap());
 
         // test update with constant vector
@@ -358,7 +358,7 @@ mod test {
             )),
             Arc::new(Float64Vector::from(vec![Some(100.0_f64), Some(100.0_f64)])),
         ];
-        assert!(percentile.update_batch(&v).is_ok());
+        percentile.update_batch(&v).unwrap();
         assert_eq!(Value::from(4_f64), percentile.evaluate().unwrap());
 
         // test left border
@@ -371,7 +371,7 @@ mod test {
                 Some(0.0_f64),
             ])),
         ];
-        assert!(percentile.update_batch(&v).is_ok());
+        percentile.update_batch(&v).unwrap();
         assert_eq!(Value::from(-1.0_f64), percentile.evaluate().unwrap());
 
         // test medium
@@ -384,7 +384,7 @@ mod test {
                 Some(50.0_f64),
             ])),
         ];
-        assert!(percentile.update_batch(&v).is_ok());
+        percentile.update_batch(&v).unwrap();
         assert_eq!(Value::from(1.0_f64), percentile.evaluate().unwrap());
 
         // test right border
@@ -397,7 +397,7 @@ mod test {
                 Some(100.0_f64),
             ])),
         ];
-        assert!(percentile.update_batch(&v).is_ok());
+        percentile.update_batch(&v).unwrap();
         assert_eq!(Value::from(2.0_f64), percentile.evaluate().unwrap());
 
         // the following is the result of numpy.percentile
@@ -414,7 +414,7 @@ mod test {
                 Some(40.0_f64),
             ])),
         ];
-        assert!(percentile.update_batch(&v).is_ok());
+        percentile.update_batch(&v).unwrap();
         assert_eq!(Value::from(6.400000000_f64), percentile.evaluate().unwrap());
 
         // the following is the result of numpy.percentile
@@ -430,7 +430,7 @@ mod test {
                 Some(95.0_f64),
             ])),
         ];
-        assert!(percentile.update_batch(&v).is_ok());
+        percentile.update_batch(&v).unwrap();
         assert_eq!(
             Value::from(9.700_000_000_000_001_f64),
             percentile.evaluate().unwrap()
@@ -267,7 +267,7 @@ mod test {
     fn test_update_batch() {
         // test update empty batch, expect not updating anything
         let mut polyval = Polyval::<i32, i64>::default();
-        assert!(polyval.update_batch(&[]).is_ok());
+        polyval.update_batch(&[]).unwrap();
         assert!(polyval.values.is_empty());
         assert_eq!(Value::Null, polyval.evaluate().unwrap());
 
@@ -277,7 +277,7 @@ mod test {
             Arc::new(Int32Vector::from(vec![Some(3)])),
             Arc::new(Int64Vector::from(vec![Some(2_i64)])),
         ];
-        assert!(polyval.update_batch(&v).is_ok());
+        polyval.update_batch(&v).unwrap();
         assert_eq!(Value::Int64(3), polyval.evaluate().unwrap());
 
         // test update one null value
@@ -286,7 +286,7 @@ mod test {
             Arc::new(Int32Vector::from(vec![Option::<i32>::None])),
             Arc::new(Int64Vector::from(vec![Some(2_i64)])),
         ];
-        assert!(polyval.update_batch(&v).is_ok());
+        polyval.update_batch(&v).unwrap();
         assert_eq!(Value::Null, polyval.evaluate().unwrap());
 
         // test update no null-value batch
@@ -299,7 +299,7 @@ mod test {
                 Some(2_i64),
             ])),
         ];
-        assert!(polyval.update_batch(&v).is_ok());
+        polyval.update_batch(&v).unwrap();
         assert_eq!(Value::Int64(13), polyval.evaluate().unwrap());
 
         // test update null-value batch
@@ -313,7 +313,7 @@ mod test {
                 Some(2_i64),
             ])),
         ];
-        assert!(polyval.update_batch(&v).is_ok());
+        polyval.update_batch(&v).unwrap();
         assert_eq!(Value::Int64(13), polyval.evaluate().unwrap());
 
         // test update with constant vector
@@ -325,7 +325,7 @@ mod test {
             )),
             Arc::new(Int64Vector::from(vec![Some(5_i64), Some(5_i64)])),
         ];
-        assert!(polyval.update_batch(&v).is_ok());
+        polyval.update_batch(&v).unwrap();
         assert_eq!(Value::Int64(24), polyval.evaluate().unwrap());
     }
 }
@@ -231,7 +231,7 @@ mod test {
     fn test_update_batch() {
         // test update empty batch, expect not updating anything
         let mut scipy_stats_norm_cdf = ScipyStatsNormCdf::<i32>::default();
-        assert!(scipy_stats_norm_cdf.update_batch(&[]).is_ok());
+        scipy_stats_norm_cdf.update_batch(&[]).unwrap();
         assert!(scipy_stats_norm_cdf.values.is_empty());
         assert_eq!(Value::Null, scipy_stats_norm_cdf.evaluate().unwrap());
 
@@ -245,7 +245,7 @@ mod test {
                 Some(2.0_f64),
             ])),
         ];
-        assert!(scipy_stats_norm_cdf.update_batch(&v).is_ok());
+        scipy_stats_norm_cdf.update_batch(&v).unwrap();
         assert_eq!(
             Value::from(0.8086334555398362),
             scipy_stats_norm_cdf.evaluate().unwrap()
@@ -262,7 +262,7 @@ mod test {
                 Some(2.0_f64),
             ])),
         ];
-        assert!(scipy_stats_norm_cdf.update_batch(&v).is_ok());
+        scipy_stats_norm_cdf.update_batch(&v).unwrap();
         assert_eq!(
             Value::from(0.5412943699039795),
             scipy_stats_norm_cdf.evaluate().unwrap()
@@ -232,7 +232,7 @@ mod test {
     fn test_update_batch() {
         // test update empty batch, expect not updating anything
         let mut scipy_stats_norm_pdf = ScipyStatsNormPdf::<i32>::default();
-        assert!(scipy_stats_norm_pdf.update_batch(&[]).is_ok());
+        scipy_stats_norm_pdf.update_batch(&[]).unwrap();
         assert!(scipy_stats_norm_pdf.values.is_empty());
         assert_eq!(Value::Null, scipy_stats_norm_pdf.evaluate().unwrap());
 
@@ -246,7 +246,7 @@ mod test {
                 Some(2.0_f64),
             ])),
         ];
-        assert!(scipy_stats_norm_pdf.update_batch(&v).is_ok());
+        scipy_stats_norm_pdf.update_batch(&v).unwrap();
         assert_eq!(
             Value::from(0.17843340219081558),
             scipy_stats_norm_pdf.evaluate().unwrap()
@@ -263,7 +263,7 @@ mod test {
                 Some(2.0_f64),
             ])),
         ];
-        assert!(scipy_stats_norm_pdf.update_batch(&v).is_ok());
+        scipy_stats_norm_pdf.update_batch(&v).unwrap();
         assert_eq!(
             Value::from(0.12343972049858312),
             scipy_stats_norm_pdf.evaluate().unwrap()
@@ -32,14 +32,16 @@ pub struct FunctionRegistry {
 
 impl FunctionRegistry {
     pub fn register(&self, func: FunctionRef) {
-        self.functions
+        let _ = self
+            .functions
             .write()
             .unwrap()
             .insert(func.name().to_string(), func);
     }
 
     pub fn register_aggregate_function(&self, func: AggregateFunctionMetaRef) {
-        self.aggregate_functions
+        let _ = self
+            .aggregate_functions
             .write()
             .unwrap()
             .insert(func.name(), func);
@@ -92,7 +94,7 @@ mod tests {
         assert!(registry.get_function("test_and").is_none());
         assert!(registry.functions().is_empty());
         registry.register(func);
-        assert!(registry.get_function("test_and").is_some());
+        let _ = registry.get_function("test_and").unwrap();
         assert_eq!(1, registry.functions().len());
     }
 }
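A sketch of the registration flow this hunk touches, assuming `FunctionRegistry` implements `Default` and pairing it with `ToUnixtimeFunction` purely for illustration:

```rust
use std::sync::Arc;

fn demo_registry() {
    let registry = FunctionRegistry::default();
    // Map insertion returns any previously registered function under the same
    // name; after this change it is discarded explicitly inside register().
    registry.register(Arc::new(ToUnixtimeFunction::default()));
    let _ = registry.get_function("to_unixtime").unwrap();
}
```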
@@ -16,13 +16,16 @@ use std::fmt;
 use std::str::FromStr;
 use std::sync::Arc;
 
-use common_query::error::{self, Result, UnsupportedInputDataTypeSnafu};
+use common_query::error::{InvalidFuncArgsSnafu, Result, UnsupportedInputDataTypeSnafu};
 use common_query::prelude::{Signature, Volatility};
 use common_time::timestamp::TimeUnit;
 use common_time::Timestamp;
 use datatypes::prelude::ConcreteDataType;
-use datatypes::types::StringType;
-use datatypes::vectors::{Int64Vector, StringVector, Vector, VectorRef};
+use datatypes::types::TimestampType;
+use datatypes::vectors::{
+    Int64Vector, StringVector, TimestampMicrosecondVector, TimestampMillisecondVector,
+    TimestampNanosecondVector, TimestampSecondVector, Vector, VectorRef,
+};
 use snafu::ensure;
 
 use crate::scalars::function::{Function, FunctionContext};
@@ -42,18 +45,33 @@ fn convert_to_seconds(arg: &str) -> Option<i64> {
     }
 }
 
+fn process_vector(vector: &dyn Vector) -> Vec<Option<i64>> {
+    (0..vector.len())
+        .map(|i| paste::expr!((vector.get(i)).as_timestamp().map(|ts| ts.value())))
+        .collect::<Vec<Option<i64>>>()
+}
+
 impl Function for ToUnixtimeFunction {
     fn name(&self) -> &str {
         NAME
     }
 
     fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
-        Ok(ConcreteDataType::timestamp_second_datatype())
+        Ok(ConcreteDataType::int64_datatype())
     }
 
     fn signature(&self) -> Signature {
-        Signature::exact(
-            vec![ConcreteDataType::String(StringType)],
+        Signature::uniform(
+            1,
+            vec![
+                ConcreteDataType::string_datatype(),
+                ConcreteDataType::int32_datatype(),
+                ConcreteDataType::int64_datatype(),
+                ConcreteDataType::timestamp_second_datatype(),
+                ConcreteDataType::timestamp_millisecond_datatype(),
+                ConcreteDataType::timestamp_microsecond_datatype(),
+                ConcreteDataType::timestamp_nanosecond_datatype(),
+            ],
             Volatility::Immutable,
        )
    }
@@ -61,7 +79,7 @@ impl Function for ToUnixtimeFunction {
     fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
         ensure!(
             columns.len() == 1,
-            error::InvalidFuncArgsSnafu {
+            InvalidFuncArgsSnafu {
                 err_msg: format!(
                     "The length of the args is not correct, expect exactly one, have: {}",
                     columns.len()
@@ -79,6 +97,42 @@ impl Function for ToUnixtimeFunction {
                     .collect::<Vec<_>>(),
             )))
             }
+            ConcreteDataType::Int64(_) | ConcreteDataType::Int32(_) => {
+                let array = columns[0].to_arrow_array();
+                Ok(Arc::new(Int64Vector::try_from_arrow_array(&array).unwrap()))
+            }
+            ConcreteDataType::Timestamp(ts) => {
+                let array = columns[0].to_arrow_array();
+                let value = match ts {
+                    TimestampType::Second(_) => {
+                        let vector = paste::expr!(TimestampSecondVector::try_from_arrow_array(
+                            array
+                        )
+                        .unwrap());
+                        process_vector(&vector)
+                    }
+                    TimestampType::Millisecond(_) => {
+                        let vector = paste::expr!(
+                            TimestampMillisecondVector::try_from_arrow_array(array).unwrap()
+                        );
+                        process_vector(&vector)
+                    }
+                    TimestampType::Microsecond(_) => {
+                        let vector = paste::expr!(
+                            TimestampMicrosecondVector::try_from_arrow_array(array).unwrap()
+                        );
+                        process_vector(&vector)
+                    }
+                    TimestampType::Nanosecond(_) => {
+                        let vector = paste::expr!(TimestampNanosecondVector::try_from_arrow_array(
+                            array
+                        )
+                        .unwrap());
+                        process_vector(&vector)
+                    }
+                };
+                Ok(Arc::new(Int64Vector::from(value)))
+            }
             _ => UnsupportedInputDataTypeSnafu {
                 function: NAME,
                 datatypes: columns.iter().map(|c| c.data_type()).collect::<Vec<_>>(),
@@ -97,28 +151,37 @@ impl fmt::Display for ToUnixtimeFunction {
 #[cfg(test)]
 mod tests {
     use common_query::prelude::TypeSignature;
-    use datatypes::prelude::ConcreteDataType;
-    use datatypes::types::StringType;
+    use datatypes::prelude::{ConcreteDataType, ScalarVectorBuilder};
+    use datatypes::scalars::ScalarVector;
+    use datatypes::timestamp::TimestampSecond;
     use datatypes::value::Value;
-    use datatypes::vectors::StringVector;
+    use datatypes::vectors::{StringVector, TimestampSecondVector};
 
     use super::{ToUnixtimeFunction, *};
     use crate::scalars::Function;
 
     #[test]
-    fn test_to_unixtime() {
+    fn test_string_to_unixtime() {
         let f = ToUnixtimeFunction::default();
         assert_eq!("to_unixtime", f.name());
         assert_eq!(
-            ConcreteDataType::timestamp_second_datatype(),
+            ConcreteDataType::int64_datatype(),
            f.return_type(&[]).unwrap()
        );
 
         assert!(matches!(f.signature(),
-                         Signature {
-                             type_signature: TypeSignature::Exact(valid_types),
-                             volatility: Volatility::Immutable
-                         } if valid_types == vec![ConcreteDataType::String(StringType)]
+                         Signature {
+                             type_signature: TypeSignature::Uniform(1, valid_types),
+                             volatility: Volatility::Immutable
+                         } if valid_types == vec![
+                             ConcreteDataType::string_datatype(),
+                             ConcreteDataType::int32_datatype(),
+                             ConcreteDataType::int64_datatype(),
+                             ConcreteDataType::timestamp_second_datatype(),
+                             ConcreteDataType::timestamp_millisecond_datatype(),
+                             ConcreteDataType::timestamp_microsecond_datatype(),
+                             ConcreteDataType::timestamp_nanosecond_datatype(),
+                         ]
         ));
 
         let times = vec![
@@ -145,4 +208,106 @@ mod tests {
         }
     }
 }
+
+    #[test]
+    fn test_int_to_unixtime() {
+        let f = ToUnixtimeFunction::default();
+        assert_eq!("to_unixtime", f.name());
+        assert_eq!(
+            ConcreteDataType::int64_datatype(),
+            f.return_type(&[]).unwrap()
+        );
+
+        assert!(matches!(f.signature(),
+                         Signature {
+                             type_signature: TypeSignature::Uniform(1, valid_types),
+                             volatility: Volatility::Immutable
+                         } if valid_types == vec![
+                             ConcreteDataType::string_datatype(),
+                             ConcreteDataType::int32_datatype(),
+                             ConcreteDataType::int64_datatype(),
+                             ConcreteDataType::timestamp_second_datatype(),
+                             ConcreteDataType::timestamp_millisecond_datatype(),
+                             ConcreteDataType::timestamp_microsecond_datatype(),
+                             ConcreteDataType::timestamp_nanosecond_datatype(),
+                         ]
+        ));
+
+        let times = vec![Some(3_i64), None, Some(5_i64), None];
+        let results = vec![Some(3), None, Some(5), None];
+        let args: Vec<VectorRef> = vec![Arc::new(Int64Vector::from(times.clone()))];
+        let vector = f.eval(FunctionContext::default(), &args).unwrap();
+        assert_eq!(4, vector.len());
+        for (i, _t) in times.iter().enumerate() {
+            let v = vector.get(i);
+            if i == 1 || i == 3 {
+                assert_eq!(Value::Null, v);
+                continue;
+            }
+            match v {
+                Value::Int64(ts) => {
+                    assert_eq!(ts, (*results.get(i).unwrap()).unwrap());
+                }
+                _ => unreachable!(),
+            }
+        }
+    }
+
+    #[test]
+    fn test_timestamp_to_unixtime() {
+        let f = ToUnixtimeFunction::default();
+        assert_eq!("to_unixtime", f.name());
+        assert_eq!(
+            ConcreteDataType::int64_datatype(),
+            f.return_type(&[]).unwrap()
+        );
+
+        assert!(matches!(f.signature(),
+                         Signature {
+                             type_signature: TypeSignature::Uniform(1, valid_types),
+                             volatility: Volatility::Immutable
+                         } if valid_types == vec![
+                             ConcreteDataType::string_datatype(),
+                             ConcreteDataType::int32_datatype(),
+                             ConcreteDataType::int64_datatype(),
+                             ConcreteDataType::timestamp_second_datatype(),
+                             ConcreteDataType::timestamp_millisecond_datatype(),
+                             ConcreteDataType::timestamp_microsecond_datatype(),
+                             ConcreteDataType::timestamp_nanosecond_datatype(),
+                         ]
+        ));
+
+        let times: Vec<Option<TimestampSecond>> = vec![
+            Some(TimestampSecond::new(123)),
+            None,
+            Some(TimestampSecond::new(42)),
+            None,
+        ];
+        let results = vec![Some(123), None, Some(42), None];
+        let ts_vector: TimestampSecondVector = build_vector_from_slice(&times);
+        let args: Vec<VectorRef> = vec![Arc::new(ts_vector)];
+        let vector = f.eval(FunctionContext::default(), &args).unwrap();
+        assert_eq!(4, vector.len());
+        for (i, _t) in times.iter().enumerate() {
+            let v = vector.get(i);
+            if i == 1 || i == 3 {
+                assert_eq!(Value::Null, v);
+                continue;
+            }
+            match v {
+                Value::Int64(ts) => {
+                    assert_eq!(ts, (*results.get(i).unwrap()).unwrap());
+                }
+                _ => unreachable!(),
+            }
+        }
+    }
+
+    fn build_vector_from_slice<T: ScalarVector>(items: &[Option<T::RefItem<'_>>]) -> T {
+        let mut builder = T::Builder::with_capacity(items.len());
+        for item in items {
+            builder.push(*item);
+        }
+        builder.finish()
+    }
 }
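Mirroring the tests above, a minimal direct invocation with string input; `StringVector::from(vec![&str])` is assumed to exist as in the surrounding test code:

```rust
use std::sync::Arc;

fn demo_to_unixtime() {
    let f = ToUnixtimeFunction::default();
    let args: Vec<VectorRef> =
        vec![Arc::new(StringVector::from(vec!["2023-03-01T06:35:02Z"]))];
    // Strings go through convert_to_seconds; unparsable input yields Null.
    let out = f.eval(FunctionContext::default(), &args).unwrap();
    assert_eq!(ConcreteDataType::int64_datatype(), out.data_type());
}
```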
@@ -34,11 +34,11 @@ const LOCATION_TYPE_FIRST: i32 = LocationType::First as i32;
 const LOCATION_TYPE_AFTER: i32 = LocationType::After as i32;
 
 /// Convert an [`AlterExpr`] to an [`AlterTableRequest`]
-pub fn alter_expr_to_request(expr: AlterExpr) -> Result<AlterTableRequest> {
+pub fn alter_expr_to_request(table_id: TableId, expr: AlterExpr) -> Result<AlterTableRequest> {
     let catalog_name = expr.catalog_name;
     let schema_name = expr.schema_name;
     let kind = expr.kind.context(MissingFieldSnafu { field: "kind" })?;
-    match kind {
+    let alter_kind = match kind {
         Kind::AddColumns(add_columns) => {
             let add_column_requests = add_columns
                 .add_columns
@@ -61,42 +61,27 @@ pub fn alter_expr_to_request(expr: AlterExpr) -> Result<AlterTableRequest> {
                 })
                 .collect::<Result<Vec<_>>>()?;
 
-            let alter_kind = AlterKind::AddColumns {
+            AlterKind::AddColumns {
                 columns: add_column_requests,
-            };
-
-            let request = AlterTableRequest {
-                catalog_name,
-                schema_name,
-                table_name: expr.table_name,
-                alter_kind,
-            };
-            Ok(request)
-        }
-        Kind::DropColumns(DropColumns { drop_columns }) => {
-            let alter_kind = AlterKind::DropColumns {
-                names: drop_columns.into_iter().map(|c| c.name).collect(),
-            };
-
-            let request = AlterTableRequest {
-                catalog_name,
-                schema_name,
-                table_name: expr.table_name,
-                alter_kind,
-            };
-            Ok(request)
+            }
         }
+        Kind::DropColumns(DropColumns { drop_columns }) => AlterKind::DropColumns {
+            names: drop_columns.into_iter().map(|c| c.name).collect(),
+        },
         Kind::RenameTable(RenameTable { new_table_name }) => {
-            let alter_kind = AlterKind::RenameTable { new_table_name };
-            let request = AlterTableRequest {
-                catalog_name,
-                schema_name,
-                table_name: expr.table_name,
-                alter_kind,
-            };
-            Ok(request)
+            AlterKind::RenameTable { new_table_name }
         }
-    }
+    };
+
+    let request = AlterTableRequest {
+        catalog_name,
+        schema_name,
+        table_name: expr.table_name,
+        table_id,
+        alter_kind,
+        table_version: Some(expr.table_version),
+    };
+    Ok(request)
 }
 
 pub fn create_table_schema(expr: &CreateTableExpr, require_time_index: bool) -> Result<RawSchema> {
@@ -237,9 +222,10 @@ mod tests {
                     location: None,
                 }],
             })),
+            ..Default::default()
         };
 
-        let alter_request = alter_expr_to_request(expr).unwrap();
+        let alter_request = alter_expr_to_request(1, expr).unwrap();
         assert_eq!(alter_request.catalog_name, "");
         assert_eq!(alter_request.schema_name, "");
         assert_eq!("monitor".to_string(), alter_request.table_name);
@@ -294,9 +280,10 @@ mod tests {
                 },
             ],
            })),
+            ..Default::default()
        };
 
-        let alter_request = alter_expr_to_request(expr).unwrap();
+        let alter_request = alter_expr_to_request(1, expr).unwrap();
         assert_eq!(alter_request.catalog_name, "");
         assert_eq!(alter_request.schema_name, "");
         assert_eq!("monitor".to_string(), alter_request.table_name);
@@ -342,9 +329,10 @@ mod tests {
                 name: "mem_usage".to_string(),
             }],
            })),
+            ..Default::default()
        };
 
-        let alter_request = alter_expr_to_request(expr).unwrap();
+        let alter_request = alter_expr_to_request(1, expr).unwrap();
         assert_eq!(alter_request.catalog_name, "test_catalog");
         assert_eq!(alter_request.schema_name, "test_schema");
         assert_eq!("monitor".to_string(), alter_request.table_name);
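Not from the diff: a sketch of the rename path through the same conversion, reusing the `AlterExpr` shape seen in the tests above; the table id `1` and new name are illustrative.

```rust
fn demo_rename() {
    let expr = AlterExpr {
        catalog_name: "test_catalog".to_string(),
        schema_name: "test_schema".to_string(),
        table_name: "monitor".to_string(),
        kind: Some(Kind::RenameTable(RenameTable {
            new_table_name: "monitor_v2".to_string(),
        })),
        ..Default::default()
    };

    let request = alter_expr_to_request(1, expr).unwrap();
    // All three kinds now funnel into one AlterTableRequest construction.
    assert!(matches!(request.alter_kind, AlterKind::RenameTable { .. }));
}
```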
@@ -15,8 +15,8 @@
 use std::any::Any;
 
 use common_error::ext::ErrorExt;
-use common_error::prelude::{Snafu, StatusCode};
-use snafu::Location;
+use common_error::status_code::StatusCode;
+use snafu::{Location, Snafu};
 
 #[derive(Debug, Snafu)]
 #[snafu(visibility(pub))]
@@ -32,7 +32,7 @@ pub enum Error {
 
     #[snafu(display("Column datatype error, source: {}", source))]
     ColumnDataType {
-        #[snafu(backtrace)]
+        location: Location,
         source: api::error::Error,
     },
 
@@ -54,7 +54,7 @@ pub enum Error {
     InvalidColumnProto { err_msg: String, location: Location },
     #[snafu(display("Failed to create vector, source: {}", source))]
     CreateVector {
-        #[snafu(backtrace)]
+        location: Location,
         source: datatypes::error::Error,
     },
 
@@ -68,13 +68,13 @@ pub enum Error {
     ))]
     InvalidColumnDef {
         column: String,
-        #[snafu(backtrace)]
+        location: Location,
         source: api::error::Error,
     },
 
     #[snafu(display("Unrecognized table option: {}", source))]
     UnrecognizedTableOption {
-        #[snafu(backtrace)]
+        location: Location,
         source: table::error::Error,
     },
 
@@ -77,7 +77,7 @@ pub fn find_new_columns(schema: &SchemaRef, columns: &[Column]) -> Result<Option
                 is_key: *semantic_type == TAG_SEMANTIC_TYPE,
                 location: None,
             });
-            new_columns.insert(column_name.to_string());
+            let _ = new_columns.insert(column_name.to_string());
         }
     }
 
@@ -239,7 +239,7 @@ pub fn build_create_expr_from_insertion(
 
             let column_def = build_column_def(column_name, *datatype, is_nullable);
             column_defs.push(column_def);
-            new_columns.insert(column_name.to_string());
+            let _ = new_columns.insert(column_name.to_string());
         }
     }
@@ -22,6 +22,7 @@ datafusion.workspace = true
 datatypes = { path = "../../datatypes" }
 flatbuffers = "23.1"
+futures = "0.3"
 lazy_static.workspace = true
 prost.workspace = true
 snafu = { version = "0.7", features = ["backtraces"] }
 tokio.workspace = true