mirror of https://github.com/GreptimeTeam/greptimedb.git, synced 2026-01-08 14:22:58 +00:00

Compare commits: v0.3.2...v0.4.0-nig (110 commits)
| SHA1 |
|---|
| 81ea61ba43 |
| 662879ff4b |
| 48996b0646 |
| 0b4ac987cd |
| 9c1f0234de |
| f55bff51ac |
| 0fc0f74cd7 |
| 5f65e3ff44 |
| 1f371f5e6e |
| 632cb26430 |
| 39e74dc87e |
| 41139ec11d |
| 657fcaf9d0 |
| f1cd28ffa1 |
| 86378ad93a |
| 792d8dfe33 |
| e3ac3298b1 |
| 953b8a0132 |
| e0aecc9209 |
| a7557b70f1 |
| 51fe074666 |
| 6235441577 |
| 172febb1af |
| 2ef0d06cdb |
| 2e2a82689c |
| bb8468437e |
| 3241de0b85 |
| b227a7637c |
| 43bde82e28 |
| 62a41d2280 |
| 3741751c8d |
| 8bea853954 |
| 37dad206f4 |
| 1783e4c5cb |
| b81570b99a |
| 6811acb314 |
| 3e846e27f8 |
| f152568701 |
| dd62f4c407 |
| 4fd37d9d4e |
| 7cf6c2bd5c |
| 8f71ac2172 |
| 076d44055f |
| d9751268aa |
| 8f1241912c |
| 97cfa3d6c9 |
| ef7c5dd311 |
| ce43896a0b |
| c9cce0225d |
| 5bfd0d9857 |
| e4fd5d0fd3 |
| 132668bcd1 |
| 8b4145b634 |
| 735c6390ca |
| 9ff7670adf |
| 16be56a743 |
| 2bfe25157f |
| 4fdb6d2f21 |
| 39091421a4 |
| 674bfd85c7 |
| 4fa8340572 |
| 5422224530 |
| 077785cf1e |
| a751aa5ba0 |
| 264c5ea720 |
| fa12392d2c |
| 421103c336 |
| 41e856eb9e |
| e1ca454992 |
| 2d30f4c373 |
| a7ea3bbc16 |
| fc850c9988 |
| f293126315 |
| c615fb2a93 |
| 65f5349767 |
| ed756288b3 |
| 04ddeffd2a |
| c8ed1bbfae |
| 207d3d23a1 |
| 63173f63a1 |
| 4ea8a78817 |
| 553530cff4 |
| c3db99513a |
| 8e256b317d |
| b31fad5d52 |
| 00181885cc |
| 195dfdc5d3 |
| f20b5695b8 |
| f731193ddc |
| 963e468286 |
| f19498f73e |
| 4cc42e2ba6 |
| cd5afc8cb7 |
| 6dd24f4dc4 |
| 55500b7711 |
| 64acfd3802 |
| ad165c1c64 |
| 8dcb12e317 |
| 03e30652c8 |
| 61c793796c |
| dc085442d7 |
| 9153191819 |
| 979400ac58 |
| 28748edb0d |
| 66e5ed5483 |
| af2fb2acbd |
| eb2654b89a |
| 3d0d082c56 |
| 4073fceea5 |
| 8a00424468 |
@@ -14,4 +14,8 @@ GT_AZBLOB_CONTAINER=AZBLOB container
GT_AZBLOB_ACCOUNT_NAME=AZBLOB account name
GT_AZBLOB_ACCOUNT_KEY=AZBLOB account key
GT_AZBLOB_ENDPOINT=AZBLOB endpoint

# Settings for gcs test
GT_GCS_BUCKET = GCS bucket
GT_GCS_SCOPE = GCS scope
GT_GCS_CREDENTIAL_PATH = GCS credential path
GT_GCS_ENDPOINT = GCS end point
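These keys gate the new GCS object-store integration tests. A minimal sketch of wiring them up before running the tests — all values below are placeholders, and the scope/endpoint shown are the standard Google Cloud Storage ones:

```bash
# Hypothetical values; the test harness reads these from the environment.
export GT_GCS_BUCKET="my-test-bucket"
export GT_GCS_SCOPE="https://www.googleapis.com/auth/devstorage.read_write"
export GT_GCS_CREDENTIAL_PATH="$HOME/gcs-credential.json"
export GT_GCS_ENDPOINT="https://storage.googleapis.com"
```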
1 .github/workflows/develop.yml vendored
@@ -1,4 +1,5 @@
on:
  merge_group:
  pull_request:
    types: [opened, synchronize, reopened, ready_for_review]
    paths-ignore:
15 .github/workflows/docs.yml vendored
@@ -1,4 +1,5 @@
on:
  merge_group:
  pull_request:
    types: [opened, synchronize, reopened, ready_for_review]
    paths:
@@ -27,6 +28,13 @@ name: CI
# https://docs.github.com/en/repositories/configuring-branches-and-merges-in-your-repository/defining-the-mergeability-of-pull-requests/troubleshooting-required-status-checks#handling-skipped-but-required-checks

jobs:
  typos:
    name: Spell Check with Typos
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - uses: crate-ci/typos@v1.13.10

  check:
    name: Check
    if: github.event.pull_request.draft == false
@@ -53,3 +61,10 @@ jobs:
    runs-on: ubuntu-latest
    steps:
      - run: 'echo "No action required"'

  sqlness:
    name: Sqlness Test
    if: github.event.pull_request.draft == false
    runs-on: ubuntu-latest
    steps:
      - run: 'echo "No action required"'
35 .github/workflows/release.yml vendored
@@ -5,7 +5,7 @@ on:
  schedule:
    # At 00:00 on Monday.
    - cron: '0 0 * * 1'
  # Mannually trigger only builds binaries.
  # Manually trigger only builds binaries.
  workflow_dispatch:
    inputs:
      dry_run:
@@ -127,6 +127,14 @@ jobs:
          name: ${{ matrix.file }}.sha256sum
          path: target/${{ matrix.arch }}/${{ env.CARGO_PROFILE }}/${{ matrix.file }}.sha256sum

      - name: Configure scheduled build image tag # the tag would be ${SCHEDULED_BUILD_VERSION_PREFIX}-YYYYMMDD-${SCHEDULED_PERIOD}
        shell: bash
        if: github.event_name != 'push'
        run: |
          buildTime=`date "+%Y%m%d"`
          SCHEDULED_BUILD_VERSION=${{ env.SCHEDULED_BUILD_VERSION_PREFIX }}-$buildTime-${{ env.SCHEDULED_PERIOD }}
          echo "TAG=${SCHEDULED_BUILD_VERSION:1}" >> $GITHUB_ENV

      - name: Configure tag
        shell: bash
        if: github.event_name == 'push'
@@ -136,7 +144,7 @@ jobs:

      - name: Upload to S3
        run: |
          aws s3 sync target/${{ matrix.arch }}/${{ env.CARGO_PROFILE }} s3://${{ secrets.GREPTIMEDB_RELEASE_BUCKET_NAME }}/releases/${TAG}
          aws s3 cp target/${{ matrix.arch }}/${{ env.CARGO_PROFILE }} s3://${{ secrets.GREPTIMEDB_RELEASE_BUCKET_NAME }}/releases/${TAG} --recursive --exclude "*" --include "*.tgz"
        env:
          AWS_ACCESS_KEY_ID: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
          AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
@@ -303,6 +311,14 @@ jobs:
          name: ${{ matrix.file }}.sha256sum
          path: target/${{ matrix.arch }}/${{ env.CARGO_PROFILE }}/${{ matrix.file }}.sha256sum

      - name: Configure scheduled build image tag # the tag would be ${SCHEDULED_BUILD_VERSION_PREFIX}-YYYYMMDD-${SCHEDULED_PERIOD}
        shell: bash
        if: github.event_name != 'push'
        run: |
          buildTime=`date "+%Y%m%d"`
          SCHEDULED_BUILD_VERSION=${{ env.SCHEDULED_BUILD_VERSION_PREFIX }}-$buildTime-${{ env.SCHEDULED_PERIOD }}
          echo "TAG=${SCHEDULED_BUILD_VERSION:1}" >> $GITHUB_ENV

      - name: Configure tag
        shell: bash
        if: github.event_name == 'push'
@@ -312,7 +328,7 @@ jobs:

      - name: Upload to S3
        run: |
          aws s3 sync target/${{ matrix.arch }}/${{ env.CARGO_PROFILE }} s3://${{ secrets.GREPTIMEDB_RELEASE_BUCKET_NAME }}/releases/${TAG}
          aws s3 cp target/${{ matrix.arch }}/${{ env.CARGO_PROFILE }} s3://${{ secrets.GREPTIMEDB_RELEASE_BUCKET_NAME }}/releases/${TAG} --recursive --exclude "*" --include "*.tgz"
        env:
          AWS_ACCESS_KEY_ID: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
          AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
@@ -383,7 +399,7 @@ jobs:
        uses: docker/build-push-action@v3
        if: success() || steps.unzip-arm64.conclusion == 'success' # Build and push all platform if unzip-arm64 succeeds
        with:
          context: ./docker/ci/
          context: .
          file: ./docker/ci/Dockerfile
          push: true
          platforms: linux/amd64,linux/arm64
@@ -395,7 +411,7 @@ jobs:
        uses: docker/build-push-action@v3
        if: success() || steps.download-arm64.conclusion == 'failure' # Only build and push amd64 platform if download-arm64 fails
        with:
          context: ./docker/ci/
          context: .
          file: ./docker/ci/Dockerfile
          push: true
          platforms: linux/amd64
@@ -459,7 +475,8 @@ jobs:
          name: "${{ github.ref_name }}"
          prerelease: ${{ env.prerelease }}
          makeLatest: ${{ env.makeLatest }}
          generateReleaseNotes: true
          generateReleaseNotes: false
          allowUpdates: true
          artifacts: |
            **/greptime-*

@@ -482,7 +499,7 @@ jobs:
      - name: Login to alibaba cloud container registry
        uses: docker/login-action@v2
        with:
          registry: registry.cn-hangzhou.aliyuncs.com
          registry: greptime-registry.cn-hangzhou.cr.aliyuncs.com
          username: ${{ secrets.ALICLOUD_USERNAME }}
          password: ${{ secrets.ALICLOUD_PASSWORD }}

@@ -504,6 +521,6 @@ jobs:
      - name: Push image to alibaba cloud container registry # Use 'docker buildx imagetools create' to create a new image base on source image.
        run: |
          docker buildx imagetools create \
            --tag registry.cn-hangzhou.aliyuncs.com/greptime/greptimedb:latest \
            --tag registry.cn-hangzhou.aliyuncs.com/greptime/greptimedb:${{ env.IMAGE_TAG }} \
            --tag greptime-registry.cn-hangzhou.cr.aliyuncs.com/greptime/greptimedb:latest \
            --tag greptime-registry.cn-hangzhou.cr.aliyuncs.com/greptime/greptimedb:${{ env.IMAGE_TAG }} \
            greptime/greptimedb:${{ env.IMAGE_TAG }}
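The scheduled-build tag logic above can be sanity-checked outside CI. A minimal sketch, with assumed values for the two env vars the workflow supplies:

```bash
#!/usr/bin/env bash
# Assumed values; in CI these come from env.SCHEDULED_BUILD_VERSION_PREFIX / env.SCHEDULED_PERIOD.
SCHEDULED_BUILD_VERSION_PREFIX=v0.4.0
SCHEDULED_PERIOD=nightly
buildTime=$(date "+%Y%m%d")
SCHEDULED_BUILD_VERSION=${SCHEDULED_BUILD_VERSION_PREFIX}-${buildTime}-${SCHEDULED_PERIOD}
# ${VAR:1} drops the leading "v": v0.4.0-20230711-nightly -> 0.4.0-20230711-nightly
echo "TAG=${SCHEDULED_BUILD_VERSION:1}"
```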
422 Cargo.lock generated
@@ -81,7 +81,7 @@ dependencies = [
 "bytes",
 "cfg-if 1.0.0",
 "http",
 "indexmap",
 "indexmap 1.9.3",
 "schemars",
 "serde",
 "serde_json",
@@ -209,7 +209,7 @@ dependencies = [
 "greptime-proto",
 "prost",
 "snafu",
 "tonic",
 "tonic 0.9.2",
 "tonic-build",
]

@@ -228,6 +228,20 @@ version = "0.1.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b3f9eb837c6a783fbf002e3e5cc7925a3aa6893d6d42f9169517528983777590"

[[package]]
name = "aquamarine"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "df752953c49ce90719c7bf1fc587bc8227aed04732ea0c0f85e5397d7fdbd1a1"
dependencies = [
 "include_dir",
 "itertools",
 "proc-macro-error",
 "proc-macro2",
 "quote",
 "syn 1.0.109",
]

[[package]]
name = "arc-swap"
version = "1.6.0"
@@ -382,7 +396,7 @@ dependencies = [
 "paste",
 "prost",
 "tokio",
 "tonic",
 "tonic 0.9.2",
]

[[package]]
@@ -412,7 +426,7 @@ dependencies = [
 "arrow-schema",
 "chrono",
 "half 2.2.1",
 "indexmap",
 "indexmap 1.9.3",
 "lexical-core",
 "num",
 "serde",
@@ -691,6 +705,7 @@ dependencies = [
 "bitflags 1.3.2",
 "bytes",
 "futures-util",
 "headers",
 "http",
 "http-body",
 "hyper",
@@ -1305,7 +1320,7 @@ version = "0.12.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bf74ea341ae8905eac9a234b6a5a845e118c25bbbdecf85ec77431a8b3bfa0be"
dependencies = [
 "indexmap",
 "indexmap 1.9.3",
 "lazy_static",
 "num-traits",
 "regex",
@@ -1436,7 +1451,7 @@ dependencies = [
 "bitflags 1.3.2",
 "clap_derive 3.2.25",
 "clap_lex 0.2.4",
 "indexmap",
 "indexmap 1.9.3",
 "once_cell",
 "strsim 0.10.0",
 "termcolor",
@@ -1539,7 +1554,7 @@ dependencies = [
 "substrait 0.7.5",
 "tokio",
 "tokio-stream",
 "tonic",
 "tonic 0.9.2",
 "tracing",
 "tracing-subscriber",
]

@@ -1575,19 +1590,24 @@ name = "cmd"
version = "0.3.2"
dependencies = [
 "anymap",
 "async-trait",
 "build-data",
 "catalog",
 "chrono",
 "clap 3.2.25",
 "client",
 "common-base",
 "common-error",
 "common-meta",
 "common-query",
 "common-recordbatch",
 "common-telemetry",
 "common-test-util",
 "config",
 "datanode",
 "datatypes",
 "either",
 "etcd-client",
 "frontend",
 "futures",
 "meta-client",
@@ -1596,6 +1616,7 @@ dependencies = [
 "nu-ansi-term",
 "partition",
 "query",
 "rand",
 "rexpect",
 "rustyline 10.1.1",
 "serde",
@@ -1603,10 +1624,11 @@ dependencies = [
 "session",
 "snafu",
 "substrait 0.3.2",
 "table",
 "temp-env",
 "tikv-jemallocator",
 "tokio",
 "toml",
 "toml 0.7.6",
]

[[package]]
@@ -1643,7 +1665,7 @@ dependencies = [
 "paste",
 "serde",
 "snafu",
 "toml",
 "toml 0.7.6",
]

[[package]]
@@ -1655,8 +1677,6 @@ dependencies = [
 "common-error",
 "common-telemetry",
 "datatypes",
 "lazy_static",
 "regex",
 "serde",
 "serde_json",
 "snafu",
@@ -1758,11 +1778,12 @@ dependencies = [
 "datatypes",
 "flatbuffers",
 "futures",
 "lazy_static",
 "prost",
 "rand",
 "snafu",
 "tokio",
 "tonic",
 "tonic 0.9.2",
 "tower",
]

@@ -1812,8 +1833,11 @@ dependencies = [
 "common-telemetry",
 "common-time",
 "datatypes",
 "etcd-client",
 "futures",
 "lazy_static",
 "prost",
 "regex",
 "serde",
 "serde_json",
 "snafu",
@@ -1846,6 +1870,7 @@ dependencies = [
 "common-test-util",
 "futures",
 "futures-util",
 "humantime-serde",
 "object-store",
 "serde",
 "serde_json",
@@ -1926,9 +1951,11 @@ dependencies = [
 "metrics-exporter-prometheus",
 "metrics-util",
 "once_cell",
 "opentelemetry",
 "opentelemetry 0.17.0",
 "opentelemetry-jaeger",
 "parking_lot 0.12.1",
 "rand",
 "rs-snowflake",
 "serde",
 "tokio",
 "tracing",
@@ -1953,6 +1980,7 @@ dependencies = [
name = "common-time"
version = "0.3.2"
dependencies = [
 "arrow",
 "chrono",
 "chrono-tz 0.8.2",
 "common-error",
@@ -1986,7 +2014,7 @@ dependencies = [
 "rust-ini 0.18.0",
 "serde",
 "serde_json",
 "toml",
 "toml 0.5.11",
 "yaml-rust",
]

@@ -2011,7 +2039,7 @@ checksum = "c2895653b4d9f1538a83970077cb01dfc77a4810524e51a110944688e916b18e"
dependencies = [
 "prost",
 "prost-types",
 "tonic",
 "tonic 0.9.2",
 "tracing-core",
]

@@ -2033,7 +2061,7 @@ dependencies = [
 "thread_local",
 "tokio",
 "tokio-stream",
 "tonic",
 "tonic 0.9.2",
 "tracing",
 "tracing-core",
 "tracing-subscriber",
@@ -2434,7 +2462,7 @@ dependencies = [
 "futures",
 "glob",
 "hashbrown 0.13.2",
 "indexmap",
 "indexmap 1.9.3",
 "itertools",
 "lazy_static",
 "log",
@@ -2535,7 +2563,7 @@ dependencies = [
 "datafusion-row",
 "half 2.2.1",
 "hashbrown 0.13.2",
 "indexmap",
 "indexmap 1.9.3",
 "itertools",
 "lazy_static",
 "libc",
@@ -2637,7 +2665,6 @@ dependencies = [
 "pin-project",
 "prost",
 "query",
 "regex",
 "secrecy",
 "serde",
 "serde_json",
@@ -2652,8 +2679,8 @@ dependencies = [
 "table-procedure",
 "tokio",
 "tokio-stream",
 "toml",
 "tonic",
 "toml 0.7.6",
 "tonic 0.9.2",
 "tower",
 "tower-http",
 "url",
@@ -2969,6 +2996,12 @@ dependencies = [
 "syn 1.0.109",
]

[[package]]
name = "equivalent"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5"

[[package]]
name = "erased-serde"
version = "0.3.25"
@@ -3039,7 +3072,7 @@ dependencies = [
 "prost",
 "tokio",
 "tokio-stream",
 "tonic",
 "tonic 0.9.2",
 "tonic-build",
 "tower",
 "tower-service",
@@ -3252,6 +3285,7 @@ dependencies = [
 "moka 0.9.7",
 "object-store",
 "openmetrics-parser",
 "opentelemetry-proto",
 "partition",
 "prost",
 "query",
@@ -3269,8 +3303,8 @@ dependencies = [
 "substrait 0.3.2",
 "table",
 "tokio",
 "toml",
 "tonic",
 "toml 0.7.6",
 "tonic 0.9.2",
 "tower",
 "uuid",
]

@@ -4109,12 +4143,12 @@ checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b"
[[package]]
name = "greptime-proto"
version = "0.1.0"
source = "git+https://github.com/WenyXu/greptime-proto.git?rev=1eda4691a5d2c8ffc463d48ca2317905ba7e4b2d#1eda4691a5d2c8ffc463d48ca2317905ba7e4b2d"
source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=356694a72f12ad9e15008d4245a0b4fe48f982ad#356694a72f12ad9e15008d4245a0b4fe48f982ad"
dependencies = [
 "prost",
 "serde",
 "serde_json",
 "tonic",
 "tonic 0.9.2",
 "tonic-build",
]

@@ -4130,7 +4164,7 @@ dependencies = [
 "futures-sink",
 "futures-util",
 "http",
 "indexmap",
 "indexmap 1.9.3",
 "slab",
 "tokio",
 "tokio-util",
@@ -4171,6 +4205,12 @@ dependencies = [
 "ahash 0.8.3",
]

[[package]]
name = "hashbrown"
version = "0.14.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2c6201b9ff9fd90a5a3bac2e56a830d0caa509576f0e503818ee82c181b3437a"

[[package]]
name = "hashlink"
version = "0.8.2"
@@ -4193,6 +4233,31 @@ dependencies = [
 "num-traits",
]

[[package]]
name = "headers"
version = "0.3.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f3e372db8e5c0d213e0cd0b9be18be2aca3d44cf2fe30a9d46a65581cd454584"
dependencies = [
 "base64 0.13.1",
 "bitflags 1.3.2",
 "bytes",
 "headers-core",
 "http",
 "httpdate",
 "mime",
 "sha1",
]

[[package]]
name = "headers-core"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e7f66481bfee273957b1f20485a4ff3362987f85b2c236580d81b4eb7a326429"
dependencies = [
 "http",
]

[[package]]
name = "heck"
version = "0.4.1"
@@ -4265,6 +4330,17 @@ dependencies = [
 "windows-sys 0.48.0",
]

[[package]]
name = "hostname"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3c731c3e10504cc8ed35cfe2f1db4c9274c3d35fa486e3b31df46f068ef3e867"
dependencies = [
 "libc",
 "match_cfg",
 "winapi",
]

[[package]]
name = "http"
version = "0.2.9"
@@ -4425,6 +4501,25 @@ dependencies = [
 "hashbrown 0.12.3",
]

[[package]]
name = "include_dir"
version = "0.7.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "18762faeff7122e89e0857b02f7ce6fcc0d101d5e9ad2ad7846cc01d61b7f19e"
dependencies = [
 "include_dir_macros",
]

[[package]]
name = "include_dir_macros"
version = "0.7.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b139284b5cf57ecfa712bcc66950bb635b31aff41c188e8a4cfc758eca374a3f"
dependencies = [
 "proc-macro2",
 "quote",
]

[[package]]
name = "indexmap"
version = "1.9.3"
@@ -4436,6 +4531,16 @@ dependencies = [
 "serde",
]

[[package]]
name = "indexmap"
version = "2.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d5477fe2230a79769d8dc68e0eabf5437907c0457a5614a9e8dddb67f65eb65d"
dependencies = [
 "equivalent",
 "hashbrown 0.14.0",
]

[[package]]
name = "indicatif"
version = "0.17.5"
@@ -4462,7 +4567,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2fb7c1b80a1dfa604bb4a649a5c5aeef3d913f7c520cb42b40e534e8a61bcdfc"
dependencies = [
 "ahash 0.8.3",
 "indexmap",
 "indexmap 1.9.3",
 "is-terminal",
 "itoa",
 "log",
@@ -4925,7 +5030,7 @@ dependencies = [
 "cactus",
 "cfgrammar",
 "filetime",
 "indexmap",
 "indexmap 1.9.3",
 "lazy_static",
 "lrtable",
 "num-traits",
@@ -5044,6 +5149,12 @@ version = "1.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3e2e65a1a2e43cfcb47a895c4c8b10d1f4a61097f9f254f183aee60cad9c651d"

[[package]]
name = "match_cfg"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ffbee8634e0d45d258acb448e7eaab3fce7a0a467395d4d9f228e3c1f01fb2e4"

[[package]]
name = "matchers"
version = "0.1.0"
@@ -5154,7 +5265,7 @@ dependencies = [
 "table",
 "tokio",
 "tokio-stream",
 "tonic",
 "tonic 0.9.2",
 "tower",
 "tracing",
 "tracing-subscriber",
@@ -5175,6 +5286,7 @@ dependencies = [
 "common-catalog",
 "common-error",
 "common-grpc",
 "common-grpc-expr",
 "common-meta",
 "common-procedure",
 "common-procedure-test",
@@ -5203,7 +5315,8 @@ dependencies = [
 "table",
 "tokio",
 "tokio-stream",
 "tonic",
 "toml 0.7.6",
 "tonic 0.9.2",
 "tower",
 "tracing",
 "tracing-subscriber",
@@ -5246,7 +5359,7 @@ version = "0.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8603921e1f54ef386189335f288441af761e0fc61bcb552168d9cedfe63ebc70"
dependencies = [
 "indexmap",
 "indexmap 1.9.3",
 "metrics",
 "metrics-util",
 "parking_lot 0.12.1",
@@ -5291,7 +5404,7 @@ dependencies = [
 "crossbeam-epoch",
 "crossbeam-utils",
 "hashbrown 0.12.3",
 "indexmap",
 "indexmap 1.9.3",
 "metrics",
 "num_cpus",
 "ordered-float 2.10.0",
@@ -5394,6 +5507,47 @@ dependencies = [
[[package]]
name = "mito2"
version = "0.3.2"
dependencies = [
 "anymap",
 "aquamarine",
 "arc-swap",
 "async-compat",
 "async-stream",
 "async-trait",
 "chrono",
 "common-base",
 "common-catalog",
 "common-datasource",
 "common-error",
 "common-procedure",
 "common-procedure-test",
 "common-query",
 "common-recordbatch",
 "common-runtime",
 "common-telemetry",
 "common-test-util",
 "common-time",
 "dashmap",
 "datafusion",
 "datafusion-common",
 "datatypes",
 "futures",
 "key-lock",
 "lazy_static",
 "log-store",
 "metrics",
 "object-store",
 "parquet",
 "regex",
 "serde",
 "serde_json",
 "snafu",
 "storage",
 "store-api",
 "table",
 "tokio",
 "uuid",
]

[[package]]
name = "moka"
@@ -5969,6 +6123,16 @@ dependencies = [
 "tokio-stream",
]

[[package]]
name = "opentelemetry"
version = "0.19.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5f4b8347cc26099d3aeee044065ecc3ae11469796b4d65d065a23a584ed92a6f"
dependencies = [
 "opentelemetry_api",
 "opentelemetry_sdk",
]

[[package]]
name = "opentelemetry-jaeger"
version = "0.16.0"
@@ -5977,20 +6141,69 @@ checksum = "f8c0b12cd9e3f9b35b52f6e0dac66866c519b26f424f4bbf96e3fe8bfbdc5229"
dependencies = [
 "async-trait",
 "lazy_static",
 "opentelemetry",
 "opentelemetry 0.17.0",
 "opentelemetry-semantic-conventions",
 "thiserror",
 "thrift 0.15.0",
 "tokio",
]

[[package]]
name = "opentelemetry-proto"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "045f8eea8c0fa19f7d48e7bc3128a39c2e5c533d5c61298c548dfefc1064474c"
dependencies = [
 "futures",
 "futures-util",
 "opentelemetry 0.19.0",
 "prost",
 "tonic 0.8.3",
]

[[package]]
name = "opentelemetry-semantic-conventions"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "985cc35d832d412224b2cffe2f9194b1b89b6aa5d0bef76d080dce09d90e62bd"
dependencies = [
 "opentelemetry",
 "opentelemetry 0.17.0",
]

[[package]]
name = "opentelemetry_api"
version = "0.19.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ed41783a5bf567688eb38372f2b7a8530f5a607a4b49d38dd7573236c23ca7e2"
dependencies = [
 "fnv",
 "futures-channel",
 "futures-util",
 "indexmap 1.9.3",
 "once_cell",
 "pin-project-lite",
 "thiserror",
 "urlencoding",
]

[[package]]
name = "opentelemetry_sdk"
version = "0.19.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8b3a2a91fdbfdd4d212c0dcc2ab540de2c2bcbbd90be17de7a7daf8822d010c1"
dependencies = [
 "async-trait",
 "crossbeam-channel",
 "dashmap",
 "fnv",
 "futures-channel",
 "futures-executor",
 "futures-util",
 "once_cell",
 "opentelemetry_api",
 "percent-encoding",
 "rand",
 "thiserror",
]

[[package]]
@@ -6001,8 +6214,9 @@ checksum = "978aa494585d3ca4ad74929863093e87cac9790d81fe7aba2b3dc2890643a0fc"

[[package]]
name = "orc-rust"
version = "0.2.3"
source = "git+https://github.com/WenyXu/orc-rs.git?rev=0319acd32456e403c20f135cc012441a76852605#0319acd32456e403c20f135cc012441a76852605"
version = "0.2.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "01773d11a950f7418e691899bd2917e393972188b961d381ee3ce1880ad02e32"
dependencies = [
 "arrow",
 "bytes",
@@ -6359,14 +6573,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4dd7d28ee937e54fe3080c91faa1c3a46c06de6252988a7f4592ba2310ef22a4"
dependencies = [
 "fixedbitset",
 "indexmap",
 "indexmap 1.9.3",
]

[[package]]
name = "pgwire"
version = "0.14.1"
version = "0.15.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bd92c65406efd0d621cdece478a41a89e472a559e44a6f2b218df4c14e66a888"
checksum = "e2de42ee35f9694def25c37c15f564555411d9904b48e33680618ee7359080dc"
dependencies = [
 "async-trait",
 "base64 0.21.2",
@@ -6703,7 +6917,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5ca9c6be70d989d21a136eb86c2d83e4b328447fac4a88dace2143c179c86267"
dependencies = [
 "autocfg",
 "indexmap",
 "indexmap 1.9.3",
]

[[package]]
@@ -6712,7 +6926,7 @@ version = "0.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1d6ea3c4595b96363c13943497db34af4460fb474a95c43f4446ad341b8c9785"
dependencies = [
 "toml",
 "toml 0.5.11",
]

[[package]]
@@ -6821,6 +7035,7 @@ dependencies = [
 "common-catalog",
 "common-error",
 "common-function-macro",
 "common-telemetry",
 "datafusion",
 "datatypes",
 "futures",
@@ -7558,6 +7773,12 @@ dependencies = [
 "serde",
]

[[package]]
name = "rs-snowflake"
version = "0.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e60ef3b82994702bbe4e134d98aadca4b49ed04440148985678d415c68127666"

[[package]]
name = "rsa"
version = "0.6.1"
@@ -7840,7 +8061,7 @@ source = "git+https://github.com/discord9/RustPython?rev=9ed5137412#9ed51374125b
dependencies = [
 "ahash 0.7.6",
 "bitflags 1.3.2",
 "indexmap",
 "indexmap 1.9.3",
 "itertools",
 "log",
 "num-complex",
@@ -7913,7 +8134,7 @@ name = "rustpython-derive-impl"
version = "0.2.0"
source = "git+https://github.com/discord9/RustPython?rev=9ed5137412#9ed51374125b5f1a9e5cee5dd7e27023b8591f1e"
dependencies = [
 "indexmap",
 "indexmap 1.9.3",
 "itertools",
 "maplit",
 "once_cell",
@@ -8054,7 +8275,7 @@ dependencies = [
 "glob",
 "half 1.8.2",
 "hex",
 "indexmap",
 "indexmap 1.9.3",
 "is-macro",
 "itertools",
 "libc",
@@ -8266,7 +8487,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "02c613288622e5f0c3fdc5dbd4db1c5fbe752746b1d1a56a0630b78fd00de44f"
dependencies = [
 "dyn-clone",
 "indexmap",
 "indexmap 1.9.3",
 "schemars_derive",
 "serde",
 "serde_json",
@@ -8497,6 +8718,15 @@ dependencies = [
 "syn 2.0.18",
]

[[package]]
name = "serde_spanned"
version = "0.6.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "96426c9936fd7a0124915f9185ea1d20aa9445cc9821142f0a73bc9207a2e186"
dependencies = [
 "serde",
]

[[package]]
name = "serde_tokenstream"
version = "0.1.7"
@@ -8538,7 +8768,7 @@ version = "0.9.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d9d684e3ec7de3bf5466b32bd75303ac16f0736426e5a4e0d6e489559ce1249c"
dependencies = [
 "indexmap",
 "indexmap 1.9.3",
 "itoa",
 "ryu",
 "serde",
@@ -8582,7 +8812,9 @@ dependencies = [
 "derive_builder 0.12.0",
 "digest",
 "futures",
 "headers",
 "hex",
 "hostname",
 "http-body",
 "humantime-serde",
 "hyper",
@@ -8596,6 +8828,7 @@ dependencies = [
 "once_cell",
 "openmetrics-parser",
 "opensrv-mysql",
 "opentelemetry-proto",
 "parking_lot 0.12.1",
 "pgwire",
 "pin-project",
@@ -8627,7 +8860,7 @@ dependencies = [
 "tokio-rustls 0.24.0",
 "tokio-stream",
 "tokio-test",
 "tonic",
 "tonic 0.9.2",
 "tonic-reflection",
 "tower",
 "tower-http",
@@ -8641,6 +8874,7 @@ dependencies = [
 "common-catalog",
 "common-telemetry",
 "common-time",
 "derive_builder 0.12.0",
 "sql",
]

@@ -8951,7 +9185,7 @@ dependencies = [
 "prettydiff",
 "regex",
 "thiserror",
 "toml",
 "toml 0.5.11",
 "walkdir",
]

@@ -9032,7 +9266,7 @@ dependencies = [
 "hex",
 "hkdf",
 "hmac",
 "indexmap",
 "indexmap 1.9.3",
 "itoa",
 "libc",
 "log",
@@ -9185,7 +9419,7 @@ dependencies = [
 "table",
 "tokio",
 "tokio-util",
 "tonic",
 "tonic 0.9.2",
 "tonic-build",
 "uuid",
]
@@ -9203,7 +9437,7 @@ dependencies = [
 "common-recordbatch",
 "common-time",
 "datatypes",
 "derive_builder 0.11.2",
 "derive_builder 0.12.0",
 "futures",
 "serde",
 "serde_json",
@@ -9481,7 +9715,7 @@ dependencies = [
 "datafusion-expr",
 "datafusion-physical-expr",
 "datatypes",
 "derive_builder 0.11.2",
 "derive_builder 0.12.0",
 "futures",
 "humantime",
 "humantime-serde",
@@ -9624,6 +9858,7 @@ dependencies = [
 "mito",
 "object-store",
 "once_cell",
 "opentelemetry-proto",
 "partition",
 "paste",
 "prost",
@@ -9644,7 +9879,7 @@ dependencies = [
 "table",
 "tempfile",
 "tokio",
 "tonic",
 "tonic 0.9.2",
 "tower",
 "uuid",
]

@@ -10003,22 +10238,71 @@ dependencies = [
]

[[package]]
name = "toml_datetime"
version = "0.6.2"
name = "toml"
version = "0.7.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5a76a9312f5ba4c2dec6b9161fdf25d87ad8a09256ccea5a556fef03c706a10f"
checksum = "c17e963a819c331dcacd7ab957d80bc2b9a9c1e71c804826d2f283dd65306542"
dependencies = [
 "serde",
 "serde_spanned",
 "toml_datetime",
 "toml_edit",
]

[[package]]
name = "toml_datetime"
version = "0.6.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7cda73e2f1397b1262d6dfdcef8aafae14d1de7748d66822d3bfeeb6d03e5e4b"
dependencies = [
 "serde",
]

[[package]]
name = "toml_edit"
version = "0.19.10"
version = "0.19.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2380d56e8670370eee6566b0bfd4265f65b3f432e8c6d85623f728d4fa31f739"
checksum = "c500344a19072298cd05a7224b3c0c629348b78692bf48466c5238656e315a78"
dependencies = [
 "indexmap",
 "indexmap 2.0.0",
 "serde",
 "serde_spanned",
 "toml_datetime",
 "winnow",
]

[[package]]
name = "tonic"
version = "0.8.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8f219fad3b929bef19b1f86fbc0358d35daed8f2cac972037ac0dc10bbb8d5fb"
dependencies = [
 "async-stream",
 "async-trait",
 "axum",
 "base64 0.13.1",
 "bytes",
 "futures-core",
 "futures-util",
 "h2",
 "http",
 "http-body",
 "hyper",
 "hyper-timeout",
 "percent-encoding",
 "pin-project",
 "prost",
 "prost-derive",
 "tokio",
 "tokio-stream",
 "tokio-util",
 "tower",
 "tower-layer",
 "tower-service",
 "tracing",
 "tracing-futures",
]

[[package]]
name = "tonic"
version = "0.9.2"
@@ -10073,7 +10357,7 @@ dependencies = [
 "prost-types",
 "tokio",
 "tokio-stream",
 "tonic",
 "tonic 0.9.2",
]

[[package]]
@@ -10085,7 +10369,7 @@ dependencies = [
 "futures-core",
 "futures-util",
 "hdrhistogram",
 "indexmap",
 "indexmap 1.9.3",
 "pin-project",
 "pin-project-lite",
 "rand",
@@ -10232,7 +10516,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fbbe89715c1dbbb790059e2565353978564924ee85017b5fff365c872ff6721f"
dependencies = [
 "once_cell",
 "opentelemetry",
 "opentelemetry 0.17.0",
 "tracing",
 "tracing-core",
 "tracing-log",
@@ -10656,6 +10940,12 @@ dependencies = [
 "percent-encoding",
]

[[package]]
name = "urlencoding"
version = "2.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e8db7427f936968176eaa7cdf81b7f98b980b18495ec28f1b5791ac3bfe3eea9"

[[package]]
name = "utf8parse"
version = "0.2.1"
@@ -11208,16 +11498,16 @@ dependencies = [

[[package]]
name = "x509-certificate"
version = "0.19.0"
version = "0.20.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bf14059fbc1dce14de1d08535c411ba0b18749c2550a12550300da90b7ba350b"
checksum = "2133ce6c08c050a5b368730a67c53a603ffd4a4a6c577c5218675a19f7782c05"
dependencies = [
 "bcder",
 "bytes",
 "chrono",
 "der 0.7.6",
 "hex",
 "pem 1.1.1",
 "pem 2.0.1",
 "ring",
 "signature",
 "spki 0.7.2",
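Most of the churn in this lockfile comes from crates that now exist in two major versions at once (indexmap 1.9.3 vs 2.0.0, tonic 0.8.3 vs 0.9.2, toml 0.5.11 vs 0.7.6, opentelemetry 0.17 vs 0.19), which is why dependency entries gained explicit version suffixes. Standard cargo commands can show who pulls in each copy; a sketch, run from the workspace root:

```bash
# Reverse dependency tree for each copy of a duplicated crate.
cargo tree -i indexmap@1.9.3
cargo tree -i indexmap@2.0.0
# List every crate that appears more than once in the graph.
cargo tree --duplicates
```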
@@ -71,14 +71,20 @@ datafusion-optimizer = { git = "https://github.com/waynexia/arrow-datafusion.git
datafusion-physical-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "63e52dde9e44cac4b1f6c6e6b6bf6368ba3bd323" }
datafusion-sql = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "63e52dde9e44cac4b1f6c6e6b6bf6368ba3bd323" }
datafusion-substrait = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "63e52dde9e44cac4b1f6c6e6b6bf6368ba3bd323" }
derive_builder = "0.12"
etcd-client = "0.11"
futures = "0.3"
futures-util = "0.3"
greptime-proto = { git = "https://github.com/WenyXu/greptime-proto.git", rev = "1eda4691a5d2c8ffc463d48ca2317905ba7e4b2d" }
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "356694a72f12ad9e15008d4245a0b4fe48f982ad" }
itertools = "0.10"
lazy_static = "1.4"
once_cell = "1.18"
opentelemetry-proto = { version = "0.2", features = ["gen-tonic", "metrics"] }
parquet = "40.0"
paste = "1.0"
prost = "0.11"
rand = "0.8"
regex = "1.8"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
snafu = { version = "0.7", features = ["backtraces"] }
@@ -86,6 +92,7 @@ sqlparser = "0.34"
tempfile = "3"
tokio = { version = "1.28", features = ["full"] }
tokio-util = { version = "0.7", features = ["io-util", "compat"] }
toml = "0.7"
tonic = { version = "0.9", features = ["tls"] }
uuid = { version = "1", features = ["serde", "v4", "fast-rng"] }
metrics = "0.20"
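After editing a pinned git rev like greptime-proto in the workspace manifest, the lockfile has to be re-resolved to match; a sketch of the usual sequence:

```bash
# Re-resolve just the edited dependency and verify the new pin landed.
cargo update -p greptime-proto
grep -A 3 'name = "greptime-proto"' Cargo.lock
```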
117 Makefile
@@ -1,15 +1,72 @@
IMAGE_REGISTRY ?= greptimedb
# The arguments for building images.
CARGO_PROFILE ?=
FEATURES ?=
TARGET_DIR ?=
CARGO_BUILD_OPTS := --locked
IMAGE_REGISTRY ?= docker.io
IMAGE_NAMESPACE ?= greptime
IMAGE_TAG ?= latest
BUILDX_MULTI_PLATFORM_BUILD ?= false
BUILDX_BUILDER_NAME ?= gtbuilder
BASE_IMAGE ?= ubuntu
RUST_TOOLCHAIN ?= $(shell cat rust-toolchain.toml | grep channel | cut -d'"' -f2)
CARGO_REGISTRY_CACHE ?= ${HOME}/.cargo/registry

# The arguments for running integration tests.
ETCD_VERSION ?= v3.5.9
ETCD_IMAGE ?= quay.io/coreos/etcd:${ETCD_VERSION}
RETRY_COUNT ?= 3
NEXTEST_OPTS := --retries ${RETRY_COUNT}
BUILD_JOBS ?= $(shell which nproc 1>/dev/null && expr $$(nproc) / 2) # If nproc is not available, we don't set the build jobs.
ifeq ($(BUILD_JOBS), 0) # If the number of cores is less than 2, set the build jobs to 1.
BUILD_JOBS := 1
endif

ifneq ($(strip $(BUILD_JOBS)),)
NEXTEST_OPTS += --build-jobs=${BUILD_JOBS}
endif

ifneq ($(strip $(CARGO_PROFILE)),)
CARGO_BUILD_OPTS += --profile ${CARGO_PROFILE}
endif

ifneq ($(strip $(FEATURES)),)
CARGO_BUILD_OPTS += --features ${FEATURES}
endif

ifneq ($(strip $(TARGET_DIR)),)
CARGO_BUILD_OPTS += --target-dir ${TARGET_DIR}
endif

ifeq ($(BUILDX_MULTI_PLATFORM_BUILD), true)
BUILDX_MULTI_PLATFORM_BUILD_OPTS := --platform linux/amd64,linux/arm64 --push
else
BUILDX_MULTI_PLATFORM_BUILD_OPTS := -o type=docker
endif

##@ Build

.PHONY: build
build: ## Build debug version greptime.
	cargo build
build: ## Build debug version greptime. If USE_DEV_BUILDER is true, the binary will be built in dev-builder.
ifeq ($(USE_DEV_BUILDER), true)
	docker run --network=host \
	-v ${PWD}:/greptimedb -v ${CARGO_REGISTRY_CACHE}:/root/.cargo/registry \
	-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder:latest \
	make build CARGO_PROFILE=${CARGO_PROFILE} FEATURES=${FEATURES} TARGET_DIR=${TARGET_DIR}
else
	cargo build ${CARGO_BUILD_OPTS}
endif

.PHONY: release
release: ## Build release version greptime.
	cargo build --release
release: ## Build release version greptime. If USE_DEV_BUILDER is true, the binary will be built in dev-builder.
ifeq ($(USE_DEV_BUILDER), true)
	docker run --network=host \
	-v ${PWD}:/greptimedb -v ${CARGO_REGISTRY_CACHE}:/root/.cargo/registry \
	-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder:latest \
	make release CARGO_PROFILE=${CARGO_PROFILE} FEATURES=${FEATURES} TARGET_DIR=${TARGET_DIR}
else
	cargo build --release ${CARGO_BUILD_OPTS}
endif

.PHONY: clean
clean: ## Clean the project.
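The reworked build/release targets are driven entirely by the variables above; a usage sketch (pyo3_backend is a feature this repo defines elsewhere, and the profile value is illustrative):

```bash
# Host build with explicit profile and features.
make build CARGO_PROFILE=dev FEATURES=pyo3_backend
# Same build, executed inside the containerized dev-builder toolchain.
make build USE_DEV_BUILDER=true
```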
@@ -28,13 +85,38 @@ check-toml: ## Check all TOML files.
	taplo format --check --option "indent_string= "

.PHONY: docker-image
docker-image: ## Build docker image.
	docker build --network host -f docker/Dockerfile -t ${IMAGE_REGISTRY}:${IMAGE_TAG} .
docker-image: multi-platform-buildx ## Build docker image.
	docker buildx build --builder ${BUILDX_BUILDER_NAME} \
	--build-arg="CARGO_PROFILE=${CARGO_PROFILE}" --build-arg="FEATURES=${FEATURES}" \
	-f docker/${BASE_IMAGE}/Dockerfile \
	-t ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/greptimedb:${IMAGE_TAG} ${BUILDX_MULTI_PLATFORM_BUILD_OPTS} .

.PHONY: build-greptime-by-buildx
build-greptime-by-buildx: multi-platform-buildx ## Build greptime binary by docker buildx. The binary will be copied to the current directory.
	docker buildx build --builder ${BUILDX_BUILDER_NAME} \
	--target=builder \
	--build-arg="CARGO_PROFILE=${CARGO_PROFILE}" --build-arg="FEATURES=${FEATURES}" \
	-f docker/${BASE_IMAGE}/Dockerfile \
	-t ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/greptimedb-builder:${IMAGE_TAG} ${BUILDX_MULTI_PLATFORM_BUILD_OPTS} .

	docker run --rm -v ${PWD}:/data \
	--entrypoint cp ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/greptimedb-builder:${IMAGE_TAG} \
	/out/target/${CARGO_PROFILE}/greptime /data/greptime

.PHONY: dev-builder
dev-builder: multi-platform-buildx ## Build dev-builder image.
	docker buildx build --builder ${BUILDX_BUILDER_NAME} \
	--build-arg="RUST_TOOLCHAIN=${RUST_TOOLCHAIN}" \
	-f docker/dev-builder/Dockerfile \
	-t ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder:${IMAGE_TAG} ${BUILDX_MULTI_PLATFORM_BUILD_OPTS} .

.PHONY: multi-platform-buildx
multi-platform-buildx: ## Create buildx multi-platform builder.
	docker buildx inspect ${BUILDX_BUILDER_NAME} || docker buildx create --name ${BUILDX_BUILDER_NAME} --driver docker-container --bootstrap --use

##@ Test

test: nextest ## Run unit and integration tests.
	cargo nextest run --retries 3
	cargo nextest run ${NEXTEST_OPTS}

.PHONY: nextest ## Install nextest tools.
nextest:
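All of the buildx targets above assume a docker-container builder exists; multi-platform-buildx creates one on demand, and the same thing can be done and verified by hand. A sketch using the Makefile's default builder name:

```bash
# Create (or reuse) the builder and confirm its platforms.
docker buildx inspect gtbuilder || \
  docker buildx create --name gtbuilder --driver docker-container --bootstrap --use
docker buildx ls   # gtbuilder should list linux/amd64 and linux/arm64
```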
@@ -56,6 +138,21 @@ clippy: ## Check clippy rules.
fmt-check: ## Check code format.
	cargo fmt --all -- --check

.PHONY: start-etcd
start-etcd: ## Start single node etcd for testing purpose.
	docker run --rm -d --network=host -p 2379-2380:2379-2380 ${ETCD_IMAGE}

.PHONY: stop-etcd
stop-etcd: ## Stop single node etcd for testing purpose.
	docker stop $$(docker ps -q --filter ancestor=${ETCD_IMAGE})

.PHONY: run-it-in-container
run-it-in-container: start-etcd ## Run integration tests in dev-builder.
	docker run --network=host \
	-v ${PWD}:/greptimedb -v ${CARGO_REGISTRY_CACHE}:/root/.cargo/registry -v /tmp:/tmp \
	-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder:latest \
	make test sqlness-test BUILD_JOBS=${BUILD_JOBS}

##@ General

# The help target prints out all targets with their descriptions organized
@@ -71,4 +168,4 @@ fmt-check: ## Check code format.

.PHONY: help
help: ## Display help messages.
	@awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m<target>\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-20s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST)
	@awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m<target>\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-30s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST)
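After `make start-etcd`, the single-node etcd can be probed over its HTTP API; a quick check (the exact JSON shape varies slightly across etcd versions):

```bash
# etcd exposes a JSON health endpoint on the client port.
curl -s http://127.0.0.1:2379/health
# expected: {"health":"true"} (newer versions add a "reason" field)
```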
@@ -47,14 +47,10 @@ for years. Based on their best-practices, GreptimeDB is born to give you:

## Quick Start

### GreptimePlay
### [GreptimePlay](https://greptime.com/playground)

Try out the features of GreptimeDB right from your browser.

<a href="https://greptime.com/playground" target="_blank"><img
src="https://www.greptime.com/assets/greptime_play_button_colorful.1bbe2746.png"
alt="GreptimePlay" width="200px" /></a>

### Build

#### Build from Source
@@ -106,7 +102,7 @@ Please see [the online document site](https://docs.greptime.com/getting-started/

Read the [complete getting started guide](https://docs.greptime.com/getting-started/overview#connect) on our [official document site](https://docs.greptime.com/).

To write and query data, GreptimeDB is compatible with multiple [protocols and clients](https://docs.greptime.com/user-guide/client/overview).
To write and query data, GreptimeDB is compatible with multiple [protocols and clients](https://docs.greptime.com/user-guide/clients/overview).

## Resources
@@ -10,8 +10,10 @@ rpc_addr = "127.0.0.1:3001"
rpc_hostname = "127.0.0.1"
# The number of gRPC server worker threads, 8 by default.
rpc_runtime_size = 8

[heartbeat]
# Interval for sending heartbeat messages to the Metasrv in milliseconds, 5000 by default.
heartbeat_interval_millis = 5000
interval_millis = 5000

# Metasrv client options.
[meta_client_options]
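Configs written for v0.3.x need the renamed key moved under the new [heartbeat] section. A rough migration sketch — it only renames the key (the section header still has to be placed by hand), it keeps a .bak backup, and the filename is illustrative:

```bash
sed -i.bak 's/^heartbeat_interval_millis/interval_millis/' config/datanode.example.toml
```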
@@ -1,7 +1,9 @@
# Node running mode, see `standalone.example.toml`.
mode = "distributed"

[heartbeat]
# Interval for sending heartbeat task to the Metasrv in milliseconds, 5000 by default.
heartbeat_interval_millis = 5000
interval_millis = 5000
# Interval for retry sending heartbeat task in milliseconds, 5000 by default.
retry_interval_millis = 5000

@@ -47,18 +49,20 @@ runtime_size = 2
[influxdb_options]
enable = true

# Prometheus protocol options, see `standalone.example.toml`.
[prometheus_options]
# Prometheus remote storage options, see `standalone.example.toml`.
[prom_store_options]
enable = true

# Prometheus protocol options, see `standalone.example.toml`.
[prom_options]
[prometheus_options]
addr = "127.0.0.1:4004"

# Metasrv client options, see `datanode.example.toml`.
[meta_client_options]
metasrv_addrs = ["127.0.0.1:3002"]
timeout_millis = 3000
# DDL timeouts options.
ddl_timeout_millis = 10000
connect_timeout_millis = 5000
tcp_nodelay = true
@@ -18,3 +18,18 @@ use_memory_store = false
# [logging]
# dir = "/tmp/greptimedb/logs"
# level = "info"

# Procedure storage options.
[procedure]
# Procedure max retry time.
max_retry_times = 3
# Initial retry delay of procedures, increases exponentially
retry_delay = "500ms"

# # Datanode options.
# [datanode]
# # Datanode client options.
# [datanode.client_options]
# timeout_millis = 10000
# connect_timeout_millis = 10000
# tcp_nodelay = true
@@ -69,13 +69,13 @@ runtime_size = 2
# Whether to enable InfluxDB protocol in HTTP API, true by default.
enable = true

# Prometheus protocol options.
[prometheus_options]
# Prometheus remote storage options
[prom_store_options]
# Whether to enable Prometheus remote write and read in HTTP API, true by default.
enable = true

# Prom protocol options.
[prom_options]
# Prometheus protocol options
[prometheus_options]
# Prometheus API server address, "127.0.0.1:4004" by default.
addr = "127.0.0.1:4004"
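Since [prometheus_options] changed meaning across these files (it now configures the Prometheus HTTP API, while remote storage moved to [prom_store_options] and [prom_options] is gone), it is worth grepping old configs for stale section names; a sketch, with an assumed config directory:

```bash
grep -nE '^\[(prom_options|prometheus_options|prom_store_options)\]' config/*.toml
```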
@@ -1,29 +0,0 @@
FROM centos:7

ENV LANG en_US.utf8
WORKDIR /greptimedb

RUN sed -e 's|^mirrorlist=|#mirrorlist=|g' \
    -e 's|^#baseurl=http://mirror.centos.org/centos|baseurl=http://mirrors.tuna.tsinghua.edu.cn/centos|g' \
    -i.bak \
    /etc/yum.repos.d/CentOS-*.repo

# Install dependencies
RUN RUN ulimit -n 1024000 && yum groupinstall -y 'Development Tools'
RUN yum install -y epel-release \
    openssl \
    openssl-devel \
    centos-release-scl \
    rh-python38 \
    rh-python38-python-devel

# Install protoc
RUN curl -LO https://github.com/protocolbuffers/protobuf/releases/download/v3.15.8/protoc-3.15.8-linux-x86_64.zip
RUN unzip protoc-3.15.8-linux-x86_64.zip -d /usr/local/

# Install Rust
SHELL ["/bin/bash", "-c"]
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
ENV PATH /opt/rh/rh-python38/root/usr/bin:/usr/local/bin:/root/.cargo/bin/:$PATH

CMD ["cargo", "build", "--release"]
@@ -1,58 +0,0 @@
FROM ubuntu:22.04 as builder

ENV LANG en_US.utf8
WORKDIR /greptimedb

# Install dependencies.
RUN apt-get update && apt-get install -y \
    libssl-dev \
    protobuf-compiler \
    curl \
    git \
    build-essential \
    pkg-config \
    wget

# Install Rust.
SHELL ["/bin/bash", "-c"]
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
ENV PATH /root/.cargo/bin/:$PATH

# Install cross platform toolchain
RUN apt-get -y update && \
    apt-get -y install g++-aarch64-linux-gnu gcc-aarch64-linux-gnu && \
    apt-get install binutils-aarch64-linux-gnu

COPY ./docker/aarch64/compile-python.sh ./docker/aarch64/
RUN chmod +x ./docker/aarch64/compile-python.sh && \
    ./docker/aarch64/compile-python.sh

COPY ./rust-toolchain.toml .
# Install rustup target for cross compiling.
RUN rustup target add aarch64-unknown-linux-gnu
COPY . .
# Update dependency, using separate `RUN` to separate cache
RUN cargo fetch

# This three env var is set in script, so I set it manually in dockerfile.
ENV LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/lib/
ENV LIBRARY_PATH=$LIBRARY_PATH:/usr/local/lib/
ENV PY_INSTALL_PATH=/greptimedb/python_arm64_build

# Set the environment variable for cross compiling and compile it
# cross compiled python is `python3` in path, but pyo3 need `python` in path so alias it
# Build the project in release mode.
RUN export PYO3_CROSS_LIB_DIR=$PY_INSTALL_PATH/lib && \
    alias python=python3 && \
    cargo build --target aarch64-unknown-linux-gnu --release -F pyo3_backend

# Exporting the binary to the clean image
FROM ubuntu:22.04 as base

RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get -y install ca-certificates

WORKDIR /greptime
COPY --from=builder /greptimedb/target/aarch64-unknown-linux-gnu/release/greptime /greptime/bin/
ENV PATH /greptime/bin/:$PATH

ENTRYPOINT ["greptime"]
@@ -1,87 +0,0 @@
#!/usr/bin/env bash

set -e

# this script will download Python source code, compile it, and install it to /usr/local/lib
# then use this python to compile cross-compiled python for aarch64
ARCH=$1
PYTHON_VERSION=3.10.10
PYTHON_SOURCE_DIR=Python-${PYTHON_VERSION}
PYTHON_INSTALL_PATH_AMD64=${PWD}/python-${PYTHON_VERSION}/amd64
PYTHON_INSTALL_PATH_AARCH64=${PWD}/python-${PYTHON_VERSION}/aarch64

function download_python_source_code() {
    wget https://www.python.org/ftp/python/$PYTHON_VERSION/Python-$PYTHON_VERSION.tgz
    tar -xvf Python-$PYTHON_VERSION.tgz
}

function compile_for_amd64_platform() {
    mkdir -p "$PYTHON_INSTALL_PATH_AMD64"

    echo "Compiling for amd64 platform..."

    ./configure \
        --prefix="$PYTHON_INSTALL_PATH_AMD64" \
        --enable-shared \
        ac_cv_pthread_is_default=no ac_cv_pthread=yes ac_cv_cxx_thread=yes \
        ac_cv_have_long_long_format=yes \
        --disable-ipv6 ac_cv_file__dev_ptmx=no ac_cv_file__dev_ptc=no

    make
    make install
}

# explain Python compile options here a bit:s
# --enable-shared: enable building a shared Python library (default is no) but we do need it for calling from rust
# CC, CXX, AR, LD, RANLIB: set the compiler, archiver, linker, and ranlib programs to use
# build: the machine you are building on, host: the machine you will run the compiled program on
# --with-system-ffi: build _ctypes module using an installed ffi library, see Doc/library/ctypes.rst, not used in here TODO: could remove
# ac_cv_pthread_is_default=no ac_cv_pthread=yes ac_cv_cxx_thread=yes:
# allow cross-compiled python to have -pthread set for CXX, see https://github.com/python/cpython/pull/22525
# ac_cv_have_long_long_format=yes: target platform supports long long type
# disable-ipv6: disable ipv6 support, we don't need it in here
# ac_cv_file__dev_ptmx=no ac_cv_file__dev_ptc=no: disable pty support, we don't need it in here
function compile_for_aarch64_platform() {
    export LD_LIBRARY_PATH=$PYTHON_INSTALL_PATH_AMD64/lib:$LD_LIBRARY_PATH
    export LIBRARY_PATH=$PYTHON_INSTALL_PATH_AMD64/lib:$LIBRARY_PATH
    export PATH=$PYTHON_INSTALL_PATH_AMD64/bin:$PATH

    mkdir -p "$PYTHON_INSTALL_PATH_AARCH64"

    echo "Compiling for aarch64 platform..."
    echo "LD_LIBRARY_PATH: $LD_LIBRARY_PATH"
    echo "LIBRARY_PATH: $LIBRARY_PATH"
    echo "PATH: $PATH"

    ./configure --build=x86_64-linux-gnu --host=aarch64-linux-gnu \
        --prefix="$PYTHON_INSTALL_PATH_AARCH64" --enable-optimizations \
        CC=aarch64-linux-gnu-gcc \
        CXX=aarch64-linux-gnu-g++ \
        AR=aarch64-linux-gnu-ar \
        LD=aarch64-linux-gnu-ld \
        RANLIB=aarch64-linux-gnu-ranlib \
        --enable-shared \
        ac_cv_pthread_is_default=no ac_cv_pthread=yes ac_cv_cxx_thread=yes \
        ac_cv_have_long_long_format=yes \
        --disable-ipv6 ac_cv_file__dev_ptmx=no ac_cv_file__dev_ptc=no

    make
    make altinstall
}

# Main script starts here.
download_python_source_code

# Enter the python source code directory.
cd $PYTHON_SOURCE_DIR || exit 1

# Build local python first, then build cross-compiled python.
compile_for_amd64_platform

# Clean the build directory.
make clean && make distclean

# Cross compile python for aarch64.
if [ "$ARCH" = "aarch64-unknown-linux-gnu" ]; then
    compile_for_aarch64_platform
fi
51 docker/centos/Dockerfile Normal file
@@ -0,0 +1,51 @@
FROM centos:7 as builder

ARG CARGO_PROFILE
ARG FEATURES

ENV LANG en_US.utf8
WORKDIR /greptimedb

# Install dependencies
RUN ulimit -n 1024000 && yum groupinstall -y 'Development Tools'
RUN yum install -y epel-release \
    openssl \
    openssl-devel \
    centos-release-scl \
    rh-python38 \
    rh-python38-python-devel

# Install protoc
RUN curl -LO https://github.com/protocolbuffers/protobuf/releases/download/v3.15.8/protoc-3.15.8-linux-x86_64.zip
RUN unzip protoc-3.15.8-linux-x86_64.zip -d /usr/local/

# Install Rust
SHELL ["/bin/bash", "-c"]
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
ENV PATH /opt/rh/rh-python38/root/usr/bin:/usr/local/bin:/root/.cargo/bin/:$PATH

# Build the project in release mode.
RUN --mount=target=.,rw \
    --mount=type=cache,target=/usr/local/cargo/registry \
    make build \
    CARGO_PROFILE=${CARGO_PROFILE} \
    FEATURES=${FEATURES} \
    TARGET_DIR=/out/target

# Export the binary to the clean image.
FROM centos:7 as base

ARG CARGO_PROFILE

RUN yum install -y epel-release \
    openssl \
    openssl-devel \
    centos-release-scl \
    rh-python38 \
    rh-python38-python-devel

WORKDIR /greptime
COPY --from=builder /out/target/${CARGO_PROFILE}/greptime /greptime/bin/
ENV PATH /greptime/bin/:$PATH

ENTRYPOINT ["greptime"]
@@ -4,9 +4,10 @@ RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
    ca-certificates \
    python3.10 \
    python3.10-dev \
    python3-pip
    python3-pip \
    curl

COPY requirements.txt /etc/greptime/requirements.txt
COPY ./docker/python/requirements.txt /etc/greptime/requirements.txt

RUN python3 -m pip install -r /etc/greptime/requirements.txt
16
docker/ci/Dockerfile-centos
Normal file
@@ -0,0 +1,16 @@
FROM centos:7

RUN yum install -y epel-release \
    openssl \
    openssl-devel \
    centos-release-scl \
    rh-python38 \
    rh-python38-python-devel

ARG TARGETARCH

ADD $TARGETARCH/greptime /greptime/bin/

ENV PATH /greptime/bin/:$PATH

ENTRYPOINT ["greptime"]
36
docker/dev-builder/Dockerfile
Normal file
@@ -0,0 +1,36 @@
FROM ubuntu:22.04

ENV LANG en_US.utf8
WORKDIR /greptimedb

# Install dependencies.
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
    libssl-dev \
    tzdata \
    protobuf-compiler \
    curl \
    ca-certificates \
    git \
    build-essential \
    pkg-config \
    python3.10 \
    python3.10-dev \
    python3-pip

RUN git config --global --add safe.directory /greptimedb

# Install Python dependencies.
COPY ./docker/python/requirements.txt /etc/greptime/requirements.txt
RUN python3 -m pip install -r /etc/greptime/requirements.txt

# Install Rust.
SHELL ["/bin/bash", "-c"]
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
ENV PATH /root/.cargo/bin/:$PATH

# Install Rust toolchains.
ARG RUST_TOOLCHAIN
RUN rustup toolchain install ${RUST_TOOLCHAIN}

# Install nextest.
RUN cargo install cargo-nextest --locked
@@ -1,5 +1,8 @@
FROM ubuntu:22.04 as builder

ARG CARGO_PROFILE
ARG FEATURES

ENV LANG en_US.utf8
WORKDIR /greptimedb

@@ -11,11 +14,9 @@ RUN apt-get update && apt-get install -y \
    git \
    build-essential \
    pkg-config \
    python3 \
    python3-dev \
    python3-pip \
    && pip3 install --upgrade pip \
    && pip3 install pyarrow
    python3.10 \
    python3.10-dev \
    python3-pip

# Install Rust.
SHELL ["/bin/bash", "-c"]
@@ -23,17 +24,32 @@ RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-mo
ENV PATH /root/.cargo/bin/:$PATH

# Build the project in release mode.
COPY . .
RUN cargo build --release
RUN --mount=target=.,rw \
    --mount=type=cache,target=/usr/local/cargo/registry \
    make build \
    CARGO_PROFILE=${CARGO_PROFILE} \
    FEATURES=${FEATURES} \
    TARGET_DIR=/out/target

# Export the binary to the clean image.
# TODO(zyy17): Maybe should use the more secure container image.
FROM ubuntu:22.04 as base

RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get -y install ca-certificates
ARG CARGO_PROFILE

RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get \
    -y install ca-certificates \
    python3.10 \
    python3.10-dev \
    python3-pip \
    curl

COPY ./docker/python/requirements.txt /etc/greptime/requirements.txt

RUN python3 -m pip install -r /etc/greptime/requirements.txt

WORKDIR /greptime
COPY --from=builder /greptimedb/target/release/greptime /greptime/bin/
COPY --from=builder /out/target/${CARGO_PROFILE}/greptime /greptime/bin/
ENV PATH /greptime/bin/:$PATH

ENTRYPOINT ["greptime"]
39
docs/benchmarks/tsbs/v0.3.2.md
Normal file
@@ -0,0 +1,39 @@
# TSBS benchmark - v0.3.2

## Environment

| | |
| --- | --- |
| CPU | AMD Ryzen 7 7735HS (8 core 3.2GHz) |
| Memory | 32GB |
| Disk | SOLIDIGM SSDPFKNU010TZ |
| OS | Ubuntu 22.04.2 LTS |

## Write performance

| Write buffer size | Ingest rate (rows/s) |
| --- | --- |
| 512M | 139583.04 |
| 32M | 279250.52 |

## Query performance

| Query type | v0.3.2 write buffer 32M (ms) | v0.3.2 write buffer 512M (ms) | v0.3.1 write buffer 32M (ms) |
| --- | --- | --- | --- |
| cpu-max-all-1 | 921.12 | 241.23 | 553.63 |
| cpu-max-all-8 | 2657.66 | 502.78 | 3308.41 |
| double-groupby-1 | 28238.85 | 27367.42 | 52148.22 |
| double-groupby-5 | 33094.65 | 32421.89 | 56762.37 |
| double-groupby-all | 38565.89 | 38635.52 | 59596.80 |
| groupby-orderby-limit | 23321.60 | 22423.55 | 53983.23 |
| high-cpu-1 | 1167.04 | 254.15 | 832.41 |
| high-cpu-all | 32814.08 | 29906.94 | 62853.12 |
| lastpoint | 192045.05 | 153575.42 | NA |
| single-groupby-1-1-1 | 63.97 | 87.35 | 92.66 |
| single-groupby-1-1-12 | 666.24 | 326.98 | 781.50 |
| single-groupby-1-8-1 | 225.29 | 137.97 | 281.95 |
| single-groupby-5-1-1 | 70.40 | 81.64 | 86.15 |
| single-groupby-5-1-12 | 722.75 | 356.01 | 805.18 |
| single-groupby-5-8-1 | 285.60 | 115.88 | 326.29 |
303
docs/rfcs/2023-07-06-table-engine-refactor.md
Normal file
@@ -0,0 +1,303 @@
---
Feature Name: table-engine-refactor
Tracking Issue: https://github.com/GreptimeTeam/greptimedb/issues/1869
Date: 2023-07-06
Author: "Yingwen <realevenyag@gmail.com>"
---

Refactor Table Engine
----------------------

# Summary
Refactor table engines to address several historical tech debts.

# Motivation
Both `Frontend` and `Datanode` have to deal with multiple regions in a table. This results in code duplication and an additional burden on the `Datanode`.

Before:

```mermaid
graph TB

subgraph Frontend["Frontend"]
    subgraph MyTable
        A("region 0, 2 -> Datanode0")
        B("region 1, 3 -> Datanode1")
    end
end

MyTable --> MetaSrv
MetaSrv --> ETCD

MyTable-->TableEngine0
MyTable-->TableEngine1

subgraph Datanode0
    Procedure0("procedure")
    TableEngine0("table engine")
    region0
    region2
    mytable0("my_table")

    Procedure0-->mytable0
    TableEngine0-->mytable0
    mytable0-->region0
    mytable0-->region2
end

subgraph Datanode1
    Procedure1("procedure")
    TableEngine1("table engine")
    region1
    region3
    mytable1("my_table")

    Procedure1-->mytable1
    TableEngine1-->mytable1
    mytable1-->region1
    mytable1-->region3
end

subgraph manifest["table manifest"]
    M0("my_table")
    M1("regions: [0, 1, 2, 3]")
end

mytable1-->manifest
mytable0-->manifest

RegionManifest0("region manifest 0")
RegionManifest1("region manifest 1")
RegionManifest2("region manifest 2")
RegionManifest3("region manifest 3")
region0-->RegionManifest0
region1-->RegionManifest1
region2-->RegionManifest2
region3-->RegionManifest3
```

Multiple `Datanodes` can update the same manifest file of a table, because a table's regions are assigned to different nodes in the cluster. We also have to run procedures on the `Datanode` to keep the table manifest consistent with the region manifests. A "table" in a `Datanode` is really only a subset of the table's regions, so the `Datanode` is much closer to the `RegionServer` in `HBase`, which only deals with regions.

In cluster mode, we store table metadata both in etcd and in the table manifest, so the table manifest becomes redundant. We can remove the table manifest if we refactor the table engines into region engines that only care about regions. What's more, we then no longer need to run those procedures on the `Datanode`.

After:
```mermaid
graph TB

subgraph Frontend["Frontend"]
    direction LR
    subgraph MyTable
        A("region 0, 2 -> Datanode0")
        B("region 1, 3 -> Datanode1")
    end
end

MyTable --> MetaSrv
MetaSrv --> ETCD

MyTable-->RegionEngine
MyTable-->RegionEngine1

subgraph Datanode0
    RegionEngine("region engine")
    region0
    region2
    RegionEngine-->region0
    RegionEngine-->region2
end

subgraph Datanode1
    RegionEngine1("region engine")
    region1
    region3
    RegionEngine1-->region1
    RegionEngine1-->region3
end

RegionManifest0("region manifest 0")
RegionManifest1("region manifest 1")
RegionManifest2("region manifest 2")
RegionManifest3("region manifest 3")
region0-->RegionManifest0
region1-->RegionManifest1
region2-->RegionManifest2
region3-->RegionManifest3
```
This RFC proposes to refactor table engines into region engines, as a first step toward making the `Datanode` act like a `RegionServer`.

# Details
## Overview

We plan to refactor the `TableEngine` trait into `RegionEngine` gradually. This RFC focuses on the `mito` engine, as it is the default table engine and the most complicated one.
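A rough sketch of what the region-facing interface might look like (names and signatures here are illustrative assumptions, not the final trait; the RFC leaves the exact API to the implementation):

```rust
// Illustrative sketch only: a region engine deals with regions, never with
// tables. Assumes the async-trait crate; RegionRequest/RegionResponse are
// placeholders for the real request types.
use std::sync::Arc;

type RegionId = u64;

pub struct RegionRequest;  // write or DDL payload (placeholder)
pub struct RegionResponse; // result payload (placeholder)

#[async_trait::async_trait]
pub trait RegionEngine: Send + Sync {
    /// Creates or opens a region. The engine persists a region manifest;
    /// there is no table manifest anymore.
    async fn create_region(&self, region_id: RegionId) -> Result<(), String>;

    /// Handles a write or DDL request addressed to a single region.
    async fn handle_request(
        &self,
        region_id: RegionId,
        request: RegionRequest,
    ) -> Result<RegionResponse, String>;
}

pub type RegionEngineRef = Arc<dyn RegionEngine>;
```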
Currently, `MitoEngine` is built upon `StorageEngine`, which manages the regions of the `mito` engine. Once `MitoEngine` becomes a region engine, we can fold `StorageEngine` into `MitoEngine` to simplify the code structure.

The chart below shows the overall architecture of the `MitoEngine`.

```mermaid
classDiagram
class MitoEngine~LogStore~ {
    -WorkerGroup workers
}
class MitoRegion {
    +VersionControlRef version_control
    -RegionId region_id
    -String manifest_dir
    -AtomicI64 last_flush_millis
    +region_id() RegionId
    +scan() ChunkReaderImpl
}
class RegionMap {
    -HashMap<RegionId, MitoRegionRef> regions
}
class ChunkReaderImpl

class WorkerGroup {
    -Vec~RegionWorker~ workers
}
class RegionWorker {
    -RegionMap regions
    -Sender sender
    -JoinHandle handle
}
class RegionWorkerThread~LogStore~ {
    -RegionMap regions
    -Receiver receiver
    -Wal~LogStore~ wal
    -ObjectStore object_store
    -MemtableBuilderRef memtable_builder
    -FlushSchedulerRef~LogStore~ flush_scheduler
    -FlushStrategy flush_strategy
    -CompactionSchedulerRef~LogStore~ compaction_scheduler
    -FilePurgerRef file_purger
}
class Wal~LogStore~ {
    -LogStore log_store
}
class MitoConfig

MitoEngine~LogStore~ o-- MitoConfig
MitoEngine~LogStore~ o-- MitoRegion
MitoEngine~LogStore~ o-- WorkerGroup
MitoRegion o-- VersionControl
MitoRegion -- ChunkReaderImpl
WorkerGroup o-- RegionWorker
RegionWorker o-- RegionMap
RegionWorker -- RegionWorkerThread~LogStore~
RegionWorkerThread~LogStore~ o-- RegionMap
RegionWorkerThread~LogStore~ o-- Wal~LogStore~
```

We replace the `RegionWriter` with `RegionWorker` to process write requests and DDL requests.
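To make the worker model concrete, here is a minimal sketch of the request loop, assuming tokio mpsc channels (field names follow the diagram; everything else is a simplification, not the actual implementation):

```rust
// Each RegionWorker owns a set of regions and serializes their mutations on
// a single task, so no per-region write lock is needed. Assumes tokio.
use std::collections::HashMap;

use tokio::sync::mpsc;

type RegionId = u64;

enum WorkerRequest {
    Write { region_id: RegionId, payload: Vec<u8> },
    Ddl { region_id: RegionId },
}

#[allow(dead_code)]
struct RegionWorker {
    sender: mpsc::Sender<WorkerRequest>,
    handle: tokio::task::JoinHandle<()>,
}

impl RegionWorker {
    fn start() -> Self {
        let (sender, mut receiver) = mpsc::channel::<WorkerRequest>(128);
        let handle = tokio::spawn(async move {
            // Placeholder region state; the real worker keeps MitoRegions here.
            let mut regions: HashMap<RegionId, ()> = HashMap::new();
            while let Some(req) = receiver.recv().await {
                match req {
                    WorkerRequest::Write { region_id, payload: _ } => {
                        // Write the WAL, apply to the memtable, maybe schedule a flush.
                        regions.entry(region_id).or_default();
                    }
                    WorkerRequest::Ddl { region_id } => {
                        // Alter the schema, update the region manifest, etc.
                        regions.entry(region_id).or_default();
                    }
                }
            }
        });
        Self { sender, handle }
    }
}
```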
## Metadata
We also merge the region's metadata with the table's metadata, which should make the metadata much easier to maintain.
```mermaid
classDiagram
class VersionControl {
    -CowCell~Version~ version
    -AtomicU64 committed_sequence
}
class Version {
    -RegionMetadataRef metadata
    -MemtableVersionRef memtables
    -LevelMetasRef ssts
    -SequenceNumber flushed_sequence
    -ManifestVersion manifest_version
}
class MemtableVersion {
    -MemtableRef mutable
    -Vec~MemtableRef~ immutables
    +mutable_memtable() MemtableRef
    +immutable_memtables() &[MemtableRef]
    +freeze_mutable(MemtableRef new_mutable) MemtableVersion
}
class LevelMetas {
    -LevelMetaVec levels
    -AccessLayerRef sst_layer
    -FilePurgerRef file_purger
    -Option~i64~ compaction_time_window
}
class LevelMeta {
    -Level level
    -HashMap<FileId, FileHandle> files
}
class FileHandle {
    -FileMeta meta
    -bool compacting
    -AtomicBool deleted
    -AccessLayerRef sst_layer
    -FilePurgerRef file_purger
}
class FileMeta {
    +RegionId region_id
    +FileId file_id
    +Option<Timestamp, Timestamp> time_range
    +Level level
    +u64 file_size
}

VersionControl o-- Version
Version o-- RegionMetadata
Version o-- MemtableVersion
Version o-- LevelMetas
LevelMetas o-- LevelMeta
LevelMeta o-- FileHandle
FileHandle o-- FileMeta

class RegionMetadata {
    +RegionId region_id
    +VersionNumber version
    +SchemaRef table_schema
    +Vec~usize~ primary_key_indices
    +Vec~usize~ value_indices
    +ColumnId next_column_id
    +TableOptions region_options
    +DateTime~Utc~ created_on
    +RegionSchemaRef region_schema
}
class RegionSchema {
    -SchemaRef user_schema
    -StoreSchemaRef store_schema
    -ColumnsMetadataRef columns
}
class Schema
class StoreSchema {
    -Vec~ColumnMetadata~ columns
    -SchemaRef schema
    -usize row_key_end
    -usize user_column_end
}
class ColumnsMetadata {
    -Vec~ColumnMetadata~ columns
    -HashMap<String, usize> name_to_col_index
    -usize row_key_end
    -usize timestamp_key_index
    -usize user_column_end
}
class ColumnMetadata

RegionMetadata o-- RegionSchema
RegionMetadata o-- Schema
RegionSchema o-- StoreSchema
RegionSchema o-- Schema
RegionSchema o-- ColumnsMetadata
StoreSchema o-- ColumnsMetadata
StoreSchema o-- Schema
StoreSchema o-- ColumnMetadata
ColumnsMetadata o-- ColumnMetadata
```
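The copy-on-write flavor of this metadata shows up in `MemtableVersion::freeze_mutable` above; here is a minimal sketch of that one method, with `MemtableRef` reduced to a placeholder alias (not the real type):

```rust
// Sketch of the copy-on-write MemtableVersion: freezing the mutable memtable
// produces a new version instead of mutating the old one in place.
use std::sync::Arc;

type MemtableRef = Arc<Vec<(Vec<u8>, Vec<u8>)>>; // placeholder, not the real type

#[derive(Clone)]
struct MemtableVersion {
    mutable: MemtableRef,
    immutables: Vec<MemtableRef>,
}

impl MemtableVersion {
    fn freeze_mutable(&self, new_mutable: MemtableRef) -> MemtableVersion {
        let mut immutables = self.immutables.clone();
        // The old mutable memtable becomes immutable and waits to be flushed.
        immutables.push(self.mutable.clone());
        MemtableVersion {
            mutable: new_mutable,
            immutables,
        }
    }
}
```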
# Drawback
This is a breaking change.

# Future Work
- Rename `TableEngine` to `RegionEngine`
- Simplify the schema relationship in the `mito` engine
- Refactor the `Datanode` into a `RegionServer`
@@ -15,7 +15,7 @@
use std::any::Any;

use common_error::ext::ErrorExt;
use common_error::prelude::StatusCode;
use common_error::status_code::StatusCode;
use datatypes::prelude::ConcreteDataType;
use snafu::prelude::*;
use snafu::Location;

@@ -15,7 +15,7 @@
use common_base::BitVec;
use common_time::timestamp::TimeUnit;
use datatypes::prelude::ConcreteDataType;
use datatypes::types::TimestampType;
use datatypes::types::{TimeType, TimestampType};
use datatypes::value::Value;
use datatypes::vectors::VectorRef;
use greptime_proto::v1::ddl_request::Expr;
@@ -71,6 +71,10 @@ impl From<ColumnDataTypeWrapper> for ConcreteDataType {
            ColumnDataType::TimestampNanosecond => {
                ConcreteDataType::timestamp_nanosecond_datatype()
            }
            ColumnDataType::TimeSecond => ConcreteDataType::time_second_datatype(),
            ColumnDataType::TimeMillisecond => ConcreteDataType::time_millisecond_datatype(),
            ColumnDataType::TimeMicrosecond => ConcreteDataType::time_microsecond_datatype(),
            ColumnDataType::TimeNanosecond => ConcreteDataType::time_nanosecond_datatype(),
        }
    }
}
@@ -95,12 +99,18 @@ impl TryFrom<ConcreteDataType> for ColumnDataTypeWrapper {
            ConcreteDataType::String(_) => ColumnDataType::String,
            ConcreteDataType::Date(_) => ColumnDataType::Date,
            ConcreteDataType::DateTime(_) => ColumnDataType::Datetime,
            ConcreteDataType::Timestamp(unit) => match unit {
            ConcreteDataType::Timestamp(t) => match t {
                TimestampType::Second(_) => ColumnDataType::TimestampSecond,
                TimestampType::Millisecond(_) => ColumnDataType::TimestampMillisecond,
                TimestampType::Microsecond(_) => ColumnDataType::TimestampMicrosecond,
                TimestampType::Nanosecond(_) => ColumnDataType::TimestampNanosecond,
            },
            ConcreteDataType::Time(t) => match t {
                TimeType::Second(_) => ColumnDataType::TimeSecond,
                TimeType::Millisecond(_) => ColumnDataType::TimeMillisecond,
                TimeType::Microsecond(_) => ColumnDataType::TimeMicrosecond,
                TimeType::Nanosecond(_) => ColumnDataType::TimeNanosecond,
            },
            ConcreteDataType::Null(_)
            | ConcreteDataType::List(_)
            | ConcreteDataType::Dictionary(_) => {
@@ -189,6 +199,22 @@ pub fn values_with_capacity(datatype: ColumnDataType, capacity: usize) -> Values
            ts_nanosecond_values: Vec::with_capacity(capacity),
            ..Default::default()
        },
        ColumnDataType::TimeSecond => Values {
            time_second_values: Vec::with_capacity(capacity),
            ..Default::default()
        },
        ColumnDataType::TimeMillisecond => Values {
            time_millisecond_values: Vec::with_capacity(capacity),
            ..Default::default()
        },
        ColumnDataType::TimeMicrosecond => Values {
            time_microsecond_values: Vec::with_capacity(capacity),
            ..Default::default()
        },
        ColumnDataType::TimeNanosecond => Values {
            time_nanosecond_values: Vec::with_capacity(capacity),
            ..Default::default()
        },
    }
}

@@ -223,6 +249,12 @@ pub fn push_vals(column: &mut Column, origin_count: usize, vector: VectorRef) {
            TimeUnit::Microsecond => values.ts_microsecond_values.push(val.value()),
            TimeUnit::Nanosecond => values.ts_nanosecond_values.push(val.value()),
        },
        Value::Time(val) => match val.unit() {
            TimeUnit::Second => values.time_second_values.push(val.value()),
            TimeUnit::Millisecond => values.time_millisecond_values.push(val.value()),
            TimeUnit::Microsecond => values.time_microsecond_values.push(val.value()),
            TimeUnit::Nanosecond => values.time_nanosecond_values.push(val.value()),
        },
        Value::List(_) => unreachable!(),
    });
    column.null_mask = null_mask.into_vec();
@@ -256,6 +288,8 @@ fn ddl_request_type(request: &DdlRequest) -> &'static str {
        Some(Expr::Alter(_)) => "ddl.alter",
        Some(Expr::DropTable(_)) => "ddl.drop_table",
        Some(Expr::FlushTable(_)) => "ddl.flush_table",
        Some(Expr::CompactTable(_)) => "ddl.compact_table",
        Some(Expr::TruncateTable(_)) => "ddl.truncate_table",
        None => "ddl.empty",
    }
}
@@ -265,7 +299,8 @@ mod tests {
    use std::sync::Arc;

    use datatypes::vectors::{
        BooleanVector, TimestampMicrosecondVector, TimestampMillisecondVector,
        BooleanVector, TimeMicrosecondVector, TimeMillisecondVector, TimeNanosecondVector,
        TimeSecondVector, TimestampMicrosecondVector, TimestampMillisecondVector,
        TimestampNanosecondVector, TimestampSecondVector,
    };

@@ -328,6 +363,10 @@ mod tests {
        let values = values_with_capacity(ColumnDataType::TimestampMillisecond, 2);
        let values = values.ts_millisecond_values;
        assert_eq!(2, values.capacity());

        let values = values_with_capacity(ColumnDataType::TimeMillisecond, 2);
        let values = values.time_millisecond_values;
        assert_eq!(2, values.capacity());
    }

    #[test]
@@ -396,6 +435,10 @@ mod tests {
            ConcreteDataType::timestamp_millisecond_datatype(),
            ColumnDataTypeWrapper(ColumnDataType::TimestampMillisecond).into()
        );
        assert_eq!(
            ConcreteDataType::time_datatype(TimeUnit::Millisecond),
            ColumnDataTypeWrapper(ColumnDataType::TimeMillisecond).into()
        );
    }

    #[test]
@@ -524,6 +567,47 @@ mod tests {
        );
    }

    #[test]
    fn test_column_put_time_values() {
        let mut column = Column {
            column_name: "test".to_string(),
            semantic_type: 0,
            values: Some(Values {
                ..Default::default()
            }),
            null_mask: vec![],
            datatype: 0,
        };

        let vector = Arc::new(TimeNanosecondVector::from_vec(vec![1, 2, 3]));
        push_vals(&mut column, 3, vector);
        assert_eq!(
            vec![1, 2, 3],
            column.values.as_ref().unwrap().time_nanosecond_values
        );

        let vector = Arc::new(TimeMillisecondVector::from_vec(vec![4, 5, 6]));
        push_vals(&mut column, 3, vector);
        assert_eq!(
            vec![4, 5, 6],
            column.values.as_ref().unwrap().time_millisecond_values
        );

        let vector = Arc::new(TimeMicrosecondVector::from_vec(vec![7, 8, 9]));
        push_vals(&mut column, 3, vector);
        assert_eq!(
            vec![7, 8, 9],
            column.values.as_ref().unwrap().time_microsecond_values
        );

        let vector = Arc::new(TimeSecondVector::from_vec(vec![10, 11, 12]));
        push_vals(&mut column, 3, vector);
        assert_eq!(
            vec![10, 11, 12],
            column.values.as_ref().unwrap().time_second_values
        );
    }

    #[test]
    fn test_column_put_vector() {
        use crate::v1::column::SemanticType;

@@ -15,7 +15,7 @@
pub mod error;
pub mod helper;

pub mod prometheus {
pub mod prom_store {
    pub mod remote {
        pub use greptime_proto::prometheus::remote::*;
    }
@@ -29,12 +29,12 @@ datatypes = { path = "../datatypes" }
futures = "0.3"
futures-util.workspace = true
key-lock = "0.1"
lazy_static = "1.4"
lazy_static.workspace = true
meta-client = { path = "../meta-client" }
metrics.workspace = true
moka = { version = "0.11", features = ["future"] }
parking_lot = "0.12"
regex = "1.6"
regex.workspace = true
serde = "1.0"
serde_json = "1.0"
session = { path = "../session" }

@@ -16,10 +16,10 @@ use std::any::Any;
use std::fmt::Debug;

use common_error::ext::{BoxedError, ErrorExt};
use common_error::prelude::{Snafu, StatusCode};
use common_error::status_code::StatusCode;
use datafusion::error::DataFusionError;
use datatypes::prelude::ConcreteDataType;
use snafu::Location;
use snafu::{Location, Snafu};
use tokio::task::JoinError;

use crate::DeregisterTableRequest;
@@ -1,397 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::collections::HashMap;
use std::fmt::{Display, Formatter};

use common_catalog::error::{
    DeserializeCatalogEntryValueSnafu, Error, InvalidCatalogSnafu, SerializeCatalogEntryValueSnafu,
};
use lazy_static::lazy_static;
use regex::Regex;
use serde::{Deserialize, Serialize, Serializer};
use snafu::{ensure, OptionExt, ResultExt};
use table::metadata::{RawTableInfo, TableId, TableVersion};

pub const CATALOG_KEY_PREFIX: &str = "__c";
pub const SCHEMA_KEY_PREFIX: &str = "__s";
pub const TABLE_GLOBAL_KEY_PREFIX: &str = "__tg";
pub const TABLE_REGIONAL_KEY_PREFIX: &str = "__tr";

const ALPHANUMERICS_NAME_PATTERN: &str = "[a-zA-Z_][a-zA-Z0-9_]*";

lazy_static! {
    static ref CATALOG_KEY_PATTERN: Regex = Regex::new(&format!(
        "^{CATALOG_KEY_PREFIX}-({ALPHANUMERICS_NAME_PATTERN})$"
    ))
    .unwrap();
}

lazy_static! {
    static ref SCHEMA_KEY_PATTERN: Regex = Regex::new(&format!(
        "^{SCHEMA_KEY_PREFIX}-({ALPHANUMERICS_NAME_PATTERN})-({ALPHANUMERICS_NAME_PATTERN})$"
    ))
    .unwrap();
}

lazy_static! {
    static ref TABLE_GLOBAL_KEY_PATTERN: Regex = Regex::new(&format!(
        "^{TABLE_GLOBAL_KEY_PREFIX}-({ALPHANUMERICS_NAME_PATTERN})-({ALPHANUMERICS_NAME_PATTERN})-({ALPHANUMERICS_NAME_PATTERN})$"
    ))
    .unwrap();
}

lazy_static! {
    static ref TABLE_REGIONAL_KEY_PATTERN: Regex = Regex::new(&format!(
        "^{TABLE_REGIONAL_KEY_PREFIX}-({ALPHANUMERICS_NAME_PATTERN})-({ALPHANUMERICS_NAME_PATTERN})-({ALPHANUMERICS_NAME_PATTERN})-([0-9]+)$"
    ))
    .unwrap();
}

pub fn build_catalog_prefix() -> String {
    format!("{CATALOG_KEY_PREFIX}-")
}

pub fn build_schema_prefix(catalog_name: impl AsRef<str>) -> String {
    format!("{SCHEMA_KEY_PREFIX}-{}-", catalog_name.as_ref())
}

/// Global table info has only one key across all datanodes so it does not have `node_id` field.
pub fn build_table_global_prefix(
    catalog_name: impl AsRef<str>,
    schema_name: impl AsRef<str>,
) -> String {
    format!(
        "{TABLE_GLOBAL_KEY_PREFIX}-{}-{}-",
        catalog_name.as_ref(),
        schema_name.as_ref()
    )
}

/// Regional table info varies between datanode, so it contains a `node_id` field.
pub fn build_table_regional_prefix(
    catalog_name: impl AsRef<str>,
    schema_name: impl AsRef<str>,
) -> String {
    format!(
        "{}-{}-{}-",
        TABLE_REGIONAL_KEY_PREFIX,
        catalog_name.as_ref(),
        schema_name.as_ref()
    )
}

/// Table global info has only one key across all datanodes so it does not have `node_id` field.
#[derive(Clone, Hash, Eq, PartialEq)]
pub struct TableGlobalKey {
    pub catalog_name: String,
    pub schema_name: String,
    pub table_name: String,
}

impl Display for TableGlobalKey {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        f.write_str(TABLE_GLOBAL_KEY_PREFIX)?;
        f.write_str("-")?;
        f.write_str(&self.catalog_name)?;
        f.write_str("-")?;
        f.write_str(&self.schema_name)?;
        f.write_str("-")?;
        f.write_str(&self.table_name)
    }
}

impl TableGlobalKey {
    pub fn parse<S: AsRef<str>>(s: S) -> Result<Self, Error> {
        let key = s.as_ref();
        let captures = TABLE_GLOBAL_KEY_PATTERN
            .captures(key)
            .context(InvalidCatalogSnafu { key })?;
        ensure!(captures.len() == 4, InvalidCatalogSnafu { key });

        Ok(Self {
            catalog_name: captures[1].to_string(),
            schema_name: captures[2].to_string(),
            table_name: captures[3].to_string(),
        })
    }

    pub fn to_raw_key(&self) -> Vec<u8> {
        self.to_string().into_bytes()
    }

    pub fn try_from_raw_key(key: &[u8]) -> Result<Self, Error> {
        Self::parse(String::from_utf8_lossy(key))
    }
}

/// Table global info contains necessary info for a datanode to create table regions, including
/// table id, table meta(schema...), region id allocation across datanodes.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct TableGlobalValue {
    /// Id of datanode that created the global table info kv. only for debugging.
    pub node_id: u64,
    /// Allocation of region ids across all datanodes.
    pub regions_id_map: HashMap<u64, Vec<u32>>,
    pub table_info: RawTableInfo,
}

impl TableGlobalValue {
    pub fn table_id(&self) -> TableId {
        self.table_info.ident.table_id
    }

    pub fn engine(&self) -> &str {
        &self.table_info.meta.engine
    }
}

/// Table regional info that varies between datanode, so it contains a `node_id` field.
pub struct TableRegionalKey {
    pub catalog_name: String,
    pub schema_name: String,
    pub table_name: String,
    pub node_id: u64,
}

impl Display for TableRegionalKey {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        f.write_str(TABLE_REGIONAL_KEY_PREFIX)?;
        f.write_str("-")?;
        f.write_str(&self.catalog_name)?;
        f.write_str("-")?;
        f.write_str(&self.schema_name)?;
        f.write_str("-")?;
        f.write_str(&self.table_name)?;
        f.write_str("-")?;
        f.serialize_u64(self.node_id)
    }
}

impl TableRegionalKey {
    pub fn parse<S: AsRef<str>>(s: S) -> Result<Self, Error> {
        let key = s.as_ref();
        let captures = TABLE_REGIONAL_KEY_PATTERN
            .captures(key)
            .context(InvalidCatalogSnafu { key })?;
        ensure!(captures.len() == 5, InvalidCatalogSnafu { key });
        let node_id = captures[4]
            .to_string()
            .parse()
            .map_err(|_| InvalidCatalogSnafu { key }.build())?;
        Ok(Self {
            catalog_name: captures[1].to_string(),
            schema_name: captures[2].to_string(),
            table_name: captures[3].to_string(),
            node_id,
        })
    }
}

/// Regional table info of specific datanode, including table version on that datanode and
/// region ids allocated by metasrv.
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct TableRegionalValue {
    // We can remove the `Option` from the table id once all regional values
    // stored in meta have table ids.
    pub table_id: Option<TableId>,
    pub version: TableVersion,
    pub regions_ids: Vec<u32>,
    pub engine_name: Option<String>,
}

pub struct CatalogKey {
    pub catalog_name: String,
}

impl Display for CatalogKey {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        f.write_str(CATALOG_KEY_PREFIX)?;
        f.write_str("-")?;
        f.write_str(&self.catalog_name)
    }
}

impl CatalogKey {
    pub fn parse(s: impl AsRef<str>) -> Result<Self, Error> {
        let key = s.as_ref();
        let captures = CATALOG_KEY_PATTERN
            .captures(key)
            .context(InvalidCatalogSnafu { key })?;
        ensure!(captures.len() == 2, InvalidCatalogSnafu { key });
        Ok(Self {
            catalog_name: captures[1].to_string(),
        })
    }
}

#[derive(Debug, Serialize, Deserialize)]
pub struct CatalogValue;

pub struct SchemaKey {
    pub catalog_name: String,
    pub schema_name: String,
}

impl Display for SchemaKey {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        f.write_str(SCHEMA_KEY_PREFIX)?;
        f.write_str("-")?;
        f.write_str(&self.catalog_name)?;
        f.write_str("-")?;
        f.write_str(&self.schema_name)
    }
}

impl SchemaKey {
    pub fn parse(s: impl AsRef<str>) -> Result<Self, Error> {
        let key = s.as_ref();
        let captures = SCHEMA_KEY_PATTERN
            .captures(key)
            .context(InvalidCatalogSnafu { key })?;
        ensure!(captures.len() == 3, InvalidCatalogSnafu { key });
        Ok(Self {
            catalog_name: captures[1].to_string(),
            schema_name: captures[2].to_string(),
        })
    }
}

#[derive(Debug, Serialize, Deserialize)]
pub struct SchemaValue;

macro_rules! define_catalog_value {
    ( $($val_ty: ty), *) => {
        $(
            impl $val_ty {
                pub fn parse(s: impl AsRef<str>) -> Result<Self, Error> {
                    serde_json::from_str(s.as_ref())
                        .context(DeserializeCatalogEntryValueSnafu { raw: s.as_ref() })
                }

                pub fn from_bytes(bytes: impl AsRef<[u8]>) -> Result<Self, Error> {
                    Self::parse(&String::from_utf8_lossy(bytes.as_ref()))
                }

                pub fn as_bytes(&self) -> Result<Vec<u8>, Error> {
                    Ok(serde_json::to_string(self)
                        .context(SerializeCatalogEntryValueSnafu)?
                        .into_bytes())
                }
            }
        )*
    }
}

define_catalog_value!(
    TableRegionalValue,
    TableGlobalValue,
    CatalogValue,
    SchemaValue
);

#[cfg(test)]
mod tests {
    use datatypes::prelude::ConcreteDataType;
    use datatypes::schema::{ColumnSchema, RawSchema, Schema};
    use table::metadata::{RawTableMeta, TableIdent, TableType};

    use super::*;

    #[test]
    fn test_parse_catalog_key() {
        let key = "__c-C";
        let catalog_key = CatalogKey::parse(key).unwrap();
        assert_eq!("C", catalog_key.catalog_name);
        assert_eq!(key, catalog_key.to_string());
    }

    #[test]
    fn test_parse_schema_key() {
        let key = "__s-C-S";
        let schema_key = SchemaKey::parse(key).unwrap();
        assert_eq!("C", schema_key.catalog_name);
        assert_eq!("S", schema_key.schema_name);
        assert_eq!(key, schema_key.to_string());
    }

    #[test]
    fn test_parse_table_key() {
        let key = "__tg-C-S-T";
        let entry = TableGlobalKey::parse(key).unwrap();
        assert_eq!("C", entry.catalog_name);
        assert_eq!("S", entry.schema_name);
        assert_eq!("T", entry.table_name);
        assert_eq!(key, &entry.to_string());
    }

    #[test]
    fn test_build_prefix() {
        assert_eq!("__c-", build_catalog_prefix());
        assert_eq!("__s-CATALOG-", build_schema_prefix("CATALOG"));
        assert_eq!(
            "__tg-CATALOG-SCHEMA-",
            build_table_global_prefix("CATALOG", "SCHEMA")
        );
    }

    #[test]
    fn test_serialize_schema() {
        let schema = Schema::new(vec![ColumnSchema::new(
            "name",
            ConcreteDataType::string_datatype(),
            true,
        )]);

        let meta = RawTableMeta {
            schema: RawSchema::from(&schema),
            engine: "mito".to_string(),
            created_on: chrono::DateTime::default(),
            primary_key_indices: vec![0, 1],
            next_column_id: 3,
            engine_options: Default::default(),
            value_indices: vec![2, 3],
            options: Default::default(),
            region_numbers: vec![1],
        };

        let table_info = RawTableInfo {
            ident: TableIdent {
                table_id: 42,
                version: 1,
            },
            name: "table_1".to_string(),
            desc: Some("blah".to_string()),
            catalog_name: "catalog_1".to_string(),
            schema_name: "schema_1".to_string(),
            meta,
            table_type: TableType::Base,
        };

        let value = TableGlobalValue {
            node_id: 0,
            regions_id_map: HashMap::from([(0, vec![1, 2, 3])]),
            table_info,
        };
        let serialized = serde_json::to_string(&value).unwrap();
        let deserialized = TableGlobalValue::parse(serialized).unwrap();
        assert_eq!(value, deserialized);
    }

    #[test]
    fn test_table_global_value_compatibility() {
let s = r#"{"node_id":1,"regions_id_map":{"1":[0]},"table_info":{"ident":{"table_id":1098,"version":1},"name":"container_cpu_limit","desc":"Created on insertion","catalog_name":"greptime","schema_name":"dd","meta":{"schema":{"column_schemas":[{"name":"container_id","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"container_name","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"docker_image","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"host","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"image_name","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"image_tag","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"interval","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"runtime","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"short_image","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"type","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"dd_value","data_type":{"Float64":{}},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"ts","data_type":{"Timestamp":{"Millisecond":null}},"is_nullable":false,"is_time_index":true,"default_constraint":null,"metadata":{"greptime:time_index":"true"}},{"name":"git.repository_url","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}}],"timestamp_index":11,"version":1},"primary_key_indices":[0,1,2,3,4,5,6,7,8,9,12],"value_indices":[10,11],"engine":"mito","next_column_id":12,"region_numbers":[],"engine_options":{},"options":{},"created_on":"1970-01-01T00:00:00Z"},"table_type":"Base"}}"#;
        let _ = TableGlobalValue::parse(s).unwrap();
    }
}
@@ -19,13 +19,14 @@ use std::any::Any;
use std::sync::{Arc, Weak};

use async_trait::async_trait;
use common_error::prelude::BoxedError;
use common_error::ext::BoxedError;
use common_recordbatch::{RecordBatchStreamAdaptor, SendableRecordBatchStream};
use datatypes::schema::SchemaRef;
use futures_util::StreamExt;
use snafu::ResultExt;
use store_api::storage::ScanRequest;
use table::error::{SchemaConversionSnafu, TablesRecordBatchSnafu};
use table::metadata::TableType;
use table::{Result as TableResult, Table, TableRef};

use self::columns::InformationSchemaColumns;
@@ -102,6 +103,10 @@ impl Table for InformationTable {
        unreachable!("Should not call table_info() of InformationTable directly")
    }

    fn table_type(&self) -> table::metadata::TableType {
        TableType::View
    }

    async fn scan_to_stream(&self, request: ScanRequest) -> TableResult<SendableRecordBatchStream> {
        let projection = request.projection;
        let projected_schema = if let Some(projection) = &projection {

@@ -18,7 +18,7 @@ use arrow_schema::SchemaRef as ArrowSchemaRef;
use common_catalog::consts::{
    SEMANTIC_TYPE_FIELD, SEMANTIC_TYPE_PRIMARY_KEY, SEMANTIC_TYPE_TIME_INDEX,
};
use common_error::prelude::BoxedError;
use common_error::ext::BoxedError;
use common_query::physical_plan::TaskContext;
use common_recordbatch::adapter::RecordBatchStreamAdapter;
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};

@@ -16,7 +16,7 @@ use std::sync::{Arc, Weak};

use arrow_schema::SchemaRef as ArrowSchemaRef;
use common_catalog::consts::INFORMATION_SCHEMA_NAME;
use common_error::prelude::BoxedError;
use common_error::ext::BoxedError;
use common_query::physical_plan::TaskContext;
use common_recordbatch::adapter::RecordBatchStreamAdapter;
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};

@@ -21,7 +21,7 @@ use std::collections::HashMap;
use std::fmt::{Debug, Formatter};
use std::sync::Arc;

use api::v1::meta::{RegionStat, TableName};
use api::v1::meta::{RegionStat, TableIdent, TableName};
use common_telemetry::{info, warn};
use snafu::ResultExt;
use table::engine::{EngineContext, TableEngineRef};
@@ -32,7 +32,6 @@ use table::TableRef;
use crate::error::{CreateTableSnafu, Result};

pub mod error;
pub mod helper;
pub mod information_schema;
pub mod local;
mod metrics;
@@ -59,6 +58,9 @@ pub trait CatalogManager: Send + Sync {
    /// This method will/should fail if catalog not exist
    async fn register_schema(&self, request: RegisterSchemaRequest) -> Result<bool>;

    /// Deregisters a database within given catalog/schema to catalog manager
    async fn deregister_schema(&self, request: DeregisterSchemaRequest) -> Result<bool>;

    /// Registers a table within given catalog/schema to catalog manager,
    /// returns whether the table registered.
    ///
@@ -149,6 +151,12 @@ pub struct DeregisterTableRequest {
    pub table_name: String,
}

#[derive(Debug, Clone)]
pub struct DeregisterSchemaRequest {
    pub catalog: String,
    pub schema: String,
}

#[derive(Debug, Clone)]
pub struct RegisterSchemaRequest {
    pub catalog: String,
@@ -217,19 +225,25 @@ pub async fn datanode_stat(catalog_manager: &CatalogManagerRef) -> (u64, Vec<Reg
            for table_name in table_names {
                let Ok(Some(table)) = catalog_manager.table(&catalog_name, &schema_name, &table_name).await else { continue };

                let region_numbers = &table.table_info().meta.region_numbers;
                let table_info = table.table_info();
                let region_numbers = &table_info.meta.region_numbers;
                region_number += region_numbers.len() as u64;

                let engine = &table.table_info().meta.engine;
                let engine = &table_info.meta.engine;
                let table_id = table_info.ident.table_id;

                match table.region_stats() {
                    Ok(stats) => {
                        let stats = stats.into_iter().map(|stat| RegionStat {
                            region_id: stat.region_id,
                            table_name: Some(TableName {
                                catalog_name: catalog_name.clone(),
                                schema_name: schema_name.clone(),
                                table_name: table_name.clone(),
                            table_ident: Some(TableIdent {
                                table_id,
                                table_name: Some(TableName {
                                    catalog_name: catalog_name.clone(),
                                    schema_name: schema_name.clone(),
                                    table_name: table_name.clone(),
                                }),
                                engine: engine.clone(),
                            }),
                            approximate_bytes: stat.disk_usage_bytes as i64,
                            attrs: HashMap::from([("engine_name".to_owned(), engine.clone())]),

@@ -41,7 +41,7 @@ use crate::error::{
    self, CatalogNotFoundSnafu, IllegalManagerStateSnafu, OpenTableSnafu, ReadSystemCatalogSnafu,
    Result, SchemaExistsSnafu, SchemaNotFoundSnafu, SystemCatalogSnafu,
    SystemCatalogTypeMismatchSnafu, TableEngineNotFoundSnafu, TableExistsSnafu, TableNotExistSnafu,
    TableNotFoundSnafu,
    TableNotFoundSnafu, UnimplementedSnafu,
};
use crate::information_schema::InformationSchemaProvider;
use crate::local::memory::MemoryCatalogManager;
@@ -51,8 +51,9 @@ use crate::system::{
};
use crate::tables::SystemCatalog;
use crate::{
    handle_system_table_request, CatalogManager, CatalogManagerRef, DeregisterTableRequest,
    RegisterSchemaRequest, RegisterSystemTableRequest, RegisterTableRequest, RenameTableRequest,
    handle_system_table_request, CatalogManager, CatalogManagerRef, DeregisterSchemaRequest,
    DeregisterTableRequest, RegisterSchemaRequest, RegisterSystemTableRequest,
    RegisterTableRequest, RenameTableRequest,
};

/// A `CatalogManager` consists of a system catalog and a bunch of user catalogs.
@@ -516,9 +517,14 @@ impl CatalogManager for LocalCatalogManager {
        }
    }

    async fn register_system_table(&self, request: RegisterSystemTableRequest) -> Result<()> {
        self.check_state().await?;
    async fn deregister_schema(&self, _request: DeregisterSchemaRequest) -> Result<bool> {
        UnimplementedSnafu {
            operation: "deregister schema",
        }
        .fail()
    }

    async fn register_system_table(&self, request: RegisterSystemTableRequest) -> Result<()> {
        let catalog_name = request.create_table_request.catalog_name.clone();
        let schema_name = request.create_table_request.schema_name.clone();

@@ -29,8 +29,8 @@ use crate::error::{
    CatalogNotFoundSnafu, Result, SchemaNotFoundSnafu, TableExistsSnafu, TableNotFoundSnafu,
};
use crate::{
    CatalogManager, DeregisterTableRequest, RegisterSchemaRequest, RegisterSystemTableRequest,
    RegisterTableRequest, RenameTableRequest,
    CatalogManager, DeregisterSchemaRequest, DeregisterTableRequest, RegisterSchemaRequest,
    RegisterSystemTableRequest, RegisterTableRequest, RenameTableRequest,
};

type SchemaEntries = HashMap<String, HashMap<String, TableRef>>;
@@ -75,15 +75,7 @@ impl CatalogManager for MemoryCatalogManager {
    }

    async fn register_table(&self, request: RegisterTableRequest) -> Result<bool> {
        let catalog = request.catalog.clone();
        let schema = request.schema.clone();
        let result = self.register_table_sync(request);
        increment_gauge!(
            crate::metrics::METRIC_CATALOG_MANAGER_TABLE_COUNT,
            1.0,
            &[crate::metrics::db_label(&catalog, &schema)],
        );
        result
        self.register_table_sync(request)
    }

    async fn rename_table(&self, request: RenameTableRequest) -> Result<bool> {
@@ -143,11 +135,35 @@ impl CatalogManager for MemoryCatalogManager {
    }

    async fn register_schema(&self, request: RegisterSchemaRequest) -> Result<bool> {
        let registered = self.register_schema_sync(request)?;
        if registered {
            increment_gauge!(crate::metrics::METRIC_CATALOG_MANAGER_SCHEMA_COUNT, 1.0);
        }
        Ok(registered)
        self.register_schema_sync(request)
    }

    async fn deregister_schema(&self, request: DeregisterSchemaRequest) -> Result<bool> {
        let mut catalogs = self.catalogs.write().unwrap();
        let schemas = catalogs
            .get_mut(&request.catalog)
            .with_context(|| CatalogNotFoundSnafu {
                catalog_name: &request.catalog,
            })?;
        let table_count = schemas
            .remove(&request.schema)
            .with_context(|| SchemaNotFoundSnafu {
                catalog: &request.catalog,
                schema: &request.schema,
            })?
            .len();
        decrement_gauge!(
            crate::metrics::METRIC_CATALOG_MANAGER_TABLE_COUNT,
            table_count as f64,
            &[crate::metrics::db_label(&request.catalog, &request.schema)],
        );

        decrement_gauge!(
            crate::metrics::METRIC_CATALOG_MANAGER_SCHEMA_COUNT,
            1.0,
            &[crate::metrics::db_label(&request.catalog, &request.schema)],
        );
        Ok(true)
    }

    async fn register_system_table(&self, _request: RegisterSystemTableRequest) -> Result<()> {
@@ -235,11 +251,7 @@ impl CatalogManager for MemoryCatalogManager {
    }

    async fn register_catalog(&self, name: String) -> Result<bool> {
        let registered = self.register_catalog_sync(name)?;
        if registered {
            increment_gauge!(crate::metrics::METRIC_CATALOG_MANAGER_CATALOG_COUNT, 1.0);
        }
        Ok(registered)
        self.register_catalog_sync(name)
    }

    fn as_any(&self) -> &dyn Any {
@@ -263,7 +275,15 @@ impl MemoryCatalogManager {

    pub fn register_catalog_sync(&self, name: String) -> Result<bool> {
        let mut catalogs = self.catalogs.write().unwrap();
        Ok(catalogs.insert(name, HashMap::new()).is_some())

        match catalogs.entry(name) {
            Entry::Vacant(e) => {
                e.insert(HashMap::new());
                increment_gauge!(crate::metrics::METRIC_CATALOG_MANAGER_CATALOG_COUNT, 1.0);
                Ok(true)
            }
            Entry::Occupied(_) => Ok(false),
        }
    }

    pub fn register_schema_sync(&self, request: RegisterSchemaRequest) -> Result<bool> {
@@ -273,11 +293,15 @@ impl MemoryCatalogManager {
            .with_context(|| CatalogNotFoundSnafu {
                catalog_name: &request.catalog,
            })?;
        if catalog.contains_key(&request.schema) {
            return Ok(false);

        match catalog.entry(request.schema) {
            Entry::Vacant(e) => {
                e.insert(HashMap::new());
                increment_gauge!(crate::metrics::METRIC_CATALOG_MANAGER_SCHEMA_COUNT, 1.0);
                Ok(true)
            }
            Entry::Occupied(_) => Ok(false),
        }
        let _ = catalog.insert(request.schema, HashMap::new());
        Ok(true)
    }

    pub fn register_table_sync(&self, request: RegisterTableRequest) -> Result<bool> {
@@ -299,8 +323,13 @@ impl MemoryCatalogManager {
            }
            .fail();
        }

        Ok(schema.insert(request.table_name, request.table).is_none())
        schema.insert(request.table_name, request.table);
        increment_gauge!(
            crate::metrics::METRIC_CATALOG_MANAGER_TABLE_COUNT,
            1.0,
            &[crate::metrics::db_label(&request.catalog, &request.schema)],
        );
        Ok(true)
    }

    #[cfg(any(test, feature = "testing"))]
@@ -327,7 +356,7 @@ pub fn new_memory_catalog_manager() -> Result<Arc<MemoryCatalogManager>> {
mod tests {
    use common_catalog::consts::*;
    use common_error::ext::ErrorExt;
    use common_error::prelude::StatusCode;
    use common_error::status_code::StatusCode;
    use table::table::numbers::{NumbersTable, NUMBERS_TABLE_NAME};

    use super::*;
@@ -517,4 +546,42 @@ mod tests {
            .unwrap()
            .is_none());
    }

    #[tokio::test]
    async fn test_catalog_deregister_schema() {
        let catalog = MemoryCatalogManager::default();

        // Registers a catalog, a schema, and a table.
        let catalog_name = "foo_catalog".to_string();
        let schema_name = "foo_schema".to_string();
        let table_name = "foo_table".to_string();
        let schema = RegisterSchemaRequest {
            catalog: catalog_name.clone(),
            schema: schema_name.clone(),
        };
        let table = RegisterTableRequest {
            catalog: catalog_name.clone(),
            schema: schema_name.clone(),
            table_name,
            table_id: 0,
            table: Arc::new(NumbersTable::default()),
        };
        catalog
            .register_catalog(catalog_name.clone())
            .await
            .unwrap();
        catalog.register_schema(schema).await.unwrap();
        catalog.register_table(table).await.unwrap();

        let request = DeregisterSchemaRequest {
            catalog: catalog_name.clone(),
            schema: schema_name.clone(),
        };

        assert!(catalog.deregister_schema(request).await.unwrap());
        assert!(!catalog
            .schema_exist(&catalog_name, &schema_name)
            .await
            .unwrap());
    }
}
@@ -30,76 +30,3 @@ pub trait KvCacheInvalidator: Send + Sync {
}

pub type KvCacheInvalidatorRef = Arc<dyn KvCacheInvalidator>;

#[cfg(test)]
mod tests {
    use std::any::Any;

    use async_stream::stream;
    use common_meta::kv_backend::{Kv, KvBackend, ValueIter};

    use crate::error::Error;

    struct MockKvBackend {}

    #[async_trait::async_trait]
    impl KvBackend for MockKvBackend {
        type Error = Error;

        fn range<'a, 'b>(&'a self, _key: &[u8]) -> ValueIter<'b, Error>
        where
            'a: 'b,
        {
            Box::pin(stream!({
                for i in 0..3 {
                    yield Ok(Kv(
                        i.to_string().as_bytes().to_vec(),
                        i.to_string().as_bytes().to_vec(),
                    ))
                }
            }))
        }

        async fn set(&self, _key: &[u8], _val: &[u8]) -> Result<(), Error> {
            unimplemented!()
        }

        async fn compare_and_set(
            &self,
            _key: &[u8],
            _expect: &[u8],
            _val: &[u8],
        ) -> Result<Result<(), Option<Vec<u8>>>, Error> {
            unimplemented!()
        }

        async fn delete_range(&self, _key: &[u8], _end: &[u8]) -> Result<(), Error> {
            unimplemented!()
        }

        async fn move_value(&self, _from_key: &[u8], _to_key: &[u8]) -> Result<(), Error> {
            unimplemented!()
        }

        fn as_any(&self) -> &dyn Any {
            self
        }
    }

    #[tokio::test]
    async fn test_get() {
        let backend = MockKvBackend {};

        let result = backend.get(0.to_string().as_bytes()).await;
        assert_eq!(0.to_string().as_bytes(), result.unwrap().unwrap().0);

        let result = backend.get(1.to_string().as_bytes()).await;
        assert_eq!(1.to_string().as_bytes(), result.unwrap().unwrap().0);

        let result = backend.get(2.to_string().as_bytes()).await;
        assert_eq!(2.to_string().as_bytes(), result.unwrap().unwrap().0);

        let result = backend.get(3.to_string().as_bytes()).await;
        assert!(result.unwrap().is_none());
    }
}
@@ -17,15 +17,18 @@ use std::fmt::Debug;
 use std::sync::Arc;
 use std::time::Duration;
 
-use async_stream::stream;
-use common_error::prelude::BoxedError;
+use common_error::ext::BoxedError;
+use common_meta::error::Error::{CacheNotGet, GetKvCache};
 use common_meta::error::{CacheNotGetSnafu, Error, MetaSrvSnafu, Result};
-use common_meta::kv_backend::{Kv, KvBackend, KvBackendRef, ValueIter};
+use common_meta::kv_backend::{KvBackend, KvBackendRef, TxnService};
 use common_meta::rpc::store::{
-    CompareAndPutRequest, DeleteRangeRequest, MoveValueRequest, PutRequest, RangeRequest,
+    BatchDeleteRequest, BatchDeleteResponse, BatchGetRequest, BatchGetResponse, BatchPutRequest,
+    BatchPutResponse, CompareAndPutRequest, CompareAndPutResponse, DeleteRangeRequest,
+    DeleteRangeResponse, MoveValueRequest, MoveValueResponse, PutRequest, PutResponse,
+    RangeRequest, RangeResponse,
 };
-use common_telemetry::{info, timer};
+use common_meta::rpc::KeyValue;
+use common_telemetry::timer;
 use meta_client::client::MetaClient;
 use moka::future::{Cache, CacheBuilder};
 use snafu::{OptionExt, ResultExt};
@@ -37,24 +40,133 @@ const CACHE_MAX_CAPACITY: u64 = 10000;
 const CACHE_TTL_SECOND: u64 = 10 * 60;
 const CACHE_TTI_SECOND: u64 = 5 * 60;
 
-pub type CacheBackendRef = Arc<Cache<Vec<u8>, Kv>>;
+pub type CacheBackendRef = Arc<Cache<Vec<u8>, KeyValue>>;
 
 pub struct CachedMetaKvBackend {
     kv_backend: KvBackendRef,
     cache: CacheBackendRef,
+    name: String,
+}
+
+impl TxnService for CachedMetaKvBackend {
+    type Error = Error;
 }
 
 #[async_trait::async_trait]
 impl KvBackend for CachedMetaKvBackend {
     type Error = Error;
 
-    fn range<'a, 'b>(&'a self, key: &[u8]) -> ValueIter<'b, Error>
-    where
-        'a: 'b,
-    {
-        self.kv_backend.range(key)
+    fn name(&self) -> &str {
+        &self.name
     }
 
-    async fn get(&self, key: &[u8]) -> Result<Option<Kv>> {
+    fn as_any(&self) -> &dyn Any {
+        self
+    }
+
+    async fn range(&self, req: RangeRequest) -> Result<RangeResponse> {
+        self.kv_backend.range(req).await
+    }
+
+    async fn put(&self, req: PutRequest) -> Result<PutResponse> {
+        let key = &req.key.clone();
+
+        let ret = self.kv_backend.put(req).await;
+
+        if ret.is_ok() {
+            self.invalidate_key(key).await;
+        }
+
+        ret
+    }
+
+    async fn batch_put(&self, req: BatchPutRequest) -> Result<BatchPutResponse> {
+        let keys = req
+            .kvs
+            .iter()
+            .map(|kv| kv.key().to_vec())
+            .collect::<Vec<_>>();
+
+        let resp = self.kv_backend.batch_put(req).await;
+
+        if resp.is_ok() {
+            for key in keys {
+                self.invalidate_key(&key).await;
+            }
+        }
+
+        resp
+    }
+
+    async fn batch_get(&self, req: BatchGetRequest) -> Result<BatchGetResponse> {
+        self.kv_backend.batch_get(req).await
+    }
+
+    async fn compare_and_put(&self, req: CompareAndPutRequest) -> Result<CompareAndPutResponse> {
+        let key = &req.key.clone();
+
+        let ret = self.kv_backend.compare_and_put(req).await;
+
+        if ret.is_ok() {
+            self.invalidate_key(key).await;
+        }
+
+        ret
+    }
+
+    async fn delete_range(&self, mut req: DeleteRangeRequest) -> Result<DeleteRangeResponse> {
+        let prev_kv = req.prev_kv;
+
+        req.prev_kv = true;
+        let resp = self.kv_backend.delete_range(req).await;
+        match resp {
+            Ok(mut resp) => {
+                for prev_kv in resp.prev_kvs.iter() {
+                    self.invalidate_key(prev_kv.key()).await;
+                }
+
+                if !prev_kv {
+                    resp.prev_kvs = vec![];
+                }
+                Ok(resp)
+            }
+            Err(e) => Err(e),
+        }
+    }
+
+    async fn batch_delete(&self, mut req: BatchDeleteRequest) -> Result<BatchDeleteResponse> {
+        let prev_kv = req.prev_kv;
+
+        req.prev_kv = true;
+        let resp = self.kv_backend.batch_delete(req).await;
+        match resp {
+            Ok(mut resp) => {
+                for prev_kv in resp.prev_kvs.iter() {
+                    self.invalidate_key(prev_kv.key()).await;
+                }
+
+                if !prev_kv {
+                    resp.prev_kvs = vec![];
+                }
+                Ok(resp)
+            }
+            Err(e) => Err(e),
+        }
+    }
+
+    async fn move_value(&self, req: MoveValueRequest) -> Result<MoveValueResponse> {
+        let from_key = &req.from_key.clone();
+        let to_key = &req.to_key.clone();
+
+        let ret = self.kv_backend.move_value(req).await;
+
+        if ret.is_ok() {
+            self.invalidate_key(from_key).await;
+            self.invalidate_key(to_key).await;
+        }
+
+        ret
+    }
+
+    async fn get(&self, key: &[u8]) -> Result<Option<KeyValue>> {
         let _timer = timer!(METRIC_CATALOG_KV_GET);
 
         let init = async {
@@ -80,61 +192,6 @@ impl KvBackend for CachedMetaKvBackend {
             err_msg: e.to_string(),
         })
     }
-
-    async fn set(&self, key: &[u8], val: &[u8]) -> Result<()> {
-        let ret = self.kv_backend.set(key, val).await;
-
-        if ret.is_ok() {
-            self.invalidate_key(key).await;
-        }
-
-        ret
-    }
-
-    async fn delete(&self, key: &[u8]) -> Result<()> {
-        let ret = self.kv_backend.delete_range(key, &[]).await;
-
-        if ret.is_ok() {
-            self.invalidate_key(key).await;
-        }
-
-        ret
-    }
-
-    async fn delete_range(&self, _key: &[u8], _end: &[u8]) -> Result<()> {
-        // TODO(fys): implement it
-        unimplemented!()
-    }
-
-    async fn compare_and_set(
-        &self,
-        key: &[u8],
-        expect: &[u8],
-        val: &[u8],
-    ) -> Result<std::result::Result<(), Option<Vec<u8>>>> {
-        let ret = self.kv_backend.compare_and_set(key, expect, val).await;
-
-        if ret.is_ok() {
-            self.invalidate_key(key).await;
-        }
-
-        ret
-    }
-
-    async fn move_value(&self, from_key: &[u8], to_key: &[u8]) -> Result<()> {
-        let ret = self.kv_backend.move_value(from_key, to_key).await;
-
-        if ret.is_ok() {
-            self.invalidate_key(from_key).await;
-            self.invalidate_key(to_key).await;
-        }
-
-        ret
-    }
-
-    fn as_any(&self) -> &dyn Any {
-        self
-    }
 }
 
 #[async_trait::async_trait]
@@ -146,15 +203,8 @@ impl KvCacheInvalidator for CachedMetaKvBackend {
 
 impl CachedMetaKvBackend {
     pub fn new(client: Arc<MetaClient>) -> Self {
-        let cache = Arc::new(
-            CacheBuilder::new(CACHE_MAX_CAPACITY)
-                .time_to_live(Duration::from_secs(CACHE_TTL_SECOND))
-                .time_to_idle(Duration::from_secs(CACHE_TTI_SECOND))
-                .build(),
-        );
         let kv_backend = Arc::new(MetaKvBackend { client });
 
-        Self { kv_backend, cache }
+        Self::wrap(kv_backend)
     }
 
     pub fn wrap(kv_backend: KvBackendRef) -> Self {
@@ -165,7 +215,12 @@ impl CachedMetaKvBackend {
                 .build(),
         );
 
-        Self { kv_backend, cache }
+        let name = format!("CachedKvBackend({})", kv_backend.name());
+        Self {
+            kv_backend,
+            cache,
+            name,
+        }
     }
 
     pub fn cache(&self) -> &CacheBackendRef {
@@ -178,108 +233,97 @@ pub struct MetaKvBackend {
     pub client: Arc<MetaClient>,
 }
 
+impl TxnService for MetaKvBackend {
+    type Error = Error;
+}
+
 /// Implement `KvBackend` trait for `MetaKvBackend` instead of opendal's `Accessor` since
 /// `MetaClient`'s range method can return both keys and values, which can reduce IO overhead
 /// comparing to `Accessor`'s list and get method.
 #[async_trait::async_trait]
 impl KvBackend for MetaKvBackend {
     type Error = Error;
 
-    fn range<'a, 'b>(&'a self, key: &[u8]) -> ValueIter<'b, Error>
-    where
-        'a: 'b,
-    {
-        let key = key.to_vec();
-        Box::pin(stream!({
-            let mut resp = self
-                .client
-                .range(RangeRequest::new().with_prefix(key))
-                .await
-                .map_err(BoxedError::new)
-                .context(MetaSrvSnafu)?;
-            let kvs = resp.take_kvs();
-            for mut kv in kvs.into_iter() {
-                yield Ok(Kv(kv.take_key(), kv.take_value()))
-            }
-        }))
+    fn name(&self) -> &str {
+        "MetaKvBackend"
     }
 
-    async fn get(&self, key: &[u8]) -> Result<Option<Kv>> {
+    async fn range(&self, req: RangeRequest) -> Result<RangeResponse> {
+        self.client
+            .range(req)
+            .await
+            .map_err(BoxedError::new)
+            .context(MetaSrvSnafu)
+    }
+
+    async fn get(&self, key: &[u8]) -> Result<Option<KeyValue>> {
         let mut response = self
             .client
             .range(RangeRequest::new().with_key(key))
             .await
             .map_err(BoxedError::new)
            .context(MetaSrvSnafu)?;
-        Ok(response
-            .take_kvs()
-            .get_mut(0)
-            .map(|kv| Kv(kv.take_key(), kv.take_value())))
+        Ok(response.take_kvs().get_mut(0).map(|kv| KeyValue {
+            key: kv.take_key(),
+            value: kv.take_value(),
+        }))
    }
 
-    async fn set(&self, key: &[u8], val: &[u8]) -> Result<()> {
-        let req = PutRequest::new()
-            .with_key(key.to_vec())
-            .with_value(val.to_vec());
-        let _ = self
-            .client
+    async fn batch_put(&self, req: BatchPutRequest) -> Result<BatchPutResponse> {
+        self.client
+            .batch_put(req)
+            .await
+            .map_err(BoxedError::new)
+            .context(MetaSrvSnafu)
+    }
+
+    async fn put(&self, req: PutRequest) -> Result<PutResponse> {
+        self.client
            .put(req)
            .await
            .map_err(BoxedError::new)
-            .context(MetaSrvSnafu)?;
-        Ok(())
+            .context(MetaSrvSnafu)
    }
 
-    async fn delete_range(&self, key: &[u8], end: &[u8]) -> Result<()> {
-        let req = DeleteRangeRequest::new().with_range(key.to_vec(), end.to_vec());
-        let resp = self
-            .client
+    async fn delete_range(&self, req: DeleteRangeRequest) -> Result<DeleteRangeResponse> {
+        self.client
            .delete_range(req)
            .await
            .map_err(BoxedError::new)
-            .context(MetaSrvSnafu)?;
-        info!(
-            "Delete range, key: {}, end: {}, deleted: {}",
-            String::from_utf8_lossy(key),
-            String::from_utf8_lossy(end),
-            resp.deleted()
-        );
-
-        Ok(())
+            .context(MetaSrvSnafu)
    }
 
-    async fn compare_and_set(
+    async fn batch_delete(&self, req: BatchDeleteRequest) -> Result<BatchDeleteResponse> {
+        self.client
+            .batch_delete(req)
+            .await
+            .map_err(BoxedError::new)
+            .context(MetaSrvSnafu)
+    }
+
+    async fn batch_get(&self, req: BatchGetRequest) -> Result<BatchGetResponse> {
+        self.client
+            .batch_get(req)
+            .await
+            .map_err(BoxedError::new)
+            .context(MetaSrvSnafu)
+    }
+
+    async fn compare_and_put(
         &self,
-        key: &[u8],
-        expect: &[u8],
-        val: &[u8],
-    ) -> Result<std::result::Result<(), Option<Vec<u8>>>> {
-        let request = CompareAndPutRequest::new()
-            .with_key(key.to_vec())
-            .with_expect(expect.to_vec())
-            .with_value(val.to_vec());
-        let mut response = self
-            .client
+        request: CompareAndPutRequest,
+    ) -> Result<CompareAndPutResponse> {
+        self.client
            .compare_and_put(request)
            .await
            .map_err(BoxedError::new)
-            .context(MetaSrvSnafu)?;
-        if response.is_success() {
-            Ok(Ok(()))
-        } else {
-            Ok(Err(response.take_prev_kv().map(|v| v.value().to_vec())))
-        }
+            .context(MetaSrvSnafu)
    }
 
-    async fn move_value(&self, from_key: &[u8], to_key: &[u8]) -> Result<()> {
-        let req = MoveValueRequest::new(from_key, to_key);
-        let _ = self
-            .client
+    async fn move_value(&self, req: MoveValueRequest) -> Result<MoveValueResponse> {
+        self.client
            .move_value(req)
            .await
            .map_err(BoxedError::new)
-            .context(MetaSrvSnafu)?;
-        Ok(())
+            .context(MetaSrvSnafu)
    }
 
    fn as_any(&self) -> &dyn Any {
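The diff above converts every mutating method of `CachedMetaKvBackend` to the same shape: forward to the inner backend first, and drop the cached entry only on success, so a reader is never served a value the store has already replaced. A minimal standalone sketch of that write-through invalidation, using `Mutex<HashMap>` stand-ins for both the moka cache and the real backend (all names here are illustrative, not GreptimeDB APIs):

```rust
use std::collections::HashMap;
use std::sync::Mutex;

// Stand-ins for the real backend and cache; illustrative only.
struct CachedKv {
    store: Mutex<HashMap<Vec<u8>, Vec<u8>>>, // the authoritative backend
    cache: Mutex<HashMap<Vec<u8>, Vec<u8>>>, // read-through cache
}

impl CachedKv {
    fn get(&self, key: &[u8]) -> Option<Vec<u8>> {
        if let Some(v) = self.cache.lock().unwrap().get(key) {
            return Some(v.clone()); // cache hit
        }
        let v = self.store.lock().unwrap().get(key).cloned()?;
        self.cache.lock().unwrap().insert(key.to_vec(), v.clone());
        Some(v)
    }

    fn put(&self, key: &[u8], val: &[u8]) {
        // Write the backend first ...
        self.store.lock().unwrap().insert(key.to_vec(), val.to_vec());
        // ... then invalidate, so the next get re-reads the fresh value.
        self.cache.lock().unwrap().remove(key);
    }
}

fn main() {
    let kv = CachedKv { store: Mutex::default(), cache: Mutex::default() };
    kv.put(b"k", b"v1");
    assert_eq!(kv.get(b"k"), Some(b"v1".to_vec())); // fills the cache
    kv.put(b"k", b"v2"); // invalidates the stale entry
    assert_eq!(kv.get(b"k"), Some(b"v2".to_vec()));
}
```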
File diff suppressed because it is too large
@@ -312,6 +312,10 @@ impl RegionAliveKeeper {
         }
         deadline
     }
+
+    pub fn table_ident(&self) -> &TableIdent {
+        &self.table_ident
+    }
 }
 
 #[derive(Debug)]
@@ -21,17 +21,19 @@ mod tests {
     use std::sync::Arc;
     use std::time::Duration;
 
-    use catalog::helper::{CatalogKey, CatalogValue, SchemaKey, SchemaValue};
+    use catalog::error::Error;
     use catalog::remote::mock::MockTableEngine;
     use catalog::remote::region_alive_keeper::RegionAliveKeepers;
     use catalog::remote::{CachedMetaKvBackend, RemoteCatalogManager};
     use catalog::{CatalogManager, RegisterSchemaRequest, RegisterTableRequest};
     use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, MITO_ENGINE};
+    use common_meta::helper::{CatalogKey, CatalogValue, SchemaKey, SchemaValue};
     use common_meta::ident::TableIdent;
+    use common_meta::key::TableMetadataManager;
     use common_meta::kv_backend::memory::MemoryKvBackend;
+    use common_meta::kv_backend::KvBackend;
+    use common_meta::rpc::store::{CompareAndPutRequest, PutRequest, RangeRequest};
     use datatypes::schema::RawSchema;
-    use futures_util::StreamExt;
     use table::engine::manager::{MemoryTableEngineManager, TableEngineManagerRef};
     use table::engine::{EngineContext, TableEngineRef};
     use table::requests::CreateTableRequest;
@@ -52,38 +54,35 @@ mod tests {
 
     #[tokio::test]
     async fn test_backend() {
         common_telemetry::init_default_ut_logging();
-        let backend = MemoryKvBackend::default();
+        let backend = MemoryKvBackend::<Error>::default();
 
         let default_catalog_key = CatalogKey {
             catalog_name: DEFAULT_CATALOG_NAME.to_string(),
         }
         .to_string();
 
-        backend
-            .set(
-                default_catalog_key.as_bytes(),
-                &CatalogValue {}.as_bytes().unwrap(),
-            )
-            .await
-            .unwrap();
+        let req = PutRequest::new()
+            .with_key(default_catalog_key.as_bytes())
+            .with_value(CatalogValue.as_bytes().unwrap());
+        backend.put(req).await.unwrap();
 
         let schema_key = SchemaKey {
             catalog_name: DEFAULT_CATALOG_NAME.to_string(),
             schema_name: DEFAULT_SCHEMA_NAME.to_string(),
         }
         .to_string();
-        backend
-            .set(schema_key.as_bytes(), &SchemaValue {}.as_bytes().unwrap())
-            .await
-            .unwrap();
+        let req = PutRequest::new()
+            .with_key(schema_key.as_bytes())
+            .with_value(SchemaValue.as_bytes().unwrap());
+        backend.put(req).await.unwrap();
 
-        let mut iter = backend.range("__c-".as_bytes());
-        let mut res = HashSet::new();
-        while let Some(r) = iter.next().await {
-            let kv = r.unwrap();
-            let _ = res.insert(String::from_utf8_lossy(&kv.0).to_string());
-        }
+        let req = RangeRequest::new().with_prefix(b"__c-".to_vec());
+        let res = backend
+            .range(req)
+            .await
+            .unwrap()
+            .kvs
+            .into_iter()
+            .map(|kv| String::from_utf8_lossy(kv.key()).to_string());
         assert_eq!(
             vec!["__c-greptime".to_string()],
             res.into_iter().collect::<Vec<_>>()
@@ -98,36 +97,32 @@ mod tests {
             catalog_name: DEFAULT_CATALOG_NAME.to_string(),
         }
         .to_string();
 
-        backend
-            .set(
-                default_catalog_key.as_bytes(),
-                &CatalogValue {}.as_bytes().unwrap(),
-            )
-            .await
-            .unwrap();
+        let req = PutRequest::new()
+            .with_key(default_catalog_key.as_bytes())
+            .with_value(CatalogValue.as_bytes().unwrap());
+        backend.put(req).await.unwrap();
 
         let ret = backend.get(b"__c-greptime").await.unwrap();
         let _ = ret.unwrap();
 
-        let _ = backend
-            .compare_and_set(
-                b"__c-greptime",
-                &CatalogValue {}.as_bytes().unwrap(),
-                b"123",
-            )
-            .await
-            .unwrap();
+        let req = CompareAndPutRequest::new()
+            .with_key(b"__c-greptime".to_vec())
+            .with_expect(CatalogValue.as_bytes().unwrap())
+            .with_value(b"123".to_vec());
+        let _ = backend.compare_and_put(req).await.unwrap();
 
         let ret = backend.get(b"__c-greptime").await.unwrap();
-        assert_eq!(&b"123"[..], &(ret.as_ref().unwrap().1));
+        assert_eq!(b"123", ret.as_ref().unwrap().value.as_slice());
 
-        let _ = backend.set(b"__c-greptime", b"1234").await;
+        let req = PutRequest::new()
+            .with_key(b"__c-greptime".to_vec())
+            .with_value(b"1234".to_vec());
+        let _ = backend.put(req).await;
 
         let ret = backend.get(b"__c-greptime").await.unwrap();
-        assert_eq!(&b"1234"[..], &(ret.as_ref().unwrap().1));
+        assert_eq!(b"1234", ret.unwrap().value.as_slice());
 
-        backend.delete(b"__c-greptime").await.unwrap();
+        backend.delete(b"__c-greptime", false).await.unwrap();
 
         let ret = backend.get(b"__c-greptime").await.unwrap();
         assert!(ret.is_none());
@@ -135,8 +130,16 @@ mod tests {
 
     async fn prepare_components(node_id: u64) -> TestingComponents {
         let backend = Arc::new(MemoryKvBackend::default());
-        backend.set(b"__c-greptime", b"").await.unwrap();
-        backend.set(b"__s-greptime-public", b"").await.unwrap();
+
+        let req = PutRequest::new()
+            .with_key(b"__c-greptime".to_vec())
+            .with_value(b"".to_vec());
+        backend.put(req).await.unwrap();
+
+        let req = PutRequest::new()
+            .with_key(b"__s-greptime-public".to_vec())
+            .with_value(b"".to_vec());
+        backend.put(req).await.unwrap();
 
         let cached_backend = Arc::new(CachedMetaKvBackend::wrap(backend));
 
@@ -153,6 +156,7 @@ mod tests {
             node_id,
             cached_backend.clone(),
             region_alive_keepers.clone(),
+            Arc::new(TableMetadataManager::new(cached_backend)),
         );
         catalog_manager.start().await.unwrap();
 
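The reworked test scans with `RangeRequest::new().with_prefix(b"__c-".to_vec())`. A prefix scan is conventionally lowered to a half-open `[start, end)` range whose end key is the prefix with its last non-0xFF byte incremented; the sketch below is my own illustration of that etcd-style convention, not the crate's `get_prefix_end_key` (which does appear later in this diff):

```rust
// Compute the exclusive end key for a prefix scan, etcd-style.
fn prefix_end_key(prefix: &[u8]) -> Vec<u8> {
    let mut end = prefix.to_vec();
    while let Some(&last) = end.last() {
        if last < 0xFF {
            *end.last_mut().unwrap() = last + 1;
            return end;
        }
        end.pop(); // a 0xFF byte carries over into the previous byte
    }
    vec![] // every byte was 0xFF: scan to the end of the key space
}

fn main() {
    // b'-' is 0x2D, so the range for prefix "__c-" ends just before "__c.".
    assert_eq!(prefix_end_key(b"__c-"), b"__c.".to_vec());
}
```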
@@ -73,7 +73,7 @@ async fn run() {
 
     let logical = mock_logical_plan();
     event!(Level::INFO, "plan size: {:#?}", logical.len());
-    let result = db.logical_plan(logical).await.unwrap();
+    let result = db.logical_plan(logical, None).await.unwrap();
 
     event!(Level::INFO, "result: {:#?}", result);
 }
@@ -13,12 +13,10 @@
 // limitations under the License.
 
 use std::fmt::{Debug, Formatter};
-use std::sync::{Arc, Mutex};
 use std::time::Duration;
 
 use common_grpc::channel_manager::{ChannelConfig, ChannelManager};
 use common_meta::peer::Peer;
-use common_telemetry::info;
 use moka::future::{Cache, CacheBuilder};
 
 use crate::Client;
@@ -26,21 +24,11 @@ use crate::Client;
 pub struct DatanodeClients {
     channel_manager: ChannelManager,
     clients: Cache<Peer, Client>,
-    started: Arc<Mutex<bool>>,
 }
 
 impl Default for DatanodeClients {
     fn default() -> Self {
-        let config = ChannelConfig::new().timeout(Duration::from_secs(8));
-
-        Self {
-            channel_manager: ChannelManager::with_config(config),
-            clients: CacheBuilder::new(1024)
-                .time_to_live(Duration::from_secs(30 * 60))
-                .time_to_idle(Duration::from_secs(5 * 60))
-                .build(),
-            started: Arc::new(Mutex::new(false)),
-        }
+        Self::new(ChannelConfig::new())
     }
 }
 
@@ -53,16 +41,14 @@ impl Debug for DatanodeClients {
 }
 
 impl DatanodeClients {
-    pub fn start(&self) {
-        let mut started = self.started.lock().unwrap();
-        if *started {
-            return;
+    pub fn new(config: ChannelConfig) -> Self {
+        Self {
+            channel_manager: ChannelManager::with_config(config),
+            clients: CacheBuilder::new(1024)
+                .time_to_live(Duration::from_secs(30 * 60))
+                .time_to_idle(Duration::from_secs(5 * 60))
+                .build(),
         }
-
-        self.channel_manager.start_channel_recycle();
-
-        info!("Datanode clients manager is started!");
-        *started = true;
     }
 
     pub async fn get_client(&self, datanode: &Peer) -> Client {
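After this change the client pool is just an eagerly built moka cache; there is no `start()` left to forget. A hedged sketch of how such a pool is typically consumed; `get_with` is moka's get-or-insert-with-future entry point, and the `String` stand-ins for `Peer`/`Client` are assumptions, not the crate's types:

```rust
use std::time::Duration;

use moka::future::{Cache, CacheBuilder};

#[tokio::main]
async fn main() {
    // Same shape as the pool above: capacity 1024, 30 min TTL, 5 min TTI.
    let clients: Cache<String, String> = CacheBuilder::new(1024)
        .time_to_live(Duration::from_secs(30 * 60))
        .time_to_idle(Duration::from_secs(5 * 60))
        .build();

    // get_with returns the cached entry for this peer, or runs the async
    // initializer (here: building a stand-in String) once and caches the result.
    let client = clients
        .get_with("peer-1:3001".to_string(), async {
            format!("connected({})", "peer-1:3001")
        })
        .await;
    assert_eq!(client, "connected(peer-1:3001)");
}
```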
@@ -17,12 +17,12 @@ use api::v1::ddl_request::Expr as DdlExpr;
 use api::v1::greptime_request::Request;
 use api::v1::query_request::Query;
 use api::v1::{
-    greptime_response, AffectedRows, AlterExpr, AuthHeader, CreateTableExpr, DdlRequest,
-    DeleteRequest, DropTableExpr, FlushTableExpr, GreptimeRequest, InsertRequests, PromRangeQuery,
-    QueryRequest, RequestHeader,
+    AlterExpr, AuthHeader, CompactTableExpr, CreateTableExpr, DdlRequest, DeleteRequest,
+    DropTableExpr, FlushTableExpr, GreptimeRequest, InsertRequests, PromRangeQuery, QueryRequest,
+    RequestHeader, TruncateTableExpr,
 };
 use arrow_flight::{FlightData, Ticket};
-use common_error::prelude::*;
+use common_error::ext::{BoxedError, ErrorExt};
 use common_grpc::flight::{flight_messages_to_recordbatches, FlightDecoder, FlightMessage};
 use common_query::Output;
 use common_telemetry::{logging, timer};
@@ -30,10 +30,8 @@ use futures_util::{TryFutureExt, TryStreamExt};
 use prost::Message;
 use snafu::{ensure, ResultExt};
 
-use crate::error::{
-    ConvertFlightDataSnafu, IllegalDatabaseResponseSnafu, IllegalFlightMessagesSnafu,
-};
-use crate::{error, metrics, Client, Result, StreamInserter};
+use crate::error::{ConvertFlightDataSnafu, IllegalFlightMessagesSnafu, ServerSnafu};
+use crate::{error, from_grpc_response, metrics, Client, Result, StreamInserter};
 
 #[derive(Clone, Debug, Default)]
 pub struct Database {
@@ -141,27 +139,21 @@ impl Database {
 
     async fn handle(&self, request: Request) -> Result<u32> {
         let mut client = self.client.make_database_client()?.inner;
-        let request = self.to_rpc_request(request);
-        let response = client
-            .handle(request)
-            .await?
-            .into_inner()
-            .response
-            .context(IllegalDatabaseResponseSnafu {
-                err_msg: "GreptimeResponse is empty",
-            })?;
-        let greptime_response::Response::AffectedRows(AffectedRows { value }) = response;
-        Ok(value)
+        let request = self.to_rpc_request(request, None);
+        let response = client.handle(request).await?.into_inner();
+        from_grpc_response(response)
     }
 
     #[inline]
-    fn to_rpc_request(&self, request: Request) -> GreptimeRequest {
+    fn to_rpc_request(&self, request: Request, trace_id: Option<u64>) -> GreptimeRequest {
         GreptimeRequest {
             header: Some(RequestHeader {
                 catalog: self.catalog.clone(),
                 schema: self.schema.clone(),
                 authorization: self.ctx.auth_header.clone(),
                 dbname: self.dbname.clone(),
+                trace_id,
+                span_id: None,
             }),
             request: Some(request),
         }
@@ -169,17 +161,27 @@ impl Database {
 
     pub async fn sql(&self, sql: &str) -> Result<Output> {
         let _timer = timer!(metrics::METRIC_GRPC_SQL);
-        self.do_get(Request::Query(QueryRequest {
-            query: Some(Query::Sql(sql.to_string())),
-        }))
+        self.do_get(
+            Request::Query(QueryRequest {
+                query: Some(Query::Sql(sql.to_string())),
+            }),
+            None,
+        )
         .await
     }
 
-    pub async fn logical_plan(&self, logical_plan: Vec<u8>) -> Result<Output> {
+    pub async fn logical_plan(
+        &self,
+        logical_plan: Vec<u8>,
+        trace_id: Option<u64>,
+    ) -> Result<Output> {
         let _timer = timer!(metrics::METRIC_GRPC_LOGICAL_PLAN);
-        self.do_get(Request::Query(QueryRequest {
-            query: Some(Query::LogicalPlan(logical_plan)),
-        }))
+        self.do_get(
+            Request::Query(QueryRequest {
+                query: Some(Query::LogicalPlan(logical_plan)),
+            }),
+            trace_id,
+        )
         .await
     }
@@ -191,53 +193,90 @@ impl Database {
         step: &str,
     ) -> Result<Output> {
         let _timer = timer!(metrics::METRIC_GRPC_PROMQL_RANGE_QUERY);
-        self.do_get(Request::Query(QueryRequest {
-            query: Some(Query::PromRangeQuery(PromRangeQuery {
-                query: promql.to_string(),
-                start: start.to_string(),
-                end: end.to_string(),
-                step: step.to_string(),
-            })),
-        }))
+        self.do_get(
+            Request::Query(QueryRequest {
+                query: Some(Query::PromRangeQuery(PromRangeQuery {
+                    query: promql.to_string(),
+                    start: start.to_string(),
+                    end: end.to_string(),
+                    step: step.to_string(),
+                })),
+            }),
+            None,
+        )
         .await
     }
 
     pub async fn create(&self, expr: CreateTableExpr) -> Result<Output> {
         let _timer = timer!(metrics::METRIC_GRPC_CREATE_TABLE);
-        self.do_get(Request::Ddl(DdlRequest {
-            expr: Some(DdlExpr::CreateTable(expr)),
-        }))
+        self.do_get(
+            Request::Ddl(DdlRequest {
+                expr: Some(DdlExpr::CreateTable(expr)),
+            }),
+            None,
+        )
         .await
     }
 
     pub async fn alter(&self, expr: AlterExpr) -> Result<Output> {
         let _timer = timer!(metrics::METRIC_GRPC_ALTER);
-        self.do_get(Request::Ddl(DdlRequest {
-            expr: Some(DdlExpr::Alter(expr)),
-        }))
+        self.do_get(
+            Request::Ddl(DdlRequest {
+                expr: Some(DdlExpr::Alter(expr)),
+            }),
+            None,
+        )
         .await
     }
 
     pub async fn drop_table(&self, expr: DropTableExpr) -> Result<Output> {
         let _timer = timer!(metrics::METRIC_GRPC_DROP_TABLE);
-        self.do_get(Request::Ddl(DdlRequest {
-            expr: Some(DdlExpr::DropTable(expr)),
-        }))
+        self.do_get(
+            Request::Ddl(DdlRequest {
+                expr: Some(DdlExpr::DropTable(expr)),
+            }),
+            None,
+        )
         .await
     }
 
     pub async fn flush_table(&self, expr: FlushTableExpr) -> Result<Output> {
         let _timer = timer!(metrics::METRIC_GRPC_FLUSH_TABLE);
-        self.do_get(Request::Ddl(DdlRequest {
-            expr: Some(DdlExpr::FlushTable(expr)),
-        }))
+        self.do_get(
+            Request::Ddl(DdlRequest {
+                expr: Some(DdlExpr::FlushTable(expr)),
+            }),
+            None,
+        )
         .await
     }
 
-    async fn do_get(&self, request: Request) -> Result<Output> {
+    pub async fn compact_table(&self, expr: CompactTableExpr) -> Result<Output> {
+        let _timer = timer!(metrics::METRIC_GRPC_COMPACT_TABLE);
+        self.do_get(
+            Request::Ddl(DdlRequest {
+                expr: Some(DdlExpr::CompactTable(expr)),
+            }),
+            None,
+        )
+        .await
+    }
+
+    pub async fn truncate_table(&self, expr: TruncateTableExpr) -> Result<Output> {
+        let _timer = timer!(metrics::METRIC_GRPC_TRUNCATE_TABLE);
+        self.do_get(
+            Request::Ddl(DdlRequest {
+                expr: Some(DdlExpr::TruncateTable(expr)),
+            }),
+            None,
+        )
+        .await
+    }
+
+    async fn do_get(&self, request: Request, trace_id: Option<u64>) -> Result<Output> {
        // FIXME(paomian): should be added some labels for metrics
        let _timer = timer!(metrics::METRIC_GRPC_DO_GET);
-        let request = self.to_rpc_request(request);
+        let request = self.to_rpc_request(request, trace_id);
        let request = Ticket {
            ticket: request.encode_to_vec().into(),
        };
@@ -254,7 +293,7 @@ impl Database {
                let e: error::Error = e.into();
                let code = e.status_code();
                let msg = e.to_string();
-                error::ServerSnafu { code, msg }
+                ServerSnafu { code, msg }
                    .fail::<()>()
                    .map_err(BoxedError::new)
                    .context(error::FlightGetSnafu {
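Every public helper in `Database` now threads an optional trace id into `to_rpc_request`, which stamps it into the `RequestHeader`. A standalone sketch of the same threading pattern with simplified stand-in types (none of these are the api::v1 protos):

```rust
// Stand-in types; the real code carries the same fields in RequestHeader.
struct Header { trace_id: Option<u64>, span_id: Option<u64> }
struct RpcRequest { header: Header, body: String }

struct Db;

impl Db {
    // One private builder takes the per-call trace id ...
    fn to_rpc_request(&self, body: String, trace_id: Option<u64>) -> RpcRequest {
        RpcRequest { header: Header { trace_id, span_id: None }, body }
    }

    // ... and each public entry point decides whether to forward one.
    fn sql(&self, sql: &str) -> RpcRequest {
        self.to_rpc_request(sql.to_string(), None) // ad-hoc queries: no trace
    }

    fn logical_plan(&self, plan: Vec<u8>, trace_id: Option<u64>) -> RpcRequest {
        self.to_rpc_request(format!("{} plan bytes", plan.len()), trace_id)
    }
}

fn main() {
    let db = Db;
    assert_eq!(db.sql("SELECT 1").header.trace_id, None);
    assert_eq!(db.logical_plan(vec![0; 8], Some(42)).header.trace_id, Some(42));
}
```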
@@ -15,8 +15,10 @@
 use std::any::Any;
 use std::str::FromStr;
 
-use common_error::prelude::*;
-use snafu::Location;
+use common_error::ext::{BoxedError, ErrorExt};
+use common_error::status_code::StatusCode;
+use common_error::{INNER_ERROR_CODE, INNER_ERROR_MSG};
+use snafu::{Location, Snafu};
 use tonic::{Code, Status};
 
 #[derive(Debug, Snafu)]
@@ -15,15 +15,48 @@
 mod client;
 pub mod client_manager;
 mod database;
-mod error;
+pub mod error;
 pub mod load_balance;
 mod metrics;
 mod stream_insert;
 
 pub use api;
+use api::v1::greptime_response::Response;
+use api::v1::{AffectedRows, GreptimeResponse};
 pub use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
+use common_error::status_code::StatusCode;
+use snafu::OptionExt;
 
 pub use self::client::Client;
 pub use self::database::Database;
 pub use self::error::{Error, Result};
 pub use self::stream_insert::StreamInserter;
+use crate::error::{IllegalDatabaseResponseSnafu, ServerSnafu};
+
+pub fn from_grpc_response(response: GreptimeResponse) -> Result<u32> {
+    let header = response.header.context(IllegalDatabaseResponseSnafu {
+        err_msg: "missing header",
+    })?;
+    let status = header.status.context(IllegalDatabaseResponseSnafu {
+        err_msg: "missing status",
+    })?;
+
+    if StatusCode::is_success(status.status_code) {
+        let res = response.response.context(IllegalDatabaseResponseSnafu {
+            err_msg: "missing response",
+        })?;
+        match res {
+            Response::AffectedRows(AffectedRows { value }) => Ok(value),
+        }
+    } else {
+        let status_code =
+            StatusCode::from_u32(status.status_code).context(IllegalDatabaseResponseSnafu {
+                err_msg: format!("invalid status: {:?}", status),
+            })?;
+        ServerSnafu {
+            code: status_code,
+            msg: status.err_msg,
+        }
+        .fail()
+    }
+}
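`from_grpc_response` centralizes the decode-or-surface-server-error logic that `handle` and `StreamInserter::finish` previously duplicated. A runnable sketch of the same contract with stand-in types; the assumption that a zero `status_code` means success mirrors how `StatusCode::is_success` is used above:

```rust
// Stand-in types; the real protos carry the same information in
// GreptimeResponse / ResponseHeader / Status.
struct Status { status_code: u32, err_msg: String }
struct Resp { status: Option<Status>, affected_rows: Option<u32> }

fn decode(resp: Resp) -> Result<u32, String> {
    let status = resp.status.ok_or_else(|| "missing status".to_string())?;
    if status.status_code == 0 {
        // Success: the payload must be present.
        resp.affected_rows.ok_or_else(|| "missing response".to_string())
    } else {
        // Failure: surface the server-reported code and message as an error.
        Err(format!("server error {}: {}", status.status_code, status.err_msg))
    }
}

fn main() {
    let ok = Resp {
        status: Some(Status { status_code: 0, err_msg: String::new() }),
        affected_rows: Some(3),
    };
    assert_eq!(decode(ok), Ok(3));

    let err = Resp {
        status: Some(Status { status_code: 1003, err_msg: "oops".into() }),
        affected_rows: None,
    };
    assert!(decode(err).is_err());
}
```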
@@ -22,4 +22,6 @@ pub const METRIC_GRPC_LOGICAL_PLAN: &str = "grpc.logical_plan";
 pub const METRIC_GRPC_ALTER: &str = "grpc.alter";
 pub const METRIC_GRPC_DROP_TABLE: &str = "grpc.drop_table";
 pub const METRIC_GRPC_FLUSH_TABLE: &str = "grpc.flush_table";
+pub const METRIC_GRPC_COMPACT_TABLE: &str = "grpc.compact_table";
+pub const METRIC_GRPC_TRUNCATE_TABLE: &str = "grpc.truncate_table";
 pub const METRIC_GRPC_DO_GET: &str = "grpc.do_get";
@@ -15,17 +15,16 @@
 use api::v1::greptime_database_client::GreptimeDatabaseClient;
 use api::v1::greptime_request::Request;
 use api::v1::{
-    greptime_response, AffectedRows, AuthHeader, GreptimeRequest, GreptimeResponse, InsertRequest,
-    InsertRequests, RequestHeader,
+    AuthHeader, GreptimeRequest, GreptimeResponse, InsertRequest, InsertRequests, RequestHeader,
 };
-use snafu::OptionExt;
 use tokio::sync::mpsc;
 use tokio::task::JoinHandle;
 use tokio_stream::wrappers::ReceiverStream;
 use tonic::transport::Channel;
 use tonic::{Response, Status};
 
-use crate::error::{self, IllegalDatabaseResponseSnafu, Result};
+use crate::error::{self, Result};
+use crate::from_grpc_response;
 
 /// A structure that provides some methods for streaming data insert.
 ///
@@ -89,17 +88,8 @@ impl StreamInserter {
         drop(self.sender);
 
         let response = self.join.await.unwrap()?;
-
-        let response = response
-            .into_inner()
-            .response
-            .context(IllegalDatabaseResponseSnafu {
-                err_msg: "GreptimeResponse is empty",
-            })?;
-
-        let greptime_response::Response::AffectedRows(AffectedRows { value }) = response;
-
-        Ok(value)
+        let response = response.into_inner();
+        from_grpc_response(response)
     }
 
     fn to_rpc_request(&self, request: Request) -> GreptimeRequest {
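`finish()` now closes the request stream and decodes the single response through `from_grpc_response`. A standalone sketch of the underlying mpsc pattern (stand-in types, not the gRPC plumbing): dropping the sender is what ends the stream and lets the receiving task complete:

```rust
use tokio::sync::mpsc;

#[tokio::main]
async fn main() {
    let (tx, mut rx) = mpsc::channel::<u32>(8);

    // Stand-in for the gRPC task that consumes the request stream.
    let join = tokio::spawn(async move {
        let mut affected = 0u32;
        while let Some(rows) = rx.recv().await {
            affected += rows;
        }
        affected // stand-in for the single GreptimeResponse
    });

    tx.send(3).await.unwrap();
    tx.send(4).await.unwrap();
    drop(tx); // closing the sender ends the stream, letting the task finish

    let affected = join.await.unwrap();
    assert_eq!(affected, 7);
}
```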
@@ -16,19 +16,24 @@ metrics-process = ["servers/metrics-process"]
 
 [dependencies]
 anymap = "1.0.0-beta.2"
+async-trait.workspace = true
 catalog = { path = "../catalog" }
+chrono.workspace = true
 clap = { version = "3.1", features = ["derive"] }
 client = { path = "../client" }
 common-base = { path = "../common/base" }
 common-error = { path = "../common/error" }
 common-query = { path = "../common/query" }
+common-meta = { path = "../common/meta" }
 common-recordbatch = { path = "../common/recordbatch" }
 common-telemetry = { path = "../common/telemetry", features = [
     "deadlock_detection",
 ] }
 config = "0.13"
 datanode = { path = "../datanode" }
+datatypes = { path = "../datatypes" }
 either = "1.8"
+etcd-client.workspace = true
 frontend = { path = "../frontend" }
 futures.workspace = true
 meta-client = { path = "../meta-client" }
@@ -37,12 +42,14 @@ metrics.workspace = true
 nu-ansi-term = "0.46"
 partition = { path = "../partition" }
 query = { path = "../query" }
+rand.workspace = true
 rustyline = "10.1"
 serde.workspace = true
 servers = { path = "../servers" }
 session = { path = "../session" }
 snafu.workspace = true
 substrait = { path = "../common/substrait" }
 table = { path = "../table" }
+tikv-jemallocator = "0.5"
 tokio.workspace = true
 
@@ -51,7 +58,7 @@ common-test-util = { path = "../common/test-util" }
 rexpect = "0.5"
 temp-env = "0.3"
 serde.workspace = true
-toml = "0.5"
+toml.workspace = true
 
 [build-dependencies]
 build-data = "0.1.4"
@@ -12,24 +12,38 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+mod bench;
 mod cmd;
 mod helper;
 mod repl;
+mod upgrade;
 
+use async_trait::async_trait;
+use bench::BenchTableMetadataCommand;
 use clap::Parser;
 use common_telemetry::logging::LoggingOptions;
 pub use repl::Repl;
+use upgrade::UpgradeCommand;
 
 use crate::error::Result;
 use crate::options::{Options, TopLevelOptions};
 
-pub struct Instance {
-    repl: Repl,
+#[async_trait]
+pub trait Tool {
+    async fn do_work(&self) -> Result<()>;
+}
+
+pub enum Instance {
+    Repl(Repl),
+    Tool(Box<dyn Tool>),
 }
 
 impl Instance {
     pub async fn start(&mut self) -> Result<()> {
-        self.repl.run().await
+        match self {
+            Instance::Repl(repl) => repl.run().await,
+            Instance::Tool(tool) => tool.do_work().await,
+        }
     }
 
     pub async fn stop(&self) -> Result<()> {
@@ -63,12 +77,16 @@ impl Command {
 #[derive(Parser)]
 enum SubCommand {
     Attach(AttachCommand),
+    Upgrade(UpgradeCommand),
+    Bench(BenchTableMetadataCommand),
 }
 
 impl SubCommand {
     async fn build(self) -> Result<Instance> {
         match self {
             SubCommand::Attach(cmd) => cmd.build().await,
+            SubCommand::Upgrade(cmd) => cmd.build().await,
+            SubCommand::Bench(cmd) => cmd.build().await,
         }
     }
 }
@@ -86,7 +104,7 @@ pub(crate) struct AttachCommand {
 impl AttachCommand {
     async fn build(self) -> Result<Instance> {
         let repl = Repl::try_new(&self).await?;
-        Ok(Instance { repl })
+        Ok(Instance::Repl(repl))
     }
 }
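The CLI now dispatches either a long-running REPL or a one-shot `Tool` through the same `Instance` enum. A hedged standalone sketch of that dispatch shape (simplified `Result`, plus `Send + Sync` bounds so it compiles on its own; these are not the crate's definitions):

```rust
use async_trait::async_trait;

// Stand-ins mirroring the shapes above.
type Result<T> = std::result::Result<T, String>;

#[async_trait]
trait Tool {
    async fn do_work(&self) -> Result<()>;
}

enum Instance {
    Tool(Box<dyn Tool + Send + Sync>),
}

impl Instance {
    async fn start(&mut self) -> Result<()> {
        match self {
            // A Tool runs to completion instead of serving a REPL loop.
            Instance::Tool(tool) => tool.do_work().await,
        }
    }
}

struct Noop;

#[async_trait]
impl Tool for Noop {
    async fn do_work(&self) -> Result<()> {
        println!("one-shot tool ran to completion");
        Ok(())
    }
}

#[tokio::main]
async fn main() -> Result<()> {
    let mut instance = Instance::Tool(Box::new(Noop));
    instance.start().await
}
```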
src/cmd/src/cli/bench.rs (new file, 198 lines)
@@ -0,0 +1,198 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

mod datanode_table;
mod table_info;
mod table_name;
mod table_region;

use std::future::Future;
use std::sync::Arc;
use std::time::{Duration, Instant};

use async_trait::async_trait;
use clap::Parser;
use common_meta::key::table_region::RegionDistribution;
use common_meta::key::{TableMetadataManager, TableMetadataManagerRef};
use common_meta::table_name::TableName;
use common_telemetry::info;
use datatypes::data_type::ConcreteDataType;
use datatypes::schema::{ColumnSchema, RawSchema};
use meta_srv::service::store::etcd::EtcdStore;
use meta_srv::service::store::kv::KvBackendAdapter;
use rand::prelude::SliceRandom;
use table::metadata::{RawTableInfo, RawTableMeta, TableId, TableIdent, TableType};

use crate::cli::bench::datanode_table::DatanodeTableBencher;
use crate::cli::bench::table_info::TableInfoBencher;
use crate::cli::bench::table_name::TableNameBencher;
use crate::cli::bench::table_region::TableRegionBencher;
use crate::cli::{Instance, Tool};
use crate::error::Result;

async fn bench<F, Fut>(desc: &str, f: F, count: u32)
where
    F: Fn(u32) -> Fut,
    Fut: Future<Output = ()>,
{
    let mut total = Duration::default();

    for i in 1..=count {
        let start = Instant::now();

        f(i).await;

        total += start.elapsed();
    }

    let cost = total.as_millis() as f64 / count as f64;
    info!("{desc}, average operation cost: {cost:.2} ms");
}

async fn bench_self_recorded<F, Fut>(desc: &str, f: F, count: u32)
where
    F: Fn(u32) -> Fut,
    Fut: Future<Output = Duration>,
{
    let mut total = Duration::default();

    for i in 1..=count {
        total += f(i).await;
    }

    let cost = total.as_millis() as f64 / count as f64;
    info!("{desc}, average operation cost: {cost:.2} ms");
}

#[derive(Debug, Default, Parser)]
pub struct BenchTableMetadataCommand {
    #[clap(long)]
    etcd_addr: String,
    #[clap(long)]
    count: u32,
}

impl BenchTableMetadataCommand {
    pub async fn build(&self) -> Result<Instance> {
        let etcd_store = EtcdStore::with_endpoints([&self.etcd_addr]).await.unwrap();

        let table_metadata_manager = Arc::new(TableMetadataManager::new(KvBackendAdapter::wrap(
            etcd_store,
        )));

        let tool = BenchTableMetadata {
            table_metadata_manager,
            count: self.count,
        };
        Ok(Instance::Tool(Box::new(tool)))
    }
}

struct BenchTableMetadata {
    table_metadata_manager: TableMetadataManagerRef,
    count: u32,
}

#[async_trait]
impl Tool for BenchTableMetadata {
    async fn do_work(&self) -> Result<()> {
        info!("Start benching table name manager ...");
        TableNameBencher::new(self.table_metadata_manager.table_name_manager(), self.count)
            .start()
            .await;

        info!("Start benching table info manager ...");
        TableInfoBencher::new(self.table_metadata_manager.table_info_manager(), self.count)
            .start()
            .await;

        info!("Start benching table region manager ...");
        TableRegionBencher::new(
            self.table_metadata_manager.table_region_manager(),
            self.count,
        )
        .start()
        .await;

        info!("Start benching datanode table manager ...");
        DatanodeTableBencher::new(
            self.table_metadata_manager.datanode_table_manager(),
            self.count,
        )
        .start()
        .await;
        Ok(())
    }
}

fn create_table_info(table_id: TableId, table_name: TableName) -> RawTableInfo {
    let columns = 100;
    let mut column_schemas = Vec::with_capacity(columns);
    column_schemas.push(
        ColumnSchema::new(
            "ts",
            ConcreteDataType::timestamp_millisecond_datatype(),
            true,
        )
        .with_time_index(true),
    );

    for i in 1..columns {
        let column_name = format!("my_column_{i}");
        column_schemas.push(ColumnSchema::new(
            column_name,
            ConcreteDataType::string_datatype(),
            true,
        ));
    }

    let meta = RawTableMeta {
        schema: RawSchema::new(column_schemas),
        engine: "mito".to_string(),
        created_on: chrono::DateTime::default(),
        primary_key_indices: vec![],
        next_column_id: columns as u32 + 1,
        engine_options: Default::default(),
        value_indices: vec![],
        options: Default::default(),
        region_numbers: (1..=100).collect(),
    };

    RawTableInfo {
        ident: TableIdent {
            table_id,
            version: 1,
        },
        name: table_name.table_name,
        desc: Some("blah".to_string()),
        catalog_name: table_name.catalog_name,
        schema_name: table_name.schema_name,
        meta,
        table_type: TableType::Base,
    }
}

fn create_region_distribution() -> RegionDistribution {
    let mut regions = (1..=100).collect::<Vec<u32>>();
    regions.shuffle(&mut rand::thread_rng());

    let mut region_distribution = RegionDistribution::new();
    for datanode_id in 0..10 {
        region_distribution.insert(
            datanode_id as u64,
            regions[datanode_id * 10..(datanode_id + 1) * 10].to_vec(),
        );
    }
    region_distribution
}
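The two drivers in bench.rs differ only in who holds the stopwatch: `bench` times the entire closure, while `bench_self_recorded` lets the closure exclude setup from the measurement, which is how the compare-and-put benchmarks below avoid counting the preliminary read. A runnable sketch of the self-recorded variant (`println!` stands in for common_telemetry's `info!`):

```rust
use std::future::Future;
use std::time::{Duration, Instant};

// Matches the shape of bench_self_recorded above: the closure returns only
// the Duration of the operation under test, so setup cost is excluded.
async fn bench_self_recorded<F, Fut>(desc: &str, f: F, count: u32)
where
    F: Fn(u32) -> Fut,
    Fut: Future<Output = Duration>,
{
    let mut total = Duration::default();
    for i in 1..=count {
        total += f(i).await;
    }
    println!("{desc}: {:.2} ms/op", total.as_millis() as f64 / count as f64);
}

#[tokio::main]
async fn main() {
    bench_self_recorded(
        "timed section only",
        |i| async move {
            // Un-timed setup (e.g. building a payload) happens here ...
            let payload = vec![i as u8; 1024];

            let start = Instant::now();
            // ... and only the call under test is measured.
            tokio::time::sleep(Duration::from_millis(1)).await;
            let _ = payload.len();
            start.elapsed()
        },
        10,
    )
    .await;
}
```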
src/cmd/src/cli/bench/datanode_table.rs (new file, 131 lines)
@@ -0,0 +1,131 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use common_meta::key::datanode_table::{DatanodeTableKey, DatanodeTableManager};

use super::bench;

pub struct DatanodeTableBencher<'a> {
    datanode_table_manager: &'a DatanodeTableManager,
    count: u32,
}

impl<'a> DatanodeTableBencher<'a> {
    pub fn new(datanode_table_manager: &'a DatanodeTableManager, count: u32) -> Self {
        Self {
            datanode_table_manager,
            count,
        }
    }

    pub async fn start(&self) {
        self.bench_create().await;
        self.bench_get().await;
        self.bench_move_region().await;
        self.bench_tables().await;
        self.bench_remove().await;
    }

    async fn bench_create(&self) {
        let desc = format!(
            "DatanodeTableBencher: create {} datanode table keys",
            self.count
        );
        bench(
            &desc,
            |i| async move {
                self.datanode_table_manager
                    .create(1, i, vec![1, 2, 3, 4])
                    .await
                    .unwrap();
            },
            self.count,
        )
        .await;
    }

    async fn bench_get(&self) {
        let desc = format!(
            "DatanodeTableBencher: get {} datanode table keys",
            self.count
        );
        bench(
            &desc,
            |i| async move {
                let key = DatanodeTableKey::new(1, i);
                assert!(self
                    .datanode_table_manager
                    .get(&key)
                    .await
                    .unwrap()
                    .is_some());
            },
            self.count,
        )
        .await;
    }

    async fn bench_move_region(&self) {
        let desc = format!(
            "DatanodeTableBencher: move {} datanode table regions",
            self.count
        );
        bench(
            &desc,
            |i| async move {
                self.datanode_table_manager
                    .move_region(1, 2, i, 1)
                    .await
                    .unwrap();
            },
            self.count,
        )
        .await;
    }

    async fn bench_tables(&self) {
        let desc = format!(
            "DatanodeTableBencher: list {} datanode table keys",
            self.count
        );
        bench(
            &desc,
            |_| async move {
                assert!(!self
                    .datanode_table_manager
                    .tables(1)
                    .await
                    .unwrap()
                    .is_empty());
            },
            self.count,
        )
        .await;
    }

    async fn bench_remove(&self) {
        let desc = format!(
            "DatanodeTableBencher: remove {} datanode table keys",
            self.count
        );
        bench(
            &desc,
            |i| async move {
                self.datanode_table_manager.remove(1, i).await.unwrap();
            },
            self.count,
        )
        .await;
    }
}
src/cmd/src/cli/bench/table_info.rs (new file, 111 lines)
@@ -0,0 +1,111 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::time::Instant;

use common_meta::key::table_info::TableInfoManager;
use common_meta::table_name::TableName;

use super::{bench, bench_self_recorded, create_table_info};

pub struct TableInfoBencher<'a> {
    table_info_manager: &'a TableInfoManager,
    count: u32,
}

impl<'a> TableInfoBencher<'a> {
    pub fn new(table_info_manager: &'a TableInfoManager, count: u32) -> Self {
        Self {
            table_info_manager,
            count,
        }
    }

    pub async fn start(&self) {
        self.bench_create().await;
        self.bench_get().await;
        self.bench_compare_and_put().await;
        self.bench_remove().await;
    }

    async fn bench_create(&self) {
        let desc = format!("TableInfoBencher: create {} table infos", self.count);
        bench(
            &desc,
            |i| async move {
                let table_name = format!("bench_table_name_{}", i);
                let table_name = TableName::new("bench_catalog", "bench_schema", table_name);
                let table_info = create_table_info(i, table_name);
                self.table_info_manager
                    .create(i, &table_info)
                    .await
                    .unwrap();
            },
            self.count,
        )
        .await;
    }

    async fn bench_get(&self) {
        let desc = format!("TableInfoBencher: get {} table infos", self.count);
        bench(
            &desc,
            |i| async move {
                assert!(self.table_info_manager.get(i).await.unwrap().is_some());
            },
            self.count,
        )
        .await;
    }

    async fn bench_compare_and_put(&self) {
        let desc = format!(
            "TableInfoBencher: compare_and_put {} table infos",
            self.count
        );
        bench_self_recorded(
            &desc,
            |i| async move {
                let table_info_value = self.table_info_manager.get(i).await.unwrap().unwrap();

                let mut new_table_info = table_info_value.table_info.clone();
                new_table_info.ident.version += 1;

                let start = Instant::now();

                self.table_info_manager
                    .compare_and_put(i, Some(table_info_value), new_table_info)
                    .await
                    .unwrap()
                    .unwrap();

                start.elapsed()
            },
            self.count,
        )
        .await;
    }

    async fn bench_remove(&self) {
        let desc = format!("TableInfoBencher: remove {} table infos", self.count);
        bench(
            &desc,
            |i| async move {
                self.table_info_manager.remove(i).await.unwrap();
            },
            self.count,
        )
        .await;
    }
}
src/cmd/src/cli/bench/table_name.rs (new file, 131 lines)
@@ -0,0 +1,131 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use common_meta::key::table_name::{TableNameKey, TableNameManager};

use super::bench;

pub struct TableNameBencher<'a> {
    table_name_manager: &'a TableNameManager,
    count: u32,
}

impl<'a> TableNameBencher<'a> {
    pub fn new(table_name_manager: &'a TableNameManager, count: u32) -> Self {
        Self {
            table_name_manager,
            count,
        }
    }

    pub async fn start(&self) {
        self.bench_create().await;
        self.bench_rename().await;
        self.bench_get().await;
        self.bench_tables().await;
        self.bench_remove().await;
    }

    async fn bench_create(&self) {
        let desc = format!("TableNameBencher: create {} table names", self.count);
        bench(
            &desc,
            |i| async move {
                let table_name = format!("bench_table_name_{}", i);
                let table_name_key = create_table_name_key(&table_name);
                self.table_name_manager
                    .create(&table_name_key, i)
                    .await
                    .unwrap();
            },
            self.count,
        )
        .await;
    }

    async fn bench_rename(&self) {
        let desc = format!("TableNameBencher: rename {} table names", self.count);
        bench(
            &desc,
            |i| async move {
                let table_name = format!("bench_table_name_{}", i);
                let new_table_name = format!("bench_table_name_new_{}", i);
                let table_name_key = create_table_name_key(&table_name);
                self.table_name_manager
                    .rename(table_name_key, i, &new_table_name)
                    .await
                    .unwrap();
            },
            self.count,
        )
        .await;
    }

    async fn bench_get(&self) {
        let desc = format!("TableNameBencher: get {} table names", self.count);
        bench(
            &desc,
            |i| async move {
                let table_name = format!("bench_table_name_new_{}", i);
                let table_name_key = create_table_name_key(&table_name);
                assert!(self
                    .table_name_manager
                    .get(table_name_key)
                    .await
                    .unwrap()
                    .is_some());
            },
            self.count,
        )
        .await;
    }

    async fn bench_tables(&self) {
        let desc = format!("TableNameBencher: list all {} table names", self.count);
        bench(
            &desc,
            |_| async move {
                assert!(!self
                    .table_name_manager
                    .tables("bench_catalog", "bench_schema")
                    .await
                    .unwrap()
                    .is_empty());
            },
            self.count,
        )
        .await;
    }

    async fn bench_remove(&self) {
        let desc = format!("TableNameBencher: remove {} table names", self.count);
        bench(
            &desc,
            |i| async move {
                let table_name = format!("bench_table_name_new_{}", i);
                let table_name_key = create_table_name_key(&table_name);
                self.table_name_manager
                    .remove(table_name_key)
                    .await
                    .unwrap();
            },
            self.count,
        )
        .await;
    }
}

fn create_table_name_key(table_name: &str) -> TableNameKey {
    TableNameKey::new("bench_catalog", "bench_schema", table_name)
}
src/cmd/src/cli/bench/table_region.rs (new file, 112 lines)
@@ -0,0 +1,112 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::time::Instant;

use common_meta::key::table_region::TableRegionManager;

use super::{bench, bench_self_recorded, create_region_distribution};

pub struct TableRegionBencher<'a> {
    table_region_manager: &'a TableRegionManager,
    count: u32,
}

impl<'a> TableRegionBencher<'a> {
    pub fn new(table_region_manager: &'a TableRegionManager, count: u32) -> Self {
        Self {
            table_region_manager,
            count,
        }
    }

    pub async fn start(&self) {
        self.bench_create().await;
        self.bench_get().await;
        self.bench_compare_and_put().await;
        self.bench_remove().await;
    }

    async fn bench_create(&self) {
        let desc = format!("TableRegionBencher: create {} table regions", self.count);
        bench_self_recorded(
            &desc,
            |i| async move {
                let region_distribution = create_region_distribution();

                let start = Instant::now();

                self.table_region_manager
                    .create(i, &region_distribution)
                    .await
                    .unwrap();

                start.elapsed()
            },
            self.count,
        )
        .await;
    }

    async fn bench_get(&self) {
        let desc = format!("TableRegionBencher: get {} table regions", self.count);
        bench(
            &desc,
            |i| async move {
                assert!(self.table_region_manager.get(i).await.unwrap().is_some());
            },
            self.count,
        )
        .await;
    }

    async fn bench_compare_and_put(&self) {
        let desc = format!(
            "TableRegionBencher: compare_and_put {} table regions",
            self.count
        );
        bench_self_recorded(
            &desc,
            |i| async move {
                let table_region_value = self.table_region_manager.get(i).await.unwrap().unwrap();

                let new_region_distribution = create_region_distribution();

                let start = Instant::now();

                self.table_region_manager
                    .compare_and_put(i, Some(table_region_value), new_region_distribution)
                    .await
                    .unwrap()
                    .unwrap();

                start.elapsed()
            },
            self.count,
        )
        .await;
    }

    async fn bench_remove(&self) {
        let desc = format!("TableRegionBencher: remove {} table regions", self.count);
        bench(
            &desc,
            |i| async move {
                assert!(self.table_region_manager.remove(i).await.unwrap().is_some());
            },
            self.count,
        )
        .await;
    }
}
@@ -20,7 +20,8 @@ use catalog::remote::CachedMetaKvBackend;
|
||||
use client::client_manager::DatanodeClients;
|
||||
use client::{Client, Database, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
|
||||
use common_base::Plugins;
|
||||
use common_error::prelude::ErrorExt;
|
||||
use common_error::ext::ErrorExt;
|
||||
use common_meta::key::TableMetadataManager;
|
||||
use common_query::Output;
|
||||
use common_recordbatch::RecordBatches;
|
||||
use common_telemetry::logging;
|
||||
@@ -164,10 +165,7 @@ impl Repl {
|
||||
let stmt = QueryLanguageParser::parse_sql(&sql)
|
||||
.with_context(|_| ParseSqlSnafu { sql: sql.clone() })?;
|
||||
|
||||
let query_ctx = Arc::new(QueryContext::with(
|
||||
self.database.catalog(),
|
||||
self.database.schema(),
|
||||
));
|
||||
let query_ctx = QueryContext::with(self.database.catalog(), self.database.schema());
|
||||
|
||||
let plan = query_engine
|
||||
.planner()
|
||||
@@ -182,7 +180,7 @@ impl Repl {
|
||||
.encode(plan)
|
||||
.context(SubstraitEncodeLogicalPlanSnafu)?;
|
||||
|
||||
self.database.logical_plan(plan.to_vec()).await
|
||||
self.database.logical_plan(plan.to_vec(), None).await
|
||||
} else {
|
||||
self.database.sql(&sql).await
|
||||
}
|
||||
@@ -263,9 +261,10 @@ async fn create_query_engine(meta_addr: &str) -> Result<DatafusionQueryEngine> {
|
||||
|
||||
let catalog_list = Arc::new(FrontendCatalogManager::new(
|
||||
cached_meta_backend.clone(),
|
||||
cached_meta_backend,
|
||||
cached_meta_backend.clone(),
|
||||
partition_manager,
|
||||
datanode_clients,
|
||||
Arc::new(TableMetadataManager::new(cached_meta_backend)),
|
||||
));
|
||||
let plugins: Arc<Plugins> = Default::default();
|
||||
let state = Arc::new(QueryEngineState::new(

src/cmd/src/cli/upgrade.rs (new file, 202 lines)
@@ -0,0 +1,202 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use async_trait::async_trait;
use clap::Parser;
use common_meta::helper::TableGlobalValue;
use common_meta::key::datanode_table::{DatanodeTableKey, DatanodeTableValue};
use common_meta::key::table_info::{TableInfoKey, TableInfoValue};
use common_meta::key::table_name::{TableNameKey, TableNameValue};
use common_meta::key::table_region::{RegionDistribution, TableRegionKey, TableRegionValue};
use common_meta::key::TableMetaKey;
use common_meta::rpc::store::{
    BatchDeleteRequest, BatchPutRequest, PutRequest, RangeRequest, RangeResponse,
};
use common_meta::util::get_prefix_end_key;
use common_telemetry::info;
use etcd_client::Client;
use meta_srv::service::store::etcd::EtcdStore;
use meta_srv::service::store::kv::KvStoreRef;
use snafu::ResultExt;

use crate::cli::{Instance, Tool};
use crate::error::{ConnectEtcdSnafu, Result};

#[derive(Debug, Default, Parser)]
pub struct UpgradeCommand {
    #[clap(long)]
    etcd_addr: String,
    #[clap(long)]
    dryrun: bool,
}

impl UpgradeCommand {
    pub async fn build(&self) -> Result<Instance> {
        let client = Client::connect([&self.etcd_addr], None)
            .await
            .context(ConnectEtcdSnafu {
                etcd_addr: &self.etcd_addr,
            })?;
        let tool = MigrateTableMetadata {
            etcd_store: EtcdStore::with_etcd_client(client),
            dryrun: self.dryrun,
        };
        Ok(Instance::Tool(Box::new(tool)))
    }
}

struct MigrateTableMetadata {
    etcd_store: KvStoreRef,
    dryrun: bool,
}

#[async_trait]
impl Tool for MigrateTableMetadata {
    async fn do_work(&self) -> Result<()> {
        let mut key = b"__tg".to_vec();
        let range_end = get_prefix_end_key(&key);

        let mut processed_keys = 0;
        loop {
            info!("Start scanning key from: {}", String::from_utf8_lossy(&key));

            let req = RangeRequest::new()
                .with_range(key, range_end.clone())
                .with_limit(1000);
            let resp = self.etcd_store.range(req).await.unwrap();
            for kv in resp.kvs.iter() {
                let key = String::from_utf8_lossy(kv.key());
                let value = TableGlobalValue::from_bytes(kv.value())
                    .unwrap_or_else(|e| panic!("table global value is corrupted: {e}, key: {key}"));

                self.create_table_name_key(&value).await;

                self.create_datanode_table_keys(&value).await;

                self.split_table_global_value(&key, value).await;
            }

            self.delete_migrated_keys(&resp).await;

            processed_keys += resp.kvs.len();

            if resp.more {
                key = get_prefix_end_key(resp.kvs.last().unwrap().key());
            } else {
                break;
            }
        }
        info!("Total migrated TableGlobalKeys: {processed_keys}");
        Ok(())
    }
}

impl MigrateTableMetadata {
    async fn delete_migrated_keys(&self, resp: &RangeResponse) {
        info!("Deleting {} TableGlobalKeys", resp.kvs.len());
        let req = BatchDeleteRequest {
            keys: resp.kvs.iter().map(|kv| kv.key().to_vec()).collect(),
            prev_kv: false,
        };
        if self.dryrun {
            info!("Dryrun: do nothing");
        } else {
            self.etcd_store.batch_delete(req).await.unwrap();
        }
    }

    async fn split_table_global_value(&self, key: &str, value: TableGlobalValue) {
        let table_id = value.table_id();
        let region_distribution: RegionDistribution = value.regions_id_map.into_iter().collect();

        let table_info_key = TableInfoKey::new(table_id);
        let table_info_value = TableInfoValue::new(value.table_info);

        let table_region_key = TableRegionKey::new(table_id);
        let table_region_value = TableRegionValue::new(region_distribution);

        info!("Splitting TableGlobalKey '{key}' into '{table_info_key}' and '{table_region_key}'");

        if self.dryrun {
            info!("Dryrun: do nothing");
        } else {
            self.etcd_store
                .batch_put(
                    BatchPutRequest::new()
                        .add_kv(
                            table_info_key.as_raw_key(),
                            table_info_value.try_as_raw_value().unwrap(),
                        )
                        .add_kv(
                            table_region_key.as_raw_key(),
                            table_region_value.try_as_raw_value().unwrap(),
                        ),
                )
                .await
                .unwrap();
        }
    }

    async fn create_table_name_key(&self, value: &TableGlobalValue) {
        let table_info = &value.table_info;
        let table_id = value.table_id();

        let table_name_key = TableNameKey::new(
            &table_info.catalog_name,
            &table_info.schema_name,
            &table_info.name,
        );
        let table_name_value = TableNameValue::new(table_id);

        info!("Creating '{table_name_key}' => {table_id}");

        if self.dryrun {
            info!("Dryrun: do nothing");
        } else {
            self.etcd_store
                .put(
                    PutRequest::new()
                        .with_key(table_name_key.as_raw_key())
                        .with_value(table_name_value.try_as_raw_value().unwrap()),
                )
                .await
                .unwrap();
        }
    }

    async fn create_datanode_table_keys(&self, value: &TableGlobalValue) {
        let table_id = value.table_id();
        let region_distribution: RegionDistribution =
            value.regions_id_map.clone().into_iter().collect();

        let datanode_table_kvs = region_distribution
            .into_iter()
            .map(|(datanode_id, regions)| {
                let k = DatanodeTableKey::new(datanode_id, table_id);
                info!("Creating DatanodeTableKey '{k}' => {regions:?}");
                (k, DatanodeTableValue::new(table_id, regions))
            })
            .collect::<Vec<_>>();

        if self.dryrun {
            info!("Dryrun: do nothing");
        } else {
            let mut req = BatchPutRequest::new();
            for (key, value) in datanode_table_kvs {
                req = req.add_kv(key.as_raw_key(), value.try_as_raw_value().unwrap());
            }
            self.etcd_store.batch_put(req).await.unwrap();
        }
    }
}
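The scan above pages through every key under the `__tg` prefix by pairing the prefix with an exclusive range end. A standalone sketch of what a prefix-end helper like `get_prefix_end_key` typically computes (an assumption about its behavior, not a copy of it):

```
// Hypothetical sketch: the smallest key strictly greater than every key
// that shares `prefix`, usable as an exclusive etcd range end.
fn prefix_end_key(prefix: &[u8]) -> Vec<u8> {
    let mut end = prefix.to_vec();
    while let Some(last) = end.last_mut() {
        if *last < 0xFF {
            *last += 1;
            return end;
        }
        // 0xFF cannot be incremented; drop it and carry into the next byte.
        end.pop();
    }
    // Every byte was 0xFF: fall back to etcd's "end of keyspace" sentinel.
    vec![0]
}

fn main() {
    assert_eq!(prefix_end_key(b"__tg"), b"__th".to_vec());
}
```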
@@ -206,6 +206,7 @@ mod tests {
        metasrv_addrs = ["127.0.0.1:3002"]
        timeout_millis = 3000
        connect_timeout_millis = 5000
        ddl_timeout_millis = 10000
        tcp_nodelay = true

        [wal]
@@ -259,10 +260,12 @@ mod tests {
            timeout_millis,
            connect_timeout_millis,
            tcp_nodelay,
            ddl_timeout_millis,
        } = options.meta_client_options.unwrap();

        assert_eq!(vec!["127.0.0.1:3002".to_string()], metasrv_addr);
        assert_eq!(5000, connect_timeout_millis);
        assert_eq!(10000, ddl_timeout_millis);
        assert_eq!(3000, timeout_millis);
        assert!(tcp_nodelay);

@@ -273,6 +276,7 @@ mod tests {
            ObjectStoreConfig::S3 { .. } => unreachable!(),
            ObjectStoreConfig::Oss { .. } => unreachable!(),
            ObjectStoreConfig::Azblob { .. } => unreachable!(),
            ObjectStoreConfig::Gcs { .. } => unreachable!(),
        };

        assert_eq!(
@@ -14,10 +14,11 @@

use std::any::Any;

use common_error::prelude::*;
use common_error::ext::ErrorExt;
use common_error::status_code::StatusCode;
use config::ConfigError;
use rustyline::error::ReadlineError;
use snafu::Location;
use snafu::{Location, Snafu};

#[derive(Debug, Snafu)]
#[snafu(visibility(pub))]
@@ -153,6 +154,13 @@ pub enum Error {
        location: Location,
        source: catalog::error::Error,
    },

    #[snafu(display("Failed to connect to Etcd at {etcd_addr}, source: {}", source))]
    ConnectEtcd {
        etcd_addr: String,
        source: etcd_client::Error,
        location: Location,
    },
}

pub type Result<T> = std::result::Result<T, Error>;
@@ -172,7 +180,9 @@ impl ErrorExt for Error {
            | Error::LoadLayeredConfig { .. }
            | Error::IllegalConfig { .. }
            | Error::InvalidReplCommand { .. }
            | Error::IllegalAuthConfig { .. } => StatusCode::InvalidArguments,
            | Error::IllegalAuthConfig { .. }
            | Error::ConnectEtcd { .. } => StatusCode::InvalidArguments,

            Error::ReplCreation { .. } | Error::Readline { .. } => StatusCode::Internal,
            Error::RequestDatabase { source, .. } => source.status_code(),
            Error::CollectRecordBatches { source, .. }
@@ -19,7 +19,7 @@ use common_base::Plugins;
use common_telemetry::logging;
use frontend::frontend::FrontendOptions;
use frontend::instance::{FrontendInstance, Instance as FeInstance};
use frontend::service_config::{InfluxdbOptions, PromOptions};
use frontend::service_config::{InfluxdbOptions, PrometheusOptions};
use meta_client::MetaClientOptions;
use servers::auth::UserProviderRef;
use servers::tls::{TlsMode, TlsOption};
@@ -172,7 +172,7 @@ impl StartCommand {
        }

        if let Some(addr) = &self.prom_addr {
            opts.prom_options = Some(PromOptions { addr: addr.clone() });
            opts.prometheus_options = Some(PrometheusOptions { addr: addr.clone() });
        }

        if let Some(addr) = &self.postgres_addr {
@@ -274,7 +274,10 @@ mod tests {
            opts.opentsdb_options.as_ref().unwrap().addr,
            "127.0.0.1:4321"
        );
        assert_eq!(opts.prom_options.as_ref().unwrap().addr, "127.0.0.1:4444");
        assert_eq!(
            opts.prometheus_options.as_ref().unwrap().addr,
            "127.0.0.1:4444"
        );

        let default_opts = FrontendOptions::default();
        assert_eq!(
@@ -23,7 +23,7 @@ use datanode::instance::InstanceRef;
use frontend::frontend::FrontendOptions;
use frontend::instance::{FrontendInstance, Instance as FeInstance};
use frontend::service_config::{
    GrpcOptions, InfluxdbOptions, MysqlOptions, OpentsdbOptions, PostgresOptions, PromOptions,
    GrpcOptions, InfluxdbOptions, MysqlOptions, OpentsdbOptions, PostgresOptions, PromStoreOptions,
    PrometheusOptions,
};
use serde::{Deserialize, Serialize};
@@ -89,8 +89,8 @@ pub struct StandaloneOptions {
    pub postgres_options: Option<PostgresOptions>,
    pub opentsdb_options: Option<OpentsdbOptions>,
    pub influxdb_options: Option<InfluxdbOptions>,
    pub prom_store_options: Option<PromStoreOptions>,
    pub prometheus_options: Option<PrometheusOptions>,
    pub prom_options: Option<PromOptions>,
    pub wal: WalConfig,
    pub storage: StorageConfig,
    pub procedure: ProcedureConfig,
@@ -108,8 +108,8 @@ impl Default for StandaloneOptions {
            postgres_options: Some(PostgresOptions::default()),
            opentsdb_options: Some(OpentsdbOptions::default()),
            influxdb_options: Some(InfluxdbOptions::default()),
            prom_store_options: Some(PromStoreOptions::default()),
            prometheus_options: Some(PrometheusOptions::default()),
            prom_options: Some(PromOptions::default()),
            wal: WalConfig::default(),
            storage: StorageConfig::default(),
            procedure: ProcedureConfig::default(),
@@ -128,8 +128,8 @@ impl StandaloneOptions {
            postgres_options: self.postgres_options,
            opentsdb_options: self.opentsdb_options,
            influxdb_options: self.influxdb_options,
            prom_store_options: self.prom_store_options,
            prometheus_options: self.prometheus_options,
            prom_options: self.prom_options,
            meta_client_options: None,
            logging: self.logging,
            ..Default::default()
@@ -269,7 +269,7 @@ impl StartCommand {
        }

        if let Some(addr) = &self.prom_addr {
            opts.prom_options = Some(PromOptions { addr: addr.clone() })
            opts.prometheus_options = Some(PrometheusOptions { addr: addr.clone() })
        }

        if let Some(addr) = &self.postgres_addr {
@@ -14,4 +14,4 @@ serde = { version = "1.0", features = ["derive"] }
snafu.workspace = true

[dev-dependencies]
toml = "0.5"
toml.workspace = true
@@ -16,7 +16,7 @@ use std::any::Any;
use std::io::{Read, Write};

use bytes::{Buf, BufMut, BytesMut};
use common_error::prelude::ErrorExt;
use common_error::ext::ErrorExt;
use paste::paste;
use snafu::{ensure, Location, ResultExt, Snafu};

@@ -9,8 +9,6 @@ async-trait = "0.1"
common-error = { path = "../error" }
common-telemetry = { path = "../telemetry" }
datatypes = { path = "../../datatypes" }
lazy_static = "1.4"
regex = "1.6"
serde.workspace = true
serde_json = "1.0"
snafu = { version = "0.7", features = ["backtraces"] }
@@ -15,8 +15,8 @@
use std::any::Any;

use common_error::ext::ErrorExt;
use common_error::prelude::{Snafu, StatusCode};
use snafu::Location;
use common_error::status_code::StatusCode;
use snafu::{Location, Snafu};

#[derive(Debug, Snafu)]
#[snafu(visibility(pub))]
@@ -21,10 +21,10 @@ common-base = { path = "../base" }
common-error = { path = "../error" }
common-runtime = { path = "../runtime" }
datafusion.workspace = true
derive_builder = "0.12"
derive_builder.workspace = true
futures.workspace = true
object-store = { path = "../../object-store" }
orc-rust = { git = "https://github.com/WenyXu/orc-rs.git", rev = "0319acd32456e403c20f135cc012441a76852605" }
orc-rust = "0.2"
regex = "1.7"
snafu.workspace = true
tokio.workspace = true
@@ -15,9 +15,10 @@
use std::any::Any;

use arrow_schema::ArrowError;
use common_error::prelude::*;
use common_error::ext::ErrorExt;
use common_error::status_code::StatusCode;
use datafusion::parquet::errors::ParquetError;
use snafu::Location;
use snafu::{Location, Snafu};
use url::ParseError;

#[derive(Debug, Snafu)]
@@ -13,18 +13,20 @@
// limitations under the License.

use std::pin::Pin;
use std::sync::Arc;
use std::task::{Context, Poll};

use arrow_schema::{Schema, SchemaRef};
use arrow::compute::cast;
use arrow_schema::{ArrowError, Schema, SchemaRef};
use async_trait::async_trait;
use datafusion::arrow::record_batch::RecordBatch as DfRecordBatch;
use datafusion::error::{DataFusionError, Result as DfResult};
use datafusion::physical_plan::file_format::{FileMeta, FileOpenFuture, FileOpener};
use datafusion::physical_plan::RecordBatchStream;
use futures::Stream;
use futures::{Stream, StreamExt, TryStreamExt};
use object_store::ObjectStore;
use orc_rust::arrow_reader::{create_arrow_schema, Cursor};
use orc_rust::async_arrow_reader::ArrowStreamReader;
pub use orc_rust::error::Error as OrcError;
use orc_rust::reader::Reader;
use snafu::ResultExt;
use tokio::io::{AsyncRead, AsyncSeek};
@@ -60,12 +62,28 @@ pub async fn infer_orc_schema<R: AsyncRead + AsyncSeek + Unpin + Send + 'static>
}

pub struct OrcArrowStreamReaderAdapter<T: AsyncRead + AsyncSeek + Unpin + Send + 'static> {
    output_schema: SchemaRef,
    projection: Vec<usize>,
    stream: ArrowStreamReader<T>,
}

impl<T: AsyncRead + AsyncSeek + Unpin + Send + 'static> OrcArrowStreamReaderAdapter<T> {
    pub fn new(stream: ArrowStreamReader<T>) -> Self {
        Self { stream }
    pub fn new(
        output_schema: SchemaRef,
        stream: ArrowStreamReader<T>,
        projection: Option<Vec<usize>>,
    ) -> Self {
        let projection = if let Some(projection) = projection {
            projection
        } else {
            (0..output_schema.fields().len()).collect()
        };

        Self {
            output_schema,
            projection,
            stream,
        }
    }
}

@@ -73,7 +91,7 @@ impl<T: AsyncRead + AsyncSeek + Unpin + Send + 'static> RecordBatchStream
    for OrcArrowStreamReaderAdapter<T>
{
    fn schema(&self) -> SchemaRef {
        self.stream.schema()
        self.output_schema.clone()
    }
}

@@ -83,6 +101,29 @@ impl<T: AsyncRead + AsyncSeek + Unpin + Send + 'static> Stream for OrcArrowStrea
    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        let batch = futures::ready!(Pin::new(&mut self.stream).poll_next(cx))
            .map(|r| r.map_err(|e| DataFusionError::External(Box::new(e))));

        let projected_schema = self.output_schema.project(&self.projection)?;
        let batch = batch.map(|b| {
            b.and_then(|b| {
                let mut columns = Vec::with_capacity(self.projection.len());
                for idx in self.projection.iter() {
                    let column = b.column(*idx);
                    let field = self.output_schema.field(*idx);

                    if column.data_type() != field.data_type() {
                        let output = cast(&column, field.data_type())?;
                        columns.push(output)
                    } else {
                        columns.push(column.clone())
                    }
                }

                let record_batch = DfRecordBatch::try_new(projected_schema.into(), columns)?;

                Ok(record_batch)
            })
        });

        Poll::Ready(batch)
    }
}
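The casting step above is what lets the declared table schema win over whatever physical types the ORC decoder produced. Here is the normalization in isolation, as a minimal standalone sketch using the arrow `cast` kernel (array contents are arbitrary):

```
use std::sync::Arc;

use arrow::array::{ArrayRef, Int64Array};
use arrow::compute::cast;
use arrow::datatypes::DataType;

fn main() {
    // Same per-column rule as poll_next above: if the decoded column's type
    // differs from the declared output field, cast; otherwise keep it.
    let decoded: ArrayRef = Arc::new(Int64Array::from(vec![1, 2, 3]));
    let wanted = DataType::Int32;

    let column = if decoded.data_type() != &wanted {
        cast(&decoded, &wanted).expect("cast failed")
    } else {
        decoded
    };
    assert_eq!(column.data_type(), &wanted);
}
```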
@@ -100,3 +141,92 @@ impl FileFormat for OrcFormat {
        Ok(schema)
    }
}

#[derive(Debug, Clone)]
pub struct OrcOpener {
    object_store: Arc<ObjectStore>,
    output_schema: SchemaRef,
    projection: Option<Vec<usize>>,
}

impl OrcOpener {
    pub fn new(
        object_store: ObjectStore,
        output_schema: SchemaRef,
        projection: Option<Vec<usize>>,
    ) -> Self {
        Self {
            object_store: Arc::from(object_store),
            output_schema,
            projection,
        }
    }
}

impl FileOpener for OrcOpener {
    fn open(&self, meta: FileMeta) -> DfResult<FileOpenFuture> {
        let object_store = self.object_store.clone();
        let output_schema = self.output_schema.clone();
        let projection = self.projection.clone();
        Ok(Box::pin(async move {
            let reader = object_store
                .reader(meta.location().to_string().as_str())
                .await
                .map_err(|e| DataFusionError::External(Box::new(e)))?;

            let stream_reader = new_orc_stream_reader(reader)
                .await
                .map_err(|e| DataFusionError::External(Box::new(e)))?;

            let stream = OrcArrowStreamReaderAdapter::new(output_schema, stream_reader, projection);

            let adapted = stream.map_err(|e| ArrowError::ExternalError(Box::new(e)));
            Ok(adapted.boxed())
        }))
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::file_format::FileFormat;
    use crate::test_util::{self, format_schema, test_store};

    fn test_data_root() -> String {
        test_util::get_data_dir("tests/orc").display().to_string()
    }

    #[tokio::test]
    async fn test_orc_infer_schema() {
        let orc = OrcFormat::default();
        let store = test_store(&test_data_root());
        let schema = orc.infer_schema(&store, "test.orc").await.unwrap();
        let formatted: Vec<_> = format_schema(schema);

        assert_eq!(
            vec![
                "double_a: Float64: NULL",
                "a: Float32: NULL",
                "b: Boolean: NULL",
                "str_direct: Utf8: NULL",
                "d: Utf8: NULL",
                "e: Utf8: NULL",
                "f: Utf8: NULL",
                "int_short_repeated: Int32: NULL",
                "int_neg_short_repeated: Int32: NULL",
                "int_delta: Int32: NULL",
                "int_neg_delta: Int32: NULL",
                "int_direct: Int32: NULL",
                "int_neg_direct: Int32: NULL",
                "bigint_direct: Int64: NULL",
                "bigint_neg_direct: Int64: NULL",
                "bigint_other: Int64: NULL",
                "utf8_increase: Utf8: NULL",
                "utf8_decrease: Utf8: NULL",
                "timestamp_simple: Timestamp(Nanosecond, None): NULL",
                "date_simple: Date32: NULL"
            ],
            formatted
        );
    }
}
@@ -30,8 +30,9 @@ use crate::compression::CompressionType;
use crate::error;
use crate::file_format::csv::{CsvConfigBuilder, CsvOpener};
use crate::file_format::json::JsonOpener;
use crate::file_format::orc::{OrcFormat, OrcOpener};
use crate::file_format::parquet::DefaultParquetFileReaderFactory;
use crate::file_format::Format;
use crate::file_format::{FileFormat, Format};
use crate::test_util::{self, scan_config, test_basic_schema, test_store};

struct Test<'a, T: FileOpener> {
@@ -193,6 +194,51 @@ async fn test_parquet_exec() {
    );
}

#[tokio::test]
async fn test_orc_opener() {
    let root = test_util::get_data_dir("tests/orc").display().to_string();
    let store = test_store(&root);
    let orc = OrcFormat::default();
    let schema = orc.infer_schema(&store, "test.orc").await.unwrap();
    let schema = Arc::new(schema);

    let orc_opener = OrcOpener::new(store.clone(), schema.clone(), None);
    let path = &test_util::get_data_dir("/test.orc").display().to_string();

    let tests = [
        Test {
            config: scan_config(schema.clone(), None, path),
            opener: orc_opener.clone(),
            expected: vec![
                "+----------+-----+-------+------------+-----+-----+-------+--------------------+------------------------+-----------+---------------+------------+----------------+---------------+-------------------+--------------+---------------+---------------+----------------------------+-------------+",
                "| double_a | a | b | str_direct | d | e | f | int_short_repeated | int_neg_short_repeated | int_delta | int_neg_delta | int_direct | int_neg_direct | bigint_direct | bigint_neg_direct | bigint_other | utf8_increase | utf8_decrease | timestamp_simple | date_simple |",
                "+----------+-----+-------+------------+-----+-----+-------+--------------------+------------------------+-----------+---------------+------------+----------------+---------------+-------------------+--------------+---------------+---------------+----------------------------+-------------+",
                "| 1.0 | 1.0 | true | a | a | ddd | aaaaa | 5 | -5 | 1 | 5 | 1 | -1 | 1 | -1 | 5 | a | eeeee | 2023-04-01T20:15:30.002 | 2023-04-01 |",
                "| 2.0 | 2.0 | false | cccccc | bb | cc | bbbbb | 5 | -5 | 2 | 4 | 6 | -6 | 6 | -6 | -5 | bb | dddd | 2021-08-22T07:26:44.525777 | 2023-03-01 |",
                "| 3.0 | | | | | | | | | | | | | | | 1 | ccc | ccc | 2023-01-01T00:00:00 | 2023-01-01 |",
                "| 4.0 | 4.0 | true | ddd | ccc | bb | ccccc | 5 | -5 | 4 | 2 | 3 | -3 | 3 | -3 | 5 | dddd | bb | 2023-02-01T00:00:00 | 2023-02-01 |",
                "| 5.0 | 5.0 | false | ee | ddd | a | ddddd | 5 | -5 | 5 | 1 | 2 | -2 | 2 | -2 | 5 | eeeee | a | 2023-03-01T00:00:00 | 2023-03-01 |",
                "+----------+-----+-------+------------+-----+-----+-------+--------------------+------------------------+-----------+---------------+------------+----------------+---------------+-------------------+--------------+---------------+---------------+----------------------------+-------------+",
            ],
        },
        Test {
            config: scan_config(schema.clone(), Some(1), path),
            opener: orc_opener.clone(),
            expected: vec![
                "+----------+-----+------+------------+---+-----+-------+--------------------+------------------------+-----------+---------------+------------+----------------+---------------+-------------------+--------------+---------------+---------------+-------------------------+-------------+",
                "| double_a | a | b | str_direct | d | e | f | int_short_repeated | int_neg_short_repeated | int_delta | int_neg_delta | int_direct | int_neg_direct | bigint_direct | bigint_neg_direct | bigint_other | utf8_increase | utf8_decrease | timestamp_simple | date_simple |",
                "+----------+-----+------+------------+---+-----+-------+--------------------+------------------------+-----------+---------------+------------+----------------+---------------+-------------------+--------------+---------------+---------------+-------------------------+-------------+",
                "| 1.0 | 1.0 | true | a | a | ddd | aaaaa | 5 | -5 | 1 | 5 | 1 | -1 | 1 | -1 | 5 | a | eeeee | 2023-04-01T20:15:30.002 | 2023-04-01 |",
                "+----------+-----+------+------------+---+-----+-------+--------------------+------------------------+-----------+---------------+------------+----------------+---------------+-------------------+--------------+---------------+---------------+-------------------------+-------------+",
            ],
        },
    ];

    for test in tests {
        test.run().await;
    }
}

#[test]
fn test_format() {
    let value = [(FORMAT_TYPE.to_string(), "csv".to_string())]
@@ -213,6 +259,12 @@ fn test_format() {

    assert_matches!(Format::try_from(&value).unwrap(), Format::Json(_));

    let value = [(FORMAT_TYPE.to_string(), "ORC".to_string())]
        .into_iter()
        .collect::<HashMap<_, _>>();

    assert_matches!(Format::try_from(&value).unwrap(), Format::Orc(_));

    let value = [(FORMAT_TYPE.to_string(), "Foobar".to_string())]
        .into_iter()
        .collect::<HashMap<_, _>>();
@@ -20,7 +20,7 @@ use snafu::ResultExt;

use crate::error::{self, Result};

const ENDPOINT_URL: &str = "endpoint_url";
const ENDPOINT: &str = "endpoint";
const ACCESS_KEY_ID: &str = "access_key_id";
const SECRET_ACCESS_KEY: &str = "secret_access_key";
const SESSION_TOKEN: &str = "session_token";
@@ -36,7 +36,7 @@ pub fn build_s3_backend(

    let _ = builder.root(path).bucket(host);

    if let Some(endpoint) = connection.get(ENDPOINT_URL) {
    if let Some(endpoint) = connection.get(ENDPOINT) {
        let _ = builder.endpoint(endpoint);
    }
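With the constant renamed, callers supply the S3 endpoint under the shorter `endpoint` key. For illustration, a hypothetical connection map as `build_s3_backend` would consume it (all values are placeholders):

```
use std::collections::HashMap;

fn main() {
    // Hypothetical options: the key is now "endpoint", not "endpoint_url".
    let connection: HashMap<String, String> = [
        ("endpoint", "http://127.0.0.1:9000"),
        ("access_key_id", "my_key"),
        ("secret_access_key", "my_secret"),
    ]
    .into_iter()
    .map(|(k, v)| (k.to_string(), v.to_string()))
    .collect();

    assert!(connection.get("endpoint").is_some());
}
```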
@@ -9,3 +9,46 @@ venv/bin/pip install -U pyorc

cargo test
```

Schema:
```
+------------------------+-----------------------------+-------------+
| column_name            | data_type                   | is_nullable |
+------------------------+-----------------------------+-------------+
| double_a               | Float64                     | YES         |
| a                      | Float32                     | YES         |
| b                      | Boolean                     | YES         |
| str_direct             | Utf8                        | YES         |
| d                      | Utf8                        | YES         |
| e                      | Utf8                        | YES         |
| f                      | Utf8                        | YES         |
| int_short_repeated     | Int32                       | YES         |
| int_neg_short_repeated | Int32                       | YES         |
| int_delta              | Int32                       | YES         |
| int_neg_delta          | Int32                       | YES         |
| int_direct             | Int32                       | YES         |
| int_neg_direct         | Int32                       | YES         |
| bigint_direct          | Int64                       | YES         |
| bigint_neg_direct      | Int64                       | YES         |
| bigint_other           | Int64                       | YES         |
| utf8_increase          | Utf8                        | YES         |
| utf8_decrease          | Utf8                        | YES         |
| timestamp_simple       | Timestamp(Nanosecond, None) | YES         |
| date_simple            | Date32                      | YES         |
+------------------------+-----------------------------+-------------+
```

Data:
```
"+----------+-----+-------+------------+-----+-----+-------+--------------------+------------------------+-----------+---------------+------------+----------------+---------------+-------------------+--------------+---------------+---------------+----------------------------+-------------+",
"| double_a | a | b | str_direct | d | e | f | int_short_repeated | int_neg_short_repeated | int_delta | int_neg_delta | int_direct | int_neg_direct | bigint_direct | bigint_neg_direct | bigint_other | utf8_increase | utf8_decrease | timestamp_simple | date_simple |",
"+----------+-----+-------+------------+-----+-----+-------+--------------------+------------------------+-----------+---------------+------------+----------------+---------------+-------------------+--------------+---------------+---------------+----------------------------+-------------+",
"| 1.0 | 1.0 | true | a | a | ddd | aaaaa | 5 | -5 | 1 | 5 | 1 | -1 | 1 | -1 | 5 | a | eeeee | 2023-04-01T20:15:30.002 | 2023-04-01 |",
"| 2.0 | 2.0 | false | cccccc | bb | cc | bbbbb | 5 | -5 | 2 | 4 | 6 | -6 | 6 | -6 | -5 | bb | dddd | 2021-08-22T07:26:44.525777 | 2023-03-01 |",
"| 3.0 | | | | | | | | | | | | | | | 1 | ccc | ccc | 2023-01-01T00:00:00 | 2023-01-01 |",
"| 4.0 | 4.0 | true | ddd | ccc | bb | ccccc | 5 | -5 | 4 | 2 | 3 | -3 | 3 | -3 | 5 | dddd | bb | 2023-02-01T00:00:00 | 2023-02-01 |",
"| 5.0 | 5.0 | false | ee | ddd | a | ddddd | 5 | -5 | 5 | 1 | 2 | -2 | 2 | -2 | 5 | eeeee | a | 2023-03-01T00:00:00 | 2023-03-01 |",
"+----------+-----+-------+------------+-----+-----+-------+--------------------+------------------------+-----------+---------------+------------+----------------+---------------+-------------------+--------------+---------------+---------------+----------------------------+-------------+",
```
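To poke at the fixture from Rust, a sketch mirroring the crate's own tests above; `test_store` is the helper from the test utilities shown earlier, and the exact module paths are assumptions:

```
use common_datasource::file_format::orc::OrcFormat;
use common_datasource::file_format::FileFormat;

#[tokio::test]
async fn inspect_orc_fixture() {
    // Hypothetical usage, mirroring test_orc_infer_schema above.
    let store = test_store("tests/orc");
    let schema = OrcFormat::default()
        .infer_schema(&store, "test.orc")
        .await
        .unwrap();
    // 20 columns, matching the schema table documented above.
    assert_eq!(schema.fields().len(), 20);
}
```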
@@ -17,16 +17,7 @@ pub mod format;
pub mod mock;
pub mod status_code;

pub mod prelude {
    pub use snafu::prelude::*;
    pub use snafu::{Backtrace, ErrorCompat};

    pub use crate::ext::{BoxedError, ErrorExt};
    pub use crate::format::DebugFormat;
    pub use crate::status_code::StatusCode;

    pub const INNER_ERROR_CODE: &str = "INNER_ERROR_CODE";
    pub const INNER_ERROR_MSG: &str = "INNER_ERROR_MSG";
}
pub const INNER_ERROR_CODE: &str = "INNER_ERROR_CODE";
pub const INNER_ERROR_MSG: &str = "INNER_ERROR_MSG";

pub use snafu;
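The dismantled prelude is the thread running through most hunks in this changeset; at each call site the mechanical migration looks like this (a schematic before/after, not any one specific file):

```
// Before: one glob pulled in snafu's prelude, ErrorExt, StatusCode, and more.
// use common_error::prelude::*;

// After: every item is named from its own module.
use common_error::ext::ErrorExt;
use common_error::status_code::StatusCode;
use snafu::{Location, Snafu};
```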
@@ -19,7 +19,8 @@ use std::fmt;

use snafu::Location;

use crate::prelude::*;
use crate::ext::ErrorExt;
use crate::status_code::StatusCode;

/// A mock error mainly for test.
#[derive(Debug)]
@@ -66,6 +66,9 @@ pub enum StatusCode {
    // ====== Begin of server related status code =====
    /// Runtime resources exhausted, like creating threads failed.
    RuntimeResourcesExhausted = 6000,

    /// Rate limit exceeded
    RateLimited = 6001,
    // ====== End of server related status code =======

    // ====== Begin of auth related status code =====
@@ -111,6 +114,7 @@ impl StatusCode {
            | StatusCode::TableColumnNotFound
            | StatusCode::TableColumnExists
            | StatusCode::DatabaseNotFound
            | StatusCode::RateLimited
            | StatusCode::UserNotFound
            | StatusCode::UnsupportedPasswordType
            | StatusCode::UserPasswordMismatch
@@ -141,6 +145,7 @@ impl StatusCode {
            | StatusCode::TableColumnNotFound
            | StatusCode::TableColumnExists
            | StatusCode::DatabaseNotFound
            | StatusCode::RateLimited
            | StatusCode::UserNotFound
            | StatusCode::UnsupportedPasswordType
            | StatusCode::UserPasswordMismatch
@@ -149,6 +154,44 @@ impl StatusCode {
            | StatusCode::AccessDenied => false,
        }
    }

    pub fn from_u32(value: u32) -> Option<Self> {
        match value {
            v if v == StatusCode::Success as u32 => Some(StatusCode::Success),
            v if v == StatusCode::Unknown as u32 => Some(StatusCode::Unknown),
            v if v == StatusCode::Unsupported as u32 => Some(StatusCode::Unsupported),
            v if v == StatusCode::Unexpected as u32 => Some(StatusCode::Unexpected),
            v if v == StatusCode::Internal as u32 => Some(StatusCode::Internal),
            v if v == StatusCode::InvalidArguments as u32 => Some(StatusCode::InvalidArguments),
            v if v == StatusCode::Cancelled as u32 => Some(StatusCode::Cancelled),
            v if v == StatusCode::InvalidSyntax as u32 => Some(StatusCode::InvalidSyntax),
            v if v == StatusCode::PlanQuery as u32 => Some(StatusCode::PlanQuery),
            v if v == StatusCode::EngineExecuteQuery as u32 => Some(StatusCode::EngineExecuteQuery),
            v if v == StatusCode::TableAlreadyExists as u32 => Some(StatusCode::TableAlreadyExists),
            v if v == StatusCode::TableNotFound as u32 => Some(StatusCode::TableNotFound),
            v if v == StatusCode::TableColumnNotFound as u32 => {
                Some(StatusCode::TableColumnNotFound)
            }
            v if v == StatusCode::TableColumnExists as u32 => Some(StatusCode::TableColumnExists),
            v if v == StatusCode::DatabaseNotFound as u32 => Some(StatusCode::DatabaseNotFound),
            v if v == StatusCode::StorageUnavailable as u32 => Some(StatusCode::StorageUnavailable),
            v if v == StatusCode::RuntimeResourcesExhausted as u32 => {
                Some(StatusCode::RuntimeResourcesExhausted)
            }
            v if v == StatusCode::RateLimited as u32 => Some(StatusCode::RateLimited),
            v if v == StatusCode::UserNotFound as u32 => Some(StatusCode::UserNotFound),
            v if v == StatusCode::UnsupportedPasswordType as u32 => {
                Some(StatusCode::UnsupportedPasswordType)
            }
            v if v == StatusCode::UserPasswordMismatch as u32 => {
                Some(StatusCode::UserPasswordMismatch)
            }
            v if v == StatusCode::AuthHeaderNotFound as u32 => Some(StatusCode::AuthHeaderNotFound),
            v if v == StatusCode::InvalidAuthHeader as u32 => Some(StatusCode::InvalidAuthHeader),
            v if v == StatusCode::AccessDenied as u32 => Some(StatusCode::AccessDenied),
            _ => None,
        }
    }
}

impl fmt::Display for StatusCode {
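A quick illustration of the contract `from_u32` maintains by hand in the absence of a derive (a hypothetical test, not one from this change):

```
#[test]
fn status_code_u32_round_trip() {
    // Discriminants round-trip through the manual match table.
    assert_eq!(StatusCode::from_u32(6001), Some(StatusCode::RateLimited));
    assert_eq!(
        StatusCode::from_u32(StatusCode::Internal as u32),
        Some(StatusCode::Internal)
    );
    // Values outside the table map to None.
    assert_eq!(StatusCode::from_u32(u32::MAX), None);
}
```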
@@ -16,7 +16,7 @@ datatypes = { path = "../../datatypes" }
libc = "0.2"
num = "0.4"
num-traits = "0.2"
once_cell = "1.10"
once_cell.workspace = true
paste = "1.0"
snafu.workspace = true
statrs = "0.16"
@@ -38,7 +38,7 @@ pub fn alter_expr_to_request(table_id: TableId, expr: AlterExpr) -> Result<Alter
    let catalog_name = expr.catalog_name;
    let schema_name = expr.schema_name;
    let kind = expr.kind.context(MissingFieldSnafu { field: "kind" })?;
    match kind {
    let alter_kind = match kind {
        Kind::AddColumns(add_columns) => {
            let add_column_requests = add_columns
                .add_columns
@@ -61,45 +61,27 @@ pub fn alter_expr_to_request(table_id: TableId, expr: AlterExpr) -> Result<Alter
                })
                .collect::<Result<Vec<_>>>()?;

            let alter_kind = AlterKind::AddColumns {
            AlterKind::AddColumns {
                columns: add_column_requests,
            };

            let request = AlterTableRequest {
                catalog_name,
                schema_name,
                table_name: expr.table_name,
                table_id,
                alter_kind,
            };
            Ok(request)
        }
        Kind::DropColumns(DropColumns { drop_columns }) => {
            let alter_kind = AlterKind::DropColumns {
                names: drop_columns.into_iter().map(|c| c.name).collect(),
            };

            let request = AlterTableRequest {
                catalog_name,
                schema_name,
                table_name: expr.table_name,
                table_id,
                alter_kind,
            };
            Ok(request)
        }
        }
        Kind::DropColumns(DropColumns { drop_columns }) => AlterKind::DropColumns {
            names: drop_columns.into_iter().map(|c| c.name).collect(),
        },
        Kind::RenameTable(RenameTable { new_table_name }) => {
            let alter_kind = AlterKind::RenameTable { new_table_name };
            let request = AlterTableRequest {
                catalog_name,
                schema_name,
                table_name: expr.table_name,
                table_id,
                alter_kind,
            };
            Ok(request)
            AlterKind::RenameTable { new_table_name }
        }
    }
    };

    let request = AlterTableRequest {
        catalog_name,
        schema_name,
        table_name: expr.table_name,
        table_id,
        alter_kind,
        table_version: Some(expr.table_version),
    };
    Ok(request)
}
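The refactor's point is easier to see without the interleaved before/after: each match arm now yields only an `AlterKind`, and the `AlterTableRequest` is assembled exactly once. Schematically (`build_add_column_requests` stands in for the column conversion above and is not a real helper):

```
let alter_kind = match kind {
    Kind::AddColumns(add_columns) => AlterKind::AddColumns {
        columns: build_add_column_requests(add_columns)?,
    },
    Kind::DropColumns(DropColumns { drop_columns }) => AlterKind::DropColumns {
        names: drop_columns.into_iter().map(|c| c.name).collect(),
    },
    Kind::RenameTable(RenameTable { new_table_name }) => {
        AlterKind::RenameTable { new_table_name }
    }
};

// One construction site instead of three, now carrying the table version.
let request = AlterTableRequest {
    catalog_name,
    schema_name,
    table_name: expr.table_name,
    table_id,
    alter_kind,
    table_version: Some(expr.table_version),
};
Ok(request)
```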

pub fn create_table_schema(expr: &CreateTableExpr, require_time_index: bool) -> Result<RawSchema> {
@@ -240,6 +222,7 @@ mod tests {
                location: None,
            }],
        })),
        ..Default::default()
    };

    let alter_request = alter_expr_to_request(1, expr).unwrap();
@@ -297,6 +280,7 @@ mod tests {
            },
        ],
        })),
        ..Default::default()
    };

    let alter_request = alter_expr_to_request(1, expr).unwrap();
@@ -345,6 +329,7 @@ mod tests {
            name: "mem_usage".to_string(),
        }],
        })),
        ..Default::default()
    };

    let alter_request = alter_expr_to_request(1, expr).unwrap();
@@ -15,8 +15,8 @@
use std::any::Any;

use common_error::ext::ErrorExt;
use common_error::prelude::{Snafu, StatusCode};
use snafu::Location;
use common_error::status_code::StatusCode;
use snafu::{Location, Snafu};

#[derive(Debug, Snafu)]
#[snafu(visibility(pub))]
@@ -22,17 +22,19 @@ use api::v1::{
    InsertRequest as GrpcInsertRequest,
};
use common_base::BitVec;
use common_time::time::Time;
use common_time::timestamp::Timestamp;
use common_time::{Date, DateTime};
use datatypes::data_type::{ConcreteDataType, DataType};
use datatypes::prelude::{ValueRef, VectorRef};
use datatypes::scalars::ScalarVector;
use datatypes::schema::SchemaRef;
use datatypes::types::{Int16Type, Int8Type, TimestampType, UInt16Type, UInt8Type};
use datatypes::types::{Int16Type, Int8Type, TimeType, TimestampType, UInt16Type, UInt8Type};
use datatypes::value::Value;
use datatypes::vectors::{
    BinaryVector, BooleanVector, DateTimeVector, DateVector, Float32Vector, Float64Vector,
    Int32Vector, Int64Vector, PrimitiveVector, StringVector, TimestampMicrosecondVector,
    Int32Vector, Int64Vector, PrimitiveVector, StringVector, TimeMicrosecondVector,
    TimeMillisecondVector, TimeNanosecondVector, TimeSecondVector, TimestampMicrosecondVector,
    TimestampMillisecondVector, TimestampNanosecondVector, TimestampSecondVector, UInt32Vector,
    UInt64Vector,
};
@@ -194,6 +196,26 @@ fn collect_column_values(column_datatype: ColumnDataType, values: &Values) -> Ve
                Timestamp::new_nanosecond(*v)
            ))
        }
        ColumnDataType::TimeSecond => {
            collect_values!(values.time_second_values, |v| ValueRef::Time(
                Time::new_second(*v)
            ))
        }
        ColumnDataType::TimeMillisecond => {
            collect_values!(values.time_millisecond_values, |v| ValueRef::Time(
                Time::new_millisecond(*v)
            ))
        }
        ColumnDataType::TimeMicrosecond => {
            collect_values!(values.time_microsecond_values, |v| ValueRef::Time(
                Time::new_microsecond(*v)
            ))
        }
        ColumnDataType::TimeNanosecond => {
            collect_values!(values.time_nanosecond_values, |v| ValueRef::Time(
                Time::new_nanosecond(*v)
            ))
        }
    }
}

@@ -387,6 +409,21 @@ fn values_to_vector(data_type: &ConcreteDataType, values: Values) -> VectorRef {
            values.ts_nanosecond_values,
        )),
    },
    ConcreteDataType::Time(unit) => match unit {
        TimeType::Second(_) => Arc::new(TimeSecondVector::from_iter_values(
            values.time_second_values.iter().map(|x| *x as i32),
        )),
        TimeType::Millisecond(_) => Arc::new(TimeMillisecondVector::from_iter_values(
            values.time_millisecond_values.iter().map(|x| *x as i32),
        )),
        TimeType::Microsecond(_) => Arc::new(TimeMicrosecondVector::from_vec(
            values.time_microsecond_values,
        )),
        TimeType::Nanosecond(_) => Arc::new(TimeNanosecondVector::from_vec(
            values.time_nanosecond_values,
        )),
    },

    ConcreteDataType::Null(_) | ConcreteDataType::List(_) | ConcreteDataType::Dictionary(_) => {
        unreachable!()
    }
@@ -495,6 +532,27 @@ fn convert_values(data_type: &ConcreteDataType, values: Values) -> Vec<Value> {
        .into_iter()
        .map(|v| Value::Timestamp(Timestamp::new_nanosecond(v)))
        .collect(),
    ConcreteDataType::Time(TimeType::Second(_)) => values
        .time_second_values
        .into_iter()
        .map(|v| Value::Time(Time::new_second(v)))
        .collect(),
    ConcreteDataType::Time(TimeType::Millisecond(_)) => values
        .time_millisecond_values
        .into_iter()
        .map(|v| Value::Time(Time::new_millisecond(v)))
        .collect(),
    ConcreteDataType::Time(TimeType::Microsecond(_)) => values
        .time_microsecond_values
        .into_iter()
        .map(|v| Value::Time(Time::new_microsecond(v)))
        .collect(),
    ConcreteDataType::Time(TimeType::Nanosecond(_)) => values
        .time_nanosecond_values
        .into_iter()
        .map(|v| Value::Time(Time::new_nanosecond(v)))
        .collect(),

    ConcreteDataType::Null(_) | ConcreteDataType::List(_) | ConcreteDataType::Dictionary(_) => {
        unreachable!()
    }
@@ -515,10 +573,13 @@ mod tests {
    use api::v1::{Column, ColumnDataType};
    use common_base::BitVec;
    use common_catalog::consts::MITO_ENGINE;
    use common_time::timestamp::Timestamp;
    use common_time::timestamp::{TimeUnit, Timestamp};
    use datatypes::data_type::ConcreteDataType;
    use datatypes::schema::{ColumnSchema, SchemaBuilder};
    use datatypes::types::{TimestampMillisecondType, TimestampSecondType, TimestampType};
    use datatypes::types::{
        TimeMillisecondType, TimeSecondType, TimeType, TimestampMillisecondType,
        TimestampSecondType, TimestampType,
    };
    use datatypes::value::Value;
    use paste::paste;
    use snafu::ResultExt;
@@ -575,8 +636,8 @@ mod tests {
    );

    let column_defs = create_expr.column_defs;
    assert_eq!(column_defs[3].name, create_expr.time_index);
    assert_eq!(4, column_defs.len());
    assert_eq!(column_defs[4].name, create_expr.time_index);
    assert_eq!(5, column_defs.len());

    assert_eq!(
        ConcreteDataType::string_datatype(),
@@ -620,6 +681,20 @@ mod tests {
        )
    );

    assert_eq!(
        ConcreteDataType::time_datatype(TimeUnit::Millisecond),
        ConcreteDataType::from(
            ColumnDataTypeWrapper::try_new(
                column_defs
                    .iter()
                    .find(|c| c.name == "time")
                    .unwrap()
                    .datatype
            )
            .unwrap()
        )
    );

    assert_eq!(
        ConcreteDataType::timestamp_millisecond_datatype(),
        ConcreteDataType::from(
@@ -653,7 +728,7 @@ mod tests {

    let add_columns = find_new_columns(&schema, &insert_batch.0).unwrap().unwrap();

    assert_eq!(2, add_columns.add_columns.len());
    assert_eq!(3, add_columns.add_columns.len());
    let host_column = &add_columns.add_columns[0];
    assert!(host_column.is_key);

@@ -675,6 +750,17 @@ mod tests {
            .unwrap()
        )
    );

    let time_column = &add_columns.add_columns[2];
    assert!(!time_column.is_key);

    assert_eq!(
        ConcreteDataType::time_datatype(TimeUnit::Millisecond),
        ConcreteDataType::from(
            ColumnDataTypeWrapper::try_new(time_column.column_def.as_ref().unwrap().datatype)
                .unwrap()
        )
    );
}

#[test]
@@ -886,6 +972,39 @@ mod tests {
    assert_eq!(expect, actual);
}

#[test]
fn test_convert_time_values() {
    // second
    let actual = convert_values(
        &ConcreteDataType::Time(TimeType::Second(TimeSecondType)),
        Values {
            time_second_values: vec![1_i64, 2_i64, 3_i64],
            ..Default::default()
        },
    );
    let expect = vec![
        Value::Time(Time::new_second(1_i64)),
        Value::Time(Time::new_second(2_i64)),
        Value::Time(Time::new_second(3_i64)),
    ];
    assert_eq!(expect, actual);

    // millisecond
    let actual = convert_values(
        &ConcreteDataType::Time(TimeType::Millisecond(TimeMillisecondType)),
        Values {
            time_millisecond_values: vec![1_i64, 2_i64, 3_i64],
            ..Default::default()
        },
    );
    let expect = vec![
        Value::Time(Time::new_millisecond(1_i64)),
        Value::Time(Time::new_millisecond(2_i64)),
        Value::Time(Time::new_millisecond(3_i64)),
    ];
    assert_eq!(expect, actual);
}

#[test]
fn test_is_null() {
    let null_mask = BitVec::from_slice(&[0b0000_0001, 0b0000_1000]);
@@ -939,6 +1058,18 @@ mod tests {
        datatype: ColumnDataType::Float64 as i32,
    };

    let time_vals = column::Values {
        time_millisecond_values: vec![100, 101],
        ..Default::default()
    };
    let time_column = Column {
        column_name: "time".to_string(),
        semantic_type: SemanticType::Field as i32,
        values: Some(time_vals),
        null_mask: vec![0],
        datatype: ColumnDataType::TimeMillisecond as i32,
    };

    let ts_vals = column::Values {
        ts_millisecond_values: vec![100, 101],
        ..Default::default()
@@ -952,7 +1083,7 @@ mod tests {
    };

    (
        vec![host_column, cpu_column, mem_column, ts_column],
        vec![host_column, cpu_column, mem_column, time_column, ts_column],
        row_count,
    )
}
@@ -22,6 +22,7 @@ datafusion.workspace = true
datatypes = { path = "../../datatypes" }
flatbuffers = "23.1"
futures = "0.3"
lazy_static.workspace = true
prost.workspace = true
snafu = { version = "0.7", features = ["backtraces"] }
tokio.workspace = true
@@ -12,13 +12,14 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::{Arc, Mutex};
use std::sync::atomic::{AtomicBool, AtomicU64, AtomicUsize, Ordering};
use std::sync::Arc;
use std::time::Duration;

use common_telemetry::info;
use dashmap::mapref::entry::Entry;
use dashmap::DashMap;
use lazy_static::lazy_static;
use snafu::{OptionExt, ResultExt};
use tonic::transport::{
    Certificate, Channel as InnerChannel, ClientTlsConfig, Endpoint, Identity, Uri,
@@ -28,14 +29,20 @@ use tower::make::MakeConnection;
use crate::error::{CreateChannelSnafu, InvalidConfigFilePathSnafu, InvalidTlsConfigSnafu, Result};

const RECYCLE_CHANNEL_INTERVAL_SECS: u64 = 60;
const DEFAULT_REQUEST_TIMEOUT_SECS: u64 = 2;
pub const DEFAULT_GRPC_REQUEST_TIMEOUT_SECS: u64 = 10;
pub const DEFAULT_GRPC_CONNECT_TIMEOUT_SECS: u64 = 10;

lazy_static! {
    static ref ID: AtomicU64 = AtomicU64::new(0);
}

#[derive(Clone, Debug)]
pub struct ChannelManager {
    id: u64,
    config: ChannelConfig,
    client_tls_config: Option<ClientTlsConfig>,
    pool: Arc<Pool>,
    channel_recycle_started: Arc<Mutex<bool>>,
    channel_recycle_started: Arc<AtomicBool>,
}

impl Default for ChannelManager {
@@ -50,30 +57,17 @@ impl ChannelManager {
    }

    pub fn with_config(config: ChannelConfig) -> Self {
        let id = ID.fetch_add(1, Ordering::Relaxed);
        let pool = Arc::new(Pool::default());
        Self {
            id,
            config,
            client_tls_config: None,
            pool,
            channel_recycle_started: Arc::new(Mutex::new(false)),
            channel_recycle_started: Arc::new(AtomicBool::new(false)),
        }
    }

    pub fn start_channel_recycle(&self) {
        let mut started = self.channel_recycle_started.lock().unwrap();
        if *started {
            return;
        }

        let pool = self.pool.clone();
        let _handle = common_runtime::spawn_bg(async {
            recycle_channel_in_loop(pool, RECYCLE_CHANNEL_INTERVAL_SECS).await;
        });
        info!("Channel recycle is started, running in the background!");

        *started = true;
    }

    pub fn with_tls_config(config: ChannelConfig) -> Result<Self> {
        let mut cm = Self::with_config(config.clone());

@@ -105,6 +99,8 @@ impl ChannelManager {
    }

    pub fn get(&self, addr: impl AsRef<str>) -> Result<InnerChannel> {
        self.trigger_channel_recycling();

        let addr = addr.as_ref();
        // It will acquire the read lock.
        if let Some(inner_ch) = self.pool.get(addr) {
@@ -208,6 +204,25 @@ impl ChannelManager {

        Ok(endpoint)
    }

    fn trigger_channel_recycling(&self) {
        if self
            .channel_recycle_started
            .compare_exchange(false, true, Ordering::Relaxed, Ordering::Relaxed)
            .is_err()
        {
            return;
        }

        let pool = self.pool.clone();
        let _handle = common_runtime::spawn_bg(async {
            recycle_channel_in_loop(pool, RECYCLE_CHANNEL_INTERVAL_SECS).await;
        });
        info!(
            "ChannelManager: {}, channel recycle is started, running in the background!",
            self.id
        );
    }
}
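`trigger_channel_recycling` relies on `compare_exchange` to guarantee the background task is spawned exactly once without taking a mutex: only the caller that flips the flag from `false` to `true` proceeds. The idiom in isolation, as a standalone sketch rather than the library code:

```
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;

fn main() {
    let started = Arc::new(AtomicBool::new(false));

    let mut spawned = 0;
    for _ in 0..4 {
        // Only the caller that flips false -> true proceeds; everyone else
        // gets Err(current_value) and returns early.
        if started
            .compare_exchange(false, true, Ordering::Relaxed, Ordering::Relaxed)
            .is_ok()
        {
            spawned += 1; // stand-in for spawning the recycle task
        }
    }
    assert_eq!(spawned, 1);
}
```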
#[derive(Clone, Debug, PartialEq, Eq)]
@@ -237,8 +252,8 @@ pub struct ChannelConfig {
impl Default for ChannelConfig {
    fn default() -> Self {
        Self {
            timeout: Some(Duration::from_secs(DEFAULT_REQUEST_TIMEOUT_SECS)),
            connect_timeout: Some(Duration::from_secs(4)),
            timeout: Some(Duration::from_secs(DEFAULT_GRPC_REQUEST_TIMEOUT_SECS)),
            connect_timeout: Some(Duration::from_secs(DEFAULT_GRPC_CONNECT_TIMEOUT_SECS)),
            concurrency_limit: None,
            rate_limit: None,
            initial_stream_window_size: None,
@@ -468,7 +483,10 @@ mod tests {

    #[tokio::test]
    async fn test_access_count() {
        let mgr = Arc::new(ChannelManager::new());
        let mgr = ChannelManager::new();
        // Do not start recycle
        mgr.channel_recycle_started.store(true, Ordering::Relaxed);
        let mgr = Arc::new(mgr);
        let addr = "test_uri";

        let mut joins = Vec::with_capacity(10);
@@ -498,8 +516,8 @@ mod tests {
        let default_cfg = ChannelConfig::new();
        assert_eq!(
            ChannelConfig {
                timeout: Some(Duration::from_secs(DEFAULT_REQUEST_TIMEOUT_SECS)),
                connect_timeout: Some(Duration::from_secs(4)),
                timeout: Some(Duration::from_secs(DEFAULT_GRPC_REQUEST_TIMEOUT_SECS)),
                connect_timeout: Some(Duration::from_secs(DEFAULT_GRPC_CONNECT_TIMEOUT_SECS)),
                concurrency_limit: None,
                rate_limit: None,
                initial_stream_window_size: None,
@@ -15,7 +15,8 @@
use std::any::Any;
use std::io;

use common_error::prelude::{ErrorExt, StatusCode};
use common_error::ext::ErrorExt;
use common_error::status_code::StatusCode;
use snafu::{Location, Snafu};

pub type Result<T> = std::result::Result<T, Error>;
@@ -14,10 +14,11 @@

use api::v1::column::Values;
use common_base::BitVec;
use datatypes::types::{TimestampType, WrapperType};
use datatypes::types::{TimeType, TimestampType, WrapperType};
use datatypes::vectors::{
    BinaryVector, BooleanVector, DateTimeVector, DateVector, Float32Vector, Float64Vector,
    Int16Vector, Int32Vector, Int64Vector, Int8Vector, StringVector, TimestampMicrosecondVector,
    Int16Vector, Int32Vector, Int64Vector, Int8Vector, StringVector, TimeMicrosecondVector,
    TimeMillisecondVector, TimeNanosecondVector, TimeSecondVector, TimestampMicrosecondVector,
    TimestampMillisecondVector, TimestampNanosecondVector, TimestampSecondVector, UInt16Vector,
    UInt32Vector, UInt64Vector, UInt8Vector, VectorRef,
};
@@ -167,6 +168,30 @@ pub fn values(arrays: &[VectorRef]) -> Result<Values> {
            TimestampNanosecondVector,
            ts_nanosecond_values,
            |x| { x.into_native() }
        ),
        (
            ConcreteDataType::Time(TimeType::Second(_)),
            TimeSecondVector,
            time_second_values,
            |x| { x.into_native() as i64 }
        ),
        (
            ConcreteDataType::Time(TimeType::Millisecond(_)),
            TimeMillisecondVector,
            time_millisecond_values,
            |x| { x.into_native() as i64 }
        ),
        (
            ConcreteDataType::Time(TimeType::Microsecond(_)),
            TimeMicrosecondVector,
            time_microsecond_values,
            |x| { x.into_native() }
        ),
        (
            ConcreteDataType::Time(TimeType::Nanosecond(_)),
            TimeNanosecondVector,
            time_nanosecond_values,
            |x| { x.into_native() }
        )
    )
}
@@ -187,6 +212,16 @@ mod tests {
        assert_eq!(vec![1, 2, 3], values.i32_values);
    }

    #[test]
    fn test_convert_arrow_array_time_second() {
        let array = TimeSecondVector::from(vec![Some(1), Some(2), None, Some(3)]);
        let array: VectorRef = Arc::new(array);

        let values = values(&[array]).unwrap();

        assert_eq!(vec![1, 2, 3], values.time_second_values);
    }

    #[test]
    fn test_convert_arrow_arrays_string() {
        let array = StringVector::from(vec![
@@ -15,7 +15,8 @@
use std::any::Any;
use std::path::PathBuf;

use common_error::prelude::{ErrorExt, StatusCode};
use common_error::ext::ErrorExt;
use common_error::status_code::StatusCode;
use snafu::{Location, Snafu};

pub type Result<T> = std::result::Result<T, Error>;
@@ -13,8 +13,11 @@ common-error = { path = "../error" }
common-runtime = { path = "../runtime" }
common-telemetry = { path = "../telemetry" }
common-time = { path = "../time" }
etcd-client.workspace = true
futures.workspace = true
lazy_static.workspace = true
prost.workspace = true
regex.workspace = true
serde.workspace = true
serde_json.workspace = true
snafu.workspace = true
@@ -12,9 +12,12 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use common_error::prelude::*;
use common_error::ext::{BoxedError, ErrorExt};
use common_error::status_code::StatusCode;
use serde_json::error::Error as JsonError;
use snafu::Location;
use snafu::{Location, Snafu};
use store_api::storage::RegionNumber;
use table::metadata::TableId;

#[derive(Debug, Snafu)]
#[snafu(visibility(pub))]
@@ -56,6 +59,24 @@ pub enum Error {
    #[snafu(display("Invalid protobuf message, err: {}", err_msg))]
    InvalidProtoMsg { err_msg: String, location: Location },

    #[snafu(display("Unexpected: {err_msg}"))]
    Unexpected { err_msg: String, location: Location },

    #[snafu(display("Table already exists, table_id: {}", table_id))]
    TableAlreadyExists {
        table_id: TableId,
        location: Location,
    },

    #[snafu(display("Table does not exist, table_name: {}", table_name))]
    TableNotExist {
        table_name: String,
        location: Location,
    },

    #[snafu(display("Failed to rename table, reason: {}", reason))]
    RenameTable { reason: String, location: Location },

    #[snafu(display("Invalid table metadata, err: {}", err_msg))]
    InvalidTableMetadata { err_msg: String, location: Location },

@@ -65,11 +86,33 @@ pub enum Error {
    #[snafu(display("Get null from cache, key: {}", key))]
    CacheNotGet { key: String, location: Location },

    #[snafu(display("Failed to request MetaSrv, source: {}", source))]
    #[snafu(display("{source}"))]
    MetaSrv {
        source: BoxedError,
        location: Location,
    },

    #[snafu(display("Etcd txn error: {err_msg}"))]
    EtcdTxnOpResponse { err_msg: String, location: Location },

    #[snafu(display(
        "Failed to move region {} in table {}, err: {}",
        region,
        table_id,
        err_msg
    ))]
    MoveRegion {
        table_id: TableId,
        region: RegionNumber,
        err_msg: String,
        location: Location,
    },

    #[snafu(display("Invalid catalog value, source: {}", source))]
    InvalidCatalogValue {
        source: common_catalog::error::Error,
        location: Location,
    },
}

pub type Result<T> = std::result::Result<T, Error>;
@@ -78,20 +121,29 @@ impl ErrorExt for Error {
    fn status_code(&self) -> StatusCode {
        use Error::*;
        match self {
            IllegalServerState { .. } => StatusCode::Internal,
            IllegalServerState { .. } | EtcdTxnOpResponse { .. } => StatusCode::Internal,

            SerdeJson { .. }
            | RouteInfoCorrupted { .. }
            | InvalidProtoMsg { .. }
            | InvalidTableMetadata { .. } => StatusCode::Unexpected,
            | InvalidTableMetadata { .. }
            | MoveRegion { .. }
            | Unexpected { .. } => StatusCode::Unexpected,

            SendMessage { .. } | GetKvCache { .. } | CacheNotGet { .. } => StatusCode::Internal,
            SendMessage { .. }
            | GetKvCache { .. }
            | CacheNotGet { .. }
            | TableAlreadyExists { .. }
            | TableNotExist { .. }
            | RenameTable { .. } => StatusCode::Internal,

            EncodeJson { .. } | DecodeJson { .. } | PayloadNotExist { .. } => {
                StatusCode::Unexpected
            }

            MetaSrv { source, .. } => source.status_code(),

            InvalidCatalogValue { source, .. } => source.status_code(),
        }
    }
|
||||
|
||||
|
||||
src/common/meta/src/helper.rs (new file, 175 lines)
@@ -0,0 +1,175 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::collections::HashMap;
use std::fmt::{Display, Formatter};

use common_catalog::error::{
    DeserializeCatalogEntryValueSnafu, Error, InvalidCatalogSnafu, SerializeCatalogEntryValueSnafu,
};
use lazy_static::lazy_static;
use regex::Regex;
use serde::{Deserialize, Serialize};
use snafu::{ensure, OptionExt, ResultExt};
use table::metadata::{RawTableInfo, TableId};

pub const CATALOG_KEY_PREFIX: &str = "__c";
pub const SCHEMA_KEY_PREFIX: &str = "__s";

/// The pattern of a valid catalog, schema or table name.
const NAME_PATTERN: &str = "[a-zA-Z_:][a-zA-Z0-9_:]*";

lazy_static! {
    static ref CATALOG_KEY_PATTERN: Regex =
        Regex::new(&format!("^{CATALOG_KEY_PREFIX}-({NAME_PATTERN})$")).unwrap();
}

lazy_static! {
    static ref SCHEMA_KEY_PATTERN: Regex = Regex::new(&format!(
        "^{SCHEMA_KEY_PREFIX}-({NAME_PATTERN})-({NAME_PATTERN})$"
    ))
    .unwrap();
}

pub fn build_catalog_prefix() -> String {
    format!("{CATALOG_KEY_PREFIX}-")
}

pub fn build_schema_prefix(catalog_name: impl AsRef<str>) -> String {
    format!("{SCHEMA_KEY_PREFIX}-{}-", catalog_name.as_ref())
}

/// Table global info contains necessary info for a datanode to create table regions, including
/// table id, table meta(schema...), region id allocation across datanodes.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct TableGlobalValue {
    /// Id of the datanode that created the global table info kv. Only for debugging.
    pub node_id: u64,
    /// Allocation of region ids across all datanodes.
    pub regions_id_map: HashMap<u64, Vec<u32>>,
    pub table_info: RawTableInfo,
}

impl TableGlobalValue {
    pub fn table_id(&self) -> TableId {
        self.table_info.ident.table_id
    }
}

pub struct CatalogKey {
    pub catalog_name: String,
}

impl Display for CatalogKey {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        f.write_str(CATALOG_KEY_PREFIX)?;
        f.write_str("-")?;
        f.write_str(&self.catalog_name)
    }
}

impl CatalogKey {
    pub fn parse(s: impl AsRef<str>) -> Result<Self, Error> {
        let key = s.as_ref();
        let captures = CATALOG_KEY_PATTERN
            .captures(key)
            .context(InvalidCatalogSnafu { key })?;
        ensure!(captures.len() == 2, InvalidCatalogSnafu { key });
        Ok(Self {
            catalog_name: captures[1].to_string(),
        })
    }
}

#[derive(Debug, Serialize, Deserialize)]
pub struct CatalogValue;

pub struct SchemaKey {
    pub catalog_name: String,
    pub schema_name: String,
}

impl Display for SchemaKey {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        f.write_str(SCHEMA_KEY_PREFIX)?;
        f.write_str("-")?;
        f.write_str(&self.catalog_name)?;
        f.write_str("-")?;
        f.write_str(&self.schema_name)
    }
}

impl SchemaKey {
    pub fn parse(s: impl AsRef<str>) -> Result<Self, Error> {
        let key = s.as_ref();
        let captures = SCHEMA_KEY_PATTERN
            .captures(key)
            .context(InvalidCatalogSnafu { key })?;
        ensure!(captures.len() == 3, InvalidCatalogSnafu { key });
        Ok(Self {
            catalog_name: captures[1].to_string(),
            schema_name: captures[2].to_string(),
        })
    }
}

#[derive(Debug, Serialize, Deserialize)]
pub struct SchemaValue;

macro_rules! define_catalog_value {
    ( $($val_ty: ty), *) => {
        $(
            impl $val_ty {
                pub fn parse(s: impl AsRef<str>) -> Result<Self, Error> {
                    serde_json::from_str(s.as_ref())
                        .context(DeserializeCatalogEntryValueSnafu { raw: s.as_ref() })
                }

                pub fn from_bytes(bytes: impl AsRef<[u8]>) -> Result<Self, Error> {
                    Self::parse(&String::from_utf8_lossy(bytes.as_ref()))
                }

                pub fn as_bytes(&self) -> Result<Vec<u8>, Error> {
                    Ok(serde_json::to_string(self)
                        .context(SerializeCatalogEntryValueSnafu)?
                        .into_bytes())
                }
            }
        )*
    }
}

define_catalog_value!(TableGlobalValue, CatalogValue, SchemaValue);

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_parse_catalog_key() {
        let key = "__c-C";
        let catalog_key = CatalogKey::parse(key).unwrap();
        assert_eq!("C", catalog_key.catalog_name);
        assert_eq!(key, catalog_key.to_string());
    }

    #[test]
    fn test_parse_schema_key() {
        let key = "__s-C-S";
        let schema_key = SchemaKey::parse(key).unwrap();
        assert_eq!("C", schema_key.catalog_name);
        assert_eq!("S", schema_key.schema_name);
        assert_eq!(key, schema_key.to_string());
    }
}
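Taken together, these helpers define a flat, regex-validated key namespace for catalogs and schemas. A small sketch of the intended round-trip, using only the `Display` impls, `parse` functions, and prefix builders defined above (it mirrors the unit tests):

// Sketch: format a schema key, parse it back, and build a range-scan prefix.
fn schema_key_round_trip() {
    let key = SchemaKey {
        catalog_name: "greptime".to_string(),
        schema_name: "public".to_string(),
    };
    let s = key.to_string(); // "__s-greptime-public"

    let parsed = SchemaKey::parse(&s).unwrap();
    assert_eq!(parsed.catalog_name, "greptime");
    assert_eq!(parsed.schema_name, "public");

    // All schemas of a catalog share this prefix, so they can be range-scanned.
    assert!(s.starts_with(&build_schema_prefix("greptime"))); // "__s-greptime-"
}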
@@ -20,7 +20,7 @@ use snafu::OptionExt;

 use crate::error::{Error, InvalidProtoMsgSnafu};

-#[derive(Eq, Hash, PartialEq, Clone, Debug, Serialize, Deserialize)]
+#[derive(Eq, Hash, PartialEq, Clone, Debug, Default, Serialize, Deserialize)]
 pub struct TableIdent {
     pub catalog: String,
     pub schema: String,
@@ -15,12 +15,22 @@
 //! This mod defines all the keys used in the metadata store (Metasrv).
 //! Specifically, there are these kinds of keys:
 //!
-//! 1. Table info key: `__table_info/{table_id}`
+//! 1. Datanode table key: `__dn_table/{datanode_id}/{table_id}`
+//!    - The value is a [DatanodeTableValue] struct; it contains `table_id` and the regions that
+//!      belong to this Datanode.
+//!    - This key is primarily used in the startup of Datanode, to let Datanode know which tables
+//!      and regions it should open.
+//!
+//! 2. Table info key: `__table_info/{table_id}`
 //!    - The value is a [TableInfoValue] struct; it contains the whole table info (like column
 //!      schemas).
 //!    - This key is mainly used in constructing the table in Datanode and Frontend.
 //!
-//! 2. Table region key: `__table_region/{table_id}`
+//! 3. Table name key: `__table_name/{catalog_name}/{schema_name}/{table_name}`
+//!    - The value is a [TableNameValue] struct; it contains the table id.
+//!    - Used in the table name to table id lookup.
+//!
+//! 4. Table region key: `__table_region/{table_id}`
 //!    - The value is a [TableRegionValue] struct; it contains the region distribution of the
 //!      table in the Datanodes.
 //!

@@ -31,15 +41,21 @@
 //! table metadata manager: [TableMetadataManager]. It contains all the managers defined above.
 //! It's recommended to just use this manager.

+pub mod datanode_table;
 pub mod table_info;
+pub mod table_name;
 pub mod table_region;
 mod table_route;

 use std::sync::Arc;

+use datanode_table::{DatanodeTableKey, DatanodeTableManager, DatanodeTableValue};
+use lazy_static::lazy_static;
+use regex::Regex;
 use snafu::ResultExt;
-use table_info::{TableInfoManager, TableInfoValue};
-use table_region::{TableRegionManager, TableRegionValue};
+use table_info::{TableInfoKey, TableInfoManager, TableInfoValue};
+use table_name::{TableNameKey, TableNameManager, TableNameValue};
+use table_region::{TableRegionKey, TableRegionManager, TableRegionValue};

 use crate::error::{InvalidTableMetadataSnafu, Result, SerdeJsonSnafu};
 pub use crate::key::table_route::{TableRouteKey, TABLE_ROUTE_PREFIX};

@@ -47,9 +63,25 @@ use crate::kv_backend::KvBackendRef;

 pub const REMOVED_PREFIX: &str = "__removed";

+const TABLE_NAME_PATTERN: &str = "[a-zA-Z_:][a-zA-Z0-9_:]*";
+
+const DATANODE_TABLE_KEY_PREFIX: &str = "__dn_table";
 const TABLE_INFO_KEY_PREFIX: &str = "__table_info";
+const TABLE_NAME_KEY_PREFIX: &str = "__table_name";
 const TABLE_REGION_KEY_PREFIX: &str = "__table_region";

+lazy_static! {
+    static ref DATANODE_TABLE_KEY_PATTERN: Regex =
+        Regex::new(&format!("^{DATANODE_TABLE_KEY_PREFIX}/([0-9])/([0-9])$")).unwrap();
+}
+
+lazy_static! {
+    static ref TABLE_NAME_KEY_PATTERN: Regex = Regex::new(&format!(
+        "^{TABLE_NAME_KEY_PREFIX}/({TABLE_NAME_PATTERN})/({TABLE_NAME_PATTERN})/({TABLE_NAME_PATTERN})$"
+    ))
+    .unwrap();
+}
+
 pub fn to_removed_key(key: &str) -> String {
     format!("{REMOVED_PREFIX}-{key}")
 }

@@ -61,18 +93,26 @@ pub trait TableMetaKey {

 pub type TableMetadataManagerRef = Arc<TableMetadataManager>;

 pub struct TableMetadataManager {
+    table_name_manager: TableNameManager,
     table_info_manager: TableInfoManager,
     table_region_manager: TableRegionManager,
+    datanode_table_manager: DatanodeTableManager,
 }

 impl TableMetadataManager {
     pub fn new(kv_backend: KvBackendRef) -> Self {
         TableMetadataManager {
+            table_name_manager: TableNameManager::new(kv_backend.clone()),
             table_info_manager: TableInfoManager::new(kv_backend.clone()),
-            table_region_manager: TableRegionManager::new(kv_backend),
+            table_region_manager: TableRegionManager::new(kv_backend.clone()),
+            datanode_table_manager: DatanodeTableManager::new(kv_backend),
         }
     }

+    pub fn table_name_manager(&self) -> &TableNameManager {
+        &self.table_name_manager
+    }
+
     pub fn table_info_manager(&self) -> &TableInfoManager {
         &self.table_info_manager
     }

@@ -80,10 +120,33 @@ impl TableMetadataManager {
     pub fn table_region_manager(&self) -> &TableRegionManager {
         &self.table_region_manager
     }
+
+    pub fn datanode_table_manager(&self) -> &DatanodeTableManager {
+        &self.datanode_table_manager
+    }
 }

+macro_rules! impl_table_meta_key {
+    ($($val_ty: ty), *) => {
+        $(
+            impl std::fmt::Display for $val_ty {
+                fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+                    write!(f, "{}", String::from_utf8_lossy(&self.as_raw_key()))
+                }
+            }
+        )*
+    }
+}
+
+impl_table_meta_key!(
+    TableNameKey<'_>,
+    TableInfoKey,
+    TableRegionKey,
+    DatanodeTableKey
+);
+
 macro_rules! impl_table_meta_value {
-    ( $($val_ty: ty), *) => {
+    ($($val_ty: ty), *) => {
         $(
             impl $val_ty {
                 pub fn try_from_raw_value(raw_value: Vec<u8>) -> Result<Self> {

@@ -104,8 +167,10 @@ macro_rules! impl_table_meta_value {
 }

 impl_table_meta_value! {
+    TableNameValue,
     TableInfoValue,
-    TableRegionValue
+    TableRegionValue,
+    DatanodeTableValue
 }

 #[cfg(test)]
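With the four managers bundled behind `TableMetadataManager`, a caller constructs everything from one `KvBackendRef`. A minimal wiring sketch over the in-memory backend used throughout the tests; the `common_meta` crate path is an assumption inferred from the `src/common/meta` file locations:

use std::sync::Arc;

// Assumed paths, inferred from src/common/meta/src/... above.
use common_meta::key::TableMetadataManager;
use common_meta::kv_backend::memory::MemoryKvBackend;

fn build_manager() -> TableMetadataManager {
    // The backend is cloned into each sub-manager, exactly as in TableMetadataManager::new.
    TableMetadataManager::new(Arc::new(MemoryKvBackend::default()))
}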
src/common/meta/src/key/datanode_table.rs (new file, 470 lines)
@@ -0,0 +1,470 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use serde::{Deserialize, Serialize};
use snafu::{ensure, OptionExt};
use store_api::storage::RegionNumber;
use table::metadata::TableId;

use super::{DATANODE_TABLE_KEY_PATTERN, DATANODE_TABLE_KEY_PREFIX};
use crate::error::{InvalidTableMetadataSnafu, MoveRegionSnafu, Result, UnexpectedSnafu};
use crate::key::{to_removed_key, TableMetaKey};
use crate::kv_backend::txn::{Compare, CompareOp, Txn, TxnOp};
use crate::kv_backend::KvBackendRef;
use crate::rpc::store::{BatchGetRequest, CompareAndPutRequest, MoveValueRequest, RangeRequest};
use crate::DatanodeId;

pub struct DatanodeTableKey {
    datanode_id: DatanodeId,
    table_id: TableId,
}

impl DatanodeTableKey {
    pub fn new(datanode_id: DatanodeId, table_id: TableId) -> Self {
        Self {
            datanode_id,
            table_id,
        }
    }

    fn prefix(datanode_id: DatanodeId) -> String {
        format!("{}/{datanode_id}", DATANODE_TABLE_KEY_PREFIX)
    }

    #[allow(unused)]
    pub fn strip_table_id(raw_key: &[u8]) -> Result<TableId> {
        let key = String::from_utf8(raw_key.to_vec()).map_err(|e| {
            InvalidTableMetadataSnafu {
                err_msg: format!(
                    "DatanodeTableKey '{}' is not a valid UTF8 string: {e}",
                    String::from_utf8_lossy(raw_key)
                ),
            }
            .build()
        })?;
        let captures =
            DATANODE_TABLE_KEY_PATTERN
                .captures(&key)
                .context(InvalidTableMetadataSnafu {
                    err_msg: format!("Invalid DatanodeTableKey '{key}'"),
                })?;
        // Safety: pass the regex check above
        let table_id = captures[2].parse::<TableId>().unwrap();
        Ok(table_id)
    }
}

impl TableMetaKey for DatanodeTableKey {
    fn as_raw_key(&self) -> Vec<u8> {
        format!("{}/{}", Self::prefix(self.datanode_id), self.table_id).into_bytes()
    }
}

#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct DatanodeTableValue {
    pub table_id: TableId,
    pub regions: Vec<RegionNumber>,
    version: u64,
}

impl DatanodeTableValue {
    pub fn new(table_id: TableId, regions: Vec<RegionNumber>) -> Self {
        Self {
            table_id,
            regions,
            version: 0,
        }
    }
}

pub struct DatanodeTableManager {
    kv_backend: KvBackendRef,
}

impl DatanodeTableManager {
    pub fn new(kv_backend: KvBackendRef) -> Self {
        Self { kv_backend }
    }

    pub async fn get(&self, key: &DatanodeTableKey) -> Result<Option<DatanodeTableValue>> {
        self.kv_backend
            .get(&key.as_raw_key())
            .await?
            .map(|kv| DatanodeTableValue::try_from_raw_value(kv.value))
            .transpose()
    }

    /// Create DatanodeTable key and value. If the key already exists, check if the value is the same.
    pub async fn create(
        &self,
        datanode_id: DatanodeId,
        table_id: TableId,
        regions: Vec<RegionNumber>,
    ) -> Result<()> {
        let key = DatanodeTableKey::new(datanode_id, table_id);
        let val = DatanodeTableValue::new(table_id, regions.clone());
        let req = CompareAndPutRequest::new()
            .with_key(key.as_raw_key())
            .with_value(val.try_as_raw_value()?);

        let resp = self.kv_backend.compare_and_put(req).await?;
        if !resp.success {
            let Some(curr) = resp
                .prev_kv
                .map(|kv| DatanodeTableValue::try_from_raw_value(kv.value))
                .transpose()? else {
                return UnexpectedSnafu {
                    err_msg: format!("compare_and_put expect None but failed with current value None, key: {key}, val: {val:?}"),
                }.fail();
            };

            ensure!(
                curr.table_id == table_id && curr.regions == regions,
                UnexpectedSnafu {
                    err_msg: format!("current value '{curr:?}' already existed for key '{key}', {val:?} is not set"),
                }
            );
        }
        Ok(())
    }

    pub async fn remove(&self, datanode_id: DatanodeId, table_id: TableId) -> Result<()> {
        let key = DatanodeTableKey::new(datanode_id, table_id);
        let removed_key = to_removed_key(&String::from_utf8_lossy(&key.as_raw_key()));
        let req = MoveValueRequest::new(key.as_raw_key(), removed_key.as_bytes());
        let _ = self.kv_backend.move_value(req).await?;
        Ok(())
    }

    pub async fn move_region(
        &self,
        from_datanode: DatanodeId,
        to_datanode: DatanodeId,
        table_id: TableId,
        region: RegionNumber,
    ) -> Result<()> {
        let from_key = DatanodeTableKey::new(from_datanode, table_id);
        let to_key = DatanodeTableKey::new(to_datanode, table_id);
        let mut kvs = self
            .kv_backend
            .batch_get(BatchGetRequest {
                keys: vec![from_key.as_raw_key(), to_key.as_raw_key()],
            })
            .await?
            .kvs;

        ensure!(
            !kvs.is_empty(),
            MoveRegionSnafu {
                table_id,
                region,
                err_msg: format!("DatanodeTableKey not found for Datanode {from_datanode}"),
            }
        );
        let mut from_value = DatanodeTableValue::try_from_raw_value(kvs.remove(0).value)?;

        ensure!(
            from_value.regions.contains(&region),
            MoveRegionSnafu {
                table_id,
                region,
                err_msg: format!("target region not found in Datanode {from_datanode}"),
            }
        );

        let to_value = if !kvs.is_empty() {
            Some(DatanodeTableValue::try_from_raw_value(kvs.remove(0).value)?)
        } else {
            None
        };

        if let Some(v) = to_value.as_ref() {
            ensure!(
                !v.regions.contains(&region),
                MoveRegionSnafu {
                    table_id,
                    region,
                    err_msg: format!("target region already existed in Datanode {to_datanode}"),
                }
            );
        }

        let compares = vec![
            Compare::with_value(
                from_key.as_raw_key(),
                CompareOp::Equal,
                from_value.try_as_raw_value()?,
            ),
            Compare::new(
                to_key.as_raw_key(),
                CompareOp::Equal,
                to_value
                    .as_ref()
                    .map(|x| x.try_as_raw_value())
                    .transpose()?,
            ),
        ];

        let mut operations = Vec::with_capacity(2);

        from_value.regions.retain(|x| *x != region);
        if from_value.regions.is_empty() {
            operations.push(TxnOp::Delete(from_key.as_raw_key()));
        } else {
            from_value.version += 1;
            operations.push(TxnOp::Put(
                from_key.as_raw_key(),
                from_value.try_as_raw_value()?,
            ));
        }

        if let Some(mut v) = to_value {
            v.regions.push(region);
            v.version += 1;
            operations.push(TxnOp::Put(to_key.as_raw_key(), v.try_as_raw_value()?));
        } else {
            let v = DatanodeTableValue::new(table_id, vec![region]);
            operations.push(TxnOp::Put(to_key.as_raw_key(), v.try_as_raw_value()?));
        }

        let txn = Txn::new().when(compares).and_then(operations);
        let resp = self.kv_backend.txn(txn).await?;
        ensure!(
            resp.succeeded,
            MoveRegionSnafu {
                table_id,
                region,
                err_msg: format!("txn failed with responses: {:?}", resp.responses),
            }
        );
        Ok(())
    }

    pub async fn tables(&self, datanode_id: DatanodeId) -> Result<Vec<DatanodeTableValue>> {
        let prefix = DatanodeTableKey::prefix(datanode_id);
        let req = RangeRequest::new().with_prefix(prefix.as_bytes());
        let resp = self.kv_backend.range(req).await?;
        let table_ids = resp
            .kvs
            .into_iter()
            .map(|kv| DatanodeTableValue::try_from_raw_value(kv.value))
            .collect::<Result<Vec<_>>>()?;
        Ok(table_ids)
    }
}

#[cfg(test)]
mod tests {
    use std::sync::Arc;

    use super::*;
    use crate::kv_backend::memory::MemoryKvBackend;
    use crate::kv_backend::KvBackend;

    #[tokio::test]
    async fn test_move_region() {
        let manager = DatanodeTableManager::new(Arc::new(MemoryKvBackend::default()));

        let result = manager.move_region(1, 2, 1, 1).await;
        assert!(result.unwrap_err().to_string().contains(
            "Failed to move region 1 in table 1, err: DatanodeTableKey not found for Datanode 1"
        ));

        assert!(manager.create(1, 1, vec![1, 2, 3]).await.is_ok());
        let result = manager.move_region(1, 2, 1, 100).await;
        assert!(result.unwrap_err().to_string().contains(
            "Failed to move region 100 in table 1, err: target region not found in Datanode 1"
        ));

        // Move region 1 from datanode 1 to datanode 2.
        // Note that the DatanodeTableValue does not exist for datanode 2 yet.
        assert!(manager.move_region(1, 2, 1, 1).await.is_ok());
        let value = manager
            .get(&DatanodeTableKey::new(1, 1))
            .await
            .unwrap()
            .unwrap();
        assert_eq!(
            value,
            DatanodeTableValue {
                table_id: 1,
                regions: vec![2, 3],
                version: 1,
            }
        );
        let value = manager
            .get(&DatanodeTableKey::new(2, 1))
            .await
            .unwrap()
            .unwrap();
        assert_eq!(
            value,
            DatanodeTableValue {
                table_id: 1,
                regions: vec![1],
                version: 0,
            }
        );

        // Move region 2 from datanode 1 to datanode 2.
        assert!(manager.move_region(1, 2, 1, 2).await.is_ok());
        let value = manager
            .get(&DatanodeTableKey::new(1, 1))
            .await
            .unwrap()
            .unwrap();
        assert_eq!(
            value,
            DatanodeTableValue {
                table_id: 1,
                regions: vec![3],
                version: 2,
            }
        );
        let value = manager
            .get(&DatanodeTableKey::new(2, 1))
            .await
            .unwrap()
            .unwrap();
        assert_eq!(
            value,
            DatanodeTableValue {
                table_id: 1,
                regions: vec![1, 2],
                version: 1,
            }
        );

        // Move region 3 (the last region) from datanode 1 to datanode 2.
        assert!(manager.move_region(1, 2, 1, 3).await.is_ok());
        let value = manager.get(&DatanodeTableKey::new(1, 1)).await.unwrap();
        assert!(value.is_none());
        let value = manager
            .get(&DatanodeTableKey::new(2, 1))
            .await
            .unwrap()
            .unwrap();
        assert_eq!(
            value,
            DatanodeTableValue {
                table_id: 1,
                regions: vec![1, 2, 3],
                version: 2,
            }
        );
    }

    #[tokio::test]
    async fn test_datanode_table_value_manager() {
        let backend = Arc::new(MemoryKvBackend::default());
        let manager = DatanodeTableManager::new(backend.clone());

        assert!(manager.create(1, 1, vec![1, 2, 3]).await.is_ok());
        assert!(manager.create(1, 2, vec![4, 5, 6]).await.is_ok());
        assert!(manager.create(2, 1, vec![4, 5, 6]).await.is_ok());
        assert!(manager.create(2, 2, vec![1, 2, 3]).await.is_ok());

        // If the value is the same, "create" can be called again.
        assert!(manager.create(2, 2, vec![1, 2, 3]).await.is_ok());

        let err_msg = manager
            .create(1, 1, vec![4, 5, 6])
            .await
            .unwrap_err()
            .to_string();
        assert!(err_msg.contains("Unexpected: current value 'DatanodeTableValue { table_id: 1, regions: [1, 2, 3], version: 0 }' already existed for key '__dn_table/1/1', DatanodeTableValue { table_id: 1, regions: [4, 5, 6], version: 0 } is not set"));

        let to_be_removed_key = DatanodeTableKey::new(2, 1);
        let expected_value = DatanodeTableValue {
            table_id: 1,
            regions: vec![4, 5, 6],
            version: 0,
        };
        let value = manager.get(&to_be_removed_key).await.unwrap().unwrap();
        assert_eq!(value, expected_value);

        assert!(manager.remove(2, 1).await.is_ok());
        assert!(manager.get(&to_be_removed_key).await.unwrap().is_none());
        let kv = backend
            .get(b"__removed-__dn_table/2/1")
            .await
            .unwrap()
            .unwrap();
        assert_eq!(b"__removed-__dn_table/2/1", kv.key());
        let value = DatanodeTableValue::try_from_raw_value(kv.value).unwrap();
        assert_eq!(value, expected_value);

        let values = manager.tables(1).await.unwrap();
        assert_eq!(values.len(), 2);
        assert_eq!(
            values[0],
            DatanodeTableValue {
                table_id: 1,
                regions: vec![1, 2, 3],
                version: 0,
            }
        );
        assert_eq!(
            values[1],
            DatanodeTableValue {
                table_id: 2,
                regions: vec![4, 5, 6],
                version: 0,
            }
        );
    }

    #[test]
    fn test_serde() {
        let key = DatanodeTableKey {
            datanode_id: 1,
            table_id: 2,
        };
        let raw_key = key.as_raw_key();
        assert_eq!(raw_key, b"__dn_table/1/2");

        let value = DatanodeTableValue {
            table_id: 42,
            regions: vec![1, 2, 3],
            version: 1,
        };
        let literal = br#"{"table_id":42,"regions":[1,2,3],"version":1}"#;

        let raw_value = value.try_as_raw_value().unwrap();
        assert_eq!(raw_value, literal);

        let actual = DatanodeTableValue::try_from_raw_value(literal.to_vec()).unwrap();
        assert_eq!(actual, value);
    }

    #[test]
    fn test_strip_table_id() {
        fn test_err(raw_key: &[u8]) {
            let result = DatanodeTableKey::strip_table_id(raw_key);
            assert!(result.is_err());
        }

        test_err(b"");
        test_err(vec![0u8, 159, 146, 150].as_slice()); // invalid UTF8 string
        test_err(b"invalid_prefix/1/2");
        test_err(b"__dn_table/");
        test_err(b"__dn_table/invalid_len_1");
        test_err(b"__dn_table/invalid_len_3/1/2");
        test_err(b"__dn_table/invalid_node_id/2");
        test_err(b"__dn_table/1/invalid_table_id");

        let table_id = DatanodeTableKey::strip_table_id(b"__dn_table/1/2").unwrap();
        assert_eq!(table_id, 2);
    }
}
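The heart of `move_region` above is a guard-then-mutate transaction: it compares the source and destination values it just read, and only then deletes or rewrites them, so a concurrent move fails the `Compare` step instead of corrupting either region list. A condensed sketch of that shape, with raw byte keys and values standing in for the encoded `DatanodeTableKey`/`DatanodeTableValue` pairs (the `common_meta` crate path is an assumption):

use common_meta::kv_backend::txn::{Compare, CompareOp, Txn, TxnOp};

// Sketch: build the guarded txn for moving a region to a destination key that
// does not exist yet, mirroring the compares/operations built in move_region.
fn guarded_move(from_key: Vec<u8>, from_val: Vec<u8>, to_key: Vec<u8>, new_to_val: Vec<u8>) -> Txn {
    Txn::new()
        .when(vec![
            // Guards: source must be unchanged, destination must not exist.
            Compare::with_value(from_key.clone(), CompareOp::Equal, from_val),
            Compare::with_not_exist_value(to_key.clone(), CompareOp::Equal),
        ])
        .and_then(vec![
            // Mutations, applied only if all guards hold.
            TxnOp::Delete(from_key),
            TxnOp::Put(to_key, new_to_val),
        ])
    // The caller then checks resp.succeeded, as move_region does above.
}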
@@ -13,12 +13,14 @@
 // limitations under the License.

 use serde::{Deserialize, Serialize};
+use snafu::ensure;
 use table::metadata::{RawTableInfo, TableId};

 use super::TABLE_INFO_KEY_PREFIX;
-use crate::error::Result;
+use crate::error::{Result, UnexpectedSnafu};
 use crate::key::{to_removed_key, TableMetaKey};
 use crate::kv_backend::KvBackendRef;
+use crate::rpc::store::{CompareAndPutRequest, MoveValueRequest};

 pub struct TableInfoKey {
     table_id: TableId,

@@ -42,6 +44,15 @@ pub struct TableInfoValue {
     version: u64,
 }

+impl TableInfoValue {
+    pub fn new(table_info: RawTableInfo) -> Self {
+        Self {
+            table_info,
+            version: 0,
+        }
+    }
+}
+
 pub struct TableInfoManager {
     kv_backend: KvBackendRef,
 }

@@ -57,16 +68,47 @@ impl TableInfoManager {
         self.kv_backend
             .get(&raw_key)
             .await?
-            .map(|x| TableInfoValue::try_from_raw_value(x.1))
+            .map(|x| TableInfoValue::try_from_raw_value(x.value))
             .transpose()
     }

-    pub async fn compare_and_set(
+    /// Create TableInfo key and value. If the key already exists, check if the value is the same.
+    pub async fn create(&self, table_id: TableId, table_info: &RawTableInfo) -> Result<()> {
+        let result = self
+            .compare_and_put(table_id, None, table_info.clone())
+            .await?;
+        if let Err(curr) = result {
+            let Some(curr) = curr else {
+                return UnexpectedSnafu {
+                    err_msg: format!("compare_and_put expect None but failed with current value None, table_id: {table_id}, table_info: {table_info:?}"),
+                }.fail()
+            };
+            ensure!(
+                &curr.table_info == table_info,
+                UnexpectedSnafu {
+                    err_msg: format!(
+                        "TableInfoValue for table {table_id} is updated before it is created!"
+                    )
+                }
+            )
+        }
+        Ok(())
+    }
+
+    /// Compare and put the value of a key. `expect` is the expected value; if the backend's current
+    /// value associated with the key is the same as `expect`, the value will be updated to `val`.
+    ///
+    /// - If the compare-and-put operation successfully updated the value, this method returns `Ok(Ok(()))`.
+    /// - If the associated value is not the same as `expect`, no value is updated and an
+    ///   `Ok(Err(Option<TableInfoValue>))` is returned. The `Option<TableInfoValue>` indicates
+    ///   the current associated value of the key.
+    /// - If any error happens during the operation, an `Err(Error)` is returned.
+    pub async fn compare_and_put(
         &self,
         table_id: TableId,
         expect: Option<TableInfoValue>,
         table_info: RawTableInfo,
-    ) -> Result<std::result::Result<(), Option<Vec<u8>>>> {
+    ) -> Result<std::result::Result<(), Option<TableInfoValue>>> {
         let key = TableInfoKey::new(table_id);
         let raw_key = key.as_raw_key();

@@ -82,17 +124,27 @@ impl TableInfoManager {
         };
         let raw_value = value.try_as_raw_value()?;

-        self.kv_backend
-            .compare_and_set(&raw_key, &expect, &raw_value)
-            .await
+        let req = CompareAndPutRequest::new()
+            .with_key(raw_key)
+            .with_expect(expect)
+            .with_value(raw_value);
+        let resp = self.kv_backend.compare_and_put(req).await?;
+        Ok(if resp.success {
+            Ok(())
+        } else {
+            Err(resp
+                .prev_kv
+                .map(|x| TableInfoValue::try_from_raw_value(x.value))
+                .transpose()?)
+        })
     }

     pub async fn remove(&self, table_id: TableId) -> Result<()> {
-        let key = TableInfoKey::new(table_id);
-        let removed_key = to_removed_key(&String::from_utf8_lossy(key.as_raw_key().as_slice()));
-        self.kv_backend
-            .move_value(&key.as_raw_key(), removed_key.as_bytes())
-            .await
+        let key = TableInfoKey::new(table_id).as_raw_key();
+        let removed_key = to_removed_key(&String::from_utf8_lossy(&key));
+        let req = MoveValueRequest::new(key, removed_key.as_bytes());
+        self.kv_backend.move_value(req).await?;
+        Ok(())
     }
 }

@@ -107,6 +159,7 @@ mod tests {
     use super::*;
     use crate::kv_backend::memory::MemoryKvBackend;
     use crate::kv_backend::KvBackend;
+    use crate::rpc::store::PutRequest;

     #[tokio::test]
     async fn test_table_info_manager() {

@@ -120,10 +173,18 @@ mod tests {
             }
             .try_as_raw_value()
             .unwrap();
-            backend.set(&key, &val).await.unwrap();
+            let req = PutRequest::new().with_key(key).with_value(val);
+            backend.put(req).await.unwrap();
         }

         let manager = TableInfoManager::new(backend.clone());
+        assert!(manager.create(99, &new_table_info(99)).await.is_ok());
+        assert!(manager.create(99, &new_table_info(99)).await.is_ok());
+
+        let result = manager.create(99, &new_table_info(88)).await;
+        let err_msg = result.unwrap_err().to_string();
+        assert!(err_msg
+            .contains("Unexpected: TableInfoValue for table 99 is updated before it is created!"));

         let val = manager.get(1).await.unwrap().unwrap();
         assert_eq!(

@@ -135,9 +196,23 @@ mod tests {
         );
         assert!(manager.get(4).await.unwrap().is_none());

         // test cas failed, current value is not set
         let table_info = new_table_info(4);
         let result = manager
-            .compare_and_set(4, None, table_info.clone())
+            .compare_and_put(
+                4,
+                Some(TableInfoValue {
+                    table_info: table_info.clone(),
+                    version: 0,
+                }),
+                table_info.clone(),
+            )
+            .await
+            .unwrap();
+        assert!(result.unwrap_err().is_none());
+
+        let result = manager
+            .compare_and_put(4, None, table_info.clone())
             .await
             .unwrap();
         assert!(result.is_ok());

@@ -145,10 +220,10 @@ mod tests {
         // test cas failed, the new table info is not set
         let new_table_info = new_table_info(4);
         let result = manager
-            .compare_and_set(4, None, new_table_info.clone())
+            .compare_and_put(4, None, new_table_info.clone())
             .await
             .unwrap();
-        let actual = TableInfoValue::try_from_raw_value(result.unwrap_err().unwrap()).unwrap();
+        let actual = result.unwrap_err().unwrap();
         assert_eq!(
             actual,
             TableInfoValue {

@@ -159,7 +234,7 @@ mod tests {
         // test cas success
         let result = manager
-            .compare_and_set(4, Some(actual), new_table_info.clone())
+            .compare_and_put(4, Some(actual), new_table_info.clone())
             .await
             .unwrap();
         assert!(result.is_ok());

@@ -171,8 +246,8 @@ mod tests {
             .await
             .unwrap()
             .unwrap();
-        assert_eq!(b"__removed-__table_info/4", kv.0.as_slice());
-        let value = TableInfoValue::try_from_raw_value(kv.1).unwrap();
+        assert_eq!(b"__removed-__table_info/4", kv.key.as_slice());
+        let value = TableInfoValue::try_from_raw_value(kv.value).unwrap();
         assert_eq!(value.table_info, new_table_info);
         assert_eq!(value.version, 1);
     }
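The nested `Result` that `compare_and_put` returns is easy to misread: the outer layer carries backend errors, the inner layer carries the CAS outcome. A sketch of how a caller branches on it, using only the types defined in this file (the surrounding imports are assumed to be in scope):

// Sketch: the three outcomes of compare_and_put, per its doc comment above.
async fn create_if_absent(
    manager: &TableInfoManager,
    table_id: TableId,
    info: RawTableInfo,
) -> Result<()> {
    match manager.compare_and_put(table_id, None, info).await? {
        Ok(()) => {}           // the key was absent and is now created
        Err(Some(_curr)) => {} // lost the race; `_curr` is the stored TableInfoValue
        Err(None) => {}        // CAS failed with no stored value; treated as Unexpected above
    }
    Ok(())
}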
src/common/meta/src/key/table_name.rs (new file, 389 lines)
@@ -0,0 +1,389 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::sync::Arc;

use serde::{Deserialize, Serialize};
use snafu::{ensure, OptionExt};
use table::metadata::TableId;

use super::{TABLE_NAME_KEY_PATTERN, TABLE_NAME_KEY_PREFIX};
use crate::error::{
    Error, InvalidTableMetadataSnafu, RenameTableSnafu, Result, TableAlreadyExistsSnafu,
    TableNotExistSnafu, UnexpectedSnafu,
};
use crate::key::{to_removed_key, TableMetaKey};
use crate::kv_backend::memory::MemoryKvBackend;
use crate::kv_backend::txn::{Compare, CompareOp, Txn, TxnOp};
use crate::kv_backend::KvBackendRef;
use crate::rpc::store::{CompareAndPutRequest, MoveValueRequest, RangeRequest};
use crate::table_name::TableName;

#[derive(Debug, Clone, Copy)]
pub struct TableNameKey<'a> {
    pub catalog: &'a str,
    pub schema: &'a str,
    pub table: &'a str,
}

impl<'a> TableNameKey<'a> {
    pub fn new(catalog: &'a str, schema: &'a str, table: &'a str) -> Self {
        Self {
            catalog,
            schema,
            table,
        }
    }

    pub fn prefix_to_table(catalog: &str, schema: &str) -> String {
        format!("{}/{}/{}", TABLE_NAME_KEY_PREFIX, catalog, schema)
    }

    fn strip_table_name(raw_key: &[u8]) -> Result<String> {
        let key = String::from_utf8(raw_key.to_vec()).map_err(|e| {
            InvalidTableMetadataSnafu {
                err_msg: format!(
                    "TableNameKey '{}' is not a valid UTF8 string: {e}",
                    String::from_utf8_lossy(raw_key)
                ),
            }
            .build()
        })?;
        let captures =
            TABLE_NAME_KEY_PATTERN
                .captures(&key)
                .context(InvalidTableMetadataSnafu {
                    err_msg: format!("Invalid TableNameKey '{key}'"),
                })?;
        // Safety: pass the regex check above
        Ok(captures[3].to_string())
    }
}

impl TableMetaKey for TableNameKey<'_> {
    fn as_raw_key(&self) -> Vec<u8> {
        format!(
            "{}/{}",
            Self::prefix_to_table(self.catalog, self.schema),
            self.table
        )
        .into_bytes()
    }
}

impl<'a> From<&'a TableName> for TableNameKey<'a> {
    fn from(value: &'a TableName) -> Self {
        Self {
            catalog: &value.catalog_name,
            schema: &value.schema_name,
            table: &value.table_name,
        }
    }
}

impl From<TableNameKey<'_>> for TableName {
    fn from(value: TableNameKey<'_>) -> Self {
        Self {
            catalog_name: value.catalog.to_string(),
            schema_name: value.schema.to_string(),
            table_name: value.table.to_string(),
        }
    }
}

impl<'a> TryFrom<&'a str> for TableNameKey<'a> {
    type Error = Error;

    fn try_from(s: &'a str) -> Result<Self> {
        let captures = TABLE_NAME_KEY_PATTERN
            .captures(s)
            .context(InvalidTableMetadataSnafu {
                err_msg: format!("Illegal TableNameKey format: '{s}'"),
            })?;
        // Safety: pass the regex check above
        Ok(Self {
            catalog: captures.get(1).unwrap().as_str(),
            schema: captures.get(2).unwrap().as_str(),
            table: captures.get(3).unwrap().as_str(),
        })
    }
}

#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq)]
pub struct TableNameValue {
    table_id: TableId,
}

impl TableNameValue {
    pub fn new(table_id: TableId) -> Self {
        Self { table_id }
    }

    pub fn table_id(&self) -> TableId {
        self.table_id
    }
}

pub struct TableNameManager {
    kv_backend: KvBackendRef,
}

impl Default for TableNameManager {
    fn default() -> Self {
        Self::new(Arc::new(MemoryKvBackend::default()))
    }
}

impl TableNameManager {
    pub fn new(kv_backend: KvBackendRef) -> Self {
        Self { kv_backend }
    }

    /// Create TableName key and value. If the key already exists, check if the value is the same.
    pub async fn create(&self, key: &TableNameKey<'_>, table_id: TableId) -> Result<()> {
        let raw_key = key.as_raw_key();
        let value = TableNameValue::new(table_id);
        let raw_value = value.try_as_raw_value()?;
        let req = CompareAndPutRequest::new()
            .with_key(raw_key)
            .with_value(raw_value);
        let result = self.kv_backend.compare_and_put(req).await?;
        if !result.success {
            let Some(curr) = result
                .prev_kv
                .map(|x| TableNameValue::try_from_raw_value(x.value))
                .transpose()? else {
                return UnexpectedSnafu {
                    err_msg: format!("compare_and_put expect None but failed with current value None, key: {key}, value: {value:?}"),
                }.fail()
            };
            ensure!(
                curr.table_id == table_id,
                TableAlreadyExistsSnafu {
                    table_id: curr.table_id
                }
            );
        }
        Ok(())
    }

    /// Rename a TableNameKey to a new table name. Will check whether the TableNameValue matches the
    /// `expected_table_id` first. Can be executed again if the first invocation is successful.
    pub async fn rename(
        &self,
        key: TableNameKey<'_>,
        expected_table_id: TableId,
        new_table_name: &str,
    ) -> Result<()> {
        let new_key = TableNameKey::new(key.catalog, key.schema, new_table_name);

        if let Some(value) = self.get(key).await? {
            ensure!(
                value.table_id == expected_table_id,
                RenameTableSnafu {
                    reason: format!(
                        "the input table name '{}' and id '{expected_table_id}' not match",
                        Into::<TableName>::into(key)
                    ),
                }
            );

            let txn = Txn::new()
                .when(vec![
                    Compare::with_value(
                        key.as_raw_key(),
                        CompareOp::Equal,
                        value.try_as_raw_value()?,
                    ),
                    Compare::with_not_exist_value(new_key.as_raw_key(), CompareOp::Equal),
                ])
                .and_then(vec![
                    TxnOp::Delete(key.as_raw_key()),
                    TxnOp::Put(new_key.as_raw_key(), value.try_as_raw_value()?),
                ]);

            let resp = self.kv_backend.txn(txn).await?;
            ensure!(
                resp.succeeded,
                RenameTableSnafu {
                    reason: format!("txn failed with response: {:?}", resp.responses)
                }
            );
        } else {
            let Some(value) = self.get(new_key).await? else {
                // If we can't get the table by its original name, nor can we get it by its altered
                // name, then the table must not exist in the first place.
                return TableNotExistSnafu {
                    table_name: TableName::from(key).to_string(),
                }.fail();
            };

            ensure!(
                value.table_id == expected_table_id,
                TableAlreadyExistsSnafu {
                    table_id: value.table_id
                }
            );
        }
        Ok(())
    }

    pub async fn get(&self, key: TableNameKey<'_>) -> Result<Option<TableNameValue>> {
        let raw_key = key.as_raw_key();
        self.kv_backend
            .get(&raw_key)
            .await?
            .map(|x| TableNameValue::try_from_raw_value(x.value))
            .transpose()
    }

    pub async fn tables(&self, catalog: &str, schema: &str) -> Result<Vec<String>> {
        let key = TableNameKey::prefix_to_table(catalog, schema).into_bytes();
        let req = RangeRequest::new().with_prefix(key);
        let resp = self.kv_backend.range(req).await?;
        let table_names = resp
            .kvs
            .into_iter()
            .map(|kv| TableNameKey::strip_table_name(kv.key()))
            .collect::<Result<Vec<_>>>()?;
        Ok(table_names)
    }

    pub async fn remove(&self, key: TableNameKey<'_>) -> Result<()> {
        let raw_key = key.as_raw_key();
        let removed_key = to_removed_key(&String::from_utf8_lossy(&raw_key));
        let req = MoveValueRequest::new(raw_key, removed_key.as_bytes());
        let _ = self.kv_backend.move_value(req).await?;
        Ok(())
    }
}

#[cfg(test)]
mod tests {
    use std::sync::Arc;

    use super::*;
    use crate::kv_backend::memory::MemoryKvBackend;
    use crate::kv_backend::KvBackend;

    #[tokio::test]
    async fn test_table_name_manager() {
        let backend = Arc::new(MemoryKvBackend::default());
        let manager = TableNameManager::new(backend.clone());

        for i in 1..=3 {
            let table_name = format!("table_{}", i);
            let key = TableNameKey::new("my_catalog", "my_schema", &table_name);
            assert!(manager.create(&key, i).await.is_ok());
        }

        let key = TableNameKey::new("my_catalog", "my_schema", "my_table");
        assert!(manager.create(&key, 99).await.is_ok());
        assert!(manager.create(&key, 99).await.is_ok());

        let result = manager.create(&key, 9).await;
        let err_msg = result.unwrap_err().to_string();
        assert!(err_msg.contains("Table already exists, table_id: 99"));

        let value = manager.get(key).await.unwrap().unwrap();
        assert_eq!(value.table_id(), 99);
        let not_existed = TableNameKey::new("x", "y", "z");
        assert!(manager.get(not_existed).await.unwrap().is_none());

        assert!(manager.remove(key).await.is_ok());
        let kv = backend
            .get(b"__removed-__table_name/my_catalog/my_schema/my_table")
            .await
            .unwrap()
            .unwrap();
        let value = TableNameValue::try_from_raw_value(kv.value).unwrap();
        assert_eq!(value.table_id(), 99);

        let key = TableNameKey::new("my_catalog", "my_schema", "table_1");
        assert!(manager.rename(key, 1, "table_1_new").await.is_ok());
        assert!(manager.rename(key, 1, "table_1_new").await.is_ok());

        let result = manager.rename(key, 2, "table_1_new").await;
        let err_msg = result.unwrap_err().to_string();
        assert!(err_msg.contains("Table already exists, table_id: 1"));

        let result = manager
            .rename(
                TableNameKey::new("my_catalog", "my_schema", "table_2"),
                22,
                "table_2_new",
            )
            .await;
        let err_msg = result.unwrap_err().to_string();
        assert!(err_msg.contains("Failed to rename table, reason: the input table name 'my_catalog.my_schema.table_2' and id '22' not match"));

        let result = manager.rename(not_existed, 1, "zz").await;
        let err_msg = result.unwrap_err().to_string();
        assert!(err_msg.contains("Table does not exist, table_name: x.y.z"));

        let tables = manager.tables("my_catalog", "my_schema").await.unwrap();
        assert_eq!(tables.len(), 3);
        assert_eq!(tables, vec!["table_1_new", "table_2", "table_3"]);
    }

    #[test]
    fn test_strip_table_name() {
        fn test_err(raw_key: &[u8]) {
            assert!(TableNameKey::strip_table_name(raw_key).is_err());
        }

        test_err(b"");
        test_err(vec![0u8, 159, 146, 150].as_slice()); // invalid UTF8 string
        test_err(b"invalid_prefix/my_catalog/my_schema/my_table");
        test_err(b"__table_name/");
        test_err(b"__table_name/invalid_len_1");
        test_err(b"__table_name/invalid_len_2/x");
        test_err(b"__table_name/invalid_len_4/x/y/z");
        test_err(b"__table_name/000_invalid_catalog/y/z");
        test_err(b"__table_name/x/000_invalid_schema/z");
        test_err(b"__table_name/x/y/000_invalid_table");

        fn test_ok(table_name: &str) {
            assert_eq!(
                table_name,
                TableNameKey::strip_table_name(
                    format!("__table_name/my_catalog/my_schema/{}", table_name).as_bytes()
                )
                .unwrap()
            );
        }
        test_ok("my_table");
        test_ok("cpu:metrics");
        test_ok(":cpu:metrics");
    }

    #[test]
    fn test_serde() {
        let key = TableNameKey::new("my_catalog", "my_schema", "my_table");
        let raw_key = key.as_raw_key();
        assert_eq!(
            b"__table_name/my_catalog/my_schema/my_table",
            raw_key.as_slice()
        );

        let value = TableNameValue::new(1);
        let literal = br#"{"table_id":1}"#;

        assert_eq!(value.try_as_raw_value().unwrap(), literal);
        assert_eq!(
            TableNameValue::try_from_raw_value(literal.to_vec()).unwrap(),
            value
        );
    }
}
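Note that `rename` is deliberately retry-safe: when the old key is already gone, it succeeds only if the new key already maps to the expected table id. A sketch of the resulting idempotent call pattern, taken straight from the test above:

// Sketch: repeating a successful rename is a no-op, not an error.
async fn rename_with_retry(manager: &TableNameManager) -> Result<()> {
    let key = TableNameKey::new("my_catalog", "my_schema", "table_1");
    manager.rename(key, 1, "table_1_new").await?; // runs the delete+put txn
    manager.rename(key, 1, "table_1_new").await?; // old key gone, new key holds id 1: Ok
    Ok(())
}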
@@ -15,13 +15,15 @@
 use std::collections::BTreeMap;

 use serde::{Deserialize, Serialize};
+use snafu::ensure;
 use store_api::storage::RegionNumber;
 use table::metadata::TableId;

 use super::TABLE_REGION_KEY_PREFIX;
-use crate::error::Result;
+use crate::error::{Result, UnexpectedSnafu};
 use crate::key::{to_removed_key, TableMetaKey};
 use crate::kv_backend::KvBackendRef;
+use crate::rpc::store::{CompareAndPutRequest, MoveValueRequest};
 use crate::DatanodeId;

 pub type RegionDistribution = BTreeMap<DatanodeId, Vec<RegionNumber>>;

@@ -48,6 +50,15 @@ pub struct TableRegionValue {
     version: u64,
 }

+impl TableRegionValue {
+    pub fn new(region_distribution: RegionDistribution) -> Self {
+        Self {
+            region_distribution,
+            version: 0,
+        }
+    }
+}
+
 pub struct TableRegionManager {
     kv_backend: KvBackendRef,
 }

@@ -63,16 +74,50 @@ impl TableRegionManager {
         self.kv_backend
             .get(&raw_key)
             .await?
-            .map(|x| TableRegionValue::try_from_raw_value(x.1))
+            .map(|x| TableRegionValue::try_from_raw_value(x.value))
             .transpose()
     }

-    pub async fn compare_and_set(
+    /// Create TableRegion key and value. If the key already exists, check if the value is the same.
+    pub async fn create(
+        &self,
+        table_id: TableId,
+        region_distribution: &RegionDistribution,
+    ) -> Result<()> {
+        let result = self
+            .compare_and_put(table_id, None, region_distribution.clone())
+            .await?;
+        if let Err(curr) = result {
+            let Some(curr) = curr else {
+                return UnexpectedSnafu {
+                    err_msg: format!("compare_and_put expect None but failed with current value None, table_id: {table_id}, region_distribution: {region_distribution:?}"),
+                }.fail()
+            };
+            ensure!(
+                &curr.region_distribution == region_distribution,
+                UnexpectedSnafu {
+                    err_msg: format!(
+                        "TableRegionValue for table {table_id} is updated before it is created!"
+                    )
+                }
+            )
+        }
+        Ok(())
+    }
+
+    /// Compare and put the value of a key. `expect` is the expected value; if the backend's current
+    /// value associated with the key is the same as `expect`, the value will be updated to `val`.
+    ///
+    /// - If the compare-and-put operation successfully updated the value, this method returns `Ok(Ok(()))`.
+    /// - If the associated value is not the same as `expect`, no value is updated and an
+    ///   `Ok(Err(Option<TableRegionValue>))` is returned; the `Err` carries the current associated
+    ///   value of the key.
+    /// - If any error happens during the operation, an `Err(Error)` is returned.
+    pub async fn compare_and_put(
         &self,
         table_id: TableId,
         expect: Option<TableRegionValue>,
         region_distribution: RegionDistribution,
-    ) -> Result<std::result::Result<(), Option<Vec<u8>>>> {
+    ) -> Result<std::result::Result<(), Option<TableRegionValue>>> {
         let key = TableRegionKey::new(table_id);
         let raw_key = key.as_raw_key();

@@ -88,17 +133,30 @@ impl TableRegionManager {
         };
         let raw_value = value.try_as_raw_value()?;

-        self.kv_backend
-            .compare_and_set(&raw_key, &expect, &raw_value)
-            .await
+        let req = CompareAndPutRequest::new()
+            .with_key(raw_key)
+            .with_expect(expect)
+            .with_value(raw_value);
+        let resp = self.kv_backend.compare_and_put(req).await?;
+        Ok(if resp.success {
+            Ok(())
+        } else {
+            Err(resp
+                .prev_kv
+                .map(|x| TableRegionValue::try_from_raw_value(x.value))
+                .transpose()?)
+        })
     }

-    pub async fn remove(&self, table_id: TableId) -> Result<()> {
-        let key = TableRegionKey::new(table_id);
-        let remove_key = to_removed_key(&String::from_utf8_lossy(key.as_raw_key().as_slice()));
-        self.kv_backend
-            .move_value(&key.as_raw_key(), remove_key.as_bytes())
-            .await
+    pub async fn remove(&self, table_id: TableId) -> Result<Option<TableRegionValue>> {
+        let key = TableRegionKey::new(table_id).as_raw_key();
+        let remove_key = to_removed_key(&String::from_utf8_lossy(&key));
+        let req = MoveValueRequest::new(key, remove_key.as_bytes());
+
+        let resp = self.kv_backend.move_value(req).await?;
+        resp.0
+            .map(|x| TableRegionValue::try_from_raw_value(x.value))
+            .transpose()
     }
 }

@@ -117,35 +175,42 @@ mod tests {

         let region_distribution =
             RegionDistribution::from([(1, vec![1, 2, 3]), (2, vec![4, 5, 6])]);
-        let new_region_distribution =
-            RegionDistribution::from([(1, vec![4, 5, 6]), (2, vec![1, 2, 3])]);

         let result = manager
-            .compare_and_set(1, None, region_distribution.clone())
+            .compare_and_put(1, None, region_distribution.clone())
             .await
             .unwrap();
         assert!(result.is_ok());

+        let new_region_distribution =
+            RegionDistribution::from([(1, vec![4, 5, 6]), (2, vec![1, 2, 3])]);
         let curr = manager
-            .compare_and_set(1, None, new_region_distribution.clone())
+            .compare_and_put(1, None, new_region_distribution.clone())
             .await
             .unwrap()
             .unwrap_err()
             .unwrap();
-        let curr = TableRegionValue::try_from_raw_value(curr).unwrap();
         assert_eq!(
             curr,
             TableRegionValue {
-                region_distribution,
+                region_distribution: region_distribution.clone(),
                 version: 0
             }
         );

         assert!(manager
-            .compare_and_set(1, Some(curr), new_region_distribution.clone())
+            .compare_and_put(1, Some(curr), new_region_distribution.clone())
             .await
             .unwrap()
             .is_ok());

+        assert!(manager.create(99, &region_distribution).await.is_ok());
+        assert!(manager.create(99, &region_distribution).await.is_ok());
+
+        let result = manager.create(99, &new_region_distribution).await;
+        let err_msg = result.unwrap_err().to_string();
+        assert!(err_msg.contains("TableRegionValue for table 99 is updated before it is created!"));
+
         let value = manager.get(1).await.unwrap().unwrap();
         assert_eq!(
             value,

@@ -154,17 +219,33 @@ mod tests {
                 version: 1
             }
         );
+        let value = manager.get(99).await.unwrap().unwrap();
+        assert_eq!(
+            value,
+            TableRegionValue {
+                region_distribution,
+                version: 0
+            }
+        );
         assert!(manager.get(2).await.unwrap().is_none());

-        assert!(manager.remove(1).await.is_ok());
+        let value = manager.remove(1).await.unwrap().unwrap();
+        assert_eq!(
+            value,
+            TableRegionValue {
+                region_distribution: new_region_distribution.clone(),
+                version: 1
+            }
+        );
+        assert!(manager.remove(123).await.unwrap().is_none());

         let kv = backend
             .get(b"__removed-__table_region/1")
             .await
             .unwrap()
             .unwrap();
-        assert_eq!(b"__removed-__table_region/1", kv.0.as_slice());
-        let value = TableRegionValue::try_from_raw_value(kv.1).unwrap();
+        assert_eq!(b"__removed-__table_region/1", kv.key.as_slice());
+        let value = TableRegionValue::try_from_raw_value(kv.value).unwrap();
         assert_eq!(value.region_distribution, new_region_distribution);
         assert_eq!(value.version, 1);
     }
@@ -12,21 +12,25 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

+use std::fmt::Display;
+
 use api::v1::meta::TableName;
+use table::metadata::TableId;

 use crate::key::to_removed_key;

 pub const TABLE_ROUTE_PREFIX: &str = "__meta_table_route";

 #[derive(Copy, Clone)]
 pub struct TableRouteKey<'a> {
-    pub table_id: u64,
+    pub table_id: TableId,
     pub catalog_name: &'a str,
     pub schema_name: &'a str,
     pub table_name: &'a str,
 }

 impl<'a> TableRouteKey<'a> {
-    pub fn with_table_name(table_id: u64, t: &'a TableName) -> Self {
+    pub fn with_table_name(table_id: TableId, t: &'a TableName) -> Self {
         Self {
             table_id,
             catalog_name: &t.catalog_name,
@@ -42,12 +46,14 @@ impl<'a> TableRouteKey<'a> {
         )
     }

-    pub fn key(&self) -> String {
-        format!("{}-{}", self.prefix(), self.table_id)
-    }
-
     pub fn removed_key(&self) -> String {
-        to_removed_key(&self.key())
+        to_removed_key(&self.to_string())
     }
 }

+impl<'a> Display for TableRouteKey<'a> {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        write!(f, "{}-{}", self.prefix(), self.table_id)
+    }
+}
+
@@ -69,7 +75,7 @@ mod tests {
         let prefix = key.prefix();
         assert_eq!("__meta_table_route-greptime-public-demo", prefix);

-        let key_string = key.key();
+        let key_string = key.to_string();
         assert_eq!("__meta_table_route-greptime-public-demo-123", key_string);

         let removed = key.removed_key();

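Editor's aside — illustration, not part of the diff. With `key()` folded into the `Display` impl, the route key and its tombstone render as below; the values mirror the test above, and the "__removed-" tombstone prefix is inferred from the table_region test earlier in this diff:

let key = TableRouteKey {
    table_id: 123,
    catalog_name: "greptime",
    schema_name: "public",
    table_name: "demo",
};
assert_eq!("__meta_table_route-greptime-public-demo-123", key.to_string());
// to_removed_key simply prepends the tombstone marker:
assert_eq!(
    "__removed-__meta_table_route-greptime-public-demo-123",
    key.removed_key()
);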
@@ -13,68 +13,94 @@
 // limitations under the License.

 pub mod memory;
+pub mod txn;

+use std::any::Any;
-use std::pin::Pin;
 use std::sync::Arc;

 use async_trait::async_trait;
 use common_error::ext::ErrorExt;
-use futures::{Stream, StreamExt};
+pub use txn::TxnService;

 use crate::error::Error;
+use crate::rpc::store::{
+    BatchDeleteRequest, BatchDeleteResponse, BatchGetRequest, BatchGetResponse, BatchPutRequest,
+    BatchPutResponse, CompareAndPutRequest, CompareAndPutResponse, DeleteRangeRequest,
+    DeleteRangeResponse, MoveValueRequest, MoveValueResponse, PutRequest, PutResponse,
+    RangeRequest, RangeResponse,
+};
+use crate::rpc::KeyValue;

-#[derive(Debug, Clone, PartialEq)]
-pub struct Kv(pub Vec<u8>, pub Vec<u8>);
-
-pub type ValueIter<'a, E> = Pin<Box<dyn Stream<Item = Result<Kv, E>> + Send + 'a>>;
-
-pub type KvBackendRef = Arc<dyn KvBackend<Error = Error>>;
+pub type KvBackendRef = Arc<dyn KvBackend<Error = Error> + Send + Sync>;

 #[async_trait]
-pub trait KvBackend: Send + Sync {
-    type Error: ErrorExt;
-
-    fn range<'a, 'b>(&'a self, key: &[u8]) -> ValueIter<'b, Self::Error>
-    where
-        'a: 'b;
-
-    async fn set(&self, key: &[u8], val: &[u8]) -> Result<(), Self::Error>;
-
-    /// Compare and set the value of a key. `expect` is the expected value; if the backend's
-    /// current value associated with the key is the same as `expect`, the value will be updated
-    /// to `val`.
-    ///
-    /// - If the compare-and-set operation successfully updated the value, this method returns `Ok(Ok(()))`.
-    /// - If the associated value is not the same as `expect`, no value is updated and an `Ok(Err(Option<Vec<u8>>))`
-    ///   is returned; the `Err` side carries the current associated value of the key.
-    /// - If any error happens during the operation, an `Err(Error)` is returned.
-    async fn compare_and_set(
-        &self,
-        key: &[u8],
-        expect: &[u8],
-        val: &[u8],
-    ) -> Result<Result<(), Option<Vec<u8>>>, Self::Error>;
-
-    async fn delete_range(&self, key: &[u8], end: &[u8]) -> Result<(), Self::Error>;
-
-    async fn delete(&self, key: &[u8]) -> Result<(), Self::Error> {
-        self.delete_range(key, &[]).await
-    }
-
-    /// The default `get` is implemented based on the `range` method.
-    async fn get(&self, key: &[u8]) -> Result<Option<Kv>, Self::Error> {
-        let mut iter = self.range(key);
-        while let Some(r) = iter.next().await {
-            let kv = r?;
-            if kv.0 == key {
-                return Ok(Some(kv));
-            }
-        }
-        return Ok(None);
-    }
-
-    /// MoveValue atomically renames the key to the given updated key.
-    async fn move_value(&self, from_key: &[u8], to_key: &[u8]) -> Result<(), Self::Error>;
+pub trait KvBackend: TxnService
+where
+    Self::Error: ErrorExt,
+{
+    fn name(&self) -> &str;
+
+    fn as_any(&self) -> &dyn Any;
+
+    async fn range(&self, req: RangeRequest) -> Result<RangeResponse, Self::Error>;
+
+    async fn put(&self, req: PutRequest) -> Result<PutResponse, Self::Error>;
+
+    async fn batch_put(&self, req: BatchPutRequest) -> Result<BatchPutResponse, Self::Error>;
+
+    async fn batch_get(&self, req: BatchGetRequest) -> Result<BatchGetResponse, Self::Error>;
+
+    async fn compare_and_put(
+        &self,
+        req: CompareAndPutRequest,
+    ) -> Result<CompareAndPutResponse, Self::Error>;
+
+    async fn delete_range(
+        &self,
+        req: DeleteRangeRequest,
+    ) -> Result<DeleteRangeResponse, Self::Error>;
+
+    async fn batch_delete(
+        &self,
+        req: BatchDeleteRequest,
+    ) -> Result<BatchDeleteResponse, Self::Error>;
+
+    /// MoveValue atomically renames the key to the given updated key.
+    async fn move_value(&self, req: MoveValueRequest) -> Result<MoveValueResponse, Self::Error>;
+
+    // The following methods are implemented based on the methods above,
+    // and a higher-level interface is provided to simplify usage.
+
+    async fn get(&self, key: &[u8]) -> Result<Option<KeyValue>, Self::Error> {
+        let req = RangeRequest::new().with_key(key.to_vec());
+        let mut resp = self.range(req).await?;
+        Ok(if resp.kvs.is_empty() {
+            None
+        } else {
+            Some(resp.kvs.remove(0))
+        })
+    }
+
+    /// Checks whether the key exists, without returning the value.
+    /// If the value is large, this method is more efficient than `get`.
+    async fn exists(&self, key: &[u8]) -> Result<bool, Self::Error> {
+        let req = RangeRequest::new().with_key(key.to_vec()).with_keys_only();
+        let resp = self.range(req).await?;
+        Ok(!resp.kvs.is_empty())
+    }
+
+    async fn delete(&self, key: &[u8], prev_kv: bool) -> Result<Option<KeyValue>, Self::Error> {
+        let mut req = DeleteRangeRequest::new().with_key(key.to_vec());
+        if prev_kv {
+            req = req.with_prev_kv();
+        }
+
+        let resp = self.delete_range(req).await?;
+
+        if prev_kv {
+            Ok(resp.prev_kvs.into_iter().next())
+        } else {
+            Ok(None)
+        }
+    }
 }

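Editor's aside — illustration, not part of the diff. The default `get`/`exists`/`delete` helpers mean a backend only has to implement the request-oriented methods; a sketch against the in-memory backend, with crate paths assumed as above:

use common_meta::error::Error;
use common_meta::kv_backend::memory::MemoryKvBackend;
use common_meta::kv_backend::KvBackend;
use common_meta::rpc::store::PutRequest;

async fn helpers_demo() -> Result<(), Error> {
    let backend = MemoryKvBackend::<Error>::new();

    backend
        .put(PutRequest {
            key: b"k".to_vec(),
            value: b"v".to_vec(),
            prev_kv: false,
        })
        .await?;

    // `exists` issues a keys-only range request, so large values are never copied.
    assert!(backend.exists(b"k").await?);
    assert_eq!(b"v", backend.get(b"k").await?.unwrap().value());

    // `delete` hands back the previous KeyValue when `prev_kv` is true.
    let prev = backend.delete(b"k", true).await?.unwrap();
    assert_eq!(b"v", prev.value());
    Ok(())
}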
@@ -16,20 +16,32 @@ use std::any::Any;
 use std::collections::btree_map::Entry;
 use std::collections::BTreeMap;
 use std::fmt::{Display, Formatter};
+use std::marker::PhantomData;
+use std::ops::Range;
 use std::sync::RwLock;

-use async_stream::stream;
 use async_trait::async_trait;
 use common_error::ext::ErrorExt;
+use common_telemetry::timer;
 use serde::Serializer;

-use crate::error::Error;
-use crate::kv_backend::{Kv, KvBackend, ValueIter};
+use crate::kv_backend::txn::{Txn, TxnOp, TxnOpResponse, TxnRequest, TxnResponse};
+use crate::kv_backend::{KvBackend, TxnService};
+use crate::metrics::METRIC_META_TXN_REQUEST;
+use crate::rpc::store::{
+    BatchDeleteRequest, BatchDeleteResponse, BatchGetRequest, BatchGetResponse, BatchPutRequest,
+    BatchPutResponse, CompareAndPutRequest, CompareAndPutResponse, DeleteRangeRequest,
+    DeleteRangeResponse, MoveValueRequest, MoveValueResponse, PutRequest, PutResponse,
+    RangeRequest, RangeResponse,
+};
+use crate::rpc::KeyValue;

-pub struct MemoryKvBackend {
+pub struct MemoryKvBackend<T> {
     kvs: RwLock<BTreeMap<Vec<u8>, Vec<u8>>>,
+    _phantom: PhantomData<T>,
 }

-impl Display for MemoryKvBackend {
+impl<T> Display for MemoryKvBackend<T> {
     fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
         let kvs = self.kvs.read().unwrap();
         for (k, v) in kvs.iter() {
@@ -42,156 +54,623 @@ impl Display for MemoryKvBackend {
     }
 }

-impl Default for MemoryKvBackend {
+impl<T> Default for MemoryKvBackend<T> {
     fn default() -> Self {
         Self {
             kvs: RwLock::new(BTreeMap::new()),
+            _phantom: PhantomData,
         }
     }
 }

+impl<T> MemoryKvBackend<T> {
+    pub fn new() -> Self {
+        Self::default()
+    }
+
+    pub fn clear(&self) {
+        let mut kvs = self.kvs.write().unwrap();
+        kvs.clear();
+    }
+}

 #[async_trait]
-impl KvBackend for MemoryKvBackend {
-    type Error = Error;
-
-    fn range<'a, 'b>(&'a self, prefix: &[u8]) -> ValueIter<'b, Error>
-    where
-        'a: 'b,
-    {
-        let kvs = self.kvs.read().unwrap();
-        let kvs = kvs.clone();
-
-        let prefix = prefix.to_vec();
-        Box::pin(stream!({
-            for (k, v) in kvs.range(prefix.clone()..) {
-                if !k.starts_with(&prefix) {
-                    break;
-                }
-                yield Ok(Kv(k.clone(), v.clone()));
-            }
-        }))
-    }
-
-    async fn set(&self, key: &[u8], val: &[u8]) -> Result<(), Error> {
-        let mut kvs = self.kvs.write().unwrap();
-        let _ = kvs.insert(key.to_vec(), val.to_vec());
-        Ok(())
-    }
-
-    async fn compare_and_set(
-        &self,
-        key: &[u8],
-        expect: &[u8],
-        val: &[u8],
-    ) -> Result<Result<(), Option<Vec<u8>>>, Error> {
-        let key = key.to_vec();
-        let val = val.to_vec();
-
-        let mut kvs = self.kvs.write().unwrap();
-        let existed = kvs.entry(key);
-        Ok(match existed {
-            Entry::Vacant(e) => {
-                if expect.is_empty() {
-                    let _ = e.insert(val);
-                    Ok(())
-                } else {
-                    Err(None)
-                }
-            }
-            Entry::Occupied(mut existed) => {
-                if existed.get() == expect {
-                    let _ = existed.insert(val);
-                    Ok(())
-                } else {
-                    Err(Some(existed.get().clone()))
-                }
-            }
-        })
-    }
-
-    async fn delete_range(&self, key: &[u8], end: &[u8]) -> Result<(), Error> {
-        let mut kvs = self.kvs.write().unwrap();
-        if end.is_empty() {
-            let _ = kvs.remove(key);
-        } else {
-            let start = key.to_vec();
-            let end = end.to_vec();
-            let range = start..end;
-
-            kvs.retain(|k, _| !range.contains(k));
-        }
-        Ok(())
-    }
-
-    async fn move_value(&self, from_key: &[u8], to_key: &[u8]) -> Result<(), Error> {
-        let mut kvs = self.kvs.write().unwrap();
-        if let Some(v) = kvs.remove(from_key) {
-            let _ = kvs.insert(to_key.to_vec(), v);
-        }
-        Ok(())
-    }
-}
+impl<T: ErrorExt + Send + Sync + 'static> KvBackend for MemoryKvBackend<T> {
+    fn name(&self) -> &str {
+        "Memory"
+    }
+
+    fn as_any(&self) -> &dyn Any {
+        self
+    }
+
+    async fn range(&self, req: RangeRequest) -> Result<RangeResponse, Self::Error> {
+        let RangeRequest {
+            key,
+            range_end,
+            limit,
+            keys_only,
+        } = req;
+
+        let kvs = self.kvs.read().unwrap();
+
+        let iter: Box<dyn Iterator<Item = (&Vec<u8>, &Vec<u8>)>> = if range_end.is_empty() {
+            Box::new(kvs.get_key_value(&key).into_iter())
+        } else {
+            Box::new(kvs.range(key..range_end))
+        };
+        let mut kvs = iter
+            .map(|(k, v)| {
+                let key = k.clone();
+                let value = if keys_only { vec![] } else { v.clone() };
+                KeyValue { key, value }
+            })
+            .collect::<Vec<_>>();
+
+        let more = if limit > 0 && kvs.len() > limit as usize {
+            kvs.truncate(limit as usize);
+            true
+        } else {
+            false
+        };
+
+        Ok(RangeResponse { kvs, more })
+    }
+
+    async fn put(&self, req: PutRequest) -> Result<PutResponse, Self::Error> {
+        let PutRequest {
+            key,
+            value,
+            prev_kv,
+        } = req;
+
+        let mut kvs = self.kvs.write().unwrap();
+
+        let prev_kv = if prev_kv {
+            kvs.insert(key.clone(), value)
+                .map(|value| KeyValue { key, value })
+        } else {
+            kvs.insert(key, value);
+            None
+        };
+
+        Ok(PutResponse { prev_kv })
+    }
+
+    async fn batch_put(&self, req: BatchPutRequest) -> Result<BatchPutResponse, Self::Error> {
+        let mut kvs = self.kvs.write().unwrap();
+
+        let mut prev_kvs = if req.prev_kv {
+            Vec::with_capacity(req.kvs.len())
+        } else {
+            vec![]
+        };
+
+        for kv in req.kvs {
+            if req.prev_kv {
+                if let Some(value) = kvs.insert(kv.key.clone(), kv.value) {
+                    prev_kvs.push(KeyValue { key: kv.key, value });
+                }
+            } else {
+                kvs.insert(kv.key, kv.value);
+            }
+        }
+
+        Ok(BatchPutResponse { prev_kvs })
+    }
+
+    async fn batch_get(&self, req: BatchGetRequest) -> Result<BatchGetResponse, Self::Error> {
+        let kvs = self.kvs.read().unwrap();
+
+        let kvs = req
+            .keys
+            .into_iter()
+            .filter_map(|key| {
+                kvs.get_key_value(&key).map(|(k, v)| KeyValue {
+                    key: k.clone(),
+                    value: v.clone(),
+                })
+            })
+            .collect::<Vec<_>>();
+
+        Ok(BatchGetResponse { kvs })
+    }
+
+    async fn compare_and_put(
+        &self,
+        req: CompareAndPutRequest,
+    ) -> Result<CompareAndPutResponse, Self::Error> {
+        let CompareAndPutRequest { key, expect, value } = req;
+
+        let mut kvs = self.kvs.write().unwrap();
+
+        let existed = kvs.entry(key);
+        let (success, prev_kv) = match existed {
+            Entry::Vacant(e) => {
+                let expected = expect.is_empty();
+                if expected {
+                    let _ = e.insert(value);
+                }
+                (expected, None)
+            }
+            Entry::Occupied(mut existed) => {
+                let expected = existed.get() == &expect;
+                let prev_kv = if expected {
+                    let _ = existed.insert(value);
+                    None
+                } else {
+                    Some(KeyValue {
+                        key: existed.key().clone(),
+                        value: existed.get().clone(),
+                    })
+                };
+                (expected, prev_kv)
+            }
+        };
+
+        Ok(CompareAndPutResponse { success, prev_kv })
+    }
+
+    async fn delete_range(
+        &self,
+        req: DeleteRangeRequest,
+    ) -> Result<DeleteRangeResponse, Self::Error> {
+        let DeleteRangeRequest {
+            key,
+            range_end,
+            prev_kv,
+        } = req;
+
+        let mut kvs = self.kvs.write().unwrap();
+
+        let prev_kvs = if range_end.is_empty() {
+            kvs.remove(&key)
+                .into_iter()
+                .map(|value| KeyValue {
+                    key: key.clone(),
+                    value,
+                })
+                .collect::<Vec<_>>()
+        } else {
+            let range = Range {
+                start: key,
+                end: range_end,
+            };
+            kvs.drain_filter(|key, _| range.contains(key))
+                .map(Into::into)
+                .collect::<Vec<_>>()
+        };
+
+        Ok(DeleteRangeResponse {
+            deleted: prev_kvs.len() as i64,
+            prev_kvs: if prev_kv { prev_kvs } else { vec![] },
+        })
+    }
+
+    async fn batch_delete(
+        &self,
+        req: BatchDeleteRequest,
+    ) -> Result<BatchDeleteResponse, Self::Error> {
+        let mut kvs = self.kvs.write().unwrap();
+
+        let mut prev_kvs = if req.prev_kv {
+            Vec::with_capacity(req.keys.len())
+        } else {
+            vec![]
+        };
+
+        for key in req.keys {
+            if req.prev_kv {
+                if let Some(value) = kvs.remove(&key) {
+                    prev_kvs.push(KeyValue { key, value });
+                }
+            } else {
+                kvs.remove(&key);
+            }
+        }
+
+        Ok(BatchDeleteResponse { prev_kvs })
+    }
+
+    async fn move_value(&self, req: MoveValueRequest) -> Result<MoveValueResponse, Self::Error> {
+        let MoveValueRequest { from_key, to_key } = req;
+
+        let mut kvs = self.kvs.write().unwrap();
+
+        let kv = if let Some(v) = kvs.remove(&from_key) {
+            kvs.insert(to_key, v.clone());
+            Some(KeyValue {
+                key: from_key,
+                value: v,
+            })
+        } else {
+            kvs.get(&to_key).map(|v| KeyValue {
+                key: to_key,
+                value: v.clone(),
+            })
+        };
+
+        Ok(MoveValueResponse(kv))
+    }
+}
+
+#[async_trait]
+impl<T: ErrorExt + Send + Sync> TxnService for MemoryKvBackend<T> {
+    type Error = T;
+
+    async fn txn(&self, txn: Txn) -> Result<TxnResponse, Self::Error> {
+        let _timer = timer!(
+            METRIC_META_TXN_REQUEST,
+            &[("target", "memory"), ("op", "txn")]
+        );
+
+        let TxnRequest {
+            compare,
+            success,
+            failure,
+        } = txn.into();
+
+        let mut kvs = self.kvs.write().unwrap();
+
+        let succeeded = compare
+            .iter()
+            .all(|x| x.compare_with_value(kvs.get(&x.key)));
+
+        let do_txn = |txn_op| match txn_op {
+            TxnOp::Put(key, value) => {
+                let prev_value = kvs.insert(key.clone(), value);
+                let prev_kv = prev_value.map(|value| KeyValue { key, value });
+                TxnOpResponse::ResponsePut(PutResponse { prev_kv })
+            }
+
+            TxnOp::Get(key) => {
+                let value = kvs.get(&key);
+                let kvs = value
+                    .into_iter()
+                    .map(|value| KeyValue {
+                        key: key.clone(),
+                        value: value.clone(),
+                    })
+                    .collect();
+                TxnOpResponse::ResponseGet(RangeResponse { kvs, more: false })
+            }
+
+            TxnOp::Delete(key) => {
+                let prev_value = kvs.remove(&key);
+                let deleted = prev_value.as_ref().map(|x| x.len()).unwrap_or(0) as i64;
+
+                let prev_kvs = prev_value
+                    .into_iter()
+                    .map(|value| KeyValue {
+                        key: key.clone(),
+                        value,
+                    })
+                    .collect();
+                TxnOpResponse::ResponseDelete(DeleteRangeResponse { deleted, prev_kvs })
+            }
+        };
+
+        let responses: Vec<_> = if succeeded { success } else { failure }
+            .into_iter()
+            .map(do_txn)
+            .collect();
+
+        Ok(TxnResponse {
+            succeeded,
+            responses,
+        })
+    }
+}

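Editor's aside — illustration, not part of the diff. A put-if-absent transaction against the in-memory backend; the `and_then`/`or_else` builder methods and the `Compare::with_not_exist_value` signature are assumptions (modelled on the etcd-style API exercised by the txn tests later in this diff):

use common_meta::error::Error;
use common_meta::kv_backend::memory::MemoryKvBackend;
use common_meta::kv_backend::txn::{Compare, CompareOp, Txn, TxnOp};
use common_meta::kv_backend::TxnService;

async fn put_if_absent(backend: &MemoryKvBackend<Error>) -> Result<bool, Error> {
    let key = b"lease".to_vec();
    let txn = Txn::new()
        // Succeeds only when `key` does not exist yet...
        .when(vec![Compare::with_not_exist_value(key.clone(), CompareOp::Equal)])
        // ...in which case the key is written...
        .and_then(vec![TxnOp::Put(key.clone(), b"holder-a".to_vec())])
        // ...otherwise the current holder is read back.
        .or_else(vec![TxnOp::Get(key)]);
    let resp = backend.txn(txn).await?;
    Ok(resp.succeeded)
}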
 #[cfg(test)]
 mod tests {
-    use futures::TryStreamExt;
+    use std::sync::atomic::{AtomicU8, Ordering};
+    use std::sync::Arc;

     use super::*;
+    use crate::error::Error;
+    use crate::kv_backend::KvBackend;
+    use crate::rpc::store::{BatchGetRequest, BatchPutRequest};
+    use crate::rpc::KeyValue;
+    use crate::util;

+    async fn mock_mem_store_with_data() -> MemoryKvBackend<Error> {
+        let kv_store = MemoryKvBackend::<Error>::new();
+        let kvs = mock_kvs();
+
+        assert!(kv_store
+            .batch_put(BatchPutRequest {
+                kvs,
+                ..Default::default()
+            })
+            .await
+            .is_ok());
+
+        assert!(kv_store
+            .put(PutRequest {
+                key: b"key11".to_vec(),
+                value: b"val11".to_vec(),
+                ..Default::default()
+            })
+            .await
+            .is_ok());
+
+        kv_store
+    }
+
+    fn mock_kvs() -> Vec<KeyValue> {
+        vec![
+            KeyValue {
+                key: b"key1".to_vec(),
+                value: b"val1".to_vec(),
+            },
+            KeyValue {
+                key: b"key2".to_vec(),
+                value: b"val2".to_vec(),
+            },
+            KeyValue {
+                key: b"key3".to_vec(),
+                value: b"val3".to_vec(),
+            },
+        ]
+    }

     #[tokio::test]
-    async fn test_memory_kv_backend() {
-        let backend = MemoryKvBackend::default();
+    async fn test_put() {
+        let kv_store = mock_mem_store_with_data().await;

-        for i in 1..10 {
-            let key = format!("key{}", i);
-            let val = format!("val{}", i);
-            assert!(backend.set(key.as_bytes(), val.as_bytes()).await.is_ok());
-        }
+        let resp = kv_store
+            .put(PutRequest {
+                key: b"key11".to_vec(),
+                value: b"val12".to_vec(),
+                prev_kv: false,
+            })
+            .await
+            .unwrap();
+        assert!(resp.prev_kv.is_none());
+
+        let resp = kv_store
+            .put(PutRequest {
+                key: b"key11".to_vec(),
+                value: b"val13".to_vec(),
+                prev_kv: true,
+            })
+            .await
+            .unwrap();
+        let prev_kv = resp.prev_kv.unwrap();
+        assert_eq!(b"key11", prev_kv.key());
+        assert_eq!(b"val12", prev_kv.value());
     }

+    #[tokio::test]
+    async fn test_range() {
+        let kv_store = mock_mem_store_with_data().await;
+
+        let key = b"key1".to_vec();
+        let range_end = util::get_prefix_end_key(b"key1");
+
+        let resp = kv_store
+            .range(RangeRequest {
+                key: key.clone(),
+                range_end: range_end.clone(),
+                limit: 0,
+                keys_only: false,
+            })
+            .await
+            .unwrap();
+
+        assert_eq!(2, resp.kvs.len());
+        assert_eq!(b"key1", resp.kvs[0].key());
+        assert_eq!(b"val1", resp.kvs[0].value());
+        assert_eq!(b"key11", resp.kvs[1].key());
+        assert_eq!(b"val11", resp.kvs[1].value());
+
+        let resp = kv_store
+            .range(RangeRequest {
+                key: key.clone(),
+                range_end: range_end.clone(),
+                limit: 0,
+                keys_only: true,
+            })
+            .await
+            .unwrap();
+
+        assert_eq!(2, resp.kvs.len());
+        assert_eq!(b"key1", resp.kvs[0].key());
+        assert_eq!(b"", resp.kvs[0].value());
+        assert_eq!(b"key11", resp.kvs[1].key());
+        assert_eq!(b"", resp.kvs[1].value());
+
+        let resp = kv_store
+            .range(RangeRequest {
+                key: key.clone(),
+                limit: 0,
+                keys_only: false,
+                ..Default::default()
+            })
+            .await
+            .unwrap();
+
+        assert_eq!(1, resp.kvs.len());
+        assert_eq!(b"key1", resp.kvs[0].key());
+        assert_eq!(b"val1", resp.kvs[0].value());
+
+        let resp = kv_store
+            .range(RangeRequest {
+                key,
+                range_end,
+                limit: 1,
+                keys_only: false,
+            })
+            .await
+            .unwrap();
+
+        assert_eq!(1, resp.kvs.len());
+        assert_eq!(b"key1", resp.kvs[0].key());
+        assert_eq!(b"val1", resp.kvs[0].value());
+    }
+
+    #[tokio::test]
+    async fn test_batch_get() {
+        let kv_store = mock_mem_store_with_data().await;
+
+        let keys = vec![];
+        let resp = kv_store.batch_get(BatchGetRequest { keys }).await.unwrap();
+
+        assert!(resp.kvs.is_empty());
+
+        let keys = vec![b"key10".to_vec()];
+        let resp = kv_store.batch_get(BatchGetRequest { keys }).await.unwrap();
+
+        assert!(resp.kvs.is_empty());
+
+        let keys = vec![b"key1".to_vec(), b"key3".to_vec(), b"key4".to_vec()];
+        let resp = kv_store.batch_get(BatchGetRequest { keys }).await.unwrap();
+
+        assert_eq!(2, resp.kvs.len());
+        assert_eq!(b"key1", resp.kvs[0].key());
+        assert_eq!(b"val1", resp.kvs[0].value());
+        assert_eq!(b"key3", resp.kvs[1].key());
+        assert_eq!(b"val3", resp.kvs[1].value());
+    }
+
+    #[tokio::test(flavor = "multi_thread")]
+    async fn test_compare_and_put() {
+        let kv_store = Arc::new(MemoryKvBackend::<Error>::new());
+        let success = Arc::new(AtomicU8::new(0));
+
+        let mut joins = vec![];
+        for _ in 0..20 {
+            let kv_store_clone = kv_store.clone();
+            let success_clone = success.clone();
+            let join = tokio::spawn(async move {
+                let req = CompareAndPutRequest {
+                    key: b"key".to_vec(),
+                    expect: vec![],
+                    value: b"val_new".to_vec(),
+                };
+                let resp = kv_store_clone.compare_and_put(req).await.unwrap();
+                if resp.success {
+                    success_clone.fetch_add(1, Ordering::SeqCst);
+                }
+            });
+            joins.push(join);
+        }

-        let result = backend
-            .compare_and_set(b"hello", b"what", b"world")
-            .await
-            .unwrap();
-        assert!(result.unwrap_err().is_none());
+        for join in joins {
+            join.await.unwrap();
+        }

-        let result = backend
-            .compare_and_set(b"hello", b"", b"world")
-            .await
-            .unwrap();
-        assert!(result.is_ok());
+        assert_eq!(1, success.load(Ordering::SeqCst));
+    }

-        let result = backend
-            .compare_and_set(b"hello", b"world", b"greptime")
-            .await
-            .unwrap();
-        assert!(result.is_ok());
+    #[tokio::test]
+    async fn test_delete_range() {
+        let kv_store = mock_mem_store_with_data().await;

-        let result = backend
-            .compare_and_set(b"hello", b"world", b"what")
-            .await
-            .unwrap();
-        assert_eq!(result.unwrap_err().unwrap(), b"greptime");
+        let req = DeleteRangeRequest {
+            key: b"key3".to_vec(),
+            range_end: vec![],
+            prev_kv: true,
+        };

-        assert!(backend.delete_range(b"key1", &[]).await.is_ok());
-        assert!(backend.delete_range(b"key3", b"key9").await.is_ok());
+        let resp = kv_store.delete_range(req).await.unwrap();
+        assert_eq!(1, resp.prev_kvs.len());
+        assert_eq!(b"key3", resp.prev_kvs[0].key());
+        assert_eq!(b"val3", resp.prev_kvs[0].value());

-        assert!(backend.move_value(b"key9", b"key10").await.is_ok());
+        let resp = kv_store.get(b"key3").await.unwrap();
+        assert!(resp.is_none());
+
+        let req = DeleteRangeRequest {
+            key: b"key2".to_vec(),
+            range_end: vec![],
+            prev_kv: false,
+        };
+
+        let resp = kv_store.delete_range(req).await.unwrap();
+        assert!(resp.prev_kvs.is_empty());
+
+        let resp = kv_store.get(b"key2").await.unwrap();
+        assert!(resp.is_none());
+
+        let key = b"key1".to_vec();
+        let range_end = util::get_prefix_end_key(b"key1");
+
+        let req = DeleteRangeRequest {
+            key: key.clone(),
+            range_end: range_end.clone(),
+            prev_kv: true,
+        };
+        let resp = kv_store.delete_range(req).await.unwrap();
+        assert_eq!(2, resp.prev_kvs.len());
+
+        let req = RangeRequest {
+            key,
+            range_end,
+            ..Default::default()
+        };
+        let resp = kv_store.range(req).await.unwrap();
+        assert!(resp.kvs.is_empty());
+    }
+
+    #[tokio::test]
+    async fn test_move_value() {
+        let kv_store = mock_mem_store_with_data().await;
+
+        let req = MoveValueRequest {
+            from_key: b"key1".to_vec(),
+            to_key: b"key111".to_vec(),
+        };
+
+        let resp = kv_store.move_value(req).await.unwrap();
+        assert_eq!(b"key1", resp.0.as_ref().unwrap().key());
+        assert_eq!(b"val1", resp.0.as_ref().unwrap().value());
+
+        let kv_store = mock_mem_store_with_data().await;
+
+        let req = MoveValueRequest {
+            from_key: b"notexistkey".to_vec(),
+            to_key: b"key222".to_vec(),
+        };
+
+        let resp = kv_store.move_value(req).await.unwrap();
+        assert!(resp.0.is_none());
+    }
+
+    #[tokio::test]
+    async fn test_batch_delete() {
+        let kv_store = mock_mem_store_with_data().await;
+
+        assert!(kv_store.get(b"key1").await.unwrap().is_some());
+        assert!(kv_store.get(b"key100").await.unwrap().is_none());
+
+        let req = BatchDeleteRequest {
+            keys: vec![b"key1".to_vec(), b"key100".to_vec()],
+            prev_kv: true,
+        };
+        let resp = kv_store.batch_delete(req).await.unwrap();
+        assert_eq!(1, resp.prev_kvs.len());
         assert_eq!(
-            backend.to_string(),
-            r#"hello -> greptime
-key10 -> val9
-key2 -> val2
-"#
+            vec![KeyValue {
+                key: b"key1".to_vec(),
+                value: b"val1".to_vec()
+            }],
+            resp.prev_kvs
         );
+        assert!(kv_store.get(b"key1").await.unwrap().is_none());

-        let range = backend.range(b"key").try_collect::<Vec<_>>().await.unwrap();
-        assert_eq!(range.len(), 2);
-        assert_eq!(range[0], Kv(b"key10".to_vec(), b"val9".to_vec()));
-        assert_eq!(range[1], Kv(b"key2".to_vec(), b"val2".to_vec()));
+        assert!(kv_store.get(b"key2").await.unwrap().is_some());
+        assert!(kv_store.get(b"key3").await.unwrap().is_some());
+
+        let req = BatchDeleteRequest {
+            keys: vec![b"key2".to_vec(), b"key3".to_vec()],
+            prev_kv: false,
+        };
+        let resp = kv_store.batch_delete(req).await.unwrap();
+        assert!(resp.prev_kvs.is_empty());
+
+        assert!(kv_store.get(b"key2").await.unwrap().is_none());
+        assert!(kv_store.get(b"key3").await.unwrap().is_none());
     }
 }

@@ -12,15 +12,17 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-use api::v1::meta::{DeleteRangeResponse, PutResponse, RangeResponse};
-
-use crate::error::Result;
-
 mod etcd;

+use common_error::ext::ErrorExt;
+
+use crate::rpc::store::{DeleteRangeResponse, PutResponse, RangeResponse};
+
 #[async_trait::async_trait]
 pub trait TxnService: Sync + Send {
-    async fn txn(&self, _txn: Txn) -> Result<TxnResponse> {
+    type Error: ErrorExt;
+
+    async fn txn(&self, _txn: Txn) -> Result<TxnResponse, Self::Error> {
         unimplemented!("txn is not implemented")
     }
 }
@@ -169,11 +171,14 @@ impl From<Txn> for TxnRequest {

 #[cfg(test)]
 mod tests {
-    use api::v1::meta::{KeyValue, PutRequest};
+    use std::sync::Arc;

     use super::*;
-    use crate::service::store::ext::KvStoreExt;
-    use crate::service::store::kv::KvStoreRef;
+    use crate::error::Error;
+    use crate::kv_backend::memory::MemoryKvBackend;
+    use crate::kv_backend::KvBackendRef;
+    use crate::rpc::store::PutRequest;
+    use crate::rpc::KeyValue;

     #[test]
     fn test_compare() {
@@ -301,7 +306,7 @@ mod tests {
     async fn test_txn_compare_equal() {
         let kv_store = create_kv_store().await;
         let key = vec![101u8];
-        let _ = kv_store.delete(key.clone(), false).await.unwrap();
+        kv_store.delete(&key, false).await.unwrap();

         let txn = Txn::new()
             .when(vec![Compare::with_not_exist_value(
@@ -332,7 +337,7 @@ mod tests {
     async fn test_txn_compare_greater() {
         let kv_store = create_kv_store().await;
         let key = vec![102u8];
-        let _ = kv_store.delete(key.clone(), false).await.unwrap();
+        kv_store.delete(&key, false).await.unwrap();

         let txn = Txn::new()
             .when(vec![Compare::with_not_exist_value(
@@ -361,10 +366,9 @@ mod tests {
         assert_eq!(
             res,
             TxnOpResponse::ResponseGet(RangeResponse {
-                header: None,
                 kvs: vec![KeyValue {
                     key,
-                    value: vec![1],
+                    value: vec![1]
                 }],
                 more: false,
             })
@@ -375,7 +379,7 @@ mod tests {
     async fn test_txn_compare_less() {
         let kv_store = create_kv_store().await;
         let key = vec![103u8];
-        let _ = kv_store.delete(vec![3], false).await.unwrap();
+        kv_store.delete(&[3], false).await.unwrap();

         let txn = Txn::new()
             .when(vec![Compare::with_not_exist_value(
@@ -404,10 +408,9 @@ mod tests {
         assert_eq!(
             res,
             TxnOpResponse::ResponseGet(RangeResponse {
-                header: None,
                 kvs: vec![KeyValue {
                     key,
-                    value: vec![2],
+                    value: vec![2]
                 }],
                 more: false,
             })
@@ -418,7 +421,7 @@ mod tests {
     async fn test_txn_compare_not_equal() {
         let kv_store = create_kv_store().await;
         let key = vec![104u8];
-        let _ = kv_store.delete(key.clone(), false).await.unwrap();
+        kv_store.delete(&key, false).await.unwrap();

         let txn = Txn::new()
             .when(vec![Compare::with_not_exist_value(
@@ -447,18 +450,17 @@ mod tests {
         assert_eq!(
             res,
             TxnOpResponse::ResponseGet(RangeResponse {
-                header: None,
                 kvs: vec![KeyValue {
                     key,
-                    value: vec![1],
+                    value: vec![1]
                 }],
                 more: false,
             })
         );
     }

-    async fn create_kv_store() -> KvStoreRef {
-        std::sync::Arc::new(crate::service::store::memory::MemStore::new())
+    async fn create_kv_store() -> KvBackendRef {
+        Arc::new(MemoryKvBackend::<Error>::new())
         // TODO(jiachun): Add a feature to test against etcd in github CI
         //
         // The same test can be run against etcd by uncommenting the following line

@@ -12,15 +12,14 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-use api::v1::meta::{DeleteRangeResponse, PutResponse, RangeResponse};
 use etcd_client::{
     Compare as EtcdCompare, CompareOp as EtcdCompareOp, Txn as EtcdTxn, TxnOp as EtcdTxnOp,
     TxnOpResponse as EtcdTxnOpResponse, TxnResponse as EtcdTxnResponse,
 };

+use super::{Compare, CompareOp, Txn, TxnOp, TxnOpResponse, TxnResponse};
 use crate::error::{self, Result};
-use crate::service::store::etcd_util::KvPair;
-use crate::service::store::txn::{Compare, CompareOp, Txn, TxnOp, TxnOpResponse, TxnResponse};
+use crate::rpc::store::{DeleteRangeResponse, PutResponse, RangeResponse};

 impl From<Txn> for EtcdTxn {
     fn from(txn: Txn) -> Self {
@@ -88,31 +87,25 @@ impl TryFrom<EtcdTxnOpResponse> for TxnOpResponse {
     fn try_from(op_resp: EtcdTxnOpResponse) -> Result<Self> {
         match op_resp {
             EtcdTxnOpResponse::Put(res) => {
-                let prev_kv = res.prev_key().map(KvPair::from_etcd_kv);
-                let put_res = PutResponse {
-                    prev_kv,
-                    ..Default::default()
-                };
+                let prev_kv = res.prev_key().cloned().map(Into::into);
+                let put_res = PutResponse { prev_kv };
                 Ok(TxnOpResponse::ResponsePut(put_res))
             }
             EtcdTxnOpResponse::Get(res) => {
-                let kvs = res.kvs().iter().map(KvPair::from_etcd_kv).collect();
-                let range_res = RangeResponse {
-                    kvs,
-                    ..Default::default()
-                };
+                let kvs = res.kvs().iter().cloned().map(Into::into).collect();
+                let range_res = RangeResponse { kvs, more: false };
                 Ok(TxnOpResponse::ResponseGet(range_res))
             }
             EtcdTxnOpResponse::Delete(res) => {
                 let prev_kvs = res
                     .prev_kvs()
                     .iter()
-                    .map(KvPair::from_etcd_kv)
+                    .cloned()
+                    .map(Into::into)
                     .collect::<Vec<_>>();
                 let delete_res = DeleteRangeResponse {
                     prev_kvs,
                     deleted: res.deleted(),
-                    ..Default::default()
                 };
                 Ok(TxnOpResponse::ResponseDelete(delete_res))
             }

@@ -12,15 +12,20 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

+#![feature(btree_drain_filter)]
+
 pub mod error;
 pub mod heartbeat;
 pub mod helper;
 pub mod ident;
 pub mod instruction;
 pub mod key;
+pub mod kv_backend;
+pub mod metrics;
 pub mod peer;
 pub mod rpc;
 pub mod table_name;
 pub mod util;

 pub type ClusterId = u64;
 pub type DatanodeId = u64;

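Editor's aside — illustration, not part of the diff. `BTreeMap::drain_filter` is a nightly-only API (hence the crate-level feature gate added above), and the in-memory backend's ranged delete relies on it. A standalone sketch of the same pattern, runnable on a nightly toolchain:

#![feature(btree_drain_filter)]
use std::collections::BTreeMap;

fn main() {
    let mut kvs: BTreeMap<Vec<u8>, Vec<u8>> = BTreeMap::new();
    kvs.insert(b"key1".to_vec(), b"v1".to_vec());
    kvs.insert(b"key2".to_vec(), b"v2".to_vec());
    kvs.insert(b"other".to_vec(), b"v3".to_vec());

    // Remove every entry in [key1, key3): exactly what the in-memory
    // delete_range does for a non-empty range_end.
    let range = b"key1".to_vec()..b"key3".to_vec();
    let deleted: Vec<_> = kvs.drain_filter(|k, _| range.contains(k)).collect();
    assert_eq!(2, deleted.len());
    assert_eq!(1, kvs.len());
}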
15
src/common/meta/src/metrics.rs
Normal file
@@ -0,0 +1,15 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+pub const METRIC_META_TXN_REQUEST: &str = "meta.txn_request";

@@ -18,17 +18,14 @@ pub mod router;
 pub mod store;
 pub mod util;

+use std::fmt::{Display, Formatter};
+
 use api::v1::meta::{KeyValue as PbKeyValue, ResponseHeader as PbResponseHeader};

 #[derive(Debug, Clone)]
 pub struct ResponseHeader(PbResponseHeader);

 impl ResponseHeader {
-    #[inline]
-    pub(crate) fn new(header: PbResponseHeader) -> Self {
-        Self(header)
-    }
-
     #[inline]
     pub fn protocol_version(&self) -> u64 {
         self.0.protocol_version
@@ -56,33 +53,83 @@ impl ResponseHeader {
     }
 }

-#[derive(Debug, Clone)]
-pub struct KeyValue(PbKeyValue);
+#[derive(Debug, Clone, PartialEq)]
+pub struct KeyValue {
+    pub key: Vec<u8>,
+    pub value: Vec<u8>,
+}
+
+impl From<KeyValue> for PbKeyValue {
+    fn from(kv: KeyValue) -> Self {
+        Self {
+            key: kv.key,
+            value: kv.value,
+        }
+    }
+}
+
+impl From<etcd_client::KeyValue> for KeyValue {
+    fn from(kv: etcd_client::KeyValue) -> Self {
+        Self {
+            key: kv.key().to_vec(),
+            value: kv.value().to_vec(),
+        }
+    }
+}
+
+impl From<KeyValue> for (Vec<u8>, Vec<u8>) {
+    fn from(kv: KeyValue) -> Self {
+        (kv.key, kv.value)
+    }
+}
+
+impl From<(Vec<u8>, Vec<u8>)> for KeyValue {
+    fn from(kv: (Vec<u8>, Vec<u8>)) -> Self {
+        Self {
+            key: kv.0,
+            value: kv.1,
+        }
+    }
+}
+
+impl Display for KeyValue {
+    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+        write!(
+            f,
+            "({}, {})",
+            String::from_utf8_lossy(&self.key),
+            String::from_utf8_lossy(&self.value)
+        )
+    }
+}

 impl KeyValue {
     #[inline]
-    pub(crate) fn new(kv: PbKeyValue) -> Self {
-        Self(kv)
+    pub fn new(kv: PbKeyValue) -> Self {
+        Self {
+            key: kv.key,
+            value: kv.value,
+        }
     }

     #[inline]
     pub fn key(&self) -> &[u8] {
-        &self.0.key
+        &self.key
     }

     #[inline]
     pub fn take_key(&mut self) -> Vec<u8> {
-        std::mem::take(&mut self.0.key)
+        std::mem::take(&mut self.key)
     }

     #[inline]
     pub fn value(&self) -> &[u8] {
-        &self.0.value
+        &self.value
     }

     #[inline]
     pub fn take_value(&mut self) -> Vec<u8> {
-        std::mem::take(&mut self.0.value)
+        std::mem::take(&mut self.value)
     }
 }

@@ -103,7 +150,7 @@ mod tests {
         }),
     };

-    let header = ResponseHeader::new(pb_header);
+    let header = ResponseHeader(pb_header);
     assert_eq!(101, header.protocol_version());
     assert_eq!(1, header.cluster_id());
     assert_eq!(100, header.error_code());

|
||||
|
||||
use api::v1::meta::submit_ddl_task_request::Task;
|
||||
use api::v1::meta::{
|
||||
CreateTableTask as PbCreateTableTask, Partition,
|
||||
SubmitDdlTaskRequest as PbSubmitDdlTaskRequest,
|
||||
SubmitDdlTaskResponse as PbSubmitDdlTaskResponse,
|
||||
AlterTableTask as PbAlterTableTask, CreateTableTask as PbCreateTableTask,
|
||||
DropTableTask as PbDropTableTask, Partition, SubmitDdlTaskRequest as PbSubmitDdlTaskRequest,
|
||||
SubmitDdlTaskResponse as PbSubmitDdlTaskResponse, TruncateTableTask as PbTruncateTableTask,
|
||||
};
|
||||
use api::v1::CreateTableExpr;
|
||||
use api::v1::{AlterExpr, CreateTableExpr, DropTableExpr, TruncateTableExpr};
|
||||
use prost::Message;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
@@ -33,6 +33,9 @@ use crate::table_name::TableName;
|
||||
#[derive(Debug)]
|
||||
pub enum DdlTask {
|
||||
CreateTable(CreateTableTask),
|
||||
DropTable(DropTableTask),
|
||||
AlterTable(AlterTableTask),
|
||||
TruncateTable(TruncateTableTask),
|
||||
}
|
||||
|
||||
impl DdlTask {
|
||||
@@ -43,6 +46,28 @@ impl DdlTask {
|
||||
) -> Self {
|
||||
DdlTask::CreateTable(CreateTableTask::new(expr, partitions, table_info))
|
||||
}
|
||||
|
||||
pub fn new_drop_table(
|
||||
catalog: String,
|
||||
schema: String,
|
||||
table: String,
|
||||
table_id: TableId,
|
||||
) -> Self {
|
||||
DdlTask::DropTable(DropTableTask {
|
||||
catalog,
|
||||
schema,
|
||||
table,
|
||||
table_id,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn new_alter_table(alter_table: AlterExpr) -> Self {
|
||||
DdlTask::AlterTable(AlterTableTask { alter_table })
|
||||
}
|
||||
|
||||
pub fn new_truncate_table(truncate_table: TruncateTableExpr) -> Self {
|
||||
DdlTask::TruncateTable(TruncateTableTask { truncate_table })
|
||||
}
|
||||
}
|
||||
|
||||
impl TryFrom<Task> for DdlTask {
|
||||
@@ -52,6 +77,11 @@ impl TryFrom<Task> for DdlTask {
|
||||
Task::CreateTableTask(create_table) => {
|
||||
Ok(DdlTask::CreateTable(create_table.try_into()?))
|
||||
}
|
||||
Task::DropTableTask(drop_table) => Ok(DdlTask::DropTable(drop_table.try_into()?)),
|
||||
Task::AlterTableTask(alter_table) => Ok(DdlTask::AlterTable(alter_table.try_into()?)),
|
||||
Task::TruncateTableTask(truncate_table) => {
|
||||
Ok(DdlTask::TruncateTable(truncate_table.try_into()?))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -70,7 +100,22 @@ impl TryFrom<SubmitDdlTaskRequest> for PbSubmitDdlTaskRequest {
|
||||
create_table: Some(task.create_table),
|
||||
partitions: task.partitions,
|
||||
}),
|
||||
DdlTask::DropTable(task) => Task::DropTableTask(PbDropTableTask {
|
||||
drop_table: Some(DropTableExpr {
|
||||
catalog_name: task.catalog,
|
||||
schema_name: task.schema,
|
||||
table_name: task.table,
|
||||
table_id: Some(api::v1::TableId { id: task.table_id }),
|
||||
}),
|
||||
}),
|
||||
DdlTask::AlterTable(task) => Task::AlterTableTask(PbAlterTableTask {
|
||||
alter_table: Some(task.alter_table),
|
||||
}),
|
||||
DdlTask::TruncateTable(task) => Task::TruncateTableTask(PbTruncateTableTask {
|
||||
truncate_table: Some(task.truncate_table),
|
||||
}),
|
||||
};
|
||||
|
||||
Ok(Self {
|
||||
header: None,
|
||||
task: Some(task),
|
||||
@@ -80,19 +125,65 @@ impl TryFrom<SubmitDdlTaskRequest> for PbSubmitDdlTaskRequest {
|
||||
|
||||
pub struct SubmitDdlTaskResponse {
|
||||
pub key: Vec<u8>,
|
||||
pub table_id: TableId,
|
||||
pub table_id: Option<TableId>,
|
||||
}
|
||||
|
||||
impl TryFrom<PbSubmitDdlTaskResponse> for SubmitDdlTaskResponse {
|
||||
type Error = error::Error;
|
||||
|
||||
fn try_from(resp: PbSubmitDdlTaskResponse) -> Result<Self> {
|
||||
let table_id = resp.table_id.context(error::InvalidProtoMsgSnafu {
|
||||
err_msg: "expected table_id",
|
||||
})?;
|
||||
let table_id = resp.table_id.map(|t| t.id);
|
||||
Ok(Self {
|
||||
key: resp.key,
|
||||
table_id: table_id.id,
|
||||
table_id,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, PartialEq, Serialize, Deserialize)]
|
||||
pub struct DropTableTask {
|
||||
pub catalog: String,
|
||||
pub schema: String,
|
||||
pub table: String,
|
||||
pub table_id: TableId,
|
||||
}
|
||||
|
||||
impl DropTableTask {
|
||||
pub fn table_ref(&self) -> TableReference {
|
||||
TableReference {
|
||||
catalog: &self.catalog,
|
||||
schema: &self.schema,
|
||||
table: &self.table,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn table_name(&self) -> TableName {
|
||||
TableName {
|
||||
catalog_name: self.catalog.to_string(),
|
||||
schema_name: self.schema.to_string(),
|
||||
table_name: self.table.to_string(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl TryFrom<PbDropTableTask> for DropTableTask {
|
||||
type Error = error::Error;
|
||||
|
||||
fn try_from(pb: PbDropTableTask) -> Result<Self> {
|
||||
let drop_table = pb.drop_table.context(error::InvalidProtoMsgSnafu {
|
||||
err_msg: "expected drop table",
|
||||
})?;
|
||||
|
||||
Ok(Self {
|
||||
catalog: drop_table.catalog_name,
|
||||
schema: drop_table.schema_name,
|
||||
table: drop_table.table_name,
|
||||
table_id: drop_table
|
||||
.table_id
|
||||
.context(error::InvalidProtoMsgSnafu {
|
||||
err_msg: "expected table_id",
|
||||
})?
|
||||
.id,
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -188,6 +279,138 @@ impl<'de> Deserialize<'de> for CreateTableTask {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, PartialEq)]
|
||||
pub struct AlterTableTask {
|
||||
pub alter_table: AlterExpr,
|
||||
}
|
||||
|
||||
impl AlterTableTask {
|
||||
pub fn table_ref(&self) -> TableReference {
|
||||
TableReference {
|
||||
catalog: &self.alter_table.catalog_name,
|
||||
schema: &self.alter_table.schema_name,
|
||||
table: &self.alter_table.table_name,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn table_name(&self) -> TableName {
|
||||
let table = &self.alter_table;
|
||||
|
||||
TableName {
|
||||
catalog_name: table.catalog_name.to_string(),
|
||||
schema_name: table.schema_name.to_string(),
|
||||
table_name: table.table_name.to_string(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl TryFrom<PbAlterTableTask> for AlterTableTask {
|
||||
type Error = error::Error;
|
||||
|
||||
fn try_from(pb: PbAlterTableTask) -> Result<Self> {
|
||||
let alter_table = pb.alter_table.context(error::InvalidProtoMsgSnafu {
|
||||
err_msg: "expected alter_table",
|
||||
})?;
|
||||
|
||||
Ok(AlterTableTask { alter_table })
|
||||
}
|
||||
}
|
||||
|
||||
impl Serialize for AlterTableTask {
|
||||
fn serialize<S>(&self, serializer: S) -> result::Result<S::Ok, S::Error>
|
||||
where
|
||||
S: serde::Serializer,
|
||||
{
|
||||
let pb = PbAlterTableTask {
|
||||
alter_table: Some(self.alter_table.clone()),
|
||||
};
|
||||
let buf = pb.encode_to_vec();
|
||||
serializer.serialize_bytes(&buf)
|
||||
}
|
||||
}
|
||||
|
||||
impl<'de> Deserialize<'de> for AlterTableTask {
|
||||
fn deserialize<D>(deserializer: D) -> result::Result<Self, D::Error>
|
||||
where
|
||||
D: serde::Deserializer<'de>,
|
||||
{
|
||||
let buf = Vec::<u8>::deserialize(deserializer)?;
|
||||
let expr: PbAlterTableTask = PbAlterTableTask::decode(&*buf)
|
||||
.map_err(|err| serde::de::Error::custom(err.to_string()))?;
|
||||
|
||||
let expr = AlterTableTask::try_from(expr)
|
||||
.map_err(|err| serde::de::Error::custom(err.to_string()))?;
|
||||
|
||||
Ok(expr)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, PartialEq)]
|
||||
pub struct TruncateTableTask {
|
||||
pub truncate_table: TruncateTableExpr,
|
||||
}
|
||||
|
||||
impl TruncateTableTask {
|
||||
pub fn table_ref(&self) -> TableReference {
|
||||
TableReference {
|
||||
catalog: &self.truncate_table.catalog_name,
|
||||
schema: &self.truncate_table.schema_name,
|
||||
table: &self.truncate_table.table_name,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn table_name(&self) -> TableName {
|
||||
let table = &self.truncate_table;
|
||||
|
||||
TableName {
|
||||
catalog_name: table.catalog_name.to_string(),
|
||||
schema_name: table.schema_name.to_string(),
|
||||
table_name: table.table_name.to_string(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl TryFrom<PbTruncateTableTask> for TruncateTableTask {
|
||||
type Error = error::Error;
|
||||
|
||||
fn try_from(pb: PbTruncateTableTask) -> Result<Self> {
|
||||
let truncate_table = pb.truncate_table.context(error::InvalidProtoMsgSnafu {
|
||||
err_msg: "expected truncate_table",
|
||||
})?;
|
||||
|
||||
Ok(TruncateTableTask { truncate_table })
|
||||
}
|
||||
}
|
||||
|
||||
impl Serialize for TruncateTableTask {
|
||||
fn serialize<S>(&self, serializer: S) -> result::Result<S::Ok, S::Error>
|
||||
where
|
||||
S: serde::Serializer,
|
||||
{
|
||||
let pb = PbTruncateTableTask {
|
||||
truncate_table: Some(self.truncate_table.clone()),
|
||||
};
|
||||
let buf = pb.encode_to_vec();
|
||||
serializer.serialize_bytes(&buf)
|
||||
}
|
||||
}
|
||||
|
||||
impl<'de> Deserialize<'de> for TruncateTableTask {
|
||||
fn deserialize<D>(deserializer: D) -> result::Result<Self, D::Error>
|
||||
where
|
||||
D: serde::Deserializer<'de>,
|
||||
{
|
||||
let buf = Vec::<u8>::deserialize(deserializer)?;
|
||||
let task: PbTruncateTableTask = PbTruncateTableTask::decode(&*buf)
|
||||
.map_err(|err| serde::de::Error::custom(err.to_string()))?;
|
||||
|
||||
let task = TruncateTableTask::try_from(task)
|
||||
.map_err(|err| serde::de::Error::custom(err.to_string()))?;
|
||||
|
||||
Ok(task)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::sync::Arc;
|
||||
|
||||
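Editor's aside — illustration, not part of the diff. Constructing the new task variants is a one-liner per DDL kind; the `common_meta::rpc::ddl` module path is assumed, and `AlterExpr`/`TruncateTableExpr` are prost-generated messages so `Default` is derived:

use api::v1::{AlterExpr, TruncateTableExpr};
use common_meta::rpc::ddl::DdlTask;

fn build_tasks() {
    let drop = DdlTask::new_drop_table(
        "greptime".to_string(),
        "public".to_string(),
        "demo".to_string(),
        1024, // hypothetical table id
    );

    let alter = DdlTask::new_alter_table(AlterExpr::default());
    let truncate = DdlTask::new_truncate_table(TruncateTableExpr::default());
    // Each converts into the protobuf request via TryFrom<SubmitDdlTaskRequest>.
    let _ = (drop, alter, truncate);
}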
Some files were not shown because too many files have changed in this diff