Compare commits

...

50 Commits

Author SHA1 Message Date
Discord9
24f5e56196 PY_ENV_MAN for env manager choice 2023-12-07 10:42:26 +08:00
Discord9
c85d569797 feat: refactor parse_args&rm USE_ENV 2023-12-07 10:35:40 +08:00
Discord9
e95a8e070c feat: more opts for cli 2023-12-06 17:58:49 +08:00
Discord9
b71bf11772 feat: add choices for venv/conda 2023-12-06 17:39:12 +08:00
Discord9
ee0a3972fc feat: remove check script 2023-12-06 16:40:58 +08:00
Discord9
8fb40c66a4 feat: opt path 2023-12-06 16:29:00 +08:00
Discord9
e855f6370e chore: add newline in end 2023-12-06 16:29:00 +08:00
Discord9
fb5dcbc40c chore: remove python script for that 2023-12-06 16:29:00 +08:00
Discord9
0d109436b8 feat:use python script instead 2023-12-06 16:29:00 +08:00
Discord9
cbae03af07 feat: check aft build in Makefile 2023-12-06 16:29:00 +08:00
Discord9
902e6ead60 feat: shell check&install needed python shared lib 2023-12-06 16:29:00 +08:00
dennis zhuang
f9e7762c5b fix: add new column as primary key can't work (#2876) 2023-12-05 11:07:53 +00:00
Zhenchi
0b421b5177 feat(inverted_index.search): add index applier (#2868)
* feat(inverted_index.search): add fst applier

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* fix: typos

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* feat(inverted_index.search): add fst values mapper

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* chore: remove meta check

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* fix: fmt & clippy

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* refactor: one expect for test

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* feat(inverted_index.search): add index applier

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* refactor: bitmap_full -> bitmap_full_range

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* feat: add check for segment_row_count

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* fix: remove redundant code

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* fix: reader test

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* chore: match error in test

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* fix: fmt

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* refactor: add helper function to construct fst value

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* refactor: polish unit tests

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* refactor: bytemuck to extract offset and size

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* fix: toml format

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* refactor: use bytemuck

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* refactor: reorg value in unit tests

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* chore: update proto

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* chore: add a TODO reminder to consider optimizing the order of apply

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* refactor: InList predicates are applied first to benefit from higher selectivity

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* chore: update proto

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* feat: add read options to control the behavior of index not found

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* refactor: polish

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* refactor: move read options to implementation instead of trait

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* feat: add SearchContext, refine doc comments

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* feat: move index_not_found_strategy as a field of SearchContext

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* chore: rename varient

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

---------

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>
2023-12-05 08:24:24 +00:00
WU Jingdi
aa89d9deef fix: replace opendal PrometheusLayer (#2861)
* fix: replace opendal `PrometheusLayer`

* chore: add docs on `PrometheusMetricsLayer`

* chore: fix code advice

* chore: fix bug on `PrometheusMetricsLayer`
2023-12-05 07:15:45 +00:00
Weny Xu
b3ffe5cd1e feat: handle the downgrade region instruction (#2855)
* feat: handle the downgrade region instruction

* test: add tests for RegionHeartbeatResponseHandler

* refactor: remove unused code
2023-12-05 03:30:55 +00:00
Ruihang Xia
d6ef7a75de fix: type conversion rule reverses operands (#2871)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-12-05 03:25:29 +00:00
LFC
6344b1e0db fix: fragile integration tests (#2870) 2023-12-05 02:35:23 +00:00
tison
7d506b3c5f feat: drop if exists (#2859)
* feat: drop if exists

Signed-off-by: tison <wander4096@gmail.com>

* sqlness cases

Signed-off-by: tison <wander4096@gmail.com>

---------

Signed-off-by: tison <wander4096@gmail.com>
2023-12-05 02:18:33 +00:00
Zhenchi
96e12e9ee5 fix: correct the previously unsuccessful decimal_ops sort result fix (#2869)
Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>
2023-12-04 15:29:02 +00:00
Zhenchi
a9db80ab1a feat(inverted_index.search): add fst values mapper (#2862)
* feat(inverted_index.search): add fst applier

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* fix: typos

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* feat(inverted_index.search): add fst values mapper

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* chore: remove meta check

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* fix: fmt & clippy

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* refactor: one expect for test

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* chore: match error in test

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* fix: fmt

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* refactor: add helper function to construct fst value

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* refactor: bytemuck to extract offset and size

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* fix: toml format

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

---------

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>
2023-12-04 13:29:02 +00:00
Ruihang Xia
5f5dbe0172 fix: sort result of sqlness case decimal_ops (#2867)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-12-04 12:34:27 +00:00
Wei
dac7a41cbd feat: sqlness for decimal128 (#2822) 2023-12-04 11:22:38 +00:00
Ruihang Xia
de416465a6 feat: support time() and related functions in PromQL (#2854)
* enhance empty_metric

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* implementation

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix lhs & rhs

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix clippy

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix typo, update sqlness

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* remove deadcode

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add cast to bool modifier

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* update sqlness result

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-12-04 11:21:54 +00:00
Zhenchi
58c13739f0 feat(inverted_index.search): add fst applier (#2851)
* feat(inverted_index.search): add fst applier

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* fix: typos

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

---------

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>
2023-12-04 09:21:09 +00:00
WU Jingdi
806400caff feat: add align to / interval support in range query (#2842)
* feat: add align to / interval support in range query

* chore: fix ci

* chore: simplify `parse_duration_expr`

* chore: change s to ms
2023-12-04 08:00:41 +00:00
Weny Xu
f78dab078c chore: correct closeable typos (#2860) 2023-12-04 06:25:48 +00:00
Weny Xu
7a14db68a6 feat: add upgrade candidate region step (#2829)
* feat: add upgrade candidate region step

* chore: apply suggestions from CR

* chore: apply suggestions from CR
2023-12-04 05:09:27 +00:00
Weny Xu
c26f2f94c0 chore: add logs and metrics (#2858)
* chore: add logs and metrics

* feat: add the timer to track heartbeat intervel

* feat: add the gauge to track region leases

* refactor: use gauge instead of the timer

* chore: apply suggestions from CR

* feat: add hit rate and etcd txn metrics
2023-12-04 02:51:30 +00:00
Weny Xu
781f2422b3 feat: add update metadata step for rollbacking downgraded region (#2812)
* feat: add update metadata step for rollbacking downgraded region

* feat: invalidate table cache after updating metadata

* feat: add migration abort step
2023-12-01 11:36:05 +00:00
Yingwen
7e68ecc498 feat: do not concat batches in MergeReader (#2833) 2023-12-01 06:52:43 +00:00
LFC
9ce9421850 refactor: add builder for Frontend (#2849) 2023-12-01 04:39:47 +00:00
zyy17
c0df2b9086 ci: set 'omitBody' true when releasing (#2845)
ci: set 'omitBody'
2023-11-30 10:53:07 +00:00
Yiran
29d344ccd2 docs: update getting-started document link (#2843) 2023-11-30 10:03:09 +00:00
Wei
fe2fc723bc refactor: DataType name function (#2836)
* refactor: DataType name function

* chore: test case
2023-11-30 03:49:09 +00:00
Wei
2332305b90 refactor: replace usage of ArrayData by clone (#2827)
* refactor: use array clone()

* refactor: slice

* chore: clippy
2023-11-30 03:27:29 +00:00
Ruihang Xia
9ccd182109 feat: implement PromQL set op AND/UNLESS (#2839)
* initial impl

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* disable OR for now

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-11-30 03:17:57 +00:00
Weny Xu
ae8153515b feat: add update metadata step for upgrading candidate region (#2811) 2023-11-29 11:10:38 +00:00
Weny Xu
cce5edc88e feat: add downgrade leader region step (#2792)
* feat: add downgrade leader region step

* chore: apply suggestions from CR

* chore: rename exist to exists

* chore: apply suggestions from CR
2023-11-29 09:17:28 +00:00
Weny Xu
616eb04914 chore: bump version to 0.4.4 (#2840)
chore: bump to v0.4.4
2023-11-29 08:59:42 +00:00
ZonaHe
7c53f92e4b feat: update dashboard to v0.4.1 (#2841)
Co-authored-by: ZonaHex <ZonaHex@users.noreply.github.com>
2023-11-29 08:50:25 +00:00
Ruihang Xia
445bd92c7a feat: add arg to standalone start command (#2837)
* feat: add  arg to standalone start command

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add this arg to metasrv

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* remove short arg name

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-11-29 07:44:43 +00:00
Ruihang Xia
92a9802343 feat: canonicalize all unquoted identifier to lowercase (#2828)
* feat: canonicalize all unquoted identifier to lowercase

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add more tests

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* test altering table

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* primary key declare

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix primary key declare

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* partition by and time index

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* remove redundent call to canonicalize

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-11-29 06:40:10 +00:00
Yingwen
abbac46c05 fix: do not expose manifest compression algorithm (#2835)
* fix: don't allow to set manifest compression algorithm

* docs: update config examples
2023-11-29 05:49:40 +00:00
ZonaHe
d0d0f091f0 feat: update dashboard to v0.4.0 (#2832)
Co-authored-by: ZonaHex <ZonaHex@users.noreply.github.com>
2023-11-28 10:38:37 +00:00
fys
707a0d5626 fix: urldecode when influxdb auth (#2831)
* fix: add url decode when influxdb auth

* chore: fmt toml
2023-11-28 09:35:03 +00:00
Weny Xu
e42767d500 fix: fix name verifying (#2825) 2023-11-28 02:47:03 +00:00
Weny Xu
ca18ccf7d4 fix: fix broken CI (#2826) 2023-11-27 14:49:39 +00:00
hygkui
b1d8812806 docs: Update README.md Add JS Client link (#2821)
* Update README.md Add JS Client link

Add JS Client link

* chore: apply suggestion

---------

Co-authored-by: dennis zhuang <killme2008@gmail.com>
2023-11-27 14:23:54 +00:00
Weny Xu
7547e7ebdf fix: fix procedure loaders not found issue (#2824) 2023-11-27 10:50:28 +00:00
Yingwen
6100cb335a fix(mito): do not check nullability of fields in delete requests (#2815)
* test: test for delete rows from table with non null columns

* test: test delete and reopen

* fix: allow deleting rows with non null column
2023-11-27 09:54:50 +00:00
195 changed files with 10758 additions and 2155 deletions


@@ -31,10 +31,12 @@ runs:
          echo "prerelease=false" >> $GITHUB_ENV
          echo "makeLatest=true" >> $GITHUB_ENV
          echo "generateReleaseNotes=false" >> $GITHUB_ENV
+         echo "omitBody=true" >> $GITHUB_ENV
        else
          echo "prerelease=true" >> $GITHUB_ENV
          echo "makeLatest=false" >> $GITHUB_ENV
          echo "generateReleaseNotes=true" >> $GITHUB_ENV
+         echo "omitBody=false" >> $GITHUB_ENV
        fi
   - name: Publish release
@@ -45,6 +47,7 @@ runs:
       makeLatest: ${{ env.makeLatest }}
       tag: ${{ inputs.version }}
       generateReleaseNotes: ${{ env.generateReleaseNotes }}
+      omitBody: ${{ env.omitBody }} # omitBody is true when the release is a official release.
       allowUpdates: true
       artifacts: |
         **/greptime-*/*
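The hunk above relies on the GitHub Actions environment file: each `echo "key=value" >> $GITHUB_ENV` line makes `key` available to later steps of the same job as `${{ env.key }}`. A minimal sketch of the pattern (the version test is illustrative, not taken from this action):

    # Decide release flags in one step; assume a pre-release tag contains a hyphen.
    if [[ "$VERSION" != *-* ]]; then
      echo "omitBody=true" >> "$GITHUB_ENV"   # official release: publish without a generated body
    else
      echo "omitBody=false" >> "$GITHUB_ENV"
    fi
    # A later step can then read it, e.g.: omitBody: ${{ env.omitBody }}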

Cargo.lock (generated)

@@ -196,7 +196,7 @@ checksum = "8f1f8f5a6f3d50d89e3797d7593a50f96bb2aaa20ca0cc7be1fb673232c91d72"

 [[package]]
 name = "api"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "common-base",
  "common-decimal",
@@ -655,7 +655,7 @@ dependencies = [

 [[package]]
 name = "auth"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "api",
  "async-trait",
@@ -828,7 +828,7 @@ dependencies = [

 [[package]]
 name = "benchmarks"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "arrow",
  "chrono",
@@ -1160,7 +1160,7 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5"

 [[package]]
 name = "catalog"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "api",
  "arc-swap",
@@ -1429,7 +1429,7 @@ checksum = "702fc72eb24e5a1e48ce58027a675bc24edd52096d5397d4aea7c6dd9eca0bd1"

 [[package]]
 name = "client"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "api",
  "arrow-flight",
@@ -1462,7 +1462,7 @@ dependencies = [
  "session",
  "snafu",
  "substrait 0.17.1",
- "substrait 0.4.3",
+ "substrait 0.4.4",
  "tokio",
  "tokio-stream",
  "tonic 0.10.2",
@@ -1492,7 +1492,7 @@ dependencies = [

 [[package]]
 name = "cmd"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "anymap",
  "async-trait",
@@ -1540,7 +1540,7 @@ dependencies = [
  "servers",
  "session",
  "snafu",
- "substrait 0.4.3",
+ "substrait 0.4.4",
  "table",
  "temp-env",
  "tikv-jemallocator",
@@ -1573,7 +1573,7 @@ checksum = "55b672471b4e9f9e95499ea597ff64941a309b2cdbffcc46f2cc5e2d971fd335"

 [[package]]
 name = "common-base"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "anymap",
  "bitvec",
@@ -1588,7 +1588,7 @@ dependencies = [

 [[package]]
 name = "common-catalog"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "chrono",
  "common-error",
@@ -1599,7 +1599,7 @@ dependencies = [

 [[package]]
 name = "common-config"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "common-base",
  "humantime-serde",
@@ -1608,7 +1608,7 @@ dependencies = [

 [[package]]
 name = "common-datasource"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "arrow",
  "arrow-schema",
@@ -1639,7 +1639,7 @@ dependencies = [

 [[package]]
 name = "common-decimal"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "arrow",
  "bigdecimal",
@@ -1653,7 +1653,7 @@ dependencies = [

 [[package]]
 name = "common-error"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "snafu",
  "strum 0.25.0",
@@ -1661,7 +1661,7 @@ dependencies = [

 [[package]]
 name = "common-function"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "arc-swap",
  "chrono-tz 0.6.3",
@@ -1684,7 +1684,7 @@ dependencies = [

 [[package]]
 name = "common-greptimedb-telemetry"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "async-trait",
  "common-error",
@@ -1703,7 +1703,7 @@ dependencies = [

 [[package]]
 name = "common-grpc"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "api",
  "arrow-flight",
@@ -1733,7 +1733,7 @@ dependencies = [

 [[package]]
 name = "common-grpc-expr"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "api",
  "async-trait",
@@ -1752,7 +1752,7 @@ dependencies = [

 [[package]]
 name = "common-macro"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "arc-swap",
  "common-query",
@@ -1767,7 +1767,7 @@ dependencies = [

 [[package]]
 name = "common-mem-prof"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "common-error",
  "common-macro",
@@ -1780,7 +1780,7 @@ dependencies = [

 [[package]]
 name = "common-meta"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "api",
  "async-recursion",
@@ -1819,7 +1819,7 @@ dependencies = [

 [[package]]
 name = "common-procedure"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "async-stream",
  "async-trait",
@@ -1843,7 +1843,7 @@ dependencies = [

 [[package]]
 name = "common-procedure-test"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "async-trait",
  "common-procedure",
@@ -1851,7 +1851,7 @@ dependencies = [

 [[package]]
 name = "common-query"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "api",
  "async-trait",
@@ -1866,7 +1866,7 @@ dependencies = [
  "datatypes",
  "serde",
  "snafu",
- "sqlparser 0.38.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=0fbae07d0c46dc18e3381c406d8b9b8abef6b1fd)",
+ "sqlparser 0.38.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=6a93567ae38d42be5c8d08b13c8ff4dde26502ef)",
  "sqlparser_derive 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "statrs",
  "tokio",
@@ -1874,7 +1874,7 @@ dependencies = [

 [[package]]
 name = "common-recordbatch"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "common-error",
  "common-macro",
@@ -1891,7 +1891,7 @@ dependencies = [

 [[package]]
 name = "common-runtime"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "async-trait",
  "common-error",
@@ -1909,7 +1909,7 @@ dependencies = [

 [[package]]
 name = "common-telemetry"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "backtrace",
  "common-error",
@@ -1934,7 +1934,7 @@ dependencies = [

 [[package]]
 name = "common-test-util"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "once_cell",
  "rand",
@@ -1943,7 +1943,7 @@ dependencies = [

 [[package]]
 name = "common-time"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "arrow",
  "chrono",
@@ -1958,7 +1958,7 @@ dependencies = [

 [[package]]
 name = "common-version"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "build-data",
 ]
@@ -2581,7 +2581,7 @@ dependencies = [

 [[package]]
 name = "datanode"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "api",
  "arrow-flight",
@@ -2640,7 +2640,7 @@ dependencies = [
  "snafu",
  "sql",
  "store-api",
- "substrait 0.4.3",
+ "substrait 0.4.4",
  "table",
  "tokio",
  "tokio-stream",
@@ -2654,7 +2654,7 @@ dependencies = [

 [[package]]
 name = "datatypes"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "arrow",
  "arrow-array",
@@ -2797,6 +2797,12 @@ version = "0.1.13"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "56254986775e3233ffa9c4d7d3faaf6d36a2c09d30b20687e9f88bc8bafc16c8"

+[[package]]
+name = "difflib"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6184e33543162437515c2e2b48714794e37845ec9851711914eec9d308f6ebe8"
+
 [[package]]
 name = "digest"
 version = "0.10.7"
@@ -2895,6 +2901,12 @@ version = "0.15.7"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "1aaf95b3e5c8f23aa320147307562d361db0ae0d51242340f558153b4eb2439b"

+[[package]]
+name = "downcast"
+version = "0.11.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1435fa1053d8b2fbbe9be7e97eca7f33d37b28409959813daefc1446a14247f1"
+
 [[package]]
 name = "dyn-clone"
 version = "1.0.16"
@@ -3092,7 +3104,7 @@ dependencies = [

 [[package]]
 name = "file-engine"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "api",
  "async-trait",
@@ -3182,6 +3194,15 @@ dependencies = [
  "miniz_oxide",
 ]

+[[package]]
+name = "float-cmp"
+version = "0.9.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "98de4bbd547a563b716d8dfa9aad1cb19bfab00f4fa09a6a4ed21dbcf44ce9c4"
+dependencies = [
+ "num-traits",
+]
+
 [[package]]
 name = "fnv"
 version = "1.0.7"
@@ -3190,9 +3211,9 @@ checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1"

 [[package]]
 name = "form_urlencoded"
-version = "1.2.0"
+version = "1.2.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a62bc1cf6f830c2ec14a513a9fb124d0a213a629668a4186f329db21fe045652"
+checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456"
 dependencies = [
  "percent-encoding",
 ]
@@ -3206,9 +3227,15 @@ dependencies = [
  "regex",
 ]

+[[package]]
+name = "fragile"
+version = "2.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa"
+
 [[package]]
 name = "frontend"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "api",
  "arc-swap",
@@ -3269,10 +3296,10 @@ dependencies = [
  "session",
  "snafu",
  "sql",
- "sqlparser 0.38.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=0fbae07d0c46dc18e3381c406d8b9b8abef6b1fd)",
+ "sqlparser 0.38.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=6a93567ae38d42be5c8d08b13c8ff4dde26502ef)",
  "store-api",
  "strfmt",
- "substrait 0.4.3",
+ "substrait 0.4.4",
  "table",
  "tokio",
  "toml 0.7.8",
@@ -3543,7 +3570,7 @@ checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b"
 [[package]]
 name = "greptime-proto"
 version = "0.1.0"
-source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=2b3ae45740a49ec6a0830d71fc09c3093aeb5fe7#2b3ae45740a49ec6a0830d71fc09c3093aeb5fe7"
+source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=b1d403088f02136bcebde53d604f491c260ca8e2#b1d403088f02136bcebde53d604f491c260ca8e2"
 dependencies = [
  "prost 0.12.2",
  "serde",
@@ -3875,9 +3902,9 @@ checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39"

 [[package]]
 name = "idna"
-version = "0.4.0"
+version = "0.5.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7d20d6b07bfbc108882d88ed8e37d39636dcc260e15e30c45e6ba089610b917c"
+checksum = "634d9b1461af396cad843f47fdba5597a4f9e6ddd4bfb6ff5d85028c25cb12f6"
 dependencies = [
  "unicode-bidi",
  "unicode-normalization",
@@ -3910,16 +3937,20 @@ dependencies = [

 [[package]]
 name = "index"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "async-trait",
+ "bytemuck",
  "common-base",
  "common-error",
  "common-macro",
  "fst",
  "futures",
  "greptime-proto",
+ "mockall",
  "prost 0.12.2",
+ "regex",
+ "regex-automata 0.1.10",
  "snafu",
  "tokio",
  "tokio-util",
@@ -4365,7 +4396,7 @@ checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f"

 [[package]]
 name = "log-store"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "async-stream",
  "async-trait",
@@ -4635,7 +4666,7 @@ dependencies = [

 [[package]]
 name = "meta-client"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "api",
  "async-trait",
@@ -4665,7 +4696,7 @@ dependencies = [

 [[package]]
 name = "meta-srv"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "anymap",
  "api",
@@ -4743,7 +4774,7 @@ dependencies = [

 [[package]]
 name = "metric-engine"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "ahash 0.8.6",
  "api",
@@ -4814,7 +4845,7 @@ dependencies = [

 [[package]]
 name = "mito2"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "anymap",
  "api",
@@ -4871,6 +4902,33 @@ dependencies = [
  "uuid",
 ]

+[[package]]
+name = "mockall"
+version = "0.11.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4c84490118f2ee2d74570d114f3d0493cbf02790df303d2707606c3e14e07c96"
+dependencies = [
+ "cfg-if 1.0.0",
+ "downcast",
+ "fragile",
+ "lazy_static",
+ "mockall_derive",
+ "predicates",
+ "predicates-tree",
+]
+
+[[package]]
+name = "mockall_derive"
+version = "0.11.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "22ce75669015c4f47b289fd4d4f56e894e4c96003ffdf3ac51313126f94c6cbb"
+dependencies = [
+ "cfg-if 1.0.0",
+ "proc-macro2",
+ "quote",
+ "syn 1.0.109",
+]
+
 [[package]]
 name = "moka"
 version = "0.12.1"
@@ -5099,6 +5157,12 @@ dependencies = [
  "minimal-lexical",
 ]

+[[package]]
+name = "normalize-line-endings"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "61807f77802ff30975e01f4f071c8ba10c022052f98b3294119f3e615d13e5be"
+
 [[package]]
 name = "nu-ansi-term"
 version = "0.46.0"
@@ -5272,7 +5336,7 @@ dependencies = [

 [[package]]
 name = "object-store"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "anyhow",
  "async-trait",
@@ -5349,7 +5413,6 @@ dependencies = [
  "parking_lot 0.12.1",
  "percent-encoding",
  "pin-project",
- "prometheus",
  "quick-xml 0.29.0",
  "reqsign",
  "reqwest",
@@ -5519,7 +5582,7 @@ dependencies = [

 [[package]]
 name = "operator"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "api",
  "async-compat",
@@ -5562,9 +5625,9 @@ dependencies = [
  "session",
  "snafu",
  "sql",
- "sqlparser 0.38.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=0fbae07d0c46dc18e3381c406d8b9b8abef6b1fd)",
+ "sqlparser 0.38.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=6a93567ae38d42be5c8d08b13c8ff4dde26502ef)",
  "store-api",
- "substrait 0.4.3",
+ "substrait 0.4.4",
  "table",
  "tokio",
  "tonic 0.10.2",
@@ -5784,7 +5847,7 @@ dependencies = [

 [[package]]
 name = "partition"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "api",
  "async-trait",
@@ -5876,9 +5939,9 @@ dependencies = [

 [[package]]
 name = "percent-encoding"
-version = "2.3.0"
+version = "2.3.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94"
+checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e"

 [[package]]
 name = "pest"
@@ -6122,7 +6185,7 @@ dependencies = [

 [[package]]
 name = "plugins"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "auth",
  "common-base",
@@ -6223,6 +6286,36 @@ version = "0.1.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "925383efa346730478fb4838dbe9137d2a47675ad789c546d150a6e1dd4ab31c"

+[[package]]
+name = "predicates"
+version = "2.1.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "59230a63c37f3e18569bdb90e4a89cbf5bf8b06fea0b84e65ea10cc4df47addd"
+dependencies = [
+ "difflib",
+ "float-cmp",
+ "itertools 0.10.5",
+ "normalize-line-endings",
+ "predicates-core",
+ "regex",
+]
+
+[[package]]
+name = "predicates-core"
+version = "1.0.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b794032607612e7abeb4db69adb4e33590fa6cf1149e95fd7cb00e634b92f174"
+
+[[package]]
+name = "predicates-tree"
+version = "1.0.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "368ba315fb8c5052ab692e68a0eefec6ec57b23a36959c14496f0b0df2c0cecf"
+dependencies = [
+ "predicates-core",
+ "termtree",
+]
+
 [[package]]
 name = "prettydiff"
 version = "0.6.4"
@@ -6349,7 +6442,7 @@ dependencies = [

 [[package]]
 name = "promql"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "async-recursion",
  "async-trait",
@@ -6558,7 +6651,7 @@ dependencies = [

 [[package]]
 name = "puffin"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "async-trait",
  "bitflags 2.4.1",
@@ -6669,7 +6762,7 @@ dependencies = [

 [[package]]
 name = "query"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "ahash 0.8.6",
  "api",
@@ -6727,7 +6820,7 @@ dependencies = [
  "stats-cli",
  "store-api",
  "streaming-stats",
- "substrait 0.4.3",
+ "substrait 0.4.4",
  "table",
  "tokio",
  "tokio-stream",
@@ -6953,6 +7046,7 @@ version = "0.1.10"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132"
 dependencies = [
+ "fst",
  "regex-syntax 0.6.29",
 ]

@@ -7928,7 +8022,7 @@ checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49"

 [[package]]
 name = "script"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "api",
  "arc-swap",
@@ -8188,7 +8282,7 @@ dependencies = [

 [[package]]
 name = "servers"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "aide",
  "api",
@@ -8277,11 +8371,12 @@ dependencies = [
  "tonic-reflection",
  "tower",
  "tower-http",
+ "urlencoding",
 ]

 [[package]]
 name = "session"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "api",
  "arc-swap",
@@ -8542,7 +8637,7 @@ dependencies = [

 [[package]]
 name = "sql"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "api",
  "common-base",
@@ -8561,7 +8656,7 @@ dependencies = [
  "once_cell",
  "regex",
  "snafu",
- "sqlparser 0.38.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=0fbae07d0c46dc18e3381c406d8b9b8abef6b1fd)",
+ "sqlparser 0.38.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=6a93567ae38d42be5c8d08b13c8ff4dde26502ef)",
  "sqlparser_derive 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "table",
 ]
@@ -8594,7 +8689,7 @@ dependencies = [

 [[package]]
 name = "sqlness-runner"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "async-trait",
  "clap 4.4.8",
@@ -8624,13 +8719,13 @@ dependencies = [
 [[package]]
 name = "sqlparser"
 version = "0.38.0"
-source = "git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=0fbae07d0c46dc18e3381c406d8b9b8abef6b1fd#0fbae07d0c46dc18e3381c406d8b9b8abef6b1fd"
+source = "git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=6a93567ae38d42be5c8d08b13c8ff4dde26502ef#6a93567ae38d42be5c8d08b13c8ff4dde26502ef"
 dependencies = [
  "lazy_static",
  "log",
  "regex",
  "sqlparser 0.38.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "sqlparser_derive 0.1.1 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=0fbae07d0c46dc18e3381c406d8b9b8abef6b1fd)",
+ "sqlparser_derive 0.1.1 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=6a93567ae38d42be5c8d08b13c8ff4dde26502ef)",
 ]

 [[package]]
@@ -8647,7 +8742,7 @@ dependencies = [
 [[package]]
 name = "sqlparser_derive"
 version = "0.1.1"
-source = "git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=0fbae07d0c46dc18e3381c406d8b9b8abef6b1fd#0fbae07d0c46dc18e3381c406d8b9b8abef6b1fd"
+source = "git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=6a93567ae38d42be5c8d08b13c8ff4dde26502ef#6a93567ae38d42be5c8d08b13c8ff4dde26502ef"
 dependencies = [
  "proc-macro2",
  "quote",
@@ -8800,7 +8895,7 @@ dependencies = [

 [[package]]
 name = "store-api"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "api",
  "aquamarine",
@@ -8939,7 +9034,7 @@ dependencies = [

 [[package]]
 name = "substrait"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "async-recursion",
  "async-trait",
@@ -9087,7 +9182,7 @@ dependencies = [

 [[package]]
 name = "table"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "anymap",
  "async-trait",
@@ -9191,9 +9286,15 @@ dependencies = [
  "libc",
 ]

+[[package]]
+name = "termtree"
+version = "0.4.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76"
+
 [[package]]
 name = "tests-integration"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "api",
  "async-trait",
@@ -9249,7 +9350,7 @@ dependencies = [
  "sql",
  "sqlx",
  "store-api",
- "substrait 0.4.3",
+ "substrait 0.4.4",
  "table",
  "tempfile",
  "time",
@@ -10248,9 +10349,9 @@ checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1"

 [[package]]
 name = "url"
-version = "2.4.1"
+version = "2.5.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "143b538f18257fac9cad154828a57c6bf5157e1aa604d4816b5995bf6de87ae5"
+checksum = "31e6302e3bb753d46e83516cae55ae196fc0c309407cf11ab35cc51a4c2a4633"
 dependencies = [
  "form_urlencoded",
  "idna",


@@ -58,7 +58,7 @@ members = [
 resolver = "2"

 [workspace.package]
-version = "0.4.3"
+version = "0.4.4"
 edition = "2021"
 license = "Apache-2.0"
@@ -74,6 +74,7 @@ async-trait = "0.1"
 base64 = "0.21"
 bigdecimal = "0.4.2"
 bitflags = "2.4.1"
+bytemuck = "1.12"
 chrono = { version = "0.4", features = ["serde"] }
 datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
 datafusion-common = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
@@ -87,11 +88,12 @@ etcd-client = "0.12"
 fst = "0.4.7"
 futures = "0.3"
 futures-util = "0.3"
-greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "2b3ae45740a49ec6a0830d71fc09c3093aeb5fe7" }
+greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "b1d403088f02136bcebde53d604f491c260ca8e2" }
 humantime-serde = "1.1"
 itertools = "0.10"
 lazy_static = "1.4"
 meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "abbd357c1e193cd270ea65ee7652334a150b628f" }
+mockall = "0.11.4"
 moka = "0.12"
 once_cell = "1.18"
 opentelemetry-proto = { git = "https://github.com/waynexia/opentelemetry-rust.git", rev = "33841b38dda79b15f2024952be5f32533325ca02", features = [
@@ -107,6 +109,7 @@ prost = "0.12"
 raft-engine = { git = "https://github.com/tikv/raft-engine.git", rev = "22dfb426cd994602b57725ef080287d3e53db479" }
 rand = "0.8"
 regex = "1.8"
+regex-automata = { version = "0.1", features = ["transducer"] }
 reqwest = { version = "0.11", default-features = false, features = [
  "json",
  "rustls-tls-native-roots",
@@ -118,7 +121,7 @@ serde_json = "1.0"
 smallvec = "1"
 snafu = "0.7"
 # on branch v0.38.x
-sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "0fbae07d0c46dc18e3381c406d8b9b8abef6b1fd", features = [
+sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "6a93567ae38d42be5c8d08b13c8ff4dde26502ef", features = [
  "visitor",
 ] }
 strum = { version = "0.25", features = ["derive"] }


@@ -100,7 +100,7 @@ Please see the online document site for more installation options and [operation
 ### Get started

-Read the [complete getting started guide](https://docs.greptime.com/getting-started/try-out-greptimedb) on our [official document site](https://docs.greptime.com/).
+Read the [complete getting started guide](https://docs.greptime.com/getting-started/overview) on our [official document site](https://docs.greptime.com/).

 To write and query data, GreptimeDB is compatible with multiple [protocols and clients](https://docs.greptime.com/user-guide/clients/overview).
@@ -135,6 +135,7 @@ To write and query data, GreptimeDB is compatible with multiple [protocols and c
 - [GreptimeDB Java Client](https://github.com/GreptimeTeam/greptimedb-client-java)
 - [GreptimeDB Python Client](https://github.com/GreptimeTeam/greptimedb-client-py) (WIP)
 - [GreptimeDB Rust Client](https://github.com/GreptimeTeam/greptimedb-client-rust)
+- [GreptimeDB JavaScript Client](https://github.com/GreptimeTeam/greptime-js-sdk)

 ## Project Status


@@ -64,8 +64,8 @@ worker_channel_size = 128
 worker_request_batch_size = 64
 # Number of meta action updated to trigger a new checkpoint for the manifest
 manifest_checkpoint_distance = 10
-# Manifest compression type
-manifest_compress_type = "uncompressed"
+# Whether to compress manifest and checkpoint file by gzip (default false).
+compress_manifest = false
 # Max number of running background jobs
 max_background_jobs = 4
 # Interval to auto flush a region if it has not flushed yet.


@@ -133,8 +133,8 @@ worker_channel_size = 128
 worker_request_batch_size = 64
 # Number of meta action updated to trigger a new checkpoint for the manifest
 manifest_checkpoint_distance = 10
-# Manifest compression type
-manifest_compress_type = "uncompressed"
+# Whether to compress manifest and checkpoint file by gzip (default false).
+compress_manifest = false
 # Max number of running background jobs
 max_background_jobs = 4
 # Interval to auto flush a region if it has not flushed yet.

scripts/run-pyo3-greptime.sh (new executable file)

@@ -0,0 +1,157 @@
#!/bin/bash
# This script configures the environment to run 'greptime' with the required Python version
# This script should be compatible both in Linux and macOS
OS_TYPE="$(uname)"
readonly OS_TYPE
check_command_existence() {
command -v "$1" &> /dev/null
}
get_python_version() {
case "$OS_TYPE" in
Darwin)
otool -L $GREPTIME_BIN_PATH | grep -o 'Python.framework/Versions/3.[0-9]\+/Python' | grep -o '3.[0-9]\+'
;;
Linux)
ldd $GREPTIME_BIN_PATH | grep -o 'libpython3\.[0-9]\+' | grep -o '3\.[0-9]\+'
;;
*)
echo "Unsupported OS type: $OS_TYPE"
exit 1
;;
esac
}
setup_virtualenv() {
local req_py_version="$1"
local env_name="GreptimeTmpVenv$req_py_version"
virtualenv --python=python"$req_py_version" "$env_name"
source "$env_name/bin/activate"
}
setup_conda_env() {
local req_py_version="$1"
local conda_base
conda_base=$(conda info --base) || { echo "Error obtaining conda base directory"; exit 1; }
. "$conda_base/etc/profile.d/conda.sh"
if ! conda list --name "GreptimeTmpPyO3Env$req_py_version" &> /dev/null; then
conda create --yes --name "GreptimeTmpPyO3Env$req_py_version" python="$req_py_version"
fi
conda activate "GreptimeTmpPyO3Env$req_py_version"
}
GREPTIME_BIN_PATH="./greptime"
YES="false"
usage() {
echo "Usage:"
echo " $0 -f <greptime-bin-path> [-y] <args-pass-to-greptime>"
echo "Set $PY_ENV_MAN to 1 to use virtualenv, 2 to use conda"
exit 1
}
function parse_args() {
while getopts ":f:y" opt; do
case $opt in
f)
GREPTIME_BIN_PATH=$OPTARG
;;
y)
YES="yes"
;;
\?)
echo "Invalid option: -$OPTARG" >&2
exit 1
;;
:)
echo "Option -$OPTARG requires an argument." >&2
exit 1
;;
esac
done
shift $((OPTIND -1))
REST_ARGS=$*
if [ -z "$GREPTIME_BIN_PATH" ]; then
usage
fi
echo "Run greptime binary at '$GREPTIME_BIN_PATH' (yes=$YES)..."
echo "The args pass to greptime: '$REST_ARGS'"
}
# Set library path and pass all arguments to greptime to run it
execute_greptime() {
if [[ "$OS_TYPE" == "Darwin" ]]; then
DYLD_LIBRARY_PATH="${CONDA_PREFIX:-$PREFIX}/lib:${LD_LIBRARY_PATH:-}" $GREPTIME_BIN_PATH $@
elif [[ "$OS_TYPE" == "Linux" ]]; then
LD_LIBRARY_PATH="${CONDA_PREFIX:-$PREFIX}/lib:${LD_LIBRARY_PATH:-}" $GREPTIME_BIN_PATH $@
fi
}
main() {
parse_args $@
local req_py_version
req_py_version=$(get_python_version)
readonly req_py_version
if [[ -z "$req_py_version" ]]; then
if $GREPTIME_BIN_PATH --version &> /dev/null; then
$GREPTIME_BIN_PATH $REST_ARGS
else
echo "The 'greptime' binary is not valid or encountered an error."
$GREPTIME_BIN_PATH --version
exit 1
fi
return
fi
echo "The required version of Python shared library is $req_py_version"
# if YES exist, assign it to yn, else read from stdin
if [[ -z "$YES" ]]; then
echo "Now this script will try to install or find correct Python Version"
echo "Do you want to continue? (yes/no): "
read -r yn
else
yn="$YES"
fi
case $yn in
[Yy]* ) ;;
[Nn]* ) exit;;
* ) echo "Please answer yes or no.";;
esac
# if USE_ENV exist, assign it to option
# else read from stdin
if [[ -z "$PY_ENV_MAN" ]]; then
echo "Do you want to use virtualenv or conda? (virtualenv(1)/conda(2)): "
read -r option
else
option="$PY_ENV_MAN"
fi
case $option in
1)
setup_virtualenv "$req_py_version"
;;
2)
setup_conda_env "$req_py_version"
;;
*)
echo "Please input 1 or 2"; exit 1
;;
esac
execute_greptime $REST_ARGS
}
main "$@"


@@ -202,7 +202,7 @@ impl InformationSchemaColumnsBuilder {
                 &schema_name,
                 &table_name,
                 &column.name,
-                column.data_type.name(),
+                &column.data_type.name(),
                 semantic_type,
             );
         }


@@ -19,7 +19,6 @@ use std::sync::{Arc, Weak};
 use common_catalog::consts::{DEFAULT_SCHEMA_NAME, INFORMATION_SCHEMA_NAME, NUMBERS_TABLE_ID};
 use common_error::ext::BoxedError;
 use common_meta::cache_invalidator::{CacheInvalidator, CacheInvalidatorRef, Context};
-use common_meta::datanode_manager::DatanodeManagerRef;
 use common_meta::error::Result as MetaResult;
 use common_meta::key::catalog_name::CatalogNameKey;
 use common_meta::key::schema_name::SchemaNameKey;
@@ -55,7 +54,6 @@ pub struct KvBackendCatalogManager {
     cache_invalidator: CacheInvalidatorRef,
     partition_manager: PartitionRuleManagerRef,
     table_metadata_manager: TableMetadataManagerRef,
-    datanode_manager: DatanodeManagerRef,
     /// A sub-CatalogManager that handles system tables
     system_catalog: SystemCatalog,
 }
@@ -76,16 +74,11 @@ impl CacheInvalidator for KvBackendCatalogManager {
 }

 impl KvBackendCatalogManager {
-    pub fn new(
-        backend: KvBackendRef,
-        cache_invalidator: CacheInvalidatorRef,
-        datanode_manager: DatanodeManagerRef,
-    ) -> Arc<Self> {
+    pub fn new(backend: KvBackendRef, cache_invalidator: CacheInvalidatorRef) -> Arc<Self> {
         Arc::new_cyclic(|me| Self {
             partition_manager: Arc::new(PartitionRuleManager::new(backend.clone())),
             table_metadata_manager: Arc::new(TableMetadataManager::new(backend)),
             cache_invalidator,
-            datanode_manager,
             system_catalog: SystemCatalog {
                 catalog_manager: me.clone(),
             },
@@ -99,10 +92,6 @@ impl KvBackendCatalogManager {
     pub fn table_metadata_manager_ref(&self) -> &TableMetadataManagerRef {
         &self.table_metadata_manager
     }
-
-    pub fn datanode_manager(&self) -> DatanodeManagerRef {
-        self.datanode_manager.clone()
-    }
 }

 #[async_trait::async_trait]
#[async_trait::async_trait] #[async_trait::async_trait]

View File

@@ -17,7 +17,6 @@ use std::sync::Arc;
 use std::time::Instant;

 use catalog::kvbackend::{CachedMetaKvBackend, KvBackendCatalogManager};
-use client::client_manager::DatanodeClients;
 use client::{Client, Database, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
 use common_base::Plugins;
 use common_error::ext::ErrorExt;
@@ -250,13 +249,8 @@ async fn create_query_engine(meta_addr: &str) -> Result<DatafusionQueryEngine> {
     let cached_meta_backend = Arc::new(CachedMetaKvBackend::new(meta_client.clone()));

-    let datanode_clients = Arc::new(DatanodeClients::default());
-
-    let catalog_list = KvBackendCatalogManager::new(
-        cached_meta_backend.clone(),
-        cached_meta_backend.clone(),
-        datanode_clients,
-    );
+    let catalog_list =
+        KvBackendCatalogManager::new(cached_meta_backend.clone(), cached_meta_backend);

     let plugins: Plugins = Default::default();
     let state = Arc::new(QueryEngineState::new(
         catalog_list,


@@ -37,6 +37,12 @@ pub enum Error {
         source: common_meta::error::Error,
     },

+    #[snafu(display("Failed to init DDL manager"))]
+    InitDdlManager {
+        location: Location,
+        source: common_meta::error::Error,
+    },
+
     #[snafu(display("Failed to start procedure manager"))]
     StartProcedureManager {
         location: Location,
@@ -225,13 +231,6 @@ pub enum Error {
         #[snafu(source)]
         error: std::io::Error,
     },
-
-    #[snafu(display("Failed to parse address {}", addr))]
-    ParseAddr {
-        addr: String,
-        #[snafu(source)]
-        error: std::net::AddrParseError,
-    },
 }

 pub type Result<T> = std::result::Result<T, Error>;
@@ -247,9 +246,11 @@ impl ErrorExt for Error {
             Error::ShutdownMetaServer { source, .. } => source.status_code(),
             Error::BuildMetaServer { source, .. } => source.status_code(),
             Error::UnsupportedSelectorType { source, .. } => source.status_code(),
-            Error::IterStream { source, .. } | Error::InitMetadata { source, .. } => {
-                source.status_code()
-            }
+            Error::IterStream { source, .. }
+            | Error::InitMetadata { source, .. }
+            | Error::InitDdlManager { source, .. } => source.status_code(),
             Error::ConnectServer { source, .. } => source.status_code(),
             Error::MissingConfig { .. }
             | Error::LoadLayeredConfig { .. }
@@ -259,8 +260,7 @@ impl ErrorExt for Error {
             | Error::NotDataFromOutput { .. }
             | Error::CreateDir { .. }
             | Error::EmptyResult { .. }
-            | Error::InvalidDatabaseName { .. }
-            | Error::ParseAddr { .. } => StatusCode::InvalidArguments,
+            | Error::InvalidDatabaseName { .. } => StatusCode::InvalidArguments,

             Error::StartProcedureManager { source, .. }
             | Error::StopProcedureManager { source, .. } => source.status_code(),


@@ -12,18 +12,26 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

+use std::sync::Arc;
 use std::time::Duration;

+use catalog::kvbackend::CachedMetaKvBackend;
 use clap::Parser;
+use client::client_manager::DatanodeClients;
+use common_meta::heartbeat::handler::parse_mailbox_message::ParseMailboxMessageHandler;
+use common_meta::heartbeat::handler::HandlerGroupExecutor;
 use common_telemetry::logging;
 use frontend::frontend::FrontendOptions;
+use frontend::heartbeat::handler::invalidate_table_cache::InvalidateTableCacheHandler;
+use frontend::heartbeat::HeartbeatTask;
+use frontend::instance::builder::FrontendBuilder;
 use frontend::instance::{FrontendInstance, Instance as FeInstance};
 use meta_client::MetaClientOptions;
 use servers::tls::{TlsMode, TlsOption};
 use servers::Mode;
-use snafu::ResultExt;
+use snafu::{OptionExt, ResultExt};

-use crate::error::{self, Result, StartFrontendSnafu};
+use crate::error::{self, MissingConfigSnafu, Result, StartFrontendSnafu};
 use crate::options::{Options, TopLevelOptions};

 pub struct Instance {
@@ -196,10 +204,38 @@ impl StartCommand {
         logging::info!("Frontend start command: {:#?}", self);
         logging::info!("Frontend options: {:#?}", opts);

-        let mut instance = FeInstance::try_new_distributed(&opts, plugins.clone())
+        let meta_client_options = opts.meta_client.as_ref().context(MissingConfigSnafu {
+            msg: "'meta_client'",
+        })?;
+
+        let meta_client = FeInstance::create_meta_client(meta_client_options)
             .await
             .context(StartFrontendSnafu)?;
+
+        let meta_backend = Arc::new(CachedMetaKvBackend::new(meta_client.clone()));
+        let executor = HandlerGroupExecutor::new(vec![
+            Arc::new(ParseMailboxMessageHandler),
+            Arc::new(InvalidateTableCacheHandler::new(meta_backend.clone())),
+        ]);
+        let heartbeat_task = HeartbeatTask::new(
+            meta_client.clone(),
+            opts.heartbeat.clone(),
+            Arc::new(executor),
+        );
+
+        let mut instance = FrontendBuilder::new(
+            meta_backend.clone(),
+            Arc::new(DatanodeClients::default()),
+            meta_client,
+        )
+        .with_cache_invalidator(meta_backend)
+        .with_plugin(plugins)
+        .with_heartbeat_task(heartbeat_task)
+        .try_build()
+        .await
+        .context(StartFrontendSnafu)?;

         instance
             .build_servers(opts)
             .await

View File

@@ -100,6 +100,9 @@ struct StartCommand {
     http_timeout: Option<u64>,
     #[clap(long, default_value = "GREPTIMEDB_METASRV")]
     env_prefix: String,
+    /// The working home directory of this metasrv instance.
+    #[clap(long)]
+    data_home: Option<String>,
 }

 impl StartCommand {
@@ -152,6 +155,10 @@ impl StartCommand {
             opts.http.timeout = Duration::from_secs(http_timeout);
         }

+        if let Some(data_home) = &self.data_home {
+            opts.data_home = data_home.clone();
+        }
+
         // Disable dashboard in metasrv.
         opts.http.disable_dashboard = true;
@@ -166,7 +173,12 @@ impl StartCommand {
         logging::info!("MetaSrv start command: {:#?}", self);
         logging::info!("MetaSrv options: {:#?}", opts);

-        let instance = MetaSrvInstance::new(opts, plugins)
+        let builder = meta_srv::bootstrap::metasrv_builder(&opts, plugins.clone(), None)
+            .await
+            .context(error::BuildMetaServerSnafu)?;
+        let metasrv = builder.build().await.context(error::BuildMetaServerSnafu)?;
+
+        let instance = MetaSrvInstance::new(opts, plugins, metasrv)
             .await
             .context(error::BuildMetaServerSnafu)?;

View File

@@ -15,21 +15,23 @@
 use std::sync::Arc;
 use std::{fs, path};

-use catalog::kvbackend::KvBackendCatalogManager;
-use catalog::CatalogManagerRef;
 use clap::Parser;
-use common_base::Plugins;
 use common_config::{metadata_store_dir, KvBackendConfig, WalConfig};
-use common_meta::cache_invalidator::DummyKvCacheInvalidator;
+use common_meta::cache_invalidator::DummyCacheInvalidator;
+use common_meta::datanode_manager::DatanodeManagerRef;
+use common_meta::ddl::DdlTaskExecutorRef;
+use common_meta::ddl_manager::DdlManager;
+use common_meta::key::{TableMetadataManager, TableMetadataManagerRef;}
 use common_meta::kv_backend::KvBackendRef;
 use common_procedure::ProcedureManagerRef;
 use common_telemetry::info;
 use common_telemetry::logging::LoggingOptions;
 use datanode::config::{DatanodeOptions, ProcedureConfig, RegionEngineConfig, StorageConfig};
 use datanode::datanode::{Datanode, DatanodeBuilder};
-use datanode::region_server::RegionServer;
 use file_engine::config::EngineConfig as FileEngineConfig;
 use frontend::frontend::FrontendOptions;
+use frontend::instance::builder::FrontendBuilder;
+use frontend::instance::standalone::StandaloneTableMetadataCreator;
 use frontend::instance::{FrontendInstance, Instance as FeInstance, StandaloneDatanodeManager};
 use frontend::service_config::{
     GrpcOptions, InfluxdbOptions, MysqlOptions, OpentsdbOptions, PostgresOptions, PromStoreOptions,
@@ -42,9 +44,9 @@ use servers::Mode;
 use snafu::ResultExt;

 use crate::error::{
-    CreateDirSnafu, IllegalConfigSnafu, InitMetadataSnafu, Result, ShutdownDatanodeSnafu,
-    ShutdownFrontendSnafu, StartDatanodeSnafu, StartFrontendSnafu, StartProcedureManagerSnafu,
-    StopProcedureManagerSnafu,
+    CreateDirSnafu, IllegalConfigSnafu, InitDdlManagerSnafu, InitMetadataSnafu, Result,
+    ShutdownDatanodeSnafu, ShutdownFrontendSnafu, StartDatanodeSnafu, StartFrontendSnafu,
+    StartProcedureManagerSnafu, StopProcedureManagerSnafu,
 };
 use crate::options::{MixOptions, Options, TopLevelOptions};
@@ -156,6 +158,7 @@ impl StandaloneOptions {
             wal: self.wal,
             storage: self.storage,
             region_engine: self.region_engine,
+            rpc_addr: self.grpc.addr,
             ..Default::default()
         }
     }
@@ -227,6 +230,9 @@ struct StartCommand {
     user_provider: Option<String>,
     #[clap(long, default_value = "GREPTIMEDB_STANDALONE")]
     env_prefix: String,
+    /// The working home directory of this standalone instance.
+    #[clap(long)]
+    data_home: Option<String>,
 }

 impl StartCommand {
@@ -257,6 +263,10 @@ impl StartCommand {
             opts.http.addr = addr.clone()
         }

+        if let Some(data_home) = &self.data_home {
+            opts.storage.data_home = data_home.clone();
+        }
+
         if let Some(addr) = &self.rpc_addr {
             // frontend grpc addr conflict with datanode default grpc addr
             let datanode_grpc_addr = DatanodeOptions::default().rpc_addr;
@@ -340,36 +350,25 @@ impl StartCommand {
             .await
             .context(StartFrontendSnafu)?;

-        let datanode = DatanodeBuilder::new(dn_opts, fe_plugins.clone())
-            .with_kv_backend(kv_backend.clone())
-            .build()
-            .await
-            .context(StartDatanodeSnafu)?;
+        let builder =
+            DatanodeBuilder::new(dn_opts, fe_plugins.clone()).with_kv_backend(kv_backend.clone());
+        let datanode = builder.build().await.context(StartDatanodeSnafu)?;

-        let region_server = datanode.region_server();
+        let datanode_manager = Arc::new(StandaloneDatanodeManager(datanode.region_server()));

-        let catalog_manager = KvBackendCatalogManager::new(
-            kv_backend.clone(),
-            Arc::new(DummyKvCacheInvalidator),
-            Arc::new(StandaloneDatanodeManager(region_server.clone())),
-        );
-
-        catalog_manager
-            .table_metadata_manager_ref()
-            .init()
-            .await
-            .context(InitMetadataSnafu)?;
-
-        // TODO: build frontend instance like in distributed mode
-        let mut frontend = build_frontend(
-            fe_plugins,
-            kv_backend,
-            procedure_manager.clone(),
-            catalog_manager,
-            region_server,
-        )
-        .await?;
+        let ddl_task_executor = Self::create_ddl_task_executor(
+            kv_backend.clone(),
+            procedure_manager.clone(),
+            datanode_manager.clone(),
+        )
+        .await?;
+
+        let mut frontend = FrontendBuilder::new(kv_backend, datanode_manager, ddl_task_executor)
+            .with_plugin(fe_plugins)
+            .try_build()
+            .await
+            .context(StartFrontendSnafu)?;

         frontend
             .build_servers(opts)
             .await
@@ -381,26 +380,41 @@ impl StartCommand {
             procedure_manager,
         })
     }
-}

-/// Build frontend instance in standalone mode
-async fn build_frontend(
-    plugins: Plugins,
-    kv_backend: KvBackendRef,
-    procedure_manager: ProcedureManagerRef,
-    catalog_manager: CatalogManagerRef,
-    region_server: RegionServer,
-) -> Result<FeInstance> {
-    let frontend_instance = FeInstance::try_new_standalone(
-        kv_backend,
-        procedure_manager,
-        catalog_manager,
-        plugins,
-        region_server,
-    )
-    .await
-    .context(StartFrontendSnafu)?;
-    Ok(frontend_instance)
+    async fn create_ddl_task_executor(
+        kv_backend: KvBackendRef,
+        procedure_manager: ProcedureManagerRef,
+        datanode_manager: DatanodeManagerRef,
+    ) -> Result<DdlTaskExecutorRef> {
+        let table_metadata_manager =
+            Self::create_table_metadata_manager(kv_backend.clone()).await?;
+
+        let ddl_task_executor: DdlTaskExecutorRef = Arc::new(
+            DdlManager::try_new(
+                procedure_manager,
+                datanode_manager,
+                Arc::new(DummyCacheInvalidator),
+                table_metadata_manager,
+                Arc::new(StandaloneTableMetadataCreator::new(kv_backend)),
+            )
+            .context(InitDdlManagerSnafu)?,
+        );
+
+        Ok(ddl_task_executor)
+    }
+
+    async fn create_table_metadata_manager(
+        kv_backend: KvBackendRef,
+    ) -> Result<TableMetadataManagerRef> {
+        let table_metadata_manager = Arc::new(TableMetadataManager::new(kv_backend));

+        table_metadata_manager
+            .init()
+            .await
+            .context(InitMetadataSnafu)?;
+
+        Ok(table_metadata_manager)
+    }
 }

 #[cfg(test)]

View File

@@ -42,5 +42,6 @@ tonic.workspace = true

 [dev-dependencies]
 chrono.workspace = true
+common-procedure = { workspace = true, features = ["testing"] }
 datatypes.workspace = true
 hyper = { version = "0.14", features = ["full"] }

View File

@@ -86,6 +86,10 @@ impl DropTableProcedure {
         ))
         .await?;

+        if !exist && self.data.task.drop_if_exists {
+            return Ok(Status::Done);
+        }
+
         ensure!(
             exist,
             error::TableNotFoundSnafu {
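The early return added above is what makes `DROP TABLE IF EXISTS` idempotent: a missing table finishes the procedure as done instead of raising `TableNotFound`. A minimal sketch of that control flow, using stand-in types rather than the real `DropTableProcedure`:

// Stand-in types for illustration only; the real procedure returns
// common_procedure statuses and snafu errors.
#[derive(Debug, PartialEq)]
enum Status {
    Done,
    Executing,
}

fn on_prepare(exist: bool, drop_if_exists: bool) -> Result<Status, String> {
    if !exist && drop_if_exists {
        // Nothing to drop: the task is already satisfied.
        return Ok(Status::Done);
    }
    if !exist {
        return Err("table not found".to_string());
    }
    Ok(Status::Executing)
}

fn main() {
    assert_eq!(on_prepare(false, true), Ok(Status::Done));
    assert!(on_prepare(false, false).is_err());
    assert_eq!(on_prepare(true, false), Ok(Status::Executing));
}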

View File

@@ -43,9 +43,9 @@ use crate::rpc::ddl::{
     TruncateTableTask,
 };
 use crate::rpc::router::RegionRoute;

 pub type DdlManagerRef = Arc<DdlManager>;

+/// The [DdlManager] provides the ability to execute Ddl.
 pub struct DdlManager {
     procedure_manager: ProcedureManagerRef,
     datanode_manager: DatanodeManagerRef,
@@ -55,26 +55,31 @@ pub struct DdlManager {
 }

 impl DdlManager {
-    pub fn new(
+    /// Returns a new [DdlManager] with all Ddl [BoxedProcedureLoader](common_procedure::procedure::BoxedProcedureLoader)s registered.
+    pub fn try_new(
         procedure_manager: ProcedureManagerRef,
         datanode_clients: DatanodeManagerRef,
         cache_invalidator: CacheInvalidatorRef,
         table_metadata_manager: TableMetadataManagerRef,
         table_meta_allocator: TableMetadataAllocatorRef,
-    ) -> Self {
-        Self {
+    ) -> Result<Self> {
+        let manager = Self {
             procedure_manager,
             datanode_manager: datanode_clients,
             cache_invalidator,
             table_metadata_manager,
             table_meta_allocator,
-        }
+        };
+        manager.register_loaders()?;
+        Ok(manager)
     }

+    /// Returns the [TableMetadataManagerRef].
     pub fn table_metadata_manager(&self) -> &TableMetadataManagerRef {
         &self.table_metadata_manager
     }

+    /// Returns the [DdlContext]
     pub fn create_context(&self) -> DdlContext {
         DdlContext {
             datanode_manager: self.datanode_manager.clone(),
@@ -83,7 +88,7 @@ impl DdlManager {
         }
     }

-    pub fn try_start(&self) -> Result<()> {
+    fn register_loaders(&self) -> Result<()> {
         let context = self.create_context();

         self.procedure_manager
@@ -142,6 +147,7 @@ impl DdlManager {
     }

     #[tracing::instrument(skip_all)]
+    /// Submits and executes an alter table task.
     pub async fn submit_alter_table_task(
         &self,
         cluster_id: u64,
@@ -159,6 +165,7 @@ impl DdlManager {
     }

     #[tracing::instrument(skip_all)]
+    /// Submits and executes a create table task.
     pub async fn submit_create_table_task(
         &self,
         cluster_id: u64,
@@ -176,6 +183,7 @@ impl DdlManager {
     }

     #[tracing::instrument(skip_all)]
+    /// Submits and executes a drop table task.
     pub async fn submit_drop_table_task(
         &self,
         cluster_id: u64,
@@ -199,6 +207,7 @@ impl DdlManager {
     }

     #[tracing::instrument(skip_all)]
+    /// Submits and executes a truncate table task.
     pub async fn submit_truncate_table_task(
         &self,
         cluster_id: u64,
@@ -416,3 +425,80 @@ impl DdlTaskExecutor for DdlManager {
             .await
     }
 }
#[cfg(test)]
mod tests {
use std::sync::Arc;
use api::v1::meta::Partition;
use common_procedure::local::LocalManager;
use table::metadata::{RawTableInfo, TableId};
use super::DdlManager;
use crate::cache_invalidator::DummyCacheInvalidator;
use crate::datanode_manager::{DatanodeManager, DatanodeRef};
use crate::ddl::alter_table::AlterTableProcedure;
use crate::ddl::create_table::CreateTableProcedure;
use crate::ddl::drop_table::DropTableProcedure;
use crate::ddl::truncate_table::TruncateTableProcedure;
use crate::ddl::{TableMetadataAllocator, TableMetadataAllocatorContext};
use crate::error::Result;
use crate::key::TableMetadataManager;
use crate::kv_backend::memory::MemoryKvBackend;
use crate::peer::Peer;
use crate::rpc::router::RegionRoute;
use crate::state_store::KvStateStore;
/// A dummy implemented [DatanodeManager].
pub struct DummyDatanodeManager;
#[async_trait::async_trait]
impl DatanodeManager for DummyDatanodeManager {
async fn datanode(&self, _datanode: &Peer) -> DatanodeRef {
unimplemented!()
}
}
/// A dummy implemented [TableMetadataAllocator].
pub struct DummyTableMetadataAllocator;
#[async_trait::async_trait]
impl TableMetadataAllocator for DummyTableMetadataAllocator {
async fn create(
&self,
_ctx: &TableMetadataAllocatorContext,
_table_info: &mut RawTableInfo,
_partitions: &[Partition],
) -> Result<(TableId, Vec<RegionRoute>)> {
unimplemented!()
}
}
#[test]
fn test_try_new() {
let kv_backend = Arc::new(MemoryKvBackend::new());
let table_metadata_manager = Arc::new(TableMetadataManager::new(kv_backend.clone()));
let state_store = Arc::new(KvStateStore::new(kv_backend));
let procedure_manager = Arc::new(LocalManager::new(Default::default(), state_store));
let _ = DdlManager::try_new(
procedure_manager.clone(),
Arc::new(DummyDatanodeManager),
Arc::new(DummyCacheInvalidator),
table_metadata_manager,
Arc::new(DummyTableMetadataAllocator),
);
let expected_loaders = vec![
CreateTableProcedure::TYPE_NAME,
AlterTableProcedure::TYPE_NAME,
DropTableProcedure::TYPE_NAME,
TruncateTableProcedure::TYPE_NAME,
];
for loader in expected_loaders {
assert!(procedure_manager.contains_loader(loader));
}
}
}

View File

@@ -33,5 +33,8 @@ pub const DATANODE_LEASE_SECS: u64 = REGION_LEASE_SECS;
 /// The lease seconds of metasrv leader.
 pub const META_LEASE_SECS: u64 = 3;

-// In a lease, there are two opportunities for renewal.
+/// In a lease, there are two opportunities for renewal.
 pub const META_KEEP_ALIVE_INTERVAL_SECS: u64 = META_LEASE_SECS / 2;
+
+/// The default mailbox round-trip timeout.
+pub const MAILBOX_RTT_SECS: u64 = 1;
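A note on the keep-alive arithmetic above: `u64` division truncates, so a 3-second lease yields a 1-second keep-alive interval, which leaves room for two renewal attempts before the lease expires. A self-contained check:

// Standalone sketch: integer division truncates 3 / 2 to 1.
const META_LEASE_SECS: u64 = 3;
const META_KEEP_ALIVE_INTERVAL_SECS: u64 = META_LEASE_SECS / 2;

fn main() {
    assert_eq!(META_KEEP_ALIVE_INTERVAL_SECS, 1);
    // Two keep-alives (at t=1s and t=2s) fit inside a 3s lease.
    assert!(2 * META_KEEP_ALIVE_INTERVAL_SECS < META_LEASE_SECS);
}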

View File

@@ -37,7 +37,7 @@ pub struct HeartbeatResponseHandlerContext {
 /// HandleControl
 ///
 /// Controls process of handling heartbeat response.
-#[derive(PartialEq)]
+#[derive(Debug, PartialEq)]
 pub enum HandleControl {
     Continue,
     Done,

View File

@@ -30,8 +30,8 @@ pub struct MessageMeta {
     pub from: String,
 }

-#[cfg(test)]
 impl MessageMeta {
+    #[cfg(any(test, feature = "testing"))]
     pub fn new_test(id: u64, subject: &str, to: &str, from: &str) -> Self {
         MessageMeta {
             id,

View File

@@ -48,6 +48,27 @@ impl Display for RegionIdent {
     }
 }

+/// The result of downgrade leader region.
+#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone)]
+pub struct DowngradeRegionReply {
+    /// Returns the `last_entry_id` if available.
+    pub last_entry_id: Option<u64>,
+    /// Indicates whether the region exists.
+    pub exists: bool,
+    /// Return error if any during the operation.
+    pub error: Option<String>,
+}
+
+impl Display for DowngradeRegionReply {
+    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+        write!(
+            f,
+            "(last_entry_id={:?}, exists={}, error={:?})",
+            self.last_entry_id, self.exists, self.error
+        )
+    }
+}
+
 #[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone)]
 pub struct SimpleReply {
     pub result: bool,
@@ -87,20 +108,82 @@ impl OpenRegion {
     }
 }

+/// The instruction of downgrading leader region.
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct DowngradeRegion {
+    /// The [RegionId].
+    pub region_id: RegionId,
+}
+
+impl Display for DowngradeRegion {
+    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+        write!(f, "DowngradeRegion(region_id={})", self.region_id)
+    }
+}
+
+/// Upgrades a follower region to leader region.
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct UpgradeRegion {
+    /// The [RegionId].
+    pub region_id: RegionId,
+    /// The `last_entry_id` of old leader region.
+    pub last_entry_id: Option<u64>,
+    /// The second of waiting for a wal replay.
+    ///
+    /// `None` stands for no wait,
+    /// it's helpful to verify whether the leader region is ready.
+    pub wait_for_replay_secs: Option<u64>,
+}
+
 #[derive(Debug, Clone, Serialize, Deserialize, Display)]
 pub enum Instruction {
+    /// Opens a region.
+    ///
+    /// - Returns true if a specified region exists.
     OpenRegion(OpenRegion),
+    /// Closes a region.
+    ///
+    /// - Returns true if a specified region does not exist.
     CloseRegion(RegionIdent),
+    /// Upgrades a region.
+    UpgradeRegion(UpgradeRegion),
+    /// Downgrades a region.
+    DowngradeRegion(DowngradeRegion),
+    /// Invalidates a specified table cache.
     InvalidateTableIdCache(TableId),
+    /// Invalidates a specified table name index cache.
     InvalidateTableNameCache(TableName),
 }

+/// The reply of [UpgradeRegion].
+#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone)]
+pub struct UpgradeRegionReply {
+    /// Returns true if `last_entry_id` has been replayed to the latest.
+    pub ready: bool,
+    /// Indicates whether the region exists.
+    pub exists: bool,
+    /// Returns error if any.
+    pub error: Option<String>,
+}
+
+impl Display for UpgradeRegionReply {
+    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+        write!(
+            f,
+            "(ready={}, exists={}, error={:?})",
+            self.ready, self.exists, self.error
+        )
+    }
+}
+
 #[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone)]
 #[serde(tag = "type", rename_all = "snake_case")]
 pub enum InstructionReply {
     OpenRegion(SimpleReply),
     CloseRegion(SimpleReply),
+    UpgradeRegion(UpgradeRegionReply),
     InvalidateTableCache(SimpleReply),
+    DowngradeRegion(DowngradeRegionReply),
 }

 impl Display for InstructionReply {
@@ -108,9 +191,13 @@ impl Display for InstructionReply {
         match self {
             Self::OpenRegion(reply) => write!(f, "InstructionReply::OpenRegion({})", reply),
             Self::CloseRegion(reply) => write!(f, "InstructionReply::CloseRegion({})", reply),
+            Self::UpgradeRegion(reply) => write!(f, "InstructionReply::UpgradeRegion({})", reply),
             Self::InvalidateTableCache(reply) => {
                 write!(f, "InstructionReply::Invalidate({})", reply)
             }
+            Self::DowngradeRegion(reply) => {
+                write!(f, "InstructionReply::DowngradeRegion({})", reply)
+            }
         }
     }
 }
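A detail worth calling out in `InstructionReply` above is the `#[serde(tag = "type", rename_all = "snake_case")]` attribute: each reply serializes with its variant name embedded as a `type` field. A minimal sketch with simplified stand-in types (not the real definitions):

use serde::{Deserialize, Serialize};

#[derive(Debug, Serialize, Deserialize, PartialEq)]
pub struct SimpleReply {
    pub result: bool,
    pub error: Option<String>,
}

#[derive(Debug, Serialize, Deserialize, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum InstructionReply {
    OpenRegion(SimpleReply),
    CloseRegion(SimpleReply),
}

fn main() {
    let reply = InstructionReply::CloseRegion(SimpleReply {
        result: true,
        error: None,
    });
    // The internally tagged representation embeds the variant name under "type".
    let json = serde_json::to_string(&reply).unwrap();
    assert_eq!(json, r#"{"type":"close_region","result":true,"error":null}"#);
}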

View File

@@ -89,7 +89,7 @@ use crate::DatanodeId;
 pub const REMOVED_PREFIX: &str = "__removed";

-const NAME_PATTERN: &str = r"[a-zA-Z_:-][a-zA-Z0-9_:\-\.]*";
+pub const NAME_PATTERN: &str = r"[a-zA-Z_:-][a-zA-Z0-9_:\-\.]*";

 const DATANODE_TABLE_KEY_PREFIX: &str = "__dn_table";
 const TABLE_REGION_KEY_PREFIX: &str = "__table_region";
@@ -584,7 +584,7 @@ impl TableMetadataManager {
         &self,
         table_id: TableId,
         region_info: RegionInfo,
-        current_table_route_value: DeserializedValueWithBytes<TableRouteValue>,
+        current_table_route_value: &DeserializedValueWithBytes<TableRouteValue>,
         new_region_routes: Vec<RegionRoute>,
         new_region_options: &HashMap<String, String>,
     ) -> Result<()> {
@@ -606,7 +606,7 @@ impl TableMetadataManager {
         let (update_table_route_txn, on_update_table_route_failure) = self
             .table_route_manager()
-            .build_update_txn(table_id, &current_table_route_value, &new_table_route_value)?;
+            .build_update_txn(table_id, current_table_route_value, &new_table_route_value)?;

         let txn = Txn::merge_all(vec![update_datanode_table_txn, update_table_route_txn]);
@@ -1173,7 +1173,7 @@ mod tests {
                 region_storage_path: region_storage_path.to_string(),
                 region_options: HashMap::new(),
             },
-            current_table_route_value.clone(),
+            &current_table_route_value,
             new_region_routes.clone(),
             &HashMap::new(),
         )
@@ -1190,7 +1190,7 @@ mod tests {
                 region_storage_path: region_storage_path.to_string(),
                 region_options: HashMap::new(),
             },
-            current_table_route_value.clone(),
+            &current_table_route_value,
             new_region_routes.clone(),
             &HashMap::new(),
         )
@@ -1212,7 +1212,7 @@ mod tests {
                 region_storage_path: region_storage_path.to_string(),
                 region_options: HashMap::new(),
             },
-            current_table_route_value.clone(),
+            &current_table_route_value,
             new_region_routes.clone(),
             &HashMap::new(),
         )
@@ -1237,7 +1237,7 @@ mod tests {
                 region_storage_path: region_storage_path.to_string(),
                 region_options: HashMap::new(),
             },
-            wrong_table_route_value,
+            &wrong_table_route_value,
             new_region_routes,
             &HashMap::new(),
         )

View File

@@ -68,6 +68,9 @@ impl EtcdStore {
     async fn do_multi_txn(&self, txn_ops: Vec<TxnOp>) -> Result<Vec<TxnResponse>> {
         if txn_ops.len() < MAX_TXN_SIZE {
             // fast path
+            let _timer = METRIC_META_TXN_REQUEST
+                .with_label_values(&["etcd", "txn"])
+                .start_timer();
             let txn = Txn::new().and_then(txn_ops);
             let txn_res = self
                 .client
@@ -81,6 +84,9 @@ impl EtcdStore {
         let txns = txn_ops
             .chunks(MAX_TXN_SIZE)
             .map(|part| async move {
+                let _timer = METRIC_META_TXN_REQUEST
+                    .with_label_values(&["etcd", "txn"])
+                    .start_timer();
                 let txn = Txn::new().and_then(part);
                 self.client.kv_client().txn(txn).await
             })
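The `_timer` guards added in both paths rely on drop semantics: the histogram records the elapsed time when the guard leaves scope. A minimal sketch assuming the `prometheus` and `lazy_static` crates, with an illustrative metric name rather than the repo's `METRIC_META_TXN_REQUEST`:

use lazy_static::lazy_static;
use prometheus::{register_histogram_vec, HistogramVec};

lazy_static! {
    static ref TXN_DURATION: HistogramVec = register_histogram_vec!(
        "example_txn_duration_seconds",
        "elapsed time of a txn request",
        &["backend", "op"]
    )
    .unwrap();
}

fn do_txn() {
    // The guard observes the elapsed time when it is dropped at scope end,
    // which is why binding it to `_timer` (not `_`, which drops immediately) matters.
    let _timer = TXN_DURATION.with_label_values(&["etcd", "txn"]).start_timer();
    // ... perform the transaction ...
}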

View File

@@ -54,12 +54,14 @@ impl DdlTask {
         schema: String,
         table: String,
         table_id: TableId,
+        drop_if_exists: bool,
     ) -> Self {
         DdlTask::DropTable(DropTableTask {
             catalog,
             schema,
             table,
             table_id,
+            drop_if_exists,
         })
     }
@@ -118,6 +120,7 @@ impl TryFrom<SubmitDdlTaskRequest> for PbSubmitDdlTaskRequest {
                     schema_name: task.schema,
                     table_name: task.table,
                     table_id: Some(api::v1::TableId { id: task.table_id }),
+                    drop_if_exists: task.drop_if_exists,
                 }),
             }),
             DdlTask::AlterTable(task) => Task::AlterTableTask(PbAlterTableTask {
@@ -176,6 +179,8 @@ pub struct DropTableTask {
     pub schema: String,
     pub table: String,
     pub table_id: TableId,
+    #[serde(default)]
+    pub drop_if_exists: bool,
 }

 impl DropTableTask {
@@ -214,6 +219,7 @@ impl TryFrom<PbDropTableTask> for DropTableTask {
                     err_msg: "expected table_id",
                 })?
                 .id,
+            drop_if_exists: drop_table.drop_if_exists,
         })
     }
 }
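The `#[serde(default)]` on `drop_if_exists` keeps previously serialized tasks readable: a payload written before the field existed still deserializes, with the flag falling back to `false`. A minimal sketch with a simplified stand-in for `DropTableTask`:

use serde::{Deserialize, Serialize};

#[derive(Debug, Serialize, Deserialize)]
struct DropTableTask {
    table: String,
    #[serde(default)]
    drop_if_exists: bool,
}

fn main() {
    // An old payload without the new field still parses.
    let task: DropTableTask = serde_json::from_str(r#"{"table":"t"}"#).unwrap();
    assert!(!task.drop_if_exists);
}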

View File

@@ -4,6 +4,9 @@ version.workspace = true
 edition.workspace = true
 license.workspace = true

+[features]
+testing = []
+
 [dependencies]
 async-stream.workspace = true
 async-trait.workspace = true

View File

@@ -527,6 +527,13 @@ impl LocalManager {
         Ok(())
     }

+    #[cfg(any(test, feature = "testing"))]
+    /// Returns true if contains a specified loader.
+    pub fn contains_loader(&self, name: &str) -> bool {
+        let loaders = self.manager_ctx.loaders.lock().unwrap();
+        loaders.contains_key(name)
+    }
 }

 #[async_trait]
#[async_trait] #[async_trait]

View File

@@ -4,9 +4,6 @@ version.workspace = true
 edition.workspace = true
 license.workspace = true

-[features]
-testing = []
-
 [dependencies]
 api.workspace = true
 arrow-flight.workspace = true
@@ -77,7 +74,9 @@ uuid.workspace = true
 [dev-dependencies]
 axum-test-helper = { git = "https://github.com/sunng87/axum-test-helper.git", branch = "patch-1" }
 client.workspace = true
+common-meta = { workspace = true, features = ["testing"] }
 common-query.workspace = true
 common-test-util.workspace = true
 datafusion-common.workspace = true
+mito2 = { workspace = true, features = ["test"] }
 session.workspace = true

View File

@@ -129,8 +129,12 @@ impl RegionAliveKeeper {
         let (role, region_id) = (region.role().into(), RegionId::from(region.region_id));

         if let Some(handle) = self.find_handle(region_id).await {
             handle.reset_deadline(role, deadline).await;
+        } else {
+            warn!(
+                "Trying to renew the lease for region {region_id}, the keeper handler is not found!"
+            );
+            // Else the region alive keeper might be triggered by lagging messages, we can safely ignore it.
         }
-        // Else the region alive keeper might be triggered by lagging messages, we can safely ignore it.
     }
 }
} }

View File

@@ -37,6 +37,7 @@ use crate::alive_keeper::RegionAliveKeeper;
 use crate::config::DatanodeOptions;
 use crate::error::{self, MetaClientInitSnafu, Result};
 use crate::event_listener::RegionServerEventReceiver;
+use crate::metrics;
 use crate::region_server::RegionServer;

 pub(crate) mod handler;
@@ -72,9 +73,9 @@ impl HeartbeatTask {
             opts.heartbeat.interval.as_millis() as u64,
         ));
         let resp_handler_executor = Arc::new(HandlerGroupExecutor::new(vec![
-            region_alive_keeper.clone(),
             Arc::new(ParseMailboxMessageHandler),
             Arc::new(RegionHeartbeatResponseHandler::new(region_server.clone())),
+            region_alive_keeper.clone(),
         ]));

         Ok(Self {
@@ -101,8 +102,10 @@ impl HeartbeatTask {
         quit_signal: Arc<Notify>,
     ) -> Result<HeartbeatSender> {
         let client_id = meta_client.id();

         let (tx, mut rx) = meta_client.heartbeat().await.context(MetaClientInitSnafu)?;
+        let mut last_received_lease = Instant::now();

         let _handle = common_runtime::spawn_bg(async move {
             while let Some(res) = match rx.message().await {
                 Ok(m) => m,
@@ -114,6 +117,28 @@ impl HeartbeatTask {
                 if let Some(msg) = res.mailbox_message.as_ref() {
                     info!("Received mailbox message: {msg:?}, meta_client id: {client_id:?}");
                 }
+                if let Some(lease) = res.region_lease.as_ref() {
+                    metrics::LAST_RECEIVED_HEARTBEAT_ELAPSED
+                        .set(last_received_lease.elapsed().as_millis() as i64);
+                    // Resets the timer.
+                    last_received_lease = Instant::now();
+
+                    let mut leader_region_lease_count = 0;
+                    let mut follower_region_lease_count = 0;
+                    for lease in &lease.regions {
+                        match lease.role() {
+                            RegionRole::Leader => leader_region_lease_count += 1,
+                            RegionRole::Follower => follower_region_lease_count += 1,
+                        }
+                    }
+
+                    metrics::HEARTBEAT_REGION_LEASES
+                        .with_label_values(&["leader"])
+                        .set(leader_region_lease_count);
+                    metrics::HEARTBEAT_REGION_LEASES
+                        .with_label_values(&["follower"])
+                        .set(follower_region_lease_count);
+                }
                 let ctx = HeartbeatResponseHandlerContext::new(mailbox.clone(), res);
                 if let Err(e) = Self::handle_response(ctx, handler_executor.clone()).await {
                     error!(e; "Error while handling heartbeat response");

View File

@@ -13,54 +13,119 @@
 // limitations under the License.

 use async_trait::async_trait;
-use common_error::ext::ErrorExt;
-use common_error::status_code::StatusCode;
 use common_meta::error::{InvalidHeartbeatResponseSnafu, Result as MetaResult};
 use common_meta::heartbeat::handler::{
     HandleControl, HeartbeatResponseHandler, HeartbeatResponseHandlerContext,
 };
-use common_meta::instruction::{Instruction, InstructionReply, OpenRegion, SimpleReply};
+use common_meta::instruction::{
+    DowngradeRegion, DowngradeRegionReply, Instruction, InstructionReply, OpenRegion, SimpleReply,
+};
 use common_meta::RegionIdent;
-use common_query::Output;
 use common_telemetry::error;
+use futures::future::BoxFuture;
 use snafu::OptionExt;
 use store_api::path_utils::region_dir;
+use store_api::region_engine::SetReadonlyResponse;
 use store_api::region_request::{RegionCloseRequest, RegionOpenRequest, RegionRequest};
 use store_api::storage::RegionId;

-use crate::error::Result;
+use crate::error;
 use crate::region_server::RegionServer;

 /// Handler for [Instruction::OpenRegion] and [Instruction::CloseRegion].
 #[derive(Clone)]
 pub struct RegionHeartbeatResponseHandler {
     region_server: RegionServer,
 }

+/// Handler of the instruction.
+pub type InstructionHandler =
+    Box<dyn FnOnce(RegionServer) -> BoxFuture<'static, InstructionReply> + Send>;
+
 impl RegionHeartbeatResponseHandler {
+    /// Returns the [RegionHeartbeatResponseHandler].
     pub fn new(region_server: RegionServer) -> Self {
         Self { region_server }
     }

-    fn instruction_to_request(instruction: Instruction) -> MetaResult<(RegionId, RegionRequest)> {
+    /// Builds the [InstructionHandler].
+    fn build_handler(instruction: Instruction) -> MetaResult<InstructionHandler> {
         match instruction {
             Instruction::OpenRegion(OpenRegion {
                 region_ident,
                 region_storage_path,
                 options,
-            }) => {
-                let region_id = Self::region_ident_to_region_id(&region_ident);
-                let open_region_req = RegionRequest::Open(RegionOpenRequest {
-                    engine: region_ident.engine,
-                    region_dir: region_dir(&region_storage_path, region_id),
-                    options,
-                });
-                Ok((region_id, open_region_req))
-            }
-            Instruction::CloseRegion(region_ident) => {
-                let region_id = Self::region_ident_to_region_id(&region_ident);
-                let close_region_req = RegionRequest::Close(RegionCloseRequest {});
-                Ok((region_id, close_region_req))
+            }) => Ok(Box::new(|region_server| {
+                Box::pin(async move {
+                    let region_id = Self::region_ident_to_region_id(&region_ident);
+                    let request = RegionRequest::Open(RegionOpenRequest {
+                        engine: region_ident.engine,
+                        region_dir: region_dir(&region_storage_path, region_id),
+                        options,
+                    });
+                    let result = region_server.handle_request(region_id, request).await;
+
+                    let success = result.is_ok();
+                    let error = result.as_ref().map_err(|e| e.to_string()).err();
+
+                    InstructionReply::OpenRegion(SimpleReply {
+                        result: success,
+                        error,
+                    })
+                })
+            })),
+            Instruction::CloseRegion(region_ident) => Ok(Box::new(|region_server| {
+                Box::pin(async move {
+                    let region_id = Self::region_ident_to_region_id(&region_ident);
+                    let request = RegionRequest::Close(RegionCloseRequest {});
+                    let result = region_server.handle_request(region_id, request).await;
+                    match result {
+                        Ok(_) => InstructionReply::CloseRegion(SimpleReply {
+                            result: true,
+                            error: None,
+                        }),
+                        Err(error::Error::RegionNotFound { .. }) => {
+                            InstructionReply::CloseRegion(SimpleReply {
+                                result: true,
+                                error: None,
+                            })
+                        }
+                        Err(err) => InstructionReply::CloseRegion(SimpleReply {
+                            result: false,
+                            error: Some(err.to_string()),
+                        }),
+                    }
+                })
+            })),
+            Instruction::DowngradeRegion(DowngradeRegion { region_id }) => {
+                Ok(Box::new(move |region_server| {
+                    Box::pin(async move {
+                        match region_server.set_readonly_gracefully(region_id).await {
+                            Ok(SetReadonlyResponse::Success { last_entry_id }) => {
+                                InstructionReply::DowngradeRegion(DowngradeRegionReply {
+                                    last_entry_id,
+                                    exists: true,
+                                    error: None,
+                                })
+                            }
+                            Ok(SetReadonlyResponse::NotFound) => {
+                                InstructionReply::DowngradeRegion(DowngradeRegionReply {
+                                    last_entry_id: None,
+                                    exists: false,
+                                    error: None,
+                                })
+                            }
+                            Err(err) => {
+                                InstructionReply::DowngradeRegion(DowngradeRegionReply {
+                                    last_entry_id: None,
+                                    exists: false,
+                                    error: Some(err.to_string()),
+                                })
+                            }
+                        }
+                    })
+                }))
+            }
+            Instruction::UpgradeRegion(_) => {
+                todo!()
             }
             Instruction::InvalidateTableIdCache(_) | Instruction::InvalidateTableNameCache(_) => {
                 InvalidHeartbeatResponseSnafu.fail()
@@ -71,53 +136,6 @@ impl RegionHeartbeatResponseHandler {
     fn region_ident_to_region_id(region_ident: &RegionIdent) -> RegionId {
         RegionId::new(region_ident.table_id, region_ident.region_number)
     }
-
-    fn reply_template_from_instruction(instruction: &Instruction) -> InstructionReply {
-        match instruction {
-            Instruction::OpenRegion(_) => InstructionReply::OpenRegion(SimpleReply {
-                result: false,
-                error: None,
-            }),
-            Instruction::CloseRegion(_) => InstructionReply::CloseRegion(SimpleReply {
-                result: false,
-                error: None,
-            }),
-            Instruction::InvalidateTableIdCache(_) | Instruction::InvalidateTableNameCache(_) => {
-                InstructionReply::InvalidateTableCache(SimpleReply {
-                    result: false,
-                    error: None,
-                })
-            }
-        }
-    }
-
-    fn fill_reply(mut template: InstructionReply, result: Result<Output>) -> InstructionReply {
-        let success = result.is_ok();
-        let error = result.as_ref().map_err(|e| e.to_string()).err();
-        match &mut template {
-            InstructionReply::OpenRegion(reply) => {
-                reply.result = success;
-                reply.error = error;
-            }
-            InstructionReply::CloseRegion(reply) => match result {
-                Err(e) => {
-                    if e.status_code() == StatusCode::RegionNotFound {
-                        reply.result = true;
-                    }
-                }
-                _ => {
-                    reply.result = success;
-                    reply.error = error;
-                }
-            },
-            InstructionReply::InvalidateTableCache(reply) => {
-                reply.result = success;
-                reply.error = error;
-            }
-        }
-        template
-    }
 }

 #[async_trait]
@@ -125,7 +143,9 @@ impl HeartbeatResponseHandler for RegionHeartbeatResponseHandler {
     fn is_acceptable(&self, ctx: &HeartbeatResponseHandlerContext) -> bool {
         matches!(
             ctx.incoming_message.as_ref(),
-            Some((_, Instruction::OpenRegion { .. })) | Some((_, Instruction::CloseRegion { .. }))
+            Some((_, Instruction::OpenRegion { .. }))
+                | Some((_, Instruction::CloseRegion { .. }))
+                | Some((_, Instruction::DowngradeRegion { .. }))
         )
     }

@@ -137,15 +157,11 @@ impl HeartbeatResponseHandler for RegionHeartbeatResponseHandler {
         let mailbox = ctx.mailbox.clone();
         let region_server = self.region_server.clone();

-        let reply_template = Self::reply_template_from_instruction(&instruction);
-        let (region_id, region_req) = Self::instruction_to_request(instruction)?;
+        let handler = Self::build_handler(instruction)?;
         let _handle = common_runtime::spawn_bg(async move {
-            let result = region_server.handle_request(region_id, region_req).await;
-            if let Err(e) = mailbox
-                .send((meta, Self::fill_reply(reply_template, result)))
-                .await
-            {
+            let reply = handler(region_server).await;
+
+            if let Err(e) = mailbox.send((meta, reply)).await {
                 error!(e; "Failed to send reply to mailbox");
             }
         });
@@ -153,3 +169,266 @@ impl HeartbeatResponseHandler for RegionHeartbeatResponseHandler {
         Ok(HandleControl::Done)
     }
 }
#[cfg(test)]
mod tests {
use std::assert_matches::assert_matches;
use std::collections::HashMap;
use std::sync::Arc;
use common_meta::heartbeat::mailbox::{
HeartbeatMailbox, IncomingMessage, MailboxRef, MessageMeta,
};
use mito2::config::MitoConfig;
use mito2::engine::MITO_ENGINE_NAME;
use mito2::test_util::{CreateRequestBuilder, TestEnv};
use store_api::region_request::RegionRequest;
use store_api::storage::RegionId;
use tokio::sync::mpsc::{self, Receiver};
use super::*;
use crate::error;
use crate::tests::mock_region_server;
pub struct HeartbeatResponseTestEnv {
mailbox: MailboxRef,
receiver: Receiver<(MessageMeta, InstructionReply)>,
}
impl HeartbeatResponseTestEnv {
pub fn new() -> Self {
let (tx, rx) = mpsc::channel(8);
let mailbox = Arc::new(HeartbeatMailbox::new(tx));
HeartbeatResponseTestEnv {
mailbox,
receiver: rx,
}
}
pub fn create_handler_ctx(
&self,
incoming_message: IncomingMessage,
) -> HeartbeatResponseHandlerContext {
HeartbeatResponseHandlerContext {
mailbox: self.mailbox.clone(),
response: Default::default(),
incoming_message: Some(incoming_message),
}
}
}
fn close_region_instruction(region_id: RegionId) -> Instruction {
Instruction::CloseRegion(RegionIdent {
table_id: region_id.table_id(),
region_number: region_id.region_number(),
cluster_id: 1,
datanode_id: 2,
engine: MITO_ENGINE_NAME.to_string(),
})
}
fn open_region_instruction(region_id: RegionId, path: &str) -> Instruction {
Instruction::OpenRegion(OpenRegion::new(
RegionIdent {
table_id: region_id.table_id(),
region_number: region_id.region_number(),
cluster_id: 1,
datanode_id: 2,
engine: MITO_ENGINE_NAME.to_string(),
},
path,
HashMap::new(),
))
}
#[tokio::test]
async fn test_close_region() {
common_telemetry::init_default_ut_logging();
let mut region_server = mock_region_server();
let heartbeat_handler = RegionHeartbeatResponseHandler::new(region_server.clone());
let mut engine_env = TestEnv::with_prefix("close-region");
let engine = engine_env.create_engine(MitoConfig::default()).await;
region_server.register_engine(Arc::new(engine));
let region_id = RegionId::new(1024, 1);
let builder = CreateRequestBuilder::new();
let create_req = builder.build();
region_server
.handle_request(region_id, RegionRequest::Create(create_req))
.await
.unwrap();
let mut heartbeat_env = HeartbeatResponseTestEnv::new();
// Should be ok, if we try to close it twice.
for _ in 0..2 {
let meta = MessageMeta::new_test(1, "test", "dn-1", "me-0");
let instruction = close_region_instruction(region_id);
let mut ctx = heartbeat_env.create_handler_ctx((meta, instruction));
let control = heartbeat_handler.handle(&mut ctx).await.unwrap();
assert_matches!(control, HandleControl::Done);
let (_, reply) = heartbeat_env.receiver.recv().await.unwrap();
if let InstructionReply::CloseRegion(reply) = reply {
assert!(reply.result);
assert!(reply.error.is_none());
} else {
unreachable!()
}
assert_matches!(
region_server.set_writable(region_id, true).unwrap_err(),
error::Error::RegionNotFound { .. }
);
}
}
#[tokio::test]
async fn test_open_region_ok() {
common_telemetry::init_default_ut_logging();
let mut region_server = mock_region_server();
let heartbeat_handler = RegionHeartbeatResponseHandler::new(region_server.clone());
let mut engine_env = TestEnv::with_prefix("open-region");
let engine = engine_env.create_engine(MitoConfig::default()).await;
region_server.register_engine(Arc::new(engine));
let region_id = RegionId::new(1024, 1);
let builder = CreateRequestBuilder::new();
let mut create_req = builder.build();
let storage_path = "test";
create_req.region_dir = region_dir(storage_path, region_id);
region_server
.handle_request(region_id, RegionRequest::Create(create_req))
.await
.unwrap();
region_server
.handle_request(region_id, RegionRequest::Close(RegionCloseRequest {}))
.await
.unwrap();
let mut heartbeat_env = HeartbeatResponseTestEnv::new();
// Should be ok, if we try to open it twice.
for _ in 0..2 {
let meta = MessageMeta::new_test(1, "test", "dn-1", "me-0");
let instruction = open_region_instruction(region_id, storage_path);
let mut ctx = heartbeat_env.create_handler_ctx((meta, instruction));
let control = heartbeat_handler.handle(&mut ctx).await.unwrap();
assert_matches!(control, HandleControl::Done);
let (_, reply) = heartbeat_env.receiver.recv().await.unwrap();
if let InstructionReply::OpenRegion(reply) = reply {
assert!(reply.result);
assert!(reply.error.is_none());
} else {
unreachable!()
}
}
}
#[tokio::test]
async fn test_open_not_exists_region() {
common_telemetry::init_default_ut_logging();
let mut region_server = mock_region_server();
let heartbeat_handler = RegionHeartbeatResponseHandler::new(region_server.clone());
let mut engine_env = TestEnv::with_prefix("open-not-exists-region");
let engine = engine_env.create_engine(MitoConfig::default()).await;
region_server.register_engine(Arc::new(engine));
let region_id = RegionId::new(1024, 1);
let storage_path = "test";
let mut heartbeat_env = HeartbeatResponseTestEnv::new();
let meta = MessageMeta::new_test(1, "test", "dn-1", "me-0");
let instruction = open_region_instruction(region_id, storage_path);
let mut ctx = heartbeat_env.create_handler_ctx((meta, instruction));
let control = heartbeat_handler.handle(&mut ctx).await.unwrap();
assert_matches!(control, HandleControl::Done);
let (_, reply) = heartbeat_env.receiver.recv().await.unwrap();
if let InstructionReply::OpenRegion(reply) = reply {
assert!(!reply.result);
assert!(reply.error.is_some());
} else {
unreachable!()
}
}
#[tokio::test]
async fn test_downgrade_region() {
common_telemetry::init_default_ut_logging();
let mut region_server = mock_region_server();
let heartbeat_handler = RegionHeartbeatResponseHandler::new(region_server.clone());
let mut engine_env = TestEnv::with_prefix("downgrade-region");
let engine = engine_env.create_engine(MitoConfig::default()).await;
region_server.register_engine(Arc::new(engine));
let region_id = RegionId::new(1024, 1);
let builder = CreateRequestBuilder::new();
let mut create_req = builder.build();
let storage_path = "test";
create_req.region_dir = region_dir(storage_path, region_id);
region_server
.handle_request(region_id, RegionRequest::Create(create_req))
.await
.unwrap();
let mut heartbeat_env = HeartbeatResponseTestEnv::new();
// Should be ok, if we try to downgrade it twice.
for _ in 0..2 {
let meta = MessageMeta::new_test(1, "test", "dn-1", "me-0");
let instruction = Instruction::DowngradeRegion(DowngradeRegion { region_id });
let mut ctx = heartbeat_env.create_handler_ctx((meta, instruction));
let control = heartbeat_handler.handle(&mut ctx).await.unwrap();
assert_matches!(control, HandleControl::Done);
let (_, reply) = heartbeat_env.receiver.recv().await.unwrap();
if let InstructionReply::DowngradeRegion(reply) = reply {
assert!(reply.exists);
assert!(reply.error.is_none());
assert_eq!(reply.last_entry_id.unwrap(), 0);
} else {
unreachable!()
}
}
// Downgrades a not exists region.
let meta = MessageMeta::new_test(1, "test", "dn-1", "me-0");
let instruction = Instruction::DowngradeRegion(DowngradeRegion {
region_id: RegionId::new(2048, 1),
});
let mut ctx = heartbeat_env.create_handler_ctx((meta, instruction));
let control = heartbeat_handler.handle(&mut ctx).await.unwrap();
assert_matches!(control, HandleControl::Done);
let (_, reply) = heartbeat_env.receiver.recv().await.unwrap();
if let InstructionReply::DowngradeRegion(reply) = reply {
assert!(!reply.exists);
assert!(reply.error.is_none());
assert!(reply.last_entry_id.is_none());
} else {
unreachable!()
}
}
}

View File

@@ -26,5 +26,4 @@ pub mod metrics;
 pub mod region_server;
 mod store;

 #[cfg(test)]
-#[allow(dead_code)]
 mod tests;

View File

@@ -18,6 +18,8 @@ use prometheus::*;
 /// Region request type label.
 pub const REGION_REQUEST_TYPE: &str = "datanode_region_request_type";

+pub const REGION_ROLE: &str = "region_role";
+
 lazy_static! {
     /// The elapsed time of handling a request in the region_server.
     pub static ref HANDLE_REGION_REQUEST_ELAPSED: HistogramVec = register_histogram_vec!(
@@ -26,4 +28,17 @@ lazy_static! {
         &[REGION_REQUEST_TYPE]
     )
     .unwrap();
+    /// The elapsed time since the last received heartbeat.
+    pub static ref LAST_RECEIVED_HEARTBEAT_ELAPSED: IntGauge = register_int_gauge!(
+        "last_received_heartbeat_lease_elapsed",
+        "last received heartbeat lease elapsed",
+    )
+    .unwrap();
+    /// The received region leases via heartbeat.
+    pub static ref HEARTBEAT_REGION_LEASES: IntGaugeVec = register_int_gauge_vec!(
+        "heartbeat_region_leases",
+        "received region leases via heartbeat",
+        &[REGION_ROLE]
+    )
+    .unwrap();
 }

View File

@@ -49,7 +49,7 @@ use servers::grpc::region_server::RegionServerHandler;
 use session::context::{QueryContextBuilder, QueryContextRef};
 use snafu::{OptionExt, ResultExt};
 use store_api::metadata::RegionMetadataRef;
-use store_api::region_engine::{RegionEngineRef, RegionRole};
+use store_api::region_engine::{RegionEngineRef, RegionRole, SetReadonlyResponse};
 use store_api::region_request::{RegionCloseRequest, RegionRequest};
 use store_api::storage::{RegionId, ScanRequest};
 use substrait::{DFLogicalSubstraitConvertor, SubstraitPlan};
@@ -148,6 +148,19 @@ impl RegionServer {
             .with_context(|_| HandleRegionRequestSnafu { region_id })
     }

+    pub async fn set_readonly_gracefully(
+        &self,
+        region_id: RegionId,
+    ) -> Result<SetReadonlyResponse> {
+        match self.inner.region_map.get(&region_id) {
+            Some(engine) => Ok(engine
+                .set_readonly_gracefully(region_id)
+                .await
+                .with_context(|_| HandleRegionRequestSnafu { region_id })?),
+            None => Ok(SetReadonlyResponse::NotFound),
+        }
+    }
+
     pub fn runtime(&self) -> Arc<Runtime> {
         self.inner.runtime.clone()
     }

View File

@@ -66,23 +66,9 @@ pub(crate) async fn new_object_store(opts: &DatanodeOptions) -> Result<ObjectSto
             .with_error_level(Some("debug"))
             .expect("input error level must be valid"),
     )
-    .layer(TracingLayer);
+    .layer(TracingLayer)
+    .layer(object_store::layers::PrometheusMetricsLayer);

-    // In the test environment, multiple datanodes will be started in the same process.
-    // If each datanode registers Prometheus metric when it starts, it will cause the program to crash. (Because the same metric is registered repeatedly.)
-    // So the Prometheus metric layer is disabled in the test environment.
-    #[cfg(feature = "testing")]
-    return Ok(store);
-
-    #[cfg(not(feature = "testing"))]
-    {
-        let registry = prometheus::default_registry();
-        Ok(
-            store.layer(object_store::layers::PrometheusLayer::with_registry(
-                registry.clone(),
-            )),
-        )
-    }
+    Ok(store)
 }

 async fn create_object_store_with_cache(
async fn create_object_store_with_cache( async fn create_object_store_with_cache(

View File

@@ -13,19 +13,12 @@
 // limitations under the License.

 use std::any::Any;
-use std::collections::HashMap;
 use std::sync::Arc;

-use api::v1::meta::HeartbeatResponse;
 use async_trait::async_trait;
 use common_error::ext::BoxedError;
 use common_function::scalars::aggregate::AggregateFunctionMetaRef;
 use common_function::scalars::FunctionRef;
-use common_meta::heartbeat::handler::{
-    HeartbeatResponseHandlerContext, HeartbeatResponseHandlerExecutor,
-};
-use common_meta::heartbeat::mailbox::{HeartbeatMailbox, MessageMeta};
-use common_meta::instruction::{Instruction, OpenRegion, RegionIdent};
 use common_query::prelude::ScalarUdf;
 use common_query::Output;
 use common_recordbatch::SendableRecordBatchStream;
@@ -46,51 +39,6 @@ use tokio::sync::mpsc::{Receiver, Sender};
 use crate::event_listener::NoopRegionServerEventListener;
 use crate::region_server::RegionServer;

-pub fn test_message_meta(id: u64, subject: &str, to: &str, from: &str) -> MessageMeta {
-    MessageMeta {
-        id,
-        subject: subject.to_string(),
-        to: to.to_string(),
-        from: from.to_string(),
-    }
-}
-
-async fn handle_instruction(
-    executor: Arc<dyn HeartbeatResponseHandlerExecutor>,
-    mailbox: Arc<HeartbeatMailbox>,
-    instruction: Instruction,
-) {
-    let response = HeartbeatResponse::default();
-    let mut ctx: HeartbeatResponseHandlerContext =
-        HeartbeatResponseHandlerContext::new(mailbox, response);
-    ctx.incoming_message = Some((test_message_meta(1, "hi", "foo", "bar"), instruction));
-    executor.handle(ctx).await.unwrap();
-}
-
-fn close_region_instruction() -> Instruction {
-    Instruction::CloseRegion(RegionIdent {
-        table_id: 1024,
-        region_number: 0,
-        cluster_id: 1,
-        datanode_id: 2,
-        engine: "mito2".to_string(),
-    })
-}
-
-fn open_region_instruction() -> Instruction {
-    Instruction::OpenRegion(OpenRegion::new(
-        RegionIdent {
-            table_id: 1024,
-            region_number: 0,
-            cluster_id: 1,
-            datanode_id: 2,
-            engine: "mito2".to_string(),
-        },
-        "path/dir",
-        HashMap::new(),
-    ))
-}
-
 pub struct MockQueryEngine;

 #[async_trait]

View File

@@ -85,31 +85,48 @@ pub enum ConcreteDataType {
 impl fmt::Display for ConcreteDataType {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         match self {
-            ConcreteDataType::Null(_) => write!(f, "Null"),
-            ConcreteDataType::Boolean(_) => write!(f, "Boolean"),
-            ConcreteDataType::Int8(_) => write!(f, "Int8"),
-            ConcreteDataType::Int16(_) => write!(f, "Int16"),
-            ConcreteDataType::Int32(_) => write!(f, "Int32"),
-            ConcreteDataType::Int64(_) => write!(f, "Int64"),
-            ConcreteDataType::UInt8(_) => write!(f, "UInt8"),
-            ConcreteDataType::UInt16(_) => write!(f, "UInt16"),
-            ConcreteDataType::UInt32(_) => write!(f, "UInt32"),
-            ConcreteDataType::UInt64(_) => write!(f, "UInt64"),
-            ConcreteDataType::Float32(_) => write!(f, "Float32"),
-            ConcreteDataType::Float64(_) => write!(f, "Float64"),
-            ConcreteDataType::Binary(_) => write!(f, "Binary"),
-            ConcreteDataType::String(_) => write!(f, "String"),
-            ConcreteDataType::Date(_) => write!(f, "Date"),
-            ConcreteDataType::DateTime(_) => write!(f, "DateTime"),
-            ConcreteDataType::Timestamp(_) => write!(f, "Timestamp"),
-            ConcreteDataType::Time(_) => write!(f, "Time"),
-            ConcreteDataType::List(_) => write!(f, "List"),
-            ConcreteDataType::Dictionary(_) => write!(f, "Dictionary"),
-            ConcreteDataType::Interval(_) => write!(f, "Interval"),
-            ConcreteDataType::Duration(_) => write!(f, "Duration"),
-            ConcreteDataType::Decimal128(d) => {
-                write!(f, "Decimal128({},{})", d.precision(), d.scale())
-            }
+            ConcreteDataType::Null(v) => write!(f, "{}", v.name()),
+            ConcreteDataType::Boolean(v) => write!(f, "{}", v.name()),
+            ConcreteDataType::Int8(v) => write!(f, "{}", v.name()),
+            ConcreteDataType::Int16(v) => write!(f, "{}", v.name()),
+            ConcreteDataType::Int32(v) => write!(f, "{}", v.name()),
+            ConcreteDataType::Int64(v) => write!(f, "{}", v.name()),
+            ConcreteDataType::UInt8(v) => write!(f, "{}", v.name()),
+            ConcreteDataType::UInt16(v) => write!(f, "{}", v.name()),
+            ConcreteDataType::UInt32(v) => write!(f, "{}", v.name()),
+            ConcreteDataType::UInt64(v) => write!(f, "{}", v.name()),
+            ConcreteDataType::Float32(v) => write!(f, "{}", v.name()),
+            ConcreteDataType::Float64(v) => write!(f, "{}", v.name()),
+            ConcreteDataType::Binary(v) => write!(f, "{}", v.name()),
+            ConcreteDataType::String(v) => write!(f, "{}", v.name()),
+            ConcreteDataType::Date(v) => write!(f, "{}", v.name()),
+            ConcreteDataType::DateTime(v) => write!(f, "{}", v.name()),
+            ConcreteDataType::Timestamp(t) => match t {
+                TimestampType::Second(v) => write!(f, "{}", v.name()),
+                TimestampType::Millisecond(v) => write!(f, "{}", v.name()),
+                TimestampType::Microsecond(v) => write!(f, "{}", v.name()),
+                TimestampType::Nanosecond(v) => write!(f, "{}", v.name()),
+            },
+            ConcreteDataType::Time(t) => match t {
+                TimeType::Second(v) => write!(f, "{}", v.name()),
+                TimeType::Millisecond(v) => write!(f, "{}", v.name()),
+                TimeType::Microsecond(v) => write!(f, "{}", v.name()),
+                TimeType::Nanosecond(v) => write!(f, "{}", v.name()),
+            },
+            ConcreteDataType::Interval(i) => match i {
+                IntervalType::YearMonth(v) => write!(f, "{}", v.name()),
+                IntervalType::DayTime(v) => write!(f, "{}", v.name()),
+                IntervalType::MonthDayNano(v) => write!(f, "{}", v.name()),
+            },
+            ConcreteDataType::Duration(d) => match d {
+                DurationType::Second(v) => write!(f, "{}", v.name()),
+                DurationType::Millisecond(v) => write!(f, "{}", v.name()),
+                DurationType::Microsecond(v) => write!(f, "{}", v.name()),
+                DurationType::Nanosecond(v) => write!(f, "{}", v.name()),
+            },
+            ConcreteDataType::Decimal128(v) => write!(f, "{}", v.name()),
+            ConcreteDataType::List(v) => write!(f, "{}", v.name()),
+            ConcreteDataType::Dictionary(v) => write!(f, "{}", v.name()),
         }
     }
 }
@@ -492,7 +509,7 @@ impl ConcreteDataType {
 #[enum_dispatch::enum_dispatch]
 pub trait DataType: std::fmt::Debug + Send + Sync {
     /// Name of this data type.
-    fn name(&self) -> &str;
+    fn name(&self) -> String;

     /// Returns id of the Logical data type.
     fn logical_type_id(&self) -> LogicalTypeId;
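
The switch from `&str` to `String` is what makes the composite names below possible: a computed name like `List<Int32>` has to be built with `format!`, and an owned `String` can be returned from `name()` while a borrowed `&str` would need a stored field or a leak. A minimal sketch of the shape the later hunks follow (the type here is hypothetical, for illustration only):

    impl DataType for MyListType {
        fn name(&self) -> String {
            // An owned String can embed the item type's computed name;
            // a &str return could not carry this temporary.
            format!("List<{}>", self.item_type.name())
        }
    }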
@@ -523,7 +540,7 @@ mod tests {
     fn test_concrete_type_as_datatype_trait() {
         let concrete_type = ConcreteDataType::boolean_datatype();

-        assert_eq!("Boolean", concrete_type.name());
+        assert_eq!("Boolean", concrete_type.to_string());
         assert_eq!(Value::Boolean(false), concrete_type.default_value());
         assert_eq!(LogicalTypeId::Boolean, concrete_type.logical_type_id());
         assert_eq!(ArrowDataType::Boolean, concrete_type.as_arrow_type());
@@ -767,94 +784,68 @@ mod tests {
     #[test]
     fn test_display_concrete_data_type() {
-        assert_eq!(
-            ConcreteDataType::from_arrow_type(&ArrowDataType::Null).to_string(),
-            "Null"
-        );
-        assert_eq!(
-            ConcreteDataType::from_arrow_type(&ArrowDataType::Boolean).to_string(),
-            "Boolean"
-        );
-        assert_eq!(
-            ConcreteDataType::from_arrow_type(&ArrowDataType::Binary).to_string(),
-            "Binary"
-        );
-        assert_eq!(
-            ConcreteDataType::from_arrow_type(&ArrowDataType::LargeBinary).to_string(),
-            "Binary"
-        );
-        assert_eq!(
-            ConcreteDataType::from_arrow_type(&ArrowDataType::Int8).to_string(),
-            "Int8"
-        );
-        assert_eq!(
-            ConcreteDataType::from_arrow_type(&ArrowDataType::Int16).to_string(),
-            "Int16"
-        );
-        assert_eq!(
-            ConcreteDataType::from_arrow_type(&ArrowDataType::Int32).to_string(),
-            "Int32"
-        );
-        assert_eq!(
-            ConcreteDataType::from_arrow_type(&ArrowDataType::Int64).to_string(),
-            "Int64"
-        );
-        assert_eq!(
-            ConcreteDataType::from_arrow_type(&ArrowDataType::UInt8).to_string(),
-            "UInt8"
-        );
-        assert_eq!(
-            ConcreteDataType::from_arrow_type(&ArrowDataType::UInt16).to_string(),
-            "UInt16"
-        );
-        assert_eq!(
-            ConcreteDataType::from_arrow_type(&ArrowDataType::UInt32).to_string(),
-            "UInt32"
-        );
-        assert_eq!(
-            ConcreteDataType::from_arrow_type(&ArrowDataType::UInt64).to_string(),
-            "UInt64"
-        );
-        assert_eq!(
-            ConcreteDataType::from_arrow_type(&ArrowDataType::Float32).to_string(),
-            "Float32"
-        );
-        assert_eq!(
-            ConcreteDataType::from_arrow_type(&ArrowDataType::Float64).to_string(),
-            "Float64"
-        );
-        assert_eq!(
-            ConcreteDataType::from_arrow_type(&ArrowDataType::Utf8).to_string(),
-            "String"
-        );
-        assert_eq!(
-            ConcreteDataType::from_arrow_type(&ArrowDataType::List(Arc::new(Field::new(
-                "item",
-                ArrowDataType::Int32,
-                true,
-            ))))
-            .to_string(),
-            "List"
-        );
-        assert_eq!(
-            ConcreteDataType::from_arrow_type(&ArrowDataType::Date32).to_string(),
-            "Date"
-        );
-        assert_eq!(ConcreteDataType::time_second_datatype().to_string(), "Time");
-        assert_eq!(
-            ConcreteDataType::from_arrow_type(&ArrowDataType::Interval(
-                arrow_schema::IntervalUnit::MonthDayNano,
-            ))
-            .to_string(),
-            "Interval"
-        );
-        assert_eq!(
-            ConcreteDataType::duration_second_datatype().to_string(),
-            "Duration"
-        );
-        assert_eq!(
-            ConcreteDataType::decimal128_datatype(10, 2).to_string(),
-            "Decimal128(10,2)"
-        );
+        assert_eq!(ConcreteDataType::null_datatype().to_string(), "Null");
+        assert_eq!(ConcreteDataType::boolean_datatype().to_string(), "Boolean");
+        assert_eq!(ConcreteDataType::binary_datatype().to_string(), "Binary");
+        assert_eq!(ConcreteDataType::int8_datatype().to_string(), "Int8");
+        assert_eq!(ConcreteDataType::int16_datatype().to_string(), "Int16");
+        assert_eq!(ConcreteDataType::int32_datatype().to_string(), "Int32");
+        assert_eq!(ConcreteDataType::int64_datatype().to_string(), "Int64");
+        assert_eq!(ConcreteDataType::uint8_datatype().to_string(), "UInt8");
+        assert_eq!(ConcreteDataType::uint16_datatype().to_string(), "UInt16");
+        assert_eq!(ConcreteDataType::uint32_datatype().to_string(), "UInt32");
+        assert_eq!(ConcreteDataType::uint64_datatype().to_string(), "UInt64");
+        assert_eq!(ConcreteDataType::float32_datatype().to_string(), "Float32");
+        assert_eq!(ConcreteDataType::float64_datatype().to_string(), "Float64");
+        assert_eq!(ConcreteDataType::string_datatype().to_string(), "String");
+        assert_eq!(ConcreteDataType::date_datatype().to_string(), "Date");
+        assert_eq!(
+            ConcreteDataType::timestamp_millisecond_datatype().to_string(),
+            "TimestampMillisecond"
+        );
+        assert_eq!(
+            ConcreteDataType::time_millisecond_datatype().to_string(),
+            "TimeMillisecond"
+        );
+        assert_eq!(
+            ConcreteDataType::interval_month_day_nano_datatype().to_string(),
+            "IntervalMonthDayNano"
+        );
+        assert_eq!(
+            ConcreteDataType::duration_second_datatype().to_string(),
+            "DurationSecond"
+        );
+        assert_eq!(
+            ConcreteDataType::decimal128_datatype(10, 2).to_string(),
+            "Decimal(10, 2)"
+        );
+        // Nested types
+        assert_eq!(
+            ConcreteDataType::list_datatype(ConcreteDataType::int32_datatype()).to_string(),
+            "List<Int32>"
+        );
+        assert_eq!(
+            ConcreteDataType::list_datatype(ConcreteDataType::Dictionary(DictionaryType::new(
+                ConcreteDataType::int32_datatype(),
+                ConcreteDataType::string_datatype()
+            )))
+            .to_string(),
+            "List<Dictionary<Int32, String>>"
+        );
+        assert_eq!(
+            ConcreteDataType::list_datatype(ConcreteDataType::list_datatype(
+                ConcreteDataType::list_datatype(ConcreteDataType::int32_datatype())
+            ))
+            .to_string(),
+            "List<List<List<Int32>>>"
+        );
+        assert_eq!(
+            ConcreteDataType::dictionary_datatype(
+                ConcreteDataType::int32_datatype(),
+                ConcreteDataType::string_datatype()
+            )
+            .to_string(),
+            "Dictionary<Int32, String>"
+        );
     }
 }
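
Taken together with the `Display` change above, `to_string()` and `name()` now agree for every variant, including nested ones. A short sketch of what calling code sees after this commit (constructor names are the ones used in the tests above; the import path is assumed and may need adjusting to the crate layout):

    use datatypes::prelude::ConcreteDataType; // path assumed

    let ty = ConcreteDataType::timestamp_millisecond_datatype();
    assert_eq!("TimestampMillisecond", ty.to_string());

    let nested = ConcreteDataType::list_datatype(ConcreteDataType::int32_datatype());
    assert_eq!("List<Int32>", nested.to_string());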


@@ -34,8 +34,8 @@ impl BinaryType {
 }

 impl DataType for BinaryType {
-    fn name(&self) -> &str {
-        "Binary"
+    fn name(&self) -> String {
+        "Binary".to_string()
     }

     fn logical_type_id(&self) -> LogicalTypeId {


@@ -34,8 +34,8 @@ impl BooleanType {
 }

 impl DataType for BooleanType {
-    fn name(&self) -> &str {
-        "Boolean"
+    fn name(&self) -> String {
+        "Boolean".to_string()
     }

     fn logical_type_id(&self) -> LogicalTypeId {


@@ -248,7 +248,7 @@ mod tests {
         assert!(res.is_err());
         assert_eq!(
             res.unwrap_err().to_string(),
-            "Type Timestamp with value 1970-01-01 08:00:10+0800 can't be cast to the destination type Int8"
+            "Type TimestampSecond with value 1970-01-01 08:00:10+0800 can't be cast to the destination type Int8"
         );
     }


@@ -32,8 +32,8 @@ use crate::vectors::{DateVector, DateVectorBuilder, MutableVector, Vector};
 pub struct DateType;

 impl DataType for DateType {
-    fn name(&self) -> &str {
-        "Date"
+    fn name(&self) -> String {
+        "Date".to_string()
     }

     fn logical_type_id(&self) -> LogicalTypeId {


@@ -30,8 +30,8 @@ use crate::vectors::{DateTimeVector, DateTimeVectorBuilder, PrimitiveVector};
 pub struct DateTimeType;

 impl DataType for DateTimeType {
-    fn name(&self) -> &str {
-        "DateTime"
+    fn name(&self) -> String {
+        "DateTime".to_string()
     }

     fn logical_type_id(&self) -> LogicalTypeId {


@@ -56,9 +56,8 @@ impl Decimal128Type {
 }

 impl DataType for Decimal128Type {
-    fn name(&self) -> &str {
-        // TODO(QuenKar): support precision and scale information in name
-        "decimal"
+    fn name(&self) -> String {
+        format!("Decimal({}, {})", self.precision, self.scale)
     }

     fn logical_type_id(&self) -> LogicalTypeId {


@@ -62,8 +62,12 @@ impl DictionaryType {
 }

 impl DataType for DictionaryType {
-    fn name(&self) -> &str {
-        "Dictionary"
+    fn name(&self) -> String {
+        format!(
+            "Dictionary<{}, {}>",
+            self.key_type.name(),
+            self.value_type.name()
+        )
     }

     fn logical_type_id(&self) -> LogicalTypeId {


@@ -78,8 +78,8 @@ macro_rules! impl_data_type_for_duration {
             pub struct [<Duration $unit Type>];

             impl DataType for [<Duration $unit Type>] {
-                fn name(&self) -> &str {
-                    stringify!([<Duration $unit>])
+                fn name(&self) -> String {
+                    stringify!([<Duration $unit>]).to_string()
                 }

                 fn logical_type_id(&self) -> LogicalTypeId {


@@ -66,8 +66,8 @@ macro_rules! impl_data_type_for_interval {
             pub struct [<Interval $unit Type>];

             impl DataType for [<Interval $unit Type>] {
-                fn name(&self) -> &str {
-                    stringify!([<Interval $unit>])
+                fn name(&self) -> String {
+                    stringify!([<Interval $unit>]).to_string()
                 }

                 fn logical_type_id(&self) -> LogicalTypeId {


@@ -52,8 +52,8 @@ impl ListType {
 }

 impl DataType for ListType {
-    fn name(&self) -> &str {
-        "List"
+    fn name(&self) -> String {
+        format!("List<{}>", self.item_type.name())
     }

     fn logical_type_id(&self) -> LogicalTypeId {
@@ -92,7 +92,7 @@ mod tests {
     #[test]
     fn test_list_type() {
         let t = ListType::new(ConcreteDataType::boolean_datatype());
-        assert_eq!("List", t.name());
+        assert_eq!("List<Boolean>", t.name());
         assert_eq!(LogicalTypeId::List, t.logical_type_id());
         assert_eq!(
             Value::List(ListValue::new(None, ConcreteDataType::boolean_datatype())),


@@ -32,8 +32,8 @@ impl NullType {
 }

 impl DataType for NullType {
-    fn name(&self) -> &str {
-        "Null"
+    fn name(&self) -> String {
+        "Null".to_string()
     }

     fn logical_type_id(&self) -> LogicalTypeId {


@@ -251,8 +251,8 @@ macro_rules! define_non_timestamp_primitive {
         define_logical_primitive_type!($Native, $TypeId, $DataType, $Largest);

         impl DataType for $DataType {
-            fn name(&self) -> &str {
-                stringify!($TypeId)
+            fn name(&self) -> String {
+                stringify!($TypeId).to_string()
             }

             fn logical_type_id(&self) -> LogicalTypeId {
@@ -350,8 +350,8 @@ define_logical_primitive_type!(i64, Int64, Int64Type, Int64Type);
 define_logical_primitive_type!(i32, Int32, Int32Type, Int64Type);

 impl DataType for Int64Type {
-    fn name(&self) -> &str {
-        "Int64"
+    fn name(&self) -> String {
+        "Int64".to_string()
     }

     fn logical_type_id(&self) -> LogicalTypeId {
@@ -397,8 +397,8 @@ impl DataType for Int64Type {
 }

 impl DataType for Int32Type {
-    fn name(&self) -> &str {
-        "Int32"
+    fn name(&self) -> String {
+        "Int32".to_string()
     }

     fn logical_type_id(&self) -> LogicalTypeId {


@@ -34,8 +34,8 @@ impl StringType {
 }

 impl DataType for StringType {
-    fn name(&self) -> &str {
-        "String"
+    fn name(&self) -> String {
+        "String".to_string()
     }

     fn logical_type_id(&self) -> LogicalTypeId {


@@ -92,8 +92,8 @@ macro_rules! impl_data_type_for_time {
             pub struct [<Time $unit Type>];

             impl DataType for [<Time $unit Type>] {
-                fn name(&self) -> &str {
-                    stringify!([<Time $unit>])
+                fn name(&self) -> String {
+                    stringify!([<Time $unit>]).to_string()
                 }

                 fn logical_type_id(&self) -> LogicalTypeId {


@@ -109,8 +109,8 @@ macro_rules! impl_data_type_for_timestamp {
             pub struct [<Timestamp $unit Type>];

             impl DataType for [<Timestamp $unit Type>] {
-                fn name(&self) -> &str {
-                    stringify!([<Timestamp $unit>])
+                fn name(&self) -> String {
+                    stringify!([<Timestamp $unit>]).to_string()
                 }

                 fn logical_type_id(&self) -> LogicalTypeId {


@@ -229,17 +229,16 @@ macro_rules! impl_try_from_arrow_array_for_vector {
            ) -> crate::error::Result<$Vector> {
                use snafu::OptionExt;

-                let data = array
+                let arrow_array = array
                    .as_ref()
                    .as_any()
                    .downcast_ref::<$Array>()
                    .with_context(|| crate::error::ConversionSnafu {
                        from: std::format!("{:?}", array.as_ref().data_type()),
                    })?
-                    .to_data();
-                let concrete_array = $Array::from(data);
+                    .clone();

-                Ok($Vector::from(concrete_array))
+                Ok($Vector::from(arrow_array))
            }
        }
    };
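
The macro change above replaces the `to_data()`/`from(data)` round trip with a single `clone()` of the downcast array; arrow arrays are cheap to clone because their underlying buffers are reference-counted, so no data is copied. Usage of the generated constructor is unchanged. A sketch, assuming a vector type produced by an invocation like the `impl_try_from_arrow_array_for_vector!(StringArray, StringVector)` seen later in this diff:

    let array: ArrayRef = Arc::new(StringArray::from(vec!["a", "b"]));
    let vector = StringVector::try_from_arrow_array(array)?;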


@@ -15,7 +15,7 @@
 use std::any::Any;
 use std::sync::Arc;

-use arrow::array::{Array, ArrayBuilder, ArrayData, ArrayIter, ArrayRef};
+use arrow::array::{Array, ArrayBuilder, ArrayIter, ArrayRef};
 use snafu::ResultExt;

 use crate::arrow_array::{BinaryArray, MutableBinaryArray};
@@ -36,10 +36,6 @@ impl BinaryVector {
     pub(crate) fn as_arrow(&self) -> &dyn Array {
         &self.array
     }
-
-    fn to_array_data(&self) -> ArrayData {
-        self.array.to_data()
-    }
 }

 impl From<BinaryArray> for BinaryVector {
@@ -74,13 +70,11 @@ impl Vector for BinaryVector {
     }

     fn to_arrow_array(&self) -> ArrayRef {
-        let data = self.to_array_data();
-        Arc::new(BinaryArray::from(data))
+        Arc::new(self.array.clone())
     }

     fn to_boxed_arrow_array(&self) -> Box<dyn Array> {
-        let data = self.to_array_data();
-        Box::new(BinaryArray::from(data))
+        Box::new(self.array.clone())
     }

     fn validity(&self) -> Validity {


@@ -16,9 +16,7 @@ use std::any::Any;
 use std::borrow::Borrow;
 use std::sync::Arc;

-use arrow::array::{
-    Array, ArrayBuilder, ArrayData, ArrayIter, ArrayRef, BooleanArray, BooleanBuilder,
-};
+use arrow::array::{Array, ArrayBuilder, ArrayIter, ArrayRef, BooleanArray, BooleanBuilder};
 use snafu::ResultExt;

 use crate::data_type::ConcreteDataType;
@@ -44,16 +42,6 @@ impl BooleanVector {
         &self.array
     }

-    fn to_array_data(&self) -> ArrayData {
-        self.array.to_data()
-    }
-
-    fn from_array_data(data: ArrayData) -> BooleanVector {
-        BooleanVector {
-            array: BooleanArray::from(data),
-        }
-    }
-
     pub(crate) fn false_count(&self) -> usize {
         self.array.false_count()
     }
@@ -107,13 +95,11 @@ impl Vector for BooleanVector {
     }

     fn to_arrow_array(&self) -> ArrayRef {
-        let data = self.to_array_data();
-        Arc::new(BooleanArray::from(data))
+        Arc::new(self.array.clone())
     }

     fn to_boxed_arrow_array(&self) -> Box<dyn Array> {
-        let data = self.to_array_data();
-        Box::new(BooleanArray::from(data))
+        Box::new(self.array.clone())
     }

     fn validity(&self) -> Validity {
@@ -133,8 +119,7 @@ impl Vector for BooleanVector {
     }

     fn slice(&self, offset: usize, length: usize) -> VectorRef {
-        let data = self.array.to_data().slice(offset, length);
-        Arc::new(Self::from_array_data(data))
+        Arc::new(Self::from(self.array.slice(offset, length)))
     }

     fn get(&self, index: usize) -> Value {
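
Worth noting for the `slice` rewrites in these hunks: arrow's typed `Array::slice` is zero-copy, adjusting offset and length over the same shared buffers instead of materializing an `ArrayData` copy. A quick sketch of the property this relies on:

    let arr = BooleanArray::from(vec![true, false, true, true]);
    let sliced = arr.slice(1, 2); // shares buffers with `arr`
    assert_eq!(2, sliced.len());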


@@ -196,8 +196,7 @@ impl Vector for Decimal128Vector {
     }

     fn slice(&self, offset: usize, length: usize) -> VectorRef {
-        let array = self.array.slice(offset, length);
-        Arc::new(Self { array })
+        Arc::new(self.get_slice(offset, length))
     }

     fn get(&self, index: usize) -> Value {
@@ -535,23 +534,23 @@
         // because 100 is out of Decimal(3, 1) range, so it will be null
         assert!(array.is_null(4));
     }
-}

     #[test]
     fn test_decimal28_vector_iter_data() {
         let vector = Decimal128Vector::from_values(vec![1, 2, 3, 4])
             .with_precision_and_scale(3, 1)
             .unwrap();
         let mut iter = vector.iter_data();
         assert_eq!(iter.next(), Some(Some(Decimal128::new(1, 3, 1))));
         assert_eq!(iter.next(), Some(Some(Decimal128::new(2, 3, 1))));
         assert_eq!(iter.next(), Some(Some(Decimal128::new(3, 3, 1))));
         assert_eq!(iter.next(), Some(Some(Decimal128::new(4, 3, 1))));
         assert_eq!(iter.next(), None);
         let values = vector
             .iter_data()
             .filter_map(|v| v.map(|x| x.val() * 2))
             .collect::<Vec<_>>();
         assert_eq!(values, vec![2, 4, 6, 8]);
+    }
 }


@@ -284,23 +284,21 @@ impl Helper {
             ArrowDataType::Date64 => Arc::new(DateTimeVector::try_from_arrow_array(array)?),
             ArrowDataType::List(_) => Arc::new(ListVector::try_from_arrow_array(array)?),
             ArrowDataType::Timestamp(unit, _) => match unit {
-                TimeUnit::Second => Arc::new(
-                    TimestampSecondVector::try_from_arrow_timestamp_array(array)?,
-                ),
-                TimeUnit::Millisecond => Arc::new(
-                    TimestampMillisecondVector::try_from_arrow_timestamp_array(array)?,
-                ),
-                TimeUnit::Microsecond => Arc::new(
-                    TimestampMicrosecondVector::try_from_arrow_timestamp_array(array)?,
-                ),
-                TimeUnit::Nanosecond => Arc::new(
-                    TimestampNanosecondVector::try_from_arrow_timestamp_array(array)?,
-                ),
+                TimeUnit::Second => Arc::new(TimestampSecondVector::try_from_arrow_array(array)?),
+                TimeUnit::Millisecond => {
+                    Arc::new(TimestampMillisecondVector::try_from_arrow_array(array)?)
+                }
+                TimeUnit::Microsecond => {
+                    Arc::new(TimestampMicrosecondVector::try_from_arrow_array(array)?)
+                }
+                TimeUnit::Nanosecond => {
+                    Arc::new(TimestampNanosecondVector::try_from_arrow_array(array)?)
+                }
             },
             ArrowDataType::Time32(unit) => match unit {
-                TimeUnit::Second => Arc::new(TimeSecondVector::try_from_arrow_time_array(array)?),
+                TimeUnit::Second => Arc::new(TimeSecondVector::try_from_arrow_array(array)?),
                 TimeUnit::Millisecond => {
-                    Arc::new(TimeMillisecondVector::try_from_arrow_time_array(array)?)
+                    Arc::new(TimeMillisecondVector::try_from_arrow_array(array)?)
                 }
                 // Arrow use time32 for second/millisecond.
                 _ => unreachable!(
@@ -310,10 +308,10 @@ impl Helper {
             },
             ArrowDataType::Time64(unit) => match unit {
                 TimeUnit::Microsecond => {
-                    Arc::new(TimeMicrosecondVector::try_from_arrow_time_array(array)?)
+                    Arc::new(TimeMicrosecondVector::try_from_arrow_array(array)?)
                 }
                 TimeUnit::Nanosecond => {
-                    Arc::new(TimeNanosecondVector::try_from_arrow_time_array(array)?)
+                    Arc::new(TimeNanosecondVector::try_from_arrow_array(array)?)
                 }
                 // Arrow use time64 for microsecond/nanosecond.
                 _ => unreachable!(
@@ -322,29 +320,27 @@ impl Helper {
                 ),
             },
             ArrowDataType::Interval(unit) => match unit {
-                IntervalUnit::YearMonth => Arc::new(
-                    IntervalYearMonthVector::try_from_arrow_interval_array(array)?,
-                ),
+                IntervalUnit::YearMonth => {
+                    Arc::new(IntervalYearMonthVector::try_from_arrow_array(array)?)
+                }
                 IntervalUnit::DayTime => {
-                    Arc::new(IntervalDayTimeVector::try_from_arrow_interval_array(array)?)
+                    Arc::new(IntervalDayTimeVector::try_from_arrow_array(array)?)
                 }
-                IntervalUnit::MonthDayNano => Arc::new(
-                    IntervalMonthDayNanoVector::try_from_arrow_interval_array(array)?,
-                ),
+                IntervalUnit::MonthDayNano => {
+                    Arc::new(IntervalMonthDayNanoVector::try_from_arrow_array(array)?)
+                }
             },
             ArrowDataType::Duration(unit) => match unit {
-                TimeUnit::Second => {
-                    Arc::new(DurationSecondVector::try_from_arrow_duration_array(array)?)
-                }
-                TimeUnit::Millisecond => Arc::new(
-                    DurationMillisecondVector::try_from_arrow_duration_array(array)?,
-                ),
-                TimeUnit::Microsecond => Arc::new(
-                    DurationMicrosecondVector::try_from_arrow_duration_array(array)?,
-                ),
-                TimeUnit::Nanosecond => Arc::new(
-                    DurationNanosecondVector::try_from_arrow_duration_array(array)?,
-                ),
+                TimeUnit::Second => Arc::new(DurationSecondVector::try_from_arrow_array(array)?),
+                TimeUnit::Millisecond => {
+                    Arc::new(DurationMillisecondVector::try_from_arrow_array(array)?)
+                }
+                TimeUnit::Microsecond => {
+                    Arc::new(DurationMicrosecondVector::try_from_arrow_array(array)?)
+                }
+                TimeUnit::Nanosecond => {
+                    Arc::new(DurationNanosecondVector::try_from_arrow_array(array)?)
+                }
             },
             ArrowDataType::Decimal128(_, _) => {
                 Arc::new(Decimal128Vector::try_from_arrow_array(array)?)
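
With the unit-specific helpers gone, every branch above now funnels through the same `try_from_arrow_array`, so converting an arbitrary arrow array is uniform. A sketch using the `Helper::try_into_vector` entry point visible elsewhere in this diff (note the removed timestamp helper used to strip time-zone metadata explicitly; the unified path instead relies on downcasting to the expected arrow type):

    let array: ArrayRef = Arc::new(TimestampMillisecondArray::from(vec![1i64, 2, 3]));
    let vector = Helper::try_into_vector(array)?; // dispatches on the arrow data type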


@@ -46,17 +46,6 @@ impl ListVector {
             .map(|value_opt| value_opt.map(Helper::try_into_vector).transpose())
     }

-    fn to_array_data(&self) -> ArrayData {
-        self.array.to_data()
-    }
-
-    fn from_array_data_and_type(data: ArrayData, item_type: ConcreteDataType) -> Self {
-        Self {
-            array: ListArray::from(data),
-            item_type,
-        }
-    }
-
     pub(crate) fn as_arrow(&self) -> &dyn Array {
         &self.array
     }
@@ -80,13 +69,11 @@ impl Vector for ListVector {
     }

     fn to_arrow_array(&self) -> ArrayRef {
-        let data = self.to_array_data();
-        Arc::new(ListArray::from(data))
+        Arc::new(self.array.clone())
     }

     fn to_boxed_arrow_array(&self) -> Box<dyn Array> {
-        let data = self.to_array_data();
-        Box::new(ListArray::from(data))
+        Box::new(self.array.clone())
     }

     fn validity(&self) -> Validity {
@@ -106,8 +93,10 @@ impl Vector for ListVector {
     }

     fn slice(&self, offset: usize, length: usize) -> VectorRef {
-        let data = self.array.to_data().slice(offset, length);
-        Arc::new(Self::from_array_data_and_type(data, self.item_type.clone()))
+        Arc::new(Self {
+            array: self.array.slice(offset, length),
+            item_type: self.item_type.clone(),
+        })
     }

     fn get(&self, index: usize) -> Value {


@@ -16,23 +16,12 @@ use std::any::Any;
 use std::fmt;
 use std::sync::Arc;

-use arrow::array::{
-    Array, ArrayBuilder, ArrayData, ArrayIter, ArrayRef, PrimitiveArray, PrimitiveBuilder,
-    Time32MillisecondArray as TimeMillisecondArray, Time32SecondArray as TimeSecondArray,
-    Time64MicrosecondArray as TimeMicrosecondArray, Time64NanosecondArray as TimeNanosecondArray,
-    TimestampMicrosecondArray, TimestampMillisecondArray, TimestampNanosecondArray,
-    TimestampSecondArray,
-};
-use arrow_array::{
-    DurationMicrosecondArray, DurationMillisecondArray, DurationNanosecondArray,
-    DurationSecondArray, IntervalDayTimeArray, IntervalMonthDayNanoArray, IntervalYearMonthArray,
-};
-use arrow_schema::DataType;
+use arrow::array::{Array, ArrayBuilder, ArrayIter, ArrayRef, PrimitiveArray, PrimitiveBuilder};
 use serde_json::Value as JsonValue;
 use snafu::OptionExt;

 use crate::data_type::ConcreteDataType;
-use crate::error::{self, CastTypeSnafu, Result};
+use crate::error::{self, Result};
 use crate::scalars::{Scalar, ScalarRef, ScalarVector, ScalarVectorBuilder};
 use crate::serialize::Serializable;
 use crate::types::{
@@ -66,178 +55,15 @@ impl<T: LogicalPrimitiveType> PrimitiveVector<T> {
     }

     pub fn try_from_arrow_array(array: impl AsRef<dyn Array>) -> Result<Self> {
-        let data = array
+        let arrow_array = array
             .as_ref()
             .as_any()
             .downcast_ref::<PrimitiveArray<T::ArrowPrimitive>>()
             .with_context(|| error::ConversionSnafu {
                 from: format!("{:?}", array.as_ref().data_type()),
-            })?
-            .to_data();
-        let concrete_array = PrimitiveArray::<T::ArrowPrimitive>::from(data);
-        Ok(Self::new(concrete_array))
-    }
-
-    /// Converts arrow timestamp array to vectors, ignoring time zone info.
-    pub fn try_from_arrow_timestamp_array(array: impl AsRef<dyn Array>) -> Result<Self> {
-        let array = array.as_ref();
-        let array_data = match array.data_type() {
-            DataType::Timestamp(unit, _) => match unit {
-                arrow_schema::TimeUnit::Second => array
-                    .as_any()
-                    .downcast_ref::<TimestampSecondArray>()
-                    .unwrap()
-                    .clone()
-                    .with_timezone_opt(None::<String>)
-                    .to_data(),
-                arrow_schema::TimeUnit::Millisecond => array
-                    .as_any()
-                    .downcast_ref::<TimestampMillisecondArray>()
-                    .unwrap()
-                    .clone()
-                    .with_timezone_opt(None::<String>)
-                    .to_data(),
-                arrow_schema::TimeUnit::Microsecond => array
-                    .as_any()
-                    .downcast_ref::<TimestampMicrosecondArray>()
-                    .unwrap()
-                    .clone()
-                    .with_timezone_opt(None::<String>)
-                    .to_data(),
-                arrow_schema::TimeUnit::Nanosecond => array
-                    .as_any()
-                    .downcast_ref::<TimestampNanosecondArray>()
-                    .unwrap()
-                    .clone()
-                    .with_timezone_opt(None::<String>)
-                    .to_data(),
-            },
-            arrow_type => {
-                return CastTypeSnafu {
-                    msg: format!(
-                        "Failed to cast arrow array {:?} to timestamp vector",
-                        arrow_type,
-                    ),
-                }
-                .fail()?;
-            }
-        };
-        let concrete_array = PrimitiveArray::<T::ArrowPrimitive>::from(array_data);
-        Ok(Self::new(concrete_array))
-    }
-
-    /// Converts arrow time array to vectors
-    pub fn try_from_arrow_time_array(array: impl AsRef<dyn Array>) -> Result<Self> {
-        let array = array.as_ref();
-        let array_data = match array.data_type() {
-            DataType::Time32(unit) => match unit {
-                arrow_schema::TimeUnit::Second => array
-                    .as_any()
-                    .downcast_ref::<TimeSecondArray>()
-                    .unwrap()
-                    .to_data(),
-                arrow_schema::TimeUnit::Millisecond => array
-                    .as_any()
-                    .downcast_ref::<TimeMillisecondArray>()
-                    .unwrap()
-                    .to_data(),
-                _ => unreachable!(),
-            },
-            DataType::Time64(unit) => match unit {
-                arrow_schema::TimeUnit::Microsecond => array
-                    .as_any()
-                    .downcast_ref::<TimeMicrosecondArray>()
-                    .unwrap()
-                    .to_data(),
-                arrow_schema::TimeUnit::Nanosecond => array
-                    .as_any()
-                    .downcast_ref::<TimeNanosecondArray>()
-                    .unwrap()
-                    .to_data(),
-                _ => unreachable!(),
-            },
-            arrow_type => {
-                return CastTypeSnafu {
-                    msg: format!("Failed to cast arrow array {:?} to time vector", arrow_type,),
-                }
-                .fail()?;
-            }
-        };
-        let concrete_array = PrimitiveArray::<T::ArrowPrimitive>::from(array_data);
-        Ok(Self::new(concrete_array))
-    }
-
-    pub fn try_from_arrow_interval_array(array: impl AsRef<dyn Array>) -> Result<Self> {
-        let array = array.as_ref();
-        let array_data = match array.data_type() {
-            DataType::Interval(unit) => match unit {
-                arrow_schema::IntervalUnit::YearMonth => array
-                    .as_any()
-                    .downcast_ref::<IntervalYearMonthArray>()
-                    .unwrap()
-                    .to_data(),
-                arrow_schema::IntervalUnit::DayTime => array
-                    .as_any()
-                    .downcast_ref::<IntervalDayTimeArray>()
-                    .unwrap()
-                    .to_data(),
-                arrow_schema::IntervalUnit::MonthDayNano => array
-                    .as_any()
-                    .downcast_ref::<IntervalMonthDayNanoArray>()
-                    .unwrap()
-                    .to_data(),
-            },
-            arrow_type => {
-                return CastTypeSnafu {
-                    msg: format!(
-                        "Failed to cast arrow array {:?} to interval vector",
-                        arrow_type,
-                    ),
-                }
-                .fail()?;
-            }
-        };
-        let concrete_array = PrimitiveArray::<T::ArrowPrimitive>::from(array_data);
-        Ok(Self::new(concrete_array))
-    }
-
-    pub fn try_from_arrow_duration_array(array: impl AsRef<dyn Array>) -> Result<Self> {
-        let array = array.as_ref();
-        let array_data = match array.data_type() {
-            DataType::Duration(unit) => match unit {
-                arrow_schema::TimeUnit::Second => array
-                    .as_any()
-                    .downcast_ref::<DurationSecondArray>()
-                    .unwrap()
-                    .to_data(),
-                arrow_schema::TimeUnit::Millisecond => array
-                    .as_any()
-                    .downcast_ref::<DurationMillisecondArray>()
-                    .unwrap()
-                    .to_data(),
-                arrow_schema::TimeUnit::Microsecond => array
-                    .as_any()
-                    .downcast_ref::<DurationMicrosecondArray>()
-                    .unwrap()
-                    .to_data(),
-                arrow_schema::TimeUnit::Nanosecond => array
-                    .as_any()
-                    .downcast_ref::<DurationNanosecondArray>()
-                    .unwrap()
-                    .to_data(),
-            },
-            arrow_type => {
-                return CastTypeSnafu {
-                    msg: format!(
-                        "Failed to cast arrow array {:?} to interval vector",
-                        arrow_type,
-                    ),
-                }
-                .fail()?;
-            }
-        };
-        let concrete_array = PrimitiveArray::<T::ArrowPrimitive>::from(array_data);
-        Ok(Self::new(concrete_array))
+            })?;
+
+        Ok(Self::new(arrow_array.clone()))
     }

     pub fn from_slice<P: AsRef<[T::Native]>>(slice: P) -> Self {
@@ -277,24 +103,15 @@ impl<T: LogicalPrimitiveType> PrimitiveVector<T> {
         &self.array
     }

-    fn to_array_data(&self) -> ArrayData {
-        self.array.to_data()
-    }
-
-    fn from_array_data(data: ArrayData) -> Self {
-        Self {
-            array: PrimitiveArray::from(data),
-        }
-    }
-
     // To distinguish with `Vector::slice()`.
     /// Slice the vector, returning a new vector.
     ///
     /// # Panics
     /// This function panics if `offset + length > self.len()`.
     pub fn get_slice(&self, offset: usize, length: usize) -> Self {
-        let data = self.array.to_data().slice(offset, length);
-        Self::from_array_data(data)
+        Self {
+            array: self.array.slice(offset, length),
+        }
     }
 }
@@ -316,13 +133,11 @@ impl<T: LogicalPrimitiveType> Vector for PrimitiveVector<T> {
     }

     fn to_arrow_array(&self) -> ArrayRef {
-        let data = self.to_array_data();
-        Arc::new(PrimitiveArray::<T::ArrowPrimitive>::from(data))
+        Arc::new(self.array.clone())
     }

     fn to_boxed_arrow_array(&self) -> Box<dyn Array> {
-        let data = self.to_array_data();
-        Box::new(PrimitiveArray::<T::ArrowPrimitive>::from(data))
+        Box::new(self.array.clone())
     }

     fn validity(&self) -> Validity {
@@ -580,7 +395,12 @@ mod tests {
         Time64NanosecondArray,
     };
     use arrow::datatypes::DataType as ArrowDataType;
-    use arrow_array::{DurationSecondArray, IntervalDayTimeArray, IntervalYearMonthArray};
+    use arrow_array::{
+        DurationMicrosecondArray, DurationMillisecondArray, DurationNanosecondArray,
+        DurationSecondArray, IntervalDayTimeArray, IntervalYearMonthArray,
+        TimestampMicrosecondArray, TimestampMillisecondArray, TimestampNanosecondArray,
+        TimestampSecondArray,
+    };
     use serde_json;

     use super::*;
@@ -703,6 +523,14 @@ mod tests {
         assert_eq!(128, v.memory_size());
     }

+    #[test]
+    fn test_get_slice() {
+        let v = Int32Vector::from_slice(vec![1, 2, 3, 4, 5]);
+        let slice = v.get_slice(1, 3);
+        assert_eq!(v, Int32Vector::from_slice(vec![1, 2, 3, 4, 5]));
+        assert_eq!(slice, Int32Vector::from_slice(vec![2, 3, 4]));
+    }
+
     #[test]
     fn test_primitive_vector_builder() {
         let mut builder = Int64Type::default().create_mutable_vector(3);
@@ -748,48 +576,48 @@ mod tests {
     #[test]
     fn test_try_from_arrow_time_array() {
         let array: ArrayRef = Arc::new(Time32SecondArray::from(vec![1i32, 2, 3]));
-        let vector = TimeSecondVector::try_from_arrow_time_array(array).unwrap();
+        let vector = TimeSecondVector::try_from_arrow_array(array).unwrap();
         assert_eq!(TimeSecondVector::from_values(vec![1, 2, 3]), vector);

         let array: ArrayRef = Arc::new(Time32MillisecondArray::from(vec![1i32, 2, 3]));
-        let vector = TimeMillisecondVector::try_from_arrow_time_array(array).unwrap();
+        let vector = TimeMillisecondVector::try_from_arrow_array(array).unwrap();
         assert_eq!(TimeMillisecondVector::from_values(vec![1, 2, 3]), vector);

         let array: ArrayRef = Arc::new(Time64MicrosecondArray::from(vec![1i64, 2, 3]));
-        let vector = TimeMicrosecondVector::try_from_arrow_time_array(array).unwrap();
+        let vector = TimeMicrosecondVector::try_from_arrow_array(array).unwrap();
         assert_eq!(TimeMicrosecondVector::from_values(vec![1, 2, 3]), vector);

         let array: ArrayRef = Arc::new(Time64NanosecondArray::from(vec![1i64, 2, 3]));
-        let vector = TimeNanosecondVector::try_from_arrow_time_array(array).unwrap();
+        let vector = TimeNanosecondVector::try_from_arrow_array(array).unwrap();
         assert_eq!(TimeNanosecondVector::from_values(vec![1, 2, 3]), vector);

         // Test convert error
         let array: ArrayRef = Arc::new(Int32Array::from(vec![1i32, 2, 3]));
-        assert!(TimeSecondVector::try_from_arrow_time_array(array).is_err());
+        assert!(TimeSecondVector::try_from_arrow_array(array).is_err());
     }

     #[test]
     fn test_try_from_arrow_timestamp_array() {
         let array: ArrayRef = Arc::new(TimestampSecondArray::from(vec![1i64, 2, 3]));
-        let vector = TimestampSecondVector::try_from_arrow_timestamp_array(array).unwrap();
+        let vector = TimestampSecondVector::try_from_arrow_array(array).unwrap();
         assert_eq!(TimestampSecondVector::from_values(vec![1, 2, 3]), vector);

         let array: ArrayRef = Arc::new(TimestampMillisecondArray::from(vec![1i64, 2, 3]));
-        let vector = TimestampMillisecondVector::try_from_arrow_timestamp_array(array).unwrap();
+        let vector = TimestampMillisecondVector::try_from_arrow_array(array).unwrap();
         assert_eq!(
             TimestampMillisecondVector::from_values(vec![1, 2, 3]),
             vector
         );

         let array: ArrayRef = Arc::new(TimestampMicrosecondArray::from(vec![1i64, 2, 3]));
-        let vector = TimestampMicrosecondVector::try_from_arrow_timestamp_array(array).unwrap();
+        let vector = TimestampMicrosecondVector::try_from_arrow_array(array).unwrap();
         assert_eq!(
             TimestampMicrosecondVector::from_values(vec![1, 2, 3]),
             vector
         );

         let array: ArrayRef = Arc::new(TimestampNanosecondArray::from(vec![1i64, 2, 3]));
-        let vector = TimestampNanosecondVector::try_from_arrow_timestamp_array(array).unwrap();
+        let vector = TimestampNanosecondVector::try_from_arrow_array(array).unwrap();
         assert_eq!(
             TimestampNanosecondVector::from_values(vec![1, 2, 3]),
             vector
@@ -797,27 +625,27 @@
         // Test convert error
         let array: ArrayRef = Arc::new(Int32Array::from(vec![1i32, 2, 3]));
-        assert!(TimestampSecondVector::try_from_arrow_timestamp_array(array).is_err());
+        assert!(TimestampSecondVector::try_from_arrow_array(array).is_err());
     }

     #[test]
     fn test_try_from_arrow_interval_array() {
         let array: ArrayRef = Arc::new(IntervalYearMonthArray::from(vec![1000, 2000, 3000]));
-        let vector = IntervalYearMonthVector::try_from_arrow_interval_array(array).unwrap();
+        let vector = IntervalYearMonthVector::try_from_arrow_array(array).unwrap();
         assert_eq!(
             IntervalYearMonthVector::from_values(vec![1000, 2000, 3000]),
             vector
         );

         let array: ArrayRef = Arc::new(IntervalDayTimeArray::from(vec![1000, 2000, 3000]));
-        let vector = IntervalDayTimeVector::try_from_arrow_interval_array(array).unwrap();
+        let vector = IntervalDayTimeVector::try_from_arrow_array(array).unwrap();
         assert_eq!(
             IntervalDayTimeVector::from_values(vec![1000, 2000, 3000]),
             vector
         );

         let array: ArrayRef = Arc::new(IntervalYearMonthArray::from(vec![1000, 2000, 3000]));
-        let vector = IntervalYearMonthVector::try_from_arrow_interval_array(array).unwrap();
+        let vector = IntervalYearMonthVector::try_from_arrow_array(array).unwrap();
         assert_eq!(
             IntervalYearMonthVector::from_values(vec![1000, 2000, 3000]),
             vector
@@ -827,28 +655,28 @@
     #[test]
     fn test_try_from_arrow_duration_array() {
         let array: ArrayRef = Arc::new(DurationSecondArray::from(vec![1000, 2000, 3000]));
-        let vector = DurationSecondVector::try_from_arrow_duration_array(array).unwrap();
+        let vector = DurationSecondVector::try_from_arrow_array(array).unwrap();
         assert_eq!(
             DurationSecondVector::from_values(vec![1000, 2000, 3000]),
             vector
         );

         let array: ArrayRef = Arc::new(DurationMillisecondArray::from(vec![1000, 2000, 3000]));
-        let vector = DurationMillisecondVector::try_from_arrow_duration_array(array).unwrap();
+        let vector = DurationMillisecondVector::try_from_arrow_array(array).unwrap();
         assert_eq!(
             DurationMillisecondVector::from_values(vec![1000, 2000, 3000]),
             vector
         );

         let array: ArrayRef = Arc::new(DurationMicrosecondArray::from(vec![1000, 2000, 3000]));
-        let vector = DurationMicrosecondVector::try_from_arrow_duration_array(array).unwrap();
+        let vector = DurationMicrosecondVector::try_from_arrow_array(array).unwrap();
         assert_eq!(
             DurationMicrosecondVector::from_values(vec![1000, 2000, 3000]),
             vector
         );

         let array: ArrayRef = Arc::new(DurationNanosecondArray::from(vec![1000, 2000, 3000]));
-        let vector = DurationNanosecondVector::try_from_arrow_duration_array(array).unwrap();
+        let vector = DurationNanosecondVector::try_from_arrow_array(array).unwrap();
         assert_eq!(
             DurationNanosecondVector::from_values(vec![1000, 2000, 3000]),
             vector


@@ -15,7 +15,7 @@
 use std::any::Any;
 use std::sync::Arc;

-use arrow::array::{Array, ArrayBuilder, ArrayData, ArrayIter, ArrayRef};
+use arrow::array::{Array, ArrayBuilder, ArrayIter, ArrayRef};
 use snafu::ResultExt;

 use crate::arrow_array::{MutableStringArray, StringArray};
@@ -36,16 +36,6 @@ impl StringVector {
     pub(crate) fn as_arrow(&self) -> &dyn Array {
         &self.array
     }
-
-    fn to_array_data(&self) -> ArrayData {
-        self.array.to_data()
-    }
-
-    fn from_array_data(data: ArrayData) -> Self {
-        Self {
-            array: StringArray::from(data),
-        }
-    }
 }

 impl From<StringArray> for StringVector {
@@ -120,13 +110,11 @@ impl Vector for StringVector {
     }

     fn to_arrow_array(&self) -> ArrayRef {
-        let data = self.to_array_data();
-        Arc::new(StringArray::from(data))
+        Arc::new(self.array.clone())
     }

     fn to_boxed_arrow_array(&self) -> Box<dyn Array> {
-        let data = self.to_array_data();
-        Box::new(StringArray::from(data))
+        Box::new(self.array.clone())
     }

     fn validity(&self) -> Validity {
@@ -146,8 +134,7 @@ impl Vector for StringVector {
     }

     fn slice(&self, offset: usize, length: usize) -> VectorRef {
-        let data = self.array.to_data().slice(offset, length);
-        Arc::new(Self::from_array_data(data))
+        Arc::new(Self::from(self.array.slice(offset, length)))
     }

     fn get(&self, index: usize) -> Value {
@@ -256,6 +243,7 @@ vectors::impl_try_from_arrow_array_for_vector!(StringArray, StringVector);
 #[cfg(test)]
 mod tests {
     use arrow::datatypes::DataType;

     use super::*;


@@ -26,6 +26,12 @@ use store_api::storage::RegionNumber;
 #[snafu(visibility(pub))]
 #[stack_trace_debug]
 pub enum Error {
+    #[snafu(display("Failed to init ddl manager"))]
+    InitDdlManager {
+        location: Location,
+        source: common_meta::error::Error,
+    },
+
     #[snafu(display("Failed to invalidate table cache"))]
     InvalidateTableCache {
         location: Location,
@@ -176,9 +182,6 @@ pub enum Error {
         source: servers::error::Error,
     },

-    #[snafu(display("Missing meta_client_options section in config"))]
-    MissingMetasrvOpts { location: Location },
-
     #[snafu(display("Failed to find leaders when altering table, table: {}", table))]
     LeaderNotFound { table: String, location: Location },
@@ -293,7 +296,6 @@ impl ErrorExt for Error {
             | Error::IllegalPrimaryKeysDef { .. }
             | Error::SchemaExists { .. }
             | Error::ColumnNotFound { .. }
-            | Error::MissingMetasrvOpts { .. }
             | Error::UnsupportedFormat { .. }
             | Error::IllegalAuthConfig { .. }
             | Error::EmptyData { .. }
@@ -319,7 +321,9 @@ impl ErrorExt for Error {
             Error::ParseSql { source, .. } => source.status_code(),

-            Error::InvalidateTableCache { source, .. } => source.status_code(),
+            Error::InvalidateTableCache { source, .. } | Error::InitDdlManager { source, .. } => {
+                source.status_code()
+            }

             Error::Table { source, .. }
             | Error::CopyTable { source, .. }
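
The new `InitDdlManager` variant follows the usual snafu pattern: snafu derives an `InitDdlManagerSnafu` context selector from the variant, and a call site attaches it when the DDL-manager setup can fail. A sketch only; the actual call site is not part of this diff, and `build_ddl_manager()` below is a hypothetical stand-in for that fallible setup step:

    let ddl_manager = build_ddl_manager() // hypothetical fallible constructor
        .context(InitDdlManagerSnafu)?;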


@@ -12,6 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

+pub mod builder;
 mod grpc;
 mod influxdb;
 mod opentsdb;
@@ -21,24 +22,16 @@ mod region_query;
 mod script;
 pub mod standalone;

-use std::collections::HashMap;
 use std::sync::Arc;

 use api::v1::meta::Role;
 use async_trait::async_trait;
 use auth::{PermissionChecker, PermissionCheckerRef, PermissionReq};
-use catalog::kvbackend::{CachedMetaKvBackend, KvBackendCatalogManager};
 use catalog::CatalogManagerRef;
-use client::client_manager::DatanodeClients;
 use common_base::Plugins;
 use common_config::KvBackendConfig;
 use common_error::ext::BoxedError;
 use common_grpc::channel_manager::{ChannelConfig, ChannelManager};
-use common_meta::cache_invalidator::DummyCacheInvalidator;
-use common_meta::ddl_manager::DdlManager;
-use common_meta::heartbeat::handler::parse_mailbox_message::ParseMailboxMessageHandler;
-use common_meta::heartbeat::handler::HandlerGroupExecutor;
-use common_meta::key::TableMetadataManager;
 use common_meta::kv_backend::KvBackendRef;
 use common_meta::state_store::KvStateStore;
 use common_procedure::local::{LocalManager, ManagerConfig};
@@ -47,19 +40,18 @@ use common_procedure::ProcedureManagerRef;
 use common_query::Output;
 use common_telemetry::error;
 use common_telemetry::logging::info;
-use datanode::region_server::RegionServer;
 use log_store::raft_engine::RaftEngineBackend;
 use meta_client::client::{MetaClient, MetaClientBuilder};
-use operator::delete::{Deleter, DeleterRef};
-use operator::insert::{Inserter, InserterRef};
+use meta_client::MetaClientOptions;
+use operator::delete::DeleterRef;
+use operator::insert::InserterRef;
 use operator::statement::StatementExecutor;
-use operator::table::{table_idents_to_full_name, TableMutationOperator};
-use partition::manager::PartitionRuleManager;
+use operator::table::table_idents_to_full_name;
 use query::parser::{PromQuery, QueryLanguageParser, QueryStatement};
 use query::plan::LogicalPlan;
 use query::query_engine::options::{validate_catalog_and_schema, QueryOptions};
 use query::query_engine::DescribeResult;
-use query::{QueryEngineFactory, QueryEngineRef};
+use query::QueryEngineRef;
 use raft_engine::{Config, ReadableSize, RecoveryMode};
 use servers::error as server_error;
 use servers::error::{AuthSnafu, ExecuteQuerySnafu, ParsePromQLSnafu};
@@ -83,15 +75,11 @@ use sql::statements::statement::Statement;
 use sqlparser::ast::ObjectName;
 pub use standalone::StandaloneDatanodeManager;

-use self::region_query::FrontendRegionQueryHandler;
-use self::standalone::StandaloneTableMetadataCreator;
 use crate::error::{
-    self, Error, ExecLogicalPlanSnafu, ExecutePromqlSnafu, ExternalSnafu, MissingMetasrvOptsSnafu,
-    ParseSqlSnafu, PermissionSnafu, PlanStatementSnafu, Result, SqlExecInterceptedSnafu,
-    TableOperationSnafu,
+    self, Error, ExecLogicalPlanSnafu, ExecutePromqlSnafu, ExternalSnafu, ParseSqlSnafu,
+    PermissionSnafu, PlanStatementSnafu, Result, SqlExecInterceptedSnafu, TableOperationSnafu,
 };
 use crate::frontend::{FrontendOptions, TomlSerializable};
-use crate::heartbeat::handler::invalidate_table_cache::InvalidateTableCacheHandler;
 use crate::heartbeat::HeartbeatTask;
 use crate::metrics;
 use crate::script::ScriptExecutor;
@@ -131,99 +119,9 @@ pub struct Instance {
 }

 impl Instance {
-    pub async fn try_new_distributed(opts: &FrontendOptions, plugins: Plugins) -> Result<Self> {
-        let meta_client = Self::create_meta_client(opts).await?;
-
-        let datanode_clients = Arc::new(DatanodeClients::default());
-
-        Self::try_new_distributed_with(meta_client, datanode_clients, plugins, opts).await
-    }
-
-    pub async fn try_new_distributed_with(
-        meta_client: Arc<MetaClient>,
-        datanode_clients: Arc<DatanodeClients>,
-        plugins: Plugins,
-        opts: &FrontendOptions,
-    ) -> Result<Self> {
-        let meta_backend = Arc::new(CachedMetaKvBackend::new(meta_client.clone()));
-        let catalog_manager = KvBackendCatalogManager::new(
-            meta_backend.clone(),
-            meta_backend.clone(),
-            datanode_clients.clone(),
-        );
-        let partition_manager = Arc::new(PartitionRuleManager::new(meta_backend.clone()));
-
-        let region_query_handler = FrontendRegionQueryHandler::arc(
-            partition_manager.clone(),
-            catalog_manager.datanode_manager().clone(),
-        );
-
-        let inserter = Arc::new(Inserter::new(
-            catalog_manager.clone(),
-            partition_manager.clone(),
-            datanode_clients.clone(),
-        ));
-        let deleter = Arc::new(Deleter::new(
-            catalog_manager.clone(),
-            partition_manager,
-            datanode_clients,
-        ));
-        let table_mutation_handler = Arc::new(TableMutationOperator::new(
-            inserter.clone(),
-            deleter.clone(),
-        ));
-
-        let query_engine = QueryEngineFactory::new_with_plugins(
-            catalog_manager.clone(),
-            Some(region_query_handler.clone()),
-            Some(table_mutation_handler),
-            true,
-            plugins.clone(),
-        )
-        .query_engine();
-
-        let statement_executor = Arc::new(StatementExecutor::new(
-            catalog_manager.clone(),
-            query_engine.clone(),
-            meta_client.clone(),
-            meta_backend.clone(),
-            catalog_manager.clone(),
-            inserter.clone(),
-        ));
-
-        plugins.insert::<StatementExecutorRef>(statement_executor.clone());
-
-        let script_executor =
-            Arc::new(ScriptExecutor::new(catalog_manager.clone(), query_engine.clone()).await?);
-
-        let handlers_executor = HandlerGroupExecutor::new(vec![
-            Arc::new(ParseMailboxMessageHandler),
-            Arc::new(InvalidateTableCacheHandler::new(meta_backend)),
-        ]);
-
-        let heartbeat_task = Some(HeartbeatTask::new(
-            meta_client.clone(),
-            opts.heartbeat.clone(),
-            Arc::new(handlers_executor),
-        ));
-
-        Ok(Instance {
-            catalog_manager,
-            script_executor,
-            statement_executor,
-            query_engine,
-            plugins: plugins.clone(),
-            servers: Arc::new(HashMap::new()),
-            heartbeat_task,
-            inserter,
-            deleter,
-        })
-    }
-
-    async fn create_meta_client(opts: &FrontendOptions) -> Result<Arc<MetaClient>> {
-        let meta_client_options = opts.meta_client.as_ref().context(MissingMetasrvOptsSnafu)?;
-
+    pub async fn create_meta_client(
+        meta_client_options: &MetaClientOptions,
+    ) -> Result<Arc<MetaClient>> {
         info!(
             "Creating Frontend instance in distributed mode with Meta server addr {:?}",
             meta_client_options.metasrv_addrs
@@ -285,79 +183,6 @@ impl Instance {
         Ok((kv_backend, procedure_manager))
     }

-    pub async fn try_new_standalone(
-        kv_backend: KvBackendRef,
-        procedure_manager: ProcedureManagerRef,
-        catalog_manager: CatalogManagerRef,
-        plugins: Plugins,
-        region_server: RegionServer,
-    ) -> Result<Self> {
-        let partition_manager = Arc::new(PartitionRuleManager::new(kv_backend.clone()));
-        let datanode_manager = Arc::new(StandaloneDatanodeManager(region_server));
-
-        let region_query_handler =
-            FrontendRegionQueryHandler::arc(partition_manager.clone(), datanode_manager.clone());
-
-        let inserter = Arc::new(Inserter::new(
-            catalog_manager.clone(),
-            partition_manager.clone(),
-            datanode_manager.clone(),
-        ));
-        let deleter = Arc::new(Deleter::new(
-            catalog_manager.clone(),
-            partition_manager,
-            datanode_manager.clone(),
-        ));
-        let table_mutation_handler = Arc::new(TableMutationOperator::new(
-            inserter.clone(),
-            deleter.clone(),
-        ));
-
-        let query_engine = QueryEngineFactory::new_with_plugins(
-            catalog_manager.clone(),
-            Some(region_query_handler),
-            Some(table_mutation_handler),
-            true,
-            plugins.clone(),
-        )
-        .query_engine();
-
-        let script_executor =
-            Arc::new(ScriptExecutor::new(catalog_manager.clone(), query_engine.clone()).await?);
-
-        let table_metadata_manager = Arc::new(TableMetadataManager::new(kv_backend.clone()));
-        let cache_invalidator = Arc::new(DummyCacheInvalidator);
-
-        let ddl_executor = Arc::new(DdlManager::new(
-            procedure_manager,
-            datanode_manager,
-            cache_invalidator.clone(),
-            table_metadata_manager.clone(),
-            Arc::new(StandaloneTableMetadataCreator::new(kv_backend.clone())),
-        ));
-
-        let statement_executor = Arc::new(StatementExecutor::new(
-            catalog_manager.clone(),
-            query_engine.clone(),
-            ddl_executor,
-            kv_backend.clone(),
-            cache_invalidator,
-            inserter.clone(),
-        ));
-
-        Ok(Instance {
-            catalog_manager: catalog_manager.clone(),
-            script_executor,
-            statement_executor,
-            query_engine,
-            plugins,
-            servers: Arc::new(HashMap::new()),
-            heartbeat_task: None,
-            inserter,
-            deleter,
-        })
-    }
-
     pub async fn build_servers(
         &mut self,
         opts: impl Into<FrontendOptions> + TomlSerializable,
@@ -397,10 +222,13 @@ impl FrontendInstance for Instance {
         self.script_executor.start(self)?;

-        futures::future::try_join_all(self.servers.values().map(start_server))
-            .await
-            .context(error::StartServerSnafu)
-            .map(|_| ())
+        futures::future::try_join_all(self.servers.iter().map(|(name, handler)| async move {
+            info!("Starting service: {name}");
+            start_server(handler).await
+        }))
+        .await
+        .context(error::StartServerSnafu)
+        .map(|_| ())
     }
 }
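
After this refactor the distributed wiring moves out of `Instance` (into the new builder added below), and callers obtain the meta client directly with the signature shown above. A sketch, assuming `meta_client_options` comes from the frontend config:

    let meta_client = Instance::create_meta_client(&meta_client_options).await?;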


@@ -0,0 +1,149 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::HashMap;
use std::sync::Arc;
use catalog::kvbackend::KvBackendCatalogManager;
use common_base::Plugins;
use common_meta::cache_invalidator::{CacheInvalidatorRef, DummyCacheInvalidator};
use common_meta::datanode_manager::DatanodeManagerRef;
use common_meta::ddl::DdlTaskExecutorRef;
use common_meta::kv_backend::KvBackendRef;
use operator::delete::Deleter;
use operator::insert::Inserter;
use operator::statement::StatementExecutor;
use operator::table::TableMutationOperator;
use partition::manager::PartitionRuleManager;
use query::QueryEngineFactory;
use crate::error::Result;
use crate::heartbeat::HeartbeatTask;
use crate::instance::region_query::FrontendRegionQueryHandler;
use crate::instance::{Instance, StatementExecutorRef};
use crate::script::ScriptExecutor;
pub struct FrontendBuilder {
kv_backend: KvBackendRef,
cache_invalidator: Option<CacheInvalidatorRef>,
datanode_manager: DatanodeManagerRef,
plugins: Option<Plugins>,
ddl_task_executor: DdlTaskExecutorRef,
heartbeat_task: Option<HeartbeatTask>,
}
impl FrontendBuilder {
pub fn new(
kv_backend: KvBackendRef,
datanode_manager: DatanodeManagerRef,
ddl_task_executor: DdlTaskExecutorRef,
) -> Self {
Self {
kv_backend,
cache_invalidator: None,
datanode_manager,
plugins: None,
ddl_task_executor,
heartbeat_task: None,
}
}
pub fn with_cache_invalidator(self, cache_invalidator: CacheInvalidatorRef) -> Self {
Self {
cache_invalidator: Some(cache_invalidator),
..self
}
}
pub fn with_plugin(self, plugins: Plugins) -> Self {
Self {
plugins: Some(plugins),
..self
}
}
pub fn with_heartbeat_task(self, heartbeat_task: HeartbeatTask) -> Self {
Self {
heartbeat_task: Some(heartbeat_task),
..self
}
}
pub async fn try_build(self) -> Result<Instance> {
let kv_backend = self.kv_backend;
let datanode_manager = self.datanode_manager;
let plugins = self.plugins.unwrap_or_default();
let catalog_manager = KvBackendCatalogManager::new(
kv_backend.clone(),
self.cache_invalidator
.unwrap_or_else(|| Arc::new(DummyCacheInvalidator)),
);
let partition_manager = Arc::new(PartitionRuleManager::new(kv_backend.clone()));
let region_query_handler =
FrontendRegionQueryHandler::arc(partition_manager.clone(), datanode_manager.clone());
let inserter = Arc::new(Inserter::new(
catalog_manager.clone(),
partition_manager.clone(),
datanode_manager.clone(),
));
let deleter = Arc::new(Deleter::new(
catalog_manager.clone(),
partition_manager,
datanode_manager.clone(),
));
let table_mutation_handler = Arc::new(TableMutationOperator::new(
inserter.clone(),
deleter.clone(),
));
let query_engine = QueryEngineFactory::new_with_plugins(
catalog_manager.clone(),
Some(region_query_handler.clone()),
Some(table_mutation_handler),
true,
plugins.clone(),
)
.query_engine();
let script_executor =
Arc::new(ScriptExecutor::new(catalog_manager.clone(), query_engine.clone()).await?);
let statement_executor = Arc::new(StatementExecutor::new(
catalog_manager.clone(),
query_engine.clone(),
self.ddl_task_executor,
kv_backend,
catalog_manager.clone(),
inserter.clone(),
));
plugins.insert::<StatementExecutorRef>(statement_executor.clone());
Ok(Instance {
catalog_manager,
script_executor,
statement_executor,
query_engine,
plugins,
servers: Arc::new(HashMap::new()),
heartbeat_task: self.heartbeat_task,
inserter,
deleter,
})
}
}
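
A minimal usage sketch of the builder above (not part of the diff; it assumes `kv_backend`, `datanode_manager`, and `ddl_task_executor` handles are constructed elsewhere):

use common_base::Plugins;

// Sketch only: the optional knobs may be omitted; try_build falls back to
// Plugins::default() and DummyCacheInvalidator when they are unset.
async fn build_frontend(
    kv_backend: KvBackendRef,
    datanode_manager: DatanodeManagerRef,
    ddl_task_executor: DdlTaskExecutorRef,
) -> Result<Instance> {
    FrontendBuilder::new(kv_backend, datanode_manager, ddl_task_executor)
        .with_plugin(Plugins::default())
        .try_build()
        .await
}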

View File

@@ -122,7 +122,9 @@ impl GrpcQueryHandler for Instance {
DdlExpr::DropTable(expr) => {
let table_name =
TableName::new(&expr.catalog_name, &expr.schema_name, &expr.table_name);
- self.statement_executor.drop_table(table_name).await?
+ self.statement_executor
+     .drop_table(table_name, expr.drop_if_exists)
+     .await?
}
DdlExpr::TruncateTable(expr) => {
let table_name =

View File

@@ -107,7 +107,7 @@ impl Datanode for RegionInvoker {
}
}
- pub(crate) struct StandaloneTableMetadataCreator {
+ pub struct StandaloneTableMetadataCreator {
table_id_sequence: SequenceRef,
}

View File

@@ -6,13 +6,17 @@ license.workspace = true
[dependencies]
async-trait.workspace = true
+ bytemuck.workspace = true
common-base.workspace = true
common-error.workspace = true
common-macro.workspace = true
fst.workspace = true
futures.workspace = true
greptime-proto.workspace = true
+ mockall.workspace = true
prost.workspace = true
+ regex-automata.workspace = true
+ regex.workspace = true
snafu.workspace = true
[dev-dependencies]

View File

@@ -14,3 +14,7 @@
pub mod error;
pub mod format;
+ pub mod search;
+ pub type FstMap = fst::Map<Vec<u8>>;
+ pub type Bytes = Vec<u8>;

View File

@@ -20,6 +20,8 @@ use common_error::status_code::StatusCode;
use common_macro::stack_trace_debug;
use snafu::{Location, Snafu};
+ use crate::inverted_index::search::predicate::Predicate;
#[derive(Snafu)]
#[snafu(visibility(pub))]
#[stack_trace_debug]
@@ -62,6 +64,9 @@ pub enum Error {
payload_size: u64,
},
+ #[snafu(display("Unexpected zero segment row count"))]
+ UnexpectedZeroSegmentRowCount { location: Location },
#[snafu(display("Failed to decode fst"))]
DecodeFst {
#[snafu(source)]
@@ -75,6 +80,41 @@ pub enum Error {
error: prost::DecodeError,
location: Location,
},
+ #[snafu(display("Failed to parse regex pattern: {pattern}"))]
+ ParseRegex {
+     #[snafu(source)]
+     error: regex::Error,
+     pattern: String,
+     location: Location,
+ },
+ #[snafu(display("Failed to parse regex DFA"))]
+ ParseDFA {
+     #[snafu(source)]
+     error: regex_automata::Error,
+     location: Location,
+ },
+ #[snafu(display("Unexpected empty predicates to construct fst applier"))]
+ EmptyPredicates { location: Location },
+ #[snafu(display("Failed to construct intersection fst applier with InList predicate"))]
+ IntersectionApplierWithInList { location: Location },
+ #[snafu(display("Failed to construct keys fst applier without InList predicate"))]
+ KeysApplierWithoutInList { location: Location },
+ #[snafu(display(
+     "Failed to construct keys fst applier with unexpected predicates: {predicates:?}"
+ ))]
+ KeysApplierUnexpectedPredicates {
+     location: Location,
+     predicates: Vec<Predicate>,
+ },
+ #[snafu(display("index not found, name: {name}"))]
+ IndexNotFound { name: String, location: Location },
}
impl ErrorExt for Error {
@@ -84,10 +124,19 @@ impl ErrorExt for Error {
Seek { .. }
| Read { .. }
| UnexpectedFooterPayloadSize { .. }
+ | UnexpectedZeroSegmentRowCount { .. }
| UnexpectedOffsetSize { .. }
| UnexpectedBlobSize { .. }
| DecodeProto { .. }
- | DecodeFst { .. } => StatusCode::Unexpected,
+ | DecodeFst { .. }
+ | KeysApplierUnexpectedPredicates { .. } => StatusCode::Unexpected,
+ ParseRegex { .. }
+ | ParseDFA { .. }
+ | KeysApplierWithoutInList { .. }
+ | IntersectionApplierWithInList { .. }
+ | EmptyPredicates { .. }
+ | IndexNotFound { .. } => StatusCode::InvalidArguments,
}
}

View File

@@ -17,16 +17,15 @@ mod footer;
use async_trait::async_trait;
use common_base::BitVec;
- use fst::Map;
use greptime_proto::v1::index::{InvertedIndexMeta, InvertedIndexMetas};
use crate::inverted_index::error::Result;
+ use crate::inverted_index::FstMap;
- pub type FstMap = Map<Vec<u8>>;
/// InvertedIndexReader defines an asynchronous reader of inverted index data
+ #[mockall::automock]
#[async_trait]
- pub trait InvertedIndexReader {
+ pub trait InvertedIndexReader: Send {
/// Retrieve metadata of all inverted indices stored within the blob.
async fn metadata(&mut self) -> Result<InvertedIndexMetas>;

View File

@@ -143,7 +143,11 @@ mod tests {
};
// metas
- let mut metas = InvertedIndexMetas::default();
+ let mut metas = InvertedIndexMetas {
+     total_row_count: 10,
+     segment_row_count: 1,
+     ..Default::default()
+ };
metas.metas.insert(meta.name.clone(), meta);
metas.metas.insert(meta1.name.clone(), meta1);
let mut meta_buf = Vec::new();

View File

@@ -21,7 +21,7 @@ use snafu::{ensure, ResultExt};
use crate::inverted_index::error::{
DecodeProtoSnafu, ReadSnafu, Result, SeekSnafu, UnexpectedFooterPayloadSizeSnafu,
- UnexpectedOffsetSizeSnafu,
+ UnexpectedOffsetSizeSnafu, UnexpectedZeroSegmentRowCountSnafu,
};
use crate::inverted_index::format::FOOTER_PAYLOAD_SIZE_SIZE;
@@ -85,6 +85,11 @@ impl<R: AsyncRead + AsyncSeek + Unpin> InvertedIndeFooterReader<R> {
/// Check if the read metadata is consistent with expected sizes and offsets.
fn validate_metas(&self, metas: &InvertedIndexMetas, payload_size: u64) -> Result<()> {
+ ensure!(
+     metas.segment_row_count > 0,
+     UnexpectedZeroSegmentRowCountSnafu
+ );
for meta in metas.metas.values() {
let InvertedIndexMeta {
base_offset,
@@ -116,7 +121,10 @@ mod tests {
use super::*;
fn create_test_payload(meta: InvertedIndexMeta) -> Vec<u8> {
- let mut metas = InvertedIndexMetas::default();
+ let mut metas = InvertedIndexMetas {
+     segment_row_count: 1,
+     ..Default::default()
+ };
metas.metas.insert("test".to_string(), meta);
let mut payload_buf = vec![];
@@ -131,7 +139,6 @@
async fn test_read_payload() {
let meta = InvertedIndexMeta {
name: "test".to_string(),
- segment_row_count: 4096,
..Default::default()
};
@@ -145,14 +152,12 @@
assert_eq!(metas.metas.len(), 1);
let index_meta = &metas.metas.get("test").unwrap();
assert_eq!(index_meta.name, "test");
- assert_eq!(index_meta.segment_row_count, 4096);
}
#[tokio::test]
async fn test_invalid_footer_payload_size() {
let meta = InvertedIndexMeta {
name: "test".to_string(),
- segment_row_count: 4096,
..Default::default()
};
@@ -171,7 +176,6 @@
name: "test".to_string(),
base_offset: 0,
inverted_index_size: 1, // Set size to 1 to make it exceed the blob size
- segment_row_count: 4096,
..Default::default()
};

View File

@@ -0,0 +1,18 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
pub mod fst_apply;
pub mod fst_values_mapper;
pub mod index_apply;
pub mod predicate;

View File

@@ -0,0 +1,33 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
mod intersection_apply;
mod keys_apply;
pub use intersection_apply::IntersectionFstApplier;
pub use keys_apply::KeysFstApplier;
use crate::inverted_index::FstMap;
/// A trait for objects that can process a finite state transducer (FstMap) and return
/// associated values.
#[mockall::automock]
pub trait FstApplier: Send + Sync {
/// Retrieves values from an FstMap.
///
/// * `fst`: A reference to the FstMap from which the values will be fetched.
///
/// Returns a `Vec<u64>`, with each u64 being a value from the FstMap.
fn apply(&self, fst: &FstMap) -> Vec<u64>;
}
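
For illustration, a trivial implementation of this trait that matches every key and returns all values (a sketch, not part of the crate):

use fst::Streamer;

struct MatchAllApplier;

impl FstApplier for MatchAllApplier {
    fn apply(&self, fst: &FstMap) -> Vec<u64> {
        // Stream every (key, value) pair in the map and keep the values.
        let mut values = Vec::with_capacity(fst.len());
        let mut stream = fst.stream();
        while let Some((_key, value)) = stream.next() {
            values.push(value);
        }
        values
    }
}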

View File

@@ -0,0 +1,325 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use fst::map::OpBuilder;
use fst::{IntoStreamer, Streamer};
use regex_automata::DenseDFA;
use snafu::{ensure, ResultExt};
use crate::inverted_index::error::{
EmptyPredicatesSnafu, IntersectionApplierWithInListSnafu, ParseDFASnafu, Result,
};
use crate::inverted_index::search::fst_apply::FstApplier;
use crate::inverted_index::search::predicate::{Predicate, Range};
use crate::inverted_index::FstMap;
type Dfa = DenseDFA<Vec<usize>, usize>;
/// `IntersectionFstApplier` applies intersection operations on an FstMap using specified ranges and regex patterns.
pub struct IntersectionFstApplier {
/// A list of `Range` which define inclusive or exclusive ranges for keys to be queried in the FstMap.
ranges: Vec<Range>,
/// A list of `Dfa` compiled from regular expression patterns.
dfas: Vec<Dfa>,
}
impl FstApplier for IntersectionFstApplier {
fn apply(&self, fst: &FstMap) -> Vec<u64> {
let mut op = OpBuilder::new();
for range in &self.ranges {
match (range.lower.as_ref(), range.upper.as_ref()) {
(Some(lower), Some(upper)) => match (lower.inclusive, upper.inclusive) {
(true, true) => op.push(fst.range().ge(&lower.value).le(&upper.value)),
(true, false) => op.push(fst.range().ge(&lower.value).lt(&upper.value)),
(false, true) => op.push(fst.range().gt(&lower.value).le(&upper.value)),
(false, false) => op.push(fst.range().gt(&lower.value).lt(&upper.value)),
},
(Some(lower), None) => match lower.inclusive {
true => op.push(fst.range().ge(&lower.value)),
false => op.push(fst.range().gt(&lower.value)),
},
(None, Some(upper)) => match upper.inclusive {
true => op.push(fst.range().le(&upper.value)),
false => op.push(fst.range().lt(&upper.value)),
},
(None, None) => op.push(fst),
}
}
for dfa in &self.dfas {
op.push(fst.search(dfa));
}
let mut stream = op.intersection().into_stream();
let mut values = Vec::new();
while let Some((_, v)) = stream.next() {
values.push(v[0].value)
}
values
}
}
impl IntersectionFstApplier {
/// Attempts to create an `IntersectionFstApplier` from a list of `Predicate`.
///
/// This function only accepts predicates of the variants `Range` and `RegexMatch`.
/// It does not accept `InList` predicates and will return an error if any are found.
/// `InList` predicates are handled by `KeysFstApplier`.
pub fn try_from(predicates: Vec<Predicate>) -> Result<Self> {
ensure!(!predicates.is_empty(), EmptyPredicatesSnafu);
let mut dfas = Vec::with_capacity(predicates.len());
let mut ranges = Vec::with_capacity(predicates.len());
for predicate in predicates {
match predicate {
Predicate::Range(range) => ranges.push(range.range),
Predicate::RegexMatch(regex) => {
let dfa = DenseDFA::new(&regex.pattern);
let dfa = dfa.context(ParseDFASnafu)?;
dfas.push(dfa);
}
// Rejection of `InList` predicates is enforced here.
Predicate::InList(_) => {
return IntersectionApplierWithInListSnafu.fail();
}
}
}
Ok(Self { dfas, ranges })
}
}
impl TryFrom<Vec<Predicate>> for IntersectionFstApplier {
type Error = crate::inverted_index::error::Error;
fn try_from(predicates: Vec<Predicate>) -> Result<Self> {
Self::try_from(predicates)
}
}
#[cfg(test)]
mod tests {
use std::collections::HashSet;
use super::*;
use crate::inverted_index::error::Error;
use crate::inverted_index::search::predicate::{
Bound, InListPredicate, RangePredicate, RegexMatchPredicate,
};
fn create_applier_from_range(range: Range) -> Result<IntersectionFstApplier> {
IntersectionFstApplier::try_from(vec![Predicate::Range(RangePredicate { range })])
}
fn create_applier_from_pattern(pattern: &str) -> Result<IntersectionFstApplier> {
IntersectionFstApplier::try_from(vec![Predicate::RegexMatch(RegexMatchPredicate {
pattern: pattern.to_string(),
})])
}
#[test]
fn test_intersection_fst_applier_with_ranges() {
let test_fst = FstMap::from_iter([("aa", 1), ("bb", 2), ("cc", 3)]).unwrap();
let applier_inclusive_lower = create_applier_from_range(Range {
lower: Some(Bound {
value: b"bb".to_vec(),
inclusive: true,
}),
upper: None,
})
.unwrap();
let results = applier_inclusive_lower.apply(&test_fst);
assert_eq!(results, vec![2, 3]);
let applier_exclusive_lower = create_applier_from_range(Range {
lower: Some(Bound {
value: b"bb".to_vec(),
inclusive: false,
}),
upper: None,
})
.unwrap();
let results = applier_exclusive_lower.apply(&test_fst);
assert_eq!(results, vec![3]);
let applier_inclusive_upper = create_applier_from_range(Range {
lower: None,
upper: Some(Bound {
value: b"bb".to_vec(),
inclusive: true,
}),
})
.unwrap();
let results = applier_inclusive_upper.apply(&test_fst);
assert_eq!(results, vec![1, 2]);
let applier_exclusive_upper = create_applier_from_range(Range {
lower: None,
upper: Some(Bound {
value: b"bb".to_vec(),
inclusive: false,
}),
})
.unwrap();
let results = applier_exclusive_upper.apply(&test_fst);
assert_eq!(results, vec![1]);
let applier_inclusive_bounds = create_applier_from_range(Range {
lower: Some(Bound {
value: b"aa".to_vec(),
inclusive: true,
}),
upper: Some(Bound {
value: b"cc".to_vec(),
inclusive: true,
}),
})
.unwrap();
let results = applier_inclusive_bounds.apply(&test_fst);
assert_eq!(results, vec![1, 2, 3]);
let applier_exclusive_bounds = create_applier_from_range(Range {
lower: Some(Bound {
value: b"aa".to_vec(),
inclusive: false,
}),
upper: Some(Bound {
value: b"cc".to_vec(),
inclusive: false,
}),
})
.unwrap();
let results = applier_exclusive_bounds.apply(&test_fst);
assert_eq!(results, vec![2]);
}
#[test]
fn test_intersection_fst_applier_with_valid_pattern() {
let test_fst = FstMap::from_iter([("aa", 1), ("bb", 2), ("cc", 3)]).unwrap();
let applier = create_applier_from_pattern("a.?").unwrap();
let results = applier.apply(&test_fst);
assert_eq!(results, vec![1]);
let applier = create_applier_from_pattern("b.?").unwrap();
let results = applier.apply(&test_fst);
assert_eq!(results, vec![2]);
let applier = create_applier_from_pattern("c.?").unwrap();
let results = applier.apply(&test_fst);
assert_eq!(results, vec![3]);
let applier = create_applier_from_pattern("a.*").unwrap();
let results = applier.apply(&test_fst);
assert_eq!(results, vec![1]);
let applier = create_applier_from_pattern("b.*").unwrap();
let results = applier.apply(&test_fst);
assert_eq!(results, vec![2]);
let applier = create_applier_from_pattern("c.*").unwrap();
let results = applier.apply(&test_fst);
assert_eq!(results, vec![3]);
let applier = create_applier_from_pattern("d.?").unwrap();
let results = applier.apply(&test_fst);
assert!(results.is_empty());
let applier = create_applier_from_pattern("a.?|b.?").unwrap();
let results = applier.apply(&test_fst);
assert_eq!(results, vec![1, 2]);
let applier = create_applier_from_pattern("d.?|a.?").unwrap();
let results = applier.apply(&test_fst);
assert_eq!(results, vec![1]);
let applier = create_applier_from_pattern(".*").unwrap();
let results = applier.apply(&test_fst);
assert_eq!(results, vec![1, 2, 3]);
}
#[test]
fn test_intersection_fst_applier_with_composite_predicates() {
let test_fst = FstMap::from_iter([("aa", 1), ("bb", 2), ("cc", 3)]).unwrap();
let applier = IntersectionFstApplier::try_from(vec![
Predicate::Range(RangePredicate {
range: Range {
lower: Some(Bound {
value: b"aa".to_vec(),
inclusive: true,
}),
upper: Some(Bound {
value: b"cc".to_vec(),
inclusive: true,
}),
},
}),
Predicate::RegexMatch(RegexMatchPredicate {
pattern: "a.?".to_string(),
}),
])
.unwrap();
let results = applier.apply(&test_fst);
assert_eq!(results, vec![1]);
let applier = IntersectionFstApplier::try_from(vec![
Predicate::Range(RangePredicate {
range: Range {
lower: Some(Bound {
value: b"aa".to_vec(),
inclusive: false,
}),
upper: Some(Bound {
value: b"cc".to_vec(),
inclusive: true,
}),
},
}),
Predicate::RegexMatch(RegexMatchPredicate {
pattern: "a.?".to_string(),
}),
])
.unwrap();
let results = applier.apply(&test_fst);
assert!(results.is_empty());
}
#[test]
fn test_intersection_fst_applier_with_invalid_pattern() {
let result = create_applier_from_pattern("a(");
assert!(matches!(result, Err(Error::ParseDFA { .. })));
}
#[test]
fn test_intersection_fst_applier_with_empty_predicates() {
let result = IntersectionFstApplier::try_from(vec![]);
assert!(matches!(result, Err(Error::EmptyPredicates { .. })));
}
#[test]
fn test_intersection_fst_applier_with_in_list_predicate() {
let result = IntersectionFstApplier::try_from(vec![Predicate::InList(InListPredicate {
list: HashSet::from_iter([b"one".to_vec(), b"two".to_vec()]),
})]);
assert!(matches!(
result,
Err(Error::IntersectionApplierWithInList { .. })
));
}
}

View File

@@ -0,0 +1,305 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::HashSet;
use snafu::{ensure, ResultExt};
use crate::inverted_index::error::{
EmptyPredicatesSnafu, KeysApplierUnexpectedPredicatesSnafu, KeysApplierWithoutInListSnafu,
ParseRegexSnafu, Result,
};
use crate::inverted_index::search::fst_apply::FstApplier;
use crate::inverted_index::search::predicate::Predicate;
use crate::inverted_index::{Bytes, FstMap};
/// `KeysFstApplier` is responsible for applying a search using a set of predefined keys
/// against an FstMap to fetch associated values.
pub struct KeysFstApplier {
/// A list of keys to be fetched directly from the FstMap.
keys: Vec<Bytes>,
}
impl FstApplier for KeysFstApplier {
fn apply(&self, fst: &FstMap) -> Vec<u64> {
self.keys.iter().filter_map(|k| fst.get(k)).collect()
}
}
impl KeysFstApplier {
/// Tries to create a `KeysFstApplier` from a list of predicates.
///
/// This function constructs the applier by intersecting keys from one or more `InList` predicates,
/// which are required. It then optionally refines this set using any additional `Range` and `RegexMatch`
/// predicates provided.
pub fn try_from(mut predicates: Vec<Predicate>) -> Result<Self> {
ensure!(!predicates.is_empty(), EmptyPredicatesSnafu);
let (in_lists, others) = Self::split_at_in_lists(&mut predicates);
let (ranges, regexes) = Self::split_at_ranges(others);
Self::ensure_all_regexes(regexes)?;
ensure!(!in_lists.is_empty(), KeysApplierWithoutInListSnafu);
let intersected_keys = Self::intersect_with_lists(in_lists);
let range_matched_keys = Self::filter_by_ranges(intersected_keys, ranges);
let regex_matched_keys = Self::filter_by_regexes(range_matched_keys, regexes)?;
Ok(Self {
keys: regex_matched_keys,
})
}
fn split_at_in_lists(predicates: &mut [Predicate]) -> (&mut [Predicate], &mut [Predicate]) {
let in_list_index = predicates
.iter_mut()
.partition_in_place(|p| matches!(p, Predicate::InList(_)));
predicates.split_at_mut(in_list_index)
}
fn split_at_ranges(predicates: &mut [Predicate]) -> (&mut [Predicate], &mut [Predicate]) {
let range_index = predicates
.iter_mut()
.partition_in_place(|p| matches!(p, Predicate::Range(_)));
predicates.split_at_mut(range_index)
}
fn ensure_all_regexes(ps: &[Predicate]) -> Result<()> {
ensure!(
ps.iter().all(|p| matches!(p, Predicate::RegexMatch(_))),
KeysApplierUnexpectedPredicatesSnafu {
predicates: ps.to_vec()
}
);
Ok(())
}
fn intersect_with_lists(in_lists: &mut [Predicate]) -> Vec<Bytes> {
#[inline]
fn get_list(p: &Predicate) -> &HashSet<Bytes> {
match p {
Predicate::InList(i) => &i.list,
_ => unreachable!(), // `in_lists` is filtered by `split_at_in_lists`
}
}
in_lists.sort_unstable_by_key(|p| get_list(p).len());
get_list(&in_lists[0])
.iter()
.filter(|c| in_lists[1..].iter().all(|s| get_list(s).contains(*c)))
.cloned()
.collect()
}
fn filter_by_ranges(mut keys: Vec<Bytes>, ranges: &[Predicate]) -> Vec<Bytes> {
#[inline]
fn range_contains(p: &Predicate, key: &Bytes) -> bool {
let (lower, upper) = match p {
Predicate::Range(r) => (&r.range.lower, &r.range.upper),
_ => unreachable!(), // `ranges` is filtered by `split_at_ranges`
};
match (lower, upper) {
(Some(lower), Some(upper)) => match (lower.inclusive, upper.inclusive) {
(true, true) => &lower.value <= key && key <= &upper.value,
(true, false) => &lower.value <= key && key < &upper.value,
(false, true) => &lower.value < key && key <= &upper.value,
(false, false) => &lower.value < key && key < &upper.value,
},
(Some(lower), None) => match lower.inclusive {
true => &lower.value <= key,
false => &lower.value < key,
},
(None, Some(upper)) => match upper.inclusive {
true => key <= &upper.value,
false => key < &upper.value,
},
(None, None) => true,
}
}
keys.retain(|k| ranges.iter().all(|r| range_contains(r, k)));
keys
}
fn filter_by_regexes(mut keys: Vec<Bytes>, regexes: &[Predicate]) -> Result<Vec<Bytes>> {
for p in regexes {
let pattern = match p {
Predicate::RegexMatch(r) => &r.pattern,
_ => unreachable!(), // checked by `ensure_all_regexes`
};
let regex = regex::Regex::new(pattern).with_context(|_| ParseRegexSnafu {
pattern: pattern.to_owned(),
})?;
keys.retain(|k| {
std::str::from_utf8(k)
.map(|k| regex.is_match(k))
.unwrap_or_default()
});
if keys.is_empty() {
return Ok(keys);
}
}
Ok(keys)
}
}
impl TryFrom<Vec<Predicate>> for KeysFstApplier {
type Error = crate::inverted_index::error::Error;
fn try_from(predicates: Vec<Predicate>) -> Result<Self> {
Self::try_from(predicates)
}
}
#[cfg(test)]
mod tests {
use fst::Map as FstMap;
use super::*;
use crate::inverted_index::error::Error;
use crate::inverted_index::search::predicate::{
Bound, InListPredicate, Predicate, Range, RangePredicate, RegexMatchPredicate,
};
fn create_fst_map(items: &[(&[u8], u64)]) -> FstMap<Vec<u8>> {
let mut items = items
.iter()
.map(|(k, v)| (k.to_vec(), *v))
.collect::<Vec<_>>();
items.sort();
FstMap::from_iter(items).unwrap()
}
fn b(s: &str) -> Vec<u8> {
s.as_bytes().to_vec()
}
#[test]
fn test_keys_fst_applier_apply() {
let test_fst = create_fst_map(&[(b"foo", 1), (b"bar", 2), (b"baz", 3)]);
let applier = KeysFstApplier {
keys: vec![b("foo"), b("baz")],
};
let results = applier.apply(&test_fst);
assert_eq!(results, vec![1, 3]);
}
#[test]
fn test_keys_fst_applier_with_empty_keys() {
let test_fst = create_fst_map(&[(b"foo", 1), (b"bar", 2), (b"baz", 3)]);
let applier = KeysFstApplier { keys: vec![] };
let results = applier.apply(&test_fst);
assert!(results.is_empty());
}
#[test]
fn test_keys_fst_applier_with_unmatched_keys() {
let test_fst = create_fst_map(&[(b"foo", 1), (b"bar", 2), (b"baz", 3)]);
let applier = KeysFstApplier {
keys: vec![b("qux"), b("quux")],
};
let results = applier.apply(&test_fst);
assert!(results.is_empty());
}
#[test]
fn test_keys_fst_applier_try_from() {
let predicates = vec![
Predicate::InList(InListPredicate {
list: HashSet::from_iter(vec![b("foo"), b("bar")]),
}),
Predicate::Range(RangePredicate {
range: Range {
lower: Some(Bound {
value: b("bar"),
inclusive: true,
}),
upper: None,
},
}),
Predicate::RegexMatch(RegexMatchPredicate {
pattern: ".*r".to_string(),
}),
];
let applier = KeysFstApplier::try_from(predicates).unwrap();
assert_eq!(applier.keys, vec![b("bar")]);
}
#[test]
fn test_keys_fst_applier_try_from_filter_out_unmatched_keys() {
let predicates = vec![
Predicate::InList(InListPredicate {
list: HashSet::from_iter(vec![b("foo"), b("bar")]),
}),
Predicate::Range(RangePredicate {
range: Range {
lower: Some(Bound {
value: b("f"),
inclusive: true,
}),
upper: None,
},
}),
Predicate::RegexMatch(RegexMatchPredicate {
pattern: ".*o".to_string(),
}),
];
let applier = KeysFstApplier::try_from(predicates).unwrap();
assert_eq!(applier.keys, vec![b("foo")]);
}
#[test]
fn test_keys_fst_applier_try_from_empty_predicates() {
let predicates = vec![];
let result = KeysFstApplier::try_from(predicates);
assert!(matches!(result, Err(Error::EmptyPredicates { .. })));
}
#[test]
fn test_keys_fst_applier_try_from_without_in_list() {
let predicates = vec![Predicate::Range(RangePredicate {
range: Range {
lower: Some(Bound {
value: b("bar"),
inclusive: true,
}),
upper: None,
},
})];
let result = KeysFstApplier::try_from(predicates);
assert!(matches!(
result,
Err(Error::KeysApplierWithoutInList { .. })
));
}
#[test]
fn test_keys_fst_applier_try_from_with_invalid_regex() {
let predicates = vec![
Predicate::InList(InListPredicate {
list: HashSet::from_iter(vec![b("foo"), b("bar")]),
}),
Predicate::RegexMatch(RegexMatchPredicate {
pattern: "*invalid regex".to_string(),
}),
];
let result = KeysFstApplier::try_from(predicates);
assert!(matches!(result, Err(Error::ParseRegex { .. })));
}
}

View File

@@ -0,0 +1,112 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use common_base::BitVec;
use greptime_proto::v1::index::InvertedIndexMeta;
use crate::inverted_index::error::Result;
use crate::inverted_index::format::reader::InvertedIndexReader;
/// `FstValuesMapper` maps FST-encoded u64 values to their corresponding bitmaps
/// within an inverted index. The higher 32 bits of each u64 value represent the
/// bitmap offset and the lower 32 bits represent its size. This mapper uses these
/// combined offset-size pairs to fetch and union multiple bitmaps into a single `BitVec`.
pub struct FstValuesMapper<'a> {
/// `reader` retrieves bitmap data using offsets and sizes from FST values.
reader: &'a mut dyn InvertedIndexReader,
/// `metadata` provides context for interpreting the index structures.
metadata: &'a InvertedIndexMeta,
}
impl<'a> FstValuesMapper<'a> {
pub fn new(
reader: &'a mut dyn InvertedIndexReader,
metadata: &'a InvertedIndexMeta,
) -> FstValuesMapper<'a> {
FstValuesMapper { reader, metadata }
}
/// Maps an array of FST values to a `BitVec` by retrieving and combining bitmaps.
pub async fn map_values(&mut self, values: &[u64]) -> Result<BitVec> {
let mut bitmap = BitVec::new();
for value in values {
// relative_offset (higher 32 bits), size (lower 32 bits)
let [relative_offset, size] = bytemuck::cast::<u64, [u32; 2]>(*value);
let bm = self
.reader
.bitmap(self.metadata, relative_offset, size)
.await?;
// Ensure the longest BitVec is the left operand to prevent truncation during OR.
if bm.len() > bitmap.len() {
bitmap = bm | bitmap
} else {
bitmap |= bm
}
}
Ok(bitmap)
}
}
#[cfg(test)]
mod tests {
use common_base::bit_vec::prelude::*;
use super::*;
use crate::inverted_index::format::reader::MockInvertedIndexReader;
fn value(offset: u32, size: u32) -> u64 {
bytemuck::cast::<[u32; 2], u64>([offset, size])
}
#[tokio::test]
async fn test_map_values() {
let mut mock_reader = MockInvertedIndexReader::new();
mock_reader
.expect_bitmap()
.returning(|_, offset, size| match (offset, size) {
(1, 1) => Ok(bitvec![u8, Lsb0; 1, 0, 1, 0, 1, 0, 1]),
(2, 1) => Ok(bitvec![u8, Lsb0; 0, 1, 0, 1, 0, 1, 0, 1]),
_ => unreachable!(),
});
let meta = InvertedIndexMeta::default();
let mut values_mapper = FstValuesMapper::new(&mut mock_reader, &meta);
let result = values_mapper.map_values(&[]).await.unwrap();
assert_eq!(result.count_ones(), 0);
let result = values_mapper.map_values(&[value(1, 1)]).await.unwrap();
assert_eq!(result, bitvec![u8, Lsb0; 1, 0, 1, 0, 1, 0, 1]);
let result = values_mapper.map_values(&[value(2, 1)]).await.unwrap();
assert_eq!(result, bitvec![u8, Lsb0; 0, 1, 0, 1, 0, 1, 0, 1]);
let result = values_mapper
.map_values(&[value(1, 1), value(2, 1)])
.await
.unwrap();
assert_eq!(result, bitvec![u8, Lsb0; 1, 1, 1, 1, 1, 1, 1, 1]);
let result = values_mapper
.map_values(&[value(2, 1), value(1, 1)])
.await
.unwrap();
assert_eq!(result, bitvec![u8, Lsb0; 1, 1, 1, 1, 1, 1, 1, 1]);
}
}
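
The u64 packing decoded by `map_values` can be checked in isolation; this standalone sketch mirrors the tests' `value` helper (only the `bytemuck` crate is assumed):

fn pack(offset: u32, size: u32) -> u64 {
    bytemuck::cast::<[u32; 2], u64>([offset, size])
}

fn unpack(value: u64) -> (u32, u32) {
    // The inverse cast recovers exactly what was packed, whatever the endianness.
    let [offset, size] = bytemuck::cast::<u64, [u32; 2]>(value);
    (offset, size)
}

fn main() {
    assert_eq!(unpack(pack(42, 7)), (42, 7));
}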

View File

@@ -0,0 +1,57 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
mod predicates_apply;
use async_trait::async_trait;
pub use predicates_apply::PredicatesIndexApplier;
use crate::inverted_index::error::Result;
use crate::inverted_index::format::reader::InvertedIndexReader;
/// A trait for processing and transforming indices obtained from an inverted index.
///
/// Applier instances are reusable and work with various `InvertedIndexReader` instances,
/// avoiding repeated compilation of fixed predicates such as regex patterns.
#[async_trait]
pub trait IndexApplier {
/// Applies the predefined predicates to the data read by the given index reader, returning
/// a list of relevant indices (e.g., post IDs, group IDs, row IDs).
async fn apply(
&self,
context: SearchContext,
reader: &mut dyn InvertedIndexReader,
) -> Result<Vec<usize>>;
}
/// A context for searching the inverted index.
#[derive(Clone, Debug, Eq, PartialEq, Default)]
pub struct SearchContext {
/// `index_not_found_strategy` controls the behavior of the applier when the index is not found.
pub index_not_found_strategy: IndexNotFoundStrategy,
}
/// Defines the behavior of an applier when the index is not found.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Default)]
pub enum IndexNotFoundStrategy {
/// Return an empty list of indices.
#[default]
ReturnEmpty,
/// Ignore the index and continue.
Ignore,
/// Throw an error.
ThrowError,
}
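
A short sketch of picking a strategy at the call site (illustrative only):

fn example_contexts() -> (SearchContext, SearchContext) {
    // SearchContext::default() is equivalent to ReturnEmpty, per the
    // #[default] attribute above.
    let strict = SearchContext {
        index_not_found_strategy: IndexNotFoundStrategy::ThrowError,
    };
    let lenient = SearchContext {
        index_not_found_strategy: IndexNotFoundStrategy::Ignore,
    };
    (strict, lenient)
}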

View File

@@ -0,0 +1,346 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use async_trait::async_trait;
use common_base::BitVec;
use greptime_proto::v1::index::InvertedIndexMetas;
use crate::inverted_index::error::{IndexNotFoundSnafu, Result};
use crate::inverted_index::format::reader::InvertedIndexReader;
use crate::inverted_index::search::fst_apply::{
FstApplier, IntersectionFstApplier, KeysFstApplier,
};
use crate::inverted_index::search::fst_values_mapper::FstValuesMapper;
use crate::inverted_index::search::index_apply::{
IndexApplier, IndexNotFoundStrategy, SearchContext,
};
use crate::inverted_index::search::predicate::Predicate;
type IndexName = String;
/// `PredicatesIndexApplier` contains a collection of `FstApplier`s, each associated with an index name,
/// to process and filter index data based on compiled predicates.
pub struct PredicatesIndexApplier {
/// A list of `FstApplier`s, each associated with a specific index name
/// (e.g. a tag field uses its column name as index name)
fst_appliers: Vec<(IndexName, Box<dyn FstApplier>)>,
}
#[async_trait]
impl IndexApplier for PredicatesIndexApplier {
/// Applies all `FstApplier`s to the data in the inverted index reader, intersecting the individual
/// bitmaps obtained for each index to result in a final set of indices.
async fn apply(
&self,
context: SearchContext,
reader: &mut dyn InvertedIndexReader,
) -> Result<Vec<usize>> {
let metadata = reader.metadata().await?;
let mut bitmap = Self::bitmap_full_range(&metadata);
// TODO(zhongzc): optimize the order of applying to make it quicker to return empty.
for (name, fst_applier) in &self.fst_appliers {
if bitmap.count_ones() == 0 {
break;
}
let Some(meta) = metadata.metas.get(name) else {
match context.index_not_found_strategy {
IndexNotFoundStrategy::ReturnEmpty => {
return Ok(vec![]);
}
IndexNotFoundStrategy::Ignore => {
continue;
}
IndexNotFoundStrategy::ThrowError => {
return IndexNotFoundSnafu { name }.fail();
}
}
};
let fst = reader.fst(meta).await?;
let values = fst_applier.apply(&fst);
let mut mapper = FstValuesMapper::new(&mut *reader, meta);
let bm = mapper.map_values(&values).await?;
bitmap &= bm;
}
Ok(bitmap.iter_ones().collect())
}
}
impl PredicatesIndexApplier {
/// Constructs an instance of `PredicatesIndexApplier` based on a list of tag predicates.
/// Chooses an appropriate `FstApplier` for each index name based on the nature of its predicates.
pub fn try_from(mut predicates: Vec<(IndexName, Vec<Predicate>)>) -> Result<Self> {
let mut fst_appliers = Vec::with_capacity(predicates.len());
// InList predicates are applied first to benefit from higher selectivity.
let in_list_index = predicates
.iter_mut()
.partition_in_place(|(_, ps)| ps.iter().any(|p| matches!(p, Predicate::InList(_))));
let mut iter = predicates.into_iter();
for _ in 0..in_list_index {
let (tag_name, predicates) = iter.next().unwrap();
let fst_applier = Box::new(KeysFstApplier::try_from(predicates)?) as _;
fst_appliers.push((tag_name, fst_applier));
}
for (tag_name, predicates) in iter {
if predicates.is_empty() {
continue;
}
let fst_applier = Box::new(IntersectionFstApplier::try_from(predicates)?) as _;
fst_appliers.push((tag_name, fst_applier));
}
Ok(PredicatesIndexApplier { fst_appliers })
}
/// Creates a `BitVec` representing the full range of data in the index for initial scanning.
fn bitmap_full_range(metadata: &InvertedIndexMetas) -> BitVec {
let total_count = metadata.total_row_count;
let segment_count = metadata.segment_row_count;
let len = (total_count + segment_count - 1) / segment_count;
BitVec::repeat(true, len as _)
}
}
impl TryFrom<Vec<(String, Vec<Predicate>)>> for PredicatesIndexApplier {
type Error = crate::inverted_index::error::Error;
fn try_from(predicates: Vec<(String, Vec<Predicate>)>) -> Result<Self> {
Self::try_from(predicates)
}
}
#[cfg(test)]
mod tests {
use common_base::bit_vec::prelude::*;
use greptime_proto::v1::index::InvertedIndexMeta;
use super::*;
use crate::inverted_index::error::Error;
use crate::inverted_index::format::reader::MockInvertedIndexReader;
use crate::inverted_index::search::fst_apply::MockFstApplier;
use crate::inverted_index::FstMap;
fn s(s: &'static str) -> String {
s.to_owned()
}
fn mock_metas(tags: impl IntoIterator<Item = &'static str>) -> InvertedIndexMetas {
let mut metas = InvertedIndexMetas {
total_row_count: 8,
segment_row_count: 1,
..Default::default()
};
for tag in tags.into_iter() {
let meta = InvertedIndexMeta {
name: s(tag),
..Default::default()
};
metas.metas.insert(s(tag), meta);
}
metas
}
fn key_fst_applier(value: &'static str) -> Box<dyn FstApplier> {
let mut mock_fst_applier = MockFstApplier::new();
mock_fst_applier
.expect_apply()
.returning(move |fst| fst.get(value).into_iter().collect());
Box::new(mock_fst_applier)
}
fn fst_value(offset: u32, size: u32) -> u64 {
bytemuck::cast::<_, u64>([offset, size])
}
#[tokio::test]
async fn test_index_applier_apply_get_key() {
// An index applier that point-gets "tag-0_value-0" on tag "tag-0"
let applier = PredicatesIndexApplier {
fst_appliers: vec![(s("tag-0"), key_fst_applier("tag-0_value-0"))],
};
// An index reader with a single tag "tag-0" and a corresponding value "tag-0_value-0"
let mut mock_reader = MockInvertedIndexReader::new();
mock_reader
.expect_metadata()
.returning(|| Ok(mock_metas(["tag-0"])));
mock_reader
.expect_fst()
.returning(|meta| match meta.name.as_str() {
"tag-0" => Ok(FstMap::from_iter([(b"tag-0_value-0", fst_value(2, 1))]).unwrap()),
_ => unreachable!(),
});
mock_reader.expect_bitmap().returning(|meta, offset, size| {
match (meta.name.as_str(), offset, size) {
("tag-0", 2, 1) => Ok(bitvec![u8, Lsb0; 1, 0, 1, 0, 1, 0, 1, 0]),
_ => unreachable!(),
}
});
let indices = applier
.apply(SearchContext::default(), &mut mock_reader)
.await
.unwrap();
assert_eq!(indices, vec![0, 2, 4, 6]);
// An index reader with a single tag "tag-0" but without value "tag-0_value-0"
let mut mock_reader = MockInvertedIndexReader::new();
mock_reader
.expect_metadata()
.returning(|| Ok(mock_metas(["tag-0"])));
mock_reader
.expect_fst()
.returning(|meta| match meta.name.as_str() {
"tag-0" => Ok(FstMap::from_iter([(b"tag-0_value-1", fst_value(2, 1))]).unwrap()),
_ => unreachable!(),
});
let indices = applier
.apply(SearchContext::default(), &mut mock_reader)
.await
.unwrap();
assert!(indices.is_empty());
}
#[tokio::test]
async fn test_index_applier_apply_intersection_with_two_tags() {
// An index applier that intersects "tag-0_value-0" on tag "tag-0" and "tag-1_value-a" on tag "tag-1"
let applier = PredicatesIndexApplier {
fst_appliers: vec![
(s("tag-0"), key_fst_applier("tag-0_value-0")),
(s("tag-1"), key_fst_applier("tag-1_value-a")),
],
};
// An index reader with two tags "tag-0" and "tag-1" and respective values "tag-0_value-0" and "tag-1_value-a"
let mut mock_reader = MockInvertedIndexReader::new();
mock_reader
.expect_metadata()
.returning(|| Ok(mock_metas(["tag-0", "tag-1"])));
mock_reader
.expect_fst()
.returning(|meta| match meta.name.as_str() {
"tag-0" => Ok(FstMap::from_iter([(b"tag-0_value-0", fst_value(1, 1))]).unwrap()),
"tag-1" => Ok(FstMap::from_iter([(b"tag-1_value-a", fst_value(2, 1))]).unwrap()),
_ => unreachable!(),
});
mock_reader.expect_bitmap().returning(|meta, offset, size| {
match (meta.name.as_str(), offset, size) {
("tag-0", 1, 1) => Ok(bitvec![u8, Lsb0; 1, 0, 1, 0, 1, 0, 1, 0]),
("tag-1", 2, 1) => Ok(bitvec![u8, Lsb0; 1, 1, 0, 1, 1, 0, 1, 1]),
_ => unreachable!(),
}
});
let indices = applier
.apply(SearchContext::default(), &mut mock_reader)
.await
.unwrap();
assert_eq!(indices, vec![0, 4, 6]);
}
#[tokio::test]
async fn test_index_applier_without_predicates() {
let applier = PredicatesIndexApplier {
fst_appliers: vec![],
};
let mut mock_reader: MockInvertedIndexReader = MockInvertedIndexReader::new();
mock_reader
.expect_metadata()
.returning(|| Ok(mock_metas(["tag-0"])));
let indices = applier
.apply(SearchContext::default(), &mut mock_reader)
.await
.unwrap();
assert_eq!(indices, vec![0, 1, 2, 3, 4, 5, 6, 7]); // full range to scan
}
#[tokio::test]
async fn test_index_applier_with_empty_index() {
let mut mock_reader = MockInvertedIndexReader::new();
mock_reader.expect_metadata().returning(move || {
Ok(InvertedIndexMetas {
total_row_count: 0, // No rows
segment_row_count: 1,
..Default::default()
})
});
let mut mock_fst_applier = MockFstApplier::new();
mock_fst_applier.expect_apply().never();
let applier = PredicatesIndexApplier {
fst_appliers: vec![(s("tag-0"), Box::new(mock_fst_applier))],
};
let indices = applier
.apply(SearchContext::default(), &mut mock_reader)
.await
.unwrap();
assert!(indices.is_empty());
}
#[tokio::test]
async fn test_index_applier_with_nonexistent_index() {
let mut mock_reader = MockInvertedIndexReader::new();
mock_reader
.expect_metadata()
.returning(|| Ok(mock_metas(vec![])));
let mut mock_fst_applier = MockFstApplier::new();
mock_fst_applier.expect_apply().never();
let applier = PredicatesIndexApplier {
fst_appliers: vec![(s("tag-0"), Box::new(mock_fst_applier))],
};
let result = applier
.apply(
SearchContext {
index_not_found_strategy: IndexNotFoundStrategy::ThrowError,
},
&mut mock_reader,
)
.await;
assert!(matches!(result, Err(Error::IndexNotFound { .. })));
let indices = applier
.apply(
SearchContext {
index_not_found_strategy: IndexNotFoundStrategy::ReturnEmpty,
},
&mut mock_reader,
)
.await
.unwrap();
assert!(indices.is_empty());
let indices = applier
.apply(
SearchContext {
index_not_found_strategy: IndexNotFoundStrategy::Ignore,
},
&mut mock_reader,
)
.await
.unwrap();
assert_eq!(indices, vec![0, 1, 2, 3, 4, 5, 6, 7]);
}
}
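
Putting the pieces together, a hedged end-to-end sketch (the tag name and single InList predicate are illustrative; `try_from` and `apply` are the methods defined above):

use std::collections::HashSet;

async fn search_one_tag(reader: &mut dyn InvertedIndexReader) -> Result<Vec<usize>> {
    // One tag with a point-get predicate; try_from selects KeysFstApplier for it.
    let predicates = vec![(
        "tag-0".to_string(),
        vec![Predicate::InList(InListPredicate {
            list: HashSet::from_iter([b"tag-0_value-0".to_vec()]),
        })],
    )];
    let applier = PredicatesIndexApplier::try_from(predicates)?;
    applier.apply(SearchContext::default(), reader).await
}

Note that `bitmap_full_range` sizes the initial bitmap as ceil(total_row_count / segment_row_count), which is why the mocked metas above (8 rows, segment_row_count 1) produce an 8-bit full range.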

View File

@@ -0,0 +1,73 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::HashSet;
use crate::inverted_index::Bytes;
/// Enumerates types of predicates for value filtering.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum Predicate {
/// Predicate for matching values in a list.
InList(InListPredicate),
/// Predicate for matching values within a range.
Range(RangePredicate),
/// Predicate for matching values against a regex pattern.
RegexMatch(RegexMatchPredicate),
}
/// `InListPredicate` contains a list of acceptable values. A value needs to match at least
/// one of the elements (logical OR semantic) for the predicate to be satisfied.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct InListPredicate {
/// List of acceptable values.
pub list: HashSet<Bytes>,
}
/// `Bound` is a sub-component of a range, representing a single-sided limit that could be inclusive or exclusive.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Bound {
/// Whether the bound is inclusive or exclusive.
pub inclusive: bool,
/// The value of the bound.
pub value: Bytes,
}
/// `Range` defines a single continuous range which can optionally have a lower and/or upper limit.
/// Both the lower and upper bounds must be satisfied for the range condition to be true.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Range {
/// The lower bound of the range.
pub lower: Option<Bound>,
/// The upper bound of the range.
pub upper: Option<Bound>,
}
/// `RangePredicate` encapsulates a range condition that must be satisfied
/// for the predicate to hold true (logical AND semantic between the bounds).
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct RangePredicate {
/// The range condition.
pub range: Range,
}
/// `RegexMatchPredicate` encapsulates a single regex pattern. A value must match
/// the pattern for the predicate to be satisfied.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct RegexMatchPredicate {
/// The regex pattern.
pub pattern: String,
}
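
As a quick illustration of the bound semantics, a half-open range predicate matching keys in ["aa", "cc") (sketch only):

fn half_open_range() -> Predicate {
    Predicate::Range(RangePredicate {
        range: Range {
            // Inclusive lower bound: keys >= "aa".
            lower: Some(Bound {
                inclusive: true,
                value: b"aa".to_vec(),
            }),
            // Exclusive upper bound: keys < "cc".
            upper: Some(Bound {
                inclusive: false,
                value: b"cc".to_vec(),
            }),
        },
    })
}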

View File

@@ -12,4 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+ #![feature(iter_partition_in_place)]
pub mod inverted_index;

View File

@@ -22,7 +22,7 @@ use api::v1::meta::store_server::StoreServer;
use common_base::Plugins;
use common_meta::kv_backend::etcd::EtcdStore;
use common_meta::kv_backend::memory::MemoryKvBackend;
- use common_meta::kv_backend::ResettableKvBackendRef;
+ use common_meta::kv_backend::{KvBackendRef, ResettableKvBackendRef};
use common_telemetry::info;
use etcd_client::Client;
use servers::configurator::ConfiguratorRef;
@@ -60,8 +60,11 @@ pub struct MetaSrvInstance {
}
impl MetaSrvInstance {
- pub async fn new(opts: MetaSrvOptions, plugins: Plugins) -> Result<MetaSrvInstance> {
-     let meta_srv = build_meta_srv(&opts, plugins.clone()).await?;
+ pub async fn new(
+     opts: MetaSrvOptions,
+     plugins: Plugins,
+     meta_srv: MetaSrv,
+ ) -> Result<MetaSrvInstance> {
let http_srv = Arc::new(
HttpServerBuilder::new(opts.http.clone())
.with_metrics_handler(MetricsHandler)
@@ -161,28 +164,26 @@ pub fn router(meta_srv: MetaSrv) -> Router {
.add_service(admin::make_admin_service(meta_srv))
}
- pub async fn build_meta_srv(opts: &MetaSrvOptions, plugins: Plugins) -> Result<MetaSrv> {
-     let (kv_backend, election, lock) = if opts.use_memory_store {
-         (
+ pub async fn metasrv_builder(
+     opts: &MetaSrvOptions,
+     plugins: Plugins,
+     kv_backend: Option<KvBackendRef>,
+ ) -> Result<MetaSrvBuilder> {
+     let (kv_backend, election, lock) = match (kv_backend, opts.use_memory_store) {
+         (Some(kv_backend), _) => (kv_backend, None, Some(Arc::new(MemLock::default()) as _)),
+         (None, true) => (
Arc::new(MemoryKvBackend::new()) as _,
None,
Some(Arc::new(MemLock::default()) as _),
-         )
-     } else {
-         let etcd_endpoints = opts
-             .store_addr
-             .split(',')
-             .map(|x| x.trim())
-             .filter(|x| !x.is_empty())
-             .collect::<Vec<_>>();
-         let etcd_client = Client::connect(&etcd_endpoints, None)
-             .await
-             .context(error::ConnectEtcdSnafu)?;
-         (
-             EtcdStore::with_etcd_client(etcd_client.clone()),
-             Some(EtcdElection::with_etcd_client(&opts.server_addr, etcd_client.clone()).await?),
-             Some(EtcdLock::with_etcd_client(etcd_client)?),
-         )
+         ),
+         (None, false) => {
+             let etcd_client = create_etcd_client(opts).await?;
+             (
+                 EtcdStore::with_etcd_client(etcd_client.clone()),
+                 Some(EtcdElection::with_etcd_client(&opts.server_addr, etcd_client.clone()).await?),
+                 Some(EtcdLock::with_etcd_client(etcd_client)?),
+             )
+         }
};
let in_memory = Arc::new(MemoryKvBackend::new()) as ResettableKvBackendRef;
@@ -192,14 +193,24 @@ pub async fn build_meta_srv(opts: &MetaSrvOptions, plugins: Plugins) -> Result<M
SelectorType::LeaseBased => Arc::new(LeaseBasedSelector) as SelectorRef,
};
- MetaSrvBuilder::new()
+ Ok(MetaSrvBuilder::new()
.options(opts.clone())
.kv_backend(kv_backend)
.in_memory(in_memory)
.selector(selector)
.election(election)
.lock(lock)
- .plugins(plugins)
- .build()
- .await
+ .plugins(plugins))
+ }
+ async fn create_etcd_client(opts: &MetaSrvOptions) -> Result<Client> {
+     let etcd_endpoints = opts
+         .store_addr
+         .split(',')
+         .map(|x| x.trim())
+         .filter(|x| !x.is_empty())
+         .collect::<Vec<_>>();
+     Client::connect(&etcd_endpoints, None)
+         .await
+         .context(error::ConnectEtcdSnafu)
}
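
The split into `metasrv_builder` plus the three-argument `MetaSrvInstance::new` implies a two-step startup at the call site; a hedged sketch (assuming the builder's `build()` still returns `Result<MetaSrv>` as in the replaced code):

async fn start_metasrv(opts: MetaSrvOptions, plugins: Plugins) -> Result<MetaSrvInstance> {
    // Passing None for kv_backend preserves the old behavior of choosing
    // the memory store vs. etcd from opts.use_memory_store.
    let builder = metasrv_builder(&opts, plugins.clone(), None).await?;
    let meta_srv = builder.build().await?;
    MetaSrvInstance::new(opts, plugins, meta_srv).await
}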

View File

@@ -32,6 +32,9 @@ use crate::pubsub::Message;
#[snafu(visibility(pub))]
#[stack_trace_debug]
pub enum Error {
+ #[snafu(display("The region migration procedure aborted, reason: {}", reason))]
+ MigrationAbort { location: Location, reason: String },
#[snafu(display(
"Another procedure is opening the region: {} on peer: {}",
region_id,
@@ -43,6 +46,12 @@ pub enum Error {
region_id: RegionId,
},
+ #[snafu(display("Failed to init ddl manager"))]
+ InitDdlManager {
+     location: Location,
+     source: common_meta::error::Error,
+ },
#[snafu(display("Failed to create default catalog and schema"))]
InitMetadata {
location: Location,
@@ -292,6 +301,12 @@ pub enum Error {
location: Location,
},
+ #[snafu(display("Failed to find table route for {region_id}"))]
+ RegionRouteNotFound {
+     region_id: RegionId,
+     location: Location,
+ },
#[snafu(display("Table info not found: {}", table_id))]
TableInfoNotFound {
table_id: TableId,
@@ -652,7 +667,9 @@ impl ErrorExt for Error {
| Error::Unexpected { .. }
| Error::Txn { .. }
| Error::TableIdChanged { .. }
- | Error::RegionOpeningRace { .. } => StatusCode::Unexpected,
+ | Error::RegionOpeningRace { .. }
+ | Error::RegionRouteNotFound { .. }
+ | Error::MigrationAbort { .. } => StatusCode::Unexpected,
Error::TableNotFound { .. } => StatusCode::TableNotFound,
Error::InvalidateTableCache { source, .. } => source.status_code(),
Error::RequestDatanode { source, .. } => source.status_code(),
@@ -685,7 +702,9 @@ impl ErrorExt for Error {
| Error::UpdateTableRoute { source, .. }
| Error::GetFullTableInfo { source, .. } => source.status_code(),
- Error::InitMetadata { source, .. } => source.status_code(),
+ Error::InitMetadata { source, .. } | Error::InitDdlManager { source, .. } => {
+     source.status_code()
+ }
Error::Other { source, .. } => source.status_code(),
}

View File

@@ -298,6 +298,19 @@ impl HeartbeatMailbox {
serde_json::from_str(payload).context(DeserializeFromJsonSnafu { input: payload })
}
+ /// Parses the [Instruction] from [MailboxMessage].
+ #[cfg(test)]
+ pub(crate) fn json_instruction(msg: &MailboxMessage) -> Result<Instruction> {
+     let Payload::Json(payload) =
+         msg.payload
+             .as_ref()
+             .with_context(|| UnexpectedInstructionReplySnafu {
+                 mailbox_message: msg.to_string(),
+                 reason: format!("empty payload, msg: {msg:?}"),
+             })?;
+     serde_json::from_str(payload).context(DeserializeFromJsonSnafu { input: payload })
+ }
pub fn create(pushers: Pushers, sequence: Sequence) -> MailboxRef {
let mailbox = Arc::new(Self::new(pushers, sequence));

View File

@@ -18,6 +18,7 @@ use std::sync::Arc;
use api::v1::meta::{HeartbeatRequest, RegionLease, Role}; use api::v1::meta::{HeartbeatRequest, RegionLease, Role};
use async_trait::async_trait; use async_trait::async_trait;
use common_meta::key::TableMetadataManagerRef; use common_meta::key::TableMetadataManagerRef;
use common_telemetry::info;
use store_api::region_engine::{GrantedRegion, RegionRole}; use store_api::region_engine::{GrantedRegion, RegionRole};
use store_api::storage::RegionId; use store_api::storage::RegionId;
@@ -60,19 +61,19 @@ fn flip_role(role: RegionRole) -> RegionRole {
///
/// - If a region is in an `operable` set, it will be granted a `flip_role(current)`([RegionRole]);
/// otherwise, it will be granted a `current`([RegionRole]).
-/// - If a region is in a `closable` set, it won't be granted.
+/// - If a region is in a `closeable` set, it won't be granted.
fn grant(
granted_regions: &mut Vec<GrantedRegion>,
operable: &HashSet<RegionId>,
-closable: &HashSet<RegionId>,
+closeable: &HashSet<RegionId>,
regions: &[RegionId],
current: RegionRole,
) {
for region in regions {
if operable.contains(region) {
granted_regions.push(GrantedRegion::new(*region, flip_role(current)));
-} else if closable.contains(region) {
-// Filters out the closable regions.
+} else if closeable.contains(region) {
+// Filters out the closeable regions.
} else {
granted_regions.push(GrantedRegion::new(*region, current))
}
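For orientation: `grant` above partitions a datanode's regions three ways. Operable regions have their role flipped, closeable regions are withheld from the lease entirely, and everything else keeps its current role. A self-contained sketch of that decision table, using simplified stand-in types (`Role`, plain `u64` region ids) instead of the real `store_api` ones:

    use std::collections::HashSet;

    #[derive(Clone, Copy, Debug, PartialEq)]
    enum Role {
        Leader,
        Follower,
    }

    fn flip(role: Role) -> Role {
        match role {
            Role::Leader => Role::Follower,
            Role::Follower => Role::Leader,
        }
    }

    // Mirrors the decision table: operable regions get the flipped role,
    // closeable regions are withheld from the lease, the rest keep `current`.
    fn grant_sketch(
        operable: &HashSet<u64>,
        closeable: &HashSet<u64>,
        regions: &[u64],
        current: Role,
    ) -> Vec<(u64, Role)> {
        regions
            .iter()
            .filter(|r| !closeable.contains(*r))
            .map(|r| {
                let role = if operable.contains(r) {
                    flip(current)
                } else {
                    current
                };
                (*r, role)
            })
            .collect()
    }

    fn main() {
        let operable = HashSet::from([1]);
        let closeable = HashSet::from([2]);
        let granted = grant_sketch(&operable, &closeable, &[1, 2, 3], Role::Leader);
        // Region 1 is downgraded to follower, region 2 is withheld, region 3 stays a leader.
        assert_eq!(granted, vec![(1, Role::Follower), (3, Role::Leader)]);
    }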
@@ -111,7 +112,7 @@ impl HeartbeatHandler for RegionLeaseHandler {
let leaders = leaders.into_iter().flatten().collect::<Vec<_>>();
-let (downgradable, closable) = self
+let (downgradable, closeable) = self
.region_lease_keeper
.find_staled_leader_regions(cluster_id, datanode_id, &leaders)
.await?;
@@ -119,32 +120,44 @@ impl HeartbeatHandler for RegionLeaseHandler {
grant(
&mut granted_regions,
&downgradable,
-&closable,
+&closeable,
&leaders,
RegionRole::Leader,
);
-inactive_regions.extend(closable);
+if !closeable.is_empty() {
+info!(
+"Granting region lease, found closeable leader regions: {:?} on datanode {}",
+closeable, datanode_id
+);
+}
+inactive_regions.extend(closeable);
let followers = followers.into_iter().flatten().collect::<Vec<_>>();
-let (upgradeable, closable) = self
+let (upgradeable, closeable) = self
.region_lease_keeper
.find_staled_follower_regions(cluster_id, datanode_id, &followers)
.await?;
-// If a region is opening, it will be filtered out from the closable regions set.
-let closable = self
+// If a region is opening, it will be filtered out from the closeable regions set.
+let closeable = self
.opening_region_keeper
-.filter_opening_regions(datanode_id, closable);
+.filter_opening_regions(datanode_id, closeable);
grant(
&mut granted_regions,
&upgradeable,
-&closable,
+&closeable,
&followers,
RegionRole::Follower,
);
-inactive_regions.extend(closable);
+if !closeable.is_empty() {
+info!(
+"Granting region lease, found closeable follower regions {:?} on datanode {}",
+closeable, datanode_id
+);
+}
+inactive_regions.extend(closeable);
acc.inactive_region_ids = inactive_regions;
acc.region_lease = Some(RegionLease {
@@ -154,6 +167,7 @@ impl HeartbeatHandler for RegionLeaseHandler {
.collect::<Vec<_>>(),
duration_since_epoch: req.duration_since_epoch,
lease_seconds: self.region_lease_seconds,
closeable_region_ids: vec![],
});
Ok(HandleControl::Continue)


@@ -15,6 +15,8 @@
#![feature(async_closure)]
#![feature(result_flattening)]
#![feature(assert_matches)]
#![feature(option_take_if)]
#![feature(extract_if)]
pub mod bootstrap;
mod cache_invalidator;


@@ -19,19 +19,22 @@ use std::time::Duration;
use client::client_manager::DatanodeClients;
use common_base::Plugins;
use common_grpc::channel_manager::ChannelConfig;
use common_meta::datanode_manager::DatanodeManagerRef;
use common_meta::ddl::TableMetadataAllocatorRef;
use common_meta::ddl_manager::{DdlManager, DdlManagerRef};
use common_meta::distributed_time_constants;
use common_meta::key::{TableMetadataManager, TableMetadataManagerRef};
use common_meta::kv_backend::memory::MemoryKvBackend;
use common_meta::kv_backend::{KvBackendRef, ResettableKvBackendRef};
-use common_meta::sequence::{Sequence, SequenceRef};
+use common_meta::sequence::Sequence;
use common_meta::state_store::KvStateStore;
use common_procedure::local::{LocalManager, ManagerConfig};
use common_procedure::ProcedureManagerRef;
use snafu::ResultExt;
use crate::cache_invalidator::MetasrvCacheInvalidator;
use crate::cluster::{MetaPeerClientBuilder, MetaPeerClientRef};
-use crate::error::Result;
+use crate::error::{self, Result};
use crate::greptimedb_telemetry::get_greptimedb_telemetry_task;
use crate::handler::check_leader_handler::CheckLeaderHandler;
use crate::handler::collect_stats_handler::CollectStatsHandler;
@@ -69,8 +72,9 @@ pub struct MetaSrvBuilder {
election: Option<ElectionRef>,
meta_peer_client: Option<MetaPeerClientRef>,
lock: Option<DistLockRef>,
-datanode_clients: Option<Arc<DatanodeClients>>,
+datanode_manager: Option<DatanodeManagerRef>,
plugins: Option<Plugins>,
table_metadata_allocator: Option<TableMetadataAllocatorRef>,
}
impl MetaSrvBuilder {
@@ -84,8 +88,9 @@ impl MetaSrvBuilder {
election: None,
options: None,
lock: None,
-datanode_clients: None,
+datanode_manager: None,
plugins: None,
table_metadata_allocator: None,
}
}
@@ -129,8 +134,8 @@ impl MetaSrvBuilder {
self
}
-pub fn datanode_clients(mut self, clients: Arc<DatanodeClients>) -> Self {
-self.datanode_clients = Some(clients);
+pub fn datanode_manager(mut self, datanode_manager: DatanodeManagerRef) -> Self {
+self.datanode_manager = Some(datanode_manager);
self
}
@@ -139,6 +144,14 @@ impl MetaSrvBuilder {
self
}
pub fn table_metadata_allocator(
mut self,
table_metadata_allocator: TableMetadataAllocatorRef,
) -> Self {
self.table_metadata_allocator = Some(table_metadata_allocator);
self
}
pub async fn build(self) -> Result<MetaSrv> {
let started = Arc::new(AtomicBool::new(false));
@@ -151,8 +164,9 @@ impl MetaSrvBuilder {
selector,
handler_group,
lock,
-datanode_clients,
+datanode_manager,
plugins,
table_metadata_allocator,
} = self;
let options = options.unwrap_or_default();
@@ -188,16 +202,23 @@ impl MetaSrvBuilder {
meta_peer_client: meta_peer_client.clone(),
table_id: None,
};
let table_metadata_allocator = table_metadata_allocator.unwrap_or_else(|| {
Arc::new(MetaSrvTableMetadataAllocator::new(
selector_ctx.clone(),
selector.clone(),
table_id_sequence.clone(),
))
});
let ddl_manager = build_ddl_manager(
&options,
-datanode_clients,
+datanode_manager,
&procedure_manager,
&mailbox,
&table_metadata_manager,
-(&selector, &selector_ctx),
-&table_id_sequence,
-);
+table_metadata_allocator,
+)?;
+let _ = ddl_manager.try_start();
let opening_region_keeper = Arc::new(OpeningRegionKeeper::default());
let handler_group = match handler_group {
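The builder change follows the usual optional-dependency pattern: an injected `TableMetadataAllocatorRef` wins, otherwise `build()` falls back to the metasrv-local `MetaSrvTableMetadataAllocator`. A minimal self-contained sketch of that shape, with stand-in types:

    use std::sync::Arc;

    // Stand-ins for the real metasrv types; the point is the fallback pattern.
    trait Allocator: Send + Sync {}
    struct DefaultAllocator;
    impl Allocator for DefaultAllocator {}

    #[derive(Default)]
    struct Builder {
        allocator: Option<Arc<dyn Allocator>>,
    }

    impl Builder {
        fn table_metadata_allocator(mut self, a: Arc<dyn Allocator>) -> Self {
            self.allocator = Some(a);
            self
        }

        fn build(self) -> Arc<dyn Allocator> {
            // Same shape as the diff: an injected allocator wins, otherwise
            // the builder constructs the local default.
            self.allocator.unwrap_or_else(|| Arc::new(DefaultAllocator))
        }
    }

    fn main() {
        let default = Builder::default().build();
        let injected = Builder::default()
            .table_metadata_allocator(Arc::new(DefaultAllocator))
            .build();
        let _ = (default, injected);
    }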
@@ -324,13 +345,12 @@ fn build_procedure_manager(
fn build_ddl_manager(
options: &MetaSrvOptions,
-datanode_clients: Option<Arc<DatanodeClients>>,
+datanode_clients: Option<DatanodeManagerRef>,
procedure_manager: &ProcedureManagerRef,
mailbox: &MailboxRef,
table_metadata_manager: &TableMetadataManagerRef,
-(selector, selector_ctx): (&SelectorRef, &SelectorContext),
-table_id_sequence: &SequenceRef,
-) -> DdlManagerRef {
+table_metadata_allocator: TableMetadataAllocatorRef,
+) -> Result<DdlManagerRef> {
let datanode_clients = datanode_clients.unwrap_or_else(|| {
let datanode_client_channel_config = ChannelConfig::new()
.timeout(Duration::from_millis(
@@ -349,18 +369,15 @@ fn build_ddl_manager(
},
));
-let table_meta_allocator = Arc::new(MetaSrvTableMetadataAllocator::new(
-selector_ctx.clone(),
-selector.clone(),
-table_id_sequence.clone(),
-));
-Arc::new(DdlManager::new(
-procedure_manager.clone(),
-datanode_clients,
-cache_invalidator,
-table_metadata_manager.clone(),
-table_meta_allocator,
-))
+Ok(Arc::new(
+DdlManager::try_new(
+procedure_manager.clone(),
+datanode_clients,
+cache_invalidator,
+table_metadata_manager.clone(),
+table_metadata_allocator,
+)
+.context(error::InitDdlManagerSnafu)?,
+))
}


@@ -34,4 +34,19 @@ lazy_static! {
pub static ref METRIC_META_LEADER_CACHED_KV_LOAD: HistogramVec =
register_histogram_vec!("meta_leader_cache_kv_load", "meta load cache", &["prefix"])
.unwrap();
pub static ref METRIC_META_LOAD_FOLLOWER_METADATA: Histogram = register_histogram!(
"meta_load_follower_metadata",
"meta load follower regions metadata elapsed"
)
.unwrap();
pub static ref METRIC_META_LOAD_LEADER_METADATA: Histogram = register_histogram!(
"meta_load_leader_metadata",
"meta load leader regions metadata elapsed"
)
.unwrap();
pub static ref METRIC_META_KV_CACHE_BATCH_GET_HIT_RATE: Gauge = register_gauge!(
"meta_kv_cache_batch_get_hit_rate",
"meta kv cache batch get hit rate"
)
.unwrap();
}
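The two new histograms measure elapsed time; the usual way to feed such a metric is a timer guard that observes on drop. A small sketch with the `prometheus` crate (the metric name here is hypothetical, mirroring the statics above):

    use lazy_static::lazy_static;
    use prometheus::{register_histogram, Histogram};

    lazy_static! {
        // Hypothetical metric, following the same registration pattern.
        static ref DEMO_LOAD_ELAPSED: Histogram = register_histogram!(
            "demo_load_elapsed",
            "demo load elapsed seconds"
        )
        .unwrap();
    }

    fn load_metadata() {
        // The guard observes the elapsed seconds into the histogram when dropped.
        let _timer = DEMO_LOAD_ELAPSED.start_timer();
        // ... actual loading work would happen here ...
    }

    fn main() {
        load_metadata();
    }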


@@ -70,7 +70,7 @@ pub async fn mock(
};
let builder = match datanode_clients {
-Some(clients) => builder.datanode_clients(clients),
+Some(clients) => builder.datanode_manager(clients),
None => builder,
};


@@ -105,7 +105,7 @@ impl UpdateRegionMetadata {
region_storage_path: self.region_storage_path.to_string(),
region_options: self.region_options.clone(),
},
-table_route_value,
+&table_route_value,
new_region_routes,
&self.region_options,
)


@@ -13,16 +13,22 @@
// limitations under the License.
pub(crate) mod downgrade_leader_region;
pub(crate) mod migration_abort;
pub(crate) mod migration_end;
pub(crate) mod migration_start;
pub(crate) mod open_candidate_region;
#[cfg(test)]
pub(crate) mod test_util;
pub(crate) mod update_metadata;
pub(crate) mod upgrade_candidate_region;
use std::any::Any;
use std::fmt::Debug;
use std::time::Duration;
use api::v1::meta::MailboxMessage;
use common_meta::instruction::Instruction;
use common_meta::key::table_info::TableInfoValue;
use common_meta::key::table_route::TableRouteValue;
use common_meta::key::{DeserializedValueWithBytes, TableMetadataManagerRef};
use common_meta::peer::Peer;
@@ -34,12 +40,13 @@ use common_procedure::{Context as ProcedureContext, LockKey, Procedure, Status};
use serde::{Deserialize, Serialize};
use snafu::{location, Location, OptionExt, ResultExt};
use store_api::storage::RegionId;
use tokio::time::Instant;
use self::migration_start::RegionMigrationStart;
use crate::error::{self, Error, Result};
use crate::procedure::utils::region_lock_key;
use crate::region::lease_keeper::{OpeningRegionGuard, OpeningRegionKeeperRef};
-use crate::service::mailbox::MailboxRef;
+use crate::service::mailbox::{BroadcastChannel, MailboxRef};
/// It's shared in each step and available even after recovering.
///
@@ -78,8 +85,36 @@ pub struct VolatileContext {
/// the corresponding [RegionRoute](common_meta::rpc::router::RegionRoute) of the opening region
/// was written into [TableRouteValue](common_meta::key::table_route::TableRouteValue).
opening_region_guard: Option<OpeningRegionGuard>,
-/// `table_route_info` is stored via previous steps for future use.
-table_route_info: Option<DeserializedValueWithBytes<TableRouteValue>>,
+/// `table_route` is stored via previous steps for future use.
+table_route: Option<DeserializedValueWithBytes<TableRouteValue>>,
/// `table_info` is stored via previous steps for future use.
///
/// `table_info` should remain unchanged during the procedure;
/// no other DDL procedure is executed concurrently for the current table.
table_info: Option<DeserializedValueWithBytes<TableInfoValue>>,
/// The deadline of the leader region lease.
leader_region_lease_deadline: Option<Instant>,
/// The last_entry_id of the leader region.
leader_region_last_entry_id: Option<u64>,
}
impl VolatileContext {
/// Sets the `leader_region_lease_deadline` if it does not exist.
pub fn set_leader_region_lease_deadline(&mut self, lease_timeout: Duration) {
if self.leader_region_lease_deadline.is_none() {
self.leader_region_lease_deadline = Some(Instant::now() + lease_timeout);
}
}
/// Resets the `leader_region_lease_deadline`.
pub fn reset_leader_region_lease_deadline(&mut self) {
self.leader_region_lease_deadline = None;
}
/// Sets the `leader_region_last_entry_id`.
pub fn set_last_entry_id(&mut self, last_entry_id: u64) {
self.leader_region_last_entry_id = Some(last_entry_id)
}
}
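Note that `set_leader_region_lease_deadline` is deliberately set-once: a procedure replayed after recovery must not extend or shorten a lease wait that is already in flight. A tiny sketch of that invariant with `std::time` stand-ins:

    use std::time::{Duration, Instant};

    #[derive(Default)]
    struct Volatile {
        deadline: Option<Instant>,
    }

    impl Volatile {
        // Mirrors `set_leader_region_lease_deadline`: only the first call wins.
        fn set_deadline(&mut self, timeout: Duration) {
            if self.deadline.is_none() {
                self.deadline = Some(Instant::now() + timeout);
            }
        }
    }

    fn main() {
        let mut v = Volatile::default();
        v.set_deadline(Duration::from_secs(5));
        let first = v.deadline.unwrap();
        v.set_deadline(Duration::from_secs(60)); // ignored: deadline already set
        assert_eq!(v.deadline.unwrap(), first);
    }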
/// Used to generate new [Context].
@@ -127,7 +162,7 @@ impl Context {
&self.server_addr
}
-/// Returns the `table_route_value` of [VolatileContext] if any.
+/// Returns the `table_route` of [VolatileContext] if any.
/// Otherwise, returns the value retrieved from remote.
///
/// Retry:
@@ -135,7 +170,7 @@ impl Context {
pub async fn get_table_route_value(
&mut self,
) -> Result<&DeserializedValueWithBytes<TableRouteValue>> {
-let table_route_value = &mut self.volatile_ctx.table_route_info;
+let table_route_value = &mut self.volatile_ctx.table_route;
if table_route_value.is_none() {
let table_id = self.persistent_ctx.region_id.table_id();
@@ -157,9 +192,45 @@ impl Context {
Ok(table_route_value.as_ref().unwrap())
}
-/// Removes the `table_route_value` of [VolatileContext], returns true if any.
+/// Removes the `table_route` of [VolatileContext], returns true if any.
pub fn remove_table_route_value(&mut self) -> bool {
-let value = self.volatile_ctx.table_route_info.take();
+let value = self.volatile_ctx.table_route.take();
value.is_some()
}
/// Returns the `table_info` of [VolatileContext] if any.
/// Otherwise, returns the value retrieved from remote.
///
/// Retry:
/// - Failed to retrieve the metadata of table.
pub async fn get_table_info_value(
&mut self,
) -> Result<&DeserializedValueWithBytes<TableInfoValue>> {
let table_info_value = &mut self.volatile_ctx.table_info;
if table_info_value.is_none() {
let table_id = self.persistent_ctx.region_id.table_id();
let table_info = self
.table_metadata_manager
.table_info_manager()
.get(table_id)
.await
.context(error::TableMetadataManagerSnafu)
.map_err(|e| error::Error::RetryLater {
reason: e.to_string(),
location: location!(),
})?
.context(error::TableInfoNotFoundSnafu { table_id })?;
*table_info_value = Some(table_info);
}
Ok(table_info_value.as_ref().unwrap())
}
/// Removes the `table_info` of [VolatileContext], returns true if any.
pub fn remove_table_info_value(&mut self) -> bool {
let value = self.volatile_ctx.table_info.take();
value.is_some()
}
@@ -167,6 +238,27 @@ impl Context {
pub fn region_id(&self) -> RegionId {
self.persistent_ctx.region_id
}
/// Broadcasts the invalidate table cache message.
pub async fn invalidate_table_cache(&self) -> Result<()> {
let table_id = self.region_id().table_id();
let instruction = Instruction::InvalidateTableIdCache(table_id);
let msg = &MailboxMessage::json_message(
"Invalidate Table Cache",
&format!("Metasrv@{}", self.server_addr()),
"Frontend broadcast",
common_time::util::current_time_millis(),
&instruction,
)
.with_context(|_| error::SerializeToJsonSnafu {
input: instruction.to_string(),
})?;
self.mailbox
.broadcast(&BroadcastChannel::Frontend, msg)
.await
}
}
#[async_trait::async_trait]
@@ -278,7 +370,9 @@ mod tests {
use super::migration_end::RegionMigrationEnd;
use super::*;
use crate::handler::HeartbeatMailbox;
use crate::procedure::region_migration::test_util::TestingEnv;
use crate::service::mailbox::Channel;
fn new_persistent_context() -> PersistentContext {
PersistentContext {
@@ -378,4 +472,29 @@ mod tests {
assert_eq!(procedure.context.persistent_ctx.cluster_id, 2);
assert_matches!(status.unwrap(), Status::Done);
}
#[tokio::test]
async fn test_broadcast_invalidate_table_cache() {
let mut env = TestingEnv::new();
let persistent_context = test_util::new_persistent_context(1, 2, RegionId::new(1024, 1));
let ctx = env.context_factory().new_context(persistent_context);
let mailbox_ctx = env.mailbox_context();
// No receivers.
ctx.invalidate_table_cache().await.unwrap();
let (tx, mut rx) = tokio::sync::mpsc::channel(1);
mailbox_ctx
.insert_heartbeat_response_receiver(Channel::Frontend(1), tx)
.await;
ctx.invalidate_table_cache().await.unwrap();
let resp = rx.recv().await.unwrap().unwrap();
let msg = resp.mailbox_message.unwrap();
let instruction = HeartbeatMailbox::json_instruction(&msg).unwrap();
assert_matches!(instruction, Instruction::InvalidateTableIdCache(1024));
}
}


@@ -13,23 +13,506 @@
// limitations under the License.
use std::any::Any;
use std::time::Duration;
use api::v1::meta::MailboxMessage;
use common_meta::distributed_time_constants::{MAILBOX_RTT_SECS, REGION_LEASE_SECS};
use common_meta::instruction::{
DowngradeRegion, DowngradeRegionReply, Instruction, InstructionReply,
};
use common_telemetry::warn;
use serde::{Deserialize, Serialize};
use snafu::ResultExt;
use tokio::time::sleep;
-use crate::error::Result;
+use super::upgrade_candidate_region::UpgradeCandidateRegion;
+use crate::error::{self, Result};
use crate::handler::HeartbeatMailbox;
use crate::procedure::region_migration::{Context, State};
use crate::service::mailbox::Channel;
const DOWNGRADE_LEADER_REGION_TIMEOUT: Duration = Duration::from_secs(MAILBOX_RTT_SECS);
#[derive(Debug, Serialize, Deserialize)]
-pub struct DowngradeLeaderRegion;
+pub struct DowngradeLeaderRegion {
// The optimistic retry times.
optimistic_retry: usize,
// The retry initial interval.
retry_initial_interval: Duration,
}
impl Default for DowngradeLeaderRegion {
fn default() -> Self {
Self {
optimistic_retry: 3,
retry_initial_interval: Duration::from_millis(500),
}
}
}
#[async_trait::async_trait]
#[typetag::serde]
impl State for DowngradeLeaderRegion {
-async fn next(&mut self, _ctx: &mut Context) -> Result<Box<dyn State>> {
-todo!()
+async fn next(&mut self, ctx: &mut Context) -> Result<Box<dyn State>> {
+// Ensures the `leader_region_lease_deadline` must exist after recovering.
ctx.volatile_ctx
.set_leader_region_lease_deadline(Duration::from_secs(REGION_LEASE_SECS));
self.downgrade_region_with_retry(ctx).await;
// Safety: must exist.
if let Some(deadline) = ctx.volatile_ctx.leader_region_lease_deadline.as_ref() {
tokio::time::sleep_until(*deadline).await;
}
Ok(Box::<UpgradeCandidateRegion>::default())
}
fn as_any(&self) -> &dyn Any {
self
}
}
impl DowngradeLeaderRegion {
/// Builds downgrade region instruction.
fn build_downgrade_region_instruction(&self, ctx: &Context) -> Instruction {
let pc = &ctx.persistent_ctx;
let region_id = pc.region_id;
Instruction::DowngradeRegion(DowngradeRegion { region_id })
}
/// Tries to downgrade a leader region.
///
/// Retry:
/// - [MailboxTimeout](error::Error::MailboxTimeout), Timeout.
/// - Failed to downgrade region on the Datanode.
///
/// Abort:
/// - [PusherNotFound](error::Error::PusherNotFound), The datanode is unreachable.
/// - [PushMessage](error::Error::PushMessage), The receiver is dropped.
/// - [MailboxReceiver](error::Error::MailboxReceiver), The sender is dropped without sending (impossible).
/// - [UnexpectedInstructionReply](error::Error::UnexpectedInstructionReply).
/// - Invalid JSON.
async fn downgrade_region(
&self,
ctx: &mut Context,
downgrade_instruction: &Instruction,
) -> Result<()> {
let pc = &ctx.persistent_ctx;
let region_id = pc.region_id;
let leader = &pc.from_peer;
let msg = MailboxMessage::json_message(
&format!("Downgrade leader region: {}", region_id),
&format!("Meta@{}", ctx.server_addr()),
&format!("Datanode-{}@{}", leader.id, leader.addr),
common_time::util::current_time_millis(),
downgrade_instruction,
)
.with_context(|_| error::SerializeToJsonSnafu {
input: downgrade_instruction.to_string(),
})?;
let ch = Channel::Datanode(leader.id);
let receiver = ctx
.mailbox
.send(&ch, msg, DOWNGRADE_LEADER_REGION_TIMEOUT)
.await?;
match receiver.await? {
Ok(msg) => {
let reply = HeartbeatMailbox::json_reply(&msg)?;
let InstructionReply::DowngradeRegion(DowngradeRegionReply {
last_entry_id,
exists,
error,
}) = reply
else {
return error::UnexpectedInstructionReplySnafu {
mailbox_message: msg.to_string(),
reason: "expect downgrade region reply",
}
.fail();
};
if error.is_some() {
return error::RetryLaterSnafu {
reason: format!(
"Failed to downgrade the region {} on Datanode {:?}, error: {:?}",
region_id, leader, error
),
}
.fail();
}
if !exists {
warn!(
"Trying to downgrade the region {} on Datanode {}, but region doesn't exist!",
region_id, leader
);
}
if let Some(last_entry_id) = last_entry_id {
ctx.volatile_ctx.set_last_entry_id(last_entry_id);
}
Ok(())
}
Err(error::Error::MailboxTimeout { .. }) => {
let reason = format!(
"Mailbox received timeout for downgrade leader region {region_id} on datanode {:?}",
leader,
);
error::RetryLaterSnafu { reason }.fail()
}
Err(err) => Err(err),
}
}
/// Downgrades a leader region.
///
/// Fast path:
/// - Waits for the reply to the downgrade instruction.
///
/// Slow path:
/// - Waits until the lease of the leader region expires.
async fn downgrade_region_with_retry(&self, ctx: &mut Context) {
let instruction = self.build_downgrade_region_instruction(ctx);
let mut retry = 0;
loop {
if let Err(err) = self.downgrade_region(ctx, &instruction).await {
retry += 1;
if err.is_retryable() && retry < self.optimistic_retry {
warn!("Failed to downgrade region, error: {err:?}, retry later");
sleep(self.retry_initial_interval).await;
} else {
break;
}
} else {
// Resets the deadline.
ctx.volatile_ctx.reset_leader_region_lease_deadline();
break;
}
}
}
}
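The fast/slow split in `next` boils down to `tokio::time::sleep_until`: a successful downgrade resets the lease deadline, while a failure leaves it set so the procedure parks until the old leader's lease must have expired. A standalone sketch of that timing pattern (names are illustrative):

    use std::time::Duration;
    use tokio::time::{sleep_until, Instant};

    // Fast path: a cleared deadline means the downgrade succeeded, return at once.
    // Slow path: the old leader may still serve writes, so park until its lease
    // must have expired before upgrading the candidate.
    async fn wait_out_lease(deadline: Option<Instant>) {
        if let Some(deadline) = deadline {
            sleep_until(deadline).await;
        }
    }

    #[tokio::main]
    async fn main() {
        // Failed downgrade: wait out a (shortened, for demo) lease.
        wait_out_lease(Some(Instant::now() + Duration::from_millis(50))).await;
        // Successful downgrade: deadline was reset, no wait.
        wait_out_lease(None).await;
    }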
#[cfg(test)]
mod tests {
use std::assert_matches::assert_matches;
use api::v1::meta::mailbox_message::Payload;
use common_meta::peer::Peer;
use common_time::util::current_time_millis;
use store_api::storage::RegionId;
use tokio::time::Instant;
use super::*;
use crate::error::Error;
use crate::procedure::region_migration::test_util::{
new_close_region_reply, send_mock_reply, TestingEnv,
};
use crate::procedure::region_migration::{ContextFactory, PersistentContext};
fn new_persistent_context() -> PersistentContext {
PersistentContext {
from_peer: Peer::empty(1),
to_peer: Peer::empty(2),
region_id: RegionId::new(1024, 1),
cluster_id: 0,
}
}
fn new_downgrade_region_reply(
id: u64,
last_entry_id: Option<u64>,
exist: bool,
error: Option<String>,
) -> MailboxMessage {
MailboxMessage {
id,
subject: "mock".to_string(),
from: "datanode".to_string(),
to: "meta".to_string(),
timestamp_millis: current_time_millis(),
payload: Some(Payload::Json(
serde_json::to_string(&InstructionReply::DowngradeRegion(DowngradeRegionReply {
last_entry_id,
exists: exist,
error,
}))
.unwrap(),
)),
}
}
#[tokio::test]
async fn test_datanode_is_unreachable() {
let state = DowngradeLeaderRegion::default();
let persistent_context = new_persistent_context();
let env = TestingEnv::new();
let mut ctx = env.context_factory().new_context(persistent_context);
let instruction = &state.build_downgrade_region_instruction(&ctx);
let err = state
.downgrade_region(&mut ctx, instruction)
.await
.unwrap_err();
assert_matches!(err, Error::PusherNotFound { .. });
assert!(!err.is_retryable());
}
#[tokio::test]
async fn test_pusher_dropped() {
let state = DowngradeLeaderRegion::default();
let persistent_context = new_persistent_context();
let from_peer_id = persistent_context.from_peer.id;
let mut env = TestingEnv::new();
let mut ctx = env.context_factory().new_context(persistent_context);
let mailbox_ctx = env.mailbox_context();
let (tx, rx) = tokio::sync::mpsc::channel(1);
mailbox_ctx
.insert_heartbeat_response_receiver(Channel::Datanode(from_peer_id), tx)
.await;
drop(rx);
let instruction = &state.build_downgrade_region_instruction(&ctx);
let err = state
.downgrade_region(&mut ctx, instruction)
.await
.unwrap_err();
assert_matches!(err, Error::PushMessage { .. });
assert!(!err.is_retryable());
}
#[tokio::test]
async fn test_unexpected_instruction_reply() {
let state = DowngradeLeaderRegion::default();
let persistent_context = new_persistent_context();
let from_peer_id = persistent_context.from_peer.id;
let mut env = TestingEnv::new();
let mut ctx = env.context_factory().new_context(persistent_context);
let mailbox_ctx = env.mailbox_context();
let mailbox = mailbox_ctx.mailbox().clone();
let (tx, rx) = tokio::sync::mpsc::channel(1);
mailbox_ctx
.insert_heartbeat_response_receiver(Channel::Datanode(from_peer_id), tx)
.await;
// Sends an incorrect reply.
send_mock_reply(mailbox, rx, |id| Ok(new_close_region_reply(id)));
let instruction = &state.build_downgrade_region_instruction(&ctx);
let err = state
.downgrade_region(&mut ctx, instruction)
.await
.unwrap_err();
assert_matches!(err, Error::UnexpectedInstructionReply { .. });
assert!(!err.is_retryable());
}
#[tokio::test]
async fn test_instruction_exceeded_deadline() {
let state = DowngradeLeaderRegion::default();
let persistent_context = new_persistent_context();
let from_peer_id = persistent_context.from_peer.id;
let mut env = TestingEnv::new();
let mut ctx = env.context_factory().new_context(persistent_context);
let mailbox_ctx = env.mailbox_context();
let mailbox = mailbox_ctx.mailbox().clone();
let (tx, rx) = tokio::sync::mpsc::channel(1);
mailbox_ctx
.insert_heartbeat_response_receiver(Channel::Datanode(from_peer_id), tx)
.await;
send_mock_reply(mailbox, rx, |id| {
Err(error::MailboxTimeoutSnafu { id }.build())
});
let instruction = &state.build_downgrade_region_instruction(&ctx);
let err = state
.downgrade_region(&mut ctx, instruction)
.await
.unwrap_err();
assert_matches!(err, Error::RetryLater { .. });
assert!(err.is_retryable());
}
#[tokio::test]
async fn test_downgrade_region_failed() {
let state = DowngradeLeaderRegion::default();
let persistent_context = new_persistent_context();
let from_peer_id = persistent_context.from_peer.id;
let mut env = TestingEnv::new();
let mut ctx = env.context_factory().new_context(persistent_context);
let mailbox_ctx = env.mailbox_context();
let mailbox = mailbox_ctx.mailbox().clone();
let (tx, rx) = tokio::sync::mpsc::channel(1);
mailbox_ctx
.insert_heartbeat_response_receiver(Channel::Datanode(from_peer_id), tx)
.await;
send_mock_reply(mailbox, rx, |id| {
Ok(new_downgrade_region_reply(
id,
None,
false,
Some("test mocked".to_string()),
))
});
let instruction = &state.build_downgrade_region_instruction(&ctx);
let err = state
.downgrade_region(&mut ctx, instruction)
.await
.unwrap_err();
assert_matches!(err, Error::RetryLater { .. });
assert!(err.is_retryable());
assert!(err.to_string().contains("test mocked"));
}
#[tokio::test]
async fn test_downgrade_region_with_retry_fast_path() {
let state = DowngradeLeaderRegion::default();
let persistent_context = new_persistent_context();
let from_peer_id = persistent_context.from_peer.id;
let mut env = TestingEnv::new();
let mut ctx = env.context_factory().new_context(persistent_context);
let mailbox_ctx = env.mailbox_context();
let mailbox = mailbox_ctx.mailbox().clone();
let (tx, mut rx) = tokio::sync::mpsc::channel(1);
mailbox_ctx
.insert_heartbeat_response_receiver(Channel::Datanode(from_peer_id), tx)
.await;
common_runtime::spawn_bg(async move {
// retry: 0.
let resp = rx.recv().await.unwrap().unwrap();
let reply_id = resp.mailbox_message.unwrap().id;
mailbox
.on_recv(
reply_id,
Err(error::MailboxTimeoutSnafu { id: reply_id }.build()),
)
.await
.unwrap();
// retry: 1.
let resp = rx.recv().await.unwrap().unwrap();
let reply_id = resp.mailbox_message.unwrap().id;
mailbox
.on_recv(
reply_id,
Ok(new_downgrade_region_reply(reply_id, Some(1), true, None)),
)
.await
.unwrap();
});
state.downgrade_region_with_retry(&mut ctx).await;
assert_eq!(ctx.volatile_ctx.leader_region_last_entry_id, Some(1));
assert!(ctx.volatile_ctx.leader_region_lease_deadline.is_none());
}
#[tokio::test]
async fn test_downgrade_region_with_retry_slow_path() {
let state = DowngradeLeaderRegion {
optimistic_retry: 3,
retry_initial_interval: Duration::from_millis(100),
};
let persistent_context = new_persistent_context();
let from_peer_id = persistent_context.from_peer.id;
let mut env = TestingEnv::new();
let mut ctx = env.context_factory().new_context(persistent_context);
let mailbox_ctx = env.mailbox_context();
let mailbox = mailbox_ctx.mailbox().clone();
let (tx, mut rx) = tokio::sync::mpsc::channel(1);
mailbox_ctx
.insert_heartbeat_response_receiver(Channel::Datanode(from_peer_id), tx)
.await;
common_runtime::spawn_bg(async move {
for _ in 0..3 {
let resp = rx.recv().await.unwrap().unwrap();
let reply_id = resp.mailbox_message.unwrap().id;
mailbox
.on_recv(
reply_id,
Err(error::MailboxTimeoutSnafu { id: reply_id }.build()),
)
.await
.unwrap();
}
});
ctx.volatile_ctx
.set_leader_region_lease_deadline(Duration::from_secs(5));
let expected_deadline = ctx.volatile_ctx.leader_region_lease_deadline.unwrap();
state.downgrade_region_with_retry(&mut ctx).await;
assert_eq!(ctx.volatile_ctx.leader_region_last_entry_id, None);
// Should remain unchanged.
assert_eq!(
ctx.volatile_ctx.leader_region_lease_deadline.unwrap(),
expected_deadline
)
}
#[tokio::test]
async fn test_next_upgrade_candidate_state() {
let mut state = Box::<DowngradeLeaderRegion>::default();
let persistent_context = new_persistent_context();
let from_peer_id = persistent_context.from_peer.id;
let mut env = TestingEnv::new();
let mut ctx = env.context_factory().new_context(persistent_context);
let mailbox_ctx = env.mailbox_context();
let mailbox = mailbox_ctx.mailbox().clone();
let (tx, rx) = tokio::sync::mpsc::channel(1);
mailbox_ctx
.insert_heartbeat_response_receiver(Channel::Datanode(from_peer_id), tx)
.await;
send_mock_reply(mailbox, rx, |id| {
Ok(new_downgrade_region_reply(id, Some(1), true, None))
});
let timer = Instant::now();
let next = state.next(&mut ctx).await.unwrap();
let elapsed = timer.elapsed().as_secs();
assert!(elapsed < REGION_LEASE_SECS / 2);
assert_eq!(ctx.volatile_ctx.leader_region_last_entry_id, Some(1));
assert!(ctx.volatile_ctx.leader_region_lease_deadline.is_none());
let _ = next
.as_any()
.downcast_ref::<UpgradeCandidateRegion>()
.unwrap();
}
}


@@ -0,0 +1,54 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::any::Any;
use common_procedure::Status;
use serde::{Deserialize, Serialize};
use crate::error::{self, Result};
use crate::procedure::region_migration::{Context, State};
#[derive(Debug, Serialize, Deserialize)]
pub struct RegionMigrationAbort {
reason: String,
}
impl RegionMigrationAbort {
/// Returns the [RegionMigrationAbort] with `reason`.
pub fn new(reason: &str) -> Self {
Self {
reason: reason.to_string(),
}
}
}
#[async_trait::async_trait]
#[typetag::serde]
impl State for RegionMigrationAbort {
async fn next(&mut self, _: &mut Context) -> Result<Box<dyn State>> {
error::MigrationAbortSnafu {
reason: &self.reason,
}
.fail()
}
fn status(&self) -> Status {
Status::Done
}
fn as_any(&self) -> &dyn Any {
self
}
}


@@ -47,7 +47,7 @@ impl State for RegionMigrationStart {
if self.check_leader_region_on_peer(&region_route, to_peer)? {
Ok(Box::new(RegionMigrationEnd))
} else if self.check_candidate_region_on_peer(&region_route, to_peer) {
-Ok(Box::new(DowngradeLeaderRegion))
+Ok(Box::<DowngradeLeaderRegion>::default())
} else {
Ok(Box::new(OpenCandidateRegion))
}
@@ -137,16 +137,11 @@ mod tests {
use super::*;
use crate::error::Error;
-use crate::procedure::region_migration::test_util::TestingEnv;
+use crate::procedure::region_migration::test_util::{self, TestingEnv};
use crate::procedure::region_migration::{ContextFactory, PersistentContext};
fn new_persistent_context() -> PersistentContext {
-PersistentContext {
-from_peer: Peer::empty(1),
-to_peer: Peer::empty(2),
-region_id: RegionId::new(1024, 1),
-cluster_id: 0,
-}
+test_util::new_persistent_context(1, 2, RegionId::new(1024, 1))
}
#[tokio::test]


@@ -18,10 +18,11 @@ use std::time::Duration;
use api::v1::meta::MailboxMessage;
use common_meta::ddl::utils::region_storage_path;
use common_meta::distributed_time_constants::MAILBOX_RTT_SECS;
use common_meta::instruction::{Instruction, InstructionReply, OpenRegion, SimpleReply};
use common_meta::RegionIdent;
use serde::{Deserialize, Serialize};
-use snafu::{location, Location, OptionExt, ResultExt};
+use snafu::{OptionExt, ResultExt};
use crate::error::{self, Result};
use crate::handler::HeartbeatMailbox;
@@ -29,7 +30,7 @@ use crate::procedure::region_migration::downgrade_leader_region::DowngradeLeader
use crate::procedure::region_migration::{Context, State};
use crate::service::mailbox::Channel;
-const OPEN_CANDIDATE_REGION_TIMEOUT: Duration = Duration::from_secs(1);
+const OPEN_CANDIDATE_REGION_TIMEOUT: Duration = Duration::from_secs(MAILBOX_RTT_SECS);
#[derive(Debug, Serialize, Deserialize)]
pub struct OpenCandidateRegion;
@@ -41,7 +42,7 @@ impl State for OpenCandidateRegion {
let instruction = self.build_open_region_instruction(ctx).await?;
self.open_candidate_region(ctx, instruction).await?;
-Ok(Box::new(DowngradeLeaderRegion))
+Ok(Box::<DowngradeLeaderRegion>::default())
}
fn as_any(&self) -> &dyn Any {
@@ -54,38 +55,28 @@ impl OpenCandidateRegion {
///
/// Abort(non-retry):
/// - Table Info is not found.
-async fn build_open_region_instruction(&self, ctx: &Context) -> Result<Instruction> {
+async fn build_open_region_instruction(&self, ctx: &mut Context) -> Result<Instruction> {
let pc = &ctx.persistent_ctx;
let cluster_id = pc.cluster_id;
let table_id = pc.region_id.table_id();
let region_number = pc.region_id.region_number();
-let candidate = &pc.to_peer;
-let table_info = ctx
-.table_metadata_manager
-.table_info_manager()
-.get(table_id)
-.await
-.context(error::TableMetadataManagerSnafu)
-.map_err(|e| error::Error::RetryLater {
-reason: e.to_string(),
-location: location!(),
-})?
-.context(error::TableInfoNotFoundSnafu { table_id })?
-.into_inner()
-.table_info;
+let candidate_id = pc.to_peer.id;
+let table_info_value = ctx.get_table_info_value().await?;
+let table_info = &table_info_value.table_info;
// The region storage path is immutable after the region is created.
// Therefore, it's safe to store it in `VolatileContext` for future use.
let region_storage_path =
region_storage_path(&table_info.catalog_name, &table_info.schema_name);
-let engine = table_info.meta.engine;
+let engine = table_info.meta.engine.clone();
let region_options: HashMap<String, String> = (&table_info.meta.options).into();
let open_instruction = Instruction::OpenRegion(OpenRegion::new(
RegionIdent {
cluster_id,
-datanode_id: candidate.id,
+datanode_id: candidate_id,
table_id,
region_number,
engine,
@@ -162,7 +153,7 @@ impl OpenCandidateRegion {
} else {
error::RetryLaterSnafu {
reason: format!(
-"Region {region_id} is not opened by Datanode {:?}, error: {error:?}",
+"Region {region_id} is not opened by datanode {:?}, error: {error:?}",
candidate,
),
}
@@ -171,7 +162,7 @@ impl OpenCandidateRegion {
}
Err(error::Error::MailboxTimeout { .. }) => {
let reason = format!(
-"Mailbox received timeout for open candidate region {region_id} on Datanode {:?}",
+"Mailbox received timeout for open candidate region {region_id} on datanode {:?}",
candidate,
);
error::RetryLaterSnafu { reason }.fail()
@@ -197,16 +188,13 @@ mod tests {
use super::*;
use crate::error::Error;
use crate::procedure::region_migration::downgrade_leader_region::DowngradeLeaderRegion;
-use crate::procedure::region_migration::test_util::TestingEnv;
+use crate::procedure::region_migration::test_util::{
+self, new_close_region_reply, send_mock_reply, TestingEnv,
+};
use crate::procedure::region_migration::{ContextFactory, PersistentContext};
fn new_persistent_context() -> PersistentContext {
-PersistentContext {
-from_peer: Peer::empty(1),
-to_peer: Peer::empty(2),
-region_id: RegionId::new(1024, 1),
-cluster_id: 0,
-}
+test_util::new_persistent_context(1, 2, RegionId::new(1024, 1))
}
fn new_mock_open_instruction(datanode_id: DatanodeId, region_id: RegionId) -> Instruction {
@@ -223,23 +211,6 @@ mod tests {
})
}
-fn new_close_region_reply(id: u64) -> MailboxMessage {
-MailboxMessage {
-id,
-subject: "mock".to_string(),
-from: "datanode".to_string(),
-to: "meta".to_string(),
-timestamp_millis: current_time_millis(),
-payload: Some(Payload::Json(
-serde_json::to_string(&InstructionReply::CloseRegion(SimpleReply {
-result: false,
-error: None,
-}))
-.unwrap(),
-)),
-}
-}
fn new_open_region_reply(id: u64, result: bool, error: Option<String>) -> MailboxMessage {
MailboxMessage {
id,
@@ -259,9 +230,12 @@ mod tests {
let state = OpenCandidateRegion;
let persistent_context = new_persistent_context();
let env = TestingEnv::new();
-let ctx = env.context_factory().new_context(persistent_context);
-let err = state.build_open_region_instruction(&ctx).await.unwrap_err();
+let mut ctx = env.context_factory().new_context(persistent_context);
+let err = state
+.build_open_region_instruction(&mut ctx)
+.await
+.unwrap_err();
assert_matches!(err, Error::TableInfoNotFound { .. });
assert!(!err.is_retryable());
@@ -328,21 +302,14 @@ mod tests {
let mailbox_ctx = env.mailbox_context();
let mailbox = mailbox_ctx.mailbox().clone();
-let (tx, mut rx) = tokio::sync::mpsc::channel(1);
+let (tx, rx) = tokio::sync::mpsc::channel(1);
mailbox_ctx
-.insert_heartbeat_response_receiver(to_peer_id, tx)
+.insert_heartbeat_response_receiver(Channel::Datanode(to_peer_id), tx)
.await;
// Sends an incorrect reply.
-common_runtime::spawn_bg(async move {
-let resp = rx.recv().await.unwrap().unwrap();
-let reply_id = resp.mailbox_message.unwrap().id;
-mailbox
-.on_recv(reply_id, Ok(new_close_region_reply(reply_id)))
-.await
-.unwrap();
-});
+send_mock_reply(mailbox, rx, |id| Ok(new_close_region_reply(id)));
let open_instruction = new_mock_open_instruction(to_peer_id, region_id);
let err = state
@@ -368,23 +335,15 @@ mod tests {
let mailbox_ctx = env.mailbox_context();
let mailbox = mailbox_ctx.mailbox().clone();
-let (tx, mut rx) = tokio::sync::mpsc::channel(1);
+let (tx, rx) = tokio::sync::mpsc::channel(1);
mailbox_ctx
-.insert_heartbeat_response_receiver(to_peer_id, tx)
+.insert_heartbeat_response_receiver(Channel::Datanode(to_peer_id), tx)
.await;
// Sends a timeout error.
-common_runtime::spawn_bg(async move {
-let resp = rx.recv().await.unwrap().unwrap();
-let reply_id = resp.mailbox_message.unwrap().id;
-mailbox
-.on_recv(
-reply_id,
-Err(error::MailboxTimeoutSnafu { id: reply_id }.build()),
-)
-.await
-.unwrap();
+send_mock_reply(mailbox, rx, |id| {
+Err(error::MailboxTimeoutSnafu { id }.build())
});
let open_instruction = new_mock_open_instruction(to_peer_id, region_id);
@@ -411,26 +370,18 @@ mod tests {
let mailbox_ctx = env.mailbox_context();
let mailbox = mailbox_ctx.mailbox().clone();
-let (tx, mut rx) = tokio::sync::mpsc::channel(1);
+let (tx, rx) = tokio::sync::mpsc::channel(1);
mailbox_ctx
-.insert_heartbeat_response_receiver(to_peer_id, tx)
+.insert_heartbeat_response_receiver(Channel::Datanode(to_peer_id), tx)
.await;
-common_runtime::spawn_bg(async move {
-let resp = rx.recv().await.unwrap().unwrap();
-let reply_id = resp.mailbox_message.unwrap().id;
-mailbox
-.on_recv(
-reply_id,
-Ok(new_open_region_reply(
-reply_id,
-false,
-Some("test mocked".to_string()),
-)),
-)
-.await
-.unwrap();
+send_mock_reply(mailbox, rx, |id| {
+Ok(new_open_region_reply(
+id,
+false,
+Some("test mocked".to_string()),
+))
});
let open_instruction = new_mock_open_instruction(to_peer_id, region_id);
@@ -471,20 +422,13 @@ mod tests {
let mailbox_ctx = env.mailbox_context();
let mailbox = mailbox_ctx.mailbox().clone();
-let (tx, mut rx) = tokio::sync::mpsc::channel(1);
+let (tx, rx) = tokio::sync::mpsc::channel(1);
mailbox_ctx
-.insert_heartbeat_response_receiver(to_peer_id, tx)
+.insert_heartbeat_response_receiver(Channel::Datanode(to_peer_id), tx)
.await;
-common_runtime::spawn_bg(async move {
-let resp = rx.recv().await.unwrap().unwrap();
-let reply_id = resp.mailbox_message.unwrap().id;
-mailbox
-.on_recv(reply_id, Ok(new_open_region_reply(reply_id, true, None)))
-.await
-.unwrap();
-});
+send_mock_reply(mailbox, rx, |id| Ok(new_open_region_reply(id, true, None)));
let next = state.next(&mut ctx).await.unwrap();
let vc = ctx.volatile_ctx;


@@ -14,20 +14,28 @@
use std::sync::Arc;
-use api::v1::meta::{HeartbeatResponse, RequestHeader};
+use api::v1::meta::mailbox_message::Payload;
+use api::v1::meta::{HeartbeatResponse, MailboxMessage, RequestHeader};
+use common_meta::instruction::{InstructionReply, SimpleReply};
use common_meta::key::{TableMetadataManager, TableMetadataManagerRef};
use common_meta::kv_backend::memory::MemoryKvBackend;
+use common_meta::peer::Peer;
use common_meta::sequence::Sequence;
-use common_meta::DatanodeId;
use common_procedure::{Context as ProcedureContext, ProcedureId};
use common_procedure_test::MockContextProvider;
-use tokio::sync::mpsc::Sender;
+use common_time::util::current_time_millis;
+use store_api::storage::RegionId;
+use tokio::sync::mpsc::{Receiver, Sender};
use super::ContextFactoryImpl;
+use crate::error::Result;
use crate::handler::{HeartbeatMailbox, Pusher, Pushers};
+use crate::procedure::region_migration::PersistentContext;
use crate::region::lease_keeper::{OpeningRegionKeeper, OpeningRegionKeeperRef};
use crate::service::mailbox::{Channel, MailboxRef};
pub type MockHeartbeatReceiver = Receiver<std::result::Result<HeartbeatResponse, tonic::Status>>;
/// The context of mailbox.
pub struct MailboxContext {
mailbox: MailboxRef,
@@ -46,10 +54,10 @@ impl MailboxContext {
/// Inserts a pusher for `datanode_id`
pub async fn insert_heartbeat_response_receiver(
&mut self,
-datanode_id: DatanodeId,
+channel: Channel,
tx: Sender<std::result::Result<HeartbeatResponse, tonic::Status>>,
) {
-let pusher_id = Channel::Datanode(datanode_id).pusher_id();
+let pusher_id = channel.pusher_id();
let pusher = Pusher::new(tx, &RequestHeader::default());
let _ = self.pushers.insert(pusher_id, pusher).await;
}
@@ -120,3 +128,44 @@ impl TestingEnv {
}
}
}
/// Generates a [InstructionReply::CloseRegion] reply.
pub fn new_close_region_reply(id: u64) -> MailboxMessage {
MailboxMessage {
id,
subject: "mock".to_string(),
from: "datanode".to_string(),
to: "meta".to_string(),
timestamp_millis: current_time_millis(),
payload: Some(Payload::Json(
serde_json::to_string(&InstructionReply::CloseRegion(SimpleReply {
result: false,
error: None,
}))
.unwrap(),
)),
}
}
/// Sends a mock reply.
pub fn send_mock_reply(
mailbox: MailboxRef,
mut rx: MockHeartbeatReceiver,
msg: impl FnOnce(u64) -> Result<MailboxMessage> + Send + 'static,
) {
common_runtime::spawn_bg(async move {
let resp = rx.recv().await.unwrap().unwrap();
let reply_id = resp.mailbox_message.unwrap().id;
mailbox.on_recv(reply_id, msg(reply_id)).await.unwrap();
});
}
/// Generates a [PersistentContext].
pub fn new_persistent_context(from: u64, to: u64, region_id: RegionId) -> PersistentContext {
PersistentContext {
from_peer: Peer::empty(from),
to_peer: Peer::empty(to),
region_id,
cluster_id: 0,
}
}


@@ -12,20 +12,30 @@
// See the License for the specific language governing permissions and
// limitations under the License.
pub(crate) mod downgrade_leader_region;
pub(crate) mod rollback_downgraded_region;
pub(crate) mod upgrade_candidate_region;
use std::any::Any;
-use common_meta::rpc::router::RegionStatus;
+use common_telemetry::warn;
use serde::{Deserialize, Serialize};
-use snafu::ResultExt;
-use crate::error::{self, Result};
+use super::migration_abort::RegionMigrationAbort;
+use super::migration_end::RegionMigrationEnd;
+use crate::error::Result;
use crate::procedure::region_migration::downgrade_leader_region::DowngradeLeaderRegion;
use crate::procedure::region_migration::{Context, State};
#[derive(Debug, Serialize, Deserialize)]
#[serde(tag = "UpdateMetadata")]
pub enum UpdateMetadata {
/// Downgrades the leader region.
Downgrade,
/// Upgrades the candidate region.
Upgrade,
/// Rolls back the downgraded region.
Rollback,
}
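Because the enum is internally tagged, each variant persists as a single-key JSON object, which is what the procedure store replays on recovery (the existing `test_state_serialization` checks exactly this for `Downgrade`). A self-contained sketch of the same serde pattern with a stand-in enum:

    use serde::{Deserialize, Serialize};

    // Stand-in mirroring the enum above; `serde(tag = ...)` produces
    // {"UpdateMetadata":"<variant>"} for unit variants.
    #[derive(Debug, PartialEq, Serialize, Deserialize)]
    #[serde(tag = "UpdateMetadata")]
    enum UpdateMetadataSketch {
        Downgrade,
        Upgrade,
        Rollback,
    }

    fn main() {
        let json = serde_json::to_string(&UpdateMetadataSketch::Rollback).unwrap();
        assert_eq!(json, r#"{"UpdateMetadata":"Rollback"}"#);
        let state: UpdateMetadataSketch = serde_json::from_str(&json).unwrap();
        assert_eq!(state, UpdateMetadataSketch::Rollback);
    }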
#[async_trait::async_trait]
@@ -36,7 +46,25 @@ impl State for UpdateMetadata {
UpdateMetadata::Downgrade => {
self.downgrade_leader_region(ctx).await?;
-Ok(Box::new(DowngradeLeaderRegion))
+Ok(Box::<DowngradeLeaderRegion>::default())
}
+UpdateMetadata::Upgrade => {
+self.upgrade_candidate_region(ctx).await?;
+if let Err(err) = ctx.invalidate_table_cache().await {
+warn!("Failed to broadcast the invalidate table cache message during the upgrade candidate, error: {err:?}");
+};
+Ok(Box::new(RegionMigrationEnd))
+}
+UpdateMetadata::Rollback => {
+self.rollback_downgraded_region(ctx).await?;
+if let Err(err) = ctx.invalidate_table_cache().await {
+warn!("Failed to broadcast the invalidate table cache message during the rollback, error: {err:?}");
+};
+Ok(Box::new(RegionMigrationAbort::new(
+"Failed to upgrade the candidate region.",
+)))
+}
}
}
@@ -45,195 +73,3 @@ impl State for UpdateMetadata {
self
}
}
impl UpdateMetadata {
/// Downgrades the leader region.
///
/// Abort(non-retry):
/// - TableRoute is not found.
///
/// Retry:
/// - Failed to update [TableRouteValue](common_meta::key::table_region::TableRegionValue).
/// - Failed to retrieve the metadata of table.
///
/// About the failure of updating the [TableRouteValue](common_meta::key::table_region::TableRegionValue):
///
/// - There may be another [RegionMigrationProcedure](crate::procedure::region_migration::RegionMigrationProcedure)
/// that is executed concurrently for **other region**.
/// It will only update **other region** info. Therefore, it's safe to retry after failure.
///
/// - There is no other DDL procedure executed concurrently for the current table.
async fn downgrade_leader_region(&self, ctx: &mut Context) -> Result<()> {
let table_metadata_manager = ctx.table_metadata_manager.clone();
let region_id = ctx.region_id();
let table_id = region_id.table_id();
let current_table_route_value = ctx.get_table_route_value().await?;
if let Err(err) = table_metadata_manager
.update_leader_region_status(table_id, current_table_route_value, |route| {
if route.region.id == region_id {
Some(Some(RegionStatus::Downgraded))
} else {
None
}
})
.await
.context(error::TableMetadataManagerSnafu)
{
debug_assert!(ctx.remove_table_route_value());
return error::RetryLaterSnafu {
reason: format!("Failed to update the table route during the downgrading leader region, error: {err}")
}.fail();
}
debug_assert!(ctx.remove_table_route_value());
Ok(())
}
}
#[cfg(test)]
mod tests {
use std::assert_matches::assert_matches;
use common_meta::key::test_utils::new_test_table_info;
use common_meta::peer::Peer;
use common_meta::rpc::router::{Region, RegionRoute};
use store_api::storage::RegionId;
use super::*;
use crate::error::Error;
use crate::procedure::region_migration::test_util::TestingEnv;
use crate::procedure::region_migration::{ContextFactory, PersistentContext};
fn new_persistent_context() -> PersistentContext {
PersistentContext {
from_peer: Peer::empty(1),
to_peer: Peer::empty(2),
region_id: RegionId::new(1024, 1),
cluster_id: 0,
}
}
#[test]
fn test_state_serialization() {
let state = UpdateMetadata::Downgrade;
let expected = r#"{"UpdateMetadata":"Downgrade"}"#;
assert_eq!(expected, serde_json::to_string(&state).unwrap());
}
#[tokio::test]
async fn test_table_route_is_not_found_error() {
let state = UpdateMetadata::Downgrade;
let env = TestingEnv::new();
let persistent_context = new_persistent_context();
let mut ctx = env.context_factory().new_context(persistent_context);
let err = state.downgrade_leader_region(&mut ctx).await.unwrap_err();
assert_matches!(err, Error::TableRouteNotFound { .. });
assert!(!err.is_retryable());
}
#[tokio::test]
async fn test_failed_to_update_table_route_error() {
let state = UpdateMetadata::Downgrade;
let persistent_context = new_persistent_context();
let from_peer = persistent_context.from_peer.clone();
let env = TestingEnv::new();
let mut ctx = env.context_factory().new_context(persistent_context);
let table_id = ctx.region_id().table_id();
let table_info = new_test_table_info(1024, vec![1, 2]).into();
let region_routes = vec![
RegionRoute {
region: Region::new_test(RegionId::new(1024, 1)),
leader_peer: Some(from_peer.clone()),
..Default::default()
},
RegionRoute {
region: Region::new_test(RegionId::new(1024, 2)),
leader_peer: Some(Peer::empty(4)),
..Default::default()
},
];
let table_metadata_manager = env.table_metadata_manager();
table_metadata_manager
.create_table_metadata(table_info, region_routes)
.await
.unwrap();
let original_table_route = table_metadata_manager
.table_route_manager()
.get(table_id)
.await
.unwrap()
.unwrap();
// modifies the table route.
table_metadata_manager
.update_leader_region_status(table_id, &original_table_route, |route| {
if route.region.id == RegionId::new(1024, 2) {
Some(Some(RegionStatus::Downgraded))
} else {
None
}
})
.await
.unwrap();
// sets the old table route.
ctx.volatile_ctx.table_route_info = Some(original_table_route);
let err = state.downgrade_leader_region(&mut ctx).await.unwrap_err();
assert_matches!(err, Error::RetryLater { .. });
assert!(err.is_retryable());
assert!(err.to_string().contains("Failed to update the table route"));
}
#[tokio::test]
async fn test_next_downgrade_leader_region_state() {
let mut state = Box::new(UpdateMetadata::Downgrade);
let persistent_context = new_persistent_context();
let from_peer = persistent_context.from_peer.clone();
let env = TestingEnv::new();
let mut ctx = env.context_factory().new_context(persistent_context);
let table_id = ctx.region_id().table_id();
let table_info = new_test_table_info(1024, vec![1, 2]).into();
let region_routes = vec![RegionRoute {
region: Region::new_test(RegionId::new(1024, 1)),
leader_peer: Some(from_peer.clone()),
..Default::default()
}];
let table_metadata_manager = env.table_metadata_manager();
table_metadata_manager
.create_table_metadata(table_info, region_routes)
.await
.unwrap();
let next = state.next(&mut ctx).await.unwrap();
let _ = next
.as_any()
.downcast_ref::<DowngradeLeaderRegion>()
.unwrap();
let latest_table_route = table_metadata_manager
.table_route_manager()
.get(table_id)
.await
.unwrap()
.unwrap();
assert!(latest_table_route.region_routes[0].is_leader_downgraded());
assert!(ctx.volatile_ctx.table_route_info.is_none());
}
}
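Aside: the states above are persisted as tagged JSON (see `test_state_serialization`). A minimal, self-contained sketch of that serialize-a-trait-object pattern using `typetag`, with a hypothetical `State` trait and `Downgrade` type rather than the crate's actual definitions:

use serde::{Deserialize, Serialize};

// Hypothetical trait mirroring the persistable-state pattern; the real
// `State` trait lives in `crate::procedure::region_migration`.
#[typetag::serde]
trait State: std::fmt::Debug {
    fn name(&self) -> &'static str;
}

#[derive(Debug, Serialize, Deserialize)]
struct Downgrade;

#[typetag::serde]
impl State for Downgrade {
    fn name(&self) -> &'static str {
        "Downgrade"
    }
}

fn main() {
    // `typetag` tags the JSON with the concrete type name, so a
    // `Box<dyn State>` round-trips through serde and resumes correctly.
    let state: Box<dyn State> = Box::new(Downgrade);
    let json = serde_json::to_string(&state).unwrap();
    let restored: Box<dyn State> = serde_json::from_str(&json).unwrap();
    assert_eq!(restored.name(), "Downgrade");
}

Because the serialized form carries the concrete type name, a suspended procedure can be re-hydrated after a metasrv restart and resume from the exact state it was in.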


@@ -0,0 +1,210 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use common_meta::rpc::router::RegionStatus;
use snafu::ResultExt;
use crate::error::{self, Result};
use crate::procedure::region_migration::update_metadata::UpdateMetadata;
use crate::procedure::region_migration::Context;
impl UpdateMetadata {
/// Downgrades the leader region.
///
/// Abort(non-retry):
/// - TableRoute is not found.
///
/// Retry:
/// - Failed to update [TableRouteValue](common_meta::key::table_route::TableRouteValue).
/// - Failed to retrieve the table metadata.
///
/// About the failure of updating the [TableRouteValue](common_meta::key::table_route::TableRouteValue):
///
/// - There may be another [RegionMigrationProcedure](crate::procedure::region_migration::RegionMigrationProcedure)
/// that is executed concurrently for **another region**.
/// It will only update that **other region's** info, so it's safe to retry after failure.
///
/// - There is no other DDL procedure executed concurrently for the current table.
pub async fn downgrade_leader_region(&self, ctx: &mut Context) -> Result<()> {
let table_metadata_manager = ctx.table_metadata_manager.clone();
let region_id = ctx.region_id();
let table_id = region_id.table_id();
let current_table_route_value = ctx.get_table_route_value().await?;
if let Err(err) = table_metadata_manager
.update_leader_region_status(table_id, current_table_route_value, |route| {
if route.region.id == region_id {
Some(Some(RegionStatus::Downgraded))
} else {
None
}
})
.await
.context(error::TableMetadataManagerSnafu)
{
debug_assert!(ctx.remove_table_route_value());
return error::RetryLaterSnafu {
reason: format!("Failed to update the table route during the downgrading leader region, error: {err}")
}.fail();
}
debug_assert!(ctx.remove_table_route_value());
Ok(())
}
}
#[cfg(test)]
mod tests {
use std::assert_matches::assert_matches;
use common_meta::key::test_utils::new_test_table_info;
use common_meta::peer::Peer;
use common_meta::rpc::router::{Region, RegionRoute, RegionStatus};
use store_api::storage::RegionId;
use crate::error::Error;
use crate::procedure::region_migration::downgrade_leader_region::DowngradeLeaderRegion;
use crate::procedure::region_migration::test_util::{self, TestingEnv};
use crate::procedure::region_migration::update_metadata::UpdateMetadata;
use crate::procedure::region_migration::{ContextFactory, PersistentContext, State};
fn new_persistent_context() -> PersistentContext {
test_util::new_persistent_context(1, 2, RegionId::new(1024, 1))
}
#[test]
fn test_state_serialization() {
let state = UpdateMetadata::Downgrade;
let expected = r#"{"UpdateMetadata":"Downgrade"}"#;
assert_eq!(expected, serde_json::to_string(&state).unwrap());
}
#[tokio::test]
async fn test_table_route_is_not_found_error() {
let state = UpdateMetadata::Downgrade;
let env = TestingEnv::new();
let persistent_context = new_persistent_context();
let mut ctx = env.context_factory().new_context(persistent_context);
let err = state.downgrade_leader_region(&mut ctx).await.unwrap_err();
assert_matches!(err, Error::TableRouteNotFound { .. });
assert!(!err.is_retryable());
}
#[tokio::test]
async fn test_failed_to_update_table_route_error() {
let state = UpdateMetadata::Downgrade;
let persistent_context = new_persistent_context();
let from_peer = persistent_context.from_peer.clone();
let env = TestingEnv::new();
let mut ctx = env.context_factory().new_context(persistent_context);
let table_id = ctx.region_id().table_id();
let table_info = new_test_table_info(1024, vec![1, 2]).into();
let region_routes = vec![
RegionRoute {
region: Region::new_test(RegionId::new(1024, 1)),
leader_peer: Some(from_peer.clone()),
..Default::default()
},
RegionRoute {
region: Region::new_test(RegionId::new(1024, 2)),
leader_peer: Some(Peer::empty(4)),
..Default::default()
},
];
let table_metadata_manager = env.table_metadata_manager();
table_metadata_manager
.create_table_metadata(table_info, region_routes)
.await
.unwrap();
let original_table_route = table_metadata_manager
.table_route_manager()
.get(table_id)
.await
.unwrap()
.unwrap();
// modifies the table route.
table_metadata_manager
.update_leader_region_status(table_id, &original_table_route, |route| {
if route.region.id == RegionId::new(1024, 2) {
Some(Some(RegionStatus::Downgraded))
} else {
None
}
})
.await
.unwrap();
// sets the old table route.
ctx.volatile_ctx.table_route = Some(original_table_route);
let err = state.downgrade_leader_region(&mut ctx).await.unwrap_err();
assert!(ctx.volatile_ctx.table_route.is_none());
assert_matches!(err, Error::RetryLater { .. });
assert!(err.is_retryable());
assert!(err.to_string().contains("Failed to update the table route"));
}
#[tokio::test]
async fn test_next_downgrade_leader_region_state() {
let mut state = Box::new(UpdateMetadata::Downgrade);
let persistent_context = new_persistent_context();
let from_peer = persistent_context.from_peer.clone();
let env = TestingEnv::new();
let mut ctx = env.context_factory().new_context(persistent_context);
let table_id = ctx.region_id().table_id();
let table_info = new_test_table_info(1024, vec![1, 2]).into();
let region_routes = vec![RegionRoute {
region: Region::new_test(RegionId::new(1024, 1)),
leader_peer: Some(from_peer.clone()),
..Default::default()
}];
let table_metadata_manager = env.table_metadata_manager();
table_metadata_manager
.create_table_metadata(table_info, region_routes)
.await
.unwrap();
let next = state.next(&mut ctx).await.unwrap();
let _ = next
.as_any()
.downcast_ref::<DowngradeLeaderRegion>()
.unwrap();
let latest_table_route = table_metadata_manager
.table_route_manager()
.get(table_id)
.await
.unwrap()
.unwrap();
assert!(latest_table_route.region_routes[0].is_leader_downgraded());
assert!(ctx.volatile_ctx.table_route.is_none());
}
}
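The closure handed to `update_leader_region_status` returns a nested `Option`, which is easy to misread: `None` leaves the route untouched, `Some(Some(status))` sets a new leader status, and `Some(None)` clears it (the rollback path relies on the last case). A standalone sketch of that tri-state convention, using simplified stand-in types rather than the `common_meta` ones:

#[derive(Debug, Clone, Copy, PartialEq)]
enum RegionStatus {
    Downgraded,
}

#[derive(Debug)]
struct RegionRoute {
    region_id: u64,
    leader_status: Option<RegionStatus>,
}

// Applies the tri-state convention: `None` keeps the route as-is, while
// `Some(new)` overwrites `leader_status` with `new`, which may itself be
// `None` (clearing the status, as the rollback path does).
fn update_leader_status<F>(routes: &mut [RegionRoute], f: F)
where
    F: Fn(&RegionRoute) -> Option<Option<RegionStatus>>,
{
    for route in routes.iter_mut() {
        if let Some(new_status) = f(route) {
            route.leader_status = new_status;
        }
    }
}

fn main() {
    let mut routes = vec![
        RegionRoute { region_id: 1, leader_status: None },
        RegionRoute { region_id: 2, leader_status: Some(RegionStatus::Downgraded) },
    ];
    // Downgrade region 1 only; region 2 is left untouched.
    update_leader_status(&mut routes, |r| {
        if r.region_id == 1 {
            Some(Some(RegionStatus::Downgraded))
        } else {
            None
        }
    });
    assert_eq!(routes[0].leader_status, Some(RegionStatus::Downgraded));
    assert_eq!(routes[1].leader_status, Some(RegionStatus::Downgraded));
}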


@@ -0,0 +1,241 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use snafu::ResultExt;
use crate::error::{self, Result};
use crate::procedure::region_migration::update_metadata::UpdateMetadata;
use crate::procedure::region_migration::Context;
impl UpdateMetadata {
/// Rolls back the downgraded leader region if the candidate region is unreachable.
///
/// Abort(non-retry):
/// - TableRoute is not found.
///
/// Retry:
/// - Failed to update [TableRouteValue](common_meta::key::table_route::TableRouteValue).
/// - Failed to retrieve the table metadata.
pub async fn rollback_downgraded_region(&self, ctx: &mut Context) -> Result<()> {
let table_metadata_manager = ctx.table_metadata_manager.clone();
let region_id = ctx.region_id();
let table_id = region_id.table_id();
let current_table_route_value = ctx.get_table_route_value().await?;
if let Err(err) = table_metadata_manager
.update_leader_region_status(table_id, current_table_route_value, |route| {
if route.region.id == region_id {
Some(None)
} else {
None
}
})
.await
.context(error::TableMetadataManagerSnafu)
{
debug_assert!(ctx.remove_table_route_value());
return error::RetryLaterSnafu {
reason: format!("Failed to update the table route during the rollback downgraded leader region, error: {err}")
}.fail();
}
debug_assert!(ctx.remove_table_route_value());
Ok(())
}
}
#[cfg(test)]
mod tests {
use std::assert_matches::assert_matches;
use common_meta::key::test_utils::new_test_table_info;
use common_meta::peer::Peer;
use common_meta::rpc::router::{Region, RegionRoute, RegionStatus};
use store_api::storage::RegionId;
use crate::error::Error;
use crate::procedure::region_migration::migration_abort::RegionMigrationAbort;
use crate::procedure::region_migration::test_util::{self, TestingEnv};
use crate::procedure::region_migration::update_metadata::UpdateMetadata;
use crate::procedure::region_migration::{ContextFactory, PersistentContext, State};
fn new_persistent_context() -> PersistentContext {
test_util::new_persistent_context(1, 2, RegionId::new(1024, 1))
}
#[tokio::test]
async fn test_table_route_is_not_found_error() {
let state = UpdateMetadata::Rollback;
let env = TestingEnv::new();
let persistent_context = new_persistent_context();
let mut ctx = env.context_factory().new_context(persistent_context);
let err = state.downgrade_leader_region(&mut ctx).await.unwrap_err();
assert_matches!(err, Error::TableRouteNotFound { .. });
assert!(!err.is_retryable());
}
#[tokio::test]
async fn test_update_table_route_with_retry() {
let state = UpdateMetadata::Rollback;
let persistent_context = new_persistent_context();
let from_peer = persistent_context.from_peer.clone();
let env = TestingEnv::new();
let mut ctx = env.context_factory().new_context(persistent_context);
let table_id = ctx.region_id().table_id();
let table_info = new_test_table_info(1024, vec![1, 2, 3]).into();
let region_routes = vec![
RegionRoute {
region: Region::new_test(RegionId::new(1024, 1)),
leader_peer: Some(from_peer.clone()),
leader_status: Some(RegionStatus::Downgraded),
..Default::default()
},
RegionRoute {
region: Region::new_test(RegionId::new(1024, 2)),
leader_peer: Some(Peer::empty(4)),
leader_status: Some(RegionStatus::Downgraded),
..Default::default()
},
RegionRoute {
region: Region::new_test(RegionId::new(1024, 3)),
leader_peer: Some(Peer::empty(5)),
..Default::default()
},
];
let expected_region_routes = {
let mut region_routes = region_routes.clone();
region_routes[0].leader_status = None;
region_routes[1].leader_status = None;
region_routes
};
let table_metadata_manager = env.table_metadata_manager();
table_metadata_manager
.create_table_metadata(table_info, region_routes)
.await
.unwrap();
let old_table_route = table_metadata_manager
.table_route_manager()
.get(table_id)
.await
.unwrap()
.unwrap();
// modifies the table route.
table_metadata_manager
.update_leader_region_status(table_id, &old_table_route, |route| {
if route.region.id == RegionId::new(1024, 2) {
Some(None)
} else {
None
}
})
.await
.unwrap();
ctx.volatile_ctx.table_route = Some(old_table_route);
let err = state
.rollback_downgraded_region(&mut ctx)
.await
.unwrap_err();
assert!(ctx.volatile_ctx.table_route.is_none());
assert_matches!(err, Error::RetryLater { .. });
assert!(err.is_retryable());
assert!(err.to_string().contains("Failed to update the table route"));
state.rollback_downgraded_region(&mut ctx).await.unwrap();
let region_routes = table_metadata_manager
.table_route_manager()
.get(table_id)
.await
.unwrap()
.unwrap()
.into_inner()
.region_routes;
assert_eq!(expected_region_routes, region_routes);
}
#[tokio::test]
async fn test_next_migration_end_state() {
let mut state = Box::new(UpdateMetadata::Rollback);
let persistent_context = new_persistent_context();
let from_peer = persistent_context.from_peer.clone();
let env = TestingEnv::new();
let mut ctx = env.context_factory().new_context(persistent_context);
let table_id = ctx.region_id().table_id();
let table_info = new_test_table_info(1024, vec![1, 2, 3]).into();
let region_routes = vec![
RegionRoute {
region: Region::new_test(RegionId::new(1024, 1)),
leader_peer: Some(from_peer.clone()),
leader_status: Some(RegionStatus::Downgraded),
..Default::default()
},
RegionRoute {
region: Region::new_test(RegionId::new(1024, 2)),
leader_peer: Some(Peer::empty(4)),
leader_status: Some(RegionStatus::Downgraded),
..Default::default()
},
RegionRoute {
region: Region::new_test(RegionId::new(1024, 3)),
leader_peer: Some(Peer::empty(5)),
..Default::default()
},
];
let expected_region_routes = {
let mut region_routes = region_routes.clone();
region_routes[0].leader_status = None;
region_routes
};
let table_metadata_manager = env.table_metadata_manager();
table_metadata_manager
.create_table_metadata(table_info, region_routes)
.await
.unwrap();
let next = state.next(&mut ctx).await.unwrap();
let _ = next
.as_any()
.downcast_ref::<RegionMigrationAbort>()
.unwrap();
assert!(ctx.volatile_ctx.table_route.is_none());
let region_routes = table_metadata_manager
.table_route_manager()
.get(table_id)
.await
.unwrap()
.unwrap()
.into_inner()
.region_routes;
assert_eq!(expected_region_routes, region_routes);
}
}
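Both the downgrade and rollback paths translate a failed metadata update into a retryable error via `error::RetryLaterSnafu { .. }.fail()`. A minimal sketch of that snafu context-selector pattern, with a hypothetical single-variant `Error` type instead of the crate's real one:

use snafu::Snafu;

// Hypothetical single-variant error type; the crate's `Error` has many more
// variants and derives `is_retryable` from them.
#[derive(Debug, Snafu)]
enum Error {
    #[snafu(display("Retry later: {reason}"))]
    RetryLater { reason: String },
}

impl Error {
    fn is_retryable(&self) -> bool {
        matches!(self, Error::RetryLater { .. })
    }
}

fn update_table_route() -> Result<(), Error> {
    // A conflicting concurrent update would surface here; reporting it as
    // retryable lets the procedure framework schedule another attempt.
    RetryLaterSnafu {
        reason: "Failed to update the table route".to_string(),
    }
    .fail()
}

fn main() {
    let err = update_table_route().unwrap_err();
    assert!(err.is_retryable());
    assert!(err.to_string().contains("Failed to update the table route"));
}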


@@ -0,0 +1,376 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::HashMap;
use common_meta::ddl::utils::region_storage_path;
use common_meta::key::datanode_table::RegionInfo;
use common_meta::rpc::router::RegionRoute;
use common_telemetry::{info, warn};
use snafu::{ensure, OptionExt, ResultExt};
use crate::error::{self, Result};
use crate::procedure::region_migration::update_metadata::UpdateMetadata;
use crate::procedure::region_migration::Context;
impl UpdateMetadata {
/// Returns the new [Vec<RegionRoute>].
async fn build_upgrade_candidate_region_metadata(
&self,
ctx: &mut Context,
) -> Result<Vec<RegionRoute>> {
let region_id = ctx.region_id();
let table_route_value = ctx.get_table_route_value().await?.clone();
let mut region_routes = table_route_value.region_routes.clone();
let region_route = region_routes
.iter_mut()
.find(|route| route.region.id == region_id)
.context(error::RegionRouteNotFoundSnafu { region_id })?;
// Removes downgraded status.
region_route.set_leader_status(None);
let candidate = &ctx.persistent_ctx.to_peer;
let expected_old_leader = &ctx.persistent_ctx.from_peer;
// Upgrades candidate to leader.
ensure!(region_route
.leader_peer
.take_if(|old_leader| old_leader.id == expected_old_leader.id)
.is_some(),
error::UnexpectedSnafu{
violated: format!("Unexpected region leader: {:?} while upgrading the candidate metadata, expected: {:?}", region_route.leader_peer, expected_old_leader),
}
);
region_route.leader_peer = Some(candidate.clone());
info!(
"Upgrading candidate region to leader region: {:?} for region: {}",
candidate, region_id
);
// Removes the candidate region from the followers.
let removed = region_route
.follower_peers
.extract_if(|peer| peer.id == candidate.id)
.collect::<Vec<_>>();
if removed.len() > 1 {
warn!(
"Removes duplicated regions: {removed:?} during the upgrading candidate metadata for region: {region_id}"
);
}
Ok(region_routes)
}
/// Upgrades the candidate region.
///
/// Abort(non-retry):
/// - TableRoute or RegionRoute is not found.
/// Typically, this is impossible: there is no other DDL procedure executed concurrently for the current table.
///
/// Retry:
/// - Failed to update [TableRouteValue](common_meta::key::table_route::TableRouteValue).
/// - Failed to retrieve the table metadata.
pub async fn upgrade_candidate_region(&self, ctx: &mut Context) -> Result<()> {
let region_id = ctx.region_id();
let table_metadata_manager = ctx.table_metadata_manager.clone();
let region_routes = self.build_upgrade_candidate_region_metadata(ctx).await?;
let table_info_value = ctx.get_table_info_value().await?;
let table_info = &table_info_value.table_info;
let region_storage_path =
region_storage_path(&table_info.catalog_name, &table_info.schema_name);
let engine = table_info.meta.engine.clone();
let region_options: HashMap<String, String> = (&table_info.meta.options).into();
// The table route value was fetched above, so this read hits the context cache (no remote fetch).
let table_route_value = ctx.get_table_route_value().await?;
if let Err(err) = table_metadata_manager
.update_table_route(
region_id.table_id(),
RegionInfo {
engine: engine.to_string(),
region_storage_path: region_storage_path.to_string(),
region_options: region_options.clone(),
},
table_route_value,
region_routes,
&region_options,
)
.await
.context(error::TableMetadataManagerSnafu)
{
debug_assert!(ctx.remove_table_route_value());
return error::RetryLaterSnafu {
reason: format!("Failed to update the table route during the upgrading candidate region, error: {err}")
}.fail();
};
debug_assert!(ctx.remove_table_route_value());
// Consumes the guard.
ctx.volatile_ctx.opening_region_guard.take();
Ok(())
}
}
#[cfg(test)]
mod tests {
use std::assert_matches::assert_matches;
use common_meta::key::test_utils::new_test_table_info;
use common_meta::peer::Peer;
use common_meta::rpc::router::{Region, RegionRoute, RegionStatus};
use store_api::storage::RegionId;
use crate::error::Error;
use crate::procedure::region_migration::migration_end::RegionMigrationEnd;
use crate::procedure::region_migration::test_util::{self, TestingEnv};
use crate::procedure::region_migration::update_metadata::UpdateMetadata;
use crate::procedure::region_migration::{ContextFactory, PersistentContext, State};
use crate::region::lease_keeper::OpeningRegionKeeper;
fn new_persistent_context() -> PersistentContext {
test_util::new_persistent_context(1, 2, RegionId::new(1024, 1))
}
#[tokio::test]
async fn test_table_route_is_not_found_error() {
let state = UpdateMetadata::Upgrade;
let env = TestingEnv::new();
let persistent_context = new_persistent_context();
let mut ctx = env.context_factory().new_context(persistent_context);
let err = state
.build_upgrade_candidate_region_metadata(&mut ctx)
.await
.unwrap_err();
assert_matches!(err, Error::TableRouteNotFound { .. });
assert!(!err.is_retryable());
}
#[tokio::test]
async fn test_region_route_is_not_found() {
let state = UpdateMetadata::Upgrade;
let env = TestingEnv::new();
let persistent_context = new_persistent_context();
let mut ctx = env.context_factory().new_context(persistent_context);
let table_info = new_test_table_info(1024, vec![2]).into();
let region_routes = vec![RegionRoute {
region: Region::new_test(RegionId::new(1024, 2)),
leader_peer: Some(Peer::empty(4)),
..Default::default()
}];
let table_metadata_manager = env.table_metadata_manager();
table_metadata_manager
.create_table_metadata(table_info, region_routes)
.await
.unwrap();
let err = state
.build_upgrade_candidate_region_metadata(&mut ctx)
.await
.unwrap_err();
assert_matches!(err, Error::RegionRouteNotFound { .. });
assert!(!err.is_retryable());
}
#[tokio::test]
async fn test_region_route_expected_leader() {
let state = UpdateMetadata::Upgrade;
let env = TestingEnv::new();
let persistent_context = new_persistent_context();
let mut ctx = env.context_factory().new_context(persistent_context);
let table_info = new_test_table_info(1024, vec![1]).into();
let region_routes = vec![RegionRoute {
region: Region::new_test(RegionId::new(1024, 1)),
leader_peer: Some(Peer::empty(3)),
..Default::default()
}];
let table_metadata_manager = env.table_metadata_manager();
table_metadata_manager
.create_table_metadata(table_info, region_routes)
.await
.unwrap();
let err = state
.build_upgrade_candidate_region_metadata(&mut ctx)
.await
.unwrap_err();
assert_matches!(err, Error::Unexpected { .. });
assert!(!err.is_retryable());
assert!(err.to_string().contains("Unexpected region leader"));
}
#[tokio::test]
async fn test_build_upgrade_candidate_region_metadata() {
let state = UpdateMetadata::Upgrade;
let env = TestingEnv::new();
let persistent_context = new_persistent_context();
let mut ctx = env.context_factory().new_context(persistent_context);
let table_info = new_test_table_info(1024, vec![1]).into();
let region_routes = vec![RegionRoute {
region: Region::new_test(RegionId::new(1024, 1)),
leader_peer: Some(Peer::empty(1)),
follower_peers: vec![Peer::empty(2), Peer::empty(3)],
leader_status: Some(RegionStatus::Downgraded),
}];
let table_metadata_manager = env.table_metadata_manager();
table_metadata_manager
.create_table_metadata(table_info, region_routes)
.await
.unwrap();
let new_region_routes = state
.build_upgrade_candidate_region_metadata(&mut ctx)
.await
.unwrap();
assert!(!new_region_routes[0].is_leader_downgraded());
assert_eq!(new_region_routes[0].follower_peers, vec![Peer::empty(3)]);
assert_eq!(new_region_routes[0].leader_peer.as_ref().unwrap().id, 2);
}
#[tokio::test]
async fn test_failed_to_update_table_route_error() {
let state = UpdateMetadata::Upgrade;
let env = TestingEnv::new();
let persistent_context = new_persistent_context();
let mut ctx = env.context_factory().new_context(persistent_context);
let opening_keeper = OpeningRegionKeeper::default();
let table_id = 1024;
let table_info = new_test_table_info(table_id, vec![1]).into();
let region_routes = vec![
RegionRoute {
region: Region::new_test(RegionId::new(table_id, 1)),
leader_peer: Some(Peer::empty(1)),
follower_peers: vec![Peer::empty(5), Peer::empty(3)],
leader_status: Some(RegionStatus::Downgraded),
},
RegionRoute {
region: Region::new_test(RegionId::new(table_id, 2)),
leader_peer: Some(Peer::empty(4)),
leader_status: Some(RegionStatus::Downgraded),
..Default::default()
},
];
let table_metadata_manager = env.table_metadata_manager();
table_metadata_manager
.create_table_metadata(table_info, region_routes)
.await
.unwrap();
let original_table_route = table_metadata_manager
.table_route_manager()
.get(table_id)
.await
.unwrap()
.unwrap();
// modifies the table route.
table_metadata_manager
.update_leader_region_status(table_id, &original_table_route, |route| {
if route.region.id == RegionId::new(1024, 2) {
// Removes the status.
Some(None)
} else {
None
}
})
.await
.unwrap();
// sets the old table route.
ctx.volatile_ctx.table_route = Some(original_table_route);
let guard = opening_keeper
.register(2, RegionId::new(table_id, 1))
.unwrap();
ctx.volatile_ctx.opening_region_guard = Some(guard);
let err = state.upgrade_candidate_region(&mut ctx).await.unwrap_err();
assert!(ctx.volatile_ctx.table_route.is_none());
assert!(ctx.volatile_ctx.opening_region_guard.is_some());
assert_matches!(err, Error::RetryLater { .. });
assert!(err.is_retryable());
assert!(err.to_string().contains("Failed to update the table route"));
}
#[tokio::test]
async fn test_next_migration_end_state() {
let mut state = Box::new(UpdateMetadata::Upgrade);
let env = TestingEnv::new();
let persistent_context = new_persistent_context();
let mut ctx = env.context_factory().new_context(persistent_context);
let opening_keeper = OpeningRegionKeeper::default();
let table_id = 1024;
let table_info = new_test_table_info(table_id, vec![1]).into();
let region_routes = vec![RegionRoute {
region: Region::new_test(RegionId::new(table_id, 1)),
leader_peer: Some(Peer::empty(1)),
leader_status: Some(RegionStatus::Downgraded),
..Default::default()
}];
let guard = opening_keeper
.register(2, RegionId::new(table_id, 1))
.unwrap();
ctx.volatile_ctx.opening_region_guard = Some(guard);
let table_metadata_manager = env.table_metadata_manager();
table_metadata_manager
.create_table_metadata(table_info, region_routes)
.await
.unwrap();
let next = state.next(&mut ctx).await.unwrap();
let _ = next.as_any().downcast_ref::<RegionMigrationEnd>().unwrap();
let region_routes = table_metadata_manager
.table_route_manager()
.get(table_id)
.await
.unwrap()
.unwrap()
.into_inner()
.region_routes;
assert!(ctx.volatile_ctx.table_route.is_none());
assert!(ctx.volatile_ctx.opening_region_guard.is_none());
assert_eq!(region_routes.len(), 1);
assert!(!region_routes[0].is_leader_downgraded());
assert!(region_routes[0].follower_peers.is_empty());
assert_eq!(region_routes[0].leader_peer.as_ref().unwrap().id, 2);
}
}
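`build_upgrade_candidate_region_metadata` above relies on the then-unstable `Option::take_if` and `Vec::extract_if`. On stable Rust, the same promote-and-deduplicate step can be sketched roughly as below, with simplified `Peer`/`RegionRoute` stand-ins (the real code also counts the extracted duplicates so it can warn about them):

#[derive(Debug, Clone, PartialEq)]
struct Peer {
    id: u64,
}

#[derive(Debug)]
struct RegionRoute {
    leader_peer: Option<Peer>,
    follower_peers: Vec<Peer>,
}

// Promotes `candidate` to leader, asserting the current leader matches
// `expected_old_leader`, then drops the candidate from the follower list so
// it does not appear twice in the route.
fn promote_candidate(
    route: &mut RegionRoute,
    expected_old_leader: &Peer,
    candidate: &Peer,
) -> Result<(), String> {
    match route.leader_peer.take() {
        Some(old) if old.id == expected_old_leader.id => {}
        other => return Err(format!("Unexpected region leader: {other:?}")),
    }
    route.leader_peer = Some(candidate.clone());
    // Stable-Rust stand-in for `extract_if`: keep every follower that is not
    // the freshly promoted candidate.
    route.follower_peers.retain(|peer| peer.id != candidate.id);
    Ok(())
}

fn main() {
    let mut route = RegionRoute {
        leader_peer: Some(Peer { id: 1 }),
        follower_peers: vec![Peer { id: 2 }, Peer { id: 3 }],
    };
    promote_candidate(&mut route, &Peer { id: 1 }, &Peer { id: 2 }).unwrap();
    assert_eq!(route.leader_peer, Some(Peer { id: 2 }));
    assert_eq!(route.follower_peers, vec![Peer { id: 3 }]);
}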


@@ -0,0 +1,562 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::any::Any;
use std::time::Duration;
use api::v1::meta::MailboxMessage;
use common_meta::distributed_time_constants::MAILBOX_RTT_SECS;
use common_meta::instruction::{Instruction, InstructionReply, UpgradeRegion, UpgradeRegionReply};
use common_telemetry::warn;
use serde::{Deserialize, Serialize};
use snafu::{ensure, ResultExt};
use tokio::time::sleep;
use super::update_metadata::UpdateMetadata;
use crate::error::{self, Result};
use crate::handler::HeartbeatMailbox;
use crate::procedure::region_migration::{Context, State};
use crate::service::mailbox::Channel;
#[derive(Debug, Serialize, Deserialize)]
pub struct UpgradeCandidateRegion {
// The maximum number of optimistic retries.
optimistic_retry: usize,
// The initial retry interval.
retry_initial_interval: Duration,
// The replay timeout of an instruction.
replay_timeout: Duration,
// If true, the candidate region MUST replay the WAL to the latest entry id.
// Otherwise, the procedure rolls back to the old leader region.
require_ready: bool,
}
impl Default for UpgradeCandidateRegion {
fn default() -> Self {
Self {
optimistic_retry: 3,
retry_initial_interval: Duration::from_millis(500),
replay_timeout: Duration::from_millis(1000),
require_ready: true,
}
}
}
#[async_trait::async_trait]
#[typetag::serde]
impl State for UpgradeCandidateRegion {
async fn next(&mut self, ctx: &mut Context) -> Result<Box<dyn State>> {
if self.upgrade_region_with_retry(ctx).await {
Ok(Box::new(UpdateMetadata::Upgrade))
} else {
Ok(Box::new(UpdateMetadata::Rollback))
}
}
fn as_any(&self) -> &dyn Any {
self
}
}
impl UpgradeCandidateRegion {
const UPGRADE_CANDIDATE_REGION_RTT: Duration = Duration::from_secs(MAILBOX_RTT_SECS);
/// Returns the timeout of the upgrade candidate region.
///
/// Equals `replay_timeout` + RTT
fn send_upgrade_candidate_region_timeout(&self) -> Duration {
self.replay_timeout + UpgradeCandidateRegion::UPGRADE_CANDIDATE_REGION_RTT
}
/// Builds upgrade region instruction.
fn build_upgrade_region_instruction(&self, ctx: &Context) -> Instruction {
let pc = &ctx.persistent_ctx;
let region_id = pc.region_id;
let last_entry_id = ctx.volatile_ctx.leader_region_last_entry_id;
Instruction::UpgradeRegion(UpgradeRegion {
region_id,
last_entry_id,
wait_for_replay_secs: Some(self.replay_timeout.as_secs()),
})
}
/// Tries to upgrade a candidate region.
///
/// Retry:
/// - If `require_ready` is true but the candidate region replies with `ready: false`.
/// - [MailboxTimeout](error::Error::MailboxTimeout), i.e., the reply timed out.
///
/// Abort:
/// - The candidate region doesn't exist.
/// - [PusherNotFound](error::Error::PusherNotFound), The datanode is unreachable.
/// - [PushMessage](error::Error::PushMessage), The receiver is dropped.
/// - [MailboxReceiver](error::Error::MailboxReceiver), The sender is dropped without sending (impossible).
/// - [UnexpectedInstructionReply](error::Error::UnexpectedInstructionReply) (impossible).
/// - Invalid JSON (impossible).
async fn upgrade_region(&self, ctx: &Context, upgrade_instruction: &Instruction) -> Result<()> {
let pc = &ctx.persistent_ctx;
let region_id = pc.region_id;
let candidate = &pc.to_peer;
let msg = MailboxMessage::json_message(
&format!("Upgrade candidate region: {}", region_id),
&format!("Meta@{}", ctx.server_addr()),
&format!("Datanode-{}@{}", candidate.id, candidate.addr),
common_time::util::current_time_millis(),
upgrade_instruction,
)
.with_context(|_| error::SerializeToJsonSnafu {
input: upgrade_instruction.to_string(),
})?;
let ch = Channel::Datanode(candidate.id);
let receiver = ctx
.mailbox
.send(&ch, msg, self.send_upgrade_candidate_region_timeout())
.await?;
match receiver.await? {
Ok(msg) => {
let reply = HeartbeatMailbox::json_reply(&msg)?;
let InstructionReply::UpgradeRegion(UpgradeRegionReply {
ready,
exists,
error,
}) = reply
else {
return error::UnexpectedInstructionReplySnafu {
mailbox_message: msg.to_string(),
reason: "Unexpected reply of the upgrade region instruction",
}
.fail();
};
// Note: the order of handling is important.
if error.is_some() {
return error::RetryLaterSnafu {
reason: format!(
"Failed to upgrade the region {} on datanode {:?}, error: {:?}",
region_id, candidate, error
),
}
.fail();
}
ensure!(
exists,
error::UnexpectedSnafu {
violated: format!(
"Expected region {} doesn't exist on datanode {:?}",
region_id, candidate
)
}
);
if self.require_ready && !ready {
return error::RetryLaterSnafu {
reason: format!(
"Candidate region {} still replaying the wal on datanode {:?}",
region_id, candidate
),
}
.fail();
}
Ok(())
}
Err(error::Error::MailboxTimeout { .. }) => {
let reason = format!(
"Mailbox received timeout for upgrade candidate region {region_id} on datanode {:?}",
candidate,
);
error::RetryLaterSnafu { reason }.fail()
}
Err(err) => Err(err),
}
}
/// Upgrades a candidate region.
///
/// Returns true if the candidate region is upgraded successfully.
async fn upgrade_region_with_retry(&self, ctx: &Context) -> bool {
let upgrade_instruction = self.build_upgrade_region_instruction(ctx);
let mut retry = 0;
let mut upgraded = false;
loop {
if let Err(err) = self.upgrade_region(ctx, &upgrade_instruction).await {
retry += 1;
if err.is_retryable() && retry < self.optimistic_retry {
warn!("Failed to upgrade region, error: {err:?}, retry later");
sleep(self.retry_initial_interval).await;
} else {
break;
}
} else {
upgraded = true;
break;
}
}
upgraded
}
}
#[cfg(test)]
mod tests {
use std::assert_matches::assert_matches;
use api::v1::meta::mailbox_message::Payload;
use common_meta::peer::Peer;
use common_time::util::current_time_millis;
use store_api::storage::RegionId;
use super::*;
use crate::error::Error;
use crate::procedure::region_migration::test_util::{
new_close_region_reply, send_mock_reply, TestingEnv,
};
use crate::procedure::region_migration::{ContextFactory, PersistentContext};
fn new_persistent_context() -> PersistentContext {
PersistentContext {
from_peer: Peer::empty(1),
to_peer: Peer::empty(2),
region_id: RegionId::new(1024, 1),
cluster_id: 0,
}
}
fn new_upgrade_region_reply(
id: u64,
ready: bool,
exists: bool,
error: Option<String>,
) -> MailboxMessage {
MailboxMessage {
id,
subject: "mock".to_string(),
from: "datanode".to_string(),
to: "meta".to_string(),
timestamp_millis: current_time_millis(),
payload: Some(Payload::Json(
serde_json::to_string(&InstructionReply::UpgradeRegion(UpgradeRegionReply {
ready,
exists,
error,
}))
.unwrap(),
)),
}
}
#[tokio::test]
async fn test_datanode_is_unreachable() {
let state = UpgradeCandidateRegion::default();
let persistent_context = new_persistent_context();
let env = TestingEnv::new();
let ctx = env.context_factory().new_context(persistent_context);
let instruction = &state.build_upgrade_region_instruction(&ctx);
let err = state.upgrade_region(&ctx, instruction).await.unwrap_err();
assert_matches!(err, Error::PusherNotFound { .. });
assert!(!err.is_retryable());
}
#[tokio::test]
async fn test_pusher_dropped() {
let state = UpgradeCandidateRegion::default();
let persistent_context = new_persistent_context();
let to_peer_id = persistent_context.to_peer.id;
let mut env = TestingEnv::new();
let ctx = env.context_factory().new_context(persistent_context);
let mailbox_ctx = env.mailbox_context();
let (tx, rx) = tokio::sync::mpsc::channel(1);
mailbox_ctx
.insert_heartbeat_response_receiver(Channel::Datanode(to_peer_id), tx)
.await;
drop(rx);
let instruction = &state.build_upgrade_region_instruction(&ctx);
let err = state.upgrade_region(&ctx, instruction).await.unwrap_err();
assert_matches!(err, Error::PushMessage { .. });
assert!(!err.is_retryable());
}
#[tokio::test]
async fn test_unexpected_instruction_reply() {
let state = UpgradeCandidateRegion::default();
let persistent_context = new_persistent_context();
let to_peer_id = persistent_context.to_peer.id;
let mut env = TestingEnv::new();
let ctx = env.context_factory().new_context(persistent_context);
let mailbox_ctx = env.mailbox_context();
let mailbox = mailbox_ctx.mailbox().clone();
let (tx, rx) = tokio::sync::mpsc::channel(1);
mailbox_ctx
.insert_heartbeat_response_receiver(Channel::Datanode(to_peer_id), tx)
.await;
send_mock_reply(mailbox, rx, |id| Ok(new_close_region_reply(id)));
let instruction = &state.build_upgrade_region_instruction(&ctx);
let err = state.upgrade_region(&ctx, instruction).await.unwrap_err();
assert_matches!(err, Error::UnexpectedInstructionReply { .. });
assert!(!err.is_retryable());
}
#[tokio::test]
async fn test_upgrade_region_failed() {
let state = UpgradeCandidateRegion::default();
let persistent_context = new_persistent_context();
let to_peer_id = persistent_context.to_peer.id;
let mut env = TestingEnv::new();
let ctx = env.context_factory().new_context(persistent_context);
let mailbox_ctx = env.mailbox_context();
let mailbox = mailbox_ctx.mailbox().clone();
let (tx, rx) = tokio::sync::mpsc::channel(1);
mailbox_ctx
.insert_heartbeat_response_receiver(Channel::Datanode(to_peer_id), tx)
.await;
// A reply contains an error.
send_mock_reply(mailbox, rx, |id| {
Ok(new_upgrade_region_reply(
id,
true,
true,
Some("test mocked".to_string()),
))
});
let instruction = &state.build_upgrade_region_instruction(&ctx);
let err = state.upgrade_region(&ctx, instruction).await.unwrap_err();
assert_matches!(err, Error::RetryLater { .. });
assert!(err.is_retryable());
assert!(err.to_string().contains("test mocked"));
}
#[tokio::test]
async fn test_upgrade_region_not_found() {
let state = UpgradeCandidateRegion::default();
let persistent_context = new_persistent_context();
let to_peer_id = persistent_context.to_peer.id;
let mut env = TestingEnv::new();
let ctx = env.context_factory().new_context(persistent_context);
let mailbox_ctx = env.mailbox_context();
let mailbox = mailbox_ctx.mailbox().clone();
let (tx, rx) = tokio::sync::mpsc::channel(1);
mailbox_ctx
.insert_heartbeat_response_receiver(Channel::Datanode(to_peer_id), tx)
.await;
send_mock_reply(mailbox, rx, |id| {
Ok(new_upgrade_region_reply(id, true, false, None))
});
let instruction = &state.build_upgrade_region_instruction(&ctx);
let err = state.upgrade_region(&ctx, instruction).await.unwrap_err();
assert_matches!(err, Error::Unexpected { .. });
assert!(!err.is_retryable());
assert!(err.to_string().contains("doesn't exist"));
}
#[tokio::test]
async fn test_upgrade_region_require_ready() {
let mut state = UpgradeCandidateRegion {
require_ready: true,
..Default::default()
};
let persistent_context = new_persistent_context();
let to_peer_id = persistent_context.to_peer.id;
let mut env = TestingEnv::new();
let ctx = env.context_factory().new_context(persistent_context);
let mailbox_ctx = env.mailbox_context();
let mailbox = mailbox_ctx.mailbox().clone();
let (tx, rx) = tokio::sync::mpsc::channel(1);
mailbox_ctx
.insert_heartbeat_response_receiver(Channel::Datanode(to_peer_id), tx)
.await;
send_mock_reply(mailbox, rx, |id| {
Ok(new_upgrade_region_reply(id, false, true, None))
});
let instruction = &state.build_upgrade_region_instruction(&ctx);
let err = state.upgrade_region(&ctx, instruction).await.unwrap_err();
assert_matches!(err, Error::RetryLater { .. });
assert!(err.is_retryable());
assert!(err.to_string().contains("still replaying the wal"));
// Sets `require_ready` to false.
state.require_ready = false;
let mailbox = mailbox_ctx.mailbox().clone();
let (tx, rx) = tokio::sync::mpsc::channel(1);
mailbox_ctx
.insert_heartbeat_response_receiver(Channel::Datanode(to_peer_id), tx)
.await;
send_mock_reply(mailbox, rx, |id| {
Ok(new_upgrade_region_reply(id, false, true, None))
});
let instruction = &state.build_upgrade_region_instruction(&ctx);
state.upgrade_region(&ctx, instruction).await.unwrap();
}
#[tokio::test]
async fn test_upgrade_region_with_retry_ok() {
let mut state = Box::<UpgradeCandidateRegion>::default();
state.retry_initial_interval = Duration::from_millis(100);
let persistent_context = new_persistent_context();
let to_peer_id = persistent_context.to_peer.id;
let mut env = TestingEnv::new();
let mut ctx = env.context_factory().new_context(persistent_context);
let mailbox_ctx = env.mailbox_context();
let mailbox = mailbox_ctx.mailbox().clone();
let (tx, mut rx) = tokio::sync::mpsc::channel(1);
mailbox_ctx
.insert_heartbeat_response_receiver(Channel::Datanode(to_peer_id), tx)
.await;
common_runtime::spawn_bg(async move {
let resp = rx.recv().await.unwrap().unwrap();
let reply_id = resp.mailbox_message.unwrap().id;
mailbox
.on_recv(
reply_id,
Err(error::MailboxTimeoutSnafu { id: reply_id }.build()),
)
.await
.unwrap();
// retry: 1
let resp = rx.recv().await.unwrap().unwrap();
let reply_id = resp.mailbox_message.unwrap().id;
mailbox
.on_recv(
reply_id,
Ok(new_upgrade_region_reply(reply_id, false, true, None)),
)
.await
.unwrap();
// retry: 2
let resp = rx.recv().await.unwrap().unwrap();
let reply_id = resp.mailbox_message.unwrap().id;
mailbox
.on_recv(
reply_id,
Ok(new_upgrade_region_reply(reply_id, true, true, None)),
)
.await
.unwrap();
});
let next = state.next(&mut ctx).await.unwrap();
let update_metadata = next.as_any().downcast_ref::<UpdateMetadata>().unwrap();
assert_matches!(update_metadata, UpdateMetadata::Upgrade);
}
#[tokio::test]
async fn test_upgrade_region_with_retry_failed() {
let mut state = Box::<UpgradeCandidateRegion>::default();
state.retry_initial_interval = Duration::from_millis(100);
let persistent_context = new_persistent_context();
let to_peer_id = persistent_context.to_peer.id;
let mut env = TestingEnv::new();
let mut ctx = env.context_factory().new_context(persistent_context);
let mailbox_ctx = env.mailbox_context();
let mailbox = mailbox_ctx.mailbox().clone();
let (tx, mut rx) = tokio::sync::mpsc::channel(1);
mailbox_ctx
.insert_heartbeat_response_receiver(Channel::Datanode(to_peer_id), tx)
.await;
common_runtime::spawn_bg(async move {
let resp = rx.recv().await.unwrap().unwrap();
let reply_id = resp.mailbox_message.unwrap().id;
mailbox
.on_recv(
reply_id,
Err(error::MailboxTimeoutSnafu { id: reply_id }.build()),
)
.await
.unwrap();
// retry: 1
let resp = rx.recv().await.unwrap().unwrap();
let reply_id = resp.mailbox_message.unwrap().id;
mailbox
.on_recv(
reply_id,
Ok(new_upgrade_region_reply(reply_id, false, true, None)),
)
.await
.unwrap();
// retry: 2
let resp = rx.recv().await.unwrap().unwrap();
let reply_id = resp.mailbox_message.unwrap().id;
mailbox
.on_recv(
reply_id,
Ok(new_upgrade_region_reply(reply_id, false, false, None)),
)
.await
.unwrap();
});
let next = state.next(&mut ctx).await.unwrap();
let update_metadata = next.as_any().downcast_ref::<UpdateMetadata>().unwrap();
assert_matches!(update_metadata, UpdateMetadata::Rollback);
}
}
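For reference, the optimistic-retry loop in `upgrade_region_with_retry` reduces to the pattern below: retry at most `optimistic_retry` attempts, sleeping `retry_initial_interval` between them (the real loop also bails out immediately on non-retryable errors). A self-contained sketch with a toy fallible operation standing in for the mailbox call:

use std::time::Duration;

// Toy stand-in for the mailbox call: fails (retryably) until `succeed_at`.
struct FlakyUpgrade {
    attempts: u32,
    succeed_at: u32,
}

impl FlakyUpgrade {
    fn upgrade(&mut self) -> Result<(), &'static str> {
        self.attempts += 1;
        if self.attempts >= self.succeed_at {
            Ok(())
        } else {
            Err("candidate region still replaying the wal")
        }
    }
}

// Mirrors `upgrade_region_with_retry`: returns true on success, false once
// the retry budget is exhausted.
async fn upgrade_with_retry(
    op: &mut FlakyUpgrade,
    optimistic_retry: u32,
    retry_interval: Duration,
) -> bool {
    let mut retry = 0;
    loop {
        match op.upgrade() {
            Ok(()) => return true,
            Err(err) => {
                retry += 1;
                if retry < optimistic_retry {
                    eprintln!("Failed to upgrade region, error: {err}, retry later");
                    tokio::time::sleep(retry_interval).await;
                } else {
                    return false;
                }
            }
        }
    }
}

#[tokio::main]
async fn main() {
    let mut op = FlakyUpgrade { attempts: 0, succeed_at: 2 };
    assert!(upgrade_with_retry(&mut op, 3, Duration::from_millis(10)).await);
}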


@@ -235,6 +235,7 @@ async fn test_on_datanode_drop_regions() {
schema: "my_schema".to_string(), schema: "my_schema".to_string(),
table: "my_table".to_string(), table: "my_table".to_string(),
table_id: 42, table_id: 42,
drop_if_exists: false,
}; };
let (region_server, mut rx) = EchoRegionServer::new(); let (region_server, mut rx) = EchoRegionServer::new();

Some files were not shown because too many files have changed in this diff.