Compare commits

...

95 Commits

Author SHA1 Message Date
LFC
b2a09c888a feat: phi accrual failure detector (#1200) 2023-03-21 11:47:47 +08:00
LFC
af101480b3 feat: add gRPC reflection service (#1208)
* feat: add gRPC reflection service

* feat: add gRPC reflection service
2023-03-21 11:23:29 +08:00
Weny Xu
b8f7f603cf test: add copy clause sqlness tests (#1198) 2023-03-21 11:22:26 +08:00
dennis zhuang
8fb97ea1d8 fix: losing region numbers after altering table (#1209) 2023-03-21 11:19:43 +08:00
discord9
21ce9c1163 docs: more explain in readme (#1195)
* docs: more explain in readme

* fix: typos

* fix: CR advices
2023-03-20 21:56:34 +08:00
Ruihang Xia
0a22375ac1 fix: nyc-taxi bench suite (#1204)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-03-20 21:53:01 +08:00
fys
0596d20a3b fix: can not create table in the local distributed environment (#1207)
fix: create table in local distribute env
2023-03-20 20:12:35 +08:00
Weny Xu
e19c8fa2b6 refactor: combine Copy To and Copy From (#1197)
* refactor: combine Copy To and Copy From

* Apply suggestions from code review

Co-authored-by: LFC <bayinamine@gmail.com>

* Apply suggestions from code review

Co-authored-by: Lei, HUANG <6406592+v0y4g3r@users.noreply.github.com>

---------

Co-authored-by: LFC <bayinamine@gmail.com>
Co-authored-by: Lei, HUANG <6406592+v0y4g3r@users.noreply.github.com>
2023-03-20 19:23:25 +08:00
LFC
ad886f5b3e feat: GRPC client stream interface for insertion (#1206)
* feat: GRPC client stream interface for insertion

* feat: GRPC client stream interface for insertion
2023-03-20 18:45:37 +08:00
LFC
f6669a8201 feat: add GRPC unary call service to our GreptimeDB (#1196)
* feat: add GRPC unary call service to our GreptimeDB
2023-03-20 14:27:32 +08:00
Yingwen
ad5c47185d feat: wait flush until the flush is done (#1188)
* feat: Add wait argument to flush

* test(storage): Fix flush tests
2023-03-20 11:25:19 +08:00
zyy17
64441616db ci: refactor compile-python.sh and use the python310 to build amd64 binary (#1199) 2023-03-18 16:16:15 +08:00
zyy17
09491d6aee ci: release the standalone binaries with pyo3 and install python utils in images (#1194)
* ci: install python3 and python3-dev in CI Dockerfile

* ci: release the standalone binaries with pyo3 support for multiple platforms

* refactor: install pip and pyarrow

* refactor: specify the python version
2023-03-17 15:42:13 +08:00
Weny Xu
7cfa30b2ab feat: add shutdown for standalone and metasrv (#1174) 2023-03-17 11:35:17 +08:00
Ning Sun
a7676d8860 refactor: port div_ceil from stdlib to avoid unstable features (#1191)
* refactor: use float div&ceil to avoid unstable features

* refactor: port div_ceil from rust stdlib
2023-03-16 22:55:35 +08:00
zyy17
62e2a60b7b ci: release artifacts after binary and container is ready (#1192)
ci: release artifacts before binary and container is ready
2023-03-16 09:20:03 +00:00
zyy17
128c5cabe1 ci: disable run tests temporarily (#1187) 2023-03-16 14:12:19 +08:00
Yingwen
9a001d3392 chore(datanode): derive serde default for Wal/CompactionConfig (#1173) 2023-03-16 11:56:28 +08:00
Weny Xu
facdda4d9f feat: implement CONNECTION clause of Copy To (#1163)
* feat: implement CONNECTION clause of Copy To

* test: add tests for s3 backend

* Apply suggestions from code review

Co-authored-by: Yingwen <realevenyag@gmail.com>

---------

Co-authored-by: Yingwen <realevenyag@gmail.com>
2023-03-16 11:36:38 +08:00
Lei, HUANG
17eb99bc52 feat: allow manual table flush through HTTP API (#1184) 2023-03-15 20:15:34 +08:00
Xieqijun
cd8be77968 feat(procedure): Max retry time (#1095)
* feat: procedure config

* fix: modify config

* feat: add retry logic

* feat: add error

* feat: add it

* feat: add it

* feat: add it

* feat: rm retry from runner

* feat: use backon

* feat: add retry_interval

* feat: add retry_interval

* fix: conflict

* fix: cr

* feat: add retry error and id

* feat: rename

* refactor: execute

* feat: use config dir

* fix: cr

* fix: cr

* fix: fmt

* fix: fmt

* fix: pr

* fix: it

* fix: rm unless cmd params

* feat: add toml

* fix: ut

* feat: add rolling back

* fix: cr

* fix: cr

* fix: cr

* fix: ci

* fix: ci

* fix: ci

* chore: Apply suggestions from code review

---------

Co-authored-by: Yingwen <realevenyag@gmail.com>
2023-03-15 08:28:08 +00:00
Eugene Tolbakov
b530ac9e60 chore(from_unixtime): remove UDF from_unixtime (#1179)
* chore(from_unixtime): remove UDF from_unixtime

* chore(from_unixtime): restore timestamp.rs for further usage

* chore(from_unixtime): address fmt issue
2023-03-15 16:27:09 +08:00
zyy17
76f1a79f1b ci: set 'continue-on-error' to false since the problem of compiling binary was resolved (#1182)
Signed-off-by: zyy17 <zyylsxm@gmail.com>
2023-03-15 15:41:36 +08:00
LFC
4705245d60 docs: region failover RFC (#1139)
* docs: region failover RFC

* fix: resolve PR comments
2023-03-15 15:21:58 +08:00
Zheming Li
f712f978cf feat: Report disk usage stats to metasrv thru heartbeat (#1167)
* feat: Report disk usage stats to metasrv thru heartbeat

Signed-off-by: Zheming Li <nkdudu@126.com>

* Update src/catalog/src/error.rs

Co-authored-by: fys <40801205+Fengys123@users.noreply.github.com>

* Update src/catalog/src/lib.rs

Co-authored-by: fys <40801205+Fengys123@users.noreply.github.com>

* Update src/mito/src/table.rs

Co-authored-by: fys <40801205+Fengys123@users.noreply.github.com>

---------

Signed-off-by: Zheming Li <nkdudu@126.com>
Co-authored-by: fys <40801205+Fengys123@users.noreply.github.com>
2023-03-15 03:11:32 +00:00
discord9
cbf64e65b9 refactor: put dataframe & query into greptime module (#1172)
* feat: impl getitem for `vector`

* feat: mv `query`&`dataframe` into `greptime` for PyO3

* refactor: allow call dataframe&query

* refactor: pyo3 query&dataframe

* chore: CR advices
2023-03-15 11:01:43 +08:00
zyy17
242ce5c2aa ci: add pyo3 options for mac (#1178) 2023-03-14 13:51:58 +00:00
Ruihang Xia
e8d2e82335 fix: ambiguous column reference (#1177)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-03-14 13:18:43 +00:00
zyy17
0086cc2d3d fix: export 'PYO3_CROSS_LIB_DIR' when cargo build for aarch64-linux and refactor matrix opts (#1171) 2023-03-14 15:35:29 +08:00
Weny Xu
cdc111b607 refactor: make the cmd hold the application instance (#1159) 2023-03-14 15:18:50 +08:00
zyy17
81ca1d8399 refactor: add the separate GitHub Action job to push the image to the UCloud registry (#1170) 2023-03-14 11:35:18 +08:00
LFC
8d3999df5f fix: failed to run subquery wrapped in two parentheses (#1157) 2023-03-14 10:59:43 +08:00
discord9
a60788e92e fix: use correct env var (#1166)
* fix: use correct env var

* fix: move COPY up so rustup know it's nightly

* fix: add `pyo3_backend` in GHA yml

* chore: name for `TODO`

* temp: not set `pyo3_backend` before find DSO

* fix: release linux with pyo3_backend
2023-03-14 10:57:13 +08:00
Weny Xu
296c6dfcbf feat: implement table flush (#1121)
* feat: add flush method for trait

* feat: implement flush via grpc

* chore: move table_dir/region_name/region_id to table crate

* chore: Update src/mito/src/table.rs

---------

Co-authored-by: Yingwen <realevenyag@gmail.com>
2023-03-13 20:10:37 +08:00
LFC
604c20a83d refactor: remove the SQL execution interfaces in Datanode (#1135)
* refactor: remove the SQL execution interfaces in Datanode
2023-03-13 18:45:30 +08:00
Weny Xu
c7f114c8fa feat: add shutdown for frontend (#1161) 2023-03-13 17:59:36 +08:00
Weny Xu
8a83de4ea5 feat: add shutdown for datanode (#1160) 2023-03-13 17:49:26 +08:00
discord9
3377930a50 build: add cross compile docker (#1156)
* build: add cross compile docker

* build: added compile python to github action

* fix: correct path

* fix: Python Compile

* fix: run mulitple cmds

* fix: both cross compile docker file&github action

* refactor: compile-python.sh

* chore: put wget install together

* fix: CR advices

* chore: add `-F pyo3_backend`
2023-03-13 16:56:03 +08:00
Ning Sun
85dd7e4f24 feat: implement promql query on grpc (#1134)
* feat: implement promql query on grpc

* test: resolve test errors

* test: add tests for promql grpc api

* refactor: align prom object name with proto

* chore: switch proto revision to main
2023-03-13 15:24:34 +08:00
LFC
f790fa05c1 fix: validate insert request (#1142)
* fix: validate GRPC insert request has the value when required by column schema, before actually made any change to the DB

* fix: resolve PR comments
2023-03-13 11:03:51 +08:00
Yingwen
dfd91a1bf8 chore: Bump version to 0.1.1 (#1155) 2023-03-11 01:11:23 +08:00
localhost
ded31fb069 chore: remove addr from datanode error message (#1152)
* chore: remove addr from datanode error message

* chore: add log for flight get error
2023-03-10 14:13:01 +08:00
Ning Sun
6a574fc52b chore: update script prompt (#1154) 2023-03-10 10:51:38 +08:00
Ning Sun
58bdf27068 fix: make pyo3 optional again (#1153)
* fix: make pyo3 optional again

* Update src/script/Cargo.toml

Co-authored-by: dennis zhuang <killme2008@gmail.com>

---------

Co-authored-by: dennis zhuang <killme2008@gmail.com>
2023-03-09 14:16:48 +00:00
discord9
610a895b66 feat: abi3 & abi37 (#1151) 2023-03-09 20:08:07 +08:00
zyy17
a9ccc06449 ci: modify scheduled release tag to 'v0.2.0-nightly-yymmdd' (#1149)
* ci: modify scheduled release tag to 'v0.2.0-nightly-yymmdd'

Signed-off-by: zyy17 <zyylsxm@gmail.com>

* refactor: modify 'profile.weekly' to 'profile.nightly'

Signed-off-by: zyy17 <zyylsxm@gmail.com>

---------

Signed-off-by: zyy17 <zyylsxm@gmail.com>
2023-03-09 16:42:40 +08:00
Ruihang Xia
38fe1a2f01 chore: update dependencies (#1148)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-03-09 14:24:29 +08:00
fys
3414ac46b0 fix: remove unnecessary braces around block return value (#1147) 2023-03-09 03:42:04 +00:00
Lei, HUANG
757b4a87a0 fix: remove profile in mem-prof crate to suppress compiler warnings (#1146) 2023-03-09 03:39:24 +00:00
Yingwen
ba1517fceb ci: Fix step "build and push amd64" not triggered (#1145) 2023-03-09 11:35:38 +08:00
Yingwen
5b5d953d56 ci: tolerate error while building arm64 releases (#1143)
* ci: allow failure while building arm64 docker

* ci: Remove continue-on-error on docker step
2023-03-08 21:11:40 +08:00
Yingwen
3f6cbc378d ci: Disable arm64 release temporarily (#1141) 2023-03-08 19:13:00 +08:00
Yingwen
9619940569 ci: Allow error when building release for non-x86 platform (#1140) 2023-03-08 18:12:06 +08:00
Weny Xu
ed8252157a chore: code styling (#1137)
Co-authored-by: Ruihang Xia <waynestxia@gmail.com>
2023-03-08 08:10:12 +00:00
Ruihang Xia
3e0fb7e75b test: ignore two test cases due to arrow-datafusion#5513 (#1138)
* test: ignore two test cases due to arrow-datafusion#5513

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* clean up

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-03-08 07:29:34 +00:00
Bohan Wu
ba3ce436df refactor(SST): UUID as id in FileMeta (#1116)
* feat(SST): use a newType named FileId for FileMeta

* chore: rename some functions

* fix: compatible for previous FileMeta format

* fix: alias for file_id when getting deserialized
2023-03-08 14:27:20 +08:00
Eugene Tolbakov
b31a6cb506 refactor: replace tempdir with tempfile (#1123)
* refactor: replace tempdir with tempfile

* refactor(query): move tempfile dependency under the workspace's Cargo.toml

* refactor(tempfile): create common-test-util

* refactor(tempfile): fix toml format

* refactor(tempfile): remove tempfile out of dependencies

* refactor(tempfile): fix incorrect toml
2023-03-08 11:15:56 +08:00
SSebo
95090592f0 feat: mysql prepare replacing sql placeholder to param (#1086)
* feat: mysql prepare by replace ? in sql to param

* chore: mysql prepare statment support time param

* chore: prepare test more types

* chore: add TODO
2023-03-08 11:02:29 +08:00
Ruihang Xia
3a527c0fd5 feat: impl proc macro range_fn and some aggr_over_time functions (#1072)
* impl range_fn proc macro

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* impl some aggr_over_time fn

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* impl present_over_time and absent_over_time

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* accomplish planner, and correct type cast

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* clean up

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* document the macro

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix styles

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* update irate/idelta test

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add test cases

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix clippy

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-03-07 23:39:45 +08:00
elijah
819b60ca13 feat(datatypes): implement VectorOp::take (#1115)
* feat: add take index method for VectorOp

* chore: make clippy happy

* chore: make clippy happy

* chore: improve the code

* chore: improve the code

* chore: add take null test

* chore: fix clippy
2023-03-07 19:27:33 +08:00
Weny Xu
7169fe2989 feat: implement Copy From (#1064) 2023-03-07 17:54:11 +08:00
Zheming Li
b70672be77 feat: track disk usage of regions (#1125)
* feat: track disk usage of regions

Signed-off-by: Zheming Li <nkdudu@126.com>

* calculate disk usage when call

* add default on file meta

---------

Signed-off-by: Zheming Li <nkdudu@126.com>
2023-03-07 17:13:12 +08:00
Lei, HUANG
a4c01f4a3a feat: memory profiling (#1124)
* feat: use jemalloc as default allocator

* feat: add feature for mem-prof

* feat: add errors

* make common-mem-prof optional dep

* fix: toml format

* doc: add profile doc

* fix: typo
2023-03-07 17:12:51 +08:00
Weny Xu
bd98a26cca chore: bump greptime-proto to latest(ad01872) (#1102) 2023-03-07 10:52:42 +08:00
shuiyisong
1b4236d698 refactor: use split instead of serde_urlencoded in http auth (#1110)
* refactor: change from urlencoded to regex

* refactor: change from urlencoded to regex

* chore: add unit test

* chore: update comment

* chore: remove local benchmark test

* chore: minor fix

* chore: remove unused dep
2023-03-07 10:51:47 +08:00
Lei, HUANG
e8cc9b4b29 test: add manifest compatibility tests (#1130)
* tests: add manifest compatibility tests

* fix: clippy
2023-03-06 19:31:54 +08:00
discord9
379f581780 test: add Integrated Test for Coprocessor& fix minor bugs (#1122)
* feat: cache `Runtime`

* fix: coprstream schema not set

* test: integrated tests for Coprocessor

* fix: UDF fixed

* style: remove unused import

* chore: remove more unused import

* feat: `filter`, (r)floordiv for Vector

* chore: CR advices

* feat: auto convert to `lit`

* chore: fix typo

* feat: from&to `pyarrow.array`

* feat: allow `pyarrow.array` as args to builtins

* chore: cargo fmt

* test: CI add `pyarrow`

* test: install Python&PyArrow in CI

* test: not cache depend for now

* chore: CR advices

* test: fix name

* style: rename
2023-03-06 19:20:59 +08:00
fys
ff6cfe8e70 refactor: move the batch_get to KvStore trait (#1029)
* move batch_get from KvStoreExt to KvStore

* add some unit tests

* add some unit test

* add some unit tests

* expose batch_get grpc method
2023-03-06 17:35:43 +08:00
Igor Morozov
5a397917c0 docs(contributingmd): add run tests commands (#1129)
* docs(contributingmd): add run tests commands

* docs(contributingmd): add link to nextest website

Co-authored-by: dennis zhuang <killme2008@gmail.com>

---------

Co-authored-by: dennis zhuang <killme2008@gmail.com>
2023-03-06 15:54:16 +08:00
fys
559880cb84 fix: can not find catalog when create table (#1118)
* fix: get catalog by name in RemoteCatalogManager

* cr

* cr

* cr

* fix: ut failed
2023-03-06 14:44:40 +08:00
Ruihang Xia
b76b27f3bf refactor: try to remove unnecessary tests in error mod (#750)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-03-06 12:31:30 +08:00
yuanbohan
d4e0dc3685 feat: specify prom server start addr (#1111)
* feat: specify promql server start addr

* refactor: rename promql to prom in Prometheus API server scenario
2023-03-06 11:07:21 +08:00
Eugene Tolbakov
b022556b79 fix: apply ttl and write_buffer_size options when a table is created via procedure (#1117)
* fix: apply ttl and write_buffer_size options when a table is created via procedure

* fix: address code review suggestion

* fix: use borrowing of table_options correctly
2023-03-05 19:37:23 +08:00
shuiyisong
bd065ea6e8 fix: remove incorrect continue (#1114) 2023-03-02 19:52:17 +08:00
yuanbohan
9a87f5edf8 fix(grpc): support timestamp precision (#1113) 2023-03-02 17:33:59 +08:00
Weny Xu
e851b6d019 feat: implement Copy From parser (#1092)
* feat: implement Copy From parser

* Apply suggestions from code review

Co-authored-by: Yingwen <realevenyag@gmail.com>

---------

Co-authored-by: Lei, HUANG <6406592+v0y4g3r@users.noreply.github.com>
Co-authored-by: Yingwen <realevenyag@gmail.com>
2023-03-02 14:03:13 +08:00
Ruihang Xia
e7b92f24e8 feat: impl EmptyMetric plan and time() function (#1100)
* impl EmptyMetric plan

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add test cases

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* impl planner part

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* adapt new datafusion changes

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix typo

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-03-02 03:15:55 +00:00
Igor Morozov
4b8db408cf style(contributingmd): fix markdown issues and typos (#1107)
* style(contributingmd): fix markdown issues and typos

* style(contributingmd): remove code blocks in lists
2023-03-01 20:00:36 +08:00
Yingwen
98659899c0 refactor: Move mito engine tests to a separate file (#1104)
* refactor(mito): Move tests to a separate file

* chore(query): Remove empty mod function
2023-03-01 11:46:39 +00:00
Ruihang Xia
b1311801da ci: update breaking-change labeler (#1109)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-03-01 19:24:21 +08:00
Yingwen
f1b65d9b77 test: fix datanode::test_read_from_config_file (#1106)
* test: Fix datanode::test_read_from_config_file

* test: frontend and metasrv don't read example toml file
2023-03-01 18:31:40 +08:00
Ruihang Xia
d5a2a26916 chore(deps): bump sqlness to v0.4 (#1101)
deps: bump sqlness to v0.4

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-03-01 17:27:16 +08:00
Ning Sun
8e7e68708f docs: correct readme format (#1105)
* docs: correct readme format

* ci: fix config name
2023-03-01 16:59:11 +08:00
Ruihang Xia
9c1118b06d ci: adjust title labeler's rule (#1079)
* ci: adjust title labeler's rule

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* Apply suggestions from code review

Co-authored-by: Yingwen <realevenyag@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
Co-authored-by: Yingwen <realevenyag@gmail.com>
2023-03-01 15:16:21 +08:00
Yingwen
3fb93efbd0 docs: Document fields in the config examples (#1098)
* docs: Add comments to standalone config example

* docs: Add comments to datanode config example

* docs: Add comments to frontend config example

* docs: Add comments to meta-srv config example

* docs: Use "GB" instead of "GiB"

* docs: Add link to the selector doc

* docs: Fix grammar
2023-03-01 15:14:08 +08:00
Yingwen
3fd9c2f144 feat: Store error in procedure state (#1062)
* docs: Change comment position

* refactor(procedure): Store error in ProcedureState

* test: Mock instance with procedure enabled

* feat: Add wait method to wait for procedure

* test(datanode): Test create table by procedure

* chore: Fix clippy
2023-03-01 14:37:50 +08:00
Ning Sun
75e48c5f20 ci: fix apidoc generation 2023-03-01 14:09:47 +08:00
Ning Sun
d402f83442 ci: generate apidocs when pushing to default branch (#1093)
* ci: generate apidocs when pushing to default branch

* ci: require clippy before running tests

* fix: resolve new clippy warnings on primitive slice

* fix: resolve more clippy warnings

* Update .github/workflows/apidoc.yml

Co-authored-by: Ruihang Xia <waynestxia@gmail.com>

* ci: add an index html to redirect

---------

Co-authored-by: Ruihang Xia <waynestxia@gmail.com>
2023-03-01 13:18:26 +08:00
discord9
c5c6494e0b feat: add PyO3(Hence CPython as a Optional Backend (#976)
* refactor: ffi_types

* style: fmt

* refactor: use `String` for return when possible

* todo: vector_impl

* feat: pyobj_try_typed_val

* refactor: more backend indep function

* feat: +-*/ magic methods

* refactor: copr

* style: fmt

* feat: add paired tests

* refactor: more

* refactor: move inside `python` folder

* refactor: all but test code

* feat: builtins for PyO3

* chore: add licenses

* chore: remove unused&add todos

* refactor: remove old files

* chore: mark unused

* chore: fmt

* chore: license

* feat: query in PyO3

* test: paired testcases for rspy&pyo3

* feat: PyDataFrame(Untested)

* feat: some allow_threads

* style: fmt

* style: add license

* feat: rebase manually of #962

* feat: more `allow_threads`

* chore: typo

* chore: remove some `TODO`

* test: allow margin of epsilon

* chore: code review advices

* chore: more CR adjust

* chore: more adjust

* feat: kwargs&its test

* chore: remove some `dbg!`

* chore: allow params

* fix: put `dataframe` into scope

* chore: newline

* fix: adjust after rebase

* fix: test serde skip attr

* style: taplo

* feat: add `pyo3_backend` feature

* doc: update CI&readme
2023-03-01 10:45:55 +08:00
shuiyisong
dc50095af3 fix: use catalog from connection (#1099)
* fix: using schema instead of full database

* fix: using schema instead of full database

* fix: using schema instead of full database

* chore: add debug log

* chore: remove debug log

* chore: remove debug log

* chore: fix cr
2023-03-01 10:34:57 +08:00
LFC
8cd69f441e feat: REPL issues logical plan to DB (#1097) 2023-02-28 16:59:48 +08:00
Weny Xu
f52fc9b7d4 fix: fix panic when the root is not specified (#1089) 2023-02-28 10:54:52 +08:00
shuiyisong
50d2685365 fix: fix catalog parsing issue (#1091)
fix: try fix catalog parsing issue
2023-02-27 22:51:49 +08:00
LFC
11d45e2918 refactor: upgrade DataFusion, Arrow and Sqlparser (#1074)
* refactor: upgrade DataFusion, Arrow and Sqlparser

* fix: resolve PR comments
2023-02-27 22:20:08 +08:00
shuiyisong
30287e7e41 fix: continue if parsing err catalog (#1090)
* fix: continue if parsing err catalog

* fix: change from warn to error
2023-02-27 11:28:45 +00:00
391 changed files with 17184 additions and 7013 deletions


@@ -2,7 +2,7 @@
GT_S3_BUCKET=S3 bucket
GT_S3_ACCESS_KEY_ID=S3 access key id
GT_S3_ACCESS_KEY=S3 secret access key
GT_S3_ENDPOINT_URL=S3 endpoint url
# Settings for oss test
GT_OSS_BUCKET=OSS bucket
GT_OSS_ACCESS_KEY_ID=OSS access key id


@@ -0,0 +1,13 @@
{
"LABEL": {
"name": "breaking change",
"color": "D93F0B"
},
"CHECKS": {
"regexp": "^(?:(?!!:).)*$",
"ignoreLabels": [
"ignore-title"
],
"alwaysPassCI": true
}
}


@@ -1,10 +1,12 @@
{
"LABEL": {
"name": "Invalid PR Title",
"color": "B60205"
},
"CHECKS": {
"regexp": "^(feat|fix|test|refactor|chore|style|docs|perf|build|ci|revert)(\\(.*\\))?:.*",
"ignoreLabels" : ["ignore-title"]
}
"LABEL": {
"name": "Invalid PR Title",
"color": "B60205"
},
"CHECKS": {
"regexp": "^(feat|fix|test|refactor|chore|style|docs|perf|build|ci|revert)(\\(.*\\))?\\!?:.*",
"ignoreLabels": [
"ignore-title"
]
}
}

.github/workflows/apidoc.yml (new file, 42 lines)

@@ -0,0 +1,42 @@
on:
push:
branches:
- develop
paths-ignore:
- 'docs/**'
- 'config/**'
- '**.md'
- '.dockerignore'
- 'docker/**'
- '.gitignore'
name: Build API docs
env:
RUST_TOOLCHAIN: nightly-2023-02-26
jobs:
apidoc:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: arduino/setup-protoc@v1
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
- run: cargo doc --workspace --no-deps --document-private-items
- run: |
cat <<EOF > target/doc/index.html
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="refresh" content="0; url='greptime/'" />
</head>
<body></body></html>
EOF
- name: Publish dist directory
uses: JamesIves/github-pages-deploy-action@v4
with:
folder: target/doc


@@ -24,7 +24,7 @@ on:
name: CI
env:
RUST_TOOLCHAIN: nightly-2023-02-14
RUST_TOOLCHAIN: nightly-2023-02-26
jobs:
typos:
@@ -116,6 +116,7 @@ jobs:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest-8-cores
timeout-minutes: 60
needs: [clippy]
steps:
- uses: actions/checkout@v3
- uses: arduino/setup-protoc@v1
@@ -131,7 +132,7 @@ jobs:
ETCD_VER=v3.5.7
DOWNLOAD_URL=https://github.com/etcd-io/etcd/releases/download
curl -L ${DOWNLOAD_URL}/${ETCD_VER}/etcd-${ETCD_VER}-linux-amd64.tar.gz -o /tmp/etcd-${ETCD_VER}-linux-amd64.tar.gz
mkdir -p /tmp/etcd-download
mkdir -p /tmp/etcd-download
tar xzvf /tmp/etcd-${ETCD_VER}-linux-amd64.tar.gz -C /tmp/etcd-download --strip-components=1
rm -f /tmp/etcd-${ETCD_VER}-linux-amd64.tar.gz
@@ -188,6 +189,7 @@ jobs:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest-8-cores
timeout-minutes: 60
needs: [clippy]
steps:
- uses: actions/checkout@v3
- uses: arduino/setup-protoc@v1
@@ -205,10 +207,16 @@ jobs:
uses: Swatinem/rust-cache@v2
- name: Install latest nextest release
uses: taiki-e/install-action@nextest
- name: Install Python
uses: actions/setup-python@v4
with:
python-version: '3.10'
- name: Install PyArrow Package
run: pip install pyarrow
- name: Install cargo-llvm-cov
uses: taiki-e/install-action@cargo-llvm-cov
- name: Collect coverage data
run: cargo llvm-cov nextest --workspace --lcov --output-path lcov.info
run: cargo llvm-cov nextest --workspace --lcov --output-path lcov.info -F pyo3_backend
env:
CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=lld"
RUST_BACKTRACE: 1


@@ -18,3 +18,12 @@ jobs:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
pass_on_octokit_error: false
configuration_path: ".github/pr-title-checker-config.json"
breaking:
runs-on: ubuntu-latest
timeout-minutes: 10
steps:
- uses: thehanimo/pr-title-checker@v1.3.4
with:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
pass_on_octokit_error: false
configuration_path: ".github/pr-title-breaking-change-label-config.json"


@@ -10,15 +10,16 @@ on:
name: Release
env:
RUST_TOOLCHAIN: nightly-2023-02-14
RUST_TOOLCHAIN: nightly-2023-02-26
# FIXME(zyy17): Would be better to use `gh release list -L 1 | cut -f 3` to get the latest release version tag, but for a long time, we will stay at 'v0.1.0-alpha-*'.
SCHEDULED_BUILD_VERSION_PREFIX: v0.1.0-alpha
SCHEDULED_BUILD_VERSION_PREFIX: v0.2.0
# In the future, we can change SCHEDULED_PERIOD to nightly.
SCHEDULED_PERIOD: weekly
SCHEDULED_PERIOD: nightly
CARGO_PROFILE: weekly
CARGO_PROFILE: nightly
## FIXME(zyy17): Enable it after the tests are stabled.
DISABLE_RUN_TESTS: true
jobs:
build:
@@ -30,16 +31,41 @@ jobs:
- arch: x86_64-unknown-linux-gnu
os: ubuntu-2004-16-cores
file: greptime-linux-amd64
continue-on-error: false
- arch: aarch64-unknown-linux-gnu
os: ubuntu-2004-16-cores
file: greptime-linux-arm64
continue-on-error: false
- arch: aarch64-apple-darwin
os: macos-latest
file: greptime-darwin-arm64
continue-on-error: false
- arch: x86_64-apple-darwin
os: macos-latest
file: greptime-darwin-amd64
continue-on-error: false
- arch: x86_64-unknown-linux-gnu
os: ubuntu-2004-16-cores
file: greptime-linux-amd64-pyo3
continue-on-error: false
opts: "-F pyo3_backend"
- arch: aarch64-unknown-linux-gnu
os: ubuntu-2004-16-cores
file: greptime-linux-arm64-pyo3
continue-on-error: false
opts: "-F pyo3_backend"
- arch: aarch64-apple-darwin
os: macos-latest
file: greptime-darwin-arm64-pyo3
continue-on-error: false
opts: "-F pyo3_backend"
- arch: x86_64-apple-darwin
os: macos-latest
file: greptime-darwin-amd64-pyo3
continue-on-error: false
opts: "-F pyo3_backend"
runs-on: ${{ matrix.os }}
continue-on-error: ${{ matrix.continue-on-error }}
if: github.repository == 'GreptimeTeam/greptimedb'
steps:
- name: Checkout sources
@@ -76,10 +102,10 @@ jobs:
ETCD_VER=v3.5.7
DOWNLOAD_URL=https://github.com/etcd-io/etcd/releases/download
curl -L ${DOWNLOAD_URL}/${ETCD_VER}/etcd-${ETCD_VER}-linux-amd64.tar.gz -o /tmp/etcd-${ETCD_VER}-linux-amd64.tar.gz
mkdir -p /tmp/etcd-download
tar xzvf /tmp/etcd-${ETCD_VER}-linux-amd64.tar.gz -C /tmp/etcd-download --strip-components=1
mkdir -p /tmp/etcd-download
tar xzvf /tmp/etcd-${ETCD_VER}-linux-amd64.tar.gz -C /tmp/etcd-download --strip-components=1
rm -f /tmp/etcd-${ETCD_VER}-linux-amd64.tar.gz
sudo cp -a /tmp/etcd-download/etcd* /usr/local/bin/
nohup etcd >/tmp/etcd.log 2>&1 &
@@ -93,7 +119,14 @@ jobs:
if: contains(matrix.arch, 'linux') && endsWith(matrix.arch, '-gnu')
run: |
sudo apt-get -y update
sudo apt-get -y install libssl-dev pkg-config g++-aarch64-linux-gnu gcc-aarch64-linux-gnu
sudo apt-get -y install libssl-dev pkg-config g++-aarch64-linux-gnu gcc-aarch64-linux-gnu binutils-aarch64-linux-gnu wget
# FIXME(zyy17): Should we specify the version of python when building binary for darwin?
- name: Compile Python 3.10.10 from source for linux
if: contains(matrix.arch, 'linux') && contains(matrix.opts, 'pyo3_backend')
run: |
sudo chmod +x ./docker/aarch64/compile-python.sh
sudo ./docker/aarch64/compile-python.sh ${{ matrix.arch }}
- name: Install rust toolchain
uses: dtolnay/rust-toolchain@master
@@ -105,10 +138,55 @@ jobs:
run: protoc --version ; cargo version ; rustc --version ; gcc --version ; g++ --version
- name: Run tests
if: env.DISABLE_RUN_TESTS == 'false'
run: make unit-test integration-test sqlness-test
- name: Run cargo build with pyo3 for aarch64-linux
if: contains(matrix.arch, 'aarch64-unknown-linux-gnu') && contains(matrix.opts, 'pyo3_backend')
run: |
# TODO(zyy17): We should make PYO3_CROSS_LIB_DIR configurable.
export PYTHON_INSTALL_PATH_AMD64=${PWD}/python-3.10.10/amd64
export LD_LIBRARY_PATH=$PYTHON_INSTALL_PATH_AMD64/lib:$LD_LIBRARY_PATH
export LIBRARY_PATH=$PYTHON_INSTALL_PATH_AMD64/lib:$LIBRARY_PATH
export PATH=$PYTHON_INSTALL_PATH_AMD64/bin:$PATH
export PYO3_CROSS_LIB_DIR=${PWD}/python-3.10.10/aarch64
echo "PYO3_CROSS_LIB_DIR: $PYO3_CROSS_LIB_DIR"
alias python=$PYTHON_INSTALL_PATH_AMD64/bin/python3
alias pip=$PYTHON_INSTALL_PATH_AMD64/bin/python3-pip
cargo build --profile ${{ env.CARGO_PROFILE }} --locked --target ${{ matrix.arch }} ${{ matrix.opts }}
- name: Run cargo build with pyo3 for amd64-linux
if: contains(matrix.arch, 'x86_64-unknown-linux-gnu') && contains(matrix.opts, 'pyo3_backend')
run: |
export PYTHON_INSTALL_PATH_AMD64=${PWD}/python-3.10.10/amd64
export LD_LIBRARY_PATH=$PYTHON_INSTALL_PATH_AMD64/lib:$LD_LIBRARY_PATH
export LIBRARY_PATH=$PYTHON_INSTALL_PATH_AMD64/lib:$LIBRARY_PATH
export PATH=$PYTHON_INSTALL_PATH_AMD64/bin:$PATH
echo "implementation=CPython" >> pyo3.config
echo "version=3.10" >> pyo3.config
echo "implementation=CPython" >> pyo3.config
echo "shared=true" >> pyo3.config
echo "abi3=true" >> pyo3.config
echo "lib_name=python3.10" >> pyo3.config
echo "lib_dir=$PYTHON_INSTALL_PATH_AMD64/lib" >> pyo3.config
echo "executable=$PYTHON_INSTALL_PATH_AMD64/bin/python3" >> pyo3.config
echo "pointer_width=64" >> pyo3.config
echo "build_flags=" >> pyo3.config
echo "suppress_build_script_link_lines=false" >> pyo3.config
cat pyo3.config
export PYO3_CONFIG_FILE=${PWD}/pyo3.config
alias python=$PYTHON_INSTALL_PATH_AMD64/bin/python3
alias pip=$PYTHON_INSTALL_PATH_AMD64/bin/python3-pip
cargo build --profile ${{ env.CARGO_PROFILE }} --locked --target ${{ matrix.arch }} ${{ matrix.opts }}
- name: Run cargo build
run: cargo build ${{ matrix.opts }} --profile ${{ env.CARGO_PROFILE }} --locked --target ${{ matrix.arch }}
if: contains(matrix.arch, 'darwin') || contains(matrix.opts, 'pyo3_backend') == false
run: cargo build --profile ${{ env.CARGO_PROFILE }} --locked --target ${{ matrix.arch }} ${{ matrix.opts }}
- name: Calculate checksum and rename binary
shell: bash
@@ -129,48 +207,6 @@ jobs:
with:
name: ${{ matrix.file }}.sha256sum
path: target/${{ matrix.arch }}/${{ env.CARGO_PROFILE }}/${{ matrix.file }}.sha256sum
release:
name: Release artifacts
needs: [build]
runs-on: ubuntu-latest
if: github.repository == 'GreptimeTeam/greptimedb'
steps:
- name: Checkout sources
uses: actions/checkout@v3
- name: Download artifacts
uses: actions/download-artifact@v3
- name: Configure scheduled build version # the version would be ${SCHEDULED_BUILD_VERSION_PREFIX}-YYYYMMDD-${SCHEDULED_PERIOD}, like v0.1.0-alpha-20221119-weekly.
shell: bash
if: github.event_name == 'schedule'
run: |
buildTime=`date "+%Y%m%d"`
SCHEDULED_BUILD_VERSION=${{ env.SCHEDULED_BUILD_VERSION_PREFIX }}-$buildTime-${{ env.SCHEDULED_PERIOD }}
echo "SCHEDULED_BUILD_VERSION=${SCHEDULED_BUILD_VERSION}" >> $GITHUB_ENV
- name: Create scheduled build git tag
if: github.event_name == 'schedule'
run: |
git tag ${{ env.SCHEDULED_BUILD_VERSION }}
- name: Publish scheduled release # configure the different release title and tags.
uses: softprops/action-gh-release@v1
if: github.event_name == 'schedule'
with:
name: "Release ${{ env.SCHEDULED_BUILD_VERSION }}"
tag_name: ${{ env.SCHEDULED_BUILD_VERSION }}
generate_release_notes: true
files: |
**/greptime-*
- name: Publish release
uses: softprops/action-gh-release@v1
if: github.event_name != 'schedule'
with:
name: "Release ${{ github.ref_name }}"
files: |
**/greptime-*
docker:
name: Build docker image
@@ -181,37 +217,6 @@ jobs:
- name: Checkout sources
uses: actions/checkout@v3
- name: Download amd64 binary
uses: actions/download-artifact@v3
with:
name: greptime-linux-amd64
path: amd64
- name: Unzip the amd64 artifacts
run: |
cd amd64
tar xvf greptime-linux-amd64.tgz
rm greptime-linux-amd64.tgz
- name: Download arm64 binary
uses: actions/download-artifact@v3
with:
name: greptime-linux-arm64
path: arm64
- name: Unzip the arm64 artifacts
run: |
cd arm64
tar xvf greptime-linux-arm64.tgz
rm greptime-linux-arm64.tgz
- name: Login to UCloud Container Registry
uses: docker/login-action@v2
with:
registry: uhub.service.ucloud.cn
username: ${{ secrets.UCLOUD_USERNAME }}
password: ${{ secrets.UCLOUD_PASSWORD }}
- name: Login to Dockerhub
uses: docker/login-action@v2
with:
@@ -239,8 +244,36 @@ jobs:
- name: Set up buildx
uses: docker/setup-buildx-action@v2
- name: Build and push
- name: Download amd64 binary
uses: actions/download-artifact@v3
with:
name: greptime-linux-amd64-pyo3
path: amd64
- name: Unzip the amd64 artifacts
run: |
cd amd64
tar xvf greptime-linux-amd64-pyo3.tgz
rm greptime-linux-amd64-pyo3.tgz
- name: Download arm64 binary
id: download-arm64
uses: actions/download-artifact@v3
with:
name: greptime-linux-arm64-pyo3
path: arm64
- name: Unzip the arm64 artifacts
id: unzip-arm64
if: success() || steps.download-arm64.conclusion == 'success'
run: |
cd arm64
tar xvf greptime-linux-arm64-pyo3.tgz
rm greptime-linux-arm64-pyo3.tgz
- name: Build and push all
uses: docker/build-push-action@v3
if: success() || steps.unzip-arm64.conclusion == 'success' # Build and push all platform if unzip-arm64 succeeds
with:
context: .
file: ./docker/ci/Dockerfile
@@ -249,5 +282,105 @@ jobs:
tags: |
greptime/greptimedb:latest
greptime/greptimedb:${{ env.IMAGE_TAG }}
uhub.service.ucloud.cn/greptime/greptimedb:latest
uhub.service.ucloud.cn/greptime/greptimedb:${{ env.IMAGE_TAG }}
- name: Build and push amd64 only
uses: docker/build-push-action@v3
if: success() || steps.download-arm64.conclusion == 'failure' # Only build and push amd64 platform if download-arm64 fails
with:
context: .
file: ./docker/ci/Dockerfile
push: true
platforms: linux/amd64
tags: |
greptime/greptimedb:latest
greptime/greptimedb:${{ env.IMAGE_TAG }}
release:
name: Release artifacts
# Release artifacts only when all the artifacts are built successfully.
needs: [build,docker]
runs-on: ubuntu-latest
if: github.repository == 'GreptimeTeam/greptimedb'
steps:
- name: Checkout sources
uses: actions/checkout@v3
- name: Download artifacts
uses: actions/download-artifact@v3
- name: Configure scheduled build version # the version would be ${SCHEDULED_BUILD_VERSION_PREFIX}-${SCHEDULED_PERIOD}-YYYYMMDD, like v0.2.0-nigthly-20230313.
shell: bash
if: github.event_name == 'schedule'
run: |
buildTime=`date "+%Y%m%d"`
SCHEDULED_BUILD_VERSION=${{ env.SCHEDULED_BUILD_VERSION_PREFIX }}-${{ env.SCHEDULED_PERIOD }}-$buildTime
echo "SCHEDULED_BUILD_VERSION=${SCHEDULED_BUILD_VERSION}" >> $GITHUB_ENV
- name: Create scheduled build git tag
if: github.event_name == 'schedule'
run: |
git tag ${{ env.SCHEDULED_BUILD_VERSION }}
- name: Publish scheduled release # configure the different release title and tags.
uses: softprops/action-gh-release@v1
if: github.event_name == 'schedule'
with:
name: "Release ${{ env.SCHEDULED_BUILD_VERSION }}"
tag_name: ${{ env.SCHEDULED_BUILD_VERSION }}
generate_release_notes: true
files: |
**/greptime-*
- name: Publish release
uses: softprops/action-gh-release@v1
if: github.event_name != 'schedule'
with:
name: "Release ${{ github.ref_name }}"
files: |
**/greptime-*
docker-push-uhub:
name: Push docker image to UCloud Container Registry
needs: [docker]
runs-on: ubuntu-latest
if: github.repository == 'GreptimeTeam/greptimedb'
# Push to uhub may fail(500 error), but we don't want to block the release process. The failed job will be retried manually.
continue-on-error: true
steps:
- name: Checkout sources
uses: actions/checkout@v3
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to UCloud Container Registry
uses: docker/login-action@v2
with:
registry: uhub.service.ucloud.cn
username: ${{ secrets.UCLOUD_USERNAME }}
password: ${{ secrets.UCLOUD_PASSWORD }}
- name: Configure scheduled build image tag # the tag would be ${SCHEDULED_BUILD_VERSION_PREFIX}-YYYYMMDD-${SCHEDULED_PERIOD}
shell: bash
if: github.event_name == 'schedule'
run: |
buildTime=`date "+%Y%m%d"`
SCHEDULED_BUILD_VERSION=${{ env.SCHEDULED_BUILD_VERSION_PREFIX }}-$buildTime-${{ env.SCHEDULED_PERIOD }}
echo "IMAGE_TAG=${SCHEDULED_BUILD_VERSION:1}" >> $GITHUB_ENV
- name: Configure tag # If the release tag is v0.1.0, then the image version tag will be 0.1.0.
shell: bash
if: github.event_name != 'schedule'
run: |
VERSION=${{ github.ref_name }}
echo "IMAGE_TAG=${VERSION:1}" >> $GITHUB_ENV
- name: Push image to uhub # Use 'docker buildx imagetools create' to create a new image base on source image.
run: |
docker buildx imagetools create \
--tag uhub.service.ucloud.cn/greptime/greptimedb:latest \
--tag uhub.service.ucloud.cn/greptime/greptimedb:${{ env.IMAGE_TAG }} \
greptime/greptimedb:${{ env.IMAGE_TAG }}


@@ -1,4 +1,4 @@
# Welcome!
# Welcome 👋
Thanks a lot for considering contributing to GreptimeDB. We believe people like you will make GreptimeDB a great product. We intend to build a community where individuals can have open talks, show respect for one another, and speak with true ❤️. Meanwhile, we aim to keep things transparent and make your effort count here.
@@ -50,34 +50,33 @@ GreptimeDB uses the [Apache 2.0 license](https://github.com/GreptimeTeam/greptim
- To ensure that community is free and confident in its ability to use your contributions, please sign the Contributor License Agreement (CLA) which will be incorporated in the pull request process.
- Make sure all your codes are formatted and follow the [coding style](https://pingcap.github.io/style-guide/rust/).
- Make sure all unit tests are passed.
- Make sure all unit tests are passed (using `cargo test --workspace` or [nextest](https://nexte.st/index.html) `cargo nextest run`).
- Make sure all clippy warnings are fixed (you can check it locally by running `cargo clippy --workspace --all-targets -- -D warnings -D clippy::print_stdout -D clippy::print_stderr`).
#### `pre-commit` Hooks
You could setup the [`pre-commit`](https://pre-commit.com/#plugins) hooks to run these checks on every commit automatically.
1. Install `pre-commit`
```
$ pip install pre-commit
```
or
```
$ brew install pre-commit
```
pip install pre-commit
or
brew install pre-commit
2. Install the `pre-commit` hooks
```
$ pre-commit install
pre-commit installed at .git/hooks/pre-commit
$ pre-commit install --hook-type commit-msg
pre-commit installed at .git/hooks/commit-msg
$ pre-commit install
pre-commit installed at .git/hooks/pre-commit
$ pre-commit install --hook-type pre-push
pre-commit installed at .git/hooks/pre-pus
```
$ pre-commit install --hook-type commit-msg
pre-commit installed at .git/hooks/commit-msg
now `pre-commit` will run automatically on `git commit`.
$ pre-commit install --hook-type pre-push
pre-commit installed at .git/hooks/pre-push
Now, `pre-commit` will run automatically on `git commit`.
### Title
@@ -102,10 +101,12 @@ of what you were trying to do and what went wrong. You can also reach for help i
## Community
The core team will be thrilled if you participate in any way you like. When you are stuck, try ask for help by filing an issue, with a detailed description of what you were trying to do and what went wrong. If you have any questions or if you would like to get involved in our community, please check out:
- [GreptimeDB Community Slack](https://greptime.com/slack)
- [GreptimeDB Github Discussions](https://github.com/GreptimeTeam/greptimedb/discussions)
Also, see some extra GreptimeDB content:
- [GreptimeDB Docs](https://greptime.com/docs)
- [Learn GreptimeDB](https://greptime.com/products/db)
- [Greptime Inc. Website](https://greptime.com)

Cargo.lock (generated, 1465 changed lines): file diff suppressed because it is too large.


@@ -7,17 +7,20 @@ members = [
"src/cmd",
"src/common/base",
"src/common/catalog",
"src/common/datasource",
"src/common/error",
"src/common/function",
"src/common/function-macro",
"src/common/grpc",
"src/common/grpc-expr",
"src/common/mem-prof",
"src/common/procedure",
"src/common/query",
"src/common/recordbatch",
"src/common/runtime",
"src/common/substrait",
"src/common/telemetry",
"src/common/test-util",
"src/common/time",
"src/datanode",
"src/datatypes",
@@ -43,34 +46,35 @@ members = [
]
[workspace.package]
version = "0.1.0"
version = "0.1.1"
edition = "2021"
license = "Apache-2.0"
[workspace.dependencies]
arrow = "29.0"
arrow-array = "29.0"
arrow-flight = "29.0"
arrow-schema = { version = "29.0", features = ["serde"] }
arrow = { version = "34.0" }
arrow-array = "34.0"
arrow-flight = "34.0"
arrow-schema = { version = "34.0", features = ["serde"] }
async-stream = "0.3"
async-trait = "0.1"
chrono = { version = "0.4", features = ["serde"] }
# TODO(LFC): Use released Datafusion when it officially dependent on Arrow 29.0
datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "4917235a398ae20145c87d20984e6367dc1a0c1e" }
datafusion-common = { git = "https://github.com/apache/arrow-datafusion.git", rev = "4917235a398ae20145c87d20984e6367dc1a0c1e" }
datafusion-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "4917235a398ae20145c87d20984e6367dc1a0c1e" }
datafusion-optimizer = { git = "https://github.com/apache/arrow-datafusion.git", rev = "4917235a398ae20145c87d20984e6367dc1a0c1e" }
datafusion-physical-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "4917235a398ae20145c87d20984e6367dc1a0c1e" }
datafusion-sql = { git = "https://github.com/apache/arrow-datafusion.git", rev = "4917235a398ae20145c87d20984e6367dc1a0c1e" }
datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "146a949218ec970784974137277cde3b4e547d0a" }
datafusion-common = { git = "https://github.com/apache/arrow-datafusion.git", rev = "146a949218ec970784974137277cde3b4e547d0a" }
datafusion-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "146a949218ec970784974137277cde3b4e547d0a" }
datafusion-optimizer = { git = "https://github.com/apache/arrow-datafusion.git", rev = "146a949218ec970784974137277cde3b4e547d0a" }
datafusion-physical-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "146a949218ec970784974137277cde3b4e547d0a" }
datafusion-sql = { git = "https://github.com/apache/arrow-datafusion.git", rev = "146a949218ec970784974137277cde3b4e547d0a" }
futures = "0.3"
futures-util = "0.3"
parquet = "29.0"
parquet = "34.0"
paste = "1.0"
prost = "0.11"
rand = "0.8"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
snafu = { version = "0.7", features = ["backtraces"] }
sqlparser = "0.28"
sqlparser = "0.32"
tempfile = "3"
tokio = { version = "1.24.2", features = ["full"] }
tokio-util = "0.7"
tonic = { version = "0.8", features = ["tls"] }
@@ -79,7 +83,7 @@ uuid = { version = "1", features = ["serde", "v4", "fast-rng"] }
[profile.release]
debug = true
[profile.weekly]
[profile.nightly]
inherits = "release"
strip = true
lto = "thin"


@@ -61,6 +61,12 @@ To compile GreptimeDB from source, you'll need:
find an installation instructions [here](https://grpc.io/docs/protoc-installation/).
**Note that `protoc` version needs to be >= 3.15** because we have used the `optional`
keyword. You can check it with `protoc --version`.
- python3-dev or python3-devel (optional, only needed if you want to run scripts
in CPython; you also need to enable the `pyo3_backend` feature when compiling, either via `cargo run -F pyo3_backend` or by adding `pyo3_backend` to `features.default` in src/script/Cargo.toml, e.g. `default = ["python", "pyo3_backend"]`): this installs the Python shared library required for running the Python
scripting engine in CPython mode. It is available as `python3-dev` on
Ubuntu (install it with `sudo apt install python3-dev`) and as
`python3-devel` on RPM-based distributions (e.g. Fedora, Red Hat, SuSE). Mac's
`Python3` package should have this shared library by default. More details on compiling with PyO3 can be found in [PyO3](https://pyo3.rs/v0.18.1/building_and_distribution#configuring-the-python-version)'s documentation.
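For reference, a minimal hypothetical sketch of that `features.default` change in src/script/Cargo.toml, assuming the crate already defines `python` and `pyo3_backend` features (the actual manifest may list other defaults):

```toml
# Hypothetical excerpt of src/script/Cargo.toml's [features] table.
# Adding pyo3_backend to the defaults means a plain `cargo build` compiles
# the CPython backend without passing `-F pyo3_backend` on the command line.
[features]
default = ["python", "pyo3_backend"]
```

Alternatively, leave the manifest untouched and pass `-F pyo3_backend` on the command line as shown above.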
#### Build with Docker
@@ -141,9 +147,9 @@ You can always cleanup test database by removing `/tmp/greptimedb`.
### Installation
- [Pre-built Binaries](https://github.com/GreptimeTeam/greptimedb/releases):
downloadable pre-built binaries for Linux and MacOS
- [Docker Images](https://hub.docker.com/r/greptime/greptimedb): pre-built
Docker images
For Linux and macOS, you can easily download pre-built binaries that are ready to use. In most cases, the version without PyO3 is sufficient. However, if you plan to run scripts in CPython (and use Python packages like NumPy and Pandas), you will need to download the version with PyO3 and install a Python interpreter of the same version as the one the PyO3 build targets. We recommend using virtualenv to manage multiple Python versions during installation.
- [Docker Images](https://hub.docker.com/r/greptime/greptimedb) (**recommended**): pre-built
Docker images; this is the easiest way to try GreptimeDB. By default the image runs CPython scripts with `pyo3_backend` enabled.
- [`gtctl`](https://github.com/GreptimeTeam/gtctl): the command-line tool for
Kubernetes deployment


@@ -21,12 +21,12 @@ use std::collections::HashMap;
use std::path::{Path, PathBuf};
use std::time::Instant;
use arrow::array::{ArrayRef, PrimitiveArray, StringArray, TimestampNanosecondArray};
use arrow::array::{ArrayRef, PrimitiveArray, StringArray, TimestampMicrosecondArray};
use arrow::datatypes::{DataType, Float64Type, Int64Type};
use arrow::record_batch::RecordBatch;
use clap::Parser;
use client::api::v1::column::Values;
use client::api::v1::{Column, ColumnDataType, ColumnDef, CreateTableExpr, InsertRequest, TableId};
use client::api::v1::{Column, ColumnDataType, ColumnDef, CreateTableExpr, InsertRequest};
use client::{Client, Database, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use indicatif::{MultiProgress, ProgressBar, ProgressStyle};
use parquet::arrow::arrow_reader::ParquetRecordBatchReaderBuilder;
@@ -61,7 +61,7 @@ struct Args {
#[arg(long = "skip-read")]
skip_read: bool,
#[arg(short, long, default_value_t = String::from("127.0.0.1:3001"))]
#[arg(short, long, default_value_t = String::from("127.0.0.1:4001"))]
endpoint: String,
}
@@ -97,6 +97,9 @@ async fn write_data(
for record_batch in record_batch_reader {
let record_batch = record_batch.unwrap();
if !is_record_batch_full(&record_batch) {
continue;
}
let (columns, row_count) = convert_record_batch(record_batch);
let request = InsertRequest {
table_name: TABLE_NAME.to_string(),
@@ -122,11 +125,16 @@ fn convert_record_batch(record_batch: RecordBatch) -> (Vec<Column>, u32) {
let mut columns = vec![];
for (array, field) in record_batch.columns().iter().zip(fields.iter()) {
let values = build_values(array);
let (values, datatype) = build_values(array);
let column = Column {
column_name: field.name().to_owned(),
values: Some(values),
null_mask: vec![],
null_mask: array
.data()
.null_bitmap()
.map(|bitmap| bitmap.buffer().as_slice().to_vec())
.unwrap_or_default(),
datatype: datatype.into(),
// datatype and semantic_type are set to default
..Default::default()
};
@@ -136,7 +144,7 @@ fn convert_record_batch(record_batch: RecordBatch) -> (Vec<Column>, u32) {
(columns, row_count as _)
}
fn build_values(column: &ArrayRef) -> Values {
fn build_values(column: &ArrayRef) -> (Values, ColumnDataType) {
match column.data_type() {
DataType::Int64 => {
let array = column
@@ -144,10 +152,13 @@ fn build_values(column: &ArrayRef) -> Values {
.downcast_ref::<PrimitiveArray<Int64Type>>()
.unwrap();
let values = array.values();
Values {
i64_values: values.to_vec(),
..Default::default()
}
(
Values {
i64_values: values.to_vec(),
..Default::default()
},
ColumnDataType::Int64,
)
}
DataType::Float64 => {
let array = column
@@ -155,29 +166,38 @@ fn build_values(column: &ArrayRef) -> Values {
.downcast_ref::<PrimitiveArray<Float64Type>>()
.unwrap();
let values = array.values();
Values {
f64_values: values.to_vec(),
..Default::default()
}
(
Values {
f64_values: values.to_vec(),
..Default::default()
},
ColumnDataType::Float64,
)
}
DataType::Timestamp(_, _) => {
let array = column
.as_any()
.downcast_ref::<TimestampNanosecondArray>()
.downcast_ref::<TimestampMicrosecondArray>()
.unwrap();
let values = array.values();
Values {
i64_values: values.to_vec(),
..Default::default()
}
(
Values {
i64_values: values.to_vec(),
..Default::default()
},
ColumnDataType::Int64,
)
}
DataType::Utf8 => {
let array = column.as_any().downcast_ref::<StringArray>().unwrap();
let values = array.iter().filter_map(|s| s.map(String::from)).collect();
Values {
string_values: values,
..Default::default()
}
(
Values {
string_values: values,
..Default::default()
},
ColumnDataType::String,
)
}
DataType::Null
| DataType::Boolean
@@ -208,10 +228,15 @@ fn build_values(column: &ArrayRef) -> Values {
| DataType::Dictionary(_, _)
| DataType::Decimal128(_, _)
| DataType::Decimal256(_, _)
| DataType::RunEndEncoded(_, _)
| DataType::Map(_, _) => todo!(),
}
}
fn is_record_batch_full(batch: &RecordBatch) -> bool {
batch.columns().iter().all(|col| col.null_count() == 0)
}
fn create_table_expr() -> CreateTableExpr {
CreateTableExpr {
catalog_name: CATALOG_NAME.to_string(),
@@ -339,7 +364,7 @@ fn create_table_expr() -> CreateTableExpr {
create_if_not_exists: false,
table_options: Default::default(),
region_ids: vec![0],
table_id: Some(TableId { id: 0 }),
table_id: None,
}
}


@@ -1,35 +1,50 @@
node_id = 42
mode = 'distributed'
rpc_addr = '127.0.0.1:3001'
rpc_hostname = '127.0.0.1'
rpc_runtime_size = 8
mysql_addr = '127.0.0.1:4406'
mysql_runtime_size = 4
# Node running mode, see `standalone.example.toml`.
mode = "distributed"
# Whether to use in-memory catalog, see `standalone.example.toml`.
enable_memory_catalog = false
# The datanode identifier, should be unique.
node_id = 42
# gRPC server address, "127.0.0.1:3001" by default.
rpc_addr = "127.0.0.1:3001"
# Hostname of this node.
rpc_hostname = "127.0.0.1"
# The number of gRPC server worker threads, 8 by default.
rpc_runtime_size = 8
# Metasrv client options.
[meta_client_options]
# Metasrv address list.
metasrv_addrs = ["127.0.0.1:3002"]
# Operation timeout in milliseconds, 3000 by default.
timeout_millis = 3000
# Connect server timeout in milliseconds, 5000 by default.
connect_timeout_millis = 5000
# `TCP_NODELAY` option for accepted connections, true by default.
tcp_nodelay = true
# WAL options, see `standalone.example.toml`.
[wal]
dir = "/tmp/greptimedb/wal"
file_size = '1GB'
purge_interval = '10m'
purge_threshold = '50GB'
file_size = "1GB"
purge_threshold = "50GB"
purge_interval = "10m"
read_batch_size = 128
sync_write = false
# Storage options, see `standalone.example.toml`.
[storage]
type = 'File'
data_dir = '/tmp/greptimedb/data/'
[meta_client_options]
metasrv_addrs = ['127.0.0.1:3002']
timeout_millis = 3000
connect_timeout_millis = 5000
tcp_nodelay = false
type = "File"
data_dir = "/tmp/greptimedb/data/"
# Compaction options, see `standalone.example.toml`.
[compaction]
max_inflight_tasks = 4
max_files_in_level0 = 16
max_files_in_level0 = 8
max_purge_tasks = 32
[procedure.store]
type = 'File'
data_dir = '/tmp/greptimedb/procedure/'
# Procedure storage options, see `standalone.example.toml`.
# [procedure.store]
# type = "File"
# data_dir = "/tmp/greptimedb/procedure/"
# max_retry_times = 3
# retry_delay = "500ms"


@@ -1,12 +1,58 @@
mode = 'distributed'
datanode_rpc_addr = '127.0.0.1:3001'
# Node running mode, see `standalone.example.toml`.
mode = "distributed"
# HTTP server options, see `standalone.example.toml`.
[http_options]
addr = '127.0.0.1:4000'
addr = "127.0.0.1:4000"
timeout = "30s"
# gRPC server options, see `standalone.example.toml`.
[grpc_options]
addr = "127.0.0.1:4001"
runtime_size = 8
# MySQL server options, see `standalone.example.toml`.
[mysql_options]
addr = "127.0.0.1:4002"
runtime_size = 2
# MySQL server TLS options, see `standalone.example.toml`.
[mysql_options.tls]
mode = "disable"
cert_path = ""
key_path = ""
# PostgresSQL server options, see `standalone.example.toml`.
[postgres_options]
addr = "127.0.0.1:4003"
runtime_size = 2
# PostgresSQL server TLS options, see `standalone.example.toml`.
[postgres_options.tls]
mode = "disable"
cert_path = ""
key_path = ""
# OpenTSDB protocol options, see `standalone.example.toml`.
[opentsdb_options]
addr = "127.0.0.1:4242"
runtime_size = 2
# InfluxDB protocol options, see `standalone.example.toml`.
[influxdb_options]
enable = true
# Prometheus protocol options, see `standalone.example.toml`.
[prometheus_options]
enable = true
# Prometheus protocol options, see `standalone.example.toml`.
[prom_options]
addr = "127.0.0.1:4004"
# Metasrv client options, see `datanode.example.toml`.
[meta_client_options]
metasrv_addrs = ['127.0.0.1:3002']
metasrv_addrs = ["127.0.0.1:3002"]
timeout_millis = 3000
connect_timeout_millis = 5000
tcp_nodelay = false
tcp_nodelay = true


@@ -1,6 +1,15 @@
bind_addr = '127.0.0.1:3002'
server_addr = '127.0.0.1:3002'
store_addr = '127.0.0.1:2379'
# The bind address of metasrv, "127.0.0.1:3002" by default.
bind_addr = "127.0.0.1:3002"
# The communication server address for frontend and datanode to connect to metasrv, "127.0.0.1:3002" by default for localhost.
server_addr = "127.0.0.1:3002"
# Etcd server address, "127.0.0.1:2379" by default.
store_addr = "127.0.0.1:2379"
# Datanode lease in seconds, 15 seconds by default.
datanode_lease_secs = 15
# selector: 'LeaseBased', 'LoadBased'
selector = 'LeaseBased'
# Datanode selector type.
# - "LeaseBased" (default value).
# - "LoadBased"
# For details, please see "https://docs.greptime.com/developer-guide/meta/selector".
selector = "LeaseBased"
# Store data in memory, false by default.
use_memory_store = false


@@ -1,47 +1,120 @@
node_id = 0
mode = 'standalone'
# Node running mode, "standalone" or "distributed".
mode = "standalone"
# Whether to use in-memory catalog, `false` by default.
enable_memory_catalog = false
# HTTP server options.
[http_options]
addr = '127.0.0.1:4000'
# Server address, "127.0.0.1:4000" by default.
addr = "127.0.0.1:4000"
# HTTP request timeout, 30s by default.
timeout = "30s"
[wal]
dir = "/tmp/greptimedb/wal"
file_size = '1GB'
purge_interval = '10m'
purge_threshold = '50GB'
read_batch_size = 128
sync_write = false
[storage]
type = 'File'
data_dir = '/tmp/greptimedb/data/'
# gRPC server options.
[grpc_options]
addr = '127.0.0.1:4001'
# Server address, "127.0.0.1:4001" by default.
addr = "127.0.0.1:4001"
# The number of server worker threads, 8 by default.
runtime_size = 8
# MySQL server options.
[mysql_options]
addr = '127.0.0.1:4002'
# Server address, "127.0.0.1:4002" by default.
addr = "127.0.0.1:4002"
# The number of server worker threads, 2 by default.
runtime_size = 2
[influxdb_options]
enable = true
[opentsdb_options]
addr = '127.0.0.1:4242'
enable = true
runtime_size = 2
[prometheus_options]
enable = true
# MySQL server TLS options.
[mysql_options.tls]
# TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html
# - "disable" (default value)
# - "prefer"
# - "require"
# - "verify-ca"
# - "verify-full"
mode = "disable"
# Certificate file path.
cert_path = ""
# Private key file path.
key_path = ""
# PostgresSQL server options.
[postgres_options]
addr = '127.0.0.1:4003'
# Server address, "127.0.0.1:4003" by default.
addr = "127.0.0.1:4003"
# The number of server worker threads, 2 by default.
runtime_size = 2
check_pwd = false
[procedure.store]
type = 'File'
data_dir = '/tmp/greptimedb/procedure/'
# PostgreSQL server TLS options, see `[mysql_options.tls]` section.
[postgres_options.tls]
# TLS mode.
mode = "disable"
# Certificate file path.
cert_path = ""
# Private key file path.
key_path = ""
# OpenTSDB protocol options.
[opentsdb_options]
# OpenTSDB telnet API server address, "127.0.0.1:4242" by default.
addr = "127.0.0.1:4242"
# The number of server worker threads, 2 by default.
runtime_size = 2
# InfluxDB protocol options.
[influxdb_options]
# Whether to enable InfluxDB protocol in HTTP API, true by default.
enable = true
# Prometheus protocol options.
[prometheus_options]
# Whether to enable Prometheus remote write and read in HTTP API, true by default.
enable = true
# Prom protocol options.
[prom_options]
# Prometheus API server address, "127.0.0.1:4004" by default.
addr = "127.0.0.1:4004"
# WAL options.
[wal]
# WAL data directory.
dir = "/tmp/greptimedb/wal"
# WAL file size in bytes.
file_size = "1GB"
# WAL purge threshold in bytes.
purge_threshold = "50GB"
# WAL purge interval in seconds.
purge_interval = "10m"
# WAL read batch size.
read_batch_size = 128
# Whether to sync log file after every write.
sync_write = false
# Storage options.
[storage]
# Storage type.
type = "File"
# Data directory, "/tmp/greptimedb/data" by default.
data_dir = "/tmp/greptimedb/data/"
# Compaction options.
[compaction]
# Max task number that can concurrently run.
max_inflight_tasks = 4
# Max files in level 0 to trigger compaction.
max_files_in_level0 = 8
# Max task number for SST purge task after compaction.
max_purge_tasks = 32
# Procedure storage options.
# Uncomment to enable.
# [procedure.store]
# # Storage type.
# type = "File"
# # Procedure data path.
# data_dir = "/tmp/greptimedb/procedure/"
# # Procedure max retry time.
# max_retry_times = 3
# # Initial retry delay of procedures, increases exponentially
# retry_delay = "500ms"

View File

@@ -9,7 +9,10 @@ RUN apt-get update && apt-get install -y \
protobuf-compiler \
curl \
build-essential \
pkg-config
pkg-config \
python3 \
python3-dev \
&& pip install pyarrow
# Install Rust.
SHELL ["/bin/bash", "-c"]

docker/aarch64/Dockerfile Normal file
View File

@@ -0,0 +1,57 @@
FROM ubuntu:22.04 as builder
ENV LANG en_US.utf8
WORKDIR /greptimedb
# Install dependencies.
RUN apt-get update && apt-get install -y \
libssl-dev \
protobuf-compiler \
curl \
build-essential \
pkg-config \
wget
# Install Rust.
SHELL ["/bin/bash", "-c"]
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
ENV PATH /root/.cargo/bin/:$PATH
# Install cross platform toolchain
RUN apt-get -y update && \
apt-get -y install g++-aarch64-linux-gnu gcc-aarch64-linux-gnu && \
apt-get install binutils-aarch64-linux-gnu
COPY ./docker/aarch64/compile-python.sh ./docker/aarch64/
RUN chmod +x ./docker/aarch64/compile-python.sh && \
./docker/aarch64/compile-python.sh
COPY ./rust-toolchain.toml .
# Install rustup target for cross compiling.
RUN rustup target add aarch64-unknown-linux-gnu
COPY . .
# Fetch dependencies in a separate `RUN` so they are cached in their own layer.
RUN cargo fetch
# These three env vars are set in the script, so set them manually in the Dockerfile as well.
ENV LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/lib/
ENV LIBRARY_PATH=$LIBRARY_PATH:/usr/local/lib/
ENV PY_INSTALL_PATH=/greptimedb/python_arm64_build
# Set the environment variables for cross compiling and build.
# The cross-compiled Python is `python3` on the PATH, but pyo3 needs `python`, so alias it.
# Build the project in release mode.
RUN export PYO3_CROSS_LIB_DIR=$PY_INSTALL_PATH/lib && \
alias python=python3 && \
cargo build --target aarch64-unknown-linux-gnu --release -F pyo3_backend
# Exporting the binary to the clean image
FROM ubuntu:22.04 as base
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get -y install ca-certificates
WORKDIR /greptime
COPY --from=builder /greptimedb/target/aarch64-unknown-linux-gnu/release/greptime /greptime/bin/
ENV PATH /greptime/bin/:$PATH
ENTRYPOINT ["greptime"]

View File

@@ -0,0 +1,87 @@
#!/usr/bin/env bash
set -e
# This script downloads the Python source code, compiles it, and installs it to /usr/local/lib,
# then uses that Python to build a cross-compiled Python for aarch64.
ARCH=$1
PYTHON_VERSION=3.10.10
PYTHON_SOURCE_DIR=Python-${PYTHON_VERSION}
PYTHON_INSTALL_PATH_AMD64=${PWD}/python-${PYTHON_VERSION}/amd64
PYTHON_INSTALL_PATH_AARCH64=${PWD}/python-${PYTHON_VERSION}/aarch64
function download_python_source_code() {
wget https://www.python.org/ftp/python/$PYTHON_VERSION/Python-$PYTHON_VERSION.tgz
tar -xvf Python-$PYTHON_VERSION.tgz
}
function compile_for_amd64_platform() {
mkdir -p "$PYTHON_INSTALL_PATH_AMD64"
echo "Compiling for amd64 platform..."
./configure \
--prefix="$PYTHON_INSTALL_PATH_AMD64" \
--enable-shared \
ac_cv_pthread_is_default=no ac_cv_pthread=yes ac_cv_cxx_thread=yes \
ac_cv_have_long_long_format=yes \
--disable-ipv6 ac_cv_file__dev_ptmx=no ac_cv_file__dev_ptc=no
make
make install
}
# A brief explanation of the Python configure options:
# --enable-shared: build a shared Python library (default is no); we need it to call Python from Rust
# CC, CXX, AR, LD, RANLIB: set the compiler, archiver, linker, and ranlib programs to use
# --build: the machine you are building on; --host: the machine the compiled program will run on
# --with-system-ffi: build the _ctypes module using an installed ffi library, see Doc/library/ctypes.rst; not used here, TODO: could remove
# ac_cv_pthread_is_default=no ac_cv_pthread=yes ac_cv_cxx_thread=yes:
# allow the cross-compiled Python to have -pthread set for CXX, see https://github.com/python/cpython/pull/22525
# ac_cv_have_long_long_format=yes: the target platform supports the long long type
# --disable-ipv6: disable IPv6 support; we don't need it here
# ac_cv_file__dev_ptmx=no ac_cv_file__dev_ptc=no: disable pty support; we don't need it here
function compile_for_aarch64_platform() {
export LD_LIBRARY_PATH=$PYTHON_INSTALL_PATH_AMD64/lib:$LD_LIBRARY_PATH
export LIBRARY_PATH=$PYTHON_INSTALL_PATH_AMD64/lib:$LIBRARY_PATH
export PATH=$PYTHON_INSTALL_PATH_AMD64/bin:$PATH
mkdir -p "$PYTHON_INSTALL_PATH_AARCH64"
echo "Compiling for aarch64 platform..."
echo "LD_LIBRARY_PATH: $LD_LIBRARY_PATH"
echo "LIBRARY_PATH: $LIBRARY_PATH"
echo "PATH: $PATH"
./configure --build=x86_64-linux-gnu --host=aarch64-linux-gnu \
--prefix="$PYTHON_INSTALL_PATH_AARCH64" --enable-optimizations \
CC=aarch64-linux-gnu-gcc \
CXX=aarch64-linux-gnu-g++ \
AR=aarch64-linux-gnu-ar \
LD=aarch64-linux-gnu-ld \
RANLIB=aarch64-linux-gnu-ranlib \
--enable-shared \
ac_cv_pthread_is_default=no ac_cv_pthread=yes ac_cv_cxx_thread=yes \
ac_cv_have_long_long_format=yes \
--disable-ipv6 ac_cv_file__dev_ptmx=no ac_cv_file__dev_ptc=no
make
make altinstall
}
# Main script starts here.
download_python_source_code
# Enter the python source code directory.
cd $PYTHON_SOURCE_DIR || exit 1
# Build local python first, then build cross-compiled python.
compile_for_amd64_platform
# Clean the build directory.
make clean && make distclean
# Cross compile python for aarch64.
if [ "$ARCH" = "aarch64-unknown-linux-gnu" ]; then
compile_for_aarch64_platform
fi

View File

@@ -1,6 +1,12 @@
FROM ubuntu:22.04
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get -y install ca-certificates
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
ca-certificates \
python3.10 \
python3.10-dev \
python3-pip
RUN python3 -m pip install pyarrow
ARG TARGETARCH

View File

@@ -0,0 +1,196 @@
---
Feature Name: "Fault Tolerance for Region"
Tracking Issue: https://github.com/GreptimeTeam/greptimedb/issues/1126
Date: 2023-03-08
Author: "Luo Fucong <luofucong@greptime.com>"
---
Fault Tolerance for Region
----------------------
# Summary
This RFC proposes a method to achieve fault tolerance for regions in GreptimeDB's distributed mode. Or, to put it another way, achieving region high availability ("HA") for the GreptimeDB cluster.
In this RFC, we mainly describe two aspects of region HA: how region availability is detected, and what recovery process needs to be taken. We also discuss some alternatives and future work.
When this feature is done, our users can expect a GreptimeDB cluster that can always handle their requests to regions, even though some requests may fail during region failover. Optimizations to reduce the MTTR (Mean Time To Recovery) are not a concern of this RFC, and are left for future work.
# Motivation
Fault tolerance for regions is a critical feature for our clients to use the GreptimeDB cluster confidently. High availability for users to interact with their stored data is a "must have" for any TSDB product, and that includes our GreptimeDB cluster.
# Details
## Background
Some backgrounds about region in distributed mode:
- A table is logically split into multiple regions. Each region stores a part of non-overlapping table data.
- Regions are distributed among Datanodes; the mappings are not static, and are assigned and governed by Metasrv.
- In distributed mode, client requests are scoped to regions. To be more specific, when a request that needs to scan multiple regions arrives at the Frontend, the Frontend splits it into multiple sub-requests, each of which scans one region only, and submits them to the Datanodes that hold the corresponding regions.
In conclusion, as long as regions remain available, and can regain availability when failures do occur, overall region HA can be achieved. With this in mind, let's first see how region failures are detected.
## Failure Detection
We detect region failures in Metasrv, both passively and actively. "Passively" means that Metasrv does not fire "are you healthy" requests at regions; instead, region health information is carried in the heartbeat requests that Datanodes submit to Metasrv.
A Datanode already carries its region stats in the heartbeat request (irrelevant fields are omitted):
```protobuf
message HeartbeatRequest {
...
// Region stats on this node
repeated RegionStat region_stats = 6;
...
}
message RegionStat {
uint64 region_id = 1;
TableName table_name = 2;
...
}
```
For the sake of simplicity, we don't add another field `bool available = 3` to the `RegionStat` message; instead, if a region is unavailable in the view of the Datanode that contains it, the Datanode simply does not include that region's `RegionStat` in the heartbeat request. And if the Datanode itself is unavailable, the heartbeat request is not submitted at all, which is effectively the same as not carrying the `RegionStat`.
> The heartbeat interval is now hardcoded to five seconds.
Metasrv gathers the heartbeat requests, extracts the `RegionStat`s, and treats them as region heartbeats. In this way, Metasrv maintains health information for all regions. If a region's heartbeats are not received for a period of time, Metasrv suspects the region might be unavailable. To decide whether a region has failed, Metasrv uses a failure detection algorithm called "[Phi φ Accrual Failure Detection](https://medium.com/@arpitbhayani/phi-%CF%86-accrual-failure-detection-79c21ce53a7a)". Basically, the algorithm calculates a value called "phi" to represent the likelihood of a region's unavailability, based on the historical arrival rate of its heartbeats. Once the "phi" rises above a pre-defined threshold, Metasrv considers the region failed.
> This algorithm has been widely adopted in well-known products like Akka and Cassandra.
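To make the detection concrete, here is a minimal sketch of the phi calculation under the usual normal-distribution approximation of heartbeat intervals (the same simplification Akka and Cassandra use). The struct, field names, and threshold handling are illustrative assumptions, not GreptimeDB's actual implementation.
```rust
/// A minimal phi accrual detector sketch. `intervals` holds recent heartbeat
/// intervals in milliseconds; `last_heartbeat` is the arrival time of the last
/// heartbeat in milliseconds.
struct PhiAccrualDetector {
    intervals: Vec<f64>,
    last_heartbeat: f64,
}

impl PhiAccrualDetector {
    /// phi = -log10(P(a heartbeat arrives later than `now`)): the longer the
    /// silence relative to the observed interval distribution, the larger phi.
    fn phi(&self, now: f64) -> f64 {
        let n = self.intervals.len() as f64;
        let mean = self.intervals.iter().sum::<f64>() / n;
        let variance =
            self.intervals.iter().map(|x| (x - mean).powi(2)).sum::<f64>() / n;
        let std_dev = variance.sqrt().max(1.0); // guard against zero deviation

        let elapsed = now - self.last_heartbeat;
        // Logistic approximation of the normal CDF, as used by Akka's detector.
        let y = (elapsed - mean) / std_dev;
        let e = (-y * (1.5976 + 0.070566 * y * y)).exp();
        if elapsed > mean {
            -(e / (1.0 + e)).log10()
        } else {
            -(1.0 - 1.0 / (1.0 + e)).log10()
        }
    }
}

fn main() {
    let detector = PhiAccrualDetector {
        intervals: vec![5000.0, 5100.0, 4900.0, 5050.0], // ~5s heartbeat interval
        last_heartbeat: 0.0,
    };
    // A short silence keeps phi low; a long one pushes it past the threshold.
    for elapsed in [5_000.0, 10_000.0, 20_000.0] {
        println!("phi after {} ms: {:.2}", elapsed, detector.phi(elapsed));
    }
}
```
If `phi(now)` exceeds the configured threshold, Metasrv treats the region as failed and proceeds to the active check described next.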
When Metasrv concludes from heartbeats that some region has failed, that is not the final decision. Here comes the "active" detection. Before Metasrv decides to do region failover, it actively invokes the health check interface of the Datanode where the failed region resides. Only if this health check fails does Metasrv actually start failover of the region.
To conclude, the failure detection pseudo-codes are like this:
```rust
// in Metasrv:
fn failure_detection() {
loop {
// passive detection
let failed_regions = all_regions.iter().filter(|r| r.estimated_failure_possibility() > config.phi).collect();
// find the datanodes that contains the failed regions
let datanodes_and_regions = find_region_resides_datanodes(failed_regions);
// active detection
for (datanode, regions) in datanodes_and_regions {
if !datanode.is_healthy(regions) {
do_failover(datanode, regions);
}
}
sleep(config.detect_interval);
}
}
```
Some design considerations:
- Why detect actively when we already have passive detection? Because the network can sometimes be connectable in only one direction (especially in complex Cloud environments): the Datanode's heartbeats cannot reach Metasrv, while Metasrv can still reach the Datanode. Active detection avoids this false positive.
- Why does detection work on regions instead of Datanodes? Because it may happen that only some of the regions in a Datanode are unavailable, not ALL of them, especially when Datanodes are shared by multiple tenants. In that case, it's better to fail over only the affected regions rather than every region residing on the Datanode. All in all, we want finer-grained control over region failover.
So we have detected that some regions are unavailable. How do we regain their availability?
## Region Failover
Region failover largely relies on the remote WAL, aka "[Bunshin](https://github.com/GreptimeTeam/bunshin)". I'm not including any of its details in this RFC; let's just assume we already have it.
In general, region failover is fairly simple. Once Metasrv decides to fail over some regions, it first chooses one or more Datanodes to hold the failed regions. This can be done easily, as Metasrv already has the whole picture of the Datanodes: it knows which Datanode has the fewest regions, which Datanode historically had the lowest CPU usage and IO rate, and how the Datanodes are assigned to tenants, among other information that helps Metasrv choose the most suitable Datanodes. Let's call these chosen Datanodes "candidates".
> The strategy to choose the most suitable candidates requires careful design, but that's another RFC.
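For illustration only, a toy scoring heuristic over the signals mentioned above (region count, CPU usage, IO rate) might look like the following sketch. The types, field names, and weights are hypothetical; the real selector strategy is deliberately left to that separate RFC.
```rust
/// Hypothetical per-Datanode stats as Metasrv might track them.
struct DatanodeStat {
    id: u64,
    region_count: usize,
    cpu_usage: f64, // normalized to 0.0..=1.0
    io_rate: f64,   // normalized to 0.0..=1.0
}

/// Lower score means a less loaded, more suitable candidate.
fn score(d: &DatanodeStat) -> f64 {
    d.region_count as f64 + 10.0 * d.cpu_usage + 10.0 * d.io_rate
}

/// Pick the Datanode with the lowest combined load as the failover candidate.
fn choose_candidate(datanodes: &[DatanodeStat]) -> Option<u64> {
    datanodes
        .iter()
        .min_by(|a, b| score(a).total_cmp(&score(b)))
        .map(|d| d.id)
}

fn main() {
    let stats = [
        DatanodeStat { id: 1, region_count: 12, cpu_usage: 0.7, io_rate: 0.5 },
        DatanodeStat { id: 2, region_count: 8, cpu_usage: 0.2, io_rate: 0.1 },
    ];
    assert_eq!(choose_candidate(&stats), Some(2)); // the less loaded node wins
}
```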
Then, Metasrv sets the state of these failed regions to "passive". We should add a field to `Region`:
```protobuf
message Region {
uint64 id = 1;
string name = 2;
Partition partition = 3;
enum State {
Active = 0;
Passive = 1;
}
State state = 4;
map<string, string> attrs = 100;
}
```
Here `Region` is used in the message `RegionRoute`, which indicates how write requests are split among regions. When a region is set to "passive", the Frontend knows that writes to it should be rejected for the moment (reads from the region are not blocked, however), as sketched below.
> Making a region "passive" here effectively blocks writes to it. That's acceptable in the failover situation, since the region has failed anyway. However, when dealing with active maintenance operations, the region state requires a more refined design. But that's another story.
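As a minimal sketch of the write gating described above, the Frontend could check the region state carried in its routing information before forwarding a write. The simplified types and names below are hypothetical, not the actual routing code.
```rust
/// Simplified mirror of the proto sketch above.
#[derive(Clone, Copy)]
enum RegionState {
    Active,
    Passive,
}

struct RegionRoute {
    region_id: u64,
    state: RegionState,
}

/// Writes are rejected while the region is "passive"; reads are never gated here.
fn check_writable(route: &RegionRoute) -> Result<(), String> {
    match route.state {
        RegionState::Active => Ok(()),
        RegionState::Passive => Err(format!(
            "region {} is failing over; writes are temporarily rejected",
            route.region_id
        )),
    }
}

fn main() {
    let route = RegionRoute { region_id: 42, state: RegionState::Passive };
    assert!(check_writable(&route).is_err()); // writes rejected during failover
    // Reads would still be served from the last known route.
}
```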
Third, Metasrv fires "close region" requests to the failed Datanodes, and "open region" requests to the candidates. "Close region" requests might fail due to the unavailability of the Datanodes, but that's fine; they are just a best-effort attempt to reduce the chance of any in-flight writes being handled unintentionally after the region is set to "passive". The "open region" requests must succeed, though. Datanodes open regions from the remote WAL.
> Currently, "close region" is undefined in Datanode. It could be a local cache cleanup of region data or other resource tidying.
Finally, when a candidate successfully opens its region, it calls back to Metasrv, indicating that it is ready to handle the region. The "call back" here is backed by its heartbeat to Metasrv. Metasrv updates the region's state to "active", so that the Frontend lifts the write restriction on the region (again, reads of the region are untouched).
All the above steps should be managed by the remote procedure framework. That's another implementation challenge of the region failover feature. (The other is the remote WAL, of course.)
A picture is worth a thousand words:
```text
+-------------------------+
| Metasrv detects region |
| failure |
+-------------------------+
|
v
+----------------------------+
| Metasrv chooses candidates |
| to hold failed regions |
+----------------------------+
|
v
+-------------------------+ +-------------------------+
| Metasrv "passive" the |------>| Frontend rejects writes |
| failed regions | | to "passive" regions |
+-------------------------+ +-------------------------+
|
v
+--------------------------+ +---------------------------+
| Candidate Datanodes open |<-------| Metasrv fires "close" and |
| regions from remote WAL | | "open" region requests |
+--------------------------+ +---------------------------+
|
|
| +-------------------------+ +-------------------------+
+--------------------->| Metasrv "active" the |------>| Frontend lifts write |
| failed regions | | restriction to regions |
+-------------------------+ +-------------------------+
|
v
+-------------------------+
| Region failover done, |
| HA regain |
+-------------------------+
```
# Alternatives
## The "Neon" Way
Remote WAL raises a problem that could harm the write throughput of the GreptimeDB cluster: each write request has to make at least two remote calls, one from the Frontend to the Datanode, and one from the Datanode to the remote WAL. What if we did it the "[Neon](https://github.com/neondatabase/neon)" way, placing the remote WAL between the Frontend and the Datanode? Couldn't that improve our write throughput? It could, though there are some consistency issues, like "read-your-writes", to solve.
However, the main reasons we don't adopt this method are two-fold:
1. The remote WAL is planned to be quorum-based, so it can be written efficiently;
2. More importantly, we are planning to make the remote WAL optional, so users can choose not to enable it (at the cost of some reliability).
## No WAL, Replication instead
This method replicates regions across Datanodes directly, as is common in shared-nothing databases. Were the main region to fail, a standby region in the replication group would be elected as the new "main" and take over the read/write requests. The main concern with this method is its incompatibility with our current architecture and code structure: it would require a major redesign, yet gains no significant advantage over the remote WAL method.
However, replication does have its own advantages that we can learn from to optimize the failover procedure.
# Future Work
Some optimizations we could make:
- To reduce the MTTR, we could have Metasrv choose a candidate for each region ahead of time, during normal operation. The candidate does some preparation work to reduce the region-open time, effectively accelerating the failover procedure.
- We could adopt the replication method to the degree that region replicas are used as fast catch-up candidates. Since the data difference among replicas is minor, region failover would not need to load or exchange much data, greatly reducing the failover time.

View File

@@ -1,2 +1,2 @@
[toolchain]
channel = "nightly-2023-02-14"
channel = "nightly-2023-02-26"

View File

@@ -59,5 +59,5 @@ if [ -n "${OS_TYPE}" ] && [ -n "${ARCH_TYPE}" ]; then
wget "https://github.com/${GITHUB_ORG}/${GITHUB_REPO}/releases/download/${VERSION}/${BIN}-${OS_TYPE}-${ARCH_TYPE}.tgz"
fi
tar xvf ${BIN}-${OS_TYPE}-${ARCH_TYPE}.tgz && rm ${BIN}-${OS_TYPE}-${ARCH_TYPE}.tgz && echo "Run '${BIN} --help' to get started"
tar xvf ${BIN}-${OS_TYPE}-${ARCH_TYPE}.tgz && rm ${BIN}-${OS_TYPE}-${ARCH_TYPE}.tgz && echo "Run './${BIN} --help' to get started"
fi

View File

@@ -10,7 +10,7 @@ common-base = { path = "../common/base" }
common-error = { path = "../common/error" }
common-time = { path = "../common/time" }
datatypes = { path = "../datatypes" }
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "1599ae2a0d1d8f42ee23ed26e4ad7a7b34134c60" }
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "eb760d219206c77dd3a105ecb6a3ba97d9d650ec" }
prost.workspace = true
snafu = { version = "0.7", features = ["backtraces"] }
tonic.workspace = true

View File

@@ -18,25 +18,28 @@ common-recordbatch = { path = "../common/recordbatch" }
common-runtime = { path = "../common/runtime" }
common-telemetry = { path = "../common/telemetry" }
common-time = { path = "../common/time" }
dashmap = "5.4"
datafusion.workspace = true
datatypes = { path = "../datatypes" }
futures = "0.3"
futures-util.workspace = true
lazy_static = "1.4"
meta-client = { path = "../meta-client" }
parking_lot = "0.12"
regex = "1.6"
serde = "1.0"
serde_json = "1.0"
session = { path = "../session" }
snafu = { version = "0.7", features = ["backtraces"] }
storage = { path = "../storage" }
table = { path = "../table" }
tokio.workspace = true
[dev-dependencies]
common-test-util = { path = "../common/test-util" }
chrono.workspace = true
log-store = { path = "../log-store" }
mito = { path = "../mito", features = ["test"] }
object-store = { path = "../object-store" }
storage = { path = "../storage" }
tempdir = "0.3"
tokio.workspace = true

View File

@@ -201,6 +201,24 @@ pub enum Error {
#[snafu(backtrace)]
source: common_catalog::error::Error,
},
#[snafu(display("Illegal access to catalog: {} and schema: {}", catalog, schema))]
QueryAccessDenied { catalog: String, schema: String },
#[snafu(display(
"Failed to get region stats, catalog: {}, schema: {}, table: {}, source: {}",
catalog,
schema,
table,
source
))]
RegionStats {
catalog: String,
schema: String,
table: String,
#[snafu(backtrace)]
source: table::error::Error,
},
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -235,7 +253,8 @@ impl ErrorExt for Error {
| Error::InsertCatalogRecord { source, .. }
| Error::OpenTable { source, .. }
| Error::CreateTable { source, .. }
| Error::DeregisterTable { source, .. } => source.status_code(),
| Error::DeregisterTable { source, .. }
| Error::RegionStats { source, .. } => source.status_code(),
Error::MetaSrv { source, .. } => source.status_code(),
Error::SystemCatalogTableScan { source } => source.status_code(),
@@ -246,6 +265,7 @@ impl ErrorExt for Error {
}
Error::Unimplemented { .. } => StatusCode::Unsupported,
Error::QueryAccessDenied { .. } => StatusCode::AccessDenied,
}
}

View File

@@ -18,7 +18,8 @@ use std::any::Any;
use std::fmt::{Debug, Formatter};
use std::sync::Arc;
use common_telemetry::info;
use api::v1::meta::{RegionStat, TableName};
use common_telemetry::{info, warn};
use snafu::{OptionExt, ResultExt};
use table::engine::{EngineContext, TableEngineRef};
use table::metadata::TableId;
@@ -34,6 +35,7 @@ pub mod local;
pub mod remote;
pub mod schema;
pub mod system;
pub mod table_source;
pub mod tables;
/// Represent a list of named catalogs
@@ -107,7 +109,12 @@ pub trait CatalogManager: CatalogList {
fn schema(&self, catalog: &str, schema: &str) -> Result<Option<SchemaProviderRef>>;
/// Returns the table by catalog, schema and table name.
fn table(&self, catalog: &str, schema: &str, table_name: &str) -> Result<Option<TableRef>>;
async fn table(
&self,
catalog: &str,
schema: &str,
table_name: &str,
) -> Result<Option<TableRef>>;
}
pub type CatalogManagerRef = Arc<dyn CatalogManager>;
@@ -186,7 +193,8 @@ pub(crate) async fn handle_system_table_request<'a, M: CatalogManager>(
let table_name = &req.create_table_request.table_name;
let table_id = req.create_table_request.id;
let table = if let Some(table) = manager.table(catalog_name, schema_name, table_name)? {
let table = manager.table(catalog_name, schema_name, table_name).await?;
let table = if let Some(table) = table {
table
} else {
let table = engine
@@ -218,9 +226,11 @@ pub(crate) async fn handle_system_table_request<'a, M: CatalogManager>(
Ok(())
}
/// The number of regions in the datanode node.
pub fn region_number(catalog_manager: &CatalogManagerRef) -> Result<u64> {
/// The stats of the regions on this datanode.
/// The number of regions can be obtained from the length of the vec.
pub async fn datanode_stat(catalog_manager: &CatalogManagerRef) -> Result<(u64, Vec<RegionStat>)> {
let mut region_number: u64 = 0;
let mut region_stats = Vec::new();
for catalog_name in catalog_manager.catalog_names()? {
let catalog =
@@ -239,16 +249,39 @@ pub fn region_number(catalog_manager: &CatalogManagerRef) -> Result<u64> {
})?;
for table_name in schema.table_names()? {
let table = schema
.table(&table_name)?
.context(error::TableNotFoundSnafu {
table_info: &table_name,
})?;
let table =
schema
.table(&table_name)
.await?
.context(error::TableNotFoundSnafu {
table_info: &table_name,
})?;
let region_numbers = &table.table_info().meta.region_numbers;
region_number += region_numbers.len() as u64;
match table.region_stats() {
Ok(stats) => {
let stats = stats.into_iter().map(|stat| RegionStat {
region_id: stat.region_id,
table_name: Some(TableName {
catalog_name: catalog_name.clone(),
schema_name: schema_name.clone(),
table_name: table_name.clone(),
}),
approximate_bytes: stat.disk_usage_bytes as i64,
..Default::default()
});
region_stats.extend(stats);
}
Err(e) => {
warn!("Failed to get region status, err: {:?}", e);
}
};
}
}
}
Ok(region_number)
Ok((region_number, region_stats))
}

View File

@@ -345,7 +345,7 @@ impl CatalogManager for LocalCatalogManager {
{
let _lock = self.register_lock.lock().await;
if let Some(existing) = schema.table(&request.table_name)? {
if let Some(existing) = schema.table(&request.table_name).await? {
if existing.table_info().ident.table_id != request.table_id {
error!(
"Unexpected table register request: {:?}, existing: {:?}",
@@ -434,9 +434,10 @@ impl CatalogManager for LocalCatalogManager {
} = &request;
let table_id = self
.catalogs
.table(catalog, schema, table_name)?
.table(catalog, schema, table_name)
.await?
.with_context(|| error::TableNotExistSnafu {
table: format!("{catalog}.{schema}.{table_name}"),
table: format_full_table_name(catalog, schema, table_name),
})?
.table_info()
.ident
@@ -505,7 +506,7 @@ impl CatalogManager for LocalCatalogManager {
.schema(schema)
}
fn table(
async fn table(
&self,
catalog_name: &str,
schema_name: &str,
@@ -521,7 +522,7 @@ impl CatalogManager for LocalCatalogManager {
catalog: catalog_name,
schema: schema_name,
})?;
schema.table(table_name)
schema.table(table_name).await
}
}

View File

@@ -18,6 +18,7 @@ use std::collections::HashMap;
use std::sync::atomic::{AtomicU32, Ordering};
use std::sync::{Arc, RwLock};
use async_trait::async_trait;
use common_catalog::consts::MIN_USER_TABLE_ID;
use common_telemetry::error;
use snafu::{ensure, OptionExt};
@@ -155,16 +156,20 @@ impl CatalogManager for MemoryCatalogManager {
}
}
fn table(&self, catalog: &str, schema: &str, table_name: &str) -> Result<Option<TableRef>> {
let c = self.catalogs.read().unwrap();
let catalog = if let Some(c) = c.get(catalog) {
async fn table(
&self,
catalog: &str,
schema: &str,
table_name: &str,
) -> Result<Option<TableRef>> {
let catalog = {
let c = self.catalogs.read().unwrap();
let Some(c) = c.get(catalog) else { return Ok(None) };
c.clone()
} else {
return Ok(None);
};
match catalog.schema(schema)? {
None => Ok(None),
Some(s) => s.table(table_name),
Some(s) => s.table(table_name).await,
}
}
}
@@ -283,6 +288,7 @@ impl Default for MemorySchemaProvider {
}
}
#[async_trait]
impl SchemaProvider for MemorySchemaProvider {
fn as_any(&self) -> &dyn Any {
self
@@ -293,7 +299,7 @@ impl SchemaProvider for MemorySchemaProvider {
Ok(tables.keys().cloned().collect())
}
fn table(&self, name: &str) -> Result<Option<TableRef>> {
async fn table(&self, name: &str) -> Result<Option<TableRef>> {
let tables = self.tables.read().unwrap();
Ok(tables.get(name).cloned())
}
@@ -355,8 +361,8 @@ mod tests {
use super::*;
#[test]
fn test_new_memory_catalog_list() {
#[tokio::test]
async fn test_new_memory_catalog_list() {
let catalog_list = new_memory_catalog_list().unwrap();
let default_catalog = catalog_list.catalog(DEFAULT_CATALOG_NAME).unwrap().unwrap();
@@ -369,9 +375,9 @@ mod tests {
.register_table("numbers".to_string(), Arc::new(NumbersTable::default()))
.unwrap();
let table = default_schema.table("numbers").unwrap();
let table = default_schema.table("numbers").await.unwrap();
assert!(table.is_some());
assert!(default_schema.table("not_exists").unwrap().is_none());
assert!(default_schema.table("not_exists").await.unwrap().is_none());
}
#[tokio::test]
@@ -419,7 +425,7 @@ mod tests {
// test new table name exists
assert!(provider.table_exist(new_table_name).unwrap());
let registered_table = provider.table(new_table_name).unwrap().unwrap();
let registered_table = provider.table(new_table_name).await.unwrap().unwrap();
assert_eq!(
registered_table.table_info().ident.table_id,
test_table.table_info().ident.table_id
@@ -468,6 +474,7 @@ mod tests {
let registered_table = catalog
.table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, new_table_name)
.await
.unwrap()
.unwrap();
assert_eq!(registered_table.table_info().ident.table_id, table_id);

View File

@@ -13,16 +13,19 @@
// limitations under the License.
use std::any::Any;
use std::collections::HashMap;
use std::collections::{HashMap, HashSet};
use std::pin::Pin;
use std::sync::Arc;
use arc_swap::ArcSwap;
use async_stream::stream;
use async_trait::async_trait;
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, MIN_USER_TABLE_ID};
use common_telemetry::{debug, info};
use common_telemetry::{debug, error, info};
use dashmap::DashMap;
use futures::Stream;
use futures_util::StreamExt;
use parking_lot::RwLock;
use snafu::{OptionExt, ResultExt};
use table::engine::{EngineContext, TableEngineRef};
use table::metadata::TableId;
@@ -38,6 +41,7 @@ use crate::error::{
use crate::helper::{
build_catalog_prefix, build_schema_prefix, build_table_global_prefix, CatalogKey, CatalogValue,
SchemaKey, SchemaValue, TableGlobalKey, TableGlobalValue, TableRegionalKey, TableRegionalValue,
CATALOG_KEY_PREFIX,
};
use crate::remote::{Kv, KvBackendRef};
use crate::{
@@ -50,10 +54,9 @@ use crate::{
pub struct RemoteCatalogManager {
node_id: u64,
backend: KvBackendRef,
catalogs: Arc<ArcSwap<HashMap<String, CatalogProviderRef>>>,
catalogs: Arc<RwLock<DashMap<String, CatalogProviderRef>>>,
engine: TableEngineRef,
system_table_requests: Mutex<Vec<RegisterSystemTableRequest>>,
mutex: Arc<Mutex<()>>,
}
impl RemoteCatalogManager {
@@ -64,7 +67,6 @@ impl RemoteCatalogManager {
backend,
catalogs: Default::default(),
system_table_requests: Default::default(),
mutex: Default::default(),
}
}
@@ -108,9 +110,13 @@ impl RemoteCatalogManager {
debug!("Ignoring non-catalog key: {}", String::from_utf8_lossy(&k));
continue;
}
let key = CatalogKey::parse(&String::from_utf8_lossy(&k))
.context(InvalidCatalogValueSnafu)?;
yield Ok(key)
let catalog_key = String::from_utf8_lossy(&k);
if let Ok(key) = CatalogKey::parse(&catalog_key) {
yield Ok(key)
} else {
error!("Invalid catalog key: {:?}", catalog_key);
}
}
}))
}
@@ -381,7 +387,14 @@ impl CatalogManager for RemoteCatalogManager {
"Initialized catalogs: {:?}",
catalogs.keys().cloned().collect::<Vec<_>>()
);
self.catalogs.store(Arc::new(catalogs));
{
let self_catalogs = self.catalogs.read();
catalogs.into_iter().for_each(|(k, v)| {
self_catalogs.insert(k, v);
});
}
info!("Max table id allocated: {}", max_table_id);
let mut system_table_requests = self.system_table_requests.lock().await;
@@ -468,7 +481,7 @@ impl CatalogManager for RemoteCatalogManager {
.schema(schema)
}
fn table(
async fn table(
&self,
catalog_name: &str,
schema_name: &str,
@@ -483,7 +496,7 @@ impl CatalogManager for RemoteCatalogManager {
catalog: catalog_name,
schema: schema_name,
})?;
schema.table(table_name)
schema.table(table_name).await
}
}
@@ -499,12 +512,10 @@ impl CatalogList for RemoteCatalogManager {
) -> Result<Option<CatalogProviderRef>> {
let key = self.build_catalog_key(&name).to_string();
let backend = self.backend.clone();
let mutex = self.mutex.clone();
let catalogs = self.catalogs.clone();
std::thread::spawn(|| {
common_runtime::block_on_write(async move {
let _guard = mutex.lock().await;
backend
.set(
key.as_bytes(),
@@ -513,11 +524,10 @@ impl CatalogList for RemoteCatalogManager {
.context(InvalidCatalogValueSnafu)?,
)
.await?;
let prev_catalogs = catalogs.load();
let mut new_catalogs = HashMap::with_capacity(prev_catalogs.len() + 1);
new_catalogs.clone_from(&prev_catalogs);
let prev = new_catalogs.insert(name, catalog);
catalogs.store(Arc::new(new_catalogs));
let catalogs = catalogs.read();
let prev = catalogs.insert(name, catalog.clone());
Ok(prev)
})
})
@@ -527,12 +537,65 @@ impl CatalogList for RemoteCatalogManager {
/// List all catalogs from metasrv
fn catalog_names(&self) -> Result<Vec<String>> {
Ok(self.catalogs.load().keys().cloned().collect::<Vec<_>>())
let catalogs = self.catalogs.read();
Ok(catalogs.iter().map(|k| k.key().to_string()).collect())
}
/// Read catalog info of given name from metasrv.
fn catalog(&self, name: &str) -> Result<Option<CatalogProviderRef>> {
Ok(self.catalogs.load().get(name).cloned())
{
let catalogs = self.catalogs.read();
let catalog = catalogs.get(name);
if let Some(catalog) = catalog {
return Ok(Some(catalog.clone()));
}
}
let catalogs = self.catalogs.write();
let catalog = catalogs.get(name);
if let Some(catalog) = catalog {
return Ok(Some(catalog.clone()));
}
// It's for lack of incremental catalog syncing between datanode and meta. Here we fetch catalog
// from meta on demand. This can be removed when incremental catalog syncing is done in datanode.
let backend = self.backend.clone();
let catalogs_from_meta: HashSet<String> = std::thread::spawn(|| {
common_runtime::block_on_read(async move {
let mut stream = backend.range(CATALOG_KEY_PREFIX.as_bytes());
let mut catalogs = HashSet::new();
while let Some(catalog) = stream.next().await {
if let Ok(catalog) = catalog {
let catalog_key = String::from_utf8_lossy(&catalog.0);
if let Ok(key) = CatalogKey::parse(&catalog_key) {
catalogs.insert(key.catalog_name);
}
}
}
catalogs
})
})
.join()
.unwrap();
catalogs.retain(|catalog_name, _| catalogs_from_meta.get(catalog_name).is_some());
for catalog in catalogs_from_meta {
catalogs
.entry(catalog.clone())
.or_insert(self.new_catalog_provider(&catalog));
}
let catalog = catalogs.get(name);
Ok(catalog.as_deref().cloned())
}
}
@@ -692,6 +755,7 @@ impl RemoteSchemaProvider {
}
}
#[async_trait]
impl SchemaProvider for RemoteSchemaProvider {
fn as_any(&self) -> &dyn Any {
self
@@ -701,7 +765,7 @@ impl SchemaProvider for RemoteSchemaProvider {
Ok(self.tables.load().keys().cloned().collect::<Vec<_>>())
}
fn table(&self, name: &str) -> Result<Option<TableRef>> {
async fn table(&self, name: &str) -> Result<Option<TableRef>> {
Ok(self.tables.load().get(name).cloned())
}

View File

@@ -15,11 +15,13 @@
use std::any::Any;
use std::sync::Arc;
use async_trait::async_trait;
use table::TableRef;
use crate::error::Result;
/// Represents a schema, comprising a number of named tables.
#[async_trait]
pub trait SchemaProvider: Sync + Send {
/// Returns the schema provider as [`Any`](std::any::Any)
/// so that it can be downcast to a specific implementation.
@@ -29,7 +31,7 @@ pub trait SchemaProvider: Sync + Send {
fn table_names(&self) -> Result<Vec<String>>;
/// Retrieves a specific table from the schema by name, provided it exists.
fn table(&self, name: &str) -> Result<Option<TableRef>>;
async fn table(&self, name: &str) -> Result<Option<TableRef>>;
/// If supported by the implementation, adds a new table to this schema.
/// If a table of the same name existed before, it returns "Table already exists" error.

View File

@@ -219,7 +219,7 @@ fn build_primary_key_columns(entry_type: EntryType, key: &[u8]) -> HashMap<Strin
let mut m = HashMap::with_capacity(3);
m.insert(
"entry_type".to_string(),
Arc::new(UInt8Vector::from_slice(&[entry_type as u8])) as _,
Arc::new(UInt8Vector::from_slice([entry_type as u8])) as _,
);
m.insert(
"key".to_string(),
@@ -228,7 +228,7 @@ fn build_primary_key_columns(entry_type: EntryType, key: &[u8]) -> HashMap<Strin
// Timestamp in key part is intentionally left to 0
m.insert(
"timestamp".to_string(),
Arc::new(TimestampMillisecondVector::from_slice(&[0])) as _,
Arc::new(TimestampMillisecondVector::from_slice([0])) as _,
);
m
}
@@ -258,12 +258,12 @@ pub fn build_insert_request(entry_type: EntryType, key: &[u8], value: &[u8]) ->
let now = util::current_time_millis();
columns_values.insert(
"gmt_created".to_string(),
Arc::new(TimestampMillisecondVector::from_slice(&[now])) as _,
Arc::new(TimestampMillisecondVector::from_slice([now])) as _,
);
columns_values.insert(
"gmt_modified".to_string(),
Arc::new(TimestampMillisecondVector::from_slice(&[now])) as _,
Arc::new(TimestampMillisecondVector::from_slice([now])) as _,
);
InsertRequest {
@@ -395,6 +395,7 @@ pub struct TableEntryValue {
#[cfg(test)]
mod tests {
use common_recordbatch::RecordBatches;
use common_test_util::temp_dir::{create_temp_dir, TempDir};
use datatypes::value::Value;
use log_store::NoopLogStore;
use mito::config::EngineConfig;
@@ -405,7 +406,6 @@ mod tests {
use storage::EngineImpl;
use table::metadata::TableType;
use table::metadata::TableType::Base;
use tempdir::TempDir;
use super::*;
@@ -480,7 +480,7 @@ mod tests {
}
pub async fn prepare_table_engine() -> (TempDir, TableEngineRef) {
let dir = TempDir::new("system-table-test").unwrap();
let dir = create_temp_dir("system-table-test");
let store_dir = dir.path().to_string_lossy();
let accessor = object_store::services::Fs::default()
.root(&store_dir)

View File

@@ -0,0 +1,178 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::HashMap;
use std::sync::Arc;
use common_catalog::format_full_table_name;
use datafusion::common::{OwnedTableReference, ResolvedTableReference, TableReference};
use datafusion::datasource::provider_as_source;
use datafusion::logical_expr::TableSource;
use session::context::QueryContext;
use snafu::{ensure, OptionExt};
use table::table::adapter::DfTableProviderAdapter;
use crate::error::{
CatalogNotFoundSnafu, QueryAccessDeniedSnafu, Result, SchemaNotFoundSnafu, TableNotExistSnafu,
};
use crate::CatalogListRef;
pub struct DfTableSourceProvider {
catalog_list: CatalogListRef,
resolved_tables: HashMap<String, Arc<dyn TableSource>>,
disallow_cross_schema_query: bool,
default_catalog: String,
default_schema: String,
}
impl DfTableSourceProvider {
pub fn new(
catalog_list: CatalogListRef,
disallow_cross_schema_query: bool,
query_ctx: &QueryContext,
) -> Self {
Self {
catalog_list,
disallow_cross_schema_query,
resolved_tables: HashMap::new(),
default_catalog: query_ctx.current_catalog(),
default_schema: query_ctx.current_schema(),
}
}
pub fn resolve_table_ref<'a>(
&'a self,
table_ref: TableReference<'a>,
) -> Result<ResolvedTableReference<'a>> {
if self.disallow_cross_schema_query {
match &table_ref {
TableReference::Bare { .. } => (),
TableReference::Partial { schema, .. } => {
ensure!(
schema.as_ref() == self.default_schema,
QueryAccessDeniedSnafu {
catalog: &self.default_catalog,
schema: schema.as_ref(),
}
);
}
TableReference::Full {
catalog, schema, ..
} => {
ensure!(
catalog.as_ref() == self.default_catalog
&& schema.as_ref() == self.default_schema,
QueryAccessDeniedSnafu {
catalog: catalog.as_ref(),
schema: schema.as_ref()
}
);
}
};
}
Ok(table_ref.resolve(&self.default_catalog, &self.default_schema))
}
pub async fn resolve_table(
&mut self,
table_ref: OwnedTableReference,
) -> Result<Arc<dyn TableSource>> {
let table_ref = table_ref.as_table_reference();
let table_ref = self.resolve_table_ref(table_ref)?;
let resolved_name = table_ref.to_string();
if let Some(table) = self.resolved_tables.get(&resolved_name) {
return Ok(table.clone());
}
let catalog_name = table_ref.catalog.as_ref();
let schema_name = table_ref.schema.as_ref();
let table_name = table_ref.table.as_ref();
let catalog = self
.catalog_list
.catalog(catalog_name)?
.context(CatalogNotFoundSnafu { catalog_name })?;
let schema = catalog.schema(schema_name)?.context(SchemaNotFoundSnafu {
catalog: catalog_name,
schema: schema_name,
})?;
let table = schema
.table(table_name)
.await?
.with_context(|| TableNotExistSnafu {
table: format_full_table_name(catalog_name, schema_name, table_name),
})?;
let table = DfTableProviderAdapter::new(table);
let table = provider_as_source(Arc::new(table));
self.resolved_tables.insert(resolved_name, table.clone());
Ok(table)
}
}
#[cfg(test)]
mod tests {
use std::borrow::Cow;
use session::context::QueryContext;
use super::*;
use crate::local::MemoryCatalogManager;
#[test]
fn test_validate_table_ref() {
let query_ctx = &QueryContext::with("greptime", "public");
let table_provider =
DfTableSourceProvider::new(Arc::new(MemoryCatalogManager::default()), true, query_ctx);
let table_ref = TableReference::Bare {
table: Cow::Borrowed("table_name"),
};
let result = table_provider.resolve_table_ref(table_ref);
assert!(result.is_ok());
let table_ref = TableReference::Partial {
schema: Cow::Borrowed("public"),
table: Cow::Borrowed("table_name"),
};
let result = table_provider.resolve_table_ref(table_ref);
assert!(result.is_ok());
let table_ref = TableReference::Partial {
schema: Cow::Borrowed("wrong_schema"),
table: Cow::Borrowed("table_name"),
};
let result = table_provider.resolve_table_ref(table_ref);
assert!(result.is_err());
let table_ref = TableReference::Full {
catalog: Cow::Borrowed("greptime"),
schema: Cow::Borrowed("public"),
table: Cow::Borrowed("table_name"),
};
let result = table_provider.resolve_table_ref(table_ref);
assert!(result.is_ok());
let table_ref = TableReference::Full {
catalog: Cow::Borrowed("wrong_catalog"),
schema: Cow::Borrowed("public"),
table: Cow::Borrowed("table_name"),
};
let result = table_provider.resolve_table_ref(table_ref);
assert!(result.is_err());
}
}

View File

@@ -20,6 +20,7 @@ use std::sync::Arc;
use std::task::{Context, Poll};
use async_stream::stream;
use async_trait::async_trait;
use common_catalog::consts::{INFORMATION_SCHEMA_NAME, SYSTEM_CATALOG_TABLE_NAME};
use common_error::ext::BoxedError;
use common_query::logical_plan::Expr;
@@ -200,6 +201,7 @@ pub struct InformationSchema {
pub system: Arc<SystemCatalogTable>,
}
#[async_trait]
impl SchemaProvider for InformationSchema {
fn as_any(&self) -> &dyn Any {
self
@@ -212,7 +214,7 @@ impl SchemaProvider for InformationSchema {
])
}
fn table(&self, name: &str) -> Result<Option<TableRef>, Error> {
async fn table(&self, name: &str) -> Result<Option<TableRef>, Error> {
if name.eq_ignore_ascii_case("tables") {
Ok(Some(self.tables.clone()))
} else if name.eq_ignore_ascii_case(SYSTEM_CATALOG_TABLE_NAME) {

View File

@@ -71,6 +71,7 @@ mod tests {
let registered_table = catalog_manager
.table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, new_table_name)
.await
.unwrap()
.unwrap();
assert_eq!(registered_table.table_info().ident.table_id, table_id);
@@ -158,6 +159,7 @@ mod tests {
let table = guard.as_ref().unwrap();
let table_registered = catalog_manager
.table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, "test_table")
.await
.unwrap()
.unwrap();
assert_eq!(

View File

@@ -221,4 +221,8 @@ impl TableEngine for MockTableEngine {
) -> table::Result<bool> {
unimplemented!()
}
async fn close(&self) -> table::Result<()> {
Ok(())
}
}

View File

@@ -16,13 +16,14 @@ common-grpc-expr = { path = "../common/grpc-expr" }
common-query = { path = "../common/query" }
common-recordbatch = { path = "../common/recordbatch" }
common-time = { path = "../common/time" }
common-telemetry = { path = "../common/telemetry" }
datafusion.workspace = true
datatypes = { path = "../datatypes" }
enum_dispatch = "0.3"
futures-util.workspace = true
parking_lot = "0.12"
prost.workspace = true
rand = "0.8"
rand.workspace = true
snafu.workspace = true
tonic.workspace = true

View File

@@ -14,6 +14,7 @@
use std::sync::Arc;
use api::v1::greptime_database_client::GreptimeDatabaseClient;
use arrow_flight::flight_service_client::FlightServiceClient;
use common_grpc::channel_manager::ChannelManager;
use parking_lot::RwLock;
@@ -23,6 +24,10 @@ use tonic::transport::Channel;
use crate::load_balance::{LoadBalance, Loadbalancer};
use crate::{error, Result};
pub(crate) struct DatabaseClient {
pub(crate) inner: GreptimeDatabaseClient<Channel>,
}
pub(crate) struct FlightClient {
addr: String,
client: FlightServiceClient<Channel>,
@@ -118,7 +123,7 @@ impl Client {
self.inner.set_peers(urls);
}
pub(crate) fn make_client(&self) -> Result<FlightClient> {
fn find_channel(&self) -> Result<(String, Channel)> {
let addr = self
.inner
.get_peer()
@@ -131,11 +136,23 @@ impl Client {
.channel_manager
.get(&addr)
.context(error::CreateChannelSnafu { addr: &addr })?;
Ok((addr, channel))
}
pub(crate) fn make_flight_client(&self) -> Result<FlightClient> {
let (addr, channel) = self.find_channel()?;
Ok(FlightClient {
addr,
client: FlightServiceClient::new(channel),
})
}
pub(crate) fn make_database_client(&self) -> Result<DatabaseClient> {
let (_, channel) = self.find_channel()?;
Ok(DatabaseClient {
inner: GreptimeDatabaseClient::new(channel),
})
}
}
#[cfg(test)]

View File

@@ -12,25 +12,27 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use std::str::FromStr;
use api::v1::auth_header::AuthScheme;
use api::v1::ddl_request::Expr as DdlExpr;
use api::v1::greptime_request::Request;
use api::v1::query_request::Query;
use api::v1::{
AlterExpr, AuthHeader, CreateTableExpr, DdlRequest, DropTableExpr, GreptimeRequest,
InsertRequest, QueryRequest, RequestHeader,
greptime_response, AffectedRows, AlterExpr, AuthHeader, CreateTableExpr, DdlRequest,
DropTableExpr, FlushTableExpr, GreptimeRequest, InsertRequest, PromRangeQuery, QueryRequest,
RequestHeader,
};
use arrow_flight::{FlightData, Ticket};
use common_error::prelude::*;
use common_grpc::flight::{flight_messages_to_recordbatches, FlightDecoder, FlightMessage};
use common_query::Output;
use common_telemetry::logging;
use futures_util::{TryFutureExt, TryStreamExt};
use prost::Message;
use snafu::{ensure, ResultExt};
use crate::error::{ConvertFlightDataSnafu, IllegalFlightMessagesSnafu};
use crate::error::{
ConvertFlightDataSnafu, IllegalDatabaseResponseSnafu, IllegalFlightMessagesSnafu,
};
use crate::{error, Client, Result};
#[derive(Clone, Debug)]
@@ -55,10 +57,18 @@ impl Database {
}
}
pub fn catalog(&self) -> &String {
&self.catalog
}
pub fn set_catalog(&mut self, catalog: impl Into<String>) {
self.catalog = catalog.into();
}
pub fn schema(&self) -> &String {
&self.schema
}
pub fn set_schema(&mut self, schema: impl Into<String>) {
self.schema = schema.into();
}
@@ -69,8 +79,26 @@ impl Database {
});
}
pub async fn insert(&self, request: InsertRequest) -> Result<Output> {
self.do_get(Request::Insert(request)).await
pub async fn insert(&self, request: InsertRequest) -> Result<u32> {
let mut client = self.client.make_database_client()?.inner;
let request = GreptimeRequest {
header: Some(RequestHeader {
catalog: self.catalog.clone(),
schema: self.schema.clone(),
authorization: self.ctx.auth_header.clone(),
}),
request: Some(Request::Insert(request)),
};
let response = client
.handle(request)
.await?
.into_inner()
.response
.context(IllegalDatabaseResponseSnafu {
err_msg: "GreptimeResponse is empty",
})?;
let greptime_response::Response::AffectedRows(AffectedRows { value }) = response;
Ok(value)
}
pub async fn sql(&self, sql: &str) -> Result<Output> {
@@ -87,6 +115,24 @@ impl Database {
.await
}
pub async fn prom_range_query(
&self,
promql: &str,
start: &str,
end: &str,
step: &str,
) -> Result<Output> {
self.do_get(Request::Query(QueryRequest {
query: Some(Query::PromRangeQuery(PromRangeQuery {
query: promql.to_string(),
start: start.to_string(),
end: end.to_string(),
step: step.to_string(),
})),
}))
.await
}
pub async fn create(&self, expr: CreateTableExpr) -> Result<Output> {
self.do_get(Request::Ddl(DdlRequest {
expr: Some(DdlExpr::CreateTable(expr)),
@@ -108,6 +154,13 @@ impl Database {
.await
}
pub async fn flush_table(&self, expr: FlushTableExpr) -> Result<Output> {
self.do_get(Request::Ddl(DdlRequest {
expr: Some(DdlExpr::FlushTable(expr)),
}))
.await
}
async fn do_get(&self, request: Request) -> Result<Output> {
let request = GreptimeRequest {
header: Some(RequestHeader {
@@ -118,10 +171,10 @@ impl Database {
request: Some(request),
};
let request = Ticket {
ticket: request.encode_to_vec(),
ticket: request.encode_to_vec().into(),
};
let mut client = self.client.make_client()?;
let mut client = self.client.make_flight_client()?;
// TODO(LFC): Streaming get flight data.
let flight_data: Vec<FlightData> = client
@@ -130,17 +183,26 @@ impl Database {
.and_then(|response| response.into_inner().try_collect())
.await
.map_err(|e| {
let code = get_metadata_value(&e, INNER_ERROR_CODE)
.and_then(|s| StatusCode::from_str(&s).ok())
.unwrap_or(StatusCode::Unknown);
let msg = get_metadata_value(&e, INNER_ERROR_MSG).unwrap_or(e.to_string());
error::ExternalSnafu { code, msg }
let tonic_code = e.code();
let e: error::Error = e.into();
let code = e.status_code();
let msg = e.to_string();
error::ServerSnafu { code, msg }
.fail::<()>()
.map_err(BoxedError::new)
.context(error::FlightGetSnafu {
tonic_code: e.code(),
tonic_code,
addr: client.addr(),
})
.map_err(|error| {
logging::error!(
"Failed to do Flight get, addr: {}, code: {}, source: {}",
client.addr(),
tonic_code,
error
);
error
})
.unwrap_err()
})?;
@@ -167,12 +229,6 @@ impl Database {
}
}
fn get_metadata_value(e: &tonic::Status, key: &str) -> Option<String> {
e.metadata()
.get(key)
.and_then(|v| String::from_utf8(v.as_bytes().to_vec()).ok())
}
#[derive(Default, Debug, Clone)]
pub struct FlightContext {
auth_header: Option<AuthHeader>,

View File

@@ -13,9 +13,10 @@
// limitations under the License.
use std::any::Any;
use std::str::FromStr;
use common_error::prelude::*;
use tonic::Code;
use tonic::{Code, Status};
#[derive(Debug, Snafu)]
#[snafu(visibility(pub))]
@@ -26,12 +27,7 @@ pub enum Error {
backtrace: Backtrace,
},
#[snafu(display(
"Failed to do Flight get, addr: {}, code: {}, source: {}",
addr,
tonic_code,
source
))]
#[snafu(display("Failed to do Flight get, code: {}, source: {}", tonic_code, source))]
FlightGet {
addr: String,
tonic_code: Code,
@@ -70,9 +66,12 @@ pub enum Error {
source: common_grpc::error::Error,
},
/// Error deserialized from gRPC metadata
// Server error carried in Tonic Status's metadata.
#[snafu(display("{}", msg))]
ExternalError { code: StatusCode, msg: String },
Server { code: StatusCode, msg: String },
#[snafu(display("Illegal Database response: {err_msg}"))]
IllegalDatabaseResponse { err_msg: String },
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -82,13 +81,15 @@ impl ErrorExt for Error {
match self {
Error::IllegalFlightMessages { .. }
| Error::ColumnDataType { .. }
| Error::MissingField { .. } => StatusCode::Internal,
| Error::MissingField { .. }
| Error::IllegalDatabaseResponse { .. } => StatusCode::Internal,
Error::Server { code, .. } => *code,
Error::FlightGet { source, .. } => source.status_code(),
Error::CreateChannel { source, .. } | Error::ConvertFlightData { source } => {
source.status_code()
}
Error::IllegalGrpcClientState { .. } => StatusCode::Unexpected,
Error::ExternalError { code, .. } => *code,
}
}
@@ -100,3 +101,21 @@ impl ErrorExt for Error {
self
}
}
impl From<Status> for Error {
fn from(e: Status) -> Self {
fn get_metadata_value(e: &Status, key: &str) -> Option<String> {
e.metadata()
.get(key)
.and_then(|v| String::from_utf8(v.as_bytes().to_vec()).ok())
}
let code = get_metadata_value(&e, INNER_ERROR_CODE)
.and_then(|s| StatusCode::from_str(&s).ok())
.unwrap_or(StatusCode::Unknown);
let msg = get_metadata_value(&e, INNER_ERROR_MSG).unwrap_or(e.to_string());
Self::Server { code, msg }
}
}

View File

@@ -9,8 +9,12 @@ default-run = "greptime"
name = "greptime"
path = "src/bin/greptime.rs"
[features]
mem-prof = ["tikv-jemallocator", "tikv-jemalloc-ctl"]
[dependencies]
anymap = "1.0.0-beta.2"
catalog = { path = "../catalog" }
clap = { version = "3.1", features = ["derive"] }
client = { path = "../client" }
common-base = { path = "../common/base" }
@@ -27,17 +31,24 @@ futures.workspace = true
meta-client = { path = "../meta-client" }
meta-srv = { path = "../meta-srv" }
nu-ansi-term = "0.46"
partition = { path = "../partition" }
query = { path = "../query" }
rustyline = "10.1"
serde.workspace = true
servers = { path = "../servers" }
session = { path = "../session" }
snafu.workspace = true
substrait = { path = "../common/substrait" }
tikv-jemalloc-ctl = { version = "0.5", optional = true }
tikv-jemallocator = { version = "0.5", optional = true }
tokio.workspace = true
toml = "0.5"
[dev-dependencies]
common-test-util = { path = "../common/test-util" }
rexpect = "0.5"
serde.workspace = true
tempdir = "0.3"
[build-dependencies]
build-data = "0.1.3"

View File

@@ -30,9 +30,39 @@ struct Command {
subcmd: SubCommand,
}
pub enum Application {
Datanode(datanode::Instance),
Frontend(frontend::Instance),
Metasrv(metasrv::Instance),
Standalone(standalone::Instance),
Cli(cli::Instance),
}
impl Application {
async fn run(&mut self) -> Result<()> {
match self {
Application::Datanode(instance) => instance.run().await,
Application::Frontend(instance) => instance.run().await,
Application::Metasrv(instance) => instance.run().await,
Application::Standalone(instance) => instance.run().await,
Application::Cli(instance) => instance.run().await,
}
}
async fn stop(&self) -> Result<()> {
match self {
Application::Datanode(instance) => instance.stop().await,
Application::Frontend(instance) => instance.stop().await,
Application::Metasrv(instance) => instance.stop().await,
Application::Standalone(instance) => instance.stop().await,
Application::Cli(instance) => instance.stop().await,
}
}
}
impl Command {
async fn run(self) -> Result<()> {
self.subcmd.run().await
async fn build(self) -> Result<Application> {
self.subcmd.build().await
}
}
@@ -51,13 +81,28 @@ enum SubCommand {
}
impl SubCommand {
async fn run(self) -> Result<()> {
async fn build(self) -> Result<Application> {
match self {
SubCommand::Datanode(cmd) => cmd.run().await,
SubCommand::Frontend(cmd) => cmd.run().await,
SubCommand::Metasrv(cmd) => cmd.run().await,
SubCommand::Standalone(cmd) => cmd.run().await,
SubCommand::Cli(cmd) => cmd.run().await,
SubCommand::Datanode(cmd) => {
let app = cmd.build().await?;
Ok(Application::Datanode(app))
}
SubCommand::Frontend(cmd) => {
let app = cmd.build().await?;
Ok(Application::Frontend(app))
}
SubCommand::Metasrv(cmd) => {
let app = cmd.build().await?;
Ok(Application::Metasrv(app))
}
SubCommand::Standalone(cmd) => {
let app = cmd.build().await?;
Ok(Application::Standalone(app))
}
SubCommand::Cli(cmd) => {
let app = cmd.build().await?;
Ok(Application::Cli(app))
}
}
}
}
@@ -87,6 +132,10 @@ fn print_version() -> &'static str {
)
}
#[cfg(feature = "mem-prof")]
#[global_allocator]
static ALLOC: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc;
#[tokio::main]
async fn main() -> Result<()> {
let cmd = Command::parse();
@@ -100,13 +149,18 @@ async fn main() -> Result<()> {
common_telemetry::init_default_metrics_recorder();
let _guard = common_telemetry::init_global_logging(app_name, log_dir, log_level, false);
let mut app = cmd.build().await?;
tokio::select! {
result = cmd.run() => {
result = app.run() => {
if let Err(err) = result {
error!(err; "Fatal error occurs!");
}
}
_ = tokio::signal::ctrl_c() => {
if let Err(err) = app.stop().await {
error!(err; "Fatal error occurs!");
}
info!("Goodbye!");
}
}

View File

@@ -17,10 +17,24 @@ mod helper;
mod repl;
use clap::Parser;
use repl::Repl;
pub use repl::Repl;
use crate::error::Result;
pub struct Instance {
repl: Repl,
}
impl Instance {
pub async fn run(&mut self) -> Result<()> {
self.repl.run().await
}
pub async fn stop(&self) -> Result<()> {
Ok(())
}
}
#[derive(Parser)]
pub struct Command {
#[clap(subcommand)]
@@ -28,8 +42,8 @@ pub struct Command {
}
impl Command {
pub async fn run(self) -> Result<()> {
self.cmd.run().await
pub async fn build(self) -> Result<Instance> {
self.cmd.build().await
}
}
@@ -39,9 +53,9 @@ enum SubCommand {
}
impl SubCommand {
async fn run(self) -> Result<()> {
async fn build(self) -> Result<Instance> {
match self {
SubCommand::Attach(cmd) => cmd.run().await,
SubCommand::Attach(cmd) => cmd.build().await,
}
}
}
@@ -50,13 +64,15 @@ impl SubCommand {
pub(crate) struct AttachCommand {
#[clap(long)]
pub(crate) grpc_addr: String,
#[clap(long)]
pub(crate) meta_addr: Option<String>,
#[clap(long, action)]
pub(crate) disable_helper: bool,
}
impl AttachCommand {
async fn run(self) -> Result<()> {
let mut repl = Repl::try_new(&self)?;
repl.run().await
async fn build(self) -> Result<Instance> {
let repl = Repl::try_new(&self).await?;
Ok(Instance { repl })
}
}

View File

@@ -13,28 +13,44 @@
// limitations under the License.
use std::path::PathBuf;
use std::sync::Arc;
use std::time::Instant;
use catalog::remote::MetaKvBackend;
use client::{Client, Database, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_error::prelude::ErrorExt;
use common_query::Output;
use common_recordbatch::RecordBatches;
use common_telemetry::logging;
use either::Either;
use frontend::catalog::FrontendCatalogManager;
use frontend::datanode::DatanodeClients;
use meta_client::client::MetaClientBuilder;
use partition::manager::PartitionRuleManager;
use partition::route::TableRoutes;
use query::datafusion::DatafusionQueryEngine;
use query::logical_optimizer::LogicalOptimizer;
use query::parser::QueryLanguageParser;
use query::plan::LogicalPlan;
use query::query_engine::QueryEngineState;
use query::QueryEngine;
use rustyline::error::ReadlineError;
use rustyline::Editor;
use session::context::QueryContext;
use snafu::{ErrorCompat, ResultExt};
use substrait::{DFLogicalSubstraitConvertor, SubstraitPlan};
use crate::cli::cmd::ReplCommand;
use crate::cli::helper::RustylineHelper;
use crate::cli::AttachCommand;
use crate::error::{
CollectRecordBatchesSnafu, PrettyPrintRecordBatchesSnafu, ReadlineSnafu, ReplCreationSnafu,
RequestDatabaseSnafu, Result,
CollectRecordBatchesSnafu, ParseSqlSnafu, PlanStatementSnafu, PrettyPrintRecordBatchesSnafu,
ReadlineSnafu, ReplCreationSnafu, RequestDatabaseSnafu, Result, StartMetaClientSnafu,
SubstraitEncodeLogicalPlanSnafu,
};
/// Captures the state of the repl, gathers commands and executes them one by one
pub(crate) struct Repl {
pub struct Repl {
/// Rustyline editor for interacting with user on command line
rl: Editor<RustylineHelper>,
@@ -43,6 +59,8 @@ pub(crate) struct Repl {
/// Client for interacting with GreptimeDB
database: Database,
query_engine: Option<DatafusionQueryEngine>,
}
#[allow(clippy::print_stdout)]
@@ -51,7 +69,7 @@ impl Repl {
println!("{}", ReplCommand::help())
}
pub(crate) fn try_new(cmd: &AttachCommand) -> Result<Self> {
pub(crate) async fn try_new(cmd: &AttachCommand) -> Result<Self> {
let mut rl = Editor::new().context(ReplCreationSnafu)?;
if !cmd.disable_helper {
@@ -69,10 +87,17 @@ impl Repl {
let client = Client::with_urls([&cmd.grpc_addr]);
let database = Database::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, client);
let query_engine = if let Some(meta_addr) = &cmd.meta_addr {
create_query_engine(meta_addr).await.map(Some)?
} else {
None
};
Ok(Self {
rl,
prompt: "> ".to_string(),
database,
query_engine,
})
}
@@ -134,11 +159,33 @@ impl Repl {
async fn do_execute_sql(&self, sql: String) -> Result<()> {
let start = Instant::now();
let output = self
.database
.sql(&sql)
.await
.context(RequestDatabaseSnafu { sql: &sql })?;
let output = if let Some(query_engine) = &self.query_engine {
let stmt = QueryLanguageParser::parse_sql(&sql)
.with_context(|_| ParseSqlSnafu { sql: sql.clone() })?;
let query_ctx = Arc::new(QueryContext::with(
self.database.catalog(),
self.database.schema(),
));
let plan = query_engine
.planner()
.plan(stmt, query_ctx)
.await
.context(PlanStatementSnafu)?;
let LogicalPlan::DfPlan(plan) =
query_engine.optimize(&plan).context(PlanStatementSnafu)?;
let plan = DFLogicalSubstraitConvertor {}
.encode(plan)
.context(SubstraitEncodeLogicalPlanSnafu)?;
self.database.logical_plan(plan.to_vec()).await
} else {
self.database.sql(&sql).await
}
.context(RequestDatabaseSnafu { sql: &sql })?;
let either = match output {
Output::Stream(s) => {
@@ -197,3 +244,30 @@ fn history_file() -> PathBuf {
buf.push(".greptimedb_cli_history");
buf
}
async fn create_query_engine(meta_addr: &str) -> Result<DatafusionQueryEngine> {
let mut meta_client = MetaClientBuilder::default().enable_store().build();
meta_client
.start([meta_addr])
.await
.context(StartMetaClientSnafu)?;
let meta_client = Arc::new(meta_client);
let backend = Arc::new(MetaKvBackend {
client: meta_client.clone(),
});
let table_routes = Arc::new(TableRoutes::new(meta_client));
let partition_manager = Arc::new(PartitionRuleManager::new(table_routes));
let datanode_clients = Arc::new(DatanodeClients::default());
let catalog_list = Arc::new(FrontendCatalogManager::new(
backend,
partition_manager,
datanode_clients,
));
let state = Arc::new(QueryEngineState::new(catalog_list, Default::default()));
Ok(DatafusionQueryEngine::new(state))
}

View File

@@ -21,9 +21,26 @@ use meta_client::MetaClientOptions;
use servers::Mode;
use snafu::ResultExt;
use crate::error::{Error, MissingConfigSnafu, Result, StartDatanodeSnafu};
use crate::error::{Error, MissingConfigSnafu, Result, ShutdownDatanodeSnafu, StartDatanodeSnafu};
use crate::toml_loader;
pub struct Instance {
datanode: Datanode,
}
impl Instance {
pub async fn run(&mut self) -> Result<()> {
self.datanode.start().await.context(StartDatanodeSnafu)
}
pub async fn stop(&self) -> Result<()> {
self.datanode
.shutdown()
.await
.context(ShutdownDatanodeSnafu)
}
}
#[derive(Parser)]
pub struct Command {
#[clap(subcommand)]
@@ -31,8 +48,8 @@ pub struct Command {
}
impl Command {
pub async fn run(self) -> Result<()> {
self.subcmd.run().await
pub async fn build(self) -> Result<Instance> {
self.subcmd.build().await
}
}
@@ -42,9 +59,9 @@ enum SubCommand {
}
impl SubCommand {
async fn run(self) -> Result<()> {
async fn build(self) -> Result<Instance> {
match self {
SubCommand::Start(cmd) => cmd.run().await,
SubCommand::Start(cmd) => cmd.build().await,
}
}
}
@@ -72,19 +89,16 @@ struct StartCommand {
}
impl StartCommand {
async fn run(self) -> Result<()> {
async fn build(self) -> Result<Instance> {
logging::info!("Datanode start command: {:#?}", self);
let opts: DatanodeOptions = self.try_into()?;
logging::info!("Datanode options: {:#?}", opts);
Datanode::new(opts)
.await
.context(StartDatanodeSnafu)?
.start()
.await
.context(StartDatanodeSnafu)
let datanode = Datanode::new(opts).await.context(StartDatanodeSnafu)?;
Ok(Instance { datanode })
}
}
@@ -138,7 +152,6 @@ impl TryFrom<StartCommand> for DatanodeOptions {
if let Some(wal_dir) = cmd.wal_dir {
opts.wal.dir = wal_dir;
}
if let Some(procedure_dir) = cmd.procedure_dir {
opts.procedure = Some(ProcedureConfig::from_file_path(procedure_dir));
}
@@ -150,8 +163,10 @@ impl TryFrom<StartCommand> for DatanodeOptions {
#[cfg(test)]
mod tests {
use std::assert_matches::assert_matches;
use std::io::Write;
use std::time::Duration;
use common_test_util::temp_dir::create_named_temp_file;
use datanode::datanode::{CompactionConfig, ObjectStoreConfig};
use servers::Mode;
@@ -159,18 +174,57 @@ mod tests {
#[test]
fn test_read_from_config_file() {
let mut file = create_named_temp_file();
let toml_str = r#"
mode = "distributed"
enable_memory_catalog = false
node_id = 42
rpc_addr = "127.0.0.1:3001"
rpc_hostname = "127.0.0.1"
rpc_runtime_size = 8
mysql_addr = "127.0.0.1:4406"
mysql_runtime_size = 2
[meta_client_options]
metasrv_addrs = ["127.0.0.1:3002"]
timeout_millis = 3000
connect_timeout_millis = 5000
tcp_nodelay = true
[wal]
dir = "/tmp/greptimedb/wal"
file_size = "1GB"
purge_threshold = "50GB"
purge_interval = "10m"
read_batch_size = 128
sync_write = false
[storage]
type = "File"
data_dir = "/tmp/greptimedb/data/"
[compaction]
max_inflight_tasks = 4
max_files_in_level0 = 8
max_purge_tasks = 32
"#;
write!(file, "{}", toml_str).unwrap();
let cmd = StartCommand {
config_file: Some(format!(
"{}/../../config/datanode.example.toml",
std::env::current_dir().unwrap().as_path().to_str().unwrap()
)),
config_file: Some(file.path().to_str().unwrap().to_string()),
..Default::default()
};
let options: DatanodeOptions = cmd.try_into().unwrap();
assert_eq!("127.0.0.1:3001".to_string(), options.rpc_addr);
assert_eq!("/tmp/greptimedb/wal".to_string(), options.wal.dir);
assert_eq!("127.0.0.1:4406".to_string(), options.mysql_addr);
assert_eq!(4, options.mysql_runtime_size);
assert_eq!(2, options.mysql_runtime_size);
assert_eq!(Some(42), options.node_id);
assert_eq!(Duration::from_secs(600), options.wal.purge_interval);
assert_eq!(1024 * 1024 * 1024, options.wal.file_size.0);
assert_eq!(1024 * 1024 * 1024 * 50, options.wal.purge_threshold.0);
assert!(!options.wal.sync_write);
let MetaClientOptions {
metasrv_addrs: metasrv_addr,
timeout_millis,
@@ -181,7 +235,7 @@ mod tests {
assert_eq!(vec!["127.0.0.1:3002".to_string()], metasrv_addr);
assert_eq!(5000, connect_timeout_millis);
assert_eq!(3000, timeout_millis);
assert!(!tcp_nodelay);
assert!(tcp_nodelay);
match options.storage {
ObjectStoreConfig::File(FileConfig { data_dir }) => {
@@ -194,7 +248,7 @@ mod tests {
assert_eq!(
CompactionConfig {
max_inflight_tasks: 4,
max_files_in_level0: 16,
max_files_in_level0: 8,
max_purge_tasks: 32,
},
options.compaction
@@ -232,32 +286,4 @@ mod tests {
})
.unwrap();
}
#[test]
fn test_merge_config() {
let dn_opts = DatanodeOptions::try_from(StartCommand {
config_file: Some(format!(
"{}/../../config/datanode.example.toml",
std::env::current_dir().unwrap().as_path().to_str().unwrap()
)),
..Default::default()
})
.unwrap();
assert_eq!("/tmp/greptimedb/wal", dn_opts.wal.dir);
assert_eq!(Duration::from_secs(600), dn_opts.wal.purge_interval);
assert_eq!(1024 * 1024 * 1024, dn_opts.wal.file_size.0);
assert_eq!(1024 * 1024 * 1024 * 50, dn_opts.wal.purge_threshold.0);
assert!(!dn_opts.wal.sync_write);
assert_eq!(Some(42), dn_opts.node_id);
let MetaClientOptions {
metasrv_addrs: metasrv_addr,
timeout_millis,
connect_timeout_millis,
tcp_nodelay,
} = dn_opts.meta_client_options.unwrap();
assert_eq!(vec!["127.0.0.1:3002".to_string()], metasrv_addr);
assert_eq!(3000, timeout_millis);
assert_eq!(5000, connect_timeout_millis);
assert!(!tcp_nodelay);
}
}

View File

@@ -26,18 +26,42 @@ pub enum Error {
source: datanode::error::Error,
},
#[snafu(display("Failed to shutdown datanode, source: {}", source))]
ShutdownDatanode {
#[snafu(backtrace)]
source: datanode::error::Error,
},
#[snafu(display("Failed to start frontend, source: {}", source))]
StartFrontend {
#[snafu(backtrace)]
source: frontend::error::Error,
},
#[snafu(display("Failed to shutdown frontend, source: {}", source))]
ShutdownFrontend {
#[snafu(backtrace)]
source: frontend::error::Error,
},
#[snafu(display("Failed to build meta server, source: {}", source))]
BuildMetaServer {
#[snafu(backtrace)]
source: meta_srv::error::Error,
},
#[snafu(display("Failed to start meta server, source: {}", source))]
StartMetaServer {
#[snafu(backtrace)]
source: meta_srv::error::Error,
},
#[snafu(display("Failed to shutdown meta server, source: {}", source))]
ShutdownMetaServer {
#[snafu(backtrace)]
source: meta_srv::error::Error,
},
#[snafu(display("Failed to read config file: {}, source: {}", path, source))]
ReadConfig {
path: String,
@@ -103,6 +127,31 @@ pub enum Error {
#[snafu(backtrace)]
source: common_recordbatch::error::Error,
},
#[snafu(display("Failed to start Meta client, source: {}", source))]
StartMetaClient {
#[snafu(backtrace)]
source: meta_client::error::Error,
},
#[snafu(display("Failed to parse SQL: {}, source: {}", sql, source))]
ParseSql {
sql: String,
#[snafu(backtrace)]
source: query::error::Error,
},
#[snafu(display("Failed to plan statement, source: {}", source))]
PlanStatement {
#[snafu(backtrace)]
source: query::error::Error,
},
#[snafu(display("Failed to encode logical plan in substrait, source: {}", source))]
SubstraitEncodeLogicalPlan {
#[snafu(backtrace)]
source: substrait::error::Error,
},
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -112,7 +161,11 @@ impl ErrorExt for Error {
match self {
Error::StartDatanode { source } => source.status_code(),
Error::StartFrontend { source } => source.status_code(),
Error::ShutdownDatanode { source } => source.status_code(),
Error::ShutdownFrontend { source } => source.status_code(),
Error::StartMetaServer { source } => source.status_code(),
Error::ShutdownMetaServer { source } => source.status_code(),
Error::BuildMetaServer { source } => source.status_code(),
Error::UnsupportedSelectorType { source, .. } => source.status_code(),
Error::ReadConfig { .. } | Error::ParseConfig { .. } | Error::MissingConfig { .. } => {
StatusCode::InvalidArguments
@@ -126,6 +179,11 @@ impl ErrorExt for Error {
Error::CollectRecordBatches { source } | Error::PrettyPrintRecordBatches { source } => {
source.status_code()
}
Error::StartMetaClient { source } => source.status_code(),
Error::ParseSql { source, .. } | Error::PlanStatement { source } => {
source.status_code()
}
Error::SubstraitEncodeLogicalPlan { source } => source.status_code(),
}
}

View File

@@ -16,13 +16,14 @@ use std::sync::Arc;
use clap::Parser;
use common_base::Plugins;
use frontend::frontend::{Frontend, FrontendOptions};
use frontend::frontend::FrontendOptions;
use frontend::grpc::GrpcOptions;
use frontend::influxdb::InfluxdbOptions;
use frontend::instance::Instance;
use frontend::instance::{FrontendInstance, Instance as FeInstance};
use frontend::mysql::MysqlOptions;
use frontend::opentsdb::OpentsdbOptions;
use frontend::postgres::PostgresOptions;
use frontend::prom::PromOptions;
use meta_client::MetaClientOptions;
use servers::auth::UserProviderRef;
use servers::http::HttpOptions;
@@ -33,6 +34,26 @@ use snafu::ResultExt;
use crate::error::{self, IllegalAuthConfigSnafu, Result};
use crate::toml_loader;
pub struct Instance {
frontend: FeInstance,
}
impl Instance {
pub async fn run(&mut self) -> Result<()> {
self.frontend
.start()
.await
.context(error::StartFrontendSnafu)
}
pub async fn stop(&self) -> Result<()> {
self.frontend
.shutdown()
.await
.context(error::ShutdownFrontendSnafu)
}
}
#[derive(Parser)]
pub struct Command {
#[clap(subcommand)]
@@ -40,8 +61,8 @@ pub struct Command {
}
impl Command {
pub async fn run(self) -> Result<()> {
self.subcmd.run().await
pub async fn build(self) -> Result<Instance> {
self.subcmd.build().await
}
}
@@ -51,9 +72,9 @@ enum SubCommand {
}
impl SubCommand {
async fn run(self) -> Result<()> {
async fn build(self) -> Result<Instance> {
match self {
SubCommand::Start(cmd) => cmd.run().await,
SubCommand::Start(cmd) => cmd.build().await,
}
}
}
@@ -67,6 +88,8 @@ pub struct StartCommand {
#[clap(long)]
mysql_addr: Option<String>,
#[clap(long)]
prom_addr: Option<String>,
#[clap(long)]
postgres_addr: Option<String>,
#[clap(long)]
opentsdb_addr: Option<String>,
@@ -87,16 +110,20 @@ pub struct StartCommand {
}
impl StartCommand {
async fn run(self) -> Result<()> {
async fn build(self) -> Result<Instance> {
let plugins = Arc::new(load_frontend_plugins(&self.user_provider)?);
let opts: FrontendOptions = self.try_into()?;
let instance = Instance::try_new_distributed(&opts, plugins.clone())
let mut instance = FeInstance::try_new_distributed(&opts, plugins.clone())
.await
.context(error::StartFrontendSnafu)?;
let mut frontend = Frontend::new(opts, instance, plugins);
frontend.start().await.context(error::StartFrontendSnafu)
instance
.build_servers(&opts, plugins)
.await
.context(error::StartFrontendSnafu)?;
Ok(Instance { frontend: instance })
}
}
@@ -141,6 +168,9 @@ impl TryFrom<StartCommand> for FrontendOptions {
..Default::default()
});
}
if let Some(addr) = cmd.prom_addr {
opts.prom_options = Some(PromOptions { addr });
}
if let Some(addr) = cmd.postgres_addr {
opts.postgres_options = Some(PostgresOptions {
addr,
@@ -173,8 +203,10 @@ impl TryFrom<StartCommand> for FrontendOptions {
#[cfg(test)]
mod tests {
use std::io::Write;
use std::time::Duration;
use common_test_util::temp_dir::create_named_temp_file;
use servers::auth::{Identity, Password, UserProviderRef};
use super::*;
@@ -184,6 +216,7 @@ mod tests {
let command = StartCommand {
http_addr: Some("127.0.0.1:1234".to_string()),
grpc_addr: None,
prom_addr: Some("127.0.0.1:4444".to_string()),
mysql_addr: Some("127.0.0.1:5678".to_string()),
postgres_addr: Some("127.0.0.1:5432".to_string()),
opentsdb_addr: Some("127.0.0.1:4321".to_string()),
@@ -207,6 +240,7 @@ mod tests {
opts.opentsdb_options.as_ref().unwrap().addr,
"127.0.0.1:4321"
);
assert_eq!(opts.prom_options.as_ref().unwrap().addr, "127.0.0.1:4444");
let default_opts = FrontendOptions::default();
assert_eq!(
@@ -231,17 +265,25 @@ mod tests {
#[test]
fn test_read_from_config_file() {
let mut file = create_named_temp_file();
let toml_str = r#"
mode = "distributed"
[http_options]
addr = "127.0.0.1:4000"
timeout = "30s"
"#;
write!(file, "{}", toml_str).unwrap();
let command = StartCommand {
http_addr: None,
grpc_addr: None,
mysql_addr: None,
prom_addr: None,
postgres_addr: None,
opentsdb_addr: None,
influxdb_enable: None,
config_file: Some(format!(
"{}/../../config/frontend.example.toml",
std::env::current_dir().unwrap().as_path().to_str().unwrap()
)),
config_file: Some(file.path().to_str().unwrap().to_string()),
metasrv_addr: None,
tls_mode: None,
tls_cert_path: None,
@@ -267,6 +309,7 @@ mod tests {
http_addr: None,
grpc_addr: None,
mysql_addr: None,
prom_addr: None,
postgres_addr: None,
opentsdb_addr: None,
influxdb_enable: None,

View File

@@ -14,13 +14,33 @@
use clap::Parser;
use common_telemetry::{info, logging, warn};
use meta_srv::bootstrap;
use meta_srv::bootstrap::MetaSrvInstance;
use meta_srv::metasrv::MetaSrvOptions;
use snafu::ResultExt;
use crate::error::{Error, Result};
use crate::{error, toml_loader};
pub struct Instance {
instance: MetaSrvInstance,
}
impl Instance {
pub async fn run(&mut self) -> Result<()> {
self.instance
.start()
.await
.context(error::StartMetaServerSnafu)
}
pub async fn stop(&self) -> Result<()> {
self.instance
.shutdown()
.await
.context(error::ShutdownMetaServerSnafu)
}
}
#[derive(Parser)]
pub struct Command {
#[clap(subcommand)]
@@ -28,8 +48,8 @@ pub struct Command {
}
impl Command {
pub async fn run(self) -> Result<()> {
self.subcmd.run().await
pub async fn build(self) -> Result<Instance> {
self.subcmd.build().await
}
}
@@ -39,9 +59,9 @@ enum SubCommand {
}
impl SubCommand {
async fn run(self) -> Result<()> {
async fn build(self) -> Result<Instance> {
match self {
SubCommand::Start(cmd) => cmd.run().await,
SubCommand::Start(cmd) => cmd.build().await,
}
}
}
@@ -63,16 +83,17 @@ struct StartCommand {
}
impl StartCommand {
async fn run(self) -> Result<()> {
async fn build(self) -> Result<Instance> {
logging::info!("MetaSrv start command: {:#?}", self);
let opts: MetaSrvOptions = self.try_into()?;
logging::info!("MetaSrv options: {:#?}", opts);
bootstrap::bootstrap_meta_srv(opts)
let instance = MetaSrvInstance::new(opts)
.await
.context(error::StartMetaServerSnafu)
.context(error::BuildMetaServerSnafu)?;
Ok(Instance { instance })
}
}
@@ -113,6 +134,9 @@ impl TryFrom<StartCommand> for MetaSrvOptions {
#[cfg(test)]
mod tests {
use std::io::Write;
use common_test_util::temp_dir::create_named_temp_file;
use meta_srv::selector::SelectorType;
use super::*;
@@ -136,15 +160,23 @@ mod tests {
#[test]
fn test_read_from_config_file() {
let mut file = create_named_temp_file();
let toml_str = r#"
bind_addr = "127.0.0.1:3002"
server_addr = "127.0.0.1:3002"
store_addr = "127.0.0.1:2379"
datanode_lease_secs = 15
selector = "LeaseBased"
use_memory_store = false
"#;
write!(file, "{}", toml_str).unwrap();
let cmd = StartCommand {
bind_addr: None,
server_addr: None,
store_addr: None,
selector: None,
config_file: Some(format!(
"{}/../../config/metasrv.example.toml",
std::env::current_dir().unwrap().as_path().to_str().unwrap()
)),
config_file: Some(file.path().to_str().unwrap().to_string()),
use_memory_store: false,
};
let options: MetaSrvOptions = cmd.try_into().unwrap();

View File

@@ -21,22 +21,25 @@ use datanode::datanode::{
CompactionConfig, Datanode, DatanodeOptions, ObjectStoreConfig, ProcedureConfig, WalConfig,
};
use datanode::instance::InstanceRef;
use frontend::frontend::{Frontend, FrontendOptions};
use frontend::frontend::FrontendOptions;
use frontend::grpc::GrpcOptions;
use frontend::influxdb::InfluxdbOptions;
use frontend::instance::Instance as FeInstance;
use frontend::instance::{FrontendInstance, Instance as FeInstance};
use frontend::mysql::MysqlOptions;
use frontend::opentsdb::OpentsdbOptions;
use frontend::postgres::PostgresOptions;
use frontend::prom::PromOptions;
use frontend::prometheus::PrometheusOptions;
use frontend::promql::PromqlOptions;
use serde::{Deserialize, Serialize};
use servers::http::HttpOptions;
use servers::tls::{TlsMode, TlsOption};
use servers::Mode;
use snafu::ResultExt;
use crate::error::{Error, IllegalConfigSnafu, Result, StartDatanodeSnafu, StartFrontendSnafu};
use crate::error::{
Error, IllegalConfigSnafu, Result, ShutdownDatanodeSnafu, ShutdownFrontendSnafu,
StartDatanodeSnafu, StartFrontendSnafu,
};
use crate::frontend::load_frontend_plugins;
use crate::toml_loader;
@@ -47,8 +50,8 @@ pub struct Command {
}
impl Command {
pub async fn run(self) -> Result<()> {
self.subcmd.run().await
pub async fn build(self) -> Result<Instance> {
self.subcmd.build().await
}
}
@@ -58,9 +61,9 @@ enum SubCommand {
}
impl SubCommand {
async fn run(self) -> Result<()> {
async fn build(self) -> Result<Instance> {
match self {
SubCommand::Start(cmd) => cmd.run().await,
SubCommand::Start(cmd) => cmd.build().await,
}
}
}
@@ -77,7 +80,7 @@ pub struct StandaloneOptions {
pub opentsdb_options: Option<OpentsdbOptions>,
pub influxdb_options: Option<InfluxdbOptions>,
pub prometheus_options: Option<PrometheusOptions>,
pub promql_options: Option<PromqlOptions>,
pub prom_options: Option<PromOptions>,
pub wal: WalConfig,
pub storage: ObjectStoreConfig,
pub compaction: CompactionConfig,
@@ -96,7 +99,7 @@ impl Default for StandaloneOptions {
opentsdb_options: Some(OpentsdbOptions::default()),
influxdb_options: Some(InfluxdbOptions::default()),
prometheus_options: Some(PrometheusOptions::default()),
promql_options: Some(PromqlOptions::default()),
prom_options: Some(PromOptions::default()),
wal: WalConfig::default(),
storage: ObjectStoreConfig::default(),
compaction: CompactionConfig::default(),
@@ -116,7 +119,7 @@ impl StandaloneOptions {
opentsdb_options: self.opentsdb_options,
influxdb_options: self.influxdb_options,
prometheus_options: self.prometheus_options,
promql_options: self.promql_options,
prom_options: self.prom_options,
meta_client_options: None,
}
}
@@ -133,6 +136,40 @@ impl StandaloneOptions {
}
}
pub struct Instance {
datanode: Datanode,
frontend: FeInstance,
}
impl Instance {
pub async fn run(&mut self) -> Result<()> {
// Start the datanode instance before starting services, to avoid requests coming in before internal components are started.
self.datanode
.start_instance()
.await
.context(StartDatanodeSnafu)?;
info!("Datanode instance started");
self.frontend.start().await.context(StartFrontendSnafu)?;
Ok(())
}
pub async fn stop(&self) -> Result<()> {
self.frontend
.shutdown()
.await
.context(ShutdownFrontendSnafu)?;
self.datanode
.shutdown_instance()
.await
.context(ShutdownDatanodeSnafu)?;
info!("Datanode instance stopped.");
Ok(())
}
}
#[derive(Debug, Parser)]
struct StartCommand {
#[clap(long)]
@@ -142,6 +179,8 @@ struct StartCommand {
#[clap(long)]
mysql_addr: Option<String>,
#[clap(long)]
prom_addr: Option<String>,
#[clap(long)]
postgres_addr: Option<String>,
#[clap(long)]
opentsdb_addr: Option<String>,
@@ -162,7 +201,7 @@ struct StartCommand {
}
impl StartCommand {
async fn run(self) -> Result<()> {
async fn build(self) -> Result<Instance> {
let enable_memory_catalog = self.enable_memory_catalog;
let config_file = self.config_file.clone();
let plugins = Arc::new(load_frontend_plugins(&self.user_provider)?);
@@ -182,33 +221,30 @@ impl StartCommand {
fe_opts, dn_opts
);
let mut datanode = Datanode::new(dn_opts.clone())
let datanode = Datanode::new(dn_opts.clone())
.await
.context(StartDatanodeSnafu)?;
let mut frontend = build_frontend(fe_opts, plugins, datanode.get_instance()).await?;
// Start the datanode instance before starting services, to avoid requests coming in before internal components are started.
datanode
.start_instance()
let mut frontend = build_frontend(plugins.clone(), datanode.get_instance()).await?;
frontend
.build_servers(&fe_opts, plugins)
.await
.context(StartDatanodeSnafu)?;
info!("Datanode instance started");
.context(StartFrontendSnafu)?;
frontend.start().await.context(StartFrontendSnafu)?;
Ok(())
Ok(Instance { datanode, frontend })
}
}
/// Build frontend instance in standalone mode
async fn build_frontend(
fe_opts: FrontendOptions,
plugins: Arc<Plugins>,
datanode_instance: InstanceRef,
) -> Result<Frontend<FeInstance>> {
) -> Result<FeInstance> {
let mut frontend_instance = FeInstance::new_standalone(datanode_instance.clone());
frontend_instance.set_script_handler(datanode_instance);
frontend_instance.set_plugins(plugins.clone());
Ok(Frontend::new(fe_opts, frontend_instance, plugins))
Ok(frontend_instance)
}
impl TryFrom<StartCommand> for FrontendOptions {
@@ -254,6 +290,11 @@ impl TryFrom<StartCommand> for FrontendOptions {
..Default::default()
})
}
if let Some(addr) = cmd.prom_addr {
opts.prom_options = Some(PromOptions { addr })
}
if let Some(addr) = cmd.postgres_addr {
opts.postgres_options = Some(PostgresOptions {
addr,
@@ -302,6 +343,7 @@ mod tests {
http_addr: None,
rpc_addr: None,
mysql_addr: None,
prom_addr: None,
postgres_addr: None,
opentsdb_addr: None,
config_file: Some(format!(
@@ -347,6 +389,7 @@ mod tests {
let command = StartCommand {
http_addr: None,
rpc_addr: None,
prom_addr: None,
mysql_addr: None,
postgres_addr: None,
opentsdb_addr: None,

View File

@@ -29,9 +29,9 @@ mod tests {
use std::fs::File;
use std::io::Write;
use common_test_util::temp_dir::create_temp_dir;
use serde::{Deserialize, Serialize};
use snafu::ResultExt;
use tempdir::TempDir;
use super::*;
use crate::error::Result;
@@ -62,7 +62,7 @@ mod tests {
host: "greptime.test".to_string(),
};
let dir = TempDir::new("test_from_file").unwrap();
let dir = create_temp_dir("test_from_file");
let test_file = format!("{}/test.toml", dir.path().to_str().unwrap());
let s = toml::to_string(&config).unwrap();

View File

@@ -18,8 +18,8 @@ mod tests {
use std::process::{Command, Stdio};
use std::time::Duration;
use common_test_util::temp_dir::create_temp_dir;
use rexpect::session::PtyReplSession;
use tempdir::TempDir;
struct Repl {
repl: PtyReplSession,
@@ -46,10 +46,13 @@ mod tests {
}
}
// TODO(LFC): Un-ignore this REPL test.
// Ignore this REPL test because some logical plans like create database are not supported yet in Datanode.
#[ignore]
#[test]
fn test_repl() {
let data_dir = TempDir::new_in("/tmp", "data").unwrap();
let wal_dir = TempDir::new_in("/tmp", "wal").unwrap();
let data_dir = create_temp_dir("data");
let wal_dir = create_temp_dir("wal");
let mut bin_path = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
bin_path.push("../../target/debug");

View File

@@ -20,6 +20,12 @@ use serde::{Deserialize, Deserializer, Serialize, Serializer};
#[derive(Debug, Default, Clone, PartialEq, Eq, PartialOrd, Ord, Deserialize, Serialize)]
pub struct Bytes(bytes::Bytes);
impl From<Bytes> for bytes::Bytes {
fn from(value: Bytes) -> Self {
value.0
}
}
impl From<bytes::Bytes> for Bytes {
fn from(bytes: bytes::Bytes) -> Bytes {
Bytes(bytes)

View File

@@ -17,5 +17,4 @@ snafu = { version = "0.7", features = ["backtraces"] }
[dev-dependencies]
chrono.workspace = true
tempdir = "0.3"
tokio.workspace = true

View File

@@ -0,0 +1,13 @@
[package]
name = "common-datasource"
version.workspace = true
edition.workspace = true
license.workspace = true
[dependencies]
common-error = { path = "../error" }
futures.workspace = true
object-store = { path = "../../object-store" }
regex = "1.7"
snafu.workspace = true
url = "2.3"

View File

@@ -0,0 +1,75 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::any::Any;
use common_error::prelude::*;
use url::ParseError;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub))]
pub enum Error {
#[snafu(display("Unsupported backend protocol: {}", protocol))]
UnsupportedBackendProtocol { protocol: String },
#[snafu(display("empty host: {}", url))]
EmptyHostPath { url: String },
#[snafu(display("Invalid path: {}", path))]
InvalidPath { path: String },
#[snafu(display("Invalid url: {}, error :{}", url, source))]
InvalidUrl { url: String, source: ParseError },
#[snafu(display("Failed to build backend, source: {}", source))]
BuildBackend {
source: object_store::Error,
backtrace: Backtrace,
},
#[snafu(display("Failed to list object in path: {}, source: {}", path, source))]
ListObjects {
path: String,
backtrace: Backtrace,
source: object_store::Error,
},
#[snafu(display("Invalid connection: {}", msg))]
InvalidConnection { msg: String },
}
pub type Result<T> = std::result::Result<T, Error>;
impl ErrorExt for Error {
fn status_code(&self) -> StatusCode {
use Error::*;
match self {
BuildBackend { .. } | ListObjects { .. } => StatusCode::StorageUnavailable,
UnsupportedBackendProtocol { .. }
| InvalidConnection { .. }
| InvalidUrl { .. }
| EmptyHostPath { .. }
| InvalidPath { .. } => StatusCode::InvalidArguments,
}
}
fn backtrace_opt(&self) -> Option<&Backtrace> {
ErrorCompat::backtrace(self)
}
fn as_any(&self) -> &dyn Any {
self
}
}

View File

@@ -0,0 +1,18 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
pub mod error;
pub mod lister;
pub mod object_store;
pub mod util;

View File

@@ -0,0 +1,81 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use futures::{future, TryStreamExt};
use object_store::{Object, ObjectStore};
use regex::Regex;
use snafu::ResultExt;
use crate::error::{self, Result};
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum Source {
Filename(String),
Dir,
}
pub struct Lister {
object_store: ObjectStore,
source: Source,
path: String,
regex: Option<Regex>,
}
impl Lister {
pub fn new(
object_store: ObjectStore,
source: Source,
path: String,
regex: Option<Regex>,
) -> Self {
Lister {
object_store,
source,
path,
regex,
}
}
pub async fn list(&self) -> Result<Vec<Object>> {
match &self.source {
Source::Dir => {
let streamer = self
.object_store
.object(&self.path)
.list()
.await
.context(error::ListObjectsSnafu { path: &self.path })?;
streamer
.try_filter(|f| {
let res = self
.regex
.as_ref()
.map(|x| x.is_match(f.name()))
.unwrap_or(true);
future::ready(res)
})
.try_collect::<Vec<_>>()
.await
.context(error::ListObjectsSnafu { path: &self.path })
}
Source::Filename(filename) => {
let obj = self
.object_store
.object(&format!("{}{}", self.path, filename));
Ok(vec![obj])
}
}
}
}
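For orientation, a hedged usage sketch of `Lister`: with `Source::Dir` it lists every object under the directory whose name matches the optional regex, while `Source::Filename` short-circuits to the single named object. The example leans on `build_fs_backend` from the same crate and assumes a tokio runtime and an existing `/tmp/export/` directory (the path and file pattern are illustrative only):

```rust
// Sketch only: `common_datasource` is the crate defined above.
use common_datasource::lister::{Lister, Source};
use common_datasource::object_store::fs::build_fs_backend;
use regex::Regex;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Local filesystem backend rooted at "/".
    let object_store = build_fs_backend("/")?;

    // List every object under /tmp/export/ whose name ends with ".csv".
    let lister = Lister::new(
        object_store,
        Source::Dir,
        "/tmp/export/".to_string(),
        Some(Regex::new(r"\.csv$")?),
    );
    for object in lister.list().await? {
        println!("{}", object.name());
    }
    Ok(())
}
```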

View File

@@ -0,0 +1,60 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
pub mod fs;
pub mod s3;
use std::collections::HashMap;
use object_store::ObjectStore;
use snafu::{OptionExt, ResultExt};
use url::{ParseError, Url};
use self::fs::build_fs_backend;
use self::s3::build_s3_backend;
use crate::error::{self, Result};
pub const FS_SCHEMA: &str = "FS";
pub const S3_SCHEMA: &str = "S3";
/// Parses a URL, returning `(schema, Option<host>, path)`.
pub fn parse_url(url: &str) -> Result<(String, Option<String>, String)> {
let parsed_url = Url::parse(url);
match parsed_url {
Ok(url) => Ok((
url.scheme().to_string(),
url.host_str().map(|s| s.to_string()),
url.path().to_string(),
)),
Err(ParseError::RelativeUrlWithoutBase) => {
Ok((FS_SCHEMA.to_string(), None, url.to_string()))
}
Err(err) => Err(err).context(error::InvalidUrlSnafu { url }),
}
}
pub fn build_backend(url: &str, connection: HashMap<String, String>) -> Result<ObjectStore> {
let (schema, host, _path) = parse_url(url)?;
match schema.to_uppercase().as_str() {
S3_SCHEMA => {
let host = host.context(error::EmptyHostPathSnafu {
url: url.to_string(),
})?;
Ok(build_s3_backend(&host, "/", connection)?)
}
FS_SCHEMA => Ok(build_fs_backend("/")?),
_ => error::UnsupportedBackendProtocolSnafu { protocol: schema }.fail(),
}
}
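A small sketch of how `parse_url` behaves on the two interesting inputs: an `s3://` URL splits into scheme, host (the bucket) and path, while a bare filesystem path hits the `RelativeUrlWithoutBase` fallback and comes back with the `FS` schema and the original string as the path (crate name taken from the Cargo.toml above; concrete paths are illustrative):

```rust
// Sketch only: `common_datasource` is the crate defined above.
use common_datasource::object_store::parse_url;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Absolute URL: scheme and host (the bucket) are both present.
    let (schema, host, path) = parse_url("s3://my-bucket/data/2023/")?;
    assert_eq!(schema, "s3");
    assert_eq!(host.as_deref(), Some("my-bucket"));
    assert_eq!(path, "/data/2023/");

    // A plain path has no base URL, so it falls back to the FS schema unchanged.
    let (schema, host, path) = parse_url("/tmp/export/data.csv")?;
    assert_eq!(schema, "FS");
    assert_eq!(host, None);
    assert_eq!(path, "/tmp/export/data.csv");

    Ok(())
}
```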

View File

@@ -0,0 +1,28 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use object_store::services::Fs;
use object_store::{ObjectStore, ObjectStoreBuilder};
use snafu::ResultExt;
use crate::error::{self, Result};
pub fn build_fs_backend(root: &str) -> Result<ObjectStore> {
let accessor = Fs::default()
.root(root)
.build()
.context(error::BuildBackendSnafu)?;
Ok(ObjectStore::new(accessor).finish())
}

View File

@@ -0,0 +1,79 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::HashMap;
use object_store::services::S3;
use object_store::{ObjectStore, ObjectStoreBuilder};
use snafu::ResultExt;
use crate::error::{self, Result};
const ENDPOINT_URL: &str = "ENDPOINT_URL";
const ACCESS_KEY_ID: &str = "ACCESS_KEY_ID";
const SECRET_ACCESS_KEY: &str = "SECRET_ACCESS_KEY";
const SESSION_TOKEN: &str = "SESSION_TOKEN";
const REGION: &str = "REGION";
const ENABLE_VIRTUAL_HOST_STYLE: &str = "ENABLE_VIRTUAL_HOST_STYLE";
pub fn build_s3_backend(
host: &str,
path: &str,
connection: HashMap<String, String>,
) -> Result<ObjectStore> {
let mut builder = S3::default();
builder.root(path);
builder.bucket(host);
if let Some(endpoint) = connection.get(ENDPOINT_URL) {
builder.endpoint(endpoint);
}
if let Some(region) = connection.get(REGION) {
builder.region(region);
}
if let Some(key_id) = connection.get(ACCESS_KEY_ID) {
builder.access_key_id(key_id);
}
if let Some(key) = connection.get(SECRET_ACCESS_KEY) {
builder.secret_access_key(key);
}
if let Some(session_token) = connection.get(SESSION_TOKEN) {
builder.security_token(session_token);
}
if let Some(enable_str) = connection.get(ENABLE_VIRTUAL_HOST_STYLE) {
let enable = enable_str.as_str().parse::<bool>().map_err(|e| {
error::InvalidConnectionSnafu {
msg: format!(
"failed to parse the option {}={}, {}",
ENABLE_VIRTUAL_HOST_STYLE, enable_str, e
),
}
.build()
})?;
if enable {
builder.enable_virtual_host_style();
}
}
let accessor = builder.build().context(error::BuildBackendSnafu)?;
Ok(ObjectStore::new(accessor).finish())
}
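A hedged sketch of driving `build_s3_backend` directly: the connection map is keyed by the string constants defined above, and only the entries that are present are applied to the builder. The endpoint, bucket and credentials below are placeholders, not defaults:

```rust
use std::collections::HashMap;

// Sketch only: `common_datasource` is the crate defined above; all values are placeholders.
use common_datasource::object_store::s3::build_s3_backend;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let connection: HashMap<String, String> = HashMap::from([
        ("ENDPOINT_URL".to_string(), "http://127.0.0.1:9000".to_string()),
        ("REGION".to_string(), "us-east-1".to_string()),
        ("ACCESS_KEY_ID".to_string(), "minioadmin".to_string()),
        ("SECRET_ACCESS_KEY".to_string(), "minioadmin".to_string()),
        ("ENABLE_VIRTUAL_HOST_STYLE".to_string(), "false".to_string()),
    ]);

    // "my-bucket" plays the role of the URL host, "/" of the path prefix.
    let object_store = build_s3_backend("my-bucket", "/", connection)?;
    let _ = object_store; // ready to hand to a Lister or a copy task
    Ok(())
}
```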

View File

@@ -0,0 +1,125 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
pub fn find_dir_and_filename(path: &str) -> (String, Option<String>) {
if path.is_empty() {
("/".to_string(), None)
} else if path.ends_with('/') {
(path.to_string(), None)
} else if let Some(idx) = path.rfind('/') {
(
path[..idx + 1].to_string(),
Some(path[idx + 1..].to_string()),
)
} else {
("/".to_string(), Some(path.to_string()))
}
}
#[cfg(test)]
mod tests {
use url::Url;
use super::*;
#[test]
fn test_parse_uri() {
struct Test<'a> {
uri: &'a str,
expected_path: &'a str,
expected_schema: &'a str,
}
let tests = [
Test {
uri: "s3://bucket/to/path/",
expected_path: "/to/path/",
expected_schema: "s3",
},
Test {
uri: "fs:///to/path/",
expected_path: "/to/path/",
expected_schema: "fs",
},
Test {
uri: "fs:///to/path/file",
expected_path: "/to/path/file",
expected_schema: "fs",
},
];
for test in tests {
let parsed_uri = Url::parse(test.uri).unwrap();
assert_eq!(parsed_uri.path(), test.expected_path);
assert_eq!(parsed_uri.scheme(), test.expected_schema);
}
}
#[test]
fn test_parse_path_and_dir() {
let parsed = Url::from_file_path("/to/path/file").unwrap();
assert_eq!(parsed.path(), "/to/path/file");
let parsed = Url::from_directory_path("/to/path/").unwrap();
assert_eq!(parsed.path(), "/to/path/");
}
#[test]
fn test_find_dir_and_filename() {
struct Test<'a> {
path: &'a str,
expected_dir: &'a str,
expected_filename: Option<String>,
}
let tests = [
Test {
path: "to/path/",
expected_dir: "to/path/",
expected_filename: None,
},
Test {
path: "to/path/filename",
expected_dir: "to/path/",
expected_filename: Some("filename".into()),
},
Test {
path: "/to/path/filename",
expected_dir: "/to/path/",
expected_filename: Some("filename".into()),
},
Test {
path: "/",
expected_dir: "/",
expected_filename: None,
},
Test {
path: "filename",
expected_dir: "/",
expected_filename: Some("filename".into()),
},
Test {
path: "",
expected_dir: "/",
expected_filename: None,
},
];
for test in tests {
let (path, filename) = find_dir_and_filename(test.path);
assert_eq!(test.expected_dir, path);
assert_eq!(test.expected_filename, filename)
}
}
}

View File

@@ -10,6 +10,7 @@ proc-macro = true
[dependencies]
quote = "1.0"
syn = "1.0"
proc-macro2 = "1.0"
[dev-dependencies]
arc-swap = "1.0"

View File

@@ -12,8 +12,11 @@
// See the License for the specific language governing permissions and
// limitations under the License.
mod range_fn;
use proc_macro::TokenStream;
use quote::{quote, quote_spanned};
use range_fn::process_range_fn;
use syn::parse::Parser;
use syn::spanned::Spanned;
use syn::{parse_macro_input, DeriveInput, ItemStruct};
@@ -83,3 +86,31 @@ pub fn as_aggr_func_creator(_args: TokenStream, input: TokenStream) -> TokenStre
}
.into()
}
/// Attribute macro to convert an arithmetic function into a range function. The annotated function
/// should accept several arrays as input and return a single value as output. This procedural
/// macro works on any number of input parameters. The return type can be either a primitive type
/// or one wrapped in `Option`.
///
/// # Example
/// Take `count_over_time()` in PromQL as an example:
/// ```rust, ignore
/// /// The count of all values in the specified interval.
/// #[range_fn(
/// name = "CountOverTime",
/// ret = "Float64Array",
/// display_name = "prom_count_over_time"
/// )]
/// pub fn count_over_time(_: &TimestampMillisecondArray, values: &Float64Array) -> f64 {
/// values.len() as f64
/// }
/// ```
///
/// # Arguments
/// - `name`: The name of the generated [ScalarUDF] struct.
/// - `ret`: The return type of the generated UDF function.
/// - `display_name`: The display name of the generated UDF function.
#[proc_macro_attribute]
pub fn range_fn(args: TokenStream, input: TokenStream) -> TokenStream {
process_range_fn(args, input)
}

View File

@@ -0,0 +1,230 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::HashMap;
use proc_macro::TokenStream;
use proc_macro2::Span;
use quote::quote;
use syn::punctuated::Punctuated;
use syn::spanned::Spanned;
use syn::token::Comma;
use syn::{
parse_macro_input, Attribute, AttributeArgs, FnArg, Ident, ItemFn, Meta, MetaNameValue,
NestedMeta, Signature, Type, TypeReference, Visibility,
};
/// Internal utility macro to return early on error.
macro_rules! ok {
($item:expr) => {
match $item {
Ok(item) => item,
Err(e) => return e.into_compile_error().into(),
}
};
}
pub(crate) fn process_range_fn(args: TokenStream, input: TokenStream) -> TokenStream {
// extract arg map
let arg_pairs = parse_macro_input!(args as AttributeArgs);
let arg_span = arg_pairs[0].span();
let arg_map = ok!(extract_arg_map(arg_pairs));
// decompose the fn block
let compute_fn = parse_macro_input!(input as ItemFn);
let ItemFn {
attrs,
vis,
sig,
block,
} = compute_fn;
// extract fn arg list
let Signature {
inputs,
ident: fn_name,
..
} = &sig;
let arg_types = ok!(extract_input_types(inputs));
// build the struct and its impl block
let struct_code = build_struct(
attrs,
vis,
ok!(get_ident(&arg_map, "name", arg_span)),
ok!(get_ident(&arg_map, "display_name", arg_span)),
);
let calc_fn_code = build_calc_fn(
ok!(get_ident(&arg_map, "name", arg_span)),
arg_types,
fn_name.clone(),
ok!(get_ident(&arg_map, "ret", arg_span)),
);
// preserve this fn, but remove its `pub` modifier
let input_fn_code: TokenStream = quote! {
#sig { #block }
}
.into();
let mut result = TokenStream::new();
result.extend(struct_code);
result.extend(calc_fn_code);
result.extend(input_fn_code);
result
}
/// Extract a String <-> Ident map from the attribute args.
fn extract_arg_map(args: Vec<NestedMeta>) -> Result<HashMap<String, Ident>, syn::Error> {
args.into_iter()
.map(|meta| {
if let NestedMeta::Meta(Meta::NameValue(MetaNameValue { path, lit, .. })) = meta {
let name = path.get_ident().unwrap().to_string();
let ident = match lit {
syn::Lit::Str(lit_str) => lit_str.parse::<Ident>(),
_ => Err(syn::Error::new(
lit.span(),
"Unexpected attribute format. Expected `name = \"value\"`",
)),
}?;
Ok((name, ident))
} else {
Err(syn::Error::new(
meta.span(),
"Unexpected attribute format. Expected `name = \"value\"`",
))
}
})
.collect::<Result<HashMap<String, Ident>, syn::Error>>()
}
/// Helper function to get an Ident from the previous arg map.
fn get_ident(map: &HashMap<String, Ident>, key: &str, span: Span) -> Result<Ident, syn::Error> {
map.get(key)
.cloned()
.ok_or_else(|| syn::Error::new(span, format!("Expect attribute {key} but not found")))
}
/// Extract the argument list from the annotated function.
fn extract_input_types(inputs: &Punctuated<FnArg, Comma>) -> Result<Vec<Type>, syn::Error> {
inputs
.iter()
.map(|arg| match arg {
FnArg::Receiver(receiver) => Err(syn::Error::new(receiver.span(), "expected bool")),
FnArg::Typed(pat_type) => Ok(*pat_type.ty.clone()),
})
.collect()
}
fn build_struct(
attrs: Vec<Attribute>,
vis: Visibility,
name: Ident,
display_name_ident: Ident,
) -> TokenStream {
let display_name = display_name_ident.to_string();
quote! {
#(#attrs)*
#[derive(Debug)]
#vis struct #name {}
impl #name {
pub const fn name() -> &'static str {
#display_name
}
pub fn scalar_udf() -> ScalarUDF {
ScalarUDF {
name: Self::name().to_string(),
signature: Signature::new(
TypeSignature::Exact(Self::input_type()),
Volatility::Immutable,
),
return_type: Arc::new(|_| Ok(Arc::new(Self::return_type()))),
fun: Arc::new(Self::calc),
}
}
// TODO(ruihang): this should be parameterized
// time index column and value column
fn input_type() -> Vec<DataType> {
vec![
RangeArray::convert_data_type(DataType::Timestamp(TimeUnit::Millisecond, None)),
RangeArray::convert_data_type(DataType::Float64),
]
}
// TODO(ruihang): this should be parameterized
fn return_type() -> DataType {
DataType::Float64
}
}
}
.into()
}
fn build_calc_fn(
name: Ident,
param_types: Vec<Type>,
fn_name: Ident,
ret_type: Ident,
) -> TokenStream {
let param_names = param_types
.iter()
.enumerate()
.map(|(i, ty)| Ident::new(&format!("param_{}", i), ty.span()))
.collect::<Vec<_>>();
let unref_param_types = param_types
.iter()
.map(|ty| {
if let Type::Reference(TypeReference { elem, .. }) = ty {
elem.as_ref().clone()
} else {
ty.clone()
}
})
.collect::<Vec<_>>();
let num_params = param_types.len();
let param_numbers = (0..num_params).collect::<Vec<_>>();
let range_array_names = param_names
.iter()
.map(|name| Ident::new(&format!("{}_range_array", name), name.span()))
.collect::<Vec<_>>();
let first_range_array_name = range_array_names.first().unwrap().clone();
quote! {
impl #name {
fn calc(input: &[ColumnarValue]) -> Result<ColumnarValue, DataFusionError> {
assert_eq!(input.len(), #num_params);
#( let #range_array_names = RangeArray::try_new(extract_array(&input[#param_numbers])?.data().clone().into())?; )*
// TODO(ruihang): add ensure!()
let mut result_array = Vec::new();
for index in 0..#first_range_array_name.len(){
#( let #param_names = #range_array_names.get(index).unwrap().as_any().downcast_ref::<#unref_param_types>().unwrap().clone(); )*
// TODO(ruihang): add ensure!() to check length
let result = #fn_name(#( &#param_names, )*);
result_array.push(result);
}
let result = ColumnarValue::Array(Arc::new(#ret_type::from_iter(result_array)));
Ok(result)
}
}
}
.into()
}

View File

@@ -12,17 +12,10 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use std::sync::Arc;
mod from_unixtime;
use from_unixtime::FromUnixtimeFunction;
use crate::scalars::function_registry::FunctionRegistry;
pub(crate) struct TimestampFunction;
impl TimestampFunction {
pub fn register(registry: &FunctionRegistry) {
registry.register(Arc::new(FromUnixtimeFunction::default()));
}
pub fn register(_registry: &FunctionRegistry) {}
}

View File

@@ -1,133 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! from_unixtime function.
/// TODO(dennis) It can be removed after we upgrade datafusion.
use std::fmt;
use std::sync::Arc;
use common_query::error::{
ArrowComputeSnafu, IntoVectorSnafu, Result, TypeCastSnafu, UnsupportedInputDataTypeSnafu,
};
use common_query::prelude::{Signature, Volatility};
use datatypes::arrow::compute;
use datatypes::arrow::datatypes::{DataType as ArrowDatatype, Int64Type};
use datatypes::data_type::DataType;
use datatypes::prelude::ConcreteDataType;
use datatypes::vectors::{TimestampMillisecondVector, VectorRef};
use snafu::ResultExt;
use crate::scalars::function::{Function, FunctionContext};
#[derive(Clone, Debug, Default)]
pub struct FromUnixtimeFunction;
const NAME: &str = "from_unixtime";
impl Function for FromUnixtimeFunction {
fn name(&self) -> &str {
NAME
}
fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
Ok(ConcreteDataType::timestamp_millisecond_datatype())
}
fn signature(&self) -> Signature {
Signature::uniform(
1,
vec![ConcreteDataType::int64_datatype()],
Volatility::Immutable,
)
}
fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
match columns[0].data_type() {
ConcreteDataType::Int64(_) => {
let array = columns[0].to_arrow_array();
// Our timestamp vector's time unit is millisecond
let array = compute::multiply_scalar_dyn::<Int64Type>(&array, 1000i64)
.context(ArrowComputeSnafu)?;
let arrow_datatype = &self.return_type(&[]).unwrap().as_arrow_type();
Ok(Arc::new(
TimestampMillisecondVector::try_from_arrow_array(
compute::cast(&array, arrow_datatype).context(TypeCastSnafu {
typ: ArrowDatatype::Int64,
})?,
)
.context(IntoVectorSnafu {
data_type: arrow_datatype.clone(),
})?,
))
}
_ => UnsupportedInputDataTypeSnafu {
function: NAME,
datatypes: columns.iter().map(|c| c.data_type()).collect::<Vec<_>>(),
}
.fail(),
}
}
}
impl fmt::Display for FromUnixtimeFunction {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "FROM_UNIXTIME")
}
}
#[cfg(test)]
mod tests {
use common_query::prelude::TypeSignature;
use datatypes::value::Value;
use datatypes::vectors::Int64Vector;
use super::*;
#[test]
fn test_from_unixtime() {
let f = FromUnixtimeFunction::default();
assert_eq!("from_unixtime", f.name());
assert_eq!(
ConcreteDataType::timestamp_millisecond_datatype(),
f.return_type(&[]).unwrap()
);
assert!(matches!(f.signature(),
Signature {
type_signature: TypeSignature::Uniform(1, valid_types),
volatility: Volatility::Immutable
} if valid_types == vec![ConcreteDataType::int64_datatype()]
));
let times = vec![Some(1494410783), None, Some(1494410983)];
let args: Vec<VectorRef> = vec![Arc::new(Int64Vector::from(times.clone()))];
let vector = f.eval(FunctionContext::default(), &args).unwrap();
assert_eq!(3, vector.len());
for (i, t) in times.iter().enumerate() {
let v = vector.get(i);
if i == 1 {
assert_eq!(Value::Null, v);
continue;
}
match v {
Value::Timestamp(ts) => {
assert_eq!(ts.value(), t.unwrap() * 1000);
}
_ => unreachable!(),
}
}
}
}

View File

@@ -14,9 +14,9 @@
use std::sync::Arc;
use common_query::error::{ExecuteFunctionSnafu, FromScalarValueSnafu};
use common_query::error::FromScalarValueSnafu;
use common_query::prelude::{
ColumnarValue, ReturnTypeFunction, ScalarFunctionImplementation, ScalarUdf, ScalarValue,
ColumnarValue, ReturnTypeFunction, ScalarFunctionImplementation, ScalarUdf,
};
use datatypes::error::Error as DataTypeError;
use datatypes::prelude::*;
@@ -54,16 +54,8 @@ pub fn create_udf(func: FunctionRef) -> ScalarUdf {
.collect();
let result = func_cloned.eval(func_ctx, &args.context(FromScalarValueSnafu)?);
let udf = if len.is_some() {
result.map(ColumnarValue::Vector)?
} else {
ScalarValue::try_from_array(&result?.to_arrow_array(), 0)
.map(ColumnarValue::Scalar)
.context(ExecuteFunctionSnafu)?
};
Ok(udf)
let udf_result = result.map(ColumnarValue::Vector)?;
Ok(udf_result)
});
ScalarUdf::new(func.name(), &func.signature(), &return_type, &fun)

View File

@@ -32,7 +32,7 @@ pub enum Error {
DecodeInsert { source: DecodeError },
#[snafu(display("Illegal insert data"))]
IllegalInsertData,
IllegalInsertData { backtrace: Backtrace },
#[snafu(display("Column datatype error, source: {}", source))]
ColumnDataType {

View File

@@ -26,6 +26,7 @@ use common_time::{Date, DateTime};
use datatypes::data_type::{ConcreteDataType, DataType};
use datatypes::prelude::{ValueRef, VectorRef};
use datatypes::schema::SchemaRef;
use datatypes::types::TimestampType;
use datatypes::value::Value;
use datatypes::vectors::MutableVector;
use snafu::{ensure, OptionExt, ResultExt};
@@ -414,11 +415,26 @@ fn convert_values(data_type: &ConcreteDataType, values: Values) -> Vec<Value> {
.into_iter()
.map(|v| Value::Date(v.into()))
.collect(),
ConcreteDataType::Timestamp(_) => values
ConcreteDataType::Timestamp(TimestampType::Second(_)) => values
.ts_second_values
.into_iter()
.map(|v| Value::Timestamp(Timestamp::new_second(v)))
.collect(),
ConcreteDataType::Timestamp(TimestampType::Millisecond(_)) => values
.ts_millisecond_values
.into_iter()
.map(|v| Value::Timestamp(Timestamp::new_millisecond(v)))
.collect(),
ConcreteDataType::Timestamp(TimestampType::Microsecond(_)) => values
.ts_microsecond_values
.into_iter()
.map(|v| Value::Timestamp(Timestamp::new_microsecond(v)))
.collect(),
ConcreteDataType::Timestamp(TimestampType::Nanosecond(_)) => values
.ts_nanosecond_values
.into_iter()
.map(|v| Value::Timestamp(Timestamp::new_nanosecond(v)))
.collect(),
ConcreteDataType::Null(_) | ConcreteDataType::List(_) | ConcreteDataType::Dictionary(_) => {
unreachable!()
}
@@ -444,6 +460,7 @@ mod tests {
use common_time::timestamp::Timestamp;
use datatypes::data_type::ConcreteDataType;
use datatypes::schema::{ColumnSchema, SchemaBuilder, SchemaRef};
use datatypes::types::{TimestampMillisecondType, TimestampSecondType, TimestampType};
use datatypes::value::Value;
use snafu::ResultExt;
use table::error::Result as TableResult;
@@ -647,6 +664,39 @@ mod tests {
);
}
#[test]
fn test_convert_timestamp_values() {
// second
let actual = convert_values(
&ConcreteDataType::Timestamp(TimestampType::Second(TimestampSecondType)),
Values {
ts_second_values: vec![1_i64, 2_i64, 3_i64],
..Default::default()
},
);
let expect = vec![
Value::Timestamp(Timestamp::new_second(1_i64)),
Value::Timestamp(Timestamp::new_second(2_i64)),
Value::Timestamp(Timestamp::new_second(3_i64)),
];
assert_eq!(expect, actual);
// millisecond
let actual = convert_values(
&ConcreteDataType::Timestamp(TimestampType::Millisecond(TimestampMillisecondType)),
Values {
ts_millisecond_values: vec![1_i64, 2_i64, 3_i64],
..Default::default()
},
);
let expect = vec![
Value::Timestamp(Timestamp::new_millisecond(1_i64)),
Value::Timestamp(Timestamp::new_millisecond(2_i64)),
Value::Timestamp(Timestamp::new_millisecond(3_i64)),
];
assert_eq!(expect, actual);
}
#[test]
fn test_is_null() {
let null_mask = BitVec::from_slice(&[0b0000_0001, 0b0000_1000]);

View File

@@ -16,7 +16,7 @@ common-runtime = { path = "../runtime" }
dashmap = "5.4"
datafusion.workspace = true
datatypes = { path = "../../datatypes" }
flatbuffers = "22"
flatbuffers = "23.1"
futures = "0.3"
prost.workspace = true
snafu = { version = "0.7", features = ["backtraces"] }
@@ -26,7 +26,7 @@ tower = "0.4"
[dev-dependencies]
criterion = "0.4"
rand = "0.8"
rand.workspace = true
[[bench]]
name = "bench_main"

View File

@@ -16,8 +16,9 @@ use std::collections::HashMap;
use std::sync::Arc;
use api::v1::{AffectedRows, FlightMetadata};
use arrow_flight::utils::{flight_data_from_arrow_batch, flight_data_to_arrow_batch};
use arrow_flight::utils::flight_data_to_arrow_batch;
use arrow_flight::{FlightData, IpcMessage, SchemaAsIpc};
use common_base::bytes::Bytes;
use common_recordbatch::{RecordBatch, RecordBatches};
use datatypes::arrow;
use datatypes::arrow::datatypes::Schema as ArrowSchema;
@@ -39,38 +40,58 @@ pub enum FlightMessage {
AffectedRows(usize),
}
#[derive(Default)]
pub struct FlightEncoder {
write_options: writer::IpcWriteOptions,
data_gen: writer::IpcDataGenerator,
dictionary_tracker: writer::DictionaryTracker,
}
impl Default for FlightEncoder {
fn default() -> Self {
Self {
write_options: writer::IpcWriteOptions::default(),
data_gen: writer::IpcDataGenerator::default(),
dictionary_tracker: writer::DictionaryTracker::new(false),
}
}
}
impl FlightEncoder {
pub fn encode(&self, flight_message: FlightMessage) -> FlightData {
pub fn encode(&mut self, flight_message: FlightMessage) -> FlightData {
match flight_message {
FlightMessage::Schema(schema) => {
SchemaAsIpc::new(schema.arrow_schema(), &self.write_options).into()
}
FlightMessage::Recordbatch(recordbatch) => {
let (flight_dictionaries, flight_batch) = flight_data_from_arrow_batch(
recordbatch.df_record_batch(),
&self.write_options,
);
let (encoded_dictionaries, encoded_batch) = self
.data_gen
.encoded_batch(
recordbatch.df_record_batch(),
&mut self.dictionary_tracker,
&self.write_options,
)
.expect("DictionaryTracker configured above to not fail on replacement");
// TODO(LFC): Handle dictionaries as FlightData here once we support Arrow's Dictionary DataType.
// Currently we don't have a datatype corresponding to Arrow's Dictionary DataType,
// so there won't be any "dictionaries" here. Assert to be sure about it, and act
// as a testing guard in case we forget to handle the possible "dictionaries"
// here in the future.
debug_assert_eq!(flight_dictionaries.len(), 0);
debug_assert_eq!(encoded_dictionaries.len(), 0);
flight_batch
encoded_batch.into()
}
FlightMessage::AffectedRows(rows) => {
let metadata = FlightMetadata {
affected_rows: Some(AffectedRows { value: rows as _ }),
}
.encode_to_vec();
FlightData::new(None, IpcMessage(build_none_flight_msg()), metadata, vec![])
FlightData::new(
None,
IpcMessage(build_none_flight_msg().into()),
metadata,
vec![],
)
}
}
}
@@ -83,7 +104,8 @@ pub struct FlightDecoder {
impl FlightDecoder {
pub fn try_decode(&mut self, flight_data: FlightData) -> Result<FlightMessage> {
let message = root_as_message(flight_data.data_header.as_slice()).map_err(|e| {
let bytes = flight_data.data_header.slice(..);
let message = root_as_message(&bytes).map_err(|e| {
InvalidFlightDataSnafu {
reason: e.to_string(),
}
@@ -91,7 +113,7 @@ impl FlightDecoder {
})?;
match message.header_type() {
MessageHeader::NONE => {
let metadata = FlightMetadata::decode(flight_data.app_metadata.as_slice())
let metadata = FlightMetadata::decode(flight_data.app_metadata)
.context(DecodeFlightDataSnafu)?;
if let Some(AffectedRows { value }) = metadata.affected_rows {
return Ok(FlightMessage::AffectedRows(value as _));
@@ -176,7 +198,7 @@ pub fn flight_messages_to_recordbatches(messages: Vec<FlightMessage>) -> Result<
}
}
fn build_none_flight_msg() -> Vec<u8> {
fn build_none_flight_msg() -> Bytes {
let mut builder = FlatBufferBuilder::new();
let mut message = arrow::ipc::MessageBuilder::new(&mut builder);
@@ -187,7 +209,7 @@ fn build_none_flight_msg() -> Vec<u8> {
let data = message.finish();
builder.finish(data, None);
builder.finished_data().to_vec()
builder.finished_data().into()
}
#[cfg(test)]

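The switch from `flight_data_from_arrow_batch` to `IpcDataGenerator::encoded_batch` is why `encode` now takes `&mut self`: the `DictionaryTracker` keeps dictionary state across batches. A minimal usage sketch follows; the `common_grpc::flight` module path is an assumption, since the file path is not shown in this view:

```rust
use common_grpc::flight::{FlightEncoder, FlightMessage};

fn main() {
    // The encoder must be mutable now because its DictionaryTracker
    // accumulates dictionary state across encoded batches.
    let mut encoder = FlightEncoder::default();

    // AffectedRows is carried in app_metadata with a NONE IPC header.
    let flight_data = encoder.encode(FlightMessage::AffectedRows(42));
    assert!(!flight_data.app_metadata.is_empty());
}
```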
View File

@@ -0,0 +1,17 @@
[package]
name = "common-mem-prof"
version.workspace = true
edition.workspace = true
license.workspace = true
[dependencies]
common-error = { path = "../error" }
snafu.workspace = true
tempfile = "3.4"
tikv-jemalloc-ctl = { version = "0.5", features = ["use_std"] }
tikv-jemallocator = "0.5"
tokio.workspace = true
[dependencies.tikv-jemalloc-sys]
version = "0.5"
features = ["stats", "profiling", "unprefixed_malloc_on_supported_platforms"]

View File

@@ -0,0 +1,50 @@
# Profile memory usage of GreptimeDB
This crate provides an easy way to dump memory profiling data.
## Prerequisites
### jemalloc
```bash
# for macOS
brew install jemalloc
# for Ubuntu
sudo apt install libjemalloc-dev
```
### [flamegraph](https://github.com/brendangregg/FlameGraph)
```bash
curl https://raw.githubusercontent.com/brendangregg/FlameGraph/master/flamegraph.pl > ./flamegraph.pl
```
### Build GreptimeDB with the `mem-prof` feature
```bash
cargo build --features=mem-prof
```
## Profiling
Start a GreptimeDB instance with the following environment variable:
```bash
MALLOC_CONF=prof:true,lg_prof_interval:28 ./target/debug/greptime standalone start
```
Dump memory profiling data through the HTTP API:
```bash
curl localhost:4000/v1/prof/mem > greptime.hprof
```
You can periodically dump profiling data and compare the dumps to find the delta in memory usage.
## Analyze profiling data with flamegraph
To create a flamegraph from the dumped profiling data:
```bash
jeprof --svg <path_to_greptimedb_binary> --base=<baseline_prof> <profile_data> > output.svg
```

View File

@@ -0,0 +1,66 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::any::Any;
use std::path::PathBuf;
use common_error::prelude::{ErrorExt, StatusCode};
use snafu::{Backtrace, Snafu};
pub type Result<T> = std::result::Result<T, Error>;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub))]
pub enum Error {
#[snafu(display("Failed to read OPT_PROF"))]
ReadOptProf { source: tikv_jemalloc_ctl::Error },
#[snafu(display("Memory profiling is not enabled"))]
ProfilingNotEnabled,
#[snafu(display("Failed to build temp file from given path: {:?}", path))]
BuildTempPath { path: PathBuf, backtrace: Backtrace },
#[snafu(display("Failed to open temp file: {}", path))]
OpenTempFile {
path: String,
source: std::io::Error,
},
#[snafu(display("Failed to dump profiling data to temp file: {:?}", path))]
DumpProfileData {
path: PathBuf,
source: tikv_jemalloc_ctl::Error,
},
}
impl ErrorExt for Error {
fn status_code(&self) -> StatusCode {
match self {
Error::ReadOptProf { .. } => StatusCode::Internal,
Error::ProfilingNotEnabled => StatusCode::InvalidArguments,
Error::BuildTempPath { .. } => StatusCode::Internal,
Error::OpenTempFile { .. } => StatusCode::StorageUnavailable,
Error::DumpProfileData { .. } => StatusCode::StorageUnavailable,
}
}
fn backtrace_opt(&self) -> Option<&Backtrace> {
snafu::ErrorCompat::backtrace(self)
}
fn as_any(&self) -> &dyn Any {
self
}
}

View File

@@ -0,0 +1,74 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
pub mod error;
use std::ffi::{c_char, CString};
use std::path::PathBuf;
use snafu::{ensure, ResultExt};
use tokio::io::AsyncReadExt;
use crate::error::{
BuildTempPathSnafu, DumpProfileDataSnafu, OpenTempFileSnafu, ProfilingNotEnabledSnafu,
ReadOptProfSnafu,
};
const PROF_DUMP: &[u8] = b"prof.dump\0";
const OPT_PROF: &[u8] = b"opt.prof\0";
pub async fn dump_profile() -> error::Result<Vec<u8>> {
ensure!(is_prof_enabled()?, ProfilingNotEnabledSnafu);
let tmp_path = tempfile::tempdir().map_err(|_| {
BuildTempPathSnafu {
path: std::env::temp_dir(),
}
.build()
})?;
let mut path_buf = PathBuf::from(tmp_path.path());
path_buf.push("greptimedb.hprof");
let path = path_buf
.to_str()
.ok_or_else(|| BuildTempPathSnafu { path: &path_buf }.build())?
.to_string();
let mut bytes = CString::new(path.as_str())
.map_err(|_| BuildTempPathSnafu { path: &path_buf }.build())?
.into_bytes_with_nul();
{
// SAFETY: we always expect a valid temp file path to write profiling data to.
let ptr = bytes.as_mut_ptr() as *mut c_char;
unsafe {
tikv_jemalloc_ctl::raw::write(PROF_DUMP, ptr)
.context(DumpProfileDataSnafu { path: path_buf })?
}
}
let mut f = tokio::fs::File::open(path.as_str())
.await
.context(OpenTempFileSnafu { path: &path })?;
let mut buf = vec![];
f.read_to_end(&mut buf)
.await
.context(OpenTempFileSnafu { path })?;
Ok(buf)
}
fn is_prof_enabled() -> error::Result<bool> {
// SAFETY: the OPT_PROF variable, if present, is always a boolean value.
Ok(unsafe { tikv_jemalloc_ctl::raw::read::<bool>(OPT_PROF).context(ReadOptProfSnafu)? })
}
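A hedged caller sketch for `dump_profile`: it assumes the package `common-mem-prof` is imported as `common_mem_prof` and simply persists the returned bytes so `jeprof` can analyze them later.

```rust
use tokio::io::AsyncWriteExt;

// Sketch only: the crate name and the target path are assumptions.
async fn dump_to_file(path: &str) -> common_mem_prof::error::Result<()> {
    // Returns ProfilingNotEnabled unless the process was started with
    // MALLOC_CONF=prof:true.
    let profile = common_mem_prof::dump_profile().await?;
    let mut file = tokio::fs::File::create(path)
        .await
        .expect("failed to create dump file");
    file.write_all(&profile).await.expect("failed to write dump file");
    Ok(())
}
```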

View File

@@ -14,10 +14,11 @@ object-store = { path = "../../object-store" }
serde.workspace = true
serde_json = "1.0"
smallvec = "1"
backon = "0.4.0"
snafu.workspace = true
tokio.workspace = true
uuid.workspace = true
[dev-dependencies]
common-test-util = { path = "../test-util" }
futures-util.workspace = true
tempdir = "0.3"

View File

@@ -13,6 +13,7 @@
// limitations under the License.
use std::any::Any;
use std::sync::Arc;
use common_error::prelude::*;
@@ -81,6 +82,31 @@ pub enum Error {
#[snafu(backtrace)]
source: BoxedError,
},
#[snafu(display("Procedure panics, procedure_id: {}", procedure_id))]
ProcedurePanic { procedure_id: ProcedureId },
#[snafu(display("Failed to wait watcher, source: {}", source))]
WaitWatcher {
source: tokio::sync::watch::error::RecvError,
backtrace: Backtrace,
},
#[snafu(display("Failed to execute procedure, source: {}", source))]
ProcedureExec {
source: Arc<Error>,
backtrace: Backtrace,
},
#[snafu(display(
"Procedure retry exceeded max times, procedure_id: {}, source:{}",
procedure_id,
source
))]
RetryTimesExceeded {
source: Arc<Error>,
procedure_id: ProcedureId,
},
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -95,10 +121,14 @@ impl ErrorExt for Error {
| Error::ListState { .. }
| Error::ReadState { .. }
| Error::FromJson { .. }
| Error::RetryLater { .. } => StatusCode::Internal,
| Error::RetryTimesExceeded { .. }
| Error::RetryLater { .. }
| Error::WaitWatcher { .. } => StatusCode::Internal,
Error::LoaderConflict { .. } | Error::DuplicateProcedure { .. } => {
StatusCode::InvalidArguments
}
Error::ProcedurePanic { .. } => StatusCode::Unexpected,
Error::ProcedureExec { source, .. } => source.status_code(),
}
}

View File

@@ -18,9 +18,11 @@ pub mod error;
pub mod local;
mod procedure;
mod store;
pub mod watcher;
pub use crate::error::{Error, Result};
pub use crate::procedure::{
BoxedProcedure, Context, ContextProvider, LockKey, Procedure, ProcedureId, ProcedureManager,
ProcedureManagerRef, ProcedureState, ProcedureWithId, Status, Watcher,
ProcedureManagerRef, ProcedureState, ProcedureWithId, Status,
};
pub use crate::watcher::Watcher;

View File

@@ -17,8 +17,10 @@ mod runner;
use std::collections::{HashMap, VecDeque};
use std::sync::{Arc, Mutex, RwLock};
use std::time::Duration;
use async_trait::async_trait;
use backon::ExponentialBuilder;
use common_telemetry::logging;
use object_store::ObjectStore;
use snafu::ensure;
@@ -291,12 +293,16 @@ impl ManagerContext {
pub struct ManagerConfig {
/// Object store
pub object_store: ObjectStore,
pub max_retry_times: usize,
pub retry_delay: Duration,
}
/// A [ProcedureManager] that maintains procedure states locally.
pub struct LocalManager {
manager_ctx: Arc<ManagerContext>,
state_store: StateStoreRef,
max_retry_times: usize,
retry_delay: Duration,
}
impl LocalManager {
@@ -305,6 +311,8 @@ impl LocalManager {
LocalManager {
manager_ctx: Arc::new(ManagerContext::new()),
state_store: Arc::new(ObjectStateStore::new(config.object_store)),
max_retry_times: config.max_retry_times,
retry_delay: config.retry_delay,
}
}
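A minimal sketch of wiring the new retry knobs, assuming the crate path `common_procedure::local` and that `ManagerConfig` has only the fields shown above; the object store setup mirrors the tests later in this diff:

```rust
use std::time::Duration;

use common_procedure::local::{LocalManager, ManagerConfig};
use object_store::services::Fs;
use object_store::{ObjectStore, ObjectStoreBuilder};

fn build_manager() -> LocalManager {
    // Same Fs-backed object store the tests use; the root path is arbitrary.
    let accessor = Fs::default().root("/tmp/procedures").build().unwrap();
    let object_store = ObjectStore::new(accessor).finish();

    LocalManager::new(ManagerConfig {
        object_store,
        // Give up after 3 retries, starting with a 500 ms backoff.
        max_retry_times: 3,
        retry_delay: Duration::from_millis(500),
    })
}
```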
@@ -321,7 +329,11 @@ impl LocalManager {
procedure,
manager_ctx: self.manager_ctx.clone(),
step,
exponential_builder: ExponentialBuilder::default()
.with_min_delay(self.retry_delay)
.with_max_times(self.max_retry_times),
store: ProcedureStore::new(self.state_store.clone()),
rolling_back: false,
};
let watcher = meta.state_receiver.clone();
@@ -334,7 +346,7 @@ impl LocalManager {
common_runtime::spawn_bg(async move {
// Run the root procedure.
let _ = runner.run().await;
runner.run().await;
});
Ok(watcher)
@@ -409,9 +421,9 @@ impl ProcedureManager for LocalManager {
/// Create a new [ProcedureMeta] for test purpose.
#[cfg(test)]
mod test_util {
use common_test_util::temp_dir::TempDir;
use object_store::services::Fs as Builder;
use object_store::ObjectStoreBuilder;
use tempdir::TempDir;
use super::*;
@@ -430,7 +442,7 @@ mod test_util {
mod tests {
use common_error::mock::MockError;
use common_error::prelude::StatusCode;
use tempdir::TempDir;
use common_test_util::temp_dir::create_temp_dir;
use super::*;
use crate::error::Error;
@@ -447,9 +459,9 @@ mod tests {
assert!(ctx.try_insert_procedure(meta.clone()));
assert!(ctx.contains_procedure(meta.id));
assert_eq!(ProcedureState::Running, ctx.state(meta.id).unwrap());
assert!(ctx.state(meta.id).unwrap().is_running());
meta.set_state(ProcedureState::Done);
assert_eq!(ProcedureState::Done, ctx.state(meta.id).unwrap());
assert!(ctx.state(meta.id).unwrap().is_done());
}
#[test]
@@ -540,9 +552,11 @@ mod tests {
#[test]
fn test_register_loader() {
let dir = TempDir::new("register").unwrap();
let dir = create_temp_dir("register");
let config = ManagerConfig {
object_store: test_util::new_object_store(&dir),
max_retry_times: 3,
retry_delay: Duration::from_millis(500),
};
let manager = LocalManager::new(config);
@@ -558,10 +572,12 @@ mod tests {
#[tokio::test]
async fn test_recover() {
let dir = TempDir::new("recover").unwrap();
let dir = create_temp_dir("recover");
let object_store = test_util::new_object_store(&dir);
let config = ManagerConfig {
object_store: object_store.clone(),
max_retry_times: 3,
retry_delay: Duration::from_millis(500),
};
let manager = LocalManager::new(config);
@@ -603,9 +619,11 @@ mod tests {
#[tokio::test]
async fn test_submit_procedure() {
let dir = TempDir::new("submit").unwrap();
let dir = create_temp_dir("submit");
let config = ManagerConfig {
object_store: test_util::new_object_store(&dir),
max_retry_times: 3,
retry_delay: Duration::from_millis(500),
};
let manager = LocalManager::new(config);
@@ -634,7 +652,7 @@ mod tests {
// Wait for the procedure to finish.
let mut watcher = manager.procedure_watcher(procedure_id).unwrap();
watcher.changed().await.unwrap();
assert_eq!(ProcedureState::Done, *watcher.borrow());
assert!(watcher.borrow().is_done());
// Try to submit procedure with same id again.
let err = manager
@@ -649,9 +667,11 @@ mod tests {
#[tokio::test]
async fn test_state_changed_on_err() {
let dir = TempDir::new("on_err").unwrap();
let dir = create_temp_dir("on_err");
let config = ManagerConfig {
object_store: test_util::new_object_store(&dir),
max_retry_times: 3,
retry_delay: Duration::from_millis(500),
};
let manager = LocalManager::new(config);
@@ -697,7 +717,7 @@ mod tests {
.unwrap();
// Wait for the notification.
watcher.changed().await.unwrap();
assert_eq!(ProcedureState::Failed, *watcher.borrow());
assert!(watcher.borrow().is_failed());
}
};

View File

@@ -15,22 +15,22 @@
use std::sync::Arc;
use std::time::Duration;
use backon::{BackoffBuilder, ExponentialBuilder};
use common_telemetry::logging;
use tokio::time;
use crate::error::{Error, Result};
use crate::error::{ProcedurePanicSnafu, Result};
use crate::local::{ManagerContext, ProcedureMeta, ProcedureMetaRef};
use crate::store::ProcedureStore;
use crate::{BoxedProcedure, Context, ProcedureId, ProcedureState, ProcedureWithId, Status};
const ERR_WAIT_DURATION: Duration = Duration::from_secs(30);
use crate::ProcedureState::Retrying;
use crate::{BoxedProcedure, Context, Error, ProcedureId, ProcedureState, ProcedureWithId, Status};
#[derive(Debug)]
enum ExecResult {
Continue,
Done,
RetryLater,
Failed(Error),
Failed,
}
#[cfg(test)]
@@ -48,7 +48,7 @@ impl ExecResult {
}
fn is_failed(&self) -> bool {
matches!(self, ExecResult::Failed(_))
matches!(self, ExecResult::Failed)
}
}
@@ -83,7 +83,11 @@ impl Drop for ProcedureGuard {
// Set the state to failed. This is useful in tests, as the runtime may not abort when the runner task panics.
// See https://github.com/tokio-rs/tokio/issues/2002 .
// We call set_panic_hook() in the application's main function, but our tests don't install this panic hook.
self.meta.set_state(ProcedureState::Failed);
let err = ProcedurePanicSnafu {
procedure_id: self.meta.id,
}
.build();
self.meta.set_state(ProcedureState::failed(Arc::new(err)));
}
// Notify parent procedure.
@@ -104,12 +108,14 @@ pub(crate) struct Runner {
pub(crate) procedure: BoxedProcedure,
pub(crate) manager_ctx: Arc<ManagerContext>,
pub(crate) step: u32,
pub(crate) exponential_builder: ExponentialBuilder,
pub(crate) store: ProcedureStore,
pub(crate) rolling_back: bool,
}
impl Runner {
/// Run the procedure.
pub(crate) async fn run(mut self) -> Result<()> {
pub(crate) async fn run(mut self) {
// Ensure we can update the procedure state.
let guard = ProcedureGuard::new(self.meta.clone(), self.manager_ctx.clone());
@@ -129,12 +135,9 @@ impl Runner {
.await;
}
let mut result = Ok(());
// Execute the procedure. We need to release the lock whether the execution
// succeeds or fails.
if let Err(e) = self.execute_procedure_in_loop().await {
result = Err(e);
}
self.execute_procedure_in_loop().await;
// We can't remove the metadata of the procedure now as users and its parent might
// need to query its state.
@@ -155,29 +158,64 @@ impl Runner {
self.procedure.type_name(),
self.meta.id
);
result
}
async fn execute_procedure_in_loop(&mut self) -> Result<()> {
async fn execute_procedure_in_loop(&mut self) {
let ctx = Context {
procedure_id: self.meta.id,
provider: self.manager_ctx.clone(),
};
self.rolling_back = false;
self.execute_once_with_retry(&ctx).await;
}
async fn execute_once_with_retry(&mut self, ctx: &Context) {
let mut retry = self.exponential_builder.build();
let mut retry_times = 0;
loop {
match self.execute_once(&ctx).await {
match self.execute_once(ctx).await {
ExecResult::Done | ExecResult::Failed => return,
ExecResult::Continue => (),
ExecResult::Done => return Ok(()),
ExecResult::RetryLater => {
self.wait_on_err().await;
retry_times += 1;
if let Some(d) = retry.next() {
self.wait_on_err(d, retry_times).await;
} else {
assert!(self.meta.state().is_retrying());
if let Retrying { error } = self.meta.state() {
self.meta.set_state(ProcedureState::failed(Arc::new(
Error::RetryTimesExceeded {
source: error,
procedure_id: self.meta.id,
},
)))
}
return;
}
}
ExecResult::Failed(e) => return Err(e),
}
}
}
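The loop above drives a `backon` exponential backoff iterator; a small standalone sketch of the same schedule, with the delay and retry budget chosen to match the values used in the tests:

```rust
use std::time::Duration;

use backon::{BackoffBuilder, ExponentialBuilder};

fn main() {
    // Same builder the runner uses: exponential delays starting at the
    // configured retry_delay, giving up after max_retry_times attempts.
    let mut retry = ExponentialBuilder::default()
        .with_min_delay(Duration::from_millis(500))
        .with_max_times(3)
        .build();

    // Each next() yields the delay to sleep before the next attempt;
    // None means the retry budget is exhausted (RetryTimesExceeded).
    while let Some(delay) = retry.next() {
        println!("retrying after {} ms", delay.as_millis());
    }
}
```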
async fn rollback(&mut self, error: Arc<Error>) -> ExecResult {
if let Err(e) = self.rollback_procedure().await {
self.rolling_back = true;
self.meta.set_state(ProcedureState::retrying(Arc::new(e)));
return ExecResult::RetryLater;
}
self.meta.set_state(ProcedureState::failed(error));
ExecResult::Failed
}
async fn execute_once(&mut self, ctx: &Context) -> ExecResult {
// if rolling_back, there is no need to execute again.
if self.rolling_back {
// We can definitely get the previous error here.
let state = self.meta.state();
let err = state.error().unwrap();
return self.rollback(err.clone()).await;
}
match self.procedure.execute(ctx).await {
Ok(status) => {
logging::debug!(
@@ -188,8 +226,11 @@ impl Runner {
status.need_persist(),
);
if status.need_persist() && self.persist_procedure().await.is_err() {
return ExecResult::RetryLater;
if status.need_persist() {
if let Err(err) = self.persist_procedure().await {
self.meta.set_state(ProcedureState::retrying(Arc::new(err)));
return ExecResult::RetryLater;
}
}
match status {
@@ -198,7 +239,8 @@ impl Runner {
self.on_suspended(subprocedures).await;
}
Status::Done => {
if self.commit_procedure().await.is_err() {
if let Err(e) = self.commit_procedure().await {
self.meta.set_state(ProcedureState::retrying(Arc::new(e)));
return ExecResult::RetryLater;
}
@@ -219,17 +261,12 @@ impl Runner {
);
if e.is_retry_later() {
self.meta.set_state(ProcedureState::retrying(Arc::new(e)));
return ExecResult::RetryLater;
}
self.meta.set_state(ProcedureState::Failed);
// Write rollback key so we can skip this procedure while recovering procedures.
if self.rollback_procedure().await.is_err() {
return ExecResult::RetryLater;
}
ExecResult::Failed(e)
self.rollback(Arc::new(e)).await
}
}
}
@@ -263,7 +300,9 @@ impl Runner {
procedure,
manager_ctx: self.manager_ctx.clone(),
step,
exponential_builder: self.exponential_builder.clone(),
store: self.store.clone(),
rolling_back: false,
};
// Insert the procedure. We already check the procedure existence before inserting
@@ -287,8 +326,16 @@ impl Runner {
});
}
async fn wait_on_err(&self) {
time::sleep(ERR_WAIT_DURATION).await;
/// Wait for duration `d` before making the next retry.
async fn wait_on_err(&self, d: Duration, i: u64) {
logging::info!(
"Procedure {}-{} will retry (attempt {}) after {} millis",
self.procedure.type_name(),
self.meta.id,
i,
d.as_millis(),
);
time::sleep(d).await;
}
async fn on_suspended(&self, subprocedures: Vec<ProcedureWithId>) {
@@ -397,14 +444,14 @@ mod tests {
use common_error::ext::PlainError;
use common_error::mock::MockError;
use common_error::prelude::StatusCode;
use common_test_util::temp_dir::create_temp_dir;
use futures_util::future::BoxFuture;
use futures_util::{FutureExt, TryStreamExt};
use object_store::ObjectStore;
use tempdir::TempDir;
use super::*;
use crate::local::test_util;
use crate::{ContextProvider, LockKey, Procedure};
use crate::{ContextProvider, Error, LockKey, Procedure};
const ROOT_ID: &str = "9f805a1f-05f7-490c-9f91-bd56e3cc54c1";
@@ -418,7 +465,9 @@ mod tests {
procedure,
manager_ctx: Arc::new(ManagerContext::new()),
step: 0,
exponential_builder: ExponentialBuilder::default(),
store,
rolling_back: false,
}
}
@@ -513,7 +562,7 @@ mod tests {
exec_fn,
};
let dir = TempDir::new("normal").unwrap();
let dir = create_temp_dir("normal");
let meta = normal.new_meta(ROOT_ID);
let ctx = context_without_provider(meta.id);
let object_store = test_util::new_object_store(&dir);
@@ -561,7 +610,7 @@ mod tests {
exec_fn,
};
let dir = TempDir::new("suspend").unwrap();
let dir = create_temp_dir("suspend");
let meta = suspend.new_meta(ROOT_ID);
let ctx = context_without_provider(meta.id);
let object_store = test_util::new_object_store(&dir);
@@ -630,9 +679,14 @@ mod tests {
// Wait for subprocedures.
let mut all_child_done = true;
for id in children_ids {
if ctx.provider.procedure_state(id).await.unwrap()
!= Some(ProcedureState::Done)
{
let is_not_done = ctx
.provider
.procedure_state(id)
.await
.unwrap()
.map(|s| !s.is_done())
.unwrap_or(true);
if is_not_done {
all_child_done = false;
}
}
@@ -655,7 +709,7 @@ mod tests {
exec_fn,
};
let dir = TempDir::new("parent").unwrap();
let dir = create_temp_dir("parent");
let meta = parent.new_meta(ROOT_ID);
let procedure_id = meta.id;
@@ -668,7 +722,7 @@ mod tests {
// Replace the manager ctx.
runner.manager_ctx = manager_ctx;
runner.run().await.unwrap();
runner.run().await;
// Check files on store.
for child_id in children_ids {
@@ -697,7 +751,7 @@ mod tests {
exec_fn,
};
let dir = TempDir::new("fail").unwrap();
let dir = create_temp_dir("fail");
let meta = fail.new_meta(ROOT_ID);
let ctx = context_without_provider(meta.id);
let object_store = test_util::new_object_store(&dir);
@@ -706,7 +760,7 @@ mod tests {
let res = runner.execute_once(&ctx).await;
assert!(res.is_failed(), "{res:?}");
assert_eq!(ProcedureState::Failed, meta.state());
assert!(meta.state().is_failed());
check_files(&object_store, ctx.procedure_id, &["0000000000.rollback"]).await;
}
@@ -732,7 +786,7 @@ mod tests {
exec_fn,
};
let dir = TempDir::new("retry_later").unwrap();
let dir = create_temp_dir("retry_later");
let meta = retry_later.new_meta(ROOT_ID);
let ctx = context_without_provider(meta.id);
let object_store = test_util::new_object_store(&dir);
@@ -741,14 +795,44 @@ mod tests {
let res = runner.execute_once(&ctx).await;
assert!(res.is_retry_later(), "{res:?}");
assert_eq!(ProcedureState::Running, meta.state());
assert!(meta.state().is_retrying());
let res = runner.execute_once(&ctx).await;
assert!(res.is_done(), "{res:?}");
assert_eq!(ProcedureState::Done, meta.state());
assert!(meta.state().is_done());
check_files(&object_store, ctx.procedure_id, &["0000000000.commit"]).await;
}
#[tokio::test]
async fn test_execute_exceed_max_retry_later() {
let exec_fn =
|_| async { Err(Error::retry_later(MockError::new(StatusCode::Unexpected))) }.boxed();
let exceed_max_retry_later = ProcedureAdapter {
data: "exceed_max_retry_later".to_string(),
lock_key: LockKey::single("catalog.schema.table"),
exec_fn,
};
let dir = create_temp_dir("exceed_max_retry_later");
let meta = exceed_max_retry_later.new_meta(ROOT_ID);
let object_store = test_util::new_object_store(&dir);
let procedure_store = ProcedureStore::from(object_store.clone());
let mut runner = new_runner(
meta.clone(),
Box::new(exceed_max_retry_later),
procedure_store,
);
runner.exponential_builder = ExponentialBuilder::default()
.with_min_delay(Duration::from_millis(1))
.with_max_times(3);
// Run the runner and execute the procedure.
runner.execute_procedure_in_loop().await;
let err = meta.state().error().unwrap().to_string();
assert!(err.contains("Procedure retry exceeded max times"));
}
#[tokio::test]
async fn test_child_error() {
let mut times = 0;
@@ -779,7 +863,8 @@ mod tests {
} else {
// Wait for subprocedures.
let state = ctx.provider.procedure_state(child_id).await.unwrap();
if state == Some(ProcedureState::Failed) {
let is_failed = state.map(|s| s.is_failed()).unwrap_or(false);
if is_failed {
// The parent procedure aborts itself if the child procedure fails.
Err(Error::from_error_ext(PlainError::new(
"subprocedure failed".to_string(),
@@ -802,7 +887,7 @@ mod tests {
exec_fn,
};
let dir = TempDir::new("child_err").unwrap();
let dir = create_temp_dir("child_err");
let meta = parent.new_meta(ROOT_ID);
let object_store = test_util::new_object_store(&dir);
@@ -811,12 +896,13 @@ mod tests {
let manager_ctx = Arc::new(ManagerContext::new());
// Manually add this procedure to the manager ctx.
assert!(manager_ctx.try_insert_procedure(meta));
assert!(manager_ctx.try_insert_procedure(meta.clone()));
// Replace the manager ctx.
runner.manager_ctx = manager_ctx;
// Run the runer and execute the procedure.
let err = runner.run().await.unwrap_err();
assert!(err.to_string().contains("subprocedure failed"), "{err}");
// Run the runner and execute the procedure.
runner.run().await;
let err = meta.state().error().unwrap().to_string();
assert!(err.contains("subprocedure failed"), "{err}");
}
}

View File

@@ -20,10 +20,10 @@ use async_trait::async_trait;
use serde::{Deserialize, Serialize};
use smallvec::{smallvec, SmallVec};
use snafu::{ResultExt, Snafu};
use tokio::sync::watch::Receiver;
use uuid::Uuid;
use crate::error::Result;
use crate::error::{Error, Result};
use crate::watcher::Watcher;
/// Procedure execution status.
#[derive(Debug)]
@@ -198,20 +198,60 @@ impl FromStr for ProcedureId {
/// Loader to recover the [Procedure] instance from serialized data.
pub type BoxedProcedureLoader = Box<dyn Fn(&str) -> Result<BoxedProcedure> + Send>;
// TODO(yingwen): Find a way to return the error message if the procedure is failed.
/// State of a submitted procedure.
#[derive(Debug, Clone, PartialEq, Eq)]
#[derive(Debug, Default, Clone)]
pub enum ProcedureState {
/// The procedure is running.
#[default]
Running,
/// The procedure is finished.
Done,
/// The procedure is failed and can be retried.
Retrying { error: Arc<Error> },
/// The procedure is failed and cannot proceed anymore.
Failed,
Failed { error: Arc<Error> },
}
/// Watcher to watch procedure state.
pub type Watcher = Receiver<ProcedureState>;
impl ProcedureState {
/// Returns a [ProcedureState] with failed state.
pub fn failed(error: Arc<Error>) -> ProcedureState {
ProcedureState::Failed { error }
}
/// Returns a [ProcedureState] with retrying state.
pub fn retrying(error: Arc<Error>) -> ProcedureState {
ProcedureState::Retrying { error }
}
/// Returns true if the procedure state is running.
pub fn is_running(&self) -> bool {
matches!(self, ProcedureState::Running)
}
/// Returns true if the procedure state is done.
pub fn is_done(&self) -> bool {
matches!(self, ProcedureState::Done)
}
/// Returns true if the procedure state failed.
pub fn is_failed(&self) -> bool {
matches!(self, ProcedureState::Failed { .. })
}
/// Returns true if the procedure state is retrying.
pub fn is_retrying(&self) -> bool {
matches!(self, ProcedureState::Retrying { .. })
}
/// Returns the error.
pub fn error(&self) -> Option<&Arc<Error>> {
match self {
ProcedureState::Failed { error } => Some(error),
ProcedureState::Retrying { error } => Some(error),
_ => None,
}
}
}
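With the error now embedded in `Retrying` and `Failed`, callers can report the cause directly. A small sketch, assuming `ProcedureState` is used via the crate-root re-export shown in `lib.rs` above:

```rust
use common_procedure::ProcedureState;

// Sketch: turn a state into a human-readable status line, surfacing the
// error that Retrying/Failed now carry.
fn describe(state: &ProcedureState) -> String {
    match state.error() {
        Some(err) if state.is_retrying() => format!("retrying: {err}"),
        Some(err) => format!("failed: {err}"),
        None if state.is_done() => "done".to_string(),
        None => "running".to_string(),
    }
}
```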
// TODO(yingwen): Shutdown
/// `ProcedureManager` executes [Procedure] submitted to it.
@@ -244,6 +284,9 @@ pub type ProcedureManagerRef = Arc<dyn ProcedureManager>;
#[cfg(test)]
mod tests {
use common_error::mock::MockError;
use common_error::prelude::StatusCode;
use super::*;
#[test]
@@ -311,4 +354,17 @@ mod tests {
let parsed = serde_json::from_str(&json).unwrap();
assert_eq!(id, parsed);
}
#[test]
fn test_procedure_state() {
assert!(ProcedureState::Running.is_running());
assert!(ProcedureState::Running.error().is_none());
assert!(ProcedureState::Done.is_done());
let state = ProcedureState::failed(Arc::new(Error::external(MockError::new(
StatusCode::Unexpected,
))));
assert!(state.is_failed());
assert!(state.error().is_some());
}
}

View File

@@ -246,9 +246,9 @@ impl ParsedKey {
#[cfg(test)]
mod tests {
use async_trait::async_trait;
use common_test_util::temp_dir::{create_temp_dir, TempDir};
use object_store::services::Fs as Builder;
use object_store::ObjectStoreBuilder;
use tempdir::TempDir;
use super::*;
use crate::{Context, LockKey, Procedure, Status};
@@ -373,7 +373,7 @@ mod tests {
#[tokio::test]
async fn test_store_procedure() {
let dir = TempDir::new("store_procedure").unwrap();
let dir = create_temp_dir("store_procedure");
let store = procedure_store_for_test(&dir);
let procedure_id = ProcedureId::random();
@@ -398,7 +398,7 @@ mod tests {
#[tokio::test]
async fn test_commit_procedure() {
let dir = TempDir::new("commit_procedure").unwrap();
let dir = create_temp_dir("commit_procedure");
let store = procedure_store_for_test(&dir);
let procedure_id = ProcedureId::random();
@@ -416,7 +416,7 @@ mod tests {
#[tokio::test]
async fn test_rollback_procedure() {
let dir = TempDir::new("rollback_procedure").unwrap();
let dir = create_temp_dir("rollback_procedure");
let store = procedure_store_for_test(&dir);
let procedure_id = ProcedureId::random();
@@ -434,7 +434,7 @@ mod tests {
#[tokio::test]
async fn test_load_messages() {
let dir = TempDir::new("load_messages").unwrap();
let dir = create_temp_dir("load_messages");
let store = procedure_store_for_test(&dir);
// store 3 steps

View File

@@ -115,15 +115,15 @@ impl StateStore for ObjectStateStore {
#[cfg(test)]
mod tests {
use common_test_util::temp_dir::create_temp_dir;
use object_store::services::Fs as Builder;
use object_store::ObjectStoreBuilder;
use tempdir::TempDir;
use super::*;
#[tokio::test]
async fn test_object_state_store() {
let dir = TempDir::new("state_store").unwrap();
let dir = create_temp_dir("state_store");
let store_dir = dir.path().to_str().unwrap();
let accessor = Builder::default().root(store_dir).build().unwrap();
let object_store = ObjectStore::new(accessor).finish();

View File

@@ -0,0 +1,41 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use snafu::ResultExt;
use tokio::sync::watch::Receiver;
use crate::error::{ProcedureExecSnafu, Result, WaitWatcherSnafu};
use crate::procedure::ProcedureState;
/// Watcher to watch procedure state.
pub type Watcher = Receiver<ProcedureState>;
/// Wait the [Watcher] until the [ProcedureState] is done.
pub async fn wait(watcher: &mut Watcher) -> Result<()> {
loop {
watcher.changed().await.context(WaitWatcherSnafu)?;
match &*watcher.borrow() {
ProcedureState::Running => (),
ProcedureState::Done => {
return Ok(());
}
ProcedureState::Failed { error } => {
return Err(error.clone()).context(ProcedureExecSnafu);
}
ProcedureState::Retrying { error } => {
return Err(error.clone()).context(ProcedureExecSnafu);
}
}
}
}
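A short sketch of waiting on a procedure with the new `watcher::wait` helper; the `common_procedure` crate name is an assumption, and the `Watcher` is whatever the manager hands back on submission:

```rust
use common_procedure::{watcher, Result, Watcher};

// Block until the watched procedure reaches a terminal state. A Failed or
// Retrying state surfaces as a ProcedureExec error from wait().
async fn wait_until_done(mut w: Watcher) -> Result<()> {
    watcher::wait(&mut w).await
}
```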

View File

@@ -239,7 +239,6 @@ impl From<BoxedError> for Error {
#[cfg(test)]
mod tests {
use datatypes::arrow::error::ArrowError;
use snafu::GenerateImplicitData;
use super::*;
@@ -286,7 +285,7 @@ mod tests {
fn test_convert_df_recordbatch_stream_error() {
let result: std::result::Result<i32, common_recordbatch::error::Error> =
Err(common_recordbatch::error::Error::PollStream {
source: ArrowError::DivideByZero,
source: DataFusionError::Internal("blabla".to_string()),
backtrace: Backtrace::generate(),
});
let error = result

View File

@@ -315,7 +315,11 @@ mod test {
.unwrap()
.build()
.unwrap();
let physical_plan = ctx.create_physical_plan(&logical_plan).await.unwrap();
let physical_plan = ctx
.state()
.create_physical_plan(&logical_plan)
.await
.unwrap();
let df_recordbatches = collect(physical_plan, Arc::new(TaskContext::from(&ctx)))
.await
.unwrap();

View File

@@ -18,9 +18,9 @@ use std::sync::Arc;
use std::task::{Context, Poll};
use datafusion::arrow::datatypes::SchemaRef as DfSchemaRef;
use datafusion::error::Result as DfResult;
use datafusion::physical_plan::RecordBatchStream as DfRecordBatchStream;
use datafusion_common::DataFusionError;
use datatypes::arrow::error::{ArrowError, Result as ArrowResult};
use datatypes::schema::{Schema, SchemaRef};
use futures::ready;
use snafu::ResultExt;
@@ -57,14 +57,14 @@ impl DfRecordBatchStream for DfRecordBatchStreamAdapter {
}
impl Stream for DfRecordBatchStreamAdapter {
type Item = ArrowResult<DfRecordBatch>;
type Item = DfResult<DfRecordBatch>;
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
match Pin::new(&mut self.stream).poll_next(cx) {
Poll::Pending => Poll::Pending,
Poll::Ready(Some(recordbatch)) => match recordbatch {
Ok(recordbatch) => Poll::Ready(Some(Ok(recordbatch.into_df_record_batch()))),
Err(e) => Poll::Ready(Some(Err(ArrowError::ExternalError(Box::new(e))))),
Err(e) => Poll::Ready(Some(Err(DataFusionError::External(Box::new(e))))),
},
Poll::Ready(None) => Poll::Ready(None),
}
@@ -242,12 +242,12 @@ mod test {
)]));
let batch1 = RecordBatch::new(
schema.clone(),
vec![Arc::new(Int32Vector::from_slice(&[1])) as _],
vec![Arc::new(Int32Vector::from_slice([1])) as _],
)
.unwrap();
let batch2 = RecordBatch::new(
schema.clone(),
vec![Arc::new(Int32Vector::from_slice(&[2])) as _],
vec![Arc::new(Int32Vector::from_slice([2])) as _],
)
.unwrap();

View File

@@ -55,7 +55,7 @@ pub enum Error {
#[snafu(display("Failed to poll stream, source: {}", source))]
PollStream {
source: datatypes::arrow::error::ArrowError,
source: datafusion::error::DataFusionError,
backtrace: Backtrace,
},

View File

@@ -204,7 +204,7 @@ mod tests {
);
assert!(result.is_err());
let v: VectorRef = Arc::new(Int32Vector::from_slice(&[1, 2]));
let v: VectorRef = Arc::new(Int32Vector::from_slice([1, 2]));
let expected = vec![RecordBatch::new(schema.clone(), vec![v.clone()]).unwrap()];
let r = RecordBatches::try_from_columns(schema, vec![v]).unwrap();
assert_eq!(r.take(), expected);
@@ -216,7 +216,7 @@ mod tests {
let column_b = ColumnSchema::new("b", ConcreteDataType::string_datatype(), false);
let column_c = ColumnSchema::new("c", ConcreteDataType::boolean_datatype(), false);
let va: VectorRef = Arc::new(Int32Vector::from_slice(&[1, 2]));
let va: VectorRef = Arc::new(Int32Vector::from_slice([1, 2]));
let vb: VectorRef = Arc::new(StringVector::from(vec!["hello", "world"]));
let vc: VectorRef = Arc::new(BooleanVector::from(vec![true, false]));
@@ -255,11 +255,11 @@ mod tests {
let column_b = ColumnSchema::new("b", ConcreteDataType::string_datatype(), false);
let schema = Arc::new(Schema::new(vec![column_a, column_b]));
let va1: VectorRef = Arc::new(Int32Vector::from_slice(&[1, 2]));
let va1: VectorRef = Arc::new(Int32Vector::from_slice([1, 2]));
let vb1: VectorRef = Arc::new(StringVector::from(vec!["a", "b"]));
let batch1 = RecordBatch::new(schema.clone(), vec![va1, vb1]).unwrap();
let va2: VectorRef = Arc::new(Int32Vector::from_slice(&[3, 4, 5]));
let va2: VectorRef = Arc::new(Int32Vector::from_slice([3, 4, 5]));
let vb2: VectorRef = Arc::new(StringVector::from(vec!["c", "d", "e"]));
let batch2 = RecordBatch::new(schema.clone(), vec![va2, vb2]).unwrap();

View File

@@ -189,8 +189,8 @@ mod tests {
]));
let schema = Arc::new(Schema::try_from(arrow_schema).unwrap());
let c1 = Arc::new(UInt32Vector::from_slice(&[1, 2, 3]));
let c2 = Arc::new(UInt32Vector::from_slice(&[4, 5, 6]));
let c1 = Arc::new(UInt32Vector::from_slice([1, 2, 3]));
let c2 = Arc::new(UInt32Vector::from_slice([4, 5, 6]));
let columns: Vec<VectorRef> = vec![c1, c2];
let batch = RecordBatch::new(schema.clone(), columns.clone()).unwrap();
@@ -222,7 +222,7 @@ mod tests {
let schema = Arc::new(Schema::try_new(column_schemas).unwrap());
let numbers: Vec<u32> = (0..10).collect();
let columns = vec![Arc::new(UInt32Vector::from_slice(&numbers)) as VectorRef];
let columns = vec![Arc::new(UInt32Vector::from_slice(numbers)) as VectorRef];
let batch = RecordBatch::new(schema, columns).unwrap();
let output = serde_json::to_string(&batch).unwrap();

View File

@@ -5,6 +5,8 @@ edition.workspace = true
license.workspace = true
[dependencies]
async-recursion = "1.0"
async-trait.workspace = true
bytes = "1.1"
catalog = { path = "../../catalog" }
common-catalog = { path = "../catalog" }
@@ -15,6 +17,7 @@ datafusion-expr.workspace = true
datatypes = { path = "../../datatypes" }
futures = "0.3"
prost.workspace = true
session = { path = "../../session" }
snafu.workspace = true
table = { path = "../../table" }

View File

@@ -635,8 +635,6 @@ mod utils {
Operator::Modulo => "modulo",
Operator::And => "and",
Operator::Or => "or",
Operator::Like => "like",
Operator::NotLike => "not_like",
Operator::IsDistinctFrom => "is_distinct_from",
Operator::IsNotDistinctFrom => "is_not_distinct_from",
Operator::RegexMatch => "regex_match",
@@ -649,8 +647,6 @@ mod utils {
Operator::BitwiseShiftRight => "bitwise_shift_right",
Operator::BitwiseShiftLeft => "bitwise_shift_left",
Operator::StringConcat => "string_concat",
Operator::ILike => "i_like",
Operator::NotILike => "not_i_like",
}
}

View File

@@ -14,16 +14,20 @@
use std::sync::Arc;
use async_recursion::async_recursion;
use async_trait::async_trait;
use bytes::{Buf, Bytes, BytesMut};
use catalog::table_source::DfTableSourceProvider;
use catalog::CatalogManagerRef;
use common_error::prelude::BoxedError;
use common_catalog::format_full_table_name;
use common_telemetry::debug;
use datafusion::arrow::datatypes::SchemaRef as ArrowSchemaRef;
use datafusion::common::{DFField, DFSchema};
use datafusion::common::{DFField, DFSchema, OwnedTableReference};
use datafusion::datasource::DefaultTableSource;
use datafusion::physical_plan::project_schema;
use datafusion_expr::{Filter, LogicalPlan, TableScan, TableSource};
use datafusion_expr::{Filter, LogicalPlan, TableScan};
use prost::Message;
use session::context::QueryContext;
use snafu::{ensure, OptionExt, ResultExt};
use substrait_proto::proto::expression::mask_expression::{StructItem, StructSelect};
use substrait_proto::proto::expression::MaskExpression;
@@ -37,8 +41,8 @@ use table::table::adapter::DfTableProviderAdapter;
use crate::context::ConvertorContext;
use crate::df_expr::{expression_from_df_expr, to_df_expr};
use crate::error::{
self, DFInternalSnafu, DecodeRelSnafu, EmptyPlanSnafu, EncodeRelSnafu, Error, InternalSnafu,
InvalidParametersSnafu, MissingFieldSnafu, SchemaNotMatchSnafu, TableNotFoundSnafu,
self, DFInternalSnafu, DecodeRelSnafu, EmptyPlanSnafu, EncodeRelSnafu, Error,
InvalidParametersSnafu, MissingFieldSnafu, ResolveTableSnafu, SchemaNotMatchSnafu,
UnknownPlanSnafu, UnsupportedExprSnafu, UnsupportedPlanSnafu,
};
use crate::schema::{from_schema, to_schema};
@@ -46,18 +50,19 @@ use crate::SubstraitPlan;
pub struct DFLogicalSubstraitConvertor;
#[async_trait]
impl SubstraitPlan for DFLogicalSubstraitConvertor {
type Error = Error;
type Plan = LogicalPlan;
fn decode<B: Buf + Send>(
async fn decode<B: Buf + Send>(
&self,
message: B,
catalog_manager: CatalogManagerRef,
) -> Result<Self::Plan, Self::Error> {
let plan = Plan::decode(message).context(DecodeRelSnafu)?;
self.convert_plan(plan, catalog_manager)
self.convert_plan(plan, catalog_manager).await
}
fn encode(&self, plan: Self::Plan) -> Result<Bytes, Self::Error> {
@@ -71,7 +76,7 @@ impl SubstraitPlan for DFLogicalSubstraitConvertor {
}
impl DFLogicalSubstraitConvertor {
fn convert_plan(
async fn convert_plan(
&self,
mut plan: Plan,
catalog_manager: CatalogManagerRef,
@@ -102,20 +107,25 @@ impl DFLogicalSubstraitConvertor {
.fail()?
};
self.rel_to_logical_plan(&mut ctx, Box::new(rel), catalog_manager)
// TODO(LFC): Create table provider from outside, respect "disallow_cross_schema_query" option in query engine state.
let mut table_provider =
DfTableSourceProvider::new(catalog_manager, false, &QueryContext::new());
self.rel_to_logical_plan(&mut ctx, Box::new(rel), &mut table_provider)
.await
}
fn rel_to_logical_plan(
#[async_recursion]
async fn rel_to_logical_plan(
&self,
ctx: &mut ConvertorContext,
rel: Box<Rel>,
catalog_manager: CatalogManagerRef,
table_provider: &mut DfTableSourceProvider,
) -> Result<LogicalPlan, Error> {
let rel_type = rel.rel_type.context(EmptyPlanSnafu)?;
// build logical plan
let logical_plan = match rel_type {
RelType::Read(read_rel) => self.convert_read_rel(ctx, read_rel, catalog_manager)?,
RelType::Read(read_rel) => self.convert_read_rel(ctx, read_rel, table_provider).await?,
RelType::Filter(filter) => {
let FilterRel {
common: _,
@@ -128,7 +138,7 @@ impl DFLogicalSubstraitConvertor {
field: "input",
plan: "Filter",
})?;
let input = Arc::new(self.rel_to_logical_plan(ctx, input, catalog_manager)?);
let input = Arc::new(self.rel_to_logical_plan(ctx, input, table_provider).await?);
let condition = condition.context(MissingFieldSnafu {
field: "condition",
@@ -191,11 +201,11 @@ impl DFLogicalSubstraitConvertor {
Ok(logical_plan)
}
fn convert_read_rel(
async fn convert_read_rel(
&self,
ctx: &mut ConvertorContext,
read_rel: Box<ReadRel>,
catalog_manager: CatalogManagerRef,
table_provider: &mut DfTableSourceProvider,
) -> Result<LogicalPlan, Error> {
// Extract the catalog, schema and table name from NamedTable. Assume the first three are those names.
let read_type = read_rel.read_type.context(MissingFieldSnafu {
@@ -230,17 +240,17 @@ impl DFLogicalSubstraitConvertor {
.projection
.map(|mask_expr| self.convert_mask_expression(mask_expr));
// Get table handle from catalog manager
let table_ref = catalog_manager
.table(&catalog_name, &schema_name, &table_name)
.map_err(BoxedError::new)
.context(InternalSnafu)?
.context(TableNotFoundSnafu {
name: format!("{catalog_name}.{schema_name}.{table_name}"),
let table_ref = OwnedTableReference::Full {
catalog: catalog_name.clone(),
schema: schema_name.clone(),
table: table_name.clone(),
};
let adapter = table_provider
.resolve_table(table_ref)
.await
.with_context(|_| ResolveTableSnafu {
table_name: format_full_table_name(&catalog_name, &schema_name, &table_name),
})?;
let adapter = Arc::new(DefaultTableSource::new(Arc::new(
DfTableProviderAdapter::new(table_ref),
)));
// Get schema directly from the table, and compare it with the schema retrieved from substrait proto.
let stored_schema = adapter.schema();
@@ -262,7 +272,7 @@ impl DFLogicalSubstraitConvertor {
};
// Calculate the projected schema
let qualified = &format!("{catalog_name}.{schema_name}.{table_name}");
let qualified = &format_full_table_name(&catalog_name, &schema_name, &table_name);
let projected_schema = Arc::new(
project_schema(&stored_schema, projection.as_ref())
.and_then(|x| {
@@ -281,7 +291,7 @@ impl DFLogicalSubstraitConvertor {
// TODO(ruihang): Support limit(fetch)
Ok(LogicalPlan::TableScan(TableScan {
table_name: format!("{catalog_name}.{schema_name}.{table_name}"),
table_name: qualified.to_string(),
source: adapter,
projection,
projected_schema,
@@ -314,7 +324,7 @@ impl DFLogicalSubstraitConvertor {
.fail()?,
LogicalPlan::Filter(filter) => {
let input = Some(Box::new(
self.logical_plan_to_rel(ctx, filter.input().clone())?,
self.logical_plan_to_rel(ctx, filter.input.clone())?,
));
let schema = plan
@@ -324,7 +334,7 @@ impl DFLogicalSubstraitConvertor {
.context(error::ConvertDfSchemaSnafu)?;
let condition = Some(Box::new(expression_from_df_expr(
ctx,
filter.predicate(),
&filter.predicate,
&schema,
)?));
@@ -396,7 +406,10 @@ impl DFLogicalSubstraitConvertor {
| LogicalPlan::Explain(_)
| LogicalPlan::Analyze(_)
| LogicalPlan::Extension(_)
| LogicalPlan::Prepare(_) => InvalidParametersSnafu {
| LogicalPlan::Prepare(_)
| LogicalPlan::Dml(_)
| LogicalPlan::DescribeTable(_)
| LogicalPlan::Unnest(_) => InvalidParametersSnafu {
reason: format!(
"Trying to convert DDL/DML plan to substrait proto, plan: {plan:?}",
),
@@ -524,6 +537,7 @@ mod test {
use catalog::{CatalogList, CatalogProvider, RegisterTableRequest};
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use datafusion::common::{DFSchema, ToDFSchema};
use datafusion_expr::TableSource;
use datatypes::schema::RawSchema;
use table::requests::CreateTableRequest;
use table::test_util::{EmptyTable, MockTableEngine};
@@ -572,7 +586,7 @@ mod test {
let convertor = DFLogicalSubstraitConvertor;
let proto = convertor.encode(plan.clone()).unwrap();
let tripped_plan = convertor.decode(proto, catalog).unwrap();
let tripped_plan = convertor.decode(proto, catalog).await.unwrap();
assert_eq!(format!("{plan:?}"), format!("{tripped_plan:?}"));
}
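A hedged sketch of the new table-resolution path that `convert_read_rel` uses: resolve a fully qualified reference through `DfTableSourceProvider` instead of going to the catalog manager directly. The catalog, schema, and table names below are placeholders:

```rust
use catalog::table_source::DfTableSourceProvider;
use catalog::CatalogManagerRef;
use datafusion::common::OwnedTableReference;
use session::context::QueryContext;

// Sketch: resolve "greptime.public.demo" (placeholder names) into a
// DataFusion table source. Cross-schema checks are disabled here, matching
// the TODO left in the diff above.
async fn resolve_demo_table(catalog_manager: CatalogManagerRef) {
    let mut provider = DfTableSourceProvider::new(catalog_manager, false, &QueryContext::new());
    let table_ref = OwnedTableReference::Full {
        catalog: "greptime".to_string(),
        schema: "public".to_string(),
        table: "demo".to_string(),
    };
    let _source = provider.resolve_table(table_ref).await.unwrap();
}
```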

View File

@@ -105,6 +105,13 @@ pub enum Error {
#[snafu(backtrace)]
source: datatypes::error::Error,
},
#[snafu(display("Unable to resolve table: {table_name}, error: {source}"))]
ResolveTable {
table_name: String,
#[snafu(backtrace)]
source: catalog::error::Error,
},
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -127,6 +134,7 @@ impl ErrorExt for Error {
| Error::SchemaNotMatch { .. } => StatusCode::InvalidArguments,
Error::DFInternal { .. } | Error::Internal { .. } => StatusCode::Internal,
Error::ConvertDfSchema { source } => source.status_code(),
Error::ResolveTable { source, .. } => source.status_code(),
}
}

View File

@@ -13,6 +13,7 @@
// limitations under the License.
#![feature(let_chains)]
#![feature(trait_upcasting)]
mod context;
mod df_expr;
@@ -21,17 +22,19 @@ pub mod error;
mod schema;
mod types;
use async_trait::async_trait;
use bytes::{Buf, Bytes};
use catalog::CatalogManagerRef;
pub use crate::df_logical::DFLogicalSubstraitConvertor;
#[async_trait]
pub trait SubstraitPlan {
type Error: std::error::Error;
type Plan;
fn decode<B: Buf + Send>(
async fn decode<B: Buf + Send>(
&self,
message: B,
catalog_manager: CatalogManagerRef,

View File

@@ -0,0 +1,8 @@
[package]
name = "common-test-util"
version.workspace = true
edition.workspace = true
license.workspace = true
[dependencies]
tempfile.workspace = true

View File

@@ -12,4 +12,4 @@
// See the License for the specific language governing permissions and
// limitations under the License.
//! GreptimeDB builtin functions
pub mod temp_dir;

View File

@@ -0,0 +1,23 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
pub use tempfile::{NamedTempFile, TempDir};
pub fn create_temp_dir(prefix: &str) -> TempDir {
tempfile::Builder::new().prefix(prefix).tempdir().unwrap()
}
pub fn create_named_temp_file() -> NamedTempFile {
NamedTempFile::new().unwrap()
}

View File

@@ -12,4 +12,4 @@ serde_json = "1.0"
snafu = { version = "0.7", features = ["backtraces"] }
[dev-dependencies]
rand = "0.8"
rand.workspace = true

View File

@@ -1,4 +1,3 @@
#![feature(int_roundings)]
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");

Some files were not shown because too many files have changed in this diff.