Compare commits

...

158 Commits

Author SHA1 Message Date
LFC
b2a09c888a feat: phi accrual failure detector (#1200) 2023-03-21 11:47:47 +08:00
LFC
af101480b3 feat: add gRPC reflection service (#1208)
* feat: add gRPC reflection service

* feat: add gRPC reflection service
2023-03-21 11:23:29 +08:00
Weny Xu
b8f7f603cf test: add copy clause sqlness tests (#1198) 2023-03-21 11:22:26 +08:00
dennis zhuang
8fb97ea1d8 fix: losing region numbers after altering table (#1209) 2023-03-21 11:19:43 +08:00
discord9
21ce9c1163 docs: more explain in readme (#1195)
* docs: more explain in readme

* fix: typos

* fix: CR advices
2023-03-20 21:56:34 +08:00
Ruihang Xia
0a22375ac1 fix: nyc-taxi bench suite (#1204)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-03-20 21:53:01 +08:00
fys
0596d20a3b fix: can not create table in the local distributed environment (#1207)
fix: create table in local distribute env
2023-03-20 20:12:35 +08:00
Weny Xu
e19c8fa2b6 refactor: combine Copy To and Copy From (#1197)
* refactor: combine Copy To and Copy From

* Apply suggestions from code review

Co-authored-by: LFC <bayinamine@gmail.com>

* Apply suggestions from code review

Co-authored-by: Lei, HUANG <6406592+v0y4g3r@users.noreply.github.com>

---------

Co-authored-by: LFC <bayinamine@gmail.com>
Co-authored-by: Lei, HUANG <6406592+v0y4g3r@users.noreply.github.com>
2023-03-20 19:23:25 +08:00
LFC
ad886f5b3e feat: GRPC client stream interface for insertion (#1206)
* feat: GRPC client stream interface for insertion

* feat: GRPC client stream interface for insertion
2023-03-20 18:45:37 +08:00
LFC
f6669a8201 feat: add GRPC unary call service to our GreptimeDB (#1196)
* feat: add GRPC unary call service to our GreptimeDB
2023-03-20 14:27:32 +08:00
Yingwen
ad5c47185d feat: wait flush until the flush is done (#1188)
* feat: Add wait argument to flush

* test(storage): Fix flush tests
2023-03-20 11:25:19 +08:00
zyy17
64441616db ci: refactor compile-python.sh and use the python310 to build amd64 binary (#1199) 2023-03-18 16:16:15 +08:00
zyy17
09491d6aee ci: release the standalone binaries with pyo3 and install python utils in images (#1194)
* ci: install python3 and python3-dev in CI Dockerfile

* ci: release the standalone binaries with pyo3 support for multiple platforms

* refactor: install pip and pyarrow

* refactor: specify the python version
2023-03-17 15:42:13 +08:00
Weny Xu
7cfa30b2ab feat: add shutdown for standalone and metasrv (#1174) 2023-03-17 11:35:17 +08:00
Ning Sun
a7676d8860 refactor: port div_ceil from stdlib to avoid unstable features (#1191)
* refactor: use float div&ceil to avoid unstable features

* refactor: port div_ceil from rust stdlib
2023-03-16 22:55:35 +08:00
zyy17
62e2a60b7b ci: release artifacts after binary and container is ready (#1192)
ci: release artifacts before binary and container is ready
2023-03-16 09:20:03 +00:00
zyy17
128c5cabe1 ci: disable run tests temporarily (#1187) 2023-03-16 14:12:19 +08:00
Yingwen
9a001d3392 chore(datanode): derive serde default for Wal/CompactionConfig (#1173) 2023-03-16 11:56:28 +08:00
Weny Xu
facdda4d9f feat: implement CONNECTION clause of Copy To (#1163)
* feat: implement CONNECTION clause of Copy To

* test: add tests for s3 backend

* Apply suggestions from code review

Co-authored-by: Yingwen <realevenyag@gmail.com>

---------

Co-authored-by: Yingwen <realevenyag@gmail.com>
2023-03-16 11:36:38 +08:00
Lei, HUANG
17eb99bc52 feat: allow manual table flush through HTTP API (#1184) 2023-03-15 20:15:34 +08:00
Xieqijun
cd8be77968 feat(procedure): Max retry time (#1095)
* feat: procedure config

* fix: modify config

* feat: add retry logic

* feat: add error

* feat: add it

* feat: add it

* feat: add it

* feat: rm retry from runner

* feat: use backon

* feat: add retry_interval

* feat: add retry_interval

* fix: conflict

* fix: cr

* feat: add retry error and id

* feat: rename

* refactor: execute

* feat: use config dir

* fix: cr

* fix: cr

* fix: fmt

* fix: fmt

* fix: pr

* fix: it

* fix: rm unless cmd params

* feat: add toml

* fix: ut

* feat: add rolling back

* fix: cr

* fix: cr

* fix: cr

* fix: ci

* fix: ci

* fix: ci

* chore: Apply suggestions from code review

---------

Co-authored-by: Yingwen <realevenyag@gmail.com>
2023-03-15 08:28:08 +00:00
Eugene Tolbakov
b530ac9e60 chore(from_unixtime): remove UDF from_unixtime (#1179)
* chore(from_unixtime): remove UDF from_unixtime

* chore(from_unixtime): restore timestamp.rs for further usage

* chore(from_unixtime): address fmt issue
2023-03-15 16:27:09 +08:00
zyy17
76f1a79f1b ci: set 'continue-on-error' to false since the problem of compiling binary was resolved (#1182)
Signed-off-by: zyy17 <zyylsxm@gmail.com>
2023-03-15 15:41:36 +08:00
LFC
4705245d60 docs: region failover RFC (#1139)
* docs: region failover RFC

* fix: resolve PR comments
2023-03-15 15:21:58 +08:00
Zheming Li
f712f978cf feat: Report disk usage stats to metasrv thru heartbeat (#1167)
* feat: Report disk usage stats to metasrv thru heartbeat

Signed-off-by: Zheming Li <nkdudu@126.com>

* Update src/catalog/src/error.rs

Co-authored-by: fys <40801205+Fengys123@users.noreply.github.com>

* Update src/catalog/src/lib.rs

Co-authored-by: fys <40801205+Fengys123@users.noreply.github.com>

* Update src/mito/src/table.rs

Co-authored-by: fys <40801205+Fengys123@users.noreply.github.com>

---------

Signed-off-by: Zheming Li <nkdudu@126.com>
Co-authored-by: fys <40801205+Fengys123@users.noreply.github.com>
2023-03-15 03:11:32 +00:00
discord9
cbf64e65b9 refactor: put dataframe & query into greptime module (#1172)
* feat: impl getitem for `vector`

* feat: mv `query`&`dataframe` into `greptime` for PyO3

* refactor: allow call dataframe&query

* refactor: pyo3 query&dataframe

* chore: CR advices
2023-03-15 11:01:43 +08:00
zyy17
242ce5c2aa ci: add pyo3 options for mac (#1178) 2023-03-14 13:51:58 +00:00
Ruihang Xia
e8d2e82335 fix: ambiguous column reference (#1177)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-03-14 13:18:43 +00:00
zyy17
0086cc2d3d fix: export 'PYO3_CROSS_LIB_DIR' when cargo build for aarch64-linux and refactor matrix opts (#1171) 2023-03-14 15:35:29 +08:00
Weny Xu
cdc111b607 refactor: make the cmd hold the application instance (#1159) 2023-03-14 15:18:50 +08:00
zyy17
81ca1d8399 refactor: add the separate GitHub Action job to push the image to the UCloud registry (#1170) 2023-03-14 11:35:18 +08:00
LFC
8d3999df5f fix: failed to run subquery wrapped in two parentheses (#1157) 2023-03-14 10:59:43 +08:00
discord9
a60788e92e fix: use correct env var (#1166)
* fix: use correct env var

* fix: move COPY up so rustup know it's nightly

* fix: add `pyo3_backend` in GHA yml

* chore: name for `TODO`

* temp: not set `pyo3_backend` before find DSO

* fix: release linux with pyo3_backend
2023-03-14 10:57:13 +08:00
Weny Xu
296c6dfcbf feat: implement table flush (#1121)
* feat: add flush method for trait

* feat: implement flush via grpc

* chore: move table_dir/region_name/region_id to table crate

* chore: Update src/mito/src/table.rs

---------

Co-authored-by: Yingwen <realevenyag@gmail.com>
2023-03-13 20:10:37 +08:00
LFC
604c20a83d refactor: remove the SQL execution interfaces in Datanode (#1135)
* refactor: remove the SQL execution interfaces in Datanode
2023-03-13 18:45:30 +08:00
Weny Xu
c7f114c8fa feat: add shutdown for frontend (#1161) 2023-03-13 17:59:36 +08:00
Weny Xu
8a83de4ea5 feat: add shutdown for datanode (#1160) 2023-03-13 17:49:26 +08:00
discord9
3377930a50 build: add cross compile docker (#1156)
* build: add cross compile docker

* build: added compile python to github action

* fix: correct path

* fix: Python Compile

* fix: run mulitple cmds

* fix: both cross compile docker file&github action

* refactor: compile-python.sh

* chore: put wget install together

* fix: CR advices

* chore: add `-F pyo3_backend`
2023-03-13 16:56:03 +08:00
Ning Sun
85dd7e4f24 feat: implement promql query on grpc (#1134)
* feat: implement promql query on grpc

* test: resolve test errors

* test: add tests for promql grpc api

* refactor: align prom object name with proto

* chore: switch proto revision to main
2023-03-13 15:24:34 +08:00
LFC
f790fa05c1 fix: validate insert request (#1142)
* fix: validate GRPC insert request has the value when required by column schema, before actually made any change to the DB

* fix: resolve PR comments
2023-03-13 11:03:51 +08:00
Yingwen
dfd91a1bf8 chore: Bump version to 0.1.1 (#1155) 2023-03-11 01:11:23 +08:00
localhost
ded31fb069 chore: remove addr from datanode error message (#1152)
* chore: remove addr from datanode error message

* chore: add log for flight get error
2023-03-10 14:13:01 +08:00
Ning Sun
6a574fc52b chore: update script prompt (#1154) 2023-03-10 10:51:38 +08:00
Ning Sun
58bdf27068 fix: make pyo3 optional again (#1153)
* fix: make pyo3 optional again

* Update src/script/Cargo.toml

Co-authored-by: dennis zhuang <killme2008@gmail.com>

---------

Co-authored-by: dennis zhuang <killme2008@gmail.com>
2023-03-09 14:16:48 +00:00
discord9
610a895b66 feat: abi3 & abi37 (#1151) 2023-03-09 20:08:07 +08:00
zyy17
a9ccc06449 ci: modify scheduled release tag to 'v0.2.0-nightly-yymmdd' (#1149)
* ci: modify scheduled release tag to 'v0.2.0-nightly-yymmdd'

Signed-off-by: zyy17 <zyylsxm@gmail.com>

* refactor: modify 'profile.weekly' to 'profile.nightly'

Signed-off-by: zyy17 <zyylsxm@gmail.com>

---------

Signed-off-by: zyy17 <zyylsxm@gmail.com>
2023-03-09 16:42:40 +08:00
Ruihang Xia
38fe1a2f01 chore: update dependencies (#1148)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-03-09 14:24:29 +08:00
fys
3414ac46b0 fix: remove unnecessary braces around block return value (#1147) 2023-03-09 03:42:04 +00:00
Lei, HUANG
757b4a87a0 fix: remove profile in mem-prof crate to suppress compiler warnings (#1146) 2023-03-09 03:39:24 +00:00
Yingwen
ba1517fceb ci: Fix step "build and push amd64" not triggered (#1145) 2023-03-09 11:35:38 +08:00
Yingwen
5b5d953d56 ci: tolerate error while building arm64 releases (#1143)
* ci: allow failure while building arm64 docker

* ci: Remove continue-on-error on docker step
2023-03-08 21:11:40 +08:00
Yingwen
3f6cbc378d ci: Disable arm64 release temporarily (#1141) 2023-03-08 19:13:00 +08:00
Yingwen
9619940569 ci: Allow error when building release for non-x86 platform (#1140) 2023-03-08 18:12:06 +08:00
Weny Xu
ed8252157a chore: code styling (#1137)
Co-authored-by: Ruihang Xia <waynestxia@gmail.com>
2023-03-08 08:10:12 +00:00
Ruihang Xia
3e0fb7e75b test: ignore two test cases due to arrow-datafusion#5513 (#1138)
* test: ignore two test cases due to arrow-datafusion#5513

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* clean up

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-03-08 07:29:34 +00:00
Bohan Wu
ba3ce436df refactor(SST): UUID as id in FileMeta (#1116)
* feat(SST): use a newType named FileId for FileMeta

* chore: rename some functions

* fix: compatible for previous FileMeta format

* fix: alias for file_id when getting deserialized
2023-03-08 14:27:20 +08:00
Eugene Tolbakov
b31a6cb506 refactor: replace tempdir with tempfile (#1123)
* refactor: replace tempdir with tempfile

* refactor(query): move tempfile dependency under the workspace's Cargo.toml

* refactor(tempfile): create common-test-util

* refactor(tempfile): fix toml format

* refactor(tempfile): remove tempfile out of dependencies

* refactor(tempfile): fix incorrect toml
2023-03-08 11:15:56 +08:00
SSebo
95090592f0 feat: mysql prepare replacing sql placeholder to param (#1086)
* feat: mysql prepare by replace ? in sql to param

* chore: mysql prepare statment support time param

* chore: prepare test more types

* chore: add TODO
2023-03-08 11:02:29 +08:00
Ruihang Xia
3a527c0fd5 feat: impl proc macro range_fn and some aggr_over_time functions (#1072)
* impl range_fn proc macro

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* impl some aggr_over_time fn

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* impl present_over_time and absent_over_time

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* accomplish planner, and correct type cast

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* clean up

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* document the macro

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix styles

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* update irate/idelta test

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add test cases

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix clippy

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-03-07 23:39:45 +08:00
elijah
819b60ca13 feat(datatypes): implement VectorOp::take (#1115)
* feat: add take index method for VectorOp

* chore: make clippy happy

* chore: make clippy happy

* chore: improve the code

* chore: improve the code

* chore: add take null test

* chore: fix clippy
2023-03-07 19:27:33 +08:00
Weny Xu
7169fe2989 feat: implement Copy From (#1064) 2023-03-07 17:54:11 +08:00
Zheming Li
b70672be77 feat: track disk usage of regions (#1125)
* feat: track disk usage of regions

Signed-off-by: Zheming Li <nkdudu@126.com>

* calculate disk usage when call

* add default on file meta

---------

Signed-off-by: Zheming Li <nkdudu@126.com>
2023-03-07 17:13:12 +08:00
Lei, HUANG
a4c01f4a3a feat: memory profiling (#1124)
* feat: use jemalloc as default allocator

* feat: add feature for mem-prof

* feat: add errors

* make common-mem-prof optional dep

* fix: toml format

* doc: add profile doc

* fix: typo
2023-03-07 17:12:51 +08:00
Weny Xu
bd98a26cca chore: bump greptime-proto to latest(ad01872) (#1102) 2023-03-07 10:52:42 +08:00
shuiyisong
1b4236d698 refactor: use split instead of serde_urlencoded in http auth (#1110)
* refactor: change from urlencoded to regex

* refactor: change from urlencoded to regex

* chore: add unit test

* chore: update comment

* chore: remove local benchmark test

* chore: minor fix

* chore: remove unused dep
2023-03-07 10:51:47 +08:00
Lei, HUANG
e8cc9b4b29 test: add manifest compatibility tests (#1130)
* tests: add manifest compatibility tests

* fix: clippy
2023-03-06 19:31:54 +08:00
discord9
379f581780 test: add Integrated Test for Coprocessor& fix minor bugs (#1122)
* feat: cache `Runtime`

* fix: coprstream schema not set

* test: integrated tests for Coprocessor

* fix: UDF fixed

* style: remove unused import

* chore: remove more unused import

* feat: `filter`, (r)floordiv for Vector

* chore: CR advices

* feat: auto convert to `lit`

* chore: fix typo

* feat: from&to `pyarrow.array`

* feat: allow `pyarrow.array` as args to builtins

* chore: cargo fmt

* test: CI add `pyarrow`

* test: install Python&PyArrow in CI

* test: not cache depend for now

* chore: CR advices

* test: fix name

* style: rename
2023-03-06 19:20:59 +08:00
fys
ff6cfe8e70 refactor: move the batch_get to KvStore trait (#1029)
* move batch_get from KvStoreExt to KvStore

* add some unit tests

* add some unit test

* add some unit tests

* expose batch_get grpc method
2023-03-06 17:35:43 +08:00
Igor Morozov
5a397917c0 docs(contributingmd): add run tests commands (#1129)
* docs(contributingmd): add run tests commands

* docs(contributingmd): add link to nextest website

Co-authored-by: dennis zhuang <killme2008@gmail.com>

---------

Co-authored-by: dennis zhuang <killme2008@gmail.com>
2023-03-06 15:54:16 +08:00
fys
559880cb84 fix: can not find catalog when create table (#1118)
* fix: get catalog by name in RemoteCatalogManager

* cr

* cr

* cr

* fix: ut failed
2023-03-06 14:44:40 +08:00
Ruihang Xia
b76b27f3bf refactor: try to remove unnecessary tests in error mod (#750)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-03-06 12:31:30 +08:00
yuanbohan
d4e0dc3685 feat: specify prom server start addr (#1111)
* feat: specify promql server start addr

* refactor: rename promql to prom in Prometheus API server scenario
2023-03-06 11:07:21 +08:00
Eugene Tolbakov
b022556b79 fix: apply ttl and write_buffer_size options when a table is created via procedure (#1117)
* fix: apply ttl and write_buffer_size options when a table is created via procedure

* fix: address code review suggestion

* fix: use borrowing of table_options correctly
2023-03-05 19:37:23 +08:00
shuiyisong
bd065ea6e8 fix: remove incorrect continue (#1114) 2023-03-02 19:52:17 +08:00
yuanbohan
9a87f5edf8 fix(grpc): support timestamp precision (#1113) 2023-03-02 17:33:59 +08:00
Weny Xu
e851b6d019 feat: implement Copy From parser (#1092)
* feat: implement Copy From parser

* Apply suggestions from code review

Co-authored-by: Yingwen <realevenyag@gmail.com>

---------

Co-authored-by: Lei, HUANG <6406592+v0y4g3r@users.noreply.github.com>
Co-authored-by: Yingwen <realevenyag@gmail.com>
2023-03-02 14:03:13 +08:00
Ruihang Xia
e7b92f24e8 feat: impl EmptyMetric plan and time() function (#1100)
* impl EmptyMetric plan

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add test cases

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* impl planner part

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* adapt new datafusion changes

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix typo

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-03-02 03:15:55 +00:00
Igor Morozov
4b8db408cf style(contributingmd): fix markdown issues and typos (#1107)
* style(contributingmd): fix markdown issues and typos

* style(contributingmd): remove code blocks in lists
2023-03-01 20:00:36 +08:00
Yingwen
98659899c0 refactor: Move mito engine tests to a separate file (#1104)
* refactor(mito): Move tests to a separate file

* chore(query): Remove empty mod function
2023-03-01 11:46:39 +00:00
Ruihang Xia
b1311801da ci: update breaking-change labeler (#1109)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-03-01 19:24:21 +08:00
Yingwen
f1b65d9b77 test: fix datanode::test_read_from_config_file (#1106)
* test: Fix datanode::test_read_from_config_file

* test: frontend and metasrv don't read example toml file
2023-03-01 18:31:40 +08:00
Ruihang Xia
d5a2a26916 chore(deps): bump sqlness to v0.4 (#1101)
deps: bump sqlness to v0.4

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-03-01 17:27:16 +08:00
Ning Sun
8e7e68708f docs: correct readme format (#1105)
* docs: correct readme format

* ci: fix config name
2023-03-01 16:59:11 +08:00
Ruihang Xia
9c1118b06d ci: adjust title labeler's rule (#1079)
* ci: adjust title labeler's rule

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* Apply suggestions from code review

Co-authored-by: Yingwen <realevenyag@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
Co-authored-by: Yingwen <realevenyag@gmail.com>
2023-03-01 15:16:21 +08:00
Yingwen
3fb93efbd0 docs: Document fields in the config examples (#1098)
* docs: Add comments to standalone config example

* docs: Add comments to datanode config example

* docs: Add comments to frontend config example

* docs: Add comments to meta-srv config example

* docs: Use "GB" instead of "GiB"

* docs: Add link to the selector doc

* docs: Fix grammar
2023-03-01 15:14:08 +08:00
Yingwen
3fd9c2f144 feat: Store error in procedure state (#1062)
* docs: Change comment position

* refactor(procedure): Store error in ProcedureState

* test: Mock instance with procedure enabled

* feat: Add wait method to wait for procedure

* test(datanode): Test create table by procedure

* chore: Fix clippy
2023-03-01 14:37:50 +08:00
Ning Sun
75e48c5f20 ci: fix apidoc generation 2023-03-01 14:09:47 +08:00
Ning Sun
d402f83442 ci: generate apidocs when pushing to default branch (#1093)
* ci: generate apidocs when pushing to default branch

* ci: require clippy before running tests

* fix: resolve new clippy warnings on primitive slice

* fix: resolve more clippy warnings

* Update .github/workflows/apidoc.yml

Co-authored-by: Ruihang Xia <waynestxia@gmail.com>

* ci: add an index html to redirect

---------

Co-authored-by: Ruihang Xia <waynestxia@gmail.com>
2023-03-01 13:18:26 +08:00
discord9
c5c6494e0b feat: add PyO3(Hence CPython as a Optional Backend (#976)
* refactor: ffi_types

* style: fmt

* refactor: use `String` for return when possible

* todo: vector_impl

* feat: pyobj_try_typed_val

* refactor: more backend indep function

* feat: +-*/ magic methods

* refactor: copr

* style: fmt

* feat: add paired tests

* refactor: more

* refactor: move inside `python` folder

* refactor: all but test code

* feat: builtins for PyO3

* chore: add licenses

* chore: remove unused&add todos

* refactor: remove old files

* chore: mark unused

* chore: fmt

* chore: license

* feat: query in PyO3

* test: paired testcases for rspy&pyo3

* feat: PyDataFrame(Untested)

* feat: some allow_threads

* style: fmt

* style: add license

* feat: rebase manually of #962

* feat: more `allow_threads`

* chore: typo

* chore: remove some `TODO`

* test: allow margin of epsilon

* chore: code review advices

* chore: more CR adjust

* chore: more adjust

* feat: kwargs&its test

* chore: remove some `dbg!`

* chore: allow params

* fix: put `dataframe` into scope

* chore: newline

* fix: adjust after rebase

* fix: test serde skip attr

* style: taplo

* feat: add `pyo3_backend` feature

* doc: update CI&readme
2023-03-01 10:45:55 +08:00
shuiyisong
dc50095af3 fix: use catalog from connection (#1099)
* fix: using schema instead of full database

* fix: using schema instead of full database

* fix: using schema instead of full database

* chore: add debug log

* chore: remove debug log

* chore: remove debug log

* chore: fix cr
2023-03-01 10:34:57 +08:00
LFC
8cd69f441e feat: REPL issues logical plan to DB (#1097) 2023-02-28 16:59:48 +08:00
Weny Xu
f52fc9b7d4 fix: fix panic when the root is not specified (#1089) 2023-02-28 10:54:52 +08:00
shuiyisong
50d2685365 fix: fix catalog parsing issue (#1091)
fix: try fix catalog parsing issue
2023-02-27 22:51:49 +08:00
LFC
11d45e2918 refactor: upgrade DataFusion, Arrow and Sqlparser (#1074)
* refactor: upgrade DataFusion, Arrow and Sqlparser

* fix: resolve PR comments
2023-02-27 22:20:08 +08:00
shuiyisong
30287e7e41 fix: continue if parsing err catalog (#1090)
* fix: continue if parsing err catalog

* fix: change from warn to error
2023-02-27 11:28:45 +00:00
Xieqijun
0b3f955ca7 feat: Add an error variant RetryLater (#1058)
* feat: support retry error

* fix: ci

* fix: ci

* fix: fmt

* feat: add convert procedure error

* Docs : add rustdoc

* fix: cr

* fix: cr

* fix: rm unless code
2023-02-27 17:19:37 +08:00
Ning Sun
4b58a8a18d feat: update substrait and prost version (#1080) 2023-02-27 15:18:12 +08:00
Yingwen
bd377ef329 feat: Procedure to create table and register table to catalog (#1040)
* feat: Add table-procedures crate

* feat: Implement procedure to create table

* feat: Integrate procedure manager to datanode

* test: Test CreateTableProcedure

* refactor: Rename table-procedures to table-procedure

* feat: Implement create_table_by_procedure

* chore: Remove comment

* chore: Add todo

* feat: Add procedure config to standalone mode

* feat: Register table-procedure loaders

* feat: Address review comments

CreateTableProcedure just return error if the subprocedure is failed

* chore: Address CR comments
2023-02-27 11:49:23 +08:00
LFC
df751c38b4 feat: a simple REPL for debugging purpose (#1048)
* feat: a simple REPL for debugging purpose

* fix: rebase develop
2023-02-27 11:00:15 +08:00
Yingwen
f6e871708a chore: Rename MetaClientOpts to MetaClientOptions (#1075)
* fix: Serialize FrontendOptions to toml

* fix: Serialize DatanodeOptions to toml

* fix: Serialize StandaloneOptions to toml

See https://users.rust-lang.org/t/why-toml-to-string-get-error-valueaftertable/85903/2

* chore!: Rename MetaClientOpts to MetaClientOptions

BREAKING CHANGE: Change the meta_client_opts in the config file to
meta_client_options
2023-02-24 16:28:38 +08:00
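For reference, the breaking change noted above amounts to renaming one key in the TOML configuration file. A minimal sketch follows, assuming `meta_client_options` is a top-level section as in the repository's example config files; the field shown under it is illustrative only and not taken from this change set:

```toml
# Before this change (hypothetical excerpt)
[meta_client_opts]
metasrv_addrs = ["127.0.0.1:3002"]  # illustrative field, not quoted from the repo

# After this change: only the section name differs
[meta_client_options]
metasrv_addrs = ["127.0.0.1:3002"]
```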
fys
819c990a89 fix: thread that reports the heartbeat panics in unit test (#1078)
fix: ut panic in heartbeat report thread
2023-02-24 15:36:32 +08:00
Ruihang Xia
a8b4e8d933 ci: simplify codecov commment (#1073)
chore(ci): simplify codecov commment

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-02-24 15:22:57 +08:00
Yingwen
710e2ed133 ci: Use fixed skywalking-eyes revision (#1076)
The latest PR of skywalking-eyes https://github.com/apache/skywalking-eyes/pull/149
breaks our CI action
2023-02-24 07:05:18 +00:00
Ning Sun
81eab74b90 refactor: remove grpc client constructor with default catalog/schema (#1060)
* refactor: remove grpc client with default catalog/schema

* refactor: re-export consts in client module
2023-02-24 11:06:14 +08:00
Ning Sun
8f67d8ca93 fix: update mysql server library to fix tls corrupt messsage issue (#1065) 2023-02-24 10:20:44 +08:00
Ruihang Xia
4cc3ac37d5 feat: add DictionaryVector DataType (#1061)
* fix stddev and stdvar. try build range function expr

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* feat: add dictionary data type

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* preserve timestamp column in range manipulator

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* plan range functions

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* update test result

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix typo

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* update test result

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* update test result

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* resolve CR comments

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* resolve CR comments

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-02-23 20:31:07 +08:00
Lei, HUANG
b48c851b96 fix: support datetime type parsing (#1071)
* fix: support datetime type parsing

* fix: unit test
2023-02-23 20:26:47 +08:00
Xuanwo
fdd17c6eeb refactor: Clean up re-export of opendal services (#1067)
Signed-off-by: Xuanwo <github@xuanwo.io>
2023-02-23 14:12:34 +08:00
Ruihang Xia
51641db39e feat: support filter expression in PromQL (#1066)
feat: support filter expression

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-02-23 11:55:23 +08:00
Xuanwo
98ef74bff4 chore: Bump OpenDAL to v0.27 (#1057)
* Bump OpenDAL to v0.27

Signed-off-by: Xuanwo <github@xuanwo.io>

* Make cargo check happy

Signed-off-by: Xuanwo <github@xuanwo.io>

* Address comments

Signed-off-by: Xuanwo <github@xuanwo.io>

* Address comments

Signed-off-by: Xuanwo <github@xuanwo.io>

* Format toml

Signed-off-by: Xuanwo <github@xuanwo.io>

* Make taplo happy

Signed-off-by: Xuanwo <github@xuanwo.io>

---------

Signed-off-by: Xuanwo <github@xuanwo.io>
2023-02-23 11:20:45 +08:00
Lei, HUANG
f42acc90c2 fix: allow empty TableOptions (#1063)
fix: allow default TableOptions to avoid panic when upgrading from older versions
2023-02-22 19:19:13 +08:00
Lei, HUANG
2df8143ad5 feat: support table ttl (#1052)
* feat: purge expired sst on compaction

* chore: add more log

* fix: clippy

* fix: mark expired ssts as compacting before picking candidates

* fix: some CR comments

* fix: remove useless result

* fix: cr comments
2023-02-22 16:56:20 +08:00
shuiyisong
fb2e0c7cf3 feat: add auth to grpc handler (#1051)
* chore: get header in grpc & temp save

* chore: change authscheme to include data str

* chore: add auth to grpc flight handler

* chore: add unit test & hold for now since grpc api doesnt accept req input

* chore: minor change

* chore: minor change

* chore: add flight context to database interface

* chore: add test

* chore: update proto version & fix cr issue

* chore: add test

* chore: minor update
2023-02-22 15:20:10 +08:00
Xieqijun
390e9095f6 feat: admin http api (#1026)
* feat: catalog list

* feat: catalog list

* feat:api

* feat: leader info

* feat: use constant

* fix: ci

* feat: query heartbeat by ip

* ut: add test

* fix: cr

* fix: cr

* fix: cr
2023-02-22 14:18:37 +08:00
dennis zhuang
bcd44b90c1 feat: invoke TQL via SQL interface (#1047)
* feat: impl TQL parser in sqlparser

* feat: impl invoking TQL via SQL

* chore: remove src/sql/src/tql_parser.rs

* chore: fix typo

* test: add tql test

* chore: carry type

Co-authored-by: LFC <bayinamine@gmail.com>

* chore: cr comments

---------

Co-authored-by: LFC <bayinamine@gmail.com>
2023-02-22 11:28:09 +08:00
Yingwen
c6f2db8ae0 feat(procedure): Add procedure watcher (#1043)
* refactor: Use watch channel to store ProcedureState

* feat: Add a watcher to wait for state change

* test: test watcher on procedure failure

* feat: Only clear message cache on success

* feat: submit returns Watcher
2023-02-21 17:19:39 +08:00
Lei, HUANG
e17d5a1c41 feat: support table options (#1044)
* feat: change table options from string map to a struct, add ttl and write_buffer_size

* fix: also pass table options to table meta

* feat: pass table options when opening/creating regions

* fix: CR comments
2023-02-21 08:10:23 +00:00
Ruihang Xia
23092a5208 feat: Support unary, paren, bool keyword and nonexistent metric/label in PromQL (#1049)
* feat: don't report metric/label not found as error

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* feat: impl unary expr

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* feat: impl paren expr

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* feat: support bool keyword

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add some tests

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* ignore nonexistence labels during planning

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix clippy

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-02-21 15:24:01 +08:00
Yingwen
4bbad6ab1e ci: allow ci pass when codecov can't upload data (#1046) 2023-02-21 14:52:44 +08:00
Zhizhen He
6833b405d9 ci: upgrade spell checker to 1.13.10 (#1045)
* ci: upgrade spell checker to 1.13.10

Signed-off-by: Zhizhen He <hezhizhen.yi@gmail.com>

* fix: fix existing typos

Signed-off-by: Zhizhen He <hezhizhen.yi@gmail.com>

* chore: use taplo to format typos.toml

Signed-off-by: Zhizhen He <hezhizhen.yi@gmail.com>

* chore: add fmt-toml rule to format TOML files

Signed-off-by: Zhizhen He <hezhizhen.yi@gmail.com>

---------

Signed-off-by: Zhizhen He <hezhizhen.yi@gmail.com>
2023-02-21 10:55:27 +08:00
Yingwen
aaaf24143d feat: Procedure to create a mito engine (#1035)
* feat: wip

* feat: Implement procedure to create mito table

* feat: Add create_table_procedure to TableEngine

* feat: Impl dump and lock for CreateMitoTable

* feat: Impl CreateMitoTable::execute and register it to manager

* feat(common-procedure): pub local mod

* feat: Add simple test for MitoCreateTable

* style: Fix clippy

* refactor: Move create_table_procedure to a new trait TableEngineProcedure
2023-02-21 09:40:56 +08:00
Jiachun Feng
9161796dfa feat: export the data from a table to parquet files (#1000)
* feat: copy table parser

* feat: coopy table

* chore: minor fix

* chore: give stmt a more clearer name

* chore: unified naming

* chore: minor change

* chore: add a todo

* chore: end up with an empty file when occur an empty table

* feat: format with copy table

* feat: with options

* chore: by cr

* chore: default 5M rows per segment

* Update src/datanode/src/sql/copy_table.rs

Co-authored-by: Lei, HUANG <6406592+v0y4g3r@users.noreply.github.com>

* Update src/datanode/src/sql/copy_table.rs

Co-authored-by: Lei, HUANG <6406592+v0y4g3r@users.noreply.github.com>

* Update src/datanode/src/error.rs

Co-authored-by: fys <40801205+Fengys123@users.noreply.github.com>

---------

Co-authored-by: Lei, HUANG <6406592+v0y4g3r@users.noreply.github.com>
Co-authored-by: fys <40801205+Fengys123@users.noreply.github.com>
2023-02-20 16:43:50 +08:00
Ruihang Xia
68b231987c feat: improve Prometheus compliance (#1022)
* initial impl

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* minor (useless) refactor

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* retrieve metric name

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add time index column to group by columns
filter out NaN in normalize
remove NULL in instant manipulator
accept form data as HTTP params
correct API URL
accept second literal as step param

* happy clippy

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* update test result

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-02-20 07:29:43 +00:00
Yingwen
6e9964ac97 refactor(storage): Simplify debug output of some structs (#1028)
* refactor: Simplify debug output of RegionImpl

* feat: Simplify memtable debug output
2023-02-20 14:35:30 +08:00
shuiyisong
6afd79cab8 feat: support InfluxDB auth protocol (#1034)
* chore: add http auth influxdb compat

* chore: add test

* chore: minor change

* chore: fix typo

* chore: fix cr
2023-02-20 03:26:19 +00:00
fys
4e88a01638 feat: support influxdb ping and health endpoint (#1027)
* feat: support influxdb ping and health endpoint

* add some unit tests

* ping and health api no need auth

* cr
2023-02-20 02:31:51 +00:00
Lei, HUANG
af1f8d6101 feat: file purger (#1030)
* wip

* wip

* feat: file purger

* chore: add tests

* feat: delete removed file on sst merge

* chore: move MockAccessLayer to test_util

* fix: some cr comments

* feat: add await termination for scheduler

* fix: some cr comments

* chore: rename max_file_in_level0 to max_files_in_level0
2023-02-19 14:56:41 +08:00
dennis zhuang
a9c8584c98 feat: impl insert data from query (#1025)
* feat: refactor insertion in datanode

* feat: supports inserting data by select query

* feat: impl cast operation for vector

* feat: streaming insert from select query results

* chore: minor changes

* fix: remove unwrap

* test: insert_to_requsts

* test: test_execute_insert_by_select

* fix: cast operation for vectors

* fix: test

* fix: typo

* chore: by CR comments

* fix: test_statement_to_request
2023-02-17 17:56:12 +08:00
Eugene Tolbakov
7787cfdd42 refactor(datatypes): enhance MutableVector methods (#987)
* refactor(datatypes): enhance MutableVector methods

* refactor(datatypes): address code review issues

* refactor(datatypes): address more code review issues

* refactor(datatypes): fix merge conflicts

* refactor(datatypes): address code review issues

* refactor(datatypes): address more code review issues

* refactor(datatypes): update sql delete with the newly introduced method
2023-02-17 16:16:23 +08:00
Weny Xu
2f39a77137 feat: add close method for the region trait (#970)
feat: add close for region trait
2023-02-17 11:32:55 +08:00
Lei, HUANG
16f86a9d77 refactor: separate compaction stuff from task scheduler (#1021)
* refactor: make schedule request return value generic

* feat: add handler trait

* wip

* feat: use task handler

* fix: unit test

* refactor: separate scheduler mod

* chore: rename

* chore: Request use associate type

* refactor: use associate type

* refactor: use associate type to reduce generic parameters

* chore: further remove generic types

* chore: further remove a generic parameter
2023-02-16 19:30:23 +08:00
dennis zhuang
5ec1a7027b feat: supports passing user params into coprocessor (#962)
* feat: make args in coprocessor optional

* feat: supports kwargs for coprocessor as params passed by the users

* feat: supports params for /run-script

* fix: we should rewrite the coprocessor by removing kwargs

* fix: remove println

* fix: compile error after rebasing

* fix: improve http_handler_test

* test: http scripts api with user params

* refactor: tweak all to_owned
2023-02-16 16:11:26 +08:00
Yingwen
ddbc97befb refactor: changes CreateTableRequest::schema to RawSchema (#1018)
* refactor: changes CreateTableRequest::schema to RawSchema

* refactor(grpc-expr): create_table_schema returns RawSchema
2023-02-16 16:04:17 +08:00
Ruihang Xia
a8c2b35ec6 chore: bump rust to nightly-2023-02-14 (#1019)
* chore: bump rust to nightly-2023-02-14

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix clippy

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* bump statrs to 0.16

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-02-16 13:17:57 +08:00
Yingwen
04afee216e feat(procedure): Support multi-lock keys and querying procedure state from context (#1006)
* feat: Add ContextProvider to Context

So procedures can query states of other procedures via the
ContextProvider and they don't need to hold a ProcedureManagerRef

* feat: Procedure supports acquring multiple lock keys

* test: Use multi-locks in test

* feat: Add keys_to_lock/unlock
2023-02-15 18:04:19 +08:00
LFC
5533040be7 fix: describe distribute table (#988)
* fix: describe distribute table
2023-02-15 17:48:43 +08:00
LFC
34fdba77df feat: create database if not exists (#1009) 2023-02-15 17:47:46 +08:00
Ning Sun
cd0d58cb24 fix: correct date/time type format for postgresql (#1001)
* fix: correct date/time type format for postgresql

* fix: tests for timestamp

* refactor: use Utc datetime for timestamp::to_chrono_datetime

* Update src/servers/Cargo.toml

Co-authored-by: LFC <bayinamine@gmail.com>

---------

Co-authored-by: LFC <bayinamine@gmail.com>
2023-02-15 09:40:16 +00:00
yuanbohan
8b869642b8 feat: update promql-parser to v0.1.0 (#994)
feat: update promql-parser version to v0.1.0
2023-02-15 17:23:59 +08:00
Ning Sun
a33d1e9863 ci: add cloud followup label (#1007)
ci: add cloud followup support
2023-02-15 17:17:32 +08:00
Ruihang Xia
dfe7bfb07f feat: handle PromQL HTTP API parameters (#985)
* feat: impl EvalStmt parser

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix compile errors

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* update test result

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add integration test

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix clippy

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* resolve CR comments

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* impl From<PromqlQuery> for PromQuery

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* move format into with_context

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* update test result

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* shorthand compound error

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* use rfc3339 error to report float parsing error

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* remove CompoundError

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-02-15 17:15:44 +08:00
Ruihang Xia
5d1f231004 fix: update planner state according to output plan (#1005)
* fix: update context according to planner phase

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* alias out qualifier

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* remove ignore

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix clippy

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix typo

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-02-15 16:52:14 +08:00
Ning Sun
40eec85cf7 feat: add catalog name to s3 path (#1011) 2023-02-15 08:30:09 +00:00
shuiyisong
e17d564bf0 feat: add client tls option to channel manager config (#999)
* feat: add client tls to channel manager config

* chore: move test to tests folder

* chore: fix license issue

* chore: fix cr issue
2023-02-15 16:02:27 +08:00
shuiyisong
301656d568 fix: rename schema to db in http param (#1008)
chore: rename schema to db in http script handler
2023-02-15 15:59:00 +08:00
Zheming Li
a19dee1dc0 feat: duplicate error logs into separate file (#995)
Signed-off-by: Zheming Li <nkdudu@126.com>
2023-02-15 14:27:32 +08:00
Lei, HUANG
75b8afe043 feat: compaction integration (#997)
* feat: trigger compaction on flush

* chore: rebase develop

* feat: add config item max_file_in_level0 and remove compaction_after_flush

* fix: cr comments

* chore: add unit test to cover Timestamp::new_inclusive

* fix: workaround to fix future is not Sync

* fix: future is not sync

* fix: some cr comments
2023-02-15 14:14:07 +08:00
fys
e2904b99ac feat: add retry logic for MetaPeerClient (#991)
* add retry logic in meta_peer_client

* impl need_retry function

* create meta_peer_client using the builder pattern

* cr
2023-02-15 14:12:53 +08:00
Xieqijun
de0b8aa0a0 feat: Support the DELETE SQL statement (#942)
* [WIP]:delete sql

* [fix]:time parser bug

* [fix]:resolve conflict

* [fmt]:cargo fmt

* [fix]:remove unless log

* [fix]:test

* [feat]:add error parse

* [fix]:resolve conflict

* [fix]:remove unless code

* [fix]:remove unless code

* [test]:add IT

* [fix]:add license

* [fix]:ci

* [fix]:ci

* [fix]:ci

* [fix]:remove

* [fix]:ci

* [feat]:add sql

* [fix]:modify sql

* [feat]:refactor parser_expr

* [feat]:rm backtrace

* [fix]:ci

* [fix]: conversation

* [fix]: conversation

* feat:refactor delete

* feat:refactor delete

* fix:resolve conversation

* fix:ut

* fix:ut

* fix:conversation

* fix:conversation

* fix:conservation

---------

Co-authored-by: xieqijun <qijun@apache.org>
2023-02-15 13:13:17 +08:00
Xieqijun
63e396e9e9 test: add api and doc http test (#998)
* test:add api and doc test

* fix:conservation
2023-02-15 11:55:13 +08:00
Eugene Tolbakov
4d8276790b refactor(storage): remove unused FlushIo variant (#1002)
refactor(storeage): remove unused FlushIo variant
2023-02-15 11:42:05 +08:00
Lei, HUANG
374acc8830 feat: compaction reader and writer (#972)
* feat: compaction reader and writer

* feat: make ParquetWrite accept both memtable iterator and chunk reader

* feat: adapt ParquetWriter to accomodate ChunkReaderImpl

* chore: rebase develop

* wip: compile

* wip: task logic

* feat: version and manifest update

* fix: remove useless as_inner from Timestamp vectors

* feat: mark file compacting

* fix: unit test

* fix: clippy warnings

* fix: CR comment

* chore: according to cr comments, remove visit_levels from LevelMetas

* fix: some CR comments

* fix: add PlainTimestampRowFilter for correctness

* fix: cr comments

* fix: some typos
2023-02-14 17:32:00 +08:00
shuiyisong
8491f65093 refactor: remove obj_name_to_tab_ref (#989) 2023-02-14 16:33:55 +08:00
Weny Xu
5e6f340dd9 refactor: refactor execute_stream to non-async method (#980) 2023-02-14 15:41:22 +08:00
Ruihang Xia
7b98718cd9 test: Some PromQL cases about aggregator (#977)
* port some aggregator tests

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* find two unsupported cases

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix clippy warnings

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* resolve CR comments

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix fn naming

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-02-14 15:36:00 +08:00
Yingwen
0f7e5a2fb2 feat: Implement LocalManager::recover (#981)
* feat: Implement LocalManager::recover

* feat: Impl From<ObjectStore> for ProcedureStore
2023-02-14 14:50:43 +08:00
LFC
9ad6c45913 test: Sqlness tests for distribute mode (#979)
* test: Sqlness tests for distribute mode

* ci

* fix: resolve PR comments

* fix: resolve PR comments
2023-02-14 10:24:09 +08:00
fys
7fe417e740 fix: an error occurred when requesting the http doc api (#984) 2023-02-13 11:17:27 +00:00
547 changed files with 30552 additions and 9642 deletions


@@ -2,7 +2,7 @@
GT_S3_BUCKET=S3 bucket
GT_S3_ACCESS_KEY_ID=S3 access key id
GT_S3_ACCESS_KEY=S3 secret access key
GT_S3_ENDPOINT_URL=S3 endpoint url
# Settings for oss test
GT_OSS_BUCKET=OSS bucket
GT_OSS_ACCESS_KEY_ID=OSS access key id


@@ -0,0 +1,13 @@
{
"LABEL": {
"name": "breaking change",
"color": "D93F0B"
},
"CHECKS": {
"regexp": "^(?:(?!!:).)*$",
"ignoreLabels": [
"ignore-title"
],
"alwaysPassCI": true
}
}


@@ -1,10 +1,12 @@
{
"LABEL": {
"name": "Invalid PR Title",
"color": "B60205"
},
"CHECKS": {
- "regexp": "^(feat|fix|test|refactor|chore|style|docs|perf|build|ci|revert)(\\(.*\\))?:.*",
- "ignoreLabels" : ["ignore-title"]
+ "regexp": "^(feat|fix|test|refactor|chore|style|docs|perf|build|ci|revert)(\\(.*\\))?\\!?:.*",
+ "ignoreLabels": [
+ "ignore-title"
+ ]
}
}
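For a quick sanity check of the two labeler configs above, the snippet below runs a few invented PR titles through both regular expressions. This is a minimal sketch using Python's re module; the titles are made-up examples, and the "breaking" reading (label applied when the second regexp fails to match) is inferred from how pr-title-checker uses these configs rather than stated in the diff:

```python
import re

# Title-format check from pr-title-checker-config.json: the updated pattern
# allows an optional "!" before the colon, e.g. "feat(scope)!: ...".
title_ok = re.compile(r"^(feat|fix|test|refactor|chore|style|docs|perf|build|ci|revert)(\(.*\))?\!?:.*")

# Check from pr-title-breaking-change-label-config.json: matches only titles
# with no "!:" anywhere, so a non-match is what marks a breaking change.
not_breaking = re.compile(r"^(?:(?!!:).)*$")

for title in ["feat: add thing", "feat(servers)!: drop v1 endpoint", "update stuff"]:
    print(title,
          "| title valid:", bool(title_ok.match(title)),
          "| breaking label:", not bool(not_breaking.match(title)))
```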

.github/workflows/apidoc.yml

@@ -0,0 +1,42 @@
on:
push:
branches:
- develop
paths-ignore:
- 'docs/**'
- 'config/**'
- '**.md'
- '.dockerignore'
- 'docker/**'
- '.gitignore'
name: Build API docs
env:
RUST_TOOLCHAIN: nightly-2023-02-26
jobs:
apidoc:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: arduino/setup-protoc@v1
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
- run: cargo doc --workspace --no-deps --document-private-items
- run: |
cat <<EOF > target/doc/index.html
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="refresh" content="0; url='greptime/'" />
</head>
<body></body></html>
EOF
- name: Publish dist directory
uses: JamesIves/github-pages-deploy-action@v4
with:
folder: target/doc


@@ -24,15 +24,15 @@ on:
name: CI
env:
RUST_TOOLCHAIN: nightly-2022-12-20
RUST_TOOLCHAIN: nightly-2023-02-26
jobs:
typos:
name: Spell Check with Typos
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: crate-ci/typos@v1.0.4
- uses: actions/checkout@v3
- uses: crate-ci/typos@v1.13.10
check:
name: Check
@@ -116,6 +116,7 @@ jobs:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest-8-cores
timeout-minutes: 60
needs: [clippy]
steps:
- uses: actions/checkout@v3
- uses: arduino/setup-protoc@v1
@@ -131,7 +132,7 @@ jobs:
ETCD_VER=v3.5.7
DOWNLOAD_URL=https://github.com/etcd-io/etcd/releases/download
curl -L ${DOWNLOAD_URL}/${ETCD_VER}/etcd-${ETCD_VER}-linux-amd64.tar.gz -o /tmp/etcd-${ETCD_VER}-linux-amd64.tar.gz
mkdir -p /tmp/etcd-download
tar xzvf /tmp/etcd-${ETCD_VER}-linux-amd64.tar.gz -C /tmp/etcd-download --strip-components=1
rm -f /tmp/etcd-${ETCD_VER}-linux-amd64.tar.gz
@@ -188,6 +189,7 @@ jobs:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest-8-cores
timeout-minutes: 60
needs: [clippy]
steps:
- uses: actions/checkout@v3
- uses: arduino/setup-protoc@v1
@@ -205,10 +207,16 @@ jobs:
uses: Swatinem/rust-cache@v2
- name: Install latest nextest release
uses: taiki-e/install-action@nextest
- name: Install Python
uses: actions/setup-python@v4
with:
python-version: '3.10'
- name: Install PyArrow Package
run: pip install pyarrow
- name: Install cargo-llvm-cov
uses: taiki-e/install-action@cargo-llvm-cov
- name: Collect coverage data
run: cargo llvm-cov nextest --workspace --lcov --output-path lcov.info
run: cargo llvm-cov nextest --workspace --lcov --output-path lcov.info -F pyo3_backend
env:
CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=lld"
RUST_BACKTRACE: 1
@@ -223,5 +231,5 @@ jobs:
token: ${{ secrets.CODECOV_TOKEN }}
files: ./lcov.info
flags: rust
fail_ci_if_error: true
fail_ci_if_error: false
verbose: true


@@ -1,4 +1,4 @@
name: Create Issue in docs repo on doc related changes
name: Create Issue in downstream repos
on:
issues:
@@ -23,3 +23,17 @@ jobs:
body: |
A document change request is generated from
${{ github.event.issue.html_url || github.event.pull_request.html_url }}
cloud_issue:
if: github.event.label.name == 'cloud followup required'
runs-on: ubuntu-latest
steps:
- name: create an issue in cloud repo
uses: dacbd/create-issue-action@main
with:
owner: GreptimeTeam
repo: greptimedb-cloud
token: ${{ secrets.DOCS_REPO_TOKEN }}
title: Followup changes in ${{ github.event.issue.title || github.event.pull_request.title }}
body: |
A followup request is generated from
${{ github.event.issue.html_url || github.event.pull_request.html_url }}


@@ -13,4 +13,4 @@ jobs:
steps:
- uses: actions/checkout@v2
- name: Check License Header
uses: apache/skywalking-eyes/header@main
uses: apache/skywalking-eyes/header@df70871af1a8109c9a5b1dc824faaf65246c5236


@@ -18,3 +18,12 @@ jobs:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
pass_on_octokit_error: false
configuration_path: ".github/pr-title-checker-config.json"
breaking:
runs-on: ubuntu-latest
timeout-minutes: 10
steps:
- uses: thehanimo/pr-title-checker@v1.3.4
with:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
pass_on_octokit_error: false
configuration_path: ".github/pr-title-breaking-change-label-config.json"


@@ -10,15 +10,16 @@ on:
name: Release
env:
RUST_TOOLCHAIN: nightly-2022-12-20
RUST_TOOLCHAIN: nightly-2023-02-26
# FIXME(zyy17): Would be better to use `gh release list -L 1 | cut -f 3` to get the latest release version tag, but for a long time, we will stay at 'v0.1.0-alpha-*'.
SCHEDULED_BUILD_VERSION_PREFIX: v0.1.0-alpha
SCHEDULED_BUILD_VERSION_PREFIX: v0.2.0
# In the future, we can change SCHEDULED_PERIOD to nightly.
SCHEDULED_PERIOD: weekly
SCHEDULED_PERIOD: nightly
CARGO_PROFILE: weekly
CARGO_PROFILE: nightly
## FIXME(zyy17): Enable it after the tests are stabled.
DISABLE_RUN_TESTS: true
jobs:
build:
@@ -30,16 +31,41 @@ jobs:
- arch: x86_64-unknown-linux-gnu
os: ubuntu-2004-16-cores
file: greptime-linux-amd64
continue-on-error: false
- arch: aarch64-unknown-linux-gnu
os: ubuntu-2004-16-cores
file: greptime-linux-arm64
continue-on-error: false
- arch: aarch64-apple-darwin
os: macos-latest
file: greptime-darwin-arm64
continue-on-error: false
- arch: x86_64-apple-darwin
os: macos-latest
file: greptime-darwin-amd64
continue-on-error: false
- arch: x86_64-unknown-linux-gnu
os: ubuntu-2004-16-cores
file: greptime-linux-amd64-pyo3
continue-on-error: false
opts: "-F pyo3_backend"
- arch: aarch64-unknown-linux-gnu
os: ubuntu-2004-16-cores
file: greptime-linux-arm64-pyo3
continue-on-error: false
opts: "-F pyo3_backend"
- arch: aarch64-apple-darwin
os: macos-latest
file: greptime-darwin-arm64-pyo3
continue-on-error: false
opts: "-F pyo3_backend"
- arch: x86_64-apple-darwin
os: macos-latest
file: greptime-darwin-amd64-pyo3
continue-on-error: false
opts: "-F pyo3_backend"
runs-on: ${{ matrix.os }}
continue-on-error: ${{ matrix.continue-on-error }}
if: github.repository == 'GreptimeTeam/greptimedb'
steps:
- name: Checkout sources
@@ -76,10 +102,10 @@ jobs:
ETCD_VER=v3.5.7
DOWNLOAD_URL=https://github.com/etcd-io/etcd/releases/download
curl -L ${DOWNLOAD_URL}/${ETCD_VER}/etcd-${ETCD_VER}-linux-amd64.tar.gz -o /tmp/etcd-${ETCD_VER}-linux-amd64.tar.gz
mkdir -p /tmp/etcd-download
tar xzvf /tmp/etcd-${ETCD_VER}-linux-amd64.tar.gz -C /tmp/etcd-download --strip-components=1
rm -f /tmp/etcd-${ETCD_VER}-linux-amd64.tar.gz
sudo cp -a /tmp/etcd-download/etcd* /usr/local/bin/
nohup etcd >/tmp/etcd.log 2>&1 &
@@ -93,7 +119,14 @@ jobs:
if: contains(matrix.arch, 'linux') && endsWith(matrix.arch, '-gnu')
run: |
sudo apt-get -y update
sudo apt-get -y install libssl-dev pkg-config g++-aarch64-linux-gnu gcc-aarch64-linux-gnu
sudo apt-get -y install libssl-dev pkg-config g++-aarch64-linux-gnu gcc-aarch64-linux-gnu binutils-aarch64-linux-gnu wget
# FIXME(zyy17): Should we specify the version of python when building binary for darwin?
- name: Compile Python 3.10.10 from source for linux
if: contains(matrix.arch, 'linux') && contains(matrix.opts, 'pyo3_backend')
run: |
sudo chmod +x ./docker/aarch64/compile-python.sh
sudo ./docker/aarch64/compile-python.sh ${{ matrix.arch }}
- name: Install rust toolchain
uses: dtolnay/rust-toolchain@master
@@ -105,10 +138,55 @@ jobs:
run: protoc --version ; cargo version ; rustc --version ; gcc --version ; g++ --version
- name: Run tests
if: env.DISABLE_RUN_TESTS == 'false'
run: make unit-test integration-test sqlness-test
- name: Run cargo build with pyo3 for aarch64-linux
if: contains(matrix.arch, 'aarch64-unknown-linux-gnu') && contains(matrix.opts, 'pyo3_backend')
run: |
# TODO(zyy17): We should make PYO3_CROSS_LIB_DIR configurable.
export PYTHON_INSTALL_PATH_AMD64=${PWD}/python-3.10.10/amd64
export LD_LIBRARY_PATH=$PYTHON_INSTALL_PATH_AMD64/lib:$LD_LIBRARY_PATH
export LIBRARY_PATH=$PYTHON_INSTALL_PATH_AMD64/lib:$LIBRARY_PATH
export PATH=$PYTHON_INSTALL_PATH_AMD64/bin:$PATH
export PYO3_CROSS_LIB_DIR=${PWD}/python-3.10.10/aarch64
echo "PYO3_CROSS_LIB_DIR: $PYO3_CROSS_LIB_DIR"
alias python=$PYTHON_INSTALL_PATH_AMD64/bin/python3
alias pip=$PYTHON_INSTALL_PATH_AMD64/bin/python3-pip
cargo build --profile ${{ env.CARGO_PROFILE }} --locked --target ${{ matrix.arch }} ${{ matrix.opts }}
- name: Run cargo build with pyo3 for amd64-linux
if: contains(matrix.arch, 'x86_64-unknown-linux-gnu') && contains(matrix.opts, 'pyo3_backend')
run: |
export PYTHON_INSTALL_PATH_AMD64=${PWD}/python-3.10.10/amd64
export LD_LIBRARY_PATH=$PYTHON_INSTALL_PATH_AMD64/lib:$LD_LIBRARY_PATH
export LIBRARY_PATH=$PYTHON_INSTALL_PATH_AMD64/lib:$LIBRARY_PATH
export PATH=$PYTHON_INSTALL_PATH_AMD64/bin:$PATH
echo "implementation=CPython" >> pyo3.config
echo "version=3.10" >> pyo3.config
echo "implementation=CPython" >> pyo3.config
echo "shared=true" >> pyo3.config
echo "abi3=true" >> pyo3.config
echo "lib_name=python3.10" >> pyo3.config
echo "lib_dir=$PYTHON_INSTALL_PATH_AMD64/lib" >> pyo3.config
echo "executable=$PYTHON_INSTALL_PATH_AMD64/bin/python3" >> pyo3.config
echo "pointer_width=64" >> pyo3.config
echo "build_flags=" >> pyo3.config
echo "suppress_build_script_link_lines=false" >> pyo3.config
cat pyo3.config
export PYO3_CONFIG_FILE=${PWD}/pyo3.config
alias python=$PYTHON_INSTALL_PATH_AMD64/bin/python3
alias pip=$PYTHON_INSTALL_PATH_AMD64/bin/python3-pip
cargo build --profile ${{ env.CARGO_PROFILE }} --locked --target ${{ matrix.arch }} ${{ matrix.opts }}
- name: Run cargo build
run: cargo build ${{ matrix.opts }} --profile ${{ env.CARGO_PROFILE }} --locked --target ${{ matrix.arch }}
if: contains(matrix.arch, 'darwin') || contains(matrix.opts, 'pyo3_backend') == false
run: cargo build --profile ${{ env.CARGO_PROFILE }} --locked --target ${{ matrix.arch }} ${{ matrix.opts }}
- name: Calculate checksum and rename binary
shell: bash
@@ -129,48 +207,6 @@ jobs:
with:
name: ${{ matrix.file }}.sha256sum
path: target/${{ matrix.arch }}/${{ env.CARGO_PROFILE }}/${{ matrix.file }}.sha256sum
release:
name: Release artifacts
needs: [build]
runs-on: ubuntu-latest
if: github.repository == 'GreptimeTeam/greptimedb'
steps:
- name: Checkout sources
uses: actions/checkout@v3
- name: Download artifacts
uses: actions/download-artifact@v3
- name: Configure scheduled build version # the version would be ${SCHEDULED_BUILD_VERSION_PREFIX}-YYYYMMDD-${SCHEDULED_PERIOD}, like v0.1.0-alpha-20221119-weekly.
shell: bash
if: github.event_name == 'schedule'
run: |
buildTime=`date "+%Y%m%d"`
SCHEDULED_BUILD_VERSION=${{ env.SCHEDULED_BUILD_VERSION_PREFIX }}-$buildTime-${{ env.SCHEDULED_PERIOD }}
echo "SCHEDULED_BUILD_VERSION=${SCHEDULED_BUILD_VERSION}" >> $GITHUB_ENV
- name: Create scheduled build git tag
if: github.event_name == 'schedule'
run: |
git tag ${{ env.SCHEDULED_BUILD_VERSION }}
- name: Publish scheduled release # configure the different release title and tags.
uses: softprops/action-gh-release@v1
if: github.event_name == 'schedule'
with:
name: "Release ${{ env.SCHEDULED_BUILD_VERSION }}"
tag_name: ${{ env.SCHEDULED_BUILD_VERSION }}
generate_release_notes: true
files: |
**/greptime-*
- name: Publish release
uses: softprops/action-gh-release@v1
if: github.event_name != 'schedule'
with:
name: "Release ${{ github.ref_name }}"
files: |
**/greptime-*
docker:
name: Build docker image
@@ -181,37 +217,6 @@ jobs:
- name: Checkout sources
uses: actions/checkout@v3
- name: Download amd64 binary
uses: actions/download-artifact@v3
with:
name: greptime-linux-amd64
path: amd64
- name: Unzip the amd64 artifacts
run: |
cd amd64
tar xvf greptime-linux-amd64.tgz
rm greptime-linux-amd64.tgz
- name: Download arm64 binary
uses: actions/download-artifact@v3
with:
name: greptime-linux-arm64
path: arm64
- name: Unzip the arm64 artifacts
run: |
cd arm64
tar xvf greptime-linux-arm64.tgz
rm greptime-linux-arm64.tgz
- name: Login to UCloud Container Registry
uses: docker/login-action@v2
with:
registry: uhub.service.ucloud.cn
username: ${{ secrets.UCLOUD_USERNAME }}
password: ${{ secrets.UCLOUD_PASSWORD }}
- name: Login to Dockerhub
uses: docker/login-action@v2
with:
@@ -239,8 +244,36 @@ jobs:
- name: Set up buildx
uses: docker/setup-buildx-action@v2
- name: Build and push
- name: Download amd64 binary
uses: actions/download-artifact@v3
with:
name: greptime-linux-amd64-pyo3
path: amd64
- name: Unzip the amd64 artifacts
run: |
cd amd64
tar xvf greptime-linux-amd64-pyo3.tgz
rm greptime-linux-amd64-pyo3.tgz
- name: Download arm64 binary
id: download-arm64
uses: actions/download-artifact@v3
with:
name: greptime-linux-arm64-pyo3
path: arm64
- name: Unzip the arm64 artifacts
id: unzip-arm64
if: success() || steps.download-arm64.conclusion == 'success'
run: |
cd arm64
tar xvf greptime-linux-arm64-pyo3.tgz
rm greptime-linux-arm64-pyo3.tgz
- name: Build and push all
uses: docker/build-push-action@v3
if: success() || steps.unzip-arm64.conclusion == 'success' # Build and push all platforms if unzip-arm64 succeeds
with:
context: .
file: ./docker/ci/Dockerfile
@@ -249,5 +282,105 @@ jobs:
tags: |
greptime/greptimedb:latest
greptime/greptimedb:${{ env.IMAGE_TAG }}
uhub.service.ucloud.cn/greptime/greptimedb:latest
uhub.service.ucloud.cn/greptime/greptimedb:${{ env.IMAGE_TAG }}
- name: Build and push amd64 only
uses: docker/build-push-action@v3
if: success() || steps.download-arm64.conclusion == 'failure' # Only build and push amd64 platform if download-arm64 fails
with:
context: .
file: ./docker/ci/Dockerfile
push: true
platforms: linux/amd64
tags: |
greptime/greptimedb:latest
greptime/greptimedb:${{ env.IMAGE_TAG }}
release:
name: Release artifacts
# Release artifacts only when all the artifacts are built successfully.
needs: [build,docker]
runs-on: ubuntu-latest
if: github.repository == 'GreptimeTeam/greptimedb'
steps:
- name: Checkout sources
uses: actions/checkout@v3
- name: Download artifacts
uses: actions/download-artifact@v3
- name: Configure scheduled build version # the version would be ${SCHEDULED_BUILD_VERSION_PREFIX}-${SCHEDULED_PERIOD}-YYYYMMDD, like v0.2.0-nightly-20230313.
shell: bash
if: github.event_name == 'schedule'
run: |
buildTime=`date "+%Y%m%d"`
SCHEDULED_BUILD_VERSION=${{ env.SCHEDULED_BUILD_VERSION_PREFIX }}-${{ env.SCHEDULED_PERIOD }}-$buildTime
echo "SCHEDULED_BUILD_VERSION=${SCHEDULED_BUILD_VERSION}" >> $GITHUB_ENV
- name: Create scheduled build git tag
if: github.event_name == 'schedule'
run: |
git tag ${{ env.SCHEDULED_BUILD_VERSION }}
- name: Publish scheduled release # configure the different release title and tags.
uses: softprops/action-gh-release@v1
if: github.event_name == 'schedule'
with:
name: "Release ${{ env.SCHEDULED_BUILD_VERSION }}"
tag_name: ${{ env.SCHEDULED_BUILD_VERSION }}
generate_release_notes: true
files: |
**/greptime-*
- name: Publish release
uses: softprops/action-gh-release@v1
if: github.event_name != 'schedule'
with:
name: "Release ${{ github.ref_name }}"
files: |
**/greptime-*
docker-push-uhub:
name: Push docker image to UCloud Container Registry
needs: [docker]
runs-on: ubuntu-latest
if: github.repository == 'GreptimeTeam/greptimedb'
# Push to uhub may fail (500 error), but we don't want to block the release process. The failed job will be retried manually.
continue-on-error: true
steps:
- name: Checkout sources
uses: actions/checkout@v3
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to UCloud Container Registry
uses: docker/login-action@v2
with:
registry: uhub.service.ucloud.cn
username: ${{ secrets.UCLOUD_USERNAME }}
password: ${{ secrets.UCLOUD_PASSWORD }}
- name: Configure scheduled build image tag # the tag would be ${SCHEDULED_BUILD_VERSION_PREFIX}-YYYYMMDD-${SCHEDULED_PERIOD}
shell: bash
if: github.event_name == 'schedule'
run: |
buildTime=`date "+%Y%m%d"`
SCHEDULED_BUILD_VERSION=${{ env.SCHEDULED_BUILD_VERSION_PREFIX }}-$buildTime-${{ env.SCHEDULED_PERIOD }}
echo "IMAGE_TAG=${SCHEDULED_BUILD_VERSION:1}" >> $GITHUB_ENV
- name: Configure tag # If the release tag is v0.1.0, then the image version tag will be 0.1.0.
shell: bash
if: github.event_name != 'schedule'
run: |
VERSION=${{ github.ref_name }}
echo "IMAGE_TAG=${VERSION:1}" >> $GITHUB_ENV
- name: Push image to uhub # Use 'docker buildx imagetools create' to create a new image based on the source image.
run: |
docker buildx imagetools create \
--tag uhub.service.ucloud.cn/greptime/greptimedb:latest \
--tag uhub.service.ucloud.cn/greptime/greptimedb:${{ env.IMAGE_TAG }} \
greptime/greptimedb:${{ env.IMAGE_TAG }}

View File

@@ -1,4 +1,4 @@
# Welcome!
# Welcome 👋
Thanks a lot for considering contributing to GreptimeDB. We believe people like you would make GreptimeDB a great product. We intend to build a community where individuals can have open talks, show respect for one another, and speak with true ❤️. Meanwhile, we aim to keep transparency and make your effort count here.
@@ -50,34 +50,33 @@ GreptimeDB uses the [Apache 2.0 license](https://github.com/GreptimeTeam/greptim
- To ensure that the community is free and confident in its ability to use your contributions, please sign the Contributor License Agreement (CLA), which will be incorporated in the pull request process.
- Make sure all your codes are formatted and follow the [coding style](https://pingcap.github.io/style-guide/rust/).
- Make sure all unit tests are passed.
- Make sure all unit tests pass (run them with `cargo test --workspace`, or with [nextest](https://nexte.st/index.html) via `cargo nextest run`).
- Make sure all clippy warnings are fixed (you can check it locally by running `cargo clippy --workspace --all-targets -- -D warnings -D clippy::print_stdout -D clippy::print_stderr`).
#### `pre-commit` Hooks
You can set up the [`pre-commit`](https://pre-commit.com/#plugins) hooks to run these checks automatically on every commit.
1. Install `pre-commit`
```
$ pip install pre-commit
```
or
```
$ brew install pre-commit
```
pip install pre-commit
or
brew install pre-commit
2. Install the `pre-commit` hooks
```
$ pre-commit install
pre-commit installed at .git/hooks/pre-commit
$ pre-commit install --hook-type commit-msg
pre-commit installed at .git/hooks/commit-msg
$ pre-commit install
pre-commit installed at .git/hooks/pre-commit
$ pre-commit install --hook-type pre-push
pre-commit installed at .git/hooks/pre-pus
```
$ pre-commit install --hook-type commit-msg
pre-commit installed at .git/hooks/commit-msg
now `pre-commit` will run automatically on `git commit`.
$ pre-commit install --hook-type pre-push
pre-commit installed at .git/hooks/pre-push
Now, `pre-commit` will run automatically on `git commit`.
### Title
@@ -102,10 +101,12 @@ of what you were trying to do and what went wrong. You can also reach for help i
## Community
The core team will be thrilled if you participate in any way you like. When you are stuck, try ask for help by filing an issue, with a detailed description of what you were trying to do and what went wrong. If you have any questions or if you would like to get involved in our community, please check out:
- [GreptimeDB Community Slack](https://greptime.com/slack)
- [GreptimeDB Github Discussions](https://github.com/GreptimeTeam/greptimedb/discussions)
Also, see some extra GreptimeDB content:
- [GreptimeDB Docs](https://greptime.com/docs)
- [Learn GreptimeDB](https://greptime.com/products/db)
- [Greptime Inc. Website](https://greptime.com)

1908
Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@@ -7,17 +7,20 @@ members = [
"src/cmd",
"src/common/base",
"src/common/catalog",
"src/common/datasource",
"src/common/error",
"src/common/function",
"src/common/function-macro",
"src/common/grpc",
"src/common/grpc-expr",
"src/common/mem-prof",
"src/common/procedure",
"src/common/query",
"src/common/recordbatch",
"src/common/runtime",
"src/common/substrait",
"src/common/telemetry",
"src/common/test-util",
"src/common/time",
"src/datanode",
"src/datatypes",
@@ -37,47 +40,50 @@ members = [
"src/storage",
"src/store-api",
"src/table",
"src/table-procedure",
"tests-integration",
"tests/runner",
]
[workspace.package]
version = "0.1.0"
version = "0.1.1"
edition = "2021"
license = "Apache-2.0"
[workspace.dependencies]
arrow = "29.0"
arrow-flight = "29.0"
arrow-schema = { version = "29.0", features = ["serde"] }
arrow = { version = "34.0" }
arrow-array = "34.0"
arrow-flight = "34.0"
arrow-schema = { version = "34.0", features = ["serde"] }
async-stream = "0.3"
async-trait = "0.1"
chrono = { version = "0.4", features = ["serde"] }
# TODO(LFC): Use released Datafusion when it officially dpendent on Arrow 29.0
datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "4917235a398ae20145c87d20984e6367dc1a0c1e" }
datafusion-common = { git = "https://github.com/apache/arrow-datafusion.git", rev = "4917235a398ae20145c87d20984e6367dc1a0c1e" }
datafusion-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "4917235a398ae20145c87d20984e6367dc1a0c1e" }
datafusion-optimizer = { git = "https://github.com/apache/arrow-datafusion.git", rev = "4917235a398ae20145c87d20984e6367dc1a0c1e" }
datafusion-physical-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "4917235a398ae20145c87d20984e6367dc1a0c1e" }
datafusion-sql = { git = "https://github.com/apache/arrow-datafusion.git", rev = "4917235a398ae20145c87d20984e6367dc1a0c1e" }
datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "146a949218ec970784974137277cde3b4e547d0a" }
datafusion-common = { git = "https://github.com/apache/arrow-datafusion.git", rev = "146a949218ec970784974137277cde3b4e547d0a" }
datafusion-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "146a949218ec970784974137277cde3b4e547d0a" }
datafusion-optimizer = { git = "https://github.com/apache/arrow-datafusion.git", rev = "146a949218ec970784974137277cde3b4e547d0a" }
datafusion-physical-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "146a949218ec970784974137277cde3b4e547d0a" }
datafusion-sql = { git = "https://github.com/apache/arrow-datafusion.git", rev = "146a949218ec970784974137277cde3b4e547d0a" }
futures = "0.3"
futures-util = "0.3"
parquet = "29.0"
parquet = "34.0"
paste = "1.0"
prost = "0.11"
rand = "0.8"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
snafu = { version = "0.7", features = ["backtraces"] }
sqlparser = "0.28"
sqlparser = "0.32"
tempfile = "3"
tokio = { version = "1.24.2", features = ["full"] }
tokio-util = "0.7"
tonic = "0.8"
tonic = { version = "0.8", features = ["tls"] }
uuid = { version = "1", features = ["serde", "v4", "fast-rng"] }
[profile.release]
debug = true
[profile.weekly]
[profile.nightly]
inherits = "release"
strip = true
lto = "thin"

View File

@@ -19,6 +19,10 @@ clean: ## Clean the project.
fmt: ## Format all the Rust code.
cargo fmt --all
.PHONY: fmt-toml
fmt-toml: ## Format all TOML files.
taplo format --check --option "indent_string= "
.PHONY: docker-image
docker-image: ## Build docker image.
docker build --network host -f docker/Dockerfile -t ${IMAGE_REGISTRY}:${IMAGE_TAG} .

View File

@@ -61,6 +61,12 @@ To compile GreptimeDB from source, you'll need:
find installation instructions [here](https://grpc.io/docs/protoc-installation/).
**Note that `protoc` version needs to be >= 3.15** because we have used the `optional`
keyword. You can check it with `protoc --version`.
- python3-dev or python3-devel (optional; only needed if you want to run scripts
in CPython, which also requires enabling the `pyo3_backend` feature when compiling (via `cargo run -F pyo3_backend`, or by adding `pyo3_backend` to `features.default` in src/script/Cargo.toml, like `default = ["python", "pyo3_backend"]`)): this installs the Python shared library required for running the Python
scripting engine (in CPython mode). It is available as `python3-dev` on
Ubuntu, where you can install it with `sudo apt install python3-dev`, or as
`python3-devel` on RPM-based distributions (e.g. Fedora, Red Hat, SuSE). macOS's
`Python3` package should include this shared library by default. More details on compiling with PyO3 can be found in [PyO3](https://pyo3.rs/v0.18.1/building_and_distribution#configuring-the-python-version)'s documentation.
#### Build with Docker
@@ -141,9 +147,9 @@ You can always cleanup test database by removing `/tmp/greptimedb`.
### Installation
- [Pre-built Binaries](https://github.com/GreptimeTeam/greptimedb/releases):
downloadable pre-built binaries for Linux and MacOS
- [Docker Images](https://hub.docker.com/r/greptime/greptimedb): pre-built
Docker images
For Linux and macOS, you can easily download pre-built binaries that are ready to use. In most cases, the version without PyO3 is sufficient. However, if you plan to run scripts in CPython (and use Python packages like NumPy and Pandas), you will need the version with PyO3, plus a locally installed Python whose version matches the one the PyO3 binary was built against. We recommend using virtualenv to manage multiple Python versions during installation.
- [Docker Images](https://hub.docker.com/r/greptime/greptimedb) (**recommended**): pre-built
Docker images; this is the easiest way to try GreptimeDB. By default, they run CPython scripts with `pyo3_backend` enabled.
- [`gtctl`](https://github.com/GreptimeTeam/gtctl): the command-line tool for
Kubernetes deployment

View File

@@ -21,13 +21,13 @@ use std::collections::HashMap;
use std::path::{Path, PathBuf};
use std::time::Instant;
use arrow::array::{ArrayRef, PrimitiveArray, StringArray, TimestampNanosecondArray};
use arrow::array::{ArrayRef, PrimitiveArray, StringArray, TimestampMicrosecondArray};
use arrow::datatypes::{DataType, Float64Type, Int64Type};
use arrow::record_batch::RecordBatch;
use clap::Parser;
use client::api::v1::column::Values;
use client::api::v1::{Column, ColumnDataType, ColumnDef, CreateTableExpr, InsertRequest, TableId};
use client::{Client, Database};
use client::api::v1::{Column, ColumnDataType, ColumnDef, CreateTableExpr, InsertRequest};
use client::{Client, Database, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use indicatif::{MultiProgress, ProgressBar, ProgressStyle};
use parquet::arrow::arrow_reader::ParquetRecordBatchReaderBuilder;
use tokio::task::JoinSet;
@@ -61,7 +61,7 @@ struct Args {
#[arg(long = "skip-read")]
skip_read: bool,
#[arg(short, long, default_value_t = String::from("127.0.0.1:3001"))]
#[arg(short, long, default_value_t = String::from("127.0.0.1:4001"))]
endpoint: String,
}
@@ -97,6 +97,9 @@ async fn write_data(
for record_batch in record_batch_reader {
let record_batch = record_batch.unwrap();
if !is_record_batch_full(&record_batch) {
continue;
}
let (columns, row_count) = convert_record_batch(record_batch);
let request = InsertRequest {
table_name: TABLE_NAME.to_string(),
@@ -122,11 +125,16 @@ fn convert_record_batch(record_batch: RecordBatch) -> (Vec<Column>, u32) {
let mut columns = vec![];
for (array, field) in record_batch.columns().iter().zip(fields.iter()) {
let values = build_values(array);
let (values, datatype) = build_values(array);
let column = Column {
column_name: field.name().to_owned(),
values: Some(values),
null_mask: vec![],
null_mask: array
.data()
.null_bitmap()
.map(|bitmap| bitmap.buffer().as_slice().to_vec())
.unwrap_or_default(),
datatype: datatype.into(),
// datatype and semantic_type are set to default
..Default::default()
};
@@ -136,7 +144,7 @@ fn convert_record_batch(record_batch: RecordBatch) -> (Vec<Column>, u32) {
(columns, row_count as _)
}
fn build_values(column: &ArrayRef) -> Values {
fn build_values(column: &ArrayRef) -> (Values, ColumnDataType) {
match column.data_type() {
DataType::Int64 => {
let array = column
@@ -144,10 +152,13 @@ fn build_values(column: &ArrayRef) -> Values {
.downcast_ref::<PrimitiveArray<Int64Type>>()
.unwrap();
let values = array.values();
Values {
i64_values: values.to_vec(),
..Default::default()
}
(
Values {
i64_values: values.to_vec(),
..Default::default()
},
ColumnDataType::Int64,
)
}
DataType::Float64 => {
let array = column
@@ -155,29 +166,38 @@ fn build_values(column: &ArrayRef) -> Values {
.downcast_ref::<PrimitiveArray<Float64Type>>()
.unwrap();
let values = array.values();
Values {
f64_values: values.to_vec(),
..Default::default()
}
(
Values {
f64_values: values.to_vec(),
..Default::default()
},
ColumnDataType::Float64,
)
}
DataType::Timestamp(_, _) => {
let array = column
.as_any()
.downcast_ref::<TimestampNanosecondArray>()
.downcast_ref::<TimestampMicrosecondArray>()
.unwrap();
let values = array.values();
Values {
i64_values: values.to_vec(),
..Default::default()
}
(
Values {
i64_values: values.to_vec(),
..Default::default()
},
ColumnDataType::Int64,
)
}
DataType::Utf8 => {
let array = column.as_any().downcast_ref::<StringArray>().unwrap();
let values = array.iter().filter_map(|s| s.map(String::from)).collect();
Values {
string_values: values,
..Default::default()
}
(
Values {
string_values: values,
..Default::default()
},
ColumnDataType::String,
)
}
DataType::Null
| DataType::Boolean
@@ -208,10 +228,15 @@ fn build_values(column: &ArrayRef) -> Values {
| DataType::Dictionary(_, _)
| DataType::Decimal128(_, _)
| DataType::Decimal256(_, _)
| DataType::RunEndEncoded(_, _)
| DataType::Map(_, _) => todo!(),
}
}
fn is_record_batch_full(batch: &RecordBatch) -> bool {
batch.columns().iter().all(|col| col.null_count() == 0)
}
fn create_table_expr() -> CreateTableExpr {
CreateTableExpr {
catalog_name: CATALOG_NAME.to_string(),
@@ -339,7 +364,7 @@ fn create_table_expr() -> CreateTableExpr {
create_if_not_exists: false,
table_options: Default::default(),
region_ids: vec![0],
table_id: Some(TableId { id: 0 }),
table_id: None,
}
}
@@ -422,7 +447,7 @@ fn main() {
.unwrap()
.block_on(async {
let client = Client::with_urls(vec![&args.endpoint]);
let db = Database::with_client(client);
let db = Database::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, client);
if !args.skip_write {
do_write(&args, &db).await;

View File

@@ -8,3 +8,5 @@ coverage:
ignore:
- "**/error*.rs" # ignore all error.rs files
- "tests/runner/*.rs" # ignore integration test runner
comment: # this is a top-level key
layout: "diff"

View File

@@ -1,26 +1,50 @@
node_id = 42
mode = 'distributed'
rpc_addr = '127.0.0.1:3001'
rpc_hostname = '127.0.0.1'
rpc_runtime_size = 8
mysql_addr = '127.0.0.1:4406'
mysql_runtime_size = 4
# Node running mode, see `standalone.example.toml`.
mode = "distributed"
# Whether to use in-memory catalog, see `standalone.example.toml`.
enable_memory_catalog = false
# The datanode identifier, should be unique.
node_id = 42
# gRPC server address, "127.0.0.1:3001" by default.
rpc_addr = "127.0.0.1:3001"
# Hostname of this node.
rpc_hostname = "127.0.0.1"
# The number of gRPC server worker threads, 8 by default.
rpc_runtime_size = 8
# Metasrv client options.
[meta_client_options]
# Metasrv address list.
metasrv_addrs = ["127.0.0.1:3002"]
# Operation timeout in milliseconds, 3000 by default.
timeout_millis = 3000
# Connect server timeout in milliseconds, 5000 by default.
connect_timeout_millis = 5000
# `TCP_NODELAY` option for accepted connections, true by default.
tcp_nodelay = true
# WAL options, see `standalone.example.toml`.
[wal]
dir = "/tmp/greptimedb/wal"
file_size = '1GB'
purge_interval = '10m'
purge_threshold = '50GB'
file_size = "1GB"
purge_threshold = "50GB"
purge_interval = "10m"
read_batch_size = 128
sync_write = false
# Storage options, see `standalone.example.toml`.
[storage]
type = 'File'
data_dir = '/tmp/greptimedb/data/'
type = "File"
data_dir = "/tmp/greptimedb/data/"
[meta_client_opts]
metasrv_addrs = ['127.0.0.1:3002']
timeout_millis = 3000
connect_timeout_millis = 5000
tcp_nodelay = false
# Compaction options, see `standalone.example.toml`.
[compaction]
max_inflight_tasks = 4
max_files_in_level0 = 8
max_purge_tasks = 32
# Procedure storage options, see `standalone.example.toml`.
# [procedure.store]
# type = "File"
# data_dir = "/tmp/greptimedb/procedure/"
# max_retry_times = 3
# retry_delay = "500ms"

View File

@@ -1,12 +1,58 @@
mode = 'distributed'
datanode_rpc_addr = '127.0.0.1:3001'
# Node running mode, see `standalone.example.toml`.
mode = "distributed"
# HTTP server options, see `standalone.example.toml`.
[http_options]
addr = '127.0.0.1:4000'
addr = "127.0.0.1:4000"
timeout = "30s"
[meta_client_opts]
metasrv_addrs = ['127.0.0.1:3002']
# gRPC server options, see `standalone.example.toml`.
[grpc_options]
addr = "127.0.0.1:4001"
runtime_size = 8
# MySQL server options, see `standalone.example.toml`.
[mysql_options]
addr = "127.0.0.1:4002"
runtime_size = 2
# MySQL server TLS options, see `standalone.example.toml`.
[mysql_options.tls]
mode = "disable"
cert_path = ""
key_path = ""
# PostgreSQL server options, see `standalone.example.toml`.
[postgres_options]
addr = "127.0.0.1:4003"
runtime_size = 2
# PostgreSQL server TLS options, see `standalone.example.toml`.
[postgres_options.tls]
mode = "disable"
cert_path = ""
key_path = ""
# OpenTSDB protocol options, see `standalone.example.toml`.
[opentsdb_options]
addr = "127.0.0.1:4242"
runtime_size = 2
# InfluxDB protocol options, see `standalone.example.toml`.
[influxdb_options]
enable = true
# Prometheus protocol options, see `standalone.example.toml`.
[prometheus_options]
enable = true
# Prom protocol options, see `standalone.example.toml`.
[prom_options]
addr = "127.0.0.1:4004"
# Metasrv client options, see `datanode.example.toml`.
[meta_client_options]
metasrv_addrs = ["127.0.0.1:3002"]
timeout_millis = 3000
connect_timeout_millis = 5000
tcp_nodelay = false
tcp_nodelay = true

View File

@@ -1,6 +1,15 @@
bind_addr = '127.0.0.1:3002'
server_addr = '127.0.0.1:3002'
store_addr = '127.0.0.1:2379'
# The bind address of metasrv, "127.0.0.1:3002" by default.
bind_addr = "127.0.0.1:3002"
# The server address that frontend and datanode use to connect to metasrv, "127.0.0.1:3002" by default for localhost.
server_addr = "127.0.0.1:3002"
# Etcd server address, "127.0.0.1:2379" by default.
store_addr = "127.0.0.1:2379"
# Datanode lease in seconds, 15 seconds by default.
datanode_lease_secs = 15
# selector: 'LeaseBased', 'LoadBased'
selector = 'LeaseBased'
# Datanode selector type.
# - "LeaseBased" (default value).
# - "LoadBased"
# For details, please see "https://docs.greptime.com/developer-guide/meta/selector".
selector = "LeaseBased"
# Store data in memory, false by default.
use_memory_store = false

View File

@@ -1,44 +1,120 @@
node_id = 0
mode = 'standalone'
# Node running mode, "standalone" or "distributed".
mode = "standalone"
# Whether to use in-memory catalog, `false` by default.
enable_memory_catalog = false
# HTTP server options.
[http_options]
addr = '127.0.0.1:4000'
# Server address, "127.0.0.1:4000" by default.
addr = "127.0.0.1:4000"
# HTTP request timeout, 30s by default.
timeout = "30s"
[wal]
dir = "/tmp/greptimedb/wal"
file_size = '1GB'
purge_interval = '10m'
purge_threshold = '50GB'
read_batch_size = 128
sync_write = false
[storage]
type = 'File'
data_dir = '/tmp/greptimedb/data/'
# gRPC server options.
[grpc_options]
addr = '127.0.0.1:4001'
# Server address, "127.0.0.1:4001" by default.
addr = "127.0.0.1:4001"
# The number of server worker threads, 8 by default.
runtime_size = 8
# MySQL server options.
[mysql_options]
addr = '127.0.0.1:4002'
# Server address, "127.0.0.1:4002" by default.
addr = "127.0.0.1:4002"
# The number of server worker threads, 2 by default.
runtime_size = 2
[influxdb_options]
enable = true
[opentsdb_options]
addr = '127.0.0.1:4242'
enable = true
runtime_size = 2
[prometheus_options]
enable = true
# MySQL server TLS options.
[mysql_options.tls]
# TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html
# - "disable" (default value)
# - "prefer"
# - "require"
# - "verify-ca"
# - "verify-full"
mode = "disable"
# Certificate file path.
cert_path = ""
# Private key file path.
key_path = ""
# PostgreSQL server options.
[postgres_options]
addr = '127.0.0.1:4003'
# Server address, "127.0.0.1:4003" by default.
addr = "127.0.0.1:4003"
# The number of server worker threads, 2 by default.
runtime_size = 2
check_pwd = false
# PostgreSQL server TLS options, see the `[mysql_options.tls]` section.
[postgres_options.tls]
# TLS mode.
mode = "disable"
# certificate file path.
cert_path = ""
# private key file path.
key_path = ""
# OpenTSDB protocol options.
[opentsdb_options]
# OpenTSDB telnet API server address, "127.0.0.1:4242" by default.
addr = "127.0.0.1:4242"
# The number of server worker threads, 2 by default.
runtime_size = 2
# InfluxDB protocol options.
[influxdb_options]
# Whether to enable InfluxDB protocol in HTTP API, true by default.
enable = true
# Prometheus protocol options.
[prometheus_options]
# Whether to enable Prometheus remote write and read in HTTP API, true by default.
enable = true
# Prom protocol options.
[prom_options]
# Prometheus API server address, "127.0.0.1:4004" by default.
addr = "127.0.0.1:4004"
# WAL options.
[wal]
# WAL data directory.
dir = "/tmp/greptimedb/wal"
# WAL file size in bytes.
file_size = "1GB"
# WAL purge threshold in bytes.
purge_threshold = "50GB"
# WAL purge interval in seconds.
purge_interval = "10m"
# WAL read batch size.
read_batch_size = 128
# Whether to sync log file after every write.
sync_write = false
# Storage options.
[storage]
# Storage type.
type = "File"
# Data directory, "/tmp/greptimedb/data" by default.
data_dir = "/tmp/greptimedb/data/"
# Compaction options.
[compaction]
# Max number of compaction tasks that can run concurrently.
max_inflight_tasks = 4
# Max files in level 0 to trigger compaction.
max_files_in_level0 = 8
# Max number of SST purge tasks after compaction.
max_purge_tasks = 32
# Procedure storage options.
# Uncomment to enable.
# [procedure.store]
# # Storage type.
# type = "File"
# # Procedure data path.
# data_dir = "/tmp/greptimedb/procedure/"
# # Procedure max retry time.
# max_retry_times = 3
# # Initial retry delay of procedures, increases exponentially
# retry_delay = "500ms"

View File

@@ -9,7 +9,10 @@ RUN apt-get update && apt-get install -y \
protobuf-compiler \
curl \
build-essential \
pkg-config
pkg-config \
python3 \
python3-dev \
&& pip install pyarrow
# Install Rust.
SHELL ["/bin/bash", "-c"]

57
docker/aarch64/Dockerfile Normal file
View File

@@ -0,0 +1,57 @@
FROM ubuntu:22.04 as builder
ENV LANG en_US.utf8
WORKDIR /greptimedb
# Install dependencies.
RUN apt-get update && apt-get install -y \
libssl-dev \
protobuf-compiler \
curl \
build-essential \
pkg-config \
wget
# Install Rust.
SHELL ["/bin/bash", "-c"]
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
ENV PATH /root/.cargo/bin/:$PATH
# Install cross platform toolchain
RUN apt-get -y update && \
apt-get -y install g++-aarch64-linux-gnu gcc-aarch64-linux-gnu && \
apt-get install binutils-aarch64-linux-gnu
COPY ./docker/aarch64/compile-python.sh ./docker/aarch64/
RUN chmod +x ./docker/aarch64/compile-python.sh && \
./docker/aarch64/compile-python.sh
COPY ./rust-toolchain.toml .
# Install rustup target for cross compiling.
RUN rustup target add aarch64-unknown-linux-gnu
COPY . .
# Update dependencies, using a separate `RUN` to get a separate cache layer
RUN cargo fetch
# These three env vars are set in the script, so they are set manually in the Dockerfile.
ENV LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/lib/
ENV LIBRARY_PATH=$LIBRARY_PATH:/usr/local/lib/
ENV PY_INSTALL_PATH=/greptimedb/python_arm64_build
# Set the environment variable for cross compiling and compile it
# The cross-compiled Python is `python3` in PATH, but pyo3 needs `python` in PATH, so alias it
# Build the project in release mode.
RUN export PYO3_CROSS_LIB_DIR=$PY_INSTALL_PATH/lib && \
alias python=python3 && \
cargo build --target aarch64-unknown-linux-gnu --release -F pyo3_backend
# Exporting the binary to the clean image
FROM ubuntu:22.04 as base
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get -y install ca-certificates
WORKDIR /greptime
COPY --from=builder /greptimedb/target/aarch64-unknown-linux-gnu/release/greptime /greptime/bin/
ENV PATH /greptime/bin/:$PATH
ENTRYPOINT ["greptime"]

View File

@@ -0,0 +1,87 @@
#!/usr/bin/env bash
set -e
# This script will download the Python source code, compile it, and install it to /usr/local/lib,
# then use this Python to build a cross-compiled Python for aarch64
ARCH=$1
PYTHON_VERSION=3.10.10
PYTHON_SOURCE_DIR=Python-${PYTHON_VERSION}
PYTHON_INSTALL_PATH_AMD64=${PWD}/python-${PYTHON_VERSION}/amd64
PYTHON_INSTALL_PATH_AARCH64=${PWD}/python-${PYTHON_VERSION}/aarch64
function download_python_source_code() {
wget https://www.python.org/ftp/python/$PYTHON_VERSION/Python-$PYTHON_VERSION.tgz
tar -xvf Python-$PYTHON_VERSION.tgz
}
function compile_for_amd64_platform() {
mkdir -p "$PYTHON_INSTALL_PATH_AMD64"
echo "Compiling for amd64 platform..."
./configure \
--prefix="$PYTHON_INSTALL_PATH_AMD64" \
--enable-shared \
ac_cv_pthread_is_default=no ac_cv_pthread=yes ac_cv_cxx_thread=yes \
ac_cv_have_long_long_format=yes \
--disable-ipv6 ac_cv_file__dev_ptmx=no ac_cv_file__dev_ptc=no
make
make install
}
# Explain the Python compile options here a bit:
# --enable-shared: enable building a shared Python library (default is no) but we do need it for calling from rust
# CC, CXX, AR, LD, RANLIB: set the compiler, archiver, linker, and ranlib programs to use
# build: the machine you are building on, host: the machine you will run the compiled program on
# --with-system-ffi: build _ctypes module using an installed ffi library, see Doc/library/ctypes.rst, not used in here TODO: could remove
# ac_cv_pthread_is_default=no ac_cv_pthread=yes ac_cv_cxx_thread=yes:
# allow cross-compiled python to have -pthread set for CXX, see https://github.com/python/cpython/pull/22525
# ac_cv_have_long_long_format=yes: target platform supports long long type
# disable-ipv6: disable ipv6 support, we don't need it in here
# ac_cv_file__dev_ptmx=no ac_cv_file__dev_ptc=no: disable pty support, we don't need it in here
function compile_for_aarch64_platform() {
export LD_LIBRARY_PATH=$PYTHON_INSTALL_PATH_AMD64/lib:$LD_LIBRARY_PATH
export LIBRARY_PATH=$PYTHON_INSTALL_PATH_AMD64/lib:$LIBRARY_PATH
export PATH=$PYTHON_INSTALL_PATH_AMD64/bin:$PATH
mkdir -p "$PYTHON_INSTALL_PATH_AARCH64"
echo "Compiling for aarch64 platform..."
echo "LD_LIBRARY_PATH: $LD_LIBRARY_PATH"
echo "LIBRARY_PATH: $LIBRARY_PATH"
echo "PATH: $PATH"
./configure --build=x86_64-linux-gnu --host=aarch64-linux-gnu \
--prefix="$PYTHON_INSTALL_PATH_AARCH64" --enable-optimizations \
CC=aarch64-linux-gnu-gcc \
CXX=aarch64-linux-gnu-g++ \
AR=aarch64-linux-gnu-ar \
LD=aarch64-linux-gnu-ld \
RANLIB=aarch64-linux-gnu-ranlib \
--enable-shared \
ac_cv_pthread_is_default=no ac_cv_pthread=yes ac_cv_cxx_thread=yes \
ac_cv_have_long_long_format=yes \
--disable-ipv6 ac_cv_file__dev_ptmx=no ac_cv_file__dev_ptc=no
make
make altinstall
}
# Main script starts here.
download_python_source_code
# Enter the python source code directory.
cd $PYTHON_SOURCE_DIR || exit 1
# Build local python first, then build cross-compiled python.
compile_for_amd64_platform
# Clean the build directory.
make clean && make distclean
# Cross compile python for aarch64.
if [ "$ARCH" = "aarch64-unknown-linux-gnu" ]; then
compile_for_aarch64_platform
fi

View File

@@ -1,6 +1,12 @@
FROM ubuntu:22.04
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get -y install ca-certificates
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
ca-certificates \
python3.10 \
python3.10-dev \
python3-pip
RUN python3 -m pip install pyarrow
ARG TARGETARCH

View File

@@ -149,10 +149,10 @@ inputs:
- title: 'Series Normalize: \noffset = 0'
operator: prom
inputs:
- title: 'Filter: \ntimetamp > 2022-12-20T10:00:00 && timestamp < 2022-12-21T10:00:00'
- title: 'Filter: \ntimestamp > 2022-12-20T10:00:00 && timestamp < 2022-12-21T10:00:00'
operator: filter
inputs:
- title: 'Table Scan: \ntable = request_duration, timetamp > 2022-12-20T10:00:00 && timestamp < 2022-12-21T10:00:00'
- title: 'Table Scan: \ntable = request_duration, timestamp > 2022-12-20T10:00:00 && timestamp < 2022-12-21T10:00:00'
operator: scan -->
![example](example.png)

View File

@@ -0,0 +1,196 @@
---
Feature Name: "Fault Tolerance for Region"
Tracking Issue: https://github.com/GreptimeTeam/greptimedb/issues/1126
Date: 2023-03-08
Author: "Luo Fucong <luofucong@greptime.com>"
---
Fault Tolerance for Region
----------------------
# Summary
This RFC proposes a method to achieve fault tolerance for regions in GreptimeDB's distributed mode. Or, to put it another way, achieving region high availability ("HA") for the GreptimeDB cluster.
In this RFC, we mainly describe two aspects of region HA: how region availability is detected, and what recovery process needs to be taken. We also discuss some alternatives and future work.
When this feature is done, our users can expect a GreptimeDB cluster that can always handle their requests to regions, even though some requests may fail during region failover. Optimizations to reduce the MTTR (Mean Time To Recovery) are not a concern of this RFC and are left for future work.
# Motivation
Fault tolerance for regions is a critical feature for our clients to use the GreptimeDB cluster confidently. High availability of access to their stored data is a "must have" for any TSDB product, and that includes our GreptimeDB cluster.
# Details
## Background
Some background about regions in distributed mode:
- A table is logically split into multiple regions. Each region stores a part of non-overlapping table data.
- Regions are distributed across Datanodes; the mappings are not static, and are assigned and governed by Metasrv.
- In distributed mode, client requests are scoped to regions. To be more specific, when a request that needs to scan multiple regions arrives at the Frontend, the Frontend splits it into multiple sub-requests, each of which scans one region only, and submits them to the Datanodes that hold the corresponding regions (a toy sketch of this splitting follows below).
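The splitting can be pictured roughly as follows. The `RouteTable`/`ScanRequest` names and the grouping-by-Datanode shape are hypothetical, made up only to illustrate the idea described above, not the actual Frontend code.
```rust
use std::collections::HashMap;

/// Hypothetical route table: which Datanode currently holds which region.
struct RouteTable {
    region_to_datanode: HashMap<u64, String>,
}

/// A scan that covers several regions of one table.
struct ScanRequest {
    table: String,
    regions: Vec<u64>,
}

/// Split a multi-region scan into one group of regions per Datanode; each
/// group then becomes a sub-request sent to the Datanode holding those regions.
fn split_scan(route: &RouteTable, req: &ScanRequest) -> HashMap<String, Vec<u64>> {
    let mut per_datanode: HashMap<String, Vec<u64>> = HashMap::new();
    for region in &req.regions {
        if let Some(datanode) = route.region_to_datanode.get(region) {
            per_datanode.entry(datanode.clone()).or_default().push(*region);
        }
    }
    per_datanode
}
```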
In conclusion, as long as regions remain available, and can regain availability when failures do occur, overall region HA is achieved. With this in mind, let's first see how region failures are detected.
## Failure Detection
We detect region failures in Metasrv, and do it both passively and actively. Passively means that Metasrv does not fire "are you healthy" requests at regions. Instead, region health information is carried in the heartbeat requests that Datanodes submit to Metasrv.
Datanodes already carry their region stats in the heartbeat request (the non-relevant fields are omitted):
```protobuf
message HeartbeatRequest {
...
// Region stats on this node
repeated RegionStat region_stats = 6;
...
}
message RegionStat {
uint64 region_id = 1;
TableName table_name = 2;
...
}
```
For the sake of simplicity, we don't add another field `bool available = 3` to the `RegionStat` message; instead, if a region is unavailable in the view of the Datanode that contains it, the Datanode simply does not include that region's `RegionStat` in the heartbeat request. And if the Datanode itself is unavailable, the heartbeat request is not submitted at all, which is effectively the same as not carrying the `RegionStat`.
> The heartbeat interval is now hardcoded to five seconds.
Metasrv gathers the heartbeat requests, extracts the `RegionStat`s, and treats them as region heartbeats. In this way, Metasrv maintains health information for all regions. If some region's heartbeats are not received for a period of time, Metasrv suspects that the region might be unavailable. To decide whether a region has actually failed, Metasrv uses a failure detection algorithm called "[Phi φ Accrual Failure Detection](https://medium.com/@arpitbhayani/phi-%CF%86-accrual-failure-detection-79c21ce53a7a)". Basically, the algorithm calculates a value called "phi" that represents the likelihood of a region being unavailable, based on the historical heartbeat arrival rate. Once the "phi" rises above some pre-defined threshold, Metasrv considers the region failed.
> This algorithm has been widely adopted in some well-known products, like Akka and Cassandra.
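To make the phi computation concrete, here is a minimal, self-contained sketch of one way the value could be derived from heartbeat arrival history. It is illustrative only: the `PhiSketch` type and the logistic tail approximation are assumptions made for this example, not the detector actually implemented in GreptimeDB.
```rust
/// A minimal sketch of a phi accrual estimator (illustrative only; the real
/// detector may differ in both interface and math).
struct PhiSketch {
    /// Recent heartbeat inter-arrival times, in seconds.
    intervals: Vec<f64>,
    /// Time of the last received heartbeat, in seconds.
    last_heartbeat: f64,
}

impl PhiSketch {
    /// Record a heartbeat arriving at time `now`.
    fn on_heartbeat(&mut self, now: f64) {
        self.intervals.push(now - self.last_heartbeat);
        self.last_heartbeat = now;
    }

    /// phi = -log10(probability that the next heartbeat is merely late).
    /// The longer the silence relative to past intervals, the higher phi gets.
    fn phi(&self, now: f64) -> f64 {
        if self.intervals.is_empty() {
            return 0.0;
        }
        let n = self.intervals.len() as f64;
        let mean = self.intervals.iter().sum::<f64>() / n;
        let var = self.intervals.iter().map(|x| (x - mean).powi(2)).sum::<f64>() / n;
        let std_dev = var.sqrt().max(1e-3);
        // How far past the mean interval the current silence is, in std deviations.
        let y = (now - self.last_heartbeat - mean) / std_dev;
        // Logistic approximation of the tail probability, clamped to stay positive.
        let p_later = (1.0 / (1.0 + (3.0 * y).exp())).max(f64::MIN_POSITIVE);
        (-p_later.log10()).max(0.0)
    }
}

fn main() {
    let mut d = PhiSketch {
        intervals: vec![4.8, 5.2, 5.0, 4.9, 5.1, 5.0, 4.7, 5.3, 5.0, 5.0],
        last_heartbeat: 50.0,
    };
    d.on_heartbeat(55.0);
    println!("phi right after a heartbeat:      {:.2}", d.phi(55.5));
    println!("phi after missing a few beats:    {:.2}", d.phi(62.0));
}
```
With roughly five-second heartbeats, phi stays near zero right after a heartbeat and climbs quickly once heartbeats stop; that is the value Metasrv would compare against its pre-defined threshold.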
When Metasrv decides from heartbeats that some region has failed, that's not the final verdict. Here comes the "active" detection. Before Metasrv decides to do region failover, it actively invokes the health check interface of the Datanode on which the failed region resides. Only if this health check fails does Metasrv actually start failover for the region.
To conclude, the failure detection pseudo-code looks like this:
```rust
// in Metasrv:
fn failure_detection() {
loop {
// passive detection
let failed_regions = all_regions.iter().filter(|r| r.estimated_failure_possibility() > config.phi).collect();
// find the datanodes that contains the failed regions
let datanodes_and_regions = find_region_resides_datanodes(failed_regions);
// active detection
for (datanode, regions) in datanodes_and_regions {
if !datanode.is_healthy(regions) {
do_failover(datanode, regions);
}
}
sleep(config.detect_interval);
}
}
```
Some design considerations:
- Why active detection when we already have passive detection? Because the network can sometimes be connectable in only one direction (especially in complex cloud environments): the Datanode's heartbeats cannot reach Metasrv, while Metasrv can still reach the Datanode. Active detection avoids this false-positive situation.
- Why does detection work on regions instead of Datanodes? Because it may be that only some of the regions on a Datanode are unavailable, not ALL of them, especially when Datanodes are shared by multiple tenants. In that case, it's better to fail over only the affected regions instead of every region residing on the Datanode. All in all, we want finer-grained control over region failover.
So we have detected that some regions are unavailable. How do we regain availability?
## Region Failover
Region failover largely relies on remote WAL, aka "[Bunshin](https://github.com/GreptimeTeam/bunshin)". I'm not including any of its details in this RFC; let's just assume we already have it.
In general, region failover is fairly simple. Once Metasrv decides to do failover for some regions, it first chooses one or more Datanodes to hold the failed regions. This can be done easily, as Metasrv already has the whole picture of the Datanodes: it knows which Datanode has the fewest regions, which Datanode historically had the lowest CPU usage and IO rate, and how the Datanodes are assigned to tenants, among other information that helps Metasrv choose the most suitable Datanodes. Let's call these chosen Datanodes "candidates".
> The strategy for choosing the most suitable candidates requires careful design, but that's another RFC.
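As a purely hypothetical illustration of that idea, the sketch below ranks Datanodes by load signals Metasrv already tracks (region count, then historical CPU usage). The `DatanodeLoad` type and the ranking rule are made up for this example; the real selection strategy is deferred to the other RFC.
```rust
use std::cmp::Ordering;

/// Hypothetical per-Datanode load summary, as Metasrv might track it.
struct DatanodeLoad {
    id: u64,
    region_count: usize,
    avg_cpu_usage: f64, // historical average, in the range 0.0..=1.0
}

/// Pick the least-loaded Datanode as the failover candidate:
/// prefer fewer regions, and break ties by lower CPU usage.
fn choose_candidate(datanodes: &[DatanodeLoad]) -> Option<u64> {
    datanodes
        .iter()
        .min_by(|a, b| {
            (a.region_count, a.avg_cpu_usage)
                .partial_cmp(&(b.region_count, b.avg_cpu_usage))
                .unwrap_or(Ordering::Equal)
        })
        .map(|d| d.id)
}
```
In practice the ranking would also have to honor tenant assignment and placement constraints, which is exactly why the strategy deserves its own RFC.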
Then, Metasrv sets the state of these failed regions to "passive". We should add a field to `Region`:
```protobuf
message Region {
uint64 id = 1;
string name = 2;
Partition partition = 3;
enum State {
Active = 0;
Passive = 1;
}
State state = 4;
map<string, string> attrs = 100;
}
```
Here `Region` is used in the message `RegionRoute`, which indicates how a write request is split among regions. When a region is set to "passive", the Frontend knows that writes to it should be rejected for the moment (reads from the region are not blocked, however).
> Making a region "passive" here effectively blocks writes to it. That's OK in the failover situation, since the region has failed anyway. However, when dealing with active maintenance operations, region state requires a more refined design. But that's another story.
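A rough sketch of what the Frontend-side check could look like is given below. The `RegionState` and `RegionRoute` types here are simplified stand-ins, not the actual routing structures; the point is only that writes consult the region state while reads do not.
```rust
#[derive(Clone, Copy, PartialEq)]
enum RegionState {
    Active,
    Passive,
}

/// Simplified stand-in for the routing entry the Frontend consults.
struct RegionRoute {
    region_id: u64,
    state: RegionState,
}

/// Reject writes to passive regions; reads deliberately skip this check.
fn check_writable(routes: &[RegionRoute], target_region: u64) -> Result<(), String> {
    match routes.iter().find(|r| r.region_id == target_region) {
        Some(r) if r.state == RegionState::Active => Ok(()),
        Some(_) => Err(format!("region {target_region} is passive; write rejected")),
        None => Err(format!("region {target_region} not found in route table")),
    }
}
```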
Third, Metasrv fires "close region" requests to the failed Datanodes, and "open region" requests to the candidates. The "close region" requests might fail due to the unavailability of the Datanodes, but that's fine: they're just a best-effort attempt to reduce the chance of any in-flight writes being handled unintentionally after the region is set to "passive". The "open region" requests must succeed, though. Datanodes open regions from remote WAL.
> Currently the "close region" is undefined in Datanode. It could be a local cache clean up of region data or other resources tidy up.
Finally, when a candidate successfully opens its region, it calls back to Metasrv, indicating that it is ready to handle the region. The "call back" here is backed by its heartbeat to Metasrv. Metasrv then updates the region's state to "active", so that the Frontend lifts the restriction on region writes (again, region reads are untouched).
All the above steps should be managed by a remote procedure framework. That's another implementation challenge of the region failover feature (the first being the remote WAL, of course).
A picture is worth a thousand words:
```text
+-------------------------+
| Metasrv detects region |
| failure |
+-------------------------+
|
v
+----------------------------+
| Metasrv chooses candidates |
| to hold failed regions |
+----------------------------+
|
v
+-------------------------+ +-------------------------+
| Metasrv "passive" the |------>| Frontend rejects writes |
| failed regions | | to "passive" regions |
+-------------------------+ +-------------------------+
|
v
+--------------------------+ +---------------------------+
| Candidate Datanodes open |<-------| Metasrv fires "close" and |
| regions from remote WAL | | "open" region requests |
+--------------------------+ +---------------------------+
|
|
| +-------------------------+ +-------------------------+
+--------------------->| Metasrv "active" the |------>| Frontend lifts write |
| failed regions | | restriction to regions |
+-------------------------+ +-------------------------+
|
v
+-------------------------+
| Region failover done, |
| HA regain |
+-------------------------+
```
# Alternatives
## The "Neon" Way
Remote WAL raises a problem that could harm the write throughput of a GreptimeDB cluster: each write request has to make at least two remote calls, one from the Frontend to the Datanode, and one from the Datanode to the remote WAL. What if we did it the "[Neon](https://github.com/neondatabase/neon)" way, placing the remote WAL between the Frontend and the Datanode; couldn't that improve our write throughput? It could, though there are some consistency issues, like "read-your-writes", to solve.
However, the main reasons we don't adopt this method are two-fold:
1. Remote WAL is planned to be quorum-based, so it can be written efficiently;
2. More importantly, we are planning to make the remote WAL an option that users can choose not to enable (at the cost of some reliability).
## No WAL, Replication instead
This method replicates regions across Datanodes directly, as is common in shared-nothing databases. Should the main region fail, a standby region in the replication group is elected as the new "main" and takes over read/write requests. The main concern with this method is its incompatibility with our current architecture and code structure. It would require a major redesign, yet gain no significant advantage over the remote WAL method.
However, replication does have its own advantages that we can learn from to optimize the failover procedure.
# Future Work
Some optimizations we could take:
- To reduce the MTTR, we could have Metasrv choose a candidate for each region ahead of time, during normal operation. The candidate does some preparation work to reduce the region-open time, effectively accelerating the failover procedure.
- We can adopt the replication method to the degree that region replicas are used as fast catch-up candidates. Since the data difference among replicas is minor, region failover does not need to load or exchange much data, greatly reducing the failover time.

View File

@@ -1,2 +1,2 @@
[toolchain]
channel = "nightly-2022-12-20"
channel = "nightly-2023-02-26"

View File

@@ -59,5 +59,5 @@ if [ -n "${OS_TYPE}" ] && [ -n "${ARCH_TYPE}" ]; then
wget "https://github.com/${GITHUB_ORG}/${GITHUB_REPO}/releases/download/${VERSION}/${BIN}-${OS_TYPE}-${ARCH_TYPE}.tgz"
fi
tar xvf ${BIN}-${OS_TYPE}-${ARCH_TYPE}.tgz && rm ${BIN}-${OS_TYPE}-${ARCH_TYPE}.tgz && echo "Run '${BIN} --help' to get started"
tar xvf ${BIN}-${OS_TYPE}-${ARCH_TYPE}.tgz && rm ${BIN}-${OS_TYPE}-${ARCH_TYPE}.tgz && echo "Run './${BIN} --help' to get started"
fi

View File

@@ -10,7 +10,7 @@ common-base = { path = "../common/base" }
common-error = { path = "../common/error" }
common-time = { path = "../common/time" }
datatypes = { path = "../datatypes" }
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "3e6349be127b65a8b42a38cda9d527ec423ca77d" }
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "eb760d219206c77dd3a105ecb6a3ba97d9d650ec" }
prost.workspace = true
snafu = { version = "0.7", features = ["backtraces"] }
tonic.workspace = true

View File

@@ -97,7 +97,9 @@ impl TryFrom<ConcreteDataType> for ColumnDataTypeWrapper {
TimestampType::Microsecond(_) => ColumnDataType::TimestampMicrosecond,
TimestampType::Nanosecond(_) => ColumnDataType::TimestampNanosecond,
},
ConcreteDataType::Null(_) | ConcreteDataType::List(_) => {
ConcreteDataType::Null(_)
| ConcreteDataType::List(_)
| ConcreteDataType::Dictionary(_) => {
return error::IntoColumnDataTypeSnafu { from: datatype }.fail()
}
});
@@ -194,7 +196,7 @@ pub fn push_vals(column: &mut Column, origin_count: usize, vector: VectorRef) {
null_mask.reserve_exact(origin_count + len);
null_mask.extend(BitVec::repeat(false, len));
(0..len).into_iter().for_each(|idx| match vector.get(idx) {
(0..len).for_each(|idx| match vector.get(idx) {
Value::Null => null_mask.set(idx + origin_count, true),
Value::Boolean(val) => values.bool_values.push(val),
Value::UInt8(val) => values.u8_values.push(val.into()),

View File

@@ -18,25 +18,28 @@ common-recordbatch = { path = "../common/recordbatch" }
common-runtime = { path = "../common/runtime" }
common-telemetry = { path = "../common/telemetry" }
common-time = { path = "../common/time" }
dashmap = "5.4"
datafusion.workspace = true
datatypes = { path = "../datatypes" }
futures = "0.3"
futures-util.workspace = true
lazy_static = "1.4"
meta-client = { path = "../meta-client" }
parking_lot = "0.12"
regex = "1.6"
serde = "1.0"
serde_json = "1.0"
session = { path = "../session" }
snafu = { version = "0.7", features = ["backtraces"] }
storage = { path = "../storage" }
table = { path = "../table" }
tokio.workspace = true
[dev-dependencies]
common-test-util = { path = "../common/test-util" }
chrono.workspace = true
log-store = { path = "../log-store" }
mito = { path = "../mito", features = ["test"] }
object-store = { path = "../object-store" }
storage = { path = "../storage" }
tempdir = "0.3"
tokio.workspace = true

View File

@@ -19,7 +19,6 @@ use common_error::ext::{BoxedError, ErrorExt};
use common_error::prelude::{Snafu, StatusCode};
use datafusion::error::DataFusionError;
use datatypes::prelude::ConcreteDataType;
use datatypes::schema::RawSchema;
use snafu::{Backtrace, ErrorCompat};
use crate::DeregisterTableRequest;
@@ -162,19 +161,6 @@ pub enum Error {
source: table::error::Error,
},
#[snafu(display(
"Invalid table schema in catalog entry, table:{}, schema: {:?}, source: {}",
table_info,
schema,
source
))]
InvalidTableSchema {
table_info: String,
schema: RawSchema,
#[snafu(backtrace)]
source: datatypes::error::Error,
},
#[snafu(display("Failure during SchemaProvider operation, source: {}", source))]
SchemaProviderOperation {
#[snafu(backtrace)]
@@ -215,6 +201,24 @@ pub enum Error {
#[snafu(backtrace)]
source: common_catalog::error::Error,
},
#[snafu(display("Illegal access to catalog: {} and schema: {}", catalog, schema))]
QueryAccessDenied { catalog: String, schema: String },
#[snafu(display(
"Failed to get region stats, catalog: {}, schema: {}, table: {}, source: {}",
catalog,
schema,
table,
source
))]
RegionStats {
catalog: String,
schema: String,
table: String,
#[snafu(backtrace)]
source: table::error::Error,
},
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -249,18 +253,19 @@ impl ErrorExt for Error {
| Error::InsertCatalogRecord { source, .. }
| Error::OpenTable { source, .. }
| Error::CreateTable { source, .. }
| Error::DeregisterTable { source, .. } => source.status_code(),
| Error::DeregisterTable { source, .. }
| Error::RegionStats { source, .. } => source.status_code(),
Error::MetaSrv { source, .. } => source.status_code(),
Error::SystemCatalogTableScan { source } => source.status_code(),
Error::SystemCatalogTableScanExec { source } => source.status_code(),
Error::InvalidTableSchema { source, .. }
| Error::InvalidTableInfoInCatalog { source } => source.status_code(),
Error::InvalidTableInfoInCatalog { source } => source.status_code(),
Error::SchemaProviderOperation { source } | Error::Internal { source } => {
source.status_code()
}
Error::Unimplemented { .. } => StatusCode::Unsupported,
Error::QueryAccessDenied { .. } => StatusCode::AccessDenied,
}
}

View File

@@ -24,10 +24,10 @@ use serde::{Deserialize, Serialize, Serializer};
use snafu::{ensure, OptionExt, ResultExt};
use table::metadata::{RawTableInfo, TableId, TableVersion};
const CATALOG_KEY_PREFIX: &str = "__c";
const SCHEMA_KEY_PREFIX: &str = "__s";
const TABLE_GLOBAL_KEY_PREFIX: &str = "__tg";
const TABLE_REGIONAL_KEY_PREFIX: &str = "__tr";
pub const CATALOG_KEY_PREFIX: &str = "__c";
pub const SCHEMA_KEY_PREFIX: &str = "__s";
pub const TABLE_GLOBAL_KEY_PREFIX: &str = "__tg";
pub const TABLE_REGIONAL_KEY_PREFIX: &str = "__tr";
const ALPHANUMERICS_NAME_PATTERN: &str = "[a-zA-Z_][a-zA-Z0-9_]*";
@@ -370,4 +370,10 @@ mod tests {
let deserialized = TableGlobalValue::parse(serialized).unwrap();
assert_eq!(value, deserialized);
}
#[test]
fn test_table_global_value_compatibility() {
let s = r#"{"node_id":1,"regions_id_map":{"1":[0]},"table_info":{"ident":{"table_id":1098,"version":1},"name":"container_cpu_limit","desc":"Created on insertion","catalog_name":"greptime","schema_name":"dd","meta":{"schema":{"column_schemas":[{"name":"container_id","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"container_name","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"docker_image","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"host","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"image_name","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"image_tag","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"interval","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"runtime","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"short_image","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"type","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"dd_value","data_type":{"Float64":{}},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"ts","data_type":{"Timestamp":{"Millisecond":null}},"is_nullable":false,"is_time_index":true,"default_constraint":null,"metadata":{"greptime:time_index":"true"}},{"name":"git.repository_url","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}}],"timestamp_index":11,"version":1},"primary_key_indices":[0,1,2,3,4,5,6,7,8,9,12],"value_indices":[10,11],"engine":"mito","next_column_id":12,"region_numbers":[],"engine_options":{},"options":{},"created_on":"1970-01-01T00:00:00Z"},"table_type":"Base"}}"#;
TableGlobalValue::parse(s).unwrap();
}
}

View File

@@ -18,7 +18,8 @@ use std::any::Any;
use std::fmt::{Debug, Formatter};
use std::sync::Arc;
use common_telemetry::info;
use api::v1::meta::{RegionStat, TableName};
use common_telemetry::{info, warn};
use snafu::{OptionExt, ResultExt};
use table::engine::{EngineContext, TableEngineRef};
use table::metadata::TableId;
@@ -34,6 +35,7 @@ pub mod local;
pub mod remote;
pub mod schema;
pub mod system;
pub mod table_source;
pub mod tables;
/// Represent a list of named catalogs
@@ -107,7 +109,12 @@ pub trait CatalogManager: CatalogList {
fn schema(&self, catalog: &str, schema: &str) -> Result<Option<SchemaProviderRef>>;
/// Returns the table by catalog, schema and table name.
fn table(&self, catalog: &str, schema: &str, table_name: &str) -> Result<Option<TableRef>>;
async fn table(
&self,
catalog: &str,
schema: &str,
table_name: &str,
) -> Result<Option<TableRef>>;
}
pub type CatalogManagerRef = Arc<dyn CatalogManager>;
@@ -186,7 +193,8 @@ pub(crate) async fn handle_system_table_request<'a, M: CatalogManager>(
let table_name = &req.create_table_request.table_name;
let table_id = req.create_table_request.id;
let table = if let Some(table) = manager.table(catalog_name, schema_name, table_name)? {
let table = manager.table(catalog_name, schema_name, table_name).await?;
let table = if let Some(table) = table {
table
} else {
let table = engine
@@ -218,9 +226,11 @@ pub(crate) async fn handle_system_table_request<'a, M: CatalogManager>(
Ok(())
}
/// The number of regions in the datanode node.
pub fn region_number(catalog_manager: &CatalogManagerRef) -> Result<u64> {
/// The stats of the regions on this datanode.
/// The number of regions can be obtained from the length of the returned vec.
pub async fn datanode_stat(catalog_manager: &CatalogManagerRef) -> Result<(u64, Vec<RegionStat>)> {
let mut region_number: u64 = 0;
let mut region_stats = Vec::new();
for catalog_name in catalog_manager.catalog_names()? {
let catalog =
@@ -239,16 +249,39 @@ pub fn region_number(catalog_manager: &CatalogManagerRef) -> Result<u64> {
})?;
for table_name in schema.table_names()? {
let table = schema
.table(&table_name)?
.context(error::TableNotFoundSnafu {
table_info: &table_name,
})?;
let table =
schema
.table(&table_name)
.await?
.context(error::TableNotFoundSnafu {
table_info: &table_name,
})?;
let region_numbers = &table.table_info().meta.region_numbers;
region_number += region_numbers.len() as u64;
match table.region_stats() {
Ok(stats) => {
let stats = stats.into_iter().map(|stat| RegionStat {
region_id: stat.region_id,
table_name: Some(TableName {
catalog_name: catalog_name.clone(),
schema_name: schema_name.clone(),
table_name: table_name.clone(),
}),
approximate_bytes: stat.disk_usage_bytes as i64,
..Default::default()
});
region_stats.extend(stats);
}
Err(e) => {
warn!("Failed to get region status, err: {:?}", e);
}
};
}
}
}
Ok(region_number)
Ok((region_number, region_stats))
}

View File

@@ -345,7 +345,7 @@ impl CatalogManager for LocalCatalogManager {
{
let _lock = self.register_lock.lock().await;
if let Some(existing) = schema.table(&request.table_name)? {
if let Some(existing) = schema.table(&request.table_name).await? {
if existing.table_info().ident.table_id != request.table_id {
error!(
"Unexpected table register request: {:?}, existing: {:?}",
@@ -434,9 +434,10 @@ impl CatalogManager for LocalCatalogManager {
} = &request;
let table_id = self
.catalogs
.table(catalog, schema, table_name)?
.table(catalog, schema, table_name)
.await?
.with_context(|| error::TableNotExistSnafu {
table: format!("{catalog}.{schema}.{table_name}"),
table: format_full_table_name(catalog, schema, table_name),
})?
.table_info()
.ident
@@ -505,7 +506,7 @@ impl CatalogManager for LocalCatalogManager {
.schema(schema)
}
fn table(
async fn table(
&self,
catalog_name: &str,
schema_name: &str,
@@ -521,7 +522,7 @@ impl CatalogManager for LocalCatalogManager {
catalog: catalog_name,
schema: schema_name,
})?;
schema.table(table_name)
schema.table(table_name).await
}
}

View File

@@ -18,6 +18,7 @@ use std::collections::HashMap;
use std::sync::atomic::{AtomicU32, Ordering};
use std::sync::{Arc, RwLock};
use async_trait::async_trait;
use common_catalog::consts::MIN_USER_TABLE_ID;
use common_telemetry::error;
use snafu::{ensure, OptionExt};
@@ -155,16 +156,20 @@ impl CatalogManager for MemoryCatalogManager {
}
}
fn table(&self, catalog: &str, schema: &str, table_name: &str) -> Result<Option<TableRef>> {
let c = self.catalogs.read().unwrap();
let catalog = if let Some(c) = c.get(catalog) {
async fn table(
&self,
catalog: &str,
schema: &str,
table_name: &str,
) -> Result<Option<TableRef>> {
let catalog = {
let c = self.catalogs.read().unwrap();
let Some(c) = c.get(catalog) else { return Ok(None) };
c.clone()
} else {
return Ok(None);
};
match catalog.schema(schema)? {
None => Ok(None),
Some(s) => s.table(table_name),
Some(s) => s.table(table_name).await,
}
}
}
@@ -283,6 +288,7 @@ impl Default for MemorySchemaProvider {
}
}
#[async_trait]
impl SchemaProvider for MemorySchemaProvider {
fn as_any(&self) -> &dyn Any {
self
@@ -293,7 +299,7 @@ impl SchemaProvider for MemorySchemaProvider {
Ok(tables.keys().cloned().collect())
}
fn table(&self, name: &str) -> Result<Option<TableRef>> {
async fn table(&self, name: &str) -> Result<Option<TableRef>> {
let tables = self.tables.read().unwrap();
Ok(tables.get(name).cloned())
}
@@ -355,8 +361,8 @@ mod tests {
use super::*;
#[test]
fn test_new_memory_catalog_list() {
#[tokio::test]
async fn test_new_memory_catalog_list() {
let catalog_list = new_memory_catalog_list().unwrap();
let default_catalog = catalog_list.catalog(DEFAULT_CATALOG_NAME).unwrap().unwrap();
@@ -369,9 +375,9 @@ mod tests {
.register_table("numbers".to_string(), Arc::new(NumbersTable::default()))
.unwrap();
let table = default_schema.table("numbers").unwrap();
let table = default_schema.table("numbers").await.unwrap();
assert!(table.is_some());
assert!(default_schema.table("not_exists").unwrap().is_none());
assert!(default_schema.table("not_exists").await.unwrap().is_none());
}
#[tokio::test]
@@ -419,7 +425,7 @@ mod tests {
// test new table name exists
assert!(provider.table_exist(new_table_name).unwrap());
let registered_table = provider.table(new_table_name).unwrap().unwrap();
let registered_table = provider.table(new_table_name).await.unwrap().unwrap();
assert_eq!(
registered_table.table_info().ident.table_id,
test_table.table_info().ident.table_id
@@ -468,6 +474,7 @@ mod tests {
let registered_table = catalog
.table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, new_table_name)
.await
.unwrap()
.unwrap();
assert_eq!(registered_table.table_info().ident.table_id, table_id);

View File

@@ -13,16 +13,19 @@
// limitations under the License.
use std::any::Any;
use std::collections::HashMap;
use std::collections::{HashMap, HashSet};
use std::pin::Pin;
use std::sync::Arc;
use arc_swap::ArcSwap;
use async_stream::stream;
use async_trait::async_trait;
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, MIN_USER_TABLE_ID};
use common_telemetry::{debug, info};
use common_telemetry::{debug, error, info};
use dashmap::DashMap;
use futures::Stream;
use futures_util::StreamExt;
use parking_lot::RwLock;
use snafu::{OptionExt, ResultExt};
use table::engine::{EngineContext, TableEngineRef};
use table::metadata::TableId;
@@ -32,12 +35,13 @@ use table::TableRef;
use tokio::sync::Mutex;
use crate::error::{
CatalogNotFoundSnafu, CreateTableSnafu, InvalidCatalogValueSnafu, InvalidTableSchemaSnafu,
OpenTableSnafu, Result, SchemaNotFoundSnafu, TableExistsSnafu, UnimplementedSnafu,
CatalogNotFoundSnafu, CreateTableSnafu, InvalidCatalogValueSnafu, OpenTableSnafu, Result,
SchemaNotFoundSnafu, TableExistsSnafu, UnimplementedSnafu,
};
use crate::helper::{
build_catalog_prefix, build_schema_prefix, build_table_global_prefix, CatalogKey, CatalogValue,
SchemaKey, SchemaValue, TableGlobalKey, TableGlobalValue, TableRegionalKey, TableRegionalValue,
CATALOG_KEY_PREFIX,
};
use crate::remote::{Kv, KvBackendRef};
use crate::{
@@ -50,10 +54,9 @@ use crate::{
pub struct RemoteCatalogManager {
node_id: u64,
backend: KvBackendRef,
catalogs: Arc<ArcSwap<HashMap<String, CatalogProviderRef>>>,
catalogs: Arc<RwLock<DashMap<String, CatalogProviderRef>>>,
engine: TableEngineRef,
system_table_requests: Mutex<Vec<RegisterSystemTableRequest>>,
mutex: Arc<Mutex<()>>,
}
impl RemoteCatalogManager {
@@ -64,7 +67,6 @@ impl RemoteCatalogManager {
backend,
catalogs: Default::default(),
system_table_requests: Default::default(),
mutex: Default::default(),
}
}
@@ -108,9 +110,13 @@ impl RemoteCatalogManager {
debug!("Ignoring non-catalog key: {}", String::from_utf8_lossy(&k));
continue;
}
let key = CatalogKey::parse(&String::from_utf8_lossy(&k))
.context(InvalidCatalogValueSnafu)?;
yield Ok(key)
let catalog_key = String::from_utf8_lossy(&k);
if let Ok(key) = CatalogKey::parse(&catalog_key) {
yield Ok(key)
} else {
error!("Invalid catalog key: {:?}", catalog_key);
}
}
}))
}
@@ -346,21 +352,13 @@ impl RemoteCatalogManager {
);
let meta = &table_info.meta;
let schema = meta
.schema
.clone()
.try_into()
.context(InvalidTableSchemaSnafu {
table_info: format!("{catalog_name}.{schema_name}.{table_name}"),
schema: meta.schema.clone(),
})?;
let req = CreateTableRequest {
id: table_id,
catalog_name: catalog_name.clone(),
schema_name: schema_name.clone(),
table_name: table_name.clone(),
desc: None,
schema: Arc::new(schema),
schema: meta.schema.clone(),
region_numbers: region_numbers.clone(),
primary_key_indices: meta.primary_key_indices.clone(),
create_if_not_exists: true,
@@ -389,7 +387,14 @@ impl CatalogManager for RemoteCatalogManager {
"Initialized catalogs: {:?}",
catalogs.keys().cloned().collect::<Vec<_>>()
);
self.catalogs.store(Arc::new(catalogs));
{
let self_catalogs = self.catalogs.read();
catalogs.into_iter().for_each(|(k, v)| {
self_catalogs.insert(k, v);
});
}
info!("Max table id allocated: {}", max_table_id);
let mut system_table_requests = self.system_table_requests.lock().await;
@@ -476,7 +481,7 @@ impl CatalogManager for RemoteCatalogManager {
.schema(schema)
}
fn table(
async fn table(
&self,
catalog_name: &str,
schema_name: &str,
@@ -491,7 +496,7 @@ impl CatalogManager for RemoteCatalogManager {
catalog: catalog_name,
schema: schema_name,
})?;
schema.table(table_name)
schema.table(table_name).await
}
}
@@ -507,12 +512,10 @@ impl CatalogList for RemoteCatalogManager {
) -> Result<Option<CatalogProviderRef>> {
let key = self.build_catalog_key(&name).to_string();
let backend = self.backend.clone();
let mutex = self.mutex.clone();
let catalogs = self.catalogs.clone();
std::thread::spawn(|| {
common_runtime::block_on_write(async move {
let _guard = mutex.lock().await;
backend
.set(
key.as_bytes(),
@@ -521,11 +524,10 @@ impl CatalogList for RemoteCatalogManager {
.context(InvalidCatalogValueSnafu)?,
)
.await?;
let prev_catalogs = catalogs.load();
let mut new_catalogs = HashMap::with_capacity(prev_catalogs.len() + 1);
new_catalogs.clone_from(&prev_catalogs);
let prev = new_catalogs.insert(name, catalog);
catalogs.store(Arc::new(new_catalogs));
let catalogs = catalogs.read();
let prev = catalogs.insert(name, catalog.clone());
Ok(prev)
})
})
@@ -535,12 +537,65 @@ impl CatalogList for RemoteCatalogManager {
/// List all catalogs from metasrv
fn catalog_names(&self) -> Result<Vec<String>> {
Ok(self.catalogs.load().keys().cloned().collect::<Vec<_>>())
let catalogs = self.catalogs.read();
Ok(catalogs.iter().map(|k| k.key().to_string()).collect())
}
/// Read catalog info of given name from metasrv.
fn catalog(&self, name: &str) -> Result<Option<CatalogProviderRef>> {
Ok(self.catalogs.load().get(name).cloned())
{
let catalogs = self.catalogs.read();
let catalog = catalogs.get(name);
if let Some(catalog) = catalog {
return Ok(Some(catalog.clone()));
}
}
let catalogs = self.catalogs.write();
let catalog = catalogs.get(name);
if let Some(catalog) = catalog {
return Ok(Some(catalog.clone()));
}
// This is a workaround for the lack of incremental catalog syncing between datanode and meta:
// we fetch the catalog from meta on demand. It can be removed once incremental catalog syncing
// is implemented in the datanode.
let backend = self.backend.clone();
let catalogs_from_meta: HashSet<String> = std::thread::spawn(|| {
common_runtime::block_on_read(async move {
let mut stream = backend.range(CATALOG_KEY_PREFIX.as_bytes());
let mut catalogs = HashSet::new();
while let Some(catalog) = stream.next().await {
if let Ok(catalog) = catalog {
let catalog_key = String::from_utf8_lossy(&catalog.0);
if let Ok(key) = CatalogKey::parse(&catalog_key) {
catalogs.insert(key.catalog_name);
}
}
}
catalogs
})
})
.join()
.unwrap();
catalogs.retain(|catalog_name, _| catalogs_from_meta.get(catalog_name).is_some());
for catalog in catalogs_from_meta {
catalogs
.entry(catalog.clone())
.or_insert(self.new_catalog_provider(&catalog));
}
let catalog = catalogs.get(name);
Ok(catalog.as_deref().cloned())
}
}
@@ -700,6 +755,7 @@ impl RemoteSchemaProvider {
}
}
#[async_trait]
impl SchemaProvider for RemoteSchemaProvider {
fn as_any(&self) -> &dyn Any {
self
@@ -709,7 +765,7 @@ impl SchemaProvider for RemoteSchemaProvider {
Ok(self.tables.load().keys().cloned().collect::<Vec<_>>())
}
fn table(&self, name: &str) -> Result<Option<TableRef>> {
async fn table(&self, name: &str) -> Result<Option<TableRef>> {
Ok(self.tables.load().get(name).cloned())
}

View File

@@ -15,11 +15,13 @@
use std::any::Any;
use std::sync::Arc;
use async_trait::async_trait;
use table::TableRef;
use crate::error::Result;
/// Represents a schema, comprising a number of named tables.
#[async_trait]
pub trait SchemaProvider: Sync + Send {
/// Returns the schema provider as [`Any`](std::any::Any)
/// so that it can be downcast to a specific implementation.
@@ -29,7 +31,7 @@ pub trait SchemaProvider: Sync + Send {
fn table_names(&self) -> Result<Vec<String>>;
/// Retrieves a specific table from the schema by name, provided it exists.
fn table(&self, name: &str) -> Result<Option<TableRef>>;
async fn table(&self, name: &str) -> Result<Option<TableRef>>;
/// If supported by the implementation, adds a new table to this schema.
/// If a table of the same name existed before, it returns "Table already exists" error.
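Making `table` async ripples out to every caller: lookups that used to be plain method calls now return futures and must be awaited, which is why the memory, local, and remote providers above all gained `.await` at their call sites. A minimal caller-side sketch (not part of the patch; the module path and helper name are illustrative):

```rust
use table::metadata::TableId;

use crate::error::Result;
use crate::schema::SchemaProvider; // assumed module path for the trait above

/// Looks up a table by name and returns its id, if the table exists.
/// With the trait change, the lookup itself must now be awaited.
async fn resolve_table_id(schema: &dyn SchemaProvider, name: &str) -> Result<Option<TableId>> {
    Ok(schema
        .table(name)
        .await?
        .map(|t| t.table_info().ident.table_id))
}
```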

View File

@@ -26,13 +26,15 @@ use common_recordbatch::SendableRecordBatchStream;
use common_telemetry::debug;
use common_time::util;
use datatypes::prelude::{ConcreteDataType, ScalarVector, VectorRef};
use datatypes::schema::{ColumnSchema, Schema, SchemaBuilder, SchemaRef};
use datatypes::schema::{ColumnSchema, RawSchema, SchemaRef};
use datatypes::vectors::{BinaryVector, TimestampMillisecondVector, UInt8Vector};
use serde::{Deserialize, Serialize};
use snafu::{ensure, OptionExt, ResultExt};
use table::engine::{EngineContext, TableEngineRef};
use table::metadata::{TableId, TableInfoRef};
use table::requests::{CreateTableRequest, DeleteRequest, InsertRequest, OpenTableRequest};
use table::requests::{
CreateTableRequest, DeleteRequest, InsertRequest, OpenTableRequest, TableOptions,
};
use table::{Table, TableRef};
use crate::error::{
@@ -88,7 +90,7 @@ impl SystemCatalogTable {
table_name: SYSTEM_CATALOG_TABLE_NAME.to_string(),
table_id: SYSTEM_CATALOG_TABLE_ID,
};
let schema = Arc::new(build_system_catalog_schema());
let schema = build_system_catalog_schema();
let ctx = EngineContext::default();
if let Some(table) = engine
@@ -105,11 +107,11 @@ impl SystemCatalogTable {
schema_name: INFORMATION_SCHEMA_NAME.to_string(),
table_name: SYSTEM_CATALOG_TABLE_NAME.to_string(),
desc: Some("System catalog table".to_string()),
schema: schema.clone(),
schema,
region_numbers: vec![0],
primary_key_indices: vec![ENTRY_TYPE_INDEX, KEY_INDEX],
create_if_not_exists: true,
table_options: HashMap::new(),
table_options: TableOptions::default(),
};
let table = engine
@@ -143,7 +145,7 @@ impl SystemCatalogTable {
/// - value: JSON-encoded value of entry's metadata.
/// - gmt_created: create time of this metadata.
/// - gmt_modified: last updated time of this metadata.
fn build_system_catalog_schema() -> Schema {
fn build_system_catalog_schema() -> RawSchema {
let cols = vec![
ColumnSchema::new(
"entry_type".to_string(),
@@ -178,8 +180,7 @@ fn build_system_catalog_schema() -> Schema {
),
];
// The schema of this table must be valid.
SchemaBuilder::try_from(cols).unwrap().build().unwrap()
RawSchema::new(cols)
}
/// Formats key string for table entry in system catalog
@@ -218,7 +219,7 @@ fn build_primary_key_columns(entry_type: EntryType, key: &[u8]) -> HashMap<Strin
let mut m = HashMap::with_capacity(3);
m.insert(
"entry_type".to_string(),
Arc::new(UInt8Vector::from_slice(&[entry_type as u8])) as _,
Arc::new(UInt8Vector::from_slice([entry_type as u8])) as _,
);
m.insert(
"key".to_string(),
@@ -227,7 +228,7 @@ fn build_primary_key_columns(entry_type: EntryType, key: &[u8]) -> HashMap<Strin
// Timestamp in key part is intentionally left to 0
m.insert(
"timestamp".to_string(),
Arc::new(TimestampMillisecondVector::from_slice(&[0])) as _,
Arc::new(TimestampMillisecondVector::from_slice([0])) as _,
);
m
}
@@ -257,12 +258,12 @@ pub fn build_insert_request(entry_type: EntryType, key: &[u8], value: &[u8]) ->
let now = util::current_time_millis();
columns_values.insert(
"gmt_created".to_string(),
Arc::new(TimestampMillisecondVector::from_slice(&[now])) as _,
Arc::new(TimestampMillisecondVector::from_slice([now])) as _,
);
columns_values.insert(
"gmt_modified".to_string(),
Arc::new(TimestampMillisecondVector::from_slice(&[now])) as _,
Arc::new(TimestampMillisecondVector::from_slice([now])) as _,
);
InsertRequest {
@@ -394,16 +395,17 @@ pub struct TableEntryValue {
#[cfg(test)]
mod tests {
use common_recordbatch::RecordBatches;
use common_test_util::temp_dir::{create_temp_dir, TempDir};
use datatypes::value::Value;
use log_store::NoopLogStore;
use mito::config::EngineConfig;
use mito::engine::MitoEngine;
use object_store::ObjectStore;
use object_store::{ObjectStore, ObjectStoreBuilder};
use storage::compaction::noop::NoopCompactionScheduler;
use storage::config::EngineConfig as StorageEngineConfig;
use storage::EngineImpl;
use table::metadata::TableType;
use table::metadata::TableType::Base;
use tempdir::TempDir;
use super::*;
@@ -478,19 +480,21 @@ mod tests {
}
pub async fn prepare_table_engine() -> (TempDir, TableEngineRef) {
let dir = TempDir::new("system-table-test").unwrap();
let dir = create_temp_dir("system-table-test");
let store_dir = dir.path().to_string_lossy();
let accessor = object_store::backend::fs::Builder::default()
let accessor = object_store::services::Fs::default()
.root(&store_dir)
.build()
.unwrap();
let object_store = ObjectStore::new(accessor);
let object_store = ObjectStore::new(accessor).finish();
let noop_compaction_scheduler = Arc::new(NoopCompactionScheduler::default());
let table_engine = Arc::new(MitoEngine::new(
EngineConfig::default(),
EngineImpl::new(
StorageEngineConfig::default(),
Arc::new(NoopLogStore::default()),
object_store.clone(),
noop_compaction_scheduler,
),
object_store,
));

View File

@@ -0,0 +1,178 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::HashMap;
use std::sync::Arc;
use common_catalog::format_full_table_name;
use datafusion::common::{OwnedTableReference, ResolvedTableReference, TableReference};
use datafusion::datasource::provider_as_source;
use datafusion::logical_expr::TableSource;
use session::context::QueryContext;
use snafu::{ensure, OptionExt};
use table::table::adapter::DfTableProviderAdapter;
use crate::error::{
CatalogNotFoundSnafu, QueryAccessDeniedSnafu, Result, SchemaNotFoundSnafu, TableNotExistSnafu,
};
use crate::CatalogListRef;
pub struct DfTableSourceProvider {
catalog_list: CatalogListRef,
resolved_tables: HashMap<String, Arc<dyn TableSource>>,
disallow_cross_schema_query: bool,
default_catalog: String,
default_schema: String,
}
impl DfTableSourceProvider {
pub fn new(
catalog_list: CatalogListRef,
disallow_cross_schema_query: bool,
query_ctx: &QueryContext,
) -> Self {
Self {
catalog_list,
disallow_cross_schema_query,
resolved_tables: HashMap::new(),
default_catalog: query_ctx.current_catalog(),
default_schema: query_ctx.current_schema(),
}
}
pub fn resolve_table_ref<'a>(
&'a self,
table_ref: TableReference<'a>,
) -> Result<ResolvedTableReference<'a>> {
if self.disallow_cross_schema_query {
match &table_ref {
TableReference::Bare { .. } => (),
TableReference::Partial { schema, .. } => {
ensure!(
schema.as_ref() == self.default_schema,
QueryAccessDeniedSnafu {
catalog: &self.default_catalog,
schema: schema.as_ref(),
}
);
}
TableReference::Full {
catalog, schema, ..
} => {
ensure!(
catalog.as_ref() == self.default_catalog
&& schema.as_ref() == self.default_schema,
QueryAccessDeniedSnafu {
catalog: catalog.as_ref(),
schema: schema.as_ref()
}
);
}
};
}
Ok(table_ref.resolve(&self.default_catalog, &self.default_schema))
}
pub async fn resolve_table(
&mut self,
table_ref: OwnedTableReference,
) -> Result<Arc<dyn TableSource>> {
let table_ref = table_ref.as_table_reference();
let table_ref = self.resolve_table_ref(table_ref)?;
let resolved_name = table_ref.to_string();
if let Some(table) = self.resolved_tables.get(&resolved_name) {
return Ok(table.clone());
}
let catalog_name = table_ref.catalog.as_ref();
let schema_name = table_ref.schema.as_ref();
let table_name = table_ref.table.as_ref();
let catalog = self
.catalog_list
.catalog(catalog_name)?
.context(CatalogNotFoundSnafu { catalog_name })?;
let schema = catalog.schema(schema_name)?.context(SchemaNotFoundSnafu {
catalog: catalog_name,
schema: schema_name,
})?;
let table = schema
.table(table_name)
.await?
.with_context(|| TableNotExistSnafu {
table: format_full_table_name(catalog_name, schema_name, table_name),
})?;
let table = DfTableProviderAdapter::new(table);
let table = provider_as_source(Arc::new(table));
self.resolved_tables.insert(resolved_name, table.clone());
Ok(table)
}
}
#[cfg(test)]
mod tests {
use std::borrow::Cow;
use session::context::QueryContext;
use super::*;
use crate::local::MemoryCatalogManager;
#[test]
fn test_validate_table_ref() {
let query_ctx = &QueryContext::with("greptime", "public");
let table_provider =
DfTableSourceProvider::new(Arc::new(MemoryCatalogManager::default()), true, query_ctx);
let table_ref = TableReference::Bare {
table: Cow::Borrowed("table_name"),
};
let result = table_provider.resolve_table_ref(table_ref);
assert!(result.is_ok());
let table_ref = TableReference::Partial {
schema: Cow::Borrowed("public"),
table: Cow::Borrowed("table_name"),
};
let result = table_provider.resolve_table_ref(table_ref);
assert!(result.is_ok());
let table_ref = TableReference::Partial {
schema: Cow::Borrowed("wrong_schema"),
table: Cow::Borrowed("table_name"),
};
let result = table_provider.resolve_table_ref(table_ref);
assert!(result.is_err());
let table_ref = TableReference::Full {
catalog: Cow::Borrowed("greptime"),
schema: Cow::Borrowed("public"),
table: Cow::Borrowed("table_name"),
};
let result = table_provider.resolve_table_ref(table_ref);
assert!(result.is_ok());
let table_ref = TableReference::Full {
catalog: Cow::Borrowed("wrong_catalog"),
schema: Cow::Borrowed("public"),
table: Cow::Borrowed("table_name"),
};
let result = table_provider.resolve_table_ref(table_ref);
assert!(result.is_err());
}
}

View File

@@ -20,6 +20,7 @@ use std::sync::Arc;
use std::task::{Context, Poll};
use async_stream::stream;
use async_trait::async_trait;
use common_catalog::consts::{INFORMATION_SCHEMA_NAME, SYSTEM_CATALOG_TABLE_NAME};
use common_error::ext::BoxedError;
use common_query::logical_plan::Expr;
@@ -162,16 +163,10 @@ fn tables_to_record_batch(
for table_name in table_names {
// Safety: All these vectors are string type.
catalog_vec
.push_value_ref(ValueRef::String(catalog_name))
.unwrap();
schema_vec
.push_value_ref(ValueRef::String(schema_name))
.unwrap();
table_name_vec
.push_value_ref(ValueRef::String(&table_name))
.unwrap();
engine_vec.push_value_ref(ValueRef::String(engine)).unwrap();
catalog_vec.push_value_ref(ValueRef::String(catalog_name));
schema_vec.push_value_ref(ValueRef::String(schema_name));
table_name_vec.push_value_ref(ValueRef::String(&table_name));
engine_vec.push_value_ref(ValueRef::String(engine));
}
vec![
@@ -206,6 +201,7 @@ pub struct InformationSchema {
pub system: Arc<SystemCatalogTable>,
}
#[async_trait]
impl SchemaProvider for InformationSchema {
fn as_any(&self) -> &dyn Any {
self
@@ -218,7 +214,7 @@ impl SchemaProvider for InformationSchema {
])
}
fn table(&self, name: &str) -> Result<Option<TableRef>, Error> {
async fn table(&self, name: &str) -> Result<Option<TableRef>, Error> {
if name.eq_ignore_ascii_case("tables") {
Ok(Some(self.tables.clone()))
} else if name.eq_ignore_ascii_case(SYSTEM_CATALOG_TABLE_NAME) {

View File

@@ -71,6 +71,7 @@ mod tests {
let registered_table = catalog_manager
.table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, new_table_name)
.await
.unwrap()
.unwrap();
assert_eq!(registered_table.table_info().ident.table_id, table_id);
@@ -158,6 +159,7 @@ mod tests {
let table = guard.as_ref().unwrap();
let table_registered = catalog_manager
.table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, "test_table")
.await
.unwrap()
.unwrap();
assert_eq!(

View File

@@ -147,6 +147,7 @@ impl TableEngine for MockTableEngine {
let table_id = TableId::from_str(
request
.table_options
.extra_options
.get("table_id")
.unwrap_or(&default_table_id),
)
@@ -220,4 +221,8 @@ impl TableEngine for MockTableEngine {
) -> table::Result<bool> {
unimplemented!()
}
async fn close(&self) -> table::Result<()> {
Ok(())
}
}

View File

@@ -28,7 +28,7 @@ mod tests {
};
use catalog::{CatalogList, CatalogManager, RegisterTableRequest};
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use datatypes::schema::Schema;
use datatypes::schema::RawSchema;
use futures_util::StreamExt;
use table::engine::{EngineContext, TableEngineRef};
use table::requests::CreateTableRequest;
@@ -116,7 +116,7 @@ mod tests {
let schema_name = "nonexistent_schema".to_string();
let table_name = "fail_table".to_string();
// this schema has no effect
let table_schema = Arc::new(Schema::new(vec![]));
let table_schema = RawSchema::new(vec![]);
let table = table_engine
.create_table(
&EngineContext {},
@@ -126,7 +126,7 @@ mod tests {
schema_name: schema_name.clone(),
table_name: table_name.clone(),
desc: None,
schema: table_schema.clone(),
schema: table_schema,
region_numbers: vec![0],
primary_key_indices: vec![],
create_if_not_exists: false,
@@ -176,7 +176,7 @@ mod tests {
let table_name = "test_table".to_string();
let table_id = 1;
// this schema has no effect
let table_schema = Arc::new(Schema::new(vec![]));
let table_schema = RawSchema::new(vec![]);
let table = table_engine
.create_table(
&EngineContext {},
@@ -186,7 +186,7 @@ mod tests {
schema_name: schema_name.clone(),
table_name: table_name.clone(),
desc: None,
schema: table_schema.clone(),
schema: table_schema,
region_numbers: vec![0],
primary_key_indices: vec![],
create_if_not_exists: false,
@@ -246,7 +246,7 @@ mod tests {
schema_name: schema_name.clone(),
table_name: "".to_string(),
desc: None,
schema: Arc::new(Schema::new(vec![])),
schema: RawSchema::new(vec![]),
region_numbers: vec![0],
primary_key_indices: vec![],
create_if_not_exists: false,

View File

@@ -16,13 +16,14 @@ common-grpc-expr = { path = "../common/grpc-expr" }
common-query = { path = "../common/query" }
common-recordbatch = { path = "../common/recordbatch" }
common-time = { path = "../common/time" }
common-telemetry = { path = "../common/telemetry" }
datafusion.workspace = true
datatypes = { path = "../datatypes" }
enum_dispatch = "0.3"
futures-util.workspace = true
parking_lot = "0.12"
prost.workspace = true
rand = "0.8"
rand.workspace = true
snafu.workspace = true
tonic.workspace = true
@@ -32,12 +33,8 @@ substrait = { path = "../common/substrait" }
tokio.workspace = true
tracing = "0.1"
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
# TODO(ruihang): upgrade to 0.11 once substrait-rs supports it.
[dev-dependencies.prost_09]
package = "prost"
version = "0.9"
prost.workspace = true
[dev-dependencies.substrait_proto]
package = "substrait"
version = "0.2"
version = "0.4"

View File

@@ -14,11 +14,12 @@
use api::v1::{ColumnDataType, ColumnDef, CreateTableExpr, TableId};
use client::{Client, Database};
use prost_09::Message;
use substrait_proto::protobuf::plan_rel::RelType as PlanRelType;
use substrait_proto::protobuf::read_rel::{NamedTable, ReadType};
use substrait_proto::protobuf::rel::RelType;
use substrait_proto::protobuf::{PlanRel, ReadRel, Rel};
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use prost::Message;
use substrait_proto::proto::plan_rel::RelType as PlanRelType;
use substrait_proto::proto::read_rel::{NamedTable, ReadType};
use substrait_proto::proto::rel::RelType;
use substrait_proto::proto::{PlanRel, ReadRel, Rel};
use tracing::{event, Level};
fn main() {
@@ -65,7 +66,7 @@ async fn run() {
region_ids: vec![0],
};
let db = Database::with_client(client);
let db = Database::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, client);
let result = db.create(create_table_expr).await.unwrap();
event!(Level::INFO, "create table result: {:#?}", result);
@@ -88,12 +89,8 @@ fn mock_logical_plan() -> Vec<u8> {
let read_type = ReadType::NamedTable(named_table);
let read_rel = ReadRel {
common: None,
base_schema: None,
filter: None,
projection: None,
advanced_extension: None,
read_type: Some(read_type),
..Default::default()
};
let mut buf = vec![];

View File

@@ -14,6 +14,7 @@
use std::sync::Arc;
use api::v1::greptime_database_client::GreptimeDatabaseClient;
use arrow_flight::flight_service_client::FlightServiceClient;
use common_grpc::channel_manager::ChannelManager;
use parking_lot::RwLock;
@@ -23,6 +24,10 @@ use tonic::transport::Channel;
use crate::load_balance::{LoadBalance, Loadbalancer};
use crate::{error, Result};
pub(crate) struct DatabaseClient {
pub(crate) inner: GreptimeDatabaseClient<Channel>,
}
pub(crate) struct FlightClient {
addr: String,
client: FlightServiceClient<Channel>,
@@ -118,7 +123,7 @@ impl Client {
self.inner.set_peers(urls);
}
pub(crate) fn make_client(&self) -> Result<FlightClient> {
fn find_channel(&self) -> Result<(String, Channel)> {
let addr = self
.inner
.get_peer()
@@ -131,11 +136,23 @@ impl Client {
.channel_manager
.get(&addr)
.context(error::CreateChannelSnafu { addr: &addr })?;
Ok((addr, channel))
}
pub(crate) fn make_flight_client(&self) -> Result<FlightClient> {
let (addr, channel) = self.find_channel()?;
Ok(FlightClient {
addr,
client: FlightServiceClient::new(channel),
})
}
pub(crate) fn make_database_client(&self) -> Result<DatabaseClient> {
let (_, channel) = self.find_channel()?;
Ok(DatabaseClient {
inner: GreptimeDatabaseClient::new(channel),
})
}
}
#[cfg(test)]

View File

@@ -12,25 +12,27 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use std::str::FromStr;
use api::v1::auth_header::AuthScheme;
use api::v1::ddl_request::Expr as DdlExpr;
use api::v1::greptime_request::Request;
use api::v1::query_request::Query;
use api::v1::{
AlterExpr, CreateTableExpr, DdlRequest, DropTableExpr, GreptimeRequest, InsertRequest,
QueryRequest, RequestHeader,
greptime_response, AffectedRows, AlterExpr, AuthHeader, CreateTableExpr, DdlRequest,
DropTableExpr, FlushTableExpr, GreptimeRequest, InsertRequest, PromRangeQuery, QueryRequest,
RequestHeader,
};
use arrow_flight::{FlightData, Ticket};
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_error::prelude::*;
use common_grpc::flight::{flight_messages_to_recordbatches, FlightDecoder, FlightMessage};
use common_query::Output;
use common_telemetry::logging;
use futures_util::{TryFutureExt, TryStreamExt};
use prost::Message;
use snafu::{ensure, ResultExt};
use crate::error::{ConvertFlightDataSnafu, IllegalFlightMessagesSnafu};
use crate::error::{
ConvertFlightDataSnafu, IllegalDatabaseResponseSnafu, IllegalFlightMessagesSnafu,
};
use crate::{error, Client, Result};
#[derive(Clone, Debug)]
@@ -42,6 +44,7 @@ pub struct Database {
schema: String,
client: Client,
ctx: FlightContext,
}
impl Database {
@@ -50,19 +53,52 @@ impl Database {
catalog: catalog.into(),
schema: schema.into(),
client,
ctx: FlightContext::default(),
}
}
pub fn with_client(client: Client) -> Self {
Self::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, client)
pub fn catalog(&self) -> &String {
&self.catalog
}
pub fn set_catalog(&mut self, catalog: impl Into<String>) {
self.catalog = catalog.into();
}
pub fn schema(&self) -> &String {
&self.schema
}
pub fn set_schema(&mut self, schema: impl Into<String>) {
self.schema = schema.into();
}
pub async fn insert(&self, request: InsertRequest) -> Result<Output> {
self.do_get(Request::Insert(request)).await
pub fn set_auth(&mut self, auth: AuthScheme) {
self.ctx.auth_header = Some(AuthHeader {
auth_scheme: Some(auth),
});
}
pub async fn insert(&self, request: InsertRequest) -> Result<u32> {
let mut client = self.client.make_database_client()?.inner;
let request = GreptimeRequest {
header: Some(RequestHeader {
catalog: self.catalog.clone(),
schema: self.schema.clone(),
authorization: self.ctx.auth_header.clone(),
}),
request: Some(Request::Insert(request)),
};
let response = client
.handle(request)
.await?
.into_inner()
.response
.context(IllegalDatabaseResponseSnafu {
err_msg: "GreptimeResponse is empty",
})?;
let greptime_response::Response::AffectedRows(AffectedRows { value }) = response;
Ok(value)
}
pub async fn sql(&self, sql: &str) -> Result<Output> {
@@ -79,6 +115,24 @@ impl Database {
.await
}
pub async fn prom_range_query(
&self,
promql: &str,
start: &str,
end: &str,
step: &str,
) -> Result<Output> {
self.do_get(Request::Query(QueryRequest {
query: Some(Query::PromRangeQuery(PromRangeQuery {
query: promql.to_string(),
start: start.to_string(),
end: end.to_string(),
step: step.to_string(),
})),
}))
.await
}
pub async fn create(&self, expr: CreateTableExpr) -> Result<Output> {
self.do_get(Request::Ddl(DdlRequest {
expr: Some(DdlExpr::CreateTable(expr)),
@@ -100,19 +154,27 @@ impl Database {
.await
}
pub async fn flush_table(&self, expr: FlushTableExpr) -> Result<Output> {
self.do_get(Request::Ddl(DdlRequest {
expr: Some(DdlExpr::FlushTable(expr)),
}))
.await
}
async fn do_get(&self, request: Request) -> Result<Output> {
let request = GreptimeRequest {
header: Some(RequestHeader {
catalog: self.catalog.clone(),
schema: self.schema.clone(),
authorization: self.ctx.auth_header.clone(),
}),
request: Some(request),
};
let request = Ticket {
ticket: request.encode_to_vec(),
ticket: request.encode_to_vec().into(),
};
let mut client = self.client.make_client()?;
let mut client = self.client.make_flight_client()?;
// TODO(LFC): Streaming get flight data.
let flight_data: Vec<FlightData> = client
@@ -121,17 +183,26 @@ impl Database {
.and_then(|response| response.into_inner().try_collect())
.await
.map_err(|e| {
let code = get_metadata_value(&e, INNER_ERROR_CODE)
.and_then(|s| StatusCode::from_str(&s).ok())
.unwrap_or(StatusCode::Unknown);
let msg = get_metadata_value(&e, INNER_ERROR_MSG).unwrap_or(e.to_string());
error::ExternalSnafu { code, msg }
let tonic_code = e.code();
let e: error::Error = e.into();
let code = e.status_code();
let msg = e.to_string();
error::ServerSnafu { code, msg }
.fail::<()>()
.map_err(BoxedError::new)
.context(error::FlightGetSnafu {
tonic_code: e.code(),
tonic_code,
addr: client.addr(),
})
.map_err(|error| {
logging::error!(
"Failed to do Flight get, addr: {}, code: {}, source: {}",
client.addr(),
tonic_code,
error
);
error
})
.unwrap_err()
})?;
@@ -158,10 +229,9 @@ impl Database {
}
}
fn get_metadata_value(e: &tonic::Status, key: &str) -> Option<String> {
e.metadata()
.get(key)
.and_then(|v| String::from_utf8(v.as_bytes().to_vec()).ok())
#[derive(Default, Debug, Clone)]
pub struct FlightContext {
auth_header: Option<AuthHeader>,
}
#[cfg(test)]
@@ -169,7 +239,8 @@ mod tests {
use std::sync::Arc;
use api::helper::ColumnDataTypeWrapper;
use api::v1::Column;
use api::v1::auth_header::AuthScheme;
use api::v1::{AuthHeader, Basic, Column};
use common_grpc::select::{null_mask, values};
use common_grpc_expr::column_to_vector;
use datatypes::prelude::{Vector, VectorRef};
@@ -179,6 +250,8 @@ mod tests {
UInt32Vector, UInt64Vector, UInt8Vector,
};
use crate::database::FlightContext;
#[test]
fn test_column_to_vector() {
let mut column = create_test_column(Arc::new(BooleanVector::from(vec![true])));
@@ -262,4 +335,26 @@ mod tests {
datatype: wrapper.datatype() as i32,
}
}
#[test]
fn test_flight_ctx() {
let mut ctx = FlightContext::default();
assert!(ctx.auth_header.is_none());
let basic = AuthScheme::Basic(Basic {
username: "u".to_string(),
password: "p".to_string(),
});
ctx.auth_header = Some(AuthHeader {
auth_scheme: Some(basic),
});
assert!(matches!(
ctx.auth_header,
Some(AuthHeader {
auth_scheme: Some(AuthScheme::Basic(_)),
})
))
}
}
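Taken together, the `Database` changes route inserts through the new unary `GreptimeDatabase` service while SQL, PromQL, and logical-plan queries keep using the Flight path, and an optional auth header now rides along in every request header. A hypothetical usage sketch (not part of the patch; the endpoint and credentials are placeholders):

```rust
use api::v1::auth_header::AuthScheme;
use api::v1::Basic;
use client::{Client, Database, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};

#[tokio::main]
async fn main() {
    // Placeholder endpoint; point this at a frontend's gRPC port.
    let client = Client::with_urls(["127.0.0.1:4001"]);
    let mut db = Database::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, client);

    // The auth header is attached to every subsequent request.
    db.set_auth(AuthScheme::Basic(Basic {
        username: "u".to_string(),
        password: "p".to_string(),
    }));

    // Inserts go through the unary GreptimeDatabase service and return the
    // affected row count directly:
    //     let rows: u32 = db.insert(insert_request).await.unwrap();

    // Queries still travel over the Flight path and yield an Output.
    let _output = db.sql("SELECT 1").await.unwrap();
}
```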

View File

@@ -13,9 +13,10 @@
// limitations under the License.
use std::any::Any;
use std::str::FromStr;
use common_error::prelude::*;
use tonic::Code;
use tonic::{Code, Status};
#[derive(Debug, Snafu)]
#[snafu(visibility(pub))]
@@ -26,12 +27,7 @@ pub enum Error {
backtrace: Backtrace,
},
#[snafu(display(
"Failed to do Flight get, addr: {}, code: {}, source: {}",
addr,
tonic_code,
source
))]
#[snafu(display("Failed to do Flight get, code: {}, source: {}", tonic_code, source))]
FlightGet {
addr: String,
tonic_code: Code,
@@ -70,9 +66,12 @@ pub enum Error {
source: common_grpc::error::Error,
},
/// Error deserialized from gRPC metadata
// Server error carried in Tonic Status's metadata.
#[snafu(display("{}", msg))]
ExternalError { code: StatusCode, msg: String },
Server { code: StatusCode, msg: String },
#[snafu(display("Illegal Database response: {err_msg}"))]
IllegalDatabaseResponse { err_msg: String },
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -82,13 +81,15 @@ impl ErrorExt for Error {
match self {
Error::IllegalFlightMessages { .. }
| Error::ColumnDataType { .. }
| Error::MissingField { .. } => StatusCode::Internal,
| Error::MissingField { .. }
| Error::IllegalDatabaseResponse { .. } => StatusCode::Internal,
Error::Server { code, .. } => *code,
Error::FlightGet { source, .. } => source.status_code(),
Error::CreateChannel { source, .. } | Error::ConvertFlightData { source } => {
source.status_code()
}
Error::IllegalGrpcClientState { .. } => StatusCode::Unexpected,
Error::ExternalError { code, .. } => *code,
}
}
@@ -100,3 +101,21 @@ impl ErrorExt for Error {
self
}
}
impl From<Status> for Error {
fn from(e: Status) -> Self {
fn get_metadata_value(e: &Status, key: &str) -> Option<String> {
e.metadata()
.get(key)
.and_then(|v| String::from_utf8(v.as_bytes().to_vec()).ok())
}
let code = get_metadata_value(&e, INNER_ERROR_CODE)
.and_then(|s| StatusCode::from_str(&s).ok())
.unwrap_or(StatusCode::Unknown);
let msg = get_metadata_value(&e, INNER_ERROR_MSG).unwrap_or(e.to_string());
Self::Server { code, msg }
}
}
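The new `From<Status>` impl centralizes what the Flight code path previously did inline: the server's real status code and message are recovered from the Tonic metadata when present, falling back to `StatusCode::Unknown` and the Tonic message otherwise. A minimal sketch of the call-site effect (not part of the patch), reusing the same prelude import as the file above:

```rust
use common_error::prelude::*;
use tonic::Status;

use crate::error::Error; // the Error enum defined above

/// Folds any server-returned Status into the client Error; the embedded
/// server-side code (if present) then drives `status_code()`.
fn classify(status: Status) -> StatusCode {
    let err: Error = status.into();
    err.status_code()
}
```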

View File

@@ -18,6 +18,7 @@ mod error;
pub mod load_balance;
pub use api;
pub use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
pub use self::client::Client;
pub use self::database::Database;

View File

@@ -9,28 +9,46 @@ default-run = "greptime"
name = "greptime"
path = "src/bin/greptime.rs"
[features]
mem-prof = ["tikv-jemallocator", "tikv-jemalloc-ctl"]
[dependencies]
anymap = "1.0.0-beta.2"
catalog = { path = "../catalog" }
clap = { version = "3.1", features = ["derive"] }
client = { path = "../client" }
common-base = { path = "../common/base" }
common-error = { path = "../common/error" }
common-query = { path = "../common/query" }
common-recordbatch = { path = "../common/recordbatch" }
common-telemetry = { path = "../common/telemetry", features = [
"deadlock_detection",
] }
datanode = { path = "../datanode" }
either = "1.8"
frontend = { path = "../frontend" }
futures.workspace = true
meta-client = { path = "../meta-client" }
meta-srv = { path = "../meta-srv" }
nu-ansi-term = "0.46"
partition = { path = "../partition" }
query = { path = "../query" }
rustyline = "10.1"
serde.workspace = true
servers = { path = "../servers" }
session = { path = "../session" }
snafu.workspace = true
substrait = { path = "../common/substrait" }
tikv-jemalloc-ctl = { version = "0.5", optional = true }
tikv-jemallocator = { version = "0.5", optional = true }
tokio.workspace = true
toml = "0.5"
[dev-dependencies]
common-test-util = { path = "../common/test-util" }
rexpect = "0.5"
serde.workspace = true
tempdir = "0.3"
[build-dependencies]
build-data = "0.1.3"

View File

@@ -16,7 +16,7 @@ use std::fmt;
use clap::Parser;
use cmd::error::Result;
use cmd::{datanode, frontend, metasrv, standalone};
use cmd::{cli, datanode, frontend, metasrv, standalone};
use common_telemetry::logging::{error, info};
#[derive(Parser)]
@@ -30,9 +30,39 @@ struct Command {
subcmd: SubCommand,
}
pub enum Application {
Datanode(datanode::Instance),
Frontend(frontend::Instance),
Metasrv(metasrv::Instance),
Standalone(standalone::Instance),
Cli(cli::Instance),
}
impl Application {
async fn run(&mut self) -> Result<()> {
match self {
Application::Datanode(instance) => instance.run().await,
Application::Frontend(instance) => instance.run().await,
Application::Metasrv(instance) => instance.run().await,
Application::Standalone(instance) => instance.run().await,
Application::Cli(instance) => instance.run().await,
}
}
async fn stop(&self) -> Result<()> {
match self {
Application::Datanode(instance) => instance.stop().await,
Application::Frontend(instance) => instance.stop().await,
Application::Metasrv(instance) => instance.stop().await,
Application::Standalone(instance) => instance.stop().await,
Application::Cli(instance) => instance.stop().await,
}
}
}
impl Command {
async fn run(self) -> Result<()> {
self.subcmd.run().await
async fn build(self) -> Result<Application> {
self.subcmd.build().await
}
}
@@ -46,15 +76,33 @@ enum SubCommand {
Metasrv(metasrv::Command),
#[clap(name = "standalone")]
Standalone(standalone::Command),
#[clap(name = "cli")]
Cli(cli::Command),
}
impl SubCommand {
async fn run(self) -> Result<()> {
async fn build(self) -> Result<Application> {
match self {
SubCommand::Datanode(cmd) => cmd.run().await,
SubCommand::Frontend(cmd) => cmd.run().await,
SubCommand::Metasrv(cmd) => cmd.run().await,
SubCommand::Standalone(cmd) => cmd.run().await,
SubCommand::Datanode(cmd) => {
let app = cmd.build().await?;
Ok(Application::Datanode(app))
}
SubCommand::Frontend(cmd) => {
let app = cmd.build().await?;
Ok(Application::Frontend(app))
}
SubCommand::Metasrv(cmd) => {
let app = cmd.build().await?;
Ok(Application::Metasrv(app))
}
SubCommand::Standalone(cmd) => {
let app = cmd.build().await?;
Ok(Application::Standalone(app))
}
SubCommand::Cli(cmd) => {
let app = cmd.build().await?;
Ok(Application::Cli(app))
}
}
}
}
@@ -66,6 +114,7 @@ impl fmt::Display for SubCommand {
SubCommand::Frontend(..) => write!(f, "greptime-frontend"),
SubCommand::Metasrv(..) => write!(f, "greptime-metasrv"),
SubCommand::Standalone(..) => write!(f, "greptime-standalone"),
SubCommand::Cli(_) => write!(f, "greptime-cli"),
}
}
}
@@ -83,6 +132,10 @@ fn print_version() -> &'static str {
)
}
#[cfg(feature = "mem-prof")]
#[global_allocator]
static ALLOC: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc;
#[tokio::main]
async fn main() -> Result<()> {
let cmd = Command::parse();
@@ -96,13 +149,18 @@ async fn main() -> Result<()> {
common_telemetry::init_default_metrics_recorder();
let _guard = common_telemetry::init_global_logging(app_name, log_dir, log_level, false);
let mut app = cmd.build().await?;
tokio::select! {
result = cmd.run() => {
result = app.run() => {
if let Err(err) = result {
error!(err; "Fatal error occurs!");
}
}
_ = tokio::signal::ctrl_c() => {
if let Err(err) = app.stop().await {
error!(err; "Fatal error occurs!");
}
info!("Goodbye!");
}
}

src/cmd/src/cli.rs
View File

@@ -0,0 +1,78 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
mod cmd;
mod helper;
mod repl;
use clap::Parser;
pub use repl::Repl;
use crate::error::Result;
pub struct Instance {
repl: Repl,
}
impl Instance {
pub async fn run(&mut self) -> Result<()> {
self.repl.run().await
}
pub async fn stop(&self) -> Result<()> {
Ok(())
}
}
#[derive(Parser)]
pub struct Command {
#[clap(subcommand)]
cmd: SubCommand,
}
impl Command {
pub async fn build(self) -> Result<Instance> {
self.cmd.build().await
}
}
#[derive(Parser)]
enum SubCommand {
Attach(AttachCommand),
}
impl SubCommand {
async fn build(self) -> Result<Instance> {
match self {
SubCommand::Attach(cmd) => cmd.build().await,
}
}
}
#[derive(Debug, Parser)]
pub(crate) struct AttachCommand {
#[clap(long)]
pub(crate) grpc_addr: String,
#[clap(long)]
pub(crate) meta_addr: Option<String>,
#[clap(long, action)]
pub(crate) disable_helper: bool,
}
impl AttachCommand {
async fn build(self) -> Result<Instance> {
let repl = Repl::try_new(&self).await?;
Ok(Instance { repl })
}
}
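Under the clap derive shown here, the REPL would presumably be launched as `greptime cli attach --grpc-addr <frontend gRPC address>`, optionally with `--meta-addr <metasrv address>` to enable the embedded query engine and `--disable-helper` to turn off line-editing hints; the exact flag names follow clap's default kebab-case mapping of the struct fields above.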

src/cmd/src/cli/cmd.rs
View File

@@ -0,0 +1,154 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::error::{Error, InvalidReplCommandSnafu, Result};
/// Represents the parsed command from the user (which may span multiple lines)
#[derive(Debug, PartialEq)]
pub(crate) enum ReplCommand {
Help,
UseDatabase { db_name: String },
Sql { sql: String },
Exit,
}
impl TryFrom<&str> for ReplCommand {
type Error = Error;
fn try_from(input: &str) -> Result<Self> {
let input = input.trim();
if input.is_empty() {
return InvalidReplCommandSnafu {
reason: "No command specified".to_string(),
}
.fail();
}
// If line ends with ';', it must be treated as a complete input.
// However, the opposite is not true.
let input_is_completed = input.ends_with(';');
let input = input.strip_suffix(';').map(|x| x.trim()).unwrap_or(input);
let lowercase = input.to_lowercase();
match lowercase.as_str() {
"help" => Ok(Self::Help),
"exit" | "quit" => Ok(Self::Exit),
_ => match input.split_once(' ') {
Some((maybe_use, database)) if maybe_use.to_lowercase() == "use" => {
Ok(Self::UseDatabase {
db_name: database.trim().to_string(),
})
}
// Any valid SQL must contain at least one whitespace character.
Some(_) if input_is_completed => Ok(Self::Sql {
sql: input.to_string(),
}),
_ => InvalidReplCommandSnafu {
reason: format!("unknown command '{input}', maybe the input is incomplete"),
}
.fail(),
},
}
}
}
impl ReplCommand {
pub fn help() -> &'static str {
r#"
Available commands (case insensitive):
- 'help': print this help
- 'exit' or 'quit': exit the REPL
- 'use <your database name>': switch to another database/schema context
- Any other input is treated as SQL.
You can keep typing across new lines; just remember to end the statement with ';'.
"#
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::error::Error::InvalidReplCommand;
#[test]
fn test_from_str() {
fn test_ok(s: &str, expected: ReplCommand) {
let actual: ReplCommand = s.try_into().unwrap();
assert_eq!(expected, actual, "'{}'", s);
}
fn test_err(s: &str) {
let result: Result<ReplCommand> = s.try_into();
assert!(matches!(result, Err(InvalidReplCommand { .. })))
}
test_err("");
test_err(" ");
test_err("\t");
test_ok("help", ReplCommand::Help);
test_ok("help", ReplCommand::Help);
test_ok(" help", ReplCommand::Help);
test_ok(" help ", ReplCommand::Help);
test_ok(" HELP ", ReplCommand::Help);
test_ok(" Help; ", ReplCommand::Help);
test_ok(" help ; ", ReplCommand::Help);
test_ok("exit", ReplCommand::Exit);
test_ok("exit;", ReplCommand::Exit);
test_ok("exit ;", ReplCommand::Exit);
test_ok("EXIT", ReplCommand::Exit);
test_ok("quit", ReplCommand::Exit);
test_ok("quit;", ReplCommand::Exit);
test_ok("quit ;", ReplCommand::Exit);
test_ok("QUIT", ReplCommand::Exit);
test_ok(
"use Foo",
ReplCommand::UseDatabase {
db_name: "Foo".to_string(),
},
);
test_ok(
" use Foo ; ",
ReplCommand::UseDatabase {
db_name: "Foo".to_string(),
},
);
// ensure that database name is case sensitive
test_ok(
" use FOO ; ",
ReplCommand::UseDatabase {
db_name: "FOO".to_string(),
},
);
// ensure that we aren't messing with capitalization
test_ok(
"SELECT * from foo;",
ReplCommand::Sql {
sql: "SELECT * from foo".to_string(),
},
);
// Input lines that don't match any of the cases above must end with ';' to be treated as valid SQL.
test_err("insert blah");
test_ok(
"insert blah;",
ReplCommand::Sql {
sql: "insert blah".to_string(),
},
);
}
}

src/cmd/src/cli/helper.rs
View File

@@ -0,0 +1,112 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::borrow::Cow;
use rustyline::completion::Completer;
use rustyline::highlight::{Highlighter, MatchingBracketHighlighter};
use rustyline::hint::{Hinter, HistoryHinter};
use rustyline::validate::{ValidationContext, ValidationResult, Validator};
use crate::cli::cmd::ReplCommand;
pub(crate) struct RustylineHelper {
hinter: HistoryHinter,
highlighter: MatchingBracketHighlighter,
}
impl Default for RustylineHelper {
fn default() -> Self {
Self {
hinter: HistoryHinter {},
highlighter: MatchingBracketHighlighter::default(),
}
}
}
impl rustyline::Helper for RustylineHelper {}
impl Validator for RustylineHelper {
fn validate(&self, ctx: &mut ValidationContext<'_>) -> rustyline::Result<ValidationResult> {
let input = ctx.input();
match ReplCommand::try_from(input) {
Ok(_) => Ok(ValidationResult::Valid(None)),
Err(e) => {
if input.trim_end().ends_with(';') {
// If line ends with ';', it HAS to be a valid command.
Ok(ValidationResult::Invalid(Some(e.to_string())))
} else {
Ok(ValidationResult::Incomplete)
}
}
}
}
}
impl Hinter for RustylineHelper {
type Hint = String;
fn hint(&self, line: &str, pos: usize, ctx: &rustyline::Context<'_>) -> Option<Self::Hint> {
self.hinter.hint(line, pos, ctx)
}
}
impl Highlighter for RustylineHelper {
fn highlight<'l>(&self, line: &'l str, pos: usize) -> Cow<'l, str> {
self.highlighter.highlight(line, pos)
}
fn highlight_prompt<'b, 's: 'b, 'p: 'b>(
&'s self,
prompt: &'p str,
default: bool,
) -> Cow<'b, str> {
self.highlighter.highlight_prompt(prompt, default)
}
fn highlight_hint<'h>(&self, hint: &'h str) -> Cow<'h, str> {
use nu_ansi_term::Style;
Cow::Owned(Style::new().dimmed().paint(hint).to_string())
}
fn highlight_candidate<'c>(
&self,
candidate: &'c str,
completion: rustyline::CompletionType,
) -> Cow<'c, str> {
self.highlighter.highlight_candidate(candidate, completion)
}
fn highlight_char(&self, line: &str, pos: usize) -> bool {
self.highlighter.highlight_char(line, pos)
}
}
impl Completer for RustylineHelper {
type Candidate = String;
fn complete(
&self,
line: &str,
pos: usize,
ctx: &rustyline::Context<'_>,
) -> rustyline::Result<(usize, Vec<Self::Candidate>)> {
// If there is a hint, use it as the auto-completion when the user hits `tab`
if let Some(hint) = self.hinter.hint(line, pos, ctx) {
Ok((pos, vec![hint]))
} else {
Ok((0, vec![]))
}
}
}

src/cmd/src/cli/repl.rs
View File

@@ -0,0 +1,273 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::path::PathBuf;
use std::sync::Arc;
use std::time::Instant;
use catalog::remote::MetaKvBackend;
use client::{Client, Database, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_error::prelude::ErrorExt;
use common_query::Output;
use common_recordbatch::RecordBatches;
use common_telemetry::logging;
use either::Either;
use frontend::catalog::FrontendCatalogManager;
use frontend::datanode::DatanodeClients;
use meta_client::client::MetaClientBuilder;
use partition::manager::PartitionRuleManager;
use partition::route::TableRoutes;
use query::datafusion::DatafusionQueryEngine;
use query::logical_optimizer::LogicalOptimizer;
use query::parser::QueryLanguageParser;
use query::plan::LogicalPlan;
use query::query_engine::QueryEngineState;
use query::QueryEngine;
use rustyline::error::ReadlineError;
use rustyline::Editor;
use session::context::QueryContext;
use snafu::{ErrorCompat, ResultExt};
use substrait::{DFLogicalSubstraitConvertor, SubstraitPlan};
use crate::cli::cmd::ReplCommand;
use crate::cli::helper::RustylineHelper;
use crate::cli::AttachCommand;
use crate::error::{
CollectRecordBatchesSnafu, ParseSqlSnafu, PlanStatementSnafu, PrettyPrintRecordBatchesSnafu,
ReadlineSnafu, ReplCreationSnafu, RequestDatabaseSnafu, Result, StartMetaClientSnafu,
SubstraitEncodeLogicalPlanSnafu,
};
/// Captures the state of the repl, gathers commands and executes them one by one
pub struct Repl {
/// Rustyline editor for interacting with user on command line
rl: Editor<RustylineHelper>,
/// Current prompt
prompt: String,
/// Client for interacting with GreptimeDB
database: Database,
query_engine: Option<DatafusionQueryEngine>,
}
#[allow(clippy::print_stdout)]
impl Repl {
fn print_help(&self) {
println!("{}", ReplCommand::help())
}
pub(crate) async fn try_new(cmd: &AttachCommand) -> Result<Self> {
let mut rl = Editor::new().context(ReplCreationSnafu)?;
if !cmd.disable_helper {
rl.set_helper(Some(RustylineHelper::default()));
let history_file = history_file();
if let Err(e) = rl.load_history(&history_file) {
logging::debug!(
"failed to load history file on {}, error: {e}",
history_file.display()
);
}
}
let client = Client::with_urls([&cmd.grpc_addr]);
let database = Database::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, client);
let query_engine = if let Some(meta_addr) = &cmd.meta_addr {
create_query_engine(meta_addr).await.map(Some)?
} else {
None
};
Ok(Self {
rl,
prompt: "> ".to_string(),
database,
query_engine,
})
}
/// Parse the next command
fn next_command(&mut self) -> Result<ReplCommand> {
match self.rl.readline(&self.prompt) {
Ok(ref line) => {
let request = line.trim();
self.rl.add_history_entry(request.to_string());
request.try_into()
}
Err(ReadlineError::Eof) | Err(ReadlineError::Interrupted) => Ok(ReplCommand::Exit),
// Some sort of real underlying error
Err(e) => Err(e).context(ReadlineSnafu),
}
}
/// Read Evaluate Print Loop (interactive command line) for GreptimeDB
///
/// Inspired / based on repl.rs from InfluxDB IOX
pub(crate) async fn run(&mut self) -> Result<()> {
println!("Ready for commands. (Hint: try 'help')");
loop {
match self.next_command()? {
ReplCommand::Help => {
self.print_help();
}
ReplCommand::UseDatabase { db_name } => {
if self.execute_sql(format!("USE {db_name}")).await {
println!("Using {db_name}");
self.database.set_schema(&db_name);
self.prompt = format!("[{db_name}] > ");
}
}
ReplCommand::Sql { sql } => {
self.execute_sql(sql).await;
}
ReplCommand::Exit => {
return Ok(());
}
}
}
}
async fn execute_sql(&self, sql: String) -> bool {
self.do_execute_sql(sql)
.await
.map_err(|e| {
let status_code = e.status_code();
let root_cause = e.iter_chain().last().unwrap();
println!("Error: {}({status_code}), {root_cause}", status_code as u32)
})
.is_ok()
}
async fn do_execute_sql(&self, sql: String) -> Result<()> {
let start = Instant::now();
let output = if let Some(query_engine) = &self.query_engine {
let stmt = QueryLanguageParser::parse_sql(&sql)
.with_context(|_| ParseSqlSnafu { sql: sql.clone() })?;
let query_ctx = Arc::new(QueryContext::with(
self.database.catalog(),
self.database.schema(),
));
let plan = query_engine
.planner()
.plan(stmt, query_ctx)
.await
.context(PlanStatementSnafu)?;
let LogicalPlan::DfPlan(plan) =
query_engine.optimize(&plan).context(PlanStatementSnafu)?;
let plan = DFLogicalSubstraitConvertor {}
.encode(plan)
.context(SubstraitEncodeLogicalPlanSnafu)?;
self.database.logical_plan(plan.to_vec()).await
} else {
self.database.sql(&sql).await
}
.context(RequestDatabaseSnafu { sql: &sql })?;
let either = match output {
Output::Stream(s) => {
let x = RecordBatches::try_collect(s)
.await
.context(CollectRecordBatchesSnafu)?;
Either::Left(x)
}
Output::RecordBatches(x) => Either::Left(x),
Output::AffectedRows(rows) => Either::Right(rows),
};
let end = Instant::now();
match either {
Either::Left(recordbatches) => {
let total_rows: usize = recordbatches.iter().map(|x| x.num_rows()).sum();
if total_rows > 0 {
println!(
"{}",
recordbatches
.pretty_print()
.context(PrettyPrintRecordBatchesSnafu)?
);
}
println!("Total Rows: {total_rows}")
}
Either::Right(rows) => println!("Affected Rows: {rows}"),
};
println!("Cost {} ms", (end - start).as_millis());
Ok(())
}
}
impl Drop for Repl {
fn drop(&mut self) {
if self.rl.helper().is_some() {
let history_file = history_file();
if let Err(e) = self.rl.save_history(&history_file) {
logging::debug!(
"failed to save history file on {}, error: {e}",
history_file.display()
);
}
}
}
}
/// Return the location of the history file (defaults to $HOME/".greptimedb_cli_history")
fn history_file() -> PathBuf {
let mut buf = match std::env::var("HOME") {
Ok(home) => PathBuf::from(home),
Err(_) => PathBuf::new(),
};
buf.push(".greptimedb_cli_history");
buf
}
async fn create_query_engine(meta_addr: &str) -> Result<DatafusionQueryEngine> {
let mut meta_client = MetaClientBuilder::default().enable_store().build();
meta_client
.start([meta_addr])
.await
.context(StartMetaClientSnafu)?;
let meta_client = Arc::new(meta_client);
let backend = Arc::new(MetaKvBackend {
client: meta_client.clone(),
});
let table_routes = Arc::new(TableRoutes::new(meta_client));
let partition_manager = Arc::new(PartitionRuleManager::new(table_routes));
let datanode_clients = Arc::new(DatanodeClients::default());
let catalog_list = Arc::new(FrontendCatalogManager::new(
backend,
partition_manager,
datanode_clients,
));
let state = Arc::new(QueryEngineState::new(catalog_list, Default::default()));
Ok(DatafusionQueryEngine::new(state))
}

View File

@@ -14,14 +14,33 @@
use clap::Parser;
use common_telemetry::logging;
use datanode::datanode::{Datanode, DatanodeOptions, FileConfig, ObjectStoreConfig};
use meta_client::MetaClientOpts;
use datanode::datanode::{
Datanode, DatanodeOptions, FileConfig, ObjectStoreConfig, ProcedureConfig,
};
use meta_client::MetaClientOptions;
use servers::Mode;
use snafu::ResultExt;
use crate::error::{Error, MissingConfigSnafu, Result, StartDatanodeSnafu};
use crate::error::{Error, MissingConfigSnafu, Result, ShutdownDatanodeSnafu, StartDatanodeSnafu};
use crate::toml_loader;
pub struct Instance {
datanode: Datanode,
}
impl Instance {
pub async fn run(&mut self) -> Result<()> {
self.datanode.start().await.context(StartDatanodeSnafu)
}
pub async fn stop(&self) -> Result<()> {
self.datanode
.shutdown()
.await
.context(ShutdownDatanodeSnafu)
}
}
#[derive(Parser)]
pub struct Command {
#[clap(subcommand)]
@@ -29,8 +48,8 @@ pub struct Command {
}
impl Command {
pub async fn run(self) -> Result<()> {
self.subcmd.run().await
pub async fn build(self) -> Result<Instance> {
self.subcmd.build().await
}
}
@@ -40,9 +59,9 @@ enum SubCommand {
}
impl SubCommand {
async fn run(self) -> Result<()> {
async fn build(self) -> Result<Instance> {
match self {
SubCommand::Start(cmd) => cmd.run().await,
SubCommand::Start(cmd) => cmd.build().await,
}
}
}
@@ -65,22 +84,21 @@ struct StartCommand {
data_dir: Option<String>,
#[clap(long)]
wal_dir: Option<String>,
#[clap(long)]
procedure_dir: Option<String>,
}
impl StartCommand {
async fn run(self) -> Result<()> {
async fn build(self) -> Result<Instance> {
logging::info!("Datanode start command: {:#?}", self);
let opts: DatanodeOptions = self.try_into()?;
logging::info!("Datanode options: {:#?}", opts);
Datanode::new(opts)
.await
.context(StartDatanodeSnafu)?
.start()
.await
.context(StartDatanodeSnafu)
let datanode = Datanode::new(opts).await.context(StartDatanodeSnafu)?;
Ok(Instance { datanode })
}
}
@@ -110,8 +128,8 @@ impl TryFrom<StartCommand> for DatanodeOptions {
}
if let Some(meta_addr) = cmd.metasrv_addr {
opts.meta_client_opts
.get_or_insert_with(MetaClientOpts::default)
opts.meta_client_options
.get_or_insert_with(MetaClientOptions::default)
.metasrv_addrs = meta_addr
.split(',')
.map(&str::trim)
@@ -134,6 +152,10 @@ impl TryFrom<StartCommand> for DatanodeOptions {
if let Some(wal_dir) = cmd.wal_dir {
opts.wal.dir = wal_dir;
}
if let Some(procedure_dir) = cmd.procedure_dir {
opts.procedure = Some(ProcedureConfig::from_file_path(procedure_dir));
}
Ok(opts)
}
}
@@ -141,38 +163,79 @@ impl TryFrom<StartCommand> for DatanodeOptions {
#[cfg(test)]
mod tests {
use std::assert_matches::assert_matches;
use std::io::Write;
use std::time::Duration;
use datanode::datanode::ObjectStoreConfig;
use common_test_util::temp_dir::create_named_temp_file;
use datanode::datanode::{CompactionConfig, ObjectStoreConfig};
use servers::Mode;
use super::*;
#[test]
fn test_read_from_config_file() {
let mut file = create_named_temp_file();
let toml_str = r#"
mode = "distributed"
enable_memory_catalog = false
node_id = 42
rpc_addr = "127.0.0.1:3001"
rpc_hostname = "127.0.0.1"
rpc_runtime_size = 8
mysql_addr = "127.0.0.1:4406"
mysql_runtime_size = 2
[meta_client_options]
metasrv_addrs = ["127.0.0.1:3002"]
timeout_millis = 3000
connect_timeout_millis = 5000
tcp_nodelay = true
[wal]
dir = "/tmp/greptimedb/wal"
file_size = "1GB"
purge_threshold = "50GB"
purge_interval = "10m"
read_batch_size = 128
sync_write = false
[storage]
type = "File"
data_dir = "/tmp/greptimedb/data/"
[compaction]
max_inflight_tasks = 4
max_files_in_level0 = 8
max_purge_tasks = 32
"#;
write!(file, "{}", toml_str).unwrap();
let cmd = StartCommand {
config_file: Some(format!(
"{}/../../config/datanode.example.toml",
std::env::current_dir().unwrap().as_path().to_str().unwrap()
)),
config_file: Some(file.path().to_str().unwrap().to_string()),
..Default::default()
};
let options: DatanodeOptions = cmd.try_into().unwrap();
assert_eq!("127.0.0.1:3001".to_string(), options.rpc_addr);
assert_eq!("/tmp/greptimedb/wal".to_string(), options.wal.dir);
assert_eq!("127.0.0.1:4406".to_string(), options.mysql_addr);
assert_eq!(4, options.mysql_runtime_size);
let MetaClientOpts {
assert_eq!(2, options.mysql_runtime_size);
assert_eq!(Some(42), options.node_id);
assert_eq!(Duration::from_secs(600), options.wal.purge_interval);
assert_eq!(1024 * 1024 * 1024, options.wal.file_size.0);
assert_eq!(1024 * 1024 * 1024 * 50, options.wal.purge_threshold.0);
assert!(!options.wal.sync_write);
let MetaClientOptions {
metasrv_addrs: metasrv_addr,
timeout_millis,
connect_timeout_millis,
tcp_nodelay,
} = options.meta_client_opts.unwrap();
} = options.meta_client_options.unwrap();
assert_eq!(vec!["127.0.0.1:3002".to_string()], metasrv_addr);
assert_eq!(5000, connect_timeout_millis);
assert_eq!(3000, timeout_millis);
assert!(!tcp_nodelay);
assert!(tcp_nodelay);
match options.storage {
ObjectStoreConfig::File(FileConfig { data_dir }) => {
@@ -181,6 +244,15 @@ mod tests {
ObjectStoreConfig::S3 { .. } => unreachable!(),
ObjectStoreConfig::Oss { .. } => unreachable!(),
};
assert_eq!(
CompactionConfig {
max_inflight_tasks: 4,
max_files_in_level0: 8,
max_purge_tasks: 32,
},
options.compaction
);
}
#[test]
@@ -214,32 +286,4 @@ mod tests {
})
.unwrap();
}
#[test]
fn test_merge_config() {
let dn_opts = DatanodeOptions::try_from(StartCommand {
config_file: Some(format!(
"{}/../../config/datanode.example.toml",
std::env::current_dir().unwrap().as_path().to_str().unwrap()
)),
..Default::default()
})
.unwrap();
assert_eq!("/tmp/greptimedb/wal", dn_opts.wal.dir);
assert_eq!(Duration::from_secs(600), dn_opts.wal.purge_interval);
assert_eq!(1024 * 1024 * 1024, dn_opts.wal.file_size.0);
assert_eq!(1024 * 1024 * 1024 * 50, dn_opts.wal.purge_threshold.0);
assert!(!dn_opts.wal.sync_write);
assert_eq!(Some(42), dn_opts.node_id);
let MetaClientOpts {
metasrv_addrs: metasrv_addr,
timeout_millis,
connect_timeout_millis,
tcp_nodelay,
} = dn_opts.meta_client_opts.unwrap();
assert_eq!(vec!["127.0.0.1:3002".to_string()], metasrv_addr);
assert_eq!(3000, timeout_millis);
assert_eq!(5000, connect_timeout_millis);
assert!(!tcp_nodelay);
}
}
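A rough sketch of how a caller can drive the new build()/run()/stop() split; whether run() returns right after starting the services or only on shutdown depends on the server implementation, so treat this as the shape of the API, not the actual main():

async fn drive_datanode(cmd: Command) -> Result<()> {
    // Build first so configuration errors surface before anything is started.
    let mut instance = cmd.build().await?;
    instance.run().await?;
    // ... later, once a shutdown signal has been observed ...
    instance.stop().await?;
    Ok(())
}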

View File

@@ -15,6 +15,7 @@
use std::any::Any;
use common_error::prelude::*;
use rustyline::error::ReadlineError;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub))]
@@ -25,18 +26,42 @@ pub enum Error {
source: datanode::error::Error,
},
#[snafu(display("Failed to shutdown datanode, source: {}", source))]
ShutdownDatanode {
#[snafu(backtrace)]
source: datanode::error::Error,
},
#[snafu(display("Failed to start frontend, source: {}", source))]
StartFrontend {
#[snafu(backtrace)]
source: frontend::error::Error,
},
#[snafu(display("Failed to shutdown frontend, source: {}", source))]
ShutdownFrontend {
#[snafu(backtrace)]
source: frontend::error::Error,
},
#[snafu(display("Failed to build meta server, source: {}", source))]
BuildMetaServer {
#[snafu(backtrace)]
source: meta_srv::error::Error,
},
#[snafu(display("Failed to start meta server, source: {}", source))]
StartMetaServer {
#[snafu(backtrace)]
source: meta_srv::error::Error,
},
#[snafu(display("Failed to shutdown meta server, source: {}", source))]
ShutdownMetaServer {
#[snafu(backtrace)]
source: meta_srv::error::Error,
},
#[snafu(display("Failed to read config file: {}, source: {}", path, source))]
ReadConfig {
path: String,
@@ -68,6 +93,65 @@ pub enum Error {
#[snafu(backtrace)]
source: meta_srv::error::Error,
},
#[snafu(display("Invalid REPL command: {reason}"))]
InvalidReplCommand { reason: String },
#[snafu(display("Cannot create REPL: {}", source))]
ReplCreation {
source: ReadlineError,
backtrace: Backtrace,
},
#[snafu(display("Error reading command: {}", source))]
Readline {
source: ReadlineError,
backtrace: Backtrace,
},
#[snafu(display("Failed to request database, sql: {sql}, source: {source}"))]
RequestDatabase {
sql: String,
#[snafu(backtrace)]
source: client::Error,
},
#[snafu(display("Failed to collect RecordBatches, source: {source}"))]
CollectRecordBatches {
#[snafu(backtrace)]
source: common_recordbatch::error::Error,
},
#[snafu(display("Failed to pretty print Recordbatches, source: {source}"))]
PrettyPrintRecordBatches {
#[snafu(backtrace)]
source: common_recordbatch::error::Error,
},
#[snafu(display("Failed to start Meta client, source: {}", source))]
StartMetaClient {
#[snafu(backtrace)]
source: meta_client::error::Error,
},
#[snafu(display("Failed to parse SQL: {}, source: {}", sql, source))]
ParseSql {
sql: String,
#[snafu(backtrace)]
source: query::error::Error,
},
#[snafu(display("Failed to plan statement, source: {}", source))]
PlanStatement {
#[snafu(backtrace)]
source: query::error::Error,
},
#[snafu(display("Failed to encode logical plan in substrait, source: {}", source))]
SubstraitEncodeLogicalPlan {
#[snafu(backtrace)]
source: substrait::error::Error,
},
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -77,13 +161,29 @@ impl ErrorExt for Error {
match self {
Error::StartDatanode { source } => source.status_code(),
Error::StartFrontend { source } => source.status_code(),
Error::ShutdownDatanode { source } => source.status_code(),
Error::ShutdownFrontend { source } => source.status_code(),
Error::StartMetaServer { source } => source.status_code(),
Error::ShutdownMetaServer { source } => source.status_code(),
Error::BuildMetaServer { source } => source.status_code(),
Error::UnsupportedSelectorType { source, .. } => source.status_code(),
Error::ReadConfig { .. } | Error::ParseConfig { .. } | Error::MissingConfig { .. } => {
StatusCode::InvalidArguments
}
Error::IllegalConfig { .. } => StatusCode::InvalidArguments,
Error::IllegalConfig { .. } | Error::InvalidReplCommand { .. } => {
StatusCode::InvalidArguments
}
Error::IllegalAuthConfig { .. } => StatusCode::InvalidArguments,
Error::ReplCreation { .. } | Error::Readline { .. } => StatusCode::Internal,
Error::RequestDatabase { source, .. } => source.status_code(),
Error::CollectRecordBatches { source } | Error::PrettyPrintRecordBatches { source } => {
source.status_code()
}
Error::StartMetaClient { source } => source.status_code(),
Error::ParseSql { source, .. } | Error::PlanStatement { source } => {
source.status_code()
}
Error::SubstraitEncodeLogicalPlan { source } => source.status_code(),
}
}

View File

@@ -16,14 +16,15 @@ use std::sync::Arc;
use clap::Parser;
use common_base::Plugins;
use frontend::frontend::{Frontend, FrontendOptions};
use frontend::frontend::FrontendOptions;
use frontend::grpc::GrpcOptions;
use frontend::influxdb::InfluxdbOptions;
use frontend::instance::Instance;
use frontend::instance::{FrontendInstance, Instance as FeInstance};
use frontend::mysql::MysqlOptions;
use frontend::opentsdb::OpentsdbOptions;
use frontend::postgres::PostgresOptions;
use meta_client::MetaClientOpts;
use frontend::prom::PromOptions;
use meta_client::MetaClientOptions;
use servers::auth::UserProviderRef;
use servers::http::HttpOptions;
use servers::tls::{TlsMode, TlsOption};
@@ -33,6 +34,26 @@ use snafu::ResultExt;
use crate::error::{self, IllegalAuthConfigSnafu, Result};
use crate::toml_loader;
pub struct Instance {
frontend: FeInstance,
}
impl Instance {
pub async fn run(&mut self) -> Result<()> {
self.frontend
.start()
.await
.context(error::StartFrontendSnafu)
}
pub async fn stop(&self) -> Result<()> {
self.frontend
.shutdown()
.await
.context(error::ShutdownFrontendSnafu)
}
}
#[derive(Parser)]
pub struct Command {
#[clap(subcommand)]
@@ -40,8 +61,8 @@ pub struct Command {
}
impl Command {
pub async fn run(self) -> Result<()> {
self.subcmd.run().await
pub async fn build(self) -> Result<Instance> {
self.subcmd.build().await
}
}
@@ -51,9 +72,9 @@ enum SubCommand {
}
impl SubCommand {
async fn run(self) -> Result<()> {
async fn build(self) -> Result<Instance> {
match self {
SubCommand::Start(cmd) => cmd.run().await,
SubCommand::Start(cmd) => cmd.build().await,
}
}
}
@@ -67,6 +88,8 @@ pub struct StartCommand {
#[clap(long)]
mysql_addr: Option<String>,
#[clap(long)]
prom_addr: Option<String>,
#[clap(long)]
postgres_addr: Option<String>,
#[clap(long)]
opentsdb_addr: Option<String>,
@@ -87,16 +110,20 @@ pub struct StartCommand {
}
impl StartCommand {
async fn run(self) -> Result<()> {
async fn build(self) -> Result<Instance> {
let plugins = Arc::new(load_frontend_plugins(&self.user_provider)?);
let opts: FrontendOptions = self.try_into()?;
let instance = Instance::try_new_distributed(&opts, plugins.clone())
let mut instance = FeInstance::try_new_distributed(&opts, plugins.clone())
.await
.context(error::StartFrontendSnafu)?;
let mut frontend = Frontend::new(opts, instance, plugins);
frontend.start().await.context(error::StartFrontendSnafu)
instance
.build_servers(&opts, plugins)
.await
.context(error::StartFrontendSnafu)?;
Ok(Instance { frontend: instance })
}
}
@@ -141,6 +168,9 @@ impl TryFrom<StartCommand> for FrontendOptions {
..Default::default()
});
}
if let Some(addr) = cmd.prom_addr {
opts.prom_options = Some(PromOptions { addr });
}
if let Some(addr) = cmd.postgres_addr {
opts.postgres_options = Some(PostgresOptions {
addr,
@@ -158,8 +188,8 @@ impl TryFrom<StartCommand> for FrontendOptions {
opts.influxdb_options = Some(InfluxdbOptions { enable });
}
if let Some(metasrv_addr) = cmd.metasrv_addr {
opts.meta_client_opts
.get_or_insert_with(MetaClientOpts::default)
opts.meta_client_options
.get_or_insert_with(MetaClientOptions::default)
.metasrv_addrs = metasrv_addr
.split(',')
.map(&str::trim)
@@ -173,8 +203,10 @@ impl TryFrom<StartCommand> for FrontendOptions {
#[cfg(test)]
mod tests {
use std::io::Write;
use std::time::Duration;
use common_test_util::temp_dir::create_named_temp_file;
use servers::auth::{Identity, Password, UserProviderRef};
use super::*;
@@ -184,6 +216,7 @@ mod tests {
let command = StartCommand {
http_addr: Some("127.0.0.1:1234".to_string()),
grpc_addr: None,
prom_addr: Some("127.0.0.1:4444".to_string()),
mysql_addr: Some("127.0.0.1:5678".to_string()),
postgres_addr: Some("127.0.0.1:5432".to_string()),
opentsdb_addr: Some("127.0.0.1:4321".to_string()),
@@ -207,6 +240,7 @@ mod tests {
opts.opentsdb_options.as_ref().unwrap().addr,
"127.0.0.1:4321"
);
assert_eq!(opts.prom_options.as_ref().unwrap().addr, "127.0.0.1:4444");
let default_opts = FrontendOptions::default();
assert_eq!(
@@ -231,17 +265,25 @@ mod tests {
#[test]
fn test_read_from_config_file() {
let mut file = create_named_temp_file();
let toml_str = r#"
mode = "distributed"
[http_options]
addr = "127.0.0.1:4000"
timeout = "30s"
"#;
write!(file, "{}", toml_str).unwrap();
let command = StartCommand {
http_addr: None,
grpc_addr: None,
mysql_addr: None,
prom_addr: None,
postgres_addr: None,
opentsdb_addr: None,
influxdb_enable: None,
config_file: Some(format!(
"{}/../../config/frontend.example.toml",
std::env::current_dir().unwrap().as_path().to_str().unwrap()
)),
config_file: Some(file.path().to_str().unwrap().to_string()),
metasrv_addr: None,
tls_mode: None,
tls_cert_path: None,
@@ -267,6 +309,7 @@ mod tests {
http_addr: None,
grpc_addr: None,
mysql_addr: None,
prom_addr: None,
postgres_addr: None,
opentsdb_addr: None,
influxdb_enable: None,

View File

@@ -14,6 +14,7 @@
#![feature(assert_matches)]
pub mod cli;
pub mod datanode;
pub mod error;
pub mod frontend;

View File

@@ -14,13 +14,33 @@
use clap::Parser;
use common_telemetry::{info, logging, warn};
use meta_srv::bootstrap;
use meta_srv::bootstrap::MetaSrvInstance;
use meta_srv::metasrv::MetaSrvOptions;
use snafu::ResultExt;
use crate::error::{Error, Result};
use crate::{error, toml_loader};
pub struct Instance {
instance: MetaSrvInstance,
}
impl Instance {
pub async fn run(&mut self) -> Result<()> {
self.instance
.start()
.await
.context(error::StartMetaServerSnafu)
}
pub async fn stop(&self) -> Result<()> {
self.instance
.shutdown()
.await
.context(error::ShutdownMetaServerSnafu)
}
}
#[derive(Parser)]
pub struct Command {
#[clap(subcommand)]
@@ -28,8 +48,8 @@ pub struct Command {
}
impl Command {
pub async fn run(self) -> Result<()> {
self.subcmd.run().await
pub async fn build(self) -> Result<Instance> {
self.subcmd.build().await
}
}
@@ -39,9 +59,9 @@ enum SubCommand {
}
impl SubCommand {
async fn run(self) -> Result<()> {
async fn build(self) -> Result<Instance> {
match self {
SubCommand::Start(cmd) => cmd.run().await,
SubCommand::Start(cmd) => cmd.build().await,
}
}
}
@@ -63,16 +83,17 @@ struct StartCommand {
}
impl StartCommand {
async fn run(self) -> Result<()> {
async fn build(self) -> Result<Instance> {
logging::info!("MetaSrv start command: {:#?}", self);
let opts: MetaSrvOptions = self.try_into()?;
logging::info!("MetaSrv options: {:#?}", opts);
bootstrap::bootstrap_meta_srv(opts)
let instance = MetaSrvInstance::new(opts)
.await
.context(error::StartMetaServerSnafu)
.context(error::BuildMetaServerSnafu)?;
Ok(Instance { instance })
}
}
@@ -113,6 +134,9 @@ impl TryFrom<StartCommand> for MetaSrvOptions {
#[cfg(test)]
mod tests {
use std::io::Write;
use common_test_util::temp_dir::create_named_temp_file;
use meta_srv::selector::SelectorType;
use super::*;
@@ -136,15 +160,23 @@ mod tests {
#[test]
fn test_read_from_config_file() {
let mut file = create_named_temp_file();
let toml_str = r#"
bind_addr = "127.0.0.1:3002"
server_addr = "127.0.0.1:3002"
store_addr = "127.0.0.1:2379"
datanode_lease_secs = 15
selector = "LeaseBased"
use_memory_store = false
"#;
write!(file, "{}", toml_str).unwrap();
let cmd = StartCommand {
bind_addr: None,
server_addr: None,
store_addr: None,
selector: None,
config_file: Some(format!(
"{}/../../config/metasrv.example.toml",
std::env::current_dir().unwrap().as_path().to_str().unwrap()
)),
config_file: Some(file.path().to_str().unwrap().to_string()),
use_memory_store: false,
};
let options: MetaSrvOptions = cmd.try_into().unwrap();

View File

@@ -17,24 +17,29 @@ use std::sync::Arc;
use clap::Parser;
use common_base::Plugins;
use common_telemetry::info;
use datanode::datanode::{Datanode, DatanodeOptions, ObjectStoreConfig, WalConfig};
use datanode::datanode::{
CompactionConfig, Datanode, DatanodeOptions, ObjectStoreConfig, ProcedureConfig, WalConfig,
};
use datanode::instance::InstanceRef;
use frontend::frontend::{Frontend, FrontendOptions};
use frontend::frontend::FrontendOptions;
use frontend::grpc::GrpcOptions;
use frontend::influxdb::InfluxdbOptions;
use frontend::instance::Instance as FeInstance;
use frontend::instance::{FrontendInstance, Instance as FeInstance};
use frontend::mysql::MysqlOptions;
use frontend::opentsdb::OpentsdbOptions;
use frontend::postgres::PostgresOptions;
use frontend::prom::PromOptions;
use frontend::prometheus::PrometheusOptions;
use frontend::promql::PromqlOptions;
use serde::{Deserialize, Serialize};
use servers::http::HttpOptions;
use servers::tls::{TlsMode, TlsOption};
use servers::Mode;
use snafu::ResultExt;
use crate::error::{Error, IllegalConfigSnafu, Result, StartDatanodeSnafu, StartFrontendSnafu};
use crate::error::{
Error, IllegalConfigSnafu, Result, ShutdownDatanodeSnafu, ShutdownFrontendSnafu,
StartDatanodeSnafu, StartFrontendSnafu,
};
use crate::frontend::load_frontend_plugins;
use crate::toml_loader;
@@ -45,8 +50,8 @@ pub struct Command {
}
impl Command {
pub async fn run(self) -> Result<()> {
self.subcmd.run().await
pub async fn build(self) -> Result<Instance> {
self.subcmd.build().await
}
}
@@ -56,9 +61,9 @@ enum SubCommand {
}
impl SubCommand {
async fn run(self) -> Result<()> {
async fn build(self) -> Result<Instance> {
match self {
SubCommand::Start(cmd) => cmd.run().await,
SubCommand::Start(cmd) => cmd.build().await,
}
}
}
@@ -66,6 +71,8 @@ impl SubCommand {
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(default)]
pub struct StandaloneOptions {
pub mode: Mode,
pub enable_memory_catalog: bool,
pub http_options: Option<HttpOptions>,
pub grpc_options: Option<GrpcOptions>,
pub mysql_options: Option<MysqlOptions>,
@@ -73,16 +80,18 @@ pub struct StandaloneOptions {
pub opentsdb_options: Option<OpentsdbOptions>,
pub influxdb_options: Option<InfluxdbOptions>,
pub prometheus_options: Option<PrometheusOptions>,
pub promql_options: Option<PromqlOptions>,
pub mode: Mode,
pub prom_options: Option<PromOptions>,
pub wal: WalConfig,
pub storage: ObjectStoreConfig,
pub enable_memory_catalog: bool,
pub compaction: CompactionConfig,
pub procedure: Option<ProcedureConfig>,
}
impl Default for StandaloneOptions {
fn default() -> Self {
Self {
mode: Mode::Standalone,
enable_memory_catalog: false,
http_options: Some(HttpOptions::default()),
grpc_options: Some(GrpcOptions::default()),
mysql_options: Some(MysqlOptions::default()),
@@ -90,11 +99,11 @@ impl Default for StandaloneOptions {
opentsdb_options: Some(OpentsdbOptions::default()),
influxdb_options: Some(InfluxdbOptions::default()),
prometheus_options: Some(PrometheusOptions::default()),
promql_options: Some(PromqlOptions::default()),
mode: Mode::Standalone,
prom_options: Some(PromOptions::default()),
wal: WalConfig::default(),
storage: ObjectStoreConfig::default(),
enable_memory_catalog: false,
compaction: CompactionConfig::default(),
procedure: None,
}
}
}
@@ -102,6 +111,7 @@ impl Default for StandaloneOptions {
impl StandaloneOptions {
fn frontend_options(self) -> FrontendOptions {
FrontendOptions {
mode: self.mode,
http_options: self.http_options,
grpc_options: self.grpc_options,
mysql_options: self.mysql_options,
@@ -109,22 +119,57 @@ impl StandaloneOptions {
opentsdb_options: self.opentsdb_options,
influxdb_options: self.influxdb_options,
prometheus_options: self.prometheus_options,
promql_options: self.promql_options,
mode: self.mode,
meta_client_opts: None,
prom_options: self.prom_options,
meta_client_options: None,
}
}
fn datanode_options(self) -> DatanodeOptions {
DatanodeOptions {
enable_memory_catalog: self.enable_memory_catalog,
wal: self.wal,
storage: self.storage,
enable_memory_catalog: self.enable_memory_catalog,
compaction: self.compaction,
procedure: self.procedure,
..Default::default()
}
}
}
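A tiny sketch (not in the diff) of the intent behind this layout: one parsed StandaloneOptions value fans out into the frontend and datanode halves; split_options is a hypothetical helper name:

fn split_options(opts: StandaloneOptions) -> (FrontendOptions, DatanodeOptions) {
    // StandaloneOptions derives Clone, so the same parsed value can feed both halves.
    let fe_opts = opts.clone().frontend_options();
    let dn_opts = opts.datanode_options();
    (fe_opts, dn_opts)
}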
pub struct Instance {
datanode: Datanode,
frontend: FeInstance,
}
impl Instance {
pub async fn run(&mut self) -> Result<()> {
// Start the datanode instance before starting services, to avoid requests coming in before internal components are started.
self.datanode
.start_instance()
.await
.context(StartDatanodeSnafu)?;
info!("Datanode instance started");
self.frontend.start().await.context(StartFrontendSnafu)?;
Ok(())
}
pub async fn stop(&self) -> Result<()> {
self.frontend
.shutdown()
.await
.context(ShutdownFrontendSnafu)?;
self.datanode
.shutdown_instance()
.await
.context(ShutdownDatanodeSnafu)?;
info!("Datanode instance stopped.");
Ok(())
}
}
#[derive(Debug, Parser)]
struct StartCommand {
#[clap(long)]
@@ -134,6 +179,8 @@ struct StartCommand {
#[clap(long)]
mysql_addr: Option<String>,
#[clap(long)]
prom_addr: Option<String>,
#[clap(long)]
postgres_addr: Option<String>,
#[clap(long)]
opentsdb_addr: Option<String>,
@@ -154,7 +201,7 @@ struct StartCommand {
}
impl StartCommand {
async fn run(self) -> Result<()> {
async fn build(self) -> Result<Instance> {
let enable_memory_catalog = self.enable_memory_catalog;
let config_file = self.config_file.clone();
let plugins = Arc::new(load_frontend_plugins(&self.user_provider)?);
@@ -174,33 +221,30 @@ impl StartCommand {
fe_opts, dn_opts
);
let mut datanode = Datanode::new(dn_opts.clone())
let datanode = Datanode::new(dn_opts.clone())
.await
.context(StartDatanodeSnafu)?;
let mut frontend = build_frontend(fe_opts, plugins, datanode.get_instance()).await?;
// Start the datanode instance before starting services, to avoid requests coming in before internal components are started.
datanode
.start_instance()
let mut frontend = build_frontend(plugins.clone(), datanode.get_instance()).await?;
frontend
.build_servers(&fe_opts, plugins)
.await
.context(StartDatanodeSnafu)?;
info!("Datanode instance started");
.context(StartFrontendSnafu)?;
frontend.start().await.context(StartFrontendSnafu)?;
Ok(())
Ok(Instance { datanode, frontend })
}
}
/// Build frontend instance in standalone mode
async fn build_frontend(
fe_opts: FrontendOptions,
plugins: Arc<Plugins>,
datanode_instance: InstanceRef,
) -> Result<Frontend<FeInstance>> {
) -> Result<FeInstance> {
let mut frontend_instance = FeInstance::new_standalone(datanode_instance.clone());
frontend_instance.set_script_handler(datanode_instance);
frontend_instance.set_plugins(plugins.clone());
Ok(Frontend::new(fe_opts, frontend_instance, plugins))
Ok(frontend_instance)
}
impl TryFrom<StartCommand> for FrontendOptions {
@@ -246,6 +290,11 @@ impl TryFrom<StartCommand> for FrontendOptions {
..Default::default()
})
}
if let Some(addr) = cmd.prom_addr {
opts.prom_options = Some(PromOptions { addr })
}
if let Some(addr) = cmd.postgres_addr {
opts.postgres_options = Some(PostgresOptions {
addr,
@@ -294,6 +343,7 @@ mod tests {
http_addr: None,
rpc_addr: None,
mysql_addr: None,
prom_addr: None,
postgres_addr: None,
opentsdb_addr: None,
config_file: Some(format!(
@@ -339,6 +389,7 @@ mod tests {
let command = StartCommand {
http_addr: None,
rpc_addr: None,
prom_addr: None,
mysql_addr: None,
postgres_addr: None,
opentsdb_addr: None,
@@ -362,4 +413,11 @@ mod tests {
.await;
assert!(result.is_ok());
}
#[test]
fn test_toml() {
let opts = StandaloneOptions::default();
let toml_string = toml::to_string(&opts).unwrap();
let _parsed: StandaloneOptions = toml::from_str(&toml_string).unwrap();
}
}

View File

@@ -29,9 +29,9 @@ mod tests {
use std::fs::File;
use std::io::Write;
use common_test_util::temp_dir::create_temp_dir;
use serde::{Deserialize, Serialize};
use snafu::ResultExt;
use tempdir::TempDir;
use super::*;
use crate::error::Result;
@@ -62,7 +62,7 @@ mod tests {
host: "greptime.test".to_string(),
};
let dir = TempDir::new("test_from_file").unwrap();
let dir = create_temp_dir("test_from_file");
let test_file = format!("{}/test.toml", dir.path().to_str().unwrap());
let s = toml::to_string(&config).unwrap();

src/cmd/tests/cli.rs (new file, 148 lines)
View File

@@ -0,0 +1,148 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#[cfg(target_os = "macos")]
mod tests {
use std::path::PathBuf;
use std::process::{Command, Stdio};
use std::time::Duration;
use common_test_util::temp_dir::create_temp_dir;
use rexpect::session::PtyReplSession;
struct Repl {
repl: PtyReplSession,
}
impl Repl {
fn send_line(&mut self, line: &str) {
self.repl.send_line(line).unwrap();
// read a line to consume the prompt
self.read_line();
}
fn read_line(&mut self) -> String {
self.repl.read_line().unwrap()
}
fn read_expect(&mut self, expect: &str) {
assert_eq!(self.read_line(), expect);
}
fn read_contains(&mut self, pat: &str) {
assert!(self.read_line().contains(pat));
}
}
// TODO(LFC): Un-ignore this REPL test.
// Ignore this REPL test because some logical plans like create database are not supported yet in Datanode.
#[ignore]
#[test]
fn test_repl() {
let data_dir = create_temp_dir("data");
let wal_dir = create_temp_dir("wal");
let mut bin_path = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
bin_path.push("../../target/debug");
let bin_path = bin_path.to_str().unwrap();
let mut datanode = Command::new("./greptime")
.current_dir(bin_path)
.args([
"datanode",
"start",
"--rpc-addr=0.0.0.0:4321",
"--node-id=1",
&format!("--data-dir={}", data_dir.path().display()),
&format!("--wal-dir={}", wal_dir.path().display()),
])
.stdout(Stdio::null())
.spawn()
.unwrap();
// wait for the Datanode to actually start
std::thread::sleep(Duration::from_secs(3));
let mut repl_cmd = Command::new("./greptime");
repl_cmd.current_dir(bin_path).args([
"--log-level=off",
"cli",
"attach",
"--grpc-addr=0.0.0.0:4321",
// history commands can sneak into stdout and mess up our tests, so disable them
"--disable-helper",
]);
let pty_session = rexpect::session::spawn_command(repl_cmd, Some(5_000)).unwrap();
let repl = PtyReplSession {
prompt: "> ".to_string(),
pty_session,
quit_command: None,
echo_on: false,
};
let repl = &mut Repl { repl };
repl.read_expect("Ready for commands. (Hint: try 'help')");
test_create_database(repl);
test_use_database(repl);
test_create_table(repl);
test_insert(repl);
test_select(repl);
datanode.kill().unwrap();
datanode.wait().unwrap();
}
fn test_create_database(repl: &mut Repl) {
repl.send_line("CREATE DATABASE db;");
repl.read_expect("Affected Rows: 1");
repl.read_contains("Cost");
}
fn test_use_database(repl: &mut Repl) {
repl.send_line("USE db");
repl.read_expect("Total Rows: 0");
repl.read_contains("Cost");
repl.read_expect("Using db");
}
fn test_create_table(repl: &mut Repl) {
repl.send_line("CREATE TABLE t(x STRING, ts TIMESTAMP TIME INDEX);");
repl.read_expect("Affected Rows: 0");
repl.read_contains("Cost");
}
fn test_insert(repl: &mut Repl) {
repl.send_line("INSERT INTO t(x, ts) VALUES ('hello', 1676895812239);");
repl.read_expect("Affected Rows: 1");
repl.read_contains("Cost");
}
fn test_select(repl: &mut Repl) {
repl.send_line("SELECT * FROM t;");
repl.read_expect("+-------+-------------------------+");
repl.read_expect("| x | ts |");
repl.read_expect("+-------+-------------------------+");
repl.read_expect("| hello | 2023-02-20T12:23:32.239 |");
repl.read_expect("+-------+-------------------------+");
repl.read_expect("Total Rows: 1");
repl.read_contains("Cost");
}
}

View File

@@ -20,6 +20,12 @@ use serde::{Deserialize, Deserializer, Serialize, Serializer};
#[derive(Debug, Default, Clone, PartialEq, Eq, PartialOrd, Ord, Deserialize, Serialize)]
pub struct Bytes(bytes::Bytes);
impl From<Bytes> for bytes::Bytes {
fn from(value: Bytes) -> Self {
value.0
}
}
impl From<bytes::Bytes> for Bytes {
fn from(bytes: bytes::Bytes) -> Bytes {
Bytes(bytes)

View File

@@ -17,5 +17,4 @@ snafu = { version = "0.7", features = ["backtraces"] }
[dev-dependencies]
chrono.workspace = true
tempdir = "0.3"
tokio.workspace = true

View File

@@ -0,0 +1,13 @@
[package]
name = "common-datasource"
version.workspace = true
edition.workspace = true
license.workspace = true
[dependencies]
common-error = { path = "../error" }
futures.workspace = true
object-store = { path = "../../object-store" }
regex = "1.7"
snafu.workspace = true
url = "2.3"

View File

@@ -0,0 +1,75 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::any::Any;
use common_error::prelude::*;
use url::ParseError;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub))]
pub enum Error {
#[snafu(display("Unsupported backend protocol: {}", protocol))]
UnsupportedBackendProtocol { protocol: String },
#[snafu(display("empty host: {}", url))]
EmptyHostPath { url: String },
#[snafu(display("Invalid path: {}", path))]
InvalidPath { path: String },
#[snafu(display("Invalid url: {}, error :{}", url, source))]
InvalidUrl { url: String, source: ParseError },
#[snafu(display("Failed to build backend, source: {}", source))]
BuildBackend {
source: object_store::Error,
backtrace: Backtrace,
},
#[snafu(display("Failed to list object in path: {}, source: {}", path, source))]
ListObjects {
path: String,
backtrace: Backtrace,
source: object_store::Error,
},
#[snafu(display("Invalid connection: {}", msg))]
InvalidConnection { msg: String },
}
pub type Result<T> = std::result::Result<T, Error>;
impl ErrorExt for Error {
fn status_code(&self) -> StatusCode {
use Error::*;
match self {
BuildBackend { .. } | ListObjects { .. } => StatusCode::StorageUnavailable,
UnsupportedBackendProtocol { .. }
| InvalidConnection { .. }
| InvalidUrl { .. }
| EmptyHostPath { .. }
| InvalidPath { .. } => StatusCode::InvalidArguments,
}
}
fn backtrace_opt(&self) -> Option<&Backtrace> {
ErrorCompat::backtrace(self)
}
fn as_any(&self) -> &dyn Any {
self
}
}

View File

@@ -12,4 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
pub use opendal::services::fs::Builder;
pub mod error;
pub mod lister;
pub mod object_store;
pub mod util;

View File

@@ -0,0 +1,81 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use futures::{future, TryStreamExt};
use object_store::{Object, ObjectStore};
use regex::Regex;
use snafu::ResultExt;
use crate::error::{self, Result};
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum Source {
Filename(String),
Dir,
}
pub struct Lister {
object_store: ObjectStore,
source: Source,
path: String,
regex: Option<Regex>,
}
impl Lister {
pub fn new(
object_store: ObjectStore,
source: Source,
path: String,
regex: Option<Regex>,
) -> Self {
Lister {
object_store,
source,
path,
regex,
}
}
pub async fn list(&self) -> Result<Vec<Object>> {
match &self.source {
Source::Dir => {
let streamer = self
.object_store
.object(&self.path)
.list()
.await
.context(error::ListObjectsSnafu { path: &self.path })?;
streamer
.try_filter(|f| {
let res = self
.regex
.as_ref()
.map(|x| x.is_match(f.name()))
.unwrap_or(true);
future::ready(res)
})
.try_collect::<Vec<_>>()
.await
.context(error::ListObjectsSnafu { path: &self.path })
}
Source::Filename(filename) => {
let obj = self
.object_store
.object(&format!("{}{}", self.path, filename));
Ok(vec![obj])
}
}
}
}
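A hypothetical usage sketch of Lister, reusing the imports from this file; the path and regex are made up for illustration:

async fn list_parquet_files(object_store: ObjectStore) -> Result<Vec<Object>> {
    let lister = Lister::new(
        object_store,
        Source::Dir,
        "/data/exports/".to_string(),
        Some(Regex::new(r"\.parquet$").unwrap()),
    );
    // Returns only the objects under the directory whose names match the regex.
    lister.list().await
}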

View File

@@ -0,0 +1,60 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
pub mod fs;
pub mod s3;
use std::collections::HashMap;
use object_store::ObjectStore;
use snafu::{OptionExt, ResultExt};
use url::{ParseError, Url};
use self::fs::build_fs_backend;
use self::s3::build_s3_backend;
use crate::error::{self, Result};
pub const FS_SCHEMA: &str = "FS";
pub const S3_SCHEMA: &str = "S3";
/// Parse a URL and return `(schema, Option<host>, path)`.
pub fn parse_url(url: &str) -> Result<(String, Option<String>, String)> {
let parsed_url = Url::parse(url);
match parsed_url {
Ok(url) => Ok((
url.scheme().to_string(),
url.host_str().map(|s| s.to_string()),
url.path().to_string(),
)),
Err(ParseError::RelativeUrlWithoutBase) => {
Ok((FS_SCHEMA.to_string(), None, url.to_string()))
}
Err(err) => Err(err).context(error::InvalidUrlSnafu { url }),
}
}
pub fn build_backend(url: &str, connection: HashMap<String, String>) -> Result<ObjectStore> {
let (schema, host, _path) = parse_url(url)?;
match schema.to_uppercase().as_str() {
S3_SCHEMA => {
let host = host.context(error::EmptyHostPathSnafu {
url: url.to_string(),
})?;
Ok(build_s3_backend(&host, "/", connection)?)
}
FS_SCHEMA => Ok(build_fs_backend("/")?),
_ => error::UnsupportedBackendProtocolSnafu { protocol: schema }.fail(),
}
}
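For orientation, a sketch of the outputs parse_url is expected to produce, inferred from the code above and its tests rather than verified against the crate:

// "s3://bucket/to/path/" -> ("s3", Some("bucket"), "/to/path/")
// "/to/path/file"        -> ("FS", None, "/to/path/file")  // RelativeUrlWithoutBase fallback
#[test]
fn parse_url_sketch() {
    let (schema, host, path) = parse_url("s3://bucket/to/path/").unwrap();
    assert_eq!(
        (schema.as_str(), host.as_deref(), path.as_str()),
        ("s3", Some("bucket"), "/to/path/")
    );
}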

View File

@@ -0,0 +1,28 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use object_store::services::Fs;
use object_store::{ObjectStore, ObjectStoreBuilder};
use snafu::ResultExt;
use crate::error::{self, Result};
pub fn build_fs_backend(root: &str) -> Result<ObjectStore> {
let accessor = Fs::default()
.root(root)
.build()
.context(error::BuildBackendSnafu)?;
Ok(ObjectStore::new(accessor).finish())
}

View File

@@ -0,0 +1,79 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::HashMap;
use object_store::services::S3;
use object_store::{ObjectStore, ObjectStoreBuilder};
use snafu::ResultExt;
use crate::error::{self, Result};
const ENDPOINT_URL: &str = "ENDPOINT_URL";
const ACCESS_KEY_ID: &str = "ACCESS_KEY_ID";
const SECRET_ACCESS_KEY: &str = "SECRET_ACCESS_KEY";
const SESSION_TOKEN: &str = "SESSION_TOKEN";
const REGION: &str = "REGION";
const ENABLE_VIRTUAL_HOST_STYLE: &str = "ENABLE_VIRTUAL_HOST_STYLE";
pub fn build_s3_backend(
host: &str,
path: &str,
connection: HashMap<String, String>,
) -> Result<ObjectStore> {
let mut builder = S3::default();
builder.root(path);
builder.bucket(host);
if let Some(endpoint) = connection.get(ENDPOINT_URL) {
builder.endpoint(endpoint);
}
if let Some(region) = connection.get(REGION) {
builder.region(region);
}
if let Some(key_id) = connection.get(ACCESS_KEY_ID) {
builder.access_key_id(key_id);
}
if let Some(key) = connection.get(SECRET_ACCESS_KEY) {
builder.secret_access_key(key);
}
if let Some(session_token) = connection.get(SESSION_TOKEN) {
builder.security_token(session_token);
}
if let Some(enable_str) = connection.get(ENABLE_VIRTUAL_HOST_STYLE) {
let enable = enable_str.as_str().parse::<bool>().map_err(|e| {
error::InvalidConnectionSnafu {
msg: format!(
"failed to parse the option {}={}, {}",
ENABLE_VIRTUAL_HOST_STYLE, enable_str, e
),
}
.build()
})?;
if enable {
builder.enable_virtual_host_style();
}
}
let accessor = builder.build().context(error::BuildBackendSnafu)?;
Ok(ObjectStore::new(accessor).finish())
}
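A hypothetical connection map for build_s3_backend; the bucket, endpoint, and credentials are placeholders, and only the keys matched by the constants above take effect:

fn example_s3_store() -> Result<ObjectStore> {
    let connection: HashMap<String, String> = HashMap::from([
        ("ENDPOINT_URL".to_string(), "http://127.0.0.1:9000".to_string()),
        ("ACCESS_KEY_ID".to_string(), "minio".to_string()),
        ("SECRET_ACCESS_KEY".to_string(), "minio123".to_string()),
        ("REGION".to_string(), "us-east-1".to_string()),
    ]);
    // Any key not recognized above is silently ignored.
    build_s3_backend("my-bucket", "/", connection)
}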

View File

@@ -0,0 +1,125 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
pub fn find_dir_and_filename(path: &str) -> (String, Option<String>) {
if path.is_empty() {
("/".to_string(), None)
} else if path.ends_with('/') {
(path.to_string(), None)
} else if let Some(idx) = path.rfind('/') {
(
path[..idx + 1].to_string(),
Some(path[idx + 1..].to_string()),
)
} else {
("/".to_string(), Some(path.to_string()))
}
}
#[cfg(test)]
mod tests {
use url::Url;
use super::*;
#[test]
fn test_parse_uri() {
struct Test<'a> {
uri: &'a str,
expected_path: &'a str,
expected_schema: &'a str,
}
let tests = [
Test {
uri: "s3://bucket/to/path/",
expected_path: "/to/path/",
expected_schema: "s3",
},
Test {
uri: "fs:///to/path/",
expected_path: "/to/path/",
expected_schema: "fs",
},
Test {
uri: "fs:///to/path/file",
expected_path: "/to/path/file",
expected_schema: "fs",
},
];
for test in tests {
let parsed_uri = Url::parse(test.uri).unwrap();
assert_eq!(parsed_uri.path(), test.expected_path);
assert_eq!(parsed_uri.scheme(), test.expected_schema);
}
}
#[test]
fn test_parse_path_and_dir() {
let parsed = Url::from_file_path("/to/path/file").unwrap();
assert_eq!(parsed.path(), "/to/path/file");
let parsed = Url::from_directory_path("/to/path/").unwrap();
assert_eq!(parsed.path(), "/to/path/");
}
#[test]
fn test_find_dir_and_filename() {
struct Test<'a> {
path: &'a str,
expected_dir: &'a str,
expected_filename: Option<String>,
}
let tests = [
Test {
path: "to/path/",
expected_dir: "to/path/",
expected_filename: None,
},
Test {
path: "to/path/filename",
expected_dir: "to/path/",
expected_filename: Some("filename".into()),
},
Test {
path: "/to/path/filename",
expected_dir: "/to/path/",
expected_filename: Some("filename".into()),
},
Test {
path: "/",
expected_dir: "/",
expected_filename: None,
},
Test {
path: "filename",
expected_dir: "/",
expected_filename: Some("filename".into()),
},
Test {
path: "",
expected_dir: "/",
expected_filename: None,
},
];
for test in tests {
let (path, filename) = find_dir_and_filename(test.path);
assert_eq!(test.expected_dir, path);
assert_eq!(test.expected_filename, filename)
}
}
}

View File

@@ -86,6 +86,34 @@ impl StatusCode {
pub fn is_success(code: u32) -> bool {
Self::Success as u32 == code
}
pub fn is_retryable(&self) -> bool {
match self {
StatusCode::StorageUnavailable
| StatusCode::RuntimeResourcesExhausted
| StatusCode::Internal => true,
StatusCode::Success
| StatusCode::Unknown
| StatusCode::Unsupported
| StatusCode::Unexpected
| StatusCode::InvalidArguments
| StatusCode::InvalidSyntax
| StatusCode::PlanQuery
| StatusCode::EngineExecuteQuery
| StatusCode::TableAlreadyExists
| StatusCode::TableNotFound
| StatusCode::TableColumnNotFound
| StatusCode::TableColumnExists
| StatusCode::DatabaseNotFound
| StatusCode::UserNotFound
| StatusCode::UnsupportedPasswordType
| StatusCode::UserPasswordMismatch
| StatusCode::AuthHeaderNotFound
| StatusCode::InvalidAuthHeader
| StatusCode::AccessDenied => false,
}
}
}
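A sketch of the kind of caller is_retryable() enables; the helper name, attempt limit, and lack of backoff are illustrative assumptions, not an API from this change:

async fn call_with_retry<F, Fut, T, E>(mut op: F, max_attempts: usize) -> Result<T, E>
where
    F: FnMut() -> Fut,
    Fut: std::future::Future<Output = Result<T, E>>,
    E: ErrorExt, // this crate's error trait, which exposes status_code()
{
    let mut attempt = 0;
    loop {
        attempt += 1;
        match op().await {
            Ok(value) => return Ok(value),
            // Only transient failures (storage unavailable, resources exhausted,
            // internal errors) are worth another try.
            Err(e) if attempt < max_attempts && e.status_code().is_retryable() => continue,
            Err(e) => return Err(e),
        }
    }
}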
impl fmt::Display for StatusCode {

View File

@@ -10,6 +10,7 @@ proc-macro = true
[dependencies]
quote = "1.0"
syn = "1.0"
proc-macro2 = "1.0"
[dev-dependencies]
arc-swap = "1.0"

View File

@@ -12,8 +12,11 @@
// See the License for the specific language governing permissions and
// limitations under the License.
mod range_fn;
use proc_macro::TokenStream;
use quote::{quote, quote_spanned};
use range_fn::process_range_fn;
use syn::parse::Parser;
use syn::spanned::Spanned;
use syn::{parse_macro_input, DeriveInput, ItemStruct};
@@ -83,3 +86,31 @@ pub fn as_aggr_func_creator(_args: TokenStream, input: TokenStream) -> TokenStre
}
.into()
}
/// Attribute macro to convert an arithmetic function into a range function. The annotated function
/// should accept several arrays as input and return a single value as output. This procedural
/// macro works with any number of input parameters. The return type can be either a primitive type
/// or one wrapped in `Option`.
///
/// # Example
/// Take `count_over_time()` in PromQL as an example:
/// ```rust, ignore
/// /// The count of all values in the specified interval.
/// #[range_fn(
/// name = "CountOverTime",
/// ret = "Float64Array",
/// display_name = "prom_count_over_time"
/// )]
/// pub fn count_over_time(_: &TimestampMillisecondArray, values: &Float64Array) -> f64 {
/// values.len() as f64
/// }
/// ```
///
/// # Arguments
/// - `name`: The name of the generated [ScalarUDF] struct.
/// - `ret`: The return type of the generated UDF function.
/// - `display_name`: The display name of the generated UDF function.
#[proc_macro_attribute]
pub fn range_fn(args: TokenStream, input: TokenStream) -> TokenStream {
process_range_fn(args, input)
}

View File

@@ -0,0 +1,230 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::HashMap;
use proc_macro::TokenStream;
use proc_macro2::Span;
use quote::quote;
use syn::punctuated::Punctuated;
use syn::spanned::Spanned;
use syn::token::Comma;
use syn::{
parse_macro_input, Attribute, AttributeArgs, FnArg, Ident, ItemFn, Meta, MetaNameValue,
NestedMeta, Signature, Type, TypeReference, Visibility,
};
/// Internal utility macro to return early on error.
macro_rules! ok {
($item:expr) => {
match $item {
Ok(item) => item,
Err(e) => return e.into_compile_error().into(),
}
};
}
pub(crate) fn process_range_fn(args: TokenStream, input: TokenStream) -> TokenStream {
// extract arg map
let arg_pairs = parse_macro_input!(args as AttributeArgs);
let arg_span = arg_pairs[0].span();
let arg_map = ok!(extract_arg_map(arg_pairs));
// decompose the fn block
let compute_fn = parse_macro_input!(input as ItemFn);
let ItemFn {
attrs,
vis,
sig,
block,
} = compute_fn;
// extract fn arg list
let Signature {
inputs,
ident: fn_name,
..
} = &sig;
let arg_types = ok!(extract_input_types(inputs));
// build the struct and its impl block
let struct_code = build_struct(
attrs,
vis,
ok!(get_ident(&arg_map, "name", arg_span)),
ok!(get_ident(&arg_map, "display_name", arg_span)),
);
let calc_fn_code = build_calc_fn(
ok!(get_ident(&arg_map, "name", arg_span)),
arg_types,
fn_name.clone(),
ok!(get_ident(&arg_map, "ret", arg_span)),
);
// preserve this fn, but remove its `pub` modifier
let input_fn_code: TokenStream = quote! {
#sig { #block }
}
.into();
let mut result = TokenStream::new();
result.extend(struct_code);
result.extend(calc_fn_code);
result.extend(input_fn_code);
result
}
/// Extract a String <-> Ident map from the attribute args.
fn extract_arg_map(args: Vec<NestedMeta>) -> Result<HashMap<String, Ident>, syn::Error> {
args.into_iter()
.map(|meta| {
if let NestedMeta::Meta(Meta::NameValue(MetaNameValue { path, lit, .. })) = meta {
let name = path.get_ident().unwrap().to_string();
let ident = match lit {
syn::Lit::Str(lit_str) => lit_str.parse::<Ident>(),
_ => Err(syn::Error::new(
lit.span(),
"Unexpected attribute format. Expected `name = \"value\"`",
)),
}?;
Ok((name, ident))
} else {
Err(syn::Error::new(
meta.span(),
"Unexpected attribute format. Expected `name = \"value\"`",
))
}
})
.collect::<Result<HashMap<String, Ident>, syn::Error>>()
}
/// Helper function to get an `Ident` from the parsed argument map.
fn get_ident(map: &HashMap<String, Ident>, key: &str, span: Span) -> Result<Ident, syn::Error> {
map.get(key)
.cloned()
.ok_or_else(|| syn::Error::new(span, format!("Expect attribute {key} but not found")))
}
/// Extract the argument list from the annotated function.
fn extract_input_types(inputs: &Punctuated<FnArg, Comma>) -> Result<Vec<Type>, syn::Error> {
inputs
.iter()
.map(|arg| match arg {
FnArg::Receiver(receiver) => Err(syn::Error::new(receiver.span(), "expected bool")),
FnArg::Typed(pat_type) => Ok(*pat_type.ty.clone()),
})
.collect()
}
fn build_struct(
attrs: Vec<Attribute>,
vis: Visibility,
name: Ident,
display_name_ident: Ident,
) -> TokenStream {
let display_name = display_name_ident.to_string();
quote! {
#(#attrs)*
#[derive(Debug)]
#vis struct #name {}
impl #name {
pub const fn name() -> &'static str {
#display_name
}
pub fn scalar_udf() -> ScalarUDF {
ScalarUDF {
name: Self::name().to_string(),
signature: Signature::new(
TypeSignature::Exact(Self::input_type()),
Volatility::Immutable,
),
return_type: Arc::new(|_| Ok(Arc::new(Self::return_type()))),
fun: Arc::new(Self::calc),
}
}
// TODO(ruihang): this should be parameterized
// time index column and value column
fn input_type() -> Vec<DataType> {
vec![
RangeArray::convert_data_type(DataType::Timestamp(TimeUnit::Millisecond, None)),
RangeArray::convert_data_type(DataType::Float64),
]
}
// TODO(ruihang): this should be parameterized
fn return_type() -> DataType {
DataType::Float64
}
}
}
.into()
}
fn build_calc_fn(
name: Ident,
param_types: Vec<Type>,
fn_name: Ident,
ret_type: Ident,
) -> TokenStream {
let param_names = param_types
.iter()
.enumerate()
.map(|(i, ty)| Ident::new(&format!("param_{}", i), ty.span()))
.collect::<Vec<_>>();
let unref_param_types = param_types
.iter()
.map(|ty| {
if let Type::Reference(TypeReference { elem, .. }) = ty {
elem.as_ref().clone()
} else {
ty.clone()
}
})
.collect::<Vec<_>>();
let num_params = param_types.len();
let param_numbers = (0..num_params).collect::<Vec<_>>();
let range_array_names = param_names
.iter()
.map(|name| Ident::new(&format!("{}_range_array", name), name.span()))
.collect::<Vec<_>>();
let first_range_array_name = range_array_names.first().unwrap().clone();
quote! {
impl #name {
fn calc(input: &[ColumnarValue]) -> Result<ColumnarValue, DataFusionError> {
assert_eq!(input.len(), #num_params);
#( let #range_array_names = RangeArray::try_new(extract_array(&input[#param_numbers])?.data().clone().into())?; )*
// TODO(ruihang): add ensure!()
let mut result_array = Vec::new();
for index in 0..#first_range_array_name.len(){
#( let #param_names = #range_array_names.get(index).unwrap().as_any().downcast_ref::<#unref_param_types>().unwrap().clone(); )*
// TODO(ruihang): add ensure!() to check length
let result = #fn_name(#( &#param_names, )*);
result_array.push(result);
}
let result = ColumnarValue::Array(Arc::new(#ret_type::from_iter(result_array)));
Ok(result)
}
}
}
.into()
}

View File

@@ -20,6 +20,7 @@ use static_assertions::{assert_fields, assert_impl_all};
struct Foo {}
#[test]
#[allow(clippy::extra_unused_type_parameters)]
fn test_derive() {
Foo::default();
assert_fields!(Foo: input_types);

View File

@@ -19,7 +19,7 @@ num-traits = "0.2"
once_cell = "1.10"
paste = "1.0"
snafu.workspace = true
statrs = "0.15"
statrs = "0.16"
[dev-dependencies]
ron = "0.7"

View File

@@ -12,17 +12,10 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use std::sync::Arc;
mod from_unixtime;
use from_unixtime::FromUnixtimeFunction;
use crate::scalars::function_registry::FunctionRegistry;
pub(crate) struct TimestampFunction;
impl TimestampFunction {
pub fn register(registry: &FunctionRegistry) {
registry.register(Arc::new(FromUnixtimeFunction::default()));
}
pub fn register(_registry: &FunctionRegistry) {}
}

View File

@@ -1,133 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! from_unixtime function.
/// TODO(dennis) It can be removed after we upgrade datafusion.
use std::fmt;
use std::sync::Arc;
use common_query::error::{
ArrowComputeSnafu, IntoVectorSnafu, Result, TypeCastSnafu, UnsupportedInputDataTypeSnafu,
};
use common_query::prelude::{Signature, Volatility};
use datatypes::arrow::compute;
use datatypes::arrow::datatypes::{DataType as ArrowDatatype, Int64Type};
use datatypes::data_type::DataType;
use datatypes::prelude::ConcreteDataType;
use datatypes::vectors::{TimestampMillisecondVector, VectorRef};
use snafu::ResultExt;
use crate::scalars::function::{Function, FunctionContext};
#[derive(Clone, Debug, Default)]
pub struct FromUnixtimeFunction;
const NAME: &str = "from_unixtime";
impl Function for FromUnixtimeFunction {
fn name(&self) -> &str {
NAME
}
fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
Ok(ConcreteDataType::timestamp_millisecond_datatype())
}
fn signature(&self) -> Signature {
Signature::uniform(
1,
vec![ConcreteDataType::int64_datatype()],
Volatility::Immutable,
)
}
fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
match columns[0].data_type() {
ConcreteDataType::Int64(_) => {
let array = columns[0].to_arrow_array();
// Our timestamp vector's time unit is millisecond
let array = compute::multiply_scalar_dyn::<Int64Type>(&array, 1000i64)
.context(ArrowComputeSnafu)?;
let arrow_datatype = &self.return_type(&[]).unwrap().as_arrow_type();
Ok(Arc::new(
TimestampMillisecondVector::try_from_arrow_array(
compute::cast(&array, arrow_datatype).context(TypeCastSnafu {
typ: ArrowDatatype::Int64,
})?,
)
.context(IntoVectorSnafu {
data_type: arrow_datatype.clone(),
})?,
))
}
_ => UnsupportedInputDataTypeSnafu {
function: NAME,
datatypes: columns.iter().map(|c| c.data_type()).collect::<Vec<_>>(),
}
.fail(),
}
}
}
impl fmt::Display for FromUnixtimeFunction {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "FROM_UNIXTIME")
}
}
#[cfg(test)]
mod tests {
use common_query::prelude::TypeSignature;
use datatypes::value::Value;
use datatypes::vectors::Int64Vector;
use super::*;
#[test]
fn test_from_unixtime() {
let f = FromUnixtimeFunction::default();
assert_eq!("from_unixtime", f.name());
assert_eq!(
ConcreteDataType::timestamp_millisecond_datatype(),
f.return_type(&[]).unwrap()
);
assert!(matches!(f.signature(),
Signature {
type_signature: TypeSignature::Uniform(1, valid_types),
volatility: Volatility::Immutable
} if valid_types == vec![ConcreteDataType::int64_datatype()]
));
let times = vec![Some(1494410783), None, Some(1494410983)];
let args: Vec<VectorRef> = vec![Arc::new(Int64Vector::from(times.clone()))];
let vector = f.eval(FunctionContext::default(), &args).unwrap();
assert_eq!(3, vector.len());
for (i, t) in times.iter().enumerate() {
let v = vector.get(i);
if i == 1 {
assert_eq!(Value::Null, v);
continue;
}
match v {
Value::Timestamp(ts) => {
assert_eq!(ts.value(), t.unwrap() * 1000);
}
_ => unreachable!(),
}
}
}
}

View File

@@ -14,9 +14,9 @@
use std::sync::Arc;
use common_query::error::{ExecuteFunctionSnafu, FromScalarValueSnafu};
use common_query::error::FromScalarValueSnafu;
use common_query::prelude::{
ColumnarValue, ReturnTypeFunction, ScalarFunctionImplementation, ScalarUdf, ScalarValue,
ColumnarValue, ReturnTypeFunction, ScalarFunctionImplementation, ScalarUdf,
};
use datatypes::error::Error as DataTypeError;
use datatypes::prelude::*;
@@ -54,16 +54,8 @@ pub fn create_udf(func: FunctionRef) -> ScalarUdf {
.collect();
let result = func_cloned.eval(func_ctx, &args.context(FromScalarValueSnafu)?);
let udf = if len.is_some() {
result.map(ColumnarValue::Vector)?
} else {
ScalarValue::try_from_array(&result?.to_arrow_array(), 0)
.map(ColumnarValue::Scalar)
.context(ExecuteFunctionSnafu)?
};
Ok(udf)
let udf_result = result.map(ColumnarValue::Vector)?;
Ok(udf_result)
});
ScalarUdf::new(func.name(), &func.signature(), &return_type, &fun)

View File

@@ -12,19 +12,19 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use std::sync::Arc;
use api::v1::alter_expr::Kind;
use api::v1::{column_def, AlterExpr, CreateTableExpr, DropColumns, RenameTable};
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use datatypes::schema::{ColumnSchema, SchemaBuilder, SchemaRef};
use datatypes::schema::{ColumnSchema, RawSchema};
use snafu::{ensure, OptionExt, ResultExt};
use table::metadata::TableId;
use table::requests::{AddColumnRequest, AlterKind, AlterTableRequest, CreateTableRequest};
use table::requests::{
AddColumnRequest, AlterKind, AlterTableRequest, CreateTableRequest, TableOptions,
};
use crate::error::{
ColumnNotFoundSnafu, CreateSchemaSnafu, InvalidColumnDefSnafu, MissingFieldSnafu,
MissingTimestampColumnSnafu, Result,
ColumnNotFoundSnafu, InvalidColumnDefSnafu, MissingFieldSnafu, MissingTimestampColumnSnafu,
Result, UnrecognizedTableOptionSnafu,
};
/// Convert an [`AlterExpr`] to an [`AlterTableRequest`]
@@ -92,7 +92,7 @@ pub fn alter_expr_to_request(expr: AlterExpr) -> Result<AlterTableRequest> {
}
}
pub fn create_table_schema(expr: &CreateTableExpr) -> Result<SchemaRef> {
pub fn create_table_schema(expr: &CreateTableExpr) -> Result<RawSchema> {
let column_schemas = expr
.column_defs
.iter()
@@ -121,12 +121,7 @@ pub fn create_table_schema(expr: &CreateTableExpr) -> Result<SchemaRef> {
})
.collect::<Vec<_>>();
Ok(Arc::new(
SchemaBuilder::try_from(column_schemas)
.context(CreateSchemaSnafu)?
.build()
.context(CreateSchemaSnafu)?,
))
Ok(RawSchema::new(column_schemas))
}
pub fn create_expr_to_request(
@@ -138,8 +133,11 @@ pub fn create_expr_to_request(
.primary_keys
.iter()
.map(|key| {
// We do a linear search here.
schema
.column_index_by_name(key)
.column_schemas
.iter()
.position(|column_schema| column_schema.name == *key)
.context(ColumnNotFoundSnafu {
column_name: key,
table_name: &expr.table_name,
@@ -167,6 +165,8 @@ pub fn create_expr_to_request(
expr.region_ids
};
let table_options =
TableOptions::try_from(&expr.table_options).context(UnrecognizedTableOptionSnafu)?;
Ok(CreateTableRequest {
id: table_id,
catalog_name,
@@ -177,7 +177,7 @@ pub fn create_expr_to_request(
region_numbers: region_ids,
primary_key_indices,
create_if_not_exists: expr.create_if_not_exists,
table_options: expr.table_options,
table_options,
})
}

View File

@@ -32,7 +32,7 @@ pub enum Error {
DecodeInsert { source: DecodeError },
#[snafu(display("Illegal insert data"))]
IllegalInsertData,
IllegalInsertData { backtrace: Backtrace },
#[snafu(display("Column datatype error, source: {}", source))]
ColumnDataType {
@@ -40,12 +40,6 @@ pub enum Error {
source: api::error::Error,
},
#[snafu(display("Failed to create schema when creating table, source: {}", source))]
CreateSchema {
#[snafu(backtrace)]
source: datatypes::error::Error,
},
#[snafu(display(
"Duplicated timestamp column in gRPC requests, exists {}, duplicated: {}",
exists,
@@ -90,6 +84,12 @@ pub enum Error {
#[snafu(backtrace)]
source: api::error::Error,
},
#[snafu(display("Unrecognized table option: {}", source))]
UnrecognizedTableOption {
#[snafu(backtrace)]
source: table::error::Error,
},
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -102,14 +102,15 @@ impl ErrorExt for Error {
StatusCode::InvalidArguments
}
Error::ColumnDataType { .. } => StatusCode::Internal,
Error::CreateSchema { .. }
| Error::DuplicatedTimestampColumn { .. }
| Error::MissingTimestampColumn { .. } => StatusCode::InvalidArguments,
Error::DuplicatedTimestampColumn { .. } | Error::MissingTimestampColumn { .. } => {
StatusCode::InvalidArguments
}
Error::InvalidColumnProto { .. } => StatusCode::InvalidArguments,
Error::CreateVector { .. } => StatusCode::InvalidArguments,
Error::MissingField { .. } => StatusCode::InvalidArguments,
Error::ColumnDefaultConstraint { source, .. } => source.status_code(),
Error::InvalidColumnDef { source, .. } => source.status_code(),
Error::UnrecognizedTableOption { .. } => StatusCode::InvalidArguments,
}
}
fn backtrace_opt(&self) -> Option<&Backtrace> {

View File

@@ -26,6 +26,7 @@ use common_time::{Date, DateTime};
use datatypes::data_type::{ConcreteDataType, DataType};
use datatypes::prelude::{ValueRef, VectorRef};
use datatypes::schema::SchemaRef;
use datatypes::types::TimestampType;
use datatypes::value::Value;
use datatypes::vectors::MutableVector;
use snafu::{ensure, OptionExt, ResultExt};
@@ -96,9 +97,7 @@ pub fn column_to_vector(column: &Column, rows: u32) -> Result<VectorRef> {
for i in 0..rows {
if let Some(true) = nulls_iter.next() {
vector
.push_value_ref(ValueRef::Null)
.context(CreateVectorSnafu)?;
vector.push_null();
} else {
let value_ref = values_iter
.next()
@@ -109,16 +108,12 @@ pub fn column_to_vector(column: &Column, rows: u32) -> Result<VectorRef> {
),
})?;
vector
.push_value_ref(value_ref)
.try_push_value_ref(value_ref)
.context(CreateVectorSnafu)?;
}
}
} else {
(0..rows).try_for_each(|_| {
vector
.push_value_ref(ValueRef::Null)
.context(CreateVectorSnafu)
})?;
(0..rows).for_each(|_| vector.push_null());
}
Ok(vector.to_vector())
}
@@ -324,7 +319,7 @@ fn add_values_to_builder(
values.iter().try_for_each(|value| {
builder
.push_value_ref(value.as_value_ref())
.try_push_value_ref(value.as_value_ref())
.context(CreateVectorSnafu)
})?;
} else {
@@ -337,12 +332,10 @@ fn add_values_to_builder(
let mut idx_of_values = 0;
for idx in 0..row_count {
match is_null(&null_mask, idx) {
Some(true) => builder
.push_value_ref(ValueRef::Null)
.context(CreateVectorSnafu)?,
Some(true) => builder.push_null(),
_ => {
builder
.push_value_ref(values[idx_of_values].as_value_ref())
.try_push_value_ref(values[idx_of_values].as_value_ref())
.context(CreateVectorSnafu)?;
idx_of_values += 1
}
@@ -422,13 +415,29 @@ fn convert_values(data_type: &ConcreteDataType, values: Values) -> Vec<Value> {
.into_iter()
.map(|v| Value::Date(v.into()))
.collect(),
ConcreteDataType::Timestamp(_) => values
ConcreteDataType::Timestamp(TimestampType::Second(_)) => values
.ts_second_values
.into_iter()
.map(|v| Value::Timestamp(Timestamp::new_second(v)))
.collect(),
ConcreteDataType::Timestamp(TimestampType::Millisecond(_)) => values
.ts_millisecond_values
.into_iter()
.map(|v| Value::Timestamp(Timestamp::new_millisecond(v)))
.collect(),
ConcreteDataType::Null(_) => unreachable!(),
ConcreteDataType::List(_) => unreachable!(),
ConcreteDataType::Timestamp(TimestampType::Microsecond(_)) => values
.ts_microsecond_values
.into_iter()
.map(|v| Value::Timestamp(Timestamp::new_microsecond(v)))
.collect(),
ConcreteDataType::Timestamp(TimestampType::Nanosecond(_)) => values
.ts_nanosecond_values
.into_iter()
.map(|v| Value::Timestamp(Timestamp::new_nanosecond(v)))
.collect(),
ConcreteDataType::Null(_) | ConcreteDataType::List(_) | ConcreteDataType::Dictionary(_) => {
unreachable!()
}
}
}
@@ -451,6 +460,7 @@ mod tests {
use common_time::timestamp::Timestamp;
use datatypes::data_type::ConcreteDataType;
use datatypes::schema::{ColumnSchema, SchemaBuilder, SchemaRef};
use datatypes::types::{TimestampMillisecondType, TimestampSecondType, TimestampType};
use datatypes::value::Value;
use snafu::ResultExt;
use table::error::Result as TableResult;
@@ -654,6 +664,39 @@ mod tests {
);
}
#[test]
fn test_convert_timestamp_values() {
// second
let actual = convert_values(
&ConcreteDataType::Timestamp(TimestampType::Second(TimestampSecondType)),
Values {
ts_second_values: vec![1_i64, 2_i64, 3_i64],
..Default::default()
},
);
let expect = vec![
Value::Timestamp(Timestamp::new_second(1_i64)),
Value::Timestamp(Timestamp::new_second(2_i64)),
Value::Timestamp(Timestamp::new_second(3_i64)),
];
assert_eq!(expect, actual);
// millisecond
let actual = convert_values(
&ConcreteDataType::Timestamp(TimestampType::Millisecond(TimestampMillisecondType)),
Values {
ts_millisecond_values: vec![1_i64, 2_i64, 3_i64],
..Default::default()
},
);
let expect = vec![
Value::Timestamp(Timestamp::new_millisecond(1_i64)),
Value::Timestamp(Timestamp::new_millisecond(2_i64)),
Value::Timestamp(Timestamp::new_millisecond(3_i64)),
];
assert_eq!(expect, actual);
}
#[test]
fn test_is_null() {
let null_mask = BitVec::from_slice(&[0b0000_0001, 0b0000_1000]);

View File

@@ -16,7 +16,7 @@ common-runtime = { path = "../runtime" }
dashmap = "5.4"
datafusion.workspace = true
datatypes = { path = "../../datatypes" }
flatbuffers = "22"
flatbuffers = "23.1"
futures = "0.3"
prost.workspace = true
snafu = { version = "0.7", features = ["backtraces"] }
@@ -26,7 +26,7 @@ tower = "0.4"
[dev-dependencies]
criterion = "0.4"
rand = "0.8"
rand.workspace = true
[[bench]]
name = "bench_main"

View File

@@ -18,18 +18,20 @@ use std::time::Duration;
use dashmap::mapref::entry::Entry;
use dashmap::DashMap;
use snafu::ResultExt;
use tonic::transport::{Channel as InnerChannel, Endpoint, Uri};
use snafu::{OptionExt, ResultExt};
use tonic::transport::{
Certificate, Channel as InnerChannel, ClientTlsConfig, Endpoint, Identity, Uri,
};
use tower::make::MakeConnection;
use crate::error;
use crate::error::Result;
use crate::error::{CreateChannelSnafu, InvalidConfigFilePathSnafu, InvalidTlsConfigSnafu, Result};
const RECYCLE_CHANNEL_INTERVAL_SECS: u64 = 60;
#[derive(Clone, Debug)]
pub struct ChannelManager {
config: ChannelConfig,
client_tls_config: Option<ClientTlsConfig>,
pool: Arc<Pool>,
}
@@ -52,7 +54,37 @@ impl ChannelManager {
recycle_channel_in_loop(cloned_pool, RECYCLE_CHANNEL_INTERVAL_SECS).await;
});
Self { config, pool }
Self {
config,
client_tls_config: None,
pool,
}
}
pub fn with_tls_config(config: ChannelConfig) -> Result<Self> {
let mut cm = Self::with_config(config.clone());
// setup tls
let path_config = config.client_tls.context(InvalidTlsConfigSnafu {
msg: "no config input",
})?;
let server_root_ca_cert = std::fs::read_to_string(path_config.server_ca_cert_path)
.context(InvalidConfigFilePathSnafu)?;
let server_root_ca_cert = Certificate::from_pem(server_root_ca_cert);
let client_cert = std::fs::read_to_string(path_config.client_cert_path)
.context(InvalidConfigFilePathSnafu)?;
let client_key = std::fs::read_to_string(path_config.client_key_path)
.context(InvalidConfigFilePathSnafu)?;
let client_identity = Identity::from_pem(client_cert, client_key);
cm.client_tls_config = Some(
ClientTlsConfig::new()
.ca_certificate(server_root_ca_cert)
.identity(client_identity),
);
Ok(cm)
}
pub fn config(&self) -> &ChannelConfig {
@@ -119,8 +151,7 @@ impl ChannelManager {
}
fn build_endpoint(&self, addr: &str) -> Result<Endpoint> {
let mut endpoint =
Endpoint::new(format!("http://{addr}")).context(error::CreateChannelSnafu)?;
let mut endpoint = Endpoint::new(format!("http://{addr}")).context(CreateChannelSnafu)?;
if let Some(dur) = self.config.timeout {
endpoint = endpoint.timeout(dur);
@@ -152,6 +183,12 @@ impl ChannelManager {
if let Some(enabled) = self.config.http2_adaptive_window {
endpoint = endpoint.http2_adaptive_window(enabled);
}
if let Some(tls_config) = &self.client_tls_config {
endpoint = endpoint
.tls_config(tls_config.clone())
.context(CreateChannelSnafu)?;
}
endpoint = endpoint
.tcp_keepalive(self.config.tcp_keepalive)
.tcp_nodelay(self.config.tcp_nodelay);
@@ -160,6 +197,13 @@ impl ChannelManager {
}
}
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct ClientTlsOption {
pub server_ca_cert_path: String,
pub client_cert_path: String,
pub client_key_path: String,
}
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct ChannelConfig {
pub timeout: Option<Duration>,
@@ -174,6 +218,7 @@ pub struct ChannelConfig {
pub http2_adaptive_window: Option<bool>,
pub tcp_keepalive: Option<Duration>,
pub tcp_nodelay: bool,
pub client_tls: Option<ClientTlsOption>,
}
impl Default for ChannelConfig {
@@ -191,6 +236,7 @@ impl Default for ChannelConfig {
http2_adaptive_window: None,
tcp_keepalive: None,
tcp_nodelay: true,
client_tls: None,
}
}
}
@@ -307,6 +353,16 @@ impl ChannelConfig {
..self
}
}
/// Set the value of tls client auth.
///
/// Disabled by default.
pub fn client_tls_config(self, client_tls_option: ClientTlsOption) -> Self {
Self {
client_tls: Some(client_tls_option),
..self
}
}
}
#[derive(Debug)]
@@ -401,7 +457,11 @@ mod tests {
async fn test_access_count() {
let pool = Arc::new(Pool::default());
let config = ChannelConfig::new();
let mgr = Arc::new(ChannelManager { pool, config });
let mgr = Arc::new(ChannelManager {
pool,
config,
client_tls_config: None,
});
let addr = "test_uri";
let mut joins = Vec::with_capacity(10);
@@ -443,6 +503,7 @@ mod tests {
http2_adaptive_window: None,
tcp_keepalive: None,
tcp_nodelay: true,
client_tls: None,
},
default_cfg
);
@@ -459,7 +520,12 @@ mod tests {
.http2_keep_alive_while_idle(true)
.http2_adaptive_window(true)
.tcp_keepalive(Duration::from_secs(2))
.tcp_nodelay(false);
.tcp_nodelay(false)
.client_tls_config(ClientTlsOption {
server_ca_cert_path: "some_server_path".to_string(),
client_cert_path: "some_cert_path".to_string(),
client_key_path: "some_key_path".to_string(),
});
assert_eq!(
ChannelConfig {
@@ -475,6 +541,11 @@ mod tests {
http2_adaptive_window: Some(true),
tcp_keepalive: Some(Duration::from_secs(2)),
tcp_nodelay: false,
client_tls: Some(ClientTlsOption {
server_ca_cert_path: "some_server_path".to_string(),
client_cert_path: "some_cert_path".to_string(),
client_key_path: "some_key_path".to_string(),
}),
},
cfg
);
@@ -496,7 +567,11 @@ mod tests {
.http2_adaptive_window(true)
.tcp_keepalive(Duration::from_secs(2))
.tcp_nodelay(true);
let mgr = ChannelManager { pool, config };
let mgr = ChannelManager {
pool,
config,
client_tls_config: None,
};
let res = mgr.build_endpoint("test_addr");
@@ -512,7 +587,11 @@ mod tests {
let pool = Arc::new(pool);
let config = ChannelConfig::new();
let mgr = ChannelManager { pool, config };
let mgr = ChannelManager {
pool,
config,
client_tls_config: None,
};
let addr = "test_addr";
let res = mgr.get(addr);

View File

@@ -13,6 +13,7 @@
// limitations under the License.
use std::any::Any;
use std::io;
use common_error::prelude::{ErrorExt, StatusCode};
use snafu::{Backtrace, ErrorCompat, Snafu};
@@ -22,6 +23,15 @@ pub type Result<T> = std::result::Result<T, Error>;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub))]
pub enum Error {
#[snafu(display("Invalid client tls config, {}", msg))]
InvalidTlsConfig { msg: String },
#[snafu(display("Invalid config file path, {}", source))]
InvalidConfigFilePath {
source: io::Error,
backtrace: Backtrace,
},
#[snafu(display("Missing required field in protobuf, field: {}", field))]
MissingField { field: String, backtrace: Backtrace },
@@ -81,7 +91,9 @@ pub enum Error {
impl ErrorExt for Error {
fn status_code(&self) -> StatusCode {
match self {
Error::MissingField { .. }
Error::InvalidTlsConfig { .. }
| Error::InvalidConfigFilePath { .. }
| Error::MissingField { .. }
| Error::TypeMismatch { .. }
| Error::InvalidFlightData { .. } => StatusCode::InvalidArguments,

View File

@@ -16,8 +16,9 @@ use std::collections::HashMap;
use std::sync::Arc;
use api::v1::{AffectedRows, FlightMetadata};
use arrow_flight::utils::{flight_data_from_arrow_batch, flight_data_to_arrow_batch};
use arrow_flight::utils::flight_data_to_arrow_batch;
use arrow_flight::{FlightData, IpcMessage, SchemaAsIpc};
use common_base::bytes::Bytes;
use common_recordbatch::{RecordBatch, RecordBatches};
use datatypes::arrow;
use datatypes::arrow::datatypes::Schema as ArrowSchema;
@@ -39,38 +40,58 @@ pub enum FlightMessage {
AffectedRows(usize),
}
#[derive(Default)]
pub struct FlightEncoder {
write_options: writer::IpcWriteOptions,
data_gen: writer::IpcDataGenerator,
dictionary_tracker: writer::DictionaryTracker,
}
impl Default for FlightEncoder {
fn default() -> Self {
Self {
write_options: writer::IpcWriteOptions::default(),
data_gen: writer::IpcDataGenerator::default(),
dictionary_tracker: writer::DictionaryTracker::new(false),
}
}
}
impl FlightEncoder {
pub fn encode(&self, flight_message: FlightMessage) -> FlightData {
pub fn encode(&mut self, flight_message: FlightMessage) -> FlightData {
match flight_message {
FlightMessage::Schema(schema) => {
SchemaAsIpc::new(schema.arrow_schema(), &self.write_options).into()
}
FlightMessage::Recordbatch(recordbatch) => {
let (flight_dictionaries, flight_batch) = flight_data_from_arrow_batch(
recordbatch.df_record_batch(),
&self.write_options,
);
let (encoded_dictionaries, encoded_batch) = self
.data_gen
.encoded_batch(
recordbatch.df_record_batch(),
&mut self.dictionary_tracker,
&self.write_options,
)
.expect("DictionaryTracker configured above to not fail on replacement");
// TODO(LFC): Handle dictionary as FlightData here, when we support Arrow's Dictionary DataType.
// Currently we don't have a datatype corresponding to Arrow's Dictionary DataType,
// so there won't be any "dictionaries" here. Assert to be sure about it, and
// perform a "testing guard" in case we forgot to handle the possible "dictionaries"
// here in the future.
debug_assert_eq!(flight_dictionaries.len(), 0);
debug_assert_eq!(encoded_dictionaries.len(), 0);
flight_batch
encoded_batch.into()
}
FlightMessage::AffectedRows(rows) => {
let metadata = FlightMetadata {
affected_rows: Some(AffectedRows { value: rows as _ }),
}
.encode_to_vec();
FlightData::new(None, IpcMessage(build_none_flight_msg()), metadata, vec![])
FlightData::new(
None,
IpcMessage(build_none_flight_msg().into()),
metadata,
vec![],
)
}
}
}
@@ -83,7 +104,8 @@ pub struct FlightDecoder {
impl FlightDecoder {
pub fn try_decode(&mut self, flight_data: FlightData) -> Result<FlightMessage> {
let message = root_as_message(flight_data.data_header.as_slice()).map_err(|e| {
let bytes = flight_data.data_header.slice(..);
let message = root_as_message(&bytes).map_err(|e| {
InvalidFlightDataSnafu {
reason: e.to_string(),
}
@@ -91,7 +113,7 @@ impl FlightDecoder {
})?;
match message.header_type() {
MessageHeader::NONE => {
let metadata = FlightMetadata::decode(flight_data.app_metadata.as_slice())
let metadata = FlightMetadata::decode(flight_data.app_metadata)
.context(DecodeFlightDataSnafu)?;
if let Some(AffectedRows { value }) = metadata.affected_rows {
return Ok(FlightMessage::AffectedRows(value as _));
@@ -176,7 +198,7 @@ pub fn flight_messages_to_recordbatches(messages: Vec<FlightMessage>) -> Result<
}
}
fn build_none_flight_msg() -> Vec<u8> {
fn build_none_flight_msg() -> Bytes {
let mut builder = FlatBufferBuilder::new();
let mut message = arrow::ipc::MessageBuilder::new(&mut builder);
@@ -187,7 +209,7 @@ fn build_none_flight_msg() -> Vec<u8> {
let data = message.finish();
builder.finish(data, None);
builder.finished_data().to_vec()
builder.finished_data().into()
}
#[cfg(test)]

View File

@@ -67,7 +67,7 @@ macro_rules! convert_arrow_array_to_grpc_vals {
return Ok(vals);
},
)+
ConcreteDataType::Null(_) | ConcreteDataType::List(_) => unreachable!("Should not send {:?} in gRPC", $data_type),
ConcreteDataType::Null(_) | ConcreteDataType::List(_) | ConcreteDataType::Dictionary(_) => unreachable!("Should not send {:?} in gRPC", $data_type),
}
}};
}

View File

@@ -0,0 +1,57 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use common_grpc::channel_manager::{ChannelConfig, ChannelManager, ClientTlsOption};
#[tokio::test]
async fn test_mtls_config() {
// test no config
let config = ChannelConfig::new();
let re = ChannelManager::with_tls_config(config);
assert!(re.is_err());
// test wrong file
let config = ChannelConfig::new().client_tls_config(ClientTlsOption {
server_ca_cert_path: "tests/tls/wrong_server.cert.pem".to_string(),
client_cert_path: "tests/tls/wrong_client.cert.pem".to_string(),
client_key_path: "tests/tls/wrong_client.key.pem".to_string(),
});
let re = ChannelManager::with_tls_config(config);
assert!(re.is_err());
// test corrupted file content
let config = ChannelConfig::new().client_tls_config(ClientTlsOption {
server_ca_cert_path: "tests/tls/server.cert.pem".to_string(),
client_cert_path: "tests/tls/client.cert.pem".to_string(),
client_key_path: "tests/tls/corrupted".to_string(),
});
let re = ChannelManager::with_tls_config(config);
assert!(re.is_ok());
let re = re.unwrap().get("127.0.0.1:0");
assert!(re.is_err());
// success
let config = ChannelConfig::new().client_tls_config(ClientTlsOption {
server_ca_cert_path: "tests/tls/server.cert.pem".to_string(),
client_cert_path: "tests/tls/client.cert.pem".to_string(),
client_key_path: "tests/tls/client.key.pem".to_string(),
});
let re = ChannelManager::with_tls_config(config);
assert!(re.is_ok());
let re = re.unwrap().get("127.0.0.1:0");
assert!(re.is_ok());
}
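
For context, here is a minimal application-side sketch of the mTLS API introduced in this diff; the certificate paths and peer address below are placeholders, not values taken from the change:

```rust
use common_grpc::channel_manager::{ChannelConfig, ChannelManager, ClientTlsOption};

fn connect_with_mtls() -> Result<(), Box<dyn std::error::Error>> {
    // Point these at your own PEM files; the paths here are hypothetical.
    let config = ChannelConfig::new().client_tls_config(ClientTlsOption {
        server_ca_cert_path: "/etc/greptimedb/tls/server.cert.pem".to_string(),
        client_cert_path: "/etc/greptimedb/tls/client.cert.pem".to_string(),
        client_key_path: "/etc/greptimedb/tls/client.key.pem".to_string(),
    });
    // Reads and validates the certificate files up front.
    let manager = ChannelManager::with_tls_config(config)?;
    // Channels handed out by this manager carry the client TLS identity.
    let _channel = manager.get("127.0.0.1:4001")?;
    Ok(())
}
```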

View File

@@ -0,0 +1,36 @@
-----BEGIN CERTIFICATE-----
MIIGOzCCBCOgAwIBAgIBATANBgkqhkiG9w0BAQsFADCBhzELMAkGA1UEBhMCSU4x
EjAQBgNVBAgMCUthcm5hdGFrYTESMBAGA1UEBwwJQkFOR0FMT1JFMRUwEwYDVQQK
DAxHb0xpbnV4Q2xvdWQxEjAQBgNVBAMMCWNhLXNlcnZlcjElMCMGCSqGSIb3DQEJ
ARYWYWRtaW5AZ29saW51eGNsb3VkLmNvbTAeFw0yMzAyMTQxMTM4MDFaFw0yNzA4
MjIxMTM4MDFaMHIxCzAJBgNVBAYTAklOMRIwEAYDVQQIDAlLYXJuYXRha2ExFTAT
BgNVBAoMDEdvTGludXhDbG91ZDERMA8GA1UEAwwIc2VydmVyLTIxJTAjBgkqhkiG
9w0BCQEWFmFkbWluQGdvbGludXhjbG91ZC5jb20wggIiMA0GCSqGSIb3DQEBAQUA
A4ICDwAwggIKAoICAQDNPiXZFK1cDOevdU5628xqAZjHn2e86hD9ih0IHvQKbcAm
a8fhFMQ+Gki+p2+Ga1fxHDi1+aUn00UjyLAxSMQVulpZWYHsRj3koyD9LyTvpDQk
SwJhFNtL33WlqUMtjgVXoznjECfhc/hwKJ9BS0b5j21XzqYkSKTJNcxZmoNLJVvL
dfbsWjLywSAHbcF1gs2w3IxruPQwyMXL1URjcwGRTtK+zk6QGxgyXsIEJDW4EZqR
xXgmEz7jx7vfDLaYc8GoujTki2dkyTWQkdDrJ4/N7VWGOGjL60EJDOcQyCowDuAq
sbB5C9OuhB59o2/wzeSeaY7qS5nLOufwiYmvc1S6kgi9emirxqFLmrcaJv8QPDEX
6ufI8wSkCS/CX/IUNXPkSripU3zQcjorinAw3w9pGY1VNknz5AgDXrEAW17aZKsp
QyLSyl87vG9dhjybdkc7QyBghTxweggYT1INY6dmj9ijIyU+9V64xOTb9dlbgLW/
qAvZyeq2H9Z5aBwkG31n1b2rX0JEK+/NC+8PRs2tWq63EOB8hzh4mF9RKLcZC3zS
9eJa1B0ugyy5fw8GGWA49H3rFoU2u7+Gazzdn5uD9sqLuVnzW1FREDhMHGd4VdRx
vuhUp9jz9u0WDRr2Ix7N7Vd57mwhBPivUywg7QwZSTqlIrGVoQFPL4BjWwSSswID
AQABo4HFMIHCMAkGA1UdEwQCMAAwEQYJYIZIAYb4QgEBBAQDAgWgMDMGCWCGSAGG
+EIBDQQmFiRPcGVuU1NMIEdlbmVyYXRlZCBDbGllbnQgQ2VydGlmaWNhdGUwHQYD
VR0OBBYEFI056bMc2jHoeOTUGBCpBGGY/UfQMB8GA1UdIwQYMBaAFKVZwpSJCPkN
wGXyJX1sl2Pbby4FMA4GA1UdDwEB/wQEAwIF4DAdBgNVHSUEFjAUBggrBgEFBQcD
AgYIKwYBBQUHAwQwDQYJKoZIhvcNAQELBQADggIBABHQ/EGnAFeIdzKTbaP3kaSd
A3tCyjWVwo9eULXBjsMFFyf4NDw8bkrYdJos6rBpzi6R1PUb4UMc9CUF6ee9zbTK
mDeusqwhDOLmYZot1aZbujMngpbMoQx5keSQ9Eg10npbYMl6Sq3qFbAST9l/hlDh
Ue9KhfrAvrSobP0WWb/EpEXZMt2DafKpoz4nvtFpcOO5kbsQ+/eQfWHmR/k6sCYG
UycFYCJCFQz2xG8wtbExg5iyaR3nE0LfqZwRxhIa4iSWlCecYc1XUJnOh8fIeop4
9fD5k2wqvCEBAZiaKg2RYbaw6LIFkg7c99B4Gt5eez7Bs878T7lS+xl9wbzinzez
WFIgsDYHYjmK8s5WXXWwT7UhqSA12FHOp8grqFllXV/dOPTFz+dq9Mn1VGgH6MS4
Ls3r2LH5ycAz+gkoY2wlnF++ItpB2K3LTlqk+OvQZ1oXMq8u5F6XsM7Uirc7Da+9
MEG1zBpGvA/iAd2kKd3APS+EuoytSt022bD7YDJ1isuxT5q2Hpa4p14BJHCgDKTZ
vPYIdzCh05vwLwB28T8bh7s5OLOcRY9KmxVPkT0SYLOk11j5nZ1N/hQvGDxL60e2
RBS3ADHkymIE55Xf1VLXcs17zR9fLV+5fiSQ40FLjcBEjhkvrzcDe3tVFsA/ty9h
dBCSsexiXj/S5KwKtz/c
-----END CERTIFICATE-----

View File

@@ -0,0 +1,51 @@
-----BEGIN RSA PRIVATE KEY-----
MIIJKQIBAAKCAgEAzT4l2RStXAznr3VOetvMagGYx59nvOoQ/YodCB70Cm3AJmvH
4RTEPhpIvqdvhmtX8Rw4tfmlJ9NFI8iwMUjEFbpaWVmB7EY95KMg/S8k76Q0JEsC
YRTbS991palDLY4FV6M54xAn4XP4cCifQUtG+Y9tV86mJEikyTXMWZqDSyVby3X2
7Foy8sEgB23BdYLNsNyMa7j0MMjFy9VEY3MBkU7Svs5OkBsYMl7CBCQ1uBGakcV4
JhM+48e73wy2mHPBqLo05ItnZMk1kJHQ6yePze1Vhjhoy+tBCQznEMgqMA7gKrGw
eQvTroQefaNv8M3knmmO6kuZyzrn8ImJr3NUupIIvXpoq8ahS5q3Gib/EDwxF+rn
yPMEpAkvwl/yFDVz5Eq4qVN80HI6K4pwMN8PaRmNVTZJ8+QIA16xAFte2mSrKUMi
0spfO7xvXYY8m3ZHO0MgYIU8cHoIGE9SDWOnZo/YoyMlPvVeuMTk2/XZW4C1v6gL
2cnqth/WeWgcJBt9Z9W9q19CRCvvzQvvD0bNrVqutxDgfIc4eJhfUSi3GQt80vXi
WtQdLoMsuX8PBhlgOPR96xaFNru/hms83Z+bg/bKi7lZ81tRURA4TBxneFXUcb7o
VKfY8/btFg0a9iMeze1Xee5sIQT4r1MsIO0MGUk6pSKxlaEBTy+AY1sEkrMCAwEA
AQKCAgEAw2jBZj5+k96hk/dPIkA1DlS43o7RmRcN2CdwXrQBzBAUW0BRDObVtP8X
dZY647M+BozFHdUzPoizEk/YGQRb1QgZT2qd/ZQfB5mdJhGFzDf9gPR9rmrKJCH8
hB50nGHUik0ZJyvRnKDqz/aNMgB28dJx26Efo/oaEoyLJGCtUpWeIUgOMZfrXB8t
3ITOJZDFP/esJj/xFqWBVQGXXEw6GNwAYLRSLnftgL+hX4oOL1NrZBCrxSybuwkG
wWX8T4gewQOQqmxjo5zCyANc8xc2nmyx+dmpRUWWJQTI1ryNFjaDjYKiL41oHIcj
9KDwSkftvDlqXX5fThSmkeiRU5+t8UMj4+Bt7opCzIlwHtQe+95BqiXQ7bHfCjn7
GvShZgHo45rDkfwWDz/pYhHQ2Wb9DkhEtwa0cu3mDMGc6BY+4yo+Vz6Rk1TypxQw
LIa43WgVCRm66Mq65sObx7wkdxvolUE8j1Io3AHwgeBjV+gISV9srj2m/HnOmFFb
16SKQEDEVoaci+v6DT8A7UOZH4sgYSbknHdjMy6c6UlYgd8UNqbY3h/ohZ2JOcPd
8DqGUDGKbpS7OxWogxb9K++6SPSn86sPmUjzRPMgijVjU5pyK42DpZj1/RIe8Tml
JXVqHuZvURK4Qi3ECQ09m9vQ9nS88HMRVJ7sFSca6HOFYSFyAfkCggEBAPN35hva
OhbgQlFJrpo5YDYS5v7l7YjLbry6DaCR1CpYaKlTPkc4tiznCUHe1N41mR4qu2Tc
4+m7GN9BZfLU8w/Jvrp7mAO7fZXZtIzTQrZQDbAZppUGBbGBoAOlLVxR4NrN2TSk
49Ljj87UynhxeCv6RWx0F1p1/VIZertLELbSdb3C43pAsNSXzbkb7LtT9RXemyUL
LBK4ugcXMSZrzHJK1Ct31LoGd9m+TEp/VW2aGMeWliuIticJx44OW4tlJ70qKrd0
KezBZVMHPa3FqW7kdYwdlISoqZsE9OVPgLCQNVLhDO1YMaTl3WEHKTRBxTF70pvv
zMkSRQGoU4ff7AUCggEBANfOkCsx2mRvJV+UYxW8R6510w2H1bNbNRbfTJAo8kld
/7dXU4H3QrhUrCsSyc5ijm09q7I4+rc+uMxfT/R1mO5tq9AueCWhg85WV+NBR1FE
Yg7MX+zblpHqUDQoTj9vvgwLyqvZ7k9NON42Zz+Tj2worICnVlDvahm/3NaItT9B
oGhsEoJjYFK4Hq7RwosU+KPXkQBxWzrNLipo8jx0XFpPZVHSLIFs9eW25bnj/qxc
toMgx4IsvEDlzS/oqfycCrDdKwqiW74w0Djb5TiJv+dYzl9GnN6istqbUTNZkJjn
lkbmegrtfz3Yd1ORvjkNqHuANyuR+YnUSIsb0PV5eVcCggEAck5bgb4eQbk+SY3P
ZOcFLb4IJ6ppsCzaq86qMTXmJ49kbAMCHUwZ89DwvrVQuZbucYRcgMlYU9ccoUzC
AZVLHKF6Y3E9eJshJiaVJvzUuGWzV3djh1nReHpEVxHIzyw95lx42seDkvJ2BQRQ
nuWfJv6Uc4u5nyYALfh6b86ZZUxALTx/slkG7HjtBDiBF54eVgsySd0J7yw9YrDX
yZMY5JwPKu1SuZfp0xgOF3fa8t9DPQmNLZk88+0afK5u+m4ejyhp78GhIV/XI3kl
0x0XJFIsggEtRm8tWfOkyrhd0geSkXvJpvEeNa4aFsDW7ormewoIYl/ehJSIQ3P0
67kMxQKCAQA12iP7w2r+GQY4fazkJaG1lU1fWQAoy5/J31sZtj4PtNc1ByOdkPgj
S23TKdMWH13vQK5xwOo/g/VVeotXM2lARjnTr2Tn7xAXE1DHMuj7DJdznehqELnY
G6J8AXrVNas1ElQ24iEnxNtmCClnogjuMpApYpiVhcjyOACBwIeKC3Rd2mocA3Rr
7+ooMcvcLRWGvSo/9AmR+NWGW73m/Bp3psxfyJS2j1wlQKi+5HgOxuv8eNeQUl1/
zFiRlfulP8MjM22kL7O5GDE9nxHqM+Whc3W8LMDEhdEf4BY5PCZrIY9MjgLyayWP
Z08PmZTgY9ohR3N8+eZNUJ3xqLVSLEftAoIBAQDF1K8lPXAs8e4V0oc9hq4GFLvi
E0KC+8X1ShzvkVGV/3Kz1FJ0bwix/M3C5XSSNguxHI6CG2GprJlExp1qqwlvmGr2
hHdfemvq6tF4qjXLgPXvgoWocBGNUvBXxFVuc0hOHgT/X3+GsPYtNvZb3fp+4Bm6
ugUu05drqrHSOY5kUbU3jf/5KctnDFmOsSeOgGiI/JJWVcKJALpDkhazRL0nxfuW
6xU6pZazhCAby2Qn+wn0xyi4bEZSNobiQTgOXOC0DA1uGD3XHctCMnSBtYtocQjq
IFT2l3u4pEKpVQwuc4+yObWUT47oBxV6vFneXsnV89vd2SSUPuR8GIYYeA+/
-----END RSA PRIVATE KEY-----

View File

@@ -0,0 +1,50 @@
rWtZ7U3SoVAl6yMhfJsB
LcEGbuCfgFxk2ADw0N1G
byTKlrUgoRZeSc0cYHTf
0XjbRCBtMV9yYaVJKPwi
rGofQgFoc1lW0U5x2bnN
O9nn9aDe5t5LAlGS81uX
aBMvuzVjHbZKOlabXl4W
ZJc06qngAcQWQUu8nAnR
FLsjhoaTyuaDMY3OWJAx
5Dt7YglND5uFAqYwRG9L
agLGOCH8suwnXGYaPxjM
Ysb5RANkpgcbSulLZiic
4sLmpJomjokwZbctODVW
pCLiQT3wWDJ7YjIePR6g
P3Jlg0LDhbgSwXxgjjUR
6qGRfcb8LFlVlT7O1ze2
lFBNWzijkPeKyKmwpOSa
oGCR2OUg71n0Tzt2a3ir
WLijq0bL1Cetz24fv738
L3MEAwezFBW38U4QilNz
uza1bC3PgToermGSgKLx
WMdgjZIszK4t6Rehelx8
YpCJWVXTob3Gn4bMwWJO
xpJ9qhvMBdD8iamheF4b
bUm1YmHW4gPT1ujiqCmN
I7hOFurjJ6zvXGETyfCn
w23W8PNFWbqpHUKN59Bz
HpbsIRDVVpEGxnoWmdjq
58BUOxDdbTZxCKt0UqLD
uUPOlW8bRhuC1tK1NL5u
wq9ybcfwZ4jIHyYlHZ5M
4t4zKLRG2DN6icHmctOW
TzYp3np0OFsTlzCwkogM
Os6SOvjU0Irq2Xo5wLvn
1nN6FQwUxcw0H5rfQEZo
NioHP0JdBv3HmIaQZs1n
8lJWLVof1TBWtRUKmWmO
79DcTURdzt28Vdn6F0K0
UiG15bda4Pb81I9IE9ug
iZkC7CE98aE6WQK9Ghlu
dNXJTkUD3uVg6Tqi3957
Hfa9xMclyrxsOvkGcudI
QbcvG5Apom6nBWIGHRMQ
68rn9eZEcq5mJLaiNmHr
5AOtHddC5NVgQLgdmmKb
gQlrcSXzxT6V6jzbxZ79
xmulvmkeqG4kj6TAuJEg
u9dCkExxv5tLSpF8hC08
HHU4QE56UC97djO5EpmK
g3rElyboRHlAYPWviWbm

View File

@@ -0,0 +1,40 @@
-----BEGIN CERTIFICATE-----
MIIG+jCCBOKgAwIBAgIBAjANBgkqhkiG9w0BAQsFADCBhzELMAkGA1UEBhMCSU4x
EjAQBgNVBAgMCUthcm5hdGFrYTESMBAGA1UEBwwJQkFOR0FMT1JFMRUwEwYDVQQK
DAxHb0xpbnV4Q2xvdWQxEjAQBgNVBAMMCWNhLXNlcnZlcjElMCMGCSqGSIb3DQEJ
ARYWYWRtaW5AZ29saW51eGNsb3VkLmNvbTAeFw0yMzAyMTQxMTM5NDBaFw0yNzA4
MjIxMTM5NDBaMHAxCzAJBgNVBAYTAklOMRIwEAYDVQQIDAlLYXJuYXRha2ExFTAT
BgNVBAoMDEdvTGludXhDbG91ZDEPMA0GA1UEAwwGc2VydmVyMSUwIwYJKoZIhvcN
AQkBFhZhZG1pbkBnb2xpbnV4Y2xvdWQuY29tMIICIjANBgkqhkiG9w0BAQEFAAOC
Ag8AMIICCgKCAgEAvVtxAoRjLRs3Ei4+CgzqJ2+bpc0sBdUm/4LM/D+0KbXxwD7w
HP6GcKl/9zf9GJg56pVXxXMaerMDLS4Est25+mBgqcePC6utCBYrKA25pKbkFkxZ
TPh9/R4RHGVJ3KHy9vc4VzqoV7XFMJFFUQ2fQywHZlXh6MNz0WPTIGaH7hvYoHbK
I3NpPq8TjRuuV61XB0hK+RW0K6/5Yuj74h/mfheX1VIUOjGwKnTPccZQAlrKYjeW
BZBS4YqahkTIaGLa06SdUSkuhL85rqAxWvhK9GIRlQLNYJOzg+E3jGyqf566xX60
fxM6alLYf+ZzCwSBuDDj5f+j752gPLYUI82YL4xQ+AEHNR8U1uMvt0EzzFt7mSRe
fobVr+Y2zpci+mo7kcQGOhenzGclsm+qXwMhYUnJcOYFZWtTJlFaaPreL4M3Dh+2
pmKj23ZU6zcT3MYtE6phjCLJl0DsFIcOn+tSqMdpwB20EeQjo9bVJuw/HJrlpcnY
U9aLsnm/4Ls5A0BQutZnxKBIJjpzp8VfK0WU8a4iKok3AS0z1/K+atNrgSUB9DCH
0MvLqqQmM9TdLcZj7NSEfLyyFVwPRc5dt4CrNDL7JUpMzt36ezU83JU+nfqWDZsL
+2JOaE4gGLZDcA3cfP83/mYRaAnYW/9W4vEnIpa6subzq1aFOeY/3dKLTx8CAwEA
AaOCAYUwggGBMAkGA1UdEwQCMAAwEQYJYIZIAYb4QgEBBAQDAgZAMDMGCWCGSAGG
+EIBDQQmFiRPcGVuU1NMIEdlbmVyYXRlZCBTZXJ2ZXIgQ2VydGlmaWNhdGUwHQYD
VR0OBBYEFLijeA+RFDQtuVeMUkaXqF7LF50GMIG8BgNVHSMEgbQwgbGAFKVZwpSJ
CPkNwGXyJX1sl2Pbby4FoYGNpIGKMIGHMQswCQYDVQQGEwJJTjESMBAGA1UECAwJ
S2FybmF0YWthMRIwEAYDVQQHDAlCQU5HQUxPUkUxFTATBgNVBAoMDEdvTGludXhD
bG91ZDESMBAGA1UEAwwJY2Etc2VydmVyMSUwIwYJKoZIhvcNAQkBFhZhZG1pbkBn
b2xpbnV4Y2xvdWQuY29tggkA7NvbvF8jodEwDgYDVR0PAQH/BAQDAgWgMBMGA1Ud
JQQMMAoGCCsGAQUFBwMBMCkGA1UdEQQiMCCHBMCoAHKHBAoAAg+CEnNlcnZlci5l
eGFtcGxlLmNvbTANBgkqhkiG9w0BAQsFAAOCAgEAXvaS9+y5g2Kw/4EPsnhjpN1v
CxXW0+UYSWOaxVJdEAjGQI/1m9LOiF9IHImmiwluJ/Bex1TzuaTCKmpluPwGvd9D
Zgf0A5SmVqW4WTT4d2nSecxw4OICJ3j6ubKkvMVf9s+ZJwb+fMMUaSt80bWqp1TY
XbZguv67PkBECPqVe6rgzXnTLwM3lE8EgG8VtM3IOy9a5SIEjm5L8SQ2I2hiytmE
e4jR1fbZsB5NbBdfA3GFMKQEE2dIymkG3Bz71M3tZi1y4RnHtRKdrFtrIlgclrwd
nVnQn/NiXUOOzsL2+vwSF32SSbiLvOxu63qO1YDBkKVChog3P/2f6xcJ23wkbHlL
qaL2jvLo6ylvMPUYHf5ZWat5zayaGUMHYDKcbD4Dw7aY3M0tNgEHdqUqNePmKvmn
luyXof3KmmLgWlcfBoX96a7hXDtxFyB2N4nzfQBXh+0VAlgqa+ZZhpdEqRQaWkkR
MDBdsVJ9O3812IaNfMzpS1vb701GFDCM5Hcyw6a/v6Ln08NMhYut4saLi13kHilS
Wq7wOAfW3rzxuhjOJJxsi0jJNI775q+a/BbbG/CPl826bXPGH43BdPV8mKwsX5HM
wwDKf3otP/v7bxwJabfhv2EKUy+W1kkFW9FEZ919yTtfhSDrTNcrXtE7RkiAepfm
95I025URIlhJGLGBUlA=
-----END CERTIFICATE-----

View File

@@ -0,0 +1,17 @@
[package]
name = "common-mem-prof"
version.workspace = true
edition.workspace = true
license.workspace = true
[dependencies]
common-error = { path = "../error" }
snafu.workspace = true
tempfile = "3.4"
tikv-jemalloc-ctl = { version = "0.5", features = ["use_std"] }
tikv-jemallocator = "0.5"
tokio.workspace = true
[dependencies.tikv-jemalloc-sys]
version = "0.5"
features = ["stats", "profiling", "unprefixed_malloc_on_supported_platforms"]

View File

@@ -0,0 +1,50 @@
# Profile memory usage of GreptimeDB
This crate provides an easy way to dump memory profiling data.
## Prerequisites
### jemalloc
```bash
# for macOS
brew install jemalloc
# for Ubuntu
sudo apt install libjemalloc-dev
```
### [flamegraph](https://github.com/brendangregg/FlameGraph)
```bash
curl https://raw.githubusercontent.com/brendangregg/FlameGraph/master/flamegraph.pl > ./flamegraph.pl
```
### Build GreptimeDB with the `mem-prof` feature
```bash
cargo build --features=mem-prof
```
## Profiling
Start a GreptimeDB instance with the following environment variable:
```bash
MALLOC_CONF=prof:true,lg_prof_interval:28 ./target/debug/greptime standalone start
```
Dump memory profiling data through the HTTP API:
```bash
curl localhost:4000/v1/prof/mem > greptime.hprof
```
You can periodically dump profiling data and compare the dumps to find the delta in memory usage.
## Analyze profiling data with flamegraph
To create a flamegraph from the dumped profiling data:
```bash
jeprof --svg <path_to_greptimedb_binary> --base=<baseline_prof> <profile_data> > output.svg
```
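For example, assuming two dumps were taken some time apart (the file names below are illustrative), the earlier dump can be used as the baseline so the flamegraph shows only the memory allocated in between:
```bash
# take two dumps some time apart (names are arbitrary)
curl localhost:4000/v1/prof/mem > dump-t0.hprof
sleep 600
curl localhost:4000/v1/prof/mem > dump-t1.hprof
# render only the delta between the two dumps
jeprof --svg ./target/debug/greptime --base=dump-t0.hprof dump-t1.hprof > delta.svg
```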

View File

@@ -0,0 +1,66 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::any::Any;
use std::path::PathBuf;
use common_error::prelude::{ErrorExt, StatusCode};
use snafu::{Backtrace, Snafu};
pub type Result<T> = std::result::Result<T, Error>;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub))]
pub enum Error {
#[snafu(display("Failed to read OPT_PROF"))]
ReadOptProf { source: tikv_jemalloc_ctl::Error },
#[snafu(display("Memory profiling is not enabled"))]
ProfilingNotEnabled,
#[snafu(display("Failed to build temp file from given path: {:?}", path))]
BuildTempPath { path: PathBuf, backtrace: Backtrace },
#[snafu(display("Failed to open temp file: {}", path))]
OpenTempFile {
path: String,
source: std::io::Error,
},
#[snafu(display("Failed to dump profiling data to temp file: {:?}", path))]
DumpProfileData {
path: PathBuf,
source: tikv_jemalloc_ctl::Error,
},
}
impl ErrorExt for Error {
fn status_code(&self) -> StatusCode {
match self {
Error::ReadOptProf { .. } => StatusCode::Internal,
Error::ProfilingNotEnabled => StatusCode::InvalidArguments,
Error::BuildTempPath { .. } => StatusCode::Internal,
Error::OpenTempFile { .. } => StatusCode::StorageUnavailable,
Error::DumpProfileData { .. } => StatusCode::StorageUnavailable,
}
}
fn backtrace_opt(&self) -> Option<&Backtrace> {
snafu::ErrorCompat::backtrace(self)
}
fn as_any(&self) -> &dyn Any {
self
}
}

View File

@@ -0,0 +1,74 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
pub mod error;
use std::ffi::{c_char, CString};
use std::path::PathBuf;
use snafu::{ensure, ResultExt};
use tokio::io::AsyncReadExt;
use crate::error::{
BuildTempPathSnafu, DumpProfileDataSnafu, OpenTempFileSnafu, ProfilingNotEnabledSnafu,
ReadOptProfSnafu,
};
const PROF_DUMP: &[u8] = b"prof.dump\0";
const OPT_PROF: &[u8] = b"opt.prof\0";
pub async fn dump_profile() -> error::Result<Vec<u8>> {
ensure!(is_prof_enabled()?, ProfilingNotEnabledSnafu);
let tmp_path = tempfile::tempdir().map_err(|_| {
BuildTempPathSnafu {
path: std::env::temp_dir(),
}
.build()
})?;
let mut path_buf = PathBuf::from(tmp_path.path());
path_buf.push("greptimedb.hprof");
let path = path_buf
.to_str()
.ok_or_else(|| BuildTempPathSnafu { path: &path_buf }.build())?
.to_string();
let mut bytes = CString::new(path.as_str())
.map_err(|_| BuildTempPathSnafu { path: &path_buf }.build())?
.into_bytes_with_nul();
{
// #safety: we always expect a valid temp file path to write profiling data to.
let ptr = bytes.as_mut_ptr() as *mut c_char;
unsafe {
tikv_jemalloc_ctl::raw::write(PROF_DUMP, ptr)
.context(DumpProfileDataSnafu { path: path_buf })?
}
}
let mut f = tokio::fs::File::open(path.as_str())
.await
.context(OpenTempFileSnafu { path: &path })?;
let mut buf = vec![];
f.read_to_end(&mut buf)
.await
.context(OpenTempFileSnafu { path })?;
Ok(buf)
}
fn is_prof_enabled() -> error::Result<bool> {
// safety: OPT_PROF variable, if present, is always a boolean value.
Ok(unsafe { tikv_jemalloc_ctl::raw::read::<bool>(OPT_PROF).context(ReadOptProfSnafu)? })
}
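
As an illustration, a minimal hypothetical caller of the new `dump_profile` function might look like the following; it assumes a tokio runtime with the `macros` feature and that jemalloc profiling was enabled at startup:

```rust
// A hypothetical standalone caller; not part of this change.
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Fails with ProfilingNotEnabled unless jemalloc profiling is turned on,
    // e.g. via MALLOC_CONF=prof:true.
    let data = common_mem_prof::dump_profile().await?;
    tokio::fs::write("greptime.hprof", data).await?;
    Ok(())
}
```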

Some files were not shown because too many files have changed in this diff.