Compare commits


350 Commits

Author SHA1 Message Date
Yingwen
5b5d953d56 ci: tolerate error while building arm64 releases (#1143)
* ci: allow failure while building arm64 docker

* ci: Remove continue-on-error on docker step
2023-03-08 21:11:40 +08:00
Yingwen
3f6cbc378d ci: Disable arm64 release temporarily (#1141) 2023-03-08 19:13:00 +08:00
Yingwen
9619940569 ci: Allow error when building release for non-x86 platform (#1140) 2023-03-08 18:12:06 +08:00
Weny Xu
ed8252157a chore: code styling (#1137)
Co-authored-by: Ruihang Xia <waynestxia@gmail.com>
2023-03-08 08:10:12 +00:00
Ruihang Xia
3e0fb7e75b test: ignore two test cases due to arrow-datafusion#5513 (#1138)
* test: ignore two test cases due to arrow-datafusion#5513

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* clean up

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-03-08 07:29:34 +00:00
Bohan Wu
ba3ce436df refactor(SST): UUID as id in FileMeta (#1116)
* feat(SST): use a newType named FileId for FileMeta

* chore: rename some functions

* fix: compatible for previous FileMeta format

* fix: alias for file_id when getting deserialized
2023-03-08 14:27:20 +08:00
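A minimal sketch of the FileId-newtype idea from the commit above, assuming serde-based manifests; the struct layout, field names, and the `alias` used for the "compatible for previous FileMeta format" fix are illustrative guesses, not the actual GreptimeDB code:

```rust
use serde::{Deserialize, Serialize};
use uuid::Uuid;

/// Newtype so SST file ids are UUIDs rather than bare strings/integers.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct FileId(Uuid);

impl FileId {
    pub fn random() -> FileId {
        FileId(Uuid::new_v4())
    }
}

/// A serde `alias` lets manifests written in the previous `FileMeta`
/// format (old field name here is hypothetical) still deserialize.
#[derive(Debug, Serialize, Deserialize)]
pub struct FileMeta {
    #[serde(alias = "file_name")]
    pub file_id: FileId,
}
```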
Eugene Tolbakov
b31a6cb506 refactor: replace tempdir with tempfile (#1123)
* refactor: replace tempdir with tempfile

* refactor(query): move tempfile dependency under the workspace's Cargo.toml

* refactor(tempfile): create common-test-util

* refactor(tempfile): fix toml format

* refactor(tempfile): remove tempfile out of dependencies

* refactor(tempfile): fix incorrect toml
2023-03-08 11:15:56 +08:00
SSebo
95090592f0 feat: mysql prepare: replace sql placeholders with params (#1086)
* feat: mysql prepare by replacing ? in sql with param

* chore: mysql prepare statement supports time param

* chore: prepare test more types

* chore: add TODO
2023-03-08 11:02:29 +08:00
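The gist of the placeholder substitution above, as a hedged sketch: each `?` in the prepared statement is replaced with the corresponding parameter rendered as a SQL literal. The real implementation must also handle `?` inside quoted strings and per-type formatting (e.g. the time params mentioned above):

```rust
/// Replace each `?` placeholder with the matching parameter literal.
/// Illustrative only: ignores `?` occurring inside string literals.
fn replace_placeholders(sql: &str, params: &[String]) -> String {
    let mut out = String::with_capacity(sql.len());
    let mut next = params.iter();
    for ch in sql.chars() {
        if ch == '?' {
            // Fall back to NULL when there are fewer params than holes.
            out.push_str(next.next().map(String::as_str).unwrap_or("NULL"));
        } else {
            out.push(ch);
        }
    }
    out
}
```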
Ruihang Xia
3a527c0fd5 feat: impl proc macro range_fn and some aggr_over_time functions (#1072)
* impl range_fn proc macro

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* impl some aggr_over_time fn

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* impl present_over_time and absent_over_time

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* accomplish planner, and correct type cast

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* clean up

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* document the macro

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix styles

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* update irate/idelta test

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add test cases

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix clippy

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-03-07 23:39:45 +08:00
elijah
819b60ca13 feat(datatypes): implement VectorOp::take (#1115)
* feat: add take index method for VectorOp

* chore: make clippy happy

* chore: make clippy happy

* chore: improve the code

* chore: improve the code

* chore: add take null test

* chore: fix clippy
2023-03-07 19:27:33 +08:00
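Conceptually, a `take` vector op gathers elements by index and propagates nulls, much like the Arrow `take` kernel the real `VectorOp::take` would build on. Below is a self-contained toy version (the `Option`-based null handling mirrors the "take null test" above), not the actual signature:

```rust
/// Toy `take`: gather `values` at `indices`; a `None` index yields null.
fn take<T: Clone>(values: &[T], indices: &[Option<usize>]) -> Vec<Option<T>> {
    indices
        .iter()
        .map(|idx| idx.map(|i| values[i].clone()))
        .collect()
}

fn main() {
    let v = [10, 20, 30];
    // A null index stays null in the output.
    assert_eq!(take(&v, &[Some(2), None, Some(0)]), vec![Some(30), None, Some(10)]);
}
```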
Weny Xu
7169fe2989 feat: implement Copy From (#1064) 2023-03-07 17:54:11 +08:00
Zheming Li
b70672be77 feat: track disk usage of regions (#1125)
* feat: track disk usage of regions

Signed-off-by: Zheming Li <nkdudu@126.com>

* calculate disk usage when call

* add default on file meta

---------

Signed-off-by: Zheming Li <nkdudu@126.com>
2023-03-07 17:13:12 +08:00
Lei, HUANG
a4c01f4a3a feat: memory profiling (#1124)
* feat: use jemalloc as default allocator

* feat: add feature for mem-prof

* feat: add errors

* make common-mem-prof optional dep

* fix: toml format

* doc: add profile doc

* fix: typo
2023-03-07 17:12:51 +08:00
Weny Xu
bd98a26cca chore: bump greptime-proto to latest(ad01872) (#1102) 2023-03-07 10:52:42 +08:00
shuiyisong
1b4236d698 refactor: use split instead of serde_urlencoded in http auth (#1110)
* refactor: change from urlencoded to regex

* refactor: change from urlencoded to regex

* chore: add unit test

* chore: update comment

* chore: remove local benchmark test

* chore: minor fix

* chore: remove unused dep
2023-03-07 10:51:47 +08:00
Lei, HUANG
e8cc9b4b29 test: add manifest compatibility tests (#1130)
* tests: add manifest compatibility tests

* fix: clippy
2023-03-06 19:31:54 +08:00
discord9
379f581780 test: add integration test for Coprocessor & fix minor bugs (#1122)
* feat: cache `Runtime`

* fix: coprstream schema not set

* test: integration tests for Coprocessor

* fix: UDF fixed

* style: remove unused import

* chore: remove more unused import

* feat: `filter`, (r)floordiv for Vector

* chore: CR advices

* feat: auto convert to `lit`

* chore: fix typo

* feat: from&to `pyarrow.array`

* feat: allow `pyarrow.array` as args to builtins

* chore: cargo fmt

* test: CI add `pyarrow`

* test: install Python&PyArrow in CI

* test: not cache depend for now

* chore: CR advices

* test: fix name

* style: rename
2023-03-06 19:20:59 +08:00
fys
ff6cfe8e70 refactor: move the batch_get to KvStore trait (#1029)
* move batch_get from KvStoreExt to KvStore

* add some unit tests

* add some unit test

* add some unit tests

* expose batch_get grpc method
2023-03-06 17:35:43 +08:00
Igor Morozov
5a397917c0 docs(contributingmd): add run tests commands (#1129)
* docs(contributingmd): add run tests commands

* docs(contributingmd): add link to nextest website

Co-authored-by: dennis zhuang <killme2008@gmail.com>

---------

Co-authored-by: dennis zhuang <killme2008@gmail.com>
2023-03-06 15:54:16 +08:00
fys
559880cb84 fix: cannot find catalog when creating table (#1118)
* fix: get catalog by name in RemoteCatalogManager

* cr

* cr

* cr

* fix: ut failed
2023-03-06 14:44:40 +08:00
Ruihang Xia
b76b27f3bf refactor: try to remove unnecessary tests in error mod (#750)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-03-06 12:31:30 +08:00
yuanbohan
d4e0dc3685 feat: specify prom server start addr (#1111)
* feat: specify promql server start addr

* refactor: rename promql to prom in Prometheus API server scenario
2023-03-06 11:07:21 +08:00
Eugene Tolbakov
b022556b79 fix: apply ttl and write_buffer_size options when a table is created via procedure (#1117)
* fix: apply ttl and write_buffer_size options when a table is created via procedure

* fix: address code review suggestion

* fix: use borrowing of table_options correctly
2023-03-05 19:37:23 +08:00
shuiyisong
bd065ea6e8 fix: remove incorrect continue (#1114) 2023-03-02 19:52:17 +08:00
yuanbohan
9a87f5edf8 fix(grpc): support timestamp precision (#1113) 2023-03-02 17:33:59 +08:00
Weny Xu
e851b6d019 feat: implement Copy From parser (#1092)
* feat: implement Copy From parser

* Apply suggestions from code review

Co-authored-by: Yingwen <realevenyag@gmail.com>

---------

Co-authored-by: Lei, HUANG <6406592+v0y4g3r@users.noreply.github.com>
Co-authored-by: Yingwen <realevenyag@gmail.com>
2023-03-02 14:03:13 +08:00
Ruihang Xia
e7b92f24e8 feat: impl EmptyMetric plan and time() function (#1100)
* impl EmptyMetric plan

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add test cases

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* impl planner part

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* adapt new datafusion changes

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix typo

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-03-02 03:15:55 +00:00
Igor Morozov
4b8db408cf style(contributingmd): fix markdown issues and typos (#1107)
* style(contributingmd): fix markdown issues and typos

* style(contributingmd): remove code blocks in lists
2023-03-01 20:00:36 +08:00
Yingwen
98659899c0 refactor: Move mito engine tests to a separate file (#1104)
* refactor(mito): Move tests to a separate file

* chore(query): Remove empty mod function
2023-03-01 11:46:39 +00:00
Ruihang Xia
b1311801da ci: update breaking-change labeler (#1109)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-03-01 19:24:21 +08:00
Yingwen
f1b65d9b77 test: fix datanode::test_read_from_config_file (#1106)
* test: Fix datanode::test_read_from_config_file

* test: frontend and metasrv don't read example toml file
2023-03-01 18:31:40 +08:00
Ruihang Xia
d5a2a26916 chore(deps): bump sqlness to v0.4 (#1101)
deps: bump sqlness to v0.4

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-03-01 17:27:16 +08:00
Ning Sun
8e7e68708f docs: correct readme format (#1105)
* docs: correct readme format

* ci: fix config name
2023-03-01 16:59:11 +08:00
Ruihang Xia
9c1118b06d ci: adjust title labeler's rule (#1079)
* ci: adjust title labeler's rule

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* Apply suggestions from code review

Co-authored-by: Yingwen <realevenyag@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
Co-authored-by: Yingwen <realevenyag@gmail.com>
2023-03-01 15:16:21 +08:00
Yingwen
3fb93efbd0 docs: Document fields in the config examples (#1098)
* docs: Add comments to standalone config example

* docs: Add comments to datanode config example

* docs: Add comments to frontend config example

* docs: Add comments to meta-srv config example

* docs: Use "GB" instead of "GiB"

* docs: Add link to the selector doc

* docs: Fix grammar
2023-03-01 15:14:08 +08:00
Yingwen
3fd9c2f144 feat: Store error in procedure state (#1062)
* docs: Change comment position

* refactor(procedure): Store error in ProcedureState

* test: Mock instance with procedure enabled

* feat: Add wait method to wait for procedure

* test(datanode): Test create table by procedure

* chore: Fix clippy
2023-03-01 14:37:50 +08:00
Ning Sun
75e48c5f20 ci: fix apidoc generation 2023-03-01 14:09:47 +08:00
Ning Sun
d402f83442 ci: generate apidocs when pushing to default branch (#1093)
* ci: generate apidocs when pushing to default branch

* ci: require clippy before running tests

* fix: resolve new clippy warnings on primitive slice

* fix: resolve more clippy warnings

* Update .github/workflows/apidoc.yml

Co-authored-by: Ruihang Xia <waynestxia@gmail.com>

* ci: add an index html to redirect

---------

Co-authored-by: Ruihang Xia <waynestxia@gmail.com>
2023-03-01 13:18:26 +08:00
discord9
c5c6494e0b feat: add PyO3 (hence CPython) as an optional backend (#976)
* refactor: ffi_types

* style: fmt

* refactor: use `String` for return when possible

* todo: vector_impl

* feat: pyobj_try_typed_val

* refactor: more backend indep function

* feat: +-*/ magic methods

* refactor: copr

* style: fmt

* feat: add paired tests

* refactor: more

* refactor: move inside `python` folder

* refactor: all but test code

* feat: builtins for PyO3

* chore: add licenses

* chore: remove unused&add todos

* refactor: remove old files

* chore: mark unused

* chore: fmt

* chore: license

* feat: query in PyO3

* test: paired testcases for rspy&pyo3

* feat: PyDataFrame (untested)

* feat: some allow_threads

* style: fmt

* style: add license

* feat: rebase manually of #962

* feat: more `allow_threads`

* chore: typo

* chore: remove some `TODO`

* test: allow margin of epsilon

* chore: code review advices

* chore: more CR adjust

* chore: more adjust

* feat: kwargs&its test

* chore: remove some `dbg!`

* chore: allow params

* fix: put `dataframe` into scope

* chore: newline

* fix: adjust after rebase

* fix: test serde skip attr

* style: taplo

* feat: add `pyo3_backend` feature

* doc: update CI&readme
2023-03-01 10:45:55 +08:00
shuiyisong
dc50095af3 fix: use catalog from connection (#1099)
* fix: using schema instead of full database

* fix: using schema instead of full database

* fix: using schema instead of full database

* chore: add debug log

* chore: remove debug log

* chore: remove debug log

* chore: fix cr
2023-03-01 10:34:57 +08:00
LFC
8cd69f441e feat: REPL issues logical plan to DB (#1097) 2023-02-28 16:59:48 +08:00
Weny Xu
f52fc9b7d4 fix: fix panic when the root is not specified (#1089) 2023-02-28 10:54:52 +08:00
shuiyisong
50d2685365 fix: fix catalog parsing issue (#1091)
fix: try fix catalog parsing issue
2023-02-27 22:51:49 +08:00
LFC
11d45e2918 refactor: upgrade DataFusion, Arrow and Sqlparser (#1074)
* refactor: upgrade DataFusion, Arrow and Sqlparser

* fix: resolve PR comments
2023-02-27 22:20:08 +08:00
shuiyisong
30287e7e41 fix: continue on catalog parsing error (#1090)
* fix: continue on catalog parsing error

* fix: change from warn to error
2023-02-27 11:28:45 +00:00
Xieqijun
0b3f955ca7 feat: Add an error variant RetryLater (#1058)
* feat: support retry error

* fix: ci

* fix: ci

* fix: fmt

* feat: add convert procedure error

* docs: add rustdoc

* fix: cr

* fix: cr

* fix: rm useless code
2023-02-27 17:19:37 +08:00
Ning Sun
4b58a8a18d feat: update substrait and prost version (#1080) 2023-02-27 15:18:12 +08:00
Yingwen
bd377ef329 feat: Procedure to create table and register table to catalog (#1040)
* feat: Add table-procedures crate

* feat: Implement procedure to create table

* feat: Integrate procedure manager to datanode

* test: Test CreateTableProcedure

* refactor: Rename table-procedures to table-procedure

* feat: Implement create_table_by_procedure

* chore: Remove comment

* chore: Add todo

* feat: Add procedure config to standalone mode

* feat: Register table-procedure loaders

* feat: Address review comments

CreateTableProcedure just returns an error if the subprocedure fails

* chore: Address CR comments
2023-02-27 11:49:23 +08:00
LFC
df751c38b4 feat: a simple REPL for debugging purpose (#1048)
* feat: a simple REPL for debugging purpose

* fix: rebase develop
2023-02-27 11:00:15 +08:00
Yingwen
f6e871708a chore: Rename MetaClientOpts to MetaClientOptions (#1075)
* fix: Serialize FrontendOptions to toml

* fix: Serialize DatanodeOptions to toml

* fix: Serialize StandaloneOptions to toml

See https://users.rust-lang.org/t/why-toml-to-string-get-error-valueaftertable/85903/2

* chore!: Rename MetaClientOpts to MetaClientOptions

BREAKING CHANGE: Change the meta_client_opts in the config file to
meta_client_options
2023-02-24 16:28:38 +08:00
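The linked thread concerns the toml crate's field-ordering rule: with the 0.5-era serializer, a plain value emitted after a nested table fails with `ValueAfterTable`, which is presumably why these Options structs needed fixing to serialize. A hedged sketch of the failure mode (field names are illustrative):

```rust
use serde::Serialize;

#[derive(Serialize)]
struct MetaClientOptions {
    metasrv_addrs: Vec<String>, // hypothetical field for illustration
}

// `meta_client` becomes a [meta_client] table; `enable_memory_catalog`
// then appears *after* it, which toml 0.5 rejects as ValueAfterTable.
#[derive(Serialize)]
struct Broken {
    meta_client: MetaClientOptions,
    enable_memory_catalog: bool,
}

// Declaring plain values before tables serializes cleanly.
#[derive(Serialize)]
struct Fixed {
    enable_memory_catalog: bool,
    meta_client: MetaClientOptions,
}
```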
fys
819c990a89 fix: thread that reports the heartbeat panics in unit test (#1078)
fix: ut panic in heartbeat report thread
2023-02-24 15:36:32 +08:00
Ruihang Xia
a8b4e8d933 ci: simplify codecov comment (#1073)
chore(ci): simplify codecov comment

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-02-24 15:22:57 +08:00
Yingwen
710e2ed133 ci: Use fixed skywalking-eyes revision (#1076)
The latest PR of skywalking-eyes https://github.com/apache/skywalking-eyes/pull/149
breaks our CI action
2023-02-24 07:05:18 +00:00
Ning Sun
81eab74b90 refactor: remove grpc client constructor with default catalog/schema (#1060)
* refactor: remove grpc client with default catalog/schema

* refactor: re-export consts in client module
2023-02-24 11:06:14 +08:00
Ning Sun
8f67d8ca93 fix: update mysql server library to fix tls corrupt message issue (#1065) 2023-02-24 10:20:44 +08:00
Ruihang Xia
4cc3ac37d5 feat: add DictionaryVector DataType (#1061)
* fix stddev and stdvar. try build range function expr

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* feat: add dictionary data type

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* preserve timestamp column in range manipulator

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* plan range functions

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* update test result

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix typo

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* update test result

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* update test result

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* resolve CR comments

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* resolve CR comments

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-02-23 20:31:07 +08:00
Lei, HUANG
b48c851b96 fix: support datetime type parsing (#1071)
* fix: support datetime type parsing

* fix: unit test
2023-02-23 20:26:47 +08:00
Xuanwo
fdd17c6eeb refactor: Clean up re-export of opendal services (#1067)
Signed-off-by: Xuanwo <github@xuanwo.io>
2023-02-23 14:12:34 +08:00
Ruihang Xia
51641db39e feat: support filter expression in PromQL (#1066)
feat: support filter expression

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-02-23 11:55:23 +08:00
Xuanwo
98ef74bff4 chore: Bump OpenDAL to v0.27 (#1057)
* Bump OpenDAL to v0.27

Signed-off-by: Xuanwo <github@xuanwo.io>

* Make cargo check happy

Signed-off-by: Xuanwo <github@xuanwo.io>

* Address comments

Signed-off-by: Xuanwo <github@xuanwo.io>

* Address comments

Signed-off-by: Xuanwo <github@xuanwo.io>

* Format toml

Signed-off-by: Xuanwo <github@xuanwo.io>

* Make taplo happy

Signed-off-by: Xuanwo <github@xuanwo.io>

---------

Signed-off-by: Xuanwo <github@xuanwo.io>
2023-02-23 11:20:45 +08:00
Lei, HUANG
f42acc90c2 fix: allow empty TableOptions (#1063)
fix: allow default TableOptions to avoid panic when upgrading from older versions
2023-02-22 19:19:13 +08:00
Lei, HUANG
2df8143ad5 feat: support table ttl (#1052)
* feat: purge expired sst on compaction

* chore: add more log

* fix: clippy

* fix: mark expired ssts as compacting before picking candidates

* fix: some CR comments

* fix: remove useless result

* fix: cr comments
2023-02-22 16:56:20 +08:00
shuiyisong
fb2e0c7cf3 feat: add auth to grpc handler (#1051)
* chore: get header in grpc & temp save

* chore: change authscheme to include data str

* chore: add auth to grpc flight handler

* chore: add unit test & hold for now since grpc api doesn't accept req input

* chore: minor change

* chore: minor change

* chore: add flight context to database interface

* chore: add test

* chore: update proto version & fix cr issue

* chore: add test

* chore: minor update
2023-02-22 15:20:10 +08:00
Xieqijun
390e9095f6 feat: admin http api (#1026)
* feat: catalog list

* feat: catalog list

* feat:api

* feat: leader info

* feat: use constant

* fix: ci

* feat: query heartbeat by ip

* ut: add test

* fix: cr

* fix: cr

* fix: cr
2023-02-22 14:18:37 +08:00
dennis zhuang
bcd44b90c1 feat: invoke TQL via SQL interface (#1047)
* feat: impl TQL parser in sqlparser

* feat: impl invoking TQL via SQL

* chore: remove src/sql/src/tql_parser.rs

* chore: fix typo

* test: add tql test

* chore: carry type

Co-authored-by: LFC <bayinamine@gmail.com>

* chore: cr comments

---------

Co-authored-by: LFC <bayinamine@gmail.com>
2023-02-22 11:28:09 +08:00
Yingwen
c6f2db8ae0 feat(procedure): Add procedure watcher (#1043)
* refactor: Use watch channel to store ProcedureState

* feat: Add a watcher to wait for state change

* test: test watcher on procedure failure

* feat: Only clear message cache on success

* feat: submit returns Watcher
2023-02-21 17:19:39 +08:00
Lei, HUANG
e17d5a1c41 feat: support table options (#1044)
* feat: change table options from string map to a struct, add ttl and write_buffer_size

* fix: also pass table options to table meta

* feat: pass table options when opening/creating regions

* fix: CR comments
2023-02-21 08:10:23 +00:00
Ruihang Xia
23092a5208 feat: Support unary, paren, bool keyword and nonexistent metric/label in PromQL (#1049)
* feat: don't report metric/label not found as error

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* feat: impl unary expr

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* feat: impl paren expr

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* feat: support bool keyword

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add some tests

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* ignore nonexistence labels during planning

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix clippy

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-02-21 15:24:01 +08:00
Yingwen
4bbad6ab1e ci: allow ci pass when codecov can't upload data (#1046) 2023-02-21 14:52:44 +08:00
Zhizhen He
6833b405d9 ci: upgrade spell checker to 1.13.10 (#1045)
* ci: upgrade spell checker to 1.13.10

Signed-off-by: Zhizhen He <hezhizhen.yi@gmail.com>

* fix: fix existing typos

Signed-off-by: Zhizhen He <hezhizhen.yi@gmail.com>

* chore: use taplo to format typos.toml

Signed-off-by: Zhizhen He <hezhizhen.yi@gmail.com>

* chore: add fmt-toml rule to format TOML files

Signed-off-by: Zhizhen He <hezhizhen.yi@gmail.com>

---------

Signed-off-by: Zhizhen He <hezhizhen.yi@gmail.com>
2023-02-21 10:55:27 +08:00
Yingwen
aaaf24143d feat: Procedure to create a mito engine (#1035)
* feat: wip

* feat: Implement procedure to create mito table

* feat: Add create_table_procedure to TableEngine

* feat: Impl dump and lock for CreateMitoTable

* feat: Impl CreateMitoTable::execute and register it to manager

* feat(common-procedure): pub local mod

* feat: Add simple test for CreateMitoTable

* style: Fix clippy

* refactor: Move create_table_procedure to a new trait TableEngineProcedure
2023-02-21 09:40:56 +08:00
Jiachun Feng
9161796dfa feat: export the data from a table to parquet files (#1000)
* feat: copy table parser

* feat: copy table

* chore: minor fix

* chore: give stmt a clearer name

* chore: unified naming

* chore: minor change

* chore: add a todo

* chore: end up with an empty file when the table is empty

* feat: format with copy table

* feat: with options

* chore: by cr

* chore: default 5M rows per segment

* Update src/datanode/src/sql/copy_table.rs

Co-authored-by: Lei, HUANG <6406592+v0y4g3r@users.noreply.github.com>

* Update src/datanode/src/sql/copy_table.rs

Co-authored-by: Lei, HUANG <6406592+v0y4g3r@users.noreply.github.com>

* Update src/datanode/src/error.rs

Co-authored-by: fys <40801205+Fengys123@users.noreply.github.com>

---------

Co-authored-by: Lei, HUANG <6406592+v0y4g3r@users.noreply.github.com>
Co-authored-by: fys <40801205+Fengys123@users.noreply.github.com>
2023-02-20 16:43:50 +08:00
Ruihang Xia
68b231987c feat: improve Prometheus compliance (#1022)
* initial impl

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* minor (useless) refactor

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* retrieve metric name

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add time index column to group by columns
* filter out NaN in normalize
* remove NULL in instant manipulator
* accept form data as HTTP params
* correct API URL
* accept second literal as step param

* happy clippy

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* update test result

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-02-20 07:29:43 +00:00
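Of the compliance fixes listed above, "accept second literal as step param" is the most mechanical: the `step` query parameter may be either a Prometheus duration (e.g. "30s") or a bare number of seconds (e.g. "30"). A rough sketch, with the duration grammar heavily simplified:

```rust
use std::time::Duration;

/// Parse `step` as either bare seconds or a suffixed duration.
fn parse_step(step: &str) -> Option<Duration> {
    // Bare float => seconds (e.g. "30" or "0.5").
    if let Ok(secs) = step.parse::<f64>() {
        return Some(Duration::from_secs_f64(secs));
    }
    // Simplified "<n>s"/"<n>m" suffix form; the real parser supports
    // the full Prometheus duration grammar.
    let (num, unit) = step.split_at(step.len().saturating_sub(1));
    let n: f64 = num.parse().ok()?;
    match unit {
        "s" => Some(Duration::from_secs_f64(n)),
        "m" => Some(Duration::from_secs_f64(n * 60.0)),
        _ => None,
    }
}
```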
Yingwen
6e9964ac97 refactor(storage): Simplify debug output of some structs (#1028)
* refactor: Simplify debug output of RegionImpl

* feat: Simplify memtable debug output
2023-02-20 14:35:30 +08:00
shuiyisong
6afd79cab8 feat: support InfluxDB auth protocol (#1034)
* chore: add http auth influxdb compat

* chore: add test

* chore: minor change

* chore: fix typo

* chore: fix cr
2023-02-20 03:26:19 +00:00
fys
4e88a01638 feat: support influxdb ping and health endpoint (#1027)
* feat: support influxdb ping and health endpoint

* add some unit tests

* ping and health api need no auth

* cr
2023-02-20 02:31:51 +00:00
Lei, HUANG
af1f8d6101 feat: file purger (#1030)
* wip

* wip

* feat: file purger

* chore: add tests

* feat: delete removed file on sst merge

* chore: move MockAccessLayer to test_util

* fix: some cr comments

* feat: add await termination for scheduler

* fix: some cr comments

* chore: rename max_file_in_level0 to max_files_in_level0
2023-02-19 14:56:41 +08:00
dennis zhuang
a9c8584c98 feat: impl insert data from query (#1025)
* feat: refactor insertion in datanode

* feat: supports inserting data by select query

* feat: impl cast operation for vector

* feat: streaming insert from select query results

* chore: minor changes

* fix: remove unwrap

* test: insert_to_requests

* test: test_execute_insert_by_select

* fix: cast operation for vectors

* fix: test

* fix: typo

* chore: by CR comments

* fix: test_statement_to_request
2023-02-17 17:56:12 +08:00
Eugene Tolbakov
7787cfdd42 refactor(datatypes): enhance MutableVector methods (#987)
* refactor(datatypes): enhance MutableVector methods

* refactor(datatypes): address code review issues

* refactor(datatypes): address more code review issues

* refactor(datatypes): fix merge conflicts

* refactor(datatypes): address code review issues

* refactor(datatypes): address more code review issues

* refactor(datatypes): update sql delete with the newly introduced method
2023-02-17 16:16:23 +08:00
Weny Xu
2f39a77137 feat: add close method for the region trait (#970)
feat: add close for region trait
2023-02-17 11:32:55 +08:00
Lei, HUANG
16f86a9d77 refactor: separate compaction stuff from task scheduler (#1021)
* refactor: make schedule request return value generic

* feat: add handler trait

* wip

* feat: use task handler

* fix: unit test

* refactor: separate scheduler mod

* chore: rename

* chore: Request use associate type

* refactor: use associate type

* refactor: use associate type to reduce generic parameters

* chore: further remove generic types

* chore: further remove a generic parameter
2023-02-16 19:30:23 +08:00
dennis zhuang
5ec1a7027b feat: supports passing user params into coprocessor (#962)
* feat: make args in coprocessor optional

* feat: supports kwargs for coprocessor as params passed by the users

* feat: supports params for /run-script

* fix: we should rewrite the coprocessor by removing kwargs

* fix: remove println

* fix: compile error after rebasing

* fix: improve http_handler_test

* test: http scripts api with user params

* refactor: tweak all to_owned
2023-02-16 16:11:26 +08:00
Yingwen
ddbc97befb refactor: changes CreateTableRequest::schema to RawSchema (#1018)
* refactor: changes CreateTableRequest::schema to RawSchema

* refactor(grpc-expr): create_table_schema returns RawSchema
2023-02-16 16:04:17 +08:00
Ruihang Xia
a8c2b35ec6 chore: bump rust to nightly-2023-02-14 (#1019)
* chore: bump rust to nightly-2023-02-14

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix clippy

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* bump statrs to 0.16

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-02-16 13:17:57 +08:00
Yingwen
04afee216e feat(procedure): Support multi-lock keys and querying procedure state from context (#1006)
* feat: Add ContextProvider to Context

So procedures can query states of other procedures via the
ContextProvider and they don't need to hold a ProcedureManagerRef

* feat: Procedure supports acquiring multiple lock keys

* test: Use multi-locks in test

* feat: Add keys_to_lock/unlock
2023-02-15 18:04:19 +08:00
LFC
5533040be7 fix: describe distribute table (#988)
* fix: describe distribute table
2023-02-15 17:48:43 +08:00
LFC
34fdba77df feat: create database if not exists (#1009) 2023-02-15 17:47:46 +08:00
Ning Sun
cd0d58cb24 fix: correct date/time type format for postgresql (#1001)
* fix: correct date/time type format for postgresql

* fix: tests for timestamp

* refactor: use Utc datetime for timestamp::to_chrono_datetime

* Update src/servers/Cargo.toml

Co-authored-by: LFC <bayinamine@gmail.com>

---------

Co-authored-by: LFC <bayinamine@gmail.com>
2023-02-15 09:40:16 +00:00
yuanbohan
8b869642b8 feat: update promql-parser to v0.1.0 (#994)
feat: update promql-parser version to v0.1.0
2023-02-15 17:23:59 +08:00
Ning Sun
a33d1e9863 ci: add cloud followup label (#1007)
ci: add cloud followup support
2023-02-15 17:17:32 +08:00
Ruihang Xia
dfe7bfb07f feat: handle PromQL HTTP API parameters (#985)
* feat: impl EvalStmt parser

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix compile errors

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* update test result

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add integration test

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix clippy

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* resolve CR comments

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* impl From<PromqlQuery> for PromQuery

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* move format into with_context

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* update test result

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* shorthand compound error

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* use rfc3339 error to report float parsing error

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* remove CompoundError

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-02-15 17:15:44 +08:00
Ruihang Xia
5d1f231004 fix: update planner state according to output plan (#1005)
* fix: update context according to planner phase

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* alias out qualifier

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* remove ignore

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix clippy

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix typo

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-02-15 16:52:14 +08:00
Ning Sun
40eec85cf7 feat: add catalog name to s3 path (#1011) 2023-02-15 08:30:09 +00:00
shuiyisong
e17d564bf0 feat: add client tls option to channel manager config (#999)
* feat: add client tls to channel manager config

* chore: move test to tests folder

* chore: fix license issue

* chore: fix cr issue
2023-02-15 16:02:27 +08:00
shuiyisong
301656d568 fix: rename schema to db in http param (#1008)
chore: rename schema to db in http script handler
2023-02-15 15:59:00 +08:00
Zheming Li
a19dee1dc0 feat: duplicate error logs into separate file (#995)
Signed-off-by: Zheming Li <nkdudu@126.com>
2023-02-15 14:27:32 +08:00
Lei, HUANG
75b8afe043 feat: compaction integration (#997)
* feat: trigger compaction on flush

* chore: rebase develop

* feat: add config item max_file_in_level0 and remove compaction_after_flush

* fix: cr comments

* chore: add unit test to cover Timestamp::new_inclusive

* fix: workaround to fix future is not Sync

* fix: future is not sync

* fix: some cr comments
2023-02-15 14:14:07 +08:00
fys
e2904b99ac feat: add retry logic for MetaPeerClient (#991)
* add retry logic in meta_peer_client

* impl need_retry function

* create meta_peer_client using the builder pattern

* cr
2023-02-15 14:12:53 +08:00
Xieqijun
de0b8aa0a0 feat: Support the DELETE SQL statement (#942)
* [WIP]:delete sql

* [fix]:time parser bug

* [fix]:resolve conflict

* [fmt]:cargo fmt

* [fix]:remove useless log

* [fix]:test

* [feat]:add error parse

* [fix]:resolve conflict

* [fix]:remove useless code

* [fix]:remove useless code

* [test]:add IT

* [fix]:add license

* [fix]:ci

* [fix]:ci

* [fix]:ci

* [fix]:remove

* [fix]:ci

* [feat]:add sql

* [fix]:modify sql

* [feat]:refactor parser_expr

* [feat]:rm backtrace

* [fix]:ci

* [fix]: conversation

* [fix]: conversation

* feat:refactor delete

* feat:refactor delete

* fix:resolve conversation

* fix:ut

* fix:ut

* fix:conversation

* fix:conversation

* fix:conversation

---------

Co-authored-by: xieqijun <qijun@apache.org>
2023-02-15 13:13:17 +08:00
Xieqijun
63e396e9e9 test: add api and doc http test (#998)
* test:add api and doc test

* fix:conversation
2023-02-15 11:55:13 +08:00
Eugene Tolbakov
4d8276790b refactor(storage): remove unused FlushIo variant (#1002)
refactor(storage): remove unused FlushIo variant
2023-02-15 11:42:05 +08:00
Lei, HUANG
374acc8830 feat: compaction reader and writer (#972)
* feat: compaction reader and writer

* feat: make ParquetWrite accept both memtable iterator and chunk reader

* feat: adapt ParquetWriter to accommodate ChunkReaderImpl

* chore: rebase develop

* wip: compile

* wip: task logic

* feat: version and manifest update

* fix: remove useless as_inner from Timestamp vectors

* feat: mark file compacting

* fix: unit test

* fix: clippy warnings

* fix: CR comment

* chore: according to cr comments, remove visit_levels from LevelMetas

* fix: some CR comments

* fix: add PlainTimestampRowFilter for correctness

* fix: cr comments

* fix: some typos
2023-02-14 17:32:00 +08:00
shuiyisong
8491f65093 refactor: remove obj_name_to_tab_ref (#989) 2023-02-14 16:33:55 +08:00
Weny Xu
5e6f340dd9 refactor: refactor execute_stream to non-async method (#980) 2023-02-14 15:41:22 +08:00
Ruihang Xia
7b98718cd9 test: Some PromQL cases about aggregator (#977)
* port some aggregator tests

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* find two unsupported cases

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix clippy warnings

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* resolve CR comments

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix fn naming

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-02-14 15:36:00 +08:00
Yingwen
0f7e5a2fb2 feat: Implement LocalManager::recover (#981)
* feat: Implement LocalManager::recover

* feat: Impl From<ObjectStore> for ProcedureStore
2023-02-14 14:50:43 +08:00
LFC
9ad6c45913 test: Sqlness tests for distribute mode (#979)
* test: Sqlness tests for distribute mode

* ci

* fix: resolve PR comments

* fix: resolve PR comments
2023-02-14 10:24:09 +08:00
fys
7fe417e740 fix: an error occurred when requesting the http doc api (#984) 2023-02-13 11:17:27 +00:00
fys
c1a9f84c7f feat: meta provides the ability to distribute lock (#961)
* add DistLock trait and a implement based etcd

wip

impl lock grpc service for meta-srv

reuse the etcd client instead of repeatedly creating etcd client

add some docs and comments

add some comment

meta client support distribute lock

fix: dead lock

self-cr

* cr

* rename "expire" -> "expire_secs"
2023-02-13 15:58:30 +08:00
Yingwen
be897efd01 feat: Execute procedure in LocalManager (#953)
* feat: Runner executes procedure

* feat: Add rollback key type to ParsedKey

* feat: Write rollback key when procedure is unable to execute

* feat: Use loaded step to re-submit subprocedure

* feat: Track subprocedures in ProcedureMeta

* feat: Clean message cache after the root procedure is done

* feat: Runner returns execution result

* fix: Fix tests

* test: Test Runner

* test: Test procedures_in_tree

* chore: Refine test and comments

* feat: Remove support of lock inheritance

A deadlock happens if a subprocedure acquires the same lock key as
its parent.

The main concern is if the subprocedure directly inherits its parent's
lock, then how should we behave when multiple subprocedures acquire
this same lock? Each procedure may assume it has unique access to the
same object but it actually shares the resource with others.

Now subprocedures need to use different keys to lock objects, which is
reasonable. For example:
- A parent procedure wants to create a table so it locks the table with
a key like `catalog.schema.table`
- Subprocedures create regions for the table so they lock the regions
with keys `catalog.schema.table.region-0 ~ catalog.schema.table.region-n`

* style: Fix clippy

* feat: insert_procedure returns false on duplicate procedure

Also rename this method to try_insert_procedure

* chore: Address CR comments
2023-02-13 10:38:56 +08:00
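The lock-inheritance note above boils down to giving the parent and its subprocedures disjoint key namespaces; a sketch of the key scheme that commit message itself describes (function names are made up):

```rust
/// The parent procedure locks the table itself.
fn table_lock_key(catalog: &str, schema: &str, table: &str) -> String {
    format!("{catalog}.{schema}.{table}")
}

/// Subprocedures lock individual regions under distinct keys, so they
/// never contend with (or deadlock against) the parent's table lock.
fn region_lock_key(catalog: &str, schema: &str, table: &str, region: u32) -> String {
    format!("{}.region-{}", table_lock_key(catalog, schema, table), region)
}
```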
Eugene Tolbakov
c06e04afbb refactor(query): tests from query/tests to query/src (#973)
* refactor(query): tests from query/tests to query/src

* chore(query): address rust fmt issues

* chore(query): add licence header
2023-02-12 20:55:17 +08:00
Lei, HUANG
e77a7f253c feat: L0 to L1 compaction strategy (#964)
* feat: impl simple compaction strategy

* chore: rebase to develop and fix clippy warnings

* chore: simplify time bucket struct

* chore: some typos
2023-02-11 21:10:24 +08:00
Eugene Tolbakov
7d6f4cd88b feat: remove backtrace from sql::error::Error (#966)
* feat: remove backtrace from sql::error::Error

* fix: address formatting issues

---------

Co-authored-by: Evgeny Tolbakov <evgeny.tolbakov@jpmorgan.com>
2023-02-11 14:52:29 +08:00
Ruihang Xia
83ac6598b6 feat: add start, end and step to promql http api (#969)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-02-10 14:16:16 +08:00
Ruihang Xia
4c925e0079 chore(deps): bump promql-parser (#968)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-02-10 14:12:55 +08:00
LFC
c6128ec0a4 refactor: use remote proto (#963)
* refactor: use remote proto (see our new repo "GreptimeTeam/greptime-proto")

* fix: resolve PR comments
2023-02-10 13:35:18 +08:00
discord9
7c34b009ec feat: bind DataFrame API into python script (#945)
* chore: remove unused magic fn

* feat: dataframe

* feat: add data_frame crate

* feat: more APIs bound

* fix: `Comparable` for overload op

* fix: license&more test

* chore: PR advices

* chore: more PR advices
2023-02-10 11:21:57 +08:00
shuiyisong
70edd4d55b fix: remove incorrect table_idents_to_full_name (#967) 2023-02-10 03:15:48 +00:00
Ning Sun
6beea73590 fix: use query_ctx in distributed inserts (#965) 2023-02-10 10:09:13 +08:00
Yun Chen
c0d3533d10 fix: SQL inline primary key definition (#957)
* fix: invalid inline primary key syntax

* fix: format

* fix: clippy fix

* fix: added sqlness tests

* fix: throw exception when multiple inline pk defined

* fix: pr comments

* fix: add ending blank line for create.sql
2023-02-09 18:57:19 +08:00
shuiyisong
9989a8c192 fix: check full table name during logical plan creation (#948) 2023-02-09 17:23:28 +08:00
Ruihang Xia
19dd8b1246 feat: SeriesDivide plan for PromQL (#960)
* implement SeriesDivide plan

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* planner part

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix clippy and typo

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-02-09 11:50:29 +08:00
Lei, HUANG
1e9918ddf9 feat: compaction scheduler and rate limiter (#947)
* wip: compaction scheduler

* feat: implement simple compaction scheduler

* fix: typo

* feat: add generic parameter to make scheduler friendly to tests

* chore: add more tests

* fix: CR comments

* fix: CR comments

* fix: ensure idempotency for rate limit token

* fix: CR comments
2023-02-09 11:43:20 +08:00
fys
4ce62f850b chore: add an opaque error type in meta (#959)
add boxed err in meta
2023-02-08 09:47:33 +00:00
Ning Sun
83d57f9111 fix: setting postgres query context (#958) 2023-02-08 16:34:10 +08:00
LFC
803b7f0633 feat: implement "drop table" in distributed mode (both in SQL and gRPC) (#944)
* feat: implement "drop table" in distributed mode (both in SQL and gRPC)

refactor: create distributed table
some details:
- set table global value in Meta, as well as table routes value. Datanode only set table regional value
- complete instance SQL tests both in standalone and distributed mode

* fix: rebase develop

* fix: resolve PR comments
2023-02-08 07:36:38 +00:00
Ruihang Xia
37ca5ba380 chore: alias sqlness subcommand (#956)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-02-07 18:53:46 +08:00
Ning Sun
c1d32bdf2b fix: add form data support for http sql api (#955)
fix: add form data support for http apis
2023-02-07 10:15:39 +00:00
fys
83509f31f4 feat: datanode stats is stored in the mem_kv of meta leader (#943)
* store heartbeat data in memory, instead of etcd

* fix: typo

* fix: license header

* cr
2023-02-07 17:09:28 +08:00
elijah
926022e14c feat: enable caching when using object store (#928)
* feat: enable caching when using object store

* feat: support file cache for object store

* feat: maintaining the cached files with lru

* fix: improve the code

* empty commit

* improve the code
2023-02-07 15:46:37 +08:00
Ruihang Xia
2f2609d8c6 build(ci): disable release workflow for forked repo (#954)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-02-07 15:22:32 +08:00
Yingwen
ecadbc1435 feat: Add procedure manager LocalManager (#946)
* feat: Add ManagerContext and LocalManager

* test: Add register_loader test

* feat: Remove some unused methods

* fix: Fix submit_procedure ensure condition
2023-02-07 11:33:13 +08:00
ShenJunkun
afac885c10 refactor: add schema column to the scripts table (#868) 2023-02-07 11:07:32 +08:00
Lei, HUANG
5d62e193bd feat: support multi regions on datanode (#653)
* wip: fix compile errors

* chore: move splitter to partition crate

* fix: remove useless variants in frontend errors

* chore: move more partition related code to partition manager

* fix: license header

* wip: move WriteSplitter to PartitionRuleManager

* fix: clippy warnings

* chore: remove useless error variant and format toml

* fix: cr comments

* chore: resolve conflicts

* chore: rebase develop

* fix: cr comments

* feat: support multi regions on datanode

* chore: rebase onto develop

* chore: rebase develop

* chore: rebase develop

* wip

* fix: compile errors

* feat: multi region

* fix: CR comments

* feat: allow stat-ing existing regions without actually opening them

* fix: use table meta in manifest to recover region info
2023-02-07 10:46:18 +08:00
elijah
7d77913e88 chore: fix rfc typo (#952) 2023-02-07 08:47:06 +08:00
Lei, HUANG
3f45a0d337 docs: rfc for table compaction (#939)
* doc: rfc for table compaction

* docs: update compaction rfc
2023-02-06 22:15:53 +08:00
Zhizhen He
a1e97c990f chore: fix typo (#949) 2023-02-06 22:13:56 +08:00
Ning Sun
4ae63b7089 feat: Initial prepare statement support for Postgres protocol (#925)
* feat: add describe statement to query_engine

* feat: add ability to describe statement for sql handler

* refactor: return schema instead of wrapped ref

* test: resolve tests

* feat: add initial support for prepared statements

* feat: add parameter types to query statement

* test: fix parser test

* chore: add todo task

* fix: turn on integer_datetime for binary timestamp

* fix: format string using single quote

* test: add tests for prepared statement

* Apply suggestions from code review

Co-authored-by: LFC <bayinamine@gmail.com>

* refactor: use stream api from recordbatches

---------

Co-authored-by: LFC <bayinamine@gmail.com>
2023-02-06 22:06:00 +08:00
Yingwen
b0925d94ed feat: Implement lock component for ProcedureManager (#937)
* feat: Add procedure meta

* feat: Implement lock for procedures

* chore: Allow dead code

* docs: Fix comment

* docs: Update docs of acquire_lock
2023-02-03 18:42:03 +08:00
Ruihang Xia
fc9276c79d feat: export promql service in server (#924)
* chore: some tiny typo/style fix

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* feat: add promql server

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* works for mocked query

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* clean up

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* integration test case

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* resolve CR comments

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* expose promql api to our http server

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* resolve CR comments

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* adjust router structure

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-02-03 08:28:56 +00:00
LFC
184ca78a4d revert: removed all "USE"s in sqlness tests introduced in #922 (#938) 2023-02-03 15:44:58 +08:00
discord9
ebbf1e43b5 feat: Query using sql inside python script (#884)
* feat: add weakref to QueryEngine in copr

* feat: sql query in python

* fix: make_class for Query Engine

* fix: use `Handle::try_current` instead

* fix: cache `Runtime`

* fix: lock file conflict

* fix: dedicated thread for blocking&fix test

* test: remove unnecessary print
2023-02-03 15:05:27 +08:00
dennis zhuang
54fe81dad9 docs: add dashboard to resources in README (#934) 2023-02-03 13:47:19 +08:00
LFC
af935671b2 feat: support "use" in GRPC requests (#922)
* feat: support "use catalog and schema"(behave like the "use" in MySQL) in GRPC requests

* fix: rebase develop
2023-02-02 20:02:56 +08:00
Yingwen
74adb077bc feat: Implement ProcedureStore (#927)
* test: Add more tests for ProcedureId

* feat: Add ObjectStore based state store

* feat: Implement ProcedureStore

* test: Add tests for ParsedKey

* refactor: Rename list to walk_top_down

* fix: Test ProcedureStore and handles unordered key values.

* style: Fix clippy

* docs: Update comment

* chore: Adjust log level for printing invalid key
2023-02-02 17:49:31 +08:00
Ruihang Xia
54c7a8be02 docs: document sqlness-runner usage (#931)
docs: paste doc from greptime-doc

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-02-02 15:56:51 +08:00
Ruihang Xia
ea5146762a chore(deps): bump promql-parser (#929)
* fix promql crate

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* migrate to new api

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix aggregator test

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix styles

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-02-02 07:31:41 +00:00
Yingwen
788b5362a1 docs: Add procedure framework RFC (#836)
* docs: Add procedure framework RFC

* docs: Add dump, rollback and locking to procedure framework

* docs: Change ProcedureBuilder to ProcedureLoader

* docs: Add sub-procedures section

* docs: Add a link to explain idempotent

* docs: Add link to the tracking issue

* docs: Fix ProcedureLoader type alias

* docs: Update procedure API

* docs: Address CR comments

* docs: Update path and make the docs more clear
2023-02-02 11:28:56 +08:00
Lei, HUANG
028a69e349 refactor: move partition related code to partition manager (#906)
* wip: fix compile errors

* chore: move splitter to partition crate

* fix: remove useless variants in frontend errors

* chore: move more partition related code to partition manager

* fix: license header

* wip: move WriteSplitter to PartitionRuleManager

* fix: clippy warnings

* chore: remove useless error variant and format toml

* fix: cr comments

* chore: resolve conflicts

* chore: rebase develop

* fix: cr comments
2023-02-01 19:24:49 +08:00
elijah
9a30ba00c4 test: run sqlness test in distributed mode (#916)
* test: run sqlness test in distributed mode

* chore: fix ci test

* chore: improve the ci yaml

* chore: improve the code

* chore: fix conflicts
2023-01-31 15:00:11 +08:00
LFC
8149932bad feat: local catalog drop table (#913)
* feat: local catalog drop table

* Update src/catalog/src/local/manager.rs

Co-authored-by: Lei, HUANG <6406592+v0y4g3r@users.noreply.github.com>

* Update src/catalog/src/local/manager.rs

Co-authored-by: Lei, HUANG <6406592+v0y4g3r@users.noreply.github.com>

* fix: resolve PR comments

---------

Co-authored-by: Lei, HUANG <6406592+v0y4g3r@users.noreply.github.com>
2023-01-31 14:44:03 +08:00
Ruihang Xia
89e4084af4 build(ci): upload sqlness log files (#920)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-01-31 14:31:27 +08:00
Ning Sun
39df25a8f6 refactor: make postgres handler stateful (#914)
* feat: update pgwire to 0.8 and unify postgres handler

* fix: correct password message matching
2023-01-31 14:19:18 +08:00
Yingwen
b2ad0e972b feat: Define procedure related traits (#904)
* chore: Move uuid to workspace.dependencies

* feat: Define procedure related traits

* test: Add tests

* chore: Update imports

* feat: Submit ProcedureWithId to manager

* chore: pub ProcedureId::parse_str

* refactor: ProcedureId::parse_str returns Result

* chore: Address CR comments

Also implements FromStr for ProcedureId
2023-01-31 14:17:28 +08:00
shuiyisong
18e6740ac9 chore: add interceptor err in frontend::error::Error (#917)
* chore: add interceptor boxed err

* chore: rename

* chore: update err msg

Co-authored-by: fys <40801205+Fengys123@users.noreply.github.com>

---------

Co-authored-by: fys <40801205+Fengys123@users.noreply.github.com>
2023-01-30 03:12:03 +00:00
Yun Chen
a7dc86ffe5 feat: oss storage support (#911)
* feat: add oss storage support

* fix: ci build format check

* fix: align OSS to Oss

* fix: cr comments

* fix: rename OSS to Oss in integration tests

* fix: clippy fix
2023-01-29 20:09:38 +08:00
Ruihang Xia
71482b38d7 feat: PromQL binary expr planner (#889)
* feat: PromQL binary expr planner

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* column & column test

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* column & literal test

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* mark literal-literal unsupported

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-01-29 17:02:11 +08:00
Ruihang Xia
dc9b5339bf feat: impl increase and irate/idelta in PromQL (#880)
* feat: impl increase and irate/idelta in PromQL

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add license header

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix styles

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* resolve CR comments

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add counter reset test case

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-01-29 14:21:13 +08:00
Lei, HUANG
5e05c8f884 fix: TimestampRange::new_inclusive and strum dependency (#910)
fix: TimestampRange::new_inclusive; also fix strum dependency in common-error
2023-01-29 13:09:05 +08:00
shuiyisong
aafc26c788 feat: add mysql reject_no_database (#896)
* chore: update opensrv-mysql to main

* refactor: change mysql server struct

* feat: add option to reject no database mysql connection request

* chore: remove unused condition

* chore: rebase develop

* chore: make reject_no_database optional
2023-01-29 04:09:47 +00:00
LFC
64243e3a7d refactor: accommodate java flight client (#886)
* refactor: change how AffectedRows is carried in flight stream to accommodate Java Flight client

* fix: clippy
2023-01-29 11:27:13 +08:00
Ruihang Xia
36a13dafb7 build(deps): bump tokio to 1.24.2 (#900)
deps: bump tokio to 1.24.2

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-01-29 11:13:37 +08:00
shuiyisong
637837ae44 chore: return authorize err msg to mysql client (#905)
chore: refine authorize err msg to client
2023-01-29 10:53:36 +08:00
dependabot[bot]
ae8afd3711 build(deps): bump bzip2 from 0.4.3 to 0.4.4 (#898)
Bumps [bzip2](https://github.com/alexcrichton/bzip2-rs) from 0.4.3 to 0.4.4.
- [Release notes](https://github.com/alexcrichton/bzip2-rs/releases)
- [Commits](https://github.com/alexcrichton/bzip2-rs/commits/0.4.4)

---
updated-dependencies:
- dependency-name: bzip2
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-01-28 21:08:03 +08:00
Yingwen
3db8f95169 ci: Skip status check on docs changed (#903)
* ci: Pass status check on docs changed

* ci: Remove coverage.yml
2023-01-28 16:37:47 +08:00
Lei, HUANG
43aefc5d74 feat: pruning sst files according to time range in filters (#887)
* 1. Reimplement Eq for Timestamp
2. Add and/or for GenericRange

* feat: extract time range from filters

* feat: select sst files according to time range

* fix: clippy

* fix: empty value in range

* fix: some cr comments

* fix: return optional timestamp range

* fix: cr comments
2023-01-28 15:16:41 +08:00
Ruihang Xia
b33937f48e test: sqlness test for alter table rename (#891)
* test: sqlness test for alter table rename

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* update test result

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* change show create table to desc table

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-01-28 11:35:38 +08:00
Ning Sun
9bc4c0d9c7 fix: mysql tests error (#897)
fix: mysql tests merge error
2023-01-20 16:15:16 +08:00
Ning Sun
302d7ec41b ci: use ubuntu 20.04 to build weekly (#895)
feat: use ubuntu 20.04 to build weekly
2023-01-20 08:36:41 +08:00
zyy17
cc46194f29 refactor: support TLS private key of RSA format and add the full test certificates generation (#885)
chore: add the full certificate generation

Signed-off-by: zyy17 <zyylsxm@gmail.com>

Signed-off-by: zyy17 <zyylsxm@gmail.com>
2023-01-19 13:13:33 +08:00
elijah
5dfc24e4f6 fix: create table after rename table (#894)
* fix: create table after rename table

* chore: fix test
2023-01-19 13:13:09 +08:00
Zheming Li
4987136850 refactor: use rust-toolchain.toml to override toolchain (#882) 2023-01-19 13:11:36 +08:00
shuiyisong
6960739b3d feat: add authorize to UserProvider trait (#879)
* feat: add SchemaValidator

* feat: add schema validator to mysql shim

* chore: pass schema validator to http auth layer

* feat: add schema validator to http

* feat: add schema validator to pg

* feat: add schema validator to pg

* feat: add schema validator test

* chore: remove println in test

* chore: use !matches

* refactor: refac authenticate and authorize in http auth

* refactor: refac authenticate and authorize in http auth

* chore: typo

* chore: minor change

* refactor: merge schema_validator into user_providier

* chore: fix license issue

* refactor: change http query param from database to db

* chore: fix cr issue
2023-01-18 12:42:08 +08:00
fys
49d83abc0c chore: add an opaque error type in meta (#890)
add a boxed error type in meta
2023-01-18 11:30:54 +08:00
Ning Sun
ecb71f81be feat: add --rpc-hostname option to datanode for a persistent address to store in meta (#871)
* feat: add --rpc-hostname option

* fix: config file and hostname parsing

* Apply suggestions from code review

Co-authored-by: fys <40801205+Fengys123@users.noreply.github.com>

Co-authored-by: fys <40801205+Fengys123@users.noreply.github.com>
2023-01-17 10:50:50 +08:00
fys
6f5639fccd feat: add load_based selector in meta (#874)
* fix: wrong error info

* add derive hash for StatKey

* add a attrs field in Context

* add load_based selector

* add license

* make Nodestat module public

* add meta startup config item about selector

* cr: remove attrs, add concrete type in context

* cr: change region_number type to Option<u64>

* cr: add comment in example.toml

* cr
2023-01-17 10:25:00 +08:00
Ruihang Xia
1e9d09099e feat: update promql-parser to commit fec3c8b (#881)
deps: update promql-parser to commit fec3c8b

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-01-16 17:55:44 +08:00
Lei, HUANG
daad38360f fix: impl total order for Timestamp (#878)
* 1. Reimplement Eq for Timestamp
2. Add and/or for GenericRange

* chore: add test for TimestampRange with diff unit

* chore: optimize split implementation

* fix: clippy

* fix: add fast path

* fix: CR comments
2023-01-16 17:37:30 +08:00
Ruihang Xia
bae0243959 test: sqlness test for insert default (#873)
* test: sqlness test for insert default

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* empty line

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add more sqls

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix typos

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* update test according to typo fix

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-01-13 20:22:55 +08:00
dennis zhuang
d162fbb598 fix: compile error in test (#872) 2023-01-13 15:12:49 +08:00
Zheming Li
0959c1d16b feat: support default value when inserting data (#854) 2023-01-13 14:49:05 +08:00
discord9
e428a84446 feat: use Python Script as UDF in SQL (#839)
* feat: reg PyScript as UDF

* refactor: use `ConcreteDataType` instead

* fix: accept `str` data type

* fix: allow binary to capture SIGINT

* test: add test for py udf

* Update src/servers/tests/py_script/mod.rs

Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>

* style: clippy problem

* style: add newline

* chore: PR advices

Co-authored-by: Ruihang Xia <waynestxia@gmail.com>
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
2023-01-13 14:35:03 +08:00
Ruihang Xia
58c37f588d feat: plan some aggregate expr in PromQL planner (#870)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-01-13 14:27:31 +08:00
dennis zhuang
d195a22f40 fix: parsing time index column option (#865)
* fix: parsing time index column option

* test: adds more cases for creating table

* chore: by CR comments

* feat: validate time index constraint in parser

* chore: improve error msg
2023-01-13 13:22:12 +08:00
elijah
6775c5be87 feat: support renaming table in the catalog manager (#824)
* feat: support renaming table in the catalog manager

* feat: implement rename table for local catalog manager

* chore: fmt code

* fix: update system catalog when renaming table in local catalog manager

* chore: add instance test for rename table

* chore: fix frontend test

* chore: fix comment

* chore: fix rename table test

* fix: renaming a table with an existing name

* fix: improve the system catalog's renaming process

* chore: improve the code

* chore: improve the comment

Co-authored-by: Yingwen <realevenyag@gmail.com>

* chore: improve the code

* chore: fix tests

* chore: fix instance_test

* chore: improve the code

Co-authored-by: Yingwen <realevenyag@gmail.com>
2023-01-12 19:48:18 +08:00
Ruihang Xia
5e89f1ba4e ci: run tests on weekly release build (#869)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-01-12 19:10:23 +08:00
LFC
2664436194 feat: handle "USE <catalog>-<schema>" in MySQL (#857)
* feat: handle "USE <catalog>-<schema>" in MySQL

* fix: resolve PR comments
2023-01-12 11:12:11 +08:00
shuiyisong
b91c77b862 chore: add path check to http auth (#866)
* chore: add whitelist to http auth

* chore: use const instead of format every time
2023-01-12 10:20:18 +08:00
Lei, HUANG
4015dd8075 feat: record sst file time range in FileMeta (#860)
* feat: record sst file time range in FileMeta

* fix: clippy

* chore: add some log and doc
2023-01-11 21:16:07 +08:00
Yingwen
b39dbcbda9 fix: Fix deleting table with non null column (#849)
If the table has a non-null column, we need to use default value instead
of null to fill the value columns in the record batch for deletion.
Otherwise, we can't create the record batch since the schema check
doesn't allow null in the non-null column.
2023-01-11 20:06:46 +08:00
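The rule behind #849, reduced to a toy example: deleted rows still need placeholder values for every value column, and a non-null column cannot carry null, so its declared default is used instead. All types below are stand-ins, not the actual mito/storage API:

#[derive(Clone, Debug, PartialEq)]
enum Value { Null, Int64(i64) }

struct ColumnSchema { nullable: bool, default_value: Value }

// Build the filler column for rows that are being deleted.
fn fill_for_delete(col: &ColumnSchema, rows: usize) -> Vec<Value> {
    let filler = if col.nullable { Value::Null } else { col.default_value.clone() };
    vec![filler; rows]
}

fn main() {
    let non_null = ColumnSchema { nullable: false, default_value: Value::Int64(0) };
    // Nulls here would fail the schema check; the default passes it.
    assert_eq!(fill_for_delete(&non_null, 2), vec![Value::Int64(0), Value::Int64(0)]);
}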
elijah
0e8411c2ff chore: add custom log level support for common_telemetry::init_default_ut_logging() (#864)
chore: improve default ut logging
2023-01-11 16:52:21 +08:00
Ruihang Xia
a9b42b436d feat: PromQL handler in query engine (#861)
* example promql test

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* make the mock test works

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* update planner test

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix clippys

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add license header

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-01-11 11:31:07 +08:00
dennis zhuang
9428e70971 feat: integration test (#770)
* feat: add insert test cases

* fix: update results after rebase develop

* feat: supports unsigned integer types and big_insert test

* test: add insert_invalid test

* feat: supports time index constraint for bigint type

* chore: place time index column last

* test: adds more order, limit test

* fix: style

* feat: adds numbers table in standalone memory catalog mode

* feat: enable fail_fast and test_filter in sqlness

* feat: add more tests

* fix: test_filter

* test: add alter tests

* feat: supports if_not_exists when create database

* test: filter_push_down and catalog test

* fix: compile error

* fix: delete output file

* chore: ignore integration test output in git

* test: update all integration test results

* fix: by code review

* chore: revert .gitignore

* feat: sort the show tables/databases results

* chore: remove issue link

* fix: compile error and code format after rebase

* test: update all integration test results
2023-01-10 18:15:50 +08:00
Ruihang Xia
32d51947a4 refactor: adjust outermost error message (#859)
* refactor: adjust outermost error message

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix clippy

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* preserve tonic status code

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-01-10 17:28:27 +08:00
Ruihang Xia
5fb417ec7c feat: implement RangeManipulate (#843)
* basic impl

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* impl constructor

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* test printout

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* truncate tag columns

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* doc this plan

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix empty range

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix clippy

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* document behavior

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-01-10 16:27:09 +08:00
Lei, HUANG
90fcaa8487 feat: expose wal config (#852)
* feat: wal config

* fix: use human-readable string in wal config

* feat: copy ReadableSize and humanize size config items in toml files

* fix: clippy
2023-01-10 16:07:26 +08:00
Jiachun Feng
c609b193a1 feat: in memory storage on meta leader (#856)
* chore: minor change on election

* chore: refactor some from/into

* feat: add in_memory store for leader node

* refactor: make context mutable

* feat: add ResetableKvStore trait
2023-01-10 15:53:34 +08:00
Ruihang Xia
1305924423 ci: add sqlness job (#835)
* ci: add sqlness job

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* update sqlness to official release

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* filter out backtrace

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix error display

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* close once_cell feature gate

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-01-10 15:12:20 +08:00
Ning Sun
ea753b9ac0 ci: fix release task by correcting output dir (#853) 2023-01-10 14:37:35 +08:00
LFC
72f05a3137 feat: flight aboard (#840)
feat: replace old GRPC interface with Arrow Flight
2023-01-09 17:06:24 +08:00
fys
9e58311ecd feat: datanode supports reporting the number of regions to meta (#838)
* feat: datanode supports reporting the number of regions to meta

* put the heartbeat batch into the store

* cr: change region_number's parameter to &CatalogManagerRef

* cr: when the datanode fails to get the region number, report region_num = -1 to meta
2023-01-09 16:13:53 +08:00
Ruihang Xia
2679faf911 refactor: move parse methods out of QueryEngine trait (#850)
* refactor: move parse methods out of QueryEngine trait

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix styles

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add test

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* change style

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add license header

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix test literal

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-01-09 15:44:20 +08:00
Lei, HUANG
fa54870197 fix: parquet native row group pruning support (#845)
* fix: parquet native row group pruning support

* fix: use filter_map instead of flat_map
2023-01-09 12:10:14 +08:00
Ning Sun
3988770266 feat: add catalog name resolution for postgres and http interface (#810)
* feat: add catalog name resolution for postgres and http interface

* test: add tests for catalog resolution on http and postgres

* feat: assign custom catalog for query

* chore: order code for better readability
2023-01-09 11:43:25 +08:00
Xuanwo
777a3182c5 feat: Bump OpenDAL to 0.24 for better seekable support (#847)
* deps: Bump OpenDAL to 0.24 for better seekable support

Signed-off-by: Xuanwo <github@xuanwo.io>

* fix: test

Signed-off-by: Xuanwo <github@xuanwo.io>
Co-authored-by: Lei, HUANG <mrsatangel@gmail.com>
2023-01-09 11:37:43 +08:00
Ning Sun
5b675f54a8 ci: add lto and strip to weekly build (#841) 2023-01-06 16:20:23 +08:00
Lei, HUANG
627d444723 fix: remove start from LogStore; fix error message (#837) 2023-01-06 12:21:00 +08:00
LFC
d1730a9577 refactor: simplify how Frontend instance handles other protocols (#831)
* refactor: make influxdb, opentsdb and prometheus read/write go through the GRPC interface, to unify and simplify the Frontend instance in either standalone or distributed mode
2023-01-06 12:19:38 +08:00
Jiachun Feng
ca7ed67dc5 feat: collect stats from heartbeats (#833)
* feat: collect stats from heartbeats

* chore: refactor and improve the keep_lease_handler

* Update src/meta-srv/src/handler/collect_stats_handler.rs

Co-authored-by: fys <40801205+Fengys123@users.noreply.github.com>
2023-01-06 11:30:23 +08:00
Yingwen
072e5f78b4 feat: Implement delete for table (#801)
* feat: Table default implementations for insert/alter return error

* feat: Implement delete for mito table

* docs: Fix comment
2023-01-05 20:03:40 +08:00
Lei, HUANG
8f5ecefc90 feat: use raft-engine crate to reimplement logstore (#799)
* chore: remove useless method in Entry trait, add proto definition for entry and namespace

* feat: add proto definition for raft-engine based logstore

* feat: introduce RaftEngineLogstore

* feat: impl read for raft engine log store

* feat: impl raft engine logstore

* feat: raft engine logstore start and stop

* feat: add purge bg task

* fix: license header

* fix: clippy

* fix: toml files

* feat: add some test cases

* fix: CR comments

* fix: CR comments

* fix: check namespace validity and state of logstore

* fix: CR comments; add config item to control sync/async flush per write

* fix: remove unused error variants

* fix: unit tests

* fix: use compare and exchange to stop logstore

* fix: CR comments
2023-01-05 17:18:51 +08:00
Ruihang Xia
afd9866709 feat: basic promql planner for single arg function call (#828)
* wip: draft planner

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* handle function args

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* a simple test

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* list all operators that accept 1 instant vector as input

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* simplify test cases

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* clean up

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* update cargo lock

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* CR suggestions

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* CR suggestions

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* change the way to handle metric name

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-01-05 16:30:54 +08:00
LFC
89d5306740 feat: Impl Query and DDL functionality of Arrow Flight service for Frontend Instance (#827)
* feat: Implement Query and DDL functionality of Arrow Flight service for Frontend Instance
2023-01-05 14:17:57 +08:00
LFC
50cc0e9b51 feat: Impl Insert functionality of Arrow Flight service for Frontend Instance (#821)
* feat: Implement Insert functionality of Arrow Flight service for Frontend Instance

* fix: update license content

* Update src/common/grpc-expr/src/alter.rs

Co-authored-by: fys <40801205+Fengys123@users.noreply.github.com>

* fix: resolve PR comments

* fix: resolve PR comments

Co-authored-by: fys <40801205+Fengys123@users.noreply.github.com>
2023-01-04 17:48:59 +08:00
dennis zhuang
7762873842 feat: endpoint and region config for s3 storage (#820)
* feat: adds serde default attribute to options

* feat: adds endpoint and region for s3 config
2023-01-04 11:24:24 +08:00
LFC
4aa24f0639 fix: test failure (#822) 2023-01-04 10:47:18 +08:00
LFC
f1b95e25a1 fix: remove boilerplate message from GRPC error output (#813)
* fix: remove boilerplate message from GRPC error output

* fix: rebase develop
2023-01-03 20:49:36 +08:00
Ning Sun
041cd422a1 refactor: do not call use upon mysql connection (#818) 2023-01-03 19:15:47 +08:00
Ruihang Xia
f907a93b97 feat: impl RangeArray based on DictionaryArray (#796)
* feat: impl RangeArray based on DictionaryArray

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix clippys

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* apply review suggs

* fix typo

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* update license header

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* Apply suggestions from code review

Co-authored-by: Lei, HUANG <6406592+v0y4g3r@users.noreply.github.com>

* update doc to change i32 to u32

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
Co-authored-by: Lei, HUANG <6406592+v0y4g3r@users.noreply.github.com>
2023-01-03 18:04:26 +08:00
elijah
a6eb213adf feat: implement rename table (#802)
* feat: support renaming tables in the mito table engine

* chore: add test for table engine

* chore: fix test
2023-01-03 17:37:27 +08:00
Ruihang Xia
5fcad7a175 fix: update license header for instant manipulate (#817)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-01-03 16:10:21 +08:00
Ruihang Xia
0566f812d3 refactor: remove macro define_opaque_error (#812)
* refactor: remove macro define_opaque_error

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* impl BoxedError

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix tests

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* remove open-region error

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-01-03 15:50:27 +08:00
Ruihang Xia
334fd26bc5 feat: impl InstantManipulator for PromQL extension (#803)
* feat: impl InstantSelector for PromQL extension

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add tests

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* make clippy happy

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix typo

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* apply review suggs

* rename manipulator to manipulate

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-01-03 15:32:12 +08:00
Lei, HUANG
8ffc078f88 fix: license header (#815) 2023-01-03 15:09:49 +08:00
shuiyisong
179ff728df refactor: merge servers::context into session (#811)
* refactor: move context to session

* chore: add unit test

* chore: add pg, opentsdb, influxdb and prometheus to channel enum
2022-12-31 00:00:04 +08:00
Yingwen
4d56d896ca feat: Implement delete for the storage engine (#777)
* docs: Fix incorrect comment of Vector::only_null

* feat: Add delete to WriteRequest and WriteBatch

* feat: Filter deleted rows

* fix: Fix panic after reopening engine

This is detected by adding a reopen step to the delete test for region.

* fix: Fix OpType::min_type()

* test: Add delete absent key test

* chore: Address CR comments
2022-12-30 17:12:18 +08:00
discord9
6fe205f3b5 chore: Update RustPython(With GC) (#809)
* chore: use newest RustPython

* chore: use garbage-collected RustPython fork

* style: format toml
2022-12-30 16:55:43 +08:00
LFC
d13de0aeba refactor: remove AdminExpr, make DDL expressions as normal GRPC requests (#808)
* refactor: remove AdminExpr, make DDL expressions as normal GRPC requests
2022-12-30 16:47:45 +08:00
zyy17
11194f37d4 build: install ca-certificates in docker image building (#807)
refactor: install ca-certificates in docker image building

Signed-off-by: zyy17 <zyylsxm@gmail.com>

Signed-off-by: zyy17 <zyylsxm@gmail.com>
2022-12-30 14:56:39 +08:00
LFC
de6803d253 feat: handle InsertRequest(formerly InsertExpr) in new Arrow Flight (#800)
feat: handle InsertRequest(formerly InsertExpr) in new Arrow Flight interface
2022-12-30 10:24:09 +08:00
Ruihang Xia
d0ef3aa9eb docs: align Jeremy Clarkson to the right side (#804)
docs: align Jeremy Clarkson to right side

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2022-12-29 16:55:38 +08:00
LFC
04df80e640 fix: further ease the restriction of executing SQLs in new GRPC interface (#797)
* fix: carry non-recordbatch results in FlightData, to allow executing SQLs other than selection in the new GRPC interface

* Update src/datanode/src/instance/flight/stream.rs

Co-authored-by: Jiachun Feng <jiachun_feng@proton.me>
2022-12-28 16:43:21 +08:00
fys
76236646ef chore: extract some functions from "bootstrap_meta_srv" function (#795)
refactor: bootstrap of meta
2022-12-28 14:29:52 +08:00
LFC
26848f9f5c feat: Replace SelectResult with FlightData (#776)
* feat: replace SelectResult with FlightData

* Update tests/runner/src/env.rs

Co-authored-by: Ruihang Xia <waynestxia@gmail.com>

Co-authored-by: Ruihang Xia <waynestxia@gmail.com>
2022-12-28 10:22:46 +08:00
Ruihang Xia
90990584b7 feat: Prom SeriesNormalize plan (#787)
* feat: impl SeriesNormalize plan

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* some tests

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* feat: add metrics

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add license header

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* resolve CR comments

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* update tests

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* make time index column a parameter

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* precompute time index column index

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* sign the TODO

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2022-12-27 22:59:53 +08:00
LFC
a14ec94653 fix: ease the restriction of the original "SelectExpr" (#794)
fix: ease the restriction of the original "SelectExpr" since we used to pass SQLs other than selection in the related GRPC interface
2022-12-27 16:50:12 +08:00
Ruihang Xia
26a3e93ca7 chore: util workspace deps in more places (#792)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2022-12-27 16:26:59 +08:00
elijah
3978931b8e feat: support parsing the RENAME TABLE statements in the parser (#780)
* feat: add parsing `alter rename table` syntax to the parser

* chore: fix clippy

* chore: add test for parser

* fix: add test for parsing RENAME keyword

* chore: remove unused code

* fix: parse table name object

Co-authored-by: Yingwen <realevenyag@gmail.com>

* chore: fmt code

Co-authored-by: Yingwen <realevenyag@gmail.com>
2022-12-27 14:53:40 +08:00
shuiyisong
d589de63ef feat: pub auth_mysql & add auth boxed err (#788)
* chore: minor openup

* chore: open up auth_mysql and return ()

* chore: typo change

* chore: change according to ci

* chore: change according to ci

* chore: remove tonic status in auth error
2022-12-27 11:04:05 +08:00
LFC
7829e4a219 feat: Implement Arrow Flight Service (except gRPC server) for selection (#768)
* feat: Implement Arrow Flight Service (but not the GRPC server) for selection

Co-authored-by: luofucong <luofucong@greptime.com>
2022-12-26 16:41:10 +08:00
Mike Yang
bc9a46dbb7 feat: support varbinary (#767)
feat: support varbinary for table creation and record insertion
2022-12-26 13:14:12 +08:00
Ruihang Xia
a61e96477b docs: RFC of promql (#779)
* docs: RFC of promql

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* docs: change styles, list drawback of misusing arrow

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2022-12-26 13:12:24 +08:00
Yingwen
f8500e54c1 refactor: Remove PutOperation and Simplify WriteRequest API (#775)
* chore: Remove unused MutationExtra

* refactor(storage): Refactor Mutation and Payload

Change Mutation from an enum to a struct that holds the op type and record
batches, so the encoder doesn't need to convert the mutation into a record
batch. The Payload is no longer an enum; it just holds the WriteBatch data
to be serialized to the WAL. The encoder and decoder now deal with the
Payload instead of the WriteBatch, so the WriteBatch can hold extra
information that doesn't need to be stored in the WAL.

This commit also merges variants of write_batch::Error into storage::Error,
as some of them denote the same error.

* test(storage): Pass all tests in storage

* chore: Remove unused codes then format codes

* test(storage): Fix test_put_unknown_column test

* style(storage): Fix clippy

* chore: Remove some unused codes

* chore: Rebase upstream and fix clippy

* chore(storage): Remove unused codes

* chore(storage): Update comments

* feat: Remove PayloadType from wal.proto

* chore: Address CR comments

* chore: Remove unused write_batch.proto
2022-12-26 13:11:24 +08:00
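A sketch of the shape of this refactor, with stand-in types (names are illustrative, not the actual storage crate API): the op type moves from an enum variant into a plain field, so the WAL encoder handles every mutation uniformly, and Payload carries only what must be persisted:

// Before (roughly): enum Mutation { Put(RecordBatch), Delete(RecordBatch) },
// which forced the encoder to match on the variant first.

#[derive(Clone, Copy, Debug)]
enum OpType { Put, Delete }

struct RecordBatch;                  // stand-in for the real record batch

// After: op type and data travel together in one struct.
struct Mutation { op_type: OpType, record_batch: RecordBatch }

// Payload is plain data destined for the WAL; anything the WriteBatch
// needs at runtime but not on disk stays outside of it.
struct Payload { mutations: Vec<Mutation> }

fn main() {
    let payload = Payload {
        mutations: vec![Mutation { op_type: OpType::Put, record_batch: RecordBatch }],
    };
    assert!(matches!(payload.mutations[0].op_type, OpType::Put));
}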
discord9
e85780b5e4 refactor: rename some mod.rs to <MOD_NAME>.rs (#784)
* refactor: rename `mod.rs` to <MOD_NAME>.rs

* refactor: not rename mod.rs in benches/
2022-12-26 12:48:34 +08:00
Ning Sun
11bdb33d37 feat: sql query interceptor and plugin refactoring (#773)
* feat: let instance hold plugins

* feat: add sql query interceptor definition

* docs: add comments to key apis

* feat: add implementation for pre-parsing and post-parsing

* feat: add post_execute hook

* test: add tests for interceptor

* chore: add license header

* fix: clippy error

* Update src/cmd/src/frontend.rs

Co-authored-by: LFC <bayinamine@gmail.com>

* refactor: batching post_parsing calls

* refactor: rename AnyMap2 to Plugins

* feat: call pre_execute with logical plan empty at the moment

Co-authored-by: LFC <bayinamine@gmail.com>
2022-12-23 15:22:12 +08:00
LFC
1daba75e7b refactor: use "USE" keyword (#785)
Co-authored-by: luofucong <luofucong@greptime.com>
2022-12-23 14:29:47 +08:00
LFC
dc52a51576 chore: upgrade to Arrow 29.0 and use workspace package and dependencies (#782)
* chore: upgrade to Arrow 29.0 and use workspace package and dependencies

* fix: resolve PR comments

Co-authored-by: luofucong <luofucong@greptime.com>
2022-12-23 14:28:37 +08:00
Ruihang Xia
26af9e6214 ci: setup secrets for setup-protoc job (#783)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2022-12-23 11:36:39 +08:00
fys
e07791c5e8 chore: make election mod public (#781) 2022-12-22 17:32:35 +08:00
Yingwen
b6d29afcd1 ci: Use lld for coverage (#778)
* ci: Use lld for coverage

* style: Fix clippy
2022-12-22 16:10:37 +08:00
LFC
ea9af42091 chore: upgrade Rust to nightly 2022-12-20 (#772)
* chore: upgrade Rust to nightly 2022-12-20

* chore: upgrade Rust to nightly 2022-12-20

Co-authored-by: luofucong <luofucong@greptime.com>
2022-12-21 19:32:30 +08:00
shuiyisong
d0ebcc3b5a chore: open userinfo constructor (#774) 2022-12-21 17:58:43 +08:00
LFC
77182f5024 chore: upgrade Arrow to version 28, and DataFusion to 15 (#771)
Co-authored-by: luofucong <luofucong@greptime.com>
2022-12-21 17:02:11 +08:00
Ning Sun
539ead5460 feat: check database existence on http api (#764)
* feat: check database existence on http api

* Update src/servers/src/http/handler.rs

Co-authored-by: Ruihang Xia <waynestxia@gmail.com>

* feat: use database not found status code

* test: add assertion for status code

Co-authored-by: Ruihang Xia <waynestxia@gmail.com>
2022-12-21 10:28:45 +08:00
Ruihang Xia
bc0e4e2cb0 fix: fill NULL based on row_count (#765)
* fix: fill NULL based on row_count

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* simplify code

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix: replace set_len with resize

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2022-12-20 12:12:48 +08:00
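The set_len-to-resize swap above is worth a note: Vec::set_len is unsafe and only moves the length marker, exposing uninitialized memory, whereas Vec::resize actually writes the new elements. A minimal illustration:

fn main() {
    let mut null_mask: Vec<u8> = Vec::with_capacity(8);
    // unsafe { null_mask.set_len(8) } would mark 8 bytes as initialized
    // without writing them: undefined behavior when they are later read.
    null_mask.resize(8, 0); // safely fills the new tail with zeros
    assert_eq!(null_mask, vec![0u8; 8]);
}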
Ruihang Xia
7d29670c86 fix: consider null mask in sqlness display util (#763)
* fix: consider null mask in sqlness display util

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add test case

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix test case

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* change placeholder to null

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2022-12-19 14:20:28 +08:00
LFC
afd88dd53a fix: test_dist_table_scan block (#761)
* fix: `test_dist_table_scan` block

* fix: resolve PR comments

Co-authored-by: luofucong <luofucong@greptime.com>
2022-12-19 11:20:51 +08:00
Ning Sun
efd85df6be feat: add schema check on postgres startup (#758)
* feat: add schema check on postgres startup

* chore: update pgwire to 0.6.3

* test: add test for unspecified db
2022-12-19 10:53:44 +08:00
Ning Sun
ea1896493b feat: allow multiple sql statements in query string (#699)
* feat: allow multiple sql statement in query string

* test: add a test for multiple statement call

* feat: add temporary workaround for standalone mode

* fix: resolve sql parser issue temporarily

* Update src/datanode/src/instance/sql.rs

Co-authored-by: Yingwen <realevenyag@gmail.com>

* fix: adopt new sql handler

* refactor: revert changes in query engine

* refactor: assume sql-statement 1-1 on datanode

* test: use frontend for integration test

* refactor: add statement execution api for explicit single statement call

* fix: typo

* refactor: rename query method

* test: add test case for error

* test: data type change adoption

* chore: add todo from review

* chore: remove obsolete comments

* fix: resolve issues

Co-authored-by: Yingwen <realevenyag@gmail.com>
2022-12-16 19:50:20 +08:00
Jiachun Feng
66bca11401 refactor: remove optional from the protos (#756) 2022-12-16 15:47:51 +08:00
Yingwen
7c16a4a17b refactor(storage): Move write_batch::codec to a separate file (#757)
* refactor(storage): Move write_batch::codec to a separate file

* chore: move new_test_batch to write_batch mod
2022-12-16 15:32:59 +08:00
dennis zhuang
28bd7404ad feat: change column's default property to nullable (#751)
* feat: change column's default property to nullable

* chore: use all instead of any

* fix: compile error

* fix: dependencies order in cargo
2022-12-16 11:17:01 +08:00
Lei, HUANG
0653301754 feat: replace arrow2 with official implementation 🎉 (#753)
* chore: kick off. change datafusion/arrow/parquet to target version

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* chore: replace one last datafusion dep

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* feat: arrow_array switch to arrow

* chore: update dep of binary vector

* chore: fix wrong merge commit

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* feat: Switch to datatypes2

* feat: Make recordbatch compile

* chore: sort Cargo.toml

* feat: Fix common::recordbatch compiler errors

* feat: Fix recordbatch test compiling issue

* fix: api crate (#708)

* fix: rename ConcreteDataType::timestamp_millis_type to ConcreteDataType::timestamp_millisecond_type. fix other warnings regarding timestamp

* fix: revert changes in datatypes2

* fix: helper

* chore: delete datatypes based on arrow2

* feat: Fix some compiler errors in common::query (#710)

* feat: Fix some compiler errors in common::query

* feat: test_collect use vectors api

* fix: common-query subcrate (#712)

* fix: record batch adapter

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix error enum

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix: Fix common::query compiler errors (#713)

* feat: Move conversion to ScalarValue to value.rs

* fix: Fix common::query compiler errors

This commit also makes InnerError pub(crate)

* feat: Implements diff accumulator using WrapperType (#715)

* feat: Remove usage of opaque error from common::recordbatch

* feat: Remove opaque error from common::query

* feat: Fix diff compiler errors

Now common_function just uses common_query's Error and Result. Adds
a LargestType associated type to LogicalPrimitiveType to get the largest
type a logical primitive type can cast to.

* feat: Remove LargestType from NativeType trait

* chore: Update comments

* feat: Restrict Scalar::RefType of WrapperType to itself

Add trait bound `for<'a> Scalar<RefType<'a> = Self>` to WrapperType

* chore: Address CR comments

* chore: Format codes

* fix: fix compile error for mean/polyval/pow/interp ops

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* Revert "fix: fix compile error for mean/polyval/pow/interp ops"

This reverts commit fb0b4eb826.

* fix: Fix compiler errors in argmax/rate/median/norm_cdf (#716)

* fix: Fix compiler errors in argmax/rate/median/norm_cdf

* chore: Address CR comments

* fix: fix compile error for mean/polyval/pow/interp ops (#717)

* fix: fix compile error for mean/polyval/pow/interp ops

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* simplify type bounds

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix: fix argmin/percentile/clip/interp/scipy_stats_norm_pdf errors (#718)

fix: fix argmin/percentile/clip/interp/scipy_stats_norm_pdf compiler errors

* fix: fix other compile error in common-function (#719)

* further fixing

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix all compile errors in common function

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix: Fix tests and clippy for common-function subcrate (#726)

* further fixing

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix all compile errors in common function

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix tests

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix clippy

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* revert test changes

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix: row group pruning (#725)

* fix: row group pruning

* chore: use macro to simplify stats implementation

* fix: CR comments

* fix: row group metadata length mismatch

* fix: simplify code

* fix: Fix common::grpc compiler errors (#722)

* fix: Fix common::grpc compiler errors

This commit refactors RecordBatch to hold vectors in the RecordBatch
struct, so we don't need to cast arrays to vectors when serializing
or iterating the batch.

Now we use the vector API instead of the arrow API in grpc crate.

* chore: Address CR comments

* fix common record batch

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix: Fix compile error in server subcrate (#727)

* fix: Fix compile error in server subcrate

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* remove unused type alias

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* explicitly panic

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* Update src/storage/src/sst/parquet.rs

Co-authored-by: Yingwen <realevenyag@gmail.com>

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
Co-authored-by: Yingwen <realevenyag@gmail.com>

* fix: Fix common grpc expr (#730)

* fix compile errors

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* rename fn names

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix styles

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix wranings in common-time

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix: pre-cast to avoid an explosion of match arms (#734)

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* feat: upgrade storage crate to arrow and parquet official impl (#738)

* fix: compile errors

* fix: parquet reader and writer

* fix: parquet reader and writer

* fix: WriteBatch IPC encode/decode

* fix: clippy errors in storage subcrate

* chore: remove suspicious unwrap

* fix: some cr comments

* fix: CR comments

* fix: CR comments

* fix: Fix compiler errors in catalog and mito crates (#742)

* fix: Fix compiler errors in mito

* fix: Fix compiler errors in catalog crate

* style: Fix clippy

* chore: Fix use

* Merge pull request #745

* fix nyc-taxi and util

* Merge branch 'replace-arrow2' into fix-others

* fix substrait

* fix warnings and error in test

* fix: Fix imports in optimizer.rs

* fix: errors in optimizer

* fix: remove unwrap

* fix: Fix compiler errors in query crate (#746)

* fix: Fix compiler errors in state.rs

* fix: fix compiler errors in state

* feat: upgrade sqlparser to 0.26

* fix: fix datafusion engine compiler errors

* fix: Fix some tests in query crate

* fix: Fix all warnings in tests

* feat: Remove `Type` from timestamp's type name

* fix: fix query tests

Now datafusion already supports median, so this commit also removes the
median function

* style: Fix clippy

* feat: Remove RecordBatch::pretty_print

* chore: Address CR comments

* Update src/query/src/query_engine/state.rs

Co-authored-by: Ruihang Xia <waynestxia@gmail.com>

* fix: frontend compile errors (#747)

fix: fix compile errors in frontend

* fix: Fix compiler errors in script crate (#749)

* fix: Fix compiler errors in state.rs

* fix: fix compiler errors in state

* feat: upgrade sqlparser to 0.26

* fix: fix datafusion engine compiler errors

* fix: Fix some tests in query crate

* fix: Fix all warnings in tests

* feat: Remove `Type` from timestamp's type name

* fix: fix query tests

Now datafusion already supports median, so this commit also removes the
median function

* style: Fix clippy

* feat: Remove RecordBatch::pretty_print

* chore: Address CR comments

* feat: Add column_by_name to RecordBatch

* feat: modify select_from_rb

* feat: Fix some compiler errors in vector.rs

* feat: Fix more compiler errors in vector.rs

* fix: fix table.rs

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix: Fix compiler errors in coprocessor

* fix: Fix some compiler errors

* fix: Fix compiler errors in script

* chore: Remove unused imports and format code

* test: disable interval tests

* test: Fix test_compile_execute test

* style: Fix clippy

* feat: Support interval

* feat: Add RecordBatch::columns and fix clippy

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
Co-authored-by: Ruihang Xia <waynestxia@gmail.com>

* fix: Fix All The Tests! (#752)

* fix: Fix several tests compile errors

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix: some compile errors in tests

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix: compile errors in frontend tests

* fix: compile errors in frontend tests

* test: Fix tests in api and common-query

* test: Fix test in sql crate

* fix: resolve substrait error

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* chore: add more test

* test: Fix tests in servers

* fix instance_test

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* test: Fix tests in tests-integration

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
Co-authored-by: Lei, HUANG <mrsatangel@gmail.com>
Co-authored-by: evenyag <realevenyag@gmail.com>

* fix: clippy errors

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
Co-authored-by: Ruihang Xia <waynestxia@gmail.com>
Co-authored-by: evenyag <realevenyag@gmail.com>
2022-12-15 18:49:12 +08:00
LFC
61d8bc2ea1 refactor(frontend): minor changes around FrontendInstance constructor (#748)
* refactor: minor changes in some testing codes

Co-authored-by: luofucong <luofucong@greptime.com>
2022-12-15 14:34:40 +08:00
Ruihang Xia
e3785fca70 docs: change logo in readme automatically based on github theme (#743)
* docs: adaptive logo on theme

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* switch logos

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* align center

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* adjust style

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* use new logo image

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2022-12-14 19:32:51 +08:00
shuiyisong
fda9e80cbf feat: impl static_user_provider (#739)
* feat: add MemUserProvider and impl auth

* feat: impl user_provider option in fe and standalone mode

* chore: add file impl for mem provider

* chore: remove mem opts

* chore: minor change

* chore: refactor pg server to use user_provider as the indicator for password auth

* chore: fix test

* chore: extract common code

* chore: add unit test

* chore: rebase develop

* chore: add user provider to http server

* chore: minor rename

* chore: change to ref when convert to anymap

* chore: fix according to clippy

* chore: remove clone on startcommand

* chore: fix cr issue

* chore: update tempdir use

* chore: change TryFrom to normal func while parsing anymap

* chore: minor change

* chore: remove to_lowercase
2022-12-14 16:38:29 +08:00
Lei, HUANG
756c068166 feat: logstore compaction (#740)
* feat: add benchmark for wal

* add bin

* feat: impl wal compaction

* chore: This reverts commit ef9f2326

* chore: This reverts commit 9142ec0e

* fix: remove empty files

* fix: failing tests

* fix: CR comments

* fix: Mark log as stable after writer applies manifest

* fix: some cr comments and namings

* chore: rename all stable_xxx to obsolete_xxx

* chore: error message
2022-12-14 16:15:29 +08:00
dennis zhuang
6a4e2e5975 feat: promql create and skeleton (#720)
* feat: adds promql crate

* feat: adds promql-parser dependency and rfc doc

* fix: dependencies order in servers crate

* fix: forgot error.rs

* fix: comment

* fix: license header

* fix: remove docs/rfc/20221207_promql.md
2022-12-13 17:08:22 +08:00
Lei, HUANG
9ad6ddb26e fix: remove useless metaclient field from datanode Instance (#744) 2022-12-13 14:26:26 +08:00
fys
c5661ee362 feat: support http basic authentication (#733)
* feat: support http auth

* add some unit test and log

* fix

* cr

* remove unused #[derive(Clone)]
2022-12-13 10:44:33 +08:00
zyy17
9b093463cc feat: add Makefile to aggregate the commands that developers always use (#736)
* feat: add Makefile to aggregate the commands that developers always use

* refactor: add 'clean' and 'unit-test' target

* refactor: add sqlness-test target and modify some description formats

Signed-off-by: zyy17 <zyylsxm@gmail.com>
2022-12-12 13:03:49 +08:00
zyy17
61e0f1a11c refactor: add tls option in frontend cli options (#735)
* refactor: add tls option in frontend cli options

* fix: add 'Eq' trait for fixing clippy error

* fix: remove redundant clone

Signed-off-by: zyy17 <zyylsxm@gmail.com>
2022-12-12 10:02:17 +08:00
Ning Sun
249ebc6937 feat: update pgwire and refactor pg auth handler (#732) 2022-12-09 17:01:55 +08:00
elijah
c1b8981f61 refactor(mito): change the table path to schema/table_id (#728)
refactor: change the table path to `schema/table_id`
2022-12-09 12:59:16 +08:00
Jiachun Feng
949cd3e3af feat: move_value & delete_route (#707)
* feat: move_value & delete_route

* chore: minor refactor

* chore: refactor unit test of metaclient

* chore: map to kv

* Update src/meta-srv/src/service/router.rs

Co-authored-by: Yingwen <realevenyag@gmail.com>

* Update src/meta-srv/src/service/router.rs

Co-authored-by: Yingwen <realevenyag@gmail.com>

* chore: by code review

Co-authored-by: Yingwen <realevenyag@gmail.com>
2022-12-09 11:07:48 +08:00
SSebo
b26982c5d7 feat: support timestamp new syntax (#697)
* feat: support timestamp new syntax

* fix: NOT NULL at end of new timestamp index syntax

* chore: simplify code
2022-12-09 10:52:14 +08:00
fys
4fdf26810c feat: support auth in frontend (#688)
* feat: add UserProvider trait

* chore: minor fix

* support pg mysql

* refactor and add some logs

* chore: add license

Co-authored-by: shuiyisong <xixing.sys@gmail.com>
2022-12-08 11:51:52 +08:00
dennis zhuang
7f59758e69 feat: bump opendal version to 0.22 (#721)
* feat: bump opendal version to 0.22

* fix: LoggingLayer
2022-12-08 11:19:21 +08:00
Zheming Li
a521ab5041 fix: set default value when failing to get git info instead of panicking (#696)
fix: set default value when failing to get git info instead of panicking
2022-12-07 13:16:27 +08:00
LFC
833216d317 refactor: directly invoke Datanode methods in standalone mode (part 1) (#694)
* refactor: directly invoke Datanode methods in standalone mode

* test: add more unit tests

* fix: get rid of `println` in testing codes

* fix: resolve PR comments

* fix: resolve PR comments

Co-authored-by: luofucong <luofucong@greptime.com>
2022-12-07 11:37:59 +08:00
Ruihang Xia
90c832b33d refactor: drop support of physical plan query interface (#714)
* refactor: drop support of physical plan query interface

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* refactor: collapse server/grpc sub-module

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* refactor: remove unused errors

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2022-12-06 19:23:32 +08:00
LFC
8959dbcef8 feat: Substrait logical plan (#704)
* feat: use Substrait logical plan to query data from Datanode in Frontend in distributed mode

* fix: resolve PR comments

* fix: resolve PR comments

* fix: resolve PR comments

Co-authored-by: luofucong <luofucong@greptime.com>
2022-12-06 19:21:57 +08:00
discord9
2034b40f33 chore: update RustPython dependence(With a tweaked fork) (#655)
* refactor: update RsPy

* depend: add `rustpython-pylib`

* feat: add_frozen stdlib for every vm init

* feat: limit stdlib to a selected few

* chore: use `rev` instead of `branch` in dependencies

* refactor: rename to allow_list

* feat: use opt level one

* doc: add username for TODO & change optimize level to 0

* style: fmt .toml
2022-12-06 14:15:00 +08:00
SSebo
55e6be7af1 fix: test_server_require_secure_client_secure (#701) 2022-12-06 10:38:54 +08:00
discord9
f9bfb121db feat: add rate() udf (#508)
* feat: rewrite `rate` UDF

* feat: rename to `prom_rate`

* refactor: solve conflict & add license

* refactor: import arrow
2022-12-06 10:30:13 +08:00
Ruihang Xia
6fb413ae50 ci: add toml format linter (#706)
* chore: run taplo format

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* ci: add workflow to check toml

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* rerun formatter with indent set to 4 spaces

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* update check command

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2022-12-05 20:03:10 +08:00
Ruihang Xia
beb07fc895 feat: new datatypes subcrate based on the official arrow (#705)
* feat: Init datatypes2 crate

* chore: Remove some unimplemented types

* feat: Implements PrimitiveType and PrimitiveVector for datatypes2 (#633)

* feat: Implement primitive types and vectors

* feat: Implement a wrapper type

* feat: Remove VectorType from ScalarRef

* feat: Move some trait bound from NativeType to WrapperType

* feat: pub use  primitive vectors and builders

* feat: Returns error in try_from when type mismatch

* feat: Impl PartialEq for some vectors

* test: Pass vector tests

* chore: Add license header

* test: Pass more vector tests

* feat: Implement some methods of vector Helper

* test: Pass more tests

* style: Fix clippy

* chore: Add license header

* feat: Remove IntoValueRef trait

* feat: Add NativeType trait bound to WrapperType::Native

* docs: Explain what is wrapper type

* chore: Fix typos

* refactor: LogicalPrimitiveType::type_name returns str

* feat: Implements DateType and DateVector (#651)

* feat: Implement DateType and DateVector

* test: Pass more value and data type tests

* chore: Address CR comments

* test: Skip list value test

* feat: datatypes2 datetime (#661)

* feat: impl DateTime type and vector

* fix: add license header

* fix: CR comments and add more tests

* fix: customized serialization for wrapper type

* feat: Implements NullType and NullVector (#658)

* feat: Implements NullType and NullVector

* chore: Address CR comment

Co-authored-by: Ruihang Xia <waynestxia@gmail.com>

* chore: Address CR comment

Co-authored-by: Ruihang Xia <waynestxia@gmail.com>

* feat: Implements StringType and StringVector (#659)

* feat: implement string vector

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add more test and from

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix clippy

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* cover NUL

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* feat: impl datatypes2/timestamp (#686)

* feat: add timestamp datatype and vectors

* fix: cr comments and reformat code

* chore: add some tests

* feat: Implements ListType and ListVector (#681)

* feat: Implement ListType and ListVector

* test: Pass more tests

* style: Fix clippy

* chore: Fix comment

* chore: Address CR comments

* feat: impl constant vector (#680)

* feat: impl constant vector

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix tests

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* Apply suggestions from code review

Co-authored-by: Yingwen <realevenyag@gmail.com>

* rename fn names

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* remove println

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
Co-authored-by: Yingwen <realevenyag@gmail.com>

* feat: Implements Validity (#684)

* feat: Implements Validity

* chore: remove pub from sub mod in vectors

* feat: Implements schema for datatypes2 (#695)

* feat: Add is_timestamp_compatible to DataType

* feat: Implement ColumnSchema and Schema

* feat: Impl RawSchema

* chore: Remove useless codes and run more tests

* chore: Fix clippy

* feat: Impl from_arrow_time_unit and pass schema tests

* chore: add more tests for timestamp (#702)

* chore: add more tests for timestamp

* chore: add replicate test for timestamps

* feat: Implements helper methods for vectors/values (#703)

* feat: Implement helper methods for vectors/values

* chore: Address CR comments

* chore: add more test for timestamp

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
Co-authored-by: evenyag <realevenyag@gmail.com>
Co-authored-by: Lei, HUANG <6406592+v0y4g3r@users.noreply.github.com>
Co-authored-by: Lei, HUANG <mrsatangel@gmail.com>
2022-12-05 19:59:23 +08:00
Ning Sun
4275e47bdb refactor: use updated mysql_async client (#698) 2022-12-05 11:18:32 +08:00
dennis zhuang
6720bc5f7c fix: validate create table request in mito engine (#690)
* fix: validate create table request in mito engine

* fix: comment

* chore: remove TIMESTAMP_INDEX in system.rs
2022-12-05 11:01:43 +08:00
Lei, HUANG
4052563248 fix: pr template task default state (#687) 2022-12-02 20:39:53 +08:00
dennis zhuang
952e1bd626 test: update dummy result (#693) 2022-12-02 19:22:37 +08:00
shuiyisong
8232015998 fix: cargo sort in pre-commit (#689) 2022-12-02 16:19:31 +08:00
Ruihang Xia
d82a3a7d58 feat: implement most of scalar function and selection conversion in substrait (#678)
* impl to_df_scalar_function

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* part of scalar functions

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* conjunction over filters

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* change the ser/de target to substrait::Plan

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* basic test coverage

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix typos and license header

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix clippy

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix CR comments

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* logs unsupported extension

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* Update src/common/substrait/src/df_expr.rs

Co-authored-by: Yingwen <realevenyag@gmail.com>

* address review comments

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* change format

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* replace context with with_context

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
Co-authored-by: Yingwen <realevenyag@gmail.com>
2022-12-02 14:46:05 +08:00
Ning Sun
0599465685 feat: inject current database/schema into query context for postgres protocol (#685)
* feat: inject current database/schema into query context

* test: avoid duplicate server setup
2022-12-02 11:49:39 +08:00
Mofeng
13d51250ba feat: add http /health api (#676)
* feat: add http `/health` api

* feat: add `/health` api test suite in http integration test
2022-12-01 19:11:58 +08:00
LFC
6127706b5b feat: support "use" stmt part 1 (#672)
* feat: a bare sketch of session; support "use" in MySQL server; modify insertion and selection related codes in Datanode
2022-12-01 17:05:32 +08:00
dennis zhuang
2e17e9c4b5 feat: supports s3 storage (#656)
* feat: adds s3 object storage configuration

* feat: adds s3 integration test

* chore: use map

* fix: forgot license header

* fix: check if bucket is empty in test_on

* chore: address CR issues

* refactor: run s3 test with dotenv

* chore: randomize grpc port for test

* fix: README in tests-integration

* chore: remove redundant comments
2022-12-01 10:59:14 +08:00
xiaomin tang
b0cbfa7ffb docs: add a roadmap link in README (#673)
* docs: add roadmap to README

* docs: missing period
2022-11-30 21:25:27 +08:00
Ruihang Xia
20172338e8 ci: Revert "ci: change CI unit test trigger" (#674)
Revert "ci: change CI unit test trigger (#671)"

This reverts commit 9c53f9b24c.
2022-11-30 21:23:40 +08:00
Ruihang Xia
9c53f9b24c ci: change CI unit test trigger (#671)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2022-11-30 20:19:35 +08:00
Dongxu Wang
6d24f7ebb6 refactor: bump axum 0.6, use recommended way to nest routes (#668) 2022-11-30 20:04:33 +08:00
SSebo
68c2de8e45 feat: mysql and pg server support tls (#641)
* feat: mysql and pg server support tls

* chore: replace opensrv-mysql to original

* chore: TlsOption is required but supplies a default value

* feat: mysql server support force tls

* chore: move TlsOption to servers

* test: mysql server disable / prefer / required tls mode

* test: pg server disable / prefer / required tls mode

* chore: add doc and remove no used code

* chore: add TODO and restore cargo linker config
2022-11-30 12:46:15 +08:00
Yingwen
a17dcbc511 chore: fix SequenceNotMonotonic error message (#664)
* chore: fix SequenceNotMonotonic error message

the previous sequence should be greater than or equal to the given sequence

* Apply suggestions from code review

Co-authored-by: Ruihang Xia <waynestxia@gmail.com>
2022-11-30 11:58:43 +08:00
Ning Sun
53ab19ea5a ci: remove assignees which is causing error (#663) 2022-11-30 11:36:35 +08:00
Ning Sun
84c44cf540 ci: fix doc label task on forked repo (#654) 2022-11-30 11:23:15 +08:00
LFC
020b9936cd fix: correctly detach spawned mysql listener task (#657)
Co-authored-by: luofucong <luofucong@greptime.com>
2022-11-29 18:39:18 +08:00
Ning Sun
75dcf2467b refactor: add tests-integration module (#590)
* refactor: add integration-tests module

* Apply suggestions from code review

Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>

* test: move grpc module to tests-integration

* test: adapt new standalone mode

* test: improve http assertion

Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
2022-11-29 16:28:58 +08:00
Ruihang Xia
eea5393f96 feat: UI improvement for integration test runner (#645)
* improve dir resolving and start up ordering

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix orphan process

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* Update tests/runner/src/util.rs, fix typo

Co-authored-by: Dongxu Wang <dongxu@apache.org>

* simplify logic via tokio timeout

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
Co-authored-by: Dongxu Wang <dongxu@apache.org>
2022-11-29 15:32:39 +08:00
Ning Sun
3d312d389d ci: add doc label support for pr too (#650) 2022-11-29 15:21:12 +08:00
dennis zhuang
fdc73fb52f perf: cache python interpreter in TLS (#649)
* perf: cache python interpreter when executing coprocessors

* test: speedup test_execute_script by reusing interpreter

* fix: remove comment

* chore: use get_or_insert_with instead
2022-11-29 14:41:37 +08:00
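TLS in this title means thread-local storage. A sketch of the caching pattern, with Interpreter as a stand-in for the real RustPython VM handle:

use std::cell::RefCell;

struct Interpreter;                      // stand-in for the RustPython VM
impl Interpreter { fn new() -> Self { Interpreter } }

thread_local! {
    // One interpreter per thread, created lazily on first use.
    static INTERPRETER: RefCell<Option<Interpreter>> = RefCell::new(None);
}

fn with_interpreter<R>(f: impl FnOnce(&Interpreter) -> R) -> R {
    INTERPRETER.with(|cell| {
        let mut slot = cell.borrow_mut();
        // This is the `get_or_insert_with` mentioned in the commit.
        let interp = slot.get_or_insert_with(Interpreter::new);
        f(interp)
    })
}

fn main() {
    with_interpreter(|_vm| { /* run a coprocessor here */ });
}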
Ning Sun
2a36e26d19 ci: add action to create doc issue when change labelled (#648)
ci: add action to create doc issue when change labeled
2022-11-29 14:25:57 +08:00
Zheming Li
baef640fe3 feat: add --version command line option (#632)
* add version command line option

* use concat!

Co-authored-by: Ruihang Xia <waynestxia@gmail.com>

Co-authored-by: Ruihang Xia <waynestxia@gmail.com>
2022-11-28 17:07:17 +08:00
dennis zhuang
5fddb799f7 feat: enable atomic write for file object storage (#643)
* fix: remove opendal from catalog dependencies

* feat: enable atomic writing for fs service
2022-11-28 16:01:32 +08:00
Dongxu Wang
f372229b18 fix: append table id to table data dir (#640) 2022-11-28 10:53:13 +08:00
Xuanwo
4085fc7899 chore: Bump OpenDAL to v0.21.1 (#639)
* deps: Bump OpenDAL to v0.21.1

Signed-off-by: Xuanwo <github@xuanwo.io>

* Avoid using raw types when not needed

Signed-off-by: Xuanwo <github@xuanwo.io>

Signed-off-by: Xuanwo <github@xuanwo.io>
2022-11-27 10:18:39 +08:00
Ruihang Xia
30940e692a feat: impl DROP TABLE on memory catalog based standalone mode (#630)
* feat: implement drop table for standalone mode

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* update integration test

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* enhancement test

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2022-11-25 11:53:46 +08:00
Mike Yang
b371ce0f48 test: added tests for statements methods (#622)
* test: added tests for parse_column_default_constraint

* test: added test for sql_column_def_to_grpc_column_def

* refactor: remove hardcode in test
2022-11-25 11:35:06 +08:00
Lei, HUANG
ac7f52d303 fix: start datanode instance before frontend services (#634) 2022-11-25 11:25:57 +08:00
Dongxu Wang
051768b735 ci: add spell check with typos (#627) 2022-11-24 14:46:50 +08:00
fys
c5b0d2431f feat: remove InsertBatch in gRPC message (#570) 2022-11-24 14:04:48 +08:00
Lei, HUANG
4038dd4067 fix: add concurrency control for catalog manager (#619) 2022-11-24 11:10:33 +08:00
Dongxu Wang
8be0f05570 chore: able to config axum timeout in toml (#624) 2022-11-24 11:09:21 +08:00
zyy17
69f06eec8b ci: change scheduled release from nightly to weekly (#623)
Signed-off-by: zyy17 <zyylsxm@gmail.com>
2022-11-24 11:05:35 +08:00
Ruihang Xia
7b37e99a45 feat: deregister table for MemoryCatalogManager (#620)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2022-11-24 09:36:27 +08:00
dennis zhuang
c09775d17f feat: adds metrics, tracing and retry layer to object-store (#621) 2022-11-23 11:40:03 +08:00
Francis Du
4a9cf49637 feat: support explain syntax (#546) 2022-11-22 21:22:32 +08:00
Ruihang Xia
9f865b50ab test: add dummy select case (#618)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2022-11-22 16:47:45 +08:00
Ruihang Xia
b407ebf6bb feat: integration test suite (#487)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2022-11-22 15:34:13 +08:00
Lei, HUANG
c144a1b20e feat: impl alter table in distributed mode (#572) 2022-11-22 15:17:25 +08:00
Yingwen
0791c65149 refactor: replace some usage of MutableBitmap by BitVec (#610) 2022-11-21 17:36:53 +08:00
LFC
62fcb54258 fix: correctly open table when distributed datanode restart (#576)
Co-authored-by: luofucong <luofucong@greptime.com>
2022-11-21 15:15:14 +08:00
Lei, HUANG
2b6b979d5a fix: remove datanode mysql options in standalone mode (#595) 2022-11-21 14:15:47 +08:00
Dongxu Wang
b6fa316c65 chore: correct typos (#589) (#592) 2022-11-21 14:07:45 +08:00
Lei, HUANG
ca5734edb3 feat: disable mysql server on datanode when running in standalone mode (#593)
Mike Yang
5428ad364e fix: make nullable as default when alter table (#591) 2022-11-21 12:11:19 +08:00
zyy17
663c725838 fix: fix nightly build error and fix typo (#588)
Signed-off-by: zyy17 <zyylsxm@gmail.com>
2022-11-21 11:49:36 +08:00
zyy17
c94b544e4a ci: modify image registry in release.yml (#582)
Signed-off-by: zyy17 <zyylsxm@gmail.com>
2022-11-19 09:19:54 +08:00
Ruihang Xia
f465040acc feat: lazy evaluated record batch stream (#573)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2022-11-18 21:42:10 +08:00
Yingwen
22ae983280 refactor: Use re-exported arrow mod from datatypes crate (#571) 2022-11-18 18:38:07 +08:00
Igor Morozov
e1f326295f feat: implement DESCRIBE TABLE (#558)
Also needs to support DESCRIBE TABLE in other catalogs/schemas
2022-11-18 16:34:00 +08:00
aievl
6d762aa9dc feat: update mysql default listen port to 4406 (#568)
Co-authored-by: zhaozhenhang <zhaozhenhang@kuaishou.com>
2022-11-18 14:55:11 +08:00
Ruihang Xia
d4b09f69ab docs: specify protoc version requirement (#564)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
Co-authored-by: Yingwen <realevenyag@gmail.com>
2022-11-18 14:36:25 +08:00
Xuanwo
1f0b39cc8d chore: Bump OpenDAL to v0.20 (#569)
Signed-off-by: Xuanwo <github@xuanwo.io>
2022-11-18 14:17:38 +08:00
zyy17
dee5ccec9e ci: add nightly build job (#565) 2022-11-18 11:48:29 +08:00
dennis zhuang
f8788273d5 feat: drop column for alter table (#562)
* feat: drop column for alter table

* refactor: rename RemoveColumns to DropColumns

* test: alter table

* chore: error msg

Co-authored-by: Ruihang Xia <waynestxia@gmail.com>

* fix: test_parse_alter_drop_column

Co-authored-by: Ruihang Xia <waynestxia@gmail.com>
2022-11-17 23:00:16 +08:00
jay
df465308cc current blog URL responds with 404, should be https://greptime.com/blogs/index (#561) 2022-11-17 21:24:04 +08:00
LFC
e7b4d2b9cd feat: Implement table_info() for DistTable (#536) (#557)
* feat: Implement `table_info()` for `DistTable` (#536)

* Update src/catalog/src/error.rs

Co-authored-by: Yingwen <1405012107@qq.com>

Co-authored-by: luofucong <luofucong@greptime.com>
Co-authored-by: Yingwen <1405012107@qq.com>
2022-11-17 18:40:58 +08:00
discord9
bf408e3b96 Update README.md (#552)
Add RustPython's Acknowledgement
2022-11-17 14:15:43 +08:00
dennis zhuang
73e6e2e01b fix: split code and output in README (#549) 2022-11-17 12:54:02 +08:00
Lei, Huang
8faa6b0f09 refactor: start options (#545)
* refactor: config options for frontend/datanode/standalone

* chore: rename MetaClientOpts::metasrv_addr to MetaClientOpts::metasrv_addrs

* fix: clippy

* fix: change default meta-srv addr to 127.0.0.1:3002
2022-11-17 11:47:39 +08:00
872 changed files with 74673 additions and 29251 deletions

View File

@@ -1,2 +1,5 @@
[target.aarch64-unknown-linux-gnu]
linker = "aarch64-linux-gnu-gcc"
[alias]
sqlness = "run --bin sqlness-runner --"

10
.env.example Normal file
View File

@@ -0,0 +1,10 @@
# Settings for s3 test
GT_S3_BUCKET=S3 bucket
GT_S3_ACCESS_KEY_ID=S3 access key id
GT_S3_ACCESS_KEY=S3 secret access key
# Settings for oss test
GT_OSS_BUCKET=OSS bucket
GT_OSS_ACCESS_KEY_ID=OSS access key id
GT_OSS_ACCESS_KEY=OSS access key
GT_OSS_ENDPOINT=OSS endpoint

View File

@@ -0,0 +1,13 @@
{
"LABEL": {
"name": "breaking change",
"color": "D93F0B"
},
"CHECKS": {
"regexp": "^(?:(?!!:).)*$",
"ignoreLabels": [
"ignore-title"
],
"alwaysPassCI": true
}
}

View File

@@ -1,10 +1,12 @@
{
"LABEL": {
"name": "Invalid PR Title",
"color": "B60205"
},
"CHECKS": {
"regexp": "^(feat|fix|test|refactor|chore|style|docs|perf|build|ci|revert)(\\(.*\\))?:.*",
"ignoreLabels" : ["ignore-title"]
}
"LABEL": {
"name": "Invalid PR Title",
"color": "B60205"
},
"CHECKS": {
"regexp": "^(feat|fix|test|refactor|chore|style|docs|perf|build|ci|revert)(\\(.*\\))?\\!?:.*",
"ignoreLabels": [
"ignore-title"
]
}
}
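The only change to the title pattern above is the optional `!?` before the colon, which lets conventional-commit titles flag breaking changes (e.g. `feat!:` or `feat(scope)!:`). Below is a quick illustrative check of the same pattern using Rust's `regex` crate; the crate choice and test titles are ours for demonstration, while the real check runs inside the pr-title-checker action:

```rust
use regex::Regex; // regex = "1" in Cargo.toml

fn main() {
    // The updated title pattern from the config above, with the JSON
    // escaping ("\\(") translated into a Rust raw string.
    let re = Regex::new(
        r"^(feat|fix|test|refactor|chore|style|docs|perf|build|ci|revert)(\(.*\))?!?:.*",
    )
    .unwrap();
    assert!(re.is_match("feat: add a thing")); // ordinary title
    assert!(re.is_match("feat(scope)!: breaking change")); // newly accepted form
    assert!(!re.is_match("feature: wrong type")); // unknown type is rejected
}
```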

View File

@@ -13,7 +13,7 @@ Please explain IN DETAIL what the changes are in this PR and why they are needed
## Checklist
- [] I have written the necessary rustdoc comments.
- [] I have added the necessary unit tests and integration tests.
- [ ] I have written the necessary rustdoc comments.
- [ ] I have added the necessary unit tests and integration tests.
## Refer to a related PR or issue link (optional)

42
.github/workflows/apidoc.yml vendored Normal file
View File

@@ -0,0 +1,42 @@
on:
push:
branches:
- develop
paths-ignore:
- 'docs/**'
- 'config/**'
- '**.md'
- '.dockerignore'
- 'docker/**'
- '.gitignore'
name: Build API docs
env:
RUST_TOOLCHAIN: nightly-2023-02-26
jobs:
apidoc:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: arduino/setup-protoc@v1
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
- run: cargo doc --workspace --no-deps --document-private-items
- run: |
cat <<EOF > target/doc/index.html
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="refresh" content="0; url='greptime/'" />
</head>
<body></body></html>
EOF
- name: Publish dist directory
uses: JamesIves/github-pages-deploy-action@v4
with:
folder: target/doc

View File

@@ -1,64 +0,0 @@
on:
pull_request:
types: [opened, synchronize, reopened, ready_for_review]
paths-ignore:
- 'docs/**'
- 'config/**'
- '**.md'
- '.dockerignore'
- 'docker/**'
- '.gitignore'
push:
branches:
- "main"
- "develop"
paths-ignore:
- 'docs/**'
- 'config/**'
- '**.md'
- '.dockerignore'
- 'docker/**'
- '.gitignore'
workflow_dispatch:
name: Code coverage
env:
RUST_TOOLCHAIN: nightly-2022-07-14
jobs:
coverage:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest-8-cores
timeout-minutes: 60
steps:
- uses: actions/checkout@v3
- uses: arduino/setup-protoc@v1
- name: Install toolchain
uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
components: llvm-tools-preview
- name: Rust Cache
uses: Swatinem/rust-cache@v2
- name: Install latest nextest release
uses: taiki-e/install-action@nextest
- name: Install cargo-llvm-cov
uses: taiki-e/install-action@cargo-llvm-cov
- name: Collect coverage data
run: cargo llvm-cov nextest --workspace --lcov --output-path lcov.info
env:
RUST_BACKTRACE: 1
CARGO_INCREMENTAL: 0
GT_S3_BUCKET: ${{ secrets.S3_BUCKET }}
GT_S3_ACCESS_KEY_ID: ${{ secrets.S3_ACCESS_KEY_ID }}
GT_S3_ACCESS_KEY: ${{ secrets.S3_ACCESS_KEY }}
UNITTEST_LOG_DIR: "__unittest_logs"
- name: Codecov upload
uses: codecov/codecov-action@v2
with:
token: ${{ secrets.CODECOV_TOKEN }}
files: ./lcov.info
flags: rust
fail_ci_if_error: true
verbose: true

View File

@@ -7,6 +7,7 @@ on:
- '**.md'
- '.dockerignore'
- 'docker/**'
- '.gitignore'
push:
branches:
- develop
@@ -23,9 +24,16 @@ on:
name: CI
env:
RUST_TOOLCHAIN: nightly-2022-07-14
RUST_TOOLCHAIN: nightly-2023-02-26
jobs:
typos:
name: Spell Check with Typos
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: crate-ci/typos@v1.13.10
check:
name: Check
if: github.event.pull_request.draft == false
@@ -34,6 +42,8 @@ jobs:
steps:
- uses: actions/checkout@v3
- uses: arduino/setup-protoc@v1
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
@@ -42,6 +52,23 @@ jobs:
- name: Run cargo check
run: cargo check --workspace --all-targets
toml:
name: Toml Check
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
timeout-minutes: 60
steps:
- uses: actions/checkout@v3
- uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
- name: Rust Cache
uses: Swatinem/rust-cache@v2
- name: Install taplo
run: cargo install taplo-cli --version ^0.8 --locked
- name: Run taplo
run: taplo format --check --option "indent_string= "
# Use coverage to run test.
# test:
# name: Test Suite
@@ -57,6 +84,8 @@ jobs:
# path: ./llvm
# key: llvm
# - uses: arduino/setup-protoc@v1
# with:
# repo-token: ${{ secrets.GITHUB_TOKEN }}
# - uses: KyleMayes/install-llvm-action@v1
# with:
# version: "14.0"
@@ -82,6 +111,42 @@ jobs:
# GT_S3_ACCESS_KEY: ${{ secrets.S3_ACCESS_KEY }}
# UNITTEST_LOG_DIR: "__unittest_logs"
sqlness:
name: Sqlness Test
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest-8-cores
timeout-minutes: 60
needs: [clippy]
steps:
- uses: actions/checkout@v3
- uses: arduino/setup-protoc@v1
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
- name: Rust Cache
uses: Swatinem/rust-cache@v2
- name: Run etcd
run: |
ETCD_VER=v3.5.7
DOWNLOAD_URL=https://github.com/etcd-io/etcd/releases/download
curl -L ${DOWNLOAD_URL}/${ETCD_VER}/etcd-${ETCD_VER}-linux-amd64.tar.gz -o /tmp/etcd-${ETCD_VER}-linux-amd64.tar.gz
mkdir -p /tmp/etcd-download
tar xzvf /tmp/etcd-${ETCD_VER}-linux-amd64.tar.gz -C /tmp/etcd-download --strip-components=1
rm -f /tmp/etcd-${ETCD_VER}-linux-amd64.tar.gz
sudo cp -a /tmp/etcd-download/etcd* /usr/local/bin/
nohup etcd >/tmp/etcd.log 2>&1 &
- name: Run sqlness
run: cargo sqlness && ls /tmp
- name: Upload sqlness logs
uses: actions/upload-artifact@v3
with:
name: sqlness-logs
path: /tmp/greptime-*.log
retention-days: 3
fmt:
name: Rustfmt
if: github.event.pull_request.draft == false
@@ -90,6 +155,8 @@ jobs:
steps:
- uses: actions/checkout@v3
- uses: arduino/setup-protoc@v1
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
@@ -107,6 +174,8 @@ jobs:
steps:
- uses: actions/checkout@v3
- uses: arduino/setup-protoc@v1
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
@@ -115,3 +184,52 @@ jobs:
uses: Swatinem/rust-cache@v2
- name: Run cargo clippy
run: cargo clippy --workspace --all-targets -- -D warnings -D clippy::print_stdout -D clippy::print_stderr
coverage:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest-8-cores
timeout-minutes: 60
needs: [clippy]
steps:
- uses: actions/checkout@v3
- uses: arduino/setup-protoc@v1
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: KyleMayes/install-llvm-action@v1
with:
version: "14.0"
- name: Install toolchain
uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
components: llvm-tools-preview
- name: Rust Cache
uses: Swatinem/rust-cache@v2
- name: Install latest nextest release
uses: taiki-e/install-action@nextest
- name: Install Python
uses: actions/setup-python@v4
with:
python-version: '3.10'
- name: Install PyArrow Package
run: pip install pyarrow
- name: Install cargo-llvm-cov
uses: taiki-e/install-action@cargo-llvm-cov
- name: Collect coverage data
run: cargo llvm-cov nextest --workspace --lcov --output-path lcov.info -F pyo3_backend
env:
CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=lld"
RUST_BACKTRACE: 1
CARGO_INCREMENTAL: 0
GT_S3_BUCKET: ${{ secrets.S3_BUCKET }}
GT_S3_ACCESS_KEY_ID: ${{ secrets.S3_ACCESS_KEY_ID }}
GT_S3_ACCESS_KEY: ${{ secrets.S3_ACCESS_KEY }}
UNITTEST_LOG_DIR: "__unittest_logs"
- name: Codecov upload
uses: codecov/codecov-action@v2
with:
token: ${{ secrets.CODECOV_TOKEN }}
files: ./lcov.info
flags: rust
fail_ci_if_error: false
verbose: true

39
.github/workflows/doc-issue.yml vendored Normal file
View File

@@ -0,0 +1,39 @@
name: Create Issue in downstream repos
on:
issues:
types:
- labeled
pull_request_target:
types:
- labeled
jobs:
doc_issue:
if: github.event.label.name == 'doc update required'
runs-on: ubuntu-latest
steps:
- name: create an issue in doc repo
uses: dacbd/create-issue-action@main
with:
owner: GreptimeTeam
repo: docs
token: ${{ secrets.DOCS_REPO_TOKEN }}
title: Update docs for ${{ github.event.issue.title || github.event.pull_request.title }}
body: |
A document change request is generated from
${{ github.event.issue.html_url || github.event.pull_request.html_url }}
cloud_issue:
if: github.event.label.name == 'cloud followup required'
runs-on: ubuntu-latest
steps:
- name: create an issue in cloud repo
uses: dacbd/create-issue-action@main
with:
owner: GreptimeTeam
repo: greptimedb-cloud
token: ${{ secrets.DOCS_REPO_TOKEN }}
title: Followup changes in ${{ github.event.issue.title || github.event.pull_request.title }}
body: |
A followup request is generated from
${{ github.event.issue.html_url || github.event.pull_request.html_url }}

55
.github/workflows/docs.yml vendored Normal file
View File

@@ -0,0 +1,55 @@
on:
pull_request:
types: [opened, synchronize, reopened, ready_for_review]
paths:
- 'docs/**'
- 'config/**'
- '**.md'
- '.dockerignore'
- 'docker/**'
- '.gitignore'
push:
branches:
- develop
- main
paths:
- 'docs/**'
- 'config/**'
- '**.md'
- '.dockerignore'
- 'docker/**'
- '.gitignore'
workflow_dispatch:
name: CI
# To pass the required status check, see:
# https://docs.github.com/en/repositories/configuring-branches-and-merges-in-your-repository/defining-the-mergeability-of-pull-requests/troubleshooting-required-status-checks#handling-skipped-but-required-checks
jobs:
check:
name: Check
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- run: 'echo "No action required"'
fmt:
name: Rustfmt
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- run: 'echo "No action required"'
clippy:
name: Clippy
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- run: 'echo "No action required"'
coverage:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- run: 'echo "No action required"'

View File

@@ -13,4 +13,4 @@ jobs:
steps:
- uses: actions/checkout@v2
- name: Check License Header
uses: apache/skywalking-eyes/header@main
uses: apache/skywalking-eyes/header@df70871af1a8109c9a5b1dc824faaf65246c5236

View File

@@ -18,3 +18,12 @@ jobs:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
pass_on_octokit_error: false
configuration_path: ".github/pr-title-checker-config.json"
breaking:
runs-on: ubuntu-latest
timeout-minutes: 10
steps:
- uses: thehanimo/pr-title-checker@v1.3.4
with:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
pass_on_octokit_error: false
configuration_path: ".github/pr-title-breaking-change-label-config.json"

View File

@@ -2,12 +2,23 @@ on:
push:
tags:
- "v*.*.*"
schedule:
# At 00:00 on Monday.
- cron: '0 0 * * 1'
workflow_dispatch:
name: Release
env:
RUST_TOOLCHAIN: nightly-2022-07-14
RUST_TOOLCHAIN: nightly-2023-02-26
# FIXME(zyy17): Would be better to use `gh release list -L 1 | cut -f 3` to get the latest release version tag, but for a long time, we will stay at 'v0.1.0-alpha-*'.
SCHEDULED_BUILD_VERSION_PREFIX: v0.1.0-alpha
# In the future, we can change SCHEDULED_PERIOD to nightly.
SCHEDULED_PERIOD: weekly
CARGO_PROFILE: weekly
jobs:
build:
@@ -17,18 +28,24 @@ jobs:
# The file format is greptime-<os>-<arch>
include:
- arch: x86_64-unknown-linux-gnu
os: ubuntu-latest-16-cores
os: ubuntu-2004-16-cores
file: greptime-linux-amd64
continue-on-error: false
- arch: aarch64-unknown-linux-gnu
os: ubuntu-latest-16-cores
os: ubuntu-2004-16-cores
file: greptime-linux-arm64
continue-on-error: true
- arch: aarch64-apple-darwin
os: macos-latest
file: greptime-darwin-arm64
continue-on-error: true
- arch: x86_64-apple-darwin
os: macos-latest
file: greptime-darwin-amd64
continue-on-error: true
runs-on: ${{ matrix.os }}
continue-on-error: ${{ matrix.continue-on-error }}
if: github.repository == 'GreptimeTeam/greptimedb'
steps:
- name: Checkout sources
uses: actions/checkout@v3
@@ -58,6 +75,25 @@ jobs:
run: |
brew install protobuf
- name: Install etcd for linux
if: contains(matrix.arch, 'linux') && endsWith(matrix.arch, '-gnu')
run: |
ETCD_VER=v3.5.7
DOWNLOAD_URL=https://github.com/etcd-io/etcd/releases/download
curl -L ${DOWNLOAD_URL}/${ETCD_VER}/etcd-${ETCD_VER}-linux-amd64.tar.gz -o /tmp/etcd-${ETCD_VER}-linux-amd64.tar.gz
mkdir -p /tmp/etcd-download
tar xzvf /tmp/etcd-${ETCD_VER}-linux-amd64.tar.gz -C /tmp/etcd-download --strip-components=1
rm -f /tmp/etcd-${ETCD_VER}-linux-amd64.tar.gz
sudo cp -a /tmp/etcd-download/etcd* /usr/local/bin/
nohup etcd >/tmp/etcd.log 2>&1 &
- name: Install etcd for macos
if: contains(matrix.arch, 'darwin')
run: |
brew install etcd
brew services start etcd
- name: Install dependencies for linux
if: contains(matrix.arch, 'linux') && endsWith(matrix.arch, '-gnu')
run: |
@@ -73,13 +109,16 @@ jobs:
- name: Output package versions
run: protoc --version ; cargo version ; rustc --version ; gcc --version ; g++ --version
- name: Run tests
run: make unit-test integration-test sqlness-test
- name: Run cargo build
run: cargo build ${{ matrix.opts }} --release --locked --target ${{ matrix.arch }}
run: cargo build ${{ matrix.opts }} --profile ${{ env.CARGO_PROFILE }} --locked --target ${{ matrix.arch }}
- name: Calculate checksum and rename binary
shell: bash
run: |
cd target/${{ matrix.arch }}/release
cd target/${{ matrix.arch }}/${{ env.CARGO_PROFILE }}
chmod +x greptime
tar -zcvf ${{ matrix.file }}.tgz greptime
echo $(shasum -a 256 ${{ matrix.file }}.tgz | cut -f1 -d' ') > ${{ matrix.file }}.sha256sum
@@ -88,17 +127,18 @@ jobs:
uses: actions/upload-artifact@v3
with:
name: ${{ matrix.file }}
path: target/${{ matrix.arch }}/release/${{ matrix.file }}.tgz
path: target/${{ matrix.arch }}/${{ env.CARGO_PROFILE }}/${{ matrix.file }}.tgz
- name: Upload checksum of artifacts
uses: actions/upload-artifact@v3
with:
name: ${{ matrix.file }}.sha256sum
path: target/${{ matrix.arch }}/release/${{ matrix.file }}.sha256sum
path: target/${{ matrix.arch }}/${{ env.CARGO_PROFILE }}/${{ matrix.file }}.sha256sum
release:
name: Release artifacts
needs: [build]
runs-on: ubuntu-latest
if: github.repository == 'GreptimeTeam/greptimedb'
steps:
- name: Checkout sources
uses: actions/checkout@v3
@@ -106,8 +146,32 @@ jobs:
- name: Download artifacts
uses: actions/download-artifact@v3
- name: Configure scheduled build version # the version would be ${SCHEDULED_BUILD_VERSION_PREFIX}-YYYYMMDD-${SCHEDULED_PERIOD}, like v0.1.0-alpha-20221119-weekly.
shell: bash
if: github.event_name == 'schedule'
run: |
buildTime=`date "+%Y%m%d"`
SCHEDULED_BUILD_VERSION=${{ env.SCHEDULED_BUILD_VERSION_PREFIX }}-$buildTime-${{ env.SCHEDULED_PERIOD }}
echo "SCHEDULED_BUILD_VERSION=${SCHEDULED_BUILD_VERSION}" >> $GITHUB_ENV
- name: Create scheduled build git tag
if: github.event_name == 'schedule'
run: |
git tag ${{ env.SCHEDULED_BUILD_VERSION }}
- name: Publish scheduled release # configure the different release title and tags.
uses: softprops/action-gh-release@v1
if: github.event_name == 'schedule'
with:
name: "Release ${{ env.SCHEDULED_BUILD_VERSION }}"
tag_name: ${{ env.SCHEDULED_BUILD_VERSION }}
generate_release_notes: true
files: |
**/greptime-*
- name: Publish release
uses: softprops/action-gh-release@v1
if: github.event_name != 'schedule'
with:
name: "Release ${{ github.ref_name }}"
files: |
@@ -117,10 +181,45 @@ jobs:
name: Build docker image
needs: [build]
runs-on: ubuntu-latest
if: github.repository == 'GreptimeTeam/greptimedb'
steps:
- name: Checkout sources
uses: actions/checkout@v3
- name: Login to UCloud Container Registry
uses: docker/login-action@v2
with:
registry: uhub.service.ucloud.cn
username: ${{ secrets.UCLOUD_USERNAME }}
password: ${{ secrets.UCLOUD_PASSWORD }}
- name: Login to Dockerhub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Configure scheduled build image tag # the tag would be ${SCHEDULED_BUILD_VERSION_PREFIX}-YYYYMMDD-${SCHEDULED_PERIOD}
shell: bash
if: github.event_name == 'schedule'
run: |
buildTime=`date "+%Y%m%d"`
SCHEDULED_BUILD_VERSION=${{ env.SCHEDULED_BUILD_VERSION_PREFIX }}-$buildTime-${{ env.SCHEDULED_PERIOD }}
echo "IMAGE_TAG=${SCHEDULED_BUILD_VERSION:1}" >> $GITHUB_ENV
- name: Configure tag # If the release tag is v0.1.0, then the image version tag will be 0.1.0.
shell: bash
if: github.event_name != 'schedule'
run: |
VERSION=${{ github.ref_name }}
echo "IMAGE_TAG=${VERSION:1}" >> $GITHUB_ENV
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up buildx
uses: docker/setup-buildx-action@v2
- name: Download amd64 binary
uses: actions/download-artifact@v3
with:
@@ -128,50 +227,30 @@ jobs:
path: amd64
- name: Unzip the amd64 artifacts
id: unzip-amd64
run: |
cd amd64
tar xvf greptime-linux-amd64.tgz
rm greptime-linux-amd64.tgz
- name: Download arm64 binary
id: download-arm64
uses: actions/download-artifact@v3
with:
name: greptime-linux-arm64
path: arm64
- name: Unzip the arm64 artifacts
id: unzip-arm64
if: success() || steps.download-arm64.conclusion == 'success'
run: |
cd arm64
tar xvf greptime-linux-arm64.tgz
rm greptime-linux-arm64.tgz
- name: Login to GitHub Container Registry
uses: docker/login-action@v2
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Login to Dockerhub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Configure tag # If the release tag is v0.1.0, then the image version tag will be 0.1.0.
shell: bash
run: |
VERSION=${{ github.ref_name }}
echo "VERSION=${VERSION:1}" >> $GITHUB_ENV
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up buildx
uses: docker/setup-buildx-action@v2
- name: Build and push
- name: Build and push all
uses: docker/build-push-action@v3
if: success() || steps.unzip-arm64.conclusion == 'success' # Build and push all platform if unzip-arm64 succeeds
with:
context: .
file: ./docker/ci/Dockerfile
@@ -179,5 +258,20 @@ jobs:
platforms: linux/amd64,linux/arm64
tags: |
greptime/greptimedb:latest
greptime/greptimedb:${{ env.VERSION }}
ghcr.io/greptimeteam/greptimedb:${{ env.VERSION }}
greptime/greptimedb:${{ env.IMAGE_TAG }}
uhub.service.ucloud.cn/greptime/greptimedb:latest
uhub.service.ucloud.cn/greptime/greptimedb:${{ env.IMAGE_TAG }}
- name: Build and push amd64 only
uses: docker/build-push-action@v3
if: success() || steps.unzip-arm64.conclusion == 'failure' # Only build and push amd64 platform if unzip-arm64 fails
with:
context: .
file: ./docker/ci/Dockerfile
push: true
platforms: linux/amd64
tags: |
greptime/greptimedb:latest
greptime/greptimedb:${{ env.IMAGE_TAG }}
uhub.service.ucloud.cn/greptime/greptimedb:latest
uhub.service.ucloud.cn/greptime/greptimedb:${{ env.IMAGE_TAG }}

4
.gitignore vendored
View File

@@ -18,6 +18,7 @@ debug/
# JetBrains IDE config directory
.idea/
*.iml
# VSCode IDE config directory
.vscode/
@@ -31,3 +32,6 @@ logs/
# Benchmark dataset
benchmarks/data
# dotenv
.env

View File

@@ -5,11 +5,11 @@ repos:
- id: conventional-pre-commit
stages: [commit-msg]
- repo: https://github.com/DevinR528/cargo-sort
rev: e6a795bc6b2c0958f9ef52af4863bbd7cc17238f
hooks:
- id: cargo-sort
args: ["--workspace", "--print"]
# - repo: https://github.com/DevinR528/cargo-sort
# rev: e6a795bc6b2c0958f9ef52af4863bbd7cc17238f
# hooks:
# - id: cargo-sort
# args: ["--workspace"]
- repo: https://github.com/doublify/pre-commit-rust
rev: v1.0

View File

@@ -1,4 +1,4 @@
# Welcome!
# Welcome 👋
Thanks a lot for considering contributing to GreptimeDB. We believe people like you would make GreptimeDB a great product. We intend to build a community where individuals can have open talks, show respect for one another, and speak with true ❤️. Meanwhile, we aim to keep things transparent and make your effort count here.
@@ -50,34 +50,33 @@ GreptimeDB uses the [Apache 2.0 license](https://github.com/GreptimeTeam/greptim
- To ensure that the community is free and confident in its ability to use your contributions, please sign the Contributor License Agreement (CLA), which is incorporated in the pull request process.
- Make sure all your code is formatted and follows the [coding style](https://pingcap.github.io/style-guide/rust/).
- Make sure all unit tests are passed.
- Make sure all unit tests pass (run `cargo test --workspace`, or `cargo nextest run` with [nextest](https://nexte.st/index.html)).
- Make sure all clippy warnings are fixed (you can check it locally by running `cargo clippy --workspace --all-targets -- -D warnings -D clippy::print_stdout -D clippy::print_stderr`).
#### `pre-commit` Hooks
You can set up the [`pre-commit`](https://pre-commit.com/#plugins) hooks to run these checks automatically on every commit.
1. Install `pre-commit`
```
$ pip install pre-commit
```
or
```
$ brew install pre-commit
```
pip install pre-commit
or
brew install pre-commit
2. Install the `pre-commit` hooks
```
$ pre-commit install
pre-commit installed at .git/hooks/pre-commit
$ pre-commit install --hook-type commit-msg
pre-commit installed at .git/hooks/commit-msg
$ pre-commit install
pre-commit installed at .git/hooks/pre-commit
$ pre-commit install --hook-type pre-push
pre-commit installed at .git/hooks/pre-pus
```
$ pre-commit install --hook-type commit-msg
pre-commit installed at .git/hooks/commit-msg
now `pre-commit` will run automatically on `git commit`.
$ pre-commit install --hook-type pre-push
pre-commit installed at .git/hooks/pre-push
Now, `pre-commit` will run automatically on `git commit`.
### Title
@@ -102,10 +101,12 @@ of what you were trying to do and what went wrong. You can also reach for help i
## Community
The core team will be thrilled if you participate in any way you like. When you are stuck, try asking for help by filing an issue with a detailed description of what you were trying to do and what went wrong. If you have any questions or if you would like to get involved in our community, please check out:
- [GreptimeDB Community Slack](https://greptime.com/slack)
- [GreptimeDB Github Discussions](https://github.com/GreptimeTeam/greptimedb/discussions)
Also, see some extra GreptimeDB content:
- [GreptimeDB Docs](https://greptime.com/docs)
- [Learn GreptimeDB](https://greptime.com/products/db)
- [Greptime Inc. Website](https://greptime.com)

4410
Cargo.lock generated

File diff suppressed because it is too large

View File

@@ -11,12 +11,15 @@ members = [
"src/common/function",
"src/common/function-macro",
"src/common/grpc",
"src/common/grpc-expr",
"src/common/mem-prof",
"src/common/procedure",
"src/common/query",
"src/common/recordbatch",
"src/common/runtime",
"src/common/substrait",
"src/common/insert",
"src/common/telemetry",
"src/common/test-util",
"src/common/time",
"src/datanode",
"src/datatypes",
@@ -24,16 +27,63 @@ members = [
"src/log-store",
"src/meta-client",
"src/meta-srv",
"src/mito",
"src/object-store",
"src/partition",
"src/promql",
"src/query",
"src/script",
"src/servers",
"src/session",
"src/sql",
"src/storage",
"src/store-api",
"src/table",
"src/mito",
"src/table-procedure",
"tests-integration",
"tests/runner",
]
[workspace.package]
version = "0.1.0"
edition = "2021"
license = "Apache-2.0"
[workspace.dependencies]
arrow = { version = "33.0", features = ["pyarrow"] }
arrow-array = "33.0"
arrow-flight = "33.0"
arrow-schema = { version = "33.0", features = ["serde"] }
async-stream = "0.3"
async-trait = "0.1"
chrono = { version = "0.4", features = ["serde"] }
datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "fad360df0132a2fcb264a7c07b2b02f0b1dfc644" }
datafusion-common = { git = "https://github.com/apache/arrow-datafusion.git", rev = "fad360df0132a2fcb264a7c07b2b02f0b1dfc644" }
datafusion-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "fad360df0132a2fcb264a7c07b2b02f0b1dfc644" }
datafusion-optimizer = { git = "https://github.com/apache/arrow-datafusion.git", rev = "fad360df0132a2fcb264a7c07b2b02f0b1dfc644" }
datafusion-physical-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "fad360df0132a2fcb264a7c07b2b02f0b1dfc644" }
datafusion-sql = { git = "https://github.com/apache/arrow-datafusion.git", rev = "fad360df0132a2fcb264a7c07b2b02f0b1dfc644" }
futures = "0.3"
futures-util = "0.3"
parquet = "33.0"
paste = "1.0"
prost = "0.11"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
snafu = { version = "0.7", features = ["backtraces"] }
sqlparser = "0.30"
tempfile = "3"
tokio = { version = "1.24.2", features = ["full"] }
tokio-util = "0.7"
tonic = { version = "0.8", features = ["tls"] }
uuid = { version = "1", features = ["serde", "v4", "fast-rng"] }
[profile.release]
debug = true
[profile.weekly]
inherits = "release"
strip = true
lto = "thin"
debug = false
incremental = false

71
Makefile Normal file
View File

@@ -0,0 +1,71 @@
IMAGE_REGISTRY ?= greptimedb
IMAGE_TAG ?= latest
##@ Build
.PHONY: build
build: ## Build debug version greptime.
cargo build
.PHONY: release
release: ## Build release version greptime.
cargo build --release
.PHONY: clean
clean: ## Clean the project.
cargo clean
.PHONY: fmt
fmt: ## Format all the Rust code.
cargo fmt --all
.PHONY: fmt-toml
fmt-toml: ## Format all TOML files.
taplo format --check --option "indent_string= "
.PHONY: docker-image
docker-image: ## Build docker image.
docker build --network host -f docker/Dockerfile -t ${IMAGE_REGISTRY}:${IMAGE_TAG} .
##@ Test
.PHONY: unit-test
unit-test: ## Run unit test.
cargo test --workspace
.PHONY: integration-test
integration-test: ## Run integration test.
cargo test integration
.PHONY: sqlness-test
sqlness-test: ## Run sqlness test.
cargo sqlness
.PHONY: check
check: ## Cargo check all the targets.
cargo check --workspace --all-targets
.PHONY: clippy
clippy: ## Check clippy rules.
cargo clippy --workspace --all-targets -- -D warnings -D clippy::print_stdout -D clippy::print_stderr
.PHONY: fmt-check
fmt-check: ## Check code format.
cargo fmt --all -- --check
##@ General
# The help target prints out all targets with their descriptions organized
# beneath their categories. The categories are represented by '##@' and the
# target descriptions by '##'. The awk commands is responsible for reading the
# entire set of makefiles included in this invocation, looking for lines of the
# file as xyz: ## something, and then pretty-format the target and help. Then,
# if there's a line with ##@ something, that gets pretty-printed as a category.
# More info on the usage of ANSI control characters for terminal formatting:
# https://en.wikipedia.org/wiki/ANSI_escape_code#SGR_parameters
# More info on the awk command:
# https://linuxcommand.org/lc3_adv_awk.php
.PHONY: help
help: ## Display help messages.
@awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m<target>\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-20s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST)

View File

@@ -1,7 +1,12 @@
<p align="center">
<img src="/docs/logo-text-padding.png" alt="GreptimeDB Logo" width="400px"></img>
<picture>
<source media="(prefers-color-scheme: light)" srcset="/docs/logo-text-padding.png">
<source media="(prefers-color-scheme: dark)" srcset="/docs/logo-text-padding-dark.png">
<img alt="GreptimeDB Logo" src="/docs/logo-text-padding.png" width="400px">
</picture>
</p>
<h3 align="center">
The next-generation hybrid timeseries/analytics processing database in the cloud
</h3>
@@ -53,8 +58,15 @@ To compile GreptimeDB from source, you'll need:
install correct Rust version for you.
- Protobuf: `protoc` is required for compiling `.proto` files. `protobuf` is
available from major package managers on macOS and Linux distributions. You can
find an installation instructions
[here](https://grpc.io/docs/protoc-installation/).
find installation instructions [here](https://grpc.io/docs/protoc-installation/).
**Note that `protoc` version needs to be >= 3.15** because we have used the `optional`
keyword. You can check it with `protoc --version`.
- python3-dev or python3-devel (optional, only needed if you want to run scripts
in CPython): this installs a Python shared library required for running the Python
scripting engine (in CPython mode). It is available as `python3-dev` on
Ubuntu, where you can install it with `sudo apt install python3-dev`, or as
`python3-devel` on RPM-based distributions (e.g. Fedora, Red Hat, SUSE). Mac's
`Python3` package should have this shared library by default.
#### Build with Docker
@@ -114,7 +126,10 @@ about Kubernetes deployment, check our [docs](https://docs.greptime.com/).
4. Query the data:
```SQL
mysql> SELECT * FROM monitor;
SELECT * FROM monitor;
```
```TEXT
+-------+---------------------+------+--------+
| host | ts | cpu | memory |
+-------+---------------------+------+--------+
@@ -144,6 +159,9 @@ You can always cleanup test database by removing `/tmp/greptimedb`.
- GreptimeDB [Developer
Guide](https://docs.greptime.com/developer-guide/overview.html)
### Dashboard
- [The dashboard UI for GreptimeDB](https://github.com/GreptimeTeam/dashboard)
### SDK
- [GreptimeDB Java
@@ -156,9 +174,11 @@ break things. Benchmark on development branch may not represent its potential
performance. We release pre-built binaries constantly for functional
evaluation. Do not use it in production at the moment.
For future plans, check out [GreptimeDB roadmap](https://github.com/GreptimeTeam/greptimedb/issues/669).
## Community
Our core team is thrilled too see you participate in any ways you like. When you are stuck, try to
Our core team is thrilled to see you participate in any ways you like. When you are stuck, try to
ask for help by filing an issue with a detailed description of what you were trying to do
and what went wrong. If you have any questions or if you would like to get involved in our
community, please check out:
@@ -169,7 +189,7 @@ community, please check out:
In addition, you may:
- View our official [Blog](https://greptime.com/blog)
- View our official [Blog](https://greptime.com/blogs/index)
- Connect us with [Linkedin](https://www.linkedin.com/company/greptime/)
- Follow us on [Twitter](https://twitter.com/greptime)
@@ -189,3 +209,4 @@ Please refer to [contribution guidelines](CONTRIBUTING.md) for more information.
- GreptimeDB's query engine is powered by [Apache Arrow DataFusion](https://github.com/apache/arrow-datafusion).
- [OpenDAL](https://github.com/datafuselabs/opendal) from [Datafuse Labs](https://github.com/datafuselabs) gives GreptimeDB a very general and elegant data access abstraction layer.
- GreptimeDB's meta service is based on [etcd](https://etcd.io/).
- GreptimeDB uses [RustPython](https://github.com/RustPython/RustPython) for experimental embedded python scripting.

View File

@@ -1,14 +1,14 @@
[package]
name = "benchmarks"
version = "0.1.0"
edition = "2021"
license = "Apache-2.0"
version.workspace = true
edition.workspace = true
license.workspace = true
[dependencies]
arrow = "10"
arrow.workspace = true
clap = { version = "4.0", features = ["derive"] }
client = { path = "../src/client" }
indicatif = "0.17.1"
itertools = "0.10.5"
parquet = { version = "*" }
tokio = { version = "1.21", features = ["full"] }
parquet.workspace = true
tokio.workspace = true

View File

@@ -1,10 +1,10 @@
// Copyright 2022 Greptime Team
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -15,30 +15,23 @@
//! Use the taxi trip records from New York City dataset to bench. You can download the dataset from
//! [here](https://www1.nyc.gov/site/tlc/about/tlc-trip-record-data.page).
#![feature(once_cell)]
#![allow(clippy::print_stdout)]
use std::collections::HashMap;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::time::Instant;
use arrow::array::{ArrayRef, PrimitiveArray, StringArray, TimestampNanosecondArray};
use arrow::datatypes::{DataType, Float64Type, Int64Type};
use arrow::record_batch::RecordBatch;
use clap::Parser;
use client::admin::Admin;
use client::api::v1::codec::InsertBatch;
use client::api::v1::column::Values;
use client::api::v1::{insert_expr, Column, ColumnDataType, ColumnDef, CreateExpr, InsertExpr};
use client::{Client, Database, Select};
use client::api::v1::{Column, ColumnDataType, ColumnDef, CreateTableExpr, InsertRequest, TableId};
use client::{Client, Database, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use indicatif::{MultiProgress, ProgressBar, ProgressStyle};
use parquet::arrow::{ArrowReader, ParquetFileArrowReader};
use parquet::file::reader::FileReader;
use parquet::file::serialized_reader::SerializedFileReader;
use parquet::arrow::arrow_reader::ParquetRecordBatchReaderBuilder;
use tokio::task::JoinSet;
const DATABASE_NAME: &str = "greptime";
const CATALOG_NAME: &str = "greptime";
const SCHEMA_NAME: &str = "public";
const TABLE_NAME: &str = "nyc_taxi";
@@ -87,45 +80,42 @@ async fn write_data(
pb_style: ProgressStyle,
) -> u128 {
let file = std::fs::File::open(&path).unwrap();
let file_reader = Arc::new(SerializedFileReader::new(file).unwrap());
let row_num = file_reader.metadata().file_metadata().num_rows();
let record_batch_reader = ParquetFileArrowReader::new(file_reader)
.get_record_reader(batch_size)
let record_batch_reader_builder = ParquetRecordBatchReaderBuilder::try_new(file).unwrap();
let row_num = record_batch_reader_builder
.metadata()
.file_metadata()
.num_rows();
let record_batch_reader = record_batch_reader_builder
.with_batch_size(batch_size)
.build()
.unwrap();
let progress_bar = mpb.add(ProgressBar::new(row_num as _));
progress_bar.set_style(pb_style);
progress_bar.set_message(format!("{:?}", path));
progress_bar.set_message(format!("{path:?}"));
let mut total_rpc_elapsed_ms = 0;
for record_batch in record_batch_reader {
let record_batch = record_batch.unwrap();
let row_count = record_batch.num_rows();
let insert_batch = convert_record_batch(record_batch).into();
let insert_expr = InsertExpr {
schema_name: "public".to_string(),
let (columns, row_count) = convert_record_batch(record_batch);
let request = InsertRequest {
table_name: TABLE_NAME.to_string(),
expr: Some(insert_expr::Expr::Values(insert_expr::Values {
values: vec![insert_batch],
})),
options: HashMap::default(),
region_number: 0,
columns,
row_count,
};
let now = Instant::now();
db.insert(insert_expr).await.unwrap();
db.insert(request).await.unwrap();
let elapsed = now.elapsed();
total_rpc_elapsed_ms += elapsed.as_millis();
progress_bar.inc(row_count as _);
}
progress_bar.finish_with_message(format!(
"file {:?} done in {}ms",
path, total_rpc_elapsed_ms
));
progress_bar.finish_with_message(format!("file {path:?} done in {total_rpc_elapsed_ms}ms",));
total_rpc_elapsed_ms
}
fn convert_record_batch(record_batch: RecordBatch) -> InsertBatch {
fn convert_record_batch(record_batch: RecordBatch) -> (Vec<Column>, u32) {
let schema = record_batch.schema();
let fields = schema.fields();
let row_count = record_batch.num_rows();
@@ -143,10 +133,7 @@ fn convert_record_batch(record_batch: RecordBatch) -> InsertBatch {
columns.push(column);
}
InsertBatch {
columns,
row_count: row_count as _,
}
(columns, row_count as _)
}
fn build_values(column: &ArrayRef) -> Values {
@@ -217,133 +204,135 @@ fn build_values(column: &ArrayRef) -> Values {
| DataType::FixedSizeList(_, _)
| DataType::LargeList(_)
| DataType::Struct(_)
| DataType::Union(_, _)
| DataType::Union(_, _, _)
| DataType::Dictionary(_, _)
| DataType::Decimal(_, _)
| DataType::Decimal128(_, _)
| DataType::Decimal256(_, _)
| DataType::RunEndEncoded(_, _)
| DataType::Map(_, _) => todo!(),
}
}
fn create_table_expr() -> CreateExpr {
CreateExpr {
catalog_name: Some(CATALOG_NAME.to_string()),
schema_name: Some(SCHEMA_NAME.to_string()),
fn create_table_expr() -> CreateTableExpr {
CreateTableExpr {
catalog_name: CATALOG_NAME.to_string(),
schema_name: SCHEMA_NAME.to_string(),
table_name: TABLE_NAME.to_string(),
desc: None,
desc: "".to_string(),
column_defs: vec![
ColumnDef {
name: "VendorID".to_string(),
datatype: ColumnDataType::Int64 as i32,
is_nullable: true,
default_constraint: None,
default_constraint: vec![],
},
ColumnDef {
name: "tpep_pickup_datetime".to_string(),
datatype: ColumnDataType::Int64 as i32,
is_nullable: true,
default_constraint: None,
default_constraint: vec![],
},
ColumnDef {
name: "tpep_dropoff_datetime".to_string(),
datatype: ColumnDataType::Int64 as i32,
is_nullable: true,
default_constraint: None,
default_constraint: vec![],
},
ColumnDef {
name: "passenger_count".to_string(),
datatype: ColumnDataType::Float64 as i32,
is_nullable: true,
default_constraint: None,
default_constraint: vec![],
},
ColumnDef {
name: "trip_distance".to_string(),
datatype: ColumnDataType::Float64 as i32,
is_nullable: true,
default_constraint: None,
default_constraint: vec![],
},
ColumnDef {
name: "RatecodeID".to_string(),
datatype: ColumnDataType::Float64 as i32,
is_nullable: true,
default_constraint: None,
default_constraint: vec![],
},
ColumnDef {
name: "store_and_fwd_flag".to_string(),
datatype: ColumnDataType::String as i32,
is_nullable: true,
default_constraint: None,
default_constraint: vec![],
},
ColumnDef {
name: "PULocationID".to_string(),
datatype: ColumnDataType::Int64 as i32,
is_nullable: true,
default_constraint: None,
default_constraint: vec![],
},
ColumnDef {
name: "DOLocationID".to_string(),
datatype: ColumnDataType::Int64 as i32,
is_nullable: true,
default_constraint: None,
default_constraint: vec![],
},
ColumnDef {
name: "payment_type".to_string(),
datatype: ColumnDataType::Int64 as i32,
is_nullable: true,
default_constraint: None,
default_constraint: vec![],
},
ColumnDef {
name: "fare_amount".to_string(),
datatype: ColumnDataType::Float64 as i32,
is_nullable: true,
default_constraint: None,
default_constraint: vec![],
},
ColumnDef {
name: "extra".to_string(),
datatype: ColumnDataType::Float64 as i32,
is_nullable: true,
default_constraint: None,
default_constraint: vec![],
},
ColumnDef {
name: "mta_tax".to_string(),
datatype: ColumnDataType::Float64 as i32,
is_nullable: true,
default_constraint: None,
default_constraint: vec![],
},
ColumnDef {
name: "tip_amount".to_string(),
datatype: ColumnDataType::Float64 as i32,
is_nullable: true,
default_constraint: None,
default_constraint: vec![],
},
ColumnDef {
name: "tolls_amount".to_string(),
datatype: ColumnDataType::Float64 as i32,
is_nullable: true,
default_constraint: None,
default_constraint: vec![],
},
ColumnDef {
name: "improvement_surcharge".to_string(),
datatype: ColumnDataType::Float64 as i32,
is_nullable: true,
default_constraint: None,
default_constraint: vec![],
},
ColumnDef {
name: "total_amount".to_string(),
datatype: ColumnDataType::Float64 as i32,
is_nullable: true,
default_constraint: None,
default_constraint: vec![],
},
ColumnDef {
name: "congestion_surcharge".to_string(),
datatype: ColumnDataType::Float64 as i32,
is_nullable: true,
default_constraint: None,
default_constraint: vec![],
},
ColumnDef {
name: "airport_fee".to_string(),
datatype: ColumnDataType::Float64 as i32,
is_nullable: true,
default_constraint: None,
default_constraint: vec![],
},
],
time_index: "tpep_pickup_datetime".to_string(),
@@ -351,7 +340,7 @@ fn create_table_expr() -> CreateExpr {
create_if_not_exists: false,
table_options: Default::default(),
region_ids: vec![0],
table_id: Some(0),
table_id: Some(TableId { id: 0 }),
}
}
@@ -360,25 +349,23 @@ fn query_set() -> HashMap<String, String> {
ret.insert(
"count_all".to_string(),
format!("SELECT COUNT(*) FROM {};", TABLE_NAME),
format!("SELECT COUNT(*) FROM {TABLE_NAME};"),
);
ret.insert(
"fare_amt_by_passenger".to_string(),
format!("SELECT passenger_count, MIN(fare_amount), MAX(fare_amount), SUM(fare_amount) FROM {} GROUP BY passenger_count",TABLE_NAME)
format!("SELECT passenger_count, MIN(fare_amount), MAX(fare_amount), SUM(fare_amount) FROM {TABLE_NAME} GROUP BY passenger_count")
);
ret
}
async fn do_write(args: &Args, client: &Client) {
let admin = Admin::new("admin", client.clone());
async fn do_write(args: &Args, db: &Database) {
let mut file_list = get_file_list(args.path.clone().expect("Specify data path in argument"));
let mut write_jobs = JoinSet::new();
let create_table_result = admin.create(create_table_expr()).await;
println!("Create table result: {:?}", create_table_result);
let create_table_result = db.create(create_table_expr()).await;
println!("Create table result: {create_table_result:?}");
let progress_bar_style = ProgressStyle::with_template(
"[{elapsed_precise}] {bar:60.cyan/blue} {pos:>7}/{len:7} {msg}",
@@ -392,7 +379,7 @@ async fn do_write(args: &Args, client: &Client) {
let batch_size = args.batch_size;
for _ in 0..args.thread_num {
if let Some(path) = file_list.pop() {
let db = Database::new(DATABASE_NAME, client.clone());
let db = db.clone();
let mpb = multi_progress_bar.clone();
let pb_style = progress_bar_style.clone();
write_jobs.spawn(async move { write_data(batch_size, &db, path, mpb, pb_style).await });
@@ -401,7 +388,7 @@ async fn do_write(args: &Args, client: &Client) {
while write_jobs.join_next().await.is_some() {
file_progress.inc(1);
if let Some(path) = file_list.pop() {
let db = Database::new(DATABASE_NAME, client.clone());
let db = db.clone();
let mpb = multi_progress_bar.clone();
let pb_style = progress_bar_style.clone();
write_jobs.spawn(async move { write_data(batch_size, &db, path, mpb, pb_style).await });
@@ -411,10 +398,10 @@ async fn do_write(args: &Args, client: &Client) {
async fn do_query(num_iter: usize, db: &Database) {
for (query_name, query) in query_set() {
println!("Running query: {}", query);
println!("Running query: {query}");
for i in 0..num_iter {
let now = Instant::now();
let _res = db.select(Select::Sql(query.clone())).await.unwrap();
let _res = db.sql(&query).await.unwrap();
let elapsed = now.elapsed();
println!(
"query {}, iteration {}: {}ms",
@@ -436,13 +423,13 @@ fn main() {
.unwrap()
.block_on(async {
let client = Client::with_urls(vec![&args.endpoint]);
let db = Database::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, client);
if !args.skip_write {
do_write(&args, &client).await;
do_write(&args, &db).await;
}
if !args.skip_read {
let db = Database::new(DATABASE_NAME, client.clone());
do_query(args.iter_num, &db).await;
}
})

View File

@@ -7,3 +7,6 @@ coverage:
patch: off
ignore:
- "**/error*.rs" # ignore all error.rs files
- "tests/runner/*.rs" # ignore integration test runner
comment: # this is a top-level key
layout: "diff"

View File

@@ -1,17 +1,52 @@
# Node running mode, see `standalone.example.toml`.
mode = "distributed"
# Whether to use in-memory catalog, see `standalone.example.toml`.
enable_memory_catalog = false
# The datanode identifier, should be unique.
node_id = 42
mode = 'distributed'
rpc_addr = '127.0.0.1:3001'
wal_dir = '/tmp/greptimedb/wal'
# gRPC server address, "127.0.0.1:3001" by default.
rpc_addr = "127.0.0.1:3001"
# Hostname of this node.
rpc_hostname = "127.0.0.1"
# The number of gRPC server worker threads, 8 by default.
rpc_runtime_size = 8
mysql_addr = '127.0.0.1:3306'
mysql_runtime_size = 4
# MySQL server address, "127.0.0.1:4406" by default.
mysql_addr = "127.0.0.1:4406"
# The number of MySQL server worker threads, 2 by default.
mysql_runtime_size = 2
[storage]
type = 'File'
data_dir = '/tmp/greptimedb/data/'
[meta_client_opts]
metasrv_addr = '1.1.1.1:3002'
# Metasrv client options.
[meta_client_options]
# Metasrv address list.
metasrv_addrs = ["127.0.0.1:3002"]
# Operation timeout in milliseconds, 3000 by default.
timeout_millis = 3000
# Connect server timeout in milliseconds, 5000 by default.
connect_timeout_millis = 5000
tcp_nodelay = false
# `TCP_NODELAY` option for accepted connections, true by default.
tcp_nodelay = true
# WAL options, see `standalone.example.toml`.
[wal]
dir = "/tmp/greptimedb/wal"
file_size = "1GB"
purge_threshold = "50GB"
purge_interval = "10m"
read_batch_size = 128
sync_write = false
# Storage options, see `standalone.example.toml`.
[storage]
type = "File"
data_dir = "/tmp/greptimedb/data/"
# Compaction options, see `standalone.example.toml`.
[compaction]
max_inflight_tasks = 4
max_files_in_level0 = 8
max_purge_tasks = 32
# Procedure storage options, see `standalone.example.toml`.
# [procedure.store]
# type = 'File'
# data_dir = '/tmp/greptimedb/procedure/'

View File

@@ -1,9 +1,58 @@
mode = 'distributed'
datanode_rpc_addr = '127.0.0.1:3001'
http_addr = '127.0.0.1:4000'
# Node running mode, see `standalone.example.toml`.
mode = "distributed"
[meta_client_opts]
metasrv_addr = '1.1.1.1:3002'
# HTTP server options, see `standalone.example.toml`.
[http_options]
addr = "127.0.0.1:4000"
timeout = "30s"
# gRPC server options, see `standalone.example.toml`.
[grpc_options]
addr = "127.0.0.1:4001"
runtime_size = 8
# MySQL server options, see `standalone.example.toml`.
[mysql_options]
addr = "127.0.0.1:4002"
runtime_size = 2
# MySQL server TLS options, see `standalone.example.toml`.
[mysql_options.tls]
mode = "disable"
cert_path = ""
key_path = ""
# PostgreSQL server options, see `standalone.example.toml`.
[postgres_options]
addr = "127.0.0.1:4003"
runtime_size = 2
# PostgreSQL server TLS options, see `standalone.example.toml`.
[postgres_options.tls]
mode = "disable"
cert_path = ""
key_path = ""
# OpenTSDB protocol options, see `standalone.example.toml`.
[opentsdb_options]
addr = "127.0.0.1:4242"
runtime_size = 2
# InfluxDB protocol options, see `standalone.example.toml`.
[influxdb_options]
enable = true
# Prometheus protocol options, see `standalone.example.toml`.
[prometheus_options]
enable = true
# Prom protocol options, see `standalone.example.toml`.
[prom_options]
addr = "127.0.0.1:4004"
# Metasrv client options, see `datanode.example.toml`.
[meta_client_options]
metasrv_addrs = ["127.0.0.1:3002"]
timeout_millis = 3000
connect_timeout_millis = 5000
tcp_nodelay = false
tcp_nodelay = true

View File

@@ -1,4 +1,15 @@
bind_addr = '127.0.0.1:3002'
server_addr = '127.0.0.1:3002'
store_addr = '127.0.0.1:2379'
# The bind address of metasrv, "127.0.0.1:3002" by default.
bind_addr = "127.0.0.1:3002"
# The communication server address for frontend and datanode to connect to metasrv, "127.0.0.1:3002" by default for localhost.
server_addr = "127.0.0.1:3002"
# Etcd server address, "127.0.0.1:2379" by default.
store_addr = "127.0.0.1:2379"
# Datanode lease in seconds, 15 seconds by default.
datanode_lease_secs = 15
# Datanode selector type.
# - "LeaseBased" (default value).
# - "LoadBased"
# For details, please see "https://docs.greptime.com/developer-guide/meta/selector".
selector = "LeaseBased"
# Store data in memory, false by default.
use_memory_store = false

View File

@@ -1,34 +1,116 @@
node_id = 0
mode = 'standalone'
http_addr = '127.0.0.1:4000'
datanode_mysql_addr = '127.0.0.1:3306'
datanode_mysql_runtime_size = 4
wal_dir = '/tmp/greptimedb/wal/'
# Node running mode, "standalone" or "distributed".
mode = "standalone"
# Whether to use in-memory catalog, `false` by default.
enable_memory_catalog = false
[storage]
type = 'File'
data_dir = '/tmp/greptimedb/data/'
# HTTP server options.
[http_options]
# Server address, "127.0.0.1:4000" by default.
addr = "127.0.0.1:4000"
# HTTP request timeout, 30s by default.
timeout = "30s"
# gRPC server options.
[grpc_options]
addr = '127.0.0.1:4001'
# Server address, "127.0.0.1:4001" by default.
addr = "127.0.0.1:4001"
# The number of server worker threads, 8 by default.
runtime_size = 8
# MySQL server options.
[mysql_options]
addr = '127.0.0.1:4002'
# Server address, "127.0.0.1:4002" by default.
addr = "127.0.0.1:4002"
# The number of server worker threads, 2 by default.
runtime_size = 2
[influxdb_options]
enable = true
[opentsdb_options]
addr = '127.0.0.1:4242'
enable = true
runtime_size = 2
[prometheus_options]
enable = true
# MySQL server TLS options.
[mysql_options.tls]
# TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html
# - "disable" (default value)
# - "prefer"
# - "require"
# - "verify-ca"
# - "verify-full"
mode = "disable"
# Certificate file path.
cert_path = ""
# Private key file path.
key_path = ""
# PostgreSQL server options.
[postgres_options]
addr = '127.0.0.1:4003'
# Server address, "127.0.0.1:4003" by default.
addr = "127.0.0.1:4003"
# The number of server worker threads, 2 by default.
runtime_size = 2
check_pwd = false
# PostgreSQL server TLS options, see `[mysql_options.tls]` section.
[postgres_options.tls]
# TLS mode.
mode = "disable"
# certificate file path.
cert_path = ""
# private key file path.
key_path = ""
# OpenTSDB protocol options.
[opentsdb_options]
# OpenTSDB telnet API server address, "127.0.0.1:4242" by default.
addr = "127.0.0.1:4242"
# The number of server worker threads, 2 by default.
runtime_size = 2
# InfluxDB protocol options.
[influxdb_options]
# Whether to enable InfluxDB protocol in HTTP API, true by default.
enable = true
# Prometheus protocol options.
[prometheus_options]
# Whether to enable Prometheus remote write and read in HTTP API, true by default.
enable = true
# Prom protocol options.
[prom_options]
# Prometheus API server address, "127.0.0.1:4004" by default.
addr = "127.0.0.1:4004"
# WAL options.
[wal]
# WAL data directory.
dir = "/tmp/greptimedb/wal"
# WAL file size in bytes.
file_size = "1GB"
# WAL purge threshold in bytes.
purge_threshold = "50GB"
# WAL purge interval in seconds.
purge_interval = "10m"
# WAL read batch size.
read_batch_size = 128
# Whether to sync log file after every write.
sync_write = false
# Storage options.
[storage]
# Storage type.
type = "File"
# Data directory, "/tmp/greptimedb/data" by default.
data_dir = "/tmp/greptimedb/data/"
# Compaction options.
[compaction]
# Max task number that can concurrently run.
max_inflight_tasks = 4
# Max files in level 0 to trigger compaction.
max_files_in_level0 = 8
# Max task number for SST purge task after compaction.
max_purge_tasks = 32
# Procedure storage options.
# Uncomment to enable.
# [procedure.store]
# # Storage type.
# type = "File"
# # Procedure data path.
# data_dir = "/tmp/greptimedb/procedure/"

View File

@@ -9,7 +9,10 @@ RUN apt-get update && apt-get install -y \
protobuf-compiler \
curl \
build-essential \
pkg-config
pkg-config \
python3 \
python3-dev \
&& pip install pyarrow
# Install Rust.
SHELL ["/bin/bash", "-c"]
@@ -24,6 +27,8 @@ RUN cargo build --release
# TODO(zyy17): Maybe should use the more secure container image.
FROM ubuntu:22.04 as base
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get -y install ca-certificates
WORKDIR /greptime
COPY --from=builder /greptimedb/target/release/greptime /greptime/bin/
ENV PATH /greptime/bin/:$PATH

View File

@@ -1,5 +1,7 @@
FROM ubuntu:22.04
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get -y install ca-certificates
ARG TARGETARCH
ADD $TARGETARCH/greptime /greptime/bin/

View File

@@ -55,7 +55,7 @@ The DataFusion basically execute aggregate like this:
2. Call `update_batch` on each accumulator with partitioned data, to let you update your aggregate calculation.
3. Call `state` to get each accumulator's internal state, the intermediate calculation result.
4. Call `merge_batch` to merge all accumulators' internal states into one.
5. Execute `evalute` on the chosen one to get the final calculation result.
5. Execute `evaluate` on the chosen one to get the final calculation result.
Once you know the meaning of each method, you can easily write your accumulator. You can refer to the `Median` accumulator or the `SUM` accumulator defined in the file `my_sum_udaf_example.rs` for more details.
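To make the five steps concrete, here is a minimal, self-contained sketch of that lifecycle. The trait and types are illustrative stand-ins, not DataFusion's actual `Accumulator` API: plain `f64` slices replace Arrow arrays, and the method set simply mirrors the steps named above.

```rust
/// Illustrative only: mirrors the lifecycle described above, not DataFusion's API.
trait SimpleAccumulator {
    /// Step 2: fold one partition of input values into the running state.
    fn update_batch(&mut self, values: &[f64]);
    /// Step 3: expose the internal state, the intermediate calculation result.
    fn state(&self) -> Vec<f64>;
    /// Step 4: merge other accumulators' internal states into this one.
    fn merge_batch(&mut self, states: &[Vec<f64>]);
    /// Step 5: produce the final result.
    fn evaluate(&self) -> f64;
}

struct SumAccumulator {
    sum: f64,
}

impl SimpleAccumulator for SumAccumulator {
    fn update_batch(&mut self, values: &[f64]) {
        self.sum += values.iter().sum::<f64>();
    }
    fn state(&self) -> Vec<f64> {
        vec![self.sum]
    }
    fn merge_batch(&mut self, states: &[Vec<f64>]) {
        for s in states {
            self.sum += s[0];
        }
    }
    fn evaluate(&self) -> f64 {
        self.sum
    }
}

fn main() {
    // Two accumulators each see one partition; one then absorbs the other's state.
    let mut a = SumAccumulator { sum: 0.0 };
    let mut b = SumAccumulator { sum: 0.0 };
    a.update_batch(&[1.0, 2.0]);
    b.update_batch(&[3.0, 4.0]);
    let partial = b.state();
    a.merge_batch(&[partial]);
    assert_eq!(a.evaluate(), 10.0);
}
```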
@@ -63,7 +63,7 @@ Once you know the meaning of each method, you can easily write your accumulator.
You can call the `register_aggregate_function` method in the query engine to register your aggregate function. To do that, you have to create an instance of the struct `AggregateFunctionMeta`. The struct has three fields; the first is your aggregate function's name. The function name is case-sensitive due to DataFusion's restriction. We strongly recommend using lowercase for the name. If you have to use an uppercase name, wrap your aggregate function with quotation marks. For example, if you define an aggregate function named "my_aggr", you can use "`SELECT MY_AGGR(x)`"; if you define "my_AGGR", you have to use "`SELECT "my_AGGR"(x)`".
The second field is arg_counts ,the count of the arguments. Like accumulator `percentile`, caculating the p_number of the column. We need to input the value of column and the value of p to cacalate, and so the count of the arguments is two.
The second field is `arg_counts`, the count of the arguments. Take the accumulator `percentile`, which calculates the p-number of the column: we need to input the value of the column and the value of p to calculate, so the count of the arguments is two.
The third field is a function that creates the accumulator creator you defined in step 1 above. A creator of creators is a bit intertwined, but it is how we make DataFusion use a newly created aggregate function each time it executes a SQL statement, preventing the stored input types from affecting each other. A good place to start for the key details is the `get_aggregate_meta` method of our `DfContextProviderAdapter` struct.
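A rough sketch of that "creator" idea, with hypothetical field names and a trivial `Vec<f64>` standing in for a real accumulator (the actual struct's fields and types live in the repository):

```rust
/// Hypothetical, simplified mirror of the three fields described above.
struct AggregateFunctionMeta {
    /// Case-sensitive function name; lowercase is recommended.
    name: &'static str,
    /// Number of arguments, e.g. 2 for percentile(column, p).
    arg_count: usize,
    /// Factory invoked once per SQL execution, so every query gets a fresh
    /// accumulator and stored input types never leak between executions.
    create_accumulator: fn() -> Vec<f64>,
}

fn main() {
    let meta = AggregateFunctionMeta {
        name: "my_aggr",
        arg_count: 1,
        create_accumulator: Vec::new, // stand-in: real code returns an accumulator
    };
    // Two executions, two independent states.
    let state_a = (meta.create_accumulator)();
    let state_b = (meta.create_accumulator)();
    assert!(state_a.is_empty() && state_b.is_empty());
    println!("registered {} with {} argument(s)", meta.name, meta.arg_count);
}
```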

Five binary image files added (not shown): 25 KiB, 34 KiB, 58 KiB, 35 KiB and 46 KiB.
View File

@@ -0,0 +1,175 @@
---
Feature Name: "promql-in-rust"
Tracking Issue: https://github.com/GreptimeTeam/greptimedb/issues/596
Date: 2022-12-20
Author: "Ruihang Xia <waynestxia@gmail.com>"
---
Rewrite PromQL in Rust
----------------------
# Summary
A Rust native implementation of PromQL, for GreptimeDB.
# Motivation
Prometheus and its query language PromQL prevail in the cloud-native observability area, which is an important scenario for time series databases like GreptimeDB. We already support its remote read and write protocols, so users can integrate GreptimeDB as the storage backend of an existing Prometheus deployment, but they cannot run PromQL queries directly on GreptimeDB the way they run SQL.
This RFC proposes to add support for PromQL. Because it was created in Go, we can't reuse the existing code easily. For interoperability, performance and extensibility, porting its logic to Rust is a good choice.
# Details
## Overview
One of the goals is to make use of our existing basic operators, execution model and runtime to reduce the work, so the entire proposal is built on top of Apache Arrow DataFusion. The rewritten PromQL logic is manifested as `Expr` or `Execution Plan` in DataFusion, and both the intermediate data structures and the results are in the format of Arrow's `RecordBatch`.
The following sections are organized in a top-down manner: they start with the evaluation procedure, then introduce the building blocks of the new PromQL operations, follow with an explanation of the data model, and end with an example logical plan.
*This RFC is heavily related to Prometheus and PromQL. It won't repeat some basic concepts of them.*
## Evaluation
The original implementation is like an interpreter of the parsed PromQL AST. It has two characteristics: (1) Operations are evaluated in place after they are parsed into the AST, and some key parameters are kept separate from the AST because they do not appear in the query but come from other places, like another field in the HTTP payload. (2) Calculation is performed per timestamp. You can see this pattern many times:
```go
for ts := ev.startTimestamp; ts <= ev.endTimestamp; ts += ev.interval {}
```
These bring out two differences in the proposed implementation. First, to make it more general and clear, the evaluation procedure is reorganized into several phases (the same as DataFusion's). Second, data is evaluated by time series (corresponding to "columnar calculation", if one thinks of the timestamp as the row number).
```
                              Logic
    Query            AST       Plan
  ─────────► Parser ───────► Logical ────────► Physical ────┐
                             Planner           Planner      │
  ◄───────────────────────────── Executor ◄─────────────────┘
   Evaluation Result                        Execution Plan
```
- Parser
Provided by [`promql-parser`](https://github.com/GreptimeTeam/promql-parser) crate. Same as the original implementation.
- Logical Planner
Generates a logical plan with all the needed parameters. It should accept something like `EvalStmt` in the Go implementation, which contains the query time range, evaluation interval and lookback range.
Another important thing done here is assembling the logic plan, with all the operations baked in logically: what the filter and time range to read are, how the data then flows through a selector into a binary operation, what the output schema of every single step is, and so on. The generated logic plan is deterministic, without variables, and can be `EXPLAIN`ed clearly.
- Physical Planner
This step converts a logic plan into an evaluatable execution plan. There is not much special here compared to the previous step, except when a query is going to be executed in a distributed manner. In that case, a logic plan is divided into several parts and sent to several nodes; one physical planner only sees its own part.
- Executor
As its name shows, this step calculates the data into the result. All the new calculation logic, the implementation of PromQL in Rust, is placed here, and the rewritten functions use `RecordBatch` and `Array` from Arrow as the intermediate data structures.
Each "batch" contains only data from a single time series. This comes from the underlying storage implementation. Though it's not a requirement of this RFC, having this property simplifies some functions.
Another thing to mention is that the rewritten functions are not aware of timestamp or value columns; they are defined only based on the input data types. For example, the `increase()` function in PromQL calculates the unbiased delta of data, and its implementation here does only this single thing. Let's compare the signatures of the two implementations:
- Go
```go
func funcIncrease(vals []parser.Value, args parser.Expressions) Vector {}
```
- Rust
```rust
fn prom_increase(input: Array) -> Array {}
```
Some unimportant parameters are omitted. The original Go version only implements the logic for a `Point`'s value, either float or histogram, but the proposed rewrite accepts a generic `Array` as input, which can be any type that suits, from `i8` to `u64` to `TimestampNanosecond`.
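As an illustration of this columnar style, here is a minimal sketch of an `increase`-like function over a plain Arrow array, using the `arrow` crate. It handles one range at a time, assumes non-null float values, and omits the extrapolation logic; it is not the proposed implementation itself:

```rust
use arrow::array::Float64Array;

// Computes the increase of a counter over one range: last minus first,
// with the value lost at each counter reset added back. Nulls and
// extrapolation are intentionally ignored in this sketch.
fn prom_increase(input: &Float64Array) -> Option<f64> {
    if input.len() < 2 {
        return None;
    }
    let mut reset_compensation = 0.0;
    for i in 1..input.len() {
        let (prev, curr) = (input.value(i - 1), input.value(i));
        if curr < prev {
            // A counter reset: the counter restarted from (near) zero.
            reset_compensation += prev;
        }
    }
    Some(input.value(input.len() - 1) - input.value(0) + reset_compensation)
}

fn main() {
    let range = Float64Array::from(vec![1.0, 5.0, 2.0, 8.0]); // reset after 5.0
    assert_eq!(prom_increase(&range), Some(12.0)); // 8 - 1 + 5
}
```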
## Plan and Expression
They are the structures that express logic from PromQL. The proposed implementation is built on top of DataFusion, thus our plans and expressions take the form of `ExtensionPlan` and `ScalarUDF`. The only difference between them in this context is the return type: a plan returns a record batch while an expression returns a single column.
This RFC proposes to add four new plans. They are fundamental building blocks that mainly handle the data selection logic in PromQL, for the calculation expressions that follow.
- `SeriesNormalize`
Sorts data inside one series on the timestamp column, and applies the "offset" bias if there is one. This plan usually comes after the `TableScan` (or `TableScan` plus `Filter`) plan.
- `VectorManipulator` and `MatrixManipulator`
Corresponding to `InstantSelector` and `RangeSelector`. We don't calculate timestamp by timestamp, thus we use "vector" instead of "instant"; this image shows the difference. And "matrix" is another name for "range vector", chosen so it isn't confused with our "vector". The following section details how they are implemented using Arrow.
![instant_and_vector](instant-and-vector.png)
Due to "interval" parameter in PromQL, data after "selector" (or "manipulator" here) are usually shorter than input. And we have to modify the entire record batch to shorten both timestamp, value and tag columns. So they are formed as plan.
- `PromAggregator`
The carrier of aggregator expressions. This should not be very different from the DataFusion built-in `Aggregate` plan, except that PromQL can use "group without" to do a reverse selection.
PromQL has around 70 expressions and functions, but luckily we can reuse lots of them from DataFusion, like unary expressions, binary expressions and aggregators. We only need to implement the PromQL-specific expressions, like `rate` or `percentile`. The following table lists some typical functions in PromQL and their signatures in the proposed implementation. Other functions should be the same.
| Name | In Param(s) | Out Param(s) | Explain |
|------|-------------|--------------|---------|
| instant_delta | Matrix T | Array T | idelta in PromQL |
| increase | Matrix T | Array T | increase in PromQL |
| extrapolate_factor | - Matrix T<br>- Array Timestamp<br>- Array Timestamp | Array T | * |
*: *`extrapolate_factor` is one of the "dark sides" of PromQL. In short, it's a translation of this [paragraph](https://github.com/prometheus/prometheus/blob/0372e259baf014bbade3134fd79bcdfd8cbdef2c/promql/functions.go#L134-L159).*
To reuse this common calculation logic, we can break it into several expressions and assemble them in the logic planning phase. For example, `rate()` in PromQL can be represented as `increase / extrapolate_factor`.
## Data Model
This part explains how data is represented. Following the data model in GreptimeDB, all data is stored as tables, with tag columns, a timestamp column and a value column. Mapping a table to a record batch is very straightforward, so an instant vector can be thought of as a row in the table (though, as said before, we don't use instant vectors). Given the four basic types in PromQL (scalar, string, instant vector and range vector), only the last one, "range vector", needs some tricks to adapt to our columnar calculation.
A range vector is some sort of matrix: it consists of small one-dimensional vectors, each being an input to the range function. Applying a range function to a range vector can thus be thought of as a kind of convolution.
![range-vector-with-matrix](range-vector-with-matrix.png)
(The left side is an illustration of a range vector. Note the Y-axis has no meaning; it just places the different pieces separately. The right side is an imagined "matrix" acting as the range function. Multiplying the left side by it yields a one-dimensional "matrix" with four elements, which is the evaluation result of the range vector.)
To adapt the range vector to a record batch, it should be represented by a column. This RFC proposes to use `DictionaryArray` from Arrow to represent a range vector, or `Matrix`. This is "misusing" `DictionaryArray` to ship some additional information about an array. Because the range vector slides over one series, we only need to know the `offset` and `length` of each slide to reconstruct the matrix from an array:
![matrix-from-array](matrix-from-array.png)
The length is not fixed; it depends on the input's timestamps. A PoC implementation of `Matrix` and `increase()` can be found in [this repo](https://github.com/waynexia/corroding-prometheus).
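To illustrate the idea without the `DictionaryArray` trick, here is a self-contained sketch in which a plain slice plus per-window `(offset, length)` pairs stand in for the `Matrix`:

```rust
/// A minimal stand-in for the "Matrix": a flat array of one series plus
/// the (offset, length) of every slide of the range vector. The real PoC
/// carries this metadata through Arrow's DictionaryArray instead.
struct Matrix<'a> {
    values: &'a [f64],
    /// (offset, length) of each window; lengths vary with the timestamps.
    windows: Vec<(usize, usize)>,
}

impl<'a> Matrix<'a> {
    /// Apply a range function (e.g. an increase-like delta) to every window.
    fn apply(&self, f: impl Fn(&[f64]) -> f64) -> Vec<f64> {
        self.windows
            .iter()
            .map(|&(offset, len)| f(&self.values[offset..offset + len]))
            .collect()
    }
}

fn main() {
    let series = [1.0, 2.0, 4.0, 7.0, 11.0];
    let matrix = Matrix {
        values: &series,
        windows: vec![(0, 3), (1, 3), (2, 3)],
    };
    // Last minus first value of each window: [3.0, 5.0, 7.0]
    let deltas = matrix.apply(|w| w[w.len() - 1] - w[0]);
    println!("{deltas:?}");
}
```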
## Example
The logic plan of PromQL query
```promql
# start: 2022-12-20T10:00:00
# end: 2022-12-21T10:00:00
# interval: 1m
# lookback: 30s
sum (rate(request_duration[5m])) by (idc)
```
looks like
<!-- title: 'PromAggregator: \naggr = sum, column = idc'
operator: prom
inputs:
- title: 'Matrix Manipulator: \ninterval = 1m, range = 5m, expr = div(increase(value), extrapolate_factor(timestamp))'
operator: prom
inputs:
- title: 'Series Normalize: \noffset = 0'
operator: prom
inputs:
- title: 'Filter: \ntimestamp > 2022-12-20T10:00:00 && timestamp < 2022-12-21T10:00:00'
operator: filter
inputs:
- title: 'Table Scan: \ntable = request_duration, timestamp > 2022-12-20T10:00:00 && timestamp < 2022-12-21T10:00:00'
operator: scan -->
![example](example.png)
# Drawbacks
Human beings are error-prone. Rewriting from the ground up is a harder endeavor and requires more attention to ensure correctness than translating line by line. And since the evaluators' architectures are different, it might be painful to catch up with PromQL's breaking updates (if any) in the future.
Misusing Arrow's `DictionaryArray` as `Matrix` is another point. This hack needs some `unsafe` function calls to bypass Arrow's checks, and though Arrow's API is stable, this is still undocumented behavior.
# Alternatives
There are a few alternatives we've considered:
- Wrap the existing PromQL's implementation via FFI, and import it to GreptimeDB.
- Translate its evaluator engine line-by-line, rather than rewrite one.
- Integrate the Prometheus server into GreptimeDB via RPC, making it a detached execution engine for PromQL.
The first and second options build a separate execution engine inside GreptimeDB. They may alleviate the pain of rewriting, but will have negative impacts on later evolution in areas like resource management. And introducing another deployed component, as in the last option, will bring a more complex deployment architecture.
All of them are more or less redundant in data transportation, which affects performance and resources. The proposed built-in execution procedure is also easy to integrate with and expose through the existing SQL interface GreptimeDB currently provides. Some concepts in PromQL, like sliding windows (range vectors in PromQL), are very convenient and ergonomic for analyzing series data. This makes it not only a PromQL evaluator, but also an enhancement to our query system.

View File

@@ -0,0 +1,151 @@
---
Feature Name: "procedure-framework"
Tracking Issue: https://github.com/GreptimeTeam/greptimedb/issues/286
Date: 2023-01-03
Author: "Yingwen <realevenyag@gmail.com>"
---
Procedure Framework
----------------------
# Summary
A framework for executing operations in a fault-tolerant manner.
# Motivation
Some operations in GreptimeDB require multiple steps to implement. For example, creating a table needs:
1. Check whether the table exists
2. Create the table in the table engine
   1. Create a region for the table in the storage engine
   2. Persist the metadata of the table to the table manifest
3. Add the table to the catalog manager
If the node dies or restarts in the middle of creating a table, it could leave the system in an inconsistent state. The procedure framework, inspired by [Apache HBase's ProcedureV2 framework](https://github.com/apache/hbase/blob/bfc9fc9605de638785435e404430a9408b99a8d0/src/main/asciidoc/_chapters/pv2.adoc) and [Apache Accumulo's FATE framework](https://accumulo.apache.org/docs/2.x/administration/fate), aims to provide a unified way to implement multi-step operations that are tolerant to failure.
# Details
## Overview
The procedure framework consists of the following primary components:
- A `Procedure` represents an operation or a set of operations to be performed step-by-step
- `ProcedureManager`, the runtime that runs `Procedure`s. It executes the submitted procedures, stores procedures' states to the `ProcedureStore`, and restores procedures from the `ProcedureStore` when the database restarts.
- `ProcedureStore` is a storage layer for persisting the procedure state
## Procedures
The `ProcedureManager` keeps calling `Procedure::execute()` until the Procedure is done, so the operation of the Procedure should be [idempotent](https://developer.mozilla.org/en-US/docs/Glossary/Idempotent): it needs to be able to undo or replay a partial execution of itself.
```rust
trait Procedure {
fn execute(&mut self, ctx: &Context) -> Result<Status>;
fn dump(&self) -> Result<String>;
fn rollback(&self) -> Result<()>;
// other methods...
}
```
The `Status` is an enum that has the following variants:
```rust
enum Status {
Executing {
persist: bool,
},
Suspended {
subprocedures: Vec<ProcedureWithId>,
persist: bool,
},
Done,
}
```
A call to `execute()` can result in the following possibilities:
- `Ok(Status::Done)`: we are done
- `Ok(Status::Executing { .. })`: there are remaining steps to do
- `Ok(Status::Suspended { subprocedures, .. })`: execution is suspended and can be resumed later, after the sub-procedures are done.
- `Err(e)`: error occurs during execution and the procedure is unable to proceed anymore.
Users need to assign a unique `ProcedureId` to the procedure and the procedure can get this id via the `Context`. The `ProcedureId` is typically a UUID.
```rust
struct Context {
id: ProcedureId,
// other fields ...
}
```
The `ProcedureManager` calls `Procedure::dump()` to serialize the internal state of the procedure and writes to the `ProcedureStore`. The `Status` has a field `persist` to tell the `ProcedureManager` whether it needs persistence.
## Sub-procedures
A procedure may need to create some sub-procedures to process its subtasks. For example, creating a distributed table with multiple regions (partitions) needs to set up the regions in each node, thus the parent procedure should instantiate a sub-procedure for each region. The `ProcedureManager` makes sure that the parent procedure does not proceed till all sub-procedures are successfully finished.
The procedure can submit sub-procedures to the `ProcedureManager` by returning `Status::Suspended`. It needs to assign a procedure id to each procedure manually so it can track the status of the sub-procedures.
```rust
struct ProcedureWithId {
id: ProcedureId,
procedure: BoxedProcedure,
}
```
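Here is a self-contained sketch of this flow; the types are pared-down versions of the RFC's definitions, and `CreateRegionProcedure` with a numeric id (standing in for a UUID) is illustrative:

```rust
// Pared-down versions of the RFC's types, enough to show a parent
// procedure spawning sub-procedures via `Status::Suspended`.
type ProcedureId = u64;

enum Status {
    Executing { persist: bool },
    Suspended { subprocedures: Vec<ProcedureWithId>, persist: bool },
    Done,
}

trait Procedure {
    fn execute(&mut self) -> Status;
}

struct ProcedureWithId {
    id: ProcedureId,
    procedure: Box<dyn Procedure>,
}

struct CreateRegionProcedure {
    region: u32,
}

impl Procedure for CreateRegionProcedure {
    fn execute(&mut self) -> Status {
        println!("creating region {}", self.region); // one idempotent step
        Status::Done
    }
}

struct CreateTableProcedure {
    regions: Vec<u32>,
}

impl Procedure for CreateTableProcedure {
    fn execute(&mut self) -> Status {
        // Spawn one sub-procedure per region, then suspend until all of
        // them finish; `persist: true` asks the manager to save our state.
        let subprocedures = self
            .regions
            .iter()
            .map(|&region| ProcedureWithId {
                id: region as ProcedureId, // stand-in for a generated UUID
                procedure: Box::new(CreateRegionProcedure { region }),
            })
            .collect();
        Status::Suspended { subprocedures, persist: true }
    }
}
```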
## ProcedureStore
We might need to provide two different ProcedureStore implementations:
- In standalone mode, it stores data on the local disk.
- In distributed mode, it stores data on the meta server or the object store service.
These implementations should share the same storage structure. They store each procedure's state in a unique path based on the procedure id:
```
Sample paths:
/procedures/{PROCEDURE_ID}/000001.step
/procedures/{PROCEDURE_ID}/000002.step
/procedures/{PROCEDURE_ID}/000003.commit
```
`ProcedureStore` behaves like a WAL. Before performing each step, the `ProcedureManager` can write the procedure's current state to the `ProcedureStore`, which stores the state in a `.step` file. The `000001` in the path is a monotonically increasing sequence number of the step. After the procedure is done, the `ProcedureManager` puts a `.commit` file to indicate the procedure is finished (committed).
The `ProcedureManager` can remove the procedure's files once the procedure is done, but it needs to leave the `.commit` file as the last one to remove, in case of failure during removal.
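A minimal local-disk sketch of this layout and removal order might look like the following; the struct and method names are assumptions for illustration:

```rust
use std::fs;
use std::path::PathBuf;

// Every state write adds a numbered `.step` file, a `.commit` file marks
// completion, and cleanup removes the `.commit` file last.
struct LocalProcedureStore {
    root: PathBuf, // e.g. "/procedures"
}

impl LocalProcedureStore {
    fn write_step(&self, procedure_id: &str, seq: u64, state: &str) -> std::io::Result<()> {
        let dir = self.root.join(procedure_id);
        fs::create_dir_all(&dir)?;
        fs::write(dir.join(format!("{seq:06}.step")), state)
    }

    fn commit(&self, procedure_id: &str, seq: u64) -> std::io::Result<()> {
        fs::write(self.root.join(procedure_id).join(format!("{seq:06}.commit")), "")
    }

    /// Remove a finished procedure's files, deleting the `.commit` file last
    /// so a crash mid-removal never makes a committed procedure look unfinished.
    fn remove(&self, procedure_id: &str) -> std::io::Result<()> {
        let dir = self.root.join(procedure_id);
        let (commits, steps): (Vec<_>, Vec<_>) = fs::read_dir(&dir)?
            .filter_map(|entry| entry.ok().map(|e| e.path()))
            .partition(|path| path.extension().map_or(false, |ext| ext == "commit"));
        for path in steps.into_iter().chain(commits) {
            fs::remove_file(path)?;
        }
        fs::remove_dir(dir)
    }
}
```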
## ProcedureManager
`ProcedureManager` executes procedures submitted to it.
```rust
trait ProcedureManager {
fn register_loader(&self, name: &str, loader: BoxedProcedureLoader) -> Result<()>;
async fn submit(&self, procedure: ProcedureWithId) -> Result<()>;
}
```
It supports the following operations:
- Register a `ProcedureLoader` by the type name of the `Procedure`.
- Submit a `Procedure` to the manager and execute it.
When `ProcedureManager` starts, it loads procedures from the `ProcedureStore` and restores the procedures by the `ProcedureLoader`. The manager stores the type name from `Procedure::type_name()` with the data from `Procedure::dump()` in the `.step` file and uses the type name to find a `ProcedureLoader` to recover the procedure from its data.
```rust
type BoxedProcedureLoader = Box<dyn Fn(&str) -> Result<BoxedProcedure> + Send>;
```
## Rollback
The rollback step is supposed to clean up the resources created during the `execute()` step. When a procedure has failed, the `ProcedureManager` puts a `rollback` file and calls the `Procedure::rollback()` method.
```text
/procedures/{PROCEDURE_ID}/000001.step
/procedures/{PROCEDURE_ID}/000002.rollback
```
Rollback is complicated to implement, so some procedures might not support rollback or might only provide a best-effort approach.
## Locking
The `ProcedureManager` can provide a locking mechanism that gives a procedure read/write access to a database object such as a table so other procedures are unable to modify the same table while the current one is executing.
# Drawbacks
The `Procedure` framework introduces additional complexity and overhead to our database.
- To execute a `Procedure`, we need to write to the `ProcedureStore` multiple times, which may slow down the server
- We need to rewrite the logic of creating/dropping/altering a table using the procedure framework
# Alternatives
Another approach is to tolerate failure during execution and allow users to retry the operation until it succeeds. But we still need to:
- Make each step idempotent
- Record the status in some place to check whether we are done

View File

@@ -0,0 +1,92 @@
---
Feature Name: "table-compaction"
Tracking Issue: https://github.com/GreptimeTeam/greptimedb/issues/930
Date: 2023-02-01
Author: "Lei, HUANG <mrsatangel@gmail.com>"
---
# Table Compaction
---
## Background
GreptimeDB uses an LSM-tree based storage engine that flushes memtables to SSTs for persistence.
But currently it only supports level 0, and SST files in level 0 are not guaranteed to contain only rows with disjoint time ranges.
That is to say, different SST files in level 0 may contain overlapping timestamps.
The consequence is that, in order to retrieve rows in some time range, all files need to be scanned, which brings a lot of IO overhead.
Also, just like in other LSMT engines, deletes/updates to existing primary keys are converted to new rows with delete/update marks and appended to SSTs on flushing.
We need to merge the operations on the same primary keys so that we don't have to go through all SST files to find the final state of those primary keys.
## Goal
Implement a compaction framework to:
- maintain SSTs in timestamp order to accelerate queries with a timestamp condition;
- merge rows with the same primary key;
- purge expired SSTs;
- accommodate other tasks like data rollup/indexing.
## Overview
Table compaction involves the following components:
- Compaction scheduler: runs compaction tasks and limits the consumed resources;
- Compaction strategy: finds the SSTs to compact and determines the output files of compaction;
- Compaction task: reads the rows from input SSTs and writes them to the output files.
## Implementation
### Compaction scheduler
`CompactionScheduler` is an executor that continuously polls and executes compaction requests from a task queue.
```rust
#[async_trait]
pub trait CompactionScheduler {
/// Schedules a compaction task.
async fn schedule(&self, task: CompactionRequest) -> Result<()>;
/// Stops compaction scheduler.
async fn stop(&self) -> Result<()>;
}
```
### Compaction triggering
Currently, we can check whether to compact a table when a memtable is flushed to SST.
https://github.com/GreptimeTeam/greptimedb/blob/4015dd80752e1e6aaa3d7cacc3203cb67ed9be6d/src/storage/src/flush.rs#L245
### Compaction strategy
`CompactionStrategy` defines how to pick SSTs in all levels for compaction.
```rust
pub trait CompactionStrategy {
fn pick(
&self,
ctx: CompactionContext,
levels: &LevelMetas,
) -> Result<CompactionTask>;
}
```
The most suitable compaction strategy for time-series scenarios would be
a hybrid strategy that combines time window compaction with size-tiered compaction, just like [Cassandra](https://cassandra.apache.org/doc/latest/cassandra/operating/compaction/twcs.html) and [ScyllaDB](https://docs.scylladb.com/stable/architecture/compaction/compaction-strategies.html#time-window-compaction-strategy-twcs) do.
We can first group SSTs in level n into buckets according to some predefined time window. Within that window,
SSTs are compacted in a size-tiered manner (find SSTs with similar sizes and compact them to level n+1).
SSTs from different time windows are never compacted together.
That strategy guarantees SSTs in each level are mainly sorted in timestamp order, which boosts queries with
an explicit timestamp condition, while size-tiered compaction minimizes the impact on foreground writes.
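A sketch of the two selection steps follows, under assumed metadata types (`SstMeta` and the fixed window duration are illustrative, not the engine's real types):

```rust
use std::collections::BTreeMap;

#[derive(Clone)]
struct SstMeta {
    start_ts: i64, // earliest timestamp in the SST, in seconds
    size: u64,     // file size in bytes
}

/// Group level-n SSTs into buckets keyed by the start of their time window.
fn bucket_by_window(ssts: &[SstMeta], window_secs: i64) -> BTreeMap<i64, Vec<SstMeta>> {
    let mut buckets: BTreeMap<i64, Vec<SstMeta>> = BTreeMap::new();
    for sst in ssts {
        let bucket = sst.start_ts - sst.start_ts.rem_euclid(window_secs);
        buckets.entry(bucket).or_default().push(sst.clone());
    }
    buckets
}

/// Within one bucket, keep files whose sizes are within 2x of the smallest:
/// a crude stand-in for size-tiered selection of a compaction input set.
fn pick_size_tiered(mut bucket: Vec<SstMeta>) -> Vec<SstMeta> {
    bucket.sort_by_key(|sst| sst.size);
    let smallest = match bucket.first() {
        Some(sst) => sst.size,
        None => return bucket,
    };
    bucket
        .into_iter()
        .take_while(|sst| sst.size <= smallest.saturating_mul(2))
        .collect()
}
```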
### Alternatives
Currently, GreptimeDB's storage engine [only supports two levels](https://github.com/GreptimeTeam/greptimedb/blob/43aefc5d74dfa73b7819cae77b7eb546d8534a41/src/storage/src/sst.rs#L32).
For level 0, we can start with a simple time-window based leveled compaction, which reads all SSTs in level 0,
aligns them to time windows with a fixed duration, and merges them with the SSTs in level 1 within the same time window
to ensure there is only one sorted run in level 1.

View File

@@ -1 +0,0 @@
nightly-2022-07-14

rust-toolchain.toml Normal file
View File

@@ -0,0 +1,2 @@
[toolchain]
channel = "nightly-2023-02-26"

View File

@@ -1,18 +1,19 @@
[package]
name = "api"
version = "0.1.0"
edition = "2021"
license = "Apache-2.0"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
version.workspace = true
edition.workspace = true
license.workspace = true
[dependencies]
arrow-flight.workspace = true
common-base = { path = "../common/base" }
common-time = { path = "../common/time" }
common-error = { path = "../common/error" }
common-time = { path = "../common/time" }
datatypes = { path = "../datatypes" }
prost = "0.11"
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "ad0187295035e83f76272da553453e649b7570de" }
prost.workspace = true
snafu = { version = "0.7", features = ["backtraces"] }
tonic = "0.8"
tonic.workspace = true
[build-dependencies]
tonic-build = "0.8"

View File

@@ -1,36 +0,0 @@
// Copyright 2022 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::path::PathBuf;
fn main() {
let default_out_dir = PathBuf::from(std::env::var("OUT_DIR").unwrap());
tonic_build::configure()
.file_descriptor_set_path(default_out_dir.join("greptime_fd.bin"))
.compile(
&[
"greptime/v1/insert.proto",
"greptime/v1/select.proto",
"greptime/v1/physical_plan.proto",
"greptime/v1/greptime.proto",
"greptime/v1/meta/common.proto",
"greptime/v1/meta/heartbeat.proto",
"greptime/v1/meta/route.proto",
"greptime/v1/meta/store.proto",
"prometheus/remote/remote.proto",
],
&["."],
)
.expect("compile proto");
}

View File

@@ -1,69 +0,0 @@
syntax = "proto3";
package greptime.v1;
import "greptime/v1/column.proto";
import "greptime/v1/common.proto";
message AdminRequest {
string name = 1;
repeated AdminExpr exprs = 2;
}
message AdminResponse {
repeated AdminResult results = 1;
}
message AdminExpr {
ExprHeader header = 1;
oneof expr {
CreateExpr create = 2;
AlterExpr alter = 3;
CreateDatabaseExpr create_database = 4;
}
}
message AdminResult {
ResultHeader header = 1;
oneof result {
MutateResult mutate = 2;
}
}
// TODO(hl): rename to CreateTableExpr
message CreateExpr {
optional string catalog_name = 1;
optional string schema_name = 2;
string table_name = 3;
optional string desc = 4;
repeated ColumnDef column_defs = 5;
string time_index = 6;
repeated string primary_keys = 7;
bool create_if_not_exists = 8;
map<string, string> table_options = 9;
optional uint32 table_id = 10;
repeated uint32 region_ids = 11;
}
message AlterExpr {
optional string catalog_name = 1;
optional string schema_name = 2;
string table_name = 3;
oneof kind {
AddColumns add_columns = 4;
}
}
message AddColumns {
repeated AddColumn add_columns = 1;
}
message AddColumn {
ColumnDef column_def = 1;
bool is_key = 2;
}
message CreateDatabaseExpr {
//TODO(hl): maybe rename to schema_name?
string database_name = 1;
}

View File

@@ -1,79 +0,0 @@
syntax = "proto3";
package greptime.v1;
message Column {
string column_name = 1;
enum SemanticType {
TAG = 0;
FIELD = 1;
TIMESTAMP = 2;
}
SemanticType semantic_type = 2;
message Values {
repeated int32 i8_values = 1;
repeated int32 i16_values = 2;
repeated int32 i32_values = 3;
repeated int64 i64_values = 4;
repeated uint32 u8_values = 5;
repeated uint32 u16_values = 6;
repeated uint32 u32_values = 7;
repeated uint64 u64_values = 8;
repeated float f32_values = 9;
repeated double f64_values = 10;
repeated bool bool_values = 11;
repeated bytes binary_values = 12;
repeated string string_values = 13;
repeated int32 date_values = 14;
repeated int64 datetime_values = 15;
repeated int64 ts_millis_values = 16;
}
// The array of non-null values in this column.
//
// For example: suppose there is a column "foo" that contains some int32 values (1, 2, 3, 4, 5, null, 7, 8, 9, null);
// column:
// column_name: foo
// semantic_type: Tag
// values: 1, 2, 3, 4, 5, 7, 8, 9
// null_masks: 00100000 00000010
Values values = 3;
// Mask maps the positions of null values.
// If a bit in null_mask is 1, it indicates that the column value at that position is null.
bytes null_mask = 4;
// Helpful in creating vector from column.
ColumnDataType datatype = 5;
}
message ColumnDef {
string name = 1;
ColumnDataType datatype = 2;
bool is_nullable = 3;
optional bytes default_constraint = 4;
}
enum ColumnDataType {
BOOLEAN = 0;
INT8 = 1;
INT16 = 2;
INT32 = 3;
INT64 = 4;
UINT8 = 5;
UINT16 = 6;
UINT32 = 7;
UINT64 = 8;
FLOAT32 = 9;
FLOAT64 = 10;
BINARY = 11;
STRING = 12;
DATE = 13;
DATETIME = 14;
TIMESTAMP = 15;
}

View File

@@ -1,22 +0,0 @@
syntax = "proto3";
package greptime.v1;
message RequestHeader {
string tenant = 1;
}
message ExprHeader {
uint32 version = 1;
}
message ResultHeader {
uint32 version = 1;
uint32 code = 2;
string err_msg = 3;
}
message MutateResult {
uint32 success = 1;
uint32 failure = 2;
}

View File

@@ -1,81 +0,0 @@
syntax = "proto3";
package greptime.v1;
import "greptime/v1/common.proto";
message DatabaseRequest {
string name = 1;
repeated ObjectExpr exprs = 2;
}
message DatabaseResponse {
repeated ObjectResult results = 1;
}
message ObjectExpr {
ExprHeader header = 1;
oneof expr {
InsertExpr insert = 2;
SelectExpr select = 3;
UpdateExpr update = 4;
DeleteExpr delete = 5;
}
}
// TODO(fys): Only support sql now, and will support promql etc in the future
message SelectExpr {
oneof expr {
string sql = 1;
bytes logical_plan = 2;
PhysicalPlan physical_plan = 15;
}
}
message PhysicalPlan {
bytes original_ql = 1;
bytes plan = 2;
}
message InsertExpr {
string schema_name = 1;
string table_name = 2;
message Values {
repeated bytes values = 1;
}
oneof expr {
Values values = 3;
// TODO(LFC): Remove field "sql" in InsertExpr.
// When Frontend instance received an insertion SQL (`insert into ...`), it's anticipated to parse the SQL and
// assemble the values to insert to feed Datanode. In other words, inserting data through Datanode instance's GRPC
// interface shouldn't use SQL directly.
// Then why the "sql" field exists here? It's because the Frontend needs table schema to create the values to insert,
// which is currently not able to find anywhere. (Maybe the table schema is suppose to be fetched from Meta?)
// The "sql" field is meant to be removed in the future.
string sql = 4;
}
/// The region number of current insert request.
uint32 region_number = 5;
map<string, bytes> options = 6;
}
// TODO(jiachun)
message UpdateExpr {}
// TODO(jiachun)
message DeleteExpr {}
message ObjectResult {
ResultHeader header = 1;
oneof result {
SelectResult select = 2;
MutateResult mutate = 3;
}
}
message SelectResult {
bytes raw_data = 1;
}

View File

@@ -1,22 +0,0 @@
syntax = "proto3";
package greptime.v1;
import "greptime/v1/admin.proto";
import "greptime/v1/common.proto";
import "greptime/v1/database.proto";
service Greptime {
rpc Batch(BatchRequest) returns (BatchResponse) {}
}
message BatchRequest {
RequestHeader header = 1;
repeated AdminRequest admins = 2;
repeated DatabaseRequest databases = 3;
}
message BatchResponse {
repeated AdminResponse admins = 1;
repeated DatabaseResponse databases = 2;
}

View File

@@ -1,14 +0,0 @@
syntax = "proto3";
package greptime.v1.codec;
import "greptime/v1/column.proto";
message InsertBatch {
repeated Column columns = 1;
uint32 row_count = 2;
}
message RegionNumber {
uint32 id = 1;
}

View File

@@ -1,48 +0,0 @@
syntax = "proto3";
package greptime.v1.meta;
message RequestHeader {
uint64 protocol_version = 1;
// cluster_id is the ID of the cluster which be sent to.
uint64 cluster_id = 2;
// member_id is the ID of the sender server.
uint64 member_id = 3;
}
message ResponseHeader {
uint64 protocol_version = 1;
// cluster_id is the ID of the cluster which sent the response.
uint64 cluster_id = 2;
Error error = 3;
}
message Error {
int32 code = 1;
string err_msg = 2;
}
message Peer {
uint64 id = 1;
string addr = 2;
}
message TableName {
string catalog_name = 1;
string schema_name = 2;
string table_name = 3;
}
message TimeInterval {
// The unix timestamp in millis of the start of this period.
uint64 start_timestamp_millis = 1;
// The unix timestamp in millis of the end of this period.
uint64 end_timestamp_millis = 2;
}
message KeyValue {
// key is the key in bytes. An empty key is not allowed.
bytes key = 1;
// value is the value held by the key, in bytes.
bytes value = 2;
}

View File

@@ -1,92 +0,0 @@
syntax = "proto3";
package greptime.v1.meta;
import "greptime/v1/meta/common.proto";
service Heartbeat {
// Heartbeat, there may be many contents of the heartbeat, such as:
// 1. Metadata to be registered to meta server and discoverable by other nodes.
// 2. Some performance metrics, such as Load, CPU usage, etc.
// 3. The number of computing tasks being executed.
rpc Heartbeat(stream HeartbeatRequest) returns (stream HeartbeatResponse) {}
// Ask leader's endpoint.
rpc AskLeader(AskLeaderRequest) returns (AskLeaderResponse) {}
}
message HeartbeatRequest {
RequestHeader header = 1;
// Self peer
Peer peer = 2;
// Leader node
bool is_leader = 3;
// Actually reported time interval
TimeInterval report_interval = 4;
// Node stat
NodeStat node_stat = 5;
// Region stats in this node
repeated RegionStat region_stats = 6;
// Follower nodes and stats, empty on follower nodes
repeated ReplicaStat replica_stats = 7;
}
message NodeStat {
// The read capacity units during this period
uint64 rcus = 1;
// The write capacity units during this period
uint64 wcus = 2;
// Table number in this node
uint64 table_num = 3;
// Regon number in this node
uint64 region_num = 4;
double cpu_usage = 5;
double load = 6;
// Read disk I/O in the node
double read_io_rate = 7;
// Write disk I/O in the node
double write_io_rate = 8;
// Others
map<string, string> attrs = 100;
}
message RegionStat {
uint64 region_id = 1;
TableName table_name = 2;
// The read capacity units during this period
uint64 rcus = 3;
// The write capacity units during this period
uint64 wcus = 4;
// Approximate region size
uint64 approximate_size = 5;
// Approximate number of rows
uint64 approximate_rows = 6;
// Others
map<string, string> attrs = 100;
}
message ReplicaStat {
Peer peer = 1;
bool in_sync = 2;
bool is_learner = 3;
}
message HeartbeatResponse {
ResponseHeader header = 1;
repeated bytes payload = 2;
}
message AskLeaderRequest {
RequestHeader header = 1;
}
message AskLeaderResponse {
ResponseHeader header = 1;
Peer leader = 2;
}

View File

@@ -1,90 +0,0 @@
syntax = "proto3";
package greptime.v1.meta;
import "greptime/v1/meta/common.proto";
service Router {
// Fetch routing information for tables. The smallest unit is the complete
// routing information(all regions) of a table.
//
// ```text
// table_1
// table_name
// table_schema
// regions
// region_1
// leader_peer
// follower_peer_1, follower_peer_2
// region_2
// leader_peer
// follower_peer_1, follower_peer_2, follower_peer_3
// region_xxx
// table_2
// ...
// ```
//
rpc Route(RouteRequest) returns (RouteResponse) {}
rpc Create(CreateRequest) returns (RouteResponse) {}
}
message RouteRequest {
RequestHeader header = 1;
repeated TableName table_names = 2;
}
message RouteResponse {
ResponseHeader header = 1;
repeated Peer peers = 2;
repeated TableRoute table_routes = 3;
}
message CreateRequest {
RequestHeader header = 1;
TableName table_name = 2;
repeated Partition partitions = 3;
}
message TableRoute {
Table table = 1;
repeated RegionRoute region_routes = 2;
}
message RegionRoute {
Region region = 1;
// single leader node for write task
uint64 leader_peer_index = 2;
// multiple follower nodes for read task
repeated uint64 follower_peer_indexes = 3;
}
message Table {
uint64 id = 1;
TableName table_name = 2;
bytes table_schema = 3;
}
message Region {
// TODO(LFC): Maybe use message RegionNumber?
uint64 id = 1;
string name = 2;
Partition partition = 3;
map<string, string> attrs = 100;
}
// PARTITION `region_name` VALUES LESS THAN (value_list)
message Partition {
repeated bytes column_list = 1;
repeated bytes value_list = 2;
}
// This message is only for saving into store.
message TableRouteValue {
repeated Peer peers = 1;
TableRoute table_route = 2;
}

View File

@@ -1,138 +0,0 @@
syntax = "proto3";
package greptime.v1.meta;
import "greptime/v1/meta/common.proto";
service Store {
// Range gets the keys in the range from the key-value store.
rpc Range(RangeRequest) returns (RangeResponse);
// Put puts the given key into the key-value store.
rpc Put(PutRequest) returns (PutResponse);
// BatchPut atomically puts the given keys into the key-value store.
rpc BatchPut(BatchPutRequest) returns (BatchPutResponse);
// CompareAndPut atomically puts the value to the given updated
// value if the current value == the expected value.
rpc CompareAndPut(CompareAndPutRequest) returns (CompareAndPutResponse);
// DeleteRange deletes the given range from the key-value store.
rpc DeleteRange(DeleteRangeRequest) returns (DeleteRangeResponse);
}
message RangeRequest {
RequestHeader header = 1;
// key is the first key for the range, If range_end is not given, the
// request only looks up key.
bytes key = 2;
// range_end is the upper bound on the requested range [key, range_end).
// If range_end is '\0', the range is all keys >= key.
// If range_end is key plus one (e.g., "aa"+1 == "ab", "a\xff"+1 == "b"),
// then the range request gets all keys prefixed with key.
// If both key and range_end are '\0', then the range request returns all
// keys.
bytes range_end = 3;
// limit is a limit on the number of keys returned for the request. When
// limit is set to 0, it is treated as no limit.
int64 limit = 4;
// keys_only when set returns only the keys and not the values.
bool keys_only = 5;
}
message RangeResponse {
ResponseHeader header = 1;
// kvs is the list of key-value pairs matched by the range request.
repeated KeyValue kvs = 2;
// more indicates if there are more keys to return in the requested range.
bool more = 3;
}
message PutRequest {
RequestHeader header = 1;
// key is the key, in bytes, to put into the key-value store.
bytes key = 2;
// value is the value, in bytes, to associate with the key in the
// key-value store.
bytes value = 3;
// If prev_kv is set, gets the previous key-value pair before changing it.
// The previous key-value pair will be returned in the put response.
bool prev_kv = 4;
}
message PutResponse {
ResponseHeader header = 1;
// If prev_kv is set in the request, the previous key-value pair will be
// returned.
KeyValue prev_kv = 2;
}
message BatchPutRequest {
RequestHeader header = 1;
repeated KeyValue kvs = 2;
// If prev_kv is set, gets the previous key-value pairs before changing it.
// The previous key-value pairs will be returned in the batch put response.
bool prev_kv = 3;
}
message BatchPutResponse {
ResponseHeader header = 1;
// If prev_kv is set in the request, the previous key-value pairs will be
// returned.
repeated KeyValue prev_kvs = 2;
}
message CompareAndPutRequest {
RequestHeader header = 1;
// key is the key, in bytes, to put into the key-value store.
bytes key = 2;
// expect is the previous value, in bytes
bytes expect = 3;
// value is the value, in bytes, to associate with the key in the
// key-value store.
bytes value = 4;
}
message CompareAndPutResponse {
ResponseHeader header = 1;
bool success = 2;
KeyValue prev_kv = 3;
}
message DeleteRangeRequest {
RequestHeader header = 1;
// key is the first key to delete in the range.
bytes key = 2;
// range_end is the key following the last key to delete for the range
// [key, range_end).
// If range_end is not given, the range is defined to contain only the key
// argument.
// If range_end is one bit larger than the given key, then the range is all
// the keys with the prefix (the given key).
// If range_end is '\0', the range is all keys greater than or equal to the
// key argument.
bytes range_end = 3;
// If prev_kv is set, gets the previous key-value pairs before deleting it.
// The previous key-value pairs will be returned in the delete response.
bool prev_kv = 4;
}
message DeleteRangeResponse {
ResponseHeader header = 1;
// deleted is the number of keys deleted by the delete range request.
int64 deleted = 2;
// If prev_kv is set in the request, the previous key-value pairs will be
// returned.
repeated KeyValue prev_kvs = 3;
}

View File

@@ -1,33 +0,0 @@
syntax = "proto3";
package greptime.v1.codec;
message PhysicalPlanNode {
oneof PhysicalPlanType {
ProjectionExecNode projection = 1;
MockInputExecNode mock = 99;
// TODO(fys): impl other physical plan node
}
}
message ProjectionExecNode {
PhysicalPlanNode input = 1;
repeated PhysicalExprNode expr = 2;
repeated string expr_name = 3;
}
message PhysicalExprNode {
oneof ExprType {
PhysicalColumn column = 1;
// TODO(fys): impl other physical expr node
}
}
message PhysicalColumn {
string name = 1;
uint64 index = 2;
}
message MockInputExecNode {
string name = 1;
}

View File

@@ -1,10 +0,0 @@
syntax = "proto3";
package greptime.v1.codec;
import "greptime/v1/column.proto";
message SelectResult {
repeated Column columns = 1;
uint32 row_count = 2;
}

View File

@@ -1,85 +0,0 @@
// Copyright 2016 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";
package prometheus;
option go_package = "prompb";
import "prometheus/remote/types.proto";
message WriteRequest {
repeated prometheus.TimeSeries timeseries = 1;
// Cortex uses this field to determine the source of the write request.
// We reserve it to avoid any compatibility issues.
reserved 2;
repeated prometheus.MetricMetadata metadata = 3;
}
// ReadRequest represents a remote read request.
message ReadRequest {
repeated Query queries = 1;
enum ResponseType {
// Server will return a single ReadResponse message with matched series that includes list of raw samples.
// It's recommended to use streamed response types instead.
//
// Response headers:
// Content-Type: "application/x-protobuf"
// Content-Encoding: "snappy"
SAMPLES = 0;
// Server will stream a delimited ChunkedReadResponse message that contains XOR encoded chunks for a single series.
// Each message is following varint size and fixed size bigendian uint32 for CRC32 Castagnoli checksum.
//
// Response headers:
// Content-Type: "application/x-streamed-protobuf; proto=prometheus.ChunkedReadResponse"
// Content-Encoding: ""
STREAMED_XOR_CHUNKS = 1;
}
// accepted_response_types allows negotiating the content type of the response.
//
// Response types are taken from the list in the FIFO order. If no response type in `accepted_response_types` is
// implemented by server, error is returned.
// For request that do not contain `accepted_response_types` field the SAMPLES response type will be used.
repeated ResponseType accepted_response_types = 2;
}
// ReadResponse is a response when response_type equals SAMPLES.
message ReadResponse {
// In same order as the request's queries.
repeated QueryResult results = 1;
}
message Query {
int64 start_timestamp_ms = 1;
int64 end_timestamp_ms = 2;
repeated prometheus.LabelMatcher matchers = 3;
prometheus.ReadHints hints = 4;
}
message QueryResult {
// Samples within a time series must be ordered by time.
repeated prometheus.TimeSeries timeseries = 1;
}
// ChunkedReadResponse is a response when response_type equals STREAMED_XOR_CHUNKS.
// We strictly stream full series after series, optionally split by time. This means that a single frame can contain
// partition of the single series, but once a new series is started to be streamed it means that no more chunks will
// be sent for previous one. Series are returned sorted in the same way TSDB block are internally.
message ChunkedReadResponse {
repeated prometheus.ChunkedSeries chunked_series = 1;
// query_index represents an index of the query from ReadRequest.queries these chunks relates to.
int64 query_index = 2;
}

View File

@@ -1,117 +0,0 @@
// Copyright 2017 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";
package prometheus;
option go_package = "prompb";
message MetricMetadata {
enum MetricType {
UNKNOWN = 0;
COUNTER = 1;
GAUGE = 2;
HISTOGRAM = 3;
GAUGEHISTOGRAM = 4;
SUMMARY = 5;
INFO = 6;
STATESET = 7;
}
// Represents the metric type, these match the set from Prometheus.
// Refer to model/textparse/interface.go for details.
MetricType type = 1;
string metric_family_name = 2;
string help = 4;
string unit = 5;
}
message Sample {
double value = 1;
// timestamp is in ms format, see model/timestamp/timestamp.go for
// conversion from time.Time to Prometheus timestamp.
int64 timestamp = 2;
}
message Exemplar {
// Optional, can be empty.
repeated Label labels = 1;
double value = 2;
// timestamp is in ms format, see model/timestamp/timestamp.go for
// conversion from time.Time to Prometheus timestamp.
int64 timestamp = 3;
}
// TimeSeries represents samples and labels for a single time series.
message TimeSeries {
// For a timeseries to be valid, and for the samples and exemplars
// to be ingested by the remote system properly, the labels field is required.
repeated Label labels = 1;
repeated Sample samples = 2;
repeated Exemplar exemplars = 3;
}
message Label {
string name = 1;
string value = 2;
}
message Labels {
repeated Label labels = 1;
}
// Matcher specifies a rule, which can match or set of labels or not.
message LabelMatcher {
enum Type {
EQ = 0;
NEQ = 1;
RE = 2;
NRE = 3;
}
Type type = 1;
string name = 2;
string value = 3;
}
message ReadHints {
int64 step_ms = 1; // Query step size in milliseconds.
string func = 2; // String representation of surrounding function or aggregation.
int64 start_ms = 3; // Start time in milliseconds.
int64 end_ms = 4; // End time in milliseconds.
repeated string grouping = 5; // List of label names used in aggregation.
bool by = 6; // Indicate whether it is without or by.
int64 range_ms = 7; // Range vector selector range in milliseconds.
}
// Chunk represents a TSDB chunk.
// Time range [min, max] is inclusive.
message Chunk {
int64 min_time_ms = 1;
int64 max_time_ms = 2;
// We require this to match chunkenc.Encoding.
enum Encoding {
UNKNOWN = 0;
XOR = 1;
}
Encoding type = 3;
bytes data = 4;
}
// ChunkedSeries represents single, encoded time series.
message ChunkedSeries {
// Labels should be sorted.
repeated Label labels = 1;
// Chunks will be in start time order and may overlap.
repeated Chunk chunks = 2;
}

View File

@@ -1,10 +1,10 @@
// Copyright 2022 Greptime Team
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -33,6 +33,28 @@ pub enum Error {
from: ConcreteDataType,
backtrace: Backtrace,
},
#[snafu(display(
"Failed to convert column default constraint, column: {}, source: {}",
column,
source
))]
ConvertColumnDefaultConstraint {
column: String,
#[snafu(backtrace)]
source: datatypes::error::Error,
},
#[snafu(display(
"Invalid column default constraint, column: {}, source: {}",
column,
source
))]
InvalidColumnDefaultConstraint {
column: String,
#[snafu(backtrace)]
source: datatypes::error::Error,
},
}
impl ErrorExt for Error {
@@ -40,6 +62,8 @@ impl ErrorExt for Error {
match self {
Error::UnknownColumnDataType { .. } => StatusCode::InvalidArguments,
Error::IntoColumnDataType { .. } => StatusCode::Unexpected,
Error::ConvertColumnDefaultConstraint { source, .. }
| Error::InvalidColumnDefaultConstraint { source, .. } => source.status_code(),
}
}
fn backtrace_opt(&self) -> Option<&Backtrace> {

View File

@@ -1,10 +1,10 @@
// Copyright 2022 Greptime Team
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -15,6 +15,7 @@
use common_base::BitVec;
use common_time::timestamp::TimeUnit;
use datatypes::prelude::ConcreteDataType;
use datatypes::types::TimestampType;
use datatypes::value::Value;
use datatypes::vectors::VectorRef;
use snafu::prelude::*;
@@ -56,7 +57,16 @@ impl From<ColumnDataTypeWrapper> for ConcreteDataType {
ColumnDataType::String => ConcreteDataType::string_datatype(),
ColumnDataType::Date => ConcreteDataType::date_datatype(),
ColumnDataType::Datetime => ConcreteDataType::datetime_datatype(),
ColumnDataType::Timestamp => ConcreteDataType::timestamp_millis_datatype(),
ColumnDataType::TimestampSecond => ConcreteDataType::timestamp_second_datatype(),
ColumnDataType::TimestampMillisecond => {
ConcreteDataType::timestamp_millisecond_datatype()
}
ColumnDataType::TimestampMicrosecond => {
ConcreteDataType::timestamp_microsecond_datatype()
}
ColumnDataType::TimestampNanosecond => {
ConcreteDataType::timestamp_nanosecond_datatype()
}
}
}
}
@@ -81,8 +91,15 @@ impl TryFrom<ConcreteDataType> for ColumnDataTypeWrapper {
ConcreteDataType::String(_) => ColumnDataType::String,
ConcreteDataType::Date(_) => ColumnDataType::Date,
ConcreteDataType::DateTime(_) => ColumnDataType::Datetime,
ConcreteDataType::Timestamp(_) => ColumnDataType::Timestamp,
ConcreteDataType::Null(_) | ConcreteDataType::List(_) => {
ConcreteDataType::Timestamp(unit) => match unit {
TimestampType::Second(_) => ColumnDataType::TimestampSecond,
TimestampType::Millisecond(_) => ColumnDataType::TimestampMillisecond,
TimestampType::Microsecond(_) => ColumnDataType::TimestampMicrosecond,
TimestampType::Nanosecond(_) => ColumnDataType::TimestampNanosecond,
},
ConcreteDataType::Null(_)
| ConcreteDataType::List(_)
| ConcreteDataType::Dictionary(_) => {
return error::IntoColumnDataTypeSnafu { from: datatype }.fail()
}
});
@@ -90,176 +107,190 @@ impl TryFrom<ConcreteDataType> for ColumnDataTypeWrapper {
}
}
impl Values {
pub fn with_capacity(datatype: ColumnDataType, capacity: usize) -> Self {
match datatype {
ColumnDataType::Boolean => Values {
bool_values: Vec::with_capacity(capacity),
..Default::default()
},
ColumnDataType::Int8 => Values {
i8_values: Vec::with_capacity(capacity),
..Default::default()
},
ColumnDataType::Int16 => Values {
i16_values: Vec::with_capacity(capacity),
..Default::default()
},
ColumnDataType::Int32 => Values {
i32_values: Vec::with_capacity(capacity),
..Default::default()
},
ColumnDataType::Int64 => Values {
i64_values: Vec::with_capacity(capacity),
..Default::default()
},
ColumnDataType::Uint8 => Values {
u8_values: Vec::with_capacity(capacity),
..Default::default()
},
ColumnDataType::Uint16 => Values {
u16_values: Vec::with_capacity(capacity),
..Default::default()
},
ColumnDataType::Uint32 => Values {
u32_values: Vec::with_capacity(capacity),
..Default::default()
},
ColumnDataType::Uint64 => Values {
u64_values: Vec::with_capacity(capacity),
..Default::default()
},
ColumnDataType::Float32 => Values {
f32_values: Vec::with_capacity(capacity),
..Default::default()
},
ColumnDataType::Float64 => Values {
f64_values: Vec::with_capacity(capacity),
..Default::default()
},
ColumnDataType::Binary => Values {
binary_values: Vec::with_capacity(capacity),
..Default::default()
},
ColumnDataType::String => Values {
string_values: Vec::with_capacity(capacity),
..Default::default()
},
ColumnDataType::Date => Values {
date_values: Vec::with_capacity(capacity),
..Default::default()
},
ColumnDataType::Datetime => Values {
datetime_values: Vec::with_capacity(capacity),
..Default::default()
},
ColumnDataType::Timestamp => Values {
ts_millis_values: Vec::with_capacity(capacity),
..Default::default()
},
}
pub fn values_with_capacity(datatype: ColumnDataType, capacity: usize) -> Values {
match datatype {
ColumnDataType::Boolean => Values {
bool_values: Vec::with_capacity(capacity),
..Default::default()
},
ColumnDataType::Int8 => Values {
i8_values: Vec::with_capacity(capacity),
..Default::default()
},
ColumnDataType::Int16 => Values {
i16_values: Vec::with_capacity(capacity),
..Default::default()
},
ColumnDataType::Int32 => Values {
i32_values: Vec::with_capacity(capacity),
..Default::default()
},
ColumnDataType::Int64 => Values {
i64_values: Vec::with_capacity(capacity),
..Default::default()
},
ColumnDataType::Uint8 => Values {
u8_values: Vec::with_capacity(capacity),
..Default::default()
},
ColumnDataType::Uint16 => Values {
u16_values: Vec::with_capacity(capacity),
..Default::default()
},
ColumnDataType::Uint32 => Values {
u32_values: Vec::with_capacity(capacity),
..Default::default()
},
ColumnDataType::Uint64 => Values {
u64_values: Vec::with_capacity(capacity),
..Default::default()
},
ColumnDataType::Float32 => Values {
f32_values: Vec::with_capacity(capacity),
..Default::default()
},
ColumnDataType::Float64 => Values {
f64_values: Vec::with_capacity(capacity),
..Default::default()
},
ColumnDataType::Binary => Values {
binary_values: Vec::with_capacity(capacity),
..Default::default()
},
ColumnDataType::String => Values {
string_values: Vec::with_capacity(capacity),
..Default::default()
},
ColumnDataType::Date => Values {
date_values: Vec::with_capacity(capacity),
..Default::default()
},
ColumnDataType::Datetime => Values {
datetime_values: Vec::with_capacity(capacity),
..Default::default()
},
ColumnDataType::TimestampSecond => Values {
ts_second_values: Vec::with_capacity(capacity),
..Default::default()
},
ColumnDataType::TimestampMillisecond => Values {
ts_millisecond_values: Vec::with_capacity(capacity),
..Default::default()
},
ColumnDataType::TimestampMicrosecond => Values {
ts_microsecond_values: Vec::with_capacity(capacity),
..Default::default()
},
ColumnDataType::TimestampNanosecond => Values {
ts_nanosecond_values: Vec::with_capacity(capacity),
..Default::default()
},
}
}
impl Column {
// The type of vals must be same.
pub fn push_vals(&mut self, origin_count: usize, vector: VectorRef) {
let values = self.values.get_or_insert_with(Values::default);
let mut null_mask = BitVec::from_slice(&self.null_mask);
let len = vector.len();
null_mask.reserve_exact(origin_count + len);
null_mask.extend(BitVec::repeat(false, len));
// The type of vals must be same.
pub fn push_vals(column: &mut Column, origin_count: usize, vector: VectorRef) {
let values = column.values.get_or_insert_with(Values::default);
let mut null_mask = BitVec::from_slice(&column.null_mask);
let len = vector.len();
null_mask.reserve_exact(origin_count + len);
null_mask.extend(BitVec::repeat(false, len));
(0..len).into_iter().for_each(|idx| match vector.get(idx) {
Value::Null => null_mask.set(idx + origin_count, true),
Value::Boolean(val) => values.bool_values.push(val),
Value::UInt8(val) => values.u8_values.push(val.into()),
Value::UInt16(val) => values.u16_values.push(val.into()),
Value::UInt32(val) => values.u32_values.push(val),
Value::UInt64(val) => values.u64_values.push(val),
Value::Int8(val) => values.i8_values.push(val.into()),
Value::Int16(val) => values.i16_values.push(val.into()),
Value::Int32(val) => values.i32_values.push(val),
Value::Int64(val) => values.i64_values.push(val),
Value::Float32(val) => values.f32_values.push(*val),
Value::Float64(val) => values.f64_values.push(*val),
Value::String(val) => values.string_values.push(val.as_utf8().to_string()),
Value::Binary(val) => values.binary_values.push(val.to_vec()),
Value::Date(val) => values.date_values.push(val.val()),
Value::DateTime(val) => values.datetime_values.push(val.val()),
Value::Timestamp(val) => values
.ts_millis_values
.push(val.convert_to(TimeUnit::Millisecond)),
Value::List(_) => unreachable!(),
});
self.null_mask = null_mask.into_vec();
}
(0..len).for_each(|idx| match vector.get(idx) {
Value::Null => null_mask.set(idx + origin_count, true),
Value::Boolean(val) => values.bool_values.push(val),
Value::UInt8(val) => values.u8_values.push(val.into()),
Value::UInt16(val) => values.u16_values.push(val.into()),
Value::UInt32(val) => values.u32_values.push(val),
Value::UInt64(val) => values.u64_values.push(val),
Value::Int8(val) => values.i8_values.push(val.into()),
Value::Int16(val) => values.i16_values.push(val.into()),
Value::Int32(val) => values.i32_values.push(val),
Value::Int64(val) => values.i64_values.push(val),
Value::Float32(val) => values.f32_values.push(*val),
Value::Float64(val) => values.f64_values.push(*val),
Value::String(val) => values.string_values.push(val.as_utf8().to_string()),
Value::Binary(val) => values.binary_values.push(val.to_vec()),
Value::Date(val) => values.date_values.push(val.val()),
Value::DateTime(val) => values.datetime_values.push(val.val()),
Value::Timestamp(val) => match val.unit() {
TimeUnit::Second => values.ts_second_values.push(val.value()),
TimeUnit::Millisecond => values.ts_millisecond_values.push(val.value()),
TimeUnit::Microsecond => values.ts_microsecond_values.push(val.value()),
TimeUnit::Nanosecond => values.ts_nanosecond_values.push(val.value()),
},
Value::List(_) => unreachable!(),
});
column.null_mask = null_mask.into_vec();
}
#[cfg(test)]
mod tests {
use std::sync::Arc;
use datatypes::vectors::BooleanVector;
use datatypes::vectors::{
BooleanVector, TimestampMicrosecondVector, TimestampMillisecondVector,
TimestampNanosecondVector, TimestampSecondVector,
};
use super::*;
#[test]
fn test_values_with_capacity() {
let values = Values::with_capacity(ColumnDataType::Int8, 2);
let values = values_with_capacity(ColumnDataType::Int8, 2);
let values = values.i8_values;
assert_eq!(2, values.capacity());
let values = Values::with_capacity(ColumnDataType::Int32, 2);
let values = values_with_capacity(ColumnDataType::Int32, 2);
let values = values.i32_values;
assert_eq!(2, values.capacity());
let values = Values::with_capacity(ColumnDataType::Int64, 2);
let values = values_with_capacity(ColumnDataType::Int64, 2);
let values = values.i64_values;
assert_eq!(2, values.capacity());
let values = Values::with_capacity(ColumnDataType::Uint8, 2);
let values = values_with_capacity(ColumnDataType::Uint8, 2);
let values = values.u8_values;
assert_eq!(2, values.capacity());
let values = Values::with_capacity(ColumnDataType::Uint32, 2);
let values = values_with_capacity(ColumnDataType::Uint32, 2);
let values = values.u32_values;
assert_eq!(2, values.capacity());
let values = Values::with_capacity(ColumnDataType::Uint64, 2);
let values = values_with_capacity(ColumnDataType::Uint64, 2);
let values = values.u64_values;
assert_eq!(2, values.capacity());
let values = Values::with_capacity(ColumnDataType::Float32, 2);
let values = values_with_capacity(ColumnDataType::Float32, 2);
let values = values.f32_values;
assert_eq!(2, values.capacity());
let values = Values::with_capacity(ColumnDataType::Float64, 2);
let values = values_with_capacity(ColumnDataType::Float64, 2);
let values = values.f64_values;
assert_eq!(2, values.capacity());
let values = Values::with_capacity(ColumnDataType::Binary, 2);
let values = values_with_capacity(ColumnDataType::Binary, 2);
let values = values.binary_values;
assert_eq!(2, values.capacity());
let values = Values::with_capacity(ColumnDataType::Boolean, 2);
let values = values_with_capacity(ColumnDataType::Boolean, 2);
let values = values.bool_values;
assert_eq!(2, values.capacity());
let values = Values::with_capacity(ColumnDataType::String, 2);
let values = values_with_capacity(ColumnDataType::String, 2);
let values = values.string_values;
assert_eq!(2, values.capacity());
let values = Values::with_capacity(ColumnDataType::Date, 2);
let values = values_with_capacity(ColumnDataType::Date, 2);
let values = values.date_values;
assert_eq!(2, values.capacity());
let values = Values::with_capacity(ColumnDataType::Datetime, 2);
let values = values_with_capacity(ColumnDataType::Datetime, 2);
let values = values.datetime_values;
assert_eq!(2, values.capacity());
let values = Values::with_capacity(ColumnDataType::Timestamp, 2);
let values = values.ts_millis_values;
let values = values_with_capacity(ColumnDataType::TimestampMillisecond, 2);
let values = values.ts_millisecond_values;
assert_eq!(2, values.capacity());
}
@@ -326,8 +357,8 @@ mod tests {
ColumnDataTypeWrapper(ColumnDataType::Datetime).into()
);
assert_eq!(
ConcreteDataType::timestamp_millis_datatype(),
ColumnDataTypeWrapper(ColumnDataType::Timestamp).into()
ConcreteDataType::timestamp_millisecond_datatype(),
ColumnDataTypeWrapper(ColumnDataType::TimestampMillisecond).into()
);
}
@@ -394,8 +425,8 @@ mod tests {
ConcreteDataType::datetime_datatype().try_into().unwrap()
);
assert_eq!(
ColumnDataTypeWrapper(ColumnDataType::Timestamp),
ConcreteDataType::timestamp_millis_datatype()
ColumnDataTypeWrapper(ColumnDataType::TimestampMillisecond),
ConcreteDataType::timestamp_millisecond_datatype()
.try_into()
.unwrap()
);
@@ -412,7 +443,48 @@ mod tests {
assert!(result.is_err());
assert_eq!(
result.unwrap_err().to_string(),
"Failed to create column datatype from List(ListType { inner: Boolean(BooleanType) })"
"Failed to create column datatype from List(ListType { item_type: Boolean(BooleanType) })"
);
}
#[test]
fn test_column_put_timestamp_values() {
let mut column = Column {
column_name: "test".to_string(),
semantic_type: 0,
values: Some(Values {
..Default::default()
}),
null_mask: vec![],
datatype: 0,
};
let vector = Arc::new(TimestampNanosecondVector::from_vec(vec![1, 2, 3]));
push_vals(&mut column, 3, vector);
assert_eq!(
vec![1, 2, 3],
column.values.as_ref().unwrap().ts_nanosecond_values
);
let vector = Arc::new(TimestampMillisecondVector::from_vec(vec![4, 5, 6]));
push_vals(&mut column, 3, vector);
assert_eq!(
vec![4, 5, 6],
column.values.as_ref().unwrap().ts_millisecond_values
);
let vector = Arc::new(TimestampMicrosecondVector::from_vec(vec![7, 8, 9]));
push_vals(&mut column, 3, vector);
assert_eq!(
vec![7, 8, 9],
column.values.as_ref().unwrap().ts_microsecond_values
);
let vector = Arc::new(TimestampSecondVector::from_vec(vec![10, 11, 12]));
push_vals(&mut column, 3, vector);
assert_eq!(
vec![10, 11, 12],
column.values.as_ref().unwrap().ts_second_values
);
}
@@ -433,7 +505,7 @@ mod tests {
let row_count = 4;
let vector = Arc::new(BooleanVector::from(vec![Some(true), None, Some(false)]));
column.push_vals(row_count, vector);
push_vals(&mut column, row_count, vector);
// Some(false), None, Some(true), Some(true), Some(true), None, Some(false)
let bool_values = column.values.unwrap().bool_values;
assert_eq!(vec![false, true, true, true, false], bool_values);


@@ -1,10 +1,10 @@
// Copyright 2022 Greptime Team
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,9 +14,13 @@
pub mod error;
pub mod helper;
pub mod prometheus;
pub mod result;
pub mod serde;
pub mod prometheus {
pub mod remote {
pub use greptime_proto::prometheus::remote::*;
}
}
pub mod v1;
pub use prost::DecodeError;


@@ -1,203 +0,0 @@
// Copyright 2022 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use common_error::prelude::ErrorExt;
use crate::v1::codec::SelectResult;
use crate::v1::{
admin_result, object_result, AdminResult, MutateResult, ObjectResult, ResultHeader,
SelectResult as SelectResultRaw,
};
pub const PROTOCOL_VERSION: u32 = 1;
pub type Success = u32;
pub type Failure = u32;
#[derive(Default)]
pub struct ObjectResultBuilder {
version: u32,
code: u32,
err_msg: Option<String>,
result: Option<Body>,
}
pub enum Body {
Mutate((Success, Failure)),
Select(SelectResult),
}
impl ObjectResultBuilder {
pub fn new() -> Self {
Self {
version: PROTOCOL_VERSION,
..Default::default()
}
}
#[allow(dead_code)]
pub fn version(mut self, version: u32) -> Self {
self.version = version;
self
}
pub fn status_code(mut self, code: u32) -> Self {
self.code = code;
self
}
pub fn err_msg(mut self, err_msg: String) -> Self {
self.err_msg = Some(err_msg);
self
}
pub fn mutate_result(mut self, success: u32, failure: u32) -> Self {
self.result = Some(Body::Mutate((success, failure)));
self
}
pub fn select_result(mut self, select_result: SelectResult) -> Self {
self.result = Some(Body::Select(select_result));
self
}
pub fn build(self) -> ObjectResult {
let header = Some(ResultHeader {
version: self.version,
code: self.code,
err_msg: self.err_msg.unwrap_or_default(),
});
let result = match self.result {
Some(Body::Mutate((success, failure))) => {
Some(object_result::Result::Mutate(MutateResult {
success,
failure,
}))
}
Some(Body::Select(select)) => Some(object_result::Result::Select(SelectResultRaw {
raw_data: select.into(),
})),
None => None,
};
ObjectResult { header, result }
}
}
pub fn build_err_result(err: &impl ErrorExt) -> ObjectResult {
ObjectResultBuilder::new()
.status_code(err.status_code() as u32)
.err_msg(err.to_string())
.build()
}
#[derive(Debug)]
pub struct AdminResultBuilder {
version: u32,
code: u32,
err_msg: Option<String>,
mutate: Option<(Success, Failure)>,
}
impl AdminResultBuilder {
pub fn status_code(mut self, code: u32) -> Self {
self.code = code;
self
}
pub fn err_msg(mut self, err_msg: String) -> Self {
self.err_msg = Some(err_msg);
self
}
pub fn mutate_result(mut self, success: u32, failure: u32) -> Self {
self.mutate = Some((success, failure));
self
}
pub fn build(self) -> AdminResult {
let header = Some(ResultHeader {
version: self.version,
code: self.code,
err_msg: self.err_msg.unwrap_or_default(),
});
let result = if let Some((success, failure)) = self.mutate {
Some(admin_result::Result::Mutate(MutateResult {
success,
failure,
}))
} else {
None
};
AdminResult { header, result }
}
}
impl Default for AdminResultBuilder {
fn default() -> Self {
Self {
version: PROTOCOL_VERSION,
code: 0,
err_msg: None,
mutate: None,
}
}
}
#[cfg(test)]
mod tests {
use common_error::status_code::StatusCode;
use super::*;
use crate::error::UnknownColumnDataTypeSnafu;
use crate::v1::{object_result, MutateResult};
#[test]
fn test_object_result_builder() {
let obj_result = ObjectResultBuilder::new()
.version(101)
.status_code(500)
.err_msg("Failed to read this file!".to_string())
.mutate_result(100, 20)
.build();
let header = obj_result.header.unwrap();
assert_eq!(101, header.version);
assert_eq!(500, header.code);
assert_eq!("Failed to read this file!", header.err_msg);
let result = obj_result.result.unwrap();
assert_eq!(
object_result::Result::Mutate(MutateResult {
success: 100,
failure: 20,
}),
result
);
}
#[test]
fn test_build_err_result() {
let err = UnknownColumnDataTypeSnafu { datatype: 1 }.build();
let err_result = build_err_result(&err);
let header = err_result.header.unwrap();
let result = err_result.result;
assert_eq!(PROTOCOL_VERSION, header.version);
assert_eq!(StatusCode::InvalidArguments as u32, header.code);
assert!(result.is_none());
}
}


@@ -1,193 +0,0 @@
// Copyright 2022 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
pub use prost::DecodeError;
use prost::Message;
use crate::v1::codec::{InsertBatch, PhysicalPlanNode, RegionNumber, SelectResult};
use crate::v1::meta::TableRouteValue;
macro_rules! impl_convert_with_bytes {
($data_type: ty) => {
impl From<$data_type> for Vec<u8> {
fn from(entity: $data_type) -> Self {
entity.encode_to_vec()
}
}
impl TryFrom<&[u8]> for $data_type {
type Error = DecodeError;
fn try_from(value: &[u8]) -> Result<Self, Self::Error> {
<$data_type>::decode(value.as_ref())
}
}
};
}
impl_convert_with_bytes!(InsertBatch);
impl_convert_with_bytes!(SelectResult);
impl_convert_with_bytes!(PhysicalPlanNode);
impl_convert_with_bytes!(RegionNumber);
impl_convert_with_bytes!(TableRouteValue);
#[cfg(test)]
mod tests {
use std::ops::Deref;
use crate::v1::codec::*;
use crate::v1::{column, Column};
const SEMANTIC_TAG: i32 = 0;
#[test]
fn test_convert_insert_batch() {
let insert_batch = mock_insert_batch();
let bytes: Vec<u8> = insert_batch.into();
let insert: InsertBatch = bytes.deref().try_into().unwrap();
assert_eq!(8, insert.row_count);
assert_eq!(1, insert.columns.len());
let column = &insert.columns[0];
assert_eq!("foo", column.column_name);
assert_eq!(SEMANTIC_TAG, column.semantic_type);
assert_eq!(vec![1], column.null_mask);
assert_eq!(
vec![2, 3, 4, 5, 6, 7, 8],
column.values.as_ref().unwrap().i32_values
);
}
#[should_panic]
#[test]
fn test_convert_insert_batch_wrong() {
let insert_batch = mock_insert_batch();
let mut bytes: Vec<u8> = insert_batch.into();
// modify some bytes
bytes[0] = 0b1;
bytes[1] = 0b1;
let insert: InsertBatch = bytes.deref().try_into().unwrap();
assert_eq!(8, insert.row_count);
assert_eq!(1, insert.columns.len());
let column = &insert.columns[0];
assert_eq!("foo", column.column_name);
assert_eq!(SEMANTIC_TAG, column.semantic_type);
assert_eq!(vec![1], column.null_mask);
assert_eq!(
vec![2, 3, 4, 5, 6, 7, 8],
column.values.as_ref().unwrap().i32_values
);
}
#[test]
fn test_convert_select_result() {
let select_result = mock_select_result();
let bytes: Vec<u8> = select_result.into();
let result: SelectResult = bytes.deref().try_into().unwrap();
assert_eq!(8, result.row_count);
assert_eq!(1, result.columns.len());
let column = &result.columns[0];
assert_eq!("foo", column.column_name);
assert_eq!(SEMANTIC_TAG, column.semantic_type);
assert_eq!(vec![1], column.null_mask);
assert_eq!(
vec![2, 3, 4, 5, 6, 7, 8],
column.values.as_ref().unwrap().i32_values
);
}
#[should_panic]
#[test]
fn test_convert_select_result_wrong() {
let select_result = mock_select_result();
let mut bytes: Vec<u8> = select_result.into();
// modify some bytes
bytes[0] = 0b1;
bytes[1] = 0b1;
let result: SelectResult = bytes.deref().try_into().unwrap();
assert_eq!(8, result.row_count);
assert_eq!(1, result.columns.len());
let column = &result.columns[0];
assert_eq!("foo", column.column_name);
assert_eq!(SEMANTIC_TAG, column.semantic_type);
assert_eq!(vec![1], column.null_mask);
assert_eq!(
vec![2, 3, 4, 5, 6, 7, 8],
column.values.as_ref().unwrap().i32_values
);
}
#[test]
fn test_convert_region_id() {
let region_id = RegionNumber { id: 12 };
let bytes: Vec<u8> = region_id.into();
let region_id: RegionNumber = bytes.deref().try_into().unwrap();
assert_eq!(12, region_id.id);
}
fn mock_insert_batch() -> InsertBatch {
let values = column::Values {
i32_values: vec![2, 3, 4, 5, 6, 7, 8],
..Default::default()
};
let null_mask = vec![1];
let column = Column {
column_name: "foo".to_string(),
semantic_type: SEMANTIC_TAG,
values: Some(values),
null_mask,
..Default::default()
};
InsertBatch {
columns: vec![column],
row_count: 8,
}
}
fn mock_select_result() -> SelectResult {
let values = column::Values {
i32_values: vec![2, 3, 4, 5, 6, 7, 8],
..Default::default()
};
let null_mask = vec![1];
let column = Column {
column_name: "foo".to_string(),
semantic_type: SEMANTIC_TAG,
values: Some(values),
null_mask,
..Default::default()
};
SelectResult {
columns: vec![column],
row_count: 8,
}
}
}


@@ -1,10 +1,10 @@
// Copyright 2022 Greptime Team
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -12,13 +12,10 @@
// See the License for the specific language governing permissions and
// limitations under the License.
#![allow(clippy::derive_partial_eq_without_eq)]
tonic::include_proto!("greptime.v1");
pub mod column_def;
pub const GREPTIME_FD_SET: &[u8] = tonic::include_file_descriptor_set!("greptime_fd");
pub mod codec {
tonic::include_proto!("greptime.v1.codec");
pub mod meta {
pub use greptime_proto::v1::meta::*;
}
pub mod meta;
pub use greptime_proto::v1::*;


@@ -0,0 +1,42 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use datatypes::schema::{ColumnDefaultConstraint, ColumnSchema};
use snafu::ResultExt;
use crate::error::{self, Result};
use crate::helper::ColumnDataTypeWrapper;
use crate::v1::ColumnDef;
pub fn try_as_column_schema(column_def: &ColumnDef) -> Result<ColumnSchema> {
let data_type = ColumnDataTypeWrapper::try_new(column_def.datatype)?;
let constraint = if column_def.default_constraint.is_empty() {
None
} else {
Some(
ColumnDefaultConstraint::try_from(column_def.default_constraint.as_slice()).context(
error::ConvertColumnDefaultConstraintSnafu {
column: &column_def.name,
},
)?,
)
};
ColumnSchema::new(&column_def.name, data_type.into(), column_def.is_nullable)
.with_default_constraint(constraint)
.context(error::InvalidColumnDefaultConstraintSnafu {
column: &column_def.name,
})
}
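A hedged usage sketch for the conversion above; the module path follows the `pub mod column_def` declaration shown earlier, and the field values are illustrative, not taken from the source:

use api::v1::{ColumnDataType, ColumnDef};

fn example() -> api::error::Result<()> {
    // An empty default_constraint means "no default", per the branch above.
    let def = ColumnDef {
        name: "host".to_string(),
        datatype: ColumnDataType::String as i32,
        is_nullable: true,
        default_constraint: vec![],
    };
    let schema = api::v1::column_def::try_as_column_schema(&def)?;
    assert_eq!("host", schema.name);
    Ok(())
}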


@@ -1,207 +0,0 @@
// Copyright 2022 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
tonic::include_proto!("greptime.v1.meta");
use std::collections::HashMap;
use std::hash::{Hash, Hasher};
pub const PROTOCOL_VERSION: u64 = 1;
#[derive(Default)]
pub struct PeerDict {
peers: HashMap<Peer, usize>,
index: usize,
}
impl PeerDict {
pub fn get_or_insert(&mut self, peer: Peer) -> usize {
let index = self.peers.entry(peer).or_insert_with(|| {
let v = self.index;
self.index += 1;
v
});
*index
}
pub fn into_peers(self) -> Vec<Peer> {
let mut array = vec![Peer::default(); self.index];
for (p, i) in self.peers {
array[i] = p;
}
array
}
}
#[allow(clippy::derive_hash_xor_eq)]
impl Hash for Peer {
fn hash<H: Hasher>(&self, state: &mut H) {
self.id.hash(state);
self.addr.hash(state);
}
}
impl Eq for Peer {}
impl RequestHeader {
#[inline]
pub fn new((cluster_id, member_id): (u64, u64)) -> Self {
Self {
protocol_version: PROTOCOL_VERSION,
cluster_id,
member_id,
}
}
}
impl ResponseHeader {
#[inline]
pub fn success(cluster_id: u64) -> Self {
Self {
protocol_version: PROTOCOL_VERSION,
cluster_id,
..Default::default()
}
}
#[inline]
pub fn failed(cluster_id: u64, error: Error) -> Self {
Self {
protocol_version: PROTOCOL_VERSION,
cluster_id,
error: Some(error),
}
}
#[inline]
pub fn is_not_leader(&self) -> bool {
if let Some(error) = &self.error {
if error.code == ErrorCode::NotLeader as i32 {
return true;
}
}
false
}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ErrorCode {
NoActiveDatanodes = 1,
NotLeader = 2,
}
impl Error {
#[inline]
pub fn no_active_datanodes() -> Self {
Self {
code: ErrorCode::NoActiveDatanodes as i32,
err_msg: "No active datanodes".to_string(),
}
}
#[inline]
pub fn is_not_leader() -> Self {
Self {
code: ErrorCode::NotLeader as i32,
err_msg: "Current server is not leader".to_string(),
}
}
}
impl HeartbeatResponse {
#[inline]
pub fn is_not_leader(&self) -> bool {
if let Some(header) = &self.header {
return header.is_not_leader();
}
false
}
}
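A small sketch tying the helpers above together: a heartbeat response whose header carries the NotLeader error should report `is_not_leader()` (assuming `HeartbeatResponse` derives `Default`, as prost-generated messages do):

fn example_not_leader() {
    let header = ResponseHeader::failed(42, Error::is_not_leader());
    let resp = HeartbeatResponse {
        header: Some(header),
        ..Default::default()
    };
    assert!(resp.is_not_leader());
}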
macro_rules! gen_set_header {
($req: ty) => {
impl $req {
#[inline]
pub fn set_header(&mut self, (cluster_id, member_id): (u64, u64)) {
self.header = Some(RequestHeader::new((cluster_id, member_id)));
}
}
};
}
gen_set_header!(HeartbeatRequest);
gen_set_header!(RouteRequest);
gen_set_header!(CreateRequest);
gen_set_header!(RangeRequest);
gen_set_header!(PutRequest);
gen_set_header!(BatchPutRequest);
gen_set_header!(CompareAndPutRequest);
gen_set_header!(DeleteRangeRequest);
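A usage sketch for the macro-generated setters: each request type listed above gains the same `set_header` method, which stamps the protocol version along with the ids.

fn example_set_header() {
    let mut req = HeartbeatRequest::default();
    req.set_header((1, 7)); // (cluster_id, member_id)
    assert_eq!(PROTOCOL_VERSION, req.header.unwrap().protocol_version);
}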
#[cfg(test)]
mod tests {
use std::vec;
use super::*;
#[test]
fn test_peer_dict() {
let mut dict = PeerDict::default();
dict.get_or_insert(Peer {
id: 1,
addr: "111".to_string(),
});
dict.get_or_insert(Peer {
id: 2,
addr: "222".to_string(),
});
dict.get_or_insert(Peer {
id: 1,
addr: "111".to_string(),
});
dict.get_or_insert(Peer {
id: 1,
addr: "111".to_string(),
});
dict.get_or_insert(Peer {
id: 1,
addr: "111".to_string(),
});
dict.get_or_insert(Peer {
id: 1,
addr: "111".to_string(),
});
dict.get_or_insert(Peer {
id: 2,
addr: "222".to_string(),
});
assert_eq!(2, dict.index);
assert_eq!(
vec![
Peer {
id: 1,
addr: "111".to_string(),
},
Peer {
id: 2,
addr: "222".to_string(),
}
],
dict.into_peers()
);
}
}


@@ -1,14 +1,13 @@
[package]
name = "catalog"
version = "0.1.0"
edition = "2021"
license = "Apache-2.0"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
version.workspace = true
edition.workspace = true
license.workspace = true
[dependencies]
api = { path = "../api" }
arc-swap = "1.0"
async-stream = "0.3"
async-stream.workspace = true
async-trait = "0.1"
backoff = { version = "0.4", features = ["tokio"] }
common-catalog = { path = "../common/catalog" }
@@ -19,29 +18,28 @@ common-recordbatch = { path = "../common/recordbatch" }
common-runtime = { path = "../common/runtime" }
common-telemetry = { path = "../common/telemetry" }
common-time = { path = "../common/time" }
datafusion = { git = "https://github.com/apache/arrow-datafusion.git", branch = "arrow2", features = [
"simd",
] }
dashmap = "5.4"
datafusion.workspace = true
datatypes = { path = "../datatypes" }
futures = "0.3"
futures-util = "0.3"
futures-util.workspace = true
lazy_static = "1.4"
meta-client = { path = "../meta-client" }
opendal = "0.17"
parking_lot = "0.12"
regex = "1.6"
serde = "1.0"
serde_json = "1.0"
session = { path = "../session" }
snafu = { version = "0.7", features = ["backtraces"] }
storage = { path = "../storage" }
table = { path = "../table" }
tokio = { version = "1.18", features = ["full"] }
tokio.workspace = true
[dev-dependencies]
chrono = "0.4"
common-test-util = { path = "../common/test-util" }
chrono.workspace = true
log-store = { path = "../log-store" }
mito = { path = "../mito", features = ["test"] }
object-store = { path = "../object-store" }
opendal = "0.17"
storage = { path = "../storage" }
mito = { path = "../mito" }
tempdir = "0.3"
tokio = { version = "1.0", features = ["full"] }
tokio.workspace = true


@@ -1,10 +1,10 @@
// Copyright 2022 Greptime Team
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -13,14 +13,16 @@
// limitations under the License.
use std::any::Any;
use std::fmt::Debug;
use common_error::ext::{BoxedError, ErrorExt};
use common_error::prelude::{Snafu, StatusCode};
use datafusion::error::DataFusionError;
use datatypes::arrow;
use datatypes::schema::RawSchema;
use datatypes::prelude::ConcreteDataType;
use snafu::{Backtrace, ErrorCompat};
use crate::DeregisterTableRequest;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub))]
pub enum Error {
@@ -51,14 +53,12 @@ pub enum Error {
SystemCatalog { msg: String, backtrace: Backtrace },
#[snafu(display(
"System catalog table type mismatch, expected: binary, found: {:?} source: {}",
"System catalog table type mismatch, expected: binary, found: {:?}",
data_type,
source
))]
SystemCatalogTypeMismatch {
data_type: arrow::datatypes::DataType,
#[snafu(backtrace)]
source: datatypes::error::Error,
data_type: ConcreteDataType,
backtrace: Backtrace,
},
#[snafu(display("Invalid system catalog entry type: {:?}", entry_type))]
@@ -88,25 +88,29 @@ pub enum Error {
backtrace: Backtrace,
},
#[snafu(display("Cannot find schema, schema info: {}", schema_info))]
#[snafu(display("Cannot find schema {} in catalog {}", schema, catalog))]
SchemaNotFound {
schema_info: String,
catalog: String,
schema: String,
backtrace: Backtrace,
},
#[snafu(display("Table {} already exists", table))]
#[snafu(display("Table `{}` already exists", table))]
TableExists { table: String, backtrace: Backtrace },
#[snafu(display("Table `{}` not exist", table))]
TableNotExist { table: String, backtrace: Backtrace },
#[snafu(display("Schema {} already exists", schema))]
SchemaExists {
schema: String,
backtrace: Backtrace,
},
#[snafu(display("Failed to register table"))]
RegisterTable {
#[snafu(backtrace)]
source: BoxedError,
#[snafu(display("Operation {} not implemented yet", operation))]
Unimplemented {
operation: String,
backtrace: Backtrace,
},
#[snafu(display("Failed to open table, table info: {}, source: {}", table_info, source))]
@@ -137,6 +141,17 @@ pub enum Error {
source: table::error::Error,
},
#[snafu(display(
"Failed to deregister table, request: {:?}, source: {}",
request,
source
))]
DeregisterTable {
request: DeregisterTableRequest,
#[snafu(backtrace)]
source: table::error::Error,
},
#[snafu(display("Illegal catalog manager state: {}", msg))]
IllegalManagerState { backtrace: Backtrace, msg: String },
@@ -146,17 +161,16 @@ pub enum Error {
source: table::error::Error,
},
#[snafu(display(
"Invalid table schema in catalog entry, table:{}, schema: {:?}, source: {}",
table_info,
schema,
source
))]
InvalidTableSchema {
table_info: String,
schema: RawSchema,
#[snafu(display("Failure during SchemaProvider operation, source: {}", source))]
SchemaProviderOperation {
#[snafu(backtrace)]
source: datatypes::error::Error,
source: BoxedError,
},
#[snafu(display("{source}"))]
Internal {
#[snafu(backtrace)]
source: BoxedError,
},
#[snafu(display("Failed to execute system catalog table scan, source: {}", source))]
@@ -170,32 +184,26 @@ pub enum Error {
source: common_catalog::error::Error,
},
#[snafu(display("IO error occurred while fetching catalog info, source: {}", source))]
Io {
backtrace: Backtrace,
source: std::io::Error,
},
#[snafu(display("Local and remote catalog data are inconsistent, msg: {}", msg))]
CatalogStateInconsistent { msg: String, backtrace: Backtrace },
#[snafu(display("Failed to perform metasrv operation, source: {}", source))]
MetaSrv {
#[snafu(backtrace)]
source: meta_client::error::Error,
},
#[snafu(display("Invalid table schema in catalog, source: {:?}", source))]
InvalidSchemaInCatalog {
#[snafu(display("Invalid table info in catalog, source: {}", source))]
InvalidTableInfoInCatalog {
#[snafu(backtrace)]
source: datatypes::error::Error,
},
#[snafu(display("Catalog internal error: {}", source))]
Internal {
#[snafu(display("Failed to serialize or deserialize catalog entry: {}", source))]
CatalogEntrySerde {
#[snafu(backtrace)]
source: BoxedError,
source: common_catalog::error::Error,
},
#[snafu(display("Illegal access to catalog: {} and schema: {}", catalog, schema))]
QueryAccessDenied { catalog: String, schema: String },
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -208,33 +216,40 @@ impl ErrorExt for Error {
| Error::TableNotFound { .. }
| Error::IllegalManagerState { .. }
| Error::CatalogNotFound { .. }
| Error::InvalidEntryType { .. }
| Error::CatalogStateInconsistent { .. } => StatusCode::Unexpected,
| Error::InvalidEntryType { .. } => StatusCode::Unexpected,
Error::SystemCatalog { .. }
| Error::EmptyValue { .. }
| Error::ValueDeserialize { .. }
| Error::Io { .. } => StatusCode::StorageUnavailable,
| Error::ValueDeserialize { .. } => StatusCode::StorageUnavailable,
Error::SystemCatalogTypeMismatch { .. } => StatusCode::Internal,
Error::ReadSystemCatalog { source, .. } => source.status_code(),
Error::SystemCatalogTypeMismatch { source, .. } => source.status_code(),
Error::InvalidCatalogValue { source, .. } => source.status_code(),
Error::InvalidCatalogValue { source, .. } | Error::CatalogEntrySerde { source } => {
source.status_code()
}
Error::RegisterTable { .. } => StatusCode::Internal,
Error::TableExists { .. } => StatusCode::TableAlreadyExists,
Error::TableNotExist { .. } => StatusCode::TableNotFound,
Error::SchemaExists { .. } => StatusCode::InvalidArguments,
Error::OpenSystemCatalog { source, .. }
| Error::CreateSystemCatalog { source, .. }
| Error::InsertCatalogRecord { source, .. }
| Error::OpenTable { source, .. }
| Error::CreateTable { source, .. } => source.status_code(),
| Error::CreateTable { source, .. }
| Error::DeregisterTable { source, .. } => source.status_code(),
Error::MetaSrv { source, .. } => source.status_code(),
Error::SystemCatalogTableScan { source } => source.status_code(),
Error::SystemCatalogTableScanExec { source } => source.status_code(),
Error::InvalidTableSchema { source, .. } => source.status_code(),
Error::InvalidSchemaInCatalog { .. } => StatusCode::Unexpected,
Error::Internal { source, .. } => source.status_code(),
Error::InvalidTableInfoInCatalog { source } => source.status_code(),
Error::SchemaProviderOperation { source } | Error::Internal { source } => {
source.status_code()
}
Error::Unimplemented { .. } => StatusCode::Unsupported,
Error::QueryAccessDenied { .. } => StatusCode::AccessDenied,
}
}
@@ -255,8 +270,6 @@ impl From<Error> for DataFusionError {
#[cfg(test)]
mod tests {
use common_error::mock::MockError;
use datatypes::arrow::datatypes::DataType;
use snafu::GenerateImplicitData;
use super::*;
@@ -277,22 +290,6 @@ mod tests {
InvalidKeySnafu { key: None }.build().status_code()
);
assert_eq!(
StatusCode::StorageUnavailable,
Error::OpenSystemCatalog {
source: table::error::Error::new(MockError::new(StatusCode::StorageUnavailable))
}
.status_code()
);
assert_eq!(
StatusCode::StorageUnavailable,
Error::CreateSystemCatalog {
source: table::error::Error::new(MockError::new(StatusCode::StorageUnavailable))
}
.status_code()
);
assert_eq!(
StatusCode::StorageUnavailable,
Error::SystemCatalog {
@@ -305,11 +302,8 @@ mod tests {
assert_eq!(
StatusCode::Internal,
Error::SystemCatalogTypeMismatch {
data_type: DataType::Boolean,
source: datatypes::error::Error::UnsupportedArrowType {
arrow_type: DataType::Boolean,
backtrace: Backtrace::generate()
}
data_type: ConcreteDataType::binary_datatype(),
backtrace: Backtrace::generate(),
}
.status_code()
);


@@ -1,10 +1,10 @@
// Copyright 2022 Greptime Team
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -15,54 +15,56 @@
use std::collections::HashMap;
use std::fmt::{Display, Formatter};
use common_catalog::error::{
DeserializeCatalogEntryValueSnafu, Error, InvalidCatalogSnafu, SerializeCatalogEntryValueSnafu,
};
use lazy_static::lazy_static;
use regex::Regex;
use serde::{Deserialize, Serialize, Serializer};
use snafu::{ensure, OptionExt, ResultExt};
use table::metadata::{RawTableMeta, TableId, TableVersion};
use table::metadata::{RawTableInfo, TableId, TableVersion};
use crate::consts::{
CATALOG_KEY_PREFIX, SCHEMA_KEY_PREFIX, TABLE_GLOBAL_KEY_PREFIX, TABLE_REGIONAL_KEY_PREFIX,
};
use crate::error::{
DeserializeCatalogEntryValueSnafu, Error, InvalidCatalogSnafu, SerializeCatalogEntryValueSnafu,
};
pub const CATALOG_KEY_PREFIX: &str = "__c";
pub const SCHEMA_KEY_PREFIX: &str = "__s";
pub const TABLE_GLOBAL_KEY_PREFIX: &str = "__tg";
pub const TABLE_REGIONAL_KEY_PREFIX: &str = "__tr";
const ALPHANUMERICS_NAME_PATTERN: &str = "[a-zA-Z_][a-zA-Z0-9_]*";
lazy_static! {
static ref CATALOG_KEY_PATTERN: Regex =
Regex::new(&format!("^{}-([a-zA-Z_]+)$", CATALOG_KEY_PREFIX)).unwrap();
static ref CATALOG_KEY_PATTERN: Regex = Regex::new(&format!(
"^{CATALOG_KEY_PREFIX}-({ALPHANUMERICS_NAME_PATTERN})$"
))
.unwrap();
}
lazy_static! {
static ref SCHEMA_KEY_PATTERN: Regex = Regex::new(&format!(
"^{}-([a-zA-Z_]+)-([a-zA-Z_]+)$",
SCHEMA_KEY_PREFIX
"^{SCHEMA_KEY_PREFIX}-({ALPHANUMERICS_NAME_PATTERN})-({ALPHANUMERICS_NAME_PATTERN})$"
))
.unwrap();
}
lazy_static! {
static ref TABLE_GLOBAL_KEY_PATTERN: Regex = Regex::new(&format!(
"^{}-([a-zA-Z_]+)-([a-zA-Z_]+)-([a-zA-Z0-9_]+)$",
TABLE_GLOBAL_KEY_PREFIX
"^{TABLE_GLOBAL_KEY_PREFIX}-({ALPHANUMERICS_NAME_PATTERN})-({ALPHANUMERICS_NAME_PATTERN})-({ALPHANUMERICS_NAME_PATTERN})$"
))
.unwrap();
}
lazy_static! {
static ref TABLE_REGIONAL_KEY_PATTERN: Regex = Regex::new(&format!(
"^{}-([a-zA-Z_]+)-([a-zA-Z_]+)-([a-zA-Z0-9_]+)-([0-9]+)$",
TABLE_REGIONAL_KEY_PREFIX
"^{TABLE_REGIONAL_KEY_PREFIX}-({ALPHANUMERICS_NAME_PATTERN})-({ALPHANUMERICS_NAME_PATTERN})-({ALPHANUMERICS_NAME_PATTERN})-([0-9]+)$"
))
.unwrap();
}
pub fn build_catalog_prefix() -> String {
format!("{}-", CATALOG_KEY_PREFIX)
format!("{CATALOG_KEY_PREFIX}-")
}
pub fn build_schema_prefix(catalog_name: impl AsRef<str>) -> String {
format!("{}-{}-", SCHEMA_KEY_PREFIX, catalog_name.as_ref())
format!("{SCHEMA_KEY_PREFIX}-{}-", catalog_name.as_ref())
}
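A sketch of the key shapes these prefixes and patterns agree on; the catalog and schema names are illustrative:

#[test]
fn example_key_shapes() {
    assert_eq!("__c-", build_catalog_prefix());
    assert_eq!("__s-greptime-", build_schema_prefix("greptime"));
    // The patterns anchor on the same prefixes and capture the names.
    assert!(CATALOG_KEY_PATTERN.is_match("__c-greptime"));
    assert!(SCHEMA_KEY_PATTERN.is_match("__s-greptime-public"));
}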
pub fn build_table_global_prefix(
@@ -70,8 +72,7 @@ pub fn build_table_global_prefix(
schema_name: impl AsRef<str>,
) -> String {
format!(
"{}-{}-{}-",
TABLE_GLOBAL_KEY_PREFIX,
"{TABLE_GLOBAL_KEY_PREFIX}-{}-{}-",
catalog_name.as_ref(),
schema_name.as_ref()
)
@@ -90,6 +91,7 @@ pub fn build_table_regional_prefix(
}
/// Table global info has only one key across all datanodes, so it does not have a `node_id` field.
#[derive(Clone)]
pub struct TableGlobalKey {
pub catalog_name: String,
pub schema_name: String,
@@ -126,17 +128,19 @@ impl TableGlobalKey {
/// Table global info contains necessary info for a datanode to create table regions, including
/// table id, table meta(schema...), region id allocation across datanodes.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct TableGlobalValue {
/// Table id is the same across all datanodes.
pub id: TableId,
/// Id of the datanode that created the global table info kv; only for debugging.
pub node_id: u64,
// TODO(LFC): Maybe remove it?
/// Allocation of region ids across all datanodes.
pub regions_id_map: HashMap<u64, Vec<u32>>,
// TODO(LFC): Too much for assembling the table schema that DistTable needs, find another way.
pub meta: RawTableMeta,
pub table_info: RawTableInfo,
}
impl TableGlobalValue {
pub fn table_id(&self) -> TableId {
self.table_info.ident.table_id
}
}
/// Table regional info that varies between datanode, so it contains a `node_id` field.
@@ -258,6 +262,10 @@ macro_rules! define_catalog_value {
.context(DeserializeCatalogEntryValueSnafu { raw: s.as_ref() })
}
pub fn from_bytes(bytes: impl AsRef<[u8]>) -> Result<Self, Error> {
Self::parse(&String::from_utf8_lossy(bytes.as_ref()))
}
pub fn as_bytes(&self) -> Result<Vec<u8>, Error> {
Ok(serde_json::to_string(self)
.context(SerializeCatalogEntryValueSnafu)?
@@ -279,6 +287,7 @@ define_catalog_value!(
mod tests {
use datatypes::prelude::ConcreteDataType;
use datatypes::schema::{ColumnSchema, RawSchema, Schema};
use table::metadata::{RawTableMeta, TableIdent, TableType};
use super::*;
@@ -339,14 +348,32 @@ mod tests {
region_numbers: vec![1],
};
let table_info = RawTableInfo {
ident: TableIdent {
table_id: 42,
version: 1,
},
name: "table_1".to_string(),
desc: Some("blah".to_string()),
catalog_name: "catalog_1".to_string(),
schema_name: "schema_1".to_string(),
meta,
table_type: TableType::Base,
};
let value = TableGlobalValue {
id: 42,
node_id: 0,
regions_id_map: HashMap::from([(0, vec![1, 2, 3])]),
meta,
table_info,
};
let serialized = serde_json::to_string(&value).unwrap();
let deserialized = TableGlobalValue::parse(&serialized).unwrap();
let deserialized = TableGlobalValue::parse(serialized).unwrap();
assert_eq!(value, deserialized);
}
#[test]
fn test_table_global_value_compatibility() {
let s = r#"{"node_id":1,"regions_id_map":{"1":[0]},"table_info":{"ident":{"table_id":1098,"version":1},"name":"container_cpu_limit","desc":"Created on insertion","catalog_name":"greptime","schema_name":"dd","meta":{"schema":{"column_schemas":[{"name":"container_id","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"container_name","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"docker_image","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"host","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"image_name","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"image_tag","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"interval","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"runtime","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"short_image","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"type","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"dd_value","data_type":{"Float64":{}},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"ts","data_type":{"Timestamp":{"Millisecond":null}},"is_nullable":false,"is_time_index":true,"default_constraint":null,"metadata":{"greptime:time_index":"true"}},{"name":"git.repository_url","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}}],"timestamp_index":11,"version":1},"primary_key_indices":[0,1,2,3,4,5,6,7,8,9,12],"value_indices":[10,11],"engine":"mito","next_column_id":12,"region_numbers":[],"engine_options":{},"options":{},"created_on":"1970-01-01T00:00:00Z"},"table_type":"Base"}}"#;
TableGlobalValue::parse(s).unwrap();
}
}


@@ -1,10 +1,10 @@
// Copyright 2022 Greptime Team
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -15,10 +15,11 @@
#![feature(assert_matches)]
use std::any::Any;
use std::fmt::{Debug, Formatter};
use std::sync::Arc;
use common_telemetry::info;
use snafu::ResultExt;
use snafu::{OptionExt, ResultExt};
use table::engine::{EngineContext, TableEngineRef};
use table::metadata::TableId;
use table::requests::CreateTableRequest;
@@ -28,10 +29,12 @@ use crate::error::{CreateTableSnafu, Result};
pub use crate::schema::{SchemaProvider, SchemaProviderRef};
pub mod error;
pub mod helper;
pub mod local;
pub mod remote;
pub mod schema;
pub mod system;
pub mod table_source;
pub mod tables;
/// Represent a list of named catalogs
@@ -83,12 +86,20 @@ pub trait CatalogManager: CatalogList {
/// Starts a catalog manager.
async fn start(&self) -> Result<()>;
/// Registers a table in the given catalog/schema to the catalog manager,
/// returns the table registered.
async fn register_table(&self, request: RegisterTableRequest) -> Result<usize>;
/// Registers a table within the given catalog/schema to the catalog manager;
/// returns whether the table was registered.
async fn register_table(&self, request: RegisterTableRequest) -> Result<bool>;
/// Register a schema with catalog name and schema name.
async fn register_schema(&self, request: RegisterSchemaRequest) -> Result<usize>;
/// Deregisters a table within the given catalog/schema from the catalog manager;
/// returns whether the table was deregistered.
async fn deregister_table(&self, request: DeregisterTableRequest) -> Result<bool>;
/// Registers a schema with the given catalog and schema name. Returns whether
/// the schema was registered.
async fn register_schema(&self, request: RegisterSchemaRequest) -> Result<bool>;
/// Renames a table to [RenameTableRequest::new_table_name]; returns whether the table was renamed.
async fn rename_table(&self, request: RenameTableRequest) -> Result<bool>;
/// Registers a system table; should be called before starting the manager.
async fn register_system_table(&self, request: RegisterSystemTableRequest)
@@ -97,7 +108,12 @@ pub trait CatalogManager: CatalogList {
fn schema(&self, catalog: &str, schema: &str) -> Result<Option<SchemaProviderRef>>;
/// Returns the table by catalog, schema and table name.
fn table(&self, catalog: &str, schema: &str, table_name: &str) -> Result<Option<TableRef>>;
async fn table(
&self,
catalog: &str,
schema: &str,
table_name: &str,
) -> Result<Option<TableRef>>;
}
pub type CatalogManagerRef = Arc<dyn CatalogManager>;
@@ -123,17 +139,40 @@ pub struct RegisterTableRequest {
pub table: TableRef,
}
impl Debug for RegisterTableRequest {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
f.debug_struct("RegisterTableRequest")
.field("catalog", &self.catalog)
.field("schema", &self.schema)
.field("table_name", &self.table_name)
.field("table_id", &self.table_id)
.field("table", &self.table.table_info())
.finish()
}
}
#[derive(Debug, Clone)]
pub struct RenameTableRequest {
pub catalog: String,
pub schema: String,
pub table_name: String,
pub new_table_name: String,
pub table_id: TableId,
}
#[derive(Debug, Clone)]
pub struct DeregisterTableRequest {
pub catalog: String,
pub schema: String,
pub table_name: String,
}
#[derive(Debug, Clone)]
pub struct RegisterSchemaRequest {
pub catalog: String,
pub schema: String,
}
/// Formats table fully-qualified name
pub fn format_full_table_name(catalog: &str, schema: &str, table: &str) -> String {
format!("{}.{}.{}", catalog, schema, table)
}
pub trait CatalogProviderFactory {
fn create(&self, catalog_name: String) -> CatalogProviderRef;
}
@@ -153,16 +192,18 @@ pub(crate) async fn handle_system_table_request<'a, M: CatalogManager>(
let table_name = &req.create_table_request.table_name;
let table_id = req.create_table_request.id;
let table = if let Some(table) = manager.table(catalog_name, schema_name, table_name)? {
let table = manager.table(catalog_name, schema_name, table_name).await?;
let table = if let Some(table) = table {
table
} else {
let table = engine
.create_table(&EngineContext::default(), req.create_table_request.clone())
.await
.with_context(|_| CreateTableSnafu {
table_info: format!(
"{}.{}.{}, id: {}",
catalog_name, schema_name, table_name, table_id,
table_info: common_catalog::format_full_table_name(
catalog_name,
schema_name,
table_name,
),
})?;
manager
@@ -174,7 +215,7 @@ pub(crate) async fn handle_system_table_request<'a, M: CatalogManager>(
table: table.clone(),
})
.await?;
info!("Created and registered system table: {}", table_name);
info!("Created and registered system table: {table_name}");
table
};
if let Some(hook) = req.open_hook {
@@ -183,3 +224,40 @@ pub(crate) async fn handle_system_table_request<'a, M: CatalogManager>(
}
Ok(())
}
/// Returns the number of regions served by this datanode.
pub async fn region_number(catalog_manager: &CatalogManagerRef) -> Result<u64> {
let mut region_number: u64 = 0;
for catalog_name in catalog_manager.catalog_names()? {
let catalog =
catalog_manager
.catalog(&catalog_name)?
.context(error::CatalogNotFoundSnafu {
catalog_name: &catalog_name,
})?;
for schema_name in catalog.schema_names()? {
let schema = catalog
.schema(&schema_name)?
.context(error::SchemaNotFoundSnafu {
catalog: &catalog_name,
schema: &schema_name,
})?;
for table_name in schema.table_names()? {
let table =
schema
.table(&table_name)
.await?
.context(error::TableNotFoundSnafu {
table_info: &table_name,
})?;
let region_numbers = &table.table_info().meta.region_numbers;
region_number += region_numbers.len() as u64;
}
}
}
Ok(region_number)
}
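A hedged sketch of calling the helper above, e.g. when a datanode reports its load; `manager` and the log message are illustrative:

async fn report_regions(manager: &CatalogManagerRef) -> Result<()> {
    let regions = region_number(manager).await?;
    common_telemetry::info!("datanode currently serves {regions} regions");
    Ok(())
}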


@@ -1,10 +1,10 @@
// Copyright 2022 Greptime Team
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,


@@ -1,10 +1,10 @@
// Copyright 2022 Greptime Team
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -20,8 +20,9 @@ use common_catalog::consts::{
DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, INFORMATION_SCHEMA_NAME, MIN_USER_TABLE_ID,
SYSTEM_CATALOG_NAME, SYSTEM_CATALOG_TABLE_NAME,
};
use common_catalog::format_full_table_name;
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
use common_telemetry::info;
use common_telemetry::{error, info};
use datatypes::prelude::ScalarVector;
use datatypes::vectors::{BinaryVector, UInt8Vector};
use futures_util::lock::Mutex;
@@ -34,9 +35,9 @@ use table::table::TableIdProvider;
use table::TableRef;
use crate::error::{
CatalogNotFoundSnafu, IllegalManagerStateSnafu, OpenTableSnafu, ReadSystemCatalogSnafu, Result,
SchemaExistsSnafu, SchemaNotFoundSnafu, SystemCatalogSnafu, SystemCatalogTypeMismatchSnafu,
TableExistsSnafu, TableNotFoundSnafu,
self, CatalogNotFoundSnafu, IllegalManagerStateSnafu, OpenTableSnafu, ReadSystemCatalogSnafu,
Result, SchemaExistsSnafu, SchemaNotFoundSnafu, SystemCatalogSnafu,
SystemCatalogTypeMismatchSnafu, TableExistsSnafu, TableNotFoundSnafu,
};
use crate::local::memory::{MemoryCatalogManager, MemoryCatalogProvider, MemorySchemaProvider};
use crate::system::{
@@ -45,9 +46,9 @@ use crate::system::{
};
use crate::tables::SystemCatalog;
use crate::{
format_full_table_name, handle_system_table_request, CatalogList, CatalogManager,
CatalogProvider, CatalogProviderRef, RegisterSchemaRequest, RegisterSystemTableRequest,
RegisterTableRequest, SchemaProvider, SchemaProviderRef,
handle_system_table_request, CatalogList, CatalogManager, CatalogProvider, CatalogProviderRef,
DeregisterTableRequest, RegisterSchemaRequest, RegisterSystemTableRequest,
RegisterTableRequest, RenameTableRequest, SchemaProvider, SchemaProviderRef,
};
/// A `CatalogManager` consists of a system catalog and a bunch of user catalogs.
@@ -57,6 +58,7 @@ pub struct LocalCatalogManager {
engine: TableEngineRef,
next_table_id: AtomicU32,
init_lock: Mutex<bool>,
register_lock: Mutex<()>,
system_table_requests: Mutex<Vec<RegisterSystemTableRequest>>,
}
@@ -76,6 +78,7 @@ impl LocalCatalogManager {
engine,
next_table_id: AtomicU32::new(MIN_USER_TABLE_ID),
init_lock: Mutex::new(false),
register_lock: Mutex::new(()),
system_table_requests: Mutex::new(Vec::default()),
})
}
@@ -143,27 +146,34 @@ impl LocalCatalogManager {
/// Convert `RecordBatch` to a vector of `Entry`.
fn record_batch_to_entry(rb: RecordBatch) -> Result<Vec<Entry>> {
ensure!(
rb.df_recordbatch.columns().len() >= 6,
rb.num_columns() >= 6,
SystemCatalogSnafu {
msg: format!("Length mismatch: {}", rb.df_recordbatch.columns().len())
msg: format!("Length mismatch: {}", rb.num_columns())
}
);
let entry_type = UInt8Vector::try_from_arrow_array(&rb.df_recordbatch.columns()[0])
.with_context(|_| SystemCatalogTypeMismatchSnafu {
data_type: rb.df_recordbatch.columns()[ENTRY_TYPE_INDEX]
.data_type()
.clone(),
let entry_type = rb
.column(ENTRY_TYPE_INDEX)
.as_any()
.downcast_ref::<UInt8Vector>()
.with_context(|| SystemCatalogTypeMismatchSnafu {
data_type: rb.column(ENTRY_TYPE_INDEX).data_type(),
})?;
let key = BinaryVector::try_from_arrow_array(&rb.df_recordbatch.columns()[1])
.with_context(|_| SystemCatalogTypeMismatchSnafu {
data_type: rb.df_recordbatch.columns()[KEY_INDEX].data_type().clone(),
let key = rb
.column(KEY_INDEX)
.as_any()
.downcast_ref::<BinaryVector>()
.with_context(|| SystemCatalogTypeMismatchSnafu {
data_type: rb.column(KEY_INDEX).data_type(),
})?;
let value = BinaryVector::try_from_arrow_array(&rb.df_recordbatch.columns()[3])
.with_context(|_| SystemCatalogTypeMismatchSnafu {
data_type: rb.df_recordbatch.columns()[VALUE_INDEX].data_type().clone(),
let value = rb
.column(VALUE_INDEX)
.as_any()
.downcast_ref::<BinaryVector>()
.with_context(|| SystemCatalogTypeMismatchSnafu {
data_type: rb.column(VALUE_INDEX).data_type(),
})?;
let mut res = Vec::with_capacity(rb.num_rows());
@@ -232,7 +242,8 @@ impl LocalCatalogManager {
let schema = catalog
.schema(&t.schema_name)?
.context(SchemaNotFoundSnafu {
schema_info: format!("{}.{}", &t.catalog_name, &t.schema_name),
catalog: &t.catalog_name,
schema: &t.schema_name,
})?;
let context = EngineContext {};
@@ -308,7 +319,7 @@ impl CatalogManager for LocalCatalogManager {
self.init().await
}
async fn register_table(&self, request: RegisterTableRequest) -> Result<usize> {
async fn register_table(&self, request: RegisterTableRequest) -> Result<bool> {
let started = self.init_lock.lock().await;
ensure!(
@@ -328,30 +339,119 @@ impl CatalogManager for LocalCatalogManager {
let schema = catalog
.schema(schema_name)?
.with_context(|| SchemaNotFoundSnafu {
schema_info: format!("{}.{}", catalog_name, schema_name),
catalog: catalog_name,
schema: schema_name,
})?;
if schema.table_exist(&request.table_name)? {
return TableExistsSnafu {
table: format_full_table_name(catalog_name, schema_name, &request.table_name),
{
let _lock = self.register_lock.lock().await;
if let Some(existing) = schema.table(&request.table_name).await? {
if existing.table_info().ident.table_id != request.table_id {
error!(
"Unexpected table register request: {:?}, existing: {:?}",
request,
existing.table_info()
);
return TableExistsSnafu {
table: format_full_table_name(
catalog_name,
schema_name,
&request.table_name,
),
}
.fail();
}
// Try to register table with same table id, just ignore.
Ok(false)
} else {
// table does not exist
self.system
.register_table(
catalog_name.clone(),
schema_name.clone(),
request.table_name.clone(),
request.table_id,
)
.await?;
schema.register_table(request.table_name, request.table)?;
Ok(true)
}
.fail();
}
}
async fn rename_table(&self, request: RenameTableRequest) -> Result<bool> {
let started = self.init_lock.lock().await;
ensure!(
*started,
IllegalManagerStateSnafu {
msg: "Catalog manager not started",
}
);
let catalog_name = &request.catalog;
let schema_name = &request.schema;
let catalog = self
.catalogs
.catalog(catalog_name)?
.context(CatalogNotFoundSnafu { catalog_name })?;
let schema = catalog
.schema(schema_name)?
.with_context(|| SchemaNotFoundSnafu {
catalog: catalog_name,
schema: schema_name,
})?;
// rename table in system catalog
self.system
.register_table(
catalog_name.clone(),
schema_name.clone(),
request.table_name.clone(),
request.new_table_name.clone(),
request.table_id,
)
.await?;
schema.register_table(request.table_name, request.table)?;
Ok(1)
Ok(schema
.rename_table(&request.table_name, request.new_table_name)
.is_ok())
}
async fn register_schema(&self, request: RegisterSchemaRequest) -> Result<usize> {
async fn deregister_table(&self, request: DeregisterTableRequest) -> Result<bool> {
{
let started = *self.init_lock.lock().await;
ensure!(started, IllegalManagerStateSnafu { msg: "not started" });
}
{
let _ = self.register_lock.lock().await;
let DeregisterTableRequest {
catalog,
schema,
table_name,
} = &request;
let table_id = self
.catalogs
.table(catalog, schema, table_name)
.await?
.with_context(|| error::TableNotExistSnafu {
table: format_full_table_name(catalog, schema, table_name),
})?
.table_info()
.ident
.table_id;
if !self.system.deregister_table(&request, table_id).await? {
return Ok(false);
}
self.catalogs.deregister_table(request).await
}
}
async fn register_schema(&self, request: RegisterSchemaRequest) -> Result<bool> {
let started = self.init_lock.lock().await;
ensure!(
*started,
@@ -366,17 +466,21 @@ impl CatalogManager for LocalCatalogManager {
.catalogs
.catalog(catalog_name)?
.context(CatalogNotFoundSnafu { catalog_name })?;
if catalog.schema(schema_name)?.is_some() {
return SchemaExistsSnafu {
schema: schema_name,
}
.fail();
{
let _lock = self.register_lock.lock().await;
ensure!(
catalog.schema(schema_name)?.is_none(),
SchemaExistsSnafu {
schema: schema_name,
}
);
self.system
.register_schema(request.catalog, schema_name.clone())
.await?;
catalog.register_schema(request.schema, Arc::new(MemorySchemaProvider::new()))?;
Ok(true)
}
self.system
.register_schema(request.catalog, schema_name.clone())
.await?;
catalog.register_schema(request.schema, Arc::new(MemorySchemaProvider::new()))?;
Ok(1)
}
async fn register_system_table(&self, request: RegisterSystemTableRequest) -> Result<()> {
@@ -402,7 +506,7 @@ impl CatalogManager for LocalCatalogManager {
.schema(schema)
}
fn table(
async fn table(
&self,
catalog_name: &str,
schema_name: &str,
@@ -415,9 +519,10 @@ impl CatalogManager for LocalCatalogManager {
let schema = catalog
.schema(schema_name)?
.with_context(|| SchemaNotFoundSnafu {
schema_info: format!("{}.{}", catalog_name, schema_name),
catalog: catalog_name,
schema: schema_name,
})?;
schema.table(table_name)
schema.table(table_name).await
}
}


@@ -1,10 +1,10 @@
// Copyright 2022 Greptime Team
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -18,17 +18,22 @@ use std::collections::HashMap;
use std::sync::atomic::{AtomicU32, Ordering};
use std::sync::{Arc, RwLock};
use async_trait::async_trait;
use common_catalog::consts::MIN_USER_TABLE_ID;
use snafu::OptionExt;
use common_telemetry::error;
use snafu::{ensure, OptionExt};
use table::metadata::TableId;
use table::table::TableIdProvider;
use table::TableRef;
use crate::error::{CatalogNotFoundSnafu, Result, SchemaNotFoundSnafu, TableExistsSnafu};
use crate::error::{
self, CatalogNotFoundSnafu, Result, SchemaNotFoundSnafu, TableExistsSnafu, TableNotFoundSnafu,
};
use crate::schema::SchemaProvider;
use crate::{
CatalogList, CatalogManager, CatalogProvider, CatalogProviderRef, RegisterSchemaRequest,
RegisterSystemTableRequest, RegisterTableRequest, SchemaProviderRef,
CatalogList, CatalogManager, CatalogProvider, CatalogProviderRef, DeregisterTableRequest,
RegisterSchemaRequest, RegisterSystemTableRequest, RegisterTableRequest, RenameTableRequest,
SchemaProviderRef,
};
/// Simple in-memory list of catalogs
@@ -69,7 +74,7 @@ impl CatalogManager for MemoryCatalogManager {
Ok(())
}
async fn register_table(&self, request: RegisterTableRequest) -> Result<usize> {
async fn register_table(&self, request: RegisterTableRequest) -> Result<bool> {
let catalogs = self.catalogs.write().unwrap();
let catalog = catalogs
.get(&request.catalog)
@@ -80,14 +85,53 @@ impl CatalogManager for MemoryCatalogManager {
let schema = catalog
.schema(&request.schema)?
.with_context(|| SchemaNotFoundSnafu {
schema_info: format!("{}.{}", &request.catalog, &request.schema),
catalog: &request.catalog,
schema: &request.schema,
})?;
schema
.register_table(request.table_name, request.table)
.map(|v| if v.is_some() { 0 } else { 1 })
.map(|v| v.is_none())
}
async fn register_schema(&self, request: RegisterSchemaRequest) -> Result<usize> {
async fn rename_table(&self, request: RenameTableRequest) -> Result<bool> {
let catalogs = self.catalogs.write().unwrap();
let catalog = catalogs
.get(&request.catalog)
.context(CatalogNotFoundSnafu {
catalog_name: &request.catalog,
})?
.clone();
let schema = catalog
.schema(&request.schema)?
.with_context(|| SchemaNotFoundSnafu {
catalog: &request.catalog,
schema: &request.schema,
})?;
Ok(schema
.rename_table(&request.table_name, request.new_table_name)
.is_ok())
}
async fn deregister_table(&self, request: DeregisterTableRequest) -> Result<bool> {
let catalogs = self.catalogs.write().unwrap();
let catalog = catalogs
.get(&request.catalog)
.context(CatalogNotFoundSnafu {
catalog_name: &request.catalog,
})?
.clone();
let schema = catalog
.schema(&request.schema)?
.with_context(|| SchemaNotFoundSnafu {
catalog: &request.catalog,
schema: &request.schema,
})?;
schema
.deregister_table(&request.table_name)
.map(|v| v.is_some())
}
async fn register_schema(&self, request: RegisterSchemaRequest) -> Result<bool> {
let catalogs = self.catalogs.write().unwrap();
let catalog = catalogs
.get(&request.catalog)
@@ -95,11 +139,12 @@ impl CatalogManager for MemoryCatalogManager {
catalog_name: &request.catalog,
})?;
catalog.register_schema(request.schema, Arc::new(MemorySchemaProvider::new()))?;
Ok(1)
Ok(true)
}
async fn register_system_table(&self, _request: RegisterSystemTableRequest) -> Result<()> {
unimplemented!()
// TODO(ruihang): support register system table request
Ok(())
}
fn schema(&self, catalog: &str, schema: &str) -> Result<Option<SchemaProviderRef>> {
@@ -111,16 +156,20 @@ impl CatalogManager for MemoryCatalogManager {
}
}
fn table(&self, catalog: &str, schema: &str, table_name: &str) -> Result<Option<TableRef>> {
let c = self.catalogs.read().unwrap();
let catalog = if let Some(c) = c.get(catalog) {
async fn table(
&self,
catalog: &str,
schema: &str,
table_name: &str,
) -> Result<Option<TableRef>> {
let catalog = {
let c = self.catalogs.read().unwrap();
let Some(c) = c.get(catalog) else { return Ok(None) };
c.clone()
} else {
return Ok(None);
};
match catalog.schema(schema)? {
None => Ok(None),
Some(s) => s.table(table_name),
Some(s) => s.table(table_name).await,
}
}
}
@@ -206,6 +255,10 @@ impl CatalogProvider for MemoryCatalogProvider {
schema: SchemaProviderRef,
) -> Result<Option<SchemaProviderRef>> {
let mut schemas = self.schemas.write().unwrap();
ensure!(
!schemas.contains_key(&name),
error::SchemaExistsSnafu { schema: &name }
);
Ok(schemas.insert(name, schema))
}
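// Since the ensure! guard above rejects duplicate schema names, the Option
// returned from this insert is always None in practice.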
@@ -235,6 +288,7 @@ impl Default for MemorySchemaProvider {
}
}
#[async_trait]
impl SchemaProvider for MemorySchemaProvider {
fn as_any(&self) -> &dyn Any {
self
@@ -245,17 +299,41 @@ impl SchemaProvider for MemorySchemaProvider {
Ok(tables.keys().cloned().collect())
}
fn table(&self, name: &str) -> Result<Option<TableRef>> {
async fn table(&self, name: &str) -> Result<Option<TableRef>> {
let tables = self.tables.read().unwrap();
Ok(tables.get(name).cloned())
}
fn register_table(&self, name: String, table: TableRef) -> Result<Option<TableRef>> {
if self.table_exist(name.as_str())? {
return TableExistsSnafu { table: name }.fail()?;
}
let mut tables = self.tables.write().unwrap();
Ok(tables.insert(name, table))
if let Some(existing) = tables.get(name.as_str()) {
// If a table with the same name but a different table id exists, it's a fatal bug
if existing.table_info().ident.table_id != table.table_info().ident.table_id {
error!(
"Unexpected table register: {:?}, existing: {:?}",
table.table_info(),
existing.table_info()
);
return TableExistsSnafu { table: name }.fail()?;
}
Ok(Some(existing.clone()))
} else {
Ok(tables.insert(name, table))
}
}
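The registration logic above is now idempotent per table id. A minimal sketch of the resulting contract, mirroring the tests further down and assuming the imports already present in this file (NumbersTable::new(id) builds a table with the given table id):

#[test]
fn register_table_contract_sketch() {
    let provider = MemorySchemaProvider::new();
    let table: TableRef = Arc::new(NumbersTable::new(1));
    // First registration: no previous entry under this name.
    assert!(provider
        .register_table("t".to_string(), table.clone())
        .unwrap()
        .is_none());
    // Same table id again: an idempotent no-op returning the existing table.
    assert!(provider
        .register_table("t".to_string(), table)
        .unwrap()
        .is_some());
    // Same name, different table id: rejected as TableAlreadyExists.
    assert!(provider
        .register_table("t".to_string(), Arc::new(NumbersTable::new(2)))
        .is_err());
}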
fn rename_table(&self, name: &str, new_name: String) -> Result<TableRef> {
let mut tables = self.tables.write().unwrap();
if tables.get(name).is_some() {
let table = tables.remove(name).unwrap();
tables.insert(new_name, table.clone());
Ok(table)
} else {
TableNotFoundSnafu {
table_info: name.to_string(),
}
.fail()?
}
}
fn deregister_table(&self, name: &str) -> Result<Option<TableRef>> {
@@ -283,8 +361,8 @@ mod tests {
use super::*;
#[test]
fn test_new_memory_catalog_list() {
#[tokio::test]
async fn test_new_memory_catalog_list() {
let catalog_list = new_memory_catalog_list().unwrap();
let default_catalog = catalog_list.catalog(DEFAULT_CATALOG_NAME).unwrap().unwrap();
@@ -297,9 +375,9 @@ mod tests {
.register_table("numbers".to_string(), Arc::new(NumbersTable::default()))
.unwrap();
let table = default_schema.table("numbers").unwrap();
let table = default_schema.table("numbers").await.unwrap();
assert!(table.is_some());
assert!(default_schema.table("not_exists").unwrap().is_none());
assert!(default_schema.table("not_exists").await.unwrap().is_none());
}
#[tokio::test]
@@ -315,13 +393,93 @@ mod tests {
.unwrap()
.is_none());
assert!(provider.table_exist(table_name).unwrap());
let other_table = NumbersTable::default();
let other_table = NumbersTable::new(12);
let result = provider.register_table(table_name.to_string(), Arc::new(other_table));
let err = result.err().unwrap();
assert!(err.backtrace_opt().is_some());
assert_eq!(StatusCode::TableAlreadyExists, err.status_code());
}
#[tokio::test]
async fn test_mem_provider_rename_table() {
let provider = MemorySchemaProvider::new();
let table_name = "num";
assert!(!provider.table_exist(table_name).unwrap());
let test_table: TableRef = Arc::new(NumbersTable::default());
// register test table
assert!(provider
.register_table(table_name.to_string(), test_table.clone())
.unwrap()
.is_none());
assert!(provider.table_exist(table_name).unwrap());
// rename test table
let new_table_name = "numbers";
provider
.rename_table(table_name, new_table_name.to_string())
.unwrap();
// test old table name not exist
assert!(!provider.table_exist(table_name).unwrap());
assert!(provider.deregister_table(table_name).unwrap().is_none());
// test new table name exists
assert!(provider.table_exist(new_table_name).unwrap());
let registered_table = provider.table(new_table_name).await.unwrap().unwrap();
assert_eq!(
registered_table.table_info().ident.table_id,
test_table.table_info().ident.table_id
);
let other_table = Arc::new(NumbersTable::new(2));
let result = provider.register_table(new_table_name.to_string(), other_table);
let err = result.err().unwrap();
assert_eq!(StatusCode::TableAlreadyExists, err.status_code());
}
#[tokio::test]
async fn test_catalog_rename_table() {
let catalog = MemoryCatalogManager::default();
let schema = catalog
.schema(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME)
.unwrap()
.unwrap();
// register table
let table_name = "num";
let table_id = 2333;
let table: TableRef = Arc::new(NumbersTable::new(table_id));
let register_table_req = RegisterTableRequest {
catalog: DEFAULT_CATALOG_NAME.to_string(),
schema: DEFAULT_SCHEMA_NAME.to_string(),
table_name: table_name.to_string(),
table_id,
table,
};
assert!(catalog.register_table(register_table_req).await.unwrap());
assert!(schema.table_exist(table_name).unwrap());
// rename table
let new_table_name = "numbers";
let rename_table_req = RenameTableRequest {
catalog: DEFAULT_CATALOG_NAME.to_string(),
schema: DEFAULT_SCHEMA_NAME.to_string(),
table_name: table_name.to_string(),
new_table_name: new_table_name.to_string(),
table_id,
};
assert!(catalog.rename_table(rename_table_req).await.unwrap());
assert!(!schema.table_exist(table_name).unwrap());
assert!(schema.table_exist(new_table_name).unwrap());
let registered_table = catalog
.table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, new_table_name)
.await
.unwrap()
.unwrap();
assert_eq!(registered_table.table_info().ident.table_id, table_id);
}
#[test]
pub fn test_register_if_absent() {
let list = MemoryCatalogManager::default();
@@ -340,4 +498,34 @@ mod tests {
.downcast_ref::<MemoryCatalogManager>()
.unwrap();
}
#[tokio::test]
pub async fn test_catalog_deregister_table() {
let catalog = MemoryCatalogManager::default();
let schema = catalog
.schema(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME)
.unwrap()
.unwrap();
let register_table_req = RegisterTableRequest {
catalog: DEFAULT_CATALOG_NAME.to_string(),
schema: DEFAULT_SCHEMA_NAME.to_string(),
table_name: "numbers".to_string(),
table_id: 2333,
table: Arc::new(NumbersTable::default()),
};
catalog.register_table(register_table_req).await.unwrap();
assert!(schema.table_exist("numbers").unwrap());
let deregister_table_req = DeregisterTableRequest {
catalog: DEFAULT_CATALOG_NAME.to_string(),
schema: DEFAULT_SCHEMA_NAME.to_string(),
table_name: "numbers".to_string(),
};
catalog
.deregister_table(deregister_table_req)
.await
.unwrap();
assert!(!schema.table_exist("numbers").unwrap());
}
}

View File

@@ -1,10 +1,10 @@
// Copyright 2022 Greptime Team
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,

View File

@@ -1,10 +1,10 @@
// Copyright 2022 Greptime Team
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,

View File

@@ -1,10 +1,10 @@
// Copyright 2022 Greptime Team
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -13,20 +13,19 @@
// limitations under the License.
use std::any::Any;
use std::collections::HashMap;
use std::collections::{HashMap, HashSet};
use std::pin::Pin;
use std::sync::Arc;
use arc_swap::ArcSwap;
use async_stream::stream;
use async_trait::async_trait;
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, MIN_USER_TABLE_ID};
use common_catalog::{
build_catalog_prefix, build_schema_prefix, build_table_global_prefix, CatalogKey, CatalogValue,
SchemaKey, SchemaValue, TableGlobalKey, TableGlobalValue, TableRegionalKey, TableRegionalValue,
};
use common_telemetry::{debug, info};
use common_telemetry::{debug, error, info};
use dashmap::DashMap;
use futures::Stream;
use futures_util::StreamExt;
use parking_lot::RwLock;
use snafu::{OptionExt, ResultExt};
use table::engine::{EngineContext, TableEngineRef};
use table::metadata::TableId;
@@ -36,24 +35,28 @@ use table::TableRef;
use tokio::sync::Mutex;
use crate::error::{
CatalogNotFoundSnafu, CreateTableSnafu, InvalidCatalogValueSnafu, InvalidTableSchemaSnafu,
OpenTableSnafu, Result, SchemaNotFoundSnafu, TableExistsSnafu,
CatalogNotFoundSnafu, CreateTableSnafu, InvalidCatalogValueSnafu, OpenTableSnafu, Result,
SchemaNotFoundSnafu, TableExistsSnafu, UnimplementedSnafu,
};
use crate::helper::{
build_catalog_prefix, build_schema_prefix, build_table_global_prefix, CatalogKey, CatalogValue,
SchemaKey, SchemaValue, TableGlobalKey, TableGlobalValue, TableRegionalKey, TableRegionalValue,
CATALOG_KEY_PREFIX,
};
use crate::remote::{Kv, KvBackendRef};
use crate::{
handle_system_table_request, CatalogList, CatalogManager, CatalogProvider, CatalogProviderRef,
RegisterSchemaRequest, RegisterSystemTableRequest, RegisterTableRequest, SchemaProvider,
SchemaProviderRef,
DeregisterTableRequest, RegisterSchemaRequest, RegisterSystemTableRequest,
RegisterTableRequest, RenameTableRequest, SchemaProvider, SchemaProviderRef,
};
/// Catalog manager based on metasrv.
pub struct RemoteCatalogManager {
node_id: u64,
backend: KvBackendRef,
catalogs: Arc<ArcSwap<HashMap<String, CatalogProviderRef>>>,
catalogs: Arc<RwLock<DashMap<String, CatalogProviderRef>>>,
engine: TableEngineRef,
system_table_requests: Mutex<Vec<RegisterSystemTableRequest>>,
mutex: Arc<Mutex<()>>,
}
impl RemoteCatalogManager {
@@ -64,7 +67,6 @@ impl RemoteCatalogManager {
backend,
catalogs: Default::default(),
system_table_requests: Default::default(),
mutex: Default::default(),
}
}
@@ -108,9 +110,13 @@ impl RemoteCatalogManager {
debug!("Ignoring non-catalog key: {}", String::from_utf8_lossy(&k));
continue;
}
let key = CatalogKey::parse(&String::from_utf8_lossy(&k))
.context(InvalidCatalogValueSnafu)?;
yield Ok(key)
let catalog_key = String::from_utf8_lossy(&k);
if let Ok(key) = CatalogKey::parse(&catalog_key) {
yield Ok(key)
} else {
error!("Invalid catalog key: {:?}", catalog_key);
}
}
}))
}
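// With this change, catalog-key iteration is tolerant: an unparsable key is
// logged and skipped instead of failing the whole stream.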
@@ -154,8 +160,8 @@ impl RemoteCatalogManager {
}
let table_key = TableGlobalKey::parse(&String::from_utf8_lossy(&k))
.context(InvalidCatalogValueSnafu)?;
let table_value = TableGlobalValue::parse(&String::from_utf8_lossy(&v))
.context(InvalidCatalogValueSnafu)?;
let table_value =
TableGlobalValue::from_bytes(&v).context(InvalidCatalogValueSnafu)?;
info!(
"Found catalog table entry, key: {}, value: {:?}",
@@ -250,10 +256,7 @@ impl RemoteCatalogManager {
let table_ref = self.open_or_create_table(&table_key, &table_value).await?;
schema.register_table(table_key.table_name.to_string(), table_ref)?;
info!("Registered table {}", &table_key.table_name);
if table_value.id > max_table_id {
info!("Max table id: {} -> {}", max_table_id, table_value.id);
max_table_id = table_value.id;
}
max_table_id = max_table_id.max(table_value.table_id());
table_num += 1;
}
info!(
@@ -311,25 +314,29 @@ impl RemoteCatalogManager {
..
} = table_key;
let table_id = table_value.table_id();
let TableGlobalValue {
id,
meta,
table_info,
regions_id_map,
..
} = table_value;
// unwrap safety: `regions_id_map` is checked before this table is yielded in `iter_remote_tables`
let region_numbers = regions_id_map.get(&self.node_id).unwrap();
let request = OpenTableRequest {
catalog_name: catalog_name.clone(),
schema_name: schema_name.clone(),
table_name: table_name.clone(),
table_id: *id,
table_id,
};
match self
.engine
.open_table(&context, request)
.await
.with_context(|_| OpenTableSnafu {
table_info: format!("{}.{}.{}, id:{}", catalog_name, schema_name, table_name, id,),
table_info: format!("{catalog_name}.{schema_name}.{table_name}, id:{table_id}"),
})? {
Some(table) => {
info!(
@@ -344,22 +351,15 @@ impl RemoteCatalogManager {
catalog_name, schema_name, table_name
);
let schema = meta
.schema
.clone()
.try_into()
.context(InvalidTableSchemaSnafu {
table_info: format!("{}.{}.{}", catalog_name, schema_name, table_name,),
schema: meta.schema.clone(),
})?;
let meta = &table_info.meta;
let req = CreateTableRequest {
id: *id,
id: table_id,
catalog_name: catalog_name.clone(),
schema_name: schema_name.clone(),
table_name: table_name.clone(),
desc: None,
schema: Arc::new(schema),
region_numbers: regions_id_map.get(&self.node_id).unwrap().clone(), // this unwrap is safe because region_id_map is checked in `iter_remote_tables`
schema: meta.schema.clone(),
region_numbers: region_numbers.clone(),
primary_key_indices: meta.primary_key_indices.clone(),
create_if_not_exists: true,
table_options: meta.options.clone(),
@@ -371,7 +371,7 @@ impl RemoteCatalogManager {
.context(CreateTableSnafu {
table_info: format!(
"{}.{}.{}, id:{}",
&catalog_name, &schema_name, &table_name, id
&catalog_name, &schema_name, &table_name, table_id
),
})
}
@@ -387,7 +387,14 @@ impl CatalogManager for RemoteCatalogManager {
"Initialized catalogs: {:?}",
catalogs.keys().cloned().collect::<Vec<_>>()
);
self.catalogs.store(Arc::new(catalogs));
{
let self_catalogs = self.catalogs.read();
catalogs.into_iter().for_each(|(k, v)| {
self_catalogs.insert(k, v);
});
}
info!("Max table id allocated: {}", max_table_id);
let mut system_table_requests = self.system_table_requests.lock().await;
@@ -405,7 +412,7 @@ impl CatalogManager for RemoteCatalogManager {
Ok(())
}
async fn register_table(&self, request: RegisterTableRequest) -> Result<usize> {
async fn register_table(&self, request: RegisterTableRequest) -> Result<bool> {
let catalog_name = request.catalog;
let schema_name = request.schema;
let catalog_provider = self.catalog(&catalog_name)?.context(CatalogNotFoundSnafu {
@@ -415,7 +422,8 @@ impl CatalogManager for RemoteCatalogManager {
catalog_provider
.schema(&schema_name)?
.with_context(|| SchemaNotFoundSnafu {
schema_info: format!("{}.{}", &catalog_name, &schema_name),
catalog: &catalog_name,
schema: &schema_name,
})?;
if schema_provider.table_exist(&request.table_name)? {
return TableExistsSnafu {
@@ -424,10 +432,24 @@ impl CatalogManager for RemoteCatalogManager {
.fail();
}
schema_provider.register_table(request.table_name, request.table)?;
Ok(1)
Ok(true)
}
async fn register_schema(&self, request: RegisterSchemaRequest) -> Result<usize> {
async fn deregister_table(&self, request: DeregisterTableRequest) -> Result<bool> {
let catalog_name = &request.catalog;
let schema_name = &request.schema;
let schema = self
.schema(catalog_name, schema_name)?
.context(SchemaNotFoundSnafu {
catalog: catalog_name,
schema: schema_name,
})?;
let result = schema.deregister_table(&request.table_name)?;
Ok(result.is_none())
}
async fn register_schema(&self, request: RegisterSchemaRequest) -> Result<bool> {
let catalog_name = request.catalog;
let schema_name = request.schema;
let catalog_provider = self.catalog(&catalog_name)?.context(CatalogNotFoundSnafu {
@@ -435,7 +457,14 @@ impl CatalogManager for RemoteCatalogManager {
})?;
let schema_provider = self.new_schema_provider(&catalog_name, &schema_name);
catalog_provider.register_schema(schema_name, schema_provider)?;
Ok(1)
Ok(true)
}
async fn rename_table(&self, _request: RenameTableRequest) -> Result<bool> {
UnimplementedSnafu {
operation: "rename table",
}
.fail()
}
async fn register_system_table(&self, request: RegisterSystemTableRequest) -> Result<()> {
@@ -452,7 +481,7 @@ impl CatalogManager for RemoteCatalogManager {
.schema(schema)
}
fn table(
async fn table(
&self,
catalog_name: &str,
schema_name: &str,
@@ -464,9 +493,10 @@ impl CatalogManager for RemoteCatalogManager {
let schema = catalog
.schema(schema_name)?
.with_context(|| SchemaNotFoundSnafu {
schema_info: format!("{}.{}", catalog_name, schema_name),
catalog: catalog_name,
schema: schema_name,
})?;
schema.table(table_name)
schema.table(table_name).await
}
}
@@ -482,12 +512,10 @@ impl CatalogList for RemoteCatalogManager {
) -> Result<Option<CatalogProviderRef>> {
let key = self.build_catalog_key(&name).to_string();
let backend = self.backend.clone();
let mutex = self.mutex.clone();
let catalogs = self.catalogs.clone();
std::thread::spawn(|| {
common_runtime::block_on_write(async move {
let _guard = mutex.lock().await;
backend
.set(
key.as_bytes(),
@@ -496,11 +524,10 @@ impl CatalogList for RemoteCatalogManager {
.context(InvalidCatalogValueSnafu)?,
)
.await?;
let prev_catalogs = catalogs.load();
let mut new_catalogs = HashMap::with_capacity(prev_catalogs.len() + 1);
new_catalogs.clone_from(&prev_catalogs);
let prev = new_catalogs.insert(name, catalog);
catalogs.store(Arc::new(new_catalogs));
let catalogs = catalogs.read();
let prev = catalogs.insert(name, catalog.clone());
Ok(prev)
})
})
@@ -510,12 +537,65 @@ impl CatalogList for RemoteCatalogManager {
/// List all catalogs from metasrv
fn catalog_names(&self) -> Result<Vec<String>> {
Ok(self.catalogs.load().keys().cloned().collect::<Vec<_>>())
let catalogs = self.catalogs.read();
Ok(catalogs.iter().map(|k| k.key().to_string()).collect())
}
/// Read catalog info of given name from metasrv.
fn catalog(&self, name: &str) -> Result<Option<CatalogProviderRef>> {
Ok(self.catalogs.load().get(name).cloned())
{
let catalogs = self.catalogs.read();
let catalog = catalogs.get(name);
if let Some(catalog) = catalog {
return Ok(Some(catalog.clone()));
}
}
let catalogs = self.catalogs.write();
let catalog = catalogs.get(name);
if let Some(catalog) = catalog {
return Ok(Some(catalog.clone()));
}
// There is no incremental catalog syncing between datanode and meta yet, so we fetch catalogs
// from meta on demand. This can be removed once incremental syncing is implemented in the datanode.
let backend = self.backend.clone();
let catalogs_from_meta: HashSet<String> = std::thread::spawn(|| {
common_runtime::block_on_read(async move {
let mut stream = backend.range(CATALOG_KEY_PREFIX.as_bytes());
let mut catalogs = HashSet::new();
while let Some(catalog) = stream.next().await {
if let Ok(catalog) = catalog {
let catalog_key = String::from_utf8_lossy(&catalog.0);
if let Ok(key) = CatalogKey::parse(&catalog_key) {
catalogs.insert(key.catalog_name);
}
}
}
catalogs
})
})
.join()
.unwrap();
catalogs.retain(|catalog_name, _| catalogs_from_meta.get(catalog_name).is_some());
for catalog in catalogs_from_meta {
catalogs
.entry(catalog.clone())
.or_insert(self.new_catalog_provider(&catalog));
}
let catalog = catalogs.get(name);
Ok(catalog.as_deref().cloned())
}
}
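The lookup above follows a double-checked read/write pattern: probe under the shared read lock first, then take the write lock, re-check, and only then refresh from the remote backend. A standalone sketch of the same idea, with a hypothetical fetch_from_backend standing in for the metasrv scan over CATALOG_KEY_PREFIX:

use std::collections::HashMap;
use std::sync::RwLock;

fn get_or_fetch(cache: &RwLock<HashMap<String, String>>, name: &str) -> Option<String> {
    // Fast path: shared read lock only.
    if let Some(v) = cache.read().unwrap().get(name) {
        return Some(v.clone());
    }
    // Slow path: exclusive lock, re-check (another thread may have filled the
    // entry in the meantime), then fall back to the backend.
    let mut cache = cache.write().unwrap();
    if let Some(v) = cache.get(name) {
        return Some(v.clone());
    }
    let fetched = fetch_from_backend(name)?;
    cache.insert(name.to_string(), fetched.clone());
    Some(fetched)
}

// Hypothetical stand-in for the remote metasrv range scan.
fn fetch_from_backend(name: &str) -> Option<String> {
    Some(format!("catalog:{name}"))
}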
@@ -675,6 +755,7 @@ impl RemoteSchemaProvider {
}
}
#[async_trait]
impl SchemaProvider for RemoteSchemaProvider {
fn as_any(&self) -> &dyn Any {
self
@@ -684,7 +765,7 @@ impl SchemaProvider for RemoteSchemaProvider {
Ok(self.tables.load().keys().cloned().collect::<Vec<_>>())
}
fn table(&self, name: &str) -> Result<Option<TableRef>> {
async fn table(&self, name: &str) -> Result<Option<TableRef>> {
Ok(self.tables.load().get(name).cloned())
}
@@ -727,6 +808,13 @@ impl SchemaProvider for RemoteSchemaProvider {
prev
}
fn rename_table(&self, _name: &str, _new_name: String) -> Result<TableRef> {
UnimplementedSnafu {
operation: "rename table",
}
.fail()
}
fn deregister_table(&self, name: &str) -> Result<Option<TableRef>> {
let table_name = name.to_string();
let table_key = self.build_regional_table_key(&table_name).to_string();

View File

@@ -1,10 +1,10 @@
// Copyright 2022 Greptime Team
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -15,11 +15,13 @@
use std::any::Any;
use std::sync::Arc;
use async_trait::async_trait;
use table::TableRef;
use crate::error::Result;
/// Represents a schema, comprising a number of named tables.
#[async_trait]
pub trait SchemaProvider: Sync + Send {
/// Returns the schema provider as [`Any`](std::any::Any)
/// so that it can be downcast to a specific implementation.
@@ -29,12 +31,16 @@ pub trait SchemaProvider: Sync + Send {
fn table_names(&self) -> Result<Vec<String>>;
/// Retrieves a specific table from the schema by name, provided it exists.
fn table(&self, name: &str) -> Result<Option<TableRef>>;
async fn table(&self, name: &str) -> Result<Option<TableRef>>;
/// If supported by the implementation, adds a new table to this schema.
/// If a table with the same name already exists, a "Table already exists" error is returned.
fn register_table(&self, name: String, table: TableRef) -> Result<Option<TableRef>>;
/// If supported by the implementation, renames an existing table in this schema and returns it.
/// If no table of that name exists, a "Table not found" error is returned.
fn rename_table(&self, name: &str, new_name: String) -> Result<TableRef>;
/// If supported by the implementation, removes an existing table from this schema and returns it.
/// If no table of that name exists, returns Ok(None).
fn deregister_table(&self, name: &str) -> Result<Option<TableRef>>;

View File

@@ -1,10 +1,10 @@
// Copyright 2022 Greptime Team
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -21,35 +21,33 @@ use common_catalog::consts::{
SYSTEM_CATALOG_TABLE_ID, SYSTEM_CATALOG_TABLE_NAME,
};
use common_query::logical_plan::Expr;
use common_query::physical_plan::{PhysicalPlanRef, RuntimeEnv};
use common_query::physical_plan::{PhysicalPlanRef, SessionContext};
use common_recordbatch::SendableRecordBatchStream;
use common_telemetry::debug;
use common_time::timestamp::Timestamp;
use common_time::util;
use datatypes::prelude::{ConcreteDataType, ScalarVector};
use datatypes::schema::{ColumnSchema, Schema, SchemaBuilder, SchemaRef};
use datatypes::vectors::{BinaryVector, TimestampVector, UInt8Vector};
use datatypes::prelude::{ConcreteDataType, ScalarVector, VectorRef};
use datatypes::schema::{ColumnSchema, RawSchema, SchemaRef};
use datatypes::vectors::{BinaryVector, TimestampMillisecondVector, UInt8Vector};
use serde::{Deserialize, Serialize};
use snafu::{ensure, OptionExt, ResultExt};
use table::engine::{EngineContext, TableEngineRef};
use table::metadata::{TableId, TableInfoRef};
use table::requests::{CreateTableRequest, InsertRequest, OpenTableRequest};
use table::requests::{
CreateTableRequest, DeleteRequest, InsertRequest, OpenTableRequest, TableOptions,
};
use table::{Table, TableRef};
use crate::error::{
self, CreateSystemCatalogSnafu, EmptyValueSnafu, Error, InvalidEntryTypeSnafu, InvalidKeySnafu,
OpenSystemCatalogSnafu, Result, ValueDeserializeSnafu,
};
use crate::DeregisterTableRequest;
pub const ENTRY_TYPE_INDEX: usize = 0;
pub const KEY_INDEX: usize = 1;
pub const TIMESTAMP_INDEX: usize = 2;
pub const VALUE_INDEX: usize = 3;
pub struct SystemCatalogTable {
table_info: TableInfoRef,
pub table: TableRef,
}
pub struct SystemCatalogTable(TableRef);
#[async_trait::async_trait]
impl Table for SystemCatalogTable {
@@ -58,25 +56,29 @@ impl Table for SystemCatalogTable {
}
fn schema(&self) -> SchemaRef {
self.table_info.meta.schema.clone()
self.0.schema()
}
async fn scan(
&self,
_projection: &Option<Vec<usize>>,
_filters: &[Expr],
_limit: Option<usize>,
projection: Option<&Vec<usize>>,
filters: &[Expr],
limit: Option<usize>,
) -> table::Result<PhysicalPlanRef> {
panic!("System catalog table does not support scan!")
self.0.scan(projection, filters, limit).await
}
/// Insert values into table.
async fn insert(&self, request: InsertRequest) -> table::error::Result<usize> {
self.table.insert(request).await
self.0.insert(request).await
}
fn table_info(&self) -> TableInfoRef {
self.table_info.clone()
self.0.table_info()
}
async fn delete(&self, request: DeleteRequest) -> table::Result<usize> {
self.0.delete(request).await
}
}
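// The newtype refactor above lets SystemCatalogTable delegate schema/scan/
// insert/delete directly to the wrapped TableRef instead of caching a
// separate TableInfoRef.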
@@ -88,7 +90,7 @@ impl SystemCatalogTable {
table_name: SYSTEM_CATALOG_TABLE_NAME.to_string(),
table_id: SYSTEM_CATALOG_TABLE_ID,
};
let schema = Arc::new(build_system_catalog_schema());
let schema = build_system_catalog_schema();
let ctx = EngineContext::default();
if let Some(table) = engine
@@ -96,10 +98,7 @@ impl SystemCatalogTable {
.await
.context(OpenSystemCatalogSnafu)?
{
Ok(Self {
table_info: table.table_info(),
table,
})
Ok(Self(table))
} else {
// system catalog table is not yet created, try to create
let request = CreateTableRequest {
@@ -108,33 +107,31 @@ impl SystemCatalogTable {
schema_name: INFORMATION_SCHEMA_NAME.to_string(),
table_name: SYSTEM_CATALOG_TABLE_NAME.to_string(),
desc: Some("System catalog table".to_string()),
schema: schema.clone(),
schema,
region_numbers: vec![0],
primary_key_indices: vec![ENTRY_TYPE_INDEX, KEY_INDEX, TIMESTAMP_INDEX],
primary_key_indices: vec![ENTRY_TYPE_INDEX, KEY_INDEX],
create_if_not_exists: true,
table_options: HashMap::new(),
table_options: TableOptions::default(),
};
let table = engine
.create_table(&ctx, request)
.await
.context(CreateSystemCatalogSnafu)?;
let table_info = table.table_info();
Ok(Self { table, table_info })
Ok(Self(table))
}
}
/// Creates a stream of all entries inside the system catalog table
pub async fn records(&self) -> Result<SendableRecordBatchStream> {
let full_projection = None;
let ctx = SessionContext::new();
let scan = self
.table
.scan(&full_projection, &[], None)
.scan(full_projection, &[], None)
.await
.context(error::SystemCatalogTableScanSnafu)?;
let stream = scan
.execute(0, Arc::new(RuntimeEnv::default()))
.await
.execute(0, ctx.task_ctx())
.context(error::SystemCatalogTableScanExecSnafu)?;
Ok(stream)
}
@@ -148,7 +145,7 @@ impl SystemCatalogTable {
/// - value: JSON-encoded value of entry's metadata.
/// - gmt_created: create time of this metadata.
/// - gmt_modified: last updated time of this metadata.
fn build_system_catalog_schema() -> Schema {
fn build_system_catalog_schema() -> RawSchema {
let cols = vec![
ColumnSchema::new(
"entry_type".to_string(),
@@ -162,7 +159,7 @@ fn build_system_catalog_schema() -> Schema {
),
ColumnSchema::new(
"timestamp".to_string(),
ConcreteDataType::timestamp_millis_datatype(),
ConcreteDataType::timestamp_millisecond_datatype(),
false,
)
.with_time_index(true),
@@ -173,32 +170,71 @@ fn build_system_catalog_schema() -> Schema {
),
ColumnSchema::new(
"gmt_created".to_string(),
ConcreteDataType::timestamp_millis_datatype(),
ConcreteDataType::timestamp_millisecond_datatype(),
false,
),
ColumnSchema::new(
"gmt_modified".to_string(),
ConcreteDataType::timestamp_millis_datatype(),
ConcreteDataType::timestamp_millisecond_datatype(),
false,
),
];
// The schema of this table must be valid.
SchemaBuilder::try_from(cols).unwrap().build().unwrap()
RawSchema::new(cols)
}
pub fn build_table_insert_request(full_table_name: String, table_id: TableId) -> InsertRequest {
/// Formats the key string for a table entry in the system catalog
#[inline]
pub fn format_table_entry_key(catalog: &str, schema: &str, table_id: TableId) -> String {
format!("{catalog}.{schema}.{table_id}")
}
pub fn build_table_insert_request(
catalog: String,
schema: String,
table_name: String,
table_id: TableId,
) -> InsertRequest {
let entry_key = format_table_entry_key(&catalog, &schema, table_id);
build_insert_request(
EntryType::Table,
full_table_name.as_bytes(),
serde_json::to_string(&TableEntryValue { table_id })
entry_key.as_bytes(),
serde_json::to_string(&TableEntryValue { table_name })
.unwrap()
.as_bytes(),
)
}
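// For illustration (hypothetical values): registering table id 42 as
// greptime.public.my_table produces key "greptime.public.42" and value
// {"table_name":"my_table"}, the exact layout decode_system_catalog parses below.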
pub(crate) fn build_table_deletion_request(
request: &DeregisterTableRequest,
table_id: TableId,
) -> DeleteRequest {
let table_key = format_table_entry_key(&request.catalog, &request.schema, table_id);
DeleteRequest {
key_column_values: build_primary_key_columns(EntryType::Table, table_key.as_bytes()),
}
}
fn build_primary_key_columns(entry_type: EntryType, key: &[u8]) -> HashMap<String, VectorRef> {
let mut m = HashMap::with_capacity(3);
m.insert(
"entry_type".to_string(),
Arc::new(UInt8Vector::from_slice([entry_type as u8])) as _,
);
m.insert(
"key".to_string(),
Arc::new(BinaryVector::from_slice(&[key])) as _,
);
// The timestamp in the key part is intentionally left as 0
m.insert(
"timestamp".to_string(),
Arc::new(TimestampMillisecondVector::from_slice([0])) as _,
);
m
}
pub fn build_schema_insert_request(catalog_name: String, schema_name: String) -> InsertRequest {
let full_schema_name = format!("{}.{}", catalog_name, schema_name);
let full_schema_name = format!("{catalog_name}.{schema_name}");
build_insert_request(
EntryType::Schema,
full_schema_name.as_bytes(),
@@ -209,40 +245,25 @@ pub fn build_schema_insert_request(catalog_name: String, schema_name: String) ->
}
pub fn build_insert_request(entry_type: EntryType, key: &[u8], value: &[u8]) -> InsertRequest {
let primary_key_columns = build_primary_key_columns(entry_type, key);
let mut columns_values = HashMap::with_capacity(6);
columns_values.insert(
"entry_type".to_string(),
Arc::new(UInt8Vector::from_slice(&[entry_type as u8])) as _,
);
columns_values.insert(
"key".to_string(),
Arc::new(BinaryVector::from_slice(&[key])) as _,
);
// Timestamp in key part is intentionally left to 0
columns_values.insert(
"timestamp".to_string(),
Arc::new(TimestampVector::from_slice(&[Timestamp::from_millis(0)])) as _,
);
columns_values.extend(primary_key_columns.into_iter());
columns_values.insert(
"value".to_string(),
Arc::new(BinaryVector::from_slice(&[value])) as _,
);
let now = util::current_time_millis();
columns_values.insert(
"gmt_created".to_string(),
Arc::new(TimestampVector::from_slice(&[Timestamp::from_millis(
util::current_time_millis(),
)])) as _,
Arc::new(TimestampMillisecondVector::from_slice([now])) as _,
);
columns_values.insert(
"gmt_modified".to_string(),
Arc::new(TimestampVector::from_slice(&[Timestamp::from_millis(
util::current_time_millis(),
)])) as _,
Arc::new(TimestampMillisecondVector::from_slice([now])) as _,
);
InsertRequest {
@@ -250,6 +271,7 @@ pub fn build_insert_request(entry_type: EntryType, key: &[u8], value: &[u8]) ->
schema_name: DEFAULT_SCHEMA_NAME.to_string(),
table_name: SYSTEM_CATALOG_TABLE_NAME.to_string(),
columns_values,
region_number: 0, // system catalog table has only one region
}
}
@@ -289,8 +311,8 @@ pub fn decode_system_catalog(
}
EntryType::Table => {
// As for table entry, the key is a string with format: `<catalog_name>.<schema_name>.<table_name>`
// and the value is a JSON string with format: `{"table_id": <table_id>}`
// As for table entry, the key is a string with format: `<catalog_name>.<schema_name>.<table_id>`
// and the value is a JSON string with format: `{"table_name": <table_name>}`
let table_parts = key.split('.').collect::<Vec<_>>();
ensure!(
table_parts.len() >= 3,
@@ -302,11 +324,12 @@ pub fn decode_system_catalog(
debug!("Table meta value: {}", String::from_utf8_lossy(value));
let table_meta: TableEntryValue =
serde_json::from_slice(value).context(ValueDeserializeSnafu)?;
let table_id = table_parts[2].parse::<TableId>().unwrap();
Ok(Entry::Table(TableEntry {
catalog_name: table_parts[0].to_string(),
schema_name: table_parts[1].to_string(),
table_name: table_parts[2].to_string(),
table_id: table_meta.table_id,
table_name: table_meta.table_name,
table_id,
}))
}
}
@@ -366,25 +389,28 @@ pub struct TableEntry {
#[derive(Debug, Serialize, Deserialize, PartialEq, Eq)]
pub struct TableEntryValue {
pub table_id: TableId,
pub table_name: String,
}
#[cfg(test)]
mod tests {
use log_store::fs::noop::NoopLogStore;
use common_recordbatch::RecordBatches;
use common_test_util::temp_dir::{create_temp_dir, TempDir};
use datatypes::value::Value;
use log_store::NoopLogStore;
use mito::config::EngineConfig;
use mito::engine::MitoEngine;
use object_store::ObjectStore;
use object_store::{ObjectStore, ObjectStoreBuilder};
use storage::compaction::noop::NoopCompactionScheduler;
use storage::config::EngineConfig as StorageEngineConfig;
use storage::EngineImpl;
use table::metadata::TableType;
use table::metadata::TableType::Base;
use tempdir::TempDir;
use super::*;
#[test]
pub fn test_decode_catalog_enrty() {
pub fn test_decode_catalog_entry() {
let entry = decode_system_catalog(
Some(EntryType::Catalog as u8),
Some("some_catalog".as_bytes()),
@@ -394,7 +420,7 @@ mod tests {
if let Entry::Catalog(e) = entry {
assert_eq!("some_catalog", e.catalog_name);
} else {
panic!("Unexpected type: {:?}", entry);
panic!("Unexpected type: {entry:?}");
}
}
@@ -411,7 +437,7 @@ mod tests {
assert_eq!("some_catalog", e.catalog_name);
assert_eq!("some_schema", e.schema_name);
} else {
panic!("Unexpected type: {:?}", entry);
panic!("Unexpected type: {entry:?}");
}
}
@@ -419,8 +445,8 @@ mod tests {
pub fn test_decode_table() {
let entry = decode_system_catalog(
Some(EntryType::Table as u8),
Some("some_catalog.some_schema.some_table".as_bytes()),
Some("{\"table_id\":42}".as_bytes()),
Some("some_catalog.some_schema.42".as_bytes()),
Some("{\"table_name\":\"some_table\"}".as_bytes()),
)
.unwrap();
@@ -430,7 +456,7 @@ mod tests {
assert_eq!("some_table", e.table_name);
assert_eq!(42, e.table_id);
} else {
panic!("Unexpected type: {:?}", entry);
panic!("Unexpected type: {entry:?}");
}
}
@@ -439,7 +465,7 @@ mod tests {
pub fn test_decode_mismatch() {
decode_system_catalog(
Some(EntryType::Table as u8),
Some("some_catalog.some_schema.some_table".as_bytes()),
Some("some_catalog.some_schema.42".as_bytes()),
None,
)
.unwrap();
@@ -454,19 +480,21 @@ mod tests {
}
pub async fn prepare_table_engine() -> (TempDir, TableEngineRef) {
let dir = TempDir::new("system-table-test").unwrap();
let dir = create_temp_dir("system-table-test");
let store_dir = dir.path().to_string_lossy();
let accessor = opendal::services::fs::Builder::default()
let accessor = object_store::services::Fs::default()
.root(&store_dir)
.build()
.unwrap();
let object_store = ObjectStore::new(accessor);
let object_store = ObjectStore::new(accessor).finish();
let noop_compaction_scheduler = Arc::new(NoopCompactionScheduler::default());
let table_engine = Arc::new(MitoEngine::new(
EngineConfig::default(),
EngineImpl::new(
StorageEngineConfig::default(),
Arc::new(NoopLogStore::default()),
object_store.clone(),
noop_compaction_scheduler,
),
object_store,
));
@@ -491,4 +519,53 @@ mod tests {
assert_eq!(SYSTEM_CATALOG_NAME, info.catalog_name);
assert_eq!(INFORMATION_SCHEMA_NAME, info.schema_name);
}
#[tokio::test]
async fn test_system_catalog_table_records() {
let (_, table_engine) = prepare_table_engine().await;
let catalog_table = SystemCatalogTable::new(table_engine).await.unwrap();
let table_insertion = build_table_insert_request(
DEFAULT_CATALOG_NAME.to_string(),
DEFAULT_SCHEMA_NAME.to_string(),
"my_table".to_string(),
1,
);
let result = catalog_table.insert(table_insertion).await.unwrap();
assert_eq!(result, 1);
let records = catalog_table.records().await.unwrap();
let mut batches = RecordBatches::try_collect(records).await.unwrap().take();
assert_eq!(batches.len(), 1);
let batch = batches.remove(0);
assert_eq!(batch.num_rows(), 1);
let row = batch.rows().next().unwrap();
let Value::UInt8(entry_type) = row[0] else { unreachable!() };
let Value::Binary(key) = row[1].clone() else { unreachable!() };
let Value::Binary(value) = row[3].clone() else { unreachable!() };
let entry = decode_system_catalog(Some(entry_type), Some(&*key), Some(&*value)).unwrap();
let expected = Entry::Table(TableEntry {
catalog_name: DEFAULT_CATALOG_NAME.to_string(),
schema_name: DEFAULT_SCHEMA_NAME.to_string(),
table_name: "my_table".to_string(),
table_id: 1,
});
assert_eq!(entry, expected);
let table_deletion = build_table_deletion_request(
&DeregisterTableRequest {
catalog: DEFAULT_CATALOG_NAME.to_string(),
schema: DEFAULT_SCHEMA_NAME.to_string(),
table_name: "my_table".to_string(),
},
1,
);
let result = catalog_table.delete(table_deletion).await.unwrap();
assert_eq!(result, 1);
let records = catalog_table.records().await.unwrap();
let batches = RecordBatches::try_collect(records).await.unwrap().take();
assert_eq!(batches.len(), 0);
}
}

View File

@@ -0,0 +1,178 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::HashMap;
use std::sync::Arc;
use common_catalog::format_full_table_name;
use datafusion::common::{OwnedTableReference, ResolvedTableReference, TableReference};
use datafusion::datasource::provider_as_source;
use datafusion::logical_expr::TableSource;
use session::context::QueryContext;
use snafu::{ensure, OptionExt};
use table::table::adapter::DfTableProviderAdapter;
use crate::error::{
CatalogNotFoundSnafu, QueryAccessDeniedSnafu, Result, SchemaNotFoundSnafu, TableNotExistSnafu,
};
use crate::CatalogListRef;
pub struct DfTableSourceProvider {
catalog_list: CatalogListRef,
resolved_tables: HashMap<String, Arc<dyn TableSource>>,
disallow_cross_schema_query: bool,
default_catalog: String,
default_schema: String,
}
impl DfTableSourceProvider {
pub fn new(
catalog_list: CatalogListRef,
disallow_cross_schema_query: bool,
query_ctx: &QueryContext,
) -> Self {
Self {
catalog_list,
disallow_cross_schema_query,
resolved_tables: HashMap::new(),
default_catalog: query_ctx.current_catalog(),
default_schema: query_ctx.current_schema(),
}
}
pub fn resolve_table_ref<'a>(
&'a self,
table_ref: TableReference<'a>,
) -> Result<ResolvedTableReference<'a>> {
if self.disallow_cross_schema_query {
match &table_ref {
TableReference::Bare { .. } => (),
TableReference::Partial { schema, .. } => {
ensure!(
schema.as_ref() == self.default_schema,
QueryAccessDeniedSnafu {
catalog: &self.default_catalog,
schema: schema.as_ref(),
}
);
}
TableReference::Full {
catalog, schema, ..
} => {
ensure!(
catalog.as_ref() == self.default_catalog
&& schema.as_ref() == self.default_schema,
QueryAccessDeniedSnafu {
catalog: catalog.as_ref(),
schema: schema.as_ref()
}
);
}
};
}
Ok(table_ref.resolve(&self.default_catalog, &self.default_schema))
}
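// For example, with default catalog "greptime" and schema "public": a bare
// reference `t` resolves to `greptime.public.t`, while `wrong_schema.t` is
// rejected when disallow_cross_schema_query is set, exactly what the tests
// below assert.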
pub async fn resolve_table(
&mut self,
table_ref: OwnedTableReference,
) -> Result<Arc<dyn TableSource>> {
let table_ref = table_ref.as_table_reference();
let table_ref = self.resolve_table_ref(table_ref)?;
let resolved_name = table_ref.to_string();
if let Some(table) = self.resolved_tables.get(&resolved_name) {
return Ok(table.clone());
}
let catalog_name = table_ref.catalog.as_ref();
let schema_name = table_ref.schema.as_ref();
let table_name = table_ref.table.as_ref();
let catalog = self
.catalog_list
.catalog(catalog_name)?
.context(CatalogNotFoundSnafu { catalog_name })?;
let schema = catalog.schema(schema_name)?.context(SchemaNotFoundSnafu {
catalog: catalog_name,
schema: schema_name,
})?;
let table = schema
.table(table_name)
.await?
.with_context(|| TableNotExistSnafu {
table: format_full_table_name(catalog_name, schema_name, table_name),
})?;
let table = DfTableProviderAdapter::new(table);
let table = provider_as_source(Arc::new(table));
self.resolved_tables.insert(resolved_name, table.clone());
Ok(table)
}
}
#[cfg(test)]
mod tests {
use std::borrow::Cow;
use session::context::QueryContext;
use super::*;
use crate::local::MemoryCatalogManager;
#[test]
fn test_validate_table_ref() {
let query_ctx = &QueryContext::with("greptime", "public");
let table_provider =
DfTableSourceProvider::new(Arc::new(MemoryCatalogManager::default()), true, query_ctx);
let table_ref = TableReference::Bare {
table: Cow::Borrowed("table_name"),
};
let result = table_provider.resolve_table_ref(table_ref);
assert!(result.is_ok());
let table_ref = TableReference::Partial {
schema: Cow::Borrowed("public"),
table: Cow::Borrowed("table_name"),
};
let result = table_provider.resolve_table_ref(table_ref);
assert!(result.is_ok());
let table_ref = TableReference::Partial {
schema: Cow::Borrowed("wrong_schema"),
table: Cow::Borrowed("table_name"),
};
let result = table_provider.resolve_table_ref(table_ref);
assert!(result.is_err());
let table_ref = TableReference::Full {
catalog: Cow::Borrowed("greptime"),
schema: Cow::Borrowed("public"),
table: Cow::Borrowed("table_name"),
};
let result = table_provider.resolve_table_ref(table_ref);
assert!(result.is_ok());
let table_ref = TableReference::Full {
catalog: Cow::Borrowed("wrong_catalog"),
schema: Cow::Borrowed("public"),
table: Cow::Borrowed("table_name"),
};
let result = table_provider.resolve_table_ref(table_ref);
assert!(result.is_err());
}
}

View File

@@ -1,10 +1,10 @@
// Copyright 2022 Greptime Team
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -20,15 +20,16 @@ use std::sync::Arc;
use std::task::{Context, Poll};
use async_stream::stream;
use async_trait::async_trait;
use common_catalog::consts::{INFORMATION_SCHEMA_NAME, SYSTEM_CATALOG_TABLE_NAME};
use common_error::ext::BoxedError;
use common_query::logical_plan::Expr;
use common_query::physical_plan::PhysicalPlanRef;
use common_recordbatch::error::Result as RecordBatchResult;
use common_recordbatch::{RecordBatch, RecordBatchStream};
use datatypes::prelude::{ConcreteDataType, VectorBuilder};
use datatypes::prelude::{ConcreteDataType, DataType};
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
use datatypes::value::Value;
use datatypes::value::ValueRef;
use datatypes::vectors::VectorRef;
use futures::Stream;
use snafu::ResultExt;
@@ -38,10 +39,13 @@ use table::metadata::{TableId, TableInfoRef};
use table::table::scan::SimpleTableScan;
use table::{Table, TableRef};
use crate::error::{Error, InsertCatalogRecordSnafu};
use crate::system::{build_schema_insert_request, build_table_insert_request, SystemCatalogTable};
use crate::error::{self, Error, InsertCatalogRecordSnafu, Result as CatalogResult};
use crate::system::{
build_schema_insert_request, build_table_deletion_request, build_table_insert_request,
SystemCatalogTable,
};
use crate::{
format_full_table_name, CatalogListRef, CatalogProvider, SchemaProvider, SchemaProviderRef,
CatalogListRef, CatalogProvider, DeregisterTableRequest, SchemaProvider, SchemaProviderRef,
};
/// Tables holds all tables created by users.
@@ -77,7 +81,7 @@ impl Table for Tables {
async fn scan(
&self,
_projection: &Option<Vec<usize>>,
_projection: Option<&Vec<usize>>,
_filters: &[Expr],
_limit: Option<usize>,
) -> table::error::Result<PhysicalPlanRef> {
@@ -149,26 +153,27 @@ fn tables_to_record_batch(
engine: &str,
) -> Vec<VectorRef> {
let mut catalog_vec =
VectorBuilder::with_capacity(ConcreteDataType::string_datatype(), table_names.len());
ConcreteDataType::string_datatype().create_mutable_vector(table_names.len());
let mut schema_vec =
VectorBuilder::with_capacity(ConcreteDataType::string_datatype(), table_names.len());
ConcreteDataType::string_datatype().create_mutable_vector(table_names.len());
let mut table_name_vec =
VectorBuilder::with_capacity(ConcreteDataType::string_datatype(), table_names.len());
ConcreteDataType::string_datatype().create_mutable_vector(table_names.len());
let mut engine_vec =
VectorBuilder::with_capacity(ConcreteDataType::string_datatype(), table_names.len());
ConcreteDataType::string_datatype().create_mutable_vector(table_names.len());
for table_name in table_names {
catalog_vec.push(&Value::String(catalog_name.into()));
schema_vec.push(&Value::String(schema_name.into()));
table_name_vec.push(&Value::String(table_name.into()));
engine_vec.push(&Value::String(engine.into()));
// Safety: All these vectors are string type.
catalog_vec.push_value_ref(ValueRef::String(catalog_name));
schema_vec.push_value_ref(ValueRef::String(schema_name));
table_name_vec.push_value_ref(ValueRef::String(&table_name));
engine_vec.push_value_ref(ValueRef::String(engine));
}
vec![
catalog_vec.finish(),
schema_vec.finish(),
table_name_vec.finish(),
engine_vec.finish(),
catalog_vec.to_vector(),
schema_vec.to_vector(),
table_name_vec.to_vector(),
engine_vec.to_vector(),
]
}
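The hunk above migrates from VectorBuilder to the MutableVector API. A minimal sketch of the new pattern, assuming the datatypes imports already used in this file:

fn build_string_vector_sketch() -> VectorRef {
    let mut builder = ConcreteDataType::string_datatype().create_mutable_vector(2);
    // Safety: the builder is string-typed, matching the pushed value refs.
    builder.push_value_ref(ValueRef::String("a"));
    builder.push_value_ref(ValueRef::String("b"));
    builder.to_vector()
}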
@@ -196,6 +201,7 @@ pub struct InformationSchema {
pub system: Arc<SystemCatalogTable>,
}
#[async_trait]
impl SchemaProvider for InformationSchema {
fn as_any(&self) -> &dyn Any {
self
@@ -208,7 +214,7 @@ impl SchemaProvider for InformationSchema {
])
}
fn table(&self, name: &str) -> Result<Option<TableRef>, Error> {
async fn table(&self, name: &str) -> Result<Option<TableRef>, Error> {
if name.eq_ignore_ascii_case("tables") {
Ok(Some(self.tables.clone()))
} else if name.eq_ignore_ascii_case(SYSTEM_CATALOG_TABLE_NAME) {
@@ -226,6 +232,10 @@ impl SchemaProvider for InformationSchema {
panic!("System catalog & schema does not support register table")
}
fn rename_table(&self, _name: &str, _new_name: String) -> crate::error::Result<TableRef> {
unimplemented!("System catalog & schema does not support rename table")
}
fn deregister_table(&self, _name: &str) -> crate::error::Result<Option<TableRef>> {
panic!("System catalog & schema does not support deregister table")
}
@@ -262,8 +272,7 @@ impl SystemCatalog {
table_name: String,
table_id: TableId,
) -> crate::error::Result<usize> {
let full_table_name = format_full_table_name(&catalog, &schema, &table_name);
let request = build_table_insert_request(full_table_name, table_id);
let request = build_table_insert_request(catalog, schema, table_name, table_id);
self.information_schema
.system
.insert(request)
@@ -271,6 +280,21 @@ impl SystemCatalog {
.context(InsertCatalogRecordSnafu)
}
pub(crate) async fn deregister_table(
&self,
request: &DeregisterTableRequest,
table_id: TableId,
) -> CatalogResult<bool> {
self.information_schema
.system
.delete(build_table_deletion_request(request, table_id))
.await
.map(|x| x == 1)
.with_context(|_| error::DeregisterTableSnafu {
request: request.clone(),
})
}
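// `delete` returns the number of affected rows; a table entry occupies exactly
// one row, so a count of 1 maps to `true` (the system catalog records test
// above exercises this).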
pub async fn register_schema(
&self,
catalog: String,
@@ -340,9 +364,7 @@ fn build_schema_for_tables() -> Schema {
#[cfg(test)]
mod tests {
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_query::physical_plan::RuntimeEnv;
use datatypes::arrow::array::Utf8Array;
use datatypes::arrow::datatypes::DataType;
use common_query::physical_plan::SessionContext;
use futures_util::StreamExt;
use table::table::numbers::NumbersTable;
@@ -365,58 +387,48 @@ mod tests {
.unwrap();
let tables = Tables::new(catalog_list, "test_engine".to_string());
let tables_stream = tables.scan(&None, &[], None).await.unwrap();
let mut tables_stream = tables_stream
.execute(0, Arc::new(RuntimeEnv::default()))
.await
.unwrap();
let tables_stream = tables.scan(None, &[], None).await.unwrap();
let session_ctx = SessionContext::new();
let mut tables_stream = tables_stream.execute(0, session_ctx.task_ctx()).unwrap();
if let Some(t) = tables_stream.next().await {
let batch = t.unwrap().df_recordbatch;
let batch = t.unwrap();
assert_eq!(1, batch.num_rows());
assert_eq!(4, batch.num_columns());
assert_eq!(&DataType::Utf8, batch.column(0).data_type());
assert_eq!(&DataType::Utf8, batch.column(1).data_type());
assert_eq!(&DataType::Utf8, batch.column(2).data_type());
assert_eq!(&DataType::Utf8, batch.column(3).data_type());
assert_eq!(
ConcreteDataType::string_datatype(),
batch.column(0).data_type()
);
assert_eq!(
ConcreteDataType::string_datatype(),
batch.column(1).data_type()
);
assert_eq!(
ConcreteDataType::string_datatype(),
batch.column(2).data_type()
);
assert_eq!(
ConcreteDataType::string_datatype(),
batch.column(3).data_type()
);
assert_eq!(
"greptime",
batch
.column(0)
.as_any()
.downcast_ref::<Utf8Array<i32>>()
.unwrap()
.value(0)
batch.column(0).get_ref(0).as_string().unwrap().unwrap()
);
assert_eq!(
"public",
batch
.column(1)
.as_any()
.downcast_ref::<Utf8Array<i32>>()
.unwrap()
.value(0)
batch.column(1).get_ref(0).as_string().unwrap().unwrap()
);
assert_eq!(
"test_table",
batch
.column(2)
.as_any()
.downcast_ref::<Utf8Array<i32>>()
.unwrap()
.value(0)
batch.column(2).get_ref(0).as_string().unwrap().unwrap()
);
assert_eq!(
"test_engine",
batch
.column(3)
.as_any()
.downcast_ref::<Utf8Array<i32>>()
.unwrap()
.value(0)
batch.column(3).get_ref(0).as_string().unwrap().unwrap()
);
} else {
panic!("Record batch should not be empty!")

View File

@@ -0,0 +1,171 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#[cfg(test)]
mod tests {
use std::sync::Arc;
use catalog::local::LocalCatalogManager;
use catalog::{CatalogManager, RegisterTableRequest, RenameTableRequest};
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_telemetry::{error, info};
use mito::config::EngineConfig;
use table::table::numbers::NumbersTable;
use table::TableRef;
use tokio::sync::Mutex;
async fn create_local_catalog_manager() -> Result<LocalCatalogManager, catalog::error::Error> {
let (_dir, object_store) =
mito::table::test_util::new_test_object_store("setup_mock_engine_and_table").await;
let mock_engine = Arc::new(mito::table::test_util::MockMitoEngine::new(
EngineConfig::default(),
mito::table::test_util::MockEngine::default(),
object_store,
));
let catalog_manager = LocalCatalogManager::try_new(mock_engine).await.unwrap();
catalog_manager.start().await?;
Ok(catalog_manager)
}
#[tokio::test]
async fn test_rename_table() {
common_telemetry::init_default_ut_logging();
let catalog_manager = create_local_catalog_manager().await.unwrap();
// register table
let table_name = "test_table";
let table_id = 42;
let table = Arc::new(NumbersTable::new(table_id));
let request = RegisterTableRequest {
catalog: DEFAULT_CATALOG_NAME.to_string(),
schema: DEFAULT_SCHEMA_NAME.to_string(),
table_name: table_name.to_string(),
table_id,
table: table.clone(),
};
assert!(catalog_manager.register_table(request).await.unwrap());
// rename table
let new_table_name = "table_t";
let rename_table_req = RenameTableRequest {
catalog: DEFAULT_CATALOG_NAME.to_string(),
schema: DEFAULT_SCHEMA_NAME.to_string(),
table_name: table_name.to_string(),
new_table_name: new_table_name.to_string(),
table_id,
};
assert!(catalog_manager
.rename_table(rename_table_req)
.await
.unwrap());
let registered_table = catalog_manager
.table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, new_table_name)
.await
.unwrap()
.unwrap();
assert_eq!(registered_table.table_info().ident.table_id, table_id);
}
#[tokio::test]
async fn test_duplicate_register() {
let catalog_manager = create_local_catalog_manager().await.unwrap();
let request = RegisterTableRequest {
catalog: DEFAULT_CATALOG_NAME.to_string(),
schema: DEFAULT_SCHEMA_NAME.to_string(),
table_name: "test_table".to_string(),
table_id: 42,
table: Arc::new(NumbersTable::new(42)),
};
assert!(catalog_manager
.register_table(request.clone())
.await
.unwrap());
// Registering a table with the same table id succeeds but returns false.
assert!(!catalog_manager.register_table(request).await.unwrap());
let err = catalog_manager
.register_table(RegisterTableRequest {
catalog: DEFAULT_CATALOG_NAME.to_string(),
schema: DEFAULT_SCHEMA_NAME.to_string(),
table_name: "test_table".to_string(),
table_id: 43,
table: Arc::new(NumbersTable::new(43)),
})
.await
.unwrap_err();
assert!(
err.to_string()
.contains("Table `greptime.public.test_table` already exists"),
"Actual error message: {err}",
);
}
#[test]
fn test_concurrent_register() {
common_telemetry::init_default_ut_logging();
let rt = Arc::new(tokio::runtime::Builder::new_multi_thread().build().unwrap());
let catalog_manager =
Arc::new(rt.block_on(async { create_local_catalog_manager().await.unwrap() }));
let succeed: Arc<Mutex<Option<TableRef>>> = Arc::new(Mutex::new(None));
let mut handles = Vec::with_capacity(8);
for i in 0..8 {
let catalog = catalog_manager.clone();
let succeed = succeed.clone();
let handle = rt.spawn(async move {
let table_id = 42 + i;
let table = Arc::new(NumbersTable::new(table_id));
let req = RegisterTableRequest {
catalog: DEFAULT_CATALOG_NAME.to_string(),
schema: DEFAULT_SCHEMA_NAME.to_string(),
table_name: "test_table".to_string(),
table_id,
table: table.clone(),
};
match catalog.register_table(req).await {
Ok(res) => {
if res {
let mut succeed = succeed.lock().await;
info!("Successfully registered table: {}", table_id);
*succeed = Some(table);
}
}
Err(_) => {
error!("Failed to register table {}", table_id);
}
}
});
handles.push(handle);
}
rt.block_on(async move {
for handle in handles {
handle.await.unwrap();
}
let guard = succeed.lock().await;
let table = guard.as_ref().unwrap();
let table_registered = catalog_manager
.table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, "test_table")
.await
.unwrap()
.unwrap();
assert_eq!(
table_registered.table_info().ident.table_id,
table.table_info().ident.table_id
);
});
}
}

View File

@@ -1,10 +1,10 @@
// Copyright 2022 Greptime Team
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -147,6 +147,7 @@ impl TableEngine for MockTableEngine {
let table_id = TableId::from_str(
request
.table_options
.extra_options
.get("table_id")
.unwrap_or(&default_table_id),
)
@@ -189,10 +190,10 @@ impl TableEngine for MockTableEngine {
unimplemented!()
}
fn get_table<'a>(
fn get_table(
&self,
_ctx: &EngineContext,
table_ref: &'a TableReference,
table_ref: &TableReference,
) -> table::Result<Option<TableRef>> {
futures::executor::block_on(async {
Ok(self
@@ -204,7 +205,7 @@ impl TableEngine for MockTableEngine {
})
}
fn table_exists<'a>(&self, _ctx: &EngineContext, table_ref: &'a TableReference) -> bool {
fn table_exists(&self, _ctx: &EngineContext, table_ref: &TableReference) -> bool {
futures::executor::block_on(async {
self.tables
.read()
@@ -217,7 +218,7 @@ impl TableEngine for MockTableEngine {
&self,
_ctx: &EngineContext,
_request: DropTableRequest,
) -> table::Result<()> {
) -> table::Result<bool> {
unimplemented!()
}
}

View File

@@ -1,10 +1,10 @@
// Copyright 2022 Greptime Team
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -22,13 +22,13 @@ mod tests {
use std::collections::HashSet;
use std::sync::Arc;
use catalog::helper::{CatalogKey, CatalogValue, SchemaKey, SchemaValue};
use catalog::remote::{
KvBackend, KvBackendRef, RemoteCatalogManager, RemoteCatalogProvider, RemoteSchemaProvider,
};
use catalog::{CatalogList, CatalogManager, RegisterTableRequest};
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_catalog::{CatalogKey, CatalogValue, SchemaKey, SchemaValue};
use datatypes::schema::Schema;
use datatypes::schema::RawSchema;
use futures_util::StreamExt;
use table::engine::{EngineContext, TableEngineRef};
use table::requests::CreateTableRequest;
@@ -116,7 +116,7 @@ mod tests {
let schema_name = "nonexistent_schema".to_string();
let table_name = "fail_table".to_string();
// this schema has no effect
let table_schema = Arc::new(Schema::new(vec![]));
let table_schema = RawSchema::new(vec![]);
let table = table_engine
.create_table(
&EngineContext {},
@@ -126,7 +126,7 @@ mod tests {
schema_name: schema_name.clone(),
table_name: table_name.clone(),
desc: None,
schema: table_schema.clone(),
schema: table_schema,
region_numbers: vec![0],
primary_key_indices: vec![],
create_if_not_exists: false,
@@ -176,7 +176,7 @@ mod tests {
let table_name = "test_table".to_string();
let table_id = 1;
// this schema has no effect
let table_schema = Arc::new(Schema::new(vec![]));
let table_schema = RawSchema::new(vec![]);
let table = table_engine
.create_table(
&EngineContext {},
@@ -186,7 +186,7 @@ mod tests {
schema_name: schema_name.clone(),
table_name: table_name.clone(),
desc: None,
schema: table_schema.clone(),
schema: table_schema,
region_numbers: vec![0],
primary_key_indices: vec![],
create_if_not_exists: false,
@@ -202,7 +202,7 @@ mod tests {
table_id,
table,
};
assert_eq!(1, catalog_manager.register_table(reg_req).await.unwrap());
assert!(catalog_manager.register_table(reg_req).await.unwrap());
assert_eq!(
HashSet::from([table_name, "numbers".to_string()]),
default_schema
@@ -246,7 +246,7 @@ mod tests {
schema_name: schema_name.clone(),
table_name: "".to_string(),
desc: None,
schema: Arc::new(Schema::new(vec![])),
schema: RawSchema::new(vec![]),
region_numbers: vec![0],
primary_key_indices: vec![],
create_if_not_exists: false,
@@ -287,7 +287,7 @@ mod tests {
.register_schema(schema_name.clone(), schema.clone())
.expect("Register schema should not fail");
assert!(prev.is_none());
assert_eq!(1, catalog_manager.register_table(reg_req).await.unwrap());
assert!(catalog_manager.register_table(reg_req).await.unwrap());
assert_eq!(
HashSet::from([schema_name.clone()]),


@@ -1,42 +1,39 @@
[package]
name = "client"
version = "0.1.0"
edition = "2021"
license = "Apache-2.0"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
version.workspace = true
edition.workspace = true
license.workspace = true
[dependencies]
api = { path = "../api" }
async-stream = "0.3"
arrow-flight.workspace = true
async-stream.workspace = true
common-base = { path = "../common/base" }
common-catalog = { path = "../common/catalog" }
common-error = { path = "../common/error" }
common-grpc = { path = "../common/grpc" }
common-grpc-expr = { path = "../common/grpc-expr" }
common-query = { path = "../common/query" }
common-recordbatch = { path = "../common/recordbatch" }
common-insert = { path = "../common/insert" }
common-time = { path = "../common/time" }
datafusion = { git = "https://github.com/apache/arrow-datafusion.git", branch = "arrow2", features = [
"simd",
] }
datafusion.workspace = true
datatypes = { path = "../datatypes" }
enum_dispatch = "0.3"
futures-util.workspace = true
parking_lot = "0.12"
prost.workspace = true
rand = "0.8"
snafu = { version = "0.7", features = ["backtraces"] }
tonic = "0.8"
snafu.workspace = true
tonic.workspace = true
[dev-dependencies]
datanode = { path = "../datanode" }
substrait = { path = "../common/substrait" }
tokio = { version = "1.0", features = ["full"] }
tokio.workspace = true
tracing = "0.1"
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
# TODO(ruihang): upgrade to 0.11 once substrait-rs supports it.
[dev-dependencies.prost_09]
package = "prost"
version = "0.9"
prost.workspace = true
[dev-dependencies.substrait_proto]
package = "substrait"
version = "0.2"
version = "0.4"


@@ -1,109 +0,0 @@
// Copyright 2022 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::HashMap;
use api::v1::codec::InsertBatch;
use api::v1::*;
use client::{Client, Database};
fn main() {
tracing::subscriber::set_global_default(tracing_subscriber::FmtSubscriber::builder().finish())
.unwrap();
run();
}
#[tokio::main]
async fn run() {
let client = Client::with_urls(vec!["127.0.0.1:3001"]);
let db = Database::new("greptime", client);
let expr = InsertExpr {
schema_name: "public".to_string(),
table_name: "demo".to_string(),
expr: Some(insert_expr::Expr::Values(insert_expr::Values {
values: insert_batches(),
})),
options: HashMap::default(),
region_number: 0,
};
db.insert(expr).await.unwrap();
}
fn insert_batches() -> Vec<Vec<u8>> {
const SEMANTIC_TAG: i32 = 0;
const SEMANTIC_FIELD: i32 = 1;
const SEMANTIC_TS: i32 = 2;
let row_count = 4;
let host_vals = column::Values {
string_values: vec![
"host1".to_string(),
"host2".to_string(),
"host3".to_string(),
"host4".to_string(),
],
..Default::default()
};
let host_column = Column {
column_name: "host".to_string(),
semantic_type: SEMANTIC_TAG,
values: Some(host_vals),
null_mask: vec![0],
..Default::default()
};
let cpu_vals = column::Values {
f64_values: vec![0.31, 0.41, 0.2],
..Default::default()
};
let cpu_column = Column {
column_name: "cpu".to_string(),
semantic_type: SEMANTIC_FIELD,
values: Some(cpu_vals),
null_mask: vec![2],
..Default::default()
};
let mem_vals = column::Values {
f64_values: vec![0.1, 0.2, 0.3],
..Default::default()
};
let mem_column = Column {
column_name: "memory".to_string(),
semantic_type: SEMANTIC_FIELD,
values: Some(mem_vals),
null_mask: vec![4],
..Default::default()
};
let ts_vals = column::Values {
i64_values: vec![100, 101, 102, 103],
..Default::default()
};
let ts_column = Column {
column_name: "ts".to_string(),
semantic_type: SEMANTIC_TS,
values: Some(ts_vals),
null_mask: vec![0],
..Default::default()
};
let insert_batch = InsertBatch {
columns: vec![host_column, cpu_column, mem_column, ts_column],
row_count,
};
vec![insert_batch.into()]
}


@@ -1,10 +1,10 @@
// Copyright 2022 Greptime Team
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -12,14 +12,14 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use api::v1::{ColumnDataType, ColumnDef, CreateExpr};
use client::admin::Admin;
use api::v1::{ColumnDataType, ColumnDef, CreateTableExpr, TableId};
use client::{Client, Database};
use prost_09::Message;
use substrait_proto::protobuf::plan_rel::RelType as PlanRelType;
use substrait_proto::protobuf::read_rel::{NamedTable, ReadType};
use substrait_proto::protobuf::rel::RelType;
use substrait_proto::protobuf::{PlanRel, ReadRel, Rel};
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use prost::Message;
use substrait_proto::proto::plan_rel::RelType as PlanRelType;
use substrait_proto::proto::read_rel::{NamedTable, ReadType};
use substrait_proto::proto::rel::RelType;
use substrait_proto::proto::{PlanRel, ReadRel, Rel};
use tracing::{event, Level};
fn main() {
@@ -33,46 +33,45 @@ fn main() {
async fn run() {
let client = Client::with_urls(vec!["127.0.0.1:3001"]);
let create_table_expr = CreateExpr {
catalog_name: Some("greptime".to_string()),
schema_name: Some("public".to_string()),
let create_table_expr = CreateTableExpr {
catalog_name: "greptime".to_string(),
schema_name: "public".to_string(),
table_name: "test_logical_dist_exec".to_string(),
desc: None,
desc: "".to_string(),
column_defs: vec![
ColumnDef {
name: "timestamp".to_string(),
datatype: ColumnDataType::Timestamp as i32,
datatype: ColumnDataType::TimestampMillisecond as i32,
is_nullable: false,
default_constraint: None,
default_constraint: vec![],
},
ColumnDef {
name: "key".to_string(),
datatype: ColumnDataType::Uint64 as i32,
is_nullable: false,
default_constraint: None,
default_constraint: vec![],
},
ColumnDef {
name: "value".to_string(),
datatype: ColumnDataType::Uint64 as i32,
is_nullable: false,
default_constraint: None,
default_constraint: vec![],
},
],
time_index: "timestamp".to_string(),
primary_keys: vec!["key".to_string()],
create_if_not_exists: false,
table_options: Default::default(),
table_id: Some(1024),
table_id: Some(TableId { id: 1024 }),
region_ids: vec![0],
};
let admin = Admin::new("create table", client.clone());
let result = admin.create(create_table_expr).await.unwrap();
let db = Database::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, client);
let result = db.create(create_table_expr).await.unwrap();
event!(Level::INFO, "create table result: {:#?}", result);
let logical = mock_logical_plan();
event!(Level::INFO, "plan size: {:#?}", logical.len());
let db = Database::new("greptime", client);
let result = db.logical_plan(logical).await.unwrap();
event!(Level::INFO, "result: {:#?}", result);
@@ -90,12 +89,8 @@ fn mock_logical_plan() -> Vec<u8> {
let read_type = ReadType::NamedTable(named_table);
let read_rel = ReadRel {
common: None,
base_schema: None,
filter: None,
projection: None,
advanced_extension: None,
read_type: Some(read_type),
..Default::default()
};
let mut buf = vec![];


@@ -1,51 +0,0 @@
// Copyright 2022 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::sync::Arc;
use client::{Client, Database};
use common_grpc::MockExecution;
use datafusion::physical_plan::expressions::Column;
use datafusion::physical_plan::projection::ProjectionExec;
use datafusion::physical_plan::{ExecutionPlan, PhysicalExpr};
use tracing::{event, Level};
fn main() {
tracing::subscriber::set_global_default(tracing_subscriber::FmtSubscriber::builder().finish())
.unwrap();
run();
}
#[tokio::main]
async fn run() {
let client = Client::with_urls(vec!["127.0.0.1:3001"]);
let db = Database::new("greptime", client);
let physical = mock_physical_plan();
let result = db.physical_plan(physical, None).await;
event!(Level::INFO, "result: {:#?}", result);
}
fn mock_physical_plan() -> Arc<dyn ExecutionPlan> {
let id_expr = Arc::new(Column::new("id", 0)) as Arc<dyn PhysicalExpr>;
let age_expr = Arc::new(Column::new("age", 2)) as Arc<dyn PhysicalExpr>;
let expr = vec![(id_expr, "id".to_string()), (age_expr, "age".to_string())];
let input =
Arc::new(MockExecution::new("mock_input_exec".to_string())) as Arc<dyn ExecutionPlan>;
let projection = ProjectionExec::try_new(expr, input).unwrap();
Arc::new(projection)
}


@@ -1,34 +0,0 @@
// Copyright 2022 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use client::{Client, Database, Select};
use tracing::{event, Level};
fn main() {
tracing::subscriber::set_global_default(tracing_subscriber::FmtSubscriber::builder().finish())
.unwrap();
run();
}
#[tokio::main]
async fn run() {
let client = Client::with_urls(vec!["127.0.0.1:3001"]);
let db = Database::new("greptime", client);
let sql = Select::Sql("select * from demo".to_string());
let result = db.select(sql).await.unwrap();
event!(Level::INFO, "result: {:#?}", result);
}


@@ -1,125 +0,0 @@
// Copyright 2022 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use api::v1::*;
use common_error::prelude::StatusCode;
use common_query::Output;
use snafu::prelude::*;
use crate::database::PROTOCOL_VERSION;
use crate::{error, Client, Result};
#[derive(Clone, Debug)]
pub struct Admin {
name: String,
client: Client,
}
impl Admin {
pub fn new(name: impl Into<String>, client: Client) -> Self {
Self {
name: name.into(),
client,
}
}
pub async fn create(&self, expr: CreateExpr) -> Result<AdminResult> {
let header = ExprHeader {
version: PROTOCOL_VERSION,
};
let expr = AdminExpr {
header: Some(header),
expr: Some(admin_expr::Expr::Create(expr)),
};
self.do_request(expr).await
}
pub async fn do_request(&self, expr: AdminExpr) -> Result<AdminResult> {
// `remove(0)` is safe because of `do_requests`'s invariants.
Ok(self.do_requests(vec![expr]).await?.remove(0))
}
pub async fn alter(&self, expr: AlterExpr) -> Result<AdminResult> {
let header = ExprHeader {
version: PROTOCOL_VERSION,
};
let expr = AdminExpr {
header: Some(header),
expr: Some(admin_expr::Expr::Alter(expr)),
};
Ok(self.do_requests(vec![expr]).await?.remove(0))
}
/// Invariants: the lengths of input vec (`Vec<AdminExpr>`) and output vec (`Vec<AdminResult>`) are equal.
async fn do_requests(&self, exprs: Vec<AdminExpr>) -> Result<Vec<AdminResult>> {
let expr_count = exprs.len();
let req = AdminRequest {
name: self.name.clone(),
exprs,
};
let resp = self.client.admin(req).await?;
let results = resp.results;
ensure!(
results.len() == expr_count,
error::MissingResultSnafu {
name: "admin_results",
expected: expr_count,
actual: results.len(),
}
);
Ok(results)
}
pub async fn create_database(&self, expr: CreateDatabaseExpr) -> Result<AdminResult> {
let header = ExprHeader {
version: PROTOCOL_VERSION,
};
let expr = AdminExpr {
header: Some(header),
expr: Some(admin_expr::Expr::CreateDatabase(expr)),
};
Ok(self.do_requests(vec![expr]).await?.remove(0))
}
}
pub fn admin_result_to_output(admin_result: AdminResult) -> Result<Output> {
let header = admin_result.header.context(error::MissingHeaderSnafu)?;
if !StatusCode::is_success(header.code) {
return error::DatanodeSnafu {
code: header.code,
msg: header.err_msg,
}
.fail();
}
let result = admin_result.result.context(error::MissingResultSnafu {
name: "result".to_string(),
expected: 1_usize,
actual: 0_usize,
})?;
let output = match result {
admin_result::Result::Mutate(mutate) => {
if mutate.failure != 0 {
return error::MutateFailureSnafu {
failure: mutate.failure,
}
.fail();
}
Output::AffectedRows(mutate.success as usize)
}
};
Ok(output)
}


@@ -1,10 +1,10 @@
// Copyright 2022 Greptime Team
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,8 +14,7 @@
use std::sync::Arc;
use api::v1::greptime_client::GreptimeClient;
use api::v1::*;
use arrow_flight::flight_service_client::FlightServiceClient;
use common_grpc::channel_manager::ChannelManager;
use parking_lot::RwLock;
use snafu::{OptionExt, ResultExt};
@@ -24,6 +23,21 @@ use tonic::transport::Channel;
use crate::load_balance::{LoadBalance, Loadbalancer};
use crate::{error, Result};
pub(crate) struct FlightClient {
addr: String,
client: FlightServiceClient<Channel>,
}
impl FlightClient {
pub(crate) fn addr(&self) -> &str {
&self.addr
}
pub(crate) fn mut_inner(&mut self) -> &mut FlightServiceClient<Channel> {
&mut self.client
}
}
#[derive(Clone, Debug, Default)]
pub struct Client {
inner: Arc<Inner>,
@@ -104,57 +118,23 @@ impl Client {
self.inner.set_peers(urls);
}
pub async fn admin(&self, req: AdminRequest) -> Result<AdminResponse> {
let req = BatchRequest {
admins: vec![req],
..Default::default()
};
let mut res = self.batch(req).await?;
res.admins.pop().context(error::MissingResultSnafu {
name: "admins",
expected: 1_usize,
actual: 0_usize,
})
}
pub async fn database(&self, req: DatabaseRequest) -> Result<DatabaseResponse> {
let req = BatchRequest {
databases: vec![req],
..Default::default()
};
let mut res = self.batch(req).await?;
res.databases.pop().context(error::MissingResultSnafu {
name: "database",
expected: 1_usize,
actual: 0_usize,
})
}
pub async fn batch(&self, req: BatchRequest) -> Result<BatchResponse> {
let peer = self
pub(crate) fn make_client(&self) -> Result<FlightClient> {
let addr = self
.inner
.get_peer()
.context(error::IllegalGrpcClientStateSnafu {
err_msg: "No available peer found",
})?;
let mut client = self.make_client(&peer)?;
let result = client
.batch(req)
.await
.context(error::TonicStatusSnafu { addr: peer })?;
Ok(result.into_inner())
}
fn make_client(&self, addr: impl AsRef<str>) -> Result<GreptimeClient<Channel>> {
let addr = addr.as_ref();
let channel = self
.inner
.channel_manager
.get(addr)
.context(error::CreateChannelSnafu { addr })?;
Ok(GreptimeClient::new(channel))
.get(&addr)
.context(error::CreateChannelSnafu { addr: &addr })?;
Ok(FlightClient {
addr,
client: FlightServiceClient::new(channel),
})
}
}


@@ -1,10 +1,10 @@
// Copyright 2022 Greptime Team
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -12,259 +12,197 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use std::sync::Arc;
use std::str::FromStr;
use api::v1::codec::SelectResult as GrpcSelectResult;
use api::v1::column::SemanticType;
use api::v1::auth_header::AuthScheme;
use api::v1::ddl_request::Expr as DdlExpr;
use api::v1::greptime_request::Request;
use api::v1::query_request::Query;
use api::v1::{
object_expr, object_result, select_expr, DatabaseRequest, ExprHeader, InsertExpr,
MutateResult as GrpcMutateResult, ObjectExpr, ObjectResult as GrpcObjectResult, PhysicalPlan,
SelectExpr,
AlterExpr, AuthHeader, CreateTableExpr, DdlRequest, DropTableExpr, GreptimeRequest,
InsertRequest, QueryRequest, RequestHeader,
};
use common_error::status_code::StatusCode;
use common_grpc::{AsExcutionPlan, DefaultAsPlanImpl};
use common_insert::column_to_vector;
use arrow_flight::{FlightData, Ticket};
use common_error::prelude::*;
use common_grpc::flight::{flight_messages_to_recordbatches, FlightDecoder, FlightMessage};
use common_query::Output;
use common_recordbatch::{RecordBatch, RecordBatches};
use datafusion::physical_plan::ExecutionPlan;
use datatypes::prelude::*;
use datatypes::schema::{ColumnSchema, Schema};
use snafu::{ensure, OptionExt, ResultExt};
use futures_util::{TryFutureExt, TryStreamExt};
use prost::Message;
use snafu::{ensure, ResultExt};
use crate::error::{
ColumnToVectorSnafu, ConvertSchemaSnafu, DatanodeSnafu, DecodeSelectSnafu, EncodePhysicalSnafu,
};
use crate::error::{ConvertFlightDataSnafu, IllegalFlightMessagesSnafu};
use crate::{error, Client, Result};
pub const PROTOCOL_VERSION: u32 = 1;
#[derive(Clone, Debug)]
pub struct Database {
name: String,
// The "catalog" and "schema" to be used in processing the requests at the server side.
// They are the "hint" or "context", just like how the "database" in "USE" statement is treated in MySQL.
// They will be carried in the request header.
catalog: String,
schema: String,
client: Client,
ctx: FlightContext,
}
impl Database {
pub fn new(name: impl Into<String>, client: Client) -> Self {
pub fn new(catalog: impl Into<String>, schema: impl Into<String>, client: Client) -> Self {
Self {
name: name.into(),
catalog: catalog.into(),
schema: schema.into(),
client,
ctx: FlightContext::default(),
}
}
pub fn name(&self) -> &str {
&self.name
pub fn catalog(&self) -> &String {
&self.catalog
}
pub async fn insert(&self, insert: InsertExpr) -> Result<ObjectResult> {
let header = ExprHeader {
version: PROTOCOL_VERSION,
};
let expr = ObjectExpr {
header: Some(header),
expr: Some(object_expr::Expr::Insert(insert)),
};
self.object(expr).await?.try_into()
pub fn set_catalog(&mut self, catalog: impl Into<String>) {
self.catalog = catalog.into();
}
pub async fn batch_insert(&self, insert_exprs: Vec<InsertExpr>) -> Result<Vec<ObjectResult>> {
let header = ExprHeader {
version: PROTOCOL_VERSION,
};
let obj_exprs = insert_exprs
.into_iter()
.map(|expr| ObjectExpr {
header: Some(header.clone()),
expr: Some(object_expr::Expr::Insert(expr)),
})
.collect();
self.objects(obj_exprs)
.await?
.into_iter()
.map(|result| result.try_into())
.collect()
pub fn schema(&self) -> &String {
&self.schema
}
pub async fn select(&self, expr: Select) -> Result<ObjectResult> {
let select_expr = match expr {
Select::Sql(sql) => SelectExpr {
expr: Some(select_expr::Expr::Sql(sql)),
},
};
self.do_select(select_expr).await
pub fn set_schema(&mut self, schema: impl Into<String>) {
self.schema = schema.into();
}
pub async fn physical_plan(
&self,
physical: Arc<dyn ExecutionPlan>,
original_ql: Option<String>,
) -> Result<ObjectResult> {
let plan = DefaultAsPlanImpl::try_from_physical_plan(physical.clone())
.context(EncodePhysicalSnafu { physical })?
.bytes;
let original_ql = original_ql.unwrap_or_default();
let select_expr = SelectExpr {
expr: Some(select_expr::Expr::PhysicalPlan(PhysicalPlan {
original_ql: original_ql.into_bytes(),
plan,
})),
};
self.do_select(select_expr).await
pub fn set_auth(&mut self, auth: AuthScheme) {
self.ctx.auth_header = Some(AuthHeader {
auth_scheme: Some(auth),
});
}
pub async fn logical_plan(&self, logical_plan: Vec<u8>) -> Result<ObjectResult> {
let select_expr = SelectExpr {
expr: Some(select_expr::Expr::LogicalPlan(logical_plan)),
};
self.do_select(select_expr).await
pub async fn insert(&self, request: InsertRequest) -> Result<Output> {
self.do_get(Request::Insert(request)).await
}
async fn do_select(&self, select_expr: SelectExpr) -> Result<ObjectResult> {
let header = ExprHeader {
version: PROTOCOL_VERSION,
pub async fn sql(&self, sql: &str) -> Result<Output> {
self.do_get(Request::Query(QueryRequest {
query: Some(Query::Sql(sql.to_string())),
}))
.await
}
pub async fn logical_plan(&self, logical_plan: Vec<u8>) -> Result<Output> {
self.do_get(Request::Query(QueryRequest {
query: Some(Query::LogicalPlan(logical_plan)),
}))
.await
}
pub async fn create(&self, expr: CreateTableExpr) -> Result<Output> {
self.do_get(Request::Ddl(DdlRequest {
expr: Some(DdlExpr::CreateTable(expr)),
}))
.await
}
pub async fn alter(&self, expr: AlterExpr) -> Result<Output> {
self.do_get(Request::Ddl(DdlRequest {
expr: Some(DdlExpr::Alter(expr)),
}))
.await
}
pub async fn drop_table(&self, expr: DropTableExpr) -> Result<Output> {
self.do_get(Request::Ddl(DdlRequest {
expr: Some(DdlExpr::DropTable(expr)),
}))
.await
}
async fn do_get(&self, request: Request) -> Result<Output> {
let request = GreptimeRequest {
header: Some(RequestHeader {
catalog: self.catalog.clone(),
schema: self.schema.clone(),
authorization: self.ctx.auth_header.clone(),
}),
request: Some(request),
};
let request = Ticket {
ticket: request.encode_to_vec().into(),
};
let expr = ObjectExpr {
header: Some(header),
expr: Some(object_expr::Expr::Select(select_expr)),
};
let mut client = self.client.make_client()?;
let obj_result = self.object(expr).await?;
obj_result.try_into()
}
pub async fn object(&self, expr: ObjectExpr) -> Result<GrpcObjectResult> {
let res = self.objects(vec![expr]).await?.pop().unwrap();
Ok(res)
}
async fn objects(&self, exprs: Vec<ObjectExpr>) -> Result<Vec<GrpcObjectResult>> {
let expr_count = exprs.len();
let req = DatabaseRequest {
name: self.name.clone(),
exprs,
};
let res = self.client.database(req).await?;
let res = res.results;
ensure!(
res.len() == expr_count,
error::MissingResultSnafu {
name: "object_results",
expected: expr_count,
actual: res.len(),
}
);
Ok(res)
}
}
#[derive(Debug)]
pub enum ObjectResult {
Select(GrpcSelectResult),
Mutate(GrpcMutateResult),
}
impl TryFrom<api::v1::ObjectResult> for ObjectResult {
type Error = error::Error;
fn try_from(object_result: api::v1::ObjectResult) -> std::result::Result<Self, Self::Error> {
let header = object_result.header.context(error::MissingHeaderSnafu)?;
if !StatusCode::is_success(header.code) {
return DatanodeSnafu {
code: header.code,
msg: header.err_msg,
}
.fail();
}
let obj_result = object_result.result.context(error::MissingResultSnafu {
name: "result".to_string(),
expected: 1_usize,
actual: 0_usize,
})?;
Ok(match obj_result {
object_result::Result::Select(select) => {
let result = (*select.raw_data).try_into().context(DecodeSelectSnafu)?;
ObjectResult::Select(result)
}
object_result::Result::Mutate(mutate) => ObjectResult::Mutate(mutate),
})
}
}
pub enum Select {
Sql(String),
}
impl TryFrom<ObjectResult> for Output {
type Error = error::Error;
fn try_from(value: ObjectResult) -> Result<Self> {
let output = match value {
ObjectResult::Select(select) => {
let vectors = select
.columns
.iter()
.map(|column| {
column_to_vector(column, select.row_count).context(ColumnToVectorSnafu)
// TODO(LFC): Stream the Flight data instead of collecting it all at once.
let flight_data: Vec<FlightData> = client
.mut_inner()
.do_get(request)
.and_then(|response| response.into_inner().try_collect())
.await
.map_err(|e| {
let code = get_metadata_value(&e, INNER_ERROR_CODE)
.and_then(|s| StatusCode::from_str(&s).ok())
.unwrap_or(StatusCode::Unknown);
let msg = get_metadata_value(&e, INNER_ERROR_MSG).unwrap_or(e.to_string());
error::ExternalSnafu { code, msg }
.fail::<()>()
.map_err(BoxedError::new)
.context(error::FlightGetSnafu {
tonic_code: e.code(),
addr: client.addr(),
})
.collect::<Result<Vec<VectorRef>>>()?;
.unwrap_err()
})?;
let column_schemas = select
.columns
.iter()
.zip(vectors.iter())
.map(|(column, vector)| {
let datatype = vector.data_type();
// nullable or not, does not affect the output
let mut column_schema =
ColumnSchema::new(&column.column_name, datatype, true);
if column.semantic_type == SemanticType::Timestamp as i32 {
column_schema = column_schema.with_time_index(true);
}
column_schema
})
.collect::<Vec<ColumnSchema>>();
let decoder = &mut FlightDecoder::default();
let flight_messages = flight_data
.into_iter()
.map(|x| decoder.try_decode(x).context(ConvertFlightDataSnafu))
.collect::<Result<Vec<_>>>()?;
let schema = Arc::new(Schema::try_new(column_schemas).context(ConvertSchemaSnafu)?);
let recordbatches = if vectors.is_empty() {
RecordBatches::try_new(schema, vec![])
} else {
RecordBatch::new(schema, vectors)
.and_then(|batch| RecordBatches::try_new(batch.schema.clone(), vec![batch]))
let output = if let Some(FlightMessage::AffectedRows(rows)) = flight_messages.get(0) {
ensure!(
flight_messages.len() == 1,
IllegalFlightMessagesSnafu {
reason: "Expect 'AffectedRows' Flight messages to be one and only!"
}
.context(error::CreateRecordBatchesSnafu)?;
Output::RecordBatches(recordbatches)
}
ObjectResult::Mutate(mutate) => {
if mutate.failure != 0 {
return error::MutateFailureSnafu {
failure: mutate.failure,
}
.fail();
}
Output::AffectedRows(mutate.success as usize)
}
);
Output::AffectedRows(*rows)
} else {
let recordbatches = flight_messages_to_recordbatches(flight_messages)
.context(ConvertFlightDataSnafu)?;
Output::RecordBatches(recordbatches)
};
Ok(output)
}
}
fn get_metadata_value(e: &tonic::Status, key: &str) -> Option<String> {
e.metadata()
.get(key)
.and_then(|v| String::from_utf8(v.as_bytes().to_vec()).ok())
}
#[derive(Default, Debug, Clone)]
pub struct FlightContext {
auth_header: Option<AuthHeader>,
}
#[cfg(test)]
mod tests {
use std::sync::Arc;
use api::helper::ColumnDataTypeWrapper;
use api::v1::Column;
use api::v1::auth_header::AuthScheme;
use api::v1::{AuthHeader, Basic, Column};
use common_grpc::select::{null_mask, values};
use common_grpc_expr::column_to_vector;
use datatypes::prelude::{Vector, VectorRef};
use datatypes::vectors::{
BinaryVector, BooleanVector, DateTimeVector, DateVector, Float32Vector, Float64Vector,
Int16Vector, Int32Vector, Int64Vector, Int8Vector, StringVector, UInt16Vector,
UInt32Vector, UInt64Vector, UInt8Vector,
};
use super::*;
use crate::database::FlightContext;
#[test]
fn test_column_to_vector() {
@@ -341,13 +279,34 @@ mod tests {
fn create_test_column(vector: VectorRef) -> Column {
let wrapper: ColumnDataTypeWrapper = vector.data_type().try_into().unwrap();
let array = vector.to_arrow_array();
Column {
column_name: "test".to_string(),
semantic_type: 1,
values: Some(values(&[array.clone()]).unwrap()),
null_mask: null_mask(&vec![array], vector.len()),
values: Some(values(&[vector.clone()]).unwrap()),
null_mask: null_mask(&[vector.clone()], vector.len()),
datatype: wrapper.datatype() as i32,
}
}
#[test]
fn test_flight_ctx() {
let mut ctx = FlightContext::default();
assert!(ctx.auth_header.is_none());
let basic = AuthScheme::Basic(Basic {
username: "u".to_string(),
password: "p".to_string(),
});
ctx.auth_header = Some(AuthHeader {
auth_scheme: Some(basic),
});
assert!(matches!(
ctx.auth_header,
Some(AuthHeader {
auth_scheme: Some(AuthScheme::Basic(_)),
})
))
}
}
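A minimal end-to-end sketch of the reworked Flight-based client, for orientation (not part of the diff; it assumes a reachable frontend at an illustrative address and an existing "demo" table):

// Sketch only. Requires the client, common-query and tokio crates.
use client::{Client, Database, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_query::Output;

#[tokio::main]
async fn main() -> client::Result<()> {
    // The address is illustrative; point it at a running GreptimeDB frontend.
    let client = Client::with_urls(vec!["127.0.0.1:3001"]);
    let db = Database::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, client);
    match db.sql("SELECT * FROM demo").await? {
        Output::AffectedRows(rows) => println!("affected rows: {rows}"),
        Output::RecordBatches(batches) => {
            let total: usize = batches.iter().map(|b| b.num_rows()).sum();
            println!("returned rows: {total}");
        }
        // Streams can be collected with RecordBatches::try_collect, as the REPL below does.
        Output::Stream(_) => println!("got a record batch stream"),
    }
    Ok(())
}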


@@ -1,10 +1,10 @@
// Copyright 2022 Greptime Team
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -13,67 +13,43 @@
// limitations under the License.
use std::any::Any;
use std::sync::Arc;
use api::serde::DecodeError;
use common_error::prelude::*;
use datafusion::physical_plan::ExecutionPlan;
use tonic::Code;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub))]
pub enum Error {
#[snafu(display("Connect failed to {}, source: {}", url, source))]
ConnectFailed {
url: String,
source: tonic::transport::Error,
#[snafu(display("Illegal Flight messages, reason: {}", reason))]
IllegalFlightMessages {
reason: String,
backtrace: Backtrace,
},
#[snafu(display("Missing {}, expected {}, actual {}", name, expected, actual))]
MissingResult {
name: String,
expected: usize,
actual: usize,
},
#[snafu(display("Missing result header"))]
MissingHeader,
#[snafu(display("Tonic internal error, addr: {}, source: {}", addr, source))]
TonicStatus {
#[snafu(display(
"Failed to do Flight get, addr: {}, code: {}, source: {}",
addr,
tonic_code,
source
))]
FlightGet {
addr: String,
source: tonic::Status,
backtrace: Backtrace,
tonic_code: Code,
source: BoxedError,
},
#[snafu(display("Fail to decode select result, source: {}", source))]
DecodeSelect { source: DecodeError },
#[snafu(display("Error occurred on the data node, code: {}, msg: {}", code, msg))]
Datanode { code: u32, msg: String },
#[snafu(display("Failed to encode physical plan: {:?}, source: {}", physical, source))]
EncodePhysical {
physical: Arc<dyn ExecutionPlan>,
#[snafu(display("Failed to convert FlightData, source: {}", source))]
ConvertFlightData {
#[snafu(backtrace)]
source: common_grpc::Error,
},
#[snafu(display("Mutate result has failure {}", failure))]
MutateFailure { failure: u32, backtrace: Backtrace },
#[snafu(display("Column datatype error, source: {}", source))]
ColumnDataType {
#[snafu(backtrace)]
source: api::error::Error,
},
#[snafu(display("Failed to create RecordBatches, source: {}", source))]
CreateRecordBatches {
#[snafu(backtrace)]
source: common_recordbatch::error::Error,
},
#[snafu(display("Illegal GRPC client state: {}", err_msg))]
IllegalGrpcClientState {
err_msg: String,
@@ -83,12 +59,6 @@ pub enum Error {
#[snafu(display("Missing required field in protobuf, field: {}", field))]
MissingField { field: String, backtrace: Backtrace },
#[snafu(display("Failed to convert schema, source: {}", source))]
ConvertSchema {
#[snafu(backtrace)]
source: datatypes::error::Error,
},
#[snafu(display(
"Failed to create gRPC channel, peer address: {}, source: {}",
addr,
@@ -100,11 +70,9 @@ pub enum Error {
source: common_grpc::error::Error,
},
#[snafu(display("Failed to convert column to vector, source: {}", source))]
ColumnToVector {
#[snafu(backtrace)]
source: common_insert::error::Error,
},
/// Error deserialized from gRPC metadata
#[snafu(display("{}", msg))]
ExternalError { code: StatusCode, msg: String },
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -112,21 +80,15 @@ pub type Result<T> = std::result::Result<T, Error>;
impl ErrorExt for Error {
fn status_code(&self) -> StatusCode {
match self {
Error::ConnectFailed { .. }
| Error::MissingResult { .. }
| Error::MissingHeader { .. }
| Error::TonicStatus { .. }
| Error::DecodeSelect { .. }
| Error::Datanode { .. }
| Error::EncodePhysical { .. }
| Error::MutateFailure { .. }
Error::IllegalFlightMessages { .. }
| Error::ColumnDataType { .. }
| Error::MissingField { .. } => StatusCode::Internal,
Error::ConvertSchema { source } => source.status_code(),
Error::CreateRecordBatches { source } => source.status_code(),
Error::CreateChannel { source, .. } => source.status_code(),
Error::FlightGet { source, .. } => source.status_code(),
Error::CreateChannel { source, .. } | Error::ConvertFlightData { source } => {
source.status_code()
}
Error::IllegalGrpcClientState { .. } => StatusCode::Unexpected,
Error::ColumnToVector { source, .. } => source.status_code(),
Error::ExternalError { code, .. } => *code,
}
}


@@ -1,10 +1,10 @@
// Copyright 2022 Greptime Team
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -12,14 +12,14 @@
// See the License for the specific language governing permissions and
// limitations under the License.
pub mod admin;
mod client;
mod database;
mod error;
pub mod load_balance;
pub use api;
pub use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
pub use self::client::Client;
pub use self::database::{Database, ObjectResult, Select};
pub use self::database::Database;
pub use self::error::{Error, Result};


@@ -1,10 +1,10 @@
// Copyright 2022 Greptime Team
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,


@@ -1,29 +1,54 @@
[package]
name = "cmd"
version = "0.1.0"
edition = "2021"
version.workspace = true
edition.workspace = true
license.workspace = true
default-run = "greptime"
license = "Apache-2.0"
[[bin]]
name = "greptime"
path = "src/bin/greptime.rs"
[features]
mem-prof = ["tikv-jemallocator", "tikv-jemalloc-ctl"]
[dependencies]
anymap = "1.0.0-beta.2"
catalog = { path = "../catalog" }
clap = { version = "3.1", features = ["derive"] }
client = { path = "../client" }
common-base = { path = "../common/base" }
common-error = { path = "../common/error" }
common-query = { path = "../common/query" }
common-recordbatch = { path = "../common/recordbatch" }
common-telemetry = { path = "../common/telemetry", features = [
"deadlock_detection",
] }
datanode = { path = "../datanode" }
either = "1.8"
frontend = { path = "../frontend" }
futures = "0.3"
futures.workspace = true
meta-client = { path = "../meta-client" }
meta-srv = { path = "../meta-srv" }
serde = "1.0"
snafu = { version = "0.7", features = ["backtraces"] }
tokio = { version = "1.18", features = ["full"] }
nu-ansi-term = "0.46"
partition = { path = "../partition" }
query = { path = "../query" }
rustyline = "10.1"
serde.workspace = true
servers = { path = "../servers" }
session = { path = "../session" }
snafu.workspace = true
substrait = { path = "../common/substrait" }
tikv-jemalloc-ctl = { version = "0.5", optional = true }
tikv-jemallocator = { version = "0.5", optional = true }
tokio.workspace = true
toml = "0.5"
[dev-dependencies]
serde = "1.0"
tempdir = "0.3"
common-test-util = { path = "../common/test-util" }
rexpect = "0.5"
serde.workspace = true
[build-dependencies]
build-data = "0.1.3"

src/cmd/build.rs (new file)

@@ -0,0 +1,29 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
const DEFAULT_VALUE: &str = "unknown";
fn main() {
println!(
"cargo:rustc-env=GIT_COMMIT={}",
build_data::get_git_commit().unwrap_or_else(|_| DEFAULT_VALUE.to_string())
);
println!(
"cargo:rustc-env=GIT_BRANCH={}",
build_data::get_git_branch().unwrap_or_else(|_| DEFAULT_VALUE.to_string())
);
println!(
"cargo:rustc-env=GIT_DIRTY={}",
build_data::get_git_dirty().map_or(DEFAULT_VALUE.to_string(), |v| v.to_string())
);
}


@@ -1,10 +1,10 @@
// Copyright 2022 Greptime Team
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -16,11 +16,11 @@ use std::fmt;
use clap::Parser;
use cmd::error::Result;
use cmd::{datanode, frontend, metasrv, standalone};
use cmd::{cli, datanode, frontend, metasrv, standalone};
use common_telemetry::logging::{error, info};
#[derive(Parser)]
#[clap(name = "greptimedb")]
#[clap(name = "greptimedb", version = print_version())]
struct Command {
#[clap(long, default_value = "/tmp/greptimedb/logs")]
log_dir: String,
@@ -46,6 +46,8 @@ enum SubCommand {
Metasrv(metasrv::Command),
#[clap(name = "standalone")]
Standalone(standalone::Command),
#[clap(name = "cli")]
Cli(cli::Command),
}
impl SubCommand {
@@ -55,6 +57,7 @@ impl SubCommand {
SubCommand::Frontend(cmd) => cmd.run().await,
SubCommand::Metasrv(cmd) => cmd.run().await,
SubCommand::Standalone(cmd) => cmd.run().await,
SubCommand::Cli(cmd) => cmd.run().await,
}
}
}
@@ -66,10 +69,28 @@ impl fmt::Display for SubCommand {
SubCommand::Frontend(..) => write!(f, "greptime-frontend"),
SubCommand::Metasrv(..) => write!(f, "greptime-metasrv"),
SubCommand::Standalone(..) => write!(f, "greptime-standalone"),
SubCommand::Cli(_) => write!(f, "greptime-cli"),
}
}
}
fn print_version() -> &'static str {
concat!(
"\nbranch: ",
env!("GIT_BRANCH"),
"\ncommit: ",
env!("GIT_COMMIT"),
"\ndirty: ",
env!("GIT_DIRTY"),
"\nversion: ",
env!("CARGO_PKG_VERSION")
)
}
#[cfg(feature = "mem-prof")]
#[global_allocator]
static ALLOC: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc;
#[tokio::main]
async fn main() -> Result<()> {
let cmd = Command::parse();

src/cmd/src/cli.rs (new file)

@@ -0,0 +1,64 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
mod cmd;
mod helper;
mod repl;
use clap::Parser;
use repl::Repl;
use crate::error::Result;
#[derive(Parser)]
pub struct Command {
#[clap(subcommand)]
cmd: SubCommand,
}
impl Command {
pub async fn run(self) -> Result<()> {
self.cmd.run().await
}
}
#[derive(Parser)]
enum SubCommand {
Attach(AttachCommand),
}
impl SubCommand {
async fn run(self) -> Result<()> {
match self {
SubCommand::Attach(cmd) => cmd.run().await,
}
}
}
#[derive(Debug, Parser)]
pub(crate) struct AttachCommand {
#[clap(long)]
pub(crate) grpc_addr: String,
#[clap(long)]
pub(crate) meta_addr: Option<String>,
#[clap(long, action)]
pub(crate) disable_helper: bool,
}
impl AttachCommand {
async fn run(self) -> Result<()> {
let mut repl = Repl::try_new(&self).await?;
repl.run().await
}
}
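For reference, attaching the new REPL to a running instance might look like this (addresses are illustrative; --meta-addr is only needed when statements should be planned locally through the metasrv-backed catalog):

greptime cli attach --grpc-addr 127.0.0.1:3001
greptime cli attach --grpc-addr 127.0.0.1:3001 --meta-addr 127.0.0.1:3002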

src/cmd/src/cli/cmd.rs (new file)

@@ -0,0 +1,154 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::error::{Error, InvalidReplCommandSnafu, Result};
/// Represents the parsed command from the user (which may span multiple lines)
#[derive(Debug, PartialEq)]
pub(crate) enum ReplCommand {
Help,
UseDatabase { db_name: String },
Sql { sql: String },
Exit,
}
impl TryFrom<&str> for ReplCommand {
type Error = Error;
fn try_from(input: &str) -> Result<Self> {
let input = input.trim();
if input.is_empty() {
return InvalidReplCommandSnafu {
reason: "No command specified".to_string(),
}
.fail();
}
// If line ends with ';', it must be treated as a complete input.
// However, the opposite is not true.
let input_is_completed = input.ends_with(';');
let input = input.strip_suffix(';').map(|x| x.trim()).unwrap_or(input);
let lowercase = input.to_lowercase();
match lowercase.as_str() {
"help" => Ok(Self::Help),
"exit" | "quit" => Ok(Self::Exit),
_ => match input.split_once(' ') {
Some((maybe_use, database)) if maybe_use.to_lowercase() == "use" => {
Ok(Self::UseDatabase {
db_name: database.trim().to_string(),
})
}
// Any valid SQL must contain at least one whitespace character.
Some(_) if input_is_completed => Ok(Self::Sql {
sql: input.to_string(),
}),
_ => InvalidReplCommandSnafu {
reason: format!("unknown command '{input}', maybe input is not completed"),
}
.fail(),
},
}
}
}
impl ReplCommand {
pub fn help() -> &'static str {
r#"
Available commands (case insensitive):
- 'help': print this help
- 'exit' or 'quit': exit the REPL
- 'use <your database name>': switch to another database/schema context
- Any other input will be treated as SQL.
A statement may span multiple lines; just remember to end it with ';'.
"#
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::error::Error::InvalidReplCommand;
#[test]
fn test_from_str() {
fn test_ok(s: &str, expected: ReplCommand) {
let actual: ReplCommand = s.try_into().unwrap();
assert_eq!(expected, actual, "'{}'", s);
}
fn test_err(s: &str) {
let result: Result<ReplCommand> = s.try_into();
assert!(matches!(result, Err(InvalidReplCommand { .. })))
}
test_err("");
test_err(" ");
test_err("\t");
test_ok("help", ReplCommand::Help);
test_ok("help", ReplCommand::Help);
test_ok(" help", ReplCommand::Help);
test_ok(" help ", ReplCommand::Help);
test_ok(" HELP ", ReplCommand::Help);
test_ok(" Help; ", ReplCommand::Help);
test_ok(" help ; ", ReplCommand::Help);
test_ok("exit", ReplCommand::Exit);
test_ok("exit;", ReplCommand::Exit);
test_ok("exit ;", ReplCommand::Exit);
test_ok("EXIT", ReplCommand::Exit);
test_ok("quit", ReplCommand::Exit);
test_ok("quit;", ReplCommand::Exit);
test_ok("quit ;", ReplCommand::Exit);
test_ok("QUIT", ReplCommand::Exit);
test_ok(
"use Foo",
ReplCommand::UseDatabase {
db_name: "Foo".to_string(),
},
);
test_ok(
" use Foo ; ",
ReplCommand::UseDatabase {
db_name: "Foo".to_string(),
},
);
// ensure that database name is case sensitive
test_ok(
" use FOO ; ",
ReplCommand::UseDatabase {
db_name: "FOO".to_string(),
},
);
// ensure that we aren't messing with capitalization
test_ok(
"SELECT * from foo;",
ReplCommand::Sql {
sql: "SELECT * from foo".to_string(),
},
);
// Input lines that don't match any of the cases above must end with ';' to be treated as valid SQL.
test_err("insert blah");
test_ok(
"insert blah;",
ReplCommand::Sql {
sql: "insert blah".to_string(),
},
);
}
}

src/cmd/src/cli/helper.rs (new file)

@@ -0,0 +1,112 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::borrow::Cow;
use rustyline::completion::Completer;
use rustyline::highlight::{Highlighter, MatchingBracketHighlighter};
use rustyline::hint::{Hinter, HistoryHinter};
use rustyline::validate::{ValidationContext, ValidationResult, Validator};
use crate::cli::cmd::ReplCommand;
pub(crate) struct RustylineHelper {
hinter: HistoryHinter,
highlighter: MatchingBracketHighlighter,
}
impl Default for RustylineHelper {
fn default() -> Self {
Self {
hinter: HistoryHinter {},
highlighter: MatchingBracketHighlighter::default(),
}
}
}
impl rustyline::Helper for RustylineHelper {}
impl Validator for RustylineHelper {
fn validate(&self, ctx: &mut ValidationContext<'_>) -> rustyline::Result<ValidationResult> {
let input = ctx.input();
match ReplCommand::try_from(input) {
Ok(_) => Ok(ValidationResult::Valid(None)),
Err(e) => {
if input.trim_end().ends_with(';') {
// If line ends with ';', it HAS to be a valid command.
Ok(ValidationResult::Invalid(Some(e.to_string())))
} else {
Ok(ValidationResult::Incomplete)
}
}
}
}
}
impl Hinter for RustylineHelper {
type Hint = String;
fn hint(&self, line: &str, pos: usize, ctx: &rustyline::Context<'_>) -> Option<Self::Hint> {
self.hinter.hint(line, pos, ctx)
}
}
impl Highlighter for RustylineHelper {
fn highlight<'l>(&self, line: &'l str, pos: usize) -> Cow<'l, str> {
self.highlighter.highlight(line, pos)
}
fn highlight_prompt<'b, 's: 'b, 'p: 'b>(
&'s self,
prompt: &'p str,
default: bool,
) -> Cow<'b, str> {
self.highlighter.highlight_prompt(prompt, default)
}
fn highlight_hint<'h>(&self, hint: &'h str) -> Cow<'h, str> {
use nu_ansi_term::Style;
Cow::Owned(Style::new().dimmed().paint(hint).to_string())
}
fn highlight_candidate<'c>(
&self,
candidate: &'c str,
completion: rustyline::CompletionType,
) -> Cow<'c, str> {
self.highlighter.highlight_candidate(candidate, completion)
}
fn highlight_char(&self, line: &str, pos: usize) -> bool {
self.highlighter.highlight_char(line, pos)
}
}
impl Completer for RustylineHelper {
type Candidate = String;
fn complete(
&self,
line: &str,
pos: usize,
ctx: &rustyline::Context<'_>,
) -> rustyline::Result<(usize, Vec<Self::Candidate>)> {
// If there is a hint, use it as the auto-completion when the user hits `tab`
if let Some(hint) = self.hinter.hint(line, pos, ctx) {
Ok((pos, vec![hint]))
} else {
Ok((0, vec![]))
}
}
}

src/cmd/src/cli/repl.rs (new file)

@@ -0,0 +1,267 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::path::PathBuf;
use std::sync::Arc;
use std::time::Instant;
use catalog::remote::MetaKvBackend;
use client::{Client, Database, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_error::prelude::ErrorExt;
use common_query::Output;
use common_recordbatch::RecordBatches;
use common_telemetry::logging;
use either::Either;
use frontend::catalog::FrontendCatalogManager;
use frontend::datanode::DatanodeClients;
use meta_client::client::MetaClientBuilder;
use partition::manager::PartitionRuleManager;
use partition::route::TableRoutes;
use query::datafusion::DatafusionQueryEngine;
use query::logical_optimizer::LogicalOptimizer;
use query::parser::QueryLanguageParser;
use query::plan::LogicalPlan;
use query::QueryEngine;
use rustyline::error::ReadlineError;
use rustyline::Editor;
use session::context::QueryContext;
use snafu::{ErrorCompat, ResultExt};
use substrait::{DFLogicalSubstraitConvertor, SubstraitPlan};
use crate::cli::cmd::ReplCommand;
use crate::cli::helper::RustylineHelper;
use crate::cli::AttachCommand;
use crate::error::{
CollectRecordBatchesSnafu, ParseSqlSnafu, PlanStatementSnafu, PrettyPrintRecordBatchesSnafu,
ReadlineSnafu, ReplCreationSnafu, RequestDatabaseSnafu, Result, StartMetaClientSnafu,
SubstraitEncodeLogicalPlanSnafu,
};
/// Captures the state of the repl, gathers commands and executes them one by one
pub(crate) struct Repl {
/// Rustyline editor for interacting with user on command line
rl: Editor<RustylineHelper>,
/// Current prompt
prompt: String,
/// Client for interacting with GreptimeDB
database: Database,
query_engine: Option<DatafusionQueryEngine>,
}
#[allow(clippy::print_stdout)]
impl Repl {
fn print_help(&self) {
println!("{}", ReplCommand::help())
}
pub(crate) async fn try_new(cmd: &AttachCommand) -> Result<Self> {
let mut rl = Editor::new().context(ReplCreationSnafu)?;
if !cmd.disable_helper {
rl.set_helper(Some(RustylineHelper::default()));
let history_file = history_file();
if let Err(e) = rl.load_history(&history_file) {
logging::debug!(
"failed to load history file on {}, error: {e}",
history_file.display()
);
}
}
let client = Client::with_urls([&cmd.grpc_addr]);
let database = Database::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, client);
let query_engine = if let Some(meta_addr) = &cmd.meta_addr {
create_query_engine(meta_addr).await.map(Some)?
} else {
None
};
Ok(Self {
rl,
prompt: "> ".to_string(),
database,
query_engine,
})
}
/// Parse the next command
fn next_command(&mut self) -> Result<ReplCommand> {
match self.rl.readline(&self.prompt) {
Ok(ref line) => {
let request = line.trim();
self.rl.add_history_entry(request.to_string());
request.try_into()
}
Err(ReadlineError::Eof) | Err(ReadlineError::Interrupted) => Ok(ReplCommand::Exit),
// Some sort of real underlying error
Err(e) => Err(e).context(ReadlineSnafu),
}
}
/// Read Evaluate Print Loop (interactive command line) for GreptimeDB
///
/// Inspired by and based on repl.rs from InfluxDB IOX
pub(crate) async fn run(&mut self) -> Result<()> {
println!("Ready for commands. (Hint: try 'help')");
loop {
match self.next_command()? {
ReplCommand::Help => {
self.print_help();
}
ReplCommand::UseDatabase { db_name } => {
if self.execute_sql(format!("USE {db_name}")).await {
println!("Using {db_name}");
self.database.set_schema(&db_name);
self.prompt = format!("[{db_name}] > ");
}
}
ReplCommand::Sql { sql } => {
self.execute_sql(sql).await;
}
ReplCommand::Exit => {
return Ok(());
}
}
}
}
async fn execute_sql(&self, sql: String) -> bool {
self.do_execute_sql(sql)
.await
.map_err(|e| {
let status_code = e.status_code();
let root_cause = e.iter_chain().last().unwrap();
println!("Error: {}({status_code}), {root_cause}", status_code as u32)
})
.is_ok()
}
async fn do_execute_sql(&self, sql: String) -> Result<()> {
let start = Instant::now();
let output = if let Some(query_engine) = &self.query_engine {
let stmt = QueryLanguageParser::parse_sql(&sql)
.with_context(|_| ParseSqlSnafu { sql: sql.clone() })?;
let query_ctx = Arc::new(QueryContext::with(
self.database.catalog(),
self.database.schema(),
));
let LogicalPlan::DfPlan(plan) = query_engine
.statement_to_plan(stmt, query_ctx)
.await
.and_then(|x| query_engine.optimize(&x))
.context(PlanStatementSnafu)?;
let plan = DFLogicalSubstraitConvertor {}
.encode(plan)
.context(SubstraitEncodeLogicalPlanSnafu)?;
self.database.logical_plan(plan.to_vec()).await
} else {
self.database.sql(&sql).await
}
.context(RequestDatabaseSnafu { sql: &sql })?;
let either = match output {
Output::Stream(s) => {
let x = RecordBatches::try_collect(s)
.await
.context(CollectRecordBatchesSnafu)?;
Either::Left(x)
}
Output::RecordBatches(x) => Either::Left(x),
Output::AffectedRows(rows) => Either::Right(rows),
};
let end = Instant::now();
match either {
Either::Left(recordbatches) => {
let total_rows: usize = recordbatches.iter().map(|x| x.num_rows()).sum();
if total_rows > 0 {
println!(
"{}",
recordbatches
.pretty_print()
.context(PrettyPrintRecordBatchesSnafu)?
);
}
println!("Total Rows: {total_rows}")
}
Either::Right(rows) => println!("Affected Rows: {rows}"),
};
println!("Cost {} ms", (end - start).as_millis());
Ok(())
}
}
impl Drop for Repl {
fn drop(&mut self) {
if self.rl.helper().is_some() {
let history_file = history_file();
if let Err(e) = self.rl.save_history(&history_file) {
logging::debug!(
"failed to save history file on {}, error: {e}",
history_file.display()
);
}
}
}
}
/// Return the location of the history file (defaults to $HOME/".greptimedb_cli_history")
fn history_file() -> PathBuf {
let mut buf = match std::env::var("HOME") {
Ok(home) => PathBuf::from(home),
Err(_) => PathBuf::new(),
};
buf.push(".greptimedb_cli_history");
buf
}
async fn create_query_engine(meta_addr: &str) -> Result<DatafusionQueryEngine> {
let mut meta_client = MetaClientBuilder::default().enable_store().build();
meta_client
.start([meta_addr])
.await
.context(StartMetaClientSnafu)?;
let meta_client = Arc::new(meta_client);
let backend = Arc::new(MetaKvBackend {
client: meta_client.clone(),
});
let table_routes = Arc::new(TableRoutes::new(meta_client));
let partition_manager = Arc::new(PartitionRuleManager::new(table_routes));
let datanode_clients = Arc::new(DatanodeClients::default());
let catalog_list = Arc::new(FrontendCatalogManager::new(
backend,
partition_manager,
datanode_clients,
));
Ok(DatafusionQueryEngine::new(catalog_list, Default::default()))
}


@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at
 //
-// http://www.apache.org/licenses/LICENSE-2.0
+//     http://www.apache.org/licenses/LICENSE-2.0
 //
 // Unless required by applicable law or agreed to in writing, software
 // distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,8 +14,11 @@
 use clap::Parser;
 use common_telemetry::logging;
-use datanode::datanode::{Datanode, DatanodeOptions};
-use frontend::frontend::Mode;
+use datanode::datanode::{
+    Datanode, DatanodeOptions, FileConfig, ObjectStoreConfig, ProcedureConfig,
+};
+use meta_client::MetaClientOptions;
+use servers::Mode;
 use snafu::ResultExt;
 
 use crate::error::{Error, MissingConfigSnafu, Result, StartDatanodeSnafu};
@@ -46,18 +49,26 @@ impl SubCommand {
     }
 }
 
-#[derive(Debug, Parser)]
+#[derive(Debug, Parser, Default)]
 struct StartCommand {
     #[clap(long)]
     node_id: Option<u64>,
     #[clap(long)]
     rpc_addr: Option<String>,
+    #[clap(long)]
+    rpc_hostname: Option<String>,
     #[clap(long)]
     mysql_addr: Option<String>,
     #[clap(long)]
     metasrv_addr: Option<String>,
     #[clap(short, long)]
     config_file: Option<String>,
+    #[clap(long)]
+    data_dir: Option<String>,
+    #[clap(long)]
+    wal_dir: Option<String>,
+    #[clap(long)]
+    procedure_dir: Option<String>,
 }
 
 impl StartCommand {
@@ -89,6 +100,11 @@ impl TryFrom<StartCommand> for DatanodeOptions {
         if let Some(addr) = cmd.rpc_addr {
             opts.rpc_addr = addr;
         }
+
+        if cmd.rpc_hostname.is_some() {
+            opts.rpc_hostname = cmd.rpc_hostname;
+        }
+
         if let Some(addr) = cmd.mysql_addr {
             opts.mysql_addr = addr;
         }
@@ -98,7 +114,13 @@ impl TryFrom<StartCommand> for DatanodeOptions {
         }
 
         if let Some(meta_addr) = cmd.metasrv_addr {
-            opts.meta_client_opts.metasrv_addr = meta_addr;
+            opts.meta_client_options
+                .get_or_insert_with(MetaClientOptions::default)
+                .metasrv_addrs = meta_addr
+                .split(',')
+                .map(&str::trim)
+                .map(&str::to_string)
+                .collect::<_>();
             opts.mode = Mode::Distributed;
         }
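Note the behavioral change in this hunk: `metasrv_addr` now accepts a comma-separated list, and each entry is trimmed before being stored in `metasrv_addrs`. A small self-contained sketch of the same split-and-trim pipeline:

// Mirrors the split/trim/collect chain above.
let meta_addr = "127.0.0.1:3002, 127.0.0.1:3003 ,127.0.0.1:3004";
let addrs: Vec<String> = meta_addr
    .split(',')
    .map(str::trim)
    .map(str::to_string)
    .collect();
assert_eq!(
    vec![
        "127.0.0.1:3002".to_string(),
        "127.0.0.1:3003".to_string(),
        "127.0.0.1:3004".to_string(),
    ],
    addrs
);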
@@ -108,6 +130,19 @@ impl TryFrom<StartCommand> for DatanodeOptions {
             }
             .fail();
         }
+
+        if let Some(data_dir) = cmd.data_dir {
+            opts.storage = ObjectStoreConfig::File(FileConfig { data_dir });
+        }
+
+        if let Some(wal_dir) = cmd.wal_dir {
+            opts.wal.dir = wal_dir;
+        }
+
+        if let Some(procedure_dir) = cmd.procedure_dir {
+            opts.procedure = Some(ProcedureConfig::from_file_path(procedure_dir));
+        }
+
         Ok(opts)
     }
 }
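The new `data_dir`, `wal_dir`, and `procedure_dir` overrides are plain clap fields; assuming clap derive's default snake_case-to-kebab-case flag naming (an assumption, since the mapping is not shown in this diff), they would be exercised like this:

use clap::Parser;

// Hypothetical invocation; flag names assume clap's default derivation
// from the field names above.
let cmd = StartCommand::parse_from([
    "datanode",
    "--node-id", "42",
    "--data-dir", "/data/greptimedb",
    "--wal-dir", "/data/greptimedb/wal",
]);
assert_eq!(Some(42), cmd.node_id);
assert_eq!(Some("/data/greptimedb".to_string()), cmd.data_dir);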
@@ -115,104 +150,127 @@ impl TryFrom<StartCommand> for DatanodeOptions {
 #[cfg(test)]
 mod tests {
     use std::assert_matches::assert_matches;
+    use std::io::Write;
+    use std::time::Duration;
 
-    use datanode::datanode::ObjectStoreConfig;
-    use frontend::frontend::Mode;
+    use common_test_util::temp_dir::create_named_temp_file;
+    use datanode::datanode::{CompactionConfig, ObjectStoreConfig};
+    use servers::Mode;
 
     use super::*;
 
     #[test]
     fn test_read_from_config_file() {
+        let mut file = create_named_temp_file();
+        let toml_str = r#"
+            mode = "distributed"
+            enable_memory_catalog = false
+            node_id = 42
+            rpc_addr = "127.0.0.1:3001"
+            rpc_hostname = "127.0.0.1"
+            rpc_runtime_size = 8
+            mysql_addr = "127.0.0.1:4406"
+            mysql_runtime_size = 2
+
+            [meta_client_options]
+            metasrv_addrs = ["127.0.0.1:3002"]
+            timeout_millis = 3000
+            connect_timeout_millis = 5000
+            tcp_nodelay = true
+
+            [wal]
+            dir = "/tmp/greptimedb/wal"
+            file_size = "1GB"
+            purge_threshold = "50GB"
+            purge_interval = "10m"
+            read_batch_size = 128
+            sync_write = false
+
+            [storage]
+            type = "File"
+            data_dir = "/tmp/greptimedb/data/"
+
+            [compaction]
+            max_inflight_tasks = 4
+            max_files_in_level0 = 8
+            max_purge_tasks = 32
+        "#;
+        write!(file, "{}", toml_str).unwrap();
+
         let cmd = StartCommand {
-            node_id: None,
-            rpc_addr: None,
-            mysql_addr: None,
-            metasrv_addr: None,
-            config_file: Some(format!(
-                "{}/../../config/datanode.example.toml",
-                std::env::current_dir().unwrap().as_path().to_str().unwrap()
-            )),
+            config_file: Some(file.path().to_str().unwrap().to_string()),
+            ..Default::default()
         };
+
         let options: DatanodeOptions = cmd.try_into().unwrap();
         assert_eq!("127.0.0.1:3001".to_string(), options.rpc_addr);
-        assert_eq!("/tmp/greptimedb/wal".to_string(), options.wal_dir);
-        assert_eq!("127.0.0.1:3306".to_string(), options.mysql_addr);
-        assert_eq!(4, options.mysql_runtime_size);
-        assert_eq!(
-            "1.1.1.1:3002".to_string(),
-            options.meta_client_opts.metasrv_addr
-        );
-        assert_eq!(5000, options.meta_client_opts.connect_timeout_millis);
-        assert_eq!(3000, options.meta_client_opts.timeout_millis);
-        assert!(!options.meta_client_opts.tcp_nodelay);
+        assert_eq!("127.0.0.1:4406".to_string(), options.mysql_addr);
+        assert_eq!(2, options.mysql_runtime_size);
+        assert_eq!(Some(42), options.node_id);
+        assert_eq!(Duration::from_secs(600), options.wal.purge_interval);
+        assert_eq!(1024 * 1024 * 1024, options.wal.file_size.0);
+        assert_eq!(1024 * 1024 * 1024 * 50, options.wal.purge_threshold.0);
+        assert!(!options.wal.sync_write);
+
+        let MetaClientOptions {
+            metasrv_addrs: metasrv_addr,
+            timeout_millis,
+            connect_timeout_millis,
+            tcp_nodelay,
+        } = options.meta_client_options.unwrap();
+        assert_eq!(vec!["127.0.0.1:3002".to_string()], metasrv_addr);
+        assert_eq!(5000, connect_timeout_millis);
+        assert_eq!(3000, timeout_millis);
+        assert!(tcp_nodelay);
+
         match options.storage {
-            ObjectStoreConfig::File { data_dir } => {
+            ObjectStoreConfig::File(FileConfig { data_dir }) => {
                 assert_eq!("/tmp/greptimedb/data/".to_string(), data_dir)
             }
             ObjectStoreConfig::S3 { .. } => unreachable!(),
             ObjectStoreConfig::Oss { .. } => unreachable!(),
         };
+
+        assert_eq!(
+            CompactionConfig {
+                max_inflight_tasks: 4,
+                max_files_in_level0: 8,
+                max_purge_tasks: 32,
+            },
+            options.compaction
+        );
     }
 
     #[test]
     fn test_try_from_cmd() {
         assert_eq!(
             Mode::Standalone,
-            DatanodeOptions::try_from(StartCommand {
-                node_id: None,
-                rpc_addr: None,
-                mysql_addr: None,
-                metasrv_addr: None,
-                config_file: None
-            })
-            .unwrap()
-            .mode
+            DatanodeOptions::try_from(StartCommand::default())
+                .unwrap()
+                .mode
         );
 
         let mode = DatanodeOptions::try_from(StartCommand {
             node_id: Some(42),
-            rpc_addr: None,
-            mysql_addr: None,
             metasrv_addr: Some("127.0.0.1:3002".to_string()),
-            config_file: None,
+            ..Default::default()
         })
         .unwrap()
         .mode;
         assert_matches!(mode, Mode::Distributed);
 
         assert!(DatanodeOptions::try_from(StartCommand {
-            node_id: None,
-            rpc_addr: None,
-            mysql_addr: None,
             metasrv_addr: Some("127.0.0.1:3002".to_string()),
-            config_file: None,
+            ..Default::default()
         })
         .is_err());
+
+        // Providing node_id but leaving metasrv_addr absent is ok, since metasrv_addr has a default value.
+        DatanodeOptions::try_from(StartCommand {
+            node_id: Some(42),
+            ..Default::default()
+        })
+        .unwrap();
     }
-
-    #[test]
-    fn test_merge_config() {
-        let dn_opts = DatanodeOptions::try_from(StartCommand {
-            node_id: None,
-            rpc_addr: None,
-            mysql_addr: None,
-            metasrv_addr: None,
-            config_file: Some(format!(
-                "{}/../../config/datanode.example.toml",
-                std::env::current_dir().unwrap().as_path().to_str().unwrap()
-            )),
-        })
-        .unwrap();
-        assert_eq!(Some(42), dn_opts.node_id);
-        assert_eq!("1.1.1.1:3002", dn_opts.meta_client_opts.metasrv_addr);
-    }
 }

Some files were not shown because too many files have changed in this diff.