Compare commits

201 Commits

Author SHA1 Message Date
discord9
93fcb7454c chore: after rebase 2024-05-10 15:11:17 +08:00
discord9
cfe28b6974 chore: debug log 2024-05-10 14:52:22 +08:00
discord9
9ec6107988 refactor: remove unused imports 2024-05-10 14:52:22 +08:00
discord9
e840bb469d chore: refactor& remove TODOs 2024-05-10 14:52:22 +08:00
discord9
684850f451 fix: get true QueryContext& dedup code 2024-05-10 14:52:22 +08:00
discord9
76aadb2223 chore: cleanup debug log&fix after rebase 2024-05-10 14:52:22 +08:00
discord9
f360b2e812 tests: fix tests 2024-05-10 14:52:22 +08:00
discord9
138a2aba7f fix: allow empty expire when 2024-05-10 14:52:22 +08:00
discord9
8f6462c0b0 feat: parse expire when 2024-05-10 14:52:22 +08:00
discord9
46d0b3cd64 fix: table name trying best to get full name 2024-05-10 14:52:22 +08:00
discord9
a17a7f4e47 feat: working poc demo...ish 2024-05-10 14:52:22 +08:00
discord9
50335dd53c write back seems didn't work 2024-05-10 14:52:22 +08:00
discord9
abaf881f06 refactor: tableName as array 2024-05-10 14:52:22 +08:00
discord9
e1a8215394 tests(WIP): get demo working 2024-05-10 14:52:22 +08:00
discord9
d7942a1a00 fix: make worker handle async 2024-05-10 14:52:22 +08:00
discord9
a6727e2e8d refactor: use table name for sink table 2024-05-10 14:52:22 +08:00
discord9
d5bdbedcd6 refactor: rwlock for frontend invoker&async lock 2024-05-10 14:51:44 +08:00
discord9
878737f781 feat: integrate flow to standalone(untested) 2024-05-10 14:51:44 +08:00
discord9
d88cff6f51 feat: impl Flownode for FlowNodeManager 2024-05-10 14:51:44 +08:00
discord9
e7801abd0c feat(WIP): simple parser 2024-05-10 14:51:44 +08:00
discord9
d7a132a02f chore: remove some TODO done 2024-05-10 14:51:44 +08:00
discord9
a3417f50cf refactor: rename some task to flow 2024-05-10 14:51:44 +08:00
discord9
099f414f63 chore: rename some task to flow 2024-05-10 14:51:44 +08:00
discord9
c22185abce refactor: make worker sync only and separate thread&test 2024-05-10 14:51:44 +08:00
discord9
e33afa53f4 feat: grpc trait&Server trait 2024-05-10 14:51:44 +08:00
discord9
7eaf471808 feat(WIP): main loop 2024-05-10 14:51:44 +08:00
discord9
acba753500 chore: remove unused 2024-05-10 14:51:44 +08:00
discord9
5736373820 feat impl grpc server 2024-05-10 14:51:44 +08:00
discord9
74dee25688 feat: new() for FlowNodeManager 2024-05-10 14:51:44 +08:00
discord9
edcbc89c38 feat: gen write back req 2024-05-10 14:51:44 +08:00
discord9
e88a40b58b refactor: use seperate Worker 2024-05-10 14:51:44 +08:00
discord9
c7647759be feat(WIP): FlowWorker: !Send 2024-05-10 14:51:44 +08:00
discord9
d8a191a2db refactor: FlowNodeContext 2024-05-10 14:51:44 +08:00
discord9
ea40691c71 chore: some unfinished tests 2024-05-10 14:51:44 +08:00
discord9
640674b9bc feat: get table schema 2024-05-10 14:51:44 +08:00
discord9
3fb3fb18c2 tests: add one for Send 2024-05-10 14:51:44 +08:00
discord9
1067d3453d feat: dataflow adapter 2024-05-10 14:51:44 +08:00
discord9
57e3912aca feat(WIP): FlowNodeManager 2024-05-10 14:51:44 +08:00
discord9
ebcfb0f1d7 feat: now types of errors 2024-05-10 14:51:44 +08:00
discord9
6442c96847 feat: render src/sink 2024-05-10 14:51:44 +08:00
discord9
b19febc97c feat: sql_to_flow_plan 2024-05-10 14:51:44 +08:00
discord9
8240a1ace1 feat: find all used collection 2024-05-10 14:51:44 +08:00
Weny Xu
89dbf6ddd2 chore: bump proto to 219b24 (#3899)
chore: bump to 219b24
2024-05-10 03:55:34 +00:00
Ruihang Xia
66aa08d815 fix: memory leak in unit test of telemetry (#3897)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-05-09 14:29:22 +00:00
Weny Xu
b8a325d18c feat: implement CacheContainer & TableFlownodeSetCache (#3885)
* feat: implement the `CacheContainer`

* feat: implement the `TableFlownodeSetCache`

* chore: remove unused feature

* chore: remove unused `Arc`

* refactor: refactor origin `get` to `get_by_ref`

* chore: update comments

* refactor: refactor `CacheContainer`

* chore: move `CacheContainer` to container.rs

* feat: add metrics

* chore: update tests

* test: add tests for value not exists

* test: add test for get

* chore: apply suggestions from CR
2024-05-09 09:26:35 +00:00
Ruihang Xia
ed95e99556 chore: deprecate nyc-taxi benchmark (#3891)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-05-09 08:59:38 +00:00
Weny Xu
5545a8b023 feat: implement drop flow procedure (#3877)
* feat: implement `destroy_flow_metadata` method

* chore: bump proto to 65c1364

* feat: implement the drop flow procedure

* feat: add `MockFlownodeManager`

* tests: add tests for create flow & drop flow procedure

* chore: apply suggestions from CR

* chore: use `ClusterId`
2024-05-09 08:23:19 +00:00
Weny Xu
5140d247e3 feat: implement drop flow parser (#3888)
* feat: implement drop flow parser

* Update src/sql/src/parsers/drop_parser.rs

Co-authored-by: Ruihang Xia <waynestxia@gmail.com>

* fix: fmt code

---------

Co-authored-by: Ruihang Xia <waynestxia@gmail.com>
2024-05-09 07:14:22 +00:00
Jeremyhi
f995f6099f chore: avoid double future (#3890) 2024-05-09 07:11:22 +00:00
Jeremyhi
7de62ef5d0 feat: add metasrv start time to node info (#3883) 2024-05-09 06:53:55 +00:00
Weny Xu
0e05f85a9d feat: pass QueryContext to FlowRequestHeader (#3878)
* feat: pass `QueryContext` to `DdlTaskRequest`

* feat: pass `QueryContext` to `FlowRequestHeader`

* chore: fmt toml
2024-05-09 04:57:33 +00:00
Ruihang Xia
a6a702de4e feat: support querying field column names in Prometheus HTTP API (#3880)
* feat: support querying field column names in Prometheus HTTP API

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix clippy

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* use tables stream API

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-05-08 12:18:20 +00:00
Weny Xu
d99746385b refactor: move mirror insertion tasks to the background runtime (#3879) 2024-05-08 07:41:06 +00:00
Yingwen
9d8f72d611 fix: add data type to vector cache key (#3876)
* test: test for null tag

* test: sqlness test

* fix: add type to vector cache key

* test: update sqlness test
2024-05-08 06:30:28 +00:00
Lei, HUANG
c07a1babd5 refactor(logstore): remove Entry::namemspace (#3875)
refactor(logstore): remove LogStore::namemspace and related associate types on Entry.

Signed-off-by: Lei, HUANG <mrsatangel@gmail.com>
2024-05-08 06:13:33 +00:00
zyy17
cc8d6b1200 refactor: move Plugins to plugins.rs and use rwlock (#3862)
* refactor: move `Plugins` to `plugins.rs` and add unit tests

* refactor: use rwlock instead of mutex to improve performance
2024-05-07 23:32:22 +00:00
irenjj
5274806108 feat: limit total rows copied in COPY TABLE FROM stmt (#3819)
* feat: limit total rows copied in `COPY TABLE FROM` stmt

* fix: break outer loop

* fmt

* fixup

* test: add limit rows test

* fix test

* fix test: add drop

* fix test

* fix test

* fix test

* fix: change to const
2024-05-07 15:44:02 +00:00
Jeremyhi
6e1cc1df55 feat: metasrvs nodeinfo (#3873)
* feat: get metasrv nodeinfo

* fix: sqlness test

* chore: by comment

* feat: proto version
2024-05-07 11:20:57 +00:00
Ruihang Xia
65f80af9a9 build: run cargo gc to build binaries (#3874)
* build: run cargo gc to build binaries

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add comment

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-05-07 11:16:47 +00:00
zyy17
a68072cb21 refactor: add the main cli entry point struct Command{} and simplify main() (#3850)
refactor: add the main cli entrypoint struct `Command{}` to simplify `main()`
2024-05-07 09:08:35 +00:00
Ruihang Xia
71c1c7ca24 fix: return metric name instead of query in Prometheus /series API (#3864)
* fix: return metric name instead of query in Prometheus /series API

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* omit non-tag columns from result

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix clippy

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-05-07 09:07:16 +00:00
Weny Xu
1b5862223c fix: register regions during procedure recovery (#3859)
* fix: register regions during procedure recovery

* feat: add `recover` to `Procedure` trait

* refactor: move recovery to `recover` method
2024-05-07 08:30:41 +00:00
Ruihang Xia
c0be0c30de build: strip sqlness binary (#3872)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-05-07 07:53:48 +00:00
Kelvin Wu
154f561da1 refactor: passing QueryContext to RegionServer (#3829)
* refactor: passing QueryContext to RegionServer

* refactor: change the return type of build() in QueryContextBuilder

* fix: update greptime-proto reference

* chore: apply suggestion

* chore: revert the last commit

---------

Co-authored-by: dennis zhuang <killme2008@gmail.com>
2024-05-07 15:10:59 +08:00
Weny Xu
aa2934b422 docs: run fuzz tests in dev mode & disable sanitizer (#3871)
doc: run fuzz tests in dev mode & disable sanitizer
2024-05-07 14:15:07 +08:00
shuiyisong
1b93a026c2 ci: align clippy checks and fixup (#3868)
* fix: make clippy

* fix: use make in CI
2024-05-07 03:00:38 +00:00
Ruihang Xia
530353785c refactor: remove re-export from logging (#3865)
* refactor: remove re-export from logging

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix merge problem

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* run formatter

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-05-06 13:26:01 +00:00
discord9
573c19be32 feat(flow): mirror insert req to flow node (#3858)
* feat: mirror insert req to flow node

* refactor: group_requests_by_peer

* chore: rename `nodes` to `flows` to be more apt

* docs: add TODO

* refactor: split flow&data node grouping to two func

* refactor: mirror_flow_node_request

* chore: add some TODOs

* refactor: use Option in value

* feat: skip non-src table quickly

* docs: add TODO for  `Peer.address`

* fix: dedup
2024-05-06 11:33:14 +00:00
Lei, HUANG
f3b68253c2 chore: remove one slice copy in wal encoding (#3861)
* chore: remove one slice copy in wal encoding

Signed-off-by: Lei, HUANG <mrsatangel@gmail.com>

* fix: cr comments

---------

Signed-off-by: Lei, HUANG <mrsatangel@gmail.com>
2024-05-06 10:26:40 +00:00
shuiyisong
6e9e8fad26 refactor!: remove opentsdb tcp server (#3828)
* refactor: remove opentsdb tcp server

* refactor: remove config and add test

* refactor: update docs and remove unused code
2024-05-06 06:42:05 +00:00
Weny Xu
6e12e1b84b fix: retrieve all info instead of checking on demand (#3846)
* fix: renew region lease bug

* refactor: only register regions once

* chore: apply suggestions from CR
2024-05-06 04:44:47 +00:00
tison
7d447c20c5 chore: try avoid one slice (#3856)
Signed-off-by: tison <wander4096@gmail.com>
2024-05-06 03:44:12 +00:00
tison
9c3b9600ca ci: avoid trying to send slack notification in forks (#3857) 2024-05-06 03:18:21 +00:00
tison
73fe075049 ci: replace pull-request actions with cyborg (#3854)
* ci: replace pull-request actions with cyborg

Signed-off-by: tison <wander4096@gmail.com>

* skip cron maintenance in forks

Signed-off-by: tison <wander4096@gmail.com>

---------

Signed-off-by: tison <wander4096@gmail.com>
2024-05-04 03:12:26 +00:00
tison
2748cec7e2 ci: create cyborg for all heavy GitHub integrated actions (#3852)
* ci: create cyborg for all heavy GitHub integrated actions

Signed-off-by: tison <wander4096@gmail.com>

* hack trigger for testing

Signed-off-by: tison <wander4096@gmail.com>

* fixup token population

Signed-off-by: tison <wander4096@gmail.com>

* tidy up

Signed-off-by: tison <wander4096@gmail.com>

* use tsx

Signed-off-by: tison <wander4096@gmail.com>

---------

Signed-off-by: tison <wander4096@gmail.com>
2024-05-02 12:31:18 +00:00
dennis zhuang
65d47bab56 feat: adds information_schema cluster_info table (#3832)
* feat: adds server running mode to KvBackendCatalogManager

* feat: adds MetaClient to KvBackendCatalogManager

* feat: impl information_schema.cluster_info table

* fix: forgot files

* test: update information_schema result

* feat: adds start_time and uptime to cluster_info

* chore: tweak cargo and comment

* feat: rename greptime_region_peers to region_peers

* fix: cluster_info result

* chore: simplify sqlness commands

* chore: set peer_id to -1 for frontends

* fix: move cluster_info to greptime catalog

* chore: use official proto

* feat: adds active_time

* chore: apply suggestion

Co-authored-by: Jeremyhi <jiachun_feng@proton.me>

* chore: STANDALONE for runtime_metrics

---------

Co-authored-by: Jeremyhi <jiachun_feng@proton.me>
Co-authored-by: tison <wander4096@gmail.com>
2024-05-02 02:49:46 +00:00
Weny Xu
f6e2039eb8 test: introduce unstable fuzz create table test (#3788)
* feat: implement unstable_fuzz_create_table_standalone

* chore: use drop database

* docs: update docs

* chore: add ci config

* chore: add feature gate

* fix: fix clippy

* chore: update ci

* Apply suggestions from code review

* feat: reduce num

* Apply suggestions from code review

* chore: apply suggestions from CR

* Apply suggestions from code review

* chore: reduce `wait_timeout` in health check

* Update .env.example

* refactor: use `init_greptime_connections_via_env`

* refactor: use `init_greptime_connections_via_env`

---------

Co-authored-by: tison <wander4096@gmail.com>
2024-05-01 06:08:49 +00:00
dimbtp
3b89b9ddd8 refactor: move database client to test (#3820)
* remove `Database` in `cli export`

* move `Database` to tests-integration

* fix clippy

* move `DatabaseClient` along with `Database`

* `cli export` now use http api

* add TODO for `tests-integration` dependencies

* cleanup code

* 'cli export' test use http api

* remove unsed dependencies in cmd crate

* apply review comments and clean code

* remove unused methods

Signed-off-by: tison <wander4096@gmail.com>

---------

Signed-off-by: tison <wander4096@gmail.com>
Co-authored-by: tison <wander4096@gmail.com>
2024-05-01 05:55:13 +00:00
tison
695746193b ci: reduce workflow files by merging actions (#3848)
* ci: merge license header checker into dev ci

Signed-off-by: tison <wander4096@gmail.com>

* ci: merge nightly-funtional-tests.yml into nightly-ci.yml

Signed-off-by: tison <wander4096@gmail.com>

* fix typos

Signed-off-by: tison <wander4096@gmail.com>

---------

Signed-off-by: tison <wander4096@gmail.com>
2024-05-01 04:44:00 +00:00
Yohan Wal
573d369f77 feat(fuzz): add insert logical table target (#3842)
* refactor: let upper caller control whether to omit column list

* feat(fuzz): add insert logical table target

* ci: add fuzz_insert_logical_table ci cfg
2024-05-01 03:48:51 +00:00
Ruihang Xia
e6eca8ca0c fix: count_wildcard_to_time_index_rule doesn't handle table reference properly (#3847)
* validate time index col

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* use TableReference instead

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add more tests

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-04-30 15:59:56 +00:00
Ruihang Xia
e84b1eefdf perf: optimize count(*) (#3845)
* perf: optimize count(*)

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fallback to count(1) for temporary table

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* handle alias expr in range plan

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* handle subquery alias

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* rename file

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-04-30 10:10:11 +00:00
Yingwen
777bc3b89d fix: compiler warnings on Windows (#3844)
* chore: fix windows warnings

* test: try fix create_metadata_region on windows

* style: fix clippy

* style: fix clippy
2024-04-30 09:40:11 +00:00
Weny Xu
81f3007f6f refactor: remove TableMetaKey trait (#3837)
* refactor: use MetaKey trait instead of TableMetaKey

* test: add tests

* test: add more tests

* refactor: use `&str` instead of `String`
2024-04-30 06:31:45 +00:00
shuiyisong
863ee608ca chore: adding Grafana config for cluster monitor (#3781)
* chore: add greptimedb-cluster.json

* chore: update readme in grafana

* chore: update readme
2024-04-30 06:20:23 +00:00
Weny Xu
20cbc039e6 refactor: remove catalog prefix (#3835)
* refactor: remove catalog prefix

* refactor: remove scope.rs

* fix: fix tests

* chore: update comments

* chore: apply suggestions from CR
2024-04-30 03:27:33 +00:00
tison
d11b1fa389 chore: correct RepeatedField origin (#3838)
* chore: correct RepeatedField origin

Signed-off-by: tison <wander4096@gmail.com>

* fixup

Signed-off-by: tison <wander4096@gmail.com>

---------

Signed-off-by: tison <wander4096@gmail.com>
2024-04-30 03:13:54 +00:00
Ruihang Xia
a0f4881c6e feat: physical optimizer RemoveDuplicate to remove duplicate exec plans (#3839)
* feat: physical optimizer RemoveDuplicate to remove duplicate exec plans

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* update document

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* update sqlness results

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-04-30 03:13:39 +00:00
Kould
aba5e41799 feat: support ALTER TABLE ... MODIFY COLUMN ... ... (#3796)
* feat: support `ALTER COLUMN xxx TYPE xxx`

* fix: test `test_parse_alter_change_column_type`

* style: code fmt

* style: move to new test: `test_make_alter_column_type_region_request`

* style: simplify the code

* style: remove `v1::region::ChangeColumnType`

* resolve conflicts

* fix: test `test_make_alter_column_type_region_request`

* style: simplify the code

* rebase

* rebase

* rebase

* fix: `ALTER COLUMN ... TYPE` -> `MODIFY COLUMN`

* fix: `parser` -> `self.parser`

* Apply suggestions from code review

---------

Co-authored-by: Ruihang Xia <waynestxia@gmail.com>
Co-authored-by: WenyXu <wenymedia@gmail.com>
2024-04-30 03:13:33 +00:00
Ruihang Xia
371d4cf9f5 fix: broken link in contributing guide (#3831)
* docs: revise style guide about test mod

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* use relative path instead

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-04-30 03:06:47 +00:00
Yohan Wal
8e3515d396 ci: add fuzz_insert ci cfg (#3840)
chore(ci): add fuzz_insert ci cfg
2024-04-30 02:44:36 +00:00
Weny Xu
701aba9cdb refactor: rename flow task to flow (#3833)
* refactor: rename to `MIN_USER_FLOW_ID`

* refactor: rename to `FLOW_ID_SEQ`

* refactor: rename to `flow_id_sequence`

* refactor: rename to `FlowMetadataManager`

* refactor: rename flow_task.rs to flow.rs

* refactor: rename to FlowInfoManager

* refactor: rename to FlowName

* refactor: rename to FlownodeFlow

* refactor: rename to TableFlow

* refactor: remove TASK

* refactor: rename to __flow

* refactor: rename to flow_id

* refactor: rename to flow_name

* refactor: update comments

* refactor: rename to flow_metadata_manager

* refactor: rename to flow_metadata_allocator

* refactor: rename to FlowMetadataAllocator

* refactor: rename task suffix

* refactor: rename FlowTask to FlowInfo

* refactor: rename FlowTaskScoped to FlowScoped

* refactor: rename FlowTaskId to FlowId

* chore: bump proto to b5412f7

* chore: apply suggestions from CR

* chore: apply suggestions from CR

* chore: apply suggestions from CR
2024-04-29 14:02:52 +00:00
Weny Xu
b493ea1b38 feat: implement the CreateFlowProcedure (#3810)
* feat: implement `FlowTaskMetadataAllocator`

* feat: add `FlowTaskMetadataManagerRef` and `FlowTaskMetadataAllocatorRef`

* chore: fix clippy

* feat: add `FlowTaskNameLock`

* feat: implement the `CreateFlowTaskProcedure`

* chore: rename to `CreateFlowProcedure`

* chore: apply suggestions from CR

* feat: invoke create flow procedure

* chore: apply suggestions from CR

* refactor: rename TYPE_NAME

* feat: register the procedure

* chore: apply suggestions from CR

* feat: acquire the lock of sink table name
2024-04-29 12:34:11 +00:00
Jeremyhi
336db38ce9 refactor!: remove duration type from gRPC (#3825)
* refactor: remove duration type

* chore: proto
2024-04-29 02:56:09 +00:00
tison
c387687262 refactor: use secrecy SerectString to hold secrets option (#3804)
* build: centralize secrecy dependency

Signed-off-by: tison <wander4096@gmail.com>

* add secrecy to sql crate

Signed-off-by: tison <wander4096@gmail.com>

* try impl

Signed-off-by: tison <wander4096@gmail.com>

* update test

Signed-off-by: tison <wander4096@gmail.com>

* make linters happy

Signed-off-by: tison <wander4096@gmail.com>

* bundle secrecy

Signed-off-by: tison <wander4096@gmail.com>

* bundle secrecy

Signed-off-by: tison <wander4096@gmail.com>

* replace secrecy

Signed-off-by: tison <wander4096@gmail.com>

* tidy clones

Signed-off-by: tison <wander4096@gmail.com>

* fixup

Signed-off-by: tison <wander4096@gmail.com>

* fixup

Signed-off-by: tison <wander4096@gmail.com>

* updated

Signed-off-by: tison <wander4096@gmail.com>

* Apply suggestions from code review

Co-authored-by: LFC <990479+MichaelScofield@users.noreply.github.com>

* use BTreeMap

Signed-off-by: tison <wander4096@gmail.com>

* tidy

Signed-off-by: tison <wander4096@gmail.com>

---------

Signed-off-by: tison <wander4096@gmail.com>
Co-authored-by: LFC <990479+MichaelScofield@users.noreply.github.com>
2024-04-29 02:18:18 +00:00
Ruihang Xia
7ef18c0915 feat: impl parser and operator for CREATE FLOW (#3806)
* feat: impl parser for CREATE TASK

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* finish parser

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* wip expr

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* finish expr

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* rename output to sink

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix parser

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* remove debug code

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* upload lock file

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* rename symbol

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* Apply suggestions from code review

Co-authored-by: Jeremyhi <jiachun_feng@proton.me>

* remove other task word

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* task name to flow name

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* one more comment

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
Co-authored-by: Jeremyhi <jiachun_feng@proton.me>
2024-04-28 12:26:49 +00:00
tison
1bbde15a15 feat: improve error message for typo in IF NOT EXISTS (#3817)
* refactor: improve error message for typo in IF NOT EXISTS

Signed-off-by: tison <wander4096@gmail.com>

* support table name 'if'

Signed-off-by: tison <wander4096@gmail.com>

* add sqlness cases

Signed-off-by: tison <wander4096@gmail.com>

---------

Signed-off-by: tison <wander4096@gmail.com>
2024-04-28 11:20:17 +00:00
Ruihang Xia
3dac7cbe37 refactor: remove location_opt and DebugFormat (#3830)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-04-28 11:18:55 +00:00
Weny Xu
08263995f6 feat: implement the FrontendInvoker (#3824)
* chore: add `common-frontend`

* feat: add `FrontendInvoker` trait

* feat: implement the `FrontendInvoker`
2024-04-28 11:11:34 +00:00
Ruihang Xia
c0b909330a fix: wrong handler implementation of prometheus remote write (#3826)
* fix: wrong handler implementation of prometheus remote write

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* refactor code

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-04-28 10:52:32 +00:00
Weny Xu
dadee99d69 chore: add warn log for denied to renew region lease (#3827)
chore: add warn log for region lease renewing
2024-04-28 09:04:01 +00:00
Yohan Wal
f29aebf89f feat(fuzz): add alter logical table target (#3818)
* feat(fuzz): add alter logical table target

* chore(ci): add fuzz_alter_logical_table ci cfg
2024-04-28 06:40:37 +00:00
tison
e154dc5fd4 refactor: DfUdfAdapter to bridge ScalaUdf (#3814)
* refactor: DfUdfAdapter to bridge ScalaUdf

Signed-off-by: tison <wander4096@gmail.com>

* tidy impl

Signed-off-by: tison <wander4096@gmail.com>

* for more

Signed-off-by: tison <wander4096@gmail.com>

* for more

Signed-off-by: tison <wander4096@gmail.com>

* for more

Signed-off-by: tison <wander4096@gmail.com>

---------

Signed-off-by: tison <wander4096@gmail.com>
2024-04-28 04:17:06 +00:00
Jeremyhi
ed8b13689e refactor!: remove some admin APIs (#3821)
chore: remove some admin APIs as they have been replaced by the information schema.
2024-04-28 03:24:13 +00:00
Weny Xu
3112ced9c0 chore: rename all datanode_manager to node_manager (#3813) 2024-04-28 02:34:06 +00:00
Yingwen
e410192560 fix: push down order hint of the query again (#3797)
* feat: add dummy catalog list to query

* chore: fix compiler errors

* feat: use query's dummy catalog

* chore: remove error

* feat: match dummy provider in the order hint

* docs: revert config change

* Apply suggestions from code review

---------

Co-authored-by: Ruihang Xia <waynestxia@gmail.com>
2024-04-26 11:55:44 +00:00
Weny Xu
eb3d2ca759 refactor: refactor DatanodeManager to NodeManager (#3811)
* chore: bump greptime-proto to 2c14c6e

* refactor: refactor `DatanodeManager` to `NodeManager`
2024-04-26 11:31:03 +00:00
Yohan Wal
934c7e3fef feat(fuzz): add create logical table target (#3756)
* feat(fuzz): add create logical table target

* fix: drop physical table after fuzz test

* fix: remove backticks of table name in with clause

* fix: create physical and logical table properly

* chore: update comments

* chore(ci): add fuzz_create_logical_table ci cfg

* fix: create one logical table once a time

* fix: avoid possible duplicate table and column name

* feat: use hard-code physical table

* chore: remove useless phantom

* refactor: create logical table with struct initialization

* chore: suggested changes and corresponding test changes

* chore: clean up
2024-04-26 09:09:08 +00:00
tison
d8ea7c5585 chore: respect axum test client's origin (#3805)
Signed-off-by: tison <wander4096@gmail.com>
2024-04-26 07:19:46 +00:00
Yingwen
77fc1e6de0 fix: prune row groups correctly for columns with the same name (#3802)
* test: add prune test case

* fix: use latest region metadata to get column id

* test: sort output
2024-04-26 06:52:23 +00:00
tison
4eadd9f8a8 refactor: log statement on error by Display (#3803)
* refactor: log statement on error by Display

Signed-off-by: tison <wander4096@gmail.com>

* Apply suggestions from code review

Co-authored-by: Yingwen <realevenyag@gmail.com>
Signed-off-by: tison <wander4096@gmail.com>

---------

Signed-off-by: tison <wander4096@gmail.com>
Co-authored-by: Yingwen <realevenyag@gmail.com>
2024-04-26 04:05:13 +00:00
Weny Xu
1ec595134d feat: define CreateFlowTask and DropFlowTask (#3801)
* chore: bump proto to cefc73f

* feat: add `CreateFlowTask` and `DropFlowTask`

* chore: bump to 87f2b38

* chore: bump to 783682f
2024-04-25 12:30:31 +00:00
Weny Xu
9206f60b28 feat: implement FlowTaskMetadataManager (#3766)
* feat: implement `FlowMetadataManager`

* chore: remove dead code

* refactor: change `sink_tables` to `sink_table`

* refactor: add `PartitionId`

* feat: implement FlowTaskNameManager

* refactor: update doc of keys

* fix: return partition id in `tasks`

* refactor: rename to `FlowTaskId`

* chore: add comments

* chore: add `task_id` in `TaskAlreadyExists`

* chore: add comments

* fix: fmt

* refactor: simplify the docoder

* chore: update comments

* feat: implement `FlowTaskScoped` and `CatalogScoped`

* refactor: refactor flow task keys

* refactor: remove metadata mod

* refactor: rename to `FlowTaskInfo`

* chore: add comments

* refactor: rename to `FlowTaskMetadataManager`

* chore: remove dead code

* Apply suggestions from code review

* chore: change to `pub(crate)`

* chore: apply suggestions from CR

* fix: fix fmt

* chore: fmt doc
2024-04-25 11:59:24 +00:00
discord9
2d0f493040 feat(flow): render reduce (#3769)
* feat: render reduce

* fix: distinct input dedup&change per review

* chore: typos

* fix: eval_distinct_core&per review

* chore: typos

* docs: more comment on accums internal

* chore: per review

* chore: rebase to main
2024-04-25 09:35:12 +00:00
tison
bba3108e0d refactor!: unify sql options into OptionMap (#3792)
* unify sql options into OptionMap

Signed-off-by: tison <wander4096@gmail.com>

* fixup

Signed-off-by: tison <wander4096@gmail.com>

* Update src/sql/src/util.rs

* drop legacy regions option

Signed-off-by: tison <wander4096@gmail.com>

* fixup

Signed-off-by: tison <wander4096@gmail.com>

* fixup

Signed-off-by: tison <wander4096@gmail.com>

---------

Signed-off-by: tison <wander4096@gmail.com>
2024-04-25 04:06:05 +00:00
Jeremyhi
9524ec83bc fix!: use the right precision (#3794)
* fix: use write precision

* chore: rename error

* chore: add doc

* chore: by comment

* chore: right test result

* chore: typo

* chore: add doc
2024-04-25 03:20:10 +00:00
Weny Xu
e0b5f52c2a fix: fix fuzz test ci (#3795) 2024-04-24 10:22:41 +00:00
Ruihang Xia
1272bc9afc fix: post process result on query full column name of prom labels API (#3793)
* fix: post process result on query full column name of prom labels API

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* only preserve tag column

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-04-24 09:38:03 +00:00
Ruihang Xia
df01ac05a1 feat: add validate method to CreateExpr (#3772)
* feat: add validate method to CreateExpr

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add sqlness reproducer

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* verify region create request

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix existing test

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add tailing empty line

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add more validation

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix typo

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* disable metric table fuzz

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* minor refactor

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-04-24 07:29:10 +00:00
Zhenchi
659d34a170 refactor(flow): refine comments and code (#3785)
* refactor(flow): refine comments and code

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* doc: description of the properties of removed keys

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* fix: `get`'s fast path for cur val

---------

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>
Co-authored-by: discord9 <discord9@163.com>
2024-04-24 07:09:53 +00:00
irenjj
62037ee4c8 feat: impl Display for Statement (#3744)
* feat: impl Display for Statement

* fix: add license header

* fix: inline function manually

* fix: redacte options

* fix: check secret key and replace value

* test: add test for statement display

* fix: fix check

* fix: inline method

* fix: inline methods

* fix: format

* showcase how to write Display impl

Signed-off-by: tison <wander4096@gmail.com>

* for others

Signed-off-by: tison <wander4096@gmail.com>

* create and copy

Signed-off-by: tison <wander4096@gmail.com>

* create rest

Signed-off-by: tison <wander4096@gmail.com>

* fixup

Signed-off-by: tison <wander4096@gmail.com>

* address comments

Signed-off-by: tison <wander4096@gmail.com>

* fixup quote

Signed-off-by: tison <wander4096@gmail.com>

---------

Signed-off-by: tison <wander4096@gmail.com>
Co-authored-by: tison <wander4096@gmail.com>
2024-04-24 07:09:06 +00:00
Lei, HUANG
8d229dda98 chore: always obsolete wal to avoid discontinuous log entries (#3789) 2024-04-24 07:08:01 +00:00
Kould
42e7403fcc feat: support different types for CompatReader (#3745)
* feat: support different types for `CompatReader`

* chore: only compare whether we need: (data_type)

* fix: optimize code based on review suggestions

- add unit test `test_safe_cast_to_null` to test safely cast
- add DataType to projected_fields
- remove TODO

* fix: assert_eq fail on `projection.rs`

* style: codefmt

* style: fix the code based on review suggestions
2024-04-24 06:27:52 +00:00
Weny Xu
20a933e395 refactor: simplify the PaginationStream (#3787)
* refactor: simplify the `PaginationStream`

* refactor: refactor decode fn

* fix: fix clippy
2024-04-24 04:29:41 +00:00
Kould
b619950c70 feat: add ChangeColumnType for AlterKind (#3757)
* feat: add `ModifyColumn` for `AlterKind`

* chore: additional code comments for `AlterKind::ModifyColumns`

* fix: add nullable check on `ModifyColumn`

* style: codefmt

* style: fix the code based on review suggestions

* style: fix the code based on review suggestions

* style: rename `ModifyColumn` -> `ChangeColumnType`

* style: code fmt

* style: `change_columns_type` -> `change_column_types`
2024-04-24 04:27:23 +00:00
Yingwen
4685b59ef1 feat: write manifests in background tasks (#3709)
* chore: truncate wip

* feat: truncate and edit write manifest in background

* refactor: wrap in manifest context

* feat: alter write manifest in background

* chore: fix compiler errors

* feat: flush update manifest in background

* feat: compaction update manifest in background

* feat: set dropping state

* feat: reset drop state

* feat: check state before updating manifest

* test: fix compaction test

* refactor: rename method

* chore: update comment

* chore: discard state guard

* refactor: use atomic cell to store state enum

* chore: fix clippy

* chore: update toml

* chore: remove unused type alias

* feat: check state after writing manifest

* chore: address CR comments

* chore: change status code

* chore: Update src/mito2/src/region.rs

Co-authored-by: Lei, HUANG <6406592+v0y4g3r@users.noreply.github.com>

* fix: executes applier

---------

Co-authored-by: Lei, HUANG <6406592+v0y4g3r@users.noreply.github.com>
2024-04-24 03:09:48 +00:00
LFC
86a989517e refactor: move the version string to common (#3783) 2024-04-23 14:21:34 +00:00
Yingwen
0aaf7621bd build: only build amd64 image for CentOS (#3784)
build: only build amd64 for centos
2024-04-23 13:44:26 +00:00
WU Jingdi
924c52af7c fix: promql scalar when input empty batch (#3779) 2024-04-23 11:16:40 +00:00
Lei, HUANG
f5e5a89e44 chore: bump jobserver (#3778)
chore: upgrade jobserver to 0.1.31 so that it can build on platforms with older glibc
2024-04-23 09:32:04 +00:00
Lei, HUANG
778e195f07 fix: do not remove deletion markers when window time range overlaps (#3773)
* fix: do not remove deletion markers when window time range overlaps

* chore: fix some minor issues; add compaction test

* chore: add more test

* fix: nitpick master's nitpick
2024-04-23 08:05:16 +00:00
Ruihang Xia
f764fd5847 fix: consider both db param and extended db header in Prometheus HTTP API (#3776)
* fix: consider both db param and extended db header in Prometheus HTTP API

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* remove debug code

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-04-23 07:13:05 +00:00
LFC
19a9035f4b fix: operating region guards should be dropped when procedure is done (#3775) 2024-04-23 06:21:53 +00:00
LFC
96c01a3bf0 fix: the dropping_regions guards should be dropped on procedure done (#3771)
* fix: the `dropping_regions` guards should be dropped on procedure done

* fix ci
2024-04-23 02:44:12 +00:00
Ruihang Xia
bf21527f18 fix: set is_time_index properly on updating physical table's schema (#3770)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-04-22 12:37:25 +00:00
Weny Xu
9e1441e48b docs: add how-to-write-fuzz-tests.md (#3763)
* docs: add how-to-write-fuzz-tests.md

* chore: apply suggestions from CR

* chore: apply suggestion from CR

* Update docs/how-to/how-to-write-fuzz-tests.md

Co-authored-by: tison <wander4096@gmail.com>

* Apply suggestions from code review

Co-authored-by: tison <wander4096@gmail.com>

* chore: apply suggestions from CR

* chore: apply suggestions from CR

---------

Co-authored-by: tison <wander4096@gmail.com>
2024-04-22 11:56:16 +00:00
ZonaHe
eeb4e26c71 feat: update dashboard to v0.5.0 (#3768)
Co-authored-by: ZonaHex <ZonaHex@users.noreply.github.com>
Co-authored-by: Ruihang Xia <waynestxia@gmail.com>
2024-04-22 11:41:00 +00:00
tison
7ca0fa52d4 ci: secrets.GITHUB_TOKEN not exist on self-hosted runner (#3765) 2024-04-22 11:29:08 +00:00
Ruihang Xia
443722597b ci: temporary disable compatibility test (#3767)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-04-22 11:10:24 +00:00
discord9
d4b814f698 refactor(flow): split render.rs and other minor changes (#3750)
* refactor: split render.rs

* chore: per review

* chore: per review

* chore: docs explain `apply_updates` behavior

* chore: per review
2024-04-22 09:48:09 +00:00
Ruihang Xia
d0b2a11f2b feat: add preserve arg to sqlness runner (#3724)
* feat: add preserve arg to sqlness runner

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* replace tempdir with tempfile

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-04-22 09:21:37 +00:00
dependabot[bot]
54432df92f build(deps): bump rustls from 0.22.3 to 0.22.4 (#3764)
Bumps [rustls](https://github.com/rustls/rustls) from 0.22.3 to 0.22.4.
- [Release notes](https://github.com/rustls/rustls/releases)
- [Changelog](https://github.com/rustls/rustls/blob/main/CHANGELOG.md)
- [Commits](https://github.com/rustls/rustls/compare/v/0.22.3...v/0.22.4)

---
updated-dependencies:
- dependency-name: rustls
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-04-22 17:19:08 +08:00
dennis zhuang
8f2ce4abe8 feat: impl show collation and show charset statements (#3753)
* feat: impl show collation and show charset statements

* docs: add api docs
2024-04-20 06:01:32 +00:00
WU Jingdi
d077892e1c feat: support PromQL scalar (#3693) 2024-04-19 09:56:09 +00:00
LFC
cfed466fcd chore: update greptime-proto to main (#3743) 2024-04-19 06:38:34 +00:00
Ruihang Xia
0c5f4801b7 build: update toolchain to nightly-2024-04-18 (#3740)
* chore: update toolchain to nightly-2024-04-17

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix test clippy

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix ut

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* update fuzz test

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* update to nightly-2024-04-18

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add document

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* update CI

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* avoid unnecessary allow clippy attrs

Signed-off-by: tison <wander4096@gmail.com>

* help the compiler find the clone is unnecessary and make clippy happy

Signed-off-by: tison <wander4096@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
Signed-off-by: tison <wander4096@gmail.com>
Co-authored-by: tison <wander4096@gmail.com>
2024-04-19 05:42:34 +00:00
Eugene Tolbakov
2114b153e7 refactor: avoid unnecessary alloc by using unwrap_or_else (#3742)
feat(promql): address post-merge CR
2024-04-19 01:31:25 +00:00
LFC
314f2704d4 build(deps): update datafusion to latest and arrow to 51.0 (#3661)
* chore: update datafusion

* update sqlness case of time.sql

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix: adjust range query partition

* fix: hisogram incorrect result

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix: ignore filter pushdown temporarily

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix: update limit sqlness result

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix: histogram with wrong distribution

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix: update negative ordinal sqlness case

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* feat: bump df to cd7a00b

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* resolve conflicts

* ignore test_range_filter

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix promql exec panic

* fix "select count(*)" exec error

* re-enable the "test_range_filter" test since the filter push down seems not necessary to be removed

* fix: range query schema error

* update sqlness results

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* resolve conflicts

* update datafusion, again

* fix pyo3 compile error, and update some sqlness results

* update decimal sqlness cases

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix: promql literal

* fix udaf tests

* fix filter pushdown sqlness tests

* fix?: test_cast

* fix: rspy test fail due to datafusion `sin` signature change

* rebase main to see if there are any failed tests

* debug ci

* debug ci

* debug ci

* enforce input partition

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* debug ci

* fix ci

* fix ci

* debug ci

* debug ci

* debug ci

* fix sqlness

* feat: do not return error while creating a filter

* chore: remove array from error

* chore: replace todo with unimplemented

* Update src/flow/clippy.toml

Co-authored-by: Yingwen <realevenyag@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
Co-authored-by: Ruihang Xia <waynestxia@gmail.com>
Co-authored-by: WUJingdi <taylor-lagrange@qq.com>
Co-authored-by: discord9 <discord9@163.com>
Co-authored-by: evenyag <realevenyag@gmail.com>
Co-authored-by: tison <wander4096@gmail.com>
2024-04-18 12:07:18 +00:00
Weny Xu
510782261d refactor: avoid unnecessary cloning (#3734)
refactor: using `TxnOpGetResponseSet`
2024-04-18 09:02:28 +00:00
Jeremyhi
20e8c3d864 chore: remove TableIdProvider (#3733) 2024-04-18 05:36:37 +00:00
Weny Xu
2a2a44883f refactor(meta): Ensure all moving values remain unchanged between two transactions (#3727)
* feat: implement `move_values`

* refactor: using `move_values`

* refactor: refactor executor

* chore: fix clippy

* refactor: remove `CasKeyChanged` error

* refactor: refactor `move_values`

* chore: update comments

* refactor: do not compare `dest_key`

* chore: update comments

* chore: apply suggestions from CR

* chore: remove `#[inline]`

* chore: check length of keys and dest_key
2024-04-18 05:35:54 +00:00
maco
4248dfcf36 feat: support invalidate schema name key cache (#3725)
* feat: support invalidate schema name key cache

* fix: remove pub for invalidate_schema_cache

* refactor: add DropMetadataBroadcast State Op

* fix: delete files
2024-04-18 04:02:06 +00:00
Yohan Wal
64945533dd feat: add tinytext, mediumtext and longtext data types (#3731) 2024-04-18 03:15:21 +00:00
Yohan Wal
ffc8074556 feat(fuzz): enable create-if-not-exists option (#3732) 2024-04-18 02:50:57 +00:00
Ruihang Xia
7e56bf250b docs: add style guide (#3730)
* docs: add style guide

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add comments section

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add comment order

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* about error handling

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* about error logging

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-04-17 11:28:02 +00:00
tison
50ae4dc174 refactor: merge RegionHandleResult into RegionHandleResponse (#3721)
* refactor: merge RegionHandleResult into RegionHandleResponse

Signed-off-by: tison <wander4096@gmail.com>

* RegionResponse to api::region

Signed-off-by: tison <wander4096@gmail.com>

* order

Signed-off-by: tison <wander4096@gmail.com>

---------

Signed-off-by: tison <wander4096@gmail.com>
2024-04-17 10:03:20 +00:00
Ruihang Xia
16aef70089 fix: remove ttl option from metadata region (#3726)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-04-17 09:13:53 +00:00
tison
786f43da91 chore: cleanup todos that should be panic (#3720)
Signed-off-by: tison <wander4096@gmail.com>
2024-04-17 05:04:14 +00:00
zyy17
3e9bda3267 ci: use greptimedb-ci-tester account (#3719) 2024-04-16 14:43:17 +00:00
Eugene Tolbakov
89d58538c7 chore(mito): set null value data size to i64 (#3722)
* chore(mito): set null value data size to i64

* chore(mito): move comment to a relevant place
2024-04-16 14:40:16 +00:00
Weny Xu
d12379106e feat(drop_table): support to rollback table metadata (#3692)
* feat: support to rollback table metadata

* refactor: store table route value instead of physical table route

* feat(drop_table): support to rollback table metadata

* test: add rollback tests for drop table

* fix: do not set region to readonly

* test: add sqlness tests

* feat: implement TombstoneManager

* test: add tests for TombstoneManager

* refactor: using TombstoneManager

* chore: remove unused code

* fix: fix typo

* refactor: using `on_restore_metadata`

* refactor: add `executor` to `DropTableProcedure`

* refactor: simplify the `TombstoneManager`

* refactor: refactor `Key`

* refactor: carry more info

* feat: add `destroy_table_metadata`

* refactor: remove redundant table_route_value

* feat: ensure the key is empty

* feat: introcude `table_metadata_keys`

* chore: carry more info

* chore: remove clone

* chore: apply suggestions from CR

* feat: delete metadata tombstone
2024-04-16 09:22:41 +00:00
Weny Xu
64941d848e fix(alter_table): ignore request outdated error (#3715) 2024-04-16 08:18:38 +00:00
Ruihang Xia
96a40e0300 feat: check partition rule (#3711)
* feat: check partition rule

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix clippy and fmt

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add more tests

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix typo

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* correct test comment

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-04-16 08:13:49 +00:00
Yingwen
d2e081c1f9 docs: update memtable config example (#3712) 2024-04-16 07:26:20 +00:00
tison
cdbdb04d93 refactor: remove redundant try_flush invocations (#3706)
* refactor: remove redundant try_flush invocations

Signed-off-by: tison <wander4096@gmail.com>

* fixup

Signed-off-by: tison <wander4096@gmail.com>

---------

Signed-off-by: tison <wander4096@gmail.com>
2024-04-16 06:35:55 +00:00
Lei, HUANG
5af87baeb0 feat: add filter_deleted option to avoid removing deletion markers (#3707)
* feat: add `filter_deleted` scan option to avoid removing deletion markers.

* refactor: move sort_batches_and_print to test_util
2024-04-16 06:34:41 +00:00
maco
d5a948a0a6 test: Add tests for KvBackend trait implement (#3700)
* test: add etcd

* optimize code

* test: add etcd tests

* fix: typos

* fix: taplo error and clippy

* avoid print

Signed-off-by: tison <wander4096@gmail.com>

---------

Signed-off-by: tison <wander4096@gmail.com>
Co-authored-by: tison <wander4096@gmail.com>
2024-04-15 10:51:59 +00:00
Eugene Tolbakov
bbea651d08 feat(promql): parameterize lookback (#3630)
* feat(promql): parameterize lookback

* chore(promql): address CR, adjusted sqlness

* chore(promql): fmt

* chore(promql): fix accidental removal

* fix(promql): address CR

* fix(promql): address CR

* feat(promql): add initial lookback parameter grpc support

* fix: update greptime-proto revision

* chore: restore accidental removal
2024-04-15 09:11:21 +00:00
zyy17
8060c81e1d refactor: use toml2docs to generate config docs (#3704)
* refactor: use toml2docs to generate config docs

* ci: add docs check in 'check-typos-and-docs'
2024-04-15 09:08:32 +00:00
Jeremyhi
e6507aaf34 chore: debt 3696 (#3705) 2024-04-15 09:02:19 +00:00
Jeremyhi
87795248dd feat: get metasrv clusterinfo (#3696)
* feat: add doc for MetasrvOptions

* feat: register candidate before election

* feat: get all peers metasrv

* chore: simply code

* chore: proto rev

* Update src/common/meta/src/cluster.rs

Co-authored-by: dennis zhuang <killme2008@gmail.com>

* Update src/meta-client/src/client.rs

Co-authored-by: dennis zhuang <killme2008@gmail.com>

* fmt

Signed-off-by: tison <wander4096@gmail.com>

* Apply suggestions from code review

Co-authored-by: dennis zhuang <killme2008@gmail.com>

* impl<T: AsRef<[u8]>> From<T> for LeaderValue

Signed-off-by: tison <wander4096@gmail.com>

---------

Signed-off-by: tison <wander4096@gmail.com>
Co-authored-by: dennis zhuang <killme2008@gmail.com>
Co-authored-by: tison <wander4096@gmail.com>
2024-04-15 08:10:48 +00:00
irenjj
7a04bfe50a feat: add strict mode to validate protocol strings (#3638)
* feat: add strict mode to validate protocol strings

* hotfix: fix test

* fix: fix return pair and test param

* test: add test for utf-8 validation

* fix: cargo check

* Update src/servers/src/prom_row_builder.rs

Co-authored-by: Eugene Tolbakov <ev.tolbakov@gmail.com>

* fix: fix param of without_strict_mode

* fix: change field name in HttpOptions

* fix: replace if else with match

* fix: replace all strict_mode with is_stirct_mode

* fix: fix test_config_api

* fix: fix bench, add vm handshake, catch error

---------

Co-authored-by: Eugene Tolbakov <ev.tolbakov@gmail.com>
Co-authored-by: tison <wander4096@gmail.com>
2024-04-15 07:53:48 +00:00
Yingwen
2f4726f7b5 refactor: Move manifest manager lock to MitoRegion (#3689)
* feat: remove manager inner wip

* feat: put manifest lock in region

* feat: don't update manifest if manager is stopped

* chore: address CR comments
2024-04-15 05:48:25 +00:00
dennis zhuang
75d85f9915 feat: impl table_constraints table for information_schema (#3698)
* feat: impl table_constraints table for information_schema

* test: update information_schema sqlness test

* test: adds table_constraints sqlness test
2024-04-15 03:59:16 +00:00
discord9
db329f6c80 feat(flow): transform substrait SELECT&WHERE&GROUP BY to Flow Plan (#3690)
* feat: transofrm substrait SELECT&WHERE&GROUP BY to Flow Plan

* chore: reexport from common/substrait

* feat: use datafusion Aggr Func to map to Flow aggr func

* chore: remove unwrap&split literal

* refactor: split transform.rs into smaller files

* feat: apply optimize for variadic fn

* refactor: split unit test

* chore: per review
2024-04-12 07:38:42 +00:00
Ning Sun
544c4a70f8 refactor: check error type before logging (#3697)
* refactor: check error type before logging

* chore: update log level for broken pipe

* refactor: leave a debugging output for non critial error
2024-04-12 02:18:14 +00:00
dimbtp
02f806fba9 fix: cli export "create table" with quoted names (#3684)
* fix: cli export `create table` with quoted names

* add test

* apply review comments

* fix to pass check

* remove eprintln for clippy check

* use prebuilt binary to avoid compile

* ci run coverage after build

* drop dirty hack test

Signed-off-by: tison <wander4096@gmail.com>

---------

Signed-off-by: tison <wander4096@gmail.com>
Co-authored-by: tison <wander4096@gmail.com>
2024-04-11 06:56:14 +00:00
tison
9459ace33e ci: add CODEOWNERS file (#3691)
Signed-off-by: tison <wander4096@gmail.com>
2024-04-10 17:47:54 +00:00
Weny Xu
c1e005b148 refactor: drop table procedure (#3688)
* refactor: refactor drop table procedure

* refactor: refactor test utils
2024-04-10 12:22:10 +00:00
discord9
c00c1d95ee chore(flow): more comments&lint (#3680)
* chore: more comments&lint

* chore: per review

* chore: remove abundant dep
2024-04-10 03:31:22 +00:00
tison
5d739932c0 chore: remove TODO that has been done (#3683)
This TODO is done by https://github.com/GreptimeTeam/greptimedb/pull/3473.
2024-04-09 22:55:55 +00:00
Ruihang Xia
aab7367804 feat: try get pk values from cache when applying predicate to parquet (#3286)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
Co-authored-by: tison <wander4096@gmail.com>
2024-04-09 12:53:38 +00:00
Yohan Wal
34f935df66 chore: create database api change in protobuf (#3682) 2024-04-09 12:11:38 +00:00
Weny Xu
fda1523ced refactor: refactor alter table procedure (#3678)
* refactor: refactor alter table procedure

* chore: apply suggestions from CR

* chore: remove `alter_expr` and `alter_kind`
2024-04-09 10:35:51 +00:00
tison
2c0c7759ee feat: add checksum for checkpoint data (#3651)
* feat: add checksum for checkpoint data

Signed-off-by: tison <wander4096@gmail.com>

* add test

Signed-off-by: tison <wander4096@gmail.com>

* clippy

Signed-off-by: tison <wander4096@gmail.com>

* fix: checksum should calculate on uncompressed data

Signed-off-by: tison <wander4096@gmail.com>

* address comments

Signed-off-by: tison <wander4096@gmail.com>

---------

Signed-off-by: tison <wander4096@gmail.com>
2024-04-09 08:32:24 +00:00
Weny Xu
2398918adf feat(fuzz): support to create metric table (#3617)
Co-authored-by: tison <wander4096@gmail.com>
2024-04-09 06:00:04 +00:00
Ruihang Xia
50bea2f107 feat: treat all number types as field candidates (#3670)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-04-09 03:28:21 +00:00
JeremyHi
1629435888 chore: unify name metasrv (#3671)
chore: unify name
2024-04-09 03:03:26 +00:00
tison
b3c94a303b chore: add a fix-clippy Makefile target (#3677)
* chore: add a fix-clippy Makefile target

* Update Makefile
2024-04-09 02:59:55 +00:00
tison
883b7fce96 refactor: bundle the lightweight axum test client (#3669)
* refactor: bundle the lightweight axum test client

Signed-off-by: tison <wander4096@gmail.com>

* address comments

Signed-off-by: tison <wander4096@gmail.com>

---------

Signed-off-by: tison <wander4096@gmail.com>
2024-04-09 02:33:26 +00:00
discord9
ea9367f371 refactor(flow): func spec api&use Error not EvalError in mfp (#3657)
* refactor: func's specialization& use Error not EvalError

* docs: some pub item

* chore: typo

* docs: add comments for every pub item

* chore: per review

* chore: per reveiw&derive Copy

* chore: per review&test for binary fn spec

* docs: comment explain how binary func spec works

* chore: minor style change

* fix: Error not EvalError
2024-04-09 02:32:02 +00:00
tison
2896e1f868 refactor: pass http method to metasrv http handler (#3667)
* refactor: pass http method to metasrc http handler

Signed-off-by: tison <wander4096@gmail.com>

* update maintenance endpoint

Signed-off-by: tison <wander4096@gmail.com>

* fixup

Signed-off-by: tison <wander4096@gmail.com>

* Update src/meta-srv/src/service/admin.rs

Co-authored-by: dennis zhuang <killme2008@gmail.com>

---------

Signed-off-by: tison <wander4096@gmail.com>
Co-authored-by: dennis zhuang <killme2008@gmail.com>
2024-04-09 02:26:42 +00:00
Lei, HUANG
183fccbbd6 chore: remove global_ttl config (#3673)
* chore: remove global_ttl config

* fix: clippy
2024-04-09 02:00:50 +00:00
Weny Xu
b51089fa61 fix: DeserializedValueWithBytes::from_inner misusing (#3676)
* fix: fix `DeserializedValueWithBytes::from_inner` misusing

* Update src/common/meta/src/key.rs

---------

Co-authored-by: tison <wander4096@gmail.com>
2024-04-09 01:48:35 +00:00
Yohan Wal
682b04cbe4 feat(fuzz): add create database target (#3675)
* feat(fuzz): add create database target

* chore(ci): add fuzz_create_database ci cfg
2024-04-09 01:33:29 +00:00
tison
e1d2f9a596 chore: improve contributor click in git-cliff (#3672)
Signed-off-by: tison <wander4096@gmail.com>
2024-04-08 18:15:00 +00:00
tison
2fca45b048 ci: setup-protoc always with token (#3674)
Signed-off-by: tison <wander4096@gmail.com>
2024-04-08 18:13:24 +00:00
Yingwen
3e1a125732 feat: add append mode to table options (#3624)
* feat: add append mode to table options

* test: add append mode test

* test: rename test tables

* chore: Add delete test for append mode
2024-04-08 13:42:58 +00:00
Mofeng
34b1427a82 fix(readme): fix link of Ingester-js (#3668) 2024-04-08 12:17:44 +00:00
discord9
28fd0dc276 feat(flow): render map&related tests (#3581)
* feat: render map&related tests

* chore: license header

* chore: update Cargo.lock&remove unused

* refactor: rename ComputeState to DataflowState

* chore: use org fork

* chore: fix typos

* chore: per review

* chore: more explain to use `VecDeque` in err collector

* chore: typos

* chore: more comment on `Plan::Let`

* chore: typos

* refactor mfp rendering

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* fix: update `now` in closure

* feat: use insert_local

* chore: remove unused

* chore: per review

* chore: fmt comment

---------

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>
Co-authored-by: Zhenchi <zhongzc_arch@outlook.com>
2024-04-08 11:36:07 +00:00
Weny Xu
32b9639d7c feat(procedure): support to rollback (#3625)
* feat: add rollback method

* refactor: simplify the state control

* feat(procedure): support to rollback

* test: add tests for rollback

* feat: persist rollback procedure state

* feat: rollback procedure after restarting

* feat: add `CommitRollback`, `RollingBack` to ProcedureStateResponse

* chore: apply suggestions from CR

* feat: persist rollback error

* feat: add `is_support_rollback`

* chore: apply suggestions from CR

* chore: update greptime-proto

* chore: rename to `rollback_supported`

* chore: rename to `RollbackProcedureRecovered`
2024-04-08 11:23:23 +00:00
765 changed files with 43511 additions and 13936 deletions


@@ -24,3 +24,7 @@ GT_KAFKA_ENDPOINTS = localhost:9092
# Setting for fuzz tests
GT_MYSQL_ADDR = localhost:4002
# Setting for unstable fuzz tests
GT_FUZZ_BINARY_PATH=/path/to/
GT_FUZZ_INSTANCE_ROOT_DIR=/tmp/unstable_greptime

.github/CODEOWNERS vendored Normal file (27 lines added)

@@ -0,0 +1,27 @@
# GreptimeDB CODEOWNERS
# These owners will be the default owners for everything in the repo.
* @GreptimeTeam/db-approver
## [Module] Database Engine
/src/index @zhongzc
/src/mito2 @evenyag @v0y4g3r @waynexia
/src/query @evenyag
## [Module] Distributed
/src/common/meta @MichaelScofield
/src/common/procedure @MichaelScofield
/src/meta-client @MichaelScofield
/src/meta-srv @MichaelScofield
## [Module] Write Ahead Log
/src/log-store @v0y4g3r
/src/store-api @v0y4g3r
## [Module] Metrics Engine
/src/metric-engine @waynexia
/src/promql @waynexia
## [Module] Flow
/src/flow @zhongzc @waynexia


@@ -39,7 +39,7 @@ body:
- Query Engine
- Table Engine
- Write Protocols
- MetaSrv
- Metasrv
- Frontend
- Datanode
- Other


@@ -22,15 +22,15 @@ inputs:
build-dev-builder-ubuntu:
description: Build dev-builder-ubuntu image
required: false
default: 'true'
default: "true"
build-dev-builder-centos:
description: Build dev-builder-centos image
required: false
default: 'true'
default: "true"
build-dev-builder-android:
description: Build dev-builder-android image
required: false
default: 'true'
default: "true"
runs:
using: composite
steps:
@@ -47,7 +47,7 @@ runs:
run: |
make dev-builder \
BASE_IMAGE=ubuntu \
BUILDX_MULTI_PLATFORM_BUILD=true \
BUILDX_MULTI_PLATFORM_BUILD=all \
IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \
IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
IMAGE_TAG=${{ inputs.version }}
@@ -58,7 +58,7 @@ runs:
run: |
make dev-builder \
BASE_IMAGE=centos \
BUILDX_MULTI_PLATFORM_BUILD=true \
BUILDX_MULTI_PLATFORM_BUILD=amd64 \
IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \
IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
IMAGE_TAG=${{ inputs.version }}
@@ -72,5 +72,5 @@ runs:
IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \
IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
IMAGE_TAG=${{ inputs.version }} && \
docker push ${{ inputs.dockerhub-image-registry }}/${{ inputs.dockerhub-image-namespace }}/dev-builder-android:${{ inputs.version }}


@@ -16,7 +16,7 @@ inputs:
dev-mode:
description: Enable dev mode, only build standard greptime
required: false
default: 'false'
default: "false"
working-dir:
description: Working directory to build the artifacts
required: false
@@ -68,7 +68,7 @@ runs:
- name: Build greptime on centos base image
uses: ./.github/actions/build-greptime-binary
if: ${{ inputs.arch == 'amd64' && inputs.dev-mode == 'false' }} # Only build centos7 base image for amd64.
if: ${{ inputs.arch == 'amd64' && inputs.dev-mode == 'false' }} # Builds greptime for centos if the host machine is amd64.
with:
base-image: centos
features: servers/dashboard
@@ -79,7 +79,7 @@ runs:
- name: Build greptime on android base image
uses: ./.github/actions/build-greptime-binary
if: ${{ inputs.arch == 'amd64' && inputs.dev-mode == 'false' }} # Only build android base image on amd64.
if: ${{ inputs.arch == 'amd64' && inputs.dev-mode == 'false' }} # Builds arm64 greptime binary for android if the host machine is amd64.
with:
base-image: android
artifacts-dir: greptime-android-arm64-${{ inputs.version }}


@@ -3,11 +3,17 @@ description: 'Fuzz test given setup and service'
inputs:
target:
description: "The fuzz target to test"
required: true
max-total-time:
description: "Max total time(secs)"
required: true
unstable:
default: 'false'
description: "Enable unstable feature"
runs:
using: composite
steps:
- name: Run Fuzz Test
shell: bash
run: cargo fuzz run ${{ inputs.target }} --fuzz-dir tests-fuzz -D -s none -- -max_total_time=120
env:
GT_MYSQL_ADDR: 127.0.0.1:4002
run: cargo fuzz run ${{ inputs.target }} --fuzz-dir tests-fuzz -D -s none ${{ inputs.unstable == 'true' && '--features=unstable' || '' }} -- -max_total_time=${{ inputs.max-total-time }}
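For readers unfamiliar with cargo-fuzz, each `target` passed to this action is a libFuzzer harness under `tests-fuzz/`. The sketch below shows the general shape of such a harness; it is illustrative only (the helper `validate_table_name` is hypothetical), not one of the actual GreptimeDB fuzz targets, which generate SQL and drive a running instance through `GT_MYSQL_ADDR`.

#![no_main]
// Minimal sketch of a cargo-fuzz / libFuzzer target, assuming the standard
// `cargo fuzz` layout with a `libfuzzer-sys` dependency.
use libfuzzer_sys::fuzz_target;

fuzz_target!(|data: &[u8]| {
    // Interpret the raw fuzz input as UTF-8 and feed it to the code under test.
    if let Ok(name) = std::str::from_utf8(data) {
        let _ = validate_table_name(name);
    }
});

// Hypothetical stand-in for the logic a fuzz target might exercise.
fn validate_table_name(name: &str) -> Result<(), String> {
    if name.is_empty() || name.len() > 255 {
        return Err("invalid table name length".to_string());
    }
    Ok(())
}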


@@ -1,13 +0,0 @@
{
"LABEL": {
"name": "breaking change",
"color": "D93F0B"
},
"CHECKS": {
"regexp": "^(?:(?!!:).)*$",
"ignoreLabels": [
"ignore-title"
],
"alwaysPassCI": true
}
}


@@ -1,12 +0,0 @@
{
"LABEL": {
"name": "Invalid PR Title",
"color": "B60205"
},
"CHECKS": {
"regexp": "^(feat|fix|test|refactor|chore|style|docs|perf|build|ci|revert)(\\(.*\\))?\\!?:.*",
"ignoreLabels": [
"ignore-title"
]
}
}


@@ -13,7 +13,7 @@ on:
name: Build API docs
env:
RUST_TOOLCHAIN: nightly-2023-12-19
RUST_TOOLCHAIN: nightly-2024-04-18
jobs:
apidoc:


@@ -30,15 +30,27 @@ concurrency:
cancel-in-progress: true
env:
RUST_TOOLCHAIN: nightly-2023-12-19
RUST_TOOLCHAIN: nightly-2024-04-18
jobs:
typos:
name: Spell Check with Typos
check-typos-and-docs:
name: Check typos and docs
runs-on: ubuntu-20.04
steps:
- uses: actions/checkout@v4
- uses: crate-ci/typos@v1.13.10
- uses: crate-ci/typos@master
- name: Check the config docs
run: |
make config-docs && \
git diff --name-only --exit-code ./config/config.md \
|| (echo "'config/config.md' is not up-to-date, please run 'make config-docs'." && exit 1)
license-header-check:
runs-on: ubuntu-20.04
name: Check License Header
steps:
- uses: actions/checkout@v4
- uses: korandoru/hawkeye@v5
check:
name: Check
@@ -93,6 +105,8 @@ jobs:
steps:
- uses: actions/checkout@v4
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
@@ -100,9 +114,13 @@ jobs:
with:
# Shares across multiple jobs
shared-key: "build-binaries"
- name: Install cargo-gc-bin
shell: bash
run: cargo install cargo-gc-bin
- name: Build greptime binaries
shell: bash
run: cargo build --bin greptime --bin sqlness-runner
# `cargo gc` will invoke `cargo build` with specified args
run: cargo gc -- --bin greptime --bin sqlness-runner
- name: Pack greptime binaries
shell: bash
run: |
@@ -123,10 +141,57 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
target: [ "fuzz_create_table", "fuzz_alter_table" ]
target: [ "fuzz_create_table", "fuzz_alter_table", "fuzz_create_database", "fuzz_create_logical_table", "fuzz_alter_logical_table", "fuzz_insert", "fuzz_insert_logical_table" ]
steps:
- uses: actions/checkout@v4
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
# Shares across multiple jobs
shared-key: "fuzz-test-targets"
- name: Set Rust Fuzz
shell: bash
run: |
sudo apt-get install -y libfuzzer-14-dev
rustup install nightly
cargo +nightly install cargo-fuzz
- name: Download pre-built binaries
uses: actions/download-artifact@v4
with:
name: bins
path: .
- name: Unzip binaries
run: tar -xvf ./bins.tar.gz
- name: Run GreptimeDB
run: |
./bins/greptime standalone start&
- name: Fuzz Test
uses: ./.github/actions/fuzz-test
env:
CUSTOM_LIBFUZZER_PATH: /usr/lib/llvm-14/lib/libFuzzer.a
GT_MYSQL_ADDR: 127.0.0.1:4002
with:
target: ${{ matrix.target }}
max-total-time: 120
unstable-fuzztest:
name: Unstable Fuzz Test
needs: build
runs-on: ubuntu-latest
strategy:
matrix:
target: [ "unstable_fuzz_create_table_standalone" ]
steps:
- uses: actions/checkout@v4
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
@@ -147,15 +212,25 @@ jobs:
path: .
- name: Unzip binaries
run: tar -xvf ./bins.tar.gz
- name: Run GreptimeDB
run: |
./bins/greptime standalone start&
- name: Fuzz Test
uses: ./.github/actions/fuzz-test
env:
CUSTOM_LIBFUZZER_PATH: /usr/lib/llvm-14/lib/libFuzzer.a
GT_MYSQL_ADDR: 127.0.0.1:4002
GT_FUZZ_BINARY_PATH: ./bins/greptime
GT_FUZZ_INSTANCE_ROOT_DIR: /tmp/unstable-greptime/
with:
target: ${{ matrix.target }}
max-total-time: 120
unstable: 'true'
- name: Upload unstable fuzz test logs
if: failure()
uses: actions/upload-artifact@v4
with:
name: unstable-fuzz-logs
path: /tmp/unstable-greptime/
retention-days: 3
sqlness:
name: Sqlness Test
@@ -175,13 +250,13 @@ jobs:
- name: Unzip binaries
run: tar -xvf ./bins.tar.gz
- name: Run sqlness
run: RUST_BACKTRACE=1 ./bins/sqlness-runner -c ./tests/cases --bins-dir ./bins
run: RUST_BACKTRACE=1 ./bins/sqlness-runner -c ./tests/cases --bins-dir ./bins --preserve-state
- name: Upload sqlness logs
if: always()
uses: actions/upload-artifact@v4
with:
name: sqlness-logs
path: /tmp/greptime-*.log
path: /tmp/sqlness-*
retention-days: 3
sqlness-kafka-wal:
@@ -205,13 +280,13 @@ jobs:
working-directory: tests-integration/fixtures/kafka
run: docker compose -f docker-compose-standalone.yml up -d --wait
- name: Run sqlness
run: RUST_BACKTRACE=1 ./bins/sqlness-runner -w kafka -k 127.0.0.1:9092 -c ./tests/cases --bins-dir ./bins
run: RUST_BACKTRACE=1 ./bins/sqlness-runner -w kafka -k 127.0.0.1:9092 -c ./tests/cases --bins-dir ./bins --preserve-state
- name: Upload sqlness logs
if: always()
uses: actions/upload-artifact@v4
with:
name: sqlness-logs-with-kafka-wal
path: /tmp/greptime-*.log
path: /tmp/sqlness-*
retention-days: 3
fmt:
@@ -255,7 +330,7 @@ jobs:
# Shares with `Check` job
shared-key: "check-lint"
- name: Run cargo clippy
run: cargo clippy --workspace --all-targets -- -D warnings
run: make clippy
coverage:
if: github.event.pull_request.draft == false
@@ -305,10 +380,10 @@ jobs:
CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=lld"
RUST_BACKTRACE: 1
CARGO_INCREMENTAL: 0
GT_S3_BUCKET: ${{ secrets.S3_BUCKET }}
GT_S3_ACCESS_KEY_ID: ${{ secrets.S3_ACCESS_KEY_ID }}
GT_S3_ACCESS_KEY: ${{ secrets.S3_ACCESS_KEY }}
GT_S3_REGION: ${{ secrets.S3_REGION }}
GT_S3_BUCKET: ${{ vars.AWS_CI_TEST_BUCKET }}
GT_S3_ACCESS_KEY_ID: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }}
GT_S3_ACCESS_KEY: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }}
GT_S3_REGION: ${{ vars.AWS_CI_TEST_BUCKET_REGION }}
GT_ETCD_ENDPOINTS: http://127.0.0.1:2379
GT_KAFKA_ENDPOINTS: 127.0.0.1:9092
UNITTEST_LOG_DIR: "__unittest_logs"
@@ -321,20 +396,20 @@ jobs:
fail_ci_if_error: false
verbose: true
compat:
name: Compatibility Test
needs: build
runs-on: ubuntu-20.04
timeout-minutes: 60
steps:
- uses: actions/checkout@v4
- name: Download pre-built binaries
uses: actions/download-artifact@v4
with:
name: bins
path: .
- name: Unzip binaries
run: |
mkdir -p ./bins/current
tar -xvf ./bins.tar.gz --strip-components=1 -C ./bins/current
- run: ./tests/compat/test-compat.sh 0.6.0
# compat:
# name: Compatibility Test
# needs: build
# runs-on: ubuntu-20.04
# timeout-minutes: 60
# steps:
# - uses: actions/checkout@v4
# - name: Download pre-built binaries
# uses: actions/download-artifact@v4
# with:
# name: bins
# path: .
# - name: Unzip binaries
# run: |
# mkdir -p ./bins/current
# tar -xvf ./bins.tar.gz --strip-components=1 -C ./bins/current
# - run: ./tests/compat/test-compat.sh 0.6.0


@@ -34,7 +34,14 @@ jobs:
runs-on: ubuntu-20.04
steps:
- uses: actions/checkout@v4
- uses: crate-ci/typos@v1.13.10
- uses: crate-ci/typos@master
license-header-check:
runs-on: ubuntu-20.04
name: Check License Header
steps:
- uses: actions/checkout@v4
- uses: korandoru/hawkeye@v5
check:
name: Check


@@ -1,16 +0,0 @@
name: License checker
on:
push:
branches:
- main
pull_request:
types: [opened, synchronize, reopened, ready_for_review]
jobs:
license-header-check:
runs-on: ubuntu-20.04
name: license-header-check
steps:
- uses: actions/checkout@v4
- name: Check License Header
uses: korandoru/hawkeye@v5


@@ -1,5 +1,3 @@
# Nightly CI: runs tests every night for our second-tier platforms (Windows)
on:
schedule:
- cron: '0 23 * * 1-5'
@@ -12,16 +10,32 @@ concurrency:
cancel-in-progress: true
env:
RUST_TOOLCHAIN: nightly-2023-12-19
RUST_TOOLCHAIN: nightly-2024-04-18
jobs:
sqlness:
name: Sqlness Test
sqlness-test:
name: Run sqlness test
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ windows-latest-8-cores ]
runs-on: ubuntu-22.04
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Run sqlness test
uses: ./.github/actions/sqlness-test
with:
data-root: sqlness-test
aws-ci-test-bucket: ${{ vars.AWS_CI_TEST_BUCKET }}
aws-region: ${{ vars.AWS_CI_TEST_BUCKET_REGION }}
aws-access-key-id: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }}
sqlness-windows:
name: Sqlness tests on Windows
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
runs-on: windows-latest-8-cores
timeout-minutes: 60
steps:
- uses: actions/checkout@v4
@@ -52,6 +66,7 @@ jobs:
retention-days: 3
test-on-windows:
name: Run tests on Windows
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
runs-on: windows-latest-8-cores
timeout-minutes: 60
@@ -85,10 +100,10 @@ jobs:
env:
RUST_BACKTRACE: 1
CARGO_INCREMENTAL: 0
GT_S3_BUCKET: ${{ secrets.S3_BUCKET }}
GT_S3_ACCESS_KEY_ID: ${{ secrets.S3_ACCESS_KEY_ID }}
GT_S3_ACCESS_KEY: ${{ secrets.S3_ACCESS_KEY }}
GT_S3_REGION: ${{ secrets.S3_REGION }}
GT_S3_BUCKET: ${{ vars.AWS_CI_TEST_BUCKET }}
GT_S3_ACCESS_KEY_ID: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }}
GT_S3_ACCESS_KEY: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }}
GT_S3_REGION: ${{ vars.AWS_CI_TEST_BUCKET_REGION }}
UNITTEST_LOG_DIR: "__unittest_logs"
- name: Notify slack if failed
if: failure()


@@ -1,27 +0,0 @@
name: Nightly functional tests
on:
schedule:
# At 00:00 on Tuesday.
- cron: '0 0 * * 2'
workflow_dispatch:
jobs:
sqlness-test:
name: Run sqlness test
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
runs-on: ubuntu-22.04
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Run sqlness test
uses: ./.github/actions/sqlness-test
with:
data-root: sqlness-test
aws-ci-test-bucket: ${{ vars.AWS_CI_TEST_BUCKET }}
aws-region: ${{ vars.AWS_CI_TEST_BUCKET_REGION }}
aws-access-key-id: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }}


@@ -1,29 +0,0 @@
name: "PR Title Checker"
on:
pull_request_target:
types:
- opened
- edited
- synchronize
- labeled
- unlabeled
jobs:
check:
runs-on: ubuntu-20.04
timeout-minutes: 10
steps:
- uses: thehanimo/pr-title-checker@v1.4.2
with:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
pass_on_octokit_error: false
configuration_path: ".github/pr-title-checker-config.json"
breaking:
runs-on: ubuntu-20.04
timeout-minutes: 10
steps:
- uses: thehanimo/pr-title-checker@v1.4.2
with:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
pass_on_octokit_error: false
configuration_path: ".github/pr-title-breaking-change-label-config.json"


@@ -82,7 +82,7 @@ on:
# Use env variables to control all the release process.
env:
# The arguments of building greptime.
RUST_TOOLCHAIN: nightly-2023-12-19
RUST_TOOLCHAIN: nightly-2024-04-18
CARGO_PROFILE: nightly
# Controls whether to run tests, include unit-test, integration-test and sqlness.
@@ -436,7 +436,7 @@ jobs:
github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
notification:
if: ${{ always() }} # Not requiring successful dependent jobs, always run.
if: ${{ always() || github.repository == 'GreptimeTeam/greptimedb' }}
name: Send notification to Greptime team
needs: [
release-images-to-dockerhub,

.github/workflows/schedule.yml vendored Normal file (33 lines added)

@@ -0,0 +1,33 @@
name: Schedule Management
on:
schedule:
- cron: '4 2 * * *'
workflow_dispatch:
permissions:
contents: read
issues: write
pull-requests: write
jobs:
maintenance:
name: Periodic Maintenance
runs-on: ubuntu-latest
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
steps:
- uses: actions/checkout@v4
- uses: actions/setup-node@v4
with:
node-version: 22
- uses: pnpm/action-setup@v3
with:
package_json_file: 'cyborg/package.json'
run_install: true
- name: Describe the Environment
working-directory: cyborg
run: pnpm tsx -v
- name: Do Maintenance
working-directory: cyborg
run: pnpm tsx bin/schedule.ts
env:
GITHUB_TOKEN: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}


@@ -0,0 +1,30 @@
name: "Semantic Pull Request"
on:
pull_request_target:
types:
- opened
- reopened
- edited
jobs:
check:
runs-on: ubuntu-20.04
timeout-minutes: 10
steps:
- uses: actions/checkout@v4
- uses: actions/setup-node@v4
with:
node-version: 22
- uses: pnpm/action-setup@v3
with:
package_json_file: 'cyborg/package.json'
run_install: true
- name: Describe the Environment
working-directory: cyborg
run: pnpm tsx -v
- name: Check Pull Request
working-directory: cyborg
run: pnpm tsx bin/check-pull-request.ts
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}


@@ -1,21 +0,0 @@
name: Auto Unassign
on:
schedule:
- cron: '4 2 * * *'
workflow_dispatch:
permissions:
contents: read
issues: write
pull-requests: write
jobs:
auto-unassign:
name: Auto Unassign
runs-on: ubuntu-latest
steps:
- name: Auto Unassign
uses: tisonspieces/auto-unassign@main
with:
token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
repository: ${{ github.repository }}


@@ -50,7 +50,7 @@ GreptimeDB uses the [Apache 2.0 license](https://github.com/GreptimeTeam/greptim
- To ensure that the community is free and confident in its ability to use your contributions, please sign the Contributor License Agreement (CLA) which will be incorporated in the pull request process.
- Make sure all files have proper license header (running `docker run --rm -v $(pwd):/github/workspace ghcr.io/korandoru/hawkeye-native:v3 format` from the project root).
- Make sure all your codes are formatted and follow the [coding style](https://pingcap.github.io/style-guide/rust/).
- Make sure all your codes are formatted and follow the [coding style](https://pingcap.github.io/style-guide/rust/) and [style guide](docs/style-guide.md).
- Make sure all unit tests are passed (using `cargo test --workspace` or [nextest](https://nexte.st/index.html) `cargo nextest run`).
- Make sure all clippy warnings are fixed (you can check it locally by running `cargo clippy --workspace --all-targets -- -D warnings`).

Cargo.lock generated (2933 lines changed)
File diff suppressed because it is too large.


@@ -11,6 +11,7 @@ members = [
"src/common/config",
"src/common/datasource",
"src/common/error",
"src/common/frontend",
"src/common/function",
"src/common/macro",
"src/common/greptimedb-telemetry",
@@ -70,16 +71,24 @@ license = "Apache-2.0"
clippy.print_stdout = "warn"
clippy.print_stderr = "warn"
clippy.implicit_clone = "warn"
clippy.readonly_write_lock = "allow"
rust.unknown_lints = "deny"
# Remove this after https://github.com/PyO3/pyo3/issues/4094
rust.non_local_definitions = "allow"
[workspace.dependencies]
# We turn off default-features for some dependencies here so the workspaces which inherit them can
# selectively turn them on if needed, since we can override default-features = true (from false)
# for the inherited dependency but cannot do the reverse (override from true to false).
#
# See for more details: https://github.com/rust-lang/cargo/issues/11329
ahash = { version = "0.8", features = ["compile-time-rng"] }
aquamarine = "0.3"
arrow = { version = "47.0" }
arrow-array = "47.0"
arrow-flight = "47.0"
arrow-ipc = { version = "47.0", features = ["lz4"] }
arrow-schema = { version = "47.0", features = ["serde"] }
arrow = { version = "51.0.0", features = ["prettyprint"] }
arrow-array = { version = "51.0.0", default-features = false, features = ["chrono-tz"] }
arrow-flight = "51.0"
arrow-ipc = { version = "51.0.0", default-features = false, features = ["lz4"] }
arrow-schema = { version = "51.0", features = ["serde"] }
async-stream = "0.3"
async-trait = "0.1"
axum = { version = "0.6", features = ["headers"] }
@@ -90,21 +99,24 @@ bytemuck = "1.12"
bytes = { version = "1.5", features = ["serde"] }
chrono = { version = "0.4", features = ["serde"] }
clap = { version = "4.4", features = ["derive"] }
crossbeam-utils = "0.8"
dashmap = "5.4"
datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
datafusion-common = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
datafusion-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
datafusion-optimizer = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
datafusion-physical-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
datafusion-sql = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
datafusion-substrait = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
datafusion-common = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
datafusion-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
datafusion-functions = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
datafusion-optimizer = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
datafusion-physical-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
datafusion-sql = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
datafusion-substrait = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
derive_builder = "0.12"
dotenv = "0.15"
etcd-client = "0.12"
# TODO(LFC): Wait for https://github.com/etcdv3/etcd-client/pull/76
etcd-client = { git = "https://github.com/MichaelScofield/etcd-client.git", rev = "4c371e9b3ea8e0a8ee2f9cbd7ded26e54a45df3b" }
fst = "0.4.7"
futures = "0.3"
futures-util = "0.3"
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "1bd2398b686e5ac6c1eef6daf615867ce27f75c1" }
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "219b2409bb701f75b43fc0ba64967d2ed8e75491" }
humantime = "2.1"
humantime-serde = "1.1"
itertools = "0.10"
@@ -115,12 +127,12 @@ moka = "0.12"
notify = "6.1"
num_cpus = "1.16"
once_cell = "1.18"
opentelemetry-proto = { git = "https://github.com/waynexia/opentelemetry-rust.git", rev = "33841b38dda79b15f2024952be5f32533325ca02", features = [
opentelemetry-proto = { version = "0.5", features = [
"gen-tonic",
"metrics",
"trace",
] }
parquet = "47.0"
parquet = { version = "51.0.0", default-features = false, features = ["arrow", "async", "object_store"] }
paste = "1.0"
pin-project = "1.0"
prometheus = { version = "0.13.3", features = ["process"] }
@@ -133,6 +145,7 @@ reqwest = { version = "0.11", default-features = false, features = [
"json",
"rustls-tls-native-roots",
"stream",
"multipart",
] }
rskafka = "0.5"
rust_decimal = "1.33"
@@ -143,18 +156,18 @@ serde_with = "3"
smallvec = { version = "1", features = ["serde"] }
snafu = "0.7"
sysinfo = "0.30"
# on branch v0.38.x
sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "6a93567ae38d42be5c8d08b13c8ff4dde26502ef", features = [
# on branch v0.44.x
sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "c919990bf62ad38d2b0c0a3bc90b26ad919d51b0", features = [
"visitor",
] }
strum = { version = "0.25", features = ["derive"] }
tempfile = "3"
tokio = { version = "1.28", features = ["full"] }
tokio = { version = "1.36", features = ["full"] }
tokio-stream = { version = "0.1" }
tokio-util = { version = "0.7", features = ["io-util", "compat"] }
toml = "0.8.8"
tonic = { version = "0.10", features = ["tls"] }
uuid = { version = "1", features = ["serde", "v4", "fast-rng"] }
tonic = { version = "0.11", features = ["tls"] }
uuid = { version = "1.7", features = ["serde", "v4", "fast-rng"] }
zstd = "0.13"
## workspaces members
@@ -169,6 +182,7 @@ common-config = { path = "src/common/config" }
common-datasource = { path = "src/common/datasource" }
common-decimal = { path = "src/common/decimal" }
common-error = { path = "src/common/error" }
common-frontend = { path = "src/common/frontend" }
common-function = { path = "src/common/function" }
common-greptimedb-telemetry = { path = "src/common/greptimedb-telemetry" }
common-grpc = { path = "src/common/grpc" }
@@ -190,6 +204,7 @@ common-wal = { path = "src/common/wal" }
datanode = { path = "src/datanode" }
datatypes = { path = "src/datatypes" }
file-engine = { path = "src/file-engine" }
flow = { path = "src/flow" }
frontend = { path = "src/frontend" }
index = { path = "src/index" }
log-store = { path = "src/log-store" }
@@ -211,6 +226,8 @@ sql = { path = "src/sql" }
store-api = { path = "src/store-api" }
substrait = { path = "src/common/substrait" }
table = { path = "src/table" }
# TODO some code depends on this
tests-integration = { path = "tests-integration" }
[workspace.dependencies.meter-macros]
git = "https://github.com/GreptimeTeam/greptime-meter.git"
@@ -225,3 +242,7 @@ strip = true
lto = "thin"
debug = false
incremental = false
[profile.dev.package.sqlness-runner]
debug = false
strip = true
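As a quick illustration of the workspace lint settings above (`clippy.print_stdout = "warn"`, `clippy.print_stderr = "warn"`), the sketch below shows the kind of code that would now be flagged in crates that inherit the workspace lints, and how a crate can opt out locally, as the removed nyc-taxi benchmark did with `#![allow(clippy::print_stdout)]`. The function names are illustrative only.

// With `clippy.print_stdout = "warn"` applied via the workspace lints, this
// `println!` produces a `clippy::print_stdout` warning under `make clippy`.
fn report_progress(done: usize, total: usize) {
    println!("progress: {done}/{total}");
}

// Opting out where stdout output is intentional (CLI tools, benchmarks).
#[allow(clippy::print_stdout)]
fn print_summary(rows: u64) {
    println!("inserted {rows} rows");
}

fn main() {
    report_progress(1, 10);
    print_summary(42);
}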


@@ -54,8 +54,10 @@ ifneq ($(strip $(RELEASE)),)
CARGO_BUILD_OPTS += --release
endif
ifeq ($(BUILDX_MULTI_PLATFORM_BUILD), true)
ifeq ($(BUILDX_MULTI_PLATFORM_BUILD), all)
BUILDX_MULTI_PLATFORM_BUILD_OPTS := --platform linux/amd64,linux/arm64 --push
else ifeq ($(BUILDX_MULTI_PLATFORM_BUILD), amd64)
BUILDX_MULTI_PLATFORM_BUILD_OPTS := --platform linux/amd64 --push
else
BUILDX_MULTI_PLATFORM_BUILD_OPTS := -o type=docker
endif
@@ -169,6 +171,10 @@ check: ## Cargo check all the targets.
clippy: ## Check clippy rules.
cargo clippy --workspace --all-targets --all-features -- -D warnings
.PHONY: fix-clippy
fix-clippy: ## Fix clippy violations.
cargo clippy --workspace --all-targets --all-features --fix
.PHONY: fmt-check
fmt-check: ## Check code format.
cargo fmt --all -- --check
@@ -188,6 +194,16 @@ run-it-in-container: start-etcd ## Run integration tests in dev-builder.
-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:latest \
make test sqlness-test BUILD_JOBS=${BUILD_JOBS}
##@ Docs
config-docs: ## Generate configuration documentation from toml files.
docker run --rm \
-v ${PWD}:/greptimedb \
-w /greptimedb/config \
toml2docs/toml2docs:latest \
-p '##' \
-t ./config-docs-template.md \
-o ./config.md
##@ General
# The help target prints out all targets with their descriptions organized


@@ -143,7 +143,7 @@ cargo run -- standalone start
- [GreptimeDB C++ Ingester](https://github.com/GreptimeTeam/greptimedb-ingester-cpp)
- [GreptimeDB Erlang Ingester](https://github.com/GreptimeTeam/greptimedb-ingester-erl)
- [GreptimeDB Rust Ingester](https://github.com/GreptimeTeam/greptimedb-ingester-rust)
- [GreptimeDB JavaScript Ingester](https://github.com/GreptimeTeam/greptime-ingester-js)
- [GreptimeDB JavaScript Ingester](https://github.com/GreptimeTeam/greptimedb-ingester-js)
### Grafana Dashboard


@@ -33,6 +33,8 @@ rand.workspace = true
rskafka.workspace = true
serde.workspace = true
store-api.workspace = true
# TODO depend `Database` client
tests-integration.workspace = true
tokio.workspace = true
toml.workspace = true
uuid.workspace = true


@@ -1,543 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Use the taxi trip records from New York City dataset to bench. You can download the dataset from
//! [here](https://www1.nyc.gov/site/tlc/about/tlc-trip-record-data.page).
#![allow(clippy::print_stdout)]
use std::collections::HashMap;
use std::path::{Path, PathBuf};
use std::time::Instant;
use arrow::array::{ArrayRef, PrimitiveArray, StringArray, TimestampMicrosecondArray};
use arrow::datatypes::{DataType, Float64Type, Int64Type};
use arrow::record_batch::RecordBatch;
use clap::Parser;
use client::api::v1::column::Values;
use client::api::v1::{
Column, ColumnDataType, ColumnDef, CreateTableExpr, InsertRequest, InsertRequests, SemanticType,
};
use client::{Client, Database, OutputData, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use futures_util::TryStreamExt;
use indicatif::{MultiProgress, ProgressBar, ProgressStyle};
use parquet::arrow::arrow_reader::ParquetRecordBatchReaderBuilder;
use tokio::task::JoinSet;
const CATALOG_NAME: &str = "greptime";
const SCHEMA_NAME: &str = "public";
#[derive(Parser)]
#[command(name = "NYC benchmark runner")]
struct Args {
/// Path to the dataset
#[arg(short, long)]
path: Option<String>,
/// Batch size of insert request.
#[arg(short = 's', long = "batch-size", default_value_t = 4096)]
batch_size: usize,
/// Number of client threads on write (parallel on file level)
#[arg(short = 't', long = "thread-num", default_value_t = 4)]
thread_num: usize,
/// Number of query iteration
#[arg(short = 'i', long = "iter-num", default_value_t = 3)]
iter_num: usize,
#[arg(long = "skip-write")]
skip_write: bool,
#[arg(long = "skip-read")]
skip_read: bool,
#[arg(short, long, default_value_t = String::from("127.0.0.1:4001"))]
endpoint: String,
}
fn get_file_list<P: AsRef<Path>>(path: P) -> Vec<PathBuf> {
std::fs::read_dir(path)
.unwrap()
.map(|dir| dir.unwrap().path().canonicalize().unwrap())
.collect()
}
fn new_table_name() -> String {
format!("nyc_taxi_{}", chrono::Utc::now().timestamp())
}
async fn write_data(
table_name: &str,
batch_size: usize,
db: &Database,
path: PathBuf,
mpb: MultiProgress,
pb_style: ProgressStyle,
) -> u128 {
let file = std::fs::File::open(&path).unwrap();
let record_batch_reader_builder = ParquetRecordBatchReaderBuilder::try_new(file).unwrap();
let row_num = record_batch_reader_builder
.metadata()
.file_metadata()
.num_rows();
let record_batch_reader = record_batch_reader_builder
.with_batch_size(batch_size)
.build()
.unwrap();
let progress_bar = mpb.add(ProgressBar::new(row_num as _));
progress_bar.set_style(pb_style);
progress_bar.set_message(format!("{path:?}"));
let mut total_rpc_elapsed_ms = 0;
for record_batch in record_batch_reader {
let record_batch = record_batch.unwrap();
if !is_record_batch_full(&record_batch) {
continue;
}
let (columns, row_count) = convert_record_batch(record_batch);
let request = InsertRequest {
table_name: table_name.to_string(),
columns,
row_count,
};
let requests = InsertRequests {
inserts: vec![request],
};
let now = Instant::now();
db.insert(requests).await.unwrap();
let elapsed = now.elapsed();
total_rpc_elapsed_ms += elapsed.as_millis();
progress_bar.inc(row_count as _);
}
progress_bar.finish_with_message(format!("file {path:?} done in {total_rpc_elapsed_ms}ms",));
total_rpc_elapsed_ms
}
fn convert_record_batch(record_batch: RecordBatch) -> (Vec<Column>, u32) {
let schema = record_batch.schema();
let fields = schema.fields();
let row_count = record_batch.num_rows();
let mut columns = vec![];
for (array, field) in record_batch.columns().iter().zip(fields.iter()) {
let (values, datatype) = build_values(array);
let semantic_type = match field.name().as_str() {
"VendorID" => SemanticType::Tag,
"tpep_pickup_datetime" => SemanticType::Timestamp,
_ => SemanticType::Field,
};
let column = Column {
column_name: field.name().clone(),
values: Some(values),
null_mask: array
.to_data()
.nulls()
.map(|bitmap| bitmap.buffer().as_slice().to_vec())
.unwrap_or_default(),
datatype: datatype.into(),
semantic_type: semantic_type as i32,
..Default::default()
};
columns.push(column);
}
(columns, row_count as _)
}
fn build_values(column: &ArrayRef) -> (Values, ColumnDataType) {
match column.data_type() {
DataType::Int64 => {
let array = column
.as_any()
.downcast_ref::<PrimitiveArray<Int64Type>>()
.unwrap();
let values = array.values();
(
Values {
i64_values: values.to_vec(),
..Default::default()
},
ColumnDataType::Int64,
)
}
DataType::Float64 => {
let array = column
.as_any()
.downcast_ref::<PrimitiveArray<Float64Type>>()
.unwrap();
let values = array.values();
(
Values {
f64_values: values.to_vec(),
..Default::default()
},
ColumnDataType::Float64,
)
}
DataType::Timestamp(_, _) => {
let array = column
.as_any()
.downcast_ref::<TimestampMicrosecondArray>()
.unwrap();
let values = array.values();
(
Values {
timestamp_microsecond_values: values.to_vec(),
..Default::default()
},
ColumnDataType::TimestampMicrosecond,
)
}
DataType::Utf8 => {
let array = column.as_any().downcast_ref::<StringArray>().unwrap();
let values = array.iter().filter_map(|s| s.map(String::from)).collect();
(
Values {
string_values: values,
..Default::default()
},
ColumnDataType::String,
)
}
DataType::Null
| DataType::Boolean
| DataType::Int8
| DataType::Int16
| DataType::Int32
| DataType::UInt8
| DataType::UInt16
| DataType::UInt32
| DataType::UInt64
| DataType::Float16
| DataType::Float32
| DataType::Date32
| DataType::Date64
| DataType::Time32(_)
| DataType::Time64(_)
| DataType::Duration(_)
| DataType::Interval(_)
| DataType::Binary
| DataType::FixedSizeBinary(_)
| DataType::LargeBinary
| DataType::LargeUtf8
| DataType::List(_)
| DataType::FixedSizeList(_, _)
| DataType::LargeList(_)
| DataType::Struct(_)
| DataType::Union(_, _)
| DataType::Dictionary(_, _)
| DataType::Decimal128(_, _)
| DataType::Decimal256(_, _)
| DataType::RunEndEncoded(_, _)
| DataType::Map(_, _) => todo!(),
}
}
fn is_record_batch_full(batch: &RecordBatch) -> bool {
batch.columns().iter().all(|col| col.null_count() == 0)
}
fn create_table_expr(table_name: &str) -> CreateTableExpr {
CreateTableExpr {
catalog_name: CATALOG_NAME.to_string(),
schema_name: SCHEMA_NAME.to_string(),
table_name: table_name.to_string(),
desc: String::default(),
column_defs: vec![
ColumnDef {
name: "VendorID".to_string(),
data_type: ColumnDataType::Int64 as i32,
is_nullable: true,
default_constraint: vec![],
semantic_type: SemanticType::Tag as i32,
comment: String::new(),
..Default::default()
},
ColumnDef {
name: "tpep_pickup_datetime".to_string(),
data_type: ColumnDataType::TimestampMicrosecond as i32,
is_nullable: false,
default_constraint: vec![],
semantic_type: SemanticType::Timestamp as i32,
comment: String::new(),
..Default::default()
},
ColumnDef {
name: "tpep_dropoff_datetime".to_string(),
data_type: ColumnDataType::TimestampMicrosecond as i32,
is_nullable: true,
default_constraint: vec![],
semantic_type: SemanticType::Field as i32,
comment: String::new(),
..Default::default()
},
ColumnDef {
name: "passenger_count".to_string(),
data_type: ColumnDataType::Float64 as i32,
is_nullable: true,
default_constraint: vec![],
semantic_type: SemanticType::Field as i32,
comment: String::new(),
..Default::default()
},
ColumnDef {
name: "trip_distance".to_string(),
data_type: ColumnDataType::Float64 as i32,
is_nullable: true,
default_constraint: vec![],
semantic_type: SemanticType::Field as i32,
comment: String::new(),
..Default::default()
},
ColumnDef {
name: "RatecodeID".to_string(),
data_type: ColumnDataType::Float64 as i32,
is_nullable: true,
default_constraint: vec![],
semantic_type: SemanticType::Field as i32,
comment: String::new(),
..Default::default()
},
ColumnDef {
name: "store_and_fwd_flag".to_string(),
data_type: ColumnDataType::String as i32,
is_nullable: true,
default_constraint: vec![],
semantic_type: SemanticType::Field as i32,
comment: String::new(),
..Default::default()
},
ColumnDef {
name: "PULocationID".to_string(),
data_type: ColumnDataType::Int64 as i32,
is_nullable: true,
default_constraint: vec![],
semantic_type: SemanticType::Field as i32,
comment: String::new(),
..Default::default()
},
ColumnDef {
name: "DOLocationID".to_string(),
data_type: ColumnDataType::Int64 as i32,
is_nullable: true,
default_constraint: vec![],
semantic_type: SemanticType::Field as i32,
comment: String::new(),
..Default::default()
},
ColumnDef {
name: "payment_type".to_string(),
data_type: ColumnDataType::Int64 as i32,
is_nullable: true,
default_constraint: vec![],
semantic_type: SemanticType::Field as i32,
comment: String::new(),
..Default::default()
},
ColumnDef {
name: "fare_amount".to_string(),
data_type: ColumnDataType::Float64 as i32,
is_nullable: true,
default_constraint: vec![],
semantic_type: SemanticType::Field as i32,
comment: String::new(),
..Default::default()
},
ColumnDef {
name: "extra".to_string(),
data_type: ColumnDataType::Float64 as i32,
is_nullable: true,
default_constraint: vec![],
semantic_type: SemanticType::Field as i32,
comment: String::new(),
..Default::default()
},
ColumnDef {
name: "mta_tax".to_string(),
data_type: ColumnDataType::Float64 as i32,
is_nullable: true,
default_constraint: vec![],
semantic_type: SemanticType::Field as i32,
comment: String::new(),
..Default::default()
},
ColumnDef {
name: "tip_amount".to_string(),
data_type: ColumnDataType::Float64 as i32,
is_nullable: true,
default_constraint: vec![],
semantic_type: SemanticType::Field as i32,
comment: String::new(),
..Default::default()
},
ColumnDef {
name: "tolls_amount".to_string(),
data_type: ColumnDataType::Float64 as i32,
is_nullable: true,
default_constraint: vec![],
semantic_type: SemanticType::Field as i32,
comment: String::new(),
..Default::default()
},
ColumnDef {
name: "improvement_surcharge".to_string(),
data_type: ColumnDataType::Float64 as i32,
is_nullable: true,
default_constraint: vec![],
semantic_type: SemanticType::Field as i32,
comment: String::new(),
..Default::default()
},
ColumnDef {
name: "total_amount".to_string(),
data_type: ColumnDataType::Float64 as i32,
is_nullable: true,
default_constraint: vec![],
semantic_type: SemanticType::Field as i32,
comment: String::new(),
..Default::default()
},
ColumnDef {
name: "congestion_surcharge".to_string(),
data_type: ColumnDataType::Float64 as i32,
is_nullable: true,
default_constraint: vec![],
semantic_type: SemanticType::Field as i32,
comment: String::new(),
..Default::default()
},
ColumnDef {
name: "airport_fee".to_string(),
data_type: ColumnDataType::Float64 as i32,
is_nullable: true,
default_constraint: vec![],
semantic_type: SemanticType::Field as i32,
comment: String::new(),
..Default::default()
},
],
time_index: "tpep_pickup_datetime".to_string(),
primary_keys: vec!["VendorID".to_string()],
create_if_not_exists: true,
table_options: Default::default(),
table_id: None,
engine: "mito".to_string(),
}
}
fn query_set(table_name: &str) -> HashMap<String, String> {
HashMap::from([
(
"count_all".to_string(),
format!("SELECT COUNT(*) FROM {table_name};"),
),
(
"fare_amt_by_passenger".to_string(),
format!("SELECT passenger_count, MIN(fare_amount), MAX(fare_amount), SUM(fare_amount) FROM {table_name} GROUP BY passenger_count"),
)
])
}
async fn do_write(args: &Args, db: &Database, table_name: &str) {
let mut file_list = get_file_list(args.path.clone().expect("Specify data path in argument"));
let mut write_jobs = JoinSet::new();
let create_table_result = db.create(create_table_expr(table_name)).await;
println!("Create table result: {create_table_result:?}");
let progress_bar_style = ProgressStyle::with_template(
"[{elapsed_precise}] {bar:60.cyan/blue} {pos:>7}/{len:7} {msg}",
)
.unwrap()
.progress_chars("##-");
let multi_progress_bar = MultiProgress::new();
let file_progress = multi_progress_bar.add(ProgressBar::new(file_list.len() as _));
file_progress.inc(0);
let batch_size = args.batch_size;
for _ in 0..args.thread_num {
if let Some(path) = file_list.pop() {
let db = db.clone();
let mpb = multi_progress_bar.clone();
let pb_style = progress_bar_style.clone();
let table_name = table_name.to_string();
let _ = write_jobs.spawn(async move {
write_data(&table_name, batch_size, &db, path, mpb, pb_style).await
});
}
}
while write_jobs.join_next().await.is_some() {
file_progress.inc(1);
if let Some(path) = file_list.pop() {
let db = db.clone();
let mpb = multi_progress_bar.clone();
let pb_style = progress_bar_style.clone();
let table_name = table_name.to_string();
let _ = write_jobs.spawn(async move {
write_data(&table_name, batch_size, &db, path, mpb, pb_style).await
});
}
}
}
async fn do_query(num_iter: usize, db: &Database, table_name: &str) {
for (query_name, query) in query_set(table_name) {
println!("Running query: {query}");
for i in 0..num_iter {
let now = Instant::now();
let res = db.sql(&query).await.unwrap();
match res.data {
OutputData::AffectedRows(_) | OutputData::RecordBatches(_) => (),
OutputData::Stream(stream) => {
stream.try_collect::<Vec<_>>().await.unwrap();
}
}
let elapsed = now.elapsed();
println!(
"query {}, iteration {}: {}ms",
query_name,
i,
elapsed.as_millis(),
);
}
}
}
fn main() {
let args = Args::parse();
tokio::runtime::Builder::new_multi_thread()
.worker_threads(args.thread_num)
.enable_all()
.build()
.unwrap()
.block_on(async {
let client = Client::with_urls(vec![&args.endpoint]);
let db = Database::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, client);
let table_name = new_table_name();
if !args.skip_write {
do_write(&args, &db, &table_name).await;
}
if !args.skip_read {
do_query(args.iter_num, &db, &table_name).await;
}
})
}


@@ -53,7 +53,7 @@ Release date: {{ timestamp | date(format="%B %d, %Y") }}
## New Contributors
{% endif -%}
{% for contributor in github.contributors | filter(attribute="is_first_time", value=true) %}
* @{{ contributor.username }} made their first contribution
* [@{{ contributor.username }}](https://github.com/{{ contributor.username }}) made their first contribution
{%- if contributor.pr_number %} in \
[#{{ contributor.pr_number }}]({{ self::remote_url() }}/pull/{{ contributor.pr_number }}) \
{%- endif %}
@@ -65,7 +65,17 @@ Release date: {{ timestamp | date(format="%B %d, %Y") }}
We would like to thank the following contributors from the GreptimeDB community:
{{ github.contributors | map(attribute="username") | join(sep=", ") }}
{%- set contributors = github.contributors | sort(attribute="username") | map(attribute="username") -%}
{%- set bots = ['dependabot[bot]'] %}
{% for contributor in contributors %}
{%- if bots is containing(contributor) -%}{% continue %}{%- endif -%}
{%- if loop.first -%}
[@{{ contributor }}](https://github.com/{{ contributor }})
{%- else -%}
, [@{{ contributor }}](https://github.com/{{ contributor }})
{%- endif -%}
{%- endfor %}
{%- endif %}
{% raw %}\n{% endraw %}
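The Tera template logic above, which skips bot accounts, sorts contributors by username, and renders them as comma-separated Markdown links, is roughly equivalent to the following Rust sketch. It is included only to make the template's behaviour explicit; the release notes are generated by git-cliff, not by this code.

// Rough Rust equivalent of the contributor-list logic in the template above.
fn render_contributors(usernames: &[&str]) -> String {
    let bots = ["dependabot[bot]"];
    let mut names: Vec<&str> = usernames
        .iter()
        .copied()
        .filter(|u| !bots.contains(u))
        .collect();
    names.sort_unstable();
    names
        .iter()
        .map(|u| format!("[@{u}](https://github.com/{u})"))
        .collect::<Vec<_>>()
        .join(", ")
}

fn main() {
    let rendered = render_contributors(&["dependabot[bot]", "waynexia", "discord9"]);
    assert_eq!(
        rendered,
        "[@discord9](https://github.com/discord9), [@waynexia](https://github.com/waynexia)"
    );
    println!("{rendered}");
}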


@@ -0,0 +1,19 @@
# Configurations
## Standalone Mode
{{ toml2docs "./standalone.example.toml" }}
## Cluster Mode
### Frontend
{{ toml2docs "./frontend.example.toml" }}
### Metasrv
{{ toml2docs "./metasrv.example.toml" }}
### Datanode
{{ toml2docs "./datanode.example.toml" }}

config/config.md Normal file (372 lines added)

@@ -0,0 +1,372 @@
# Configurations
## Standalone Mode
| Key | Type | Default | Descriptions |
| --- | -----| ------- | ----------- |
| `mode` | String | `standalone` | The running mode of the datanode. It can be `standalone` or `distributed`. |
| `enable_telemetry` | Bool | `true` | Enable telemetry to collect anonymous usage data. |
| `default_timezone` | String | `None` | The default timezone of the server. |
| `http` | -- | -- | The HTTP server options. |
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
| `http.timeout` | String | `30s` | HTTP request timeout. |
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`. |
| `grpc` | -- | -- | The gRPC server options. |
| `grpc.addr` | String | `127.0.0.1:4001` | The address to bind the gRPC server. |
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
| `mysql` | -- | -- | MySQL server options. |
| `mysql.enable` | Bool | `true` | Whether to enable. |
| `mysql.addr` | String | `127.0.0.1:4002` | The addr to bind the MySQL server. |
| `mysql.runtime_size` | Integer | `2` | The number of server worker threads. |
| `mysql.tls` | -- | -- | -- |
| `mysql.tls.mode` | String | `disable` | TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html<br/>- `disable` (default value)<br/>- `prefer`<br/>- `require`<br/>- `verify-ca`<br/>- `verify-full` |
| `mysql.tls.cert_path` | String | `None` | Certificate file path. |
| `mysql.tls.key_path` | String | `None` | Private key file path. |
| `mysql.tls.watch` | Bool | `false` | Watch for Certificate and key file change and auto reload |
| `postgres` | -- | -- | PostgreSQL server options. |
| `postgres.enable` | Bool | `true` | Whether to enable. |
| `postgres.addr` | String | `127.0.0.1:4003` | The addr to bind the PostgreSQL server. |
| `postgres.runtime_size` | Integer | `2` | The number of server worker threads. |
| `postgres.tls` | -- | -- | PostgreSQL server TLS options, see `mysql_options.tls` section. |
| `postgres.tls.mode` | String | `disable` | TLS mode. |
| `postgres.tls.cert_path` | String | `None` | Certificate file path. |
| `postgres.tls.key_path` | String | `None` | Private key file path. |
| `postgres.tls.watch` | Bool | `false` | Watch for Certificate and key file change and auto reload |
| `opentsdb` | -- | -- | OpenTSDB protocol options. |
| `opentsdb.enable` | Bool | `true` | Whether to enable OpenTSDB put in HTTP API. |
| `influxdb` | -- | -- | InfluxDB protocol options. |
| `influxdb.enable` | Bool | `true` | Whether to enable InfluxDB protocol in HTTP API. |
| `prom_store` | -- | -- | Prometheus remote storage options |
| `prom_store.enable` | Bool | `true` | Whether to enable Prometheus remote write and read in HTTP API. |
| `prom_store.with_metric_engine` | Bool | `true` | Whether to store the data from Prometheus remote write in metric engine. |
| `wal` | -- | -- | The WAL options. |
| `wal.provider` | String | `raft_engine` | The provider of the WAL.<br/>- `raft_engine`: the wal is stored in the local file system by raft-engine.<br/>- `kafka`: it's remote wal that data is stored in Kafka. |
| `wal.dir` | String | `None` | The directory to store the WAL files.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.file_size` | String | `256MB` | The size of the WAL segment file.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.purge_threshold` | String | `4GB` | The threshold of the WAL size to trigger a flush.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.purge_interval` | String | `10m` | The interval to trigger a flush.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.read_batch_size` | Integer | `128` | The read batch size.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.sync_write` | Bool | `false` | Whether to use sync write.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.enable_log_recycle` | Bool | `true` | Whether to reuse logically truncated log files.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.prefill_log_files` | Bool | `false` | Whether to pre-create log files on start up.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.sync_period` | String | `10s` | Duration for fsyncing log files.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.broker_endpoints` | Array | -- | The Kafka broker endpoints.<br/>**It's only used when the provider is `kafka`**. |
| `wal.max_batch_size` | String | `1MB` | The max size of a single producer batch.<br/>Warning: Kafka has a default limit of 1MB per message in a topic.<br/>**It's only used when the provider is `kafka`**. |
| `wal.linger` | String | `200ms` | The linger duration of a kafka batch producer.<br/>**It's only used when the provider is `kafka`**. |
| `wal.consumer_wait_timeout` | String | `100ms` | The consumer wait timeout.<br/>**It's only used when the provider is `kafka`**. |
| `wal.backoff_init` | String | `500ms` | The initial backoff delay.<br/>**It's only used when the provider is `kafka`**. |
| `wal.backoff_max` | String | `10s` | The maximum backoff delay.<br/>**It's only used when the provider is `kafka`**. |
| `wal.backoff_base` | Integer | `2` | The exponential backoff rate, i.e. next backoff = base * current backoff.<br/>**It's only used when the provider is `kafka`**. |
| `wal.backoff_deadline` | String | `5mins` | The deadline of retries.<br/>**It's only used when the provider is `kafka`**. |
| `metadata_store` | -- | -- | Metadata storage options. |
| `metadata_store.file_size` | String | `256MB` | Kv file size in bytes. |
| `metadata_store.purge_threshold` | String | `4GB` | Kv purge threshold. |
| `procedure` | -- | -- | Procedure storage options. |
| `procedure.max_retry_times` | Integer | `3` | Procedure max retry time. |
| `procedure.retry_delay` | String | `500ms` | Initial retry delay of procedures, increases exponentially |
| `storage` | -- | -- | The data storage options. |
| `storage.data_home` | String | `/tmp/greptimedb/` | The working home directory. |
| `storage.type` | String | `File` | The storage type used to store the data.<br/>- `File`: the data is stored in the local file system.<br/>- `S3`: the data is stored in the S3 object storage.<br/>- `Gcs`: the data is stored in the Google Cloud Storage.<br/>- `Azblob`: the data is stored in the Azure Blob Storage.<br/>- `Oss`: the data is stored in the Aliyun OSS. |
| `storage.cache_path` | String | `None` | Cache configuration for object storage such as 'S3' etc.<br/>The local file cache directory. |
| `storage.cache_capacity` | String | `None` | The local file cache capacity in bytes. |
| `storage.bucket` | String | `None` | The S3 bucket name.<br/>**It's only used when the storage type is `S3`, `Oss` and `Gcs`**. |
| `storage.root` | String | `None` | The S3 data will be stored in the specified prefix, for example, `s3://${bucket}/${root}`.<br/>**It's only used when the storage type is `S3`, `Oss` and `Azblob`**. |
| `storage.access_key_id` | String | `None` | The access key id of the aws account.<br/>It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.<br/>**It's only used when the storage type is `S3` and `Oss`**. |
| `storage.secret_access_key` | String | `None` | The secret access key of the aws account.<br/>It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.<br/>**It's only used when the storage type is `S3`**. |
| `storage.access_key_secret` | String | `None` | The secret access key of the aliyun account.<br/>**It's only used when the storage type is `Oss`**. |
| `storage.account_name` | String | `None` | The account key of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
| `storage.account_key` | String | `None` | The account key of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
| `storage.scope` | String | `None` | The scope of the google cloud storage.<br/>**It's only used when the storage type is `Gcs`**. |
| `storage.credential_path` | String | `None` | The credential path of the google cloud storage.<br/>**It's only used when the storage type is `Gcs`**. |
| `storage.container` | String | `None` | The container of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
| `storage.sas_token` | String | `None` | The sas token of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
| `storage.endpoint` | String | `None` | The endpoint of the S3 service.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
| `storage.region` | String | `None` | The region of the S3 service.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
| `[[region_engine]]` | -- | -- | The region engine options. You can configure multiple region engines. |
| `region_engine.mito` | -- | -- | The Mito engine options. |
| `region_engine.mito.num_workers` | Integer | `8` | Number of region workers. |
| `region_engine.mito.worker_channel_size` | Integer | `128` | Request channel size of each worker. |
| `region_engine.mito.worker_request_batch_size` | Integer | `64` | Max batch size for a worker to handle requests. |
| `region_engine.mito.manifest_checkpoint_distance` | Integer | `10` | Number of meta action updated to trigger a new checkpoint for the manifest. |
| `region_engine.mito.compress_manifest` | Bool | `false` | Whether to compress manifest and checkpoint file by gzip (default false). |
| `region_engine.mito.max_background_jobs` | Integer | `4` | Max number of running background jobs |
| `region_engine.mito.auto_flush_interval` | String | `1h` | Interval to auto flush a region if it has not flushed yet. |
| `region_engine.mito.global_write_buffer_size` | String | `1GB` | Global write buffer size for all regions. If not set, it defaults to 1/8 of OS memory with a max limitation of 1GB. |
| `region_engine.mito.global_write_buffer_reject_size` | String | `2GB` | Global write buffer size threshold to reject write requests. If not set, it defaults to 2 times `global_write_buffer_size`. |
| `region_engine.mito.sst_meta_cache_size` | String | `128MB` | Cache size for SST metadata. Set it to 0 to disable the cache.<br/>If not set, it defaults to 1/32 of OS memory with a max limitation of 128MB. |
| `region_engine.mito.vector_cache_size` | String | `512MB` | Cache size for vectors and arrow arrays. Set it to 0 to disable the cache.<br/>If not set, it defaults to 1/16 of OS memory with a max limitation of 512MB. |
| `region_engine.mito.page_cache_size` | String | `512MB` | Cache size for pages of SST row groups. Set it to 0 to disable the cache.<br/>If not set, it defaults to 1/16 of OS memory with a max limitation of 512MB. |
| `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
| `region_engine.mito.scan_parallelism` | Integer | `0` | Parallelism to scan a region (default: 1/4 of cpu cores).<br/>- `0`: using the default value (1/4 of cpu cores).<br/>- `1`: scan in current thread.<br/>- `n`: scan in parallelism n. |
| `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
| `region_engine.mito.allow_stale_entries` | Bool | `false` | Whether to allow stale WAL entries read during replay. |
| `region_engine.mito.inverted_index` | -- | -- | The options for inverted index in Mito engine. |
| `region_engine.mito.inverted_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically<br/>- `disable`: never |
| `region_engine.mito.inverted_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically<br/>- `disable`: never |
| `region_engine.mito.inverted_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically<br/>- `disable`: never |
| `region_engine.mito.inverted_index.mem_threshold_on_create` | String | `64M` | Memory threshold for performing an external sort during index creation.<br/>Setting to empty will disable external sorting, forcing all sorting operations to happen in memory. |
| `region_engine.mito.inverted_index.intermediate_path` | String | `""` | File system path to store intermediate files for external sorting (default `{data_home}/index_intermediate`). |
| `region_engine.mito.memtable` | -- | -- | -- |
| `region_engine.mito.memtable.type` | String | `time_series` | Memtable type.<br/>- `time_series`: time-series memtable<br/>- `partition_tree`: partition tree memtable (experimental) |
| `region_engine.mito.memtable.index_max_keys_per_shard` | Integer | `8192` | The max number of keys in one shard.<br/>Only available for `partition_tree` memtable. |
| `region_engine.mito.memtable.data_freeze_threshold` | Integer | `32768` | The max rows of data inside the actively writing buffer in one shard.<br/>Only available for `partition_tree` memtable. |
| `region_engine.mito.memtable.fork_dictionary_bytes` | String | `1GiB` | Max dictionary bytes.<br/>Only available for `partition_tree` memtable. |
| `logging` | -- | -- | The logging options. |
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. |
| `logging.level` | String | `None` | The log level. Can be `info`/`debug`/`warn`/`error`. |
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
| `logging.otlp_endpoint` | String | `None` | The OTLP tracing endpoint. |
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
| `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing that will be sampled and exported.<br/>Valid range `[0, 1]`: 1 means all traces are sampled, 0 means none are sampled; the default value is 1.<br/>Ratios > 1 are treated as 1, and ratios < 0 are treated as 0. |
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
| `export_metrics` | -- | -- | GreptimeDB can export its own metrics and send them to a Prometheus-compatible service (e.g. `greptimedb` itself) via the remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally; it is different from Prometheus scraping. |
| `export_metrics.enable` | Bool | `false` | Whether to enable exporting of metrics. |
| `export_metrics.write_interval` | String | `30s` | The interval at which metrics are exported. |
| `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommended for collecting the metrics the instance generates itself (see the sketch after this table). |
| `export_metrics.self_import.db` | String | `None` | -- |
| `export_metrics.remote_write` | -- | -- | -- |
| `export_metrics.remote_write.url` | String | `""` | The URL to send metrics to, for example: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`. |
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers to send with Prometheus remote-write requests. |
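For reference, a minimal sketch of the `export_metrics` options above in TOML, assuming a standalone instance that imports its own metrics via `self_import`; the values simply mirror the defaults and examples documented in the table, and the target database `information_schema` is the one used in the example configs.

```toml
# Minimal sketch: a standalone instance importing its own metrics.
# Assumption: the metrics should land in the `information_schema` database.
[export_metrics]
enable = true
write_interval = "30s"

[export_metrics.self_import]
db = "information_schema"
```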
## Cluster Mode
### Frontend
| Key | Type | Default | Descriptions |
| --- | -----| ------- | ----------- |
| `mode` | String | `standalone` | The running mode of the frontend. It can be `standalone` or `distributed` (a minimal distributed example follows this table). |
| `default_timezone` | String | `None` | The default timezone of the server. |
| `heartbeat` | -- | -- | The heartbeat options. |
| `heartbeat.interval` | String | `18s` | Interval for sending heartbeat messages to the metasrv. |
| `heartbeat.retry_interval` | String | `3s` | Interval for retrying to send heartbeat messages to the metasrv. |
| `http` | -- | -- | The HTTP server options. |
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
| `http.timeout` | String | `30s` | HTTP request timeout. |
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`. |
| `grpc` | -- | -- | The gRPC server options. |
| `grpc.addr` | String | `127.0.0.1:4001` | The address to bind the gRPC server. |
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
| `mysql` | -- | -- | MySQL server options. |
| `mysql.enable` | Bool | `true` | Whether to enable. |
| `mysql.addr` | String | `127.0.0.1:4002` | The address to bind the MySQL server. |
| `mysql.runtime_size` | Integer | `2` | The number of server worker threads. |
| `mysql.tls` | -- | -- | -- |
| `mysql.tls.mode` | String | `disable` | TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html<br/>- `disable` (default value)<br/>- `prefer`<br/>- `require`<br/>- `verify-ca`<br/>- `verify-full` |
| `mysql.tls.cert_path` | String | `None` | Certificate file path. |
| `mysql.tls.key_path` | String | `None` | Private key file path. |
| `mysql.tls.watch` | Bool | `false` | Watch for certificate and key file changes and reload them automatically. |
| `postgres` | -- | -- | PostgreSQL server options. |
| `postgres.enable` | Bool | `true` | Whether to enable. |
| `postgres.addr` | String | `127.0.0.1:4003` | The address to bind the PostgreSQL server. |
| `postgres.runtime_size` | Integer | `2` | The number of server worker threads. |
| `postgres.tls` | -- | -- | PostgreSQL server TLS options; see the `mysql.tls` section. |
| `postgres.tls.mode` | String | `disable` | TLS mode. |
| `postgres.tls.cert_path` | String | `None` | Certificate file path. |
| `postgres.tls.key_path` | String | `None` | Private key file path. |
| `postgres.tls.watch` | Bool | `false` | Watch for certificate and key file changes and reload them automatically. |
| `opentsdb` | -- | -- | OpenTSDB protocol options. |
| `opentsdb.enable` | Bool | `true` | Whether to enable OpenTSDB put in HTTP API. |
| `influxdb` | -- | -- | InfluxDB protocol options. |
| `influxdb.enable` | Bool | `true` | Whether to enable InfluxDB protocol in HTTP API. |
| `prom_store` | -- | -- | Prometheus remote storage options |
| `prom_store.enable` | Bool | `true` | Whether to enable Prometheus remote write and read in HTTP API. |
| `prom_store.with_metric_engine` | Bool | `true` | Whether to store the data from Prometheus remote write in metric engine. |
| `meta_client` | -- | -- | The metasrv client options. |
| `meta_client.metasrv_addrs` | Array | -- | The addresses of the metasrv. |
| `meta_client.timeout` | String | `3s` | Operation timeout. |
| `meta_client.heartbeat_timeout` | String | `500ms` | Heartbeat timeout. |
| `meta_client.ddl_timeout` | String | `10s` | DDL timeout. |
| `meta_client.connect_timeout` | String | `1s` | Connect server timeout. |
| `meta_client.tcp_nodelay` | Bool | `true` | `TCP_NODELAY` option for accepted connections. |
| `meta_client.metadata_cache_max_capacity` | Integer | `100000` | The maximum capacity of the metadata cache. |
| `meta_client.metadata_cache_ttl` | String | `10m` | TTL of the metadata cache. |
| `meta_client.metadata_cache_tti` | String | `5m` | TTI of the metadata cache. |
| `datanode` | -- | -- | Datanode options. |
| `datanode.client` | -- | -- | Datanode client options. |
| `datanode.client.timeout` | String | `10s` | -- |
| `datanode.client.connect_timeout` | String | `10s` | -- |
| `datanode.client.tcp_nodelay` | Bool | `true` | -- |
| `logging` | -- | -- | The logging options. |
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. |
| `logging.level` | String | `None` | The log level. Can be `info`/`debug`/`warn`/`error`. |
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
| `logging.otlp_endpoint` | String | `None` | The OTLP tracing endpoint. |
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
| `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing that will be sampled and exported.<br/>Valid range `[0, 1]`: 1 means all traces are sampled, 0 means none are sampled; the default value is 1.<br/>Ratios > 1 are treated as 1, and ratios < 0 are treated as 0. |
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
| `export_metrics` | -- | -- | The frontend can export its own metrics and send them to a Prometheus-compatible service (e.g. `greptimedb` itself) via the remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally; it is different from Prometheus scraping. |
| `export_metrics.enable` | Bool | `false` | Whether to enable exporting of metrics. |
| `export_metrics.write_interval` | String | `30s` | The interval at which metrics are exported. |
| `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommended for collecting the metrics the instance generates itself. |
| `export_metrics.self_import.db` | String | `None` | -- |
| `export_metrics.remote_write` | -- | -- | -- |
| `export_metrics.remote_write.url` | String | `""` | The URL to send metrics to, for example: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`. |
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers to send with Prometheus remote-write requests. |
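As referenced at the `mode` row above, here is a minimal sketch of a frontend configuration running in `distributed` mode. The metasrv address is an assumption for a local cluster and should be replaced in a real deployment; the remaining values mirror the defaults in this table.

```toml
# Minimal sketch of a distributed frontend.
# Assumption: metasrv runs locally on 127.0.0.1:3002.
mode = "distributed"

[http]
addr = "127.0.0.1:4000"

[meta_client]
metasrv_addrs = ["127.0.0.1:3002"]
timeout = "3s"

[heartbeat]
interval = "18s"
retry_interval = "3s"
```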
### Metasrv
| Key | Type | Default | Descriptions |
| --- | -----| ------- | ----------- |
| `data_home` | String | `/tmp/metasrv/` | The working home directory. |
| `bind_addr` | String | `127.0.0.1:3002` | The bind address of metasrv. |
| `server_addr` | String | `127.0.0.1:3002` | The server address that frontends and datanodes use to connect to metasrv; `127.0.0.1:3002` by default for localhost. |
| `store_addr` | String | `127.0.0.1:2379` | Etcd server address. |
| `selector` | String | `lease_based` | Datanode selector type.<br/>- `lease_based` (default value).<br/>- `load_based`<br/>For details, please see "https://docs.greptime.com/developer-guide/metasrv/selector". |
| `use_memory_store` | Bool | `false` | Store data in memory. |
| `enable_telemetry` | Bool | `true` | Whether to enable greptimedb telemetry. |
| `store_key_prefix` | String | `""` | If it's not empty, the metasrv will store all data with this key prefix. |
| `procedure` | -- | -- | Procedure storage options. |
| `procedure.max_retry_times` | Integer | `12` | The maximum number of times a procedure is retried. |
| `procedure.retry_delay` | String | `500ms` | The initial retry delay of procedures; it increases exponentially. |
| `procedure.max_metadata_value_size` | String | `1500KiB` | Automatically split large values.<br/>GreptimeDB procedures use etcd as the default metadata storage backend.<br/>The maximum size of any etcd request is 1.5 MiB.<br/>1500KiB = 1536KiB (1.5MiB) - 36KiB (reserved size of the key).<br/>Comment out `max_metadata_value_size` to disable splitting of large values (no limit). |
| `failure_detector` | -- | -- | -- |
| `failure_detector.threshold` | Float | `8.0` | -- |
| `failure_detector.min_std_deviation` | String | `100ms` | -- |
| `failure_detector.acceptable_heartbeat_pause` | String | `3000ms` | -- |
| `failure_detector.first_heartbeat_estimate` | String | `1000ms` | -- |
| `datanode` | -- | -- | Datanode options. |
| `datanode.client` | -- | -- | Datanode client options. |
| `datanode.client.timeout` | String | `10s` | -- |
| `datanode.client.connect_timeout` | String | `10s` | -- |
| `datanode.client.tcp_nodelay` | Bool | `true` | -- |
| `wal` | -- | -- | -- |
| `wal.provider` | String | `raft_engine` | The WAL provider.<br/>- `raft_engine` (default): no raft-engine WAL config is needed here, since metasrv currently only participates in remote WAL.<br/>- `kafka`: metasrv **must** be configured with the Kafka WAL options when the datanode uses the Kafka WAL provider (see the sketch after this table). |
| `wal.broker_endpoints` | Array | -- | The broker endpoints of the Kafka cluster. |
| `wal.num_topics` | Integer | `64` | Number of topics to be created upon start. |
| `wal.selector_type` | String | `round_robin` | Topic selector type.<br/>Available selector types:<br/>- `round_robin` (default) |
| `wal.topic_name_prefix` | String | `greptimedb_wal_topic` | A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`. |
| `wal.replication_factor` | Integer | `1` | Expected number of replicas of each partition. |
| `wal.create_topic_timeout` | String | `30s` | The timeout above which a topic creation operation is cancelled. |
| `wal.backoff_init` | String | `500ms` | The initial backoff for kafka clients. |
| `wal.backoff_max` | String | `10s` | The maximum backoff for kafka clients. |
| `wal.backoff_base` | Integer | `2` | Exponential backoff rate, i.e. next backoff = base * current backoff. |
| `wal.backoff_deadline` | String | `5mins` | Stop reconnecting if the total wait time reaches the deadline. If this config is missing, reconnection attempts never stop. |
| `logging` | -- | -- | The logging options. |
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. |
| `logging.level` | String | `None` | The log level. Can be `info`/`debug`/`warn`/`error`. |
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
| `logging.otlp_endpoint` | String | `None` | The OTLP tracing endpoint. |
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
| `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing that will be sampled and exported.<br/>Valid range `[0, 1]`: 1 means all traces are sampled, 0 means none are sampled; the default value is 1.<br/>Ratios > 1 are treated as 1, and ratios < 0 are treated as 0. |
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
| `export_metrics` | -- | -- | The metasrv can export its own metrics and send them to a Prometheus-compatible service (e.g. `greptimedb` itself) via the remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally; it is different from Prometheus scraping. |
| `export_metrics.enable` | Bool | `false` | Whether to enable exporting of metrics. |
| `export_metrics.write_interval` | String | `30s` | The interval at which metrics are exported. |
| `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommended for collecting the metrics the instance generates itself. |
| `export_metrics.self_import.db` | String | `None` | -- |
| `export_metrics.remote_write` | -- | -- | -- |
| `export_metrics.remote_write.url` | String | `""` | The URL to send metrics to, for example: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`. |
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers to send with Prometheus remote-write requests. |
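As referenced at the `wal.provider` row above, here is a minimal sketch of a metasrv configured for the Kafka (remote) WAL. The single local broker is an assumption; the remaining values mirror the defaults in this table.

```toml
# Minimal sketch: metasrv with the Kafka (remote) WAL provider.
# Assumption: a single Kafka broker is reachable at 127.0.0.1:9092.
[wal]
provider = "kafka"
broker_endpoints = ["127.0.0.1:9092"]
num_topics = 64
selector_type = "round_robin"
topic_name_prefix = "greptimedb_wal_topic"
replication_factor = 1
create_topic_timeout = "30s"
```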
### Datanode
| Key | Type | Default | Descriptions |
| --- | -----| ------- | ----------- |
| `mode` | String | `standalone` | The running mode of the datanode. It can be `standalone` or `distributed`. |
| `node_id` | Integer | `None` | The datanode identifier; it should be unique in the cluster. |
| `require_lease_before_startup` | Bool | `false` | Start services after regions have obtained leases.<br/>It will block the datanode start if it can't receive leases in the heartbeat from metasrv. |
| `init_regions_in_background` | Bool | `false` | Initialize all regions in the background during the startup.<br/>By default, it provides services after all regions have been initialized. |
| `rpc_addr` | String | `127.0.0.1:3001` | The gRPC address of the datanode. |
| `rpc_hostname` | String | `None` | The hostname of the datanode. |
| `rpc_runtime_size` | Integer | `8` | The number of gRPC server worker threads. |
| `rpc_max_recv_message_size` | String | `512MB` | The maximum receive message size for gRPC server. |
| `rpc_max_send_message_size` | String | `512MB` | The maximum send message size for gRPC server. |
| `enable_telemetry` | Bool | `true` | Enable telemetry to collect anonymous usage data. |
| `heartbeat` | -- | -- | The heartbeat options. |
| `heartbeat.interval` | String | `3s` | Interval for sending heartbeat messages to the metasrv. |
| `heartbeat.retry_interval` | String | `3s` | Interval for retrying to send heartbeat messages to the metasrv. |
| `meta_client` | -- | -- | The metasrv client options. |
| `meta_client.metasrv_addrs` | Array | -- | The addresses of the metasrv. |
| `meta_client.timeout` | String | `3s` | Operation timeout. |
| `meta_client.heartbeat_timeout` | String | `500ms` | Heartbeat timeout. |
| `meta_client.ddl_timeout` | String | `10s` | DDL timeout. |
| `meta_client.connect_timeout` | String | `1s` | Connect server timeout. |
| `meta_client.tcp_nodelay` | Bool | `true` | `TCP_NODELAY` option for accepted connections. |
| `meta_client.metadata_cache_max_capacity` | Integer | `100000` | The maximum capacity of the metadata cache. |
| `meta_client.metadata_cache_ttl` | String | `10m` | TTL of the metadata cache. |
| `meta_client.metadata_cache_tti` | String | `5m` | TTI of the metadata cache. |
| `wal` | -- | -- | The WAL options. |
| `wal.provider` | String | `raft_engine` | The provider of the WAL.<br/>- `raft_engine`: the WAL is stored in the local file system by raft-engine.<br/>- `kafka`: remote WAL; data is stored in Kafka. |
| `wal.dir` | String | `None` | The directory to store the WAL files.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.file_size` | String | `256MB` | The size of the WAL segment file.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.purge_threshold` | String | `4GB` | The threshold of the WAL size to trigger a flush.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.purge_interval` | String | `10m` | The interval to trigger a flush.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.read_batch_size` | Integer | `128` | The read batch size.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.sync_write` | Bool | `false` | Whether to use sync write.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.enable_log_recycle` | Bool | `true` | Whether to reuse logically truncated log files.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.prefill_log_files` | Bool | `false` | Whether to pre-create log files on start up.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.sync_period` | String | `10s` | Duration for fsyncing log files.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.broker_endpoints` | Array | -- | The Kafka broker endpoints.<br/>**It's only used when the provider is `kafka`**. |
| `wal.max_batch_size` | String | `1MB` | The max size of a single producer batch.<br/>Warning: Kafka has a default limit of 1MB per message in a topic.<br/>**It's only used when the provider is `kafka`**. |
| `wal.linger` | String | `200ms` | The linger duration of a kafka batch producer.<br/>**It's only used when the provider is `kafka`**. |
| `wal.consumer_wait_timeout` | String | `100ms` | The consumer wait timeout.<br/>**It's only used when the provider is `kafka`**. |
| `wal.backoff_init` | String | `500ms` | The initial backoff delay.<br/>**It's only used when the provider is `kafka`**. |
| `wal.backoff_max` | String | `10s` | The maximum backoff delay.<br/>**It's only used when the provider is `kafka`**. |
| `wal.backoff_base` | Integer | `2` | The exponential backoff rate, i.e. next backoff = base * current backoff.<br/>**It's only used when the provider is `kafka`**. |
| `wal.backoff_deadline` | String | `5mins` | The deadline of retries.<br/>**It's only used when the provider is `kafka`**. |
| `storage` | -- | -- | The data storage options. |
| `storage.data_home` | String | `/tmp/greptimedb/` | The working home directory. |
| `storage.type` | String | `File` | The storage type used to store the data.<br/>- `File`: the data is stored in the local file system.<br/>- `S3`: the data is stored in the S3 object storage.<br/>- `Gcs`: the data is stored in the Google Cloud Storage.<br/>- `Azblob`: the data is stored in the Azure Blob Storage.<br/>- `Oss`: the data is stored in the Aliyun OSS. |
| `storage.cache_path` | String | `None` | Cache configuration for object storage such as 'S3' etc.<br/>The local file cache directory. |
| `storage.cache_capacity` | String | `None` | The local file cache capacity in bytes. |
| `storage.bucket` | String | `None` | The S3 bucket name.<br/>**It's only used when the storage type is `S3`, `Oss` and `Gcs`**. |
| `storage.root` | String | `None` | The S3 data will be stored in the specified prefix, for example, `s3://${bucket}/${root}`.<br/>**It's only used when the storage type is `S3`, `Oss` and `Azblob`**. |
| `storage.access_key_id` | String | `None` | The access key id of the aws account.<br/>It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.<br/>**It's only used when the storage type is `S3` and `Oss`**. |
| `storage.secret_access_key` | String | `None` | The secret access key of the aws account.<br/>It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.<br/>**It's only used when the storage type is `S3`**. |
| `storage.access_key_secret` | String | `None` | The secret access key of the aliyun account.<br/>**It's only used when the storage type is `Oss`**. |
| `storage.account_name` | String | `None` | The account name of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
| `storage.account_key` | String | `None` | The account key of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
| `storage.scope` | String | `None` | The scope of the google cloud storage.<br/>**It's only used when the storage type is `Gcs`**. |
| `storage.credential_path` | String | `None` | The credential path of the google cloud storage.<br/>**It's only used when the storage type is `Gcs`**. |
| `storage.container` | String | `None` | The container of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
| `storage.sas_token` | String | `None` | The sas token of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
| `storage.endpoint` | String | `None` | The endpoint of the S3 service.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
| `storage.region` | String | `None` | The region of the S3 service.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
| `[[region_engine]]` | -- | -- | The region engine options. You can configure multiple region engines. |
| `region_engine.mito` | -- | -- | The Mito engine options. |
| `region_engine.mito.num_workers` | Integer | `8` | Number of region workers. |
| `region_engine.mito.worker_channel_size` | Integer | `128` | Request channel size of each worker. |
| `region_engine.mito.worker_request_batch_size` | Integer | `64` | Max batch size for a worker to handle requests. |
| `region_engine.mito.manifest_checkpoint_distance` | Integer | `10` | Number of meta action updated to trigger a new checkpoint for the manifest. |
| `region_engine.mito.compress_manifest` | Bool | `false` | Whether to compress manifest and checkpoint file by gzip (default false). |
| `region_engine.mito.max_background_jobs` | Integer | `4` | Max number of running background jobs |
| `region_engine.mito.auto_flush_interval` | String | `1h` | Interval to auto flush a region if it has not flushed yet. |
| `region_engine.mito.global_write_buffer_size` | String | `1GB` | Global write buffer size for all regions. If not set, it defaults to 1/8 of OS memory, with a maximum of 1GB. |
| `region_engine.mito.global_write_buffer_reject_size` | String | `2GB` | Global write buffer size threshold to reject write requests. If not set, it defaults to twice `global_write_buffer_size`. |
| `region_engine.mito.sst_meta_cache_size` | String | `128MB` | Cache size for SST metadata. Set it to 0 to disable the cache.<br/>If not set, it defaults to 1/32 of OS memory, with a maximum of 128MB. |
| `region_engine.mito.vector_cache_size` | String | `512MB` | Cache size for vectors and arrow arrays. Set it to 0 to disable the cache.<br/>If not set, it defaults to 1/16 of OS memory, with a maximum of 512MB. |
| `region_engine.mito.page_cache_size` | String | `512MB` | Cache size for pages of SST row groups. Set it to 0 to disable the cache.<br/>If not set, it defaults to 1/16 of OS memory, with a maximum of 512MB. |
| `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
| `region_engine.mito.scan_parallelism` | Integer | `0` | Parallelism to scan a region (default: 1/4 of CPU cores).<br/>- `0`: use the default value (1/4 of CPU cores).<br/>- `1`: scan in the current thread.<br/>- `n`: scan with parallelism `n`. |
| `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
| `region_engine.mito.allow_stale_entries` | Bool | `false` | Whether to allow stale WAL entries read during replay. |
| `region_engine.mito.inverted_index` | -- | -- | The options for inverted index in Mito engine. |
| `region_engine.mito.inverted_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically<br/>- `disable`: never |
| `region_engine.mito.inverted_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically<br/>- `disable`: never |
| `region_engine.mito.inverted_index.apply_on_query` | String | `auto` | Whether to apply the index on query.<br/>- `auto`: automatically<br/>- `disable`: never |
| `region_engine.mito.inverted_index.mem_threshold_on_create` | String | `64M` | Memory threshold for performing an external sort during index creation.<br/>Setting to empty will disable external sorting, forcing all sorting operations to happen in memory. |
| `region_engine.mito.inverted_index.intermediate_path` | String | `""` | File system path to store intermediate files for external sorting (default `{data_home}/index_intermediate`). |
| `region_engine.mito.memtable` | -- | -- | -- |
| `region_engine.mito.memtable.type` | String | `time_series` | Memtable type.<br/>- `time_series`: time-series memtable<br/>- `partition_tree`: partition tree memtable (experimental) |
| `region_engine.mito.memtable.index_max_keys_per_shard` | Integer | `8192` | The max number of keys in one shard.<br/>Only available for `partition_tree` memtable. |
| `region_engine.mito.memtable.data_freeze_threshold` | Integer | `32768` | The max rows of data inside the actively writing buffer in one shard.<br/>Only available for `partition_tree` memtable. |
| `region_engine.mito.memtable.fork_dictionary_bytes` | String | `1GiB` | Max dictionary bytes.<br/>Only available for `partition_tree` memtable. |
| `logging` | -- | -- | The logging options. |
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. |
| `logging.level` | String | `None` | The log level. Can be `info`/`debug`/`warn`/`error`. |
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
| `logging.otlp_endpoint` | String | `None` | The OTLP tracing endpoint. |
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
| `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing that will be sampled and exported.<br/>Valid range `[0, 1]`: 1 means all traces are sampled, 0 means none are sampled; the default value is 1.<br/>Ratios > 1 are treated as 1, and ratios < 0 are treated as 0. |
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
| `export_metrics` | -- | -- | The datanode can export its own metrics and send them to a Prometheus-compatible service (e.g. `greptimedb` itself) via the remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally; it is different from Prometheus scraping. |
| `export_metrics.enable` | Bool | `false` | Whether to enable exporting of metrics. |
| `export_metrics.write_interval` | String | `30s` | The interval at which metrics are exported. |
| `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommended for collecting the metrics the instance generates itself. |
| `export_metrics.self_import.db` | String | `None` | -- |
| `export_metrics.remote_write` | -- | -- | -- |
| `export_metrics.remote_write.url` | String | `""` | The URL to send metrics to, for example: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema` (see the sketch after this table). |
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers to send with Prometheus remote-write requests. |
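As referenced at the `export_metrics.remote_write.url` row above, here is a minimal sketch of a datanode pushing its metrics to a Prometheus remote-write endpoint. The URL is the example from the table, and the `Authorization` header is only a hypothetical illustration of how `headers` can be used.

```toml
# Minimal sketch: export datanode metrics via Prometheus remote-write.
# Assumptions: the example URL from the table is used, and the Authorization
# header below is a hypothetical placeholder.
[export_metrics]
enable = true
write_interval = "30s"

[export_metrics.remote_write]
url = "http://127.0.0.1:4000/v1/prometheus/write?db=information_schema"
headers = { Authorization = "Basic <credentials>" }
```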

View File

@@ -1,171 +1,430 @@
# Node running mode, see `standalone.example.toml`.
mode = "distributed"
# The datanode identifier, should be unique.
## The running mode of the datanode. It can be `standalone` or `distributed`.
mode = "standalone"
## The datanode identifier and should be unique in the cluster.
## +toml2docs:none-default
node_id = 42
# gRPC server address, "127.0.0.1:3001" by default.
rpc_addr = "127.0.0.1:3001"
# Hostname of this node.
rpc_hostname = "127.0.0.1"
# The number of gRPC server worker threads, 8 by default.
rpc_runtime_size = 8
# Start services after regions have obtained leases.
# It will block the datanode start if it can't receive leases in the heartbeat from metasrv.
## Start services after regions have obtained leases.
## It will block the datanode start if it can't receive leases in the heartbeat from metasrv.
require_lease_before_startup = false
# Initialize all regions in the background during the startup.
# By default, it provides services after all regions have been initialized.
## Initialize all regions in the background during the startup.
## By default, it provides services after all regions have been initialized.
init_regions_in_background = false
## The gRPC address of the datanode.
rpc_addr = "127.0.0.1:3001"
## The hostname of the datanode.
## +toml2docs:none-default
rpc_hostname = "127.0.0.1"
## The number of gRPC server worker threads.
rpc_runtime_size = 8
## The maximum receive message size for gRPC server.
rpc_max_recv_message_size = "512MB"
## The maximum send message size for gRPC server.
rpc_max_send_message_size = "512MB"
## Enable telemetry to collect anonymous usage data.
enable_telemetry = true
## The heartbeat options.
[heartbeat]
# Interval for sending heartbeat messages to the Metasrv, 3 seconds by default.
## Interval for sending heartbeat messages to the metasrv.
interval = "3s"
# Metasrv client options.
## Interval for retrying to send heartbeat messages to the metasrv.
retry_interval = "3s"
## The metasrv client options.
[meta_client]
# Metasrv address list.
## The addresses of the metasrv.
metasrv_addrs = ["127.0.0.1:3002"]
# Heartbeat timeout, 500 milliseconds by default.
heartbeat_timeout = "500ms"
# Operation timeout, 3 seconds by default.
## Operation timeout.
timeout = "3s"
# Connect server timeout, 1 second by default.
## Heartbeat timeout.
heartbeat_timeout = "500ms"
## DDL timeout.
ddl_timeout = "10s"
## Connect server timeout.
connect_timeout = "1s"
# `TCP_NODELAY` option for accepted connections, true by default.
## `TCP_NODELAY` option for accepted connections.
tcp_nodelay = true
# WAL options.
## The configuration about the cache of the metadata.
metadata_cache_max_capacity = 100000
## TTL of the metadata cache.
metadata_cache_ttl = "10m"
# TTI of the metadata cache.
metadata_cache_tti = "5m"
## The WAL options.
[wal]
## The provider of the WAL.
## - `raft_engine`: the wal is stored in the local file system by raft-engine.
## - `kafka`: it's remote wal that data is stored in Kafka.
provider = "raft_engine"
# Raft-engine wal options, see `standalone.example.toml`.
# dir = "/tmp/greptimedb/wal"
## The directory to store the WAL files.
## **It's only used when the provider is `raft_engine`**.
## +toml2docs:none-default
dir = "/tmp/greptimedb/wal"
## The size of the WAL segment file.
## **It's only used when the provider is `raft_engine`**.
file_size = "256MB"
## The threshold of the WAL size to trigger a flush.
## **It's only used when the provider is `raft_engine`**.
purge_threshold = "4GB"
## The interval to trigger a flush.
## **It's only used when the provider is `raft_engine`**.
purge_interval = "10m"
## The read batch size.
## **It's only used when the provider is `raft_engine`**.
read_batch_size = 128
## Whether to use sync write.
## **It's only used when the provider is `raft_engine`**.
sync_write = false
# Kafka wal options, see `standalone.example.toml`.
# broker_endpoints = ["127.0.0.1:9092"]
# Warning: Kafka has a default limit of 1MB per message in a topic.
# max_batch_size = "1MB"
# linger = "200ms"
# consumer_wait_timeout = "100ms"
# backoff_init = "500ms"
# backoff_max = "10s"
# backoff_base = 2
# backoff_deadline = "5mins"
## Whether to reuse logically truncated log files.
## **It's only used when the provider is `raft_engine`**.
enable_log_recycle = true
# Storage options, see `standalone.example.toml`.
## Whether to pre-create log files on start up.
## **It's only used when the provider is `raft_engine`**.
prefill_log_files = false
## Duration for fsyncing log files.
## **It's only used when the provider is `raft_engine`**.
sync_period = "10s"
## The Kafka broker endpoints.
## **It's only used when the provider is `kafka`**.
broker_endpoints = ["127.0.0.1:9092"]
## The max size of a single producer batch.
## Warning: Kafka has a default limit of 1MB per message in a topic.
## **It's only used when the provider is `kafka`**.
max_batch_size = "1MB"
## The linger duration of a kafka batch producer.
## **It's only used when the provider is `kafka`**.
linger = "200ms"
## The consumer wait timeout.
## **It's only used when the provider is `kafka`**.
consumer_wait_timeout = "100ms"
## The initial backoff delay.
## **It's only used when the provider is `kafka`**.
backoff_init = "500ms"
## The maximum backoff delay.
## **It's only used when the provider is `kafka`**.
backoff_max = "10s"
## The exponential backoff rate, i.e. next backoff = base * current backoff.
## **It's only used when the provider is `kafka`**.
backoff_base = 2
## The deadline of retries.
## **It's only used when the provider is `kafka`**.
backoff_deadline = "5mins"
# Example of using S3 as the storage.
# [storage]
# type = "S3"
# bucket = "greptimedb"
# root = "data"
# access_key_id = "test"
# secret_access_key = "123456"
# endpoint = "https://s3.amazonaws.com"
# region = "us-west-2"
# Example of using Oss as the storage.
# [storage]
# type = "Oss"
# bucket = "greptimedb"
# root = "data"
# access_key_id = "test"
# access_key_secret = "123456"
# endpoint = "https://oss-cn-hangzhou.aliyuncs.com"
# Example of using Azblob as the storage.
# [storage]
# type = "Azblob"
# container = "greptimedb"
# root = "data"
# account_name = "test"
# account_key = "123456"
# endpoint = "https://greptimedb.blob.core.windows.net"
# sas_token = ""
# Example of using Gcs as the storage.
# [storage]
# type = "Gcs"
# bucket = "greptimedb"
# root = "data"
# scope = "test"
# credential_path = "123456"
# endpoint = "https://storage.googleapis.com"
## The data storage options.
[storage]
# The working home directory.
## The working home directory.
data_home = "/tmp/greptimedb/"
# Storage type.
type = "File"
# TTL for all tables. Disabled by default.
# global_ttl = "7d"
# Cache configuration for object storage such as 'S3' etc.
# The local file cache directory
# cache_path = "/path/local_cache"
# The local file cache capacity in bytes.
# cache_capacity = "256MB"
## The storage type used to store the data.
## - `File`: the data is stored in the local file system.
## - `S3`: the data is stored in the S3 object storage.
## - `Gcs`: the data is stored in the Google Cloud Storage.
## - `Azblob`: the data is stored in the Azure Blob Storage.
## - `Oss`: the data is stored in the Aliyun OSS.
type = "File"
## Cache configuration for object storage such as 'S3' etc.
## The local file cache directory.
## +toml2docs:none-default
cache_path = "/path/local_cache"
## The local file cache capacity in bytes.
## +toml2docs:none-default
cache_capacity = "256MB"
## The S3 bucket name.
## **It's only used when the storage type is `S3`, `Oss` and `Gcs`**.
## +toml2docs:none-default
bucket = "greptimedb"
## The S3 data will be stored in the specified prefix, for example, `s3://${bucket}/${root}`.
## **It's only used when the storage type is `S3`, `Oss` and `Azblob`**.
## +toml2docs:none-default
root = "greptimedb"
## The access key id of the aws account.
## It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.
## **It's only used when the storage type is `S3` and `Oss`**.
## +toml2docs:none-default
access_key_id = "test"
## The secret access key of the aws account.
## It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.
## **It's only used when the storage type is `S3`**.
## +toml2docs:none-default
secret_access_key = "test"
## The secret access key of the aliyun account.
## **It's only used when the storage type is `Oss`**.
## +toml2docs:none-default
access_key_secret = "test"
## The account key of the azure account.
## **It's only used when the storage type is `Azblob`**.
## +toml2docs:none-default
account_name = "test"
## The account key of the azure account.
## **It's only used when the storage type is `Azblob`**.
## +toml2docs:none-default
account_key = "test"
## The scope of the google cloud storage.
## **It's only used when the storage type is `Gcs`**.
## +toml2docs:none-default
scope = "test"
## The credential path of the google cloud storage.
## **It's only used when the storage type is `Gcs`**.
## +toml2docs:none-default
credential_path = "test"
## The container of the azure account.
## **It's only used when the storage type is `Azblob`**.
## +toml2docs:none-default
container = "greptimedb"
## The sas token of the azure account.
## **It's only used when the storage type is `Azblob`**.
## +toml2docs:none-default
sas_token = ""
## The endpoint of the S3 service.
## **It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**.
## +toml2docs:none-default
endpoint = "https://s3.amazonaws.com"
## The region of the S3 service.
## **It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**.
## +toml2docs:none-default
region = "us-west-2"
# Custom storage options
#[[storage.providers]]
#type = "S3"
#[[storage.providers]]
#type = "Gcs"
# [[storage.providers]]
# type = "S3"
# [[storage.providers]]
# type = "Gcs"
# Mito engine options
## The region engine options. You can configure multiple region engines.
[[region_engine]]
## The Mito engine options.
[region_engine.mito]
# Number of region workers
## Number of region workers.
num_workers = 8
# Request channel size of each worker
## Request channel size of each worker.
worker_channel_size = 128
# Max batch size for a worker to handle requests
## Max batch size for a worker to handle requests.
worker_request_batch_size = 64
# Number of meta action updated to trigger a new checkpoint for the manifest
## Number of meta action updated to trigger a new checkpoint for the manifest.
manifest_checkpoint_distance = 10
# Whether to compress manifest and checkpoint file by gzip (default false).
## Whether to compress manifest and checkpoint file by gzip (default false).
compress_manifest = false
# Max number of running background jobs
## Max number of running background jobs
max_background_jobs = 4
# Interval to auto flush a region if it has not flushed yet.
## Interval to auto flush a region if it has not flushed yet.
auto_flush_interval = "1h"
# Global write buffer size for all regions. If not set, it's default to 1/8 of OS memory with a max limitation of 1GB.
## Global write buffer size for all regions. If not set, it's default to 1/8 of OS memory with a max limitation of 1GB.
global_write_buffer_size = "1GB"
# Global write buffer size threshold to reject write requests. If not set, it's default to 2 times of `global_write_buffer_size`
## Global write buffer size threshold to reject write requests. If not set, it's default to 2 times of `global_write_buffer_size`
global_write_buffer_reject_size = "2GB"
# Cache size for SST metadata. Setting it to 0 to disable the cache.
# If not set, it's default to 1/32 of OS memory with a max limitation of 128MB.
## Cache size for SST metadata. Setting it to 0 to disable the cache.
## If not set, it's default to 1/32 of OS memory with a max limitation of 128MB.
sst_meta_cache_size = "128MB"
# Cache size for vectors and arrow arrays. Setting it to 0 to disable the cache.
# If not set, it's default to 1/16 of OS memory with a max limitation of 512MB.
## Cache size for vectors and arrow arrays. Setting it to 0 to disable the cache.
## If not set, it's default to 1/16 of OS memory with a max limitation of 512MB.
vector_cache_size = "512MB"
# Cache size for pages of SST row groups. Setting it to 0 to disable the cache.
# If not set, it's default to 1/16 of OS memory with a max limitation of 512MB.
## Cache size for pages of SST row groups. Setting it to 0 to disable the cache.
## If not set, it's default to 1/16 of OS memory with a max limitation of 512MB.
page_cache_size = "512MB"
# Buffer size for SST writing.
## Buffer size for SST writing.
sst_write_buffer_size = "8MB"
# Parallelism to scan a region (default: 1/4 of cpu cores).
# - 0: using the default value (1/4 of cpu cores).
# - 1: scan in current thread.
# - n: scan in parallelism n.
## Parallelism to scan a region (default: 1/4 of cpu cores).
## - `0`: using the default value (1/4 of cpu cores).
## - `1`: scan in current thread.
## - `n`: scan in parallelism n.
scan_parallelism = 0
# Capacity of the channel to send data from parallel scan tasks to the main task (default 32).
## Capacity of the channel to send data from parallel scan tasks to the main task.
parallel_scan_channel_size = 32
# Whether to allow stale WAL entries read during replay.
## Whether to allow stale WAL entries read during replay.
allow_stale_entries = false
## The options for inverted index in Mito engine.
[region_engine.mito.inverted_index]
# Whether to create the index on flush.
# - "auto": automatically
# - "disable": never
## Whether to create the index on flush.
## - `auto`: automatically
## - `disable`: never
create_on_flush = "auto"
# Whether to create the index on compaction.
# - "auto": automatically
# - "disable": never
## Whether to create the index on compaction.
## - `auto`: automatically
## - `disable`: never
create_on_compaction = "auto"
# Whether to apply the index on query
# - "auto": automatically
# - "disable": never
## Whether to apply the index on query
## - `auto`: automatically
## - `disable`: never
apply_on_query = "auto"
# Memory threshold for performing an external sort during index creation.
# Setting to empty will disable external sorting, forcing all sorting operations to happen in memory.
## Memory threshold for performing an external sort during index creation.
## Setting to empty will disable external sorting, forcing all sorting operations to happen in memory.
mem_threshold_on_create = "64M"
# File system path to store intermediate files for external sorting (default `{data_home}/index_intermediate`).
## File system path to store intermediate files for external sorting (default `{data_home}/index_intermediate`).
intermediate_path = ""
[region_engine.mito.memtable]
# Memtable type.
# - "partition_tree": partition tree memtable
# - "time_series": time-series memtable (deprecated)
type = "partition_tree"
# The max number of keys in one shard.
## Memtable type.
## - `time_series`: time-series memtable
## - `partition_tree`: partition tree memtable (experimental)
type = "time_series"
## The max number of keys in one shard.
## Only available for `partition_tree` memtable.
index_max_keys_per_shard = 8192
# The max rows of data inside the actively writing buffer in one shard.
## The max rows of data inside the actively writing buffer in one shard.
## Only available for `partition_tree` memtable.
data_freeze_threshold = 32768
# Max dictionary bytes.
## Max dictionary bytes.
## Only available for `partition_tree` memtable.
fork_dictionary_bytes = "1GiB"
# Log options, see `standalone.example.toml`
# [logging]
# dir = "/tmp/greptimedb/logs"
# level = "info"
## The logging options.
[logging]
## The directory to store the log files.
dir = "/tmp/greptimedb/logs"
# Datanode export the metrics generated by itself
# encoded to Prometheus remote-write format
# and send to Prometheus remote-write compatible receiver (e.g. send to `greptimedb` itself)
# This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
# [export_metrics]
# whether enable export metrics, default is false
# enable = false
# The interval of export metrics
# write_interval = "30s"
# [export_metrics.remote_write]
# The url the metrics send to. The url is empty by default, url example: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`
# url = ""
# HTTP headers of Prometheus remote-write carry
# headers = {}
## The log level. Can be `info`/`debug`/`warn`/`error`.
## +toml2docs:none-default
level = "info"
## Enable OTLP tracing.
enable_otlp_tracing = false
## The OTLP tracing endpoint.
## +toml2docs:none-default
otlp_endpoint = ""
## Whether to append logs to stdout.
append_stdout = true
## The percentage of tracing will be sampled and exported.
## Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.
## ratio > 1 are treated as 1. Fractions < 0 are treated as 0
[logging.tracing_sample_ratio]
default_ratio = 1.0
## The datanode can export its metrics and send to Prometheus compatible service (e.g. send to `greptimedb` itself) from remote-write API.
## This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
[export_metrics]
## whether enable export metrics.
enable = false
## The interval of export metrics.
write_interval = "30s"
## For `standalone` mode, `self_import` is recommend to collect metrics generated by itself
[export_metrics.self_import]
## +toml2docs:none-default
db = "information_schema"
[export_metrics.remote_write]
## The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`.
url = ""
## HTTP headers of Prometheus remote-write carry.
headers = { }

View File

@@ -1,106 +1,188 @@
# Node running mode, see `standalone.example.toml`.
mode = "distributed"
# The default timezone of the server
# default_timezone = "UTC"
## The running mode of the datanode. It can be `standalone` or `distributed`.
mode = "standalone"
## The default timezone of the server.
## +toml2docs:none-default
default_timezone = "UTC"
## The heartbeat options.
[heartbeat]
# Interval for sending heartbeat task to the Metasrv, 5 seconds by default.
interval = "5s"
# Interval for retry sending heartbeat task, 5 seconds by default.
retry_interval = "5s"
## Interval for sending heartbeat messages to the metasrv.
interval = "18s"
# HTTP server options, see `standalone.example.toml`.
## Interval for retrying to send heartbeat messages to the metasrv.
retry_interval = "3s"
## The HTTP server options.
[http]
## The address to bind the HTTP server.
addr = "127.0.0.1:4000"
## HTTP request timeout.
timeout = "30s"
## HTTP request body limit.
## Support the following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
body_limit = "64MB"
# gRPC server options, see `standalone.example.toml`.
## The gRPC server options.
[grpc]
## The address to bind the gRPC server.
addr = "127.0.0.1:4001"
## The number of server worker threads.
runtime_size = 8
# MySQL server options, see `standalone.example.toml`.
## MySQL server options.
[mysql]
## Whether to enable.
enable = true
## The addr to bind the MySQL server.
addr = "127.0.0.1:4002"
## The number of server worker threads.
runtime_size = 2
# MySQL server TLS options, see `standalone.example.toml`.
# MySQL server TLS options.
[mysql.tls]
## TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html
## - `disable` (default value)
## - `prefer`
## - `require`
## - `verify-ca`
## - `verify-full`
mode = "disable"
## Certificate file path.
## +toml2docs:none-default
cert_path = ""
## Private key file path.
## +toml2docs:none-default
key_path = ""
## Watch for Certificate and key file change and auto reload
watch = false
# PostgresSQL server options, see `standalone.example.toml`.
## PostgresSQL server options.
[postgres]
## Whether to enable
enable = true
## The addr to bind the PostgresSQL server.
addr = "127.0.0.1:4003"
## The number of server worker threads.
runtime_size = 2
# PostgresSQL server TLS options, see `standalone.example.toml`.
## PostgresSQL server TLS options, see `mysql_options.tls` section.
[postgres.tls]
## TLS mode.
mode = "disable"
## Certificate file path.
## +toml2docs:none-default
cert_path = ""
## Private key file path.
## +toml2docs:none-default
key_path = ""
## Watch for Certificate and key file change and auto reload
watch = false
# OpenTSDB protocol options, see `standalone.example.toml`.
## OpenTSDB protocol options.
[opentsdb]
## Whether to enable OpenTSDB put in HTTP API.
enable = true
addr = "127.0.0.1:4242"
runtime_size = 2
# InfluxDB protocol options, see `standalone.example.toml`.
## InfluxDB protocol options.
[influxdb]
## Whether to enable InfluxDB protocol in HTTP API.
enable = true
# Prometheus remote storage options, see `standalone.example.toml`.
## Prometheus remote storage options
[prom_store]
## Whether to enable Prometheus remote write and read in HTTP API.
enable = true
# Whether to store the data from Prometheus remote write in metric engine.
# true by default
## Whether to store the data from Prometheus remote write in metric engine.
with_metric_engine = true
# Metasrv client options, see `datanode.example.toml`.
## The metasrv client options.
[meta_client]
## The addresses of the metasrv.
metasrv_addrs = ["127.0.0.1:3002"]
## Operation timeout.
timeout = "3s"
# DDL timeouts options.
## Heartbeat timeout.
heartbeat_timeout = "500ms"
## DDL timeout.
ddl_timeout = "10s"
## Connect server timeout.
connect_timeout = "1s"
## `TCP_NODELAY` option for accepted connections.
tcp_nodelay = true
# The configuration about the cache of the Metadata.
# default: 100000
## The configuration about the cache of the metadata.
metadata_cache_max_capacity = 100000
# default: 10m
## TTL of the metadata cache.
metadata_cache_ttl = "10m"
# default: 5m
# TTI of the metadata cache.
metadata_cache_tti = "5m"
# Log options, see `standalone.example.toml`
# [logging]
# dir = "/tmp/greptimedb/logs"
# level = "info"
# Datanode options.
## Datanode options.
[datanode]
# Datanode client options.
## Datanode client options.
[datanode.client]
timeout = "10s"
connect_timeout = "10s"
tcp_nodelay = true
# Frontend export the metrics generated by itself
# encoded to Prometheus remote-write format
# and send to Prometheus remote-write compatible receiver (e.g. send to `greptimedb` itself)
# This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
# [export_metrics]
# whether enable export metrics, default is false
# enable = false
# The interval of export metrics
# write_interval = "30s"
# for `frontend`, `self_import` is recommend to collect metrics generated by itself
# [export_metrics.self_import]
# db = "information_schema"
## The logging options.
[logging]
## The directory to store the log files.
dir = "/tmp/greptimedb/logs"
## The log level. Can be `info`/`debug`/`warn`/`error`.
## +toml2docs:none-default
level = "info"
## Enable OTLP tracing.
enable_otlp_tracing = false
## The OTLP tracing endpoint.
## +toml2docs:none-default
otlp_endpoint = ""
## Whether to append logs to stdout.
append_stdout = true
## The percentage of tracing will be sampled and exported.
## Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.
## ratio > 1 are treated as 1. Fractions < 0 are treated as 0
[logging.tracing_sample_ratio]
default_ratio = 1.0
## The datanode can export its metrics and send to Prometheus compatible service (e.g. send to `greptimedb` itself) from remote-write API.
## This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
[export_metrics]
## whether enable export metrics.
enable = false
## The interval of export metrics.
write_interval = "30s"
## For `standalone` mode, `self_import` is recommend to collect metrics generated by itself
[export_metrics.self_import]
## +toml2docs:none-default
db = "information_schema"
[export_metrics.remote_write]
## The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`.
url = ""
## HTTP headers of Prometheus remote-write carry.
headers = { }

View File

@@ -1,39 +1,44 @@
# The working home directory.
## The working home directory.
data_home = "/tmp/metasrv/"
# The bind address of metasrv, "127.0.0.1:3002" by default.
## The bind address of metasrv.
bind_addr = "127.0.0.1:3002"
# The communication server address for frontend and datanode to connect to metasrv, "127.0.0.1:3002" by default for localhost.
## The communication server address for frontend and datanode to connect to metasrv, "127.0.0.1:3002" by default for localhost.
server_addr = "127.0.0.1:3002"
# Etcd server address, "127.0.0.1:2379" by default.
## Etcd server address.
store_addr = "127.0.0.1:2379"
# Datanode selector type.
# - "lease_based" (default value).
# - "load_based"
# For details, please see "https://docs.greptime.com/developer-guide/metasrv/selector".
## Datanode selector type.
## - `lease_based` (default value).
## - `load_based`
## For details, please see "https://docs.greptime.com/developer-guide/metasrv/selector".
selector = "lease_based"
# Store data in memory, false by default.
## Store data in memory.
use_memory_store = false
# Whether to enable greptimedb telemetry, true by default.
## Whether to enable greptimedb telemetry.
enable_telemetry = true
# If it's not empty, the metasrv will store all data with this key prefix.
## If it's not empty, the metasrv will store all data with this key prefix.
store_key_prefix = ""
# Log options, see `standalone.example.toml`
# [logging]
# dir = "/tmp/greptimedb/logs"
# level = "info"
# Procedure storage options.
## Procedure storage options.
[procedure]
# Procedure max retry time.
## Procedure max retry time.
max_retry_times = 12
# Initial retry delay of procedures, increases exponentially
## Initial retry delay of procedures, increases exponentially
retry_delay = "500ms"
# Auto split large value
# GreptimeDB procedure uses etcd as the default metadata storage backend.
# The etcd the maximum size of any request is 1.5 MiB
# 1500KiB = 1536KiB (1.5MiB) - 36KiB (reserved size of key)
# Comments out the `max_metadata_value_size`, for don't split large value (no limit).
## Auto split large value
## GreptimeDB procedure uses etcd as the default metadata storage backend.
## The etcd the maximum size of any request is 1.5 MiB
## 1500KiB = 1536KiB (1.5MiB) - 36KiB (reserved size of key)
## Comments out the `max_metadata_value_size`, for don't split large value (no limit).
max_metadata_value_size = "1500KiB"
# Failure detectors options.
@@ -43,57 +48,96 @@ min_std_deviation = "100ms"
acceptable_heartbeat_pause = "3000ms"
first_heartbeat_estimate = "1000ms"
# # Datanode options.
# [datanode]
# # Datanode client options.
# [datanode.client_options]
# timeout = "10s"
# connect_timeout = "10s"
# tcp_nodelay = true
## Datanode options.
[datanode]
## Datanode client options.
[datanode.client]
timeout = "10s"
connect_timeout = "10s"
tcp_nodelay = true
[wal]
# Available wal providers:
# - "raft_engine" (default)
# - "kafka"
# - `raft_engine` (default): there're none raft-engine wal config since metasrv only involves in remote wal currently.
# - `kafka`: metasrv **have to be** configured with kafka wal config when using kafka wal provider in datanode.
provider = "raft_engine"
# There're none raft-engine wal config since meta srv only involves in remote wal currently.
# Kafka wal config.
# The broker endpoints of the Kafka cluster. ["127.0.0.1:9092"] by default.
# broker_endpoints = ["127.0.0.1:9092"]
# Number of topics to be created upon start.
# num_topics = 64
# Topic selector type.
# Available selector types:
# - "round_robin" (default)
# selector_type = "round_robin"
# A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`.
# topic_name_prefix = "greptimedb_wal_topic"
# Expected number of replicas of each partition.
# replication_factor = 1
# Above which a topic creation operation will be cancelled.
# create_topic_timeout = "30s"
# The initial backoff for kafka clients.
# backoff_init = "500ms"
# The maximum backoff for kafka clients.
# backoff_max = "10s"
# Exponential backoff rate, i.e. next backoff = base * current backoff.
# backoff_base = 2
# Stop reconnecting if the total wait time reaches the deadline. If this config is missing, the reconnecting won't terminate.
# backoff_deadline = "5mins"
# Metasrv export the metrics generated by itself
# encoded to Prometheus remote-write format
# and send to Prometheus remote-write compatible receiver (e.g. send to `greptimedb` itself)
# This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
# [export_metrics]
# whether enable export metrics, default is false
# enable = false
# The interval of export metrics
# write_interval = "30s"
# [export_metrics.remote_write]
# The url the metrics send to. The url is empty by default, url example: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`
# url = ""
# HTTP headers of Prometheus remote-write carry
# headers = {}
## The broker endpoints of the Kafka cluster.
broker_endpoints = ["127.0.0.1:9092"]
## Number of topics to be created upon start.
num_topics = 64
## Topic selector type.
## Available selector types:
## - `round_robin` (default)
selector_type = "round_robin"
## A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`.
topic_name_prefix = "greptimedb_wal_topic"
## Expected number of replicas of each partition.
replication_factor = 1
## Above which a topic creation operation will be cancelled.
create_topic_timeout = "30s"
## The initial backoff for kafka clients.
backoff_init = "500ms"
## The maximum backoff for kafka clients.
backoff_max = "10s"
## Exponential backoff rate, i.e. next backoff = base * current backoff.
backoff_base = 2
## Stop reconnecting if the total wait time reaches the deadline. If this config is missing, reconnecting will not terminate.
backoff_deadline = "5mins"
## The logging options.
[logging]
## The directory to store the log files.
dir = "/tmp/greptimedb/logs"
## The log level. Can be `info`/`debug`/`warn`/`error`.
## +toml2docs:none-default
level = "info"
## Enable OTLP tracing.
enable_otlp_tracing = false
## The OTLP tracing endpoint.
## +toml2docs:none-default
otlp_endpoint = ""
## Whether to append logs to stdout.
append_stdout = true
## The percentage of traces that will be sampled and exported.
## Valid range `[0, 1]`; 1 means all traces are sampled, 0 means no traces are sampled. The default value is 1.
## Ratios > 1 are treated as 1; fractions < 0 are treated as 0.
[logging.tracing_sample_ratio]
default_ratio = 1.0
## The datanode can export its metrics and send them to a Prometheus-compatible service (e.g. `greptimedb` itself) via the remote-write API.
## This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
[export_metrics]
## Whether to enable exporting of metrics.
enable = false
## The interval of export metrics.
write_interval = "30s"
## For `standalone` mode, `self_import` is recommended to collect metrics generated by itself.
[export_metrics.self_import]
## +toml2docs:none-default
db = "information_schema"
[export_metrics.remote_write]
## The URL to send the metrics to. For example: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`.
url = ""
## The HTTP headers carried by Prometheus remote-write requests.
headers = { }

View File

@@ -1,286 +1,473 @@
# Node running mode, "standalone" or "distributed".
## The running mode of the datanode. It can be `standalone` or `distributed`.
mode = "standalone"
# Whether to enable greptimedb telemetry, true by default.
enable_telemetry = true
# The default timezone of the server
# default_timezone = "UTC"
# HTTP server options.
## Enable telemetry to collect anonymous usage data.
enable_telemetry = true
## The default timezone of the server.
## +toml2docs:none-default
default_timezone = "UTC"
## The HTTP server options.
[http]
# Server address, "127.0.0.1:4000" by default.
## The address to bind the HTTP server.
addr = "127.0.0.1:4000"
# HTTP request timeout, 30s by default.
## HTTP request timeout.
timeout = "30s"
# HTTP request body limit, 64Mb by default.
# the following units are supported: B, KB, KiB, MB, MiB, GB, GiB, TB, TiB, PB, PiB
## HTTP request body limit.
## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
body_limit = "64MB"
# gRPC server options.
## The gRPC server options.
[grpc]
# Server address, "127.0.0.1:4001" by default.
## The address to bind the gRPC server.
addr = "127.0.0.1:4001"
# The number of server worker threads, 8 by default.
## The number of server worker threads.
runtime_size = 8
# MySQL server options.
## MySQL server options.
[mysql]
# Whether to enable
## Whether to enable.
enable = true
# Server address, "127.0.0.1:4002" by default.
## The address to bind the MySQL server.
addr = "127.0.0.1:4002"
# The number of server worker threads, 2 by default.
## The number of server worker threads.
runtime_size = 2
# MySQL server TLS options.
[mysql.tls]
# TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html
# - "disable" (default value)
# - "prefer"
# - "require"
# - "verify-ca"
# - "verify-full"
## TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html
## - `disable` (default value)
## - `prefer`
## - `require`
## - `verify-ca`
## - `verify-full`
mode = "disable"
# Certificate file path.
## Certificate file path.
## +toml2docs:none-default
cert_path = ""
# Private key file path.
## Private key file path.
## +toml2docs:none-default
key_path = ""
# Watch for Certificate and key file change and auto reload
## Watch for Certificate and key file change and auto reload
watch = false
# PostgresSQL server options.
## PostgreSQL server options.
[postgres]
# Whether to enable
## Whether to enable
enable = true
# Server address, "127.0.0.1:4003" by default.
## The address to bind the PostgreSQL server.
addr = "127.0.0.1:4003"
# The number of server worker threads, 2 by default.
## The number of server worker threads.
runtime_size = 2
# PostgresSQL server TLS options, see `[mysql_options.tls]` section.
## PostgreSQL server TLS options, see the `mysql.tls` section.
[postgres.tls]
# TLS mode.
## TLS mode.
mode = "disable"
# certificate file path.
## Certificate file path.
## +toml2docs:none-default
cert_path = ""
# private key file path.
## Private key file path.
## +toml2docs:none-default
key_path = ""
# Watch for Certificate and key file change and auto reload
## Watch for Certificate and key file change and auto reload
watch = false
# OpenTSDB protocol options.
## OpenTSDB protocol options.
[opentsdb]
# Whether to enable
## Whether to enable OpenTSDB put in HTTP API.
enable = true
# OpenTSDB telnet API server address, "127.0.0.1:4242" by default.
addr = "127.0.0.1:4242"
# The number of server worker threads, 2 by default.
runtime_size = 2
# InfluxDB protocol options.
## InfluxDB protocol options.
[influxdb]
# Whether to enable InfluxDB protocol in HTTP API, true by default.
## Whether to enable InfluxDB protocol in HTTP API.
enable = true
# Prometheus remote storage options
## Prometheus remote storage options
[prom_store]
# Whether to enable Prometheus remote write and read in HTTP API, true by default.
## Whether to enable Prometheus remote write and read in HTTP API.
enable = true
# Whether to store the data from Prometheus remote write in metric engine.
# true by default
## Whether to store the data from Prometheus remote write in metric engine.
with_metric_engine = true
## The WAL options.
[wal]
# Available wal providers:
# - "raft_engine" (default)
# - "kafka"
## The provider of the WAL.
## - `raft_engine`: the wal is stored in the local file system by raft-engine.
## - `kafka`: the remote WAL; data is stored in Kafka.
provider = "raft_engine"
# Raft-engine wal options.
# WAL data directory
# dir = "/tmp/greptimedb/wal"
# WAL file size in bytes.
## The directory to store the WAL files.
## **It's only used when the provider is `raft_engine`**.
## +toml2docs:none-default
dir = "/tmp/greptimedb/wal"
## The size of the WAL segment file.
## **It's only used when the provider is `raft_engine`**.
file_size = "256MB"
# WAL purge threshold.
## The threshold of the WAL size to trigger a flush.
## **It's only used when the provider is `raft_engine`**.
purge_threshold = "4GB"
# WAL purge interval in seconds.
## The interval to trigger a flush.
## **It's only used when the provider is `raft_engine`**.
purge_interval = "10m"
# WAL read batch size.
## The read batch size.
## **It's only used when the provider is `raft_engine`**.
read_batch_size = 128
# Whether to sync log file after every write.
## Whether to use sync write.
## **It's only used when the provider is `raft_engine`**.
sync_write = false
# Whether to reuse logically truncated log files.
## Whether to reuse logically truncated log files.
## **It's only used when the provider is `raft_engine`**.
enable_log_recycle = true
# Whether to pre-create log files on start up
## Whether to pre-create log files on start up.
## **It's only used when the provider is `raft_engine`**.
prefill_log_files = false
# Duration for fsyncing log files.
sync_period = "1000ms"
# Kafka wal options.
# The broker endpoints of the Kafka cluster. ["127.0.0.1:9092"] by default.
# broker_endpoints = ["127.0.0.1:9092"]
## Duration for fsyncing log files.
## **It's only used when the provider is `raft_engine`**.
sync_period = "10s"
# Number of topics to be created upon start.
# num_topics = 64
# Topic selector type.
# Available selector types:
# - "round_robin" (default)
# selector_type = "round_robin"
# The prefix of topic name.
# topic_name_prefix = "greptimedb_wal_topic"
# The number of replicas of each partition.
# Warning: the replication factor must be positive and must not be greater than the number of broker endpoints.
# replication_factor = 1
## The Kafka broker endpoints.
## **It's only used when the provider is `kafka`**.
broker_endpoints = ["127.0.0.1:9092"]
# The max size of a single producer batch.
# Warning: Kafka has a default limit of 1MB per message in a topic.
# max_batch_size = "1MB"
# The linger duration.
# linger = "200ms"
# The consumer wait timeout.
# consumer_wait_timeout = "100ms"
# Create topic timeout.
# create_topic_timeout = "30s"
## The max size of a single producer batch.
## Warning: Kafka has a default limit of 1MB per message in a topic.
## **It's only used when the provider is `kafka`**.
max_batch_size = "1MB"
# The initial backoff delay.
# backoff_init = "500ms"
# The maximum backoff delay.
# backoff_max = "10s"
# Exponential backoff rate, i.e. next backoff = base * current backoff.
# backoff_base = 2
# The deadline of retries.
# backoff_deadline = "5mins"
## The linger duration of a kafka batch producer.
## **It's only used when the provider is `kafka`**.
linger = "200ms"
# Metadata storage options.
## The consumer wait timeout.
## **It's only used when the provider is `kafka`**.
consumer_wait_timeout = "100ms"
## The initial backoff delay.
## **It's only used when the provider is `kafka`**.
backoff_init = "500ms"
## The maximum backoff delay.
## **It's only used when the provider is `kafka`**.
backoff_max = "10s"
## The exponential backoff rate, i.e. next backoff = base * current backoff.
## **It's only used when the provider is `kafka`**.
backoff_base = 2
## The deadline of retries.
## **It's only used when the provider is `kafka`**.
backoff_deadline = "5mins"
## Metadata storage options.
[metadata_store]
# Kv file size in bytes.
## Kv file size in bytes.
file_size = "256MB"
# Kv purge threshold.
## Kv purge threshold.
purge_threshold = "4GB"
# Procedure storage options.
## Procedure storage options.
[procedure]
# Procedure max retry time.
## Procedure max retry time.
max_retry_times = 3
# Initial retry delay of procedures, increases exponentially
## Initial retry delay of procedures, increases exponentially
retry_delay = "500ms"
# Storage options.
# Example of using S3 as the storage.
# [storage]
# type = "S3"
# bucket = "greptimedb"
# root = "data"
# access_key_id = "test"
# secret_access_key = "123456"
# endpoint = "https://s3.amazonaws.com"
# region = "us-west-2"
# Example of using Oss as the storage.
# [storage]
# type = "Oss"
# bucket = "greptimedb"
# root = "data"
# access_key_id = "test"
# access_key_secret = "123456"
# endpoint = "https://oss-cn-hangzhou.aliyuncs.com"
# Example of using Azblob as the storage.
# [storage]
# type = "Azblob"
# container = "greptimedb"
# root = "data"
# account_name = "test"
# account_key = "123456"
# endpoint = "https://greptimedb.blob.core.windows.net"
# sas_token = ""
# Example of using Gcs as the storage.
# [storage]
# type = "Gcs"
# bucket = "greptimedb"
# root = "data"
# scope = "test"
# credential_path = "123456"
# endpoint = "https://storage.googleapis.com"
## The data storage options.
[storage]
# The working home directory.
## The working home directory.
data_home = "/tmp/greptimedb/"
# Storage type.
## The storage type used to store the data.
## - `File`: the data is stored in the local file system.
## - `S3`: the data is stored in the S3 object storage.
## - `Gcs`: the data is stored in the Google Cloud Storage.
## - `Azblob`: the data is stored in the Azure Blob Storage.
## - `Oss`: the data is stored in the Aliyun OSS.
type = "File"
# TTL for all tables. Disabled by default.
# global_ttl = "7d"
# Cache configuration for object storage such as 'S3' etc.
# cache_path = "/path/local_cache"
# The local file cache capacity in bytes.
# cache_capacity = "256MB"
## Cache configuration for object storage such as 'S3' etc.
## The local file cache directory.
## +toml2docs:none-default
cache_path = "/path/local_cache"
## The local file cache capacity in bytes.
## +toml2docs:none-default
cache_capacity = "256MB"
## The S3 bucket name.
## **It's only used when the storage type is `S3`, `Oss` and `Gcs`**.
## +toml2docs:none-default
bucket = "greptimedb"
## The S3 data will be stored in the specified prefix, for example, `s3://${bucket}/${root}`.
## **It's only used when the storage type is `S3`, `Oss` and `Azblob`**.
## +toml2docs:none-default
root = "greptimedb"
## The access key id of the aws account.
## It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.
## **It's only used when the storage type is `S3` and `Oss`**.
## +toml2docs:none-default
access_key_id = "test"
## The secret access key of the aws account.
## It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.
## **It's only used when the storage type is `S3`**.
## +toml2docs:none-default
secret_access_key = "test"
## The secret access key of the aliyun account.
## **It's only used when the storage type is `Oss`**.
## +toml2docs:none-default
access_key_secret = "test"
## The account name of the azure account.
## **It's only used when the storage type is `Azblob`**.
## +toml2docs:none-default
account_name = "test"
## The account key of the azure account.
## **It's only used when the storage type is `Azblob`**.
## +toml2docs:none-default
account_key = "test"
## The scope of the google cloud storage.
## **It's only used when the storage type is `Gcs`**.
## +toml2docs:none-default
scope = "test"
## The credential path of the google cloud storage.
## **It's only used when the storage type is `Gcs`**.
## +toml2docs:none-default
credential_path = "test"
## The container of the azure account.
## **It's only used when the storage type is `Azblob`**.
## +toml2docs:none-default
container = "greptimedb"
## The sas token of the azure account.
## **It's only used when the storage type is `Azblob`**.
## +toml2docs:none-default
sas_token = ""
## The endpoint of the S3 service.
## **It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**.
## +toml2docs:none-default
endpoint = "https://s3.amazonaws.com"
## The region of the S3 service.
## **It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**.
## +toml2docs:none-default
region = "us-west-2"
# Custom storage options
#[[storage.providers]]
#type = "S3"
#[[storage.providers]]
#type = "Gcs"
# [[storage.providers]]
# type = "S3"
# [[storage.providers]]
# type = "Gcs"
# Mito engine options
## The region engine options. You can configure multiple region engines.
[[region_engine]]
## The Mito engine options.
[region_engine.mito]
# Number of region workers
## Number of region workers.
num_workers = 8
# Request channel size of each worker
## Request channel size of each worker.
worker_channel_size = 128
# Max batch size for a worker to handle requests
## Max batch size for a worker to handle requests.
worker_request_batch_size = 64
# Number of meta action updated to trigger a new checkpoint for the manifest
## Number of meta action updated to trigger a new checkpoint for the manifest.
manifest_checkpoint_distance = 10
# Whether to compress manifest and checkpoint file by gzip (default false).
## Whether to compress manifest and checkpoint file by gzip (default false).
compress_manifest = false
# Max number of running background jobs
## Max number of running background jobs
max_background_jobs = 4
# Interval to auto flush a region if it has not flushed yet.
## Interval to auto flush a region if it has not flushed yet.
auto_flush_interval = "1h"
# Global write buffer size for all regions. If not set, it's default to 1/8 of OS memory with a max limitation of 1GB.
## Global write buffer size for all regions. If not set, it defaults to 1/8 of OS memory with a maximum limit of 1GB.
global_write_buffer_size = "1GB"
# Global write buffer size threshold to reject write requests. If not set, it's default to 2 times of `global_write_buffer_size`
## Global write buffer size threshold to reject write requests. If not set, it defaults to twice the `global_write_buffer_size`.
global_write_buffer_reject_size = "2GB"
# Cache size for SST metadata. Setting it to 0 to disable the cache.
# If not set, it's default to 1/32 of OS memory with a max limitation of 128MB.
## Cache size for SST metadata. Set it to 0 to disable the cache.
## If not set, it defaults to 1/32 of OS memory with a maximum limit of 128MB.
sst_meta_cache_size = "128MB"
# Cache size for vectors and arrow arrays. Setting it to 0 to disable the cache.
# If not set, it's default to 1/16 of OS memory with a max limitation of 512MB.
## Cache size for vectors and arrow arrays. Set it to 0 to disable the cache.
## If not set, it defaults to 1/16 of OS memory with a maximum limit of 512MB.
vector_cache_size = "512MB"
# Cache size for pages of SST row groups. Setting it to 0 to disable the cache.
# If not set, it's default to 1/16 of OS memory with a max limitation of 512MB.
## Cache size for pages of SST row groups. Set it to 0 to disable the cache.
## If not set, it defaults to 1/16 of OS memory with a maximum limit of 512MB.
page_cache_size = "512MB"
# Buffer size for SST writing.
## Buffer size for SST writing.
sst_write_buffer_size = "8MB"
# Parallelism to scan a region (default: 1/4 of cpu cores).
# - 0: using the default value (1/4 of cpu cores).
# - 1: scan in current thread.
# - n: scan in parallelism n.
## Parallelism to scan a region (default: 1/4 of cpu cores).
## - `0`: use the default value (1/4 of cpu cores).
## - `1`: scan in the current thread.
## - `n`: scan with parallelism n.
scan_parallelism = 0
# Capacity of the channel to send data from parallel scan tasks to the main task (default 32).
## Capacity of the channel to send data from parallel scan tasks to the main task.
parallel_scan_channel_size = 32
# Whether to allow stale WAL entries read during replay.
## Whether to allow stale WAL entries read during replay.
allow_stale_entries = false
## The options for inverted index in Mito engine.
[region_engine.mito.inverted_index]
# Whether to create the index on flush.
# - "auto": automatically
# - "disable": never
## Whether to create the index on flush.
## - `auto`: automatically
## - `disable`: never
create_on_flush = "auto"
# Whether to create the index on compaction.
# - "auto": automatically
# - "disable": never
## Whether to create the index on compaction.
## - `auto`: automatically
## - `disable`: never
create_on_compaction = "auto"
# Whether to apply the index on query
# - "auto": automatically
# - "disable": never
## Whether to apply the index on query
## - `auto`: automatically
## - `disable`: never
apply_on_query = "auto"
# Memory threshold for performing an external sort during index creation.
# Setting to empty will disable external sorting, forcing all sorting operations to happen in memory.
## Memory threshold for performing an external sort during index creation.
## Setting to empty will disable external sorting, forcing all sorting operations to happen in memory.
mem_threshold_on_create = "64M"
# File system path to store intermediate files for external sorting (default `{data_home}/index_intermediate`).
## File system path to store intermediate files for external sorting (default `{data_home}/index_intermediate`).
intermediate_path = ""
[region_engine.mito.memtable]
# Memtable type.
# - "partition_tree": partition tree memtable
# - "time_series": time-series memtable (deprecated)
type = "partition_tree"
# The max number of keys in one shard.
## Memtable type.
## - `time_series`: time-series memtable
## - `partition_tree`: partition tree memtable (experimental)
type = "time_series"
## The max number of keys in one shard.
## Only available for `partition_tree` memtable.
index_max_keys_per_shard = 8192
# The max rows of data inside the actively writing buffer in one shard.
## The max rows of data inside the actively writing buffer in one shard.
## Only available for `partition_tree` memtable.
data_freeze_threshold = 32768
# Max dictionary bytes.
## Max dictionary bytes.
## Only available for `partition_tree` memtable.
fork_dictionary_bytes = "1GiB"
# Log options
# [logging]
# Specify logs directory.
# dir = "/tmp/greptimedb/logs"
# Specify the log level [info | debug | error | warn]
# level = "info"
# whether enable tracing, default is false
# enable_otlp_tracing = false
# tracing exporter endpoint with format `ip:port`, we use grpc oltp as exporter, default endpoint is `localhost:4317`
# otlp_endpoint = "localhost:4317"
# Whether to append logs to stdout. Defaults to true.
# append_stdout = true
# The percentage of tracing will be sampled and exported. Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1. ratio > 1 are treated as 1. Fractions < 0 are treated as 0
# [logging.tracing_sample_ratio]
# default_ratio = 0.0
## The logging options.
[logging]
## The directory to store the log files.
dir = "/tmp/greptimedb/logs"
# Standalone export the metrics generated by itself
# encoded to Prometheus remote-write format
# and send to Prometheus remote-write compatible receiver (e.g. send to `greptimedb` itself)
# This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
# [export_metrics]
# whether enable export metrics, default is false
# enable = false
# The interval of export metrics
# write_interval = "30s"
# for `standalone`, `self_import` is recommend to collect metrics generated by itself
# [export_metrics.self_import]
# db = "information_schema"
## The log level. Can be `info`/`debug`/`warn`/`error`.
## +toml2docs:none-default
level = "info"
## Enable OTLP tracing.
enable_otlp_tracing = false
## The OTLP tracing endpoint.
## +toml2docs:none-default
otlp_endpoint = ""
## Whether to append logs to stdout.
append_stdout = true
## The percentage of traces that will be sampled and exported.
## Valid range `[0, 1]`; 1 means all traces are sampled, 0 means no traces are sampled. The default value is 1.
## Ratios > 1 are treated as 1; fractions < 0 are treated as 0.
[logging.tracing_sample_ratio]
default_ratio = 1.0
## The datanode can export its metrics and send them to a Prometheus-compatible service (e.g. `greptimedb` itself) via the remote-write API.
## This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
[export_metrics]
## Whether to enable exporting of metrics.
enable = false
## The interval of export metrics.
write_interval = "30s"
## For `standalone` mode, `self_import` is recommended to collect metrics generated by itself.
[export_metrics.self_import]
## +toml2docs:none-default
db = "information_schema"
[export_metrics.remote_write]
## The URL to send the metrics to. For example: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`.
url = ""
## The HTTP headers carried by Prometheus remote-write requests.
headers = { }

2
cyborg/.gitignore vendored Normal file
View File

@@ -0,0 +1,2 @@
node_modules
.env

View File

@@ -0,0 +1,79 @@
/*
* Copyright 2023 Greptime Team
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import * as core from '@actions/core'
import {handleError, obtainClient} from "@/common";
import {context} from "@actions/github";
import {PullRequestEvent} from "@octokit/webhooks-types";
import {Options, sync as conventionalCommitsParser} from 'conventional-commits-parser';
import conventionalCommitTypes from 'conventional-commit-types';
import _ from "lodash";
const defaultTypes = Object.keys(conventionalCommitTypes.types)
const breakingChangeLabel = "breaking-change"
// These options are copied from [1].
// [1] https://github.com/conventional-changelog/conventional-changelog/blob/3f60b464/packages/conventional-changelog-conventionalcommits/src/parser.js
export const parserOpts: Options = {
headerPattern: /^(\w*)(?:\((.*)\))?!?: (.*)$/,
breakingHeaderPattern: /^(\w*)(?:\((.*)\))?!: (.*)$/,
headerCorrespondence: [
'type',
'scope',
'subject'
],
noteKeywords: ['BREAKING CHANGE', 'BREAKING-CHANGE'],
revertPattern: /^(?:Revert|revert:)\s"?([\s\S]+?)"?\s*This reverts commit (\w*)\./i,
revertCorrespondence: ['header', 'hash'],
issuePrefixes: ['#']
}
async function main() {
if (!context.payload.pull_request) {
throw new Error(`Only pull request event supported. ${context.eventName} is unsupported.`)
}
const client = obtainClient("GITHUB_TOKEN")
const payload = context.payload as PullRequestEvent
const { owner, repo, number } = {
owner: payload.pull_request.base.user.login,
repo: payload.pull_request.base.repo.name,
number: payload.pull_request.number,
}
const { data: pull_request } = await client.rest.pulls.get({
owner, repo, pull_number: number,
})
const commit = conventionalCommitsParser(pull_request.title, parserOpts)
core.info(`Receive commit: ${JSON.stringify(commit)}`)
if (!commit.type) {
throw Error(`Malformed commit: ${JSON.stringify(commit)}`)
}
if (!defaultTypes.includes(commit.type)) {
throw Error(`Unexpected type ${JSON.stringify(commit.type)} of commit: ${JSON.stringify(commit)}`)
}
const breakingChanges = _.filter(commit.notes, _.matches({ title: 'BREAKING CHANGE'}))
if (breakingChanges.length > 0) {
await client.rest.issues.addLabels({
owner, repo, issue_number: number, labels: [breakingChangeLabel]
})
}
}
main().catch(handleError)

73
cyborg/bin/schedule.ts Normal file
View File

@@ -0,0 +1,73 @@
/*
* Copyright 2023 Greptime Team
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import * as core from '@actions/core'
import {GitHub} from "@actions/github/lib/utils"
import _ from "lodash";
import dayjs from "dayjs";
import {handleError, obtainClient} from "@/common";
async function main() {
const client = obtainClient("GITHUB_TOKEN")
await unassign(client)
}
async function unassign(client: InstanceType<typeof GitHub>) {
const owner = "GreptimeTeam"
const repo = "greptimedb"
const dt = dayjs().subtract(14, 'days');
core.info(`Open issues updated before ${dt.toISOString()} will be considered stale.`)
const members = await client.paginate(client.rest.repos.listCollaborators, {
owner,
repo,
permission: "push",
per_page: 100
}).then((members) => members.map((member) => member.login))
core.info(`Members (${members.length}): ${members}`)
const issues = await client.paginate(client.rest.issues.listForRepo, {
owner,
repo,
state: "open",
sort: "created",
direction: "asc",
per_page: 100
})
for (const issue of issues) {
let assignees = [];
if (issue.assignee) {
assignees.push(issue.assignee.login)
}
for (const assignee of issue.assignees) {
assignees.push(assignee.login)
}
assignees = _.uniq(assignees)
assignees = _.difference(assignees, members)
if (assignees.length > 0 && dayjs(issue.updated_at).isBefore(dt)) {
core.info(`Assignees ${assignees} of issue ${issue.number} will be unassigned.`)
await client.rest.issues.removeAssignees({
owner,
repo,
issue_number: issue.number,
assignees: assignees,
})
}
}
}
main().catch(handleError)

25
cyborg/package.json Normal file
View File

@@ -0,0 +1,25 @@
{
"name": "cyborg",
"version": "1.0.0",
"description": "Automator for GreptimeDB Repository Management",
"private": true,
"packageManager": "pnpm@8.15.5",
"dependencies": {
"@actions/core": "^1.10.1",
"@actions/github": "^6.0.0",
"@octokit/webhooks-types": "^7.5.1",
"conventional-commit-types": "^3.0.0",
"conventional-commits-parser": "^5.0.0",
"dayjs": "^1.11.11",
"dotenv": "^16.4.5",
"lodash": "^4.17.21"
},
"devDependencies": {
"@types/conventional-commits-parser": "^5.0.0",
"@types/lodash": "^4.17.0",
"@types/node": "^20.12.7",
"tsconfig-paths": "^4.2.0",
"tsx": "^4.8.2",
"typescript": "^5.4.5"
}
}

602
cyborg/pnpm-lock.yaml generated Normal file
View File

@@ -0,0 +1,602 @@
lockfileVersion: '6.0'
settings:
autoInstallPeers: true
excludeLinksFromLockfile: false
dependencies:
'@actions/core':
specifier: ^1.10.1
version: 1.10.1
'@actions/github':
specifier: ^6.0.0
version: 6.0.0
'@octokit/webhooks-types':
specifier: ^7.5.1
version: 7.5.1
conventional-commit-types:
specifier: ^3.0.0
version: 3.0.0
conventional-commits-parser:
specifier: ^5.0.0
version: 5.0.0
dayjs:
specifier: ^1.11.11
version: 1.11.11
dotenv:
specifier: ^16.4.5
version: 16.4.5
lodash:
specifier: ^4.17.21
version: 4.17.21
devDependencies:
'@types/conventional-commits-parser':
specifier: ^5.0.0
version: 5.0.0
'@types/lodash':
specifier: ^4.17.0
version: 4.17.0
'@types/node':
specifier: ^20.12.7
version: 20.12.7
tsconfig-paths:
specifier: ^4.2.0
version: 4.2.0
tsx:
specifier: ^4.8.2
version: 4.8.2
typescript:
specifier: ^5.4.5
version: 5.4.5
packages:
/@actions/core@1.10.1:
resolution: {integrity: sha512-3lBR9EDAY+iYIpTnTIXmWcNbX3T2kCkAEQGIQx4NVQ0575nk2k3GRZDTPQG+vVtS2izSLmINlxXf0uLtnrTP+g==}
dependencies:
'@actions/http-client': 2.2.1
uuid: 8.3.2
dev: false
/@actions/github@6.0.0:
resolution: {integrity: sha512-alScpSVnYmjNEXboZjarjukQEzgCRmjMv6Xj47fsdnqGS73bjJNDpiiXmp8jr0UZLdUB6d9jW63IcmddUP+l0g==}
dependencies:
'@actions/http-client': 2.2.1
'@octokit/core': 5.2.0
'@octokit/plugin-paginate-rest': 9.2.1(@octokit/core@5.2.0)
'@octokit/plugin-rest-endpoint-methods': 10.4.1(@octokit/core@5.2.0)
dev: false
/@actions/http-client@2.2.1:
resolution: {integrity: sha512-KhC/cZsq7f8I4LfZSJKgCvEwfkE8o1538VoBeoGzokVLLnbFDEAdFD3UhoMklxo2un9NJVBdANOresx7vTHlHw==}
dependencies:
tunnel: 0.0.6
undici: 5.28.4
dev: false
/@esbuild/aix-ppc64@0.20.2:
resolution: {integrity: sha512-D+EBOJHXdNZcLJRBkhENNG8Wji2kgc9AZ9KiPr1JuZjsNtyHzrsfLRrY0tk2H2aoFu6RANO1y1iPPUCDYWkb5g==}
engines: {node: '>=12'}
cpu: [ppc64]
os: [aix]
requiresBuild: true
dev: true
optional: true
/@esbuild/android-arm64@0.20.2:
resolution: {integrity: sha512-mRzjLacRtl/tWU0SvD8lUEwb61yP9cqQo6noDZP/O8VkwafSYwZ4yWy24kan8jE/IMERpYncRt2dw438LP3Xmg==}
engines: {node: '>=12'}
cpu: [arm64]
os: [android]
requiresBuild: true
dev: true
optional: true
/@esbuild/android-arm@0.20.2:
resolution: {integrity: sha512-t98Ra6pw2VaDhqNWO2Oph2LXbz/EJcnLmKLGBJwEwXX/JAN83Fym1rU8l0JUWK6HkIbWONCSSatf4sf2NBRx/w==}
engines: {node: '>=12'}
cpu: [arm]
os: [android]
requiresBuild: true
dev: true
optional: true
/@esbuild/android-x64@0.20.2:
resolution: {integrity: sha512-btzExgV+/lMGDDa194CcUQm53ncxzeBrWJcncOBxuC6ndBkKxnHdFJn86mCIgTELsooUmwUm9FkhSp5HYu00Rg==}
engines: {node: '>=12'}
cpu: [x64]
os: [android]
requiresBuild: true
dev: true
optional: true
/@esbuild/darwin-arm64@0.20.2:
resolution: {integrity: sha512-4J6IRT+10J3aJH3l1yzEg9y3wkTDgDk7TSDFX+wKFiWjqWp/iCfLIYzGyasx9l0SAFPT1HwSCR+0w/h1ES/MjA==}
engines: {node: '>=12'}
cpu: [arm64]
os: [darwin]
requiresBuild: true
dev: true
optional: true
/@esbuild/darwin-x64@0.20.2:
resolution: {integrity: sha512-tBcXp9KNphnNH0dfhv8KYkZhjc+H3XBkF5DKtswJblV7KlT9EI2+jeA8DgBjp908WEuYll6pF+UStUCfEpdysA==}
engines: {node: '>=12'}
cpu: [x64]
os: [darwin]
requiresBuild: true
dev: true
optional: true
/@esbuild/freebsd-arm64@0.20.2:
resolution: {integrity: sha512-d3qI41G4SuLiCGCFGUrKsSeTXyWG6yem1KcGZVS+3FYlYhtNoNgYrWcvkOoaqMhwXSMrZRl69ArHsGJ9mYdbbw==}
engines: {node: '>=12'}
cpu: [arm64]
os: [freebsd]
requiresBuild: true
dev: true
optional: true
/@esbuild/freebsd-x64@0.20.2:
resolution: {integrity: sha512-d+DipyvHRuqEeM5zDivKV1KuXn9WeRX6vqSqIDgwIfPQtwMP4jaDsQsDncjTDDsExT4lR/91OLjRo8bmC1e+Cw==}
engines: {node: '>=12'}
cpu: [x64]
os: [freebsd]
requiresBuild: true
dev: true
optional: true
/@esbuild/linux-arm64@0.20.2:
resolution: {integrity: sha512-9pb6rBjGvTFNira2FLIWqDk/uaf42sSyLE8j1rnUpuzsODBq7FvpwHYZxQ/It/8b+QOS1RYfqgGFNLRI+qlq2A==}
engines: {node: '>=12'}
cpu: [arm64]
os: [linux]
requiresBuild: true
dev: true
optional: true
/@esbuild/linux-arm@0.20.2:
resolution: {integrity: sha512-VhLPeR8HTMPccbuWWcEUD1Az68TqaTYyj6nfE4QByZIQEQVWBB8vup8PpR7y1QHL3CpcF6xd5WVBU/+SBEvGTg==}
engines: {node: '>=12'}
cpu: [arm]
os: [linux]
requiresBuild: true
dev: true
optional: true
/@esbuild/linux-ia32@0.20.2:
resolution: {integrity: sha512-o10utieEkNPFDZFQm9CoP7Tvb33UutoJqg3qKf1PWVeeJhJw0Q347PxMvBgVVFgouYLGIhFYG0UGdBumROyiig==}
engines: {node: '>=12'}
cpu: [ia32]
os: [linux]
requiresBuild: true
dev: true
optional: true
/@esbuild/linux-loong64@0.20.2:
resolution: {integrity: sha512-PR7sp6R/UC4CFVomVINKJ80pMFlfDfMQMYynX7t1tNTeivQ6XdX5r2XovMmha/VjR1YN/HgHWsVcTRIMkymrgQ==}
engines: {node: '>=12'}
cpu: [loong64]
os: [linux]
requiresBuild: true
dev: true
optional: true
/@esbuild/linux-mips64el@0.20.2:
resolution: {integrity: sha512-4BlTqeutE/KnOiTG5Y6Sb/Hw6hsBOZapOVF6njAESHInhlQAghVVZL1ZpIctBOoTFbQyGW+LsVYZ8lSSB3wkjA==}
engines: {node: '>=12'}
cpu: [mips64el]
os: [linux]
requiresBuild: true
dev: true
optional: true
/@esbuild/linux-ppc64@0.20.2:
resolution: {integrity: sha512-rD3KsaDprDcfajSKdn25ooz5J5/fWBylaaXkuotBDGnMnDP1Uv5DLAN/45qfnf3JDYyJv/ytGHQaziHUdyzaAg==}
engines: {node: '>=12'}
cpu: [ppc64]
os: [linux]
requiresBuild: true
dev: true
optional: true
/@esbuild/linux-riscv64@0.20.2:
resolution: {integrity: sha512-snwmBKacKmwTMmhLlz/3aH1Q9T8v45bKYGE3j26TsaOVtjIag4wLfWSiZykXzXuE1kbCE+zJRmwp+ZbIHinnVg==}
engines: {node: '>=12'}
cpu: [riscv64]
os: [linux]
requiresBuild: true
dev: true
optional: true
/@esbuild/linux-s390x@0.20.2:
resolution: {integrity: sha512-wcWISOobRWNm3cezm5HOZcYz1sKoHLd8VL1dl309DiixxVFoFe/o8HnwuIwn6sXre88Nwj+VwZUvJf4AFxkyrQ==}
engines: {node: '>=12'}
cpu: [s390x]
os: [linux]
requiresBuild: true
dev: true
optional: true
/@esbuild/linux-x64@0.20.2:
resolution: {integrity: sha512-1MdwI6OOTsfQfek8sLwgyjOXAu+wKhLEoaOLTjbijk6E2WONYpH9ZU2mNtR+lZ2B4uwr+usqGuVfFT9tMtGvGw==}
engines: {node: '>=12'}
cpu: [x64]
os: [linux]
requiresBuild: true
dev: true
optional: true
/@esbuild/netbsd-x64@0.20.2:
resolution: {integrity: sha512-K8/DhBxcVQkzYc43yJXDSyjlFeHQJBiowJ0uVL6Tor3jGQfSGHNNJcWxNbOI8v5k82prYqzPuwkzHt3J1T1iZQ==}
engines: {node: '>=12'}
cpu: [x64]
os: [netbsd]
requiresBuild: true
dev: true
optional: true
/@esbuild/openbsd-x64@0.20.2:
resolution: {integrity: sha512-eMpKlV0SThJmmJgiVyN9jTPJ2VBPquf6Kt/nAoo6DgHAoN57K15ZghiHaMvqjCye/uU4X5u3YSMgVBI1h3vKrQ==}
engines: {node: '>=12'}
cpu: [x64]
os: [openbsd]
requiresBuild: true
dev: true
optional: true
/@esbuild/sunos-x64@0.20.2:
resolution: {integrity: sha512-2UyFtRC6cXLyejf/YEld4Hajo7UHILetzE1vsRcGL3earZEW77JxrFjH4Ez2qaTiEfMgAXxfAZCm1fvM/G/o8w==}
engines: {node: '>=12'}
cpu: [x64]
os: [sunos]
requiresBuild: true
dev: true
optional: true
/@esbuild/win32-arm64@0.20.2:
resolution: {integrity: sha512-GRibxoawM9ZCnDxnP3usoUDO9vUkpAxIIZ6GQI+IlVmr5kP3zUq+l17xELTHMWTWzjxa2guPNyrpq1GWmPvcGQ==}
engines: {node: '>=12'}
cpu: [arm64]
os: [win32]
requiresBuild: true
dev: true
optional: true
/@esbuild/win32-ia32@0.20.2:
resolution: {integrity: sha512-HfLOfn9YWmkSKRQqovpnITazdtquEW8/SoHW7pWpuEeguaZI4QnCRW6b+oZTztdBnZOS2hqJ6im/D5cPzBTTlQ==}
engines: {node: '>=12'}
cpu: [ia32]
os: [win32]
requiresBuild: true
dev: true
optional: true
/@esbuild/win32-x64@0.20.2:
resolution: {integrity: sha512-N49X4lJX27+l9jbLKSqZ6bKNjzQvHaT8IIFUy+YIqmXQdjYCToGWwOItDrfby14c78aDd5NHQl29xingXfCdLQ==}
engines: {node: '>=12'}
cpu: [x64]
os: [win32]
requiresBuild: true
dev: true
optional: true
/@fastify/busboy@2.1.1:
resolution: {integrity: sha512-vBZP4NlzfOlerQTnba4aqZoMhE/a9HY7HRqoOPaETQcSQuWEIyZMHGfVu6w9wGtGK5fED5qRs2DteVCjOH60sA==}
engines: {node: '>=14'}
dev: false
/@octokit/auth-token@4.0.0:
resolution: {integrity: sha512-tY/msAuJo6ARbK6SPIxZrPBms3xPbfwBrulZe0Wtr/DIY9lje2HeV1uoebShn6mx7SjCHif6EjMvoREj+gZ+SA==}
engines: {node: '>= 18'}
dev: false
/@octokit/core@5.2.0:
resolution: {integrity: sha512-1LFfa/qnMQvEOAdzlQymH0ulepxbxnCYAKJZfMci/5XJyIHWgEYnDmgnKakbTh7CH2tFQ5O60oYDvns4i9RAIg==}
engines: {node: '>= 18'}
dependencies:
'@octokit/auth-token': 4.0.0
'@octokit/graphql': 7.1.0
'@octokit/request': 8.4.0
'@octokit/request-error': 5.1.0
'@octokit/types': 13.5.0
before-after-hook: 2.2.3
universal-user-agent: 6.0.1
dev: false
/@octokit/endpoint@9.0.5:
resolution: {integrity: sha512-ekqR4/+PCLkEBF6qgj8WqJfvDq65RH85OAgrtnVp1mSxaXF03u2xW/hUdweGS5654IlC0wkNYC18Z50tSYTAFw==}
engines: {node: '>= 18'}
dependencies:
'@octokit/types': 13.5.0
universal-user-agent: 6.0.1
dev: false
/@octokit/graphql@7.1.0:
resolution: {integrity: sha512-r+oZUH7aMFui1ypZnAvZmn0KSqAUgE1/tUXIWaqUCa1758ts/Jio84GZuzsvUkme98kv0WFY8//n0J1Z+vsIsQ==}
engines: {node: '>= 18'}
dependencies:
'@octokit/request': 8.4.0
'@octokit/types': 13.5.0
universal-user-agent: 6.0.1
dev: false
/@octokit/openapi-types@20.0.0:
resolution: {integrity: sha512-EtqRBEjp1dL/15V7WiX5LJMIxxkdiGJnabzYx5Apx4FkQIFgAfKumXeYAqqJCj1s+BMX4cPFIFC4OLCR6stlnA==}
dev: false
/@octokit/openapi-types@22.2.0:
resolution: {integrity: sha512-QBhVjcUa9W7Wwhm6DBFu6ZZ+1/t/oYxqc2tp81Pi41YNuJinbFRx8B133qVOrAaBbF7D/m0Et6f9/pZt9Rc+tg==}
dev: false
/@octokit/plugin-paginate-rest@9.2.1(@octokit/core@5.2.0):
resolution: {integrity: sha512-wfGhE/TAkXZRLjksFXuDZdmGnJQHvtU/joFQdweXUgzo1XwvBCD4o4+75NtFfjfLK5IwLf9vHTfSiU3sLRYpRw==}
engines: {node: '>= 18'}
peerDependencies:
'@octokit/core': '5'
dependencies:
'@octokit/core': 5.2.0
'@octokit/types': 12.6.0
dev: false
/@octokit/plugin-rest-endpoint-methods@10.4.1(@octokit/core@5.2.0):
resolution: {integrity: sha512-xV1b+ceKV9KytQe3zCVqjg+8GTGfDYwaT1ATU5isiUyVtlVAO3HNdzpS4sr4GBx4hxQ46s7ITtZrAsxG22+rVg==}
engines: {node: '>= 18'}
peerDependencies:
'@octokit/core': '5'
dependencies:
'@octokit/core': 5.2.0
'@octokit/types': 12.6.0
dev: false
/@octokit/request-error@5.1.0:
resolution: {integrity: sha512-GETXfE05J0+7H2STzekpKObFe765O5dlAKUTLNGeH+x47z7JjXHfsHKo5z21D/o/IOZTUEI6nyWyR+bZVP/n5Q==}
engines: {node: '>= 18'}
dependencies:
'@octokit/types': 13.5.0
deprecation: 2.3.1
once: 1.4.0
dev: false
/@octokit/request@8.4.0:
resolution: {integrity: sha512-9Bb014e+m2TgBeEJGEbdplMVWwPmL1FPtggHQRkV+WVsMggPtEkLKPlcVYm/o8xKLkpJ7B+6N8WfQMtDLX2Dpw==}
engines: {node: '>= 18'}
dependencies:
'@octokit/endpoint': 9.0.5
'@octokit/request-error': 5.1.0
'@octokit/types': 13.5.0
universal-user-agent: 6.0.1
dev: false
/@octokit/types@12.6.0:
resolution: {integrity: sha512-1rhSOfRa6H9w4YwK0yrf5faDaDTb+yLyBUKOCV4xtCDB5VmIPqd/v9yr9o6SAzOAlRxMiRiCic6JVM1/kunVkw==}
dependencies:
'@octokit/openapi-types': 20.0.0
dev: false
/@octokit/types@13.5.0:
resolution: {integrity: sha512-HdqWTf5Z3qwDVlzCrP8UJquMwunpDiMPt5er+QjGzL4hqr/vBVY/MauQgS1xWxCDT1oMx1EULyqxncdCY/NVSQ==}
dependencies:
'@octokit/openapi-types': 22.2.0
dev: false
/@octokit/webhooks-types@7.5.1:
resolution: {integrity: sha512-1dozxWEP8lKGbtEu7HkRbK1F/nIPuJXNfT0gd96y6d3LcHZTtRtlf8xz3nicSJfesADxJyDh+mWBOsdLkqgzYw==}
dev: false
/@types/conventional-commits-parser@5.0.0:
resolution: {integrity: sha512-loB369iXNmAZglwWATL+WRe+CRMmmBPtpolYzIebFaX4YA3x+BEfLqhUAV9WanycKI3TG1IMr5bMJDajDKLlUQ==}
dependencies:
'@types/node': 20.12.7
dev: true
/@types/lodash@4.17.0:
resolution: {integrity: sha512-t7dhREVv6dbNj0q17X12j7yDG4bD/DHYX7o5/DbDxobP0HnGPgpRz2Ej77aL7TZT3DSw13fqUTj8J4mMnqa7WA==}
dev: true
/@types/node@20.12.7:
resolution: {integrity: sha512-wq0cICSkRLVaf3UGLMGItu/PtdY7oaXaI/RVU+xliKVOtRna3PRY57ZDfztpDL0n11vfymMUnXv8QwYCO7L1wg==}
dependencies:
undici-types: 5.26.5
dev: true
/JSONStream@1.3.5:
resolution: {integrity: sha512-E+iruNOY8VV9s4JEbe1aNEm6MiszPRr/UfcHMz0TQh1BXSxHK+ASV1R6W4HpjBhSeS+54PIsAMCBmwD06LLsqQ==}
hasBin: true
dependencies:
jsonparse: 1.3.1
through: 2.3.8
dev: false
/before-after-hook@2.2.3:
resolution: {integrity: sha512-NzUnlZexiaH/46WDhANlyR2bXRopNg4F/zuSA3OpZnllCUgRaOF2znDioDWrmbNVsuZk6l9pMquQB38cfBZwkQ==}
dev: false
/conventional-commit-types@3.0.0:
resolution: {integrity: sha512-SmmCYnOniSsAa9GqWOeLqc179lfr5TRu5b4QFDkbsrJ5TZjPJx85wtOr3zn+1dbeNiXDKGPbZ72IKbPhLXh/Lg==}
dev: false
/conventional-commits-parser@5.0.0:
resolution: {integrity: sha512-ZPMl0ZJbw74iS9LuX9YIAiW8pfM5p3yh2o/NbXHbkFuZzY5jvdi5jFycEOkmBW5H5I7nA+D6f3UcsCLP2vvSEA==}
engines: {node: '>=16'}
hasBin: true
dependencies:
JSONStream: 1.3.5
is-text-path: 2.0.0
meow: 12.1.1
split2: 4.2.0
dev: false
/dayjs@1.11.11:
resolution: {integrity: sha512-okzr3f11N6WuqYtZSvm+F776mB41wRZMhKP+hc34YdW+KmtYYK9iqvHSwo2k9FEH3fhGXvOPV6yz2IcSrfRUDg==}
dev: false
/deprecation@2.3.1:
resolution: {integrity: sha512-xmHIy4F3scKVwMsQ4WnVaS8bHOx0DmVwRywosKhaILI0ywMDWPtBSku2HNxRvF7jtwDRsoEwYQSfbxj8b7RlJQ==}
dev: false
/dotenv@16.4.5:
resolution: {integrity: sha512-ZmdL2rui+eB2YwhsWzjInR8LldtZHGDoQ1ugH85ppHKwpUHL7j7rN0Ti9NCnGiQbhaZ11FpR+7ao1dNsmduNUg==}
engines: {node: '>=12'}
dev: false
/esbuild@0.20.2:
resolution: {integrity: sha512-WdOOppmUNU+IbZ0PaDiTst80zjnrOkyJNHoKupIcVyU8Lvla3Ugx94VzkQ32Ijqd7UhHJy75gNWDMUekcrSJ6g==}
engines: {node: '>=12'}
hasBin: true
requiresBuild: true
optionalDependencies:
'@esbuild/aix-ppc64': 0.20.2
'@esbuild/android-arm': 0.20.2
'@esbuild/android-arm64': 0.20.2
'@esbuild/android-x64': 0.20.2
'@esbuild/darwin-arm64': 0.20.2
'@esbuild/darwin-x64': 0.20.2
'@esbuild/freebsd-arm64': 0.20.2
'@esbuild/freebsd-x64': 0.20.2
'@esbuild/linux-arm': 0.20.2
'@esbuild/linux-arm64': 0.20.2
'@esbuild/linux-ia32': 0.20.2
'@esbuild/linux-loong64': 0.20.2
'@esbuild/linux-mips64el': 0.20.2
'@esbuild/linux-ppc64': 0.20.2
'@esbuild/linux-riscv64': 0.20.2
'@esbuild/linux-s390x': 0.20.2
'@esbuild/linux-x64': 0.20.2
'@esbuild/netbsd-x64': 0.20.2
'@esbuild/openbsd-x64': 0.20.2
'@esbuild/sunos-x64': 0.20.2
'@esbuild/win32-arm64': 0.20.2
'@esbuild/win32-ia32': 0.20.2
'@esbuild/win32-x64': 0.20.2
dev: true
/fsevents@2.3.3:
resolution: {integrity: sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==}
engines: {node: ^8.16.0 || ^10.6.0 || >=11.0.0}
os: [darwin]
requiresBuild: true
dev: true
optional: true
/get-tsconfig@4.7.3:
resolution: {integrity: sha512-ZvkrzoUA0PQZM6fy6+/Hce561s+faD1rsNwhnO5FelNjyy7EMGJ3Rz1AQ8GYDWjhRs/7dBLOEJvhK8MiEJOAFg==}
dependencies:
resolve-pkg-maps: 1.0.0
dev: true
/is-text-path@2.0.0:
resolution: {integrity: sha512-+oDTluR6WEjdXEJMnC2z6A4FRwFoYuvShVVEGsS7ewc0UTi2QtAKMDJuL4BDEVt+5T7MjFo12RP8ghOM75oKJw==}
engines: {node: '>=8'}
dependencies:
text-extensions: 2.4.0
dev: false
/json5@2.2.3:
resolution: {integrity: sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==}
engines: {node: '>=6'}
hasBin: true
dev: true
/jsonparse@1.3.1:
resolution: {integrity: sha512-POQXvpdL69+CluYsillJ7SUhKvytYjW9vG/GKpnf+xP8UWgYEM/RaMzHHofbALDiKbbP1W8UEYmgGl39WkPZsg==}
engines: {'0': node >= 0.2.0}
dev: false
/lodash@4.17.21:
resolution: {integrity: sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==}
dev: false
/meow@12.1.1:
resolution: {integrity: sha512-BhXM0Au22RwUneMPwSCnyhTOizdWoIEPU9sp0Aqa1PnDMR5Wv2FGXYDjuzJEIX+Eo2Rb8xuYe5jrnm5QowQFkw==}
engines: {node: '>=16.10'}
dev: false
/minimist@1.2.8:
resolution: {integrity: sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==}
dev: true
/once@1.4.0:
resolution: {integrity: sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==}
dependencies:
wrappy: 1.0.2
dev: false
/resolve-pkg-maps@1.0.0:
resolution: {integrity: sha512-seS2Tj26TBVOC2NIc2rOe2y2ZO7efxITtLZcGSOnHHNOQ7CkiUBfw0Iw2ck6xkIhPwLhKNLS8BO+hEpngQlqzw==}
dev: true
/split2@4.2.0:
resolution: {integrity: sha512-UcjcJOWknrNkF6PLX83qcHM6KHgVKNkV62Y8a5uYDVv9ydGQVwAHMKqHdJje1VTWpljG0WYpCDhrCdAOYH4TWg==}
engines: {node: '>= 10.x'}
dev: false
/strip-bom@3.0.0:
resolution: {integrity: sha512-vavAMRXOgBVNF6nyEEmL3DBK19iRpDcoIwW+swQ+CbGiu7lju6t+JklA1MHweoWtadgt4ISVUsXLyDq34ddcwA==}
engines: {node: '>=4'}
dev: true
/text-extensions@2.4.0:
resolution: {integrity: sha512-te/NtwBwfiNRLf9Ijqx3T0nlqZiQ2XrrtBvu+cLL8ZRrGkO0NHTug8MYFKyoSrv/sHTaSKfilUkizV6XhxMJ3g==}
engines: {node: '>=8'}
dev: false
/through@2.3.8:
resolution: {integrity: sha512-w89qg7PI8wAdvX60bMDP+bFoD5Dvhm9oLheFp5O4a2QF0cSBGsBX4qZmadPMvVqlLJBBci+WqGGOAPvcDeNSVg==}
dev: false
/tsconfig-paths@4.2.0:
resolution: {integrity: sha512-NoZ4roiN7LnbKn9QqE1amc9DJfzvZXxF4xDavcOWt1BPkdx+m+0gJuPM+S0vCe7zTJMYUP0R8pO2XMr+Y8oLIg==}
engines: {node: '>=6'}
dependencies:
json5: 2.2.3
minimist: 1.2.8
strip-bom: 3.0.0
dev: true
/tsx@4.8.2:
resolution: {integrity: sha512-hmmzS4U4mdy1Cnzpl/NQiPUC2k34EcNSTZYVJThYKhdqTwuBeF+4cG9KUK/PFQ7KHaAaYwqlb7QfmsE2nuj+WA==}
engines: {node: '>=18.0.0'}
hasBin: true
dependencies:
esbuild: 0.20.2
get-tsconfig: 4.7.3
optionalDependencies:
fsevents: 2.3.3
dev: true
/tunnel@0.0.6:
resolution: {integrity: sha512-1h/Lnq9yajKY2PEbBadPXj3VxsDDu844OnaAo52UVmIzIvwwtBPIuNvkjuzBlTWpfJyUbG3ez0KSBibQkj4ojg==}
engines: {node: '>=0.6.11 <=0.7.0 || >=0.7.3'}
dev: false
/typescript@5.4.5:
resolution: {integrity: sha512-vcI4UpRgg81oIRUFwR0WSIHKt11nJ7SAVlYNIu+QpqeyXP+gpQJy/Z4+F0aGxSE4MqwjyXvW/TzgkLAx2AGHwQ==}
engines: {node: '>=14.17'}
hasBin: true
dev: true
/undici-types@5.26.5:
resolution: {integrity: sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==}
dev: true
/undici@5.28.4:
resolution: {integrity: sha512-72RFADWFqKmUb2hmmvNODKL3p9hcB6Gt2DOQMis1SEBaV6a4MH8soBvzg+95CYhCKPFedut2JY9bMfrDl9D23g==}
engines: {node: '>=14.0'}
dependencies:
'@fastify/busboy': 2.1.1
dev: false
/universal-user-agent@6.0.1:
resolution: {integrity: sha512-yCzhz6FN2wU1NiiQRogkTQszlQSlpWaw8SvVegAc+bDxbzHgh1vX8uIe8OYyMH6DwH+sdTJsgMl36+mSMdRJIQ==}
dev: false
/uuid@8.3.2:
resolution: {integrity: sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg==}
hasBin: true
dev: false
/wrappy@1.0.2:
resolution: {integrity: sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==}
dev: false

30
cyborg/src/common.ts Normal file
View File

@@ -0,0 +1,30 @@
/*
* Copyright 2023 Greptime Team
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import * as core from "@actions/core";
import {config} from "dotenv";
import {getOctokit} from "@actions/github";
import {GitHub} from "@actions/github/lib/utils";
export function handleError(err: any): void {
console.error(err)
core.setFailed(`Unhandled error: ${err}`)
}
export function obtainClient(token: string): InstanceType<typeof GitHub> {
config()
return getOctokit(process.env[token])
}

14
cyborg/tsconfig.json Normal file
View File

@@ -0,0 +1,14 @@
{
"ts-node": {
"require": ["tsconfig-paths/register"]
},
"compilerOptions": {
"module": "NodeNext",
"moduleResolution": "NodeNext",
"target": "ES6",
"paths": {
"@/*": ["./src/*"]
},
"resolveJsonModule": true,
}
}

View File

@@ -0,0 +1,136 @@
# How to write fuzz tests
This document introduces how to write fuzz tests in GreptimeDB.
## What is a fuzz test
A fuzz test is a tool that leverages deterministic random generation to assist in finding bugs. The goal of fuzz tests is to identify fuzzer-generated inputs that cause panics, crashes, or other unexpected behavior. We use [cargo-fuzz](https://github.com/rust-fuzz/cargo-fuzz) to run our fuzz test targets. A minimal target looks like the sketch below.
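The following is a minimal sketch of a libfuzzer-style target, assuming only the `libfuzzer-sys` crate; the real targets described later in this document decode the fuzzer's bytes into structured inputs (random SQL) instead of using them directly.
```rust
#![no_main]
use libfuzzer_sys::fuzz_target;

fuzz_target!(|data: &[u8]| {
    // cargo-fuzz feeds deterministic pseudo-random bytes here; a real target
    // decodes them into structured inputs (e.g. SQL) and checks invariants.
    if let Ok(text) = std::str::from_utf8(data) {
        let _ = text.trim();
    }
});
```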
## Why we need them
- Find bugs by leveraging random generation
- Integrate with other tests (e.g., e2e)
## Resources
All fuzz test-related resources are located in the `/tests-fuzz` directory.
There are two types of resources: (1) fundamental components and (2) test targets.
### Fundamental components
They are located in the `/tests-fuzz/src` directory. The fundamental components define how to generate SQLs (including dialects for different protocols) and validate execution results (e.g., column attribute validation), etc.
### Test targets
They are located in the `/tests-fuzz/targets` directory, with each file representing an independent fuzz test case. The target utilizes fundamental components to generate SQLs, sends the generated SQLs via specified protocol, and validates the results of SQL execution.
Figure 1 illustrates how the fundamental components of the fuzz tests provide the ability to generate random SQL. A Random Number Generator (Rng) drives an expression generator to produce the Intermediate Representation (IR), then a DialectTranslator produces the dialect for a specific protocol. Finally, the fuzz tests send the generated SQL via the specified protocol and verify that the execution results meet expectations.
```
        Rng
         |
         v
   ExprGenerator
         |
         v
Intermediate representation (IR)
         |
         +---------------------+------------------------+
         |                     |                        |
         v                     v                        v
  MySQLTranslator    PostgreSQLTranslator    OtherDialectTranslator
         |                     |                        |
         v                     v                        v
SQL(MySQL Dialect)           .....                    .....
         |
         v
     Fuzz Test
```
(Figure 1: Overview of fuzz tests)
For more details about fuzz targets and fundamental components, please refer to this [tracking issue](https://github.com/GreptimeTeam/greptimedb/issues/3174).
## How to add a fuzz test target
1. Create an empty Rust source file at `/tests-fuzz/targets/<fuzz-target>.rs`.
2. Register the fuzz test target in the `/tests-fuzz/Cargo.toml` file.
```toml
[[bin]]
name = "<fuzz-target>"
path = "targets/<fuzz-target>.rs"
test = false
bench = false
doc = false
```
3. Define the `FuzzInput` in the `/tests-fuzz/targets/<fuzz-target>.rs`.
```rust
#![no_main]
use libfuzzer_sys::arbitrary::{Arbitrary, Unstructured};
#[derive(Clone, Debug)]
struct FuzzInput {
    seed: u64,
}

impl Arbitrary<'_> for FuzzInput {
    fn arbitrary(u: &mut Unstructured<'_>) -> arbitrary::Result<Self> {
        let seed = u.int_in_range(u64::MIN..=u64::MAX)?;
        Ok(FuzzInput { seed })
    }
}
```
4. Write your first fuzz test target in the `/tests-fuzz/targets/<fuzz-target>.rs`.
```rust
use libfuzzer_sys::fuzz_target;
use rand::{Rng, SeedableRng};
use rand_chacha::ChaChaRng;
use snafu::ResultExt;
use sqlx::{MySql, Pool};
use tests_fuzz::fake::{
merge_two_word_map_fn, random_capitalize_map, uppercase_and_keyword_backtick_map,
MappedGenerator, WordGenerator,
};
use tests_fuzz::generator::create_expr::CreateTableExprGeneratorBuilder;
use tests_fuzz::generator::Generator;
use tests_fuzz::ir::CreateTableExpr;
use tests_fuzz::translator::mysql::create_expr::CreateTableExprTranslator;
use tests_fuzz::translator::DslTranslator;
use tests_fuzz::utils::{init_greptime_connections, Connections};
fuzz_target!(|input: FuzzInput| {
    common_telemetry::init_default_ut_logging();
    common_runtime::block_on_write(async {
        let Connections { mysql } = init_greptime_connections().await;
        let mut rng = ChaChaRng::seed_from_u64(input.seed);
        let columns = rng.gen_range(2..30);
        // Randomly toggle `IF NOT EXISTS` on the generated statement.
        let if_not_exists = rng.gen_bool(0.5);
        let create_table_generator = CreateTableExprGeneratorBuilder::default()
            .name_generator(Box::new(MappedGenerator::new(
                WordGenerator,
                merge_two_word_map_fn(random_capitalize_map, uppercase_and_keyword_backtick_map),
            )))
            .columns(columns)
            .engine("mito")
            .if_not_exists(if_not_exists)
            .build()
            .unwrap();
        let ir = create_table_generator.generate(&mut rng);
        let translator = CreateTableExprTranslator;
        let sql = translator.translate(&ir).unwrap();
        mysql.execute(&sql).await
    })
});
```
5. Run your fuzz test target
```bash
cargo fuzz run <fuzz-target> --fuzz-dir tests-fuzz
```
For more details, please refer to this [document](/tests-fuzz/README.md).

View File

@@ -27,8 +27,8 @@ subgraph Frontend["Frontend"]
end
end
MyTable --> MetaSrv
MetaSrv --> ETCD
MyTable --> Metasrv
Metasrv --> ETCD
MyTable-->TableEngine0
MyTable-->TableEngine1
@@ -95,8 +95,8 @@ subgraph Frontend["Frontend"]
end
end
MyTable --> MetaSrv
MetaSrv --> ETCD
MyTable --> Metasrv
Metasrv --> ETCD
MyTable-->RegionEngine
MyTable-->RegionEngine1

View File

@@ -36,7 +36,7 @@ Hence, we choose the third option, and use a simple logical plan that's anagonis
## Deploy mode and protocol
- Greptime Flow is an independent streaming compute component. It can be used either within a standalone node or as a dedicated node at the same level as frontend in distributed mode.
- It accepts insert request Rows, which is used between frontend and datanode.
- New flow job is submitted in the format of modified SQL query like snowflake do, like: `CREATE TASK avg_over_5m WINDOW_SIZE = "5m" AS SELECT avg(value) FROM table WHERE time > now() - 5m GROUP BY time(1m)`. Flow job then got stored in MetaSrv.
- A new flow job is submitted as a modified SQL query, as Snowflake does, e.g.: `CREATE TASK avg_over_5m WINDOW_SIZE = "5m" AS SELECT avg(value) FROM table WHERE time > now() - 5m GROUP BY time(1m)`. The flow job is then stored in Metasrv.
- It also persists results in the format of Rows to frontend.
- The query plan uses Substrait as codec format. It's the same with GreptimeDB's query engine.
- Greptime Flow needs a WAL for recovering. It's possible to reuse datanode's.

View File

@@ -73,7 +73,7 @@ CREATE TABLE cpu (
usage_system DOUBLE,
datacenter STRING,
TIME INDEX (ts),
PRIMARY KEY(datacenter, host)) ENGINE=mito WITH(regions=1);
PRIMARY KEY(datacenter, host)) ENGINE=mito;
```
Then the table's `TableMeta` may look like this:
@@ -249,7 +249,7 @@ CREATE TABLE cpu (
usage_system DOUBLE,
datacenter STRING,
TIME INDEX (ts),
PRIMARY KEY(datacenter, host)) ENGINE=mito WITH(regions=1);
PRIMARY KEY(datacenter, host)) ENGINE=mito;
select ts, usage_system from cpu;
```

46
docs/style-guide.md Normal file
View File

@@ -0,0 +1,46 @@
# GreptimeDB Style Guide
This style guide is intended to help contributors to GreptimeDB write code that is consistent with the rest of the codebase. It is a living document and will be updated as the codebase evolves.
It's mainly a complement to the [Rust Style Guide](https://pingcap.github.io/style-guide/rust/).
## Table of Contents
- Formatting
- Modules
- Comments
## Formatting
- Place all `mod` declarations before any `use`.
- Use `unimplemented!()` instead of `todo!()` for things that aren't likely to be implemented.
- Add an empty line before and after declaration blocks.
- Place comments before attributes (`#[]`) and derives (`#[derive]`), as in the sketch below.
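A minimal sketch of these formatting rules (the module and struct names are made up for illustration):
```rust
mod cache;
mod write_cache;

use std::collections::HashMap;

/// Tracks the size of each cache entry.
///
/// The comment goes before the derive and any other attributes.
#[derive(Debug, Default)]
pub struct CacheSizeTracker {
    sizes: HashMap<String, usize>,
}
```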
## Modules
- Use a file with the same name as the module instead of `mod.rs` to define it (see the sketch after the tree). E.g.:
```
.
├── cache
│   ├── cache_size.rs
│   └── write_cache.rs
└── cache.rs
```
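With that layout, `cache.rs` itself declares the submodules; a purely illustrative sketch:
```rust
// cache.rs — the module file that replaces `cache/mod.rs`.
mod cache_size;
mod write_cache;

// Hypothetical re-exports of the submodules' public items.
pub use cache_size::CacheSize;
pub use write_cache::WriteCache;
```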
## Comments
- Add comments for public functions and structs.
- Prefer document comments (`///`) over normal comments (`//`) for structs, fields, functions, etc.
- Add links (`[]`) to structs, methods, or any other references, and make sure the links work (see the sketch below).
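A small illustrative example of these comment rules (the `WriteCache` type here is hypothetical, not GreptimeDB's actual one):
```rust
/// A write-through cache for recently written files.
///
/// See [`WriteCache::flush`] for how buffered entries are persisted.
#[derive(Debug, Default)]
pub struct WriteCache {
    /// Maximum capacity of the cache in bytes.
    capacity: usize,
}

impl WriteCache {
    /// Flushes all buffered entries to the underlying storage.
    pub fn flush(&self) {
        // Left empty in this sketch.
        let _ = self.capacity;
    }
}
```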
## Error handling
- Define a custom error type for the module if needed.
- Prefer `with_context()` over `context()` when allocation is needed to construct an error (see the sketch after the logging example).
- Use `error!()` or `warn!()` macros in the `common_telemetry` crate to log errors. E.g.:
```rust
error!(e; "Failed to do something");
```
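A minimal sketch of the `with_context()` guideline, using a hypothetical `ReadFile` error variant rather than an actual GreptimeDB error type:
```rust
use snafu::{ResultExt, Snafu};

#[derive(Debug, Snafu)]
enum Error {
    #[snafu(display("Failed to read file {}", path))]
    ReadFile {
        path: String,
        source: std::io::Error,
    },
}

fn read_config(path: &str) -> Result<String, Error> {
    // `with_context` builds the (allocating) context lazily, only when an
    // error actually occurs, which is why it's preferred over `context` here.
    std::fs::read_to_string(path).with_context(|_| ReadFileSnafu {
        path: path.to_string(),
    })
}
```
With `context(ReadFileSnafu { path: path.to_string() })`, the `String` would be allocated even on the success path.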

View File

@@ -7,4 +7,60 @@ Status notify: we are still working on this config. It's expected to change freq
# How to use
## `greptimedb.json`
Open Grafana Dashboard page, choose `New` -> `Import`. And upload `greptimedb.json` file.
## `greptimedb-cluster.json`
This cluster dashboard provides a comprehensive view of incoming requests, response statuses, and internal activities such as flush and compaction, with a layered structure from frontend to datanode. Designed with a focus on alert functionality, its primary aim is to highlight any anomalies in metrics, allowing users to quickly pinpoint the cause of errors.
We use Prometheus to scrape metrics from the nodes in a GreptimeDB cluster and Grafana to visualize them. Any compatible stack should work too.
__Note__: This dashboard is still in an early stage of development. Any issue or advice on improvement is welcomed.
### Configuration
Please ensure the following configuration before importing the dashboard into Grafana.
__1. Prometheus scrape config__
Assign the `greptime_pod` label to each host target. We use this label to identify each node instance.
```yml
# example config
# only to indicate how to assign labels to each target
# modify yours accordingly
scrape_configs:
- job_name: metasrv
static_configs:
- targets: ['<ip>:<port>']
labels:
greptime_pod: metasrv
- job_name: datanode
static_configs:
- targets: ['<ip>:<port>']
labels:
greptime_pod: datanode1
- targets: ['<ip>:<port>']
labels:
greptime_pod: datanode2
- targets: ['<ip>:<port>']
labels:
greptime_pod: datanode3
- job_name: frontend
static_configs:
- targets: ['<ip>:<port>']
labels:
greptime_pod: frontend
```
__2. Grafana config__
Create a Prometheus data source in Grafana before using this dashboard. We use `datasource` as a variable in the Grafana dashboard so that multiple environments are supported.
### Usage
Use the `datasource` or `greptime_pod` selectors in the upper-left corner to filter data from a certain node.

File diff suppressed because it is too large

View File

@@ -17,12 +17,15 @@ headerPath = "Apache-2.0.txt"
includes = [
"*.rs",
"*.py",
"*.ts",
]
excludes = [
# copied sources
"src/common/base/src/readable_size.rs",
"src/common/base/src/secrets.rs",
"src/servers/src/repeated_field.rs",
"src/servers/src/http/test_helpers.rs",
]
[properties]

View File

@@ -1,2 +1,2 @@
[toolchain]
channel = "nightly-2023-12-19"
channel = "nightly-2024-04-18"

View File

@@ -20,21 +20,20 @@ use common_decimal::Decimal128;
use common_time::interval::IntervalUnit;
use common_time::time::Time;
use common_time::timestamp::TimeUnit;
use common_time::{Date, DateTime, Duration, Interval, Timestamp};
use common_time::{Date, DateTime, Interval, Timestamp};
use datatypes::prelude::{ConcreteDataType, ValueRef};
use datatypes::scalars::ScalarVector;
use datatypes::types::{
DurationType, Int16Type, Int8Type, IntervalType, TimeType, TimestampType, UInt16Type, UInt8Type,
Int16Type, Int8Type, IntervalType, TimeType, TimestampType, UInt16Type, UInt8Type,
};
use datatypes::value::{OrderedF32, OrderedF64, Value};
use datatypes::vectors::{
BinaryVector, BooleanVector, DateTimeVector, DateVector, Decimal128Vector,
DurationMicrosecondVector, DurationMillisecondVector, DurationNanosecondVector,
DurationSecondVector, Float32Vector, Float64Vector, Int32Vector, Int64Vector,
IntervalDayTimeVector, IntervalMonthDayNanoVector, IntervalYearMonthVector, PrimitiveVector,
StringVector, TimeMicrosecondVector, TimeMillisecondVector, TimeNanosecondVector,
TimeSecondVector, TimestampMicrosecondVector, TimestampMillisecondVector,
TimestampNanosecondVector, TimestampSecondVector, UInt32Vector, UInt64Vector, VectorRef,
BinaryVector, BooleanVector, DateTimeVector, DateVector, Decimal128Vector, Float32Vector,
Float64Vector, Int32Vector, Int64Vector, IntervalDayTimeVector, IntervalMonthDayNanoVector,
IntervalYearMonthVector, PrimitiveVector, StringVector, TimeMicrosecondVector,
TimeMillisecondVector, TimeNanosecondVector, TimeSecondVector, TimestampMicrosecondVector,
TimestampMillisecondVector, TimestampNanosecondVector, TimestampSecondVector, UInt32Vector,
UInt64Vector, VectorRef,
};
use greptime_proto::v1;
use greptime_proto::v1::column_data_type_extension::TypeExt;
@@ -127,14 +126,6 @@ impl From<ColumnDataTypeWrapper> for ConcreteDataType {
ColumnDataType::IntervalMonthDayNano => {
ConcreteDataType::interval_month_day_nano_datatype()
}
ColumnDataType::DurationSecond => ConcreteDataType::duration_second_datatype(),
ColumnDataType::DurationMillisecond => {
ConcreteDataType::duration_millisecond_datatype()
}
ColumnDataType::DurationMicrosecond => {
ConcreteDataType::duration_microsecond_datatype()
}
ColumnDataType::DurationNanosecond => ConcreteDataType::duration_nanosecond_datatype(),
ColumnDataType::Decimal128 => {
if let Some(TypeExt::DecimalType(d)) = datatype_wrapper
.datatype_ext
@@ -212,11 +203,7 @@ impl_column_type_functions_with_snake!(
TimeNanosecond,
IntervalYearMonth,
IntervalDayTime,
IntervalMonthDayNano,
DurationSecond,
DurationMillisecond,
DurationMicrosecond,
DurationNanosecond
IntervalMonthDayNano
);
impl ColumnDataTypeWrapper {
@@ -270,16 +257,11 @@ impl TryFrom<ConcreteDataType> for ColumnDataTypeWrapper {
IntervalType::DayTime(_) => ColumnDataType::IntervalDayTime,
IntervalType::MonthDayNano(_) => ColumnDataType::IntervalMonthDayNano,
},
ConcreteDataType::Duration(d) => match d {
DurationType::Second(_) => ColumnDataType::DurationSecond,
DurationType::Millisecond(_) => ColumnDataType::DurationMillisecond,
DurationType::Microsecond(_) => ColumnDataType::DurationMicrosecond,
DurationType::Nanosecond(_) => ColumnDataType::DurationNanosecond,
},
ConcreteDataType::Decimal128(_) => ColumnDataType::Decimal128,
ConcreteDataType::Null(_)
| ConcreteDataType::List(_)
| ConcreteDataType::Dictionary(_) => {
| ConcreteDataType::Dictionary(_)
| ConcreteDataType::Duration(_) => {
return error::IntoColumnDataTypeSnafu { from: datatype }.fail()
}
};
@@ -409,22 +391,6 @@ pub fn values_with_capacity(datatype: ColumnDataType, capacity: usize) -> Values
interval_month_day_nano_values: Vec::with_capacity(capacity),
..Default::default()
},
ColumnDataType::DurationSecond => Values {
duration_second_values: Vec::with_capacity(capacity),
..Default::default()
},
ColumnDataType::DurationMillisecond => Values {
duration_millisecond_values: Vec::with_capacity(capacity),
..Default::default()
},
ColumnDataType::DurationMicrosecond => Values {
duration_microsecond_values: Vec::with_capacity(capacity),
..Default::default()
},
ColumnDataType::DurationNanosecond => Values {
duration_nanosecond_values: Vec::with_capacity(capacity),
..Default::default()
},
ColumnDataType::Decimal128 => Values {
decimal128_values: Vec::with_capacity(capacity),
..Default::default()
@@ -476,14 +442,8 @@ pub fn push_vals(column: &mut Column, origin_count: usize, vector: VectorRef) {
.interval_month_day_nano_values
.push(convert_i128_to_interval(val.to_i128())),
},
Value::Duration(val) => match val.unit() {
TimeUnit::Second => values.duration_second_values.push(val.value()),
TimeUnit::Millisecond => values.duration_millisecond_values.push(val.value()),
TimeUnit::Microsecond => values.duration_microsecond_values.push(val.value()),
TimeUnit::Nanosecond => values.duration_nanosecond_values.push(val.value()),
},
Value::Decimal128(val) => values.decimal128_values.push(convert_to_pb_decimal128(val)),
Value::List(_) => unreachable!(),
Value::List(_) | Value::Duration(_) => unreachable!(),
});
column.null_mask = null_mask.into_vec();
}
@@ -518,6 +478,8 @@ fn ddl_request_type(request: &DdlRequest) -> &'static str {
Some(Expr::Alter(_)) => "ddl.alter",
Some(Expr::DropTable(_)) => "ddl.drop_table",
Some(Expr::TruncateTable(_)) => "ddl.truncate_table",
Some(Expr::CreateFlow(_)) => "ddl.create_flow",
Some(Expr::DropFlow(_)) => "ddl.drop_flow",
None => "ddl.empty",
}
}
@@ -583,10 +545,6 @@ pub fn pb_value_to_value_ref<'a>(
let interval = Interval::from_month_day_nano(v.months, v.days, v.nanoseconds);
ValueRef::Interval(interval)
}
ValueData::DurationSecondValue(v) => ValueRef::Duration(Duration::new_second(*v)),
ValueData::DurationMillisecondValue(v) => ValueRef::Duration(Duration::new_millisecond(*v)),
ValueData::DurationMicrosecondValue(v) => ValueRef::Duration(Duration::new_microsecond(*v)),
ValueData::DurationNanosecondValue(v) => ValueRef::Duration(Duration::new_nanosecond(*v)),
ValueData::Decimal128Value(v) => {
// get precision and scale from datatype_extension
if let Some(TypeExt::DecimalType(d)) = datatype_ext
@@ -681,26 +639,15 @@ pub fn pb_values_to_vector_ref(data_type: &ConcreteDataType, values: Values) ->
))
}
},
ConcreteDataType::Duration(unit) => match unit {
DurationType::Second(_) => Arc::new(DurationSecondVector::from_vec(
values.duration_second_values,
)),
DurationType::Millisecond(_) => Arc::new(DurationMillisecondVector::from_vec(
values.duration_millisecond_values,
)),
DurationType::Microsecond(_) => Arc::new(DurationMicrosecondVector::from_vec(
values.duration_microsecond_values,
)),
DurationType::Nanosecond(_) => Arc::new(DurationNanosecondVector::from_vec(
values.duration_nanosecond_values,
)),
},
ConcreteDataType::Decimal128(d) => Arc::new(Decimal128Vector::from_values(
values.decimal128_values.iter().map(|x| {
Decimal128::from_value_precision_scale(x.hi, x.lo, d.precision(), d.scale()).into()
}),
)),
ConcreteDataType::Null(_) | ConcreteDataType::List(_) | ConcreteDataType::Dictionary(_) => {
ConcreteDataType::Null(_)
| ConcreteDataType::List(_)
| ConcreteDataType::Dictionary(_)
| ConcreteDataType::Duration(_) => {
unreachable!()
}
}
@@ -849,26 +796,6 @@ pub fn pb_values_to_values(data_type: &ConcreteDataType, values: Values) -> Vec<
))
})
.collect(),
ConcreteDataType::Duration(DurationType::Second(_)) => values
.duration_second_values
.into_iter()
.map(|v| Value::Duration(Duration::new_second(v)))
.collect(),
ConcreteDataType::Duration(DurationType::Millisecond(_)) => values
.duration_millisecond_values
.into_iter()
.map(|v| Value::Duration(Duration::new_millisecond(v)))
.collect(),
ConcreteDataType::Duration(DurationType::Microsecond(_)) => values
.duration_microsecond_values
.into_iter()
.map(|v| Value::Duration(Duration::new_microsecond(v)))
.collect(),
ConcreteDataType::Duration(DurationType::Nanosecond(_)) => values
.duration_nanosecond_values
.into_iter()
.map(|v| Value::Duration(Duration::new_nanosecond(v)))
.collect(),
ConcreteDataType::Decimal128(d) => values
.decimal128_values
.into_iter()
@@ -881,7 +808,10 @@ pub fn pb_values_to_values(data_type: &ConcreteDataType, values: Values) -> Vec<
))
})
.collect(),
ConcreteDataType::Null(_) | ConcreteDataType::List(_) | ConcreteDataType::Dictionary(_) => {
ConcreteDataType::Null(_)
| ConcreteDataType::List(_)
| ConcreteDataType::Dictionary(_)
| ConcreteDataType::Duration(_) => {
unreachable!()
}
}
@@ -993,24 +923,10 @@ pub fn to_proto_value(value: Value) -> Option<v1::Value> {
)),
},
},
Value::Duration(v) => match v.unit() {
TimeUnit::Second => v1::Value {
value_data: Some(ValueData::DurationSecondValue(v.value())),
},
TimeUnit::Millisecond => v1::Value {
value_data: Some(ValueData::DurationMillisecondValue(v.value())),
},
TimeUnit::Microsecond => v1::Value {
value_data: Some(ValueData::DurationMicrosecondValue(v.value())),
},
TimeUnit::Nanosecond => v1::Value {
value_data: Some(ValueData::DurationNanosecondValue(v.value())),
},
},
Value::Decimal128(v) => v1::Value {
value_data: Some(ValueData::Decimal128Value(convert_to_pb_decimal128(v))),
},
Value::List(_) => return None,
Value::List(_) | Value::Duration(_) => return None,
};
Some(proto_value)
@@ -1047,10 +963,6 @@ pub fn proto_value_type(value: &v1::Value) -> Option<ColumnDataType> {
ValueData::IntervalYearMonthValue(_) => ColumnDataType::IntervalYearMonth,
ValueData::IntervalDayTimeValue(_) => ColumnDataType::IntervalDayTime,
ValueData::IntervalMonthDayNanoValue(_) => ColumnDataType::IntervalMonthDayNano,
ValueData::DurationSecondValue(_) => ColumnDataType::DurationSecond,
ValueData::DurationMillisecondValue(_) => ColumnDataType::DurationMillisecond,
ValueData::DurationMicrosecondValue(_) => ColumnDataType::DurationMicrosecond,
ValueData::DurationNanosecondValue(_) => ColumnDataType::DurationNanosecond,
ValueData::Decimal128Value(_) => ColumnDataType::Decimal128,
};
Some(value_type)
@@ -1108,14 +1020,8 @@ pub fn value_to_grpc_value(value: Value) -> GrpcValue {
ValueData::IntervalMonthDayNanoValue(convert_i128_to_interval(v.to_i128()))
}
}),
Value::Duration(v) => Some(match v.unit() {
TimeUnit::Second => ValueData::DurationSecondValue(v.value()),
TimeUnit::Millisecond => ValueData::DurationMillisecondValue(v.value()),
TimeUnit::Microsecond => ValueData::DurationMicrosecondValue(v.value()),
TimeUnit::Nanosecond => ValueData::DurationNanosecondValue(v.value()),
}),
Value::Decimal128(v) => Some(ValueData::Decimal128Value(convert_to_pb_decimal128(v))),
Value::List(_) => unreachable!(),
Value::List(_) | Value::Duration(_) => unreachable!(),
},
}
}
@@ -1125,16 +1031,15 @@ mod tests {
use std::sync::Arc;
use datatypes::types::{
DurationMillisecondType, DurationSecondType, Int32Type, IntervalDayTimeType,
IntervalMonthDayNanoType, IntervalYearMonthType, TimeMillisecondType, TimeSecondType,
TimestampMillisecondType, TimestampSecondType, UInt32Type,
Int32Type, IntervalDayTimeType, IntervalMonthDayNanoType, IntervalYearMonthType,
TimeMillisecondType, TimeSecondType, TimestampMillisecondType, TimestampSecondType,
UInt32Type,
};
use datatypes::vectors::{
BooleanVector, DurationMicrosecondVector, DurationMillisecondVector,
DurationNanosecondVector, DurationSecondVector, IntervalDayTimeVector,
IntervalMonthDayNanoVector, IntervalYearMonthVector, TimeMicrosecondVector,
TimeMillisecondVector, TimeNanosecondVector, TimeSecondVector, TimestampMicrosecondVector,
TimestampMillisecondVector, TimestampNanosecondVector, TimestampSecondVector, Vector,
BooleanVector, IntervalDayTimeVector, IntervalMonthDayNanoVector, IntervalYearMonthVector,
TimeMicrosecondVector, TimeMillisecondVector, TimeNanosecondVector, TimeSecondVector,
TimestampMicrosecondVector, TimestampMillisecondVector, TimestampNanosecondVector,
TimestampSecondVector, Vector,
};
use paste::paste;
@@ -1210,10 +1115,6 @@ mod tests {
let values = values.interval_month_day_nano_values;
assert_eq!(2, values.capacity());
let values = values_with_capacity(ColumnDataType::DurationMillisecond, 2);
let values = values.duration_millisecond_values;
assert_eq!(2, values.capacity());
let values = values_with_capacity(ColumnDataType::Decimal128, 2);
let values = values.decimal128_values;
assert_eq!(2, values.capacity());
@@ -1301,10 +1202,6 @@ mod tests {
ConcreteDataType::interval_datatype(IntervalUnit::MonthDayNano),
ColumnDataTypeWrapper::interval_month_day_nano_datatype().into()
);
assert_eq!(
ConcreteDataType::duration_millisecond_datatype(),
ColumnDataTypeWrapper::duration_millisecond_datatype().into()
);
assert_eq!(
ConcreteDataType::decimal128_datatype(10, 2),
ColumnDataTypeWrapper::decimal128_datatype(10, 2).into()
@@ -1397,12 +1294,6 @@ mod tests {
.try_into()
.unwrap()
);
assert_eq!(
ColumnDataTypeWrapper::duration_millisecond_datatype(),
ConcreteDataType::duration_millisecond_datatype()
.try_into()
.unwrap()
);
assert_eq!(
ColumnDataTypeWrapper::decimal128_datatype(10, 2),
@@ -1556,48 +1447,6 @@ mod tests {
});
}
#[test]
fn test_column_put_duration_values() {
let mut column = Column {
column_name: "test".to_string(),
semantic_type: 0,
values: Some(Values {
..Default::default()
}),
null_mask: vec![],
datatype: 0,
..Default::default()
};
let vector = Arc::new(DurationNanosecondVector::from_vec(vec![1, 2, 3]));
push_vals(&mut column, 3, vector);
assert_eq!(
vec![1, 2, 3],
column.values.as_ref().unwrap().duration_nanosecond_values
);
let vector = Arc::new(DurationMicrosecondVector::from_vec(vec![7, 8, 9]));
push_vals(&mut column, 3, vector);
assert_eq!(
vec![7, 8, 9],
column.values.as_ref().unwrap().duration_microsecond_values
);
let vector = Arc::new(DurationMillisecondVector::from_vec(vec![4, 5, 6]));
push_vals(&mut column, 3, vector);
assert_eq!(
vec![4, 5, 6],
column.values.as_ref().unwrap().duration_millisecond_values
);
let vector = Arc::new(DurationSecondVector::from_vec(vec![10, 11, 12]));
push_vals(&mut column, 3, vector);
assert_eq!(
vec![10, 11, 12],
column.values.as_ref().unwrap().duration_second_values
);
}
#[test]
fn test_column_put_vector() {
use crate::v1::SemanticType;
@@ -1699,39 +1548,6 @@ mod tests {
assert_eq!(expect, actual);
}
#[test]
fn test_convert_duration_values() {
// second
let actual = pb_values_to_values(
&ConcreteDataType::Duration(DurationType::Second(DurationSecondType)),
Values {
duration_second_values: vec![1_i64, 2_i64, 3_i64],
..Default::default()
},
);
let expect = vec![
Value::Duration(Duration::new_second(1_i64)),
Value::Duration(Duration::new_second(2_i64)),
Value::Duration(Duration::new_second(3_i64)),
];
assert_eq!(expect, actual);
// millisecond
let actual = pb_values_to_values(
&ConcreteDataType::Duration(DurationType::Millisecond(DurationMillisecondType)),
Values {
duration_millisecond_values: vec![1_i64, 2_i64, 3_i64],
..Default::default()
},
);
let expect = vec![
Value::Duration(Duration::new_millisecond(1_i64)),
Value::Duration(Duration::new_millisecond(2_i64)),
Value::Duration(Duration::new_millisecond(3_i64)),
];
assert_eq!(expect, actual);
}
#[test]
fn test_convert_interval_values() {
// year_month

View File

@@ -21,6 +21,7 @@ pub mod prom_store {
}
}
pub mod region;
pub mod v1;
pub use greptime_proto;

42
src/api/src/region.rs Normal file
View File

@@ -0,0 +1,42 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::HashMap;
use common_base::AffectedRows;
use greptime_proto::v1::region::RegionResponse as RegionResponseV1;
/// This result struct is derived from [RegionResponseV1]
#[derive(Debug)]
pub struct RegionResponse {
pub affected_rows: AffectedRows,
pub extension: HashMap<String, Vec<u8>>,
}
impl RegionResponse {
pub fn from_region_response(region_response: RegionResponseV1) -> Self {
Self {
affected_rows: region_response.affected_rows as _,
extension: region_response.extension,
}
}
/// Creates one response without extension
pub fn new(affected_rows: AffectedRows) -> Self {
Self {
affected_rows,
extension: Default::default(),
}
}
}
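As a usage illustration (not from the diff above), converting a protobuf response into this struct might look as follows, assuming the crate is importable as `api` and that the prost-generated `RegionResponseV1` derives `Default`:

```rust
use api::region::RegionResponse;
use greptime_proto::v1::region::RegionResponse as RegionResponseV1;

fn main() {
    // Build a protobuf response; `..Default::default()` fills the remaining
    // generated fields so the example stays independent of the exact proto.
    let pb = RegionResponseV1 {
        affected_rows: 3,
        ..Default::default()
    };

    let resp = RegionResponse::from_region_response(pb);
    assert_eq!(resp.affected_rows, 3);
    assert!(resp.extension.is_empty());

    // Or construct one directly when no extension data is needed.
    let plain = RegionResponse::new(5);
    assert_eq!(plain.affected_rows, 5);
}
```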

View File

@@ -14,12 +14,12 @@ workspace = true
[dependencies]
api.workspace = true
async-trait.workspace = true
common-base.workspace = true
common-error.workspace = true
common-macro.workspace = true
common-telemetry.workspace = true
digest = "0.10"
notify.workspace = true
secrecy = { version = "0.8", features = ["serde", "alloc"] }
sha1 = "0.10"
snafu.workspace = true
sql.workspace = true

View File

@@ -14,8 +14,8 @@
use std::sync::Arc;
use common_base::secrets::SecretString;
use digest::Digest;
use secrecy::SecretString;
use sha1::Sha1;
use snafu::{ensure, OptionExt};

View File

@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use secrecy::ExposeSecret;
use common_base::secrets::ExposeSecret;
use crate::error::{
AccessDeniedSnafu, Result, UnsupportedPasswordTypeSnafu, UserNotFoundSnafu,
@@ -45,9 +45,9 @@ impl Default for MockUserProvider {
impl MockUserProvider {
pub fn set_authorization_info(&mut self, info: DatabaseAuthInfo) {
self.catalog = info.catalog.to_owned();
self.schema = info.schema.to_owned();
self.username = info.username.to_owned();
info.catalog.clone_into(&mut self.catalog);
info.schema.clone_into(&mut self.schema);
info.username.clone_into(&mut self.username);
}
}

View File

@@ -21,7 +21,7 @@ use std::io;
use std::io::BufRead;
use std::path::Path;
use secrecy::ExposeSecret;
use common_base::secrets::ExposeSecret;
use snafu::{ensure, OptionExt, ResultExt};
use crate::common::{Identity, Password};

View File

@@ -17,6 +17,7 @@ arrow-schema.workspace = true
async-stream.workspace = true
async-trait = "0.1"
common-catalog.workspace = true
common-config.workspace = true
common-error.workspace = true
common-macro.workspace = true
common-meta.workspace = true
@@ -30,6 +31,7 @@ datafusion.workspace = true
datatypes.workspace = true
futures = "0.3"
futures-util.workspace = true
humantime.workspace = true
itertools.workspace = true
lazy_static.workspace = true
meta-client.workspace = true

View File

@@ -49,6 +49,12 @@ pub enum Error {
source: BoxedError,
},
#[snafu(display("Failed to list nodes in cluster: {source}"))]
ListNodes {
location: Location,
source: BoxedError,
},
#[snafu(display("Failed to re-compile script due to internal error"))]
CompileScriptInternal {
location: Location,
@@ -216,7 +222,7 @@ pub enum Error {
},
#[snafu(display("Failed to perform metasrv operation"))]
MetaSrv {
Metasrv {
location: Location,
source: meta_client::error::Error,
},
@@ -294,6 +300,7 @@ impl ErrorExt for Error {
}
Error::ListCatalogs { source, .. }
| Error::ListNodes { source, .. }
| Error::ListSchemas { source, .. }
| Error::ListTables { source, .. } => source.status_code(),
@@ -304,7 +311,7 @@ impl ErrorExt for Error {
| Error::CreateTable { source, .. }
| Error::TableSchemaMismatch { source, .. } => source.status_code(),
Error::MetaSrv { source, .. } => source.status_code(),
Error::Metasrv { source, .. } => source.status_code(),
Error::SystemCatalogTableScan { source, .. } => source.status_code(),
Error::SystemCatalogTableScanExec { source, .. } => source.status_code(),
Error::InvalidTableInfoInCatalog { source, .. } => source.status_code(),

View File

@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
mod cluster_info;
pub mod columns;
pub mod key_column_usage;
mod memory_table;
@@ -20,8 +21,10 @@ mod predicate;
mod region_peers;
mod runtime_metrics;
pub mod schemata;
mod table_constraints;
mod table_names;
pub mod tables;
pub(crate) mod utils;
use std::collections::HashMap;
use std::sync::{Arc, Weak};
@@ -46,12 +49,14 @@ pub use table_names::*;
use self::columns::InformationSchemaColumns;
use crate::error::Result;
use crate::information_schema::cluster_info::InformationSchemaClusterInfo;
use crate::information_schema::key_column_usage::InformationSchemaKeyColumnUsage;
use crate::information_schema::memory_table::{get_schema_columns, MemoryTable};
use crate::information_schema::partitions::InformationSchemaPartitions;
use crate::information_schema::region_peers::InformationSchemaRegionPeers;
use crate::information_schema::runtime_metrics::InformationSchemaMetrics;
use crate::information_schema::schemata::InformationSchemaSchemata;
use crate::information_schema::table_constraints::InformationSchemaTableConstraints;
use crate::information_schema::tables::InformationSchemaTables;
use crate::CatalogManager;
@@ -148,6 +153,7 @@ impl InformationSchemaProvider {
fn build_tables(&mut self) {
let mut tables = HashMap::new();
// SECURITY NOTE:
// Carefully consider the tables that may expose sensitive cluster configurations,
// authentication details, and other critical information.
// Only put these tables under `greptime` catalog to prevent info leak.
@@ -164,6 +170,10 @@ impl InformationSchemaProvider {
REGION_PEERS.to_string(),
self.build_table(REGION_PEERS).unwrap(),
);
tables.insert(
CLUSTER_INFO.to_string(),
self.build_table(CLUSTER_INFO).unwrap(),
);
}
tables.insert(TABLES.to_string(), self.build_table(TABLES).unwrap());
@@ -173,6 +183,10 @@ impl InformationSchemaProvider {
KEY_COLUMN_USAGE.to_string(),
self.build_table(KEY_COLUMN_USAGE).unwrap(),
);
tables.insert(
TABLE_CONSTRAINTS.to_string(),
self.build_table(TABLE_CONSTRAINTS).unwrap(),
);
// Add memory tables
for name in MEMORY_TABLES.iter() {
@@ -241,6 +255,13 @@ impl InformationSchemaProvider {
self.catalog_name.clone(),
self.catalog_manager.clone(),
)) as _),
TABLE_CONSTRAINTS => Some(Arc::new(InformationSchemaTableConstraints::new(
self.catalog_name.clone(),
self.catalog_manager.clone(),
)) as _),
CLUSTER_INFO => Some(Arc::new(InformationSchemaClusterInfo::new(
self.catalog_manager.clone(),
)) as _),
_ => None,
}
}

View File

@@ -0,0 +1,317 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::sync::{Arc, Weak};
use std::time::Duration;
use arrow_schema::SchemaRef as ArrowSchemaRef;
use common_catalog::consts::INFORMATION_SCHEMA_CLUSTER_INFO_TABLE_ID;
use common_config::Mode;
use common_error::ext::BoxedError;
use common_meta::cluster::{ClusterInfo, NodeInfo, NodeStatus};
use common_meta::peer::Peer;
use common_query::physical_plan::TaskContext;
use common_recordbatch::adapter::RecordBatchStreamAdapter;
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
use common_telemetry::warn;
use common_time::timestamp::Timestamp;
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
use datatypes::prelude::{ConcreteDataType, ScalarVectorBuilder, VectorRef};
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
use datatypes::timestamp::TimestampMillisecond;
use datatypes::value::Value;
use datatypes::vectors::{
Int64VectorBuilder, StringVectorBuilder, TimestampMillisecondVectorBuilder,
};
use snafu::ResultExt;
use store_api::storage::{ScanRequest, TableId};
use super::CLUSTER_INFO;
use crate::error::{CreateRecordBatchSnafu, InternalSnafu, ListNodesSnafu, Result};
use crate::information_schema::{utils, InformationTable, Predicates};
use crate::CatalogManager;
const PEER_ID: &str = "peer_id";
const PEER_TYPE: &str = "peer_type";
const PEER_ADDR: &str = "peer_addr";
const VERSION: &str = "version";
const GIT_COMMIT: &str = "git_commit";
const START_TIME: &str = "start_time";
const UPTIME: &str = "uptime";
const ACTIVE_TIME: &str = "active_time";
const INIT_CAPACITY: usize = 42;
/// The `CLUSTER_INFO` table provides the current topology information of the cluster.
///
/// - `peer_id`: the peer server id.
/// - `peer_type`: the peer type, such as `datanode`, `frontend`, `metasrv` etc.
/// - `peer_addr`: the peer gRPC address.
/// - `version`: the build package version of the peer.
/// - `git_commit`: the build git commit hash of the peer.
/// - `start_time`: the starting time of the peer.
/// - `uptime`: the uptime of the peer.
/// - `active_time`: the time since the last activity of the peer.
///
pub(super) struct InformationSchemaClusterInfo {
schema: SchemaRef,
catalog_manager: Weak<dyn CatalogManager>,
start_time_ms: u64,
}
impl InformationSchemaClusterInfo {
pub(super) fn new(catalog_manager: Weak<dyn CatalogManager>) -> Self {
Self {
schema: Self::schema(),
catalog_manager,
start_time_ms: common_time::util::current_time_millis() as u64,
}
}
pub(crate) fn schema() -> SchemaRef {
Arc::new(Schema::new(vec![
ColumnSchema::new(PEER_ID, ConcreteDataType::int64_datatype(), false),
ColumnSchema::new(PEER_TYPE, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(PEER_ADDR, ConcreteDataType::string_datatype(), true),
ColumnSchema::new(VERSION, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(GIT_COMMIT, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(
START_TIME,
ConcreteDataType::timestamp_millisecond_datatype(),
true,
),
ColumnSchema::new(UPTIME, ConcreteDataType::string_datatype(), true),
ColumnSchema::new(ACTIVE_TIME, ConcreteDataType::string_datatype(), true),
]))
}
fn builder(&self) -> InformationSchemaClusterInfoBuilder {
InformationSchemaClusterInfoBuilder::new(
self.schema.clone(),
self.catalog_manager.clone(),
self.start_time_ms,
)
}
}
impl InformationTable for InformationSchemaClusterInfo {
fn table_id(&self) -> TableId {
INFORMATION_SCHEMA_CLUSTER_INFO_TABLE_ID
}
fn table_name(&self) -> &'static str {
CLUSTER_INFO
}
fn schema(&self) -> SchemaRef {
self.schema.clone()
}
fn to_stream(&self, request: ScanRequest) -> Result<SendableRecordBatchStream> {
let schema = self.schema.arrow_schema().clone();
let mut builder = self.builder();
let stream = Box::pin(DfRecordBatchStreamAdapter::new(
schema,
futures::stream::once(async move {
builder
.make_cluster_info(Some(request))
.await
.map(|x| x.into_df_record_batch())
.map_err(Into::into)
}),
));
Ok(Box::pin(
RecordBatchStreamAdapter::try_new(stream)
.map_err(BoxedError::new)
.context(InternalSnafu)?,
))
}
}
struct InformationSchemaClusterInfoBuilder {
schema: SchemaRef,
start_time_ms: u64,
catalog_manager: Weak<dyn CatalogManager>,
peer_ids: Int64VectorBuilder,
peer_types: StringVectorBuilder,
peer_addrs: StringVectorBuilder,
versions: StringVectorBuilder,
git_commits: StringVectorBuilder,
start_times: TimestampMillisecondVectorBuilder,
uptimes: StringVectorBuilder,
active_times: StringVectorBuilder,
}
impl InformationSchemaClusterInfoBuilder {
fn new(
schema: SchemaRef,
catalog_manager: Weak<dyn CatalogManager>,
start_time_ms: u64,
) -> Self {
Self {
schema,
catalog_manager,
peer_ids: Int64VectorBuilder::with_capacity(INIT_CAPACITY),
peer_types: StringVectorBuilder::with_capacity(INIT_CAPACITY),
peer_addrs: StringVectorBuilder::with_capacity(INIT_CAPACITY),
versions: StringVectorBuilder::with_capacity(INIT_CAPACITY),
git_commits: StringVectorBuilder::with_capacity(INIT_CAPACITY),
start_times: TimestampMillisecondVectorBuilder::with_capacity(INIT_CAPACITY),
uptimes: StringVectorBuilder::with_capacity(INIT_CAPACITY),
active_times: StringVectorBuilder::with_capacity(INIT_CAPACITY),
start_time_ms,
}
}
/// Construct the `information_schema.cluster_info` virtual table
async fn make_cluster_info(&mut self, request: Option<ScanRequest>) -> Result<RecordBatch> {
let predicates = Predicates::from_scan_request(&request);
let mode = utils::running_mode(&self.catalog_manager)?.unwrap_or(Mode::Standalone);
match mode {
Mode::Standalone => {
let build_info = common_version::build_info();
self.add_node_info(
&predicates,
NodeInfo {
// For the standalone:
// - id always 0
// - empty string for peer_addr
peer: Peer {
id: 0,
addr: "".to_string(),
},
last_activity_ts: -1,
status: NodeStatus::Standalone,
version: build_info.version.to_string(),
git_commit: build_info.commit_short.to_string(),
// Use `self.start_time_ms` instead.
// It's not precise but enough.
start_time_ms: self.start_time_ms,
},
);
}
Mode::Distributed => {
if let Some(meta_client) = utils::meta_client(&self.catalog_manager)? {
let node_infos = meta_client
.list_nodes(None)
.await
.map_err(BoxedError::new)
.context(ListNodesSnafu)?;
for node_info in node_infos {
self.add_node_info(&predicates, node_info);
}
} else {
warn!("Could not find meta client in distributed mode.");
}
}
}
self.finish()
}
fn add_node_info(&mut self, predicates: &Predicates, node_info: NodeInfo) {
let peer_type = node_info.status.role_name();
let row = [
(PEER_ID, &Value::from(node_info.peer.id)),
(PEER_TYPE, &Value::from(peer_type)),
(PEER_ADDR, &Value::from(node_info.peer.addr.as_str())),
(VERSION, &Value::from(node_info.version.as_str())),
(GIT_COMMIT, &Value::from(node_info.git_commit.as_str())),
];
if !predicates.eval(&row) {
return;
}
if peer_type == "FRONTEND" || peer_type == "METASRV" {
// Always set peer_id to be -1 for frontends and metasrvs
self.peer_ids.push(Some(-1));
} else {
self.peer_ids.push(Some(node_info.peer.id as i64));
}
self.peer_types.push(Some(peer_type));
self.peer_addrs.push(Some(&node_info.peer.addr));
self.versions.push(Some(&node_info.version));
self.git_commits.push(Some(&node_info.git_commit));
if node_info.start_time_ms > 0 {
self.start_times
.push(Some(TimestampMillisecond(Timestamp::new_millisecond(
node_info.start_time_ms as i64,
))));
self.uptimes.push(Some(
Self::format_duration_since(node_info.start_time_ms).as_str(),
));
} else {
self.start_times.push(None);
self.uptimes.push(None);
}
if node_info.last_activity_ts > 0 {
self.active_times.push(Some(
Self::format_duration_since(node_info.last_activity_ts as u64).as_str(),
));
} else {
self.active_times.push(None);
}
}
fn format_duration_since(ts: u64) -> String {
let now = common_time::util::current_time_millis() as u64;
let duration_since = now - ts;
humantime::format_duration(Duration::from_millis(duration_since)).to_string()
}
fn finish(&mut self) -> Result<RecordBatch> {
let columns: Vec<VectorRef> = vec![
Arc::new(self.peer_ids.finish()),
Arc::new(self.peer_types.finish()),
Arc::new(self.peer_addrs.finish()),
Arc::new(self.versions.finish()),
Arc::new(self.git_commits.finish()),
Arc::new(self.start_times.finish()),
Arc::new(self.uptimes.finish()),
Arc::new(self.active_times.finish()),
];
RecordBatch::new(self.schema.clone(), columns).context(CreateRecordBatchSnafu)
}
}
impl DfPartitionStream for InformationSchemaClusterInfo {
fn schema(&self) -> &ArrowSchemaRef {
self.schema.arrow_schema()
}
fn execute(&self, _: Arc<TaskContext>) -> DfSendableRecordBatchStream {
let schema = self.schema.arrow_schema().clone();
let mut builder = self.builder();
Box::pin(DfRecordBatchStreamAdapter::new(
schema,
futures::stream::once(async move {
builder
.make_cluster_info(None)
.await
.map(|x| x.into_df_record_batch())
.map_err(Into::into)
}),
))
}
}

View File

@@ -258,7 +258,7 @@ impl InformationSchemaColumnsBuilder {
let predicates = Predicates::from_scan_request(&request);
for schema_name in catalog_manager.schema_names(&catalog_name).await? {
let mut stream = catalog_manager.tables(&catalog_name, &schema_name).await;
let mut stream = catalog_manager.tables(&catalog_name, &schema_name);
while let Some(table) = stream.try_next().await? {
let keys = &table.table_info().meta.primary_key_indices;
@@ -274,8 +274,8 @@ impl InformationSchemaColumnsBuilder {
};
self.add_column(
idx,
&predicates,
idx,
&catalog_name,
&schema_name,
&table.table_info().name,
@@ -292,8 +292,8 @@ impl InformationSchemaColumnsBuilder {
#[allow(clippy::too_many_arguments)]
fn add_column(
&mut self,
index: usize,
predicates: &Predicates,
index: usize,
catalog_name: &str,
schema_name: &str,
table_name: &str,

View File

@@ -49,6 +49,11 @@ pub const COLUMN_NAME: &str = "column_name";
pub const ORDINAL_POSITION: &str = "ordinal_position";
const INIT_CAPACITY: usize = 42;
/// Primary key constraint name
pub(crate) const PRI_CONSTRAINT_NAME: &str = "PRIMARY";
/// Time index constraint name
pub(crate) const TIME_INDEX_CONSTRAINT_NAME: &str = "TIME INDEX";
/// The virtual table implementation for `information_schema.KEY_COLUMN_USAGE`.
pub(super) struct InformationSchemaKeyColumnUsage {
schema: SchemaRef,
@@ -232,7 +237,7 @@ impl InformationSchemaKeyColumnUsageBuilder {
self.add_key_column_usage(
&predicates,
&schema_name,
"TIME INDEX",
TIME_INDEX_CONSTRAINT_NAME,
&catalog_name,
&schema_name,
&table_name,
@@ -262,7 +267,7 @@ impl InformationSchemaKeyColumnUsageBuilder {
self.add_key_column_usage(
&predicates,
&schema_name,
"PRIMARY",
PRI_CONSTRAINT_NAME,
&catalog_name,
&schema_name,
&table_name,

View File

@@ -243,7 +243,6 @@ impl InformationSchemaPartitionsBuilder {
for schema_name in catalog_manager.schema_names(&catalog_name).await? {
let table_info_stream = catalog_manager
.tables(&catalog_name, &schema_name)
.await
.try_filter_map(|t| async move {
let table_info = t.table_info();
if table_info.table_type == TableType::Temporary {

View File

@@ -109,11 +109,7 @@ impl Predicate {
};
}
Predicate::Not(p) => {
let Some(b) = p.eval(row) else {
return None;
};
return Some(!b);
return Some(!p.eval(row)?);
}
}
@@ -125,13 +121,7 @@ impl Predicate {
fn from_expr(expr: DfExpr) -> Option<Predicate> {
match expr {
// NOT expr
DfExpr::Not(expr) => {
let Some(p) = Self::from_expr(*expr) else {
return None;
};
Some(Predicate::Not(Box::new(p)))
}
DfExpr::Not(expr) => Some(Predicate::Not(Box::new(Self::from_expr(*expr)?))),
// expr LIKE pattern
DfExpr::Like(Like {
negated,
@@ -178,25 +168,15 @@ impl Predicate {
}
// left AND right
(left, Operator::And, right) => {
let Some(left) = Self::from_expr(left) else {
return None;
};
let Some(right) = Self::from_expr(right) else {
return None;
};
let left = Self::from_expr(left)?;
let right = Self::from_expr(right)?;
Some(Predicate::And(Box::new(left), Box::new(right)))
}
// left OR right
(left, Operator::Or, right) => {
let Some(left) = Self::from_expr(left) else {
return None;
};
let Some(right) = Self::from_expr(right) else {
return None;
};
let left = Self::from_expr(left)?;
let right = Self::from_expr(right)?;
Some(Predicate::Or(Box::new(left), Box::new(right)))
}

View File

@@ -55,7 +55,7 @@ const INIT_CAPACITY: usize = 42;
///
/// - `region_id`: the region id
/// - `peer_id`: the region storage datanode peer id
/// - `peer_addr`: the region storage datanode peer address
/// - `peer_addr`: the region storage datanode gRPC peer address
/// - `is_leader`: whether the peer is the leader
/// - `status`: the region status, `ALIVE` or `DOWNGRADED`.
/// - `down_seconds`: the duration of being offline, in seconds.
@@ -179,7 +179,6 @@ impl InformationSchemaRegionPeersBuilder {
for schema_name in catalog_manager.schema_names(&catalog_name).await? {
let table_id_stream = catalog_manager
.tables(&catalog_name, &schema_name)
.await
.try_filter_map(|t| async move {
let table_info = t.table_info();
if table_info.table_type == TableType::Temporary {

View File

@@ -28,8 +28,8 @@ use datatypes::prelude::{ConcreteDataType, MutableVector};
use datatypes::scalars::ScalarVectorBuilder;
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
use datatypes::vectors::{
ConstantVector, Float64VectorBuilder, StringVector, StringVectorBuilder,
TimestampMillisecondVector, VectorRef,
ConstantVector, Float64VectorBuilder, StringVectorBuilder, TimestampMillisecondVector,
VectorRef,
};
use itertools::Itertools;
use snafu::ResultExt;
@@ -45,8 +45,8 @@ pub(super) struct InformationSchemaMetrics {
const METRIC_NAME: &str = "metric_name";
const METRIC_VALUE: &str = "value";
const METRIC_LABELS: &str = "labels";
const NODE: &str = "node";
const NODE_TYPE: &str = "node_type";
const PEER_ADDR: &str = "peer_addr";
const PEER_TYPE: &str = "peer_type";
const TIMESTAMP: &str = "timestamp";
/// The `information_schema.runtime_metrics` virtual table.
@@ -63,8 +63,8 @@ impl InformationSchemaMetrics {
ColumnSchema::new(METRIC_NAME, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(METRIC_VALUE, ConcreteDataType::float64_datatype(), false),
ColumnSchema::new(METRIC_LABELS, ConcreteDataType::string_datatype(), true),
ColumnSchema::new(NODE, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(NODE_TYPE, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(PEER_ADDR, ConcreteDataType::string_datatype(), true),
ColumnSchema::new(PEER_TYPE, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(
TIMESTAMP,
ConcreteDataType::timestamp_millisecond_datatype(),
@@ -104,6 +104,7 @@ impl InformationTable for InformationSchemaMetrics {
.map_err(Into::into)
}),
));
Ok(Box::pin(
RecordBatchStreamAdapter::try_new(stream)
.map_err(BoxedError::new)
@@ -118,6 +119,8 @@ struct InformationSchemaMetricsBuilder {
metric_names: StringVectorBuilder,
metric_values: Float64VectorBuilder,
metric_labels: StringVectorBuilder,
peer_addrs: StringVectorBuilder,
peer_types: StringVectorBuilder,
}
impl InformationSchemaMetricsBuilder {
@@ -127,13 +130,24 @@ impl InformationSchemaMetricsBuilder {
metric_names: StringVectorBuilder::with_capacity(42),
metric_values: Float64VectorBuilder::with_capacity(42),
metric_labels: StringVectorBuilder::with_capacity(42),
peer_addrs: StringVectorBuilder::with_capacity(42),
peer_types: StringVectorBuilder::with_capacity(42),
}
}
fn add_metric(&mut self, metric_name: &str, labels: String, metric_value: f64) {
fn add_metric(
&mut self,
metric_name: &str,
labels: String,
metric_value: f64,
peer: Option<&str>,
peer_type: &str,
) {
self.metric_names.push(Some(metric_name));
self.metric_values.push(Some(metric_value));
self.metric_labels.push(Some(&labels));
self.peer_addrs.push(peer);
self.peer_types.push(Some(peer_type));
}
async fn make_metrics(&mut self, _request: Option<ScanRequest>) -> Result<RecordBatch> {
@@ -170,18 +184,19 @@ impl InformationSchemaMetricsBuilder {
.join(", "),
// Safety: always has a sample
ts.samples[0].value,
// The peer column is always `None` for standalone
None,
"STANDALONE",
);
}
// FIXME(dennis): fetching other peers metrics
self.finish()
}
fn finish(&mut self) -> Result<RecordBatch> {
let rows_num = self.metric_names.len();
let unknowns = Arc::new(ConstantVector::new(
Arc::new(StringVector::from(vec!["unknown"])),
rows_num,
));
let timestamps = Arc::new(ConstantVector::new(
Arc::new(TimestampMillisecondVector::from_slice([
current_time_millis(),
@@ -193,9 +208,8 @@ impl InformationSchemaMetricsBuilder {
Arc::new(self.metric_names.finish()),
Arc::new(self.metric_values.finish()),
Arc::new(self.metric_labels.finish()),
// TODO(dennis): supports node and node_type for cluster
unknowns.clone(),
unknowns,
Arc::new(self.peer_addrs.finish()),
Arc::new(self.peer_types.finish()),
timestamps,
];
@@ -243,8 +257,8 @@ mod tests {
assert!(result_literal.contains(METRIC_NAME));
assert!(result_literal.contains(METRIC_VALUE));
assert!(result_literal.contains(METRIC_LABELS));
assert!(result_literal.contains(NODE));
assert!(result_literal.contains(NODE_TYPE));
assert!(result_literal.contains(PEER_ADDR));
assert!(result_literal.contains(PEER_TYPE));
assert!(result_literal.contains(TIMESTAMP));
}
}

View File

@@ -0,0 +1,286 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::sync::{Arc, Weak};
use arrow_schema::SchemaRef as ArrowSchemaRef;
use common_catalog::consts::INFORMATION_SCHEMA_TABLE_CONSTRAINTS_TABLE_ID;
use common_error::ext::BoxedError;
use common_query::physical_plan::TaskContext;
use common_recordbatch::adapter::RecordBatchStreamAdapter;
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
use datatypes::prelude::{ConcreteDataType, MutableVector};
use datatypes::scalars::ScalarVectorBuilder;
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
use datatypes::value::Value;
use datatypes::vectors::{ConstantVector, StringVector, StringVectorBuilder, VectorRef};
use futures::TryStreamExt;
use snafu::{OptionExt, ResultExt};
use store_api::storage::{ScanRequest, TableId};
use super::{InformationTable, TABLE_CONSTRAINTS};
use crate::error::{
CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
};
use crate::information_schema::key_column_usage::{
PRI_CONSTRAINT_NAME, TIME_INDEX_CONSTRAINT_NAME,
};
use crate::information_schema::Predicates;
use crate::CatalogManager;
/// The `TABLE_CONSTRAINTS` table describes which tables have constraints.
pub(super) struct InformationSchemaTableConstraints {
schema: SchemaRef,
catalog_name: String,
catalog_manager: Weak<dyn CatalogManager>,
}
const CONSTRAINT_CATALOG: &str = "constraint_catalog";
const CONSTRAINT_SCHEMA: &str = "constraint_schema";
const CONSTRAINT_NAME: &str = "constraint_name";
const TABLE_SCHEMA: &str = "table_schema";
const TABLE_NAME: &str = "table_name";
const CONSTRAINT_TYPE: &str = "constraint_type";
const ENFORCED: &str = "enforced";
const INIT_CAPACITY: usize = 42;
const TIME_INDEX_CONSTRAINT_TYPE: &str = "TIME INDEX";
const PRI_KEY_CONSTRAINT_TYPE: &str = "PRIMARY KEY";
impl InformationSchemaTableConstraints {
pub(super) fn new(catalog_name: String, catalog_manager: Weak<dyn CatalogManager>) -> Self {
Self {
schema: Self::schema(),
catalog_name,
catalog_manager,
}
}
fn schema() -> SchemaRef {
Arc::new(Schema::new(vec![
ColumnSchema::new(
CONSTRAINT_CATALOG,
ConcreteDataType::string_datatype(),
false,
),
ColumnSchema::new(
CONSTRAINT_SCHEMA,
ConcreteDataType::string_datatype(),
false,
),
ColumnSchema::new(CONSTRAINT_NAME, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(TABLE_SCHEMA, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(TABLE_NAME, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(CONSTRAINT_TYPE, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(ENFORCED, ConcreteDataType::string_datatype(), false),
]))
}
fn builder(&self) -> InformationSchemaTableConstraintsBuilder {
InformationSchemaTableConstraintsBuilder::new(
self.schema.clone(),
self.catalog_name.clone(),
self.catalog_manager.clone(),
)
}
}
impl InformationTable for InformationSchemaTableConstraints {
fn table_id(&self) -> TableId {
INFORMATION_SCHEMA_TABLE_CONSTRAINTS_TABLE_ID
}
fn table_name(&self) -> &'static str {
TABLE_CONSTRAINTS
}
fn schema(&self) -> SchemaRef {
self.schema.clone()
}
fn to_stream(&self, request: ScanRequest) -> Result<SendableRecordBatchStream> {
let schema = self.schema.arrow_schema().clone();
let mut builder = self.builder();
let stream = Box::pin(DfRecordBatchStreamAdapter::new(
schema,
futures::stream::once(async move {
builder
.make_table_constraints(Some(request))
.await
.map(|x| x.into_df_record_batch())
.map_err(Into::into)
}),
));
Ok(Box::pin(
RecordBatchStreamAdapter::try_new(stream)
.map_err(BoxedError::new)
.context(InternalSnafu)?,
))
}
}
struct InformationSchemaTableConstraintsBuilder {
schema: SchemaRef,
catalog_name: String,
catalog_manager: Weak<dyn CatalogManager>,
constraint_schemas: StringVectorBuilder,
constraint_names: StringVectorBuilder,
table_schemas: StringVectorBuilder,
table_names: StringVectorBuilder,
constraint_types: StringVectorBuilder,
}
impl InformationSchemaTableConstraintsBuilder {
fn new(
schema: SchemaRef,
catalog_name: String,
catalog_manager: Weak<dyn CatalogManager>,
) -> Self {
Self {
schema,
catalog_name,
catalog_manager,
constraint_schemas: StringVectorBuilder::with_capacity(INIT_CAPACITY),
constraint_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
table_schemas: StringVectorBuilder::with_capacity(INIT_CAPACITY),
table_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
constraint_types: StringVectorBuilder::with_capacity(INIT_CAPACITY),
}
}
/// Construct the `information_schema.table_constraints` virtual table
async fn make_table_constraints(
&mut self,
request: Option<ScanRequest>,
) -> Result<RecordBatch> {
let catalog_name = self.catalog_name.clone();
let catalog_manager = self
.catalog_manager
.upgrade()
.context(UpgradeWeakCatalogManagerRefSnafu)?;
let predicates = Predicates::from_scan_request(&request);
for schema_name in catalog_manager.schema_names(&catalog_name).await? {
let mut stream = catalog_manager.tables(&catalog_name, &schema_name);
while let Some(table) = stream.try_next().await? {
let keys = &table.table_info().meta.primary_key_indices;
let schema = table.schema();
if schema.timestamp_index().is_some() {
self.add_table_constraint(
&predicates,
&schema_name,
TIME_INDEX_CONSTRAINT_NAME,
&schema_name,
&table.table_info().name,
TIME_INDEX_CONSTRAINT_TYPE,
);
}
if !keys.is_empty() {
self.add_table_constraint(
&predicates,
&schema_name,
PRI_CONSTRAINT_NAME,
&schema_name,
&table.table_info().name,
PRI_KEY_CONSTRAINT_TYPE,
);
}
}
}
self.finish()
}
fn add_table_constraint(
&mut self,
predicates: &Predicates,
constraint_schema: &str,
constraint_name: &str,
table_schema: &str,
table_name: &str,
constraint_type: &str,
) {
let row = [
(CONSTRAINT_SCHEMA, &Value::from(constraint_schema)),
(CONSTRAINT_NAME, &Value::from(constraint_name)),
(TABLE_SCHEMA, &Value::from(table_schema)),
(TABLE_NAME, &Value::from(table_name)),
(CONSTRAINT_TYPE, &Value::from(constraint_type)),
];
if !predicates.eval(&row) {
return;
}
self.constraint_schemas.push(Some(constraint_schema));
self.constraint_names.push(Some(constraint_name));
self.table_schemas.push(Some(table_schema));
self.table_names.push(Some(table_name));
self.constraint_types.push(Some(constraint_type));
}
fn finish(&mut self) -> Result<RecordBatch> {
let rows_num = self.constraint_names.len();
let constraint_catalogs = Arc::new(ConstantVector::new(
Arc::new(StringVector::from(vec!["def"])),
rows_num,
));
let enforceds = Arc::new(ConstantVector::new(
Arc::new(StringVector::from(vec!["YES"])),
rows_num,
));
let columns: Vec<VectorRef> = vec![
constraint_catalogs,
Arc::new(self.constraint_schemas.finish()),
Arc::new(self.constraint_names.finish()),
Arc::new(self.table_schemas.finish()),
Arc::new(self.table_names.finish()),
Arc::new(self.constraint_types.finish()),
enforceds,
];
RecordBatch::new(self.schema.clone(), columns).context(CreateRecordBatchSnafu)
}
}
impl DfPartitionStream for InformationSchemaTableConstraints {
fn schema(&self) -> &ArrowSchemaRef {
self.schema.arrow_schema()
}
fn execute(&self, _: Arc<TaskContext>) -> DfSendableRecordBatchStream {
let schema = self.schema.arrow_schema().clone();
let mut builder = self.builder();
Box::pin(DfRecordBatchStreamAdapter::new(
schema,
futures::stream::once(async move {
builder
.make_table_constraints(None)
.await
.map(|x| x.into_df_record_batch())
.map_err(Into::into)
}),
))
}
}

View File

@@ -40,4 +40,6 @@ pub const GLOBAL_STATUS: &str = "global_status";
pub const SESSION_STATUS: &str = "session_status";
pub const RUNTIME_METRICS: &str = "runtime_metrics";
pub const PARTITIONS: &str = "partitions";
pub const REGION_PEERS: &str = "greptime_region_peers";
pub const REGION_PEERS: &str = "region_peers";
pub const TABLE_CONSTRAINTS: &str = "table_constraints";
pub const CLUSTER_INFO: &str = "cluster_info";

View File

@@ -161,7 +161,7 @@ impl InformationSchemaTablesBuilder {
let predicates = Predicates::from_scan_request(&request);
for schema_name in catalog_manager.schema_names(&catalog_name).await? {
let mut stream = catalog_manager.tables(&catalog_name, &schema_name).await;
let mut stream = catalog_manager.tables(&catalog_name, &schema_name);
while let Some(table) = stream.try_next().await? {
let table_info = table.table_info();

View File

@@ -0,0 +1,53 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::sync::{Arc, Weak};
use common_config::Mode;
use meta_client::client::MetaClient;
use snafu::OptionExt;
use crate::error::{Result, UpgradeWeakCatalogManagerRefSnafu};
use crate::kvbackend::KvBackendCatalogManager;
use crate::CatalogManager;
/// Try to get the server running mode from `[CatalogManager]` weak reference.
pub fn running_mode(catalog_manager: &Weak<dyn CatalogManager>) -> Result<Option<Mode>> {
let catalog_manager = catalog_manager
.upgrade()
.context(UpgradeWeakCatalogManagerRefSnafu)?;
Ok(catalog_manager
.as_any()
.downcast_ref::<KvBackendCatalogManager>()
.map(|manager| manager.running_mode())
.copied())
}
/// Try to get the `[MetaClient]` from `[CatalogManager]` weak reference.
pub fn meta_client(catalog_manager: &Weak<dyn CatalogManager>) -> Result<Option<Arc<MetaClient>>> {
let catalog_manager = catalog_manager
.upgrade()
.context(UpgradeWeakCatalogManagerRefSnafu)?;
let meta_client = match catalog_manager
.as_any()
.downcast_ref::<KvBackendCatalogManager>()
{
None => None,
Some(manager) => manager.meta_client(),
};
Ok(meta_client)
}

View File

@@ -17,7 +17,6 @@ use std::fmt::Debug;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::{Arc, Mutex};
use std::time::Duration;
use std::usize;
use common_error::ext::BoxedError;
use common_meta::cache_invalidator::KvCacheInvalidator;
@@ -506,32 +505,32 @@ mod tests {
}
async fn range(&self, _req: RangeRequest) -> Result<RangeResponse, Self::Error> {
todo!()
unimplemented!()
}
async fn batch_put(&self, _req: BatchPutRequest) -> Result<BatchPutResponse, Self::Error> {
todo!()
unimplemented!()
}
async fn compare_and_put(
&self,
_req: CompareAndPutRequest,
) -> Result<CompareAndPutResponse, Self::Error> {
todo!()
unimplemented!()
}
async fn delete_range(
&self,
_req: DeleteRangeRequest,
) -> Result<DeleteRangeResponse, Self::Error> {
todo!()
unimplemented!()
}
async fn batch_delete(
&self,
_req: BatchDeleteRequest,
) -> Result<BatchDeleteResponse, Self::Error> {
todo!()
unimplemented!()
}
}

View File

@@ -22,6 +22,7 @@ use common_catalog::consts::{
DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, INFORMATION_SCHEMA_NAME, NUMBERS_TABLE_ID,
};
use common_catalog::format_full_table_name;
use common_config::Mode;
use common_error::ext::BoxedError;
use common_meta::cache_invalidator::{CacheInvalidator, Context, MultiCacheInvalidator};
use common_meta::instruction::CacheIdent;
@@ -33,6 +34,7 @@ use common_meta::key::{TableMetadataManager, TableMetadataManagerRef};
use common_meta::kv_backend::KvBackendRef;
use futures_util::stream::BoxStream;
use futures_util::{StreamExt, TryStreamExt};
use meta_client::client::MetaClient;
use moka::future::{Cache as AsyncCache, CacheBuilder};
use moka::sync::Cache;
use partition::manager::{PartitionRuleManager, PartitionRuleManagerRef};
@@ -56,6 +58,8 @@ use crate::CatalogManager;
/// comes from `SystemCatalog`, which is static and read-only.
#[derive(Clone)]
pub struct KvBackendCatalogManager {
mode: Mode,
meta_client: Option<Arc<MetaClient>>,
partition_manager: PartitionRuleManagerRef,
table_metadata_manager: TableMetadataManagerRef,
/// A sub-CatalogManager that handles system tables
@@ -101,6 +105,8 @@ const TABLE_CACHE_TTI: Duration = Duration::from_secs(5 * 60);
impl KvBackendCatalogManager {
pub async fn new(
mode: Mode,
meta_client: Option<Arc<MetaClient>>,
backend: KvBackendRef,
multi_cache_invalidator: Arc<MultiCacheInvalidator>,
) -> Arc<Self> {
@@ -113,6 +119,8 @@ impl KvBackendCatalogManager {
.await;
Arc::new_cyclic(|me| Self {
mode,
meta_client,
partition_manager: Arc::new(PartitionRuleManager::new(backend.clone())),
table_metadata_manager: Arc::new(TableMetadataManager::new(backend)),
system_catalog: SystemCatalog {
@@ -127,6 +135,16 @@ impl KvBackendCatalogManager {
})
}
/// Returns the server running mode.
pub fn running_mode(&self) -> &Mode {
&self.mode
}
/// Returns the `[MetaClient]`.
pub fn meta_client(&self) -> Option<Arc<MetaClient>> {
self.meta_client.clone()
}
pub fn partition_manager(&self) -> PartitionRuleManagerRef {
self.partition_manager.clone()
}
@@ -283,11 +301,7 @@ impl CatalogManager for KvBackendCatalogManager {
})
}
async fn tables<'a>(
&'a self,
catalog: &'a str,
schema: &'a str,
) -> BoxStream<'a, Result<TableRef>> {
fn tables<'a>(&'a self, catalog: &'a str, schema: &'a str) -> BoxStream<'a, Result<TableRef>> {
let sys_tables = try_stream!({
// System tables
let sys_table_names = self.system_catalog.table_names(schema);

View File

@@ -59,11 +59,7 @@ pub trait CatalogManager: Send + Sync {
) -> Result<Option<TableRef>>;
/// Returns all tables with a stream by catalog and schema.
async fn tables<'a>(
&'a self,
catalog: &'a str,
schema: &'a str,
) -> BoxStream<'a, Result<TableRef>>;
fn tables<'a>(&'a self, catalog: &'a str, schema: &'a str) -> BoxStream<'a, Result<TableRef>>;
}
pub type CatalogManagerRef = Arc<dyn CatalogManager>;

View File

@@ -117,11 +117,7 @@ impl CatalogManager for MemoryCatalogManager {
Ok(result)
}
async fn tables<'a>(
&'a self,
catalog: &'a str,
schema: &'a str,
) -> BoxStream<'a, Result<TableRef>> {
fn tables<'a>(&'a self, catalog: &'a str, schema: &'a str) -> BoxStream<'a, Result<TableRef>> {
let catalogs = self.catalogs.read().unwrap();
let Some(schemas) = catalogs.get(catalog) else {
@@ -141,11 +137,11 @@ impl CatalogManager for MemoryCatalogManager {
let tables = tables.values().cloned().collect::<Vec<_>>();
return Box::pin(try_stream!({
Box::pin(try_stream!({
for table in tables {
yield table;
}
}));
}))
}
}
@@ -368,9 +364,7 @@ mod tests {
.await
.unwrap()
.unwrap();
let stream = catalog_list
.tables(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME)
.await;
let stream = catalog_list.tables(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME);
let tables = stream.try_collect::<Vec<_>>().await.unwrap();
assert_eq!(tables.len(), 1);
assert_eq!(

View File

@@ -49,10 +49,7 @@ impl DfTableSourceProvider {
}
}
pub fn resolve_table_ref<'a>(
&'a self,
table_ref: TableReference<'a>,
) -> Result<ResolvedTableReference<'a>> {
pub fn resolve_table_ref(&self, table_ref: TableReference) -> Result<ResolvedTableReference> {
if self.disallow_cross_catalog_query {
match &table_ref {
TableReference::Bare { .. } => (),
@@ -76,7 +73,7 @@ impl DfTableSourceProvider {
pub async fn resolve_table(
&mut self,
table_ref: TableReference<'_>,
table_ref: TableReference,
) -> Result<Arc<dyn TableSource>> {
let table_ref = self.resolve_table_ref(table_ref)?;
@@ -106,8 +103,6 @@ impl DfTableSourceProvider {
#[cfg(test)]
mod tests {
use std::borrow::Cow;
use session::context::QueryContext;
use super::*;
@@ -120,68 +115,37 @@ mod tests {
let table_provider =
DfTableSourceProvider::new(MemoryCatalogManager::with_default_setup(), true, query_ctx);
let table_ref = TableReference::Bare {
table: Cow::Borrowed("table_name"),
};
let table_ref = TableReference::bare("table_name");
let result = table_provider.resolve_table_ref(table_ref);
assert!(result.is_ok());
let table_ref = TableReference::Partial {
schema: Cow::Borrowed("public"),
table: Cow::Borrowed("table_name"),
};
let table_ref = TableReference::partial("public", "table_name");
let result = table_provider.resolve_table_ref(table_ref);
assert!(result.is_ok());
let table_ref = TableReference::Partial {
schema: Cow::Borrowed("wrong_schema"),
table: Cow::Borrowed("table_name"),
};
let table_ref = TableReference::partial("wrong_schema", "table_name");
let result = table_provider.resolve_table_ref(table_ref);
assert!(result.is_ok());
let table_ref = TableReference::Full {
catalog: Cow::Borrowed("greptime"),
schema: Cow::Borrowed("public"),
table: Cow::Borrowed("table_name"),
};
let table_ref = TableReference::full("greptime", "public", "table_name");
let result = table_provider.resolve_table_ref(table_ref);
assert!(result.is_ok());
let table_ref = TableReference::Full {
catalog: Cow::Borrowed("wrong_catalog"),
schema: Cow::Borrowed("public"),
table: Cow::Borrowed("table_name"),
};
let table_ref = TableReference::full("wrong_catalog", "public", "table_name");
let result = table_provider.resolve_table_ref(table_ref);
assert!(result.is_err());
let table_ref = TableReference::Partial {
schema: Cow::Borrowed("information_schema"),
table: Cow::Borrowed("columns"),
};
let table_ref = TableReference::partial("information_schema", "columns");
let result = table_provider.resolve_table_ref(table_ref);
assert!(result.is_ok());
let table_ref = TableReference::Full {
catalog: Cow::Borrowed("greptime"),
schema: Cow::Borrowed("information_schema"),
table: Cow::Borrowed("columns"),
};
let table_ref = TableReference::full("greptime", "information_schema", "columns");
assert!(table_provider.resolve_table_ref(table_ref).is_ok());
let table_ref = TableReference::Full {
catalog: Cow::Borrowed("dummy"),
schema: Cow::Borrowed("information_schema"),
table: Cow::Borrowed("columns"),
};
let table_ref = TableReference::full("dummy", "information_schema", "columns");
assert!(table_provider.resolve_table_ref(table_ref).is_err());
let table_ref = TableReference::Full {
catalog: Cow::Borrowed("greptime"),
schema: Cow::Borrowed("greptime_private"),
table: Cow::Borrowed("columns"),
};
let table_ref = TableReference::full("greptime", "greptime_private", "columns");
assert!(table_provider.resolve_table_ref(table_ref).is_ok());
}
}

View File
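
The test changes above swap hand-built `TableReference` enum variants with `Cow` fields for DataFusion's constructor helpers. A standalone sketch of the new style, assuming the `datafusion_common::TableReference` path:

use datafusion_common::TableReference;

fn main() {
    // Constructor helpers used by the updated tests, instead of spelling out the
    // enum variants by hand with `Cow::Borrowed` fields:
    let bare = TableReference::bare("table_name");
    let partial = TableReference::partial("public", "table_name");
    let full = TableReference::full("greptime", "public", "table_name");
    println!("{bare}, {partial}, {full}");
}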

@@ -1,115 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use api::v1::{ColumnDataType, ColumnDef, CreateTableExpr, SemanticType, TableId};
use client::{Client, Database};
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, MITO_ENGINE};
use prost::Message;
use substrait_proto::proto::plan_rel::RelType as PlanRelType;
use substrait_proto::proto::read_rel::{NamedTable, ReadType};
use substrait_proto::proto::rel::RelType;
use substrait_proto::proto::{PlanRel, ReadRel, Rel};
use tracing::{event, Level};
fn main() {
tracing::subscriber::set_global_default(tracing_subscriber::FmtSubscriber::builder().finish())
.unwrap();
run();
}
#[tokio::main]
async fn run() {
let client = Client::with_urls(vec!["127.0.0.1:3001"]);
let create_table_expr = CreateTableExpr {
catalog_name: "greptime".to_string(),
schema_name: "public".to_string(),
table_name: "test_logical_dist_exec".to_string(),
desc: String::default(),
column_defs: vec![
ColumnDef {
name: "timestamp".to_string(),
data_type: ColumnDataType::TimestampMillisecond as i32,
is_nullable: false,
default_constraint: vec![],
semantic_type: SemanticType::Timestamp as i32,
comment: String::new(),
..Default::default()
},
ColumnDef {
name: "key".to_string(),
data_type: ColumnDataType::Uint64 as i32,
is_nullable: false,
default_constraint: vec![],
semantic_type: SemanticType::Tag as i32,
comment: String::new(),
..Default::default()
},
ColumnDef {
name: "value".to_string(),
data_type: ColumnDataType::Uint64 as i32,
is_nullable: false,
default_constraint: vec![],
semantic_type: SemanticType::Field as i32,
comment: String::new(),
..Default::default()
},
],
time_index: "timestamp".to_string(),
primary_keys: vec!["key".to_string()],
create_if_not_exists: false,
table_options: Default::default(),
table_id: Some(TableId { id: 1024 }),
engine: MITO_ENGINE.to_string(),
};
let db = Database::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, client);
let result = db.create(create_table_expr).await.unwrap();
event!(Level::INFO, "create table result: {:#?}", result);
let logical = mock_logical_plan();
event!(Level::INFO, "plan size: {:#?}", logical.len());
let result = db.logical_plan(logical).await.unwrap();
event!(Level::INFO, "result: {:#?}", result);
}
fn mock_logical_plan() -> Vec<u8> {
let catalog_name = "greptime".to_string();
let schema_name = "public".to_string();
let table_name = "test_logical_dist_exec".to_string();
let named_table = NamedTable {
names: vec![catalog_name, schema_name, table_name],
advanced_extension: None,
};
let read_type = ReadType::NamedTable(named_table);
let read_rel = ReadRel {
read_type: Some(read_type),
..Default::default()
};
let mut buf = vec![];
let rel = Rel {
rel_type: Some(RelType::Read(Box::new(read_rel))),
};
let plan_rel = PlanRel {
rel_type: Some(PlanRelType::Rel(rel)),
};
plan_rel.encode(&mut buf).unwrap();
buf
}

View File

@@ -1,181 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use api::v1::*;
use client::{Client, Database, DEFAULT_SCHEMA_NAME};
use derive_new::new;
use tracing::{error, info};
fn main() {
tracing::subscriber::set_global_default(tracing_subscriber::FmtSubscriber::builder().finish())
.unwrap();
run();
}
#[tokio::main]
async fn run() {
let greptimedb_endpoint =
std::env::var("GREPTIMEDB_ENDPOINT").unwrap_or_else(|_| "localhost:4001".to_owned());
let greptimedb_dbname =
std::env::var("GREPTIMEDB_DBNAME").unwrap_or_else(|_| DEFAULT_SCHEMA_NAME.to_owned());
let grpc_client = Client::with_urls(vec![&greptimedb_endpoint]);
let client = Database::new_with_dbname(greptimedb_dbname, grpc_client);
let stream_inserter = client.streaming_inserter().unwrap();
if let Err(e) = stream_inserter
.insert(vec![to_insert_request(weather_records_1())])
.await
{
error!("Error: {e:?}");
}
if let Err(e) = stream_inserter
.insert(vec![to_insert_request(weather_records_2())])
.await
{
error!("Error: {e:?}");
}
let result = stream_inserter.finish().await;
match result {
Ok(rows) => {
info!("Rows written: {rows}");
}
Err(e) => {
error!("Error: {e:?}");
}
};
}
#[derive(new)]
struct WeatherRecord {
timestamp_millis: i64,
collector: String,
temperature: f32,
humidity: i32,
}
fn weather_records_1() -> Vec<WeatherRecord> {
vec![
WeatherRecord::new(1686109527000, "c1".to_owned(), 26.4, 15),
WeatherRecord::new(1686023127000, "c1".to_owned(), 29.3, 20),
WeatherRecord::new(1685936727000, "c1".to_owned(), 31.8, 13),
WeatherRecord::new(1686109527000, "c2".to_owned(), 20.4, 67),
WeatherRecord::new(1686023127000, "c2".to_owned(), 18.0, 74),
WeatherRecord::new(1685936727000, "c2".to_owned(), 19.2, 81),
]
}
fn weather_records_2() -> Vec<WeatherRecord> {
vec![
WeatherRecord::new(1686109527001, "c3".to_owned(), 26.4, 15),
WeatherRecord::new(1686023127002, "c3".to_owned(), 29.3, 20),
WeatherRecord::new(1685936727003, "c3".to_owned(), 31.8, 13),
WeatherRecord::new(1686109527004, "c4".to_owned(), 20.4, 67),
WeatherRecord::new(1686023127005, "c4".to_owned(), 18.0, 74),
WeatherRecord::new(1685936727006, "c4".to_owned(), 19.2, 81),
]
}
/// This function generates some random data and bundle them into a
/// `InsertRequest`.
///
/// Data structure:
///
/// - `ts`: a timestamp column
/// - `collector`: a tag column
/// - `temperature`: a value field of f32
/// - `humidity`: a value field of i32
///
fn to_insert_request(records: Vec<WeatherRecord>) -> InsertRequest {
// convert records into columns
let rows = records.len();
// transpose records into columns
let (timestamp_millis, collectors, temp, humidity) = records.into_iter().fold(
(
Vec::with_capacity(rows),
Vec::with_capacity(rows),
Vec::with_capacity(rows),
Vec::with_capacity(rows),
),
|mut acc, rec| {
acc.0.push(rec.timestamp_millis);
acc.1.push(rec.collector);
acc.2.push(rec.temperature);
acc.3.push(rec.humidity);
acc
},
);
let columns = vec![
// timestamp column: `ts`
Column {
column_name: "ts".to_owned(),
values: Some(column::Values {
timestamp_millisecond_values: timestamp_millis,
..Default::default()
}),
semantic_type: SemanticType::Timestamp as i32,
datatype: ColumnDataType::TimestampMillisecond as i32,
..Default::default()
},
// tag column: collectors
Column {
column_name: "collector".to_owned(),
values: Some(column::Values {
string_values: collectors.into_iter().collect(),
..Default::default()
}),
semantic_type: SemanticType::Tag as i32,
datatype: ColumnDataType::String as i32,
..Default::default()
},
// field column: temperature
Column {
column_name: "temperature".to_owned(),
values: Some(column::Values {
f32_values: temp,
..Default::default()
}),
semantic_type: SemanticType::Field as i32,
datatype: ColumnDataType::Float32 as i32,
..Default::default()
},
// field column: humidity
Column {
column_name: "humidity".to_owned(),
values: Some(column::Values {
i32_values: humidity,
..Default::default()
}),
semantic_type: SemanticType::Field as i32,
datatype: ColumnDataType::Int32 as i32,
..Default::default()
},
];
InsertRequest {
table_name: "weather_demo".to_owned(),
columns,
row_count: rows as u32,
}
}

View File

@@ -14,7 +14,6 @@
use std::sync::Arc;
use api::v1::greptime_database_client::GreptimeDatabaseClient;
use api::v1::health_check_client::HealthCheckClient;
use api::v1::prometheus_gateway_client::PrometheusGatewayClient;
use api::v1::region::region_client::RegionClient as PbRegionClient;
@@ -28,21 +27,17 @@ use tonic::transport::Channel;
use crate::load_balance::{LoadBalance, Loadbalancer};
use crate::{error, Result};
pub(crate) struct DatabaseClient {
pub(crate) inner: GreptimeDatabaseClient<Channel>,
}
pub(crate) struct FlightClient {
pub struct FlightClient {
addr: String,
client: FlightServiceClient<Channel>,
}
impl FlightClient {
pub(crate) fn addr(&self) -> &str {
pub fn addr(&self) -> &str {
&self.addr
}
pub(crate) fn mut_inner(&mut self) -> &mut FlightServiceClient<Channel> {
pub fn mut_inner(&mut self) -> &mut FlightServiceClient<Channel> {
&mut self.client
}
}
@@ -138,7 +133,7 @@ impl Client {
Ok((addr, channel))
}
fn max_grpc_recv_message_size(&self) -> usize {
pub fn max_grpc_recv_message_size(&self) -> usize {
self.inner
.channel_manager
.config()
@@ -146,7 +141,7 @@ impl Client {
.as_bytes() as usize
}
fn max_grpc_send_message_size(&self) -> usize {
pub fn max_grpc_send_message_size(&self) -> usize {
self.inner
.channel_manager
.config()
@@ -154,7 +149,7 @@ impl Client {
.as_bytes() as usize
}
pub(crate) fn make_flight_client(&self) -> Result<FlightClient> {
pub fn make_flight_client(&self) -> Result<FlightClient> {
let (addr, channel) = self.find_channel()?;
Ok(FlightClient {
addr,
@@ -164,15 +159,6 @@ impl Client {
})
}
pub(crate) fn make_database_client(&self) -> Result<DatabaseClient> {
let (_, channel) = self.find_channel()?;
Ok(DatabaseClient {
inner: GreptimeDatabaseClient::new(channel)
.max_decoding_message_size(self.max_grpc_recv_message_size())
.max_encoding_message_size(self.max_grpc_send_message_size()),
})
}
pub(crate) fn raw_region_client(&self) -> Result<PbRegionClient<Channel>> {
let (_, channel) = self.find_channel()?;
Ok(PbRegionClient::new(channel)

View File
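
With `FlightClient`, `addr`, `mut_inner`, and `make_flight_client` promoted to `pub`, other crates can obtain a raw Flight client from a `Client`. A hedged sketch: the address is a placeholder, and the returned value is left unnamed because the diff does not show whether `FlightClient` is re-exported from the crate root.

use client::Client;

fn flight_demo() -> client::Result<()> {
    // Placeholder address.
    let client = Client::with_urls(vec!["127.0.0.1:4001"]);
    // Callable outside the `client` crate now that the method is `pub`.
    let mut flight = client.make_flight_client()?;
    println!("flight channel targets {}", flight.addr());
    let _raw = flight.mut_inner(); // &mut FlightServiceClient<Channel> for raw Flight calls
    Ok(())
}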

@@ -17,7 +17,7 @@ use std::sync::Arc;
use std::time::Duration;
use common_grpc::channel_manager::{ChannelConfig, ChannelManager};
use common_meta::datanode_manager::{Datanode, DatanodeManager};
use common_meta::node_manager::{DatanodeRef, FlownodeRef, NodeManager};
use common_meta::peer::Peer;
use moka::future::{Cache, CacheBuilder};
@@ -44,12 +44,17 @@ impl Debug for DatanodeClients {
}
#[async_trait::async_trait]
impl DatanodeManager for DatanodeClients {
async fn datanode(&self, datanode: &Peer) -> Arc<dyn Datanode> {
impl NodeManager for DatanodeClients {
async fn datanode(&self, datanode: &Peer) -> DatanodeRef {
let client = self.get_client(datanode).await;
Arc::new(RegionRequester::new(client))
}
async fn flownode(&self, _node: &Peer) -> FlownodeRef {
// TODO(weny): Support it.
unimplemented!()
}
}
impl DatanodeClients {

View File
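
The hunk above merges datanode and flownode lookup into the `NodeManager` trait, with `flownode` still `unimplemented!()`. A minimal sketch of the caller side, with illustrative names:

use common_meta::node_manager::NodeManager;
use common_meta::peer::Peer;

// `node_manager` could be `DatanodeClients` behind the trait object; names are illustrative.
async fn resolve_datanode(node_manager: &dyn NodeManager, peer: &Peer) {
    // Returns a DatanodeRef (Arc<dyn Datanode>) backed by a RegionRequester.
    let _datanode = node_manager.datanode(peer).await;
    // `node_manager.flownode(peer).await` is on the same trait, but this impl
    // still panics with `unimplemented!()`, as the TODO above notes.
}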

@@ -14,12 +14,10 @@
mod client;
pub mod client_manager;
mod database;
pub mod error;
pub mod load_balance;
mod metrics;
pub mod region;
mod stream_insert;
pub use api;
use api::v1::greptime_response::Response;
@@ -31,9 +29,7 @@ pub use common_recordbatch::{RecordBatches, SendableRecordBatchStream};
use snafu::OptionExt;
pub use self::client::Client;
pub use self::database::Database;
pub use self::error::{Error, Result};
pub use self::stream_insert::StreamInserter;
use crate::error::{IllegalDatabaseResponseSnafu, ServerSnafu};
pub fn from_grpc_response(response: GreptimeResponse) -> Result<u32> {

View File

@@ -14,6 +14,7 @@
use std::sync::Arc;
use api::region::RegionResponse;
use api::v1::region::{QueryRequest, RegionRequest};
use api::v1::ResponseHeader;
use arc_swap::ArcSwapOption;
@@ -23,8 +24,8 @@ use async_trait::async_trait;
use common_error::ext::{BoxedError, ErrorExt};
use common_error::status_code::StatusCode;
use common_grpc::flight::{FlightDecoder, FlightMessage};
use common_meta::datanode_manager::{Datanode, HandleResponse};
use common_meta::error::{self as meta_error, Result as MetaResult};
use common_meta::node_manager::Datanode;
use common_recordbatch::error::ExternalSnafu;
use common_recordbatch::{RecordBatchStreamWrapper, SendableRecordBatchStream};
use common_telemetry::error;
@@ -46,7 +47,7 @@ pub struct RegionRequester {
#[async_trait]
impl Datanode for RegionRequester {
async fn handle(&self, request: RegionRequest) -> MetaResult<HandleResponse> {
async fn handle(&self, request: RegionRequest) -> MetaResult<RegionResponse> {
self.handle_inner(request).await.map_err(|err| {
if err.should_retry() {
meta_error::Error::RetryLater {
@@ -165,7 +166,7 @@ impl RegionRequester {
Ok(Box::pin(record_batch_stream))
}
async fn handle_inner(&self, request: RegionRequest) -> Result<HandleResponse> {
async fn handle_inner(&self, request: RegionRequest) -> Result<RegionResponse> {
let request_type = request
.body
.as_ref()
@@ -194,10 +195,10 @@ impl RegionRequester {
check_response_header(&response.header)?;
Ok(HandleResponse::from_region_response(response))
Ok(RegionResponse::from_region_response(response))
}
pub async fn handle(&self, request: RegionRequest) -> Result<HandleResponse> {
pub async fn handle(&self, request: RegionRequest) -> Result<RegionResponse> {
self.handle_inner(request).await
}
}

View File
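
On the caller side of the hunk above, `handle` now yields `api::region::RegionResponse` instead of the removed `HandleResponse` wrapper. A small illustrative sketch; how the `RegionRequester` is constructed is out of scope here:

use api::v1::region::RegionRequest;
use client::region::RegionRequester;
use client::Result;

// Illustrative only: `requester` would be created from a `Client` elsewhere.
async fn send_region_request(requester: &RegionRequester, request: RegionRequest) -> Result<()> {
    // The public `handle` now returns `api::region::RegionResponse` rather than
    // the removed `HandleResponse` from common_meta.
    let _response = requester.handle(request).await?;
    Ok(())
}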

@@ -1,118 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use api::v1::greptime_database_client::GreptimeDatabaseClient;
use api::v1::greptime_request::Request;
use api::v1::{
AuthHeader, GreptimeRequest, GreptimeResponse, InsertRequest, InsertRequests, RequestHeader,
RowInsertRequest, RowInsertRequests,
};
use tokio::sync::mpsc;
use tokio::task::JoinHandle;
use tokio_stream::wrappers::ReceiverStream;
use tonic::transport::Channel;
use tonic::{Response, Status};
use crate::error::{self, Result};
use crate::from_grpc_response;
/// A structure that provides some methods for streaming data insert.
///
/// [`StreamInserter`] cannot be constructed via the `StreamInserter::new` method.
/// You can use the following way to obtain [`StreamInserter`].
///
/// ```ignore
/// let grpc_client = Client::with_urls(vec!["127.0.0.1:4002"]);
/// let client = Database::new_with_dbname("db_name", grpc_client);
/// let stream_inserter = client.streaming_inserter().unwrap();
/// ```
///
/// If you want to see a concrete usage example, please see
/// [stream_inserter.rs](https://github.com/GreptimeTeam/greptimedb/blob/main/src/client/examples/stream_ingest.rs).
pub struct StreamInserter {
sender: mpsc::Sender<GreptimeRequest>,
auth_header: Option<AuthHeader>,
dbname: String,
join: JoinHandle<std::result::Result<Response<GreptimeResponse>, Status>>,
}
impl StreamInserter {
pub(crate) fn new(
mut client: GreptimeDatabaseClient<Channel>,
dbname: String,
auth_header: Option<AuthHeader>,
channel_size: usize,
) -> StreamInserter {
let (send, recv) = tokio::sync::mpsc::channel(channel_size);
let join: JoinHandle<std::result::Result<Response<GreptimeResponse>, Status>> =
tokio::spawn(async move {
let recv_stream = ReceiverStream::new(recv);
client.handle_requests(recv_stream).await
});
StreamInserter {
sender: send,
auth_header,
dbname,
join,
}
}
pub async fn insert(&self, requests: Vec<InsertRequest>) -> Result<()> {
let inserts = InsertRequests { inserts: requests };
let request = self.to_rpc_request(Request::Inserts(inserts));
self.sender.send(request).await.map_err(|e| {
error::ClientStreamingSnafu {
err_msg: e.to_string(),
}
.build()
})
}
pub async fn row_insert(&self, requests: Vec<RowInsertRequest>) -> Result<()> {
let inserts = RowInsertRequests { inserts: requests };
let request = self.to_rpc_request(Request::RowInserts(inserts));
self.sender.send(request).await.map_err(|e| {
error::ClientStreamingSnafu {
err_msg: e.to_string(),
}
.build()
})
}
pub async fn finish(self) -> Result<u32> {
drop(self.sender);
let response = self.join.await.unwrap()?;
let response = response.into_inner();
from_grpc_response(response)
}
fn to_rpc_request(&self, request: Request) -> GreptimeRequest {
GreptimeRequest {
header: Some(RequestHeader {
authorization: self.auth_header.clone(),
dbname: self.dbname.clone(),
..Default::default()
}),
request: Some(request),
}
}
}

View File

@@ -18,6 +18,7 @@ workspace = true
[dependencies]
async-trait.workspace = true
auth.workspace = true
base64.workspace = true
catalog.workspace = true
chrono.workspace = true
clap.workspace = true
@@ -36,6 +37,7 @@ common-telemetry = { workspace = true, features = [
"deadlock_detection",
] }
common-time.workspace = true
common-version.workspace = true
common-wal.workspace = true
config = "0.13"
datanode.workspace = true
@@ -43,6 +45,7 @@ datatypes.workspace = true
either = "1.8"
etcd-client.workspace = true
file-engine.workspace = true
flow.workspace = true
frontend.workspace = true
futures.workspace = true
human-panic = "1.2.2"
@@ -57,6 +60,7 @@ prost.workspace = true
query.workspace = true
rand.workspace = true
regex.workspace = true
reqwest.workspace = true
rustyline = "10.1"
serde.workspace = true
serde_json.workspace = true
@@ -76,6 +80,7 @@ tikv-jemallocator = "0.5"
common-test-util.workspace = true
serde.workspace = true
temp-env = "0.3"
tempfile.workspace = true
[target.'cfg(not(windows))'.dev-dependencies]
rexpect = "0.5"

View File

@@ -16,23 +16,42 @@
use std::fmt;
use clap::{FromArgMatches, Parser, Subcommand};
use clap::{Parser, Subcommand};
use cmd::error::Result;
use cmd::options::{CliOptions, Options};
use cmd::{
cli, datanode, frontend, greptimedb_cli, log_versions, metasrv, standalone, start_app, App,
};
use cmd::options::{GlobalOptions, Options};
use cmd::{cli, datanode, frontend, log_versions, metasrv, standalone, start_app, App};
use common_version::{short_version, version};
#[derive(Parser)]
#[command(name = "greptime", author, version, long_version = version!(), about)]
#[command(propagate_version = true)]
pub(crate) struct Command {
#[clap(subcommand)]
pub(crate) subcmd: SubCommand,
#[clap(flatten)]
pub(crate) global_options: GlobalOptions,
}
#[derive(Subcommand)]
enum SubCommand {
/// Start datanode service.
#[clap(name = "datanode")]
Datanode(datanode::Command),
/// Start frontend service.
#[clap(name = "frontend")]
Frontend(frontend::Command),
/// Start metasrv service.
#[clap(name = "metasrv")]
Metasrv(metasrv::Command),
/// Run greptimedb as a standalone service.
#[clap(name = "standalone")]
Standalone(standalone::Command),
/// Execute the cli tools for greptimedb.
#[clap(name = "cli")]
Cli(cli::Command),
}
@@ -66,13 +85,13 @@ impl SubCommand {
Ok(app)
}
fn load_options(&self, cli_options: &CliOptions) -> Result<Options> {
fn load_options(&self, global_options: &GlobalOptions) -> Result<Options> {
match self {
SubCommand::Datanode(cmd) => cmd.load_options(cli_options),
SubCommand::Frontend(cmd) => cmd.load_options(cli_options),
SubCommand::Metasrv(cmd) => cmd.load_options(cli_options),
SubCommand::Standalone(cmd) => cmd.load_options(cli_options),
SubCommand::Cli(cmd) => cmd.load_options(cli_options),
SubCommand::Datanode(cmd) => cmd.load_options(global_options),
SubCommand::Frontend(cmd) => cmd.load_options(global_options),
SubCommand::Metasrv(cmd) => cmd.load_options(global_options),
SubCommand::Standalone(cmd) => cmd.load_options(global_options),
SubCommand::Cli(cmd) => cmd.load_options(global_options),
}
}
}
@@ -95,6 +114,32 @@ static ALLOC: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc;
#[tokio::main]
async fn main() -> Result<()> {
setup_human_panic();
start(Command::parse()).await
}
async fn start(cli: Command) -> Result<()> {
let subcmd = cli.subcmd;
let app_name = subcmd.to_string();
let opts = subcmd.load_options(&cli.global_options)?;
let _guard = common_telemetry::init_global_logging(
&app_name,
opts.logging_options(),
cli.global_options.tracing_options(),
opts.node_id(),
);
log_versions(version!(), short_version!());
let app = subcmd.build(opts).await?;
start_app(app).await
}
fn setup_human_panic() {
let metadata = human_panic::Metadata {
version: env!("CARGO_PKG_VERSION").into(),
name: "GreptimeDB".into(),
@@ -104,34 +149,4 @@ async fn main() -> Result<()> {
human_panic::setup_panic!(metadata);
common_telemetry::set_panic_hook();
let cli = greptimedb_cli();
let cli = SubCommand::augment_subcommands(cli);
let args = cli.get_matches();
let subcmd = match SubCommand::from_arg_matches(&args) {
Ok(subcmd) => subcmd,
Err(e) => e.exit(),
};
let app_name = subcmd.to_string();
let cli_options = CliOptions::new(&args);
let opts = subcmd.load_options(&cli_options)?;
let _guard = common_telemetry::init_global_logging(
&app_name,
opts.logging_options(),
cli_options.tracing_options(),
opts.node_id(),
);
log_versions();
let app = subcmd.build(opts).await?;
start_app(app).await
}

View File
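
The new entry point above parses everything through a single `#[derive(Parser)]` command with the global flags flattened in, instead of `greptimedb_cli()` plus `augment_subcommands`. A minimal, self-contained sketch of that pattern; the `GlobalOptions` mirror and the lone subcommand are hypothetical stand-ins for the real types in `cmd::options`:

use clap::{Args, Parser, Subcommand};

// Hypothetical mirror of `GlobalOptions`; the real struct lives in cmd::options.
#[derive(Args, Debug, Default)]
struct GlobalOptions {
    #[clap(long)]
    log_dir: Option<String>,
    #[clap(long)]
    log_level: Option<String>,
}

#[derive(Parser)]
#[command(name = "greptime", version, about)]
struct Command {
    #[clap(subcommand)]
    subcmd: SubCommand,
    // Global flags stay valid for every subcommand because they are flattened here.
    #[clap(flatten)]
    global_options: GlobalOptions,
}

#[derive(Subcommand)]
enum SubCommand {
    /// Placeholder for the real standalone subcommand.
    Standalone,
}

fn main() {
    let cmd = Command::parse_from(["greptime", "--log-level", "debug", "standalone"]);
    println!("{:?}", cmd.global_options);
}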

@@ -22,7 +22,7 @@ mod helper;
// Wait for https://github.com/GreptimeTeam/greptimedb/issues/2373
#[allow(unused)]
mod repl;
// mod repl;
// TODO(weny): Removes it
#[allow(deprecated)]
mod upgrade;
@@ -31,12 +31,12 @@ use async_trait::async_trait;
use bench::BenchTableMetadataCommand;
use clap::Parser;
use common_telemetry::logging::LoggingOptions;
pub use repl::Repl;
// pub use repl::Repl;
use upgrade::UpgradeCommand;
use self::export::ExportCommand;
use crate::error::Result;
use crate::options::{CliOptions, Options};
use crate::options::{GlobalOptions, Options};
use crate::App;
#[async_trait]
@@ -80,14 +80,14 @@ impl Command {
self.cmd.build().await
}
pub fn load_options(&self, cli_options: &CliOptions) -> Result<Options> {
pub fn load_options(&self, global_options: &GlobalOptions) -> Result<Options> {
let mut logging_opts = LoggingOptions::default();
if let Some(dir) = &cli_options.log_dir {
logging_opts.dir = dir.clone();
if let Some(dir) = &global_options.log_dir {
logging_opts.dir.clone_from(dir);
}
logging_opts.level = cli_options.log_level.clone();
logging_opts.level.clone_from(&global_options.log_level);
Ok(Options::Cli(Box::new(logging_opts)))
}

View File
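
Several hunks in this change replace `opts.x = y.clone()` with `opts.x.clone_from(y)`. `Clone::clone_from` produces the same value but may reuse the destination's existing allocation, which is presumably the point of the rewrite. A tiny sketch:

fn main() {
    let new_dir = String::from("/tmp/greptimedb/test/logs");
    let mut logging_dir = String::with_capacity(64);

    // Equivalent in result to `logging_dir = new_dir.clone()`, but `clone_from`
    // may reuse `logging_dir`'s existing buffer instead of allocating a new one.
    logging_dir.clone_from(&new_dir);
    assert_eq!(logging_dir, new_dir);
}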

@@ -107,14 +107,11 @@ impl TableMetadataBencher {
.unwrap();
let start = Instant::now();
let table_info = table_info.unwrap();
let table_route = table_route.unwrap();
let table_id = table_info.table_info.ident.table_id;
let _ = self
.table_metadata_manager
.delete_table_metadata(
table_id,
&table_info.table_name(),
table_route.unwrap().region_routes().unwrap(),
)
.delete_table_metadata(table_id, &table_info.table_name(), &table_route)
.await;
start.elapsed()
},
@@ -140,7 +137,7 @@ impl TableMetadataBencher {
let start = Instant::now();
let _ = self
.table_metadata_manager
.rename_table(table_info.unwrap(), new_table_name)
.rename_table(&table_info.unwrap(), new_table_name)
.await;
start.elapsed()

View File

@@ -16,14 +16,14 @@ use std::path::Path;
use std::sync::Arc;
use async_trait::async_trait;
use base64::engine::general_purpose;
use base64::Engine;
use clap::{Parser, ValueEnum};
use client::api::v1::auth_header::AuthScheme;
use client::api::v1::Basic;
use client::{Client, Database, OutputData, DEFAULT_SCHEMA_NAME};
use common_recordbatch::util::collect;
use client::DEFAULT_SCHEMA_NAME;
use common_telemetry::{debug, error, info, warn};
use datatypes::scalars::ScalarVector;
use datatypes::vectors::{StringVector, Vector};
use serde_json::Value;
use servers::http::greptime_result_v1::GreptimedbV1Response;
use servers::http::GreptimeQueryOutput;
use snafu::{OptionExt, ResultExt};
use tokio::fs::File;
use tokio::io::{AsyncWriteExt, BufWriter};
@@ -31,9 +31,8 @@ use tokio::sync::Semaphore;
use crate::cli::{Instance, Tool};
use crate::error::{
CollectRecordBatchesSnafu, ConnectServerSnafu, EmptyResultSnafu, Error, FileIoSnafu,
IllegalConfigSnafu, InvalidDatabaseNameSnafu, NotDataFromOutputSnafu, RequestDatabaseSnafu,
Result,
EmptyResultSnafu, Error, FileIoSnafu, HttpQuerySqlSnafu, InvalidDatabaseNameSnafu, Result,
SerdeJsonSnafu,
};
type TableReference = (String, String, String);
@@ -80,51 +79,75 @@ pub struct ExportCommand {
impl ExportCommand {
pub async fn build(&self) -> Result<Instance> {
let client = Client::with_urls([self.addr.clone()]);
client
.health_check()
.await
.with_context(|_| ConnectServerSnafu {
addr: self.addr.clone(),
})?;
let (catalog, schema) = split_database(&self.database)?;
let mut database_client = Database::new(
catalog.clone(),
schema.clone().unwrap_or(DEFAULT_SCHEMA_NAME.to_string()),
client,
);
if let Some(auth_basic) = &self.auth_basic {
let (username, password) = auth_basic.split_once(':').context(IllegalConfigSnafu {
msg: "auth_basic cannot be split by ':'".to_string(),
})?;
database_client.set_auth(AuthScheme::Basic(Basic {
username: username.to_string(),
password: password.to_string(),
}));
}
let auth_header = if let Some(basic) = &self.auth_basic {
let encoded = general_purpose::STANDARD.encode(basic);
Some(format!("basic {}", encoded))
} else {
None
};
Ok(Instance::new(Box::new(Export {
client: database_client,
addr: self.addr.clone(),
catalog,
schema,
output_dir: self.output_dir.clone(),
parallelism: self.export_jobs,
target: self.target.clone(),
auth_header,
})))
}
}
pub struct Export {
client: Database,
addr: String,
catalog: String,
schema: Option<String>,
output_dir: String,
parallelism: usize,
target: ExportTarget,
auth_header: Option<String>,
}
impl Export {
/// Execute one single sql query.
async fn sql(&self, sql: &str) -> Result<Option<Vec<Vec<Value>>>> {
let url = format!(
"http://{}/v1/sql?db={}-{}&sql={}",
self.addr,
self.catalog,
self.schema.as_deref().unwrap_or(DEFAULT_SCHEMA_NAME),
sql
);
let mut request = reqwest::Client::new()
.get(&url)
.header("Content-Type", "application/x-www-form-urlencoded");
if let Some(ref auth) = self.auth_header {
request = request.header("Authorization", auth);
}
let response = request.send().await.with_context(|_| HttpQuerySqlSnafu {
reason: format!("bad url: {}", url),
})?;
let response = response
.error_for_status()
.with_context(|_| HttpQuerySqlSnafu {
reason: format!("query failed: {}", sql),
})?;
let text = response.text().await.with_context(|_| HttpQuerySqlSnafu {
reason: "cannot get response text".to_string(),
})?;
let body = serde_json::from_str::<GreptimedbV1Response>(&text).context(SerdeJsonSnafu)?;
Ok(body.output().first().and_then(|output| match output {
GreptimeQueryOutput::Records(records) => Some(records.rows().clone()),
GreptimeQueryOutput::AffectedRows(_) => None,
}))
}
/// Iterate over all db names.
///
/// Newbie: `db_name` is catalog + schema.
@@ -132,35 +155,19 @@ impl Export {
if let Some(schema) = &self.schema {
Ok(vec![(self.catalog.clone(), schema.clone())])
} else {
let mut client = self.client.clone();
client.set_catalog(self.catalog.clone());
let result =
client
.sql("show databases")
.await
.with_context(|_| RequestDatabaseSnafu {
sql: "show databases".to_string(),
})?;
let OutputData::Stream(stream) = result.data else {
NotDataFromOutputSnafu.fail()?
let result = self.sql("show databases").await?;
let Some(records) = result else {
EmptyResultSnafu.fail()?
};
let record_batch = collect(stream)
.await
.context(CollectRecordBatchesSnafu)?
.pop()
.context(EmptyResultSnafu)?;
let schemas = record_batch
.column(0)
.as_any()
.downcast_ref::<StringVector>()
.unwrap();
let mut result = Vec::with_capacity(schemas.len());
for i in 0..schemas.len() {
let schema = schemas.get_data(i).unwrap().to_owned();
let mut result = Vec::with_capacity(records.len());
for value in records {
let serde_json::Value::String(schema) = &value[0] else {
unreachable!()
};
if schema == common_catalog::consts::INFORMATION_SCHEMA_NAME {
continue;
}
result.push((self.catalog.clone(), schema));
result.push((self.catalog.clone(), schema.clone()));
}
Ok(result)
}
@@ -172,85 +179,49 @@ impl Export {
// TODO: SQL injection hurts
let sql = format!(
"select table_catalog, table_schema, table_name from \
information_schema.tables where table_type = \'BASE TABLE\'\
information_schema.tables where table_type = \'BASE TABLE\' \
and table_catalog = \'{catalog}\' and table_schema = \'{schema}\'",
);
let mut client = self.client.clone();
client.set_catalog(catalog);
client.set_schema(schema);
let result = client
.sql(&sql)
.await
.with_context(|_| RequestDatabaseSnafu { sql })?;
let OutputData::Stream(stream) = result.data else {
NotDataFromOutputSnafu.fail()?
};
let Some(record_batch) = collect(stream)
.await
.context(CollectRecordBatchesSnafu)?
.pop()
else {
return Ok(vec![]);
let result = self.sql(&sql).await?;
let Some(records) = result else {
EmptyResultSnafu.fail()?
};
debug!("Fetched table list: {}", record_batch.pretty_print());
debug!("Fetched table list: {:?}", records);
if record_batch.num_rows() == 0 {
if records.is_empty() {
return Ok(vec![]);
}
let mut result = Vec::with_capacity(record_batch.num_rows());
let catalog_column = record_batch
.column(0)
.as_any()
.downcast_ref::<StringVector>()
.unwrap();
let schema_column = record_batch
.column(1)
.as_any()
.downcast_ref::<StringVector>()
.unwrap();
let table_column = record_batch
.column(2)
.as_any()
.downcast_ref::<StringVector>()
.unwrap();
for i in 0..record_batch.num_rows() {
let catalog = catalog_column.get_data(i).unwrap().to_owned();
let schema = schema_column.get_data(i).unwrap().to_owned();
let table = table_column.get_data(i).unwrap().to_owned();
result.push((catalog, schema, table));
let mut result = Vec::with_capacity(records.len());
for value in records {
let mut t = Vec::with_capacity(3);
for v in &value {
let serde_json::Value::String(value) = v else {
unreachable!()
};
t.push(value);
}
result.push((t[0].clone(), t[1].clone(), t[2].clone()));
}
Ok(result)
}
async fn show_create_table(&self, catalog: &str, schema: &str, table: &str) -> Result<String> {
let sql = format!("show create table {}.{}.{}", catalog, schema, table);
let mut client = self.client.clone();
client.set_catalog(catalog);
client.set_schema(schema);
let result = client
.sql(&sql)
.await
.with_context(|_| RequestDatabaseSnafu { sql })?;
let OutputData::Stream(stream) = result.data else {
NotDataFromOutputSnafu.fail()?
let sql = format!(
r#"show create table "{}"."{}"."{}""#,
catalog, schema, table
);
let result = self.sql(&sql).await?;
let Some(records) = result else {
EmptyResultSnafu.fail()?
};
let serde_json::Value::String(create_table) = &records[0][1] else {
unreachable!()
};
let record_batch = collect(stream)
.await
.context(CollectRecordBatchesSnafu)?
.pop()
.context(EmptyResultSnafu)?;
let create_table = record_batch
.column(1)
.as_any()
.downcast_ref::<StringVector>()
.unwrap()
.get_data(0)
.unwrap();
Ok(format!("{create_table};\n"))
Ok(format!("{};\n", create_table))
}
async fn export_create_table(&self) -> Result<()> {
@@ -273,7 +244,7 @@ impl Export {
for (c, s, t) in table_list {
match self.show_create_table(&c, &s, &t).await {
Err(e) => {
error!(e; "Failed to export table {}.{}.{}", c, s, t)
error!(e; r#"Failed to export table "{}"."{}"."{}""#, c, s, t)
}
Ok(create_table) => {
file.write_all(create_table.as_bytes())
@@ -318,20 +289,13 @@ impl Export {
.context(FileIoSnafu)?;
let output_dir = Path::new(&self.output_dir).join(format!("{catalog}-{schema}/"));
let mut client = self.client.clone();
client.set_catalog(catalog.clone());
client.set_schema(schema.clone());
// copy database to
let sql = format!(
"copy database {} to '{}' with (format='parquet');",
schema,
output_dir.to_str().unwrap()
);
client
.sql(sql.clone())
.await
.context(RequestDatabaseSnafu { sql })?;
self.sql(&sql).await?;
info!("finished exporting {catalog}.{schema} data");
// export copy from sql

View File
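
The export tool above now talks to the HTTP `/v1/sql` endpoint with an optional pre-encoded basic auth header instead of the gRPC `Database` client. A self-contained sketch of the same request shape; the address, credentials, and statement are placeholders, and the URL format (`db={catalog}-{schema}`) is taken from the hunk:

use base64::engine::general_purpose;
use base64::Engine;

async fn query_v1_sql() -> Result<(), reqwest::Error> {
    // Placeholders; the real values come from the CLI flags.
    let addr = "127.0.0.1:4000";
    let catalog = "greptime";
    let schema = "public";
    let sql = "show databases";

    // Same URL shape as the hunk above: db is `{catalog}-{schema}`.
    let url = format!("http://{addr}/v1/sql?db={catalog}-{schema}&sql={sql}");
    let auth_header = format!("basic {}", general_purpose::STANDARD.encode("user:password"));

    let text = reqwest::Client::new()
        .get(&url)
        .header("Content-Type", "application/x-www-form-urlencoded")
        .header("Authorization", auth_header)
        .send()
        .await?
        .error_for_status()?
        .text()
        .await?;
    println!("{text}");
    Ok(())
}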

@@ -19,13 +19,14 @@ use std::time::Instant;
use catalog::kvbackend::{
CachedMetaKvBackend, CachedMetaKvBackendBuilder, KvBackendCatalogManager,
};
use client::{Client, Database, OutputData, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use client::{Client, OutputData, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_base::Plugins;
use common_config::Mode;
use common_error::ext::ErrorExt;
use common_meta::cache_invalidator::MultiCacheInvalidator;
use common_query::Output;
use common_recordbatch::RecordBatches;
use common_telemetry::logging;
use common_telemetry::debug;
use either::Either;
use meta_client::client::MetaClientBuilder;
use query::datafusion::DatafusionQueryEngine;
@@ -77,7 +78,7 @@ impl Repl {
let history_file = history_file();
if let Err(e) = rl.load_history(&history_file) {
logging::debug!(
debug!(
"failed to load history file on {}, error: {e}",
history_file.display()
);
@@ -160,7 +161,10 @@ impl Repl {
let start = Instant::now();
let output = if let Some(query_engine) = &self.query_engine {
let query_ctx = QueryContext::with(self.database.catalog(), self.database.schema());
let query_ctx = Arc::new(QueryContext::with(
self.database.catalog(),
self.database.schema(),
));
let stmt = QueryLanguageParser::parse_sql(&sql, &query_ctx)
.with_context(|_| ParseSqlSnafu { sql: sql.clone() })?;
@@ -224,7 +228,7 @@ impl Drop for Repl {
if self.rl.helper().is_some() {
let history_file = history_file();
if let Err(e) = self.rl.save_history(&history_file) {
logging::debug!(
debug!(
"failed to save history file on {}, error: {e}",
history_file.display()
);
@@ -256,8 +260,13 @@ async fn create_query_engine(meta_addr: &str) -> Result<DatafusionQueryEngine> {
let multi_cache_invalidator = Arc::new(MultiCacheInvalidator::with_invalidators(vec![
cached_meta_backend.clone(),
]));
let catalog_list =
KvBackendCatalogManager::new(cached_meta_backend.clone(), multi_cache_invalidator).await;
let catalog_list = KvBackendCatalogManager::new(
Mode::Distributed,
Some(meta_client.clone()),
cached_meta_backend.clone(),
multi_cache_invalidator,
)
.await;
let plugins: Plugins = Default::default();
let state = Arc::new(QueryEngineState::new(
catalog_list,

View File

@@ -27,7 +27,7 @@ use common_meta::key::table_info::{TableInfoKey, TableInfoValue};
use common_meta::key::table_name::{TableNameKey, TableNameValue};
use common_meta::key::table_region::{TableRegionKey, TableRegionValue};
use common_meta::key::table_route::{TableRouteKey, TableRouteValue as NextTableRouteValue};
use common_meta::key::{RegionDistribution, TableMetaKey, TableMetaValue};
use common_meta::key::{MetaKey, RegionDistribution, TableMetaValue};
use common_meta::kv_backend::etcd::EtcdStore;
use common_meta::kv_backend::KvBackendRef;
use common_meta::range_stream::PaginationStream;
@@ -137,7 +137,7 @@ impl MigrateTableMetadata {
while let Some((key, value)) = stream.try_next().await.context(error::IterStreamSnafu)? {
let table_id = self.migrate_table_route_key(value).await?;
keys.push(key);
keys.push(TableRegionKey::new(table_id).as_raw_key())
keys.push(TableRegionKey::new(table_id).to_bytes())
}
info!("Total migrated TableRouteKeys: {}", keys.len() / 2);
@@ -165,7 +165,7 @@ impl MigrateTableMetadata {
self.etcd_store
.put(
PutRequest::new()
.with_key(new_key.as_raw_key())
.with_key(new_key.to_bytes())
.with_value(new_table_value.try_as_raw_value().unwrap()),
)
.await
@@ -192,10 +192,10 @@ impl MigrateTableMetadata {
let key = v1SchemaKey::parse(key_str)
.unwrap_or_else(|e| panic!("schema key is corrupted: {e}, key: {key_str}"));
Ok((key, ()))
Ok(key)
}),
);
while let Some((key, _)) = stream.try_next().await.context(error::IterStreamSnafu)? {
while let Some(key) = stream.try_next().await.context(error::IterStreamSnafu)? {
let _ = self.migrate_schema_key(&key).await;
keys.push(key.to_string().as_bytes().to_vec());
}
@@ -217,7 +217,7 @@ impl MigrateTableMetadata {
self.etcd_store
.put(
PutRequest::new()
.with_key(new_key.as_raw_key())
.with_key(new_key.to_bytes())
.with_value(schema_name_value.try_as_raw_value().unwrap()),
)
.await
@@ -244,10 +244,10 @@ impl MigrateTableMetadata {
let key = v1CatalogKey::parse(key_str)
.unwrap_or_else(|e| panic!("catalog key is corrupted: {e}, key: {key_str}"));
Ok((key, ()))
Ok(key)
}),
);
while let Some((key, _)) = stream.try_next().await.context(error::IterStreamSnafu)? {
while let Some(key) = stream.try_next().await.context(error::IterStreamSnafu)? {
let _ = self.migrate_catalog_key(&key).await;
keys.push(key.to_string().as_bytes().to_vec());
}
@@ -269,7 +269,7 @@ impl MigrateTableMetadata {
self.etcd_store
.put(
PutRequest::new()
.with_key(new_key.as_raw_key())
.with_key(new_key.to_bytes())
.with_value(catalog_name_value.try_as_raw_value().unwrap()),
)
.await
@@ -346,11 +346,11 @@ impl MigrateTableMetadata {
.batch_put(
BatchPutRequest::new()
.add_kv(
table_info_key.as_raw_key(),
table_info_key.to_bytes(),
table_info_value.try_as_raw_value().unwrap(),
)
.add_kv(
table_region_key.as_raw_key(),
table_region_key.to_bytes(),
table_region_value.try_as_raw_value().unwrap(),
),
)
@@ -378,7 +378,7 @@ impl MigrateTableMetadata {
self.etcd_store
.put(
PutRequest::new()
.with_key(table_name_key.as_raw_key())
.with_key(table_name_key.to_bytes())
.with_value(table_name_value.try_as_raw_value().unwrap()),
)
.await
@@ -425,7 +425,7 @@ impl MigrateTableMetadata {
} else {
let mut req = BatchPutRequest::new();
for (key, value) in datanode_table_kvs {
req = req.add_kv(key.as_raw_key(), value.try_as_raw_value().unwrap());
req = req.add_kv(key.to_bytes(), value.try_as_raw_value().unwrap());
}
self.etcd_store.batch_put(req).await.unwrap();
}

View File
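
The migration code above switches from `TableMetaKey::as_raw_key` to `to_bytes` on the `MetaKey` trait. A minimal sketch, reusing the `TableRegionKey` type already imported in that file:

use common_meta::key::table_region::TableRegionKey;
use common_meta::key::MetaKey;

// `to_bytes` on the `MetaKey` trait replaces the old `TableMetaKey::as_raw_key`.
fn raw_table_region_key(table_id: u32) -> Vec<u8> {
    TableRegionKey::new(table_id).to_bytes()
}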

@@ -18,7 +18,7 @@ use std::time::Duration;
use async_trait::async_trait;
use catalog::kvbackend::MetaKvBackend;
use clap::Parser;
use common_telemetry::{info, logging};
use common_telemetry::info;
use common_wal::config::DatanodeWalConfig;
use datanode::config::DatanodeOptions;
use datanode::datanode::{Datanode, DatanodeBuilder};
@@ -28,7 +28,7 @@ use servers::Mode;
use snafu::{OptionExt, ResultExt};
use crate::error::{MissingConfigSnafu, Result, ShutdownDatanodeSnafu, StartDatanodeSnafu};
use crate::options::{CliOptions, Options};
use crate::options::{GlobalOptions, Options};
use crate::App;
pub struct Instance {
@@ -82,8 +82,8 @@ impl Command {
self.subcmd.build(opts).await
}
pub fn load_options(&self, cli_options: &CliOptions) -> Result<Options> {
self.subcmd.load_options(cli_options)
pub fn load_options(&self, global_options: &GlobalOptions) -> Result<Options> {
self.subcmd.load_options(global_options)
}
}
@@ -99,9 +99,9 @@ impl SubCommand {
}
}
fn load_options(&self, cli_options: &CliOptions) -> Result<Options> {
fn load_options(&self, global_options: &GlobalOptions) -> Result<Options> {
match self {
SubCommand::Start(cmd) => cmd.load_options(cli_options),
SubCommand::Start(cmd) => cmd.load_options(global_options),
}
}
}
@@ -131,27 +131,27 @@ struct StartCommand {
}
impl StartCommand {
fn load_options(&self, cli_options: &CliOptions) -> Result<Options> {
fn load_options(&self, global_options: &GlobalOptions) -> Result<Options> {
let mut opts: DatanodeOptions = Options::load_layered_options(
self.config_file.as_deref(),
self.env_prefix.as_ref(),
DatanodeOptions::env_list_keys(),
)?;
if let Some(dir) = &cli_options.log_dir {
opts.logging.dir = dir.clone();
if let Some(dir) = &global_options.log_dir {
opts.logging.dir.clone_from(dir);
}
if cli_options.log_level.is_some() {
opts.logging.level = cli_options.log_level.clone();
if global_options.log_level.is_some() {
opts.logging.level.clone_from(&global_options.log_level);
}
if let Some(addr) = &self.rpc_addr {
opts.rpc_addr = addr.clone();
opts.rpc_addr.clone_from(addr);
}
if self.rpc_hostname.is_some() {
opts.rpc_hostname = self.rpc_hostname.clone();
opts.rpc_hostname.clone_from(&self.rpc_hostname);
}
if let Some(node_id) = self.node_id {
@@ -161,7 +161,8 @@ impl StartCommand {
if let Some(metasrv_addrs) = &self.metasrv_addr {
opts.meta_client
.get_or_insert_with(MetaClientOptions::default)
.metasrv_addrs = metasrv_addrs.clone();
.metasrv_addrs
.clone_from(metasrv_addrs);
opts.mode = Mode::Distributed;
}
@@ -173,7 +174,7 @@ impl StartCommand {
}
if let Some(data_home) = &self.data_home {
opts.storage.data_home = data_home.clone();
opts.storage.data_home.clone_from(data_home);
}
// `wal_dir` only affects raft-engine config.
@@ -191,7 +192,7 @@ impl StartCommand {
}
if let Some(http_addr) = &self.http_addr {
opts.http.addr = http_addr.clone();
opts.http.addr.clone_from(http_addr);
}
if let Some(http_timeout) = self.http_timeout {
@@ -209,8 +210,8 @@ impl StartCommand {
.await
.context(StartDatanodeSnafu)?;
logging::info!("Datanode start command: {:#?}", self);
logging::info!("Datanode options: {:#?}", opts);
info!("Datanode start command: {:#?}", self);
info!("Datanode options: {:#?}", opts);
let node_id = opts
.node_id
@@ -258,7 +259,7 @@ mod tests {
use servers::Mode;
use super::*;
use crate::options::{CliOptions, ENV_VAR_SEP};
use crate::options::{GlobalOptions, ENV_VAR_SEP};
#[test]
fn test_read_from_config_file() {
@@ -314,7 +315,8 @@ mod tests {
..Default::default()
};
let Options::Datanode(options) = cmd.load_options(&CliOptions::default()).unwrap() else {
let Options::Datanode(options) = cmd.load_options(&GlobalOptions::default()).unwrap()
else {
unreachable!()
};
@@ -376,7 +378,7 @@ mod tests {
#[test]
fn test_try_from_cmd() {
if let Options::Datanode(opt) = StartCommand::default()
.load_options(&CliOptions::default())
.load_options(&GlobalOptions::default())
.unwrap()
{
assert_eq!(Mode::Standalone, opt.mode)
@@ -387,7 +389,7 @@ mod tests {
metasrv_addr: Some(vec!["127.0.0.1:3002".to_string()]),
..Default::default()
})
.load_options(&CliOptions::default())
.load_options(&GlobalOptions::default())
.unwrap()
{
assert_eq!(Mode::Distributed, opt.mode)
@@ -397,7 +399,7 @@ mod tests {
metasrv_addr: Some(vec!["127.0.0.1:3002".to_string()]),
..Default::default()
})
.load_options(&CliOptions::default())
.load_options(&GlobalOptions::default())
.is_err());
// Providing node_id but leave metasrv_addr absent is ok since metasrv_addr has default value
@@ -405,7 +407,7 @@ mod tests {
node_id: Some(42),
..Default::default()
})
.load_options(&CliOptions::default())
.load_options(&GlobalOptions::default())
.is_ok());
}
@@ -414,7 +416,7 @@ mod tests {
let cmd = StartCommand::default();
let options = cmd
.load_options(&CliOptions {
.load_options(&GlobalOptions {
log_dir: Some("/tmp/greptimedb/test/logs".to_string()),
log_level: Some("debug".to_string()),
@@ -503,7 +505,8 @@ mod tests {
..Default::default()
};
let Options::Datanode(opts) = command.load_options(&CliOptions::default()).unwrap()
let Options::Datanode(opts) =
command.load_options(&GlobalOptions::default()).unwrap()
else {
unreachable!()
};

View File

@@ -139,13 +139,6 @@ pub enum Error {
location: Location,
},
#[snafu(display("Failed to request database, sql: {sql}"))]
RequestDatabase {
sql: String,
location: Location,
source: client::Error,
},
#[snafu(display("Failed to collect RecordBatches"))]
CollectRecordBatches {
location: Location,
@@ -218,6 +211,14 @@ pub enum Error {
location: Location,
},
#[snafu(display("Failed to run http request: {reason}"))]
HttpQuerySql {
reason: String,
#[snafu(source)]
error: reqwest::Error,
location: Location,
},
#[snafu(display("Expect data from output, but got another thing"))]
NotDataFromOutput { location: Location },
@@ -290,8 +291,9 @@ impl ErrorExt for Error {
Error::StartProcedureManager { source, .. }
| Error::StopProcedureManager { source, .. } => source.status_code(),
Error::StartWalOptionsAllocator { source, .. } => source.status_code(),
Error::ReplCreation { .. } | Error::Readline { .. } => StatusCode::Internal,
Error::RequestDatabase { source, .. } => source.status_code(),
Error::ReplCreation { .. } | Error::Readline { .. } | Error::HttpQuerySql { .. } => {
StatusCode::Internal
}
Error::CollectRecordBatches { source, .. }
| Error::PrettyPrintRecordBatches { source, .. } => source.status_code(),
Error::StartMetaClient { source, .. } => source.status_code(),

View File

@@ -22,7 +22,7 @@ use client::client_manager::DatanodeClients;
use common_meta::cache_invalidator::MultiCacheInvalidator;
use common_meta::heartbeat::handler::parse_mailbox_message::ParseMailboxMessageHandler;
use common_meta::heartbeat::handler::HandlerGroupExecutor;
use common_telemetry::logging;
use common_telemetry::info;
use common_time::timezone::set_default_timezone;
use frontend::frontend::FrontendOptions;
use frontend::heartbeat::handler::invalidate_table_cache::InvalidateTableCacheHandler;
@@ -36,7 +36,7 @@ use servers::Mode;
use snafu::{OptionExt, ResultExt};
use crate::error::{self, InitTimezoneSnafu, MissingConfigSnafu, Result, StartFrontendSnafu};
use crate::options::{CliOptions, Options};
use crate::options::{GlobalOptions, Options};
use crate::App;
pub struct Instance {
@@ -90,8 +90,8 @@ impl Command {
self.subcmd.build(opts).await
}
pub fn load_options(&self, cli_options: &CliOptions) -> Result<Options> {
self.subcmd.load_options(cli_options)
pub fn load_options(&self, global_options: &GlobalOptions) -> Result<Options> {
self.subcmd.load_options(global_options)
}
}
@@ -107,9 +107,9 @@ impl SubCommand {
}
}
fn load_options(&self, cli_options: &CliOptions) -> Result<Options> {
fn load_options(&self, global_options: &GlobalOptions) -> Result<Options> {
match self {
SubCommand::Start(cmd) => cmd.load_options(cli_options),
SubCommand::Start(cmd) => cmd.load_options(global_options),
}
}
}
@@ -126,8 +126,6 @@ pub struct StartCommand {
mysql_addr: Option<String>,
#[clap(long)]
postgres_addr: Option<String>,
#[clap(long)]
opentsdb_addr: Option<String>,
#[clap(short, long)]
config_file: Option<String>,
#[clap(short, long)]
@@ -149,19 +147,19 @@ pub struct StartCommand {
}
impl StartCommand {
fn load_options(&self, cli_options: &CliOptions) -> Result<Options> {
fn load_options(&self, global_options: &GlobalOptions) -> Result<Options> {
let mut opts: FrontendOptions = Options::load_layered_options(
self.config_file.as_deref(),
self.env_prefix.as_ref(),
FrontendOptions::env_list_keys(),
)?;
if let Some(dir) = &cli_options.log_dir {
opts.logging.dir = dir.clone();
if let Some(dir) = &global_options.log_dir {
opts.logging.dir.clone_from(dir);
}
if cli_options.log_level.is_some() {
opts.logging.level = cli_options.log_level.clone();
if global_options.log_level.is_some() {
opts.logging.level.clone_from(&global_options.log_level);
}
let tls_opts = TlsOption::new(
@@ -171,7 +169,7 @@ impl StartCommand {
);
if let Some(addr) = &self.http_addr {
opts.http.addr = addr.clone()
opts.http.addr.clone_from(addr);
}
if let Some(http_timeout) = self.http_timeout {
@@ -183,26 +181,21 @@ impl StartCommand {
}
if let Some(addr) = &self.rpc_addr {
opts.grpc.addr = addr.clone()
opts.grpc.addr.clone_from(addr);
}
if let Some(addr) = &self.mysql_addr {
opts.mysql.enable = true;
opts.mysql.addr = addr.clone();
opts.mysql.addr.clone_from(addr);
opts.mysql.tls = tls_opts.clone();
}
if let Some(addr) = &self.postgres_addr {
opts.postgres.enable = true;
opts.postgres.addr = addr.clone();
opts.postgres.addr.clone_from(addr);
opts.postgres.tls = tls_opts;
}
if let Some(addr) = &self.opentsdb_addr {
opts.opentsdb.enable = true;
opts.opentsdb.addr = addr.clone();
}
if let Some(enable) = self.influxdb_enable {
opts.influxdb.enable = enable;
}
@@ -210,11 +203,12 @@ impl StartCommand {
if let Some(metasrv_addrs) = &self.metasrv_addr {
opts.meta_client
.get_or_insert_with(MetaClientOptions::default)
.metasrv_addrs = metasrv_addrs.clone();
.metasrv_addrs
.clone_from(metasrv_addrs);
opts.mode = Mode::Distributed;
}
opts.user_provider = self.user_provider.clone();
opts.user_provider.clone_from(&self.user_provider);
Ok(Options::Frontend(Box::new(opts)))
}
@@ -225,8 +219,8 @@ impl StartCommand {
.await
.context(StartFrontendSnafu)?;
logging::info!("Frontend start command: {:#?}", self);
logging::info!("Frontend options: {:#?}", opts);
info!("Frontend start command: {:#?}", self);
info!("Frontend options: {:#?}", opts);
set_default_timezone(opts.default_timezone.as_deref()).context(InitTimezoneSnafu)?;
@@ -252,6 +246,8 @@ impl StartCommand {
cached_meta_backend.clone(),
]));
let catalog_manager = KvBackendCatalogManager::new(
opts.mode,
Some(meta_client.clone()),
cached_meta_backend.clone(),
multi_cache_invalidator.clone(),
)
@@ -265,6 +261,7 @@ impl StartCommand {
]);
let heartbeat_task = HeartbeatTask::new(
&opts,
meta_client.clone(),
opts.heartbeat.clone(),
Arc::new(executor),
@@ -307,7 +304,7 @@ mod tests {
use servers::http::HttpOptions;
use super::*;
use crate::options::{CliOptions, ENV_VAR_SEP};
use crate::options::{GlobalOptions, ENV_VAR_SEP};
#[test]
fn test_try_from_start_command() {
@@ -315,13 +312,13 @@ mod tests {
http_addr: Some("127.0.0.1:1234".to_string()),
mysql_addr: Some("127.0.0.1:5678".to_string()),
postgres_addr: Some("127.0.0.1:5432".to_string()),
opentsdb_addr: Some("127.0.0.1:4321".to_string()),
influxdb_enable: Some(false),
disable_dashboard: Some(false),
..Default::default()
};
let Options::Frontend(opts) = command.load_options(&CliOptions::default()).unwrap() else {
let Options::Frontend(opts) = command.load_options(&GlobalOptions::default()).unwrap()
else {
unreachable!()
};
@@ -329,7 +326,6 @@ mod tests {
assert_eq!(ReadableSize::mb(64), opts.http.body_limit);
assert_eq!(opts.mysql.addr, "127.0.0.1:5678");
assert_eq!(opts.postgres.addr, "127.0.0.1:5432");
assert_eq!(opts.opentsdb.addr, "127.0.0.1:4321");
let default_opts = FrontendOptions::default();
@@ -342,10 +338,6 @@ mod tests {
default_opts.postgres.runtime_size
);
assert!(opts.opentsdb.enable);
assert_eq!(
opts.opentsdb.runtime_size,
default_opts.opentsdb.runtime_size
);
assert!(!opts.influxdb.enable);
}
@@ -361,6 +353,9 @@ mod tests {
timeout = "30s"
body_limit = "2GB"
[opentsdb]
enable = false
[logging]
level = "debug"
dir = "/tmp/greptimedb/test/logs"
@@ -373,7 +368,7 @@ mod tests {
..Default::default()
};
let Options::Frontend(fe_opts) = command.load_options(&CliOptions::default()).unwrap()
let Options::Frontend(fe_opts) = command.load_options(&GlobalOptions::default()).unwrap()
else {
unreachable!()
};
@@ -385,6 +380,7 @@ mod tests {
assert_eq!("debug", fe_opts.logging.level.as_ref().unwrap());
assert_eq!("/tmp/greptimedb/test/logs".to_string(), fe_opts.logging.dir);
assert!(!fe_opts.opentsdb.enable);
}
#[tokio::test]
@@ -419,7 +415,7 @@ mod tests {
};
let options = cmd
.load_options(&CliOptions {
.load_options(&GlobalOptions {
log_dir: Some("/tmp/greptimedb/test/logs".to_string()),
log_level: Some("debug".to_string()),
@@ -505,7 +501,7 @@ mod tests {
};
let Options::Frontend(fe_opts) =
command.load_options(&CliOptions::default()).unwrap()
command.load_options(&GlobalOptions::default()).unwrap()
else {
unreachable!()
};

View File

@@ -15,7 +15,6 @@
#![feature(assert_matches, let_chains)]
use async_trait::async_trait;
use clap::arg;
use common_telemetry::{error, info};
pub mod cli;
@@ -64,62 +63,21 @@ pub async fn start_app(mut app: Box<dyn App>) -> error::Result<()> {
Ok(())
}
pub fn log_versions() {
/// Log the versions of the application, and the arguments passed to the cli.
/// `version_string` should be the same as the output of cli "--version";
/// and the `app_version` is the short version of the codes, often consist of git branch and commit.
pub fn log_versions(version_string: &str, app_version: &str) {
// Report app version as gauge.
APP_VERSION
.with_label_values(&[short_version(), full_version()])
.with_label_values(&[env!("CARGO_PKG_VERSION"), app_version])
.inc();
// Log version and argument flags.
info!(
"short_version: {}, full_version: {}",
short_version(),
full_version()
);
info!("GreptimeDB version: {}", version_string);
log_env_flags();
}
pub fn greptimedb_cli() -> clap::Command {
let cmd = clap::Command::new("greptimedb")
.version(print_version())
.subcommand_required(true);
#[cfg(feature = "tokio-console")]
let cmd = cmd.arg(arg!(--"tokio-console-addr"[TOKIO_CONSOLE_ADDR]));
cmd.args([arg!(--"log-dir"[LOG_DIR]), arg!(--"log-level"[LOG_LEVEL])])
}
fn print_version() -> &'static str {
concat!(
"\nbranch: ",
env!("GIT_BRANCH"),
"\ncommit: ",
env!("GIT_COMMIT"),
"\ndirty: ",
env!("GIT_DIRTY"),
"\nversion: ",
env!("CARGO_PKG_VERSION")
)
}
fn short_version() -> &'static str {
env!("CARGO_PKG_VERSION")
}
// {app_name}-{branch_name}-{commit_short}
// The branch name (tag) of a release build should already contain the short
// version so the full version doesn't concat the short version explicitly.
fn full_version() -> &'static str {
concat!(
"greptimedb-",
env!("GIT_BRANCH"),
"-",
env!("GIT_COMMIT_SHORT")
)
}
fn log_env_flags() {
info!("command line arguments");
for argument in std::env::args() {

View File

@@ -16,21 +16,21 @@ use std::time::Duration;
use async_trait::async_trait;
use clap::Parser;
use common_telemetry::logging;
use meta_srv::bootstrap::MetaSrvInstance;
use meta_srv::metasrv::MetaSrvOptions;
use common_telemetry::info;
use meta_srv::bootstrap::MetasrvInstance;
use meta_srv::metasrv::MetasrvOptions;
use snafu::ResultExt;
use crate::error::{self, Result, StartMetaServerSnafu};
use crate::options::{CliOptions, Options};
use crate::options::{GlobalOptions, Options};
use crate::App;
pub struct Instance {
instance: MetaSrvInstance,
instance: MetasrvInstance,
}
impl Instance {
fn new(instance: MetaSrvInstance) -> Self {
fn new(instance: MetasrvInstance) -> Self {
Self { instance }
}
}
@@ -42,7 +42,7 @@ impl App for Instance {
}
async fn start(&mut self) -> Result<()> {
plugins::start_meta_srv_plugins(self.instance.plugins())
plugins::start_metasrv_plugins(self.instance.plugins())
.await
.context(StartMetaServerSnafu)?;
@@ -64,12 +64,12 @@ pub struct Command {
}
impl Command {
pub async fn build(self, opts: MetaSrvOptions) -> Result<Instance> {
pub async fn build(self, opts: MetasrvOptions) -> Result<Instance> {
self.subcmd.build(opts).await
}
pub fn load_options(&self, cli_options: &CliOptions) -> Result<Options> {
self.subcmd.load_options(cli_options)
pub fn load_options(&self, global_options: &GlobalOptions) -> Result<Options> {
self.subcmd.load_options(global_options)
}
}
@@ -79,15 +79,15 @@ enum SubCommand {
}
impl SubCommand {
async fn build(self, opts: MetaSrvOptions) -> Result<Instance> {
async fn build(self, opts: MetasrvOptions) -> Result<Instance> {
match self {
SubCommand::Start(cmd) => cmd.build(opts).await,
}
}
fn load_options(&self, cli_options: &CliOptions) -> Result<Options> {
fn load_options(&self, global_options: &GlobalOptions) -> Result<Options> {
match self {
SubCommand::Start(cmd) => cmd.load_options(cli_options),
SubCommand::Start(cmd) => cmd.load_options(global_options),
}
}
}
@@ -126,31 +126,31 @@ struct StartCommand {
}
impl StartCommand {
fn load_options(&self, cli_options: &CliOptions) -> Result<Options> {
let mut opts: MetaSrvOptions = Options::load_layered_options(
fn load_options(&self, global_options: &GlobalOptions) -> Result<Options> {
let mut opts: MetasrvOptions = Options::load_layered_options(
self.config_file.as_deref(),
self.env_prefix.as_ref(),
MetaSrvOptions::env_list_keys(),
MetasrvOptions::env_list_keys(),
)?;
if let Some(dir) = &cli_options.log_dir {
opts.logging.dir = dir.clone();
if let Some(dir) = &global_options.log_dir {
opts.logging.dir.clone_from(dir);
}
if cli_options.log_level.is_some() {
opts.logging.level = cli_options.log_level.clone();
if global_options.log_level.is_some() {
opts.logging.level.clone_from(&global_options.log_level);
}
if let Some(addr) = &self.bind_addr {
opts.bind_addr = addr.clone();
opts.bind_addr.clone_from(addr);
}
if let Some(addr) = &self.server_addr {
opts.server_addr = addr.clone();
opts.server_addr.clone_from(addr);
}
if let Some(addr) = &self.store_addr {
opts.store_addr = addr.clone();
opts.store_addr.clone_from(addr);
}
if let Some(selector_type) = &self.selector {
@@ -168,7 +168,7 @@ impl StartCommand {
}
if let Some(http_addr) = &self.http_addr {
opts.http.addr = http_addr.clone();
opts.http.addr.clone_from(http_addr);
}
if let Some(http_timeout) = self.http_timeout {
@@ -176,11 +176,11 @@ impl StartCommand {
}
if let Some(data_home) = &self.data_home {
opts.data_home = data_home.clone();
opts.data_home.clone_from(data_home);
}
if !self.store_key_prefix.is_empty() {
opts.store_key_prefix = self.store_key_prefix.clone()
opts.store_key_prefix.clone_from(&self.store_key_prefix)
}
if let Some(max_txn_ops) = self.max_txn_ops {
@@ -193,20 +193,20 @@ impl StartCommand {
Ok(Options::Metasrv(Box::new(opts)))
}
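
The repeated `clone_from` edits in this function use the standard-library `Clone::clone_from`, which can reuse the destination's existing allocation instead of dropping it and allocating a fresh clone. A small self-contained illustration with plain `std` types, unrelated to the GreptimeDB option structs:

fn main() {
    let src = String::from("127.0.0.1:3002");
    let mut dst = String::with_capacity(32); // existing buffer may be reused
    dst.clone_from(&src);                    // same result as `dst = src.clone()`
    assert_eq!(dst, "127.0.0.1:3002");

    // The same pattern applies to Option<String>, as with `opts.logging.level`:
    let level = Some("debug".to_string());
    let mut opt_dst: Option<String> = None;
    opt_dst.clone_from(&level);
    assert_eq!(opt_dst.as_deref(), Some("debug"));
}
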
async fn build(self, mut opts: MetaSrvOptions) -> Result<Instance> {
let plugins = plugins::setup_meta_srv_plugins(&mut opts)
async fn build(self, mut opts: MetasrvOptions) -> Result<Instance> {
let plugins = plugins::setup_metasrv_plugins(&mut opts)
.await
.context(StartMetaServerSnafu)?;
logging::info!("MetaSrv start command: {:#?}", self);
logging::info!("MetaSrv options: {:#?}", opts);
info!("Metasrv start command: {:#?}", self);
info!("Metasrv options: {:#?}", opts);
let builder = meta_srv::bootstrap::metasrv_builder(&opts, plugins.clone(), None)
.await
.context(error::BuildMetaServerSnafu)?;
let metasrv = builder.build().await.context(error::BuildMetaServerSnafu)?;
let instance = MetaSrvInstance::new(opts, plugins, metasrv)
let instance = MetasrvInstance::new(opts, plugins, metasrv)
.await
.context(error::BuildMetaServerSnafu)?;
@@ -235,7 +235,7 @@ mod tests {
..Default::default()
};
let Options::Metasrv(options) = cmd.load_options(&CliOptions::default()).unwrap() else {
let Options::Metasrv(options) = cmd.load_options(&GlobalOptions::default()).unwrap() else {
unreachable!()
};
assert_eq!("127.0.0.1:3002".to_string(), options.bind_addr);
@@ -270,7 +270,7 @@ mod tests {
..Default::default()
};
let Options::Metasrv(options) = cmd.load_options(&CliOptions::default()).unwrap() else {
let Options::Metasrv(options) = cmd.load_options(&GlobalOptions::default()).unwrap() else {
unreachable!()
};
assert_eq!("127.0.0.1:3002".to_string(), options.bind_addr);
@@ -315,7 +315,7 @@ mod tests {
};
let options = cmd
.load_options(&CliOptions {
.load_options(&GlobalOptions {
log_dir: Some("/tmp/greptimedb/test/logs".to_string()),
log_level: Some("debug".to_string()),
@@ -379,7 +379,8 @@ mod tests {
..Default::default()
};
let Options::Metasrv(opts) = command.load_options(&CliOptions::default()).unwrap()
let Options::Metasrv(opts) =
command.load_options(&GlobalOptions::default()).unwrap()
else {
unreachable!()
};

View File

@@ -12,15 +12,15 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use clap::ArgMatches;
use clap::Parser;
use common_config::KvBackendConfig;
use common_telemetry::logging::{LoggingOptions, TracingOptions};
use common_wal::config::MetaSrvWalConfig;
use common_wal::config::MetasrvWalConfig;
use config::{Config, Environment, File, FileFormat};
use datanode::config::{DatanodeOptions, ProcedureConfig};
use frontend::error::{Result as FeResult, TomlFormatSnafu};
use frontend::frontend::{FrontendOptions, TomlSerializable};
use meta_srv::metasrv::MetaSrvOptions;
use meta_srv::metasrv::MetasrvOptions;
use serde::{Deserialize, Serialize};
use snafu::ResultExt;
@@ -38,7 +38,7 @@ pub struct MixOptions {
pub frontend: FrontendOptions,
pub datanode: DatanodeOptions,
pub logging: LoggingOptions,
pub wal_meta: MetaSrvWalConfig,
pub wal_meta: MetasrvWalConfig,
}
impl From<MixOptions> for FrontendOptions {
@@ -56,31 +56,28 @@ impl TomlSerializable for MixOptions {
pub enum Options {
Datanode(Box<DatanodeOptions>),
Frontend(Box<FrontendOptions>),
Metasrv(Box<MetaSrvOptions>),
Metasrv(Box<MetasrvOptions>),
Standalone(Box<MixOptions>),
Cli(Box<LoggingOptions>),
}
#[derive(Default)]
pub struct CliOptions {
#[derive(Parser, Default, Debug, Clone)]
pub struct GlobalOptions {
#[clap(long, value_name = "LOG_DIR")]
#[arg(global = true)]
pub log_dir: Option<String>,
#[clap(long, value_name = "LOG_LEVEL")]
#[arg(global = true)]
pub log_level: Option<String>,
#[cfg(feature = "tokio-console")]
#[clap(long, value_name = "TOKIO_CONSOLE_ADDR")]
#[arg(global = true)]
pub tokio_console_addr: Option<String>,
}
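
Because `GlobalOptions` now derives `clap::Parser` and marks each flag with `#[arg(global = true)]`, the log flags can be flattened into every subcommand instead of being pulled out of raw `ArgMatches`. A minimal standalone sketch of that pattern; the `Cli` and `SubCommand` names here are illustrative, not taken from this diff:

use clap::Parser;

#[derive(Parser, Default, Debug, Clone)]
struct GlobalOptions {
    #[clap(long, value_name = "LOG_DIR")]
    #[arg(global = true)]
    log_dir: Option<String>,
    #[clap(long, value_name = "LOG_LEVEL")]
    #[arg(global = true)]
    log_level: Option<String>,
}

#[derive(Parser)]
struct Cli {
    #[clap(flatten)]
    global: GlobalOptions,
    #[clap(subcommand)]
    cmd: SubCommand,
}

#[derive(clap::Subcommand)]
enum SubCommand {
    Start,
}

fn main() {
    // Global args may be given after the subcommand and still propagate up.
    let cli = Cli::parse_from(["greptimedb", "start", "--log-level", "debug"]);
    assert_eq!(cli.global.log_level.as_deref(), Some("debug"));
}
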
impl CliOptions {
pub fn new(args: &ArgMatches) -> Self {
Self {
log_dir: args.get_one::<String>("log-dir").cloned(),
log_level: args.get_one::<String>("log-level").cloned(),
#[cfg(feature = "tokio-console")]
tokio_console_addr: args.get_one::<String>("tokio-console-addr").cloned(),
}
}
impl GlobalOptions {
pub fn tracing_options(&self) -> TracingOptions {
TracingOptions {
#[cfg(feature = "tokio-console")]

View File

@@ -18,15 +18,17 @@ use std::{fs, path};
use async_trait::async_trait;
use catalog::kvbackend::KvBackendCatalogManager;
use clap::Parser;
use common_catalog::consts::MIN_USER_TABLE_ID;
use common_catalog::consts::{MIN_USER_FLOW_ID, MIN_USER_TABLE_ID};
use common_config::{metadata_store_dir, KvBackendConfig};
use common_meta::cache_invalidator::{CacheInvalidatorRef, MultiCacheInvalidator};
use common_meta::datanode_manager::DatanodeManagerRef;
use common_meta::ddl::flow_meta::{FlowMetadataAllocator, FlowMetadataAllocatorRef};
use common_meta::ddl::table_meta::{TableMetadataAllocator, TableMetadataAllocatorRef};
use common_meta::ddl::ProcedureExecutorRef;
use common_meta::ddl::{DdlContext, ProcedureExecutorRef};
use common_meta::ddl_manager::DdlManager;
use common_meta::key::flow::{FlowMetadataManager, FlowMetadataManagerRef};
use common_meta::key::{TableMetadataManager, TableMetadataManagerRef};
use common_meta::kv_backend::KvBackendRef;
use common_meta::node_manager::NodeManagerRef;
use common_meta::region_keeper::MemoryRegionKeeper;
use common_meta::sequence::SequenceBuilder;
use common_meta::wal_options_allocator::{WalOptionsAllocator, WalOptionsAllocatorRef};
@@ -38,6 +40,7 @@ use common_wal::config::StandaloneWalConfig;
use datanode::config::{DatanodeOptions, ProcedureConfig, RegionEngineConfig, StorageConfig};
use datanode::datanode::{Datanode, DatanodeBuilder};
use file_engine::config::EngineConfig as FileEngineConfig;
use flow::FlownodeBuilder;
use frontend::frontend::FrontendOptions;
use frontend::instance::builder::FrontendBuilder;
use frontend::instance::{FrontendInstance, Instance as FeInstance, StandaloneDatanodeManager};
@@ -45,6 +48,7 @@ use frontend::server::Services;
use frontend::service_config::{
GrpcOptions, InfluxdbOptions, MysqlOptions, OpentsdbOptions, PostgresOptions, PromStoreOptions,
};
use meta_srv::metasrv::{FLOW_ID_SEQ, TABLE_ID_SEQ};
use mito2::config::MitoConfig;
use serde::{Deserialize, Serialize};
use servers::export_metrics::ExportMetricsOption;
@@ -58,7 +62,7 @@ use crate::error::{
Result, ShutdownDatanodeSnafu, ShutdownFrontendSnafu, StartDatanodeSnafu, StartFrontendSnafu,
StartProcedureManagerSnafu, StartWalOptionsAllocatorSnafu, StopProcedureManagerSnafu,
};
use crate::options::{CliOptions, MixOptions, Options};
use crate::options::{GlobalOptions, MixOptions, Options};
use crate::App;
#[derive(Parser)]
@@ -72,8 +76,8 @@ impl Command {
self.subcmd.build(opts).await
}
pub fn load_options(&self, cli_options: &CliOptions) -> Result<Options> {
self.subcmd.load_options(cli_options)
pub fn load_options(&self, global_options: &GlobalOptions) -> Result<Options> {
self.subcmd.load_options(global_options)
}
}
@@ -89,9 +93,9 @@ impl SubCommand {
}
}
fn load_options(&self, cli_options: &CliOptions) -> Result<Options> {
fn load_options(&self, global_options: &GlobalOptions) -> Result<Options> {
match self {
SubCommand::Start(cmd) => cmd.load_options(cli_options),
SubCommand::Start(cmd) => cmd.load_options(global_options),
}
}
}
@@ -253,8 +257,6 @@ pub struct StartCommand {
mysql_addr: Option<String>,
#[clap(long)]
postgres_addr: Option<String>,
#[clap(long)]
opentsdb_addr: Option<String>,
#[clap(short, long)]
influxdb_enable: bool,
#[clap(short, long)]
@@ -275,29 +277,29 @@ pub struct StartCommand {
}
impl StartCommand {
fn load_options(&self, cli_options: &CliOptions) -> Result<Options> {
fn load_options(&self, global_options: &GlobalOptions) -> Result<Options> {
let opts: StandaloneOptions = Options::load_layered_options(
self.config_file.as_deref(),
self.env_prefix.as_ref(),
StandaloneOptions::env_list_keys(),
)?;
self.convert_options(cli_options, opts)
self.convert_options(global_options, opts)
}
pub fn convert_options(
&self,
cli_options: &CliOptions,
global_options: &GlobalOptions,
mut opts: StandaloneOptions,
) -> Result<Options> {
opts.mode = Mode::Standalone;
if let Some(dir) = &cli_options.log_dir {
opts.logging.dir = dir.clone();
if let Some(dir) = &global_options.log_dir {
opts.logging.dir.clone_from(dir);
}
if cli_options.log_level.is_some() {
opts.logging.level = cli_options.log_level.clone();
if global_options.log_level.is_some() {
opts.logging.level.clone_from(&global_options.log_level);
}
let tls_opts = TlsOption::new(
@@ -307,11 +309,11 @@ impl StartCommand {
);
if let Some(addr) = &self.http_addr {
opts.http.addr = addr.clone()
opts.http.addr.clone_from(addr);
}
if let Some(data_home) = &self.data_home {
opts.storage.data_home = data_home.clone();
opts.storage.data_home.clone_from(data_home);
}
if let Some(addr) = &self.rpc_addr {
@@ -325,31 +327,26 @@ impl StartCommand {
}
.fail();
}
opts.grpc.addr = addr.clone()
opts.grpc.addr.clone_from(addr)
}
if let Some(addr) = &self.mysql_addr {
opts.mysql.enable = true;
opts.mysql.addr = addr.clone();
opts.mysql.addr.clone_from(addr);
opts.mysql.tls = tls_opts.clone();
}
if let Some(addr) = &self.postgres_addr {
opts.postgres.enable = true;
opts.postgres.addr = addr.clone();
opts.postgres.addr.clone_from(addr);
opts.postgres.tls = tls_opts;
}
if let Some(addr) = &self.opentsdb_addr {
opts.opentsdb.enable = true;
opts.opentsdb.addr = addr.clone();
}
if self.influxdb_enable {
opts.influxdb.enable = self.influxdb_enable;
}
opts.user_provider = self.user_provider.clone();
opts.user_provider.clone_from(&self.user_provider);
let metadata_store = opts.metadata_store.clone();
let procedure = opts.procedure.clone();
@@ -401,53 +398,83 @@ impl StartCommand {
.context(StartFrontendSnafu)?;
let multi_cache_invalidator = Arc::new(MultiCacheInvalidator::default());
let catalog_manager =
KvBackendCatalogManager::new(kv_backend.clone(), multi_cache_invalidator.clone()).await;
let catalog_manager = KvBackendCatalogManager::new(
dn_opts.mode,
None,
kv_backend.clone(),
multi_cache_invalidator.clone(),
)
.await;
let table_metadata_manager =
Self::create_table_metadata_manager(kv_backend.clone()).await?;
let flow_builder = FlownodeBuilder::new(
Default::default(),
fe_plugins.clone(),
table_metadata_manager.clone(),
catalog_manager.clone(),
)
.with_kv_backend(kv_backend.clone());
let flownode = Arc::new(flow_builder.build().await);
let builder =
DatanodeBuilder::new(dn_opts, fe_plugins.clone()).with_kv_backend(kv_backend.clone());
let datanode = builder.build().await.context(StartDatanodeSnafu)?;
let datanode_manager = Arc::new(StandaloneDatanodeManager(datanode.region_server()));
let node_manager = Arc::new(StandaloneDatanodeManager {
region_server: datanode.region_server(),
flow_server: flownode.clone(),
});
let table_id_sequence = Arc::new(
SequenceBuilder::new("table_id", kv_backend.clone())
SequenceBuilder::new(TABLE_ID_SEQ, kv_backend.clone())
.initial(MIN_USER_TABLE_ID as u64)
.step(10)
.build(),
);
let flow_id_sequence = Arc::new(
SequenceBuilder::new(FLOW_ID_SEQ, kv_backend.clone())
.initial(MIN_USER_FLOW_ID as u64)
.step(10)
.build(),
);
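
Both sequences above are built with `.step(10)`, i.e. IDs are leased from the kv backend in batches of ten rather than one round-trip per ID. A conceptual, in-memory sketch of that batching idea (not the real `SequenceBuilder` API; the initial value is a placeholder):

/// Toy allocator mimicking step-based ID leasing: the "backend" counter
/// advances once per batch of `step` IDs, not once per ID.
struct StepSequence {
    backend_next: u64, // what would be persisted in the kv backend
    local_next: u64,
    local_end: u64,
    step: u64,
}

impl StepSequence {
    fn new(initial: u64, step: u64) -> Self {
        Self { backend_next: initial, local_next: 0, local_end: 0, step }
    }

    fn next(&mut self) -> u64 {
        if self.local_next == self.local_end {
            // Lease a new range [backend_next, backend_next + step).
            self.local_next = self.backend_next;
            self.local_end = self.backend_next + self.step;
            self.backend_next = self.local_end;
        }
        let id = self.local_next;
        self.local_next += 1;
        id
    }
}

fn main() {
    let mut seq = StepSequence::new(1024, 10); // placeholder initial value
    assert_eq!(seq.next(), 1024);
    assert_eq!(seq.next(), 1025);
}
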
let wal_options_allocator = Arc::new(WalOptionsAllocator::new(
opts.wal_meta.clone(),
kv_backend.clone(),
));
let table_metadata_manager =
Self::create_table_metadata_manager(kv_backend.clone()).await?;
let flow_metadata_manager = Arc::new(FlowMetadataManager::new(kv_backend.clone()));
let table_meta_allocator = Arc::new(TableMetadataAllocator::new(
table_id_sequence,
wal_options_allocator.clone(),
));
let flow_meta_allocator = Arc::new(FlowMetadataAllocator::with_noop_peer_allocator(
flow_id_sequence,
));
let ddl_task_executor = Self::create_ddl_task_executor(
table_metadata_manager,
procedure_manager.clone(),
datanode_manager.clone(),
node_manager.clone(),
multi_cache_invalidator,
table_metadata_manager,
table_meta_allocator,
flow_metadata_manager,
flow_meta_allocator,
)
.await?;
let mut frontend = FrontendBuilder::new(
kv_backend,
catalog_manager,
datanode_manager,
ddl_task_executor,
)
.with_plugin(fe_plugins.clone())
.try_build()
.await
.context(StartFrontendSnafu)?;
let mut frontend =
FrontendBuilder::new(kv_backend, catalog_manager, node_manager, ddl_task_executor)
.with_plugin(fe_plugins.clone())
.try_build()
.await
.context(StartFrontendSnafu)?;
// The flow server needs access to the frontend so it can write insert requests back.
flownode
.set_frontend_invoker(Box::new(frontend.clone()))
.await;
let _handle = flownode.clone().run_background();
let servers = Services::new(fe_opts.clone(), Arc::new(frontend.clone()), fe_plugins)
.build()
@@ -466,20 +493,26 @@ impl StartCommand {
}
pub async fn create_ddl_task_executor(
table_metadata_manager: TableMetadataManagerRef,
procedure_manager: ProcedureManagerRef,
datanode_manager: DatanodeManagerRef,
node_manager: NodeManagerRef,
cache_invalidator: CacheInvalidatorRef,
table_meta_allocator: TableMetadataAllocatorRef,
table_metadata_manager: TableMetadataManagerRef,
table_metadata_allocator: TableMetadataAllocatorRef,
flow_metadata_manager: FlowMetadataManagerRef,
flow_metadata_allocator: FlowMetadataAllocatorRef,
) -> Result<ProcedureExecutorRef> {
let procedure_executor: ProcedureExecutorRef = Arc::new(
DdlManager::try_new(
DdlContext {
node_manager,
cache_invalidator,
memory_region_keeper: Arc::new(MemoryRegionKeeper::default()),
table_metadata_manager,
table_metadata_allocator,
flow_metadata_manager,
flow_metadata_allocator,
},
procedure_manager,
datanode_manager,
cache_invalidator,
table_metadata_manager,
table_meta_allocator,
Arc::new(MemoryRegionKeeper::default()),
true,
)
.context(InitDdlManagerSnafu)?,
@@ -516,7 +549,7 @@ mod tests {
use servers::Mode;
use super::*;
use crate::options::{CliOptions, ENV_VAR_SEP};
use crate::options::{GlobalOptions, ENV_VAR_SEP};
#[tokio::test]
async fn test_try_from_start_command_to_anymap() {
@@ -590,6 +623,9 @@ mod tests {
timeout = "33s"
body_limit = "128MB"
[opentsdb]
enable = true
[logging]
level = "debug"
dir = "/tmp/greptimedb/test/logs"
@@ -601,7 +637,8 @@ mod tests {
..Default::default()
};
let Options::Standalone(options) = cmd.load_options(&CliOptions::default()).unwrap() else {
let Options::Standalone(options) = cmd.load_options(&GlobalOptions::default()).unwrap()
else {
unreachable!()
};
let fe_opts = options.frontend;
@@ -617,6 +654,7 @@ mod tests {
assert_eq!(2, fe_opts.mysql.runtime_size);
assert_eq!(None, fe_opts.mysql.reject_no_database);
assert!(fe_opts.influxdb.enable);
assert!(fe_opts.opentsdb.enable);
let DatanodeWalConfig::RaftEngine(raft_engine_config) = dn_opts.wal else {
unreachable!()
@@ -635,7 +673,7 @@ mod tests {
match &dn_opts.storage.providers[1] {
datanode::config::ObjectStoreConfig::S3(s3_config) => {
assert_eq!(
"Secret([REDACTED alloc::string::String])".to_string(),
"SecretBox<alloc::string::String>([REDACTED])".to_string(),
format!("{:?}", s3_config.access_key_id)
);
}
@@ -656,7 +694,7 @@ mod tests {
};
let Options::Standalone(opts) = cmd
.load_options(&CliOptions {
.load_options(&GlobalOptions {
log_dir: Some("/tmp/greptimedb/test/logs".to_string()),
log_level: Some("debug".to_string()),
@@ -729,7 +767,7 @@ mod tests {
};
let Options::Standalone(opts) =
command.load_options(&CliOptions::default()).unwrap()
command.load_options(&GlobalOptions::default()).unwrap()
else {
unreachable!()
};
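
With the `--opentsdb-addr` flag removed, OpenTSDB ingestion is toggled purely through configuration, as the `[opentsdb] enable = true` block in the test TOML above shows. A minimal sketch of parsing such a block with `serde` and `toml`; the local `OpentsdbOptions`/`StandaloneConfig` structs are illustrative stand-ins, not the real `servers` types:

use serde::Deserialize;

#[derive(Debug, Deserialize, Default)]
#[serde(default)]
struct OpentsdbOptions {
    enable: bool,
}

#[derive(Debug, Deserialize, Default)]
#[serde(default)]
struct StandaloneConfig {
    opentsdb: OpentsdbOptions,
}

fn main() {
    let cfg: StandaloneConfig = toml::from_str("[opentsdb]\nenable = true\n").unwrap();
    assert!(cfg.opentsdb.enable);
}
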

View File

@@ -16,6 +16,7 @@ common-macro.workspace = true
paste = "1.0"
serde = { version = "1.0", features = ["derive"] }
snafu.workspace = true
zeroize = { version = "1.6", default-features = false, features = ["alloc"] }
[dev-dependencies]
toml.workspace = true

View File

@@ -53,14 +53,6 @@ impl ErrorExt for Error {
fn as_any(&self) -> &dyn Any {
self
}
fn location_opt(&self) -> Option<common_error::snafu::Location> {
match self {
Error::Overflow { location, .. } => Some(*location),
Error::Underflow { location, .. } => Some(*location),
Error::Eof { location, .. } => Some(*location),
}
}
}
macro_rules! impl_read_le {

View File

@@ -15,67 +15,12 @@
pub mod bit_vec;
pub mod buffer;
pub mod bytes;
pub mod plugins;
#[allow(clippy::all)]
pub mod readable_size;
use core::any::Any;
use std::sync::{Arc, Mutex, MutexGuard};
pub mod secrets;
pub type AffectedRows = usize;
pub use bit_vec::BitVec;
/// [`Plugins`] is a wrapper of Arc contents.
/// Make it Cloneable and we can treat it like an Arc struct.
#[derive(Default, Clone)]
pub struct Plugins {
inner: Arc<Mutex<anymap::Map<dyn Any + Send + Sync>>>,
}
impl Plugins {
pub fn new() -> Self {
Self {
inner: Arc::new(Mutex::new(anymap::Map::new())),
}
}
fn lock(&self) -> MutexGuard<anymap::Map<dyn Any + Send + Sync>> {
self.inner.lock().unwrap()
}
pub fn insert<T: 'static + Send + Sync>(&self, value: T) {
let _ = self.lock().insert(value);
}
pub fn get<T: 'static + Send + Sync + Clone>(&self) -> Option<T> {
let binding = self.lock();
binding.get::<T>().cloned()
}
pub fn map_mut<T: 'static + Send + Sync, F, R>(&self, mapper: F) -> R
where
F: FnOnce(Option<&mut T>) -> R,
{
let mut binding = self.lock();
let opt = binding.get_mut::<T>();
mapper(opt)
}
pub fn map<T: 'static + Send + Sync, F, R>(&self, mapper: F) -> Option<R>
where
F: FnOnce(&T) -> R,
{
let binding = self.lock();
binding.get::<T>().map(mapper)
}
pub fn len(&self) -> usize {
let binding = self.lock();
binding.len()
}
pub fn is_empty(&self) -> bool {
let binding = self.lock();
binding.is_empty()
}
}
pub use plugins::Plugins;

View File

@@ -0,0 +1,127 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::any::Any;
use std::sync::{Arc, RwLock, RwLockReadGuard, RwLockWriteGuard};
/// [`Plugins`] is a wrapper of [AnyMap](https://github.com/chris-morgan/anymap) and provides a thread-safe way to store and retrieve plugins.
/// It is cloneable and can be treated like an `Arc`-wrapped struct.
#[derive(Default, Clone)]
pub struct Plugins {
inner: Arc<RwLock<anymap::Map<dyn Any + Send + Sync>>>,
}
impl Plugins {
pub fn new() -> Self {
Self {
inner: Arc::new(RwLock::new(anymap::Map::new())),
}
}
pub fn insert<T: 'static + Send + Sync>(&self, value: T) {
let _ = self.write().insert(value);
}
pub fn get<T: 'static + Send + Sync + Clone>(&self) -> Option<T> {
self.read().get::<T>().cloned()
}
pub fn map_mut<T: 'static + Send + Sync, F, R>(&self, mapper: F) -> R
where
F: FnOnce(Option<&mut T>) -> R,
{
let mut binding = self.write();
let opt = binding.get_mut::<T>();
mapper(opt)
}
pub fn map<T: 'static + Send + Sync, F, R>(&self, mapper: F) -> Option<R>
where
F: FnOnce(&T) -> R,
{
self.read().get::<T>().map(mapper)
}
pub fn len(&self) -> usize {
self.read().len()
}
pub fn is_empty(&self) -> bool {
self.read().is_empty()
}
fn read(&self) -> RwLockReadGuard<anymap::Map<dyn Any + Send + Sync>> {
self.inner.read().unwrap()
}
fn write(&self) -> RwLockWriteGuard<anymap::Map<dyn Any + Send + Sync>> {
self.inner.write().unwrap()
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_plugins() {
#[derive(Debug, Clone)]
struct FooPlugin {
x: i32,
}
#[derive(Debug, Clone)]
struct BarPlugin {
y: String,
}
let plugins = Plugins::new();
let m = plugins.clone();
let thread1 = std::thread::spawn(move || {
m.insert(FooPlugin { x: 42 });
if let Some(foo) = m.get::<FooPlugin>() {
assert_eq!(foo.x, 42);
}
assert_eq!(m.map::<FooPlugin, _, _>(|foo| foo.x * 2), Some(84));
});
let m = plugins.clone();
let thread2 = std::thread::spawn(move || {
m.clone().insert(BarPlugin {
y: "hello".to_string(),
});
if let Some(bar) = m.get::<BarPlugin>() {
assert_eq!(bar.y, "hello");
}
m.map_mut::<BarPlugin, _, _>(|bar| {
if let Some(bar) = bar {
bar.y = "world".to_string();
}
});
assert_eq!(m.get::<BarPlugin>().unwrap().y, "world");
});
thread1.join().unwrap();
thread2.join().unwrap();
assert_eq!(plugins.len(), 2);
assert!(!plugins.is_empty());
}
}
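
The switch from `Mutex` to `RwLock` matters because `Plugins` is read far more often than it is written: with `RwLock`, concurrent `get`/`map` calls no longer serialize behind a single lock. A small standalone sketch of that distinction using plain `std` types, unrelated to this crate:

use std::sync::{Arc, RwLock};
use std::thread;

fn main() {
    let shared = Arc::new(RwLock::new(vec![1, 2, 3]));

    // Multiple read guards can be held concurrently across threads...
    let handles: Vec<_> = (0..4)
        .map(|_| {
            let s = Arc::clone(&shared);
            thread::spawn(move || s.read().unwrap().len())
        })
        .collect();
    for h in handles {
        assert_eq!(h.join().unwrap(), 3);
    }

    // ...while a write guard is exclusive, like a Mutex lock.
    shared.write().unwrap().push(4);
    assert_eq!(shared.read().unwrap().len(), 4);
}
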

Some files were not shown because too many files have changed in this diff.