Compare commits


102 Commits

Author SHA1 Message Date
zyy17
ddc7a80f56 fix: add serialize_ignore_column_ids() to fix deserialize region options failed from json string (#4229)
* fix: add serialize_ignore_column_ids() to fix deserialize region options failed from json string

* refactor: return empty vector if column_id is empty
2024-06-30 09:59:14 +00:00
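
The pattern behind this fix is serde's skip-on-serialize plus default-on-deserialize: the field is never written out, and an options JSON string that lacks it still parses. A minimal sketch assuming `serde`/`serde_json`; the `RegionOptions` and `column_ids` names below mirror the commit message but are illustrative stand-ins, not GreptimeDB's actual types.

```rust
use serde::{Deserialize, Serialize};

// Illustrative stand-in for the real region options struct.
#[derive(Debug, Serialize, Deserialize)]
struct RegionOptions {
    ttl: Option<String>,
    // Skipped when writing JSON; when absent on input, deserialization
    // falls back to an empty vector instead of failing.
    #[serde(skip_serializing, default)]
    column_ids: Vec<u32>,
}

fn main() {
    let opts = RegionOptions { ttl: Some("7d".into()), column_ids: vec![1, 2] };
    let json = serde_json::to_string(&opts).unwrap();
    assert!(!json.contains("column_ids"));

    // A JSON string produced without the field still round-trips.
    let parsed: RegionOptions = serde_json::from_str(r#"{"ttl":"7d"}"#).unwrap();
    assert!(parsed.column_ids.is_empty());
}
```
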
Ruihang Xia
a7aa556763 feat: output multiple partition in MergeScanExec (#4227)
* feat: output multiple partition in MergeScanExec

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix range manipulate

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-06-28 13:45:22 +00:00
Lei, HUANG
ef935a1de6 feat!: reduce sorted runs during compaction (#3702)
* feat: add functions to find and merge sorted runs

* chore: refactor code

* chore: remove some duplicates

* chore: remove one clone

* refactor: change max_active_window_files to max_active_window_runs

* feat: integrate with sorted runs

* fix: unit tests

* feat: limit num of sorted runs during compaction

* fix: some test

* fix: some cr comments

* feat: use smallvec

* chore: rebase main

* feat/reduce-sorted-runs:
 Refactor compaction logic and update test configurations

 - Refactored `merge_all_runs` function to use `sort_ranged_items` for sorting.
 - Improved item merging logic by iterating with `into_iter` and handling overlaps.
 - Updated test configurations to use `max_active_window_runs` instead of `max_active_window_files` for consistency.

---------

Co-authored-by: tison <wander4096@gmail.com>
2024-06-28 08:17:30 +00:00
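
A "sorted run" here is a sequence of files whose time ranges do not overlap, so limiting the number of runs bounds how many files a time-ordered read must merge. A rough sketch of counting runs by greedy interval partitioning, assuming each file reduces to a `(start, end)` timestamp pair; this is not the code from the PR, whose types live in mito2.

```rust
/// Stand-in for an SST file's (min, max) timestamp span.
#[derive(Clone, Copy, Debug)]
struct Ranged {
    start: i64,
    end: i64,
}

/// Greedy interval partitioning: the number of sorted runs is the number of
/// chains needed so that ranges within a chain never overlap.
fn count_sorted_runs(mut items: Vec<Ranged>) -> usize {
    items.sort_by_key(|r| (r.start, r.end));
    // `run_ends[i]` holds the largest end timestamp in run `i`.
    let mut run_ends: Vec<i64> = Vec::new();
    for item in items {
        // Extend the first run this range does not overlap, else open a new one.
        match run_ends.iter_mut().find(|end| **end < item.start) {
            Some(end) => *end = item.end,
            None => run_ends.push(item.end),
        }
    }
    run_ends.len()
}

fn main() {
    let files = vec![
        Ranged { start: 0, end: 10 },
        Ranged { start: 5, end: 15 },  // overlaps the first file -> second run
        Ranged { start: 20, end: 30 }, // extends the first run
    ];
    assert_eq!(count_sorted_runs(files), 2);
}
```
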
Weny Xu
352cc9ddde test: add e2e test for region failover (#4188)
* test: add e2e test for region failover

* chore: add ci cfg

* chore: reduce parallelism to 8

* fix(ci): enable region failure

* chore: set sqlx LogLevel to Off

* refactor: move help functions to utils
2024-06-28 06:49:41 +00:00
discord9
b6585e3581 refactor(flow): make from_substrait_* async& worker handle refactor (#4210)
* refactor: use oneshot to receive result

* refactor: make from_substrait_* async

* refactor: remove serde for plan&expr
2024-06-27 17:17:46 +00:00
Yingwen
10b7a3d24d feat: Implements merge_mode region options (#4208)
* feat: add update_mode to region options

* test: add test

* feat: last not null iter

* feat: time series last not null

* feat: partition tree update mode

* feat: partition tree

* fix: last not null iter slice

* test: add test for compaction

* test: use second resolution

* style: fix clippy

* chore: merge two lines

Co-authored-by: Jeremyhi <jiachun_feng@proton.me>

* chore: address CR comments

* refactor: UpdateMode -> MergeMode

* refactor: LastNotNull -> LastNonNull

* chore: return None earlier

* feat: validate region options

make merge mode optional and use default while it is None

* test: fix tests

---------

Co-authored-by: Jeremyhi <jiachun_feng@proton.me>
2024-06-27 07:52:58 +00:00
Eugene Tolbakov
8702066967 feat(sql): add casting support for shortened intervals (#4220)
* feat(sql): add casting support for shortened intervals

* chore(sql): apply CR suggestion, minor renamings
2024-06-26 22:07:09 +00:00
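
Shortened interval support of this kind usually amounts to rewriting a compact literal such as '1h30m' into the verbose form the parser already accepts. A hypothetical sketch of that expansion with a made-up abbreviation table; the real mapping and entry point live in GreptimeDB's sql crate.

```rust
/// Expand a shortened interval literal ("1h30m") into a long form
/// ("1 hours 30 minutes") that a SQL interval parser already understands.
fn expand_interval_shorthand(input: &str) -> Option<String> {
    // Hypothetical abbreviation table for illustration only.
    fn unit_name(unit: char) -> Option<&'static str> {
        Some(match unit {
            'y' => "years",
            'w' => "weeks",
            'd' => "days",
            'h' => "hours",
            'm' => "minutes",
            's' => "seconds",
            _ => return None,
        })
    }

    let mut parts = Vec::new();
    let mut digits = String::new();
    for ch in input.chars() {
        if ch.is_ascii_digit() {
            digits.push(ch);
        } else {
            if digits.is_empty() {
                return None; // a unit with no preceding number
            }
            parts.push(format!("{} {}", digits, unit_name(ch)?));
            digits.clear();
        }
    }
    if !digits.is_empty() {
        return None; // a trailing number with no unit
    }
    Some(parts.join(" "))
}

fn main() {
    assert_eq!(expand_interval_shorthand("5m").as_deref(), Some("5 minutes"));
    assert_eq!(
        expand_interval_shorthand("1h30m").as_deref(),
        Some("1 hours 30 minutes")
    );
    assert_eq!(expand_interval_shorthand("oops"), None);
}
```
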
Jeremyhi
df0fff2f2c feat(servers): make http timeout and body limit optional (#4217)
* feat(servers): make http timeout and body limit optional

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add comment

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* chore: make config-docs

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
Co-authored-by: Ruihang Xia <waynestxia@gmail.com>
2024-06-26 06:14:14 +00:00
dennis zhuang
a779cb36ec fix: wrong frontend registration address (#4199)
* fix: frontend registration address is wrong, #4186

* fix: license header

* chore: adds hostname to frontend grpc

* fix: forgot to run make config-docs

* chore: warn when using bind_addr

* fix: flow node heartbeat carrying address
2024-06-26 06:13:07 +00:00
zyy17
948c8695d0 refactor: add SerializedPickerOutput and field modification of CompactorRequest (#4198)
* refactor: remove compaction_options and use RegionOptions type for region_options

* refactor: add file_purger field in CompactionRegion

* refactor: add SerializedPickerOutput

* refactor: rename CompactorRequest to OpenCompactionRegionRequest and remove PickerOutput

* refactor: use &PickerOutput instead of clone()
2024-06-25 13:04:07 +00:00
Ruihang Xia
4d4a6cd265 feat: validate partition rule on create table (#4213)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-06-25 12:55:01 +00:00
Zhenchi
5dde148b3d feat(puffin): implement CachedPuffinReader (#4209)
* feat(puffin): implement CachedPuffinReader

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* chore: next PR to introduce CachedPuffinManager

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* chore: rename

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* address comments

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

---------

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>
2024-06-25 12:27:06 +00:00
Weny Xu
8cbe7166b0 refactor: migrate region failover implementation to region migration (#4172)
* refactor: migrate region failover implementation to region migration

* fix: use HEARTBEAT_INTERVAL_MILLIS as lease secs

* fix: return false if leader is downgraded

* fix: only remove failure detector after submitting procedure successfully

* feat: ignore dropped region

* refactor: retrieve table routes in batches

* refactor: disable region failover on local WAL implementation

* fix: move the guard into procedure

* feat: use real peer addr

* feat: use interval instead of sleep

* chore: rename `HeartbeatSender` to `HeartbeatAcceptor`

* chore: apply suggestions from CR

* chore: reduce duplicate code

* chore: apply suggestions from CR

* feat: lookup peer addr

* chore: add comments

* chore: apply suggestions from CR

* chore: apply suggestions from CR
2024-06-25 11:58:17 +00:00
Yingwen
f5ac158605 docs: remove outdated docs (#4205)
* docs: remove outdated docs

* ci: align ci

* chore: Revert "ci: align ci"

This reverts commit 2c3c0eed7e.

* ci: fix docs ci
2024-06-25 09:46:30 +00:00
Lei, HUANG
120447779c feat: bulk memtable codec (#4163)
* feat: introduce bulk memtable encoder/decoder

* chore: rebase main

* chore: resolve some comments

* refactor: only carries time unit in ArraysSorter

* fix: some comments
2024-06-25 09:02:20 +00:00
discord9
82f6373574 feat: FlownodeClient (#4206)
* feat: FlownodeClient

* chore: remove wrong doc

* fix: debug impl for NodeClients

* chore: rename `FlownodeClient` to `FlowRequester`
2024-06-25 08:40:24 +00:00
Zhenchi
1e815dddf1 feat(puffin): implement CachedPuffinWriter (#4203)
* feat(puffin): support lz4 compression for footer

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* feat(puffin): introduce puffin manager trait

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* chore: polish

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* feat(puffin): implement CachedPuffinWriter

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* chore: polish

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

---------

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>
2024-06-25 08:00:48 +00:00
Weny Xu
b2f61aa1cf fix: format error correctly (#4204)
* chore: remove TODO comments

* fix: format error correctly
2024-06-25 07:56:13 +00:00
Ruihang Xia
a1e2612bbf fix: align workflows again for the troublesome GHA (#4196)
* fix: align workflows again for the troublesome GHA

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* unify

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-06-25 06:52:01 +00:00
Yingwen
9aaf7d79bf feat: Dedup strategy that keeps the last not null field (#4184)
* feat: dedup strategy: last not null

* fix: fix tests

* fix: fix single batch

* chore: warning

* chore: skip has_null check

* refactor: rename fields

* fix: merge last fields may not reset builder

* chore: clear before filter deleted

* chore: remove debug logs

* chore: Update comment

Co-authored-by: Ruihang Xia <waynestxia@gmail.com>

---------

Co-authored-by: Ruihang Xia <waynestxia@gmail.com>
2024-06-25 06:38:48 +00:00
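
The idea of this dedup strategy: among rows that share a primary key and timestamp, each field keeps the most recent non-null value rather than the whole most recent row. A toy sketch at single-row granularity, assuming the newer row sorts after the older one; the real implementation operates on mito2 batches.

```rust
/// Merge two versions of a row field-wise: keep the newer value unless it is
/// null, in which case the older non-null value survives.
fn merge_last_non_null(older: &[Option<i64>], newer: &[Option<i64>]) -> Vec<Option<i64>> {
    older
        .iter()
        .zip(newer)
        .map(|(old, new)| (*new).or(*old))
        .collect()
}

fn main() {
    let older = [Some(1), Some(2), None];
    let newer = [None, Some(5), None];
    // Field 0 falls back to the older value; field 1 takes the newer one.
    assert_eq!(merge_last_non_null(&older, &newer), vec![Some(1), Some(5), None]);
}
```
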
Yingwen
4a4237115a test: wait until checkpoint finish (#4202) 2024-06-25 06:21:19 +00:00
Eugene Tolbakov
840f81e0fd fix(sql): improve compound signed number processing (#4200) 2024-06-25 04:01:46 +00:00
Eugene Tolbakov
cdd4baf183 feat(sql): improve interval expression, support shortened version (#4182)
* feat(sql): improve interval expression, support shortened version

* fix(sql): remove accidental change of sqlness assertion

* fix(sql): address CR feedback, add more tests

* chore(sql): add more tests
2024-06-24 20:29:34 +00:00
Zhenchi
4b42c7b840 feat(puffin): introduce puffin manager trait (#4195)
* feat(puffin): support lz4 compression for footer

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* feat(puffin): introduce puffin manager trait

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* chore: polish

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

---------

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>
2024-06-24 16:02:52 +00:00
discord9
a44fe627ce feat: heartbeat task&peer lookup in proc (#4179)
* feat: heartbeat task

* feat: use real flow peer allocator when building

* feat: add peer look up in ddl context

* fix: drop flow test

* refactor: per review(WIP)

* refactor: not check if is alive

* refactor: per review

* refactor: remove useless `reset`

* refactor: per bot advices

* refactor: alive peer

* chore: bot review
2024-06-24 15:06:33 +00:00
taobo
77904adaaf fix: region_peers returns same region_id for multi logical tables (#4190)
* fix: `region_peers` returns same region_id for multi logical tables

* test: add sqlness test for information_schema.region_peers

* refactor: region_peers sqlness
2024-06-24 14:12:36 +00:00
Zhenchi
07cbabab7b feat(puffin): support lz4 compression for footer (#4194)
* feat(puffin): support lz4 compression for footer

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* address comments

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

---------

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>
2024-06-24 11:39:03 +00:00
zyy17
ea7c17089f refactor: add region_dir in CompactionRegion (#4187) 2024-06-24 08:25:52 +00:00
discord9
517917453d fix(flow): fix call df func bug&sqlness test (#4165)
* tests: flow sqlness tests

* tests: WIP df func test

* fix: use schema before expand for transform expr

* tests: some basic flow tests

* tests: unit test

* chore: dep use rev not patch

* fix: weird sqlness error?

* refactor: per review

* fix: temp sqlness bug

* fix: use fixed sqlness

* fix: impl drop as async shutdown

* refactor: per bot's review

* tests: drop worker handler both sync/async

* docs: add rationale for test

* refactor: per review

* chore: fmt
2024-06-24 07:52:45 +00:00
zyy17
0139a70549 refactor: make RegionOptions and MergeOutput serializable (#4180)
* chore: make RegionOptions serializable and add region_dir in CompactionRegion

* refactor: make `PickerOutput` and `MergeOutput` serializable and deserializable

* refactor: remove Serialize and Deserialize from PickerOutput

* chore: revert changes for file.rs

* chore: revert changes for compactor.rs and compaction.rs

---------

Co-authored-by: tison <wander4096@gmail.com>
2024-06-24 07:37:53 +00:00
tison
5566dd72f2 chore: highlight our committers in CONTRIBUTING.md (#4189)
* chore: highlight our committers in CONTRIBUTING.md

Signed-off-by: tison <wander4096@gmail.com>

* chore: apply suggestion

Co-authored-by: coderabbitai[bot] <136622811+coderabbitai[bot]@users.noreply.github.com>

* chore: apply suggestion

Co-authored-by: coderabbitai[bot] <136622811+coderabbitai[bot]@users.noreply.github.com>

* Trigger CI

Signed-off-by: tison <wander4096@gmail.com>

---------

Signed-off-by: tison <wander4096@gmail.com>
Co-authored-by: dennis zhuang <killme2008@gmail.com>
Co-authored-by: coderabbitai[bot] <136622811+coderabbitai[bot]@users.noreply.github.com>
2024-06-22 08:23:30 +00:00
ZonaHe
dea33a7aaf feat: update dashboard to v0.5.2 (#4185)
Co-authored-by: ZonaHex <ZonaHex@users.noreply.github.com>
2024-06-21 14:32:46 +00:00
Weny Xu
15ad9f2f6f fix: region logical regions after catching up (#4176)
* fix: region logical regions after catching up

* test: add metric table migration test

* chore: apply suggestions from CR
2024-06-21 10:30:18 +00:00
Ruihang Xia
fce65c97e3 feat: make RegionScanner aware of PartitionRange (#4170)
* define PartitionRange

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add optimizer rule

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* implement interfaces

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* impl aggr stream

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add fallback method

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix tests

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix tests

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix typo

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* update sqlness result

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add document and rename struct

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add more comments

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix typo

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-06-21 09:54:22 +00:00
Ruihang Xia
ac574b66ab feat: add num_rows and num_row_groups to manifest (#4183)
* feat: add num_rows and num_row_groups to manifest

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add document

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-06-21 07:15:13 +00:00
Weny Xu
1e52ba325f feat: introduce chaos crds (#4173)
feat: add chaos-mesh crds
2024-06-21 06:28:51 +00:00
Yohan Wal
b739c9fd10 feat: PREPARE and EXECUTE statement from mysql client (#4125)
* feat: prepare stmt in mysql client

* feat: execute stmt in mysql client

* fix: handle parameters properly

* refactor: use existing funcs to convert expr to scalar value

* refactor: use uuid strings as stmt_key for queries from COM_PREPARE packet

* refactor: take prepare and execute parser as submodule

* test: add unit test for converting expr to scalar value

* feat: deallocate stmt in mysql client

* chore: comments and duplicates

---------

Co-authored-by: dennis zhuang <killme2008@gmail.com>
2024-06-21 02:02:57 +00:00
Ruihang Xia
21c89f3247 perf: optimize RecordBatch to HttpOutput conversion (#4178)
* add benchmark

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* save 70ms

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add profiler

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* save 50ms

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* save 160ms

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* format toml file

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix license header

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix windows build

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-06-20 12:33:58 +00:00
Jeremyhi
5bcd7a14bb feat: use the write runtime to handle the heartbeats (#4177) 2024-06-20 08:45:07 +00:00
dennis zhuang
4306cba866 feat: show database options (#4174)
* test: test create table with database ttl

* feat: show database options

* fix: comment

* chore: apply suggestion

Co-authored-by: Jeremyhi <jiachun_feng@proton.me>

* chore: fix CR comments and refactor

* chore: style

Co-authored-by: Weny Xu <wenymedia@gmail.com>

---------

Co-authored-by: Jeremyhi <jiachun_feng@proton.me>
Co-authored-by: Weny Xu <wenymedia@gmail.com>
2024-06-20 04:21:58 +00:00
Jeremyhi
4c3d4af127 feat: register flow node (#4166)
* feat: rename keys.rs to key.rs

* feat: refactor datanode keys

* feat: add flownode key

* feat: keep flownode's lease info in metasrv

* feat: flow selector

* feat: impl_try_from_lease_key and impl_from_str_lease_key to simplify code
2024-06-20 03:46:19 +00:00
localhost
48a0f39b19 chore: enhance add pipeline http api return data (#4167)
* chore: enhance add pipeline http api return data

* chore: replacing hard-coded header value
2024-06-20 02:19:31 +00:00
ZonaHe
8abebad458 feat: update dashboard to v0.5.1 (#4171)
Co-authored-by: ZonaHex <ZonaHex@users.noreply.github.com>
2024-06-20 01:46:54 +00:00
LFC
cc2f7efb98 chore: bump datafusion version to fix last_value regression (#4169)
* chore: bump datafusion version to fix `last_value` regression

* fix: resolve PR comments

* fix ci
2024-06-19 07:47:17 +00:00
taobo
22d12683b4 refactor!: unify FrontendOptions and DatanodeOptions by using GrpcOptions (#4088)
* refactor: move GrpcOptions to servers/grpc

* fix: optimize code

* fix: docs

* refactor: move DatanodeOptions.rpc_hostname to grpc.hostname

* fix: merge main

* refactor code impl

test: add test_depreacted_cli_options unit test

* Update src/servers/src/grpc.rs

Co-authored-by: Yingwen <realevenyag@gmail.com>

---------

Co-authored-by: Yingwen <realevenyag@gmail.com>
2024-06-18 22:45:38 +00:00
Yingwen
fe74efdafe feat: Implement memtable range (#4162)
* refactor: RangeBase

* feat: memtable range

* feat: scanner use mem range

* feat: remove base from mem range context

* feat: impl ranges for memtables

* chore: fix warnings

* refactor: make predicate cheap to clone

* refactor: MemRange -> MemtableRange

* feat: pub empty memtable to fix warnings

* test: fix sqlness result
2024-06-18 22:25:19 +00:00
discord9
cd9705ccd7 feat(flow): support datafusion scalar function (#4142)
* chore: call df function types

* feat: RelationDesc to DfSchema

* refactor: use RelationDesc instead of Type

* chore: WIP get to phy expr

* feat: custom deserialize

* chore: fmt

* refactor: renaming to DfScalarFunction

* feat: eval df func(untested)

* fix: had to spawn a thread for calling async

* chore: per review advices

* tests: test df scalar function
2024-06-18 12:34:38 +00:00
Weny Xu
ea2d067cf1 feat: implement the OrderedBatchProducer (#4134)
* feat: implement the `OrderedBatchProducer`

* test: add test of cancel safety

* chore: apply suggestions from CR

* chore: apply suggestions from CR

* refactor: simplify the `BackgroundProducerWorker`

* feat: implement the OrderedBatchProducer v2

* refactor: switch to `OrderedBatchProducer`

* chore: rename to `MAX_FLUSH_QUEUE_SIZE`

* refactor: switch to `OrderedBatchProducerV2`

* refactor: remove `OrderedBatchProducerV1`

* test: add tests

* refactor: make config configurable

* refactor: minor refactor

* chore: remove unused code

* chore: remove `benchmarks` crate

* chore: update config doc

* chore: remove unused comment

* refactor: refactor client registry

* refactor: rename `max_batch_size` to `max_batch_bytes`

* chore: use constant value

* chore: ensure serialized meta < ESTIMATED_META_SIZE

* chore: apply suggestions from CR

* chore: remove the `CHANNEL_SIZE`

* chore: apply suggestions from CR

* fix: ensure serialized meta < ESTIMATED_META_SIZE

* chore: apply suggestions from CR

* chore: apply suggestions from CR
2024-06-18 07:20:01 +00:00
Ning Sun
70d113a355 feat: update default size of bgworkers, add hbworkers (#4129)
* feat: update default size of bgworkers, add hbworkers

* feat: update frontend heartbeat as well

* chore: update sample config files and default settings

* chore: update config docs

* Revert "chore: update config docs"

This reverts commit 8107f4c120.

* Revert "chore: update sample config files and default settings"

This reverts commit f5ae701c8d.

* feat: use default heartbeat runtime size

* chore: update config docs
2024-06-18 06:18:37 +00:00
shuiyisong
cb657ae51e feat(pipeline): join processor (#4158)
* feat: add join processor

* test: add join simple test

* chore: fix header

* chore: update commit

Co-authored-by: dennis zhuang <killme2008@gmail.com>

* test: add more join test

* chore: fix lint

* chore: update comment

---------

Co-authored-by: dennis zhuang <killme2008@gmail.com>
2024-06-18 05:00:34 +00:00
Jeremyhi
141d017576 feat: enable metasrv to accept flownode's heartbeats (#4160)
* feat: make metasrv accept flownode's heartbeat

* chore: proto
2024-06-18 04:07:46 +00:00
yuanbohan
0fc18b6865 feat(pipeline): gsub processor (#4121)
* chore: add log http ingester scaffold

* chore: add some example code

* chore: add log inserter

* chore: add log handler file

* chore: add pipeline lib

* chore: import log handler

* chore: add pipelime http handler

* chore: add pipeline private table

* chore: add pipeline API

* chore: improve error handling

* chore: merge main

* chore: add multi content type support for log handler

* refactor: remove servers dep on pipeline

* refactor: move define_into_tonic_status to common-error

* refactor: bring in pipeline 3eb890c551b8d7f60c4491fcfec18966e2b210a4

* chore: fix typo

* refactor: bring in pipeline a95c9767d7056ab01dd8ca5fa1214456c6ffc72c

* chore: fix typo and license header

* refactor: move http event handler to a separate file

* chore: add test for pipeline

* chore: fmt

* refactor: bring in pipeline 7d2402701877901871dd1294a65ac937605a6a93

* refactor: move `pipeline_operator` to `pipeline` crate

* chore: minor update

* refactor: bring in pipeline 1711f4d46687bada72426d88cda417899e0ae3a4

* chore: add log

* chore: add log

* chore: remove open hook

* chore: minor update

* chore: fix fmt

* chore: minor update

* chore: rename desc for pipeline table

* refactor: remove updated_at in pipelines

* chore: add more content type support for log inserter api

* chore: introduce pipeline crate

* chore: update upload pipeline api

* chore: fix by pr commit

* chore: add some doc for pub fn/struct

* chore: some minor fixes

* chore: add pipeline version support

* chore: impl log pipeline version

* gsub processor

* chore: add test

* chore: update commit

Co-authored-by: dennis zhuang <killme2008@gmail.com>

---------

Co-authored-by: paomian <xpaomian@gmail.com>
Co-authored-by: shuiyisong <xixing.sys@gmail.com>
Co-authored-by: shuiyisong <113876041+shuiyisong@users.noreply.github.com>
Co-authored-by: dennis zhuang <killme2008@gmail.com>
2024-06-17 07:57:47 +00:00
yuanbohan
0aceebf0a3 feat(pipeline): transform support on_failure (#4123)
* chore: add log http ingester scaffold

* chore: add some example code

* chore: add log inserter

* chore: add log handler file

* chore: add pipeline lib

* chore: import log handler

* chore: add pipeline http handler

* chore: add pipeline private table

* chore: add pipeline API

* chore: improve error handling

* chore: merge main

* chore: add multi content type support for log handler

* refactor: remove servers dep on pipeline

* refactor: move define_into_tonic_status to common-error

* refactor: bring in pipeline 3eb890c551b8d7f60c4491fcfec18966e2b210a4

* chore: fix typo

* refactor: bring in pipeline a95c9767d7056ab01dd8ca5fa1214456c6ffc72c

* chore: fix typo and license header

* refactor: move http event handler to a separate file

* chore: add test for pipeline

* chore: fmt

* refactor: bring in pipeline 7d2402701877901871dd1294a65ac937605a6a93

* refactor: move `pipeline_operator` to `pipeline` crate

* chore: minor update

* refactor: bring in pipeline 1711f4d46687bada72426d88cda417899e0ae3a4

* chore: add log

* chore: add log

* chore: remove open hook

* chore: minor update

* chore: fix fmt

* chore: minor update

* chore: rename desc for pipeline table

* refactor: remove updated_at in pipelines

* chore: add more content type support for log inserter api

* chore: introduce pipeline crate

* chore: update upload pipeline api

* chore: fix by pr commit

* chore: add some doc for pub fn/struct

* chore: some minor fixes

* chore: add pipeline version support

* chore: impl log pipeline version

* transform on_failure

* chore: add test

* chore: move test to a separate file

* chore: add comment

---------

Co-authored-by: paomian <xpaomian@gmail.com>
Co-authored-by: shuiyisong <xixing.sys@gmail.com>
2024-06-17 06:56:31 +00:00
Yingwen
558272de61 refactor: Decouple dedup and merge (#4139)
* feat: remove dedup/filter deleted from merge reader

* feat: impl dedup reader

* feat: support filter deleted flag

* test: test dedup reader

* feat: remove put_only field

* chore: fix clippy

* feat: metrics

* test: test empty batch

* perf: optimize dedup strategy

Avoid iterating all timestamps.

* test: fix test

* feat: generic
2024-06-17 04:09:50 +00:00
LFC
f4a5a44549 refactor: make region manifest checkpoint ran in background (#4133)
* refactor: use in-database manifest as checkpoint instead of merging incremental files in object store

* refactor: make region manifest checkpoint ran in background

* reduce unnecessary metrics

* Update src/mito2/src/manifest/checkpointer.rs

Co-authored-by: Yingwen <realevenyag@gmail.com>

* resolve PR comments

* resolve PR comments

---------

Co-authored-by: Yingwen <realevenyag@gmail.com>
2024-06-17 03:47:18 +00:00
zyy17
5390603855 refactor: add Compactor trait to abstract the compaction (#4097)
* refactor: add Compactor trait

* chore: add compact() in Compactor trait and expose compaction module

* refactor: add CompactionRequest and open_compaction_region

* refactor: export the compaction api

* refactor: add DefaultCompactor::new_from_request

* refactor: no need to pass mito_config in open_compaction_region()

* refactor: CompactionRequest -> &CompactionRequest

* fix: typo

* docs: add docs for public apis

* refactor: remove 'Picker' from Compactor

* chore: add logs

* chore: change pub attribute for Picker

* refactor: remove do_merge_ssts()

* refactor: update comments

* refactor: use CompactionRegion argument in Picker

* chore: make compaction module public and remove unnessary clone

* refactor: move build_compaction_task() in CompactionScheduler{}

* chore: use  in open_compaction_region() and add some comments for public structure

* refactor: add 'manifest_dir()' in store-api

* refactor: move the default implementation to DefaultCompactor

* refactor: remove Options from MergeOutput

* chore: minor modification

* fix: clippy errors

* fix: unit test errors

* refactor: remove 'manifest_dir()' from store-api crate(already have one in opener)

* refactor: use 'region_dir' in CompactionRequest

* refactor: refine naming

* refactor: refine naming

* refactor: remove clone()

* chore: add comments

* refactor: add PickerOutput field in CompactorRequest
2024-06-17 03:03:47 +00:00
Ruihang Xia
a2e3532a57 docs: add guide for tsbs benchmark (#4151)
* docs: add guide for tsbs benchmark

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix typo

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-06-16 16:02:15 +08:00
Ruihang Xia
2faa6d6c97 ci: align docs with develop (#4152)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-06-15 16:33:57 +00:00
Weny Xu
d6392acd65 fix(sqlness): catch different format timestamp (#4149) 2024-06-15 11:13:01 +00:00
localhost
01e3a24cf7 feat: log ingestion support (#4014)
* chore: add log http ingester scaffold

* chore: add some example code

* chore: add log inserter

* chore: add log handler file

* chore: add pipeline lib

* chore: import log handler

* chore: add pipeline http handler

* chore: add pipeline private table

* chore: add pipeline API

* chore: improve error handling

* chore: merge main

* chore: add multi content type support for log handler

* refactor: remove servers dep on pipeline

* refactor: move define_into_tonic_status to common-error

* refactor: bring in pipeline 3eb890c551b8d7f60c4491fcfec18966e2b210a4

* chore: fix typo

* refactor: bring in pipeline a95c9767d7056ab01dd8ca5fa1214456c6ffc72c

* chore: fix typo and license header

* refactor: move http event handler to a separate file

* chore: add test for pipeline

* chore: fmt

* refactor: bring in pipeline 7d2402701877901871dd1294a65ac937605a6a93

* refactor: move `pipeline_operator` to `pipeline` crate

* chore: minor update

* refactor: bring in pipeline 1711f4d46687bada72426d88cda417899e0ae3a4

* chore: add log

* chore: add log

* chore: remove open hook

* chore: minor update

* chore: fix fmt

* chore: minor update

* chore: rename desc for pipeline table

* refactor: remove updated_at in pipelines

* chore: add more content type support for log inserter api

* chore: introduce pipeline crate

* chore: update upload pipeline api

* chore: fix by pr commit

* chore: add some doc for pub fn/struct

* chore: some minor fixes

* chore: add pipeline version support

* chore: impl log pipeline version

* chore: fix format issue

* fix: make the LogicalPlan of a query pipeline sorted in desc order

* chore: remove some debug log

* chore: replacing hashmap cache with moka

* chore: fix by pr commit

* chore: fix toml format issue

* chore: update Cargo.lock

* chore: fix by pr commit

* chore: fix some issue by pr commit

* chore: add more doc for pipeline version

---------

Co-authored-by: shuiyisong <xixing.sys@gmail.com>
2024-06-14 17:03:30 +00:00
Weny Xu
bf3ad44584 fix: fix release CI typo (#4147)
* fix: ci typo

* chore: use aws registry
2024-06-14 14:29:34 +00:00
Weny Xu
11a903f193 chore(ci): switch to aws registry (#4145)
chore: switch to aws registry
2024-06-14 11:46:57 +00:00
Weny Xu
acdfaabfa5 fix(ci): use ld_classic on macOS (#4143)
fix: use ld_classic on macos
2024-06-14 08:09:12 +00:00
Weny Xu
54ca06ba08 chore: bump version to v0.8.2 (#4141) 2024-06-14 03:39:08 +00:00
Weny Xu
1f315e300f fix: retry on unknown error (#4138) 2024-06-13 11:15:38 +00:00
Weny Xu
573e25a40f chore: run fuzz tests with disk cache (#4118)
* chore: run fuzz tests with disk cache

* fix: print error messages correctly
2024-06-13 09:07:12 +00:00
Lei, HUANG
f8ec46493f refactor: simplify parquet writer (#4112)
* refactor: simplify parquet writer

* chore: fix clippy

* refactor: use AsyncArrowWriter instead of BufferedWriter

* refactor: remove BufferedWriter

* fix: add chunk parameter to avoid entity too small issue

* refactor: use AtomicUsize instead of Mutex

* fix: add chunk argument to stream_to_parquet

* chore: fmt

* wip: fail check

* fix: check

* fmt

* refactor: use impl Future instead of async_trait

* fmt

* refactor: use associate types
2024-06-13 07:32:47 +00:00
Weny Xu
14a2d83594 chore: remove unused code (#4135)
* chore: remove unused code

* Update src/mito2/src/wal/entry_reader.rs

Co-authored-by: Ruihang Xia <waynestxia@gmail.com>

---------

Co-authored-by: Ruihang Xia <waynestxia@gmail.com>
2024-06-12 13:18:33 +00:00
Yingwen
65f8b72d34 feat: Implement RegionScanner for SeqScan (#4060)
* feat: ordered builder wip

* feat: impl RegionScanner for SeqScan

* feat: implement scan_partition and build_stream

* chore: return SeqScan as RegionScanner

* fix: group parts

* feat: split parts

* chore: reader metrics

* chore: metrics

* chore: remove unused codes

* chore: support holding a group of ranges in ScanPart

* feat: group ScanParts to ScanParts

* feat: impl SeqScanner again

* chore: observe build cost in ScannerMetrics

* chore: fix compiler warnings

* style: fix clippy

* docs: update config docs

* chore: forward DisplayAs to scanner

* test: update sqlness tests

* chore: update debug fmt

* chore: custom debug for timestamp

fix test compiling issue with common-macro when running
cargo nextest -p common-time

* chore: update debug format

* feat: update fmt for scan part

* chore: fix warning

* fix: sanitize parallelism

* feat: split parts

* test: fix config api test

* feat: update logs

* chore: Revert "chore: remove unused codes"

This reverts commit b548b30a01eeded59b1a0a8d89f9293ca63afc41.

* chore: Revert "docs: update config docs"

This reverts commit a7997e78d6ddcf635560574de8c1948c495bdd12.

* feat: each partition scan files in parallel

* test: fix config api test

* docs: fix typo

* chore: address comments, simplify tests

* feat: global semaphore

* feat: always spawn task

* chore: simplify default explain output format

* handle output partition number is 0

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix typo

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
Co-authored-by: Ruihang Xia <waynestxia@gmail.com>
2024-06-12 08:21:30 +00:00
LFC
9473daab8b fix: explicitly set config instead of using changeable default in tests (#4132)
* fix: explicitly set config instead of using changeable default in tests

* fix: resolve PR comments
2024-06-11 10:51:01 +00:00
LFC
5a6021e34f refactor: remove substrait ser/de for region query in standalone (#3812)
* refactor: remove substrait serde for region query in standalone

* fix ci

* move QueryRequest to common-query

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* format code

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* format toml file

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* chore: format toml

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
Co-authored-by: Ruihang Xia <waynestxia@gmail.com>
Co-authored-by: Yingwen <realevenyag@gmail.com>
2024-06-11 09:33:47 +00:00
discord9
1b00526de5 fix(flow): infer table schema correctly (#4113)
* refactor: make individual col name optional

* chore: rename TypedPlan's `typ` to `schema`

* feat: add optional col name to typed plan

* feat: pass col name all along

* feat: correct infer output table schema

* chore: unused import

* fix: error when key is not projected

* refactor: per review

* chore: fmt
2024-06-11 08:57:47 +00:00
Yingwen
5533bd9293 chore(common-macro): remove features covered by full (#4131) 2024-06-11 07:44:53 +00:00
Ning Sun
587e99d806 fix: macro crate cannot be compiled alone (#4130)
* fix: macro crate cannot be compiled alone

* Update src/common/macro/Cargo.toml

Co-authored-by: Yingwen <realevenyag@gmail.com>

---------

Co-authored-by: tison <wander4096@gmail.com>
Co-authored-by: Yingwen <realevenyag@gmail.com>
2024-06-11 05:36:29 +00:00
Yingwen
9cae15bd1b fix: executes pending ddls if region memtable is empty while scheduling next flush (#4119)
* ci: enable debug log

* chore: test to reproduce panic

* chore: Revert "ci: enable debug log"

This reverts commit 17eff2a045.

* test: add test for alter during flush

* fix: clear status if region has nothing to flush

It will also execute pending DDLs and requests

* docs: fix typo
2024-06-11 00:10:17 +00:00
cjw
d8b51cfaba refactor: remove double checks of memtable size (#4117)
* refactor: remove unnecessary unwrap

* Update src/mito2/src/region/version.rs

Co-authored-by: dennis zhuang <killme2008@gmail.com>

---------

Co-authored-by: Kermit <chenjiawei1@xiaohongshu.com>
Co-authored-by: dennis zhuang <killme2008@gmail.com>
2024-06-07 23:34:03 +00:00
Weny Xu
e142ca40d7 feat: invoke handle_batch_open_requests (#4107)
* feat: open all regions via invoking `handle_batch_open_requests`

* tests: add sqlness tests

* refactor: avoid cloning

* chore: apply suggestions from CR

* chore: update config.md

* chore: apply suggestions from CR
2024-06-07 09:07:45 +00:00
Yingwen
e982d2e55c fix: Update region Version in the worker loop (#4114)
* feat: handle region edit result

* feat: handle edit result

* feat: handle truncate result

* feat: flush compaction

* feat: invoke in worker

* feat: remove unused fields

* style: fix clippy

* feat: remove applier

---------

Co-authored-by: Weny Xu <wenymedia@gmail.com>
2024-06-07 06:27:16 +00:00
Weny Xu
09e0e1b246 chore: run fuzz tests with kafka remote wal (#4105)
* chore: add fuzz tests with kafka

* chore(ci): use minio

* chore: add empty line

* chore(ci): refactor

* chore: add empty line

* fix: update config

* fix: add default value for `MetaClientOptions`

* fix: remove redundant `debug_assert`

* chore: run fuzz tests with disk cache

* chore: remove redundant minio setup

* chore: cache targets

* Revert "chore: run fuzz tests with disk cache"

This reverts commit d81783187d.

* chore: fix typo

* chore: apply suggestions from CR

* Revert "fix: remove redundant `debug_assert`"

This reverts commit 09b899eed1.
2024-06-07 03:47:40 +00:00
irenjj
9c42825f5d feat: Implement SHOW CREATE FLOW (#4040)
* feat: Implement SHOW CREATE FLOW

* fmt

* stmt for display

* Update src/operator/src/statement.rs

Co-authored-by: Yingwen <realevenyag@gmail.com>

* test: add sqlness test

* fix test

* parse query in parser

* test: move test to standalone

* reuse ParserContext::new()

* Update tests/cases/standalone/show_create_flow.result

Co-authored-by: Weny Xu <wenymedia@gmail.com>

* add line breaks

---------

Co-authored-by: Yingwen <realevenyag@gmail.com>
Co-authored-by: Weny Xu <wenymedia@gmail.com>
2024-06-07 03:24:56 +00:00
Jeremyhi
4719569e4f feat: support gRPC cancellation (#4092)
* feat: support cancellation

* chore: add unit test for cancellation

* chore: minor refactor

* feat: we do not need to spawn in distributed mode

---------

Co-authored-by: Ruihang Xia <waynestxia@gmail.com>
2024-06-06 08:27:25 +00:00
Jeremyhi
b03cb3860e chore: reduce some burden on the write path (#4110)
* chore: remove unnecessary checking

* chore: avoid do the same thing in a loop
2024-06-06 06:45:19 +00:00
shuiyisong
2ade511f26 feat: introduce pipeline crate (#4109)
* chore: introduce pipeline crate

* chore: fix typo
2024-06-05 17:23:25 +00:00
Weny Xu
16b85b06b6 chore: remove gc before running fuzz tests (#4108) 2024-06-05 11:59:29 +00:00
Ruihang Xia
03cacf9948 ci: cargo gc all fuzz test runner (#4081)
* ci: cargo gc all fuzz test runner

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* chore(ci): disable caching targets

* chore(ci): remove .tar file after unzip

* fix cargo-gc command

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
Co-authored-by: WenyXu <wenymedia@gmail.com>
2024-06-05 09:58:29 +00:00
Weny Xu
c23f8ad113 feat: implement the handle_batch_open_requests (#4075)
* feat: implement the `handle_batch_open_requests`

* refactor: refactor `handle_batch_open_requests` method signature

* test: add tests for `handle_batch_open_requests`

* chore: fmt code

* chore: apply suggestions from CR

* chore: apply suggestions from CR

* chore: apply suggestions from CR

* chore: apply suggestions from CR
2024-06-05 09:22:34 +00:00
Weny Xu
e0a2c5a581 chore(ci): remove redundant sqlness test config (#4106) 2024-06-05 08:39:39 +00:00
zyy17
417ab3b779 ci: add 'make run-cluster-with-etcd' to run greptimedb cluster by using docker-compose (#4103) 2024-06-05 08:07:29 +00:00
tison
1850fe2956 feat: show create table only for base table (#4099)
* feat: show create table only for base table

Signed-off-by: tison <wander4096@gmail.com>

* add new cases

Signed-off-by: tison <wander4096@gmail.com>

---------

Signed-off-by: tison <wander4096@gmail.com>
2024-06-04 21:29:07 +00:00
taobo
dd06e107f9 test: add fuzz tests for column data type alteration (#4076)
* feat: support make fuzz-stable in Makefile

* test: add fuzz tests for column data type alteration

* fix: optimize code by cr
2024-06-04 13:38:57 +00:00
sarailQAQ
98c19ed0fa feat: implement drop multiple tables (#4085)
* feat: implement drop multiple tables

* fix: pass fmt and clippy checks

* add: drop multiple sqlness test

* update: accept review suggestions

* update: accept review suggestion

Co-authored-by: Weny Xu <wenymedia@gmail.com>

* fix: pass clippy check

---------

Co-authored-by: Weny Xu <wenymedia@gmail.com>
2024-06-04 13:11:41 +00:00
LFC
c0aed1d267 feat: set global runtime size by config file (#4063)
* set global runtime size

* fix: resolve PR comments

* fix: log the whole option

* fix ci

* debug ci

* debug ci

---------

Co-authored-by: Weny Xu <wenymedia@gmail.com>
2024-06-04 10:03:33 +00:00
discord9
0a07130931 fix(flow): mfp operator missing rows (#4084)
* fix: mfp missing rows if run twice in same tick

* tests: run mfp for multiple times

* refactor: make mfp less hacky

* feat: make channel larger

* chore: typos
2024-06-04 09:07:13 +00:00
Weny Xu
a6269397c8 fix: fix EntityTooSmall issue (#4100)
* fix: fix EntityTooSmall issue

* chore(ci): add minio to coverage

* tests: add test for parquet writer

* chore: move tests to `common-datasource` crate
2024-06-04 08:43:33 +00:00
Lei, HUANG
a80059b47f fix: recover memtable options when opening physical regions (#4102)
* fix: recover memtable options when opening physical regions

* chore: fmt

* chore: merge data region options
2024-06-04 08:20:29 +00:00
Weny Xu
b3a4362626 test: run test_flush_reopen_region and test_region_replay with KafkaLogStore (#4083)
* feat: add `LogStoreFactory` to `TestEnv`

* feat: add `multiple_log_store_factories` template

* test: run `test_flush_reopen_region` and `test_region_replay` with `KafkaLogStore`

* chore: move deps to workspace

* chore: apply suggestions from CR
2024-06-04 08:11:15 +00:00
Kelvin Wu
51e2b6e728 fix: display the PartitionBound and PartitionDef correctly (#4101)
* fix: display the PartitionBound and PartitionDef correctly

* Update src/partition/src/partition.rs

Co-authored-by: dennis zhuang <killme2008@gmail.com>

* fix: fix unit test of partition definition

---------

Co-authored-by: dennis zhuang <killme2008@gmail.com>
2024-06-04 08:10:44 +00:00
shuiyisong
d1838fb28d refactor: move define_into_tonic_status to common-error (#4095)
* chore: finish cherry-pick

* chore: remove unused code
2024-06-04 03:29:15 +00:00
Weny Xu
cd97a39904 chore: enable strip for tests-fuzz crate (#4093) 2024-06-03 14:32:11 +00:00
Weny Xu
4e5dd1ebb0 ci: try to free space after fuzz tests (#4089)
* chore(ci): remove .tar file after unzip

* chore: free space

* chore: include debug info
2024-06-02 21:22:22 +00:00
Kelvin Wu
88cdefa41e feat: implement Display for PartitionExpr (#4087) 2024-06-02 21:09:00 +00:00
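
A Display implementation like this typically renders the expression back into the SQL fragment users wrote in the partition clause. A simplified, hypothetical shape; the real PartitionExpr in the partition crate carries richer operand and operator types.

```rust
use std::fmt;

// Hypothetical, simplified partition expression for illustration.
enum PartitionExpr {
    Comparison { column: String, op: &'static str, value: String },
    And(Box<PartitionExpr>, Box<PartitionExpr>),
}

impl fmt::Display for PartitionExpr {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            PartitionExpr::Comparison { column, op, value } => {
                write!(f, "{column} {op} {value}")
            }
            PartitionExpr::And(lhs, rhs) => write!(f, "{lhs} AND {rhs}"),
        }
    }
}

fn main() {
    let expr = PartitionExpr::And(
        Box::new(PartitionExpr::Comparison {
            column: "device_id".into(),
            op: ">=",
            value: "10".into(),
        }),
        Box::new(PartitionExpr::Comparison {
            column: "device_id".into(),
            op: "<",
            value: "100".into(),
        }),
    );
    assert_eq!(expr.to_string(), "device_id >= 10 AND device_id < 100");
}
```
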
Ruihang Xia
c2218f8be8 build(deps): bump datafusion 20240528 (#4061)
* build(deps): bump datafusion 20240528

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* another update

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* update expected sqlness result

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix first/last value

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* reformat comment

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix remaining errors

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* revert toml format

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix pyo3 feature

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* remove dead code

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* Apply suggestions from code review

Co-authored-by: Jeremyhi <jiachun_feng@proton.me>

* format file

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
Co-authored-by: Jeremyhi <jiachun_feng@proton.me>
2024-06-01 14:03:00 +00:00
572 changed files with 31573 additions and 8867 deletions

@@ -24,6 +24,14 @@ inputs:
description: Build android artifacts
required: false
default: 'false'
image-namespace:
description: Image Namespace
required: false
default: 'greptime'
image-registry:
description: Image Registry
required: false
default: 'docker.io'
runs:
using: composite
steps:
@@ -35,7 +43,9 @@ runs:
make build-by-dev-builder \
CARGO_PROFILE=${{ inputs.cargo-profile }} \
FEATURES=${{ inputs.features }} \
BASE_IMAGE=${{ inputs.base-image }}
BASE_IMAGE=${{ inputs.base-image }} \
IMAGE_NAMESPACE=${{ inputs.image-namespace }} \
IMAGE_REGISTRY=${{ inputs.image-registry }}
- name: Upload artifacts
uses: ./.github/actions/upload-artifacts
@@ -53,7 +63,9 @@ runs:
shell: bash
if: ${{ inputs.build-android-artifacts == 'true' }}
run: |
cd ${{ inputs.working-dir }} && make strip-android-bin
cd ${{ inputs.working-dir }} && make strip-android-bin \
IMAGE_NAMESPACE=${{ inputs.image-namespace }} \
IMAGE_REGISTRY=${{ inputs.image-registry }}
- name: Upload android artifacts
uses: ./.github/actions/upload-artifacts

@@ -30,7 +30,9 @@ runs:
# NOTE: If the BUILD_JOBS > 4, it's always OOM in EC2 instance.
run: |
cd ${{ inputs.working-dir }} && \
make run-it-in-container BUILD_JOBS=4
make run-it-in-container BUILD_JOBS=4 \
IMAGE_NAMESPACE=i8k6a5e1/greptime \
IMAGE_REGISTRY=public.ecr.aws
- name: Upload sqlness logs
if: ${{ failure() && inputs.disable-run-tests == 'false' }} # Only upload logs when the integration tests failed.
@@ -49,6 +51,8 @@ runs:
artifacts-dir: greptime-linux-${{ inputs.arch }}-pyo3-${{ inputs.version }}
version: ${{ inputs.version }}
working-dir: ${{ inputs.working-dir }}
image-registry: public.ecr.aws
image-namespace: i8k6a5e1/greptime
- name: Build greptime without pyo3
if: ${{ inputs.dev-mode == 'false' }}
@@ -60,6 +64,8 @@ runs:
artifacts-dir: greptime-linux-${{ inputs.arch }}-${{ inputs.version }}
version: ${{ inputs.version }}
working-dir: ${{ inputs.working-dir }}
image-registry: public.ecr.aws
image-namespace: i8k6a5e1/greptime
- name: Clean up the target directory # Clean up the target directory for the centos7 base image, or it will still use the objects of last build.
shell: bash
@@ -76,6 +82,8 @@ runs:
artifacts-dir: greptime-linux-${{ inputs.arch }}-centos-${{ inputs.version }}
version: ${{ inputs.version }}
working-dir: ${{ inputs.working-dir }}
image-registry: public.ecr.aws
image-namespace: i8k6a5e1/greptime
- name: Build greptime on android base image
uses: ./.github/actions/build-greptime-binary
@@ -86,3 +94,5 @@ runs:
version: ${{ inputs.version }}
working-dir: ${{ inputs.working-dir }}
build-android-artifacts: true
image-registry: public.ecr.aws
image-namespace: i8k6a5e1/greptime

@@ -59,9 +59,15 @@ runs:
if: ${{ inputs.disable-run-tests == 'false' }}
uses: taiki-e/install-action@nextest
# Get proper backtraces in mac Sonoma. Currently there's an issue with the new
# linker that prevents backtraces from getting printed correctly.
#
# <https://github.com/rust-lang/rust/issues/113783>
- name: Run integration tests
if: ${{ inputs.disable-run-tests == 'false' }}
shell: bash
env:
CARGO_BUILD_RUSTFLAGS: "-Clink-arg=-Wl,-ld_classic"
run: |
make test sqlness-test
@@ -75,6 +81,8 @@ runs:
- name: Build greptime binary
shell: bash
env:
CARGO_BUILD_RUSTFLAGS: "-Clink-arg=-Wl,-ld_classic"
run: |
make build \
CARGO_PROFILE=${{ inputs.cargo-profile }} \

.github/actions/setup-chaos/action.yml (new file)

@@ -0,0 +1,17 @@
name: Setup Kind
description: Deploy Kind
runs:
using: composite
steps:
- uses: actions/checkout@v4
- name: Create kind cluster
shell: bash
run: |
helm repo add chaos-mesh https://charts.chaos-mesh.org
kubectl create ns chaos-mesh
helm install chaos-mesh chaos-mesh/chaos-mesh -n=chaos-mesh --version 2.6.3
- name: Print Chaos-mesh
if: always()
shell: bash
run: |
kubectl get po -n chaos-mesh

@@ -22,6 +22,10 @@ inputs:
etcd-endpoints:
default: "etcd.etcd-cluster.svc.cluster.local:2379"
description: "Etcd endpoints"
values-filename:
default: "with-minio.yaml"
enable-region-failover:
default: false
runs:
using: composite
@@ -44,6 +48,7 @@ runs:
helm upgrade \
--install my-greptimedb \
--set meta.etcdEndpoints=${{ inputs.etcd-endpoints }} \
--set meta.enableRegionFailover=${{ inputs.enable-region-failover }} \
--set image.registry=${{ inputs.image-registry }} \
--set image.repository=${{ inputs.image-repository }} \
--set image.tag=${{ inputs.image-tag }} \
@@ -57,6 +62,7 @@ runs:
greptime/greptimedb-cluster \
--create-namespace \
-n my-greptimedb \
--values ./.github/actions/setup-greptimedb-cluster/${{ inputs.values-filename }} \
--wait \
--wait-for-jobs
- name: Wait for GreptimeDB

@@ -0,0 +1,18 @@
meta:
config: |-
[runtime]
read_rt_size = 8
write_rt_size = 8
bg_rt_size = 8
datanode:
config: |-
[runtime]
read_rt_size = 8
write_rt_size = 8
bg_rt_size = 8
frontend:
config: |-
[runtime]
read_rt_size = 8
write_rt_size = 8
bg_rt_size = 8

@@ -0,0 +1,38 @@
meta:
config: |-
[runtime]
read_rt_size = 8
write_rt_size = 8
bg_rt_size = 8
[datanode]
[datanode.client]
timeout = "60s"
datanode:
config: |-
[runtime]
read_rt_size = 8
write_rt_size = 8
bg_rt_size = 8
[storage]
cache_path = "/data/greptimedb/s3cache"
cache_capacity = "256MB"
frontend:
config: |-
[runtime]
read_rt_size = 8
write_rt_size = 8
bg_rt_size = 8
[meta_client]
ddl_timeout = "60s"
objectStorage:
s3:
bucket: default
region: us-west-2
root: test-root
endpoint: http://minio.minio.svc.cluster.local
credentials:
accessKeyId: rootuser
secretAccessKey: rootpass123

@@ -0,0 +1,34 @@
meta:
config: |-
[runtime]
read_rt_size = 8
write_rt_size = 8
bg_rt_size = 8
[datanode]
[datanode.client]
timeout = "60s"
datanode:
config: |-
[runtime]
read_rt_size = 8
write_rt_size = 8
bg_rt_size = 8
frontend:
config: |-
[runtime]
read_rt_size = 8
write_rt_size = 8
bg_rt_size = 8
[meta_client]
ddl_timeout = "60s"
objectStorage:
s3:
bucket: default
region: us-west-2
root: test-root
endpoint: http://minio.minio.svc.cluster.local
credentials:
accessKeyId: rootuser
secretAccessKey: rootpass123

@@ -0,0 +1,45 @@
meta:
config: |-
[runtime]
read_rt_size = 8
write_rt_size = 8
bg_rt_size = 8
[wal]
provider = "kafka"
broker_endpoints = ["kafka.kafka-cluster.svc.cluster.local:9092"]
num_topics = 3
[datanode]
[datanode.client]
timeout = "60s"
datanode:
config: |-
[runtime]
read_rt_size = 8
write_rt_size = 8
bg_rt_size = 8
[wal]
provider = "kafka"
broker_endpoints = ["kafka.kafka-cluster.svc.cluster.local:9092"]
linger = "2ms"
frontend:
config: |-
[runtime]
read_rt_size = 8
write_rt_size = 8
bg_rt_size = 8
[meta_client]
ddl_timeout = "60s"
objectStorage:
s3:
bucket: default
region: us-west-2
root: test-root
endpoint: http://minio.minio.svc.cluster.local
credentials:
accessKeyId: rootuser
secretAccessKey: rootpass123

@@ -0,0 +1,24 @@
name: Setup Kafka cluster
description: Deploy Kafka cluster on Kubernetes
inputs:
controller-replicas:
default: 3
description: "Kafka controller replicas"
namespace:
default: "kafka-cluster"
runs:
using: composite
steps:
- name: Install Kafka cluster
shell: bash
run: |
helm upgrade \
--install kafka oci://registry-1.docker.io/bitnamicharts/kafka \
--set controller.replicaCount=${{ inputs.controller-replicas }} \
--set controller.resources.requests.cpu=50m \
--set controller.resources.requests.memory=128Mi \
--set listeners.controller.protocol=PLAINTEXT \
--set listeners.client.protocol=PLAINTEXT \
--create-namespace \
-n ${{ inputs.namespace }}

.github/actions/setup-minio/action.yml (new file)

@@ -0,0 +1,24 @@
name: Setup Minio cluster
description: Deploy Minio cluster on Kubernetes
inputs:
replicas:
default: 1
description: "replicas"
runs:
using: composite
steps:
- name: Install Etcd cluster
shell: bash
run: |
helm repo add minio https://charts.min.io/
helm upgrade --install minio \
--set resources.requests.memory=128Mi \
--set replicas=${{ inputs.replicas }} \
--set mode=standalone \
--set rootUser=rootuser,rootPassword=rootpass123 \
--set buckets[0].name=default \
--set service.port=80,service.targetPort=9000 \
minio/minio \
--create-namespace \
-n minio

@@ -160,14 +160,16 @@ jobs:
run: |
sudo apt-get install -y libfuzzer-14-dev
rustup install nightly
cargo +nightly install cargo-fuzz
cargo +nightly install cargo-fuzz cargo-gc-bin
- name: Download pre-built binaries
uses: actions/download-artifact@v4
with:
name: bins
path: .
- name: Unzip binaries
run: tar -xvf ./bins.tar.gz
run: |
tar -xvf ./bins.tar.gz
rm ./bins.tar.gz
- name: Run GreptimeDB
run: |
./bins/greptime standalone start&
@@ -182,7 +184,7 @@ jobs:
unstable-fuzztest:
name: Unstable Fuzz Test
needs: build
needs: build-greptime-ci
runs-on: ubuntu-latest
strategy:
matrix:
@@ -204,27 +206,22 @@ jobs:
shell: bash
run: |
sudo apt update && sudo apt install -y libfuzzer-14-dev
cargo install cargo-fuzz
- name: Download pre-built binaries
cargo install cargo-fuzz cargo-gc-bin
- name: Download pre-built binary
uses: actions/download-artifact@v4
with:
name: bins
name: bin
path: .
- name: Unzip binaries
run: tar -xvf ./bins.tar.gz
- name: Build Fuzz Test
shell: bash
- name: Unzip binary
run: |
cd tests-fuzz &
cargo install cargo-gc-bin &
cargo gc &
cd ..
tar -xvf ./bin.tar.gz
rm ./bin.tar.gz
- name: Run Fuzz Test
uses: ./.github/actions/fuzz-test
env:
CUSTOM_LIBFUZZER_PATH: /usr/lib/llvm-14/lib/libFuzzer.a
GT_MYSQL_ADDR: 127.0.0.1:4002
GT_FUZZ_BINARY_PATH: ./bins/greptime
GT_FUZZ_BINARY_PATH: ./bin/greptime
GT_FUZZ_INSTANCE_ROOT_DIR: /tmp/unstable-greptime/
with:
target: ${{ matrix.target }}
@@ -263,7 +260,7 @@ jobs:
- name: Build greptime binary
shell: bash
# `cargo gc` will invoke `cargo build` with specified args
run: cargo build --bin greptime --profile ci
run: cargo gc --profile ci -- --bin greptime
- name: Pack greptime binary
shell: bash
run: |
@@ -278,16 +275,39 @@ jobs:
version: current
distributed-fuzztest:
name: Fuzz Test (Distributed, Disk)
name: Fuzz Test (Distributed, ${{ matrix.mode.name }}, ${{ matrix.target }})
runs-on: ubuntu-latest
needs: build-greptime-ci
strategy:
matrix:
target: [ "fuzz_create_table", "fuzz_alter_table", "fuzz_create_database", "fuzz_create_logical_table", "fuzz_alter_logical_table", "fuzz_insert", "fuzz_insert_logical_table" ]
mode:
- name: "Disk"
minio: false
kafka: false
values: "with-disk.yaml"
- name: "Minio"
minio: true
kafka: false
values: "with-minio.yaml"
- name: "Minio with Cache"
minio: true
kafka: false
values: "with-minio-and-cache.yaml"
- name: "Remote WAL"
minio: true
kafka: true
values: "with-remote-wal.yaml"
steps:
- uses: actions/checkout@v4
- name: Setup Kind
uses: ./.github/actions/setup-kind
- if: matrix.mode.minio
name: Setup Minio
uses: ./.github/actions/setup-minio
- if: matrix.mode.kafka
name: Setup Kafka cluster
uses: ./.github/actions/setup-kafka-cluster
- name: Setup Etcd cluster
uses: ./.github/actions/setup-etcd-cluster
# Prepares for fuzz tests
@@ -307,7 +327,7 @@ jobs:
run: |
sudo apt-get install -y libfuzzer-14-dev
rustup install nightly
cargo +nightly install cargo-fuzz
cargo +nightly install cargo-fuzz cargo-gc-bin
# Downloads ci image
- name: Download pre-built binary
uses: actions/download-artifact@v4
@@ -315,7 +335,9 @@ jobs:
name: bin
path: .
- name: Unzip binary
run: tar -xvf ./bin.tar.gz
run: |
tar -xvf ./bin.tar.gz
rm ./bin.tar.gz
- name: Build and push GreptimeDB image
uses: ./.github/actions/build-and-push-ci-image
- name: Wait for etcd
@@ -325,6 +347,22 @@ jobs:
pod -l app.kubernetes.io/instance=etcd \
--timeout=120s \
-n etcd-cluster
- if: matrix.mode.minio
name: Wait for minio
run: |
kubectl wait \
--for=condition=Ready \
pod -l app=minio \
--timeout=120s \
-n minio
- if: matrix.mode.kafka
name: Wait for kafka
run: |
kubectl wait \
--for=condition=Ready \
pod -l app.kubernetes.io/instance=kafka \
--timeout=120s \
-n kafka-cluster
- name: Print etcd info
shell: bash
run: kubectl get all --show-labels -n etcd-cluster
@@ -333,6 +371,7 @@ jobs:
uses: ./.github/actions/setup-greptimedb-cluster
with:
image-registry: localhost:5001
values-filename: ${{ matrix.mode.values }}
- name: Port forward (mysql)
run: |
kubectl port-forward service/my-greptimedb-frontend 4002:4002 -n my-greptimedb&
@@ -358,45 +397,158 @@ jobs:
if: failure()
uses: actions/upload-artifact@v4
with:
name: fuzz-tests-kind-logs-${{ matrix.target }}
name: fuzz-tests-kind-logs-${{ matrix.mode.name }}-${{ matrix.target }}
path: /tmp/kind
retention-days: 3
- name: Delete cluster
if: success()
shell: bash
run: |
kind delete cluster
docker stop $(docker ps -a -q)
docker rm $(docker ps -a -q)
docker system prune -f
sqlness:
name: Sqlness Test
needs: build
runs-on: ${{ matrix.os }}
distributed-fuzztest-with-chaos:
name: Fuzz Test with Chaos (Distributed, ${{ matrix.mode.name }}, ${{ matrix.target }})
runs-on: ubuntu-latest
needs: build-greptime-ci
strategy:
matrix:
os: [ ubuntu-20.04 ]
timeout-minutes: 60
target: ["fuzz_failover_mito_regions"]
mode:
- name: "Remote WAL"
minio: true
kafka: true
values: "with-remote-wal.yaml"
steps:
- uses: actions/checkout@v4
- name: Download pre-built binaries
- name: Setup Kind
uses: ./.github/actions/setup-kind
- name: Setup Chaos Mesh
uses: ./.github/actions/setup-chaos
- if: matrix.mode.minio
name: Setup Minio
uses: ./.github/actions/setup-minio
- if: matrix.mode.kafka
name: Setup Kafka cluster
uses: ./.github/actions/setup-kafka-cluster
- name: Setup Etcd cluster
uses: ./.github/actions/setup-etcd-cluster
# Prepares for fuzz tests
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
# Shares across multiple jobs
shared-key: "fuzz-test-targets"
- name: Set Rust Fuzz
shell: bash
run: |
sudo apt-get install -y libfuzzer-14-dev
rustup install nightly
cargo +nightly install cargo-fuzz cargo-gc-bin
# Downloads ci image
- name: Download pre-built binary
uses: actions/download-artifact@v4
with:
name: bins
name: bin
path: .
- name: Unzip binaries
run: tar -xvf ./bins.tar.gz
- name: Run sqlness
run: RUST_BACKTRACE=1 ./bins/sqlness-runner -c ./tests/cases --bins-dir ./bins --preserve-state
- name: Upload sqlness logs
if: always()
- name: Unzip binary
run: |
tar -xvf ./bin.tar.gz
rm ./bin.tar.gz
- name: Build and push GreptimeDB image
uses: ./.github/actions/build-and-push-ci-image
- name: Wait for etcd
run: |
kubectl wait \
--for=condition=Ready \
pod -l app.kubernetes.io/instance=etcd \
--timeout=120s \
-n etcd-cluster
- if: matrix.mode.minio
name: Wait for minio
run: |
kubectl wait \
--for=condition=Ready \
pod -l app=minio \
--timeout=120s \
-n minio
- if: matrix.mode.kafka
name: Wait for kafka
run: |
kubectl wait \
--for=condition=Ready \
pod -l app.kubernetes.io/instance=kafka \
--timeout=120s \
-n kafka-cluster
- name: Print etcd info
shell: bash
run: kubectl get all --show-labels -n etcd-cluster
# Setup cluster for test
- name: Setup GreptimeDB cluster
uses: ./.github/actions/setup-greptimedb-cluster
with:
image-registry: localhost:5001
values-filename: ${{ matrix.mode.values }}
enable-region-failover: true
- name: Port forward (mysql)
run: |
kubectl port-forward service/my-greptimedb-frontend 4002:4002 -n my-greptimedb&
- name: Fuzz Test
uses: ./.github/actions/fuzz-test
env:
CUSTOM_LIBFUZZER_PATH: /usr/lib/llvm-14/lib/libFuzzer.a
GT_MYSQL_ADDR: 127.0.0.1:4002
with:
target: ${{ matrix.target }}
max-total-time: 120
- name: Describe Nodes
if: failure()
shell: bash
run: |
kubectl describe nodes
- name: Export kind logs
if: failure()
shell: bash
run: |
kind export logs /tmp/kind
- name: Upload logs
if: failure()
uses: actions/upload-artifact@v4
with:
name: sqlness-logs
path: /tmp/sqlness*
name: fuzz-tests-kind-logs-${{ matrix.mode.name }}-${{ matrix.target }}
path: /tmp/kind
retention-days: 3
- name: Delete cluster
if: success()
shell: bash
run: |
kind delete cluster
docker stop $(docker ps -a -q)
docker rm $(docker ps -a -q)
docker system prune -f
sqlness-kafka-wal:
name: Sqlness Test with Kafka Wal
sqlness:
name: Sqlness Test (${{ matrix.mode.name }})
needs: build
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ ubuntu-20.04 ]
mode:
- name: "Basic"
opts: ""
kafka: false
- name: "Remote WAL"
opts: "-w kafka -k 127.0.0.1:9092"
kafka: true
timeout-minutes: 60
steps:
- uses: actions/checkout@v4
@@ -407,16 +559,17 @@ jobs:
path: .
- name: Unzip binaries
run: tar -xvf ./bins.tar.gz
- name: Setup kafka server
- if: matrix.mode.kafka
name: Setup kafka server
working-directory: tests-integration/fixtures/kafka
run: docker compose -f docker-compose-standalone.yml up -d --wait
- name: Run sqlness
run: RUST_BACKTRACE=1 ./bins/sqlness-runner -w kafka -k 127.0.0.1:9092 -c ./tests/cases --bins-dir ./bins --preserve-state
run: RUST_BACKTRACE=1 ./bins/sqlness-runner ${{ matrix.mode.opts }} -c ./tests/cases --bins-dir ./bins --preserve-state
- name: Upload sqlness logs
if: always()
if: failure()
uses: actions/upload-artifact@v4
with:
name: sqlness-logs-with-kafka-wal
name: sqlness-logs-${{ matrix.mode.name }}
path: /tmp/sqlness*
retention-days: 3
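To reproduce the "Remote WAL" sqlness run locally, a minimal sketch (assuming the pre-built binaries are already unpacked under `./bins`, mirroring the workflow steps above):

```shell
# Start the standalone Kafka fixture, then run sqlness against it.
(cd tests-integration/fixtures/kafka && docker compose -f docker-compose-standalone.yml up -d --wait)
RUST_BACKTRACE=1 ./bins/sqlness-runner -w kafka -k 127.0.0.1:9092 \
  -c ./tests/cases --bins-dir ./bins --preserve-state
```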
@@ -505,6 +658,9 @@ jobs:
- name: Setup kafka server
working-directory: tests-integration/fixtures/kafka
run: docker compose -f docker-compose-standalone.yml up -d --wait
- name: Setup minio
working-directory: tests-integration/fixtures/minio
run: docker compose -f docker-compose-standalone.yml up -d --wait
- name: Run nextest cases
run: cargo llvm-cov nextest --workspace --lcov --output-path lcov.info -F pyo3_backend -F dashboard
env:
@@ -515,6 +671,11 @@ jobs:
GT_S3_ACCESS_KEY_ID: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }}
GT_S3_ACCESS_KEY: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }}
GT_S3_REGION: ${{ vars.AWS_CI_TEST_BUCKET_REGION }}
GT_MINIO_BUCKET: greptime
GT_MINIO_ACCESS_KEY_ID: superpower_ci_user
GT_MINIO_ACCESS_KEY: superpower_password
GT_MINIO_REGION: us-west-2
GT_MINIO_ENDPOINT_URL: http://127.0.0.1:9000
GT_ETCD_ENDPOINTS: http://127.0.0.1:2379
GT_KAFKA_ENDPOINTS: 127.0.0.1:9092
UNITTEST_LOG_DIR: "__unittest_logs"
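To run the same nextest coverage job locally against the new minio fixture, a sketch (assuming `cargo-llvm-cov` and `cargo-nextest` are installed; the secrets-based S3 variables are omitted and the values below are taken verbatim from the workflow):

```shell
# Bring up the minio fixture, export the CI env, and run the suite.
(cd tests-integration/fixtures/minio && docker compose -f docker-compose-standalone.yml up -d --wait)
export GT_MINIO_BUCKET=greptime
export GT_MINIO_ACCESS_KEY_ID=superpower_ci_user
export GT_MINIO_ACCESS_KEY=superpower_password
export GT_MINIO_REGION=us-west-2
export GT_MINIO_ENDPOINT_URL=http://127.0.0.1:9000
cargo llvm-cov nextest --workspace --lcov --output-path lcov.info -F pyo3_backend -F dashboard
```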

View File

@@ -67,19 +67,13 @@ jobs:
- run: 'echo "No action required"'
sqlness:
name: Sqlness Test
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ ubuntu-20.04 ]
steps:
- run: 'echo "No action required"'
sqlness-kafka-wal:
name: Sqlness Test with Kafka Wal
name: Sqlness Test (${{ matrix.mode.name }})
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ ubuntu-20.04 ]
mode:
- name: "Basic"
- name: "Remote WAL"
steps:
- run: 'echo "No action required"'

View File

@@ -2,7 +2,14 @@
Thanks a lot for considering contributing to GreptimeDB. We believe people like you would make GreptimeDB a great product. We intend to build a community where individuals can have open talks, show respect for one another, and speak with true ❤️. Meanwhile, we aim to keep things transparent and make your effort count here.
Please read the guidelines, and they can help you get started. Communicate with respect to developers maintaining and developing the project. In return, they should reciprocate that respect by addressing your issue, reviewing changes, as well as helping finalize and merge your pull requests.
You can find our contributors at https://github.com/GreptimeTeam/greptimedb/graphs/contributors. When you dedicate yourself to GreptimeDB for a few months and keep making high-quality contributions (code, docs, advocacy, etc.), you become a candidate for committership.
A committer will be granted both read & write access to GreptimeDB repos. Here is a list of current committers, excluding GreptimeDB team members:
* [Eugene Tolbakov](https://github.com/etolbakov): PromQL support, SQL engine, InfluxDB APIs, and more.
* [@NiwakaDev](https://github.com/NiwakaDev): SQL engine and storage layer.
Please read the guidelines, and they can help you get started. Communicate respectfully with the developers maintaining and developing the project. In return, they should reciprocate that respect by addressing your issue, reviewing changes, as well as helping finalize and merge your pull requests.
Follow our [README](https://github.com/GreptimeTeam/greptimedb#readme) to get the whole picture of the project. To learn about the design of GreptimeDB, please refer to the [design docs](https://github.com/GrepTimeTeam/docs).

2866
Cargo.lock generated

File diff suppressed because it is too large

View File

@@ -1,6 +1,5 @@
[workspace]
members = [
"benchmarks",
"src/api",
"src/auth",
"src/catalog",
@@ -46,6 +45,7 @@ members = [
"src/object-store",
"src/operator",
"src/partition",
"src/pipeline",
"src/plugins",
"src/promql",
"src/puffin",
@@ -64,7 +64,7 @@ members = [
resolver = "2"
[workspace.package]
version = "0.8.1"
version = "0.8.2"
edition = "2021"
license = "Apache-2.0"
@@ -104,15 +104,15 @@ clap = { version = "4.4", features = ["derive"] }
config = "0.13.0"
crossbeam-utils = "0.8"
dashmap = "5.4"
datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
datafusion-common = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
datafusion-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
datafusion-functions = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
datafusion-optimizer = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
datafusion-physical-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
datafusion-physical-plan = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
datafusion-sql = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
datafusion-substrait = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
datafusion = { git = "https://github.com/apache/datafusion.git", rev = "729b356ef543ffcda6813c7b5373507a04ae0109" }
datafusion-common = { git = "https://github.com/apache/datafusion.git", rev = "729b356ef543ffcda6813c7b5373507a04ae0109" }
datafusion-expr = { git = "https://github.com/apache/datafusion.git", rev = "729b356ef543ffcda6813c7b5373507a04ae0109" }
datafusion-functions = { git = "https://github.com/apache/datafusion.git", rev = "729b356ef543ffcda6813c7b5373507a04ae0109" }
datafusion-optimizer = { git = "https://github.com/apache/datafusion.git", rev = "729b356ef543ffcda6813c7b5373507a04ae0109" }
datafusion-physical-expr = { git = "https://github.com/apache/datafusion.git", rev = "729b356ef543ffcda6813c7b5373507a04ae0109" }
datafusion-physical-plan = { git = "https://github.com/apache/datafusion.git", rev = "729b356ef543ffcda6813c7b5373507a04ae0109" }
datafusion-sql = { git = "https://github.com/apache/datafusion.git", rev = "729b356ef543ffcda6813c7b5373507a04ae0109" }
datafusion-substrait = { git = "https://github.com/apache/datafusion.git", rev = "729b356ef543ffcda6813c7b5373507a04ae0109" }
derive_builder = "0.12"
dotenv = "0.15"
# TODO(LFC): Wait for https://github.com/etcdv3/etcd-client/pull/76
@@ -120,7 +120,7 @@ etcd-client = { git = "https://github.com/MichaelScofield/etcd-client.git", rev
fst = "0.4.7"
futures = "0.3"
futures-util = "0.3"
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "ae26136accd82fbdf8be540cd502f2e94951077e" }
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "a70a6af9c69e40f9a918936a48717343402b4393" }
humantime = "2.1"
humantime-serde = "1.1"
itertools = "0.10"
@@ -146,13 +146,15 @@ raft-engine = { version = "0.4.1", default-features = false }
rand = "0.8"
regex = "1.8"
regex-automata = { version = "0.4" }
reqwest = { version = "0.11", default-features = false, features = [
reqwest = { version = "0.12", default-features = false, features = [
"json",
"rustls-tls-native-roots",
"stream",
"multipart",
] }
rskafka = "0.5"
rstest = "0.21"
rstest_reuse = "0.7"
rust_decimal = "1.33"
schemars = "0.8"
serde = { version = "1.0", features = ["derive"] }
@@ -162,7 +164,7 @@ smallvec = { version = "1", features = ["serde"] }
snafu = "0.8"
sysinfo = "0.30"
# on branch v0.44.x
sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "e4e496b8d62416ad50ce70a1b460c7313610cf5d", features = [
sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "54a267ac89c09b11c0c88934690530807185d3e7", features = [
"visitor",
] }
strum = { version = "0.25", features = ["derive"] }
@@ -222,6 +224,7 @@ mito2 = { path = "src/mito2" }
object-store = { path = "src/object-store" }
operator = { path = "src/operator" }
partition = { path = "src/partition" }
pipeline = { path = "src/pipeline" }
plugins = { path = "src/plugins" }
promql = { path = "src/promql" }
puffin = { path = "src/puffin" }
@@ -250,9 +253,12 @@ incremental = false
[profile.ci]
inherits = "dev"
debug = false
strip = true
[profile.dev.package.sqlness-runner]
debug = false
strip = true
[profile.dev.package.tests-fuzz]
debug = false
strip = true
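The stripped profiles above can be exercised directly; a sketch (custom profiles are selected with `--profile`, and the per-package `dev` overrides apply automatically to dev builds of `sqlness-runner` and `tests-fuzz`):

```shell
# Build the workspace with the stripped `ci` profile defined above.
cargo build --profile ci
```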

View File

@@ -163,6 +163,17 @@ nextest: ## Install nextest tools.
sqlness-test: ## Run sqlness test.
cargo sqlness
# Run fuzz test ${FUZZ_TARGET}.
RUNS ?= 1
FUZZ_TARGET ?= fuzz_alter_table
.PHONY: fuzz
fuzz:
cargo fuzz run ${FUZZ_TARGET} --fuzz-dir tests-fuzz -D -s none -- -runs=${RUNS}
.PHONY: fuzz-ls
fuzz-ls:
cargo fuzz list --fuzz-dir tests-fuzz
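Example invocations of the new targets (assuming `cargo-fuzz` and a nightly toolchain are installed, as in the CI steps above):

```shell
# List available fuzz targets, then run one for 10 iterations.
# fuzz_alter_table is the default FUZZ_TARGET shown above.
make fuzz-ls
make fuzz FUZZ_TARGET=fuzz_alter_table RUNS=10
```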
.PHONY: check
check: ## Cargo check all the targets.
cargo check --workspace --all-targets --all-features
@@ -194,6 +205,10 @@ run-it-in-container: start-etcd ## Run integration tests in dev-builder.
-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:latest \
make test sqlness-test BUILD_JOBS=${BUILD_JOBS}
.PHONY: run-cluster-with-etcd
run-cluster-with-etcd: ## Run greptime cluster with etcd in docker-compose.
docker compose -f ./docker/docker-compose/cluster-with-etcd.yaml up
##@ Docs
config-docs: ## Generate configuration documentation from toml files.
docker run --rm \

View File

@@ -1,38 +0,0 @@
[package]
name = "benchmarks"
version.workspace = true
edition.workspace = true
license.workspace = true
[lints]
workspace = true
[dependencies]
api.workspace = true
arrow.workspace = true
chrono.workspace = true
clap.workspace = true
client = { workspace = true, features = ["testing"] }
common-base.workspace = true
common-telemetry.workspace = true
common-wal.workspace = true
dotenv.workspace = true
futures.workspace = true
futures-util.workspace = true
humantime.workspace = true
humantime-serde.workspace = true
indicatif = "0.17.1"
itertools.workspace = true
lazy_static.workspace = true
log-store.workspace = true
mito2.workspace = true
num_cpus.workspace = true
parquet.workspace = true
prometheus.workspace = true
rand.workspace = true
rskafka.workspace = true
serde.workspace = true
store-api.workspace = true
tokio.workspace = true
toml.workspace = true
uuid.workspace = true

View File

@@ -1,11 +0,0 @@
Benchmarkers for GreptimeDB
--------------------------------
## Wal Benchmarker
The wal benchmarker evaluates the performance of GreptimeDB's Write-Ahead Log (WAL) component. It assesses the read/write performance of the WAL under the diverse workloads generated by the benchmarker.
### How to use
To compile the benchmarker, navigate to the `greptimedb/benchmarks` directory and execute `cargo build --release`. Subsequently, you'll find the compiled target located at `greptimedb/target/release/wal_bench`.
The `./wal_bench -h` command lists the arguments the target accepts. A notable one is the `cfg-file` argument: by supplying a configuration file in TOML format, you can avoid repeatedly specifying cumbersome arguments on the command line.
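Putting these steps together, a sketch of the workflow this README describes (the config path follows the `Args` docs in `benchmarks/src/wal_bench.rs`):

```shell
# Build the benchmarker and run it with the example TOML config
# against a local Kafka broker.
cd benchmarks
cargo build --release
../target/release/wal_bench -c ./config/wal_bench.example.toml
```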

View File

@@ -1,21 +0,0 @@
# Refer to the documentation of `Args` in `benchmarks/src/wal_bench.rs`.
wal_provider = "kafka"
bootstrap_brokers = ["localhost:9092"]
num_workers = 10
num_topics = 32
num_regions = 1000
num_scrapes = 1000
num_rows = 5
col_types = "ifs"
max_batch_size = "512KB"
linger = "1ms"
backoff_init = "10ms"
backoff_max = "1ms"
backoff_base = 2
backoff_deadline = "3s"
compression = "zstd"
rng_seed = 42
skip_read = false
skip_write = false
random_topics = true
report_metrics = false

View File

@@ -1,326 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![feature(int_roundings)]
use std::fs;
use std::sync::Arc;
use std::time::Instant;
use api::v1::{ColumnDataType, ColumnSchema, SemanticType};
use benchmarks::metrics;
use benchmarks::wal_bench::{Args, Config, Region, WalProvider};
use clap::Parser;
use common_telemetry::info;
use common_wal::config::kafka::common::BackoffConfig;
use common_wal::config::kafka::DatanodeKafkaConfig as KafkaConfig;
use common_wal::config::raft_engine::RaftEngineConfig;
use common_wal::options::{KafkaWalOptions, WalOptions};
use itertools::Itertools;
use log_store::kafka::log_store::KafkaLogStore;
use log_store::raft_engine::log_store::RaftEngineLogStore;
use mito2::wal::Wal;
use prometheus::{Encoder, TextEncoder};
use rand::distributions::{Alphanumeric, DistString};
use rand::rngs::SmallRng;
use rand::SeedableRng;
use rskafka::client::partition::Compression;
use rskafka::client::ClientBuilder;
use store_api::logstore::LogStore;
use store_api::storage::RegionId;
async fn run_benchmarker<S: LogStore>(cfg: &Config, topics: &[String], wal: Arc<Wal<S>>) {
let chunk_size = cfg.num_regions.div_ceil(cfg.num_workers);
let region_chunks = (0..cfg.num_regions)
.map(|id| {
build_region(
id as u64,
topics,
&mut SmallRng::seed_from_u64(cfg.rng_seed),
cfg,
)
})
.chunks(chunk_size as usize)
.into_iter()
.map(|chunk| Arc::new(chunk.collect::<Vec<_>>()))
.collect::<Vec<_>>();
let mut write_elapsed = 0;
let mut read_elapsed = 0;
if !cfg.skip_write {
info!("Benchmarking write ...");
let num_scrapes = cfg.num_scrapes;
let timer = Instant::now();
futures::future::join_all((0..cfg.num_workers).map(|i| {
let wal = wal.clone();
let regions = region_chunks[i as usize].clone();
tokio::spawn(async move {
for _ in 0..num_scrapes {
let mut wal_writer = wal.writer();
regions
.iter()
.for_each(|region| region.add_wal_entry(&mut wal_writer));
wal_writer.write_to_wal().await.unwrap();
}
})
}))
.await;
write_elapsed += timer.elapsed().as_millis();
}
if !cfg.skip_read {
info!("Benchmarking read ...");
let timer = Instant::now();
futures::future::join_all((0..cfg.num_workers).map(|i| {
let wal = wal.clone();
let regions = region_chunks[i as usize].clone();
tokio::spawn(async move {
for region in regions.iter() {
region.replay(&wal).await;
}
})
}))
.await;
read_elapsed = timer.elapsed().as_millis();
}
dump_report(cfg, write_elapsed, read_elapsed);
}
fn build_region(id: u64, topics: &[String], rng: &mut SmallRng, cfg: &Config) -> Region {
let wal_options = match cfg.wal_provider {
WalProvider::Kafka => {
assert!(!topics.is_empty());
WalOptions::Kafka(KafkaWalOptions {
topic: topics.get(id as usize % topics.len()).cloned().unwrap(),
})
}
WalProvider::RaftEngine => WalOptions::RaftEngine,
};
Region::new(
RegionId::from_u64(id),
build_schema(&parse_col_types(&cfg.col_types), rng),
wal_options,
cfg.num_rows,
cfg.rng_seed,
)
}
fn build_schema(col_types: &[ColumnDataType], mut rng: &mut SmallRng) -> Vec<ColumnSchema> {
col_types
.iter()
.map(|col_type| ColumnSchema {
column_name: Alphanumeric.sample_string(&mut rng, 5),
datatype: *col_type as i32,
semantic_type: SemanticType::Field as i32,
datatype_extension: None,
})
.chain(vec![ColumnSchema {
column_name: "ts".to_string(),
datatype: ColumnDataType::TimestampMillisecond as i32,
semantic_type: SemanticType::Tag as i32,
datatype_extension: None,
}])
.collect()
}
fn dump_report(cfg: &Config, write_elapsed: u128, read_elapsed: u128) {
let cost_report = format!(
"write costs: {} ms, read costs: {} ms",
write_elapsed, read_elapsed,
);
let total_written_bytes = metrics::METRIC_WAL_WRITE_BYTES_TOTAL.get() as u128;
let write_throughput = if write_elapsed > 0 {
(total_written_bytes * 1000).div_floor(write_elapsed)
} else {
0
};
let total_read_bytes = metrics::METRIC_WAL_READ_BYTES_TOTAL.get() as u128;
let read_throughput = if read_elapsed > 0 {
(total_read_bytes * 1000).div_floor(read_elapsed)
} else {
0
};
let throughput_report = format!(
"total written bytes: {} bytes, total read bytes: {} bytes, write throuput: {} bytes/s ({} mb/s), read throughput: {} bytes/s ({} mb/s)",
total_written_bytes,
total_read_bytes,
write_throughput,
write_throughput.div_floor(1 << 20),
read_throughput,
read_throughput.div_floor(1 << 20),
);
let metrics_report = if cfg.report_metrics {
let mut buffer = Vec::new();
let encoder = TextEncoder::new();
let metrics = prometheus::gather();
encoder.encode(&metrics, &mut buffer).unwrap();
String::from_utf8(buffer).unwrap()
} else {
String::new()
};
info!(
r#"
Benchmark config:
{cfg:?}
Benchmark report:
{cost_report}
{throughput_report}
{metrics_report}"#
);
}
async fn create_topics(cfg: &Config) -> Vec<String> {
// Creates topics.
let client = ClientBuilder::new(cfg.bootstrap_brokers.clone())
.build()
.await
.unwrap();
let ctrl_client = client.controller_client().unwrap();
let (topics, tasks): (Vec<_>, Vec<_>) = (0..cfg.num_topics)
.map(|i| {
let topic = if cfg.random_topics {
format!(
"greptime_wal_bench_topic_{}_{}",
uuid::Uuid::new_v4().as_u128(),
i
)
} else {
format!("greptime_wal_bench_topic_{}", i)
};
let task = ctrl_client.create_topic(
topic.clone(),
1,
cfg.bootstrap_brokers.len() as i16,
2000,
);
(topic, task)
})
.unzip();
// Must ignore errors since we allow topics to be created more than once.
let _ = futures::future::try_join_all(tasks).await;
topics
}
fn parse_compression(comp: &str) -> Compression {
match comp {
"no" => Compression::NoCompression,
"gzip" => Compression::Gzip,
"lz4" => Compression::Lz4,
"snappy" => Compression::Snappy,
"zstd" => Compression::Zstd,
other => unreachable!("Unrecognized compression {other}"),
}
}
fn parse_col_types(col_types: &str) -> Vec<ColumnDataType> {
let parts = col_types.split('x').collect::<Vec<_>>();
assert!(parts.len() <= 2);
let pattern = parts[0];
let repeat = parts
.get(1)
.map(|r| r.parse::<usize>().unwrap())
.unwrap_or(1);
pattern
.chars()
.map(|c| match c {
'i' | 'I' => ColumnDataType::Int64,
'f' | 'F' => ColumnDataType::Float64,
's' | 'S' => ColumnDataType::String,
other => unreachable!("Cannot parse {other} as a column data type"),
})
.cycle()
.take(pattern.len() * repeat)
.collect()
}
fn main() {
// Sets the global log level to INFO and suppresses rskafka logs below ERROR.
std::env::set_var("UNITTEST_LOG_LEVEL", "info,rskafka=error");
common_telemetry::init_default_ut_logging();
let args = Args::parse();
let cfg = if !args.cfg_file.is_empty() {
toml::from_str(&fs::read_to_string(&args.cfg_file).unwrap()).unwrap()
} else {
Config::from(args)
};
// Validates arguments.
if cfg.num_regions < cfg.num_workers {
panic!("num_regions must be greater than or equal to num_workers");
}
if cfg
.num_workers
.min(cfg.num_topics)
.min(cfg.num_regions)
.min(cfg.num_scrapes)
.min(cfg.max_batch_size.as_bytes() as u32)
.min(cfg.bootstrap_brokers.len() as u32)
== 0
{
panic!("Invalid arguments");
}
tokio::runtime::Builder::new_multi_thread()
.enable_all()
.build()
.unwrap()
.block_on(async {
match cfg.wal_provider {
WalProvider::Kafka => {
let topics = create_topics(&cfg).await;
let kafka_cfg = KafkaConfig {
broker_endpoints: cfg.bootstrap_brokers.clone(),
max_batch_size: cfg.max_batch_size,
linger: cfg.linger,
backoff: BackoffConfig {
init: cfg.backoff_init,
max: cfg.backoff_max,
base: cfg.backoff_base,
deadline: Some(cfg.backoff_deadline),
},
compression: parse_compression(&cfg.compression),
..Default::default()
};
let store = Arc::new(KafkaLogStore::try_new(&kafka_cfg).await.unwrap());
let wal = Arc::new(Wal::new(store));
run_benchmarker(&cfg, &topics, wal).await;
}
WalProvider::RaftEngine => {
// The benchmarker assumes the raft engine directory exists.
let store = RaftEngineLogStore::try_new(
"/tmp/greptimedb/raft-engine-wal".to_string(),
RaftEngineConfig::default(),
)
.await
.map(Arc::new)
.unwrap();
let wal = Arc::new(Wal::new(store));
run_benchmarker(&cfg, &[], wal).await;
}
}
});
}

View File

@@ -1,39 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use lazy_static::lazy_static;
use prometheus::*;
/// Logstore label.
pub const LOGSTORE_LABEL: &str = "logstore";
/// Operation type label.
pub const OPTYPE_LABEL: &str = "optype";
lazy_static! {
/// Counters of bytes of each operation on a logstore.
pub static ref METRIC_WAL_OP_BYTES_TOTAL: IntCounterVec = register_int_counter_vec!(
"greptime_bench_wal_op_bytes_total",
"wal operation bytes total",
&[OPTYPE_LABEL],
)
.unwrap();
/// Counter of bytes of the append_batch operation.
pub static ref METRIC_WAL_WRITE_BYTES_TOTAL: IntCounter = METRIC_WAL_OP_BYTES_TOTAL.with_label_values(
&["write"],
);
/// Counter of bytes of the read operation.
pub static ref METRIC_WAL_READ_BYTES_TOTAL: IntCounter = METRIC_WAL_OP_BYTES_TOTAL.with_label_values(
&["read"],
);
}

View File

@@ -1,366 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::mem::size_of;
use std::sync::atomic::{AtomicI64, AtomicU64, Ordering};
use std::sync::{Arc, Mutex};
use std::time::Duration;
use api::v1::value::ValueData;
use api::v1::{ColumnDataType, ColumnSchema, Mutation, OpType, Row, Rows, Value, WalEntry};
use clap::{Parser, ValueEnum};
use common_base::readable_size::ReadableSize;
use common_wal::options::WalOptions;
use futures::StreamExt;
use mito2::wal::{Wal, WalWriter};
use rand::distributions::{Alphanumeric, DistString, Uniform};
use rand::rngs::SmallRng;
use rand::{Rng, SeedableRng};
use serde::{Deserialize, Serialize};
use store_api::logstore::provider::Provider;
use store_api::logstore::LogStore;
use store_api::storage::RegionId;
use crate::metrics;
/// The wal provider.
#[derive(Clone, ValueEnum, Default, Debug, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum WalProvider {
#[default]
RaftEngine,
Kafka,
}
#[derive(Parser)]
pub struct Args {
/// The provided configuration file.
/// The example configuration file can be found at `greptimedb/benchmarks/config/wal_bench.example.toml`.
#[clap(long, short = 'c')]
pub cfg_file: String,
/// The wal provider.
#[clap(long, value_enum, default_value_t = WalProvider::default())]
pub wal_provider: WalProvider,
/// The advertised addresses of the kafka brokers.
/// If there are multiple bootstrap brokers, their addresses should be separated by commas, e.g. "localhost:9092,localhost:9093".
#[clap(long, short = 'b', default_value = "localhost:9092")]
pub bootstrap_brokers: String,
/// The number of workers each running in a dedicated thread.
#[clap(long, default_value_t = num_cpus::get() as u32)]
pub num_workers: u32,
/// The number of kafka topics to be created.
#[clap(long, default_value_t = 32)]
pub num_topics: u32,
/// The number of regions.
#[clap(long, default_value_t = 1000)]
pub num_regions: u32,
/// The number of times each region is scraped.
#[clap(long, default_value_t = 1000)]
pub num_scrapes: u32,
/// The number of rows in each wal entry.
/// Each time a region is scraped, a wal entry containing `num_rows` rows will be produced.
#[clap(long, default_value_t = 5)]
pub num_rows: u32,
/// The column types of the schema for each region.
/// Currently, three column types are supported:
/// - i = ColumnDataType::Int64
/// - f = ColumnDataType::Float64
/// - s = ColumnDataType::String
/// For e.g., "ifs" will be parsed as three columns: i64, f64, and string.
///
/// Additionally, a "x" sign can be provided to repeat the column types for a given number of times.
/// For e.g., "iix2" will be parsed as 4 columns: i64, i64, i64, and i64.
/// This feature is useful if you want to specify many columns.
#[clap(long, default_value = "ifs")]
pub col_types: String,
/// The maximum size of a batch of kafka records.
/// The default value is 512KB.
#[clap(long, default_value = "512KB")]
pub max_batch_size: ReadableSize,
/// The longest duration the kafka client waits before issuing a batch of kafka records.
/// However, a batch of kafka records is issued immediately if a new record cannot fit into the current batch.
#[clap(long, default_value = "1ms")]
pub linger: String,
/// The initial backoff delay of the kafka consumer.
#[clap(long, default_value = "10ms")]
pub backoff_init: String,
/// The maximum backoff delay of the kafka consumer.
#[clap(long, default_value = "1s")]
pub backoff_max: String,
/// The exponential backoff rate of the kafka consumer. The next backoff = base * the current backoff.
#[clap(long, default_value_t = 2)]
pub backoff_base: u32,
/// The deadline of backoff. The backoff ends if the total backoff delay reaches the deadline.
#[clap(long, default_value = "3s")]
pub backoff_deadline: String,
/// The client-side compression algorithm for kafka records.
#[clap(long, default_value = "zstd")]
pub compression: String,
/// The seed of random number generators.
#[clap(long, default_value_t = 42)]
pub rng_seed: u64,
/// Skips the read phase, i.e. region replay, if set to true.
#[clap(long, default_value_t = false)]
pub skip_read: bool,
/// Skips the write phase if set to true.
#[clap(long, default_value_t = false)]
pub skip_write: bool,
/// Randomly generates topic names if set to true.
/// Useful when you want to run the benchmarker without worrying about the topics created before.
#[clap(long, default_value_t = false)]
pub random_topics: bool,
/// Logs out the gathered prometheus metrics when the benchmarker ends.
#[clap(long, default_value_t = false)]
pub report_metrics: bool,
}
/// Benchmarker config.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Config {
pub wal_provider: WalProvider,
pub bootstrap_brokers: Vec<String>,
pub num_workers: u32,
pub num_topics: u32,
pub num_regions: u32,
pub num_scrapes: u32,
pub num_rows: u32,
pub col_types: String,
pub max_batch_size: ReadableSize,
#[serde(with = "humantime_serde")]
pub linger: Duration,
#[serde(with = "humantime_serde")]
pub backoff_init: Duration,
#[serde(with = "humantime_serde")]
pub backoff_max: Duration,
pub backoff_base: u32,
#[serde(with = "humantime_serde")]
pub backoff_deadline: Duration,
pub compression: String,
pub rng_seed: u64,
pub skip_read: bool,
pub skip_write: bool,
pub random_topics: bool,
pub report_metrics: bool,
}
impl From<Args> for Config {
fn from(args: Args) -> Self {
let cfg = Self {
wal_provider: args.wal_provider,
bootstrap_brokers: args
.bootstrap_brokers
.split(',')
.map(ToString::to_string)
.collect::<Vec<_>>(),
num_workers: args.num_workers.min(num_cpus::get() as u32),
num_topics: args.num_topics,
num_regions: args.num_regions,
num_scrapes: args.num_scrapes,
num_rows: args.num_rows,
col_types: args.col_types,
max_batch_size: args.max_batch_size,
linger: humantime::parse_duration(&args.linger).unwrap(),
backoff_init: humantime::parse_duration(&args.backoff_init).unwrap(),
backoff_max: humantime::parse_duration(&args.backoff_max).unwrap(),
backoff_base: args.backoff_base,
backoff_deadline: humantime::parse_duration(&args.backoff_deadline).unwrap(),
compression: args.compression,
rng_seed: args.rng_seed,
skip_read: args.skip_read,
skip_write: args.skip_write,
random_topics: args.random_topics,
report_metrics: args.report_metrics,
};
cfg
}
}
/// The region used for wal benchmarker.
pub struct Region {
id: RegionId,
schema: Vec<ColumnSchema>,
provider: Provider,
next_sequence: AtomicU64,
next_entry_id: AtomicU64,
next_timestamp: AtomicI64,
rng: Mutex<Option<SmallRng>>,
num_rows: u32,
}
impl Region {
/// Creates a new region.
pub fn new(
id: RegionId,
schema: Vec<ColumnSchema>,
wal_options: WalOptions,
num_rows: u32,
rng_seed: u64,
) -> Self {
let provider = match wal_options {
WalOptions::RaftEngine => Provider::raft_engine_provider(id.as_u64()),
WalOptions::Kafka(opts) => Provider::kafka_provider(opts.topic),
};
Self {
id,
schema,
provider,
next_sequence: AtomicU64::new(1),
next_entry_id: AtomicU64::new(1),
next_timestamp: AtomicI64::new(1655276557000),
rng: Mutex::new(Some(SmallRng::seed_from_u64(rng_seed))),
num_rows,
}
}
/// Scrapes the region and adds the generated entry to wal.
pub fn add_wal_entry<S: LogStore>(&self, wal_writer: &mut WalWriter<S>) {
let mutation = Mutation {
op_type: OpType::Put as i32,
sequence: self
.next_sequence
.fetch_add(self.num_rows as u64, Ordering::Relaxed),
rows: Some(self.build_rows()),
};
let entry = WalEntry {
mutations: vec![mutation],
};
metrics::METRIC_WAL_WRITE_BYTES_TOTAL.inc_by(Self::entry_estimated_size(&entry) as u64);
wal_writer
.add_entry(
self.id,
self.next_entry_id.fetch_add(1, Ordering::Relaxed),
&entry,
&self.provider,
)
.unwrap();
}
/// Replays the region.
pub async fn replay<S: LogStore>(&self, wal: &Arc<Wal<S>>) {
let mut wal_stream = wal.scan(self.id, 0, &self.provider).unwrap();
while let Some(res) = wal_stream.next().await {
let (_, entry) = res.unwrap();
metrics::METRIC_WAL_READ_BYTES_TOTAL.inc_by(Self::entry_estimated_size(&entry) as u64);
}
}
/// Computes the estimated size in bytes of the entry.
pub fn entry_estimated_size(entry: &WalEntry) -> usize {
let wrapper_size = size_of::<WalEntry>()
+ entry.mutations.capacity() * size_of::<Mutation>()
+ size_of::<Rows>();
let rows = entry.mutations[0].rows.as_ref().unwrap();
let schema_size = rows.schema.capacity() * size_of::<ColumnSchema>()
+ rows
.schema
.iter()
.map(|s| s.column_name.capacity())
.sum::<usize>();
let values_size = (rows.rows.capacity() * size_of::<Row>())
+ rows
.rows
.iter()
.map(|r| r.values.capacity() * size_of::<Value>())
.sum::<usize>();
wrapper_size + schema_size + values_size
}
fn build_rows(&self) -> Rows {
let cols = self
.schema
.iter()
.map(|col_schema| {
let col_data_type = ColumnDataType::try_from(col_schema.datatype).unwrap();
self.build_col(&col_data_type, self.num_rows)
})
.collect::<Vec<_>>();
let rows = (0..self.num_rows)
.map(|i| {
let values = cols.iter().map(|col| col[i as usize].clone()).collect();
Row { values }
})
.collect();
Rows {
schema: self.schema.clone(),
rows,
}
}
fn build_col(&self, col_data_type: &ColumnDataType, num_rows: u32) -> Vec<Value> {
let mut rng_guard = self.rng.lock().unwrap();
let rng = rng_guard.as_mut().unwrap();
match col_data_type {
ColumnDataType::TimestampMillisecond => (0..num_rows)
.map(|_| {
let ts = self.next_timestamp.fetch_add(1000, Ordering::Relaxed);
Value {
value_data: Some(ValueData::TimestampMillisecondValue(ts)),
}
})
.collect(),
ColumnDataType::Int64 => (0..num_rows)
.map(|_| {
let v = rng.sample(Uniform::new(0, 10_000));
Value {
value_data: Some(ValueData::I64Value(v)),
}
})
.collect(),
ColumnDataType::Float64 => (0..num_rows)
.map(|_| {
let v = rng.sample(Uniform::new(0.0, 5000.0));
Value {
value_data: Some(ValueData::F64Value(v)),
}
})
.collect(),
ColumnDataType::String => (0..num_rows)
.map(|_| {
let v = Alphanumeric.sample_string(rng, 10);
Value {
value_data: Some(ValueData::StringValue(v)),
}
})
.collect(),
_ => unreachable!(),
}
}
}

View File

@@ -13,10 +13,14 @@
| `mode` | String | `standalone` | The running mode of the datanode. It can be `standalone` or `distributed`. |
| `enable_telemetry` | Bool | `true` | Enable telemetry to collect anonymous usage data. |
| `default_timezone` | String | `None` | The default timezone of the server. |
| `runtime` | -- | -- | The runtime options. |
| `runtime.read_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
| `runtime.write_rt_size` | Integer | `8` | The number of threads to execute the runtime for global write operations. |
| `runtime.bg_rt_size` | Integer | `4` | The number of threads to execute the runtime for global background operations. |
| `http` | -- | -- | The HTTP server options. |
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
| `http.timeout` | String | `30s` | HTTP request timeout. |
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>Support the following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`. |
| `http.timeout` | String | `30s` | HTTP request timeout. Set to 0 to disable timeout. |
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
| `grpc` | -- | -- | The gRPC server options. |
| `grpc.addr` | String | `127.0.0.1:4001` | The address to bind the gRPC server. |
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
@@ -62,8 +66,7 @@
| `wal.prefill_log_files` | Bool | `false` | Whether to pre-create log files on start up.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.sync_period` | String | `10s` | Duration for fsyncing log files.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.broker_endpoints` | Array | -- | The Kafka broker endpoints.<br/>**It's only used when the provider is `kafka`**. |
| `wal.max_batch_size` | String | `1MB` | The max size of a single producer batch.<br/>Warning: Kafka has a default limit of 1MB per message in a topic.<br/>**It's only used when the provider is `kafka`**. |
| `wal.linger` | String | `200ms` | The linger duration of a kafka batch producer.<br/>**It's only used when the provider is `kafka`**. |
| `wal.max_batch_bytes` | String | `1MB` | The max size of a single producer batch.<br/>Warning: Kafka has a default limit of 1MB per message in a topic.<br/>**It's only used when the provider is `kafka`**. |
| `wal.consumer_wait_timeout` | String | `100ms` | The consumer wait timeout.<br/>**It's only used when the provider is `kafka`**. |
| `wal.backoff_init` | String | `500ms` | The initial backoff delay.<br/>**It's only used when the provider is `kafka`**. |
| `wal.backoff_max` | String | `10s` | The maximum backoff delay.<br/>**It's only used when the provider is `kafka`**. |
@@ -154,15 +157,20 @@
| --- | -----| ------- | ----------- |
| `mode` | String | `standalone` | The running mode of the datanode. It can be `standalone` or `distributed`. |
| `default_timezone` | String | `None` | The default timezone of the server. |
| `runtime` | -- | -- | The runtime options. |
| `runtime.read_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
| `runtime.write_rt_size` | Integer | `8` | The number of threads to execute the runtime for global write operations. |
| `runtime.bg_rt_size` | Integer | `4` | The number of threads to execute the runtime for global background operations. |
| `heartbeat` | -- | -- | The heartbeat options. |
| `heartbeat.interval` | String | `18s` | Interval for sending heartbeat messages to the metasrv. |
| `heartbeat.retry_interval` | String | `3s` | Interval for retrying to send heartbeat messages to the metasrv. |
| `http` | -- | -- | The HTTP server options. |
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
| `http.timeout` | String | `30s` | HTTP request timeout. |
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>Support the following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`. |
| `http.timeout` | String | `30s` | HTTP request timeout. Set to 0 to disable timeout. |
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
| `grpc` | -- | -- | The gRPC server options. |
| `grpc.addr` | String | `127.0.0.1:4001` | The address to bind the gRPC server. |
| `grpc.hostname` | String | `127.0.0.1` | The hostname advertised to the metasrv,<br/>and used for connections from outside the host |
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
| `grpc.tls` | -- | -- | gRPC server TLS options, see `mysql.tls` section. |
| `grpc.tls.mode` | String | `disable` | TLS mode. |
@@ -240,6 +248,10 @@
| `use_memory_store` | Bool | `false` | Store data in memory. |
| `enable_telemetry` | Bool | `true` | Whether to enable greptimedb telemetry. |
| `store_key_prefix` | String | `""` | If it's not empty, the metasrv will store all data with this key prefix. |
| `runtime` | -- | -- | The runtime options. |
| `runtime.read_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
| `runtime.write_rt_size` | Integer | `8` | The number of threads to execute the runtime for global write operations. |
| `runtime.bg_rt_size` | Integer | `4` | The number of threads to execute the runtime for global background operations. |
| `procedure` | -- | -- | Procedure storage options. |
| `procedure.max_retry_times` | Integer | `12` | Procedure max retry time. |
| `procedure.retry_delay` | String | `500ms` | Initial retry delay of procedures, increases exponentially |
@@ -294,12 +306,28 @@
| `node_id` | Integer | `None` | The datanode identifier and should be unique in the cluster. |
| `require_lease_before_startup` | Bool | `false` | Start services after regions have obtained leases.<br/>It will block the datanode start if it can't receive leases in the heartbeat from metasrv. |
| `init_regions_in_background` | Bool | `false` | Initialize all regions in the background during the startup.<br/>By default, it provides services after all regions have been initialized. |
| `rpc_addr` | String | `127.0.0.1:3001` | The gRPC address of the datanode. |
| `rpc_hostname` | String | `None` | The hostname of the datanode. |
| `rpc_runtime_size` | Integer | `8` | The number of gRPC server worker threads. |
| `rpc_max_recv_message_size` | String | `512MB` | The maximum receive message size for gRPC server. |
| `rpc_max_send_message_size` | String | `512MB` | The maximum send message size for gRPC server. |
| `enable_telemetry` | Bool | `true` | Enable telemetry to collect anonymous usage data. |
| `init_regions_parallelism` | Integer | `16` | Parallelism of initializing regions. |
| `rpc_addr` | String | `None` | Deprecated, use `grpc.addr` instead. |
| `rpc_hostname` | String | `None` | Deprecated, use `grpc.hostname` instead. |
| `rpc_runtime_size` | Integer | `None` | Deprecated, use `grpc.runtime_size` instead. |
| `rpc_max_recv_message_size` | String | `None` | Deprecated, use `grpc.max_recv_message_size` instead. |
| `rpc_max_send_message_size` | String | `None` | Deprecated, use `grpc.max_send_message_size` instead. |
| `grpc` | -- | -- | The gRPC server options. |
| `grpc.addr` | String | `127.0.0.1:3001` | The address to bind the gRPC server. |
| `grpc.hostname` | String | `127.0.0.1` | The hostname advertised to the metasrv,<br/>and used for connections from outside the host |
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
| `grpc.max_recv_message_size` | String | `512MB` | The maximum receive message size for gRPC server. |
| `grpc.max_send_message_size` | String | `512MB` | The maximum send message size for gRPC server. |
| `grpc.tls` | -- | -- | gRPC server TLS options, see `mysql.tls` section. |
| `grpc.tls.mode` | String | `disable` | TLS mode. |
| `grpc.tls.cert_path` | String | `None` | Certificate file path. |
| `grpc.tls.key_path` | String | `None` | Private key file path. |
| `grpc.tls.watch` | Bool | `false` | Watch for Certificate and key file change and auto reload.<br/>For now, gRPC tls config does not support auto reload. |
| `runtime` | -- | -- | The runtime options. |
| `runtime.read_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
| `runtime.write_rt_size` | Integer | `8` | The number of threads to execute the runtime for global write operations. |
| `runtime.bg_rt_size` | Integer | `4` | The number of threads to execute the runtime for global background operations. |
| `heartbeat` | -- | -- | The heartbeat options. |
| `heartbeat.interval` | String | `3s` | Interval for sending heartbeat messages to the metasrv. |
| `heartbeat.retry_interval` | String | `3s` | Interval for retrying to send heartbeat messages to the metasrv. |
@@ -325,8 +353,7 @@
| `wal.prefill_log_files` | Bool | `false` | Whether to pre-create log files on start up.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.sync_period` | String | `10s` | Duration for fsyncing log files.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.broker_endpoints` | Array | -- | The Kafka broker endpoints.<br/>**It's only used when the provider is `kafka`**. |
| `wal.max_batch_size` | String | `1MB` | The max size of a single producer batch.<br/>Warning: Kafka has a default limit of 1MB per message in a topic.<br/>**It's only used when the provider is `kafka`**. |
| `wal.linger` | String | `200ms` | The linger duration of a kafka batch producer.<br/>**It's only used when the provider is `kafka`**. |
| `wal.max_batch_bytes` | String | `1MB` | The max size of a single producer batch.<br/>Warning: Kafka has a default limit of 1MB per message in a topic.<br/>**It's only used when the provider is `kafka`**. |
| `wal.consumer_wait_timeout` | String | `100ms` | The consumer wait timeout.<br/>**It's only used when the provider is `kafka`**. |
| `wal.backoff_init` | String | `500ms` | The initial backoff delay.<br/>**It's only used when the provider is `kafka`**. |
| `wal.backoff_max` | String | `10s` | The maximum backoff delay.<br/>**It's only used when the provider is `kafka`**. |

View File

@@ -13,24 +13,71 @@ require_lease_before_startup = false
## By default, it provides services after all regions have been initialized.
init_regions_in_background = false
## The gRPC address of the datanode.
## Enable telemetry to collect anonymous usage data.
enable_telemetry = true
## Parallelism of initializing regions.
init_regions_parallelism = 16
## Deprecated, use `grpc.addr` instead.
## +toml2docs:none-default
rpc_addr = "127.0.0.1:3001"
## The hostname of the datanode.
## Deprecated, use `grpc.hostname` instead.
## +toml2docs:none-default
rpc_hostname = "127.0.0.1"
## The number of gRPC server worker threads.
## Deprecated, use `grpc.runtime_size` instead.
## +toml2docs:none-default
rpc_runtime_size = 8
## The maximum receive message size for gRPC server.
## Deprecated, use `grpc.max_recv_message_size` instead.
## +toml2docs:none-default
rpc_max_recv_message_size = "512MB"
## The maximum send message size for gRPC server.
## Deprecated, use `grpc.max_send_message_size` instead.
## +toml2docs:none-default
rpc_max_send_message_size = "512MB"
## Enable telemetry to collect anonymous usage data.
enable_telemetry = true
## The gRPC server options.
[grpc]
## The address to bind the gRPC server.
addr = "127.0.0.1:3001"
## The hostname advertised to the metasrv,
## and used for connections from outside the host
hostname = "127.0.0.1"
## The number of server worker threads.
runtime_size = 8
## The maximum receive message size for gRPC server.
max_recv_message_size = "512MB"
## The maximum send message size for gRPC server.
max_send_message_size = "512MB"
## gRPC server TLS options, see `mysql.tls` section.
[grpc.tls]
## TLS mode.
mode = "disable"
## Certificate file path.
## +toml2docs:none-default
cert_path = ""
## Private key file path.
## +toml2docs:none-default
key_path = ""
## Watch for Certificate and key file change and auto reload.
## For now, gRPC tls config does not support auto reload.
watch = false
## The runtime options.
[runtime]
## The number of threads to execute the runtime for global read operations.
read_rt_size = 8
## The number of threads to execute the runtime for global write operations.
write_rt_size = 8
## The number of threads to execute the runtime for global background operations.
bg_rt_size = 4
## The heartbeat options.
[heartbeat]
@@ -120,11 +167,7 @@ broker_endpoints = ["127.0.0.1:9092"]
## The max size of a single producer batch.
## Warning: Kafka has a default limit of 1MB per message in a topic.
## **It's only used when the provider is `kafka`**.
max_batch_size = "1MB"
## The linger duration of a kafka batch producer.
## **It's only used when the provider is `kafka`**.
linger = "200ms"
max_batch_bytes = "1MB"
## The consumer wait timeout.
## **It's only used when the provider is `kafka`**.

View File

@@ -5,6 +5,15 @@ mode = "standalone"
## +toml2docs:none-default
default_timezone = "UTC"
## The runtime options.
[runtime]
## The number of threads to execute the runtime for global read operations.
read_rt_size = 8
## The number of threads to execute the runtime for global write operations.
write_rt_size = 8
## The number of threads to execute the runtime for global background operations.
bg_rt_size = 4
## The heartbeat options.
[heartbeat]
## Interval for sending heartbeat messages to the metasrv.
@@ -17,16 +26,20 @@ retry_interval = "3s"
[http]
## The address to bind the HTTP server.
addr = "127.0.0.1:4000"
## HTTP request timeout.
## HTTP request timeout. Set to 0 to disable timeout.
timeout = "30s"
## HTTP request body limit.
## Support the following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
## Set to 0 to disable limit.
body_limit = "64MB"
## The gRPC server options.
[grpc]
## The address to bind the gRPC server.
addr = "127.0.0.1:4001"
## The hostname advertised to the metasrv,
## and used for connections from outside the host
hostname = "127.0.0.1"
## The number of server worker threads.
runtime_size = 8

View File

@@ -25,6 +25,15 @@ enable_telemetry = true
## If it's not empty, the metasrv will store all data with this key prefix.
store_key_prefix = ""
## The runtime options.
[runtime]
## The number of threads to execute the runtime for global read operations.
read_rt_size = 8
## The number of threads to execute the runtime for global write operations.
write_rt_size = 8
## The number of threads to execute the runtime for global background operations.
bg_rt_size = 4
## Procedure storage options.
[procedure]

View File

@@ -8,14 +8,24 @@ enable_telemetry = true
## +toml2docs:none-default
default_timezone = "UTC"
## The runtime options.
[runtime]
## The number of threads to execute the runtime for global read operations.
read_rt_size = 8
## The number of threads to execute the runtime for global write operations.
write_rt_size = 8
## The number of threads to execute the runtime for global background operations.
bg_rt_size = 4
## The HTTP server options.
[http]
## The address to bind the HTTP server.
addr = "127.0.0.1:4000"
## HTTP request timeout.
## HTTP request timeout. Set to 0 to disable timeout.
timeout = "30s"
## HTTP request body limit.
## Support the following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
## Set to 0 to disable limit.
body_limit = "64MB"
## The gRPC server options.
@@ -166,11 +176,7 @@ broker_endpoints = ["127.0.0.1:9092"]
## The max size of a single producer batch.
## Warning: Kafka has a default limit of 1MB per message in a topic.
## **It's only used when the provider is `kafka`**.
max_batch_size = "1MB"
## The linger duration of a kafka batch producer.
## **It's only used when the provider is `kafka`**.
linger = "200ms"
max_batch_bytes = "1MB"
## The consumer wait timeout.
## **It's only used when the provider is `kafka`**.

View File

@@ -0,0 +1,102 @@
x-custom:
initial_cluster_token: &initial_cluster_token "--initial-cluster-token=etcd-cluster"
common_settings: &common_settings
image: quay.io/coreos/etcd:v3.5.10
entrypoint: /usr/local/bin/etcd
services:
etcd0:
<<: *common_settings
container_name: etcd0
ports:
- 2379:2379
- 2380:2380
command:
- --name=etcd0
- --data-dir=/var/lib/etcd
- --initial-advertise-peer-urls=http://etcd0:2380
- --listen-peer-urls=http://0.0.0.0:2380
- --listen-client-urls=http://0.0.0.0:2379
- --advertise-client-urls=http://etcd0:2379
- --heartbeat-interval=250
- --election-timeout=1250
- --initial-cluster=etcd0=http://etcd0:2380
- --initial-cluster-state=new
- *initial_cluster_token
volumes:
- /tmp/greptimedb-cluster-docker-compose/etcd0:/var/lib/etcd
healthcheck:
test: [ "CMD", "etcdctl", "--endpoints=http://etcd0:2379", "endpoint", "health" ]
interval: 5s
timeout: 3s
retries: 5
networks:
- greptimedb
metasrv:
image: docker.io/greptime/greptimedb:latest
container_name: metasrv
ports:
- 3002:3002
command:
- metasrv
- start
- --bind-addr=0.0.0.0:3002
- --server-addr=metasrv:3002
- --store-addrs=etcd0:2379
healthcheck:
test: [ "CMD", "curl", "-f", "http://metasrv:3002/health" ]
interval: 5s
timeout: 3s
retries: 5
depends_on:
etcd0:
condition: service_healthy
networks:
- greptimedb
datanode0:
image: docker.io/greptime/greptimedb:latest
container_name: datanode0
ports:
- 3001:3001
command:
- datanode
- start
- --node-id=0
- --rpc-addr=0.0.0.0:3001
- --rpc-hostname=datanode0:3001
- --metasrv-addr=metasrv:3002
volumes:
- /tmp/greptimedb-cluster-docker-compose/datanode0:/tmp/greptimedb
depends_on:
metasrv:
condition: service_healthy
networks:
- greptimedb
frontend0:
image: docker.io/greptime/greptimedb:latest
container_name: frontend0
ports:
- 4000:4000
- 4001:4001
- 4002:4002
- 4003:4003
command:
- frontend
- start
- --metasrv-addrs=metasrv:3002
- --http-addr=0.0.0.0:4000
- --rpc-addr=0.0.0.0:4001
- --mysql-addr=0.0.0.0:4002
- --postgres-addr=0.0.0.0:4003
depends_on:
metasrv:
condition: service_healthy
networks:
- greptimedb
networks:
greptimedb:
name: greptimedb
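To try the cluster defined above, a sketch (the compose command matches the `run-cluster-with-etcd` make target added in this change; connecting assumes a local `mysql` client and uses the frontend's MySQL port from the file):

```shell
# Bring up etcd, metasrv, datanode, and frontend, then connect via MySQL.
docker compose -f ./docker/docker-compose/cluster-with-etcd.yaml up
mysql -h 127.0.0.1 -P 4002
```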

View File

@@ -0,0 +1,253 @@
# How to run TSBS Benchmark
This document contains the steps to run TSBS Benchmark. Our results are listed in other files in the same directory.
## Prerequisites
You need the following tools to run TSBS Benchmark:
- Go
- git
- make
- Rust (optional, if you want to build the DB from source)
## Build TSBS suite
Clone our fork of TSBS:
```shell
git clone https://github.com/GreptimeTeam/tsbs.git
```
Then build it:
```shell
cd tsbs
make
```
You can check the `bin/` directory for compiled binaries. We will only use some of them.
```shell
ls ./bin/
```
Binaries we will use later:
- `tsbs_generate_data`
- `tsbs_generate_queries`
- `tsbs_load_greptime`
- `tsbs_run_queries_influx`
## Generate test data and queries
The data is generated by `tsbs_generate_data`
```shell
mkdir bench-data
./bin/tsbs_generate_data --use-case="cpu-only" --seed=123 --scale=4000 \
--timestamp-start="2023-06-11T00:00:00Z" \
--timestamp-end="2023-06-14T00:00:00Z" \
--log-interval="10s" --format="influx" \
> ./bench-data/influx-data.lp
```
Here we generate 4000 time series over 3 days at a 10s interval. We'll write via the InfluxDB line protocol, so the target format is `influx`.
Queries are generated by `tsbs_generate_queries`. You can change the parameters, but make sure they match those used for `tsbs_generate_data`.
```shell
./bin/tsbs_generate_queries \
--use-case="devops" --seed=123 --scale=4000 \
--timestamp-start="2023-06-11T00:00:00Z" \
--timestamp-end="2023-06-14T00:00:01Z" \
--queries=100 \
--query-type cpu-max-all-1 \
--format="greptime" \
> ./bench-data/greptime-queries-cpu-max-all-1.dat
./bin/tsbs_generate_queries \
--use-case="devops" --seed=123 --scale=4000 \
--timestamp-start="2023-06-11T00:00:00Z" \
--timestamp-end="2023-06-14T00:00:01Z" \
--queries=100 \
--query-type cpu-max-all-8 \
--format="greptime" \
> ./bench-data/greptime-queries-cpu-max-all-8.dat
./bin/tsbs_generate_queries \
--use-case="devops" --seed=123 --scale=4000 \
--timestamp-start="2023-06-11T00:00:00Z" \
--timestamp-end="2023-06-14T00:00:01Z" \
--queries=50 \
--query-type double-groupby-1 \
--format="greptime" \
> ./bench-data/greptime-queries-double-groupby-1.dat
./bin/tsbs_generate_queries \
--use-case="devops" --seed=123 --scale=4000 \
--timestamp-start="2023-06-11T00:00:00Z" \
--timestamp-end="2023-06-14T00:00:01Z" \
--queries=50 \
--query-type double-groupby-5 \
--format="greptime" \
> ./bench-data/greptime-queries-double-groupby-5.dat
./bin/tsbs_generate_queries \
--use-case="devops" --seed=123 --scale=4000 \
--timestamp-start="2023-06-11T00:00:00Z" \
--timestamp-end="2023-06-14T00:00:01Z" \
--queries=50 \
--query-type double-groupby-all \
--format="greptime" \
> ./bench-data/greptime-queries-double-groupby-all.dat
./bin/tsbs_generate_queries \
--use-case="devops" --seed=123 --scale=4000 \
--timestamp-start="2023-06-11T00:00:00Z" \
--timestamp-end="2023-06-14T00:00:01Z" \
--queries=50 \
--query-type groupby-orderby-limit \
--format="greptime" \
> ./bench-data/greptime-queries-groupby-orderby-limit.dat
./bin/tsbs_generate_queries \
--use-case="devops" --seed=123 --scale=4000 \
--timestamp-start="2023-06-11T00:00:00Z" \
--timestamp-end="2023-06-14T00:00:01Z" \
--queries=100 \
--query-type high-cpu-1 \
--format="greptime" \
> ./bench-data/greptime-queries-high-cpu-1.dat
./bin/tsbs_generate_queries \
--use-case="devops" --seed=123 --scale=4000 \
--timestamp-start="2023-06-11T00:00:00Z" \
--timestamp-end="2023-06-14T00:00:01Z" \
--queries=50 \
--query-type high-cpu-all \
--format="greptime" \
> ./bench-data/greptime-queries-high-cpu-all.dat
./bin/tsbs_generate_queries \
--use-case="devops" --seed=123 --scale=4000 \
--timestamp-start="2023-06-11T00:00:00Z" \
--timestamp-end="2023-06-14T00:00:01Z" \
--queries=10 \
--query-type lastpoint \
--format="greptime" \
> ./bench-data/greptime-queries-lastpoint.dat
./bin/tsbs_generate_queries \
--use-case="devops" --seed=123 --scale=4000 \
--timestamp-start="2023-06-11T00:00:00Z" \
--timestamp-end="2023-06-14T00:00:01Z" \
--queries=100 \
--query-type single-groupby-1-1-1 \
--format="greptime" \
> ./bench-data/greptime-queries-single-groupby-1-1-1.dat
./bin/tsbs_generate_queries \
--use-case="devops" --seed=123 --scale=4000 \
--timestamp-start="2023-06-11T00:00:00Z" \
--timestamp-end="2023-06-14T00:00:01Z" \
--queries=100 \
--query-type single-groupby-1-1-12 \
--format="greptime" \
> ./bench-data/greptime-queries-single-groupby-1-1-12.dat
./bin/tsbs_generate_queries \
--use-case="devops" --seed=123 --scale=4000 \
--timestamp-start="2023-06-11T00:00:00Z" \
--timestamp-end="2023-06-14T00:00:01Z" \
--queries=100 \
--query-type single-groupby-1-8-1 \
--format="greptime" \
> ./bench-data/greptime-queries-single-groupby-1-8-1.dat
./bin/tsbs_generate_queries \
--use-case="devops" --seed=123 --scale=4000 \
--timestamp-start="2023-06-11T00:00:00Z" \
--timestamp-end="2023-06-14T00:00:01Z" \
--queries=100 \
--query-type single-groupby-5-1-1 \
--format="greptime" \
> ./bench-data/greptime-queries-single-groupby-5-1-1.dat
./bin/tsbs_generate_queries \
--use-case="devops" --seed=123 --scale=4000 \
--timestamp-start="2023-06-11T00:00:00Z" \
--timestamp-end="2023-06-14T00:00:01Z" \
--queries=100 \
--query-type single-groupby-5-1-12 \
--format="greptime" \
> ./bench-data/greptime-queries-single-groupby-5-1-12.dat
./bin/tsbs_generate_queries \
--use-case="devops" --seed=123 --scale=4000 \
--timestamp-start="2023-06-11T00:00:00Z" \
--timestamp-end="2023-06-14T00:00:01Z" \
--queries=100 \
--query-type single-groupby-5-8-1 \
--format="greptime" \
> ./bench-data/greptime-queries-single-groupby-5-8-1.dat
```
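These invocations differ only in `--query-type` and `--queries`, so a small loop can generate them all at once. This is a sketch that assumes a POSIX shell and the same flags as the commands above:
```shell
# Query types paired with the query counts used above.
for spec in \
    "cpu-max-all-1:100" "cpu-max-all-8:100" \
    "double-groupby-1:50" "double-groupby-5:50" "double-groupby-all:50" \
    "groupby-orderby-limit:50" "high-cpu-1:100" "high-cpu-all:50" \
    "lastpoint:10" \
    "single-groupby-1-1-1:100" "single-groupby-1-1-12:100" \
    "single-groupby-1-8-1:100" "single-groupby-5-1-1:100" \
    "single-groupby-5-1-12:100" "single-groupby-5-8-1:100"; do
  qtype="${spec%%:*}"
  count="${spec##*:}"
  ./bin/tsbs_generate_queries \
    --use-case="devops" --seed=123 --scale=4000 \
    --timestamp-start="2023-06-11T00:00:00Z" \
    --timestamp-end="2023-06-14T00:00:01Z" \
    --queries="$count" \
    --query-type "$qtype" \
    --format="greptime" \
    > "./bench-data/greptime-queries-${qtype}.dat"
done
```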
## Start GreptimeDB
Refer to our [document](https://docs.greptime.com/getting-started/installation/overview) for how to install and start GreptimeDB, or check this [document](https://docs.greptime.com/contributor-guide/getting-started#compile-and-run) for how to build GreptimeDB from source.
## Write Data
After the DB is started, we can use `tsbs_load_greptime` to test the write performance.
```shell
./bin/tsbs_load_greptime \
--urls=http://localhost:4000 \
--file=./bench-data/influx-data.lp \
--batch-size=3000 \
--gzip=false \
--workers=6
```
The parameters here are only an example; pick whatever you like or adjust them to match your target scenario.
Note that if you want to rerun `tsbs_load_greptime`, destroy the DB, clear its previous data, and restart it first. Existing duplicate data will skew both write and query performance.
## Query Data
After the data is imported, you can then run queries. The following script runs all queries. You can also choose a subset of queries to run.
```shell
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-cpu-max-all-1.dat \
--db-name=benchmark \
--urls="http://localhost:4000"
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-cpu-max-all-8.dat \
--db-name=benchmark \
--urls="http://localhost:4000"
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-double-groupby-1.dat \
--db-name=benchmark \
--urls="http://localhost:4000"
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-double-groupby-5.dat \
--db-name=benchmark \
--urls="http://localhost:4000"
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-double-groupby-all.dat \
--db-name=benchmark \
--urls="http://localhost:4000"
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-groupby-orderby-limit.dat \
--db-name=benchmark \
--urls="http://localhost:4000"
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-high-cpu-1.dat \
--db-name=benchmark \
--urls="http://localhost:4000"
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-high-cpu-all.dat \
--db-name=benchmark \
--urls="http://localhost:4000"
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-lastpoint.dat \
--db-name=benchmark \
--urls="http://localhost:4000"
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-single-groupby-1-1-1.dat \
--db-name=benchmark \
--urls="http://localhost:4000"
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-single-groupby-1-1-12.dat \
--db-name=benchmark \
--urls="http://localhost:4000"
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-single-groupby-1-8-1.dat \
--db-name=benchmark \
--urls="http://localhost:4000"
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-single-groupby-5-1-1.dat \
--db-name=benchmark \
--urls="http://localhost:4000"
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-single-groupby-5-1-12.dat \
--db-name=benchmark \
--urls="http://localhost:4000"
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-single-groupby-5-8-1.dat \
--db-name=benchmark \
--urls="http://localhost:4000"
```
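Since these commands differ only in the query file, a loop over the generated files is equivalent (a sketch assuming the file names produced in the generation step above):
```shell
# Run every generated query file against the DB.
for f in ./bench-data/greptime-queries-*.dat; do
  ./bin/tsbs_run_queries_influx --file="$f" \
    --db-name=benchmark \
    --urls="http://localhost:4000"
done
```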
Rerunning queries does not require re-importing the data; just execute the corresponding command again.

View File

@@ -1,527 +0,0 @@
# Schema Structs
# Common Schemas
The `datatypes` crate defines the elementary schema structs that describe the metadata.
## ColumnSchema
[ColumnSchema](https://github.com/GreptimeTeam/greptimedb/blob/9fa871a3fad07f583dc1863a509414da393747f8/src/datatypes/src/schema/column_schema.rs#L36) represents the metadata of a column. It is equivalent to arrow's [Field](https://docs.rs/arrow/latest/arrow/datatypes/struct.Field.html) with additional metadata such as default constraint and whether the column is a time index. The time index is the column with a `TIME INDEX` constraint of a table. We can convert the `ColumnSchema` into an arrow `Field` and convert the `Field` back to the `ColumnSchema` without losing metadata.
```rust
pub struct ColumnSchema {
    pub name: String,
    pub data_type: ConcreteDataType,
    is_nullable: bool,
    is_time_index: bool,
    default_constraint: Option<ColumnDefaultConstraint>,
    metadata: Metadata,
}
```
## Schema
[Schema](https://github.com/GreptimeTeam/greptimedb/blob/9fa871a3fad07f583dc1863a509414da393747f8/src/datatypes/src/schema.rs#L38) is an ordered sequence of `ColumnSchema`. It is equivalent to arrow's [Schema](https://docs.rs/arrow/latest/arrow/datatypes/struct.Schema.html) with additional metadata including the index of the time index column and the version of this schema. Same as `ColumnSchema`, we can convert our `Schema` from/to arrow's `Schema`.
```rust
use arrow::datatypes::Schema as ArrowSchema;

pub struct Schema {
    column_schemas: Vec<ColumnSchema>,
    name_to_index: HashMap<String, usize>,
    arrow_schema: Arc<ArrowSchema>,
    timestamp_index: Option<usize>,
    version: u32,
}

pub type SchemaRef = Arc<Schema>;
```
We alias `Arc<Schema>` as `SchemaRef` since it is used frequently. Mostly, we use our `ColumnSchema` and `Schema` structs instead of Arrow's `Field` and `Schema` unless we need to invoke third-party libraries (like DataFusion or ArrowFlight) that rely on Arrow.
## RawSchema
`Schema` contains fields such as a map from column names to their indices in the `ColumnSchema` sequence and a cached arrow `Schema`. We can reconstruct these fields from the `ColumnSchema` sequence, so we don't want to serialize them. This is why we don't derive `Serialize` and `Deserialize` for `Schema`. Instead, we introduce a new struct, [RawSchema](https://github.com/GreptimeTeam/greptimedb/blob/9fa871a3fad07f583dc1863a509414da393747f8/src/datatypes/src/schema/raw.rs#L24), which keeps all required fields of a `Schema` and derives the serialization traits. To serialize a `Schema`, we first convert it into a `RawSchema` and serialize the `RawSchema`.
```rust
pub struct RawSchema {
    pub column_schemas: Vec<ColumnSchema>,
    pub timestamp_index: Option<usize>,
    pub version: u32,
}
```
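As a self-contained illustration of that round trip, here is a minimal sketch with the column type reduced to a plain string; the real structs and conversion impls in the codebase differ (see the links above):
```rust
use std::collections::HashMap;

use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize)]
struct RawSchema {
    column_schemas: Vec<String>, // simplified: the real code stores ColumnSchema
    timestamp_index: Option<usize>,
    version: u32,
}

struct Schema {
    column_schemas: Vec<String>,
    name_to_index: HashMap<String, usize>, // derived, so never serialized
    timestamp_index: Option<usize>,
    version: u32,
}

impl From<&Schema> for RawSchema {
    fn from(s: &Schema) -> Self {
        RawSchema {
            column_schemas: s.column_schemas.clone(),
            timestamp_index: s.timestamp_index,
            version: s.version,
        }
    }
}

impl From<RawSchema> for Schema {
    fn from(raw: RawSchema) -> Self {
        // Derived fields (here the name -> index map; in the real code also
        // the cached arrow schema) are rebuilt instead of deserialized.
        let name_to_index = raw
            .column_schemas
            .iter()
            .enumerate()
            .map(|(i, n)| (n.clone(), i))
            .collect();
        Schema {
            column_schemas: raw.column_schemas,
            name_to_index,
            timestamp_index: raw.timestamp_index,
            version: raw.version,
        }
    }
}

fn main() {
    let schema = Schema::from(RawSchema {
        column_schemas: vec!["ts".to_string(), "host".to_string()],
        timestamp_index: Some(0),
        version: 0,
    });
    // Serialize by way of RawSchema, as the text describes.
    let json = serde_json::to_string(&RawSchema::from(&schema)).unwrap();
    println!("{json}");
}
```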
We want to keep the `Schema` simple and avoid putting too much business-related metadata in it as many different structs or traits rely on it.
# Schema of the Table
A table maintains its schema in [TableMeta](https://github.com/GreptimeTeam/greptimedb/blob/9fa871a3fad07f583dc1863a509414da393747f8/src/table/src/metadata.rs#L97).
```rust
pub struct TableMeta {
    pub schema: SchemaRef,
    pub primary_key_indices: Vec<usize>,
    pub value_indices: Vec<usize>,
    // ...
}
```
The order of columns in `TableMeta::schema` is the same as the order specified in the `CREATE TABLE` statement which users use to create this table.
The field `primary_key_indices` stores the indices of the primary key columns. The field `value_indices` records the indices of the value columns (columns that are neither primary key nor time index; we sometimes call them field columns).
Suppose we create a table with the following SQL:
```sql
CREATE TABLE cpu (
    ts TIMESTAMP,
    host STRING,
    usage_user DOUBLE,
    usage_system DOUBLE,
    datacenter STRING,
    TIME INDEX (ts),
    PRIMARY KEY(datacenter, host)
) ENGINE=mito;
```
Then the table's `TableMeta` may look like this:
```json
{
    "schema": {
        "column_schemas": [
            "ts",
            "host",
            "usage_user",
            "usage_system",
            "datacenter"
        ],
        "time_index": 0,
        "version": 0
    },
    "primary_key_indices": [4, 1],
    "value_indices": [2, 3]
}
```
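As a quick sanity check, a self-contained sketch can derive `value_indices` exactly the way the text describes: every column that is neither part of the primary key nor the time index. The function name here is ours, not the codebase's:
```rust
// Derive value_indices from the column count, primary key indices, and time index.
fn value_indices(
    num_columns: usize,
    primary_key_indices: &[usize],
    time_index: usize,
) -> Vec<usize> {
    (0..num_columns)
        .filter(|i| *i != time_index && !primary_key_indices.contains(i))
        .collect()
}

fn main() {
    // Columns of `cpu`: ts, host, usage_user, usage_system, datacenter.
    // Primary key: (datacenter, host) -> [4, 1]; time index: ts -> 0.
    assert_eq!(value_indices(5, &[4, 1], 0), vec![2, 3]);
}
```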
# Schemas of the storage engine
We split a table into one or more units with the same schema and then store these units in the storage engine. Each unit is a region in the storage engine.
The storage engine maintains schemas of regions in more complicated ways because it
- adds internal columns that are invisible to users to store additional metadata for each row
- provides a data model similar to the key-value model so it organizes columns in a different order
- maintains additional metadata like column id or column family
So the storage engine defines several schema structs:
- RegionSchema
- StoreSchema
- ProjectedSchema
## RegionSchema
A [RegionSchema](https://github.com/GreptimeTeam/greptimedb/blob/9fa871a3fad07f583dc1863a509414da393747f8/src/storage/src/schema/region.rs#L37) describes the schema of a region.
```rust
pub struct RegionSchema {
    user_schema: SchemaRef,
    store_schema: StoreSchemaRef,
    columns: ColumnsMetadataRef,
}
```
Each region reserves some columns called `internal columns` for internal usage:
- `__sequence`, sequence number of a row
- `__op_type`, operation type of a row, such as `PUT` or `DELETE`
- `__version`, user-specified version of a row, reserved but not used. We might remove this in the future
The table engine can't see the `__sequence` and `__op_type` columns, so the `RegionSchema` itself maintains two internal schemas:
- User schema, a `Schema` struct that doesn't have internal columns
- Store schema, a `StoreSchema` struct that has internal columns
The `ColumnsMetadata` struct keeps metadata about all columns, but most of the time we only need the metadata in the user schema and the store schema, so we just ignore it here. We may remove this struct in the future.
`RegionSchema` organizes columns in the following order:
```
key columns, timestamp, [__version,] value columns, __sequence, __op_type
```
We can ignore the `__version` column because it is disabled now:
```
key columns, timestamp, value columns, __sequence, __op_type
```
Key columns are the columns of a table's primary key, and the timestamp is the time index column. A region sorts all rows by key columns, timestamp, sequence, and op type.
So the `RegionSchema` of our `cpu` table above looks like this:
```json
{
    "user_schema": [
        "datacenter",
        "host",
        "ts",
        "usage_user",
        "usage_system"
    ],
    "store_schema": [
        "datacenter",
        "host",
        "ts",
        "usage_user",
        "usage_system",
        "__sequence",
        "__op_type"
    ]
}
```
## StoreSchema
As described above, a [StoreSchema](https://github.com/GreptimeTeam/greptimedb/blob/9fa871a3fad07f583dc1863a509414da393747f8/src/storage/src/schema/store.rs#L36) is a schema that knows all internal columns.
```rust
struct StoreSchema {
    columns: Vec<ColumnMetadata>,
    schema: SchemaRef,
    row_key_end: usize,
    user_column_end: usize,
}
```
The columns in the `columns` and `schema` fields have the same order. The `ColumnMetadata` holds metadata such as the column id, the column family id, and a comment. The `StoreSchema` also stores this metadata in `StoreSchema::schema`, so we can convert between the `StoreSchema` and arrow's `Schema`. We use this feature to persist the `StoreSchema` in the SST, since our SST format is `Parquet`, which can take arrow's `Schema` as its schema.
The `StoreSchema` of the region above is similar to this:
```json
{
    "schema": {
        "column_schemas": [
            "datacenter",
            "host",
            "ts",
            "usage_user",
            "usage_system",
            "__sequence",
            "__op_type"
        ],
        "time_index": 2,
        "version": 0
    },
    "row_key_end": 3,
    "user_column_end": 5
}
```
The key and timestamp columns form the row key of a row. We put them together so we can use `row_key_end` to get the indices of all row key columns. Similarly, we can use `user_column_end` to get the indices of all user columns (non-internal columns).
```rust
impl StoreSchema {
    #[inline]
    pub(crate) fn row_key_indices(&self) -> impl Iterator<Item = usize> {
        0..self.row_key_end
    }

    #[inline]
    pub(crate) fn value_indices(&self) -> impl Iterator<Item = usize> {
        self.row_key_end..self.user_column_end
    }
}
```
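For the `StoreSchema` above (`row_key_end = 3`, `user_column_end = 5`), `row_key_indices()` yields `0..3` (`datacenter`, `host`, `ts`) and `value_indices()` yields `3..5` (`usage_user`, `usage_system`).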
Another useful property of `StoreSchema` is that it always contains the key columns, the timestamp column, and the internal columns, because we need them to perform merge, deduplication, and delete. Projection on a `StoreSchema` only projects the value columns.
## ProjectedSchema
To support arbitrary projection, we introduce the [ProjectedSchema](https://github.com/GreptimeTeam/greptimedb/blob/9fa871a3fad07f583dc1863a509414da393747f8/src/storage/src/schema/projected.rs#L106).
```rust
pub struct ProjectedSchema {
    projection: Option<Projection>,
    schema_to_read: StoreSchemaRef,
    projected_user_schema: SchemaRef,
}
```
We need to handle many cases while doing projection:
- The column order of the table and the region differs
- The projection can be in arbitrary order, e.g. `select usage_user, host from cpu` and `select host, usage_user from cpu` have different projection order
- We support `ALTER TABLE` so data files may have different schemas.
### Projection
Let's take an example to see how projection works. Suppose we want to select `ts` and `usage_system` from the `cpu` table.
```sql
CREATE TABLE cpu (
    ts TIMESTAMP,
    host STRING,
    usage_user DOUBLE,
    usage_system DOUBLE,
    datacenter STRING,
    TIME INDEX (ts),
    PRIMARY KEY(datacenter, host)
) ENGINE=mito;

select ts, usage_system from cpu;
```
The query engine uses the projection `[0, 3]` to scan the table. However, the columns in the region have a different order, so the table engine adjusts the projection to `[2, 4]`.
```json
{
    "user_schema": [
        "datacenter",
        "host",
        "ts",
        "usage_user",
        "usage_system"
    ]
}
```
As you can see, the output order is still `[ts, usage_system]`. This is the schema users see after projection, so we call it the `projected user schema`.
But the storage engine also needs to read the key columns, the timestamp column, and the internal columns, so the `ProjectedSchema` additionally maintains a projected `StoreSchema`.
The `Projection` struct is a helper that computes the projected user schema and store schema.
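A self-contained sketch of the index remapping above, matching columns by name (illustrative only, not the actual `Projection` implementation):
```rust
use std::collections::HashMap;

// Map a projection given in table column order to region column order.
fn remap_projection(
    table_cols: &[&str],
    region_cols: &[&str],
    projection: &[usize],
) -> Vec<usize> {
    let region_index: HashMap<&str, usize> = region_cols
        .iter()
        .enumerate()
        .map(|(i, name)| (*name, i))
        .collect();
    projection
        .iter()
        .map(|&i| region_index[table_cols[i]])
        .collect()
}

fn main() {
    let table = ["ts", "host", "usage_user", "usage_system", "datacenter"];
    let region = ["datacenter", "host", "ts", "usage_user", "usage_system"];
    // `select ts, usage_system` is [0, 3] in table order, [2, 4] in region order.
    assert_eq!(remap_projection(&table, &region, &[0, 3]), vec![2, 4]);
}
```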
So we can construct the following `ProjectedSchema`:
```json
{
    "schema_to_read": {
        "schema": {
            "column_schemas": [
                "datacenter",
                "host",
                "ts",
                "usage_system",
                "__sequence",
                "__op_type"
            ],
            "time_index": 2,
            "version": 0
        },
        "row_key_end": 3,
        "user_column_end": 4
    },
    "projected_user_schema": {
        "column_schemas": [
            "ts",
            "usage_system"
        ],
        "time_index": 0
    }
}
```
As you can see, `schema_to_read` doesn't contain the column `usage_user`, which is not intended to be read (it is not in the projection).
### ReadAdapter
As mentioned above, we can alter a table so the underlying files (SSTs) and memtables in the storage engine may have different schemas.
To simplify the logic of `ProjectedSchema`, we handle the difference between schemas before projection, i.e. before constructing the `ProjectedSchema`. We introduce the [ReadAdapter](https://github.com/GreptimeTeam/greptimedb/blob/9fa871a3fad07f583dc1863a509414da393747f8/src/storage/src/schema/compat.rs#L90), which adapts rows with different source schemas to the same expected schema.
So we can always use the current `RegionSchema` of the region to construct the `ProjectedSchema`, and then create a `ReadAdapter` for each memtable or SST.
```rust
#[derive(Debug)]
pub struct ReadAdapter {
    source_schema: StoreSchemaRef,
    dest_schema: ProjectedSchemaRef,
    indices_in_result: Vec<Option<usize>>,
    is_source_needed: Vec<bool>,
}
```
For each column required by `dest_schema`, `indices_in_result` stores the index of that column in the row read from the source memtable or SST. If the source row doesn't contain that column, the index is `None`.
The field `is_source_needed` stores whether a column in the source memtable or SST is needed.
Suppose we add a new column `usage_idle` to the table `cpu`.
```sql
ALTER TABLE cpu ADD COLUMN usage_idle DOUBLE;
```
The new `StoreSchema` becomes:
```json
{
    "schema": {
        "column_schemas": [
            "datacenter",
            "host",
            "ts",
            "usage_user",
            "usage_system",
            "usage_idle",
            "__sequence",
            "__op_type"
        ],
        "time_index": 2,
        "version": 1
    },
    "row_key_end": 3,
    "user_column_end": 6
}
```
Note that we bump the version of the schema to 1.
Suppose we want to select `ts`, `usage_system`, and `usage_idle`. While reading from a memtable or SST with the old schema, the storage engine creates a `ReadAdapter` like this:
```json
{
    "source_schema": {
        "schema": {
            "column_schemas": [
                "datacenter",
                "host",
                "ts",
                "usage_user",
                "usage_system",
                "__sequence",
                "__op_type"
            ],
            "time_index": 2,
            "version": 0
        },
        "row_key_end": 3,
        "user_column_end": 5
    },
    "dest_schema": {
        "schema_to_read": {
            "schema": {
                "column_schemas": [
                    "datacenter",
                    "host",
                    "ts",
                    "usage_system",
                    "usage_idle",
                    "__sequence",
                    "__op_type"
                ],
                "time_index": 2,
                "version": 1
            },
            "row_key_end": 3,
            "user_column_end": 5
        },
        "projected_user_schema": {
            "column_schemas": [
                "ts",
                "usage_system",
                "usage_idle"
            ],
            "time_index": 0
        }
    },
    "indices_in_result": [0, 1, 2, 3, null, 4, 5],
    "is_source_needed": [true, true, true, false, true, true, true]
}
```
We don't need to read `usage_user`, so `is_source_needed[3]` is `false`. The old schema doesn't have the column `usage_idle`, so `indices_in_result[4]` is `null`, and the `ReadAdapter` inserts a null column into the output row so that the output still contains `usage_idle`.
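A simplified sketch of how such an adapter could assemble an output row, with nulls modeled as `Option` (illustrative only, not the actual `ReadAdapter` code):
```rust
/// `source_row` is the row actually read (columns skipped via `is_source_needed`
/// are already absent), and `indices_in_result[i]` says where destination
/// column `i` lives in that row, or `None` for a column the source lacks.
fn adapt_row<T: Clone>(source_row: &[T], indices_in_result: &[Option<usize>]) -> Vec<Option<T>> {
    indices_in_result
        .iter()
        .map(|idx| idx.map(|i| source_row[i].clone()))
        .collect()
}

fn main() {
    // Old-schema row with usage_user already skipped:
    // [datacenter, host, ts, usage_system, __sequence, __op_type]
    let source = ["dc1", "host1", "t0", "0.5", "seq0", "put"];
    let indices = [Some(0), Some(1), Some(2), Some(3), None, Some(4), Some(5)];
    let out = adapt_row(&source, &indices);
    assert_eq!(out[4], None); // usage_idle is filled with null
}
```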
The figure below shows the relationship between `RegionSchema`, `StoreSchema`, `ProjectedSchema`, and `ReadAdapter`.
```text
┌──────────────────────────────┐
│ │
│ ┌────────────────────┐ │
│ │ store_schema │ │
│ │ │ │
│ │ StoreSchema │ │
│ │ version 1 │ │
│ └────────────────────┘ │
│ │
│ ┌────────────────────┐ │
│ │ user_schema │ │
│ └────────────────────┘ │
│ │
│ RegionSchema │
│ │
└──────────────┬───────────────┘
┌──────────────▼───────────────┐
│ │
│ ┌──────────────────────────┐ │
│ │ schema_to_read │ │
│ │ │ │
│ │ StoreSchema (projected) │ │
│ │ version 1 │ │
│ └──────────────────────────┘ │
┌───┤ ├───┐
│ │ ┌──────────────────────────┐ │ │
│ │ │ projected_user_schema │ │ │
│ │ └──────────────────────────┘ │ │
│ │ │ │
│ │ ProjectedSchema │ │
dest schema │ └──────────────────────────────┘ │ dest schema
│ │
│ │
┌──────▼───────┐ ┌───────▼──────┐
│ │ │ │
│ ReadAdapter │ │ ReadAdapter │
│ │ │ │
└──────▲───────┘ └───────▲──────┘
│ │
│ │
source schema │ │ source schema
│ │
┌───────┴─────────┐ ┌────────┴────────┐
│ │ │ │
│ ┌─────────────┐ │ │ ┌─────────────┐ │
│ │ │ │ │ │ │ │
│ │ StoreSchema │ │ │ │ StoreSchema │ │
│ │ │ │ │ │ │ │
│ │ version 0 │ │ │ │ version 1 │ │
│ │ │ │ │ │ │ │
│ └─────────────┘ │ │ └─────────────┘ │
│ │ │ │
│ SST 0 │ │ SST 1 │
│ │ │ │
└─────────────────┘ └─────────────────┘
```
# Conversion
This figure shows the conversion between schemas:
```text
┌─────────────┐ schema From ┌─────────────┐
│ ├──────────────────┐ ┌────────────────────────────► │
│ TableMeta │ │ │ │ RawSchema │
│ │ │ │ ┌─────────────────────────┤ │
└─────────────┘ │ │ │ TryFrom └─────────────┘
│ │ │
│ │ │
│ │ │
│ │ │
│ │ │
┌───────────────────┐ ┌─────▼──┴──▼──┐ arrow_schema() ┌─────────────────┐
│ │ │ ├─────────────────────► │
│ ColumnsMetadata │ ┌─────► Schema │ │ ArrowSchema ├──┐
│ │ │ │ ◄─────────────────────┤ │ │
└────┬───────────▲──┘ │ └───▲───▲──────┘ TryFrom └─────────────────┘ │
│ │ │ │ │ │
│ │ │ │ └────────────────────────────────────────┐ │
│ │ │ │ │ │
│ columns │ user_schema() │ │ │
│ │ │ │ projected_user_schema() schema() │
│ │ │ │ │ │
│ ┌───┴─────────────┴─┐ │ ┌────────────────────┐ │ │
columns │ │ │ └─────────────────┤ │ │ │ TryFrom
│ │ RegionSchema │ │ ProjectedSchema │ │ │
│ │ ├─────────────────────────► │ │ │
│ └─────────────────┬─┘ ProjectedSchema::new() └──────────────────┬─┘ │ │
│ │ │ │ │
│ │ │ │ │
│ │ │ │ │
│ │ │ │ │
┌────▼────────────────────┐ │ store_schema() ┌────▼───────┴──┐ │
│ │ └─────────────────────────────────────────► │ │
│ Vec<ColumnMetadata> │ │ StoreSchema ◄─────┘
│ ◄──────────────────────────────────────────────┤ │
└─────────────────────────┘ columns └───────────────┘
```

View File

@@ -30,6 +30,7 @@ pub enum PermissionReq<'a> {
PromStoreWrite,
PromStoreRead,
Otlp,
LogWrite,
}
#[derive(Debug)]

View File

@@ -52,7 +52,10 @@ fn test_permission_checker() {
let sql_result = checker.check_permission(
None,
PermissionReq::SqlStatement(&Statement::ShowDatabases(ShowDatabases::new(ShowKind::All))),
PermissionReq::SqlStatement(&Statement::ShowDatabases(ShowDatabases::new(
ShowKind::All,
false,
))),
);
assert_matches!(sql_result, Ok(PermissionResp::Reject));

View File

@@ -31,7 +31,7 @@ use datatypes::value::Value;
use datatypes::vectors::{Int64VectorBuilder, StringVectorBuilder, UInt64VectorBuilder};
use futures::{StreamExt, TryStreamExt};
use snafu::{OptionExt, ResultExt};
use store_api::storage::{ScanRequest, TableId};
use store_api::storage::{RegionId, ScanRequest, TableId};
use table::metadata::TableType;
use super::REGION_PEERS;
@@ -205,8 +205,8 @@ impl InformationSchemaRegionPeersBuilder {
table_ids.into_iter().map(|id| (id, vec![])).collect()
};
for routes in table_routes.values() {
self.add_region_peers(&predicates, routes);
for (table_id, routes) in table_routes {
self.add_region_peers(&predicates, table_id, &routes);
}
}
}
@@ -214,9 +214,14 @@ impl InformationSchemaRegionPeersBuilder {
self.finish()
}
fn add_region_peers(&mut self, predicates: &Predicates, routes: &[RegionRoute]) {
fn add_region_peers(
&mut self,
predicates: &Predicates,
table_id: TableId,
routes: &[RegionRoute],
) {
for route in routes {
let region_id = route.region.id.as_u64();
let region_id = RegionId::new(table_id, route.region.id.region_number()).as_u64();
let peer_id = route.leader_peer.clone().map(|p| p.id);
let peer_addr = route.leader_peer.clone().map(|p| p.addr);
let status = if let Some(status) = route.leader_status {

View File

@@ -17,6 +17,7 @@ use std::sync::{Arc, Weak};
use arrow_schema::SchemaRef as ArrowSchemaRef;
use common_catalog::consts::INFORMATION_SCHEMA_SCHEMATA_TABLE_ID;
use common_error::ext::BoxedError;
use common_meta::key::schema_name::SchemaNameKey;
use common_recordbatch::adapter::RecordBatchStreamAdapter;
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
use datafusion::execution::TaskContext;
@@ -32,15 +33,18 @@ use store_api::storage::{ScanRequest, TableId};
use super::SCHEMATA;
use crate::error::{
CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
CreateRecordBatchSnafu, InternalSnafu, Result, SchemaNotFoundSnafu, TableMetadataManagerSnafu,
UpgradeWeakCatalogManagerRefSnafu,
};
use crate::information_schema::{InformationTable, Predicates};
use crate::information_schema::{utils, InformationTable, Predicates};
use crate::CatalogManager;
pub const CATALOG_NAME: &str = "catalog_name";
pub const SCHEMA_NAME: &str = "schema_name";
const DEFAULT_CHARACTER_SET_NAME: &str = "default_character_set_name";
const DEFAULT_COLLATION_NAME: &str = "default_collation_name";
/// The database options
pub const SCHEMA_OPTS: &str = "options";
const INIT_CAPACITY: usize = 42;
/// The `information_schema.schemata` table implementation.
@@ -74,6 +78,7 @@ impl InformationSchemaSchemata {
false,
),
ColumnSchema::new("sql_path", ConcreteDataType::string_datatype(), true),
ColumnSchema::new(SCHEMA_OPTS, ConcreteDataType::string_datatype(), true),
]))
}
@@ -133,6 +138,7 @@ struct InformationSchemaSchemataBuilder {
charset_names: StringVectorBuilder,
collation_names: StringVectorBuilder,
sql_paths: StringVectorBuilder,
schema_options: StringVectorBuilder,
}
impl InformationSchemaSchemataBuilder {
@@ -150,6 +156,7 @@ impl InformationSchemaSchemataBuilder {
charset_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
collation_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
sql_paths: StringVectorBuilder::with_capacity(INIT_CAPACITY),
schema_options: StringVectorBuilder::with_capacity(INIT_CAPACITY),
}
}
@@ -160,21 +167,50 @@ impl InformationSchemaSchemataBuilder {
.catalog_manager
.upgrade()
.context(UpgradeWeakCatalogManagerRefSnafu)?;
let table_metadata_manager = utils::table_meta_manager(&self.catalog_manager)?;
let predicates = Predicates::from_scan_request(&request);
for schema_name in catalog_manager.schema_names(&catalog_name).await? {
self.add_schema(&predicates, &catalog_name, &schema_name);
let opts = if let Some(table_metadata_manager) = &table_metadata_manager {
let schema_opts = table_metadata_manager
.schema_manager()
.get(SchemaNameKey::new(&catalog_name, &schema_name))
.await
.context(TableMetadataManagerSnafu)?
.context(SchemaNotFoundSnafu {
catalog: &catalog_name,
schema: &schema_name,
})?;
Some(format!("{schema_opts}"))
} else {
None
};
self.add_schema(
&predicates,
&catalog_name,
&schema_name,
opts.as_deref().unwrap_or(""),
);
}
self.finish()
}
fn add_schema(&mut self, predicates: &Predicates, catalog_name: &str, schema_name: &str) {
fn add_schema(
&mut self,
predicates: &Predicates,
catalog_name: &str,
schema_name: &str,
schema_options: &str,
) {
let row = [
(CATALOG_NAME, &Value::from(catalog_name)),
(SCHEMA_NAME, &Value::from(schema_name)),
(DEFAULT_CHARACTER_SET_NAME, &Value::from("utf8")),
(DEFAULT_COLLATION_NAME, &Value::from("utf8_bin")),
(SCHEMA_OPTS, &Value::from(schema_options)),
];
if !predicates.eval(&row) {
@@ -186,6 +222,7 @@ impl InformationSchemaSchemataBuilder {
self.charset_names.push(Some("utf8"));
self.collation_names.push(Some("utf8_bin"));
self.sql_paths.push(None);
self.schema_options.push(Some(schema_options));
}
fn finish(&mut self) -> Result<RecordBatch> {
@@ -195,6 +232,7 @@ impl InformationSchemaSchemataBuilder {
Arc::new(self.charset_names.finish()),
Arc::new(self.collation_names.finish()),
Arc::new(self.sql_paths.finish()),
Arc::new(self.schema_options.finish()),
];
RecordBatch::new(self.schema.clone(), columns).context(CreateRecordBatchSnafu)
}

View File

@@ -105,7 +105,9 @@ impl InformationTable for InformationSchemaTables {
.make_tables(Some(request))
.await
.map(|x| x.into_df_record_batch())
.map_err(Into::into)
.map_err(|err| {
datafusion::error::DataFusionError::External(format!("{err:?}").into())
})
}),
));
Ok(Box::pin(

View File

@@ -15,6 +15,7 @@
use std::sync::{Arc, Weak};
use common_config::Mode;
use common_meta::key::TableMetadataManagerRef;
use meta_client::client::MetaClient;
use snafu::OptionExt;
@@ -51,3 +52,17 @@ pub fn meta_client(catalog_manager: &Weak<dyn CatalogManager>) -> Result<Option<
Ok(meta_client)
}
/// Try to get the `[TableMetadataManagerRef]` from `[CatalogManager]` weak reference.
pub fn table_meta_manager(
catalog_manager: &Weak<dyn CatalogManager>,
) -> Result<Option<TableMetadataManagerRef>> {
let catalog_manager = catalog_manager
.upgrade()
.context(UpgradeWeakCatalogManagerRefSnafu)?;
Ok(catalog_manager
.as_any()
.downcast_ref::<KvBackendCatalogManager>()
.map(|manager| manager.table_metadata_manager_ref().clone()))
}

View File

@@ -31,9 +31,11 @@ moka = { workspace = true, features = ["future"] }
parking_lot = "0.12"
prometheus.workspace = true
prost.workspace = true
query.workspace = true
rand.workspace = true
serde_json.workspace = true
snafu.workspace = true
substrait.workspace = true
tokio.workspace = true
tokio-stream = { workspace = true, features = ["net"] }
tonic.workspace = true
@@ -42,7 +44,6 @@ tonic.workspace = true
common-grpc-expr.workspace = true
datanode.workspace = true
derive-new = "0.5"
substrait.workspace = true
tracing = "0.1"
tracing-subscriber = { version = "0.3", features = ["env-filter"] }

View File

@@ -14,6 +14,7 @@
use std::sync::Arc;
use api::v1::flow::flow_client::FlowClient as PbFlowClient;
use api::v1::health_check_client::HealthCheckClient;
use api::v1::prometheus_gateway_client::PrometheusGatewayClient;
use api::v1::region::region_client::RegionClient as PbRegionClient;
@@ -183,6 +184,16 @@ impl Client {
Ok((addr, client))
}
pub(crate) fn raw_flow_client(&self) -> Result<(String, PbFlowClient<Channel>)> {
let (addr, channel) = self.find_channel()?;
let client = PbFlowClient::new(channel)
.max_decoding_message_size(self.max_grpc_recv_message_size())
.max_encoding_message_size(self.max_grpc_send_message_size())
.accept_compressed(CompressionEncoding::Zstd)
.send_compressed(CompressionEncoding::Zstd);
Ok((addr, client))
}
pub fn make_prometheus_gateway_client(&self) -> Result<PrometheusGatewayClient<Channel>> {
let (_, channel) = self.find_channel()?;
let client = PrometheusGatewayClient::new(channel)

View File

@@ -21,43 +21,45 @@ use common_meta::node_manager::{DatanodeRef, FlownodeRef, NodeManager};
use common_meta::peer::Peer;
use moka::future::{Cache, CacheBuilder};
use crate::flow::FlowRequester;
use crate::region::RegionRequester;
use crate::Client;
pub struct DatanodeClients {
pub struct NodeClients {
channel_manager: ChannelManager,
clients: Cache<Peer, Client>,
}
impl Default for DatanodeClients {
impl Default for NodeClients {
fn default() -> Self {
Self::new(ChannelConfig::new())
}
}
impl Debug for DatanodeClients {
impl Debug for NodeClients {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
f.debug_struct("DatanodeClients")
f.debug_struct("NodeClients")
.field("channel_manager", &self.channel_manager)
.finish()
}
}
#[async_trait::async_trait]
impl NodeManager for DatanodeClients {
impl NodeManager for NodeClients {
async fn datanode(&self, datanode: &Peer) -> DatanodeRef {
let client = self.get_client(datanode).await;
Arc::new(RegionRequester::new(client))
}
async fn flownode(&self, _node: &Peer) -> FlownodeRef {
// TODO(weny): Support it.
unimplemented!()
async fn flownode(&self, flownode: &Peer) -> FlownodeRef {
let client = self.get_client(flownode).await;
Arc::new(FlowRequester::new(client))
}
}
impl DatanodeClients {
impl NodeClients {
pub fn new(config: ChannelConfig) -> Self {
Self {
channel_manager: ChannelManager::with_config(config),

View File

@@ -98,6 +98,15 @@ pub enum Error {
location: Location,
},
#[snafu(display("Failed to request FlowServer {}, code: {}", addr, code))]
FlowServer {
addr: String,
code: Code,
source: BoxedError,
#[snafu(implicit)]
location: Location,
},
// Server error carried in Tonic Status's metadata.
#[snafu(display("{}", msg))]
Server {
@@ -136,7 +145,8 @@ impl ErrorExt for Error {
Error::Server { code, .. } => *code,
Error::FlightGet { source, .. }
| Error::HandleRequest { source, .. }
| Error::RegionServer { source, .. } => source.status_code(),
| Error::RegionServer { source, .. }
| Error::FlowServer { source, .. } => source.status_code(),
Error::CreateChannel { source, .. }
| Error::ConvertFlightData { source, .. }
| Error::CreateTlsChannel { source, .. } => source.status_code(),
@@ -192,6 +202,9 @@ impl Error {
} | Self::RegionServer {
code: Code::Unavailable,
..
} | Self::RegionServer {
code: Code::Unknown,
..
}
)
}

src/client/src/flow.rs (new file, 104 lines)
View File

@@ -0,0 +1,104 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use api::v1::flow::{FlowRequest, FlowResponse};
use api::v1::region::InsertRequests;
use common_error::ext::BoxedError;
use common_meta::node_manager::Flownode;
use snafu::{location, Location, ResultExt};
use crate::error::Result;
use crate::Client;
#[derive(Debug)]
pub struct FlowRequester {
client: Client,
}
#[async_trait::async_trait]
impl Flownode for FlowRequester {
async fn handle(&self, request: FlowRequest) -> common_meta::error::Result<FlowResponse> {
self.handle_inner(request)
.await
.map_err(BoxedError::new)
.context(common_meta::error::ExternalSnafu)
}
async fn handle_inserts(
&self,
request: InsertRequests,
) -> common_meta::error::Result<FlowResponse> {
self.handle_inserts_inner(request)
.await
.map_err(BoxedError::new)
.context(common_meta::error::ExternalSnafu)
}
}
impl FlowRequester {
pub fn new(client: Client) -> Self {
Self { client }
}
async fn handle_inner(&self, request: FlowRequest) -> Result<FlowResponse> {
let (addr, mut client) = self.client.raw_flow_client()?;
let response = client
.handle_create_remove(request)
.await
.map_err(|e| {
let code = e.code();
let err: crate::error::Error = e.into();
crate::error::Error::FlowServer {
addr,
code,
source: BoxedError::new(err),
location: location!(),
}
})?
.into_inner();
Ok(response)
}
async fn handle_inserts_inner(&self, request: InsertRequests) -> Result<FlowResponse> {
let (addr, mut client) = self.client.raw_flow_client()?;
let requests = api::v1::flow::InsertRequests {
requests: request
.requests
.into_iter()
.map(|insert| api::v1::flow::InsertRequest {
region_id: insert.region_id,
rows: insert.rows,
})
.collect(),
};
let response = client
.handle_mirror_request(requests)
.await
.map_err(|e| {
let code = e.code();
let err: crate::error::Error = e.into();
crate::error::Error::FlowServer {
addr,
code,
source: BoxedError::new(err),
location: location!(),
}
})?
.into_inner();
Ok(response)
}
}

View File

@@ -19,6 +19,7 @@ pub mod client_manager;
#[cfg(feature = "testing")]
mod database;
pub mod error;
pub mod flow;
pub mod load_balance;
mod metrics;
pub mod region;

View File

@@ -15,7 +15,7 @@
use std::sync::Arc;
use api::region::RegionResponse;
use api::v1::region::{QueryRequest, RegionRequest};
use api::v1::region::RegionRequest;
use api::v1::ResponseHeader;
use arc_swap::ArcSwapOption;
use arrow_flight::Ticket;
@@ -26,12 +26,15 @@ use common_error::status_code::StatusCode;
use common_grpc::flight::{FlightDecoder, FlightMessage};
use common_meta::error::{self as meta_error, Result as MetaResult};
use common_meta::node_manager::Datanode;
use common_query::request::QueryRequest;
use common_recordbatch::error::ExternalSnafu;
use common_recordbatch::{RecordBatchStreamWrapper, SendableRecordBatchStream};
use common_telemetry::error;
use common_telemetry::tracing_context::TracingContext;
use prost::Message;
use query::query_engine::DefaultSerializer;
use snafu::{location, Location, OptionExt, ResultExt};
use substrait::{DFLogicalSubstraitConvertor, SubstraitPlan};
use tokio_stream::StreamExt;
use crate::error::{
@@ -63,6 +66,17 @@ impl Datanode for RegionRequester {
}
async fn handle_query(&self, request: QueryRequest) -> MetaResult<SendableRecordBatchStream> {
let plan = DFLogicalSubstraitConvertor
.encode(&request.plan, DefaultSerializer)
.map_err(BoxedError::new)
.context(meta_error::ExternalSnafu)?
.to_vec();
let request = api::v1::region::QueryRequest {
header: request.header,
region_id: request.region_id.as_u64(),
plan,
};
let ticket = Ticket {
ticket: request.encode_to_vec().into(),
};

View File

@@ -19,11 +19,10 @@ use async_trait::async_trait;
use catalog::kvbackend::MetaKvBackend;
use clap::Parser;
use common_config::Configurable;
use common_telemetry::info;
use common_telemetry::logging::TracingOptions;
use common_telemetry::{info, warn};
use common_version::{short_version, version};
use common_wal::config::DatanodeWalConfig;
use datanode::config::DatanodeOptions;
use datanode::datanode::{Datanode, DatanodeBuilder};
use datanode::service::DatanodeServiceBuilder;
use meta_client::MetaClientOptions;
@@ -34,11 +33,13 @@ use tracing_appender::non_blocking::WorkerGuard;
use crate::error::{
LoadLayeredConfigSnafu, MissingConfigSnafu, Result, ShutdownDatanodeSnafu, StartDatanodeSnafu,
};
use crate::options::GlobalOptions;
use crate::options::{GlobalOptions, GreptimeOptions};
use crate::{log_versions, App};
pub const APP_NAME: &str = "greptime-datanode";
type DatanodeOptions = GreptimeOptions<datanode::config::DatanodeOptions>;
pub struct Instance {
datanode: Datanode,
@@ -97,7 +98,9 @@ impl Command {
}
pub fn load_options(&self, global_options: &GlobalOptions) -> Result<DatanodeOptions> {
self.subcmd.load_options(global_options)
match &self.subcmd {
SubCommand::Start(cmd) => cmd.load_options(global_options),
}
}
}
@@ -112,12 +115,6 @@ impl SubCommand {
SubCommand::Start(cmd) => cmd.build(opts).await,
}
}
fn load_options(&self, global_options: &GlobalOptions) -> Result<DatanodeOptions> {
match self {
SubCommand::Start(cmd) => cmd.load_options(global_options),
}
}
}
#[derive(Debug, Parser, Default)]
@@ -146,22 +143,26 @@ struct StartCommand {
impl StartCommand {
fn load_options(&self, global_options: &GlobalOptions) -> Result<DatanodeOptions> {
self.merge_with_cli_options(
global_options,
DatanodeOptions::load_layered_options(
self.config_file.as_deref(),
self.env_prefix.as_ref(),
)
.context(LoadLayeredConfigSnafu)?,
let mut opts = DatanodeOptions::load_layered_options(
self.config_file.as_deref(),
self.env_prefix.as_ref(),
)
.context(LoadLayeredConfigSnafu)?;
self.merge_with_cli_options(global_options, &mut opts)?;
Ok(opts)
}
// The precedence order is: cli > config file > environment variables > default values.
#[allow(deprecated)]
fn merge_with_cli_options(
&self,
global_options: &GlobalOptions,
mut opts: DatanodeOptions,
) -> Result<DatanodeOptions> {
opts: &mut DatanodeOptions,
) -> Result<()> {
let opts = &mut opts.component;
if let Some(dir) = &global_options.log_dir {
opts.logging.dir.clone_from(dir);
}
@@ -176,11 +177,32 @@ impl StartCommand {
};
if let Some(addr) = &self.rpc_addr {
opts.rpc_addr.clone_from(addr);
opts.grpc.addr.clone_from(addr);
} else if let Some(addr) = &opts.rpc_addr {
warn!("Use the deprecated attribute `DatanodeOptions.rpc_addr`, please use `grpc.addr` instead.");
opts.grpc.addr.clone_from(addr);
}
if self.rpc_hostname.is_some() {
opts.rpc_hostname.clone_from(&self.rpc_hostname);
if let Some(hostname) = &self.rpc_hostname {
opts.grpc.hostname.clone_from(hostname);
} else if let Some(hostname) = &opts.rpc_hostname {
warn!("Use the deprecated attribute `DatanodeOptions.rpc_hostname`, please use `grpc.hostname` instead.");
opts.grpc.hostname.clone_from(hostname);
}
if let Some(runtime_size) = opts.rpc_runtime_size {
warn!("Use the deprecated attribute `DatanodeOptions.rpc_runtime_size`, please use `grpc.runtime_size` instead.");
opts.grpc.runtime_size = runtime_size;
}
if let Some(max_recv_message_size) = opts.rpc_max_recv_message_size {
warn!("Use the deprecated attribute `DatanodeOptions.rpc_max_recv_message_size`, please use `grpc.max_recv_message_size` instead.");
opts.grpc.max_recv_message_size = max_recv_message_size;
}
if let Some(max_send_message_size) = opts.rpc_max_send_message_size {
warn!("Use the deprecated attribute `DatanodeOptions.rpc_max_send_message_size`, please use `grpc.max_send_message_size` instead.");
opts.grpc.max_send_message_size = max_send_message_size;
}
if let Some(node_id) = self.node_id {
@@ -231,25 +253,28 @@ impl StartCommand {
// Disable dashboard in datanode.
opts.http.disable_dashboard = true;
Ok(opts)
Ok(())
}
async fn build(&self, mut opts: DatanodeOptions) -> Result<Instance> {
async fn build(&self, opts: DatanodeOptions) -> Result<Instance> {
common_runtime::init_global_runtimes(&opts.runtime);
let guard = common_telemetry::init_global_logging(
APP_NAME,
&opts.logging,
&opts.tracing,
opts.node_id.map(|x| x.to_string()),
&opts.component.logging,
&opts.component.tracing,
opts.component.node_id.map(|x| x.to_string()),
);
log_versions(version!(), short_version!());
info!("Datanode start command: {:#?}", self);
info!("Datanode options: {:#?}", opts);
let mut opts = opts.component;
let plugins = plugins::setup_datanode_plugins(&mut opts)
.await
.context(StartDatanodeSnafu)?;
info!("Datanode start command: {:#?}", self);
info!("Datanode options: {:#?}", opts);
let node_id = opts
.node_id
.context(MissingConfigSnafu { msg: "'node_id'" })?;
@@ -299,6 +324,34 @@ mod tests {
use super::*;
use crate::options::GlobalOptions;
#[test]
fn test_deprecated_cli_options() {
common_telemetry::init_default_ut_logging();
let mut file = create_named_temp_file();
let toml_str = r#"
mode = "distributed"
enable_memory_catalog = false
node_id = 42
rpc_addr = "127.0.0.1:4001"
rpc_hostname = "192.168.0.1"
[grpc]
addr = "127.0.0.1:3001"
hostname = "127.0.0.1"
runtime_size = 8
"#;
write!(file, "{}", toml_str).unwrap();
let cmd = StartCommand {
config_file: Some(file.path().to_str().unwrap().to_string()),
..Default::default()
};
let options = cmd.load_options(&Default::default()).unwrap().component;
assert_eq!("127.0.0.1:4001".to_string(), options.grpc.addr);
assert_eq!("192.168.0.1".to_string(), options.grpc.hostname);
}
#[test]
fn test_read_from_config_file() {
let mut file = create_named_temp_file();
@@ -306,9 +359,11 @@ mod tests {
mode = "distributed"
enable_memory_catalog = false
node_id = 42
rpc_addr = "127.0.0.1:3001"
rpc_hostname = "127.0.0.1"
rpc_runtime_size = 8
[grpc]
addr = "127.0.0.1:3001"
hostname = "127.0.0.1"
runtime_size = 8
[heartbeat]
interval = "300ms"
@@ -353,9 +408,9 @@ mod tests {
..Default::default()
};
let options = cmd.load_options(&GlobalOptions::default()).unwrap();
let options = cmd.load_options(&Default::default()).unwrap().component;
assert_eq!("127.0.0.1:3001".to_string(), options.rpc_addr);
assert_eq!("127.0.0.1:3001".to_string(), options.grpc.addr);
assert_eq!(Some(42), options.node_id);
let DatanodeWalConfig::RaftEngine(raft_engine_config) = options.wal else {
@@ -414,7 +469,8 @@ mod tests {
fn test_try_from_cmd() {
let opt = StartCommand::default()
.load_options(&GlobalOptions::default())
.unwrap();
.unwrap()
.component;
assert_eq!(Mode::Standalone, opt.mode);
let opt = (StartCommand {
@@ -423,7 +479,8 @@ mod tests {
..Default::default()
})
.load_options(&GlobalOptions::default())
.unwrap();
.unwrap()
.component;
assert_eq!(Mode::Distributed, opt.mode);
assert!((StartCommand {
@@ -454,7 +511,8 @@ mod tests {
#[cfg(feature = "tokio-console")]
tokio_console_addr: None,
})
.unwrap();
.unwrap()
.component;
let logging_opt = options.logging;
assert_eq!("/tmp/greptimedb/test/logs", logging_opt.dir);
@@ -469,8 +527,8 @@ mod tests {
enable_memory_catalog = false
node_id = 42
rpc_addr = "127.0.0.1:3001"
rpc_hostname = "127.0.0.1"
rpc_runtime_size = 8
rpc_hostname = "10.103.174.219"
[meta_client]
timeout = "3s"
@@ -536,7 +594,7 @@ mod tests {
..Default::default()
};
let opts = command.load_options(&GlobalOptions::default()).unwrap();
let opts = command.load_options(&Default::default()).unwrap().component;
// Should be read from env, env > default values.
let DatanodeWalConfig::RaftEngine(raft_engine_config) = opts.wal else {
@@ -562,7 +620,11 @@ mod tests {
assert_eq!(raft_engine_config.dir.unwrap(), "/other/wal/dir");
// Should be default value.
assert_eq!(opts.http.addr, DatanodeOptions::default().http.addr);
assert_eq!(
opts.http.addr,
DatanodeOptions::default().component.http.addr
);
assert_eq!(opts.grpc.hostname, "10.103.174.219");
},
);
}

View File

@@ -19,7 +19,7 @@ use async_trait::async_trait;
use cache::{build_fundamental_cache_registry, with_default_composite_cache_registry};
use catalog::kvbackend::{CachedMetaKvBackendBuilder, KvBackendCatalogManager, MetaKvBackend};
use clap::Parser;
use client::client_manager::DatanodeClients;
use client::client_manager::NodeClients;
use common_config::Configurable;
use common_grpc::channel_manager::ChannelConfig;
use common_meta::cache::{CacheRegistryBuilder, LayeredCacheRegistryBuilder};
@@ -29,7 +29,6 @@ use common_telemetry::info;
use common_telemetry::logging::TracingOptions;
use common_time::timezone::set_default_timezone;
use common_version::{short_version, version};
use frontend::frontend::FrontendOptions;
use frontend::heartbeat::handler::invalidate_table_cache::InvalidateTableCacheHandler;
use frontend::heartbeat::HeartbeatTask;
use frontend::instance::builder::FrontendBuilder;
@@ -44,9 +43,11 @@ use tracing_appender::non_blocking::WorkerGuard;
use crate::error::{
self, InitTimezoneSnafu, LoadLayeredConfigSnafu, MissingConfigSnafu, Result, StartFrontendSnafu,
};
use crate::options::GlobalOptions;
use crate::options::{GlobalOptions, GreptimeOptions};
use crate::{log_versions, App};
type FrontendOptions = GreptimeOptions<frontend::frontend::FrontendOptions>;
pub struct Instance {
frontend: FeInstance,
@@ -164,22 +165,25 @@ pub struct StartCommand {
impl StartCommand {
fn load_options(&self, global_options: &GlobalOptions) -> Result<FrontendOptions> {
self.merge_with_cli_options(
global_options,
FrontendOptions::load_layered_options(
self.config_file.as_deref(),
self.env_prefix.as_ref(),
)
.context(LoadLayeredConfigSnafu)?,
let mut opts = FrontendOptions::load_layered_options(
self.config_file.as_deref(),
self.env_prefix.as_ref(),
)
.context(LoadLayeredConfigSnafu)?;
self.merge_with_cli_options(global_options, &mut opts)?;
Ok(opts)
}
// The precedence order is: cli > config file > environment variables > default values.
fn merge_with_cli_options(
&self,
global_options: &GlobalOptions,
mut opts: FrontendOptions,
) -> Result<FrontendOptions> {
opts: &mut FrontendOptions,
) -> Result<()> {
let opts = &mut opts.component;
if let Some(dir) = &global_options.log_dir {
opts.logging.dir.clone_from(dir);
}
@@ -242,26 +246,29 @@ impl StartCommand {
opts.user_provider.clone_from(&self.user_provider);
Ok(opts)
Ok(())
}
async fn build(&self, mut opts: FrontendOptions) -> Result<Instance> {
async fn build(&self, opts: FrontendOptions) -> Result<Instance> {
common_runtime::init_global_runtimes(&opts.runtime);
let guard = common_telemetry::init_global_logging(
APP_NAME,
&opts.logging,
&opts.tracing,
opts.node_id.clone(),
&opts.component.logging,
&opts.component.tracing,
opts.component.node_id.clone(),
);
log_versions(version!(), short_version!());
info!("Frontend start command: {:#?}", self);
info!("Frontend options: {:#?}", opts);
let mut opts = opts.component;
#[allow(clippy::unnecessary_mut_passed)]
let plugins = plugins::setup_frontend_plugins(&mut opts)
.await
.context(StartFrontendSnafu)?;
info!("Frontend start command: {:#?}", self);
info!("Frontend options: {:#?}", opts);
set_default_timezone(opts.default_timezone.as_deref()).context(InitTimezoneSnafu)?;
let meta_client_options = opts.meta_client.as_ref().context(MissingConfigSnafu {
@@ -326,7 +333,7 @@ impl StartCommand {
timeout: None,
..Default::default()
};
let client = DatanodeClients::new(channel_config);
let client = NodeClients::new(channel_config);
let mut instance = FrontendBuilder::new(
cached_meta_backend.clone(),
@@ -363,7 +370,7 @@ mod tests {
use common_base::readable_size::ReadableSize;
use common_config::ENV_VAR_SEP;
use common_test_util::temp_dir::create_named_temp_file;
use frontend::service_config::GrpcOptions;
use servers::grpc::GrpcOptions;
use servers::http::HttpOptions;
use super::*;
@@ -380,14 +387,14 @@ mod tests {
..Default::default()
};
let opts = command.load_options(&GlobalOptions::default()).unwrap();
let opts = command.load_options(&Default::default()).unwrap().component;
assert_eq!(opts.http.addr, "127.0.0.1:1234");
assert_eq!(ReadableSize::mb(64), opts.http.body_limit);
assert_eq!(opts.mysql.addr, "127.0.0.1:5678");
assert_eq!(opts.postgres.addr, "127.0.0.1:5432");
let default_opts = FrontendOptions::default();
let default_opts = FrontendOptions::default().component;
assert_eq!(opts.grpc.addr, default_opts.grpc.addr);
assert!(opts.mysql.enable);
@@ -428,7 +435,8 @@ mod tests {
..Default::default()
};
let fe_opts = command.load_options(&GlobalOptions::default()).unwrap();
let fe_opts = command.load_options(&Default::default()).unwrap().component;
assert_eq!(Mode::Distributed, fe_opts.mode);
assert_eq!("127.0.0.1:4000".to_string(), fe_opts.http.addr);
assert_eq!(Duration::from_secs(30), fe_opts.http.timeout);
@@ -442,7 +450,7 @@ mod tests {
#[tokio::test]
async fn test_try_from_start_command_to_anymap() {
let mut fe_opts = FrontendOptions {
let mut fe_opts = frontend::frontend::FrontendOptions {
http: HttpOptions {
disable_dashboard: false,
..Default::default()
@@ -479,7 +487,8 @@ mod tests {
#[cfg(feature = "tokio-console")]
tokio_console_addr: None,
})
.unwrap();
.unwrap()
.component;
let logging_opt = options.logging;
assert_eq!("/tmp/greptimedb/test/logs", logging_opt.dir);
@@ -557,7 +566,7 @@ mod tests {
..Default::default()
};
let fe_opts = command.load_options(&GlobalOptions::default()).unwrap();
let fe_opts = command.load_options(&Default::default()).unwrap().component;
// Should be read from env, env > default values.
assert_eq!(fe_opts.mysql.runtime_size, 11);

View File

@@ -21,14 +21,15 @@ use common_telemetry::info;
use common_telemetry::logging::TracingOptions;
use common_version::{short_version, version};
use meta_srv::bootstrap::MetasrvInstance;
use meta_srv::metasrv::MetasrvOptions;
use snafu::ResultExt;
use tracing_appender::non_blocking::WorkerGuard;
use crate::error::{self, LoadLayeredConfigSnafu, Result, StartMetaServerSnafu};
use crate::options::GlobalOptions;
use crate::options::{GlobalOptions, GreptimeOptions};
use crate::{log_versions, App};
type MetasrvOptions = GreptimeOptions<meta_srv::metasrv::MetasrvOptions>;
pub const APP_NAME: &str = "greptime-metasrv";
pub struct Instance {
@@ -139,22 +140,25 @@ struct StartCommand {
impl StartCommand {
fn load_options(&self, global_options: &GlobalOptions) -> Result<MetasrvOptions> {
self.merge_with_cli_options(
global_options,
MetasrvOptions::load_layered_options(
self.config_file.as_deref(),
self.env_prefix.as_ref(),
)
.context(LoadLayeredConfigSnafu)?,
let mut opts = MetasrvOptions::load_layered_options(
self.config_file.as_deref(),
self.env_prefix.as_ref(),
)
.context(LoadLayeredConfigSnafu)?;
self.merge_with_cli_options(global_options, &mut opts)?;
Ok(opts)
}
// The precedence order is: cli > config file > environment variables > default values.
fn merge_with_cli_options(
&self,
global_options: &GlobalOptions,
mut opts: MetasrvOptions,
) -> Result<MetasrvOptions> {
opts: &mut MetasrvOptions,
) -> Result<()> {
let opts = &mut opts.component;
if let Some(dir) = &global_options.log_dir {
opts.logging.dir.clone_from(dir);
}
@@ -217,21 +221,28 @@ impl StartCommand {
// Disable dashboard in metasrv.
opts.http.disable_dashboard = true;
Ok(opts)
Ok(())
}
async fn build(&self, mut opts: MetasrvOptions) -> Result<Instance> {
let guard =
common_telemetry::init_global_logging(APP_NAME, &opts.logging, &opts.tracing, None);
log_versions(version!(), short_version!());
async fn build(&self, opts: MetasrvOptions) -> Result<Instance> {
common_runtime::init_global_runtimes(&opts.runtime);
let plugins = plugins::setup_metasrv_plugins(&mut opts)
.await
.context(StartMetaServerSnafu)?;
let guard = common_telemetry::init_global_logging(
APP_NAME,
&opts.component.logging,
&opts.component.tracing,
None,
);
log_versions(version!(), short_version!());
info!("Metasrv start command: {:#?}", self);
info!("Metasrv options: {:#?}", opts);
let mut opts = opts.component;
let plugins = plugins::setup_metasrv_plugins(&mut opts)
.await
.context(StartMetaServerSnafu)?;
let builder = meta_srv::bootstrap::metasrv_builder(&opts, plugins.clone(), None)
.await
.context(error::BuildMetaServerSnafu)?;
@@ -266,7 +277,7 @@ mod tests {
..Default::default()
};
let options = cmd.load_options(&GlobalOptions::default()).unwrap();
let options = cmd.load_options(&Default::default()).unwrap().component;
assert_eq!("127.0.0.1:3002".to_string(), options.bind_addr);
assert_eq!(vec!["127.0.0.1:2380".to_string()], options.store_addrs);
assert_eq!(SelectorType::LoadBased, options.selector);
@@ -299,7 +310,7 @@ mod tests {
..Default::default()
};
let options = cmd.load_options(&GlobalOptions::default()).unwrap();
let options = cmd.load_options(&Default::default()).unwrap().component;
assert_eq!("127.0.0.1:3002".to_string(), options.bind_addr);
assert_eq!("127.0.0.1:3002".to_string(), options.server_addr);
assert_eq!(vec!["127.0.0.1:2379".to_string()], options.store_addrs);
@@ -349,7 +360,8 @@ mod tests {
#[cfg(feature = "tokio-console")]
tokio_console_addr: None,
})
.unwrap();
.unwrap()
.component;
let logging_opt = options.logging;
assert_eq!("/tmp/greptimedb/test/logs", logging_opt.dir);
@@ -406,7 +418,7 @@ mod tests {
..Default::default()
};
let opts = command.load_options(&GlobalOptions::default()).unwrap();
let opts = command.load_options(&Default::default()).unwrap().component;
// Should be read from env, env > default values.
assert_eq!(opts.bind_addr, "127.0.0.1:14002");

View File

@@ -13,6 +13,9 @@
// limitations under the License.
use clap::Parser;
use common_config::Configurable;
use common_runtime::global::RuntimeOptions;
use serde::{Deserialize, Serialize};
#[derive(Parser, Default, Debug, Clone)]
pub struct GlobalOptions {
@@ -29,3 +32,22 @@ pub struct GlobalOptions {
#[arg(global = true)]
pub tokio_console_addr: Option<String>,
}
// TODO(LFC): Move logging and tracing options into global options, like the runtime options.
/// All the options of GreptimeDB.
#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq)]
#[serde(default)]
pub struct GreptimeOptions<T> {
/// The runtime options.
pub runtime: RuntimeOptions,
/// The options of each component (like Datanode or Standalone) of GreptimeDB.
#[serde(flatten)]
pub component: T,
}
impl<T: Configurable> Configurable for GreptimeOptions<T> {
fn env_list_keys() -> Option<&'static [&'static str]> {
T::env_list_keys()
}
}

View File

@@ -31,6 +31,7 @@ use common_meta::key::flow::{FlowMetadataManager, FlowMetadataManagerRef};
use common_meta::key::{TableMetadataManager, TableMetadataManagerRef};
use common_meta::kv_backend::KvBackendRef;
use common_meta::node_manager::NodeManagerRef;
use common_meta::peer::StandalonePeerLookupService;
use common_meta::region_keeper::MemoryRegionKeeper;
use common_meta::sequence::SequenceBuilder;
use common_meta::wal_options_allocator::{WalOptionsAllocator, WalOptionsAllocatorRef};
@@ -49,12 +50,13 @@ use frontend::instance::builder::FrontendBuilder;
use frontend::instance::{FrontendInstance, Instance as FeInstance, StandaloneDatanodeManager};
use frontend::server::Services;
use frontend::service_config::{
GrpcOptions, InfluxdbOptions, MysqlOptions, OpentsdbOptions, PostgresOptions, PromStoreOptions,
InfluxdbOptions, MysqlOptions, OpentsdbOptions, PostgresOptions, PromStoreOptions,
};
use meta_srv::metasrv::{FLOW_ID_SEQ, TABLE_ID_SEQ};
use mito2::config::MitoConfig;
use serde::{Deserialize, Serialize};
use servers::export_metrics::ExportMetricsOption;
use servers::grpc::GrpcOptions;
use servers::http::HttpOptions;
use servers::tls::{TlsMode, TlsOption};
use servers::Mode;
@@ -67,7 +69,7 @@ use crate::error::{
ShutdownFrontendSnafu, StartDatanodeSnafu, StartFrontendSnafu, StartProcedureManagerSnafu,
StartWalOptionsAllocatorSnafu, StopProcedureManagerSnafu,
};
use crate::options::GlobalOptions;
use crate::options::{GlobalOptions, GreptimeOptions};
use crate::{log_versions, App};
pub const APP_NAME: &str = "greptime-standalone";
@@ -79,11 +81,14 @@ pub struct Command {
}
impl Command {
pub async fn build(&self, opts: StandaloneOptions) -> Result<Instance> {
pub async fn build(&self, opts: GreptimeOptions<StandaloneOptions>) -> Result<Instance> {
self.subcmd.build(opts).await
}
pub fn load_options(&self, global_options: &GlobalOptions) -> Result<StandaloneOptions> {
pub fn load_options(
&self,
global_options: &GlobalOptions,
) -> Result<GreptimeOptions<StandaloneOptions>> {
self.subcmd.load_options(global_options)
}
}
@@ -94,20 +99,23 @@ enum SubCommand {
}
impl SubCommand {
async fn build(&self, opts: StandaloneOptions) -> Result<Instance> {
async fn build(&self, opts: GreptimeOptions<StandaloneOptions>) -> Result<Instance> {
match self {
SubCommand::Start(cmd) => cmd.build(opts).await,
}
}
fn load_options(&self, global_options: &GlobalOptions) -> Result<StandaloneOptions> {
fn load_options(
&self,
global_options: &GlobalOptions,
) -> Result<GreptimeOptions<StandaloneOptions>> {
match self {
SubCommand::Start(cmd) => cmd.load_options(global_options),
}
}
}
#[derive(Clone, Debug, Serialize, Deserialize)]
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)]
#[serde(default)]
pub struct StandaloneOptions {
pub mode: Mode,
@@ -161,7 +169,7 @@ impl Default for StandaloneOptions {
}
}
impl Configurable<'_> for StandaloneOptions {
impl Configurable for StandaloneOptions {
fn env_list_keys() -> Option<&'static [&'static str]> {
Some(&["wal.broker_endpoints"])
}
@@ -197,7 +205,7 @@ impl StandaloneOptions {
wal: cloned_opts.wal.into(),
storage: cloned_opts.storage,
region_engine: cloned_opts.region_engine,
rpc_addr: cloned_opts.grpc.addr,
grpc: cloned_opts.grpc,
..Default::default()
}
}
@@ -291,23 +299,27 @@ pub struct StartCommand {
}
impl StartCommand {
fn load_options(&self, global_options: &GlobalOptions) -> Result<StandaloneOptions> {
self.merge_with_cli_options(
global_options,
StandaloneOptions::load_layered_options(
self.config_file.as_deref(),
self.env_prefix.as_ref(),
)
.context(LoadLayeredConfigSnafu)?,
fn load_options(
&self,
global_options: &GlobalOptions,
) -> Result<GreptimeOptions<StandaloneOptions>> {
let mut opts = GreptimeOptions::<StandaloneOptions>::load_layered_options(
self.config_file.as_deref(),
self.env_prefix.as_ref(),
)
.context(LoadLayeredConfigSnafu)?;
self.merge_with_cli_options(global_options, &mut opts.component)?;
Ok(opts)
}
// The precedence order is: cli > config file > environment variables > default values.
pub fn merge_with_cli_options(
&self,
global_options: &GlobalOptions,
mut opts: StandaloneOptions,
) -> Result<StandaloneOptions> {
opts: &mut StandaloneOptions,
) -> Result<()> {
// Should always be standalone mode.
opts.mode = Mode::Standalone;
@@ -340,7 +352,7 @@ impl StartCommand {
if let Some(addr) = &self.rpc_addr {
// The frontend gRPC addr conflicts with the datanode's default gRPC addr.
let datanode_grpc_addr = DatanodeOptions::default().rpc_addr;
let datanode_grpc_addr = DatanodeOptions::default().grpc.addr;
if addr.eq(&datanode_grpc_addr) {
return IllegalConfigSnafu {
msg: format!(
@@ -369,20 +381,27 @@ impl StartCommand {
opts.user_provider.clone_from(&self.user_provider);
Ok(opts)
Ok(())
}
#[allow(unreachable_code)]
#[allow(unused_variables)]
#[allow(clippy::diverging_sub_expression)]
async fn build(&self, opts: StandaloneOptions) -> Result<Instance> {
let guard =
common_telemetry::init_global_logging(APP_NAME, &opts.logging, &opts.tracing, None);
async fn build(&self, opts: GreptimeOptions<StandaloneOptions>) -> Result<Instance> {
common_runtime::init_global_runtimes(&opts.runtime);
let guard = common_telemetry::init_global_logging(
APP_NAME,
&opts.component.logging,
&opts.component.tracing,
None,
);
log_versions(version!(), short_version!());
info!("Standalone start command: {:#?}", self);
info!("Building standalone instance with {opts:#?}");
info!("Standalone options: {opts:#?}");
let opts = opts.component;
let mut fe_opts = opts.frontend_options();
#[allow(clippy::unnecessary_mut_passed)]
let fe_plugins = plugins::setup_frontend_plugins(&mut fe_opts) // mut ref is MUST, DO NOT change it
@@ -437,9 +456,11 @@ impl StartCommand {
);
let flownode = Arc::new(flow_builder.build().await);
let builder =
DatanodeBuilder::new(dn_opts, fe_plugins.clone()).with_kv_backend(kv_backend.clone());
let datanode = builder.build().await.context(StartDatanodeSnafu)?;
let datanode = DatanodeBuilder::new(dn_opts, fe_plugins.clone())
.with_kv_backend(kv_backend.clone())
.build()
.await
.context(StartDatanodeSnafu)?;
let node_manager = Arc::new(StandaloneDatanodeManager {
region_server: datanode.region_server(),
@@ -537,6 +558,7 @@ impl StartCommand {
table_metadata_allocator,
flow_metadata_manager,
flow_metadata_allocator,
peer_lookup_service: Arc::new(StandalonePeerLookupService::new()),
},
procedure_manager,
true,
@@ -664,7 +686,10 @@ mod tests {
..Default::default()
};
let options = cmd.load_options(&GlobalOptions::default()).unwrap();
let options = cmd
.load_options(&GlobalOptions::default())
.unwrap()
.component;
let fe_opts = options.frontend_options();
let dn_opts = options.datanode_options();
let logging_opts = options.logging;
@@ -725,7 +750,8 @@ mod tests {
#[cfg(feature = "tokio-console")]
tokio_console_addr: None,
})
.unwrap();
.unwrap()
.component;
assert_eq!("/tmp/greptimedb/test/logs", opts.logging.dir);
assert_eq!("debug", opts.logging.level.unwrap());
@@ -787,7 +813,7 @@ mod tests {
..Default::default()
};
let opts = command.load_options(&GlobalOptions::default()).unwrap();
let opts = command.load_options(&Default::default()).unwrap().component;
// Should be read from env, env > default values.
assert_eq!(opts.logging.dir, "/other/log/dir");
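
Since `env_list_keys()` names `wal.broker_endpoints` as a list-valued key, that option can also be supplied through a single environment variable, split on the `,` list separator with `__` as the level separator. A sketch (the `STANDALONE` prefix stands in for whatever `env_prefix` the CLI passes; `set_var` is safe in the 2021 edition assumed here):

// Hypothetical: set before load_options(); the loader splits the value on ","
// because "wal.broker_endpoints" appears in env_list_keys().
std::env::set_var(
    "STANDALONE__WAL__BROKER_ENDPOINTS",
    "127.0.0.1:9092,127.0.0.1:9093",
);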

View File

@@ -0,0 +1,241 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::time::Duration;
use cmd::options::GreptimeOptions;
use cmd::standalone::StandaloneOptions;
use common_base::readable_size::ReadableSize;
use common_config::Configurable;
use common_grpc::channel_manager::{
DEFAULT_MAX_GRPC_RECV_MESSAGE_SIZE, DEFAULT_MAX_GRPC_SEND_MESSAGE_SIZE,
};
use common_runtime::global::RuntimeOptions;
use common_telemetry::logging::LoggingOptions;
use common_wal::config::raft_engine::RaftEngineConfig;
use common_wal::config::{DatanodeWalConfig, StandaloneWalConfig};
use datanode::config::{DatanodeOptions, RegionEngineConfig, StorageConfig};
use frontend::frontend::FrontendOptions;
use frontend::service_config::datanode::DatanodeClientOptions;
use meta_client::MetaClientOptions;
use meta_srv::metasrv::MetasrvOptions;
use meta_srv::selector::SelectorType;
use mito2::config::MitoConfig;
use servers::export_metrics::ExportMetricsOption;
use servers::grpc::GrpcOptions;
#[allow(deprecated)]
#[test]
fn test_load_datanode_example_config() {
let example_config = common_test_util::find_workspace_path("config/datanode.example.toml");
let options =
GreptimeOptions::<DatanodeOptions>::load_layered_options(example_config.to_str(), "")
.unwrap();
let expected = GreptimeOptions::<DatanodeOptions> {
runtime: RuntimeOptions {
read_rt_size: 8,
write_rt_size: 8,
bg_rt_size: 4,
},
component: DatanodeOptions {
node_id: Some(42),
meta_client: Some(MetaClientOptions {
metasrv_addrs: vec!["127.0.0.1:3002".to_string()],
timeout: Duration::from_secs(3),
heartbeat_timeout: Duration::from_millis(500),
ddl_timeout: Duration::from_secs(10),
connect_timeout: Duration::from_secs(1),
tcp_nodelay: true,
metadata_cache_max_capacity: 100000,
metadata_cache_ttl: Duration::from_secs(600),
metadata_cache_tti: Duration::from_secs(300),
}),
wal: DatanodeWalConfig::RaftEngine(RaftEngineConfig {
dir: Some("/tmp/greptimedb/wal".to_string()),
sync_period: Some(Duration::from_secs(10)),
..Default::default()
}),
storage: StorageConfig {
data_home: "/tmp/greptimedb/".to_string(),
..Default::default()
},
region_engine: vec![RegionEngineConfig::Mito(MitoConfig {
num_workers: 8,
auto_flush_interval: Duration::from_secs(3600),
scan_parallelism: 0,
global_write_buffer_size: ReadableSize::gb(1),
global_write_buffer_reject_size: ReadableSize::gb(2),
sst_meta_cache_size: ReadableSize::mb(128),
vector_cache_size: ReadableSize::mb(512),
page_cache_size: ReadableSize::mb(512),
max_background_jobs: 4,
..Default::default()
})],
logging: LoggingOptions {
level: Some("info".to_string()),
otlp_endpoint: Some("".to_string()),
tracing_sample_ratio: Some(Default::default()),
..Default::default()
},
export_metrics: ExportMetricsOption {
self_import: Some(Default::default()),
remote_write: Some(Default::default()),
..Default::default()
},
grpc: GrpcOptions::default().with_addr("127.0.0.1:3001"),
rpc_addr: Some("127.0.0.1:3001".to_string()),
rpc_hostname: Some("127.0.0.1".to_string()),
rpc_runtime_size: Some(8),
rpc_max_recv_message_size: Some(DEFAULT_MAX_GRPC_RECV_MESSAGE_SIZE),
rpc_max_send_message_size: Some(DEFAULT_MAX_GRPC_SEND_MESSAGE_SIZE),
..Default::default()
},
};
assert_eq!(options, expected);
}
#[test]
fn test_load_frontend_example_config() {
let example_config = common_test_util::find_workspace_path("config/frontend.example.toml");
let options =
GreptimeOptions::<FrontendOptions>::load_layered_options(example_config.to_str(), "")
.unwrap();
let expected = GreptimeOptions::<FrontendOptions> {
runtime: RuntimeOptions {
read_rt_size: 8,
write_rt_size: 8,
bg_rt_size: 4,
},
component: FrontendOptions {
default_timezone: Some("UTC".to_string()),
meta_client: Some(MetaClientOptions {
metasrv_addrs: vec!["127.0.0.1:3002".to_string()],
timeout: Duration::from_secs(3),
heartbeat_timeout: Duration::from_millis(500),
ddl_timeout: Duration::from_secs(10),
connect_timeout: Duration::from_secs(1),
tcp_nodelay: true,
metadata_cache_max_capacity: 100000,
metadata_cache_ttl: Duration::from_secs(600),
metadata_cache_tti: Duration::from_secs(300),
}),
logging: LoggingOptions {
level: Some("info".to_string()),
otlp_endpoint: Some("".to_string()),
tracing_sample_ratio: Some(Default::default()),
..Default::default()
},
datanode: frontend::service_config::DatanodeOptions {
client: DatanodeClientOptions {
connect_timeout: Duration::from_secs(10),
tcp_nodelay: true,
},
},
export_metrics: ExportMetricsOption {
self_import: Some(Default::default()),
remote_write: Some(Default::default()),
..Default::default()
},
..Default::default()
},
};
assert_eq!(options, expected);
}
#[test]
fn test_load_metasrv_example_config() {
let example_config = common_test_util::find_workspace_path("config/metasrv.example.toml");
let options =
GreptimeOptions::<MetasrvOptions>::load_layered_options(example_config.to_str(), "")
.unwrap();
let expected = GreptimeOptions::<MetasrvOptions> {
runtime: RuntimeOptions {
read_rt_size: 8,
write_rt_size: 8,
bg_rt_size: 4,
},
component: MetasrvOptions {
selector: SelectorType::LeaseBased,
data_home: "/tmp/metasrv/".to_string(),
logging: LoggingOptions {
dir: "/tmp/greptimedb/logs".to_string(),
level: Some("info".to_string()),
otlp_endpoint: Some("".to_string()),
tracing_sample_ratio: Some(Default::default()),
..Default::default()
},
export_metrics: ExportMetricsOption {
self_import: Some(Default::default()),
remote_write: Some(Default::default()),
..Default::default()
},
..Default::default()
},
};
assert_eq!(options, expected);
}
#[test]
fn test_load_standalone_example_config() {
let example_config = common_test_util::find_workspace_path("config/standalone.example.toml");
let options =
GreptimeOptions::<StandaloneOptions>::load_layered_options(example_config.to_str(), "")
.unwrap();
let expected = GreptimeOptions::<StandaloneOptions> {
runtime: RuntimeOptions {
read_rt_size: 8,
write_rt_size: 8,
bg_rt_size: 4,
},
component: StandaloneOptions {
default_timezone: Some("UTC".to_string()),
wal: StandaloneWalConfig::RaftEngine(RaftEngineConfig {
dir: Some("/tmp/greptimedb/wal".to_string()),
sync_period: Some(Duration::from_secs(10)),
..Default::default()
}),
region_engine: vec![RegionEngineConfig::Mito(MitoConfig {
num_workers: 8,
auto_flush_interval: Duration::from_secs(3600),
scan_parallelism: 0,
global_write_buffer_size: ReadableSize::gb(1),
global_write_buffer_reject_size: ReadableSize::gb(2),
sst_meta_cache_size: ReadableSize::mb(128),
vector_cache_size: ReadableSize::mb(512),
page_cache_size: ReadableSize::mb(512),
max_background_jobs: 4,
..Default::default()
})],
storage: StorageConfig {
data_home: "/tmp/greptimedb/".to_string(),
..Default::default()
},
logging: LoggingOptions {
level: Some("info".to_string()),
otlp_endpoint: Some("".to_string()),
tracing_sample_ratio: Some(Default::default()),
..Default::default()
},
export_metrics: ExportMetricsOption {
self_import: Some(Default::default()),
remote_write: Some(Default::default()),
..Default::default()
},
..Default::default()
},
};
assert_eq!(options, expected);
}

View File

@@ -81,16 +81,17 @@ impl PartialEq<Bytes> for [u8] {
/// Now this buffer is restricted to hold only valid UTF-8 strings (we only allow constructing `StringBytes`
/// from String or str). We may support other encodings in the future.
#[derive(Debug, Default, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)]
pub struct StringBytes(bytes::Bytes);
pub struct StringBytes(String);
impl StringBytes {
/// Views this string as a UTF-8 string slice.
///
/// # Safety
/// We only allow constructing `StringBytes` from String/str, so the inner
/// buffer must hold valid UTF-8.
pub fn as_utf8(&self) -> &str {
unsafe { std::str::from_utf8_unchecked(&self.0) }
&self.0
}
/// Convert this string into owned UTF-8 string.
pub fn into_string(self) -> String {
self.0
}
pub fn len(&self) -> usize {
@@ -104,37 +105,37 @@ impl StringBytes {
impl From<String> for StringBytes {
fn from(string: String) -> StringBytes {
StringBytes(bytes::Bytes::from(string))
StringBytes(string)
}
}
impl From<&str> for StringBytes {
fn from(string: &str) -> StringBytes {
StringBytes(bytes::Bytes::copy_from_slice(string.as_bytes()))
StringBytes(string.to_string())
}
}
impl PartialEq<String> for StringBytes {
fn eq(&self, other: &String) -> bool {
self.0 == other.as_bytes()
&self.0 == other
}
}
impl PartialEq<StringBytes> for String {
fn eq(&self, other: &StringBytes) -> bool {
self.as_bytes() == other.0
self == &other.0
}
}
impl PartialEq<str> for StringBytes {
fn eq(&self, other: &str) -> bool {
self.0 == other.as_bytes()
self.0.as_str() == other
}
}
impl PartialEq<StringBytes> for str {
fn eq(&self, other: &StringBytes) -> bool {
self.as_bytes() == other.0
self == other.0
}
}
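
A small usage sketch of the String-backed type (assuming `StringBytes` is in scope); call sites behave as before, but `as_utf8` no longer relies on `unsafe`:

fn demo() {
    let s = StringBytes::from("hello");
    // Now a plain borrow of the inner String, no unsafe UTF-8 assumption.
    assert_eq!(s.as_utf8(), "hello");
    // Equality against String goes through the PartialEq impls above.
    assert!(s == String::from("hello"));
    // Consumes the value and hands back the owned String.
    assert_eq!(s.into_string(), "hello");
}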

View File

@@ -13,7 +13,8 @@
// limitations under the License.
use config::{Environment, File, FileFormat};
use serde::{Deserialize, Serialize};
use serde::de::DeserializeOwned;
use serde::Serialize;
use snafu::ResultExt;
use crate::error::{LoadLayeredConfigSnafu, Result, SerdeJsonSnafu, TomlFormatSnafu};
@@ -25,7 +26,7 @@ pub const ENV_VAR_SEP: &str = "__";
pub const ENV_LIST_SEP: &str = ",";
/// Configuration trait defines the common interface for configuration that can be loaded from multiple sources and serialized to TOML.
pub trait Configurable<'de>: Serialize + Deserialize<'de> + Default + Sized {
pub trait Configurable: Serialize + DeserializeOwned + Default + Sized {
/// Load the configuration from multiple sources and merge them.
/// The precedence order is: config file > environment variables > default values.
/// `env_prefix` is the prefix of environment variables, e.g. "FRONTEND__xxx".
@@ -128,7 +129,7 @@ mod tests {
}
}
impl Configurable<'_> for TestDatanodeConfig {
impl Configurable for TestDatanodeConfig {
fn env_list_keys() -> Option<&'static [&'static str]> {
Some(&["meta_client.metasrv_addrs"])
}
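
The switch from `Deserialize<'de>` to `DeserializeOwned` drops the lifetime parameter from the trait, which matters because layered config sources (files, environment variables) are read into temporary buffers. A minimal sketch of why the owned bound fits, assuming the `toml` crate:

use serde::de::DeserializeOwned;

// With Deserialize<'de>, T could borrow from `text`, which dies when the
// caller drops its buffer; DeserializeOwned guarantees T owns all its data.
fn from_transient_buffer<T: DeserializeOwned>(text: &str) -> Result<T, toml::de::Error> {
    toml::from_str(text)
}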

View File

@@ -20,6 +20,7 @@ async-compression = { version = "0.3", features = [
] }
async-trait.workspace = true
bytes.workspace = true
common-base.workspace = true
common-error.workspace = true
common-macro.workspace = true
common-recordbatch.workspace = true
@@ -33,6 +34,7 @@ object-store.workspace = true
orc-rust = { git = "https://github.com/datafusion-contrib/datafusion-orc.git", rev = "502217315726314c4008808fe169764529640599" }
parquet.workspace = true
paste = "1.0"
rand.workspace = true
regex = "1.7"
serde.workspace = true
snafu.workspace = true
@@ -42,4 +44,7 @@ tokio-util.workspace = true
url = "2.3"
[dev-dependencies]
common-telemetry.workspace = true
common-test-util.workspace = true
dotenv.workspace = true
uuid.workspace = true

View File

@@ -92,34 +92,44 @@ impl CompressionType {
macro_rules! impl_compression_type {
($(($enum_item:ident, $prefix:ident)),*) => {
paste::item! {
use bytes::{Buf, BufMut, BytesMut};
impl CompressionType {
pub async fn encode(&self, content: impl AsRef<[u8]>) -> io::Result<Vec<u8>> {
pub async fn encode<B: Buf>(&self, mut content: B) -> io::Result<Vec<u8>> {
match self {
$(
CompressionType::$enum_item => {
let mut buffer = Vec::with_capacity(content.as_ref().len());
let mut buffer = Vec::with_capacity(content.remaining());
let mut encoder = write::[<$prefix Encoder>]::new(&mut buffer);
encoder.write_all(content.as_ref()).await?;
encoder.write_all_buf(&mut content).await?;
encoder.shutdown().await?;
Ok(buffer)
}
)*
CompressionType::Uncompressed => Ok(content.as_ref().to_vec()),
CompressionType::Uncompressed => {
let mut bs = BytesMut::with_capacity(content.remaining());
bs.put(content);
Ok(bs.to_vec())
},
}
}
pub async fn decode(&self, content: impl AsRef<[u8]>) -> io::Result<Vec<u8>> {
pub async fn decode<B: Buf>(&self, mut content: B) -> io::Result<Vec<u8>> {
match self {
$(
CompressionType::$enum_item => {
let mut buffer = Vec::with_capacity(content.as_ref().len() * 2);
let mut buffer = Vec::with_capacity(content.remaining() * 2);
let mut encoder = write::[<$prefix Decoder>]::new(&mut buffer);
encoder.write_all(content.as_ref()).await?;
encoder.write_all_buf(&mut content).await?;
encoder.shutdown().await?;
Ok(buffer)
}
)*
CompressionType::Uncompressed => Ok(content.as_ref().to_vec()),
CompressionType::Uncompressed => {
let mut bs = BytesMut::with_capacity(content.remaining());
bs.put(content);
Ok(bs.to_vec())
},
}
}
@@ -151,13 +161,13 @@ macro_rules! impl_compression_type {
$(
#[tokio::test]
async fn [<test_ $enum_item:lower _compression>]() {
let string = "foo_bar".as_bytes().to_vec();
let string = "foo_bar".as_bytes();
let compress = CompressionType::$enum_item
.encode(&string)
.encode(string)
.await
.unwrap();
let decompress = CompressionType::$enum_item
.decode(&compress)
.decode(compress.as_slice())
.await
.unwrap();
assert_eq!(decompress, string);
@@ -165,13 +175,13 @@ macro_rules! impl_compression_type {
#[tokio::test]
async fn test_uncompression() {
let string = "foo_bar".as_bytes().to_vec();
let string = "foo_bar".as_bytes();
let compress = CompressionType::Uncompressed
.encode(&string)
.encode(string)
.await
.unwrap();
let decompress = CompressionType::Uncompressed
.decode(&compress)
.decode(compress.as_slice())
.await
.unwrap();
assert_eq!(decompress, string);
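
Because `encode`/`decode` are now generic over `bytes::Buf`, callers may pass slices, `Bytes`, or anything else implementing `Buf` without first materializing a `Vec<u8>`. A usage sketch (the `Gzip` variant is assumed from the macro's typical instantiation):

use bytes::Bytes;

async fn roundtrip() -> std::io::Result<()> {
    // &[u8] implements Buf, so a borrowed slice works directly.
    let compressed = CompressionType::Gzip.encode("foo_bar".as_bytes()).await?;
    // Bytes implements Buf too, avoiding an extra copy for owned buffers.
    let decoded = CompressionType::Gzip.decode(Bytes::from(compressed)).await?;
    assert_eq!(decoded, b"foo_bar");
    Ok(())
}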

View File

@@ -36,6 +36,7 @@ use datafusion::physical_plan::SendableRecordBatchStream;
use futures::StreamExt;
use object_store::ObjectStore;
use snafu::ResultExt;
use tokio_util::compat::FuturesAsyncWriteCompatExt;
use self::csv::CsvFormat;
use self::json::JsonFormat;
@@ -45,6 +46,7 @@ use crate::buffered_writer::{DfRecordBatchEncoder, LazyBufferedWriter};
use crate::compression::CompressionType;
use crate::error::{self, Result};
use crate::share_buffer::SharedBuffer;
use crate::DEFAULT_WRITE_BUFFER_SIZE;
pub const FORMAT_COMPRESSION_TYPE: &str = "compression_type";
pub const FORMAT_DELIMITER: &str = "delimiter";
@@ -146,7 +148,8 @@ pub fn open_with_decoder<T: ArrowDecoder, F: Fn() -> DataFusionResult<T>>(
let reader = object_store
.reader(&path)
.await
.map_err(|e| DataFusionError::External(Box::new(e)))?;
.map_err(|e| DataFusionError::External(Box::new(e)))?
.into_bytes_stream(..);
let mut upstream = compression_type.convert_stream(reader).fuse();
@@ -202,7 +205,9 @@ pub async fn stream_to_file<T: DfRecordBatchEncoder, U: Fn(SharedBuffer) -> T>(
store
.writer_with(&path)
.concurrent(concurrency)
.chunk(DEFAULT_WRITE_BUFFER_SIZE.as_bytes() as usize)
.await
.map(|v| v.into_futures_async_write().compat_write())
.context(error::WriteObjectSnafu { path })
});

View File

@@ -29,6 +29,7 @@ use datafusion::physical_plan::SendableRecordBatchStream;
use derive_builder::Builder;
use object_store::ObjectStore;
use snafu::ResultExt;
use tokio_util::compat::FuturesAsyncReadCompatExt;
use tokio_util::io::SyncIoBridge;
use super::stream_to_file;
@@ -164,10 +165,16 @@ impl FileOpener for CsvOpener {
#[async_trait]
impl FileFormat for CsvFormat {
async fn infer_schema(&self, store: &ObjectStore, path: &str) -> Result<Schema> {
let meta = store
.stat(path)
.await
.context(error::ReadObjectSnafu { path })?;
let reader = store
.reader(path)
.await
.context(error::ReadObjectSnafu { path })?;
.context(error::ReadObjectSnafu { path })?
.into_futures_async_read(0..meta.content_length())
.compat();
let decoded = self.compression_type.convert_async_read(reader);

View File

@@ -31,6 +31,7 @@ use datafusion::error::{DataFusionError, Result as DataFusionResult};
use datafusion::physical_plan::SendableRecordBatchStream;
use object_store::ObjectStore;
use snafu::ResultExt;
use tokio_util::compat::FuturesAsyncReadCompatExt;
use tokio_util::io::SyncIoBridge;
use super::stream_to_file;
@@ -82,10 +83,16 @@ impl Default for JsonFormat {
#[async_trait]
impl FileFormat for JsonFormat {
async fn infer_schema(&self, store: &ObjectStore, path: &str) -> Result<Schema> {
let meta = store
.stat(path)
.await
.context(error::ReadObjectSnafu { path })?;
let reader = store
.reader(path)
.await
.context(error::ReadObjectSnafu { path })?;
.context(error::ReadObjectSnafu { path })?
.into_futures_async_read(0..meta.content_length())
.compat();
let decoded = self.compression_type.convert_async_read(reader);
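
The same stat-then-range pattern appears in each `infer_schema`: the new reader API is range-based, so the object's length is fetched up front and the `futures` reader is adapted for tokio-based decoders. A condensed sketch with the snafu error handling elided (names follow the diff; `ObjectStore` wraps an OpenDAL operator):

use object_store::ObjectStore;
use tokio_util::compat::FuturesAsyncReadCompatExt;

async fn tokio_reader(
    store: &ObjectStore,
    path: &str,
) -> Result<impl tokio::io::AsyncRead, object_store::Error> {
    // stat() first: the ranged reader needs the object's total length.
    let meta = store.stat(path).await?;
    let reader = store
        .reader(path)
        .await?
        .into_futures_async_read(0..meta.content_length())
        .compat(); // futures::io::AsyncRead -> tokio::io::AsyncRead
    Ok(reader)
}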

View File

@@ -16,15 +16,17 @@ use std::sync::Arc;
use arrow_schema::{ArrowError, Schema, SchemaRef};
use async_trait::async_trait;
use bytes::Bytes;
use common_recordbatch::adapter::RecordBatchStreamTypeAdapter;
use datafusion::datasource::physical_plan::{FileMeta, FileOpenFuture, FileOpener};
use datafusion::error::{DataFusionError, Result as DfResult};
use futures::{StreamExt, TryStreamExt};
use futures::future::BoxFuture;
use futures::{FutureExt, StreamExt, TryStreamExt};
use object_store::ObjectStore;
use orc_rust::arrow_reader::ArrowReaderBuilder;
use orc_rust::async_arrow_reader::ArrowStreamReader;
use orc_rust::reader::AsyncChunkReader;
use snafu::ResultExt;
use tokio::io::{AsyncRead, AsyncSeek};
use crate::error::{self, Result};
use crate::file_format::FileFormat;
@@ -32,18 +34,49 @@ use crate::file_format::FileFormat;
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub struct OrcFormat;
pub async fn new_orc_stream_reader<R: AsyncRead + AsyncSeek + Unpin + Send + 'static>(
reader: R,
) -> Result<ArrowStreamReader<R>> {
#[derive(Clone)]
pub struct ReaderAdapter {
reader: object_store::Reader,
len: u64,
}
impl ReaderAdapter {
pub fn new(reader: object_store::Reader, len: u64) -> Self {
Self { reader, len }
}
}
impl AsyncChunkReader for ReaderAdapter {
fn len(&mut self) -> BoxFuture<'_, std::io::Result<u64>> {
async move { Ok(self.len) }.boxed()
}
fn get_bytes(
&mut self,
offset_from_start: u64,
length: u64,
) -> BoxFuture<'_, std::io::Result<Bytes>> {
async move {
let bytes = self
.reader
.read(offset_from_start..offset_from_start + length)
.await?;
Ok(bytes.to_bytes())
}
.boxed()
}
}
pub async fn new_orc_stream_reader(
reader: ReaderAdapter,
) -> Result<ArrowStreamReader<ReaderAdapter>> {
let reader_build = ArrowReaderBuilder::try_new_async(reader)
.await
.context(error::OrcReaderSnafu)?;
Ok(reader_build.build_async())
}
pub async fn infer_orc_schema<R: AsyncRead + AsyncSeek + Unpin + Send + 'static>(
reader: R,
) -> Result<Schema> {
pub async fn infer_orc_schema(reader: ReaderAdapter) -> Result<Schema> {
let reader = new_orc_stream_reader(reader).await?;
Ok(reader.schema().as_ref().clone())
}
@@ -51,13 +84,15 @@ pub async fn infer_orc_schema<R: AsyncRead + AsyncSeek + Unpin + Send + 'static>
#[async_trait]
impl FileFormat for OrcFormat {
async fn infer_schema(&self, store: &ObjectStore, path: &str) -> Result<Schema> {
let meta = store
.stat(path)
.await
.context(error::ReadObjectSnafu { path })?;
let reader = store
.reader(path)
.await
.context(error::ReadObjectSnafu { path })?;
let schema = infer_orc_schema(reader).await?;
let schema = infer_orc_schema(ReaderAdapter::new(reader, meta.content_length())).await?;
Ok(schema)
}
}
@@ -97,15 +132,23 @@ impl FileOpener for OrcOpener {
};
let projection = self.projection.clone();
Ok(Box::pin(async move {
let reader = object_store
.reader(meta.location().to_string().as_str())
let path = meta.location().to_string();
let meta = object_store
.stat(&path)
.await
.map_err(|e| DataFusionError::External(Box::new(e)))?;
let stream_reader = new_orc_stream_reader(reader)
let reader = object_store
.reader(&path)
.await
.map_err(|e| DataFusionError::External(Box::new(e)))?;
let stream_reader =
new_orc_stream_reader(ReaderAdapter::new(reader, meta.content_length()))
.await
.map_err(|e| DataFusionError::External(Box::new(e)))?;
let stream =
RecordBatchStreamTypeAdapter::new(projected_schema, stream_reader, projection);
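
`ReaderAdapter` exists because `orc_rust`'s `AsyncChunkReader` wants a length plus random-access byte ranges rather than a plain `AsyncRead + AsyncSeek`. For comparison, a toy in-memory implementation of the same trait (a hypothetical test helper, not part of the diff):

use bytes::Bytes;
use futures::future::BoxFuture;
use futures::FutureExt;
use orc_rust::reader::AsyncChunkReader;

struct InMemory(Bytes);

impl AsyncChunkReader for InMemory {
    fn len(&mut self) -> BoxFuture<'_, std::io::Result<u64>> {
        async move { Ok(self.0.len() as u64) }.boxed()
    }

    fn get_bytes(
        &mut self,
        offset_from_start: u64,
        length: u64,
    ) -> BoxFuture<'_, std::io::Result<Bytes>> {
        let range = offset_from_start as usize..(offset_from_start + length) as usize;
        async move { Ok(self.0.slice(range)) }.boxed()
    }
}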

View File

@@ -16,7 +16,7 @@ use std::result;
use std::sync::Arc;
use arrow::record_batch::RecordBatch;
use arrow_schema::{Schema, SchemaRef};
use arrow_schema::Schema;
use async_trait::async_trait;
use datafusion::datasource::physical_plan::{FileMeta, ParquetFileReaderFactory};
use datafusion::error::Result as DatafusionResult;
@@ -29,15 +29,18 @@ use datafusion::physical_plan::metrics::ExecutionPlanMetricsSet;
use datafusion::physical_plan::SendableRecordBatchStream;
use futures::future::BoxFuture;
use futures::StreamExt;
use object_store::{ObjectStore, Reader, Writer};
use object_store::{FuturesAsyncReader, ObjectStore};
use parquet::arrow::AsyncArrowWriter;
use parquet::basic::{Compression, ZstdLevel};
use parquet::file::properties::WriterProperties;
use snafu::ResultExt;
use tokio_util::compat::{Compat, FuturesAsyncReadCompatExt, FuturesAsyncWriteCompatExt};
use crate::buffered_writer::{ArrowWriterCloser, DfRecordBatchEncoder, LazyBufferedWriter};
use crate::error::{self, Result};
use crate::buffered_writer::{ArrowWriterCloser, DfRecordBatchEncoder};
use crate::error::{self, Result, WriteObjectSnafu, WriteParquetSnafu};
use crate::file_format::FileFormat;
use crate::share_buffer::SharedBuffer;
use crate::DEFAULT_WRITE_BUFFER_SIZE;
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub struct ParquetFormat {}
@@ -45,10 +48,16 @@ pub struct ParquetFormat {}
#[async_trait]
impl FileFormat for ParquetFormat {
async fn infer_schema(&self, store: &ObjectStore, path: &str) -> Result<Schema> {
let meta = store
.stat(path)
.await
.context(error::ReadObjectSnafu { path })?;
let mut reader = store
.reader(path)
.await
.context(error::ReadObjectSnafu { path })?;
.context(error::ReadObjectSnafu { path })?
.into_futures_async_read(0..meta.content_length())
.compat();
let metadata = reader
.get_metadata()
@@ -98,7 +107,7 @@ impl ParquetFileReaderFactory for DefaultParquetFileReaderFactory {
pub struct LazyParquetFileReader {
object_store: ObjectStore,
reader: Option<Reader>,
reader: Option<Compat<FuturesAsyncReader>>,
path: String,
}
@@ -114,7 +123,13 @@ impl LazyParquetFileReader {
/// Initializes the reader on first use; any error is returned from the future.
async fn maybe_initialize(&mut self) -> result::Result<(), object_store::Error> {
if self.reader.is_none() {
let reader = self.object_store.reader(&self.path).await?;
let meta = self.object_store.stat(&self.path).await?;
let reader = self
.object_store
.reader(&self.path)
.await?
.into_futures_async_read(0..meta.content_length())
.compat();
self.reader = Some(reader);
}
@@ -160,72 +175,6 @@ impl ArrowWriterCloser for ArrowWriter<SharedBuffer> {
}
}
/// Parquet writer that buffers row groups in memory and writes buffered data to an underlying
/// storage by chunks to reduce memory consumption.
pub struct BufferedWriter {
inner: InnerBufferedWriter,
}
type InnerBufferedWriter = LazyBufferedWriter<
object_store::Writer,
ArrowWriter<SharedBuffer>,
impl Fn(String) -> BoxFuture<'static, Result<Writer>>,
>;
impl BufferedWriter {
fn make_write_factory(
store: ObjectStore,
concurrency: usize,
) -> impl Fn(String) -> BoxFuture<'static, Result<Writer>> {
move |path| {
let store = store.clone();
Box::pin(async move {
store
.writer_with(&path)
.concurrent(concurrency)
.await
.context(error::WriteObjectSnafu { path })
})
}
}
pub async fn try_new(
path: String,
store: ObjectStore,
arrow_schema: SchemaRef,
props: Option<WriterProperties>,
buffer_threshold: usize,
concurrency: usize,
) -> error::Result<Self> {
let buffer = SharedBuffer::with_capacity(buffer_threshold);
let arrow_writer = ArrowWriter::try_new(buffer.clone(), arrow_schema.clone(), props)
.context(error::WriteParquetSnafu { path: &path })?;
Ok(Self {
inner: LazyBufferedWriter::new(
buffer_threshold,
buffer,
arrow_writer,
&path,
Self::make_write_factory(store, concurrency),
),
})
}
/// Write a record batch to stream writer.
pub async fn write(&mut self, arrow_batch: &RecordBatch) -> error::Result<()> {
self.inner.write(arrow_batch).await
}
/// Close parquet writer.
///
/// Return file metadata and bytes written.
pub async fn close(self) -> error::Result<(FileMetaData, u64)> {
self.inner.close_with_arrow_writer().await
}
}
/// Output the stream to a parquet file.
///
/// Returns number of rows written.
@@ -233,29 +182,33 @@ pub async fn stream_to_parquet(
mut stream: SendableRecordBatchStream,
store: ObjectStore,
path: &str,
threshold: usize,
concurrency: usize,
) -> Result<usize> {
let write_props = WriterProperties::builder()
.set_compression(Compression::ZSTD(ZstdLevel::default()))
.build();
let schema = stream.schema();
let mut buffered_writer = BufferedWriter::try_new(
path.to_string(),
store,
schema,
Some(write_props),
threshold,
concurrency,
)
.await?;
let inner_writer = store
.writer_with(path)
.concurrent(concurrency)
.chunk(DEFAULT_WRITE_BUFFER_SIZE.as_bytes() as usize)
.await
.map(|w| w.into_futures_async_write().compat_write())
.context(WriteObjectSnafu { path })?;
let mut writer = AsyncArrowWriter::try_new(inner_writer, schema, Some(write_props))
.context(WriteParquetSnafu { path })?;
let mut rows_written = 0;
while let Some(batch) = stream.next().await {
let batch = batch.context(error::ReadRecordBatchSnafu)?;
buffered_writer.write(&batch).await?;
writer
.write(&batch)
.await
.context(WriteParquetSnafu { path })?;
rows_written += batch.num_rows();
}
buffered_writer.close().await?;
writer.close().await.context(WriteParquetSnafu { path })?;
Ok(rows_written)
}
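
`stream_to_parquet` now writes through parquet's upstream `AsyncArrowWriter` instead of the hand-rolled `BufferedWriter`. A minimal sketch of that writer over a local tokio file, assuming a parquet version whose `try_new` takes writer, schema, and properties as in the diff:

use arrow::record_batch::RecordBatch;
use parquet::arrow::AsyncArrowWriter;
use parquet::basic::{Compression, ZstdLevel};
use parquet::file::properties::WriterProperties;

async fn write_one_batch(batch: &RecordBatch, path: &str) -> parquet::errors::Result<()> {
    let file = tokio::fs::File::create(path).await.expect("create file");
    let props = WriterProperties::builder()
        .set_compression(Compression::ZSTD(ZstdLevel::default()))
        .build();
    let mut writer = AsyncArrowWriter::try_new(file, batch.schema(), Some(props))?;
    writer.write(batch).await?;
    // close() flushes buffered row groups and writes the footer.
    writer.close().await?;
    Ok(())
}

Buffering and chunked uploads move into the object-store writer itself (the `.chunk(DEFAULT_WRITE_BUFFER_SIZE...)` call above), which is why the `threshold` parameter disappears.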

View File

@@ -19,7 +19,6 @@ use std::vec;
use common_test_util::find_workspace_path;
use datafusion::assert_batches_eq;
use datafusion::config::TableParquetOptions;
use datafusion::datasource::physical_plan::{FileOpener, FileScanConfig, FileStream, ParquetExec};
use datafusion::execution::context::TaskContext;
use datafusion::physical_plan::metrics::ExecutionPlanMetricsSet;
@@ -167,8 +166,9 @@ async fn test_parquet_exec() {
.to_string();
let base_config = scan_config(schema.clone(), None, path);
let exec = ParquetExec::new(base_config, None, None, TableParquetOptions::default())
.with_parquet_file_reader_factory(Arc::new(DefaultParquetFileReaderFactory::new(store)));
let exec = ParquetExec::builder(base_config)
.with_parquet_file_reader_factory(Arc::new(DefaultParquetFileReaderFactory::new(store)))
.build();
let ctx = SessionContext::new();

View File

@@ -27,3 +27,8 @@ pub mod test_util;
#[cfg(test)]
pub mod tests;
pub mod util;
use common_base::readable_size::ReadableSize;
/// Default write buffer size; it should be greater than S3's minimum multipart upload part size (5 MB).
pub const DEFAULT_WRITE_BUFFER_SIZE: ReadableSize = ReadableSize::mb(8);

View File

@@ -120,7 +120,7 @@ pub async fn setup_stream_to_json_test(origin_path: &str, threshold: impl Fn(usi
let written = tmp_store.read(&output_path).await.unwrap();
let origin = store.read(origin_path).await.unwrap();
assert_eq_lines(written, origin);
assert_eq_lines(written.to_vec(), origin.to_vec());
}
pub async fn setup_stream_to_csv_test(origin_path: &str, threshold: impl Fn(usize) -> usize) {
@@ -158,7 +158,7 @@ pub async fn setup_stream_to_csv_test(origin_path: &str, threshold: impl Fn(usiz
let written = tmp_store.read(&output_path).await.unwrap();
let origin = store.read(origin_path).await.unwrap();
assert_eq_lines(written, origin);
assert_eq_lines(written.to_vec(), origin.to_vec());
}
// Ignore the CRLF difference across operating systems.

View File

@@ -10,3 +10,4 @@ workspace = true
[dependencies]
snafu.workspace = true
strum.workspace = true
tonic.workspace = true

View File

@@ -105,6 +105,10 @@ impl BoxedError {
inner: Box::new(err),
}
}
pub fn into_inner(self) -> Box<dyn crate::ext::ErrorExt + Send + Sync> {
self.inner
}
}
impl std::fmt::Debug for BoxedError {

View File

@@ -15,6 +15,7 @@
use std::fmt;
use strum::{AsRefStr, EnumIter, EnumString, FromRepr};
use tonic::Code;
/// Common status code for public API.
#[derive(Debug, Clone, Copy, PartialEq, Eq, EnumString, AsRefStr, EnumIter, FromRepr)]
@@ -202,6 +203,75 @@ impl fmt::Display for StatusCode {
}
}
#[macro_export]
macro_rules! define_into_tonic_status {
($Error: ty) => {
impl From<$Error> for tonic::Status {
fn from(err: $Error) -> Self {
use tonic::codegen::http::{HeaderMap, HeaderValue};
use tonic::metadata::MetadataMap;
use $crate::GREPTIME_DB_HEADER_ERROR_CODE;
let mut headers = HeaderMap::<HeaderValue>::with_capacity(2);
// If either the status code or the error message cannot be converted to a valid HTTP header value
// (a very rare case), it is simply ignored; the client falls back to the tonic status code and message.
let status_code = err.status_code();
headers.insert(
GREPTIME_DB_HEADER_ERROR_CODE,
HeaderValue::from(status_code as u32),
);
let root_error = err.output_msg();
let metadata = MetadataMap::from_headers(headers);
tonic::Status::with_metadata(
$crate::status_code::status_to_tonic_code(status_code),
root_error,
metadata,
)
}
}
};
}
/// Returns the tonic [Code] of a [StatusCode].
pub fn status_to_tonic_code(status_code: StatusCode) -> Code {
match status_code {
StatusCode::Success => Code::Ok,
StatusCode::Unknown => Code::Unknown,
StatusCode::Unsupported => Code::Unimplemented,
StatusCode::Unexpected
| StatusCode::Internal
| StatusCode::PlanQuery
| StatusCode::EngineExecuteQuery => Code::Internal,
StatusCode::InvalidArguments | StatusCode::InvalidSyntax | StatusCode::RequestOutdated => {
Code::InvalidArgument
}
StatusCode::Cancelled => Code::Cancelled,
StatusCode::TableAlreadyExists
| StatusCode::TableColumnExists
| StatusCode::RegionAlreadyExists
| StatusCode::FlowAlreadyExists => Code::AlreadyExists,
StatusCode::TableNotFound
| StatusCode::RegionNotFound
| StatusCode::TableColumnNotFound
| StatusCode::DatabaseNotFound
| StatusCode::UserNotFound
| StatusCode::FlowNotFound => Code::NotFound,
StatusCode::StorageUnavailable | StatusCode::RegionNotReady => Code::Unavailable,
StatusCode::RuntimeResourcesExhausted
| StatusCode::RateLimited
| StatusCode::RegionBusy => Code::ResourceExhausted,
StatusCode::UnsupportedPasswordType
| StatusCode::UserPasswordMismatch
| StatusCode::AuthHeaderNotFound
| StatusCode::InvalidAuthHeader => Code::Unauthenticated,
StatusCode::AccessDenied | StatusCode::PermissionDenied | StatusCode::RegionReadonly => {
Code::PermissionDenied
}
}
}
#[cfg(test)]
mod tests {
use strum::IntoEnumIterator;
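
A spot-check of the mapping above in test form (a sketch; assumes the module's usual `use super::*;`):

#[test]
fn maps_status_to_tonic_code() {
    assert_eq!(status_to_tonic_code(StatusCode::Success), tonic::Code::Ok);
    assert_eq!(status_to_tonic_code(StatusCode::TableNotFound), tonic::Code::NotFound);
    assert_eq!(status_to_tonic_code(StatusCode::RateLimited), tonic::Code::ResourceExhausted);
}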

View File

@@ -44,10 +44,10 @@ struct ProcedureStateJson {
/// A function to query procedure state by its id.
/// Such as `procedure_state(pid)`.
#[admin_fn(
name = "ProcedureStateFunction",
display_name = "procedure_state",
sig_fn = "signature",
ret = "string"
name = ProcedureStateFunction,
display_name = procedure_state,
sig_fn = signature,
ret = string
)]
pub(crate) async fn procedure_state(
procedure_service_handler: &ProcedureServiceHandlerRef,

View File

@@ -35,7 +35,7 @@ use crate::helper::cast_u64;
macro_rules! define_region_function {
($name: expr, $display_name_str: expr, $display_name: ident) => {
/// A function to $display_name
#[admin_fn(name = $name, display_name = $display_name_str, sig_fn = "signature", ret = "uint64")]
#[admin_fn(name = $name, display_name = $display_name_str, sig_fn = signature, ret = uint64)]
pub(crate) async fn $display_name(
table_mutation_handler: &TableMutationHandlerRef,
query_ctx: &QueryContextRef,
@@ -53,7 +53,7 @@ macro_rules! define_region_function {
let Some(region_id) = cast_u64(&params[0])? else {
return UnsupportedInputDataTypeSnafu {
function: $display_name_str,
function: stringify!($display_name_str),
datatypes: params.iter().map(|v| v.data_type()).collect::<Vec<_>>(),
}
.fail();
@@ -68,9 +68,9 @@ macro_rules! define_region_function {
};
}
define_region_function!("FlushRegionFunction", "flush_region", flush_region);
define_region_function!(FlushRegionFunction, flush_region, flush_region);
define_region_function!("CompactRegionFunction", "compact_region", compact_region);
define_region_function!(CompactRegionFunction, compact_region, compact_region);
fn signature() -> Signature {
Signature::uniform(1, ConcreteDataType::numerics(), Volatility::Immutable)

View File

@@ -40,10 +40,10 @@ use crate::handlers::TableMutationHandlerRef;
const COMPACT_TYPE_STRICT_WINDOW: &str = "strict_window";
#[admin_fn(
name = "FlushTableFunction",
display_name = "flush_table",
sig_fn = "flush_signature",
ret = "uint64"
name = FlushTableFunction,
display_name = flush_table,
sig_fn = flush_signature,
ret = uint64
)]
pub(crate) async fn flush_table(
table_mutation_handler: &TableMutationHandlerRef,
@@ -87,10 +87,10 @@ pub(crate) async fn flush_table(
}
#[admin_fn(
name = "CompactTableFunction",
display_name = "compact_table",
sig_fn = "compact_signature",
ret = "uint64"
name = CompactTableFunction,
display_name = compact_table,
sig_fn = compact_signature,
ret = uint64
)]
pub(crate) async fn compact_table(
table_mutation_handler: &TableMutationHandlerRef,

View File

@@ -46,10 +46,10 @@ const DEFAULT_REPLAY_TIMEOUT_SECS: u64 = 10;
/// - `from_peer`: the source peer id
/// - `to_peer`: the target peer id
#[admin_fn(
name = "MigrateRegionFunction",
display_name = "migrate_region",
sig_fn = "signature",
ret = "string"
name = MigrateRegionFunction,
display_name = migrate_region,
sig_fn = signature,
ret = string
)]
pub(crate) async fn migrate_region(
procedure_service_handler: &ProcedureServiceHandlerRef,

View File

@@ -13,13 +13,7 @@ workspace = true
[dependencies]
proc-macro2 = "1.0.66"
quote = "1.0"
syn = "1.0"
syn2 = { version = "2.0", package = "syn", features = [
"derive",
"parsing",
"printing",
"clone-impls",
"proc-macro",
syn = { version = "2.0", features = [
"extra-traits",
"full",
] }

View File

@@ -16,11 +16,11 @@ use proc_macro::TokenStream;
use quote::quote;
use syn::spanned::Spanned;
use syn::{
parse_macro_input, Attribute, AttributeArgs, Ident, ItemFn, Signature, Type, TypePath,
TypeReference, Visibility,
parse_macro_input, Attribute, Ident, ItemFn, Signature, Type, TypePath, TypeReference,
Visibility,
};
use crate::utils::{extract_arg_map, extract_input_types, get_ident};
use crate::utils::extract_input_types;
/// Internal util macro to early return on error.
macro_rules! ok {
@@ -40,12 +40,31 @@ macro_rules! error {
}
pub(crate) fn process_admin_fn(args: TokenStream, input: TokenStream) -> TokenStream {
let mut result = TokenStream::new();
let mut name: Option<Ident> = None;
let mut display_name: Option<Ident> = None;
let mut sig_fn: Option<Ident> = None;
let mut ret: Option<Ident> = None;
let parser = syn::meta::parser(|meta| {
if meta.path.is_ident("name") {
name = Some(meta.value()?.parse()?);
Ok(())
} else if meta.path.is_ident("display_name") {
display_name = Some(meta.value()?.parse()?);
Ok(())
} else if meta.path.is_ident("sig_fn") {
sig_fn = Some(meta.value()?.parse()?);
Ok(())
} else if meta.path.is_ident("ret") {
ret = Some(meta.value()?.parse()?);
Ok(())
} else {
Err(meta.error("unsupported property"))
}
});
// extract arg map
let arg_pairs = parse_macro_input!(args as AttributeArgs);
let arg_span = arg_pairs[0].span();
let arg_map = ok!(extract_arg_map(arg_pairs));
parse_macro_input!(args with parser);
// decompose the fn block
let compute_fn = parse_macro_input!(input as ItemFn);
@@ -72,16 +91,17 @@ pub(crate) fn process_admin_fn(args: TokenStream, input: TokenStream) -> TokenSt
}
let handler_type = ok!(extract_handler_type(&arg_types));
let mut result = TokenStream::new();
// build the struct and its impl block
// only do this when `display_name` is specified
if let Ok(display_name) = get_ident(&arg_map, "display_name", arg_span) {
if let Some(display_name) = display_name {
let struct_code = build_struct(
attrs,
vis,
fn_name,
ok!(get_ident(&arg_map, "name", arg_span)),
ok!(get_ident(&arg_map, "sig_fn", arg_span)),
ok!(get_ident(&arg_map, "ret", arg_span)),
name.expect("name required"),
sig_fn.expect("sig_fn required"),
ret.expect("ret required"),
handler_type,
display_name,
);

View File

@@ -14,28 +14,24 @@
use proc_macro::TokenStream;
use quote::{quote, ToTokens};
use syn::{parse_macro_input, AttributeArgs, ItemFn, Lit, Meta, NestedMeta};
use syn::{parse_macro_input, ItemFn, LitInt};
pub(crate) fn process_print_caller(args: TokenStream, input: TokenStream) -> TokenStream {
let mut depth = 1;
let args = parse_macro_input!(args as AttributeArgs);
for meta in args.iter() {
if let NestedMeta::Meta(Meta::NameValue(name_value)) = meta {
let ident = name_value
.path
.get_ident()
.expect("Expected an ident!")
.to_string();
if ident == "depth" {
let Lit::Int(i) = &name_value.lit else {
panic!("Expected 'depth' to be a valid int!")
};
depth = i.base10_parse::<usize>().expect("Invalid 'depth' value");
break;
}
let parser = syn::meta::parser(|meta| {
if meta.path.is_ident("depth") {
depth = meta
.value()?
.parse::<LitInt>()
.and_then(|v| v.base10_parse::<usize>())
.expect("Invalid 'depth' value");
Ok(())
} else {
Err(meta.error("unsupported property"))
}
}
});
parse_macro_input!(args with parser);
let tokens: TokenStream = quote! {
{

View File

@@ -16,11 +16,10 @@ use proc_macro::TokenStream;
use quote::quote;
use syn::spanned::Spanned;
use syn::{
parse_macro_input, Attribute, AttributeArgs, Ident, ItemFn, Signature, Type, TypeReference,
Visibility,
parse_macro_input, Attribute, Ident, ItemFn, Signature, Type, TypeReference, Visibility,
};
use crate::utils::{extract_arg_map, extract_input_types, get_ident};
use crate::utils::extract_input_types;
macro_rules! ok {
($item:expr) => {
@@ -32,12 +31,27 @@ macro_rules! ok {
}
pub(crate) fn process_range_fn(args: TokenStream, input: TokenStream) -> TokenStream {
let mut result = TokenStream::new();
let mut name: Option<Ident> = None;
let mut display_name: Option<Ident> = None;
let mut ret: Option<Ident> = None;
let parser = syn::meta::parser(|meta| {
if meta.path.is_ident("name") {
name = Some(meta.value()?.parse()?);
Ok(())
} else if meta.path.is_ident("display_name") {
display_name = Some(meta.value()?.parse()?);
Ok(())
} else if meta.path.is_ident("ret") {
ret = Some(meta.value()?.parse()?);
Ok(())
} else {
Err(meta.error("unsupported property"))
}
});
// extract arg map
let arg_pairs = parse_macro_input!(args as AttributeArgs);
let arg_span = arg_pairs[0].span();
let arg_map = ok!(extract_arg_map(arg_pairs));
parse_macro_input!(args with parser);
// decompose the fn block
let compute_fn = parse_macro_input!(input as ItemFn);
@@ -68,25 +82,27 @@ pub(crate) fn process_range_fn(args: TokenStream, input: TokenStream) -> TokenSt
})
.collect::<Vec<_>>();
let mut result = TokenStream::new();
// build the struct and its impl block
// only do this when `display_name` is specified
if let Ok(display_name) = get_ident(&arg_map, "display_name", arg_span) {
if let Some(display_name) = display_name {
let struct_code = build_struct(
attrs,
vis,
ok!(get_ident(&arg_map, "name", arg_span)),
name.clone().expect("name required"),
display_name,
array_types,
ok!(get_ident(&arg_map, "ret", arg_span)),
ret.clone().expect("ret required"),
);
result.extend(struct_code);
}
let calc_fn_code = build_calc_fn(
ok!(get_ident(&arg_map, "name", arg_span)),
name.expect("name required"),
arg_types,
fn_name.clone(),
ok!(get_ident(&arg_map, "ret", arg_span)),
ret.expect("ret required"),
);
// preserve this fn, but remove its `pub` modifier
let input_fn_code: TokenStream = quote! {

View File

@@ -16,13 +16,13 @@
use proc_macro2::{Span, TokenStream as TokenStream2};
use quote::{quote, quote_spanned};
use syn2::spanned::Spanned;
use syn2::{parenthesized, Attribute, Ident, ItemEnum, Variant};
use syn::spanned::Spanned;
use syn::{parenthesized, Attribute, Ident, ItemEnum, Variant};
pub fn stack_trace_style_impl(args: TokenStream2, input: TokenStream2) -> TokenStream2 {
let input_cloned: TokenStream2 = input.clone();
let error_enum_definition: ItemEnum = syn2::parse2(input_cloned).unwrap();
let error_enum_definition: ItemEnum = syn::parse2(input_cloned).unwrap();
let enum_name = error_enum_definition.ident;
let mut variants = vec![];

View File

@@ -12,48 +12,10 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::HashMap;
use proc_macro2::Span;
use syn::punctuated::Punctuated;
use syn::spanned::Spanned;
use syn::token::Comma;
use syn::{FnArg, Ident, Meta, MetaNameValue, NestedMeta, Type};
/// Extract a String <-> Ident map from the attribute args.
pub(crate) fn extract_arg_map(args: Vec<NestedMeta>) -> Result<HashMap<String, Ident>, syn::Error> {
args.into_iter()
.map(|meta| {
if let NestedMeta::Meta(Meta::NameValue(MetaNameValue { path, lit, .. })) = meta {
let name = path.get_ident().unwrap().to_string();
let ident = match lit {
syn::Lit::Str(lit_str) => lit_str.parse::<Ident>(),
_ => Err(syn::Error::new(
lit.span(),
"Unexpected attribute format. Expected `name = \"value\"`",
)),
}?;
Ok((name, ident))
} else {
Err(syn::Error::new(
meta.span(),
"Unexpected attribute format. Expected `name = \"value\"`",
))
}
})
.collect::<Result<HashMap<String, Ident>, syn::Error>>()
}
/// Helper function to get an Ident from the previous arg map.
pub(crate) fn get_ident(
map: &HashMap<String, Ident>,
key: &str,
span: Span,
) -> Result<Ident, syn::Error> {
map.get(key)
.cloned()
.ok_or_else(|| syn::Error::new(span, format!("Expect attribute {key} but not found")))
}
use syn::{FnArg, Type};
/// Extract the argument list from the annotated function.
pub(crate) fn extract_input_types(

View File

@@ -25,11 +25,13 @@ common-grpc-expr.workspace = true
common-macro.workspace = true
common-procedure.workspace = true
common-procedure-test.workspace = true
common-query.workspace = true
common-recordbatch.workspace = true
common-telemetry.workspace = true
common-time.workspace = true
common-wal.workspace = true
datafusion-common.workspace = true
datafusion-expr.workspace = true
datatypes.workspace = true
derive_builder.workspace = true
etcd-client.workspace = true

View File

@@ -99,6 +99,7 @@ pub struct NodeInfo {
pub enum Role {
Datanode,
Frontend,
Flownode,
Metasrv,
}
@@ -106,6 +107,7 @@ pub enum Role {
pub enum NodeStatus {
Datanode(DatanodeStatus),
Frontend(FrontendStatus),
Flownode(FlownodeStatus),
Metasrv(MetasrvStatus),
Standalone,
}
@@ -116,6 +118,7 @@ impl NodeStatus {
match self {
NodeStatus::Datanode(_) => "DATANODE",
NodeStatus::Frontend(_) => "FRONTEND",
NodeStatus::Flownode(_) => "FLOWNODE",
NodeStatus::Metasrv(_) => "METASRV",
NodeStatus::Standalone => "STANDALONE",
}
@@ -139,6 +142,10 @@ pub struct DatanodeStatus {
#[derive(Debug, Serialize, Deserialize)]
pub struct FrontendStatus {}
/// The status of a flownode.
#[derive(Debug, Serialize, Deserialize)]
pub struct FlownodeStatus {}
/// The status of a metasrv.
#[derive(Debug, Serialize, Deserialize)]
pub struct MetasrvStatus {
@@ -235,7 +242,8 @@ impl From<Role> for i32 {
match role {
Role::Datanode => 0,
Role::Frontend => 1,
Role::Metasrv => 2,
Role::Flownode => 2,
Role::Metasrv => 99,
}
}
}
@@ -247,7 +255,8 @@ impl TryFrom<i32> for Role {
match role {
0 => Ok(Self::Datanode),
1 => Ok(Self::Frontend),
2 => Ok(Self::Metasrv),
2 => Ok(Self::Flownode),
99 => Ok(Self::Metasrv),
_ => InvalidRoleSnafu { role }.fail(),
}
}
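
The numeric values are part of the wire format: `Flownode` takes over `2` and `Metasrv` moves to `99`, so both directions must stay in sync. A round-trip sketch, assuming `Role: Copy + PartialEq + Debug` and a `Debug` error type for `try_from`:

#[test]
fn role_round_trips_through_i32() {
    for role in [Role::Datanode, Role::Frontend, Role::Flownode, Role::Metasrv] {
        let wire: i32 = role.into();
        assert_eq!(Role::try_from(wire).unwrap(), role);
    }
}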

View File

@@ -26,6 +26,7 @@ use crate::key::flow::FlowMetadataManagerRef;
use crate::key::table_route::PhysicalTableRouteValue;
use crate::key::TableMetadataManagerRef;
use crate::node_manager::NodeManagerRef;
use crate::peer::PeerLookupServiceRef;
use crate::region_keeper::MemoryRegionKeeperRef;
use crate::rpc::ddl::{SubmitDdlTaskRequest, SubmitDdlTaskResponse};
use crate::rpc::procedure::{MigrateRegionRequest, MigrateRegionResponse, ProcedureStateResponse};
@@ -118,4 +119,6 @@ pub struct DdlContext {
pub flow_metadata_manager: FlowMetadataManagerRef,
/// Allocator for flow metadata.
pub flow_metadata_allocator: FlowMetadataAllocatorRef,
/// Looks up a peer by id.
pub peer_lookup_service: PeerLookupServiceRef,
}

View File

@@ -23,10 +23,11 @@ impl CreateFlowProcedure {
pub(crate) async fn allocate_flow_id(&mut self) -> Result<()> {
// TODO(weny, ruihang): We don't support partitions yet; it's always 1 for now.
let partitions = 1;
let cluster_id = self.data.cluster_id;
let (flow_id, peers) = self
.context
.flow_metadata_allocator
.create(partitions)
.create(cluster_id, partitions)
.await?;
self.data.flow_id = Some(flow_id);
self.data.peers = peers;

View File

@@ -131,9 +131,10 @@ mod tests {
use std::sync::Arc;
use api::region::RegionResponse;
use api::v1::region::{QueryRequest, RegionRequest};
use api::v1::region::RegionRequest;
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_error::ext::BoxedError;
use common_query::request::QueryRequest;
use common_recordbatch::SendableRecordBatchStream;
use table::table_name::TableName;

View File

@@ -25,18 +25,17 @@ use common_procedure::{
use common_telemetry::info;
use futures::future::join_all;
use serde::{Deserialize, Serialize};
use snafu::{ensure, ResultExt};
use snafu::{ensure, OptionExt, ResultExt};
use strum::AsRefStr;
use super::utils::{add_peer_context_if_needed, handle_retry_error};
use crate::cache_invalidator::Context;
use crate::ddl::DdlContext;
use crate::error::{self, Result};
use crate::error::{self, Result, UnexpectedSnafu};
use crate::flow_name::FlowName;
use crate::instruction::{CacheIdent, DropFlow};
use crate::key::flow::flow_info::FlowInfoValue;
use crate::lock_key::{CatalogLock, FlowLock};
use crate::peer::Peer;
use crate::rpc::ddl::DropFlowTask;
use crate::{metrics, ClusterId};
@@ -103,10 +102,17 @@ impl DropFlowProcedure {
let flownode_ids = &self.data.flow_info_value.as_ref().unwrap().flownode_ids;
let flow_id = self.data.task.flow_id;
let mut drop_flow_tasks = Vec::with_capacity(flownode_ids.len());
let cluster_id = self.data.cluster_id;
for flownode in flownode_ids.values() {
// TODO(weny): use the real peer.
let peer = Peer::new(*flownode, "");
let peer = self
.context
.peer_lookup_service
.flownode(cluster_id, *flownode)
.await?
.with_context(|| UnexpectedSnafu {
err_msg: "Attempted to drop flow on a node that could not be found. Consider verifying node availability.",
})?;
let requester = self.context.node_manager.flownode(&peer).await;
let request = FlowRequest {
body: Some(flow_request::Body::Drop(DropRequest {
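
The lookup call above implies the shape of the service: resolve a flownode id within a cluster to a live `Peer`, returning `None` when the node is unknown. An inferred sketch of that interface (the real trait lives in `common_meta::peer` and is not shown in this diff, so treat this as an assumption):

use async_trait::async_trait;

#[async_trait]
pub trait PeerLookupService: Send + Sync {
    // Ok(None) when no peer with this id is currently known.
    async fn flownode(&self, cluster_id: ClusterId, id: FlownodeId) -> Result<Option<Peer>>;
}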

View File

@@ -20,6 +20,7 @@ use crate::error::Result;
use crate::key::FlowId;
use crate::peer::Peer;
use crate::sequence::SequenceRef;
use crate::ClusterId;
/// The reference of [FlowMetadataAllocator].
pub type FlowMetadataAllocatorRef = Arc<FlowMetadataAllocator>;
@@ -42,6 +43,16 @@ impl FlowMetadataAllocator {
}
}
pub fn with_peer_allocator(
flow_id_sequence: SequenceRef,
peer_allocator: Arc<dyn PartitionPeerAllocator>,
) -> Self {
Self {
flow_id_sequence,
partition_peer_allocator: peer_allocator,
}
}
/// Allocates a [FlowId].
pub(crate) async fn allocate_flow_id(&self) -> Result<FlowId> {
let flow_id = self.flow_id_sequence.next().await? as FlowId;
@@ -49,9 +60,16 @@ impl FlowMetadataAllocator {
}
/// Allocates the [FlowId] and [Peer]s.
pub async fn create(&self, partitions: usize) -> Result<(FlowId, Vec<Peer>)> {
pub async fn create(
&self,
cluster_id: ClusterId,
partitions: usize,
) -> Result<(FlowId, Vec<Peer>)> {
let flow_id = self.allocate_flow_id().await?;
let peers = self.partition_peer_allocator.alloc(partitions).await?;
let peers = self
.partition_peer_allocator
.alloc(cluster_id, partitions)
.await?;
Ok((flow_id, peers))
}
@@ -61,7 +79,7 @@ impl FlowMetadataAllocator {
#[async_trait]
pub trait PartitionPeerAllocator: Send + Sync {
/// Allocates [Peer] nodes for storing partitions.
async fn alloc(&self, partitions: usize) -> Result<Vec<Peer>>;
async fn alloc(&self, cluster_id: ClusterId, partitions: usize) -> Result<Vec<Peer>>;
}
/// [PartitionPeerAllocatorRef] allocates [Peer]s for partitions.
@@ -71,7 +89,7 @@ struct NoopPartitionPeerAllocator;
#[async_trait]
impl PartitionPeerAllocator for NoopPartitionPeerAllocator {
async fn alloc(&self, partitions: usize) -> Result<Vec<Peer>> {
async fn alloc(&self, _cluster_id: ClusterId, partitions: usize) -> Result<Vec<Peer>> {
Ok(vec![Peer::default(); partitions])
}
}
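
With `cluster_id` threaded through the trait, a non-noop allocator can pick peers per cluster. A hypothetical implementation that pins every partition to one fixed peer:

use async_trait::async_trait;

// Hypothetical allocator: every partition lands on the same peer.
struct FixedPeerAllocator {
    peer: Peer,
}

#[async_trait]
impl PartitionPeerAllocator for FixedPeerAllocator {
    async fn alloc(&self, _cluster_id: ClusterId, partitions: usize) -> Result<Vec<Peer>> {
        Ok(vec![self.peer.clone(); partitions])
    }
}

It would be wired in through `FlowMetadataAllocator::with_peer_allocator(flow_id_sequence, Arc::new(FixedPeerAllocator { peer }))`.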

View File

@@ -13,9 +13,10 @@
// limitations under the License.
use api::region::RegionResponse;
use api::v1::region::{QueryRequest, RegionRequest};
use api::v1::region::RegionRequest;
use common_error::ext::{BoxedError, ErrorExt, StackError};
use common_error::status_code::StatusCode;
use common_query::request::QueryRequest;
use common_recordbatch::SendableRecordBatchStream;
use common_telemetry::debug;
use snafu::{ResultExt, Snafu};

View File

@@ -810,7 +810,7 @@ mod tests {
use crate::key::TableMetadataManager;
use crate::kv_backend::memory::MemoryKvBackend;
use crate::node_manager::{DatanodeRef, FlownodeRef, NodeManager};
use crate::peer::Peer;
use crate::peer::{Peer, StandalonePeerLookupService};
use crate::region_keeper::MemoryRegionKeeper;
use crate::sequence::SequenceBuilder;
use crate::state_store::KvStateStore;
@@ -855,6 +855,7 @@ mod tests {
flow_metadata_manager,
flow_metadata_allocator,
memory_region_keeper: Arc::new(MemoryRegionKeeper::default()),
peer_lookup_service: Arc::new(StandalonePeerLookupService::new()),
},
procedure_manager.clone(),
true,

View File

@@ -30,6 +30,8 @@ pub const REGION_LEASE_SECS: u64 =
/// If the node's lease has expired, the `Selector` will not select it.
pub const DATANODE_LEASE_SECS: u64 = REGION_LEASE_SECS;
pub const FLOWNODE_LEASE_SECS: u64 = DATANODE_LEASE_SECS;
/// The lease seconds of metasrv leader.
pub const META_LEASE_SECS: u64 = 3;

View File

@@ -83,20 +83,16 @@
pub mod catalog_name;
pub mod datanode_table;
/// TODO(weny): remove id.
#[allow(unused)]
pub mod flow;
pub mod schema_name;
pub mod table_info;
pub mod table_name;
pub mod view_info;
// TODO(weny): remove it.
#[allow(deprecated)]
pub mod table_route;
#[cfg(any(test, feature = "testing"))]
pub mod test_utils;
mod tombstone;
pub(crate) mod txn_helper;
pub mod view_info;
use std::collections::{BTreeMap, HashMap, HashSet};
use std::fmt::Debug;
@@ -139,7 +135,7 @@ use crate::rpc::store::BatchDeleteRequest;
use crate::DatanodeId;
pub const NAME_PATTERN: &str = r"[a-zA-Z_:-][a-zA-Z0-9_:\-\.]*";
pub const MAINTENANCE_KEY: &str = "maintenance";
pub const MAINTENANCE_KEY: &str = "__maintenance";
const DATANODE_TABLE_KEY_PREFIX: &str = "__dn_table";
pub const TABLE_INFO_KEY_PREFIX: &str = "__table_info";

View File

@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
pub(crate) mod flow_info;
pub mod flow_info;
pub(crate) mod flow_name;
pub(crate) mod flownode_flow;
pub(crate) mod table_flow;
@@ -29,9 +29,9 @@ use self::flownode_flow::FlownodeFlowKey;
use self::table_flow::TableFlowKey;
use crate::ensure_values;
use crate::error::{self, Result};
use crate::key::flow::flow_info::{FlowInfoManager, FlowInfoManagerRef};
use crate::key::flow::flow_name::{FlowNameManager, FlowNameManagerRef};
use crate::key::flow::flownode_flow::{FlownodeFlowManager, FlownodeFlowManagerRef};
use crate::key::flow::flow_info::FlowInfoManager;
use crate::key::flow::flow_name::FlowNameManager;
use crate::key::flow::flownode_flow::FlownodeFlowManager;
pub use crate::key::flow::table_flow::{TableFlowManager, TableFlowManagerRef};
use crate::key::txn_helper::TxnOpGetResponseSet;
use crate::key::{FlowId, MetaKey};
@@ -214,15 +214,15 @@ impl FlowMetadataManager {
let source_table_ids = flow_value.source_table_ids();
let mut keys =
Vec::with_capacity(2 + flow_value.flownode_ids.len() * (source_table_ids.len() + 1));
/// Builds flow name key
// Builds flow name key
let flow_name = FlowNameKey::new(&flow_value.catalog_name, &flow_value.flow_name);
keys.push(flow_name.to_bytes());
/// Builds flow value key
// Builds flow value key
let flow_info_key = FlowInfoKey::new(flow_id);
keys.push(flow_info_key.to_bytes());
/// Builds flownode flow keys & table flow keys
// Builds flownode flow keys & table flow keys
flow_value
.flownode_ids
.iter()
@@ -310,7 +310,6 @@ mod tests {
}
fn test_flow_info_value(
flow_id: FlowId,
flow_name: &str,
flownode_ids: BTreeMap<FlowPartitionId, FlownodeId>,
source_table_ids: Vec<TableId>,
@@ -339,8 +338,7 @@ mod tests {
let mem_kv = Arc::new(MemoryKvBackend::default());
let flow_metadata_manager = FlowMetadataManager::new(mem_kv.clone());
let flow_id = 10;
let flow_value =
test_flow_info_value(flow_id, "flow", [(0, 1u64)].into(), vec![1024, 1025, 1026]);
let flow_value = test_flow_info_value("flow", [(0, 1u64)].into(), vec![1024, 1025, 1026]);
flow_metadata_manager
.create_flow_metadata(flow_id, flow_value.clone())
.await
@@ -380,9 +378,7 @@ mod tests {
let mem_kv = Arc::new(MemoryKvBackend::default());
let flow_metadata_manager = FlowMetadataManager::new(mem_kv);
let flow_id = 10;
-let catalog_name = "greptime";
-let flow_value =
-    test_flow_info_value(flow_id, "flow", [(0, 1u64)].into(), vec![1024, 1025, 1026]);
+let flow_value = test_flow_info_value("flow", [(0, 1u64)].into(), vec![1024, 1025, 1026]);
flow_metadata_manager
.create_flow_metadata(flow_id, flow_value.clone())
.await
@@ -401,8 +397,7 @@ mod tests {
let flow_metadata_manager = FlowMetadataManager::new(mem_kv);
let flow_id = 10;
let catalog_name = "greptime";
-let flow_value =
-    test_flow_info_value(flow_id, "flow", [(0, 1u64)].into(), vec![1024, 1025, 1026]);
+let flow_value = test_flow_info_value("flow", [(0, 1u64)].into(), vec![1024, 1025, 1026]);
flow_metadata_manager
.create_flow_metadata(flow_id, flow_value.clone())
.await
@@ -436,9 +431,7 @@ mod tests {
let mem_kv = Arc::new(MemoryKvBackend::default());
let flow_metadata_manager = FlowMetadataManager::new(mem_kv.clone());
let flow_id = 10;
-let catalog_name = "greptime";
-let flow_value =
-    test_flow_info_value(flow_id, "flow", [(0, 1u64)].into(), vec![1024, 1025, 1026]);
+let flow_value = test_flow_info_value("flow", [(0, 1u64)].into(), vec![1024, 1025, 1026]);
flow_metadata_manager
.create_flow_metadata(flow_id, flow_value.clone())
.await

View File

@@ -141,6 +141,26 @@ impl FlowInfoValue {
pub fn source_table_ids(&self) -> &[TableId] {
&self.source_table_ids
}
pub fn flow_name(&self) -> &String {
&self.flow_name
}
pub fn sink_table_name(&self) -> &TableName {
&self.sink_table_name
}
pub fn raw_sql(&self) -> &String {
&self.raw_sql
}
pub fn expire_after(&self) -> Option<i64> {
self.expire_after
}
pub fn comment(&self) -> &String {
&self.comment
}
}
pub type FlowInfoManagerRef = Arc<FlowInfoManager>;
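The new getters expose the remaining FlowInfoValue fields read-only. A minimal sketch of a consumer, assuming `FlowInfoValue` is in scope and `TableName` implements `Debug` (the `describe_flow` helper is hypothetical, not part of this change):

// Hypothetical helper: render a one-line summary of a flow.
fn describe_flow(flow: &FlowInfoValue) -> String {
    format!(
        "flow '{}' -> sink {:?}, expire_after: {:?}, comment: '{}'",
        flow.flow_name(),       // &String
        flow.sink_table_name(), // &TableName, shown via Debug
        flow.expire_after(),    // Option<i64>
        flow.comment(),         // &String
    )
}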

View File

@@ -12,9 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use std::sync::Arc;
-use api::v1::flow::flow_server::Flow;
use lazy_static::lazy_static;
use regex::Regex;
use serde::{Deserialize, Serialize};
@@ -23,9 +20,7 @@ use snafu::OptionExt;
use crate::error::{self, Result};
use crate::key::flow::FlowScoped;
use crate::key::txn_helper::TxnOpGetResponseSet;
-use crate::key::{
-    txn_helper, DeserializedValueWithBytes, FlowId, MetaKey, TableMetaValue, NAME_PATTERN,
-};
+use crate::key::{DeserializedValueWithBytes, FlowId, MetaKey, TableMetaValue, NAME_PATTERN};
use crate::kv_backend::txn::Txn;
use crate::kv_backend::KvBackendRef;
@@ -50,11 +45,13 @@ impl<'a> FlowNameKey<'a> {
FlowNameKey(FlowScoped::new(inner))
}
#[cfg(test)]
/// Returns the catalog.
pub fn catalog(&self) -> &str {
self.0.catalog_name
}
#[cfg(test)]
/// Returns the `flow_name`.
pub fn flow_name(&self) -> &str {
self.0.flow_name
@@ -143,8 +140,6 @@ impl FlowNameValue {
}
}
-pub type FlowNameManagerRef = Arc<FlowNameManager>;
/// The manager of [FlowNameKey].
pub struct FlowNameManager {
kv_backend: KvBackendRef,

View File

@@ -79,6 +79,7 @@ impl FlownodeFlowKey {
self.0.flow_id
}
#[cfg(test)]
/// Returns the [FlownodeId].
pub fn flownode_id(&self) -> FlownodeId {
self.0.flownode_id
@@ -150,8 +151,6 @@ impl<'a> MetaKey<'a, FlownodeFlowKeyInner> for FlownodeFlowKeyInner {
}
}
-pub type FlownodeFlowManagerRef = Arc<FlownodeFlowManager>;
/// The manager of [FlownodeFlowKey].
pub struct FlownodeFlowManager {
kv_backend: KvBackendRef,

View File

@@ -57,6 +57,17 @@ pub struct SchemaNameValue {
pub ttl: Option<Duration>,
}
impl Display for SchemaNameValue {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
if let Some(ttl) = self.ttl {
let ttl = humantime::format_duration(ttl);
write!(f, "ttl='{ttl}'")?;
}
Ok(())
}
}
impl TryFrom<&HashMap<String, String>> for SchemaNameValue {
type Error = Error;
@@ -233,6 +244,17 @@ mod tests {
use super::*;
use crate::kv_backend::memory::MemoryKvBackend;
#[test]
fn test_display_schema_value() {
let schema_value = SchemaNameValue { ttl: None };
assert_eq!("", schema_value.to_string());
let schema_value = SchemaNameValue {
ttl: Some(Duration::from_secs(9)),
};
assert_eq!("ttl='9s'", schema_value.to_string());
}
#[test]
fn test_serialization() {
let key = SchemaNameKey::new("my-catalog", "my-schema");

View File

@@ -16,8 +16,9 @@ use std::sync::Arc;
use api::region::RegionResponse;
use api::v1::flow::{FlowRequest, FlowResponse};
-use api::v1::region::{InsertRequests, QueryRequest, RegionRequest};
+use api::v1::region::{InsertRequests, RegionRequest};
pub use common_base::AffectedRows;
+use common_query::request::QueryRequest;
use common_recordbatch::SendableRecordBatchStream;
use crate::error::Result;

View File

@@ -13,10 +13,14 @@
// limitations under the License.
use std::fmt::{Display, Formatter};
use std::sync::Arc;
use api::v1::meta::Peer as PbPeer;
use serde::{Deserialize, Serialize};
use crate::error::Error;
use crate::{ClusterId, DatanodeId, FlownodeId};
#[derive(Debug, Default, Clone, Hash, Eq, PartialEq, Deserialize, Serialize)]
pub struct Peer {
/// Node identifier. Unique in a cluster.
@@ -64,3 +68,50 @@ impl Display for Peer {
write!(f, "peer-{}({})", self.id, self.addr)
}
}
/// Can query a peer given a node id.
#[async_trait::async_trait]
pub trait PeerLookupService {
async fn datanode(&self, cluster_id: ClusterId, id: DatanodeId) -> Result<Option<Peer>, Error>;
async fn flownode(&self, cluster_id: ClusterId, id: FlownodeId) -> Result<Option<Peer>, Error>;
}
pub type PeerLookupServiceRef = Arc<dyn PeerLookupService + Send + Sync>;
/// Always returns `Peer::new(0, "")` for any query.
pub struct StandalonePeerLookupService {
default_peer: Peer,
}
impl StandalonePeerLookupService {
pub fn new() -> Self {
Self {
default_peer: Peer::new(0, ""),
}
}
}
impl Default for StandalonePeerLookupService {
fn default() -> Self {
Self::new()
}
}
#[async_trait::async_trait]
impl PeerLookupService for StandalonePeerLookupService {
async fn datanode(
&self,
_cluster_id: ClusterId,
_id: DatanodeId,
) -> Result<Option<Peer>, Error> {
Ok(Some(self.default_peer.clone()))
}
async fn flownode(
&self,
_cluster_id: ClusterId,
_id: FlownodeId,
) -> Result<Option<Peer>, Error> {
Ok(Some(self.default_peer.clone()))
}
}
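A minimal sketch of how a caller might consume the new trait through a `PeerLookupServiceRef` (illustrative only; cluster id `0` is a placeholder and the helper below is hypothetical):

// Hypothetical consumer: resolve a datanode and print it via the
// `Display` impl above (`peer-{id}({addr})`).
async fn print_datanode(lookup: &PeerLookupServiceRef, id: DatanodeId) {
    match lookup.datanode(0, id).await {
        Ok(Some(peer)) => println!("resolved {peer}"),
        Ok(None) => println!("datanode {id} is unknown"),
        Err(e) => eprintln!("lookup failed: {e}"),
    }
}

In standalone mode, `Arc::new(StandalonePeerLookupService::new())` satisfies the same interface and always resolves to `peer-0()`.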

View File

@@ -16,8 +16,9 @@ use std::sync::Arc;
use api::region::RegionResponse;
use api::v1::flow::{FlowRequest, FlowResponse};
-use api::v1::region::{InsertRequests, QueryRequest, RegionRequest};
+use api::v1::region::{InsertRequests, RegionRequest};
pub use common_base::AffectedRows;
+use common_query::request::QueryRequest;
use common_recordbatch::SendableRecordBatchStream;
use crate::cache_invalidator::DummyCacheInvalidator;
@@ -32,10 +33,11 @@ use crate::kv_backend::KvBackendRef;
use crate::node_manager::{
Datanode, DatanodeRef, Flownode, FlownodeRef, NodeManager, NodeManagerRef,
};
-use crate::peer::Peer;
+use crate::peer::{Peer, PeerLookupService, StandalonePeerLookupService};
use crate::region_keeper::MemoryRegionKeeper;
use crate::sequence::SequenceBuilder;
use crate::wal_options_allocator::WalOptionsAllocator;
+use crate::{ClusterId, DatanodeId, FlownodeId};
#[async_trait::async_trait]
pub trait MockDatanodeHandler: Sync + Send + Clone {
@@ -179,5 +181,19 @@ pub fn new_ddl_context_with_kv_backend(
table_metadata_manager,
flow_metadata_allocator,
flow_metadata_manager,
peer_lookup_service: Arc::new(StandalonePeerLookupService::new()),
}
}
pub struct NoopPeerLookupService;
#[async_trait::async_trait]
impl PeerLookupService for NoopPeerLookupService {
async fn datanode(&self, _cluster_id: ClusterId, id: DatanodeId) -> Result<Option<Peer>> {
Ok(Some(Peer::empty(id)))
}
async fn flownode(&self, _cluster_id: ClusterId, id: FlownodeId) -> Result<Option<Peer>> {
Ok(Some(Peer::empty(id)))
}
}

View File

@@ -87,6 +87,11 @@ impl WalOptionsAllocator {
}
}
}
/// Returns true if it's the remote WAL.
pub fn is_remote_wal(&self) -> bool {
matches!(&self, WalOptionsAllocator::Kafka(_))
}
}
/// Allocates wal options for each region. The allocated wal options are encoded immediately.
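A sketch of the branching this check presumably enables for callers (illustrative only; the comments name hypothetical follow-up steps, not actual APIs):

// Illustrative: gate remote-WAL-specific work behind the new check.
fn after_wal_alloc(allocator: &WalOptionsAllocator) {
    if allocator.is_remote_wal() {
        // Kafka-backed WAL: e.g. verify topics exist (hypothetical step).
    } else {
        // Local WAL: nothing remote to reconcile.
    }
}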

View File

@@ -179,7 +179,7 @@ impl StateStore for ObjectStateStore {
))
})
.context(ListStateSnafu { path: key })?;
-yield (key.into(), value);
+yield (key.into(), value.to_vec());
}
}
});

View File

@@ -27,6 +27,7 @@ snafu.workspace = true
sqlparser.workspace = true
sqlparser_derive = "0.1"
statrs = "0.16"
store-api.workspace = true
[dev-dependencies]
common-base.workspace = true

View File

@@ -17,6 +17,7 @@ pub mod error;
mod function;
pub mod logical_plan;
pub mod prelude;
pub mod request;
mod signature;
#[cfg(any(test, feature = "testing"))]
pub mod test_util;

View File

@@ -22,7 +22,7 @@ use std::sync::Arc;
use datafusion::arrow::datatypes::Field;
use datafusion_common::Result;
-use datafusion_expr::function::AccumulatorArgs;
+use datafusion_expr::function::{AccumulatorArgs, StateFieldsArgs};
use datafusion_expr::{
Accumulator, AccumulatorFactoryFunction, AggregateUDF as DfAggregateUdf, AggregateUDFImpl,
};
@@ -129,13 +129,13 @@ impl AggregateUDFImpl for DfUdafAdapter {
(self.accumulator)(acc_args)
}
-fn state_fields(&self, name: &str, _: ArrowDataType, _: Vec<Field>) -> Result<Vec<Field>> {
+fn state_fields(&self, args: StateFieldsArgs) -> Result<Vec<Field>> {
let state_types = self.creator.state_types()?;
let fields = state_types
.into_iter()
.enumerate()
.map(|(i, t)| {
-let name = format!("{name}_{i}");
+let name = format!("{}_{i}", args.name);
Field::new(name, t.as_arrow_type(), true)
})
.collect::<Vec<_>>();
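This tracks a DataFusion API migration: `AggregateUDFImpl::state_fields` now receives a single `StateFieldsArgs` bundle instead of positional arguments, with the UDAF name read from `args.name`. A sketch of the same pattern in a hand-written state schema, assuming `ArrowDataType` aliases arrow's `DataType` as in the surrounding code (the sum/count layout is hypothetical):

// Hypothetical: state schema for an average-style aggregate,
// with fields named after the aggregate as in the adapter above.
fn avg_state_fields(args: &StateFieldsArgs) -> Vec<Field> {
    vec![
        Field::new(format!("{}_sum", args.name), ArrowDataType::Float64, true),
        Field::new(format!("{}_count", args.name), ArrowDataType::UInt64, true),
    ]
}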

View File

@@ -108,6 +108,10 @@ impl ScalarUDFImpl for DfUdfAdapter {
fn invoke(&self, args: &[DfColumnarValue]) -> datafusion_common::Result<DfColumnarValue> {
(self.fun)(args)
}
fn invoke_no_args(&self, number_rows: usize) -> datafusion_common::Result<DfColumnarValue> {
Ok((self.fun)(&[])?.into_array(number_rows)?.into())
}
}
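`invoke_no_args` covers zero-argument scalar UDFs: with no input columns to infer a row count from, DataFusion passes `number_rows` explicitly, and the adapter evaluates the wrapped function on an empty argument slice and broadcasts the result into an array of that length. Roughly, for a table with 1024 rows (a hypothetical sketch, assuming the adapter is reachable from the caller):

// Hypothetical: what the engine effectively does for a zero-arg UDF.
fn eval_zero_arg(udf: &DfUdfAdapter) -> datafusion_common::Result<DfColumnarValue> {
    // No inputs, explicit row count; the result is an Array of length 1024.
    udf.invoke_no_args(1024)
}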
impl From<ScalarUdf> for DfScalarUDF {

Some files were not shown because too many files have changed in this diff.