Compare commits


234 Commits

Author SHA1 Message Date
zyy17
ddc7a80f56 fix: add serialize_ignore_column_ids() to fix failure to deserialize region options from a JSON string (#4229)
* fix: add serialize_ignore_column_ids() to fix failure to deserialize region options from a JSON string

* refactor: return empty vector if column_id is empty
2024-06-30 09:59:14 +00:00
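
The fix above suggests a common serde pattern. Below is a minimal, hypothetical Rust sketch (not GreptimeDB's actual code) of how ignoring `column_ids` on serialization keeps region options round-trippable through JSON, with a missing field falling back to an empty vector as the follow-up commit mentions. `RegionOptions` here is an illustrative stand-in.

```rust
use serde::{Deserialize, Serialize, Serializer};

// Custom serializer: always emit an empty list so the serialized JSON
// never carries column ids that a later deserialization could trip on.
fn serialize_ignore_column_ids<S>(_ids: &Vec<u32>, serializer: S) -> Result<S::Ok, S::Error>
where
    S: Serializer,
{
    Vec::<u32>::new().serialize(serializer)
}

#[derive(Serialize, Deserialize, Debug)]
struct RegionOptions {
    #[serde(serialize_with = "serialize_ignore_column_ids", default)]
    column_ids: Vec<u32>,
}

fn main() {
    let opts = RegionOptions { column_ids: vec![1, 2, 3] };
    let json = serde_json::to_string(&opts).unwrap();
    assert_eq!(json, r#"{"column_ids":[]}"#);

    // JSON written without the field still deserializes, yielding an empty vector.
    let parsed: RegionOptions = serde_json::from_str("{}").unwrap();
    assert!(parsed.column_ids.is_empty());
}
```
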
Ruihang Xia
a7aa556763 feat: output multiple partition in MergeScanExec (#4227)
* feat: output multiple partition in MergeScanExec

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix range manipulate

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-06-28 13:45:22 +00:00
Lei, HUANG
ef935a1de6 feat!: reduce sorted runs during compaction (#3702)
* feat: add functions to find and merge sorted runs

* chore: refactor code

* chore: remove some duplicates

* chore: remove one clone

* refactor: change max_active_window_files to max_active_window_runs

* feat: integrate with sorted runs

* fix: unit tests

* feat: limit num of sorted runs during compaction

* fix: some test

* fix: some cr comments

* feat: use smallvec

* chore: rebase main

* feat/reduce-sorted-runs:
 Refactor compaction logic and update test configurations

 - Refactored `merge_all_runs` function to use `sort_ranged_items` for sorting.
 - Improved item merging logic by iterating with `into_iter` and handling overlaps.
 - Updated test configurations to use `max_active_window_runs` instead of `max_active_window_files` for consistency.

---------

Co-authored-by: tison <wander4096@gmail.com>
2024-06-28 08:17:30 +00:00
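
As a rough illustration of what "sorted runs" means in this PR: a run is a sequence of SST files whose time ranges do not overlap, and compaction merges files when the number of runs in the active window exceeds `max_active_window_runs`. The greedy counting sketch below is illustrative only; the types and the assignment strategy are assumptions, not mito2's implementation.

```rust
#[derive(Clone, Copy, Debug)]
struct FileRange {
    start: i64, // inclusive min timestamp of the file
    end: i64,   // inclusive max timestamp of the file
}

// One greedy way to count runs: sort by start, then place each range into
// the first run whose last range it does not overlap.
fn count_sorted_runs(mut files: Vec<FileRange>) -> usize {
    files.sort_by_key(|f| (f.start, f.end));
    // Each entry holds the end of the last range placed in that run.
    let mut run_ends: Vec<i64> = Vec::new();
    for f in files {
        match run_ends.iter_mut().find(|end| **end < f.start) {
            Some(end) => *end = f.end,    // extends an existing non-overlapping run
            None => run_ends.push(f.end), // overlaps every run: open a new one
        }
    }
    run_ends.len()
}

fn main() {
    let files = vec![
        FileRange { start: 0, end: 10 },
        FileRange { start: 5, end: 15 },  // overlaps the first file
        FileRange { start: 11, end: 20 }, // appends to the first run
    ];
    assert_eq!(count_sorted_runs(files), 2);
}
```
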
Weny Xu
352cc9ddde test: add e2e test for region failover (#4188)
* test: add e2e test for region failover

* chore: add ci cfg

* chore: reduce parallelism to 8

* fix(ci): enable region failure

* chore: set sqlx LogLevel to Off

* refactor: move help functions to utils
2024-06-28 06:49:41 +00:00
discord9
b6585e3581 refactor(flow): make from_substrait_* async & worker handle refactor (#4210)
* refactor: use oneshot to receive result

* refactor: make from_substrait_* async

* refactor: remove serde for plan & expr
2024-06-27 17:17:46 +00:00
Yingwen
10b7a3d24d feat: Implements merge_mode region options (#4208)
* feat: add update_mode to region options

* test: add test

* feat: last not null iter

* feat: time series last not null

* feat: partition tree update mode

* feat: partition tree

* fix: last not null iter slice

* test: add test for compaction

* test: use second resolution

* style: fix clippy

* chore: merge two lines

Co-authored-by: Jeremyhi <jiachun_feng@proton.me>

* chore: address CR comments

* refactor: UpdateMode -> MergeMode

* refactor: LastNotNull -> LastNonNull

* chore: return None earlier

* feat: validate region options

make merge mode optional and use the default when it is None

* test: fix tests

---------

Co-authored-by: Jeremyhi <jiachun_feng@proton.me>
2024-06-27 07:52:58 +00:00
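
For context on the `LastNonNull` merge mode named in the commits above: when two rows share the same key, each field keeps the latest non-null value rather than the whole latest row. A minimal sketch, with a simplified row representation that is an assumption rather than mito2's `Batch` type:

```rust
// Merge two versions of a row field-by-field: prefer the newer value,
// but fall back to the older one when the newer value is null.
fn merge_last_non_null(older: &[Option<i64>], newer: &[Option<i64>]) -> Vec<Option<i64>> {
    older
        .iter()
        .zip(newer.iter())
        .map(|(old, new)| new.or(*old))
        .collect()
}

fn main() {
    let older = [Some(1), Some(2), None];
    let newer = [None, Some(20), Some(30)];
    // Field 0 keeps the older non-null value; fields 1 and 2 take the newer ones.
    assert_eq!(merge_last_non_null(&older, &newer), vec![Some(1), Some(20), Some(30)]);
}
```
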
Eugene Tolbakov
8702066967 feat(sql): add casting support for shortened intervals (#4220)
* feat(sql): add casting support for shortened intervals

* chore(sql): apply CR suggestion, minor renamings
2024-06-26 22:07:09 +00:00
Jeremyhi
df0fff2f2c feat(servers): make http timeout and body limit optional (#4217)
* feat(servers): make http timeout and body limit optional

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add comment

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* chore: make config-docs

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
Co-authored-by: Ruihang Xia <waynestxia@gmail.com>
2024-06-26 06:14:14 +00:00
dennis zhuang
a779cb36ec fix: wrong frontend registration address (#4199)
* fix: frontend registration address is wrong, #4186

* fix: license header

* chore: adds hostname to frontend grpc

* fix: forgot run make config-docs

* chore: warn when using bind_addr

* fix: flow node heartbeat carrying address
2024-06-26 06:13:07 +00:00
zyy17
948c8695d0 refactor: add SerializedPickerOutput and field modification of CompactorRequest (#4198)
* refactor: remove compaction_options and use RegionOptions type for region_options

* refactor: add file_purger field in CompactionRegion

* refactor: add SerializedPickerOutput

* refactor: rename CompactorRequest to OpenCompactionRegionRequest and remove PickerOutput

* refactor: use &PickerOutput instead of clone()
2024-06-25 13:04:07 +00:00
Ruihang Xia
4d4a6cd265 feat: validate partition rule on create table (#4213)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-06-25 12:55:01 +00:00
Zhenchi
5dde148b3d feat(puffin): implement CachedPuffinReader (#4209)
* feat(puffin): implement CachedPuffinReader

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* chore: next PR to introduce CachedPuffinManager

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* chore: rename

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* address comments

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

---------

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>
2024-06-25 12:27:06 +00:00
Weny Xu
8cbe7166b0 refactor: migrate region failover implementation to region migration (#4172)
* refactor: migrate region failover implementation to region migration

* fix: use HEARTBEAT_INTERVAL_MILLIS as lease secs

* fix: return false if leader is downgraded

* fix: only remove failure detector after submitting procedure successfully

* feat: ignore dropped region

* refactor: retrieve table routes in batches

* refactor: disable region failover on local WAL implementation

* fix: move the guard into procedure

* feat: use real peer addr

* feat: use interval instead of sleep

* chore: rename `HeartbeatSender` to `HeartbeatAcceptor`

* chore: apply suggestions from CR

* chore: reduce duplicate code

* chore: apply suggestions from CR

* feat: lookup peer addr

* chore: add comments

* chore: apply suggestions from CR

* chore: apply suggestions from CR
2024-06-25 11:58:17 +00:00
Yingwen
f5ac158605 docs: remove outdated docs (#4205)
* docs: remove outdated docs

* ci: align ci

* chore: Revert "ci: align ci"

This reverts commit 2c3c0eed7e.

* ci: fix docs ci
2024-06-25 09:46:30 +00:00
Lei, HUANG
120447779c feat: bulk memtable codec (#4163)
* feat: introduce bulk memtable encoder/decoder

* chore: rebase main

* chore: resolve some comments

* refactor: only carries time unit in ArraysSorter

* fix: some comments
2024-06-25 09:02:20 +00:00
discord9
82f6373574 feat: FlownodeClient (#4206)
* feat: FlownodeClient

* chore: remove wrong doc

* fix: debug impl for NodeClients

* chore: rename `FlownodeClient` to `FlowRequester`
2024-06-25 08:40:24 +00:00
Zhenchi
1e815dddf1 feat(puffin): implement CachedPuffinWriter (#4203)
* feat(puffin): support lz4 compression for footer

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* feat(puffin): introduce puffin manager trait

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* chore: polish

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* feat(puffin): implement CachedPuffinWriter

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* chore: polish

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

---------

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>
2024-06-25 08:00:48 +00:00
Weny Xu
b2f61aa1cf fix: format error correctly (#4204)
* chore: remove TODO comments

* fix: format error correctly
2024-06-25 07:56:13 +00:00
Ruihang Xia
a1e2612bbf fix: align workflows again for the troublesome GHA (#4196)
* fix: align workflows again for the troublesome GHA

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* unify

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-06-25 06:52:01 +00:00
Yingwen
9aaf7d79bf feat: Dedup strategy that keeps the last not null field (#4184)
* feat: dedup strategy: last not null

* fix: fix tests

* fix: fix single batch

* chore: warning

* chore: skip has_null check

* refactor: rename fields

* fix: merge last fields may not reset builder

* chore: clear before filter deleted

* chore: remove debug logs

* chore: Update comment

Co-authored-by: Ruihang Xia <waynestxia@gmail.com>

---------

Co-authored-by: Ruihang Xia <waynestxia@gmail.com>
2024-06-25 06:38:48 +00:00
Yingwen
4a4237115a test: wait until checkpoint finish (#4202) 2024-06-25 06:21:19 +00:00
Eugene Tolbakov
840f81e0fd fix(sql): improve compound signed number processing (#4200) 2024-06-25 04:01:46 +00:00
Eugene Tolbakov
cdd4baf183 feat(sql): improve interval expression, support shortened version (#4182)
* feat(sql): improve interval expression, support shortened version

* fix(sql): remove accidental change of sqlness assertion

* fix(sql): address CR feedback, add more tests

* chore(sql): add more tests
2024-06-24 20:29:34 +00:00
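
The shortened-interval feature can be pictured as expanding abbreviations like `1h5m` into the verbose interval form before parsing. The sketch below is a guess at the expansion step; the actual unit table and pluralization rules in the PR may differ.

```rust
// Expand a shortened interval string ("1h5m") into the verbose form
// ("1 hours 5 minutes"). The abbreviation table here is illustrative.
fn expand_interval(short: &str) -> String {
    let mut out: Vec<String> = Vec::new();
    let mut num = String::new();
    for ch in short.chars() {
        if ch.is_ascii_digit() {
            num.push(ch);
        } else {
            let unit = match ch {
                'y' => "years",
                'd' => "days",
                'h' => "hours",
                'm' => "minutes",
                's' => "seconds",
                _ => continue, // ignore unknown characters in this sketch
            };
            out.push(format!("{num} {unit}"));
            num.clear();
        }
    }
    out.join(" ")
}

fn main() {
    assert_eq!(expand_interval("1h5m"), "1 hours 5 minutes");
}
```
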
Zhenchi
4b42c7b840 feat(puffin): introduce puffin manager trait (#4195)
* feat(puffin): support lz4 compression for footer

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* feat(puffin): introduce puffin manager trait

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* chore: polish

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

---------

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>
2024-06-24 16:02:52 +00:00
discord9
a44fe627ce feat: heartbeat task&peer lookup in proc (#4179)
* feat: heartbeat task

* feat: use real flow peer allocator when building

* feat: add peer look up in ddl context

* fix: drop flow test

* refactor: per review(WIP)

* refactor: not check if is alive

* refactor: per review

* refactor: remove useless `reset`

* refactor: per bot advices

* refactor: alive peer

* chore: bot review
2024-06-24 15:06:33 +00:00
taobo
77904adaaf fix: region_peers returns same region_id for multi logical tables (#4190)
* fix: `region_peers` returns same region_id for multi logical tables

* test: add sqlness test for information_schema.region_peers

* refactor: region_peers sqlness
2024-06-24 14:12:36 +00:00
Zhenchi
07cbabab7b feat(puffin): support lz4 compression for footer (#4194)
* feat(puffin): support lz4 compression for footer

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* address comments

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

---------

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>
2024-06-24 11:39:03 +00:00
zyy17
ea7c17089f refactor: add region_dir in CompactionRegion (#4187) 2024-06-24 08:25:52 +00:00
discord9
517917453d fix(flow): fix call df func bug&sqlness test (#4165)
* tests: flow sqlness tests

* tests: WIP df func test

* fix: use schema before expand for transform expr

* tests: some basic flow tests

* tests: unit test

* chore: dep use rev not patch

* fix: weird sqlness error?

* refactor: per review

* fix: temp sqlness bug

* fix: use fixed sqlness

* fix: impl drop as async shutdown

* refactor: per bot's review

* tests: drop worker handler both sync/async

* docs: add rationale for test

* refactor: per review

* chore: fmt
2024-06-24 07:52:45 +00:00
zyy17
0139a70549 refactor: make RegionOptions and MergeOutput serializable (#4180)
* chore: make RegionOptions serializable and add region_dir in CompactionRegion

* refactor: make `PickerOutput` and `MergeOutput` serializable and deserializable

* refactor: remove Serialize and Deserialize from PickerOutput

* chore: revert changes for file.rs

* chore: revert changes for compactor.rs and compaction.rs

---------

Co-authored-by: tison <wander4096@gmail.com>
2024-06-24 07:37:53 +00:00
tison
5566dd72f2 chore: highlight our committers in CONTRIBUTING.md (#4189)
* chore: highlight our committers in CONTRIBUTING.md

Signed-off-by: tison <wander4096@gmail.com>

* chore: apply suggestion

Co-authored-by: coderabbitai[bot] <136622811+coderabbitai[bot]@users.noreply.github.com>

* chore: apply suggestion

Co-authored-by: coderabbitai[bot] <136622811+coderabbitai[bot]@users.noreply.github.com>

* Trigger CI

Signed-off-by: tison <wander4096@gmail.com>

---------

Signed-off-by: tison <wander4096@gmail.com>
Co-authored-by: dennis zhuang <killme2008@gmail.com>
Co-authored-by: coderabbitai[bot] <136622811+coderabbitai[bot]@users.noreply.github.com>
2024-06-22 08:23:30 +00:00
ZonaHe
dea33a7aaf feat: update dashboard to v0.5.2 (#4185)
Co-authored-by: ZonaHex <ZonaHex@users.noreply.github.com>
2024-06-21 14:32:46 +00:00
Weny Xu
15ad9f2f6f fix: register logical regions after catching up (#4176)
* fix: register logical regions after catching up

* test: add metric table migration test

* chore: apply suggestions from CR
2024-06-21 10:30:18 +00:00
Ruihang Xia
fce65c97e3 feat: make RegionScanner aware of PartitionRange (#4170)
* define PartitionRange

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add optimizer rule

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* implement interfaces

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* impl aggr stream

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add fallback method

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix tests

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix tests

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix typo

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* update sqlness result

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add document and rename struct

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add more comments

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix typo

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-06-21 09:54:22 +00:00
Ruihang Xia
ac574b66ab feat: add num_rows and num_row_groups to manifest (#4183)
* feat: add num_rows and num_row_groups to manifest

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add document

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-06-21 07:15:13 +00:00
Weny Xu
1e52ba325f feat: introduce chaos crds (#4173)
feat: add chaos-mesh crds
2024-06-21 06:28:51 +00:00
Yohan Wal
b739c9fd10 feat: PREPARE and EXECUTE statement from mysql client (#4125)
* feat: prepare stmt in mysql client

* feat: execute stmt in mysql client

* fix: handle parameters properly

* refactor: use existing funcs to convert expr to scalar value

* refactor: use uuid strings as stmt_key for queries from COM_PREPARE packet

* refactor: take prepare and execute parser as submodule

* test: add unit test for converting expr to scalar value

* feat: deallocate stmt in mysql client

* chore: comments and duplicates

---------

Co-authored-by: dennis zhuang <killme2008@gmail.com>
2024-06-21 02:02:57 +00:00
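
One detail from this PR's commit list, the uuid-keyed statement registry, can be sketched as below. The `PreparedStmtRegistry` type is hypothetical (the server's real registry also tracks parameter types), and the sketch assumes the `uuid` crate with its `v4` feature enabled.

```rust
use std::collections::HashMap;
use uuid::Uuid;

// Statements prepared via the text protocol are keyed by fresh UUID strings
// rather than numeric ids, per the commit message.
#[derive(Default)]
struct PreparedStmtRegistry {
    stmts: HashMap<String, String>, // stmt_key -> SQL text
}

impl PreparedStmtRegistry {
    fn prepare(&mut self, sql: &str) -> String {
        let key = Uuid::new_v4().to_string();
        self.stmts.insert(key.clone(), sql.to_string());
        key
    }

    fn execute(&self, key: &str) -> Option<&String> {
        self.stmts.get(key)
    }

    fn deallocate(&mut self, key: &str) {
        self.stmts.remove(key);
    }
}

fn main() {
    let mut reg = PreparedStmtRegistry::default();
    let key = reg.prepare("SELECT * FROM t WHERE id = ?");
    assert!(reg.execute(&key).is_some());
    reg.deallocate(&key);
    assert!(reg.execute(&key).is_none());
}
```
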
Ruihang Xia
21c89f3247 perf: optimize RecordBatch to HttpOutput conversion (#4178)
* add benchmark

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* save 70ms

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add profiler

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* save 50ms

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* save 160ms

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* format toml file

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix license header

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix windows build

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-06-20 12:33:58 +00:00
Jeremyhi
5bcd7a14bb feat: use the write runtime to handle the heartbeats (#4177) 2024-06-20 08:45:07 +00:00
dennis zhuang
4306cba866 feat: show database options (#4174)
* test: test create table with database ttl

* feat: show database options

* fix: comment

* chore: apply suggestion

Co-authored-by: Jeremyhi <jiachun_feng@proton.me>

* chore: fix CR comments and refactor

* chore: style

Co-authored-by: Weny Xu <wenymedia@gmail.com>

---------

Co-authored-by: Jeremyhi <jiachun_feng@proton.me>
Co-authored-by: Weny Xu <wenymedia@gmail.com>
2024-06-20 04:21:58 +00:00
Jeremyhi
4c3d4af127 feat: register flow node (#4166)
* feat: rename keys.rs to key.rs

* feat: refactor datanode keys

* feat: add flownode key

* feat: keep flownode's lease info in metasrv

* feat: flow selector

* feat: impl_try_from_lease_key and impl_from_str_lease_key to simplify code
2024-06-20 03:46:19 +00:00
localhost
48a0f39b19 chore: enhance the add-pipeline HTTP API return data (#4167)
* chore: enhance the add-pipeline HTTP API return data

* chore: replacing hard-coded header value
2024-06-20 02:19:31 +00:00
ZonaHe
8abebad458 feat: update dashboard to v0.5.1 (#4171)
Co-authored-by: ZonaHex <ZonaHex@users.noreply.github.com>
2024-06-20 01:46:54 +00:00
LFC
cc2f7efb98 chore: bump datafusion version to fix last_value regression (#4169)
* chore: bump datafusion version to fix `last_value` regression

* fix: resolve PR comments

* fix ci
2024-06-19 07:47:17 +00:00
taobo
22d12683b4 refactor!: unify FrontendOptions and DatanodeOptions by using GrpcOptions (#4088)
* refactor: move GrpcOptions to servers/grpc

* fix: optimize code

* fix: docs

* refactor: move DatanodeOptions.rpc_hostname to grpc.hostname

* fix: merge main

* refactor code impl

test: add test_depreacted_cli_options unit test

* Update src/servers/src/grpc.rs

Co-authored-by: Yingwen <realevenyag@gmail.com>

---------

Co-authored-by: Yingwen <realevenyag@gmail.com>
2024-06-18 22:45:38 +00:00
Yingwen
fe74efdafe feat: Implement memtable range (#4162)
* refactor: RangeBase

* feat: memtable range

* feat: scanner use mem range

* feat: remove base from mem range context

* feat: impl ranges for memtables

* chore: fix warnings

* refactor: make predicate cheap to clone

* refactor: MemRange -> MemtableRange

* feat: pub empty memtable to fix warnings

* test: fix sqlness result
2024-06-18 22:25:19 +00:00
discord9
cd9705ccd7 feat(flow): support datafusion scalar function (#4142)
* chore: call df function types

* feat: RelationDesc to DfSchema

* refactor: use RelationDesc instead of Type

* chore: WIP get to phy expr

* feat: custom deserialize

* chore: fmt

* refactor: renaming to DfScalarFunction

* feat: eval df func(untested)

* fix: had to spawn a thread for calling async

* chore: per review advices

* tests: test df scalar function
2024-06-18 12:34:38 +00:00
Weny Xu
ea2d067cf1 feat: implement the OrderedBatchProducer (#4134)
* feat: implement the `OrderedBatchProducer`

* test: add test of cancel safety

* chore: apply suggestions from CR

* chore: apply suggestions from CR

* refactor: simplify the `BackgroundProducerWorker`

* feat: implement the OrderedBatchProducer v2

* refactor: switch to `OrderedBatchProducer`

* chore: rename to `MAX_FLUSH_QUEUE_SIZE`

* refactor: switch to `OrderedBatchProducerV2`

* refactor: remove `OrderedBatchProducerV1`

* test: add tests

* refactor: make config configurable

* refactor: minor refactor

* chore: remove unused code

* chore: remove `benchmarks` crate

* chore: update config doc

* chore: remove unused comment

* refactor: refactor client registry

* refactor: rename `max_batch_size` to `max_batch_bytes`

* chore: use constant value

* chore: ensure serialized meta < ESTIMATED_META_SIZE

* chore: apply suggestions from CR

* chore: remove the `CHANNEL_SIZE`

* chore: apply suggestions from CR

* fix: ensure serialized meta < ESTIMATED_META_SIZE

* chore: apply suggestions from CR

* chore: apply suggestions from CR
2024-06-18 07:20:01 +00:00
Ning Sun
70d113a355 feat: update default size of bgworkers, add hbworkers (#4129)
* feat: update default size of bgworkers, add hbworkers

* feat: update frontend heartbeat as well

* chore: update sample config files and default settings

* chore: update config docs

* Revert "chore: update config docs"

This reverts commit 8107f4c120.

* Revert "chore: update sample config files and default settings"

This reverts commit f5ae701c8d.

* feat: use default heartbeat runtime size

* chore: update config docs
2024-06-18 06:18:37 +00:00
shuiyisong
cb657ae51e feat(pipeline): join processor (#4158)
* feat: add join processor

* test: add join simple test

* chore: fix header

* chore: update commit

Co-authored-by: dennis zhuang <killme2008@gmail.com>

* test: add more join test

* chore: fix lint

* chore: update comment

---------

Co-authored-by: dennis zhuang <killme2008@gmail.com>
2024-06-18 05:00:34 +00:00
Jeremyhi
141d017576 feat: enable metasrv to accept flownode's heartbeats (#4160)
* feat: make metasrv accept flownode's heartbeat

* chore: proto
2024-06-18 04:07:46 +00:00
yuanbohan
0fc18b6865 feat(pipeline): gsub processor (#4121)
* chore: add log http ingester scaffold

* chore: add some example code

* chore: add log inserter

* chore: add log handler file

* chore: add pipeline lib

* chore: import log handler

* chore: add pipelime http handler

* chore: add pipeline private table

* chore: add pipeline API

* chore: improve error handling

* chore: merge main

* chore: add multi content type support for log handler

* refactor: remove servers dep on pipeline

* refactor: move define_into_tonic_status to common-error

* refactor: bring in pipeline 3eb890c551b8d7f60c4491fcfec18966e2b210a4

* chore: fix typo

* refactor: bring in pipeline a95c9767d7056ab01dd8ca5fa1214456c6ffc72c

* chore: fix typo and license header

* refactor: move http event handler to a separate file

* chore: add test for pipeline

* chore: fmt

* refactor: bring in pipeline 7d2402701877901871dd1294a65ac937605a6a93

* refactor: move `pipeline_operator` to `pipeline` crate

* chore: minor update

* refactor: bring in pipeline 1711f4d46687bada72426d88cda417899e0ae3a4

* chore: add log

* chore: add log

* chore: remove open hook

* chore: minor update

* chore: fix fmt

* chore: minor update

* chore: rename desc for pipeline table

* refactor: remove updated_at in pipelines

* chore: add more content type support for log inserter api

* chore: introduce pipeline crate

* chore: update upload pipeline api

* chore: fix by pr commit

* chore: add some doc for pub fn/struct

* chore: some minor fixes

* chore: add pipeline version support

* chore: impl log pipeline version

* gsub processor

* chore: add test

* chore: update commit

Co-authored-by: dennis zhuang <killme2008@gmail.com>

---------

Co-authored-by: paomian <xpaomian@gmail.com>
Co-authored-by: shuiyisong <xixing.sys@gmail.com>
Co-authored-by: shuiyisong <113876041+shuiyisong@users.noreply.github.com>
Co-authored-by: dennis zhuang <killme2008@gmail.com>
2024-06-17 07:57:47 +00:00
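
A `gsub` processor is conventionally a regex find-and-replace over a string field. A minimal sketch under that assumption, using the `regex` crate; the real processor is configured from pipeline YAML rather than called directly like this:

```rust
use regex::Regex;

// Apply a regex replacement to a field value, the essence of a gsub processor.
// Pattern and replacement shown below are examples only.
fn gsub(value: &str, pattern: &str, replacement: &str) -> String {
    Regex::new(pattern)
        .expect("invalid pattern")
        .replace_all(value, replacement)
        .into_owned()
}

fn main() {
    assert_eq!(gsub("2024/06/17", "/", "-"), "2024-06-17");
}
```
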
yuanbohan
0aceebf0a3 feat(pipeline): transform support on_failure (#4123)
* chore: add log http ingester scaffold

* chore: add some example code

* chore: add log inserter

* chore: add log handler file

* chore: add pipeline lib

* chore: import log handler

* chore: add pipeline http handler

* chore: add pipeline private table

* chore: add pipeline API

* chore: improve error handling

* chore: merge main

* chore: add multi content type support for log handler

* refactor: remove servers dep on pipeline

* refactor: move define_into_tonic_status to common-error

* refactor: bring in pipeline 3eb890c551b8d7f60c4491fcfec18966e2b210a4

* chore: fix typo

* refactor: bring in pipeline a95c9767d7056ab01dd8ca5fa1214456c6ffc72c

* chore: fix typo and license header

* refactor: move http event handler to a separate file

* chore: add test for pipeline

* chore: fmt

* refactor: bring in pipeline 7d2402701877901871dd1294a65ac937605a6a93

* refactor: move `pipeline_operator` to `pipeline` crate

* chore: minor update

* refactor: bring in pipeline 1711f4d46687bada72426d88cda417899e0ae3a4

* chore: add log

* chore: add log

* chore: remove open hook

* chore: minor update

* chore: fix fmt

* chore: minor update

* chore: rename desc for pipeline table

* refactor: remove updated_at in pipelines

* chore: add more content type support for log inserter api

* chore: introduce pipeline crate

* chore: update upload pipeline api

* chore: fix by pr commit

* chore: add some doc for pub fn/struct

* chore: some minor fixes

* chore: add pipeline version support

* chore: impl log pipeline version

* transform on_failure

* chore: add test

* chore: move test to a separate file

* chore: add comment

---------

Co-authored-by: paomian <xpaomian@gmail.com>
Co-authored-by: shuiyisong <xixing.sys@gmail.com>
2024-06-17 06:56:31 +00:00
Yingwen
558272de61 refactor: Decouple dedup and merge (#4139)
* feat: remove dedup/filter deleted from merge reader

* feat: impl dedup reader

* feat: support filter deleted flag

* test: test dedup reader

* feat: remove put_only field

* chore: fix clippy

* feat: metrics

* test: test empty batch

* perf: optimize dedup strategy

Avoid iterating all timestamps.

* test: fix test

* feat: generic
2024-06-17 04:09:50 +00:00
LFC
f4a5a44549 refactor: make region manifest checkpoint ran in background (#4133)
* refactor: use in-database manifest as checkpoint instead of merging incremental files in object store

* refactor: make region manifest checkpoint ran in background

* reduce unnecessary metrics

* Update src/mito2/src/manifest/checkpointer.rs

Co-authored-by: Yingwen <realevenyag@gmail.com>

* resolve PR comments

* resolve PR comments

---------

Co-authored-by: Yingwen <realevenyag@gmail.com>
2024-06-17 03:47:18 +00:00
zyy17
5390603855 refactor: add Compactor trait to abstract the compaction (#4097)
* refactor: add Compactor trait

* chore: add compact() in Compactor trait and expose compaction module

* refactor: add CompactionRequest and open_compaction_region

* refactor: export the compaction api

* refactor: add DefaultCompactor::new_from_request

* refactor: no need to pass mito_config in open_compaction_region()

* refactor: CompactionRequest -> &CompactionRequest

* fix: typo

* docs: add docs for public apis

* refactor: remove 'Picker' from Compactor

* chore: add logs

* chore: change pub attribute for Picker

* refactor: remove do_merge_ssts()

* refactor: update comments

* refactor: use CompactionRegion argument in Picker

* chore: make compaction module public and remove unnessary clone

* refactor: move build_compaction_task() in CompactionScheduler{}

* chore: use  in open_compaction_region() and add some comments for public structure

* refactor: add 'manifest_dir()' in store-api

* refactor: move the default implementation to DefaultCompactor

* refactor: remove Options from MergeOutput

* chore: minor modification

* fix: clippy errors

* fix: unit test errors

* refactor: remove 'manifest_dir()' from store-api crate(already have one in opener)

* refactor: use 'region_dir' in CompactionRequest

* refactor: refine naming

* refactor: refine naming

* refactor: remove clone()

* chore: add comments

* refactor: add PickerOutput field in CompactorRequest
2024-06-17 03:03:47 +00:00
Ruihang Xia
a2e3532a57 docs: add guide for tsbs benchmark (#4151)
* docs: add guide for tsbs benchmark

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix typo

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-06-16 16:02:15 +08:00
Ruihang Xia
2faa6d6c97 ci: align docs with develop (#4152)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-06-15 16:33:57 +00:00
Weny Xu
d6392acd65 fix(sqlness): catch different format timestamp (#4149) 2024-06-15 11:13:01 +00:00
localhost
01e3a24cf7 feat: log ingestion support (#4014)
* chore: add log http ingester scaffold

* chore: add some example code

* chore: add log inserter

* chore: add log handler file

* chore: add pipeline lib

* chore: import log handler

* chore: add pipeline http handler

* chore: add pipeline private table

* chore: add pipeline API

* chore: improve error handling

* chore: merge main

* chore: add multi content type support for log handler

* refactor: remove servers dep on pipeline

* refactor: move define_into_tonic_status to common-error

* refactor: bring in pipeline 3eb890c551b8d7f60c4491fcfec18966e2b210a4

* chore: fix typo

* refactor: bring in pipeline a95c9767d7056ab01dd8ca5fa1214456c6ffc72c

* chore: fix typo and license header

* refactor: move http event handler to a separate file

* chore: add test for pipeline

* chore: fmt

* refactor: bring in pipeline 7d2402701877901871dd1294a65ac937605a6a93

* refactor: move `pipeline_operator` to `pipeline` crate

* chore: minor update

* refactor: bring in pipeline 1711f4d46687bada72426d88cda417899e0ae3a4

* chore: add log

* chore: add log

* chore: remove open hook

* chore: minor update

* chore: fix fmt

* chore: minor update

* chore: rename desc for pipeline table

* refactor: remove updated_at in pipelines

* chore: add more content type support for log inserter api

* chore: introduce pipeline crate

* chore: update upload pipeline api

* chore: fix by pr commit

* chore: add some doc for pub fn/struct

* chore: some minor fixes

* chore: add pipeline version support

* chore: impl log pipeline version

* chore: fix format issue

* fix: make the LogicalPlan of a query pipeline sorted in desc order

* chore: remove some debug log

* chore: replacing hashmap cache with moka

* chore: fix by pr commit

* chore: fix toml format issue

* chore: update Cargo.lock

* chore: fix by pr commit

* chore: fix some issue by pr commit

* chore: add more doc for pipeline version

---------

Co-authored-by: shuiyisong <xixing.sys@gmail.com>
2024-06-14 17:03:30 +00:00
Weny Xu
bf3ad44584 fix: fix release CI typo (#4147)
* fix: ci typo

* chore: use aws registry
2024-06-14 14:29:34 +00:00
Weny Xu
11a903f193 chore(ci): switch to aws registry (#4145)
chore: switch to aws registry
2024-06-14 11:46:57 +00:00
Weny Xu
acdfaabfa5 fix(ci): use ld_classic on macOS (#4143)
fix: use ld_classic on macos
2024-06-14 08:09:12 +00:00
Weny Xu
54ca06ba08 chore: bump version to v0.8.2 (#4141) 2024-06-14 03:39:08 +00:00
Weny Xu
1f315e300f fix: retry on unknown error (#4138) 2024-06-13 11:15:38 +00:00
Weny Xu
573e25a40f chore: run fuzz tests with disk cache (#4118)
* chore: run fuzz tests with disk cache

* fix: print error messages correctly
2024-06-13 09:07:12 +00:00
Lei, HUANG
f8ec46493f refactor: simplify parquet writer (#4112)
* refactor: simplify parquet writer

* chore: fix clippy

* refactor: use AsyncArrowWriter instead of BufferedWriter

* refactor: remove BufferedWriter

* fix: add chunk parameter to avoid entity too small issue

* refactor: use AtomicUsize instead of Mutex

* fix: add chunk argument to stream_to_parquet

* chore: fmt

* wip: fail check

* fix: check

* fmt

* refactor: use impl Future instead of async_trait

* fmt

* refactor: use associate types
2024-06-13 07:32:47 +00:00
Weny Xu
14a2d83594 chore: remove unused code (#4135)
* chore: remove unused code

* Update src/mito2/src/wal/entry_reader.rs

Co-authored-by: Ruihang Xia <waynestxia@gmail.com>

---------

Co-authored-by: Ruihang Xia <waynestxia@gmail.com>
2024-06-12 13:18:33 +00:00
Yingwen
65f8b72d34 feat: Implement RegionScanner for SeqScan (#4060)
* feat: ordered builder wip

* feat: impl RegionScanner for SeqScan

* feat: implement scan_partition and build_stream

* chore: return SeqScan as RegionScanner

* fix: group parts

* feat: split parts

* chore: reader metrics

* chore: metrics

* chore: remove unused codes

* chore: support holding a group of ranges in ScanPart

* feat: group ScanParts to ScanParts

* feat: impl SeqScanner again

* chore: observe build cost in ScannerMetrics

* chore: fix compiler warnings

* style: fix clippy

* docs: update config docs

* chore: forward DisplayAs to scanner

* test: update sqlness tests

* chore: update debug fmt

* chore: custom debug for timestamp

fix test compiling issue with common-macro when running
cargo nextest -p common-time

* chore: update debug format

* feat: update fmt for scan part

* chore: fix warning

* fix: sanitize parallelism

* feat: split parts

* test: fix config api test

* feat: update logs

* chore: Revert "chore: remove unused codes"

This reverts commit b548b30a01eeded59b1a0a8d89f9293ca63afc41.

* chore: Revert "docs: update config docs"

This reverts commit a7997e78d6ddcf635560574de8c1948c495bdd12.

* feat: each partition scan files in parallel

* test: fix config api test

* docs: fix typo

* chore: address comments, simplify tests

* feat: global semaphore

* feat: always spawn task

* chore: simplify default explain output format

* handle output partition number of 0

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix typo

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
Co-authored-by: Ruihang Xia <waynestxia@gmail.com>
2024-06-12 08:21:30 +00:00
LFC
9473daab8b fix: explicitly set config instead of using changable default in tests (#4132)
* fix: explicitly set config instead of using changable default in tests

* fix: resolve PR comments
2024-06-11 10:51:01 +00:00
LFC
5a6021e34f refactor: remove substrait ser/de for region query in standalone (#3812)
* refactor: remove substrait serde for region query in standalone

* fix ci

* move QueryRequest to common-query

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* format code

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* format toml file

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* chore: format toml

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
Co-authored-by: Ruihang Xia <waynestxia@gmail.com>
Co-authored-by: Yingwen <realevenyag@gmail.com>
2024-06-11 09:33:47 +00:00
discord9
1b00526de5 fix(flow): infer table schema correctly (#4113)
* refactor: make individual col name optional

* chore: rename TypedPlan's `typ` to `schema`

* feat: add optional col name to typed plan

* feat: pass col name all along

* feat: correct infer output table schema

* chore: unused import

* fix: error when key is not projected

* refactor: per review

* chore: fmt
2024-06-11 08:57:47 +00:00
Yingwen
5533bd9293 chore(common-macro): remove features covered by full (#4131) 2024-06-11 07:44:53 +00:00
Ning Sun
587e99d806 fix: macro crate cannot be compiled alone (#4130)
* fix: macro crate cannot be compiled alone

* Update src/common/macro/Cargo.toml

Co-authored-by: Yingwen <realevenyag@gmail.com>

---------

Co-authored-by: tison <wander4096@gmail.com>
Co-authored-by: Yingwen <realevenyag@gmail.com>
2024-06-11 05:36:29 +00:00
Yingwen
9cae15bd1b fix: executes pending ddls if region memtable is empty while scheduling next flush (#4119)
* ci: enable debug log

* chore: test to reproduce panic

* chore: Revert "ci: enable debug log"

This reverts commit 17eff2a045.

* test: add test for alter during flush

* fix: clear status if region has nothing to flush

It will also execute pending DDLs and requests

* docs: fix typo
2024-06-11 00:10:17 +00:00
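
The shape of this fix can be sketched as a small state machine: when the region's memtable turns out to be empty at flush-scheduling time, the worker clears the flushing status and drains the DDLs queued behind the flush instead of leaving them stalled. Types and names below are illustrative, not mito2's worker internals:

```rust
#[derive(Default)]
struct RegionFlushState {
    flushing: bool,
    pending_ddls: Vec<String>, // descriptions of DDLs queued behind the flush
}

impl RegionFlushState {
    /// Called when the worker schedules the next flush. Returns the DDLs
    /// that became runnable because there was nothing to flush.
    fn schedule_next_flush(&mut self, memtable_is_empty: bool) -> Vec<String> {
        if memtable_is_empty {
            // Nothing to flush: clear the status and hand back queued DDLs
            // so the worker can execute them immediately.
            self.flushing = false;
            return std::mem::take(&mut self.pending_ddls);
        }
        self.flushing = true;
        Vec::new()
    }
}

fn main() {
    let mut state = RegionFlushState::default();
    state.pending_ddls.push("alter table add column".to_string());
    let ready = state.schedule_next_flush(true);
    assert_eq!(ready.len(), 1);
    assert!(!state.flushing);
}
```
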
cjw
d8b51cfaba refactor: remove double checks of memtable size (#4117)
* refactor: remove unnecessary unwrap

* Update src/mito2/src/region/version.rs

Co-authored-by: dennis zhuang <killme2008@gmail.com>

---------

Co-authored-by: Kermit <chenjiawei1@xiaohongshu.com>
Co-authored-by: dennis zhuang <killme2008@gmail.com>
2024-06-07 23:34:03 +00:00
Weny Xu
e142ca40d7 feat: invoke handle_batch_open_requests (#4107)
* feat: open all regions via invoking `handle_batch_open_requests`

* tests: add sqlness tests

* refactor: avoid cloning

* chore: apply suggestions from CR

* chore: update config.md

* chore: apply suggestions from CR
2024-06-07 09:07:45 +00:00
Yingwen
e982d2e55c fix: Update region Version in the worker loop (#4114)
* feat: handle region edit result

* feat: handle edit result

* feat: handle truncate result

* feat: flush compaction

* feat: invoke in worker

* feat: remove unused fields

* style: fix clippy

* feat: remove applier

---------

Co-authored-by: Weny Xu <wenymedia@gmail.com>
2024-06-07 06:27:16 +00:00
Weny Xu
09e0e1b246 chore: run fuzz tests with kafka remote wal (#4105)
* chore: add fuzz tests with kafka

* chore(ci): use minio

* chore: add empty line

* chore(ci): refactor

* chore: add empty line

* fix: update config

* fix: add default value for `MetaClientOptions`

* fix: remove redundant `debug_assert`

* chore: run fuzz tests with disk cache

* chore: remove redundant minio setup

* chore: cache targets

* Revert "chore: run fuzz tests with disk cache"

This reverts commit d81783187d.

* chore: fix typo

* chore: apply suggestions from CR

* Revert "fix: remove redundant `debug_assert`"

This reverts commit 09b899eed1.
2024-06-07 03:47:40 +00:00
irenjj
9c42825f5d feat: Implement SHOW CREATE FLOW (#4040)
* feat: Implement SHOW CREATE FLOW

* fmt

* stmt for display

* Update src/operator/src/statement.rs

Co-authored-by: Yingwen <realevenyag@gmail.com>

* test: add sqlness test

* fix test

* parse query in parser

* test: move test to standalone

* reuse ParserContext::new()

* Update tests/cases/standalone/show_create_flow.result

Co-authored-by: Weny Xu <wenymedia@gmail.com>

* add line breaks

---------

Co-authored-by: Yingwen <realevenyag@gmail.com>
Co-authored-by: Weny Xu <wenymedia@gmail.com>
2024-06-07 03:24:56 +00:00
Jeremyhi
4719569e4f feat: support gRPC cancellation (#4092)
* feat: support cancellation

* chore: add unit test for cancellation

* chore: minor refactor

* feat: we do not need to spawn in distributed mode

---------

Co-authored-by: Ruihang Xia <waynestxia@gmail.com>
2024-06-06 08:27:25 +00:00
Jeremyhi
b03cb3860e chore: reduce some burden on the write path (#4110)
* chore: remove unnecessary checking

* chore: avoid do the same thing in a loop
2024-06-06 06:45:19 +00:00
shuiyisong
2ade511f26 feat: introduce pipeline crate (#4109)
* chore: introduce pipeline crate

* chore: fix typo
2024-06-05 17:23:25 +00:00
Weny Xu
16b85b06b6 chore: remove gc before running fuzz tests (#4108) 2024-06-05 11:59:29 +00:00
Ruihang Xia
03cacf9948 ci: cargo gc all fuzz test runner (#4081)
* ci: cargo gc all fuzz test runner

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* chore(ci): disable caching targets

* chore(ci): remove .tar file after unzip

* fix cargo-gc command

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
Co-authored-by: WenyXu <wenymedia@gmail.com>
2024-06-05 09:58:29 +00:00
Weny Xu
c23f8ad113 feat: implement the handle_batch_open_requests (#4075)
* feat: implement the `handle_batch_open_requests`

* refactor: refactor `handle_batch_open_requests` method signature

* test: add tests for `handle_batch_open_requests`

* chore: fmt code

* chore: apply suggestions from CR

* chore: apply suggestions from CR

* chore: apply suggestions from CR

* chore: apply suggestions from CR
2024-06-05 09:22:34 +00:00
Weny Xu
e0a2c5a581 chore(ci): remove redundant sqlness test config (#4106) 2024-06-05 08:39:39 +00:00
zyy17
417ab3b779 ci: add 'make run-cluster-with-etcd' to run greptimedb cluster by using docker-compose (#4103) 2024-06-05 08:07:29 +00:00
tison
1850fe2956 feat: show create table only for base table (#4099)
* feat: show create table only for base table

Signed-off-by: tison <wander4096@gmail.com>

* add new cases

Signed-off-by: tison <wander4096@gmail.com>

---------

Signed-off-by: tison <wander4096@gmail.com>
2024-06-04 21:29:07 +00:00
taobo
dd06e107f9 test: add fuzz tests for column data type alteration (#4076)
* feat: support make fuzz-stable in Makefile

* test: add fuzz tests for column data type alteration

* fix: optimize code by cr
2024-06-04 13:38:57 +00:00
sarailQAQ
98c19ed0fa feat: implement drop multiple tables (#4085)
* feat: implement drop multiple tables

* fix: pass fmt and clippy checks

* add: drop multiple sqlness test

* update: accept review suggestions

* update: accept review suggestion

Co-authored-by: Weny Xu <wenymedia@gmail.com>

* fix: pass clippy check

---------

Co-authored-by: Weny Xu <wenymedia@gmail.com>
2024-06-04 13:11:41 +00:00
LFC
c0aed1d267 feat: set global runtime size by config file (#4063)
* set global runtime size

* fix: resolve PR comments

* fix: log the whole option

* fix ci

* debug ci

* debug ci

---------

Co-authored-by: Weny Xu <wenymedia@gmail.com>
2024-06-04 10:03:33 +00:00
discord9
0a07130931 fix(flow): mfp operator missing rows (#4084)
* fix: mfp missing rows if run twice in same tick

* tests: run mfp for multiple times

* refactor: make mfp less hacky

* feat: make channel larger

* chore: typos
2024-06-04 09:07:13 +00:00
Weny Xu
a6269397c8 fix: fix EntityTooSmall issue (#4100)
* fix: fix EntityTooSmall issue

* chore(ci): add minio to coverage

* tests: add test for parquet writer

* chore: move tests to `common-datasource` crate
2024-06-04 08:43:33 +00:00
Lei, HUANG
a80059b47f fix: recover memtable options when opening physical regions (#4102)
* fix: recover memtable options when opening physical regions

* chore: fmt

* chore: merge data region options
2024-06-04 08:20:29 +00:00
Weny Xu
b3a4362626 test: run test_flush_reopen_region and test_region_replay with KafkaLogStore (#4083)
* feat: add `LogStoreFactory` to `TestEnv`

* feat: add `multiple_log_store_factories` template

* test: run `test_flush_reopen_region` and `test_region_replay` with `KafkaLogStore`

* chore: move deps to workspace

* chore: apply suggestions from CR
2024-06-04 08:11:15 +00:00
Kelvin Wu
51e2b6e728 fix: display the PartitionBound and PartitionDef correctly (#4101)
* fix: display the PartitionBound and PartitionDef correctly

* Update src/partition/src/partition.rs

Co-authored-by: dennis zhuang <killme2008@gmail.com>

* fix: fix unit test of partition definition

---------

Co-authored-by: dennis zhuang <killme2008@gmail.com>
2024-06-04 08:10:44 +00:00
shuiyisong
d1838fb28d refactor: move define_into_tonic_status to common-error (#4095)
* chore: finish cherry-pick

* chore: remove unused code
2024-06-04 03:29:15 +00:00
Weny Xu
cd97a39904 chore: enable strip for tests-fuzz crate (#4093) 2024-06-03 14:32:11 +00:00
Weny Xu
4e5dd1ebb0 ci: try to free space after fuzz tests (#4089)
* chore(ci): remove .tar file after unzip

* chore: free space

* chore: include debug info
2024-06-02 21:22:22 +00:00
Kelvin Wu
88cdefa41e feat: implement Display for PartitionExpr (#4087) 2024-06-02 21:09:00 +00:00
Ruihang Xia
c2218f8be8 build(deps): bump datafusion 20240528 (#4061)
* build(deps): bump datafusion 20240528

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* another update

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* update expected sqlness result

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix first/last value

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* reformat comment

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix remaining errors

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* revert toml format

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix pyo3 feature

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* remove dead code

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* Apply suggestions from code review

Co-authored-by: Jeremyhi <jiachun_feng@proton.me>

* format file

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
Co-authored-by: Jeremyhi <jiachun_feng@proton.me>
2024-06-01 14:03:00 +00:00
Ruihang Xia
45fee948e9 fix: display error in correct format (#4082)
* fix: display error in correct format

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add address to RegionServer error

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-05-31 09:25:14 +00:00
discord9
ea49f8a5c4 feat(flow): make write path faster with shared lock (#4073)
* feat(WIP): make write faster

* feat: read lock on fast path

* chore: per review
2024-05-31 06:50:22 +00:00
Jeremyhi
43afea1a9d refactor!: remove the tableid in ddl response since tableids is enough (#4080)
* refactor: remove the tableid in ddl response since tableids is enough

* chore: upgrade proto
2024-05-31 06:41:53 +00:00
Weny Xu
fcfcf86385 feat: implement WalEntryDistributor, WalEntryReciver (#4031)
* feat: implement the `WalEntryDistributor` and `WalEntryReceiver`

* test: add tests for `WalEntryDistributor`

* refactor: use bounded channel

* chore: apply suggestions from CR
2024-05-31 03:03:38 +00:00
Jeremyhi
26b112ab57 refactor: remove upgrade cli tool (#4077) 2024-05-31 00:40:27 +00:00
dennis zhuang
24612f62dd feat: querying from view works (#3952)
* feat: querying from view works

* feat: use MemoryCatalogProviderList instead of DummyCatalogList

* refactor: revert src/query/src/dummy_catalog.rs

* chore: clean code

* fix: make clippy happy

* fix: toml format

* fix: sqlness

* fix: forgot files

* fix: make sqlness happy

* test: table source, serializer and decoder

* fix: fail to decode plan because of invalid table names

* test: adds more sqlness test for view

* chore: remove unused errors

* fix: comments

* fix: typo

* fix: invalidate view info cache after creating view successfully

* chore: apply suggestion

Co-authored-by: Ruihang Xia <waynestxia@gmail.com>

* chore: apply suggestion

Co-authored-by: Ruihang Xia <waynestxia@gmail.com>

* fix: compile error after rebeasing

* chore: style

Co-authored-by: Ruihang Xia <waynestxia@gmail.com>

* fix: don't export table_name in common-meta

* chore: change ViewInfo::new signature

* docs: leave a TODO for optimize param

---------

Co-authored-by: Ruihang Xia <waynestxia@gmail.com>
2024-05-30 21:45:56 +00:00
Ruihang Xia
85a231850d fix: add trailing separator to prefix (#4078)
* fix: add trailing separator to prefix

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* project select result

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-05-30 15:29:26 +00:00
Ruihang Xia
f024054ed3 ci: cargo gc fuzz test runner (#4074)
* ci: cargo gc fuzz test runner

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* change profile to dev

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-05-30 10:41:29 +00:00
discord9
05751084e7 chore: bump to v0.8.1 (#4055) 2024-05-30 07:59:52 +00:00
Jeremyhi
8b6596faa0 feat: avoid some cloning when mirror requests to flownode (#4068)
* feat: some refactoring of mirroring requests to flownode

* feat: use spawn_bg to avoid impacting foreground writes

* feat: add mirror row count metric
2024-05-30 07:29:13 +00:00
Weny Xu
eab309ff7e fix: avoid acquiring lock during reading stats (#4070)
* fix: avoid acquiring lock during reading stats

* chore: apply suggestions from CR

* chore: apply suggestions from CR
2024-05-30 07:08:04 +00:00
discord9
7de336f087 fix(flow): correctness bugs (#4018)
* fix: optional args of tumble

* fix(WIP): choose

* feat: rename default ts to GREPTIME_TIMESTAMP

* fix: default timestamp name

* fix: reorder write requests

* fix: expire state

* fix: test of tumble

* fix: send buf clear

* fix: ts<start time correct window

* fix: window_start when ts<start_time

* mend

* fix: range begin>range end

* refactor: per reviews

* feat!: ts placeholder rename to __ts_placeholder

* refactor: better condition

* tests(WIP): func sig choose

* tests(WIP): tumble func

* feat: make run_available optional blocking

* tests: tumble transform

* chore: clippy

* fix?: lagged missing data

* fix: flow source breaks on empty channel
2024-05-30 03:49:11 +00:00
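
Several of these fixes involve tumbling-window arithmetic, notably computing the window start when a timestamp precedes the alignment start time. A minimal sketch of that calculation (an illustration, not the flow engine's code):

```rust
// Compute the start of the tumbling window containing `ts`. Euclidean
// division floors toward negative infinity, so the ts < start_time case
// still lands in the correct (earlier) window.
fn window_start(ts: i64, window: i64, start_time: i64) -> i64 {
    let offset = ts - start_time;
    start_time + offset.div_euclid(window) * window
}

fn main() {
    assert_eq!(window_start(25, 10, 0), 20);
    assert_eq!(window_start(-5, 10, 0), -10); // ts earlier than start_time
}
```
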
Weny Xu
6e9a9dc333 refactor(log_store): remove associated type Namespace and Entry in LogStore (#4038)
* refactor(log_store): remove associated type `Namespace` in `LogStore`

* fix(test): filter entries

* refactor: ignore incomplete parts

* refactor: simplify `RawEntryReaderFilter`

* chore: avoid cloning

* test: add tests for `maybe_emit_entry`

* refactor: remove `Namespace` trait and rename `LogStoreNamespace` to `Namespace`

* chore: apply suggestions from CR

* refactor: refine `entry` method signature

* feat: ignore any potential incomplete parts

* refactor: rename `namespace` to `provider`

* chore: add debug assertion

* refactor: associated type `Entry` in `LogStore`

* refactor: rename `namespace` to `provider`

* refactor: remove unwrap

* refactor: let `remaining_entries` return a optional vector

* test: add basic tests for kafka logstore

* refactor: move `append` method under `cfg(test)`

* refactor: rename `RawEntry` to `Entry`

* refactor: rename `CorruptedLogEntry` to `CorruptedEntry`

* test: add tests for handling corrupted raw entry stream

* refactor: rename `ns` to `provider`

* refactor: remove `entry_stream.rs` file

* chore: remove unused code

* chore: update comments

* chore: apply suggestions from CR

* chore: update comments

* chore: apply suggestions from CR

* chore: remove Deref

* chore: add comments

* fix: ignore corrupted tail data

* chore: add comments

* fix: add `MIN_BATCH_SIZE` limit
2024-05-29 13:44:01 +00:00
Yingwen
848bd7e553 feat: Implements row group level parallel unordered scanner (#3992)
* feat: unordered scanner

* feat: support compat

* chore: update debug print

fix: missing ranges in scan parts

* fix: ensure chunk size > 0

* fix: parallel is disabled if there is only one file and memtable

* chore: reader metrics

* chore: remove todo

* refactor: add ScanPartBuilder trait

* chore: pass file meta to the part builder

* chore: make part builder private

* docs: update comment

* chore: remove meta()

* refactor: only prune file ranges in ScanInput

replaces ScanPartBuilder with FileRangeCollector which only collect file
ranges

* chore: address typo

* fix: panic when no partition

* feat: Postpone part distribution

* chore: handle empty partition in mito

* style: fix clippy
2024-05-29 11:06:08 +00:00
LYZJU2019
f0effd2680 feat: Implement SHOW STATUS (#4050)
* show status returning empty contents

* return an empty set instead of affected rows

* chore: Update src/query/src/sql.rs

---------

Co-authored-by: Yingwen <realevenyag@gmail.com>
2024-05-29 04:49:05 +00:00
dennis zhuang
aafb468547 fix: set local or session time_zone not work (#4064)
* fix: set local or session time_zone not work

* chore: supports PostgreSQL-specific setting time zone
2024-05-29 00:06:13 +00:00
Weny Xu
4aa756c896 feat: open region in background (#4052)
* feat: open region in background

* feat: trace opening regions

* feat: wait for the opening region

* feat: let engine to handle the future open request

* fix: fix `test_region_registering`
2024-05-28 13:58:15 +00:00
Weny Xu
d3860671a8 chore: add LAST_SENT_HEARTBEAT_ELAPSED metric (#4062) 2024-05-28 08:05:37 +00:00
tison
9dd6e033a7 refactor: move Database to client crate behind testing feature (#4059)
* refactor: move Database to client crate behind testing feature

Signed-off-by: tison <wander4096@gmail.com>

* partial move

Signed-off-by: tison <wander4096@gmail.com>

* catch up more

Signed-off-by: tison <wander4096@gmail.com>

* fix imports

Signed-off-by: tison <wander4096@gmail.com>

* finish

Signed-off-by: tison <wander4096@gmail.com>

* tidy

Signed-off-by: tison <wander4096@gmail.com>

---------

Signed-off-by: tison <wander4096@gmail.com>
2024-05-28 03:21:43 +00:00
Weny Xu
097f62f459 refactor(fuzz-tests): generate ts value separately (#4056)
refactor: generate ts value separately
2024-05-27 14:07:32 +00:00
Weny Xu
048368fd87 feat: invoke flush_table and compact_table in fuzz tests (#4045)
* feat: invoke `flush_table` and `compact_table` in fuzz tests

* feat: support to flush and compact physical metric table

* fix: avoid creating tables with the same name

* feat: validate values after flushing or compacting table
2024-05-27 09:26:50 +00:00
tison
f9db5ff0d6 build(deps): upgrade opendal to 0.46 (#4037)
* build(deps): upgrade opendal to 0.46

Signed-off-by: tison <wander4096@gmail.com>

* migrate writes

Signed-off-by: tison <wander4096@gmail.com>

* migrate reads

Signed-off-by: tison <wander4096@gmail.com>

* fixup object safety

Signed-off-by: tison <wander4096@gmail.com>

* fixup names

Signed-off-by: tison <wander4096@gmail.com>

* fixup compilation

Signed-off-by: tison <wander4096@gmail.com>

* fixup compilation

Signed-off-by: tison <wander4096@gmail.com>

* a few Buffer to Vec

Signed-off-by: tison <wander4096@gmail.com>

* Make greptime buildable with opendal 0.46 (#5)

Signed-off-by: Xuanwo <github@xuanwo.io>

* fixup toml check

Signed-off-by: tison <wander4096@gmail.com>

* test_orc_opener

Signed-off-by: tison <wander4096@gmail.com>

* Fix lru cache (#6)

Signed-off-by: Xuanwo <github@xuanwo.io>

* clippy

Signed-off-by: tison <wander4096@gmail.com>

* improve comments

Signed-off-by: tison <wander4096@gmail.com>

* address comments

Signed-off-by: tison <wander4096@gmail.com>

* reduce buf copy

Signed-off-by: tison <wander4096@gmail.com>

* upgrade to reqwest 0.12

Signed-off-by: tison <wander4096@gmail.com>

---------

Signed-off-by: tison <wander4096@gmail.com>
Signed-off-by: Xuanwo <github@xuanwo.io>
Co-authored-by: Xuanwo <github@xuanwo.io>
2024-05-27 09:12:23 +00:00
Weny Xu
20ce7d428d fix(metric-engine): missing catchup implementation (#4048)
* fix(metric-engine): missing catchup implementation

* fix: should be `metadata_region_id`
2024-05-27 07:56:46 +00:00
Weny Xu
75bddc0bf5 fix(fuzz-tests): avoid dropping an in-use database (#4049)
* fix(fuzz-tests): avoid dropping an in-use database

* fix: correct datahome path

* fix: correct `schema_name`

* chore: apply suggestions from CR
2024-05-27 07:44:59 +00:00
tison
c78043d526 build(deps): merge tower deps to workspace (#4036)
Signed-off-by: tison <wander4096@gmail.com>
2024-05-27 07:15:30 +00:00
LFC
297105266b feat: enable tcp keepalive for http server (#4019)
* feat: enable tcp keepalive for http server

* chore: for enterprise's update

* resolve PR comments
2024-05-27 04:07:36 +00:00
Ruihang Xia
1de17aec74 feat: change EXPIRE WHEN to EXPIRE AFTER (#4002)
* feat: change EXPIRE WHEN to EXPIRE AFTER

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* change remaining

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* rename create_if_not_exist to create_if_not_exists

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* parse interval expr

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* update comment

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* Apply suggestions from code review

Co-authored-by: Jeremyhi <jiachun_feng@proton.me>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
Co-authored-by: Jeremyhi <jiachun_feng@proton.me>
2024-05-27 04:05:55 +00:00
Weny Xu
389ded93d1 chore: add logs for setting the region to writable (#4044)
* chore: add logs for setting the region to writable

* fix: ignore redundant logs
2024-05-27 04:01:40 +00:00
Eugene Tolbakov
af486ec0d0 feat(operator): check if a database is in use before dropping it (#4035)
feat(operator): check if database is in use before dropping it
2024-05-27 03:31:58 +00:00
irenjj
25d64255a3 feat: support table level comment (#4042)
* feat: support table level comment

* use constants

Signed-off-by: tison <wander4096@gmail.com>

---------

Signed-off-by: tison <wander4096@gmail.com>
Co-authored-by: tison <wander4096@gmail.com>
2024-05-27 02:28:52 +00:00
tison
3790020d78 build(deps): upgrade promql-parser to 0.4 (#4047)
* build(deps): upgrade promql-parser to 0.4

Signed-off-by: tison <wander4096@gmail.com>

* lock

Signed-off-by: tison <wander4096@gmail.com>

* catch up upgrades

Signed-off-by: tison <wander4096@gmail.com>

* concise method

Signed-off-by: tison <wander4096@gmail.com>

---------

Signed-off-by: tison <wander4096@gmail.com>
2024-05-27 01:51:59 +00:00
Weny Xu
5df3d4e5da feat: implement the LogStoreRawEntryReader and RawEntryReaderFilter (#4030)
* feat: implement the `LogStoreRawEntryReader`

* feat: implement the `RawEntryReaderFilter`

* test: add tests
2024-05-24 11:53:15 +00:00
tison
af670df515 ci: skip notification for manual releases (#4033)
Signed-off-by: tison <wander4096@gmail.com>
2024-05-24 10:16:06 +00:00
Ruihang Xia
a58256d4d3 feat: round-robin selector (#4024)
* feat: implement round robin peer selector

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add document and test

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-05-24 07:29:07 +00:00
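
A round-robin selector is simple enough to sketch: each call hands out the next peer in cyclic order. The type below is hypothetical; the real selector implements metasrv's `Selector` trait and works over live datanode peers:

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

struct RoundRobinSelector {
    peers: Vec<String>,
    counter: AtomicUsize, // monotonically increasing pick counter
}

impl RoundRobinSelector {
    fn select(&self) -> &str {
        let i = self.counter.fetch_add(1, Ordering::Relaxed) % self.peers.len();
        &self.peers[i]
    }
}

fn main() {
    let s = RoundRobinSelector {
        peers: vec!["dn-0".into(), "dn-1".into(), "dn-2".into()],
        counter: AtomicUsize::new(0),
    };
    assert_eq!(s.select(), "dn-0");
    assert_eq!(s.select(), "dn-1");
    assert_eq!(s.select(), "dn-2");
    assert_eq!(s.select(), "dn-0"); // wraps around
}
```
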
Weny Xu
466f7c6448 feat: add RawEntryReader and OneshotWalEntryReader trait (#4027)
* feat: add `RawEntryReader` and `OneShotWalEntryReader` trait

* chore: rename `OneShot` to `Oneshot`

* refactor: remove `region_id` from `OneshotWalEntryReader`
2024-05-24 06:30:50 +00:00
Ruihang Xia
0101657649 feat: remove one clone on constructing partition (#4028)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-05-24 04:01:19 +00:00
taobo
a3a2c8d063 feat: Add TLS support for gRPC service (#3957)
* feat: Add tls support for grpc service

* feat: add integration test

* fix: integration test

* fix: revert by suggestion

* fix: typos

* fix: optimize code

* fix: optimize code

* docs: update configs
2024-05-23 19:00:16 +00:00
Yingwen
dfc1acbb2a fix: notifies all workers once a region is flushed (#4016)
* fix: notify workers to handle stalled requests if flush is finished

* chore: change stalled count to gauge

* feat: process stalled requests eagerly
2024-05-23 12:45:00 +00:00
Lei, HUANG
0d055b6ee6 refactor: remove unused log config (#4021) 2024-05-23 08:59:42 +00:00
Weny Xu
614643ef7b chore(ci): add more replicas (#4015) 2024-05-23 02:43:24 +00:00
Ning Sun
b90b7adf6f feat: add fallback logic for vmagent sending wrong content type (#4009)
* feat: add fallback logic for vmagent sending wrong content type

* fix: resolve lint issues

* Update src/servers/src/http/prom_store.rs

Co-authored-by: Yingwen <realevenyag@gmail.com>

---------

Co-authored-by: Yingwen <realevenyag@gmail.com>
2024-05-23 02:40:17 +00:00
Jeremyhi
418090b464 chore: log error for detail (#4011)
* chore: log error for detail

* chore: by cr
2024-05-22 12:17:20 +00:00
Lei, HUANG
090b59e8d6 feat: manual compaction (#3988)
* add compaction udf params

* wip: pass compaction options through grpc

* wip: pass compaction options all the way down to region server

* wip: window compaction task

* feat: trigger major compaction

* refactor: optimize compaction parameter parsing

* chore: rebase main

* chore: update proto

* chore: add some tests

* feat: validate catalog

* chore: fix typo and rebase main

* fix: some cr comments

* fix: file_time_bucket_span

* fix: avoid upper bound overflow

* chore: update proto
2024-05-22 09:42:21 +00:00
shuiyisong
9e1af79637 chore: add ttl to write_cache (#4010)
* chore: add ttl to write_cache

* chore: update test & add example config

* chore: fix typo

* chore: fix typo

* chore: fix typo
2024-05-22 06:50:12 +00:00
Yohan Wal
9800807fe5 fix(fuzz): sort inserted rows with primary keys and time index (#4008)
* fix(fuzz): sort inserted rows with primary keys and time index

* fix: correct index when replacing default

* fix: put null behind all values
2024-05-22 03:32:19 +00:00
zyy17
b86d79b906 fix: can't print log because the tracing guard is dropped (#4005)
* fix: avoid logging guard drop

* chore: remove unused '#[allow(dead_code)]'
2024-05-22 03:24:40 +00:00
Lei, HUANG
e070ba3c32 feat: respect time range when building parquet reader (#3947)
* feat: convert timestamp range filters to predicates

* chore: rebase main

* fix: remove predicates once they have been added to timestamp filters to avoid duplicate filtering

* fix: some comments

* fix: resolve conflicts
2024-05-21 16:02:25 +00:00
Weny Xu
43bf7bffd0 fix: try to fix unstable fuzz test (#4003)
fix: ignore PoolTimedOut
2024-05-21 12:57:09 +00:00
Weny Xu
56aed6e6ff chore(ci): export kind logs (#3996)
* chore(ci): export kind logs

* chore: add empty line
2024-05-21 11:56:03 +00:00
zyy17
47785756e5 fix: move log_version() into build() of App to fix no log version on bootstrap (#4004) 2024-05-21 09:15:15 +00:00
Jeremyhi
0aa523cd8c feat: make create view procedure as simple as others (#4001) 2024-05-21 08:30:57 +00:00
Weny Xu
7a8222dd97 fix: try to fix broken CI (#3998)
* fix: try to fix broken CI

* chore: using loop to check status
2024-05-21 07:02:18 +00:00
maco
40c585890a refactor: replace Expr with datafusion::Expr (#3995)
* refactor: replace Expr with datafusion::Expr

* fix: fmt-toml

* fix: cr comment
2024-05-21 06:40:29 +00:00
zyy17
da925e956e ci: change the image name of nightly build (#3994) 2024-05-21 06:11:12 +00:00
Weny Xu
d7ade3c854 chore(ci): add fuzz tests for distributed mode (#3967)
* chore(ci): add cfg for setup GreptimeDB cluster

* chore: use kind

* chore: always print info

* chore: add debug print

* chore: set etcd replica to 1

* ci: refactor e2e cfg

* ci: add Fuzz Test for distributed mode

* Apply suggestions from code review

* chore: apply suggestions from CR

* chore(ci): upload logs
2024-05-21 04:58:42 +00:00
Yingwen
179c8c716c feat: Adds RegionScanner trait (#3948)
* feat: define region scanner

* feat: single partition scanner

* feat: use single partition scanner

* feat: implement ExecutionPlan wip

* feat: mito engine returns single partition scanner

* feat: implement DisplayAs for region server

* feat: dummy table provider use handle_partitioned_query()

* test: update sqlness test

* feat: table provider use ReadFromRegion

* refactor: remove StreamScanAdapter

* chore: update lock

* style: fix clippy

* refactor: remove handle_query from the RegionEngine trait

* chore: address CR comments

* refactor: rename methods

* refactor: rename ReadFromRegion to RegionScanExec
2024-05-20 11:52:00 +00:00
shuiyisong
19543f9819 feat: support compression on gRPC server (#3961)
* feat: enable gzip in grpc server side

* feat: add enable_gzip_compression config

* test: add grpc compression test

* feat: support user configured compression on grpc server

* chore: update doc

* chore: add tests

* fix: make config-docs

* chore: fix cr issue

* chore: add test

* refactor: remove config on server side, auto enable all compression support

* chore: minor update

* chore: remove unused code

* refactor: enable zstd compression internally by default

* chore: minor fix
2024-05-20 11:28:00 +00:00
discord9
533ada70ca chore: remove a forgotten dbg! (#3990)
* chore: remove a forgotten dbg!

* remove other dbg! and add lint

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix pyo3 feature

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
Co-authored-by: Ruihang Xia <waynestxia@gmail.com>
2024-05-20 08:34:47 +00:00
zyy17
c50ff23194 ci: add 'contents: write' permission (#3989) 2024-05-20 06:23:54 +00:00
tison
d7f1150098 ci: fixup strings in check ci status (#3987)
Signed-off-by: tison <wander4096@gmail.com>
2024-05-20 03:59:05 +00:00
zyy17
82c3eca25e refactor: make the command entry cleaner (#3981)
* refactor: move run() in App trait

* refactor: introduce AppBuilder trait

* chore: remove AppBuilder

* refactor: remove Options struct and make the start() clean

* refactor: init once for common_telemetry::init_global_logging
2024-05-20 03:34:06 +00:00
Yingwen
df13832a59 feat: use cache in compaction (#3982) 2024-05-20 02:36:51 +00:00
tison
7da92eb9eb ci: check-status for nightly-ci (#3984)
Signed-off-by: tison <wander4096@gmail.com>
2024-05-19 07:10:59 +00:00
Weny Xu
c71298d3d5 chore: pin cargo-ndk to 3.5.4 (#3979) 2024-05-18 08:46:01 +00:00
Yingwen
de594833ac docs: add v0.8.0 TSBS report (#3983)
docs: add v0.8.0 tsbs report
2024-05-18 08:09:16 +00:00
Eugene Tolbakov
6a9a92931d chore: change binary array type from LargeBinaryArray to BinaryArray (#3924)
* chore: change binary array type from LargeBinaryArray to BinaryArray

* fix: adjust try_into_vector logic

* fix: apply CR suggestions, add tests

* chore: fix failing test

* chore: fix integration test

* chore: adjust the assertions according to changed implementation

* chore: add a test with LargeBinary type

* chore: apply CR suggestions

* chore: simplify tests
2024-05-18 08:04:41 +00:00
tison
11ad5b3ed1 ci: report CI failures by creating issues (#3976)
* ci: report CI failures by creating issues

Signed-off-by: tison <wander4096@gmail.com>

* integrate with CI workflows

Signed-off-by: tison <wander4096@gmail.com>

* mention db-approver

Signed-off-by: tison <wander4096@gmail.com>

---------

Signed-off-by: tison <wander4096@gmail.com>
2024-05-18 03:03:56 +00:00
zyy17
b8354bbb55 docs: add toc for config docs (#3974) 2024-05-18 01:57:49 +00:00
Weny Xu
258675b75e chore: bump to v0.8.0 (#3971) 2024-05-17 15:05:20 +00:00
Weny Xu
11a08cb272 feat(cli): prevent exporting physical table data (#3978)
* feat: prevent exporting physical table data

* chore: apply suggestions from CR
2024-05-17 14:58:10 +00:00
Ruihang Xia
e9b178b8b9 fix: tql parser hang on abnormal input (#3977)
* fix: tql parser hang on abnormal input

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* apply review sugg

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-05-17 14:22:20 +00:00
discord9
3477fde0e5 feat(flow): tumble window func (#3968)
* feat(WIP): tumble window rewrite parser

* tests: tumble func

* feat: add `update_at` column for all flow output

* chore: cleanup per review

* fix: update_at not as time index

* fix: demo tumble

* fix: tests&tumble signature&accept both ts&datetime

* refactor: update_at now ts millis type

* chore: per review advice
2024-05-17 12:10:28 +00:00
dennis zhuang
9baa431656 fix: changing column data type can't process type alias (#3972) 2024-05-17 11:34:31 +00:00
WU Jingdi
e2a1cb5840 feat: support evaluate expr in range query param (#3823)
* feat: support evaluate expr in range query param

* chore: fix comment

* chore: fix code comment

* fix: disable now in duration param
2024-05-17 08:31:55 +00:00
Weny Xu
f696f41a02 fix: prevent registering logical regions with AliveKeeper (#3965)
* fix: register logical region

* chore: fix Clippy

* chore: apply suggestions from CR
2024-05-17 07:38:35 +00:00
Weny Xu
0168d43d60 fix: prevent exporting metric physical table data (#3970) 2024-05-17 07:19:28 +00:00
Yingwen
e372e25e30 build: add RUSTUP_WINDOWS_PATH_ADD_BIN env (#3969)
build: add RUSTUP_WINDOWS_PATH_ADD_BIN: 1
2024-05-17 06:01:46 +00:00
zyy17
ca409a732f refactor(naming): use the better naming for pubsub (#3960) 2024-05-17 03:00:15 +00:00
Ruihang Xia
5c0a530ad1 feat: skip read-only region when trying to flush on region full (#3966)
* feat: skip read-only region when trying to flush on region full

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* improve log

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* also skip in periodically

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-05-16 14:56:43 +00:00
Ruihang Xia
4b030456f6 feat: remove timeout in the channel between frontend and datanode (#3962)
* style: change builder pattern

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* feat: remove timeout

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* remove unused config

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* update docs

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-05-16 14:12:42 +00:00
irenjj
f93b5b19f0 feat: limit total rows copied in COPY TABLE FROM with LIMIT segment (#3910)
* feat: limit total rows copied in COPY TABLE FROM with LIMIT segment

* fmt

* disable default limit

* fix: check parse

* fix test, add error case

* fix: forbid LIMIT in database

* fix: only support LIMIT segment

* fix: simplify

* fix

* fix

* fix

* fix

* fix: test

* fix: change error info

* fix clippy

* fix: fix error msg

* fix test

* fix: test error info
2024-05-16 13:39:26 +00:00
Yingwen
669a6d84e9 test: gracefully shutdown postgres client in sql tests (#3958)
* chore: debug log

* test: gracefully shutdown pg client
2024-05-16 11:50:45 +00:00
discord9
a45017ad71 feat(flow): expire arrange according to time_index type (#3956)
* feat: render_reduce's arrangement expires after time passes

* feat: set expire when create flow
2024-05-16 11:41:03 +00:00
discord9
0d9e71b653 feat(flow): flow node manager (#3954)
* feat(flow): flow node manager

feat(flow): render src/sink

feat(flow): flow node manager in standalone

fix?: higher run freq

chore: remove redundant error enum variant

fix: run with higher freq if more inserts arrive

chore: fix after rebase

chore: typos

* chore(WIP): per review

* chore: per review
2024-05-16 11:37:14 +00:00
discord9
93f178f3ad feat(flow): avg func rewrite to sum/count (#3955)
* feat(WIP): parse avg

* feat: RelationType::apply_mfp no longer needs expr types

* feat: avg&tests

* fix(WIP): avg eval

* fix: sum ret correct type

* chore: typos
2024-05-16 10:03:56 +00:00
WU Jingdi
9f4a6c6fe2 feat: support any precision in PromQL (#3933)
* feat: support any precision in PromQL

* chore: add test
2024-05-16 07:00:24 +00:00
Weny Xu
c915916b62 feat(cli): export metric physical tables first (#3949)
* feat: export metric physical tables first

* chore: apply suggestions from CR
2024-05-16 06:30:20 +00:00
Weny Xu
dff7ba7598 feat: ignore internal columns in SHOW CREATE TABLE (#3950)
* feat: ignore internal columns

* chore: add new line

* chore: apply suggestions from CR

* chore: apply suggestions from CR
2024-05-16 06:28:48 +00:00
Ning Sun
fe34ebf770 test: give windows file watcher more time (#3953)
* test: give windows file watcher more time

* refactor: use constants for timeout
2024-05-16 01:58:45 +00:00
tison
a1c51a5885 chore: catch up label updates (#3951)
Signed-off-by: tison <wander4096@gmail.com>
2024-05-15 16:44:17 +00:00
zyy17
63a8d293a1 refactor: add Configurable trait (#3917)
* refactor: add Configurable trait

* refactor: add merge_with_cli_options() to simplify load_options()

* docs: add comments

* fix: clippy errors

* fix: toml format

* fix: build error

* fix: clippy errors

* build: downgrade config-rs

* refactor: use '#[snafu(source(from()))'

* refactor: minor modification for load_layered_options() to make it clean
2024-05-15 12:56:40 +00:00
tison
6c621b7fcf ci: implement docbot in cyborg (#3937)
* ci: implement docbot in cyborg

Signed-off-by: tison <wander4096@gmail.com>

* allow remove non-existing label

Signed-off-by: tison <wander4096@gmail.com>

* fixup

Signed-off-by: tison <wander4096@gmail.com>

* fixup org name

Signed-off-by: tison <wander4096@gmail.com>

* fixup step name

Signed-off-by: tison <wander4096@gmail.com>

* remove unused file

Signed-off-by: tison <wander4096@gmail.com>

---------

Signed-off-by: tison <wander4096@gmail.com>
2024-05-15 12:55:49 +00:00
Yingwen
529e344450 ci: Use lld linker in windows tests (#3946)
* ci: disable other test

* ci: timeout 30

* ci: try to use lld

* ci: change linker

* test: wait for file change in test multiple times

* ci: enable other tests

* chore: revert sleep in loop
2024-05-15 12:34:10 +00:00
Zhenchi
2a169f9364 perf(operator): reuse table info from table creation (#3945)
perf(operator): reuse table info from table creation

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>
2024-05-15 09:18:17 +00:00
discord9
97eb196699 feat(flow): query table schema&refactor (#3943)
* feat: get table info

* feat: remove new&unwrap

* chore: per PR advice

* chore: per review
2024-05-15 08:35:12 +00:00
Yohan Wal
cfae276d37 feat(fuzz): add validator for inserted rows (#3932)
* feat(fuzz): add validator for inserted rows

* fix: compatibility with mysql types

* feat(fuzz): add datetime and date type in mysql for row validator
2024-05-15 07:05:51 +00:00
Weny Xu
09129a911e chore: update greptime-proto to a11db14 (#3942) 2024-05-15 03:38:47 +00:00
discord9
15d7b9755e feat(flow): flow worker (#3934)
* feat: flow worker

* chore: fix after cherry pick

* refactor: error handling

* refactor: error handling

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* chore: merge origin/main

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

---------

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>
Co-authored-by: Zhenchi <zhongzc_arch@outlook.com>
2024-05-14 16:09:25 +00:00
Jeremyhi
72897a20e3 chore: minor refactor on etcd kvbackend (#3940)
* chore: minor refactor on etcd kvbackend

* chore: avoid clone
2024-05-14 13:25:22 +00:00
Lei, HUANG
c04d02460f fix(metric engine): label mismatch in metric engine (#3927)
* fix: label mismatch

* test: add unit test

* chore: avoid updating full primary keys

* fix: style

* chore: add some doc for PkIndexMap

* chore: update some doc
2024-05-14 12:58:22 +00:00
discord9
4ca7ac7632 feat(flow): add types for every plan enum variant (#3938)
* feat: Plan with types

* chore: per review advice
2024-05-14 10:51:37 +00:00
Jeremyhi
a260ba3ee7 feat: use txn to impl cas (#3936)
* feat: use txn to impl cas

* chore: fix test
2024-05-14 08:21:27 +00:00
dennis zhuang
efd3f04b7c feat: create view (#3807)
* add statement

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* feat: rebase with main

* fix: create flow

* feat: adds gRPC stuff

* feat: impl create_view ddl in operator

* feat: impl CreateViewProcedure

* chore: update cargo lock

* fix: format

* chore: compile error after rebasing main

* chore: refactor and test create view parser

* chore: fixed todo list and comments

* fix: compile error after rebasing

* test: add create view test

* test: test view_info keys

* test: adds test for CreateViewProcedure and clean code

* test: adds more sqlness test for creating views

* chore: update cargo lock

* fix: don't replace normal table in CreateViewProcedure

* chore: apply suggestion

Co-authored-by: Jeremyhi <jiachun_feng@proton.me>

* chore: style

Co-authored-by: Jeremyhi <jiachun_feng@proton.me>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
Co-authored-by: Ruihang Xia <waynestxia@gmail.com>
Co-authored-by: Jeremyhi <jiachun_feng@proton.me>
2024-05-14 08:03:29 +00:00
Weny Xu
f16ce3ca27 feat: support to invalidate flow cache (#3926)
* feat: add `FlowName` & `FlowId` to `CacheIdent`

* feat: support to invalidate flow cache

* chore: apply suggestions from CR
2024-05-14 06:24:09 +00:00
Yingwen
6214180ecd build: upgrade rust toolchain to fix ci issues on Windows (#3898)
* ci: use windows 2019

* test: ignore cleanup result

* chore: revert change

* test: unstable repeated task test

* build: update rust toolchain and windows

* ci: test sqlness

* chore: enable other tests
2024-05-14 06:13:43 +00:00
Weny Xu
00e21e2021 fix: potential deadlock (#3930)
fix: fix potential deadlock
2024-05-14 03:04:03 +00:00
maco
494ce65729 feat: limit the size of query results for Dashboard (#3901)
* feat: limit the size of query results for Dashboard

* optimize code

* fix by cr

* fix integration tests error

* remove RequestSource::parse

* refactor: sql query params

* fix: unit test

---------

Co-authored-by: tison <wander4096@gmail.com>
2024-05-14 01:57:30 +00:00
Weny Xu
e15294db41 feat: introduce TableRouteCache to PartitionRuleManager (#3922)
* chore: add `CompositeTableRouteCacheRef` to `PartitionRuleManager`

* chore: update comments

* fix: add metrics for `get`

* chore: apply suggestions from CR

* chore: correct cache name

* feat: implement `LayeredCacheRegistry`

* fix: invalidate logical tables by physical table id

* refactor: replace `CacheRegistry` with `LayeredCacheRegistry`

* chore: update comments

* chore: apply suggestions from CR

* chore: fix fmt

* refactor: use `TableRouteCache` instead

* chore: apply suggestions from CR

* chore: fix clippy
2024-05-13 13:26:43 +00:00
discord9
be1eb4efb7 feat(flow): render source/sink (#3903)
* feat(flow): render src/sink

* chore: add empty impl

* chore: typos

* refactor: according to review(WIP)

* refactor: reexport df_substrait&use to_sub_plan

* fix: add implicit location to error enum

* fix: error handling unwrap query_ctx
2024-05-13 11:58:02 +00:00
Jeff Chiang
9d12496aaf feat: create database with options (#3751)
* feat: create database with options

* fix: clippy

* fix: clippy

* feat: rebase and add Display test

* feat: sqlness test for creating database with options

* address comments

Signed-off-by: tison <wander4096@gmail.com>

* fixup tests

Signed-off-by: tison <wander4096@gmail.com>

* catch up

Signed-off-by: tison <wander4096@gmail.com>

* DefaultOnNull

Signed-off-by: tison <wander4096@gmail.com>

---------

Signed-off-by: tison <wander4096@gmail.com>
Co-authored-by: tison <wander4096@gmail.com>
2024-05-13 09:00:15 +00:00
Jeremyhi
5d8084a32f chore: store-addr to store-addrs (#3925) 2024-05-13 08:30:25 +00:00
zyy17
60eb5de3f1 refactor: add tracing options in xOptions (#3919)
* refactor: add tracing options in {DatanodeOptions, FrontendOptions, MetasrvOptions, StandaloneOptions}

* ci: fix integration error

* refactor: minor modification for initialization of tracing options
2024-05-13 05:16:50 +00:00
maco
a0be7198f9 feat: migrate orc-rs to datafusion-orc (#3923) 2024-05-13 05:15:06 +00:00
Jeremyhi
6ab3aeb142 refactor: rename metasrv_addr to metasrv_addrs (#3921) 2024-05-12 03:44:21 +00:00
Ruihang Xia
590aedd466 fix: sort unstable HTTP result in label values query (#3920)
* fix: sort unstable HTTP result in label values query

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* chore: Update src/servers/src/http/prometheus.rs

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
Co-authored-by: Yingwen <realevenyag@gmail.com>
2024-05-11 11:33:01 +00:00
Weny Xu
27e376e892 feat: implement the CompositeTableRoute (#3918)
* feat: implement the `CompositeTableRoute`

* chore: update comments
2024-05-11 11:09:00 +00:00
Weny Xu
36c41d129c feat: support to create & drop flow via grpc (#3915)
* feat: support to create & drop flow via grpc

* chore: apply suggestions from CR

* chore: apply suggestions from CR

* chore: apply suggestions from CR
2024-05-11 11:01:48 +00:00
Weny Xu
89da42dbc1 refactor: refactor frontend cache (#3912)
* feat: implement the `TableCache`

* refactor: use `TableCache`

* refactor: replace `TableFlowManager` with `TableFlownodeSetCache`

* refactor: introduce cache crate

* chore: add comments

* chore: update comments

* chore: apply suggestions from CR

* chore: rename `cache_invalidator` to `local_cache_invalidator`

* chore(fuzz): set `acquire_timeout` to 30s
2024-05-11 09:58:18 +00:00
Jeremyhi
04852aa27e chore: keep the same naming style (#3916) 2024-05-11 09:39:49 +00:00
Yingwen
d0820bb26d refactor: Remove PhysicalPlan trait and use ExecutionPlan directly (#3894)
* refactor: remove PhysicalPlan

* refactor: remove physical_plan mod

* refactor: import

* fix merge error

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
Co-authored-by: Ruihang Xia <waynestxia@gmail.com>
2024-05-11 07:38:03 +00:00
Jeremyhi
fa6c371380 feat: metaclient builder options with role (#3909)
* feat: metaclient builder options with default role

* chore: remove unnecessary ut
2024-05-11 06:17:14 +00:00
Jeremyhi
9aa2182cb2 refactor: make txn easy to use (#3905)
refactor: put_if_not_exists and compare_and_put API
2024-05-11 04:45:04 +00:00
Weny Xu
bca2e393bf refactor: add procedure_loader macro (#3906) 2024-05-11 04:41:21 +00:00
zyy17
b1ef327bac refactor: remove MixOptions and use StandaloneOptions only (#3893)
* refactor: remove MixOptions and use StandaloneOptions only

* refactor: refactor code by code review comments

1. Use '&self' in frontend_options() and datanode_options();

2. Remove unused 'clone()';

Signed-off-by: zyy17 <zyylsxm@gmail.com>

* ci: fix integration error

---------

Signed-off-by: zyy17 <zyylsxm@gmail.com>
2024-05-10 16:08:34 +00:00
Ruihang Xia
115c74791d build(deps): bump snafu to 0.8 (#3911)
* change Cargo.toml

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* global replace

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* handle alias in script engine

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix clippy

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-05-10 13:36:25 +00:00
Ruihang Xia
aec5cca2c7 feat: support distributed EXPLAIN ANALYZE (#3908)
* feat: fetch and pass per-plan metrics

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* impl DistAnalyzeExec

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* update sqlness results

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix clippy

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix typo

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix typo again

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* Update src/query/src/analyze.rs

Co-authored-by: Jeremyhi <jiachun_feng@proton.me>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
Co-authored-by: Jeremyhi <jiachun_feng@proton.me>
2024-05-10 12:31:29 +00:00
Weny Xu
06e1c43743 feat: support to drop flow (#3900)
* feat: support to drop flow

* chore: apply suggestions from CR
2024-05-10 12:09:37 +00:00
Weny Xu
9d36c31209 feat: introduce the CacheRegistry (#3896)
* feat: implement the `CacheRegistry`

* refactor: change `CacheInvalidator` signature

* feat: implement `CacheInvalidator`

* feat: add `get_or_register`

* fix: fmt toml

* feat: implement the `CacheRegistryBuilder`

* chore: apply suggestions from CR

* chore: fmt code
2024-05-10 11:53:42 +00:00
Weny Xu
c91132bd14 feat: introduce TableNameCache & TableInfoCache & TableRouteCache (#3895)
* feat: implement the `TableNameCache`

* feat: implement the `TableInfoCache`

* feat: implement the `TableRouteCache`

* test: add tests for `TableInfoCache` & `TableRouteCache`

* chore: use `TableId`
2024-05-10 09:50:44 +00:00
Ruihang Xia
25e9076f5b docs: correct v0.7 benchmark report (#3907)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-05-10 09:33:06 +00:00
Ruihang Xia
08945f128b fix: sort unstable HTTP result on test profile
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-05-10 17:01:42 +08:00
Yingwen
5a0629eaa0 feat: Parquet reader builder supports building multiple ranges to read (#3841)
* chore: change `&mut self` to `&self`

* feat: define partition and partition context

* refactor: move precise_filter to PartitionContext

* feat: filter  wip

* feat: compute projection and fields in format

* feat: use RowGroupReader to implement ParquetReader

* fix: use expected meta to get column id for filters

* feat: partition returns row group reader

* style: fix clippy

* feat: add build partitions method

* docs: comment

* refactor: rename Partition to FileRange

* chore: address CR comments

* feat: avoid allocating column ids while constructing ReadFormat
2024-05-10 07:39:38 +00:00
904 changed files with 56568 additions and 16291 deletions

View File

@@ -1,7 +1,7 @@
---
name: Bug report
description: Is something not working? Help us fix it!
labels: [ "bug" ]
labels: [ "C-bug" ]
body:
- type: markdown
attributes:

View File

@@ -4,5 +4,5 @@ contact_links:
url: https://greptime.com/slack
about: Get free help from the Greptime community
- name: Greptime Community Discussion
url: https://github.com/greptimeTeam/greptimedb/discussions
url: https://github.com/greptimeTeam/discussions
about: Get free help from the Greptime community

View File

@@ -1,7 +1,7 @@
---
name: Enhancement
description: Suggest an enhancement to existing functionality
labels: [ "enhancement" ]
labels: [ "C-enhancement" ]
body:
- type: dropdown
id: type

View File

@@ -1,7 +1,7 @@
---
name: Feature request
name: New Feature
description: Suggest a new feature for GreptimeDB
labels: [ "feature request" ]
labels: [ "C-feature" ]
body:
- type: markdown
id: info

View File

@@ -0,0 +1,18 @@
name: Build and push CI Docker image
description: Build and push CI Docker image to local registry
inputs:
binary_path:
default: "./bin"
description: "Binary path"
runs:
using: composite
steps:
- name: Build and push to local registry
uses: docker/build-push-action@v5
with:
context: .
file: ./docker/ci/ubuntu/Dockerfile.fuzztests
push: true
tags: localhost:5001/greptime/greptimedb:latest
build-args: |
BINARY_PATH=${{ inputs.binary_path }}

View File

@@ -24,6 +24,14 @@ inputs:
description: Build android artifacts
required: false
default: 'false'
image-namespace:
description: Image Namespace
required: false
default: 'greptime'
image-registry:
description: Image Registry
required: false
default: 'docker.io'
runs:
using: composite
steps:
@@ -35,7 +43,9 @@ runs:
make build-by-dev-builder \
CARGO_PROFILE=${{ inputs.cargo-profile }} \
FEATURES=${{ inputs.features }} \
BASE_IMAGE=${{ inputs.base-image }}
BASE_IMAGE=${{ inputs.base-image }} \
IMAGE_NAMESPACE=${{ inputs.image-namespace }} \
IMAGE_REGISTRY=${{ inputs.image-registry }}
- name: Upload artifacts
uses: ./.github/actions/upload-artifacts
@@ -53,7 +63,9 @@ runs:
shell: bash
if: ${{ inputs.build-android-artifacts == 'true' }}
run: |
cd ${{ inputs.working-dir }} && make strip-android-bin
cd ${{ inputs.working-dir }} && make strip-android-bin \
IMAGE_NAMESPACE=${{ inputs.image-namespace }} \
IMAGE_REGISTRY=${{ inputs.image-registry }}
- name: Upload android artifacts
uses: ./.github/actions/upload-artifacts

View File

@@ -30,7 +30,9 @@ runs:
# NOTE: If BUILD_JOBS > 4, it always OOMs on the EC2 instance.
run: |
cd ${{ inputs.working-dir }} && \
make run-it-in-container BUILD_JOBS=4
make run-it-in-container BUILD_JOBS=4 \
IMAGE_NAMESPACE=i8k6a5e1/greptime \
IMAGE_REGISTRY=public.ecr.aws
- name: Upload sqlness logs
if: ${{ failure() && inputs.disable-run-tests == 'false' }} # Only upload logs when the integration tests failed.
@@ -49,6 +51,8 @@ runs:
artifacts-dir: greptime-linux-${{ inputs.arch }}-pyo3-${{ inputs.version }}
version: ${{ inputs.version }}
working-dir: ${{ inputs.working-dir }}
image-registry: public.ecr.aws
image-namespace: i8k6a5e1/greptime
- name: Build greptime without pyo3
if: ${{ inputs.dev-mode == 'false' }}
@@ -60,6 +64,8 @@ runs:
artifacts-dir: greptime-linux-${{ inputs.arch }}-${{ inputs.version }}
version: ${{ inputs.version }}
working-dir: ${{ inputs.working-dir }}
image-registry: public.ecr.aws
image-namespace: i8k6a5e1/greptime
- name: Clean up the target directory # Clean up the target directory for the centos7 base image, or it will still use the objects of last build.
shell: bash
@@ -76,6 +82,8 @@ runs:
artifacts-dir: greptime-linux-${{ inputs.arch }}-centos-${{ inputs.version }}
version: ${{ inputs.version }}
working-dir: ${{ inputs.working-dir }}
image-registry: public.ecr.aws
image-namespace: i8k6a5e1/greptime
- name: Build greptime on android base image
uses: ./.github/actions/build-greptime-binary
@@ -86,3 +94,5 @@ runs:
version: ${{ inputs.version }}
working-dir: ${{ inputs.working-dir }}
build-android-artifacts: true
image-registry: public.ecr.aws
image-namespace: i8k6a5e1/greptime

View File

@@ -59,9 +59,15 @@ runs:
if: ${{ inputs.disable-run-tests == 'false' }}
uses: taiki-e/install-action@nextest
# Get proper backtraces in mac Sonoma. Currently there's an issue with the new
# linker that prevents backtraces from getting printed correctly.
#
# <https://github.com/rust-lang/rust/issues/113783>
- name: Run integration tests
if: ${{ inputs.disable-run-tests == 'false' }}
shell: bash
env:
CARGO_BUILD_RUSTFLAGS: "-Clink-arg=-Wl,-ld_classic"
run: |
make test sqlness-test
@@ -75,6 +81,8 @@ runs:
- name: Build greptime binary
shell: bash
env:
CARGO_BUILD_RUSTFLAGS: "-Clink-arg=-Wl,-ld_classic"
run: |
make build \
CARGO_PROFILE=${{ inputs.cargo-profile }} \

View File

@@ -59,6 +59,9 @@ runs:
if: ${{ inputs.disable-run-tests == 'false' }}
shell: pwsh
run: make test sqlness-test
env:
RUSTUP_WINDOWS_PATH_ADD_BIN: 1 # Workaround for https://github.com/nextest-rs/nextest/issues/1493
RUST_BACKTRACE: 1
- name: Upload sqlness logs
if: ${{ failure() }} # Only upload logs when the integration tests failed.

17
.github/actions/setup-chaos/action.yml vendored Normal file
View File

@@ -0,0 +1,17 @@
name: Setup Chaos Mesh
description: Deploy Chaos Mesh
runs:
using: composite
steps:
- uses: actions/checkout@v4
- name: Install Chaos Mesh
shell: bash
run: |
helm repo add chaos-mesh https://charts.chaos-mesh.org
kubectl create ns chaos-mesh
helm install chaos-mesh chaos-mesh/chaos-mesh -n=chaos-mesh --version 2.6.3
- name: Print Chaos-mesh
if: always()
shell: bash
run: |
kubectl get po -n chaos-mesh

16
.github/actions/setup-cyborg/action.yml vendored Normal file
View File

@@ -0,0 +1,16 @@
name: Setup cyborg environment
description: Setup cyborg environment
runs:
using: composite
steps:
- uses: actions/setup-node@v4
with:
node-version: 22
- uses: pnpm/action-setup@v3
with:
package_json_file: 'cyborg/package.json'
run_install: true
- name: Describe the Environment
working-directory: cyborg
shell: bash
run: pnpm tsx -v

View File

@@ -0,0 +1,25 @@
name: Setup Etcd cluster
description: Deploy Etcd cluster on Kubernetes
inputs:
etcd-replicas:
default: 3
description: "Etcd replicas"
namespace:
default: "etcd-cluster"
runs:
using: composite
steps:
- name: Install Etcd cluster
shell: bash
run: |
helm upgrade \
--install etcd oci://registry-1.docker.io/bitnamicharts/etcd \
--set replicaCount=${{ inputs.etcd-replicas }} \
--set resources.requests.cpu=50m \
--set resources.requests.memory=128Mi \
--set auth.rbac.create=false \
--set auth.rbac.token.enabled=false \
--set persistence.size=2Gi \
--create-namespace \
-n ${{ inputs.namespace }}

View File

@@ -0,0 +1,91 @@
name: Setup GreptimeDB cluster
description: Deploy GreptimeDB cluster on Kubernetes
inputs:
frontend-replicas:
default: 2
description: "Number of Frontend replicas"
datanode-replicas:
default: 2
description: "Number of Datanode replicas"
meta-replicas:
default: 3
description: "Number of Metasrv replicas"
image-registry:
default: "docker.io"
description: "Image registry"
image-repository:
default: "greptime/greptimedb"
description: "Image repository"
image-tag:
default: "latest"
description: 'Image tag'
etcd-endpoints:
default: "etcd.etcd-cluster.svc.cluster.local:2379"
description: "Etcd endpoints"
values-filename:
default: "with-minio.yaml"
enable-region-failover:
default: false
runs:
using: composite
steps:
- name: Install GreptimeDB operator
shell: bash
run: |
helm repo add greptime https://greptimeteam.github.io/helm-charts/
helm repo update
helm upgrade \
--install \
--create-namespace \
greptimedb-operator greptime/greptimedb-operator \
-n greptimedb-admin \
--wait \
--wait-for-jobs
- name: Install GreptimeDB cluster
shell: bash
run: |
helm upgrade \
--install my-greptimedb \
--set meta.etcdEndpoints=${{ inputs.etcd-endpoints }} \
--set meta.enableRegionFailover=${{ inputs.enable-region-failover }} \
--set image.registry=${{ inputs.image-registry }} \
--set image.repository=${{ inputs.image-repository }} \
--set image.tag=${{ inputs.image-tag }} \
--set base.podTemplate.main.resources.requests.cpu=50m \
--set base.podTemplate.main.resources.requests.memory=256Mi \
--set base.podTemplate.main.resources.limits.cpu=1000m \
--set base.podTemplate.main.resources.limits.memory=2Gi \
--set frontend.replicas=${{ inputs.frontend-replicas }} \
--set datanode.replicas=${{ inputs.datanode-replicas }} \
--set meta.replicas=${{ inputs.meta-replicas }} \
greptime/greptimedb-cluster \
--create-namespace \
-n my-greptimedb \
--values ./.github/actions/setup-greptimedb-cluster/${{ inputs.values-filename }} \
--wait \
--wait-for-jobs
- name: Wait for GreptimeDB
shell: bash
run: |
while true; do
PHASE=$(kubectl -n my-greptimedb get gtc my-greptimedb -o jsonpath='{.status.clusterPhase}')
if [ "$PHASE" == "Running" ]; then
echo "Cluster is ready"
break
else
echo "Cluster is not ready yet: Current phase: $PHASE"
kubectl get pods -n my-greptimedb
sleep 5 # wait for 5 seconds before check again.
fi
done
- name: Print GreptimeDB info
if: always()
shell: bash
run: |
kubectl get all --show-labels -n my-greptimedb
- name: Describe Nodes
if: always()
shell: bash
run: |
kubectl describe nodes

View File

@@ -0,0 +1,18 @@
meta:
config: |-
[runtime]
read_rt_size = 8
write_rt_size = 8
bg_rt_size = 8
datanode:
config: |-
[runtime]
read_rt_size = 8
write_rt_size = 8
bg_rt_size = 8
frontend:
config: |-
[runtime]
read_rt_size = 8
write_rt_size = 8
bg_rt_size = 8

View File

@@ -0,0 +1,38 @@
meta:
config: |-
[runtime]
read_rt_size = 8
write_rt_size = 8
bg_rt_size = 8
[datanode]
[datanode.client]
timeout = "60s"
datanode:
config: |-
[runtime]
read_rt_size = 8
write_rt_size = 8
bg_rt_size = 8
[storage]
cache_path = "/data/greptimedb/s3cache"
cache_capacity = "256MB"
frontend:
config: |-
[runtime]
read_rt_size = 8
write_rt_size = 8
bg_rt_size = 8
[meta_client]
ddl_timeout = "60s"
objectStorage:
s3:
bucket: default
region: us-west-2
root: test-root
endpoint: http://minio.minio.svc.cluster.local
credentials:
accessKeyId: rootuser
secretAccessKey: rootpass123

View File

@@ -0,0 +1,34 @@
meta:
config: |-
[runtime]
read_rt_size = 8
write_rt_size = 8
bg_rt_size = 8
[datanode]
[datanode.client]
timeout = "60s"
datanode:
config: |-
[runtime]
read_rt_size = 8
write_rt_size = 8
bg_rt_size = 8
frontend:
config: |-
[runtime]
read_rt_size = 8
write_rt_size = 8
bg_rt_size = 8
[meta_client]
ddl_timeout = "60s"
objectStorage:
s3:
bucket: default
region: us-west-2
root: test-root
endpoint: http://minio.minio.svc.cluster.local
credentials:
accessKeyId: rootuser
secretAccessKey: rootpass123

View File

@@ -0,0 +1,45 @@
meta:
config: |-
[runtime]
read_rt_size = 8
write_rt_size = 8
bg_rt_size = 8
[wal]
provider = "kafka"
broker_endpoints = ["kafka.kafka-cluster.svc.cluster.local:9092"]
num_topics = 3
[datanode]
[datanode.client]
timeout = "60s"
datanode:
config: |-
[runtime]
read_rt_size = 8
write_rt_size = 8
bg_rt_size = 8
[wal]
provider = "kafka"
broker_endpoints = ["kafka.kafka-cluster.svc.cluster.local:9092"]
linger = "2ms"
frontend:
config: |-
[runtime]
read_rt_size = 8
write_rt_size = 8
bg_rt_size = 8
[meta_client]
ddl_timeout = "60s"
objectStorage:
s3:
bucket: default
region: us-west-2
root: test-root
endpoint: http://minio.minio.svc.cluster.local
credentials:
accessKeyId: rootuser
secretAccessKey: rootpass123

View File

@@ -0,0 +1,24 @@
name: Setup Kafka cluster
description: Deploy Kafka cluster on Kubernetes
inputs:
controller-replicas:
default: 3
description: "Kafka controller replicas"
namespace:
default: "kafka-cluster"
runs:
using: composite
steps:
- name: Install Kafka cluster
shell: bash
run: |
helm upgrade \
--install kafka oci://registry-1.docker.io/bitnamicharts/kafka \
--set controller.replicaCount=${{ inputs.controller-replicas }} \
--set controller.resources.requests.cpu=50m \
--set controller.resources.requests.memory=128Mi \
--set listeners.controller.protocol=PLAINTEXT \
--set listeners.client.protocol=PLAINTEXT \
--create-namespace \
-n ${{ inputs.namespace }}

10
.github/actions/setup-kind/action.yml vendored Normal file
View File

@@ -0,0 +1,10 @@
name: Setup Kind
description: Deploy Kind
runs:
using: composite
steps:
- uses: actions/checkout@v4
- name: Create kind cluster
shell: bash
run: |
./.github/scripts/kind-with-registry.sh

24
.github/actions/setup-minio/action.yml vendored Normal file
View File

@@ -0,0 +1,24 @@
name: Setup Minio cluster
description: Deploy Minio cluster on Kubernetes
inputs:
replicas:
default: 1
description: "replicas"
runs:
using: composite
steps:
- name: Install Minio cluster
shell: bash
run: |
helm repo add minio https://charts.min.io/
helm upgrade --install minio \
--set resources.requests.memory=128Mi \
--set replicas=${{ inputs.replicas }} \
--set mode=standalone \
--set rootUser=rootuser,rootPassword=rootpass123 \
--set buckets[0].name=default \
--set service.port=80,service.targetPort=9000 \
minio/minio \
--create-namespace \
-n minio

View File

@@ -57,3 +57,14 @@ runs:
AWS_SECRET_ACCESS_KEY: ${{ inputs.aws-secret-access-key }}
run: |
aws s3 rm s3://${{ inputs.aws-ci-test-bucket }}/${{ inputs.data-root }} --recursive
- name: Export kind logs
if: failure()
shell: bash
run: kind export logs -n greptimedb-operator-e2e /tmp/kind
- name: Upload logs
if: failure()
uses: actions/upload-artifact@v4
with:
name: kind-logs
path: /tmp/kind
retention-days: 3

View File

@@ -1,4 +0,0 @@
Doc not needed:
- '- \[x\] This PR does not require documentation updates.'
Doc update required:
- '- \[ \] This PR does not require documentation updates.'

View File

@@ -15,6 +15,6 @@ Please explain IN DETAIL what the changes are in this PR and why they are needed
## Checklist
- [ ] I have written the necessary rustdoc comments.
- [ ] I have added the necessary unit tests and integration tests.
- [x] This PR does not require documentation updates.
- [ ] I have written the necessary rustdoc comments.
- [ ] I have added the necessary unit tests and integration tests.
- [ ] This PR requires documentation updates.

66
.github/scripts/kind-with-registry.sh vendored Executable file
View File

@@ -0,0 +1,66 @@
#!/usr/bin/env bash
set -e
set -o pipefail
# 1. Create registry container unless it already exists
reg_name='kind-registry'
reg_port='5001'
if [ "$(docker inspect -f '{{.State.Running}}' "${reg_name}" 2>/dev/null || true)" != 'true' ]; then
docker run \
-d --restart=always -p "127.0.0.1:${reg_port}:5000" --network bridge --name "${reg_name}" \
registry:2
fi
# 2. Create kind cluster with containerd registry config dir enabled
# TODO: kind will eventually enable this by default and this patch will
# be unnecessary.
#
# See:
# https://github.com/kubernetes-sigs/kind/issues/2875
# https://github.com/containerd/containerd/blob/main/docs/cri/config.md#registry-configuration
# See: https://github.com/containerd/containerd/blob/main/docs/hosts.md
cat <<EOF | kind create cluster --wait 2m --config=-
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
containerdConfigPatches:
- |-
[plugins."io.containerd.grpc.v1.cri".registry]
config_path = "/etc/containerd/certs.d"
EOF
# 3. Add the registry config to the nodes
#
# This is necessary because localhost resolves to loopback addresses that are
# network-namespace local.
# In other words: localhost in the container is not localhost on the host.
#
# We want a consistent name that works from both ends, so we tell containerd to
# alias localhost:${reg_port} to the registry container when pulling images
REGISTRY_DIR="/etc/containerd/certs.d/localhost:${reg_port}"
for node in $(kind get nodes); do
docker exec "${node}" mkdir -p "${REGISTRY_DIR}"
cat <<EOF | docker exec -i "${node}" cp /dev/stdin "${REGISTRY_DIR}/hosts.toml"
[host."http://${reg_name}:5000"]
EOF
done
# 4. Connect the registry to the cluster network if not already connected
# This allows kind to bootstrap the network but ensures they're on the same network
if [ "$(docker inspect -f='{{json .NetworkSettings.Networks.kind}}' "${reg_name}")" = 'null' ]; then
docker network connect "kind" "${reg_name}"
fi
# 5. Document the local registry
# https://github.com/kubernetes/enhancements/tree/master/keps/sig-cluster-lifecycle/generic/1755-communicating-a-local-registry
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: ConfigMap
metadata:
name: local-registry-hosting
namespace: kube-public
data:
localRegistryHosting.v1: |
host: "localhost:${reg_port}"
help: "https://kind.sigs.k8s.io/docs/user/local-registry/"
EOF

View File

@@ -13,7 +13,7 @@ on:
name: Build API docs
env:
RUST_TOOLCHAIN: nightly-2024-04-18
RUST_TOOLCHAIN: nightly-2024-04-20
jobs:
apidoc:

View File

@@ -82,6 +82,9 @@ env:
# The source code will check out in the following path: '${WORKING_DIR}/dev/greptime'.
CHECKOUT_GREPTIMEDB_PATH: dev/greptimedb
permissions:
issues: write
jobs:
allocate-runners:
name: Allocate runners
@@ -321,7 +324,7 @@ jobs:
github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
notification:
if: ${{ always() }} # Not requiring successful dependent jobs, always run.
if: ${{ github.repository == 'GreptimeTeam/greptimedb' && always() }} # Not requiring successful dependent jobs, always run.
name: Send notification to Greptime team
needs: [
release-images-to-dockerhub
@@ -330,16 +333,25 @@ jobs:
env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
steps:
- name: Notifiy dev build successful result
- uses: actions/checkout@v4
- uses: ./.github/actions/setup-cyborg
- name: Report CI status
id: report-ci-status
working-directory: cyborg
run: pnpm tsx bin/report-ci-failure.ts
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
CI_REPORT_STATUS: ${{ needs.release-images-to-dockerhub.outputs.build-result == 'success' }}
- name: Notify dev build successful result
uses: slackapi/slack-github-action@v1.23.0
if: ${{ needs.release-images-to-dockerhub.outputs.build-result == 'success' }}
with:
payload: |
{"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has completed successfully."}
- name: Notifiy dev build failed result
- name: Notify dev build failed result
uses: slackapi/slack-github-action@v1.23.0
if: ${{ needs.release-images-to-dockerhub.outputs.build-result != 'success' }}
with:
payload: |
{"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has failed, please check 'https://github.com/GreptimeTeam/greptimedb/actions/workflows/${{ env.NEXT_RELEASE_VERSION }}-build.yml'."}
{"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has failed, please check ${{ steps.report-ci-status.outputs.html_url }}."}

View File

@@ -30,7 +30,7 @@ concurrency:
cancel-in-progress: true
env:
RUST_TOOLCHAIN: nightly-2024-04-18
RUST_TOOLCHAIN: nightly-2024-04-20
jobs:
check-typos-and-docs:
@@ -57,7 +57,7 @@ jobs:
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ windows-latest, ubuntu-20.04 ]
os: [ windows-2022, ubuntu-20.04 ]
timeout-minutes: 60
steps:
- uses: actions/checkout@v4
@@ -160,14 +160,16 @@ jobs:
run: |
sudo apt-get install -y libfuzzer-14-dev
rustup install nightly
cargo +nightly install cargo-fuzz
cargo +nightly install cargo-fuzz cargo-gc-bin
- name: Download pre-built binaries
uses: actions/download-artifact@v4
with:
name: bins
path: .
- name: Unzip binaries
run: tar -xvf ./bins.tar.gz
run: |
tar -xvf ./bins.tar.gz
rm ./bins.tar.gz
- name: Run GreptimeDB
run: |
./bins/greptime standalone start&
@@ -182,7 +184,7 @@ jobs:
unstable-fuzztest:
name: Unstable Fuzz Test
needs: build
needs: build-greptime-ci
runs-on: ubuntu-latest
strategy:
matrix:
@@ -204,20 +206,22 @@ jobs:
shell: bash
run: |
sudo apt update && sudo apt install -y libfuzzer-14-dev
cargo install cargo-fuzz
- name: Download pre-built binaries
cargo install cargo-fuzz cargo-gc-bin
- name: Download pre-built binary
uses: actions/download-artifact@v4
with:
name: bins
name: bin
path: .
- name: Unzip binaries
run: tar -xvf ./bins.tar.gz
- name: Fuzz Test
- name: Unzip binary
run: |
tar -xvf ./bin.tar.gz
rm ./bin.tar.gz
- name: Run Fuzz Test
uses: ./.github/actions/fuzz-test
env:
CUSTOM_LIBFUZZER_PATH: /usr/lib/llvm-14/lib/libFuzzer.a
GT_MYSQL_ADDR: 127.0.0.1:4002
GT_FUZZ_BINARY_PATH: ./bins/greptime
GT_FUZZ_BINARY_PATH: ./bin/greptime
GT_FUZZ_INSTANCE_ROOT_DIR: /tmp/unstable-greptime/
with:
target: ${{ matrix.target }}
@@ -231,10 +235,8 @@ jobs:
path: /tmp/unstable-greptime/
retention-days: 3
sqlness:
name: Sqlness Test
needs: build
build-greptime-ci:
name: Build GreptimeDB binary (profile-CI)
runs-on: ${{ matrix.os }}
strategy:
matrix:
@@ -242,30 +244,311 @@ jobs:
timeout-minutes: 60
steps:
- uses: actions/checkout@v4
- name: Download pre-built binaries
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
- uses: Swatinem/rust-cache@v2
with:
# Shares across multiple jobs
shared-key: "build-greptime-ci"
- name: Install cargo-gc-bin
shell: bash
run: cargo install cargo-gc-bin
- name: Build greptime binary
shell: bash
# `cargo gc` will invoke `cargo build` with specified args
run: cargo gc --profile ci -- --bin greptime
- name: Pack greptime binary
shell: bash
run: |
mkdir bin && \
mv ./target/ci/greptime bin
- name: Print greptime binaries info
run: ls -lh bin
- name: Upload artifacts
uses: ./.github/actions/upload-artifacts
with:
artifacts-dir: bin
version: current
distributed-fuzztest:
name: Fuzz Test (Distributed, ${{ matrix.mode.name }}, ${{ matrix.target }})
runs-on: ubuntu-latest
needs: build-greptime-ci
strategy:
matrix:
target: [ "fuzz_create_table", "fuzz_alter_table", "fuzz_create_database", "fuzz_create_logical_table", "fuzz_alter_logical_table", "fuzz_insert", "fuzz_insert_logical_table" ]
mode:
- name: "Disk"
minio: false
kafka: false
values: "with-disk.yaml"
- name: "Minio"
minio: true
kafka: false
values: "with-minio.yaml"
- name: "Minio with Cache"
minio: true
kafka: false
values: "with-minio-and-cache.yaml"
- name: "Remote WAL"
minio: true
kafka: true
values: "with-remote-wal.yaml"
steps:
- uses: actions/checkout@v4
- name: Setup Kind
uses: ./.github/actions/setup-kind
- if: matrix.mode.minio
name: Setup Minio
uses: ./.github/actions/setup-minio
- if: matrix.mode.kafka
name: Setup Kafka cluster
uses: ./.github/actions/setup-kafka-cluster
name: Setup Etcd cluster
uses: ./.github/actions/setup-etcd-cluster
# Prepares for fuzz tests
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
# Shares across multiple jobs
shared-key: "fuzz-test-targets"
- name: Set Rust Fuzz
shell: bash
run: |
sudo apt-get install -y libfuzzer-14-dev
rustup install nightly
cargo +nightly install cargo-fuzz cargo-gc-bin
# Downloads the ci binary
- name: Download pre-built binary
uses: actions/download-artifact@v4
with:
name: bins
name: bin
path: .
- name: Unzip binaries
run: tar -xvf ./bins.tar.gz
- name: Run sqlness
run: RUST_BACKTRACE=1 ./bins/sqlness-runner -c ./tests/cases --bins-dir ./bins --preserve-state
- name: Upload sqlness logs
if: always()
- name: Unzip binary
run: |
tar -xvf ./bin.tar.gz
rm ./bin.tar.gz
- name: Build and push GreptimeDB image
uses: ./.github/actions/build-and-push-ci-image
- name: Wait for etcd
run: |
kubectl wait \
--for=condition=Ready \
pod -l app.kubernetes.io/instance=etcd \
--timeout=120s \
-n etcd-cluster
- if: matrix.mode.minio
name: Wait for minio
run: |
kubectl wait \
--for=condition=Ready \
pod -l app=minio \
--timeout=120s \
-n minio
- if: matrix.mode.kafka
name: Wait for kafka
run: |
kubectl wait \
--for=condition=Ready \
pod -l app.kubernetes.io/instance=kafka \
--timeout=120s \
-n kafka-cluster
- name: Print etcd info
shell: bash
run: kubectl get all --show-labels -n etcd-cluster
# Setup cluster for test
- name: Setup GreptimeDB cluster
uses: ./.github/actions/setup-greptimedb-cluster
with:
image-registry: localhost:5001
values-filename: ${{ matrix.mode.values }}
- name: Port forward (mysql)
run: |
kubectl port-forward service/my-greptimedb-frontend 4002:4002 -n my-greptimedb&
- name: Fuzz Test
uses: ./.github/actions/fuzz-test
env:
CUSTOM_LIBFUZZER_PATH: /usr/lib/llvm-14/lib/libFuzzer.a
GT_MYSQL_ADDR: 127.0.0.1:4002
with:
target: ${{ matrix.target }}
max-total-time: 120
- name: Describe Nodes
if: failure()
shell: bash
run: |
kubectl describe nodes
- name: Export kind logs
if: failure()
shell: bash
run: |
kind export logs /tmp/kind
- name: Upload logs
if: failure()
uses: actions/upload-artifact@v4
with:
name: sqlness-logs
path: /tmp/sqlness-*
name: fuzz-tests-kind-logs-${{ matrix.mode.name }}-${{ matrix.target }}
path: /tmp/kind
retention-days: 3
- name: Delete cluster
if: success()
shell: bash
run: |
kind delete cluster
docker stop $(docker ps -a -q)
docker rm $(docker ps -a -q)
docker system prune -f
sqlness-kafka-wal:
name: Sqlness Test with Kafka Wal
distributed-fuzztest-with-chaos:
name: Fuzz Test with Chaos (Distributed, ${{ matrix.mode.name }}, ${{ matrix.target }})
runs-on: ubuntu-latest
needs: build-greptime-ci
strategy:
matrix:
target: ["fuzz_failover_mito_regions"]
mode:
- name: "Remote WAL"
minio: true
kafka: true
values: "with-remote-wal.yaml"
steps:
- uses: actions/checkout@v4
- name: Setup Kind
uses: ./.github/actions/setup-kind
- name: Setup Chaos Mesh
uses: ./.github/actions/setup-chaos
- if: matrix.mode.minio
name: Setup Minio
uses: ./.github/actions/setup-minio
- if: matrix.mode.kafka
name: Setup Kafka cluster
uses: ./.github/actions/setup-kafka-cluster
name: Setup Etcd cluster
uses: ./.github/actions/setup-etcd-cluster
# Prepares for fuzz tests
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
# Shares across multiple jobs
shared-key: "fuzz-test-targets"
- name: Set Rust Fuzz
shell: bash
run: |
sudo apt-get install -y libfuzzer-14-dev
rustup install nightly
cargo +nightly install cargo-fuzz cargo-gc-bin
# Downloads the ci binary
- name: Download pre-built binary
uses: actions/download-artifact@v4
with:
name: bin
path: .
- name: Unzip binary
run: |
tar -xvf ./bin.tar.gz
rm ./bin.tar.gz
- name: Build and push GreptimeDB image
uses: ./.github/actions/build-and-push-ci-image
- name: Wait for etcd
run: |
kubectl wait \
--for=condition=Ready \
pod -l app.kubernetes.io/instance=etcd \
--timeout=120s \
-n etcd-cluster
- if: matrix.mode.minio
name: Wait for minio
run: |
kubectl wait \
--for=condition=Ready \
pod -l app=minio \
--timeout=120s \
-n minio
- if: matrix.mode.kafka
name: Wait for kafka
run: |
kubectl wait \
--for=condition=Ready \
pod -l app.kubernetes.io/instance=kafka \
--timeout=120s \
-n kafka-cluster
- name: Print etcd info
shell: bash
run: kubectl get all --show-labels -n etcd-cluster
# Setup cluster for test
- name: Setup GreptimeDB cluster
uses: ./.github/actions/setup-greptimedb-cluster
with:
image-registry: localhost:5001
values-filename: ${{ matrix.mode.values }}
enable-region-failover: true
- name: Port forward (mysql)
run: |
kubectl port-forward service/my-greptimedb-frontend 4002:4002 -n my-greptimedb&
- name: Fuzz Test
uses: ./.github/actions/fuzz-test
env:
CUSTOM_LIBFUZZER_PATH: /usr/lib/llvm-14/lib/libFuzzer.a
GT_MYSQL_ADDR: 127.0.0.1:4002
with:
target: ${{ matrix.target }}
max-total-time: 120
- name: Describe Nodes
if: failure()
shell: bash
run: |
kubectl describe nodes
- name: Export kind logs
if: failure()
shell: bash
run: |
kind export logs /tmp/kind
- name: Upload logs
if: failure()
uses: actions/upload-artifact@v4
with:
name: fuzz-tests-kind-logs-${{ matrix.mode.name }}-${{ matrix.target }}
path: /tmp/kind
retention-days: 3
- name: Delete cluster
if: success()
shell: bash
run: |
kind delete cluster
docker stop $(docker ps -a -q)
docker rm $(docker ps -a -q)
docker system prune -f
sqlness:
name: Sqlness Test (${{ matrix.mode.name }})
needs: build
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ ubuntu-20.04 ]
mode:
- name: "Basic"
opts: ""
kafka: false
- name: "Remote WAL"
opts: "-w kafka -k 127.0.0.1:9092"
kafka: true
timeout-minutes: 60
steps:
- uses: actions/checkout@v4
@@ -276,17 +559,18 @@ jobs:
path: .
- name: Unzip binaries
run: tar -xvf ./bins.tar.gz
- name: Setup kafka server
- if: matrix.mode.kafka
name: Setup kafka server
working-directory: tests-integration/fixtures/kafka
run: docker compose -f docker-compose-standalone.yml up -d --wait
- name: Run sqlness
run: RUST_BACKTRACE=1 ./bins/sqlness-runner -w kafka -k 127.0.0.1:9092 -c ./tests/cases --bins-dir ./bins --preserve-state
run: RUST_BACKTRACE=1 ./bins/sqlness-runner ${{ matrix.mode.opts }} -c ./tests/cases --bins-dir ./bins --preserve-state
- name: Upload sqlness logs
if: always()
if: failure()
uses: actions/upload-artifact@v4
with:
name: sqlness-logs-with-kafka-wal
path: /tmp/sqlness-*
name: sqlness-logs-${{ matrix.mode.name }}
path: /tmp/sqlness*
retention-days: 3
fmt:
@@ -374,6 +658,9 @@ jobs:
- name: Setup kafka server
working-directory: tests-integration/fixtures/kafka
run: docker compose -f docker-compose-standalone.yml up -d --wait
- name: Setup minio
working-directory: tests-integration/fixtures/minio
run: docker compose -f docker-compose-standalone.yml up -d --wait
- name: Run nextest cases
run: cargo llvm-cov nextest --workspace --lcov --output-path lcov.info -F pyo3_backend -F dashboard
env:
@@ -384,6 +671,11 @@ jobs:
GT_S3_ACCESS_KEY_ID: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }}
GT_S3_ACCESS_KEY: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }}
GT_S3_REGION: ${{ vars.AWS_CI_TEST_BUCKET_REGION }}
GT_MINIO_BUCKET: greptime
GT_MINIO_ACCESS_KEY_ID: superpower_ci_user
GT_MINIO_ACCESS_KEY: superpower_password
GT_MINIO_REGION: us-west-2
GT_MINIO_ENDPOINT_URL: http://127.0.0.1:9000
GT_ETCD_ENDPOINTS: http://127.0.0.1:2379
GT_KAFKA_ENDPOINTS: 127.0.0.1:9092
UNITTEST_LOG_DIR: "__unittest_logs"

View File

@@ -1,39 +0,0 @@
name: Create Issue in downstream repos
on:
issues:
types:
- labeled
pull_request_target:
types:
- labeled
jobs:
doc_issue:
if: github.event.label.name == 'doc update required'
runs-on: ubuntu-20.04
steps:
- name: create an issue in doc repo
uses: dacbd/create-issue-action@v1.2.1
with:
owner: GreptimeTeam
repo: docs
token: ${{ secrets.DOCS_REPO_TOKEN }}
title: Update docs for ${{ github.event.issue.title || github.event.pull_request.title }}
body: |
A document change request is generated from
${{ github.event.issue.html_url || github.event.pull_request.html_url }}
cloud_issue:
if: github.event.label.name == 'cloud followup required'
runs-on: ubuntu-20.04
steps:
- name: create an issue in cloud repo
uses: dacbd/create-issue-action@v1.2.1
with:
owner: GreptimeTeam
repo: greptimedb-cloud
token: ${{ secrets.DOCS_REPO_TOKEN }}
title: Followup changes in ${{ github.event.issue.title || github.event.pull_request.title }}
body: |
A followup request is generated from
${{ github.event.issue.html_url || github.event.pull_request.html_url }}

View File

@@ -1,36 +0,0 @@
name: "PR Doc Labeler"
on:
pull_request_target:
types: [opened, edited, synchronize, ready_for_review, auto_merge_enabled, labeled, unlabeled]
permissions:
pull-requests: write
contents: read
jobs:
triage:
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
runs-on: ubuntu-latest
steps:
- uses: github/issue-labeler@v3.4
with:
configuration-path: .github/doc-label-config.yml
enable-versioned-regex: false
repo-token: ${{ secrets.GITHUB_TOKEN }}
sync-labels: 1
- name: create an issue in doc repo
uses: dacbd/create-issue-action@v1.2.1
if: ${{ github.event.action == 'opened' && contains(github.event.pull_request.body, '- [ ] This PR does not require documentation updates.') }}
with:
owner: GreptimeTeam
repo: docs
token: ${{ secrets.DOCS_REPO_TOKEN }}
title: Update docs for ${{ github.event.issue.title || github.event.pull_request.title }}
body: |
A document change request is generated from
${{ github.event.issue.html_url || github.event.pull_request.html_url }}
- name: Check doc labels
uses: docker://agilepathway/pull-request-label-checker:latest
with:
one_of: Doc update required,Doc not needed
repo_token: ${{ secrets.GITHUB_TOKEN }}

.github/workflows/docbot.yml (vendored, new file, 22 lines)

@@ -0,0 +1,22 @@
name: Follow Up Docs
on:
pull_request_target:
types: [opened, edited]
permissions:
pull-requests: write
contents: read
jobs:
docbot:
runs-on: ubuntu-20.04
timeout-minutes: 10
steps:
- uses: actions/checkout@v4
- uses: ./.github/actions/setup-cyborg
- name: Maybe Follow Up Docs Issue
working-directory: cyborg
run: pnpm tsx bin/follow-up-docs-issue.ts
env:
DOCS_REPO_TOKEN: ${{ secrets.DOCS_REPO_TOKEN }}
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}


@@ -67,19 +67,13 @@ jobs:
- run: 'echo "No action required"'
sqlness:
name: Sqlness Test
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ ubuntu-20.04 ]
steps:
- run: 'echo "No action required"'
sqlness-kafka-wal:
name: Sqlness Test with Kafka Wal
name: Sqlness Test (${{ matrix.mode.name }})
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ ubuntu-20.04 ]
mode:
- name: "Basic"
- name: "Remote WAL"
steps:
- run: 'echo "No action required"'


@@ -66,6 +66,13 @@ env:
NIGHTLY_RELEASE_PREFIX: nightly
# Use the different image name to avoid conflict with the release images.
# The DockerHub image will be greptime/greptimedb-nightly.
IMAGE_NAME: greptimedb-nightly
permissions:
issues: write
jobs:
allocate-runners:
name: Allocate runners
@@ -188,6 +195,7 @@ jobs:
with:
image-registry: docker.io
image-namespace: ${{ vars.IMAGE_NAMESPACE }}
image-name: ${{ env.IMAGE_NAME }}
image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
image-registry-password: ${{ secrets.DOCKERHUB_TOKEN }}
version: ${{ needs.allocate-runners.outputs.version }}
@@ -220,7 +228,7 @@ jobs:
with:
src-image-registry: docker.io
src-image-namespace: ${{ vars.IMAGE_NAMESPACE }}
src-image-name: greptimedb
src-image-name: ${{ env.IMAGE_NAME }}
dst-image-registry-username: ${{ secrets.ALICLOUD_USERNAME }}
dst-image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }}
dst-image-registry: ${{ vars.ACR_IMAGE_REGISTRY }}
@@ -285,7 +293,7 @@ jobs:
github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
notification:
if: ${{ always() }} # Not requiring successful dependent jobs, always run.
if: ${{ github.repository == 'GreptimeTeam/greptimedb' && always() }} # Not requiring successful dependent jobs, always run.
name: Send notification to Greptime team
needs: [
release-images-to-dockerhub
@@ -294,16 +302,25 @@ jobs:
env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
steps:
- name: Notifiy nightly build successful result
- uses: actions/checkout@v4
- uses: ./.github/actions/setup-cyborg
- name: Report CI status
id: report-ci-status
working-directory: cyborg
run: pnpm tsx bin/report-ci-failure.ts
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
CI_REPORT_STATUS: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result == 'success' }}
- name: Notify nightly build successful result
uses: slackapi/slack-github-action@v1.23.0
if: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result == 'success' }}
with:
payload: |
{"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has completed successfully."}
- name: Notifiy nightly build failed result
- name: Notify nightly build failed result
uses: slackapi/slack-github-action@v1.23.0
if: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result != 'success' }}
with:
payload: |
{"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has failed, please check 'https://github.com/GreptimeTeam/greptimedb/actions/workflows/${{ env.NEXT_RELEASE_VERSION }}-build.yml'."}
{"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has failed, please check ${{ steps.report-ci-status.outputs.html_url }}."}


@@ -1,6 +1,6 @@
on:
schedule:
- cron: '0 23 * * 1-5'
- cron: "0 23 * * 1-5"
workflow_dispatch:
name: Nightly CI
@@ -10,7 +10,10 @@ concurrency:
cancel-in-progress: true
env:
RUST_TOOLCHAIN: nightly-2024-04-18
RUST_TOOLCHAIN: nightly-2024-04-20
permissions:
issues: write
jobs:
sqlness-test:
@@ -22,7 +25,6 @@ jobs:
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Run sqlness test
uses: ./.github/actions/sqlness-test
with:
@@ -35,10 +37,11 @@ jobs:
sqlness-windows:
name: Sqlness tests on Windows
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
runs-on: windows-latest-8-cores
runs-on: windows-2022-8-cores
timeout-minutes: 60
steps:
- uses: actions/checkout@v4
- uses: ./.github/actions/setup-cyborg
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
@@ -49,14 +52,6 @@ jobs:
uses: Swatinem/rust-cache@v2
- name: Run sqlness
run: cargo sqlness
- name: Notify slack if failed
if: failure()
uses: slackapi/slack-github-action@v1.23.0
env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
with:
payload: |
{"text": "Nightly CI failed for sqlness tests"}
- name: Upload sqlness logs
if: always()
uses: actions/upload-artifact@v4
@@ -68,14 +63,18 @@ jobs:
test-on-windows:
name: Run tests on Windows
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
runs-on: windows-latest-8-cores
runs-on: windows-2022-8-cores
timeout-minutes: 60
steps:
- run: git config --global core.autocrlf false
- uses: actions/checkout@v4
- uses: ./.github/actions/setup-cyborg
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: KyleMayes/install-llvm-action@v1
with:
version: "14.0"
- name: Install Rust toolchain
uses: dtolnay/rust-toolchain@master
with:
@@ -88,7 +87,7 @@ jobs:
- name: Install Python
uses: actions/setup-python@v5
with:
python-version: '3.10'
python-version: "3.10"
- name: Install PyArrow Package
run: pip install pyarrow
- name: Install WSL distribution
@@ -98,18 +97,62 @@ jobs:
- name: Running tests
run: cargo nextest run -F pyo3_backend,dashboard
env:
CARGO_BUILD_RUSTFLAGS: "-C linker=lld-link"
RUST_BACKTRACE: 1
CARGO_INCREMENTAL: 0
RUSTUP_WINDOWS_PATH_ADD_BIN: 1 # Workaround for https://github.com/nextest-rs/nextest/issues/1493
GT_S3_BUCKET: ${{ vars.AWS_CI_TEST_BUCKET }}
GT_S3_ACCESS_KEY_ID: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }}
GT_S3_ACCESS_KEY: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }}
GT_S3_REGION: ${{ vars.AWS_CI_TEST_BUCKET_REGION }}
UNITTEST_LOG_DIR: "__unittest_logs"
- name: Notify slack if failed
if: failure()
uses: slackapi/slack-github-action@v1.23.0
check-status:
name: Check status
needs: [
sqlness-test,
sqlness-windows,
test-on-windows,
]
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
runs-on: ubuntu-20.04
outputs:
check-result: ${{ steps.set-check-result.outputs.check-result }}
steps:
- name: Set check result
id: set-check-result
run: |
echo "check-result=success" >> $GITHUB_OUTPUT
notification:
if: ${{ github.repository == 'GreptimeTeam/greptimedb' && always() }} # Not requiring successful dependent jobs, always run.
name: Send notification to Greptime team
needs: [
check-status
]
runs-on: ubuntu-20.04
env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
steps:
- uses: actions/checkout@v4
- uses: ./.github/actions/setup-cyborg
- name: Report CI status
id: report-ci-status
working-directory: cyborg
run: pnpm tsx bin/report-ci-failure.ts
env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
CI_REPORT_STATUS: ${{ needs.check-status.outputs.check-result == 'success' }}
- name: Notify dev build successful result
uses: slackapi/slack-github-action@v1.23.0
if: ${{ needs.check-status.outputs.check-result == 'success' }}
with:
payload: |
{"text": "Nightly CI failed for cargo test"}
{"text": "Nightly CI has completed successfully."}
- name: Notify dev build failed result
uses: slackapi/slack-github-action@v1.23.0
if: ${{ needs.check-status.outputs.check-result != 'success' }}
with:
payload: |
{"text": "Nightly CI failed has failed, please check ${{ steps.report-ci-status.outputs.html_url }}."}


@@ -82,7 +82,7 @@ on:
# Use env variables to control all the release process.
env:
# The arguments of building greptime.
RUST_TOOLCHAIN: nightly-2024-04-18
RUST_TOOLCHAIN: nightly-2024-04-20
CARGO_PROFILE: nightly
# Controls whether to run tests, including unit tests, integration tests and sqlness.
@@ -91,7 +91,12 @@ env:
# The scheduled version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-YYYYMMDD', like v0.2.0-nightly-20230313;
NIGHTLY_RELEASE_PREFIX: nightly
# Note: The NEXT_RELEASE_VERSION should be modified manually by every formal release.
NEXT_RELEASE_VERSION: v0.8.0
NEXT_RELEASE_VERSION: v0.9.0
# Permission reference: https://docs.github.com/en/actions/using-jobs/assigning-permissions-to-jobs
permissions:
issues: write # Allows the action to create issues for cyborg.
contents: write # Allows the action to create a release.
jobs:
allocate-runners:
@@ -102,7 +107,7 @@ jobs:
linux-amd64-runner: ${{ steps.start-linux-amd64-runner.outputs.label }}
linux-arm64-runner: ${{ steps.start-linux-arm64-runner.outputs.label }}
macos-runner: ${{ inputs.macos_runner || vars.DEFAULT_MACOS_RUNNER }}
windows-runner: windows-latest-8-cores
windows-runner: windows-2022-8-cores
# The following EC2 resource id will be used for resource releasing.
linux-amd64-ec2-runner-label: ${{ steps.start-linux-amd64-runner.outputs.label }}
@@ -245,7 +250,7 @@ jobs:
- name: Set build macos result
id: set-build-macos-result
run: |
echo "build-macos-result=success" >> $GITHUB_OUTPUT
echo "build-macos-result=success" >> $GITHUB_OUTPUT
build-windows-artifacts:
name: Build Windows artifacts
@@ -318,7 +323,7 @@ jobs:
- name: Set build image result
id: set-build-image-result
run: |
echo "build-image-result=success" >> $GITHUB_OUTPUT
echo "build-image-result=success" >> $GITHUB_OUTPUT
release-cn-artifacts:
name: Release artifacts to CN region
@@ -436,7 +441,7 @@ jobs:
github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
notification:
if: ${{ always() || github.repository == 'GreptimeTeam/greptimedb' }}
if: ${{ github.repository == 'GreptimeTeam/greptimedb' && (github.event_name == 'push' || github.event_name == 'schedule') && always() }}
name: Send notification to Greptime team
needs: [
release-images-to-dockerhub,
@@ -447,16 +452,25 @@ jobs:
env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
steps:
- name: Notifiy release successful result
- uses: actions/checkout@v4
- uses: ./.github/actions/setup-cyborg
- name: Report CI status
id: report-ci-status
working-directory: cyborg
run: pnpm tsx bin/report-ci-failure.ts
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
CI_REPORT_STATUS: ${{ needs.release-images-to-dockerhub.outputs.build-image-result == 'success' && needs.build-windows-artifacts.outputs.build-windows-result == 'success' && needs.build-macos-artifacts.outputs.build-macos-result == 'success' }}
- name: Notify release successful result
uses: slackapi/slack-github-action@v1.25.0
if: ${{ needs.release-images-to-dockerhub.outputs.build-image-result == 'success' && needs.build-windows-artifacts.outputs.build-windows-result == 'success' && needs.build-macos-artifacts.outputs.build-macos-result == 'success' }}
with:
payload: |
{"text": "GreptimeDB's release version has completed successfully."}
- name: Notifiy release failed result
- name: Notify release failed result
uses: slackapi/slack-github-action@v1.25.0
if: ${{ needs.release-images-to-dockerhub.outputs.build-image-result != 'success' || needs.build-windows-artifacts.outputs.build-windows-result != 'success' || needs.build-macos-artifacts.outputs.build-macos-result != 'success' }}
with:
payload: |
{"text": "GreptimeDB's release version has failed, please check 'https://github.com/GreptimeTeam/greptimedb/actions/workflows/release.yml'."}
{"text": "GreptimeDB's release version has failed, please check ${{ steps.report-ci-status.outputs.html_url }}."}


@@ -16,16 +16,7 @@ jobs:
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
steps:
- uses: actions/checkout@v4
- uses: actions/setup-node@v4
with:
node-version: 22
- uses: pnpm/action-setup@v3
with:
package_json_file: 'cyborg/package.json'
run_install: true
- name: Describe the Environment
working-directory: cyborg
run: pnpm tsx -v
- uses: ./.github/actions/setup-cyborg
- name: Do Maintenance
working-directory: cyborg
run: pnpm tsx bin/schedule.ts


@@ -13,16 +13,7 @@ jobs:
timeout-minutes: 10
steps:
- uses: actions/checkout@v4
- uses: actions/setup-node@v4
with:
node-version: 22
- uses: pnpm/action-setup@v3
with:
package_json_file: 'cyborg/package.json'
run_install: true
- name: Describe the Environment
working-directory: cyborg
run: pnpm tsx -v
- uses: ./.github/actions/setup-cyborg
- name: Check Pull Request
working-directory: cyborg
run: pnpm tsx bin/check-pull-request.ts


@@ -2,7 +2,14 @@
Thanks a lot for considering contributing to GreptimeDB. We believe people like you can make GreptimeDB a great product. We intend to build a community where individuals can have open talks, show respect for one another, and speak with true ❤️. Meanwhile, we aim to keep things transparent and make your effort count here.
Please read the guidelines, and they can help you get started. Communicate with respect to developers maintaining and developing the project. In return, they should reciprocate that respect by addressing your issue, reviewing changes, as well as helping finalize and merge your pull requests.
You can find our contributors at https://github.com/GreptimeTeam/greptimedb/graphs/contributors. When you have dedicated yourself to GreptimeDB for a few months and keep bringing high-quality contributions (code, docs, advocacy, etc.), you become a candidate for committership.
A committer is granted both read & write access to GreptimeDB repos. Here is a list of current committers, excluding GreptimeDB team members:
* [Eugene Tolbakov](https://github.com/etolbakov): PromQL support, SQL engine, InfluxDB APIs, and more.
* [@NiwakaDev](https://github.com/NiwakaDev): SQL engine and storage layer.
Please read the guidelines, and they can help you get started. Communicate respectfully with the developers maintaining and developing the project. In return, they should reciprocate that respect by addressing your issue, reviewing changes, as well as helping finalize and merge your pull requests.
Follow our [README](https://github.com/GreptimeTeam/greptimedb#readme) to get the whole picture of the project. To learn about the design of GreptimeDB, please refer to the [design docs](https://github.com/GrepTimeTeam/docs).

Cargo.lock (generated, 3287 lines changed)
File diff suppressed because it is too large.


@@ -1,9 +1,9 @@
[workspace]
members = [
"benchmarks",
"src/api",
"src/auth",
"src/catalog",
"src/cache",
"src/client",
"src/cmd",
"src/common/base",
@@ -45,6 +45,7 @@ members = [
"src/object-store",
"src/operator",
"src/partition",
"src/pipeline",
"src/plugins",
"src/promql",
"src/puffin",
@@ -63,13 +64,14 @@ members = [
resolver = "2"
[workspace.package]
version = "0.7.2"
version = "0.8.2"
edition = "2021"
license = "Apache-2.0"
[workspace.lints]
clippy.print_stdout = "warn"
clippy.print_stderr = "warn"
clippy.dbg_macro = "warn"
clippy.implicit_clone = "warn"
clippy.readonly_write_lock = "allow"
rust.unknown_lints = "deny"
@@ -99,16 +101,18 @@ bytemuck = "1.12"
bytes = { version = "1.5", features = ["serde"] }
chrono = { version = "0.4", features = ["serde"] }
clap = { version = "4.4", features = ["derive"] }
config = "0.13.0"
crossbeam-utils = "0.8"
dashmap = "5.4"
datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
datafusion-common = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
datafusion-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
datafusion-functions = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
datafusion-optimizer = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
datafusion-physical-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
datafusion-sql = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
datafusion-substrait = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
datafusion = { git = "https://github.com/apache/datafusion.git", rev = "729b356ef543ffcda6813c7b5373507a04ae0109" }
datafusion-common = { git = "https://github.com/apache/datafusion.git", rev = "729b356ef543ffcda6813c7b5373507a04ae0109" }
datafusion-expr = { git = "https://github.com/apache/datafusion.git", rev = "729b356ef543ffcda6813c7b5373507a04ae0109" }
datafusion-functions = { git = "https://github.com/apache/datafusion.git", rev = "729b356ef543ffcda6813c7b5373507a04ae0109" }
datafusion-optimizer = { git = "https://github.com/apache/datafusion.git", rev = "729b356ef543ffcda6813c7b5373507a04ae0109" }
datafusion-physical-expr = { git = "https://github.com/apache/datafusion.git", rev = "729b356ef543ffcda6813c7b5373507a04ae0109" }
datafusion-physical-plan = { git = "https://github.com/apache/datafusion.git", rev = "729b356ef543ffcda6813c7b5373507a04ae0109" }
datafusion-sql = { git = "https://github.com/apache/datafusion.git", rev = "729b356ef543ffcda6813c7b5373507a04ae0109" }
datafusion-substrait = { git = "https://github.com/apache/datafusion.git", rev = "729b356ef543ffcda6813c7b5373507a04ae0109" }
derive_builder = "0.12"
dotenv = "0.15"
# TODO(LFC): Wait for https://github.com/etcdv3/etcd-client/pull/76
@@ -116,7 +120,7 @@ etcd-client = { git = "https://github.com/MichaelScofield/etcd-client.git", rev
fst = "0.4.7"
futures = "0.3"
futures-util = "0.3"
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "219b2409bb701f75b43fc0ba64967d2ed8e75491" }
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "a70a6af9c69e40f9a918936a48717343402b4393" }
humantime = "2.1"
humantime-serde = "1.1"
itertools = "0.10"
@@ -136,28 +140,31 @@ parquet = { version = "51.0.0", default-features = false, features = ["arrow", "
paste = "1.0"
pin-project = "1.0"
prometheus = { version = "0.13.3", features = ["process"] }
promql-parser = { version = "0.4" }
prost = "0.12"
raft-engine = { version = "0.4.1", default-features = false }
rand = "0.8"
regex = "1.8"
regex-automata = { version = "0.4" }
reqwest = { version = "0.11", default-features = false, features = [
reqwest = { version = "0.12", default-features = false, features = [
"json",
"rustls-tls-native-roots",
"stream",
"multipart",
] }
rskafka = "0.5"
rstest = "0.21"
rstest_reuse = "0.7"
rust_decimal = "1.33"
schemars = "0.8"
serde = { version = "1.0", features = ["derive"] }
serde_json = { version = "1.0", features = ["float_roundtrip"] }
serde_with = "3"
smallvec = { version = "1", features = ["serde"] }
snafu = "0.7"
snafu = "0.8"
sysinfo = "0.30"
# on branch v0.44.x
sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "c919990bf62ad38d2b0c0a3bc90b26ad919d51b0", features = [
sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "54a267ac89c09b11c0c88934690530807185d3e7", features = [
"visitor",
] }
strum = { version = "0.25", features = ["derive"] }
@@ -166,13 +173,15 @@ tokio = { version = "1.36", features = ["full"] }
tokio-stream = { version = "0.1" }
tokio-util = { version = "0.7", features = ["io-util", "compat"] }
toml = "0.8.8"
tonic = { version = "0.11", features = ["tls"] }
tonic = { version = "0.11", features = ["tls", "gzip", "zstd"] }
tower = { version = "0.4" }
uuid = { version = "1.7", features = ["serde", "v4", "fast-rng"] }
zstd = "0.13"
## workspaces members
api = { path = "src/api" }
auth = { path = "src/auth" }
cache = { path = "src/cache" }
catalog = { path = "src/catalog" }
client = { path = "src/client" }
cmd = { path = "src/cmd" }
@@ -204,6 +213,7 @@ common-wal = { path = "src/common/wal" }
datanode = { path = "src/datanode" }
datatypes = { path = "src/datatypes" }
file-engine = { path = "src/file-engine" }
flow = { path = "src/flow" }
frontend = { path = "src/frontend" }
index = { path = "src/index" }
log-store = { path = "src/log-store" }
@@ -214,6 +224,7 @@ mito2 = { path = "src/mito2" }
object-store = { path = "src/object-store" }
operator = { path = "src/operator" }
partition = { path = "src/partition" }
pipeline = { path = "src/pipeline" }
plugins = { path = "src/plugins" }
promql = { path = "src/promql" }
puffin = { path = "src/puffin" }
@@ -225,8 +236,6 @@ sql = { path = "src/sql" }
store-api = { path = "src/store-api" }
substrait = { path = "src/common/substrait" }
table = { path = "src/table" }
# TODO some code depends on this
tests-integration = { path = "tests-integration" }
[workspace.dependencies.meter-macros]
git = "https://github.com/GreptimeTeam/greptime-meter.git"
@@ -242,6 +251,14 @@ lto = "thin"
debug = false
incremental = false
[profile.ci]
inherits = "dev"
strip = true
[profile.dev.package.sqlness-runner]
debug = false
strip = true
[profile.dev.package.tests-fuzz]
debug = false
strip = true


@@ -163,6 +163,17 @@ nextest: ## Install nextest tools.
sqlness-test: ## Run sqlness test.
cargo sqlness
# Run fuzz test ${FUZZ_TARGET}.
RUNS ?= 1
FUZZ_TARGET ?= fuzz_alter_table
.PHONY: fuzz
fuzz:
cargo fuzz run ${FUZZ_TARGET} --fuzz-dir tests-fuzz -D -s none -- -runs=${RUNS}
.PHONY: fuzz-ls
fuzz-ls:
cargo fuzz list --fuzz-dir tests-fuzz
.PHONY: check
check: ## Cargo check all the targets.
cargo check --workspace --all-targets --all-features
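A usage sketch for the fuzz targets added above (assuming `cargo-fuzz` is installed; the target name and run count below are only the defaults taken from the Makefile):
# List the fuzz targets available under tests-fuzz.
make fuzz-ls
# Run the default target (fuzz_alter_table) once.
make fuzz
# Run a specific target for more iterations.
make fuzz FUZZ_TARGET=fuzz_alter_table RUNS=100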
@@ -194,12 +205,16 @@ run-it-in-container: start-etcd ## Run integration tests in dev-builder.
-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:latest \
make test sqlness-test BUILD_JOBS=${BUILD_JOBS}
.PHONY: run-cluster-with-etcd
run-cluster-with-etcd: ## Run greptime cluster with etcd in docker-compose.
docker compose -f ./docker/docker-compose/cluster-with-etcd.yaml up
##@ Docs
config-docs: ## Generate configuration documentation from toml files.
docker run --rm \
-v ${PWD}:/greptimedb \
-w /greptimedb/config \
toml2docs/toml2docs:latest \
toml2docs/toml2docs:v0.1.1 \
-p '##' \
-t ./config-docs-template.md \
-o ./config.md
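A usage sketch for the documentation target above (assuming Docker is available locally; the pinned toml2docs:v0.1.1 image renders the example TOML files through the template into config/config.md):
make config-docs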


@@ -1,40 +0,0 @@
[package]
name = "benchmarks"
version.workspace = true
edition.workspace = true
license.workspace = true
[lints]
workspace = true
[dependencies]
api.workspace = true
arrow.workspace = true
chrono.workspace = true
clap.workspace = true
client.workspace = true
common-base.workspace = true
common-telemetry.workspace = true
common-wal.workspace = true
dotenv.workspace = true
futures.workspace = true
futures-util.workspace = true
humantime.workspace = true
humantime-serde.workspace = true
indicatif = "0.17.1"
itertools.workspace = true
lazy_static.workspace = true
log-store.workspace = true
mito2.workspace = true
num_cpus.workspace = true
parquet.workspace = true
prometheus.workspace = true
rand.workspace = true
rskafka.workspace = true
serde.workspace = true
store-api.workspace = true
# TODO depend `Database` client
tests-integration.workspace = true
tokio.workspace = true
toml.workspace = true
uuid.workspace = true


@@ -1,11 +0,0 @@
Benchmarkers for GreptimeDB
--------------------------------
## Wal Benchmarker
The wal benchmarker serves to evaluate the performance of GreptimeDB's Write-Ahead Log (WAL) component. It meticulously assesses the read/write performance of the WAL under diverse workloads generated by the benchmarker.
### How to use
To compile the benchmarker, navigate to the `greptimedb/benchmarks` directory and execute `cargo build --release`. Subsequently, you'll find the compiled target located at `greptimedb/target/release/wal_bench`.
The `./wal_bench -h` command reveals numerous arguments that the target accepts. Among these, a notable one is the `cfg-file` argument. By utilizing a configuration file in the TOML format, you can bypass the need to repeatedly specify cumbersome arguments.
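A build-and-run sketch following the steps above (the example config path is the one referenced by the `Args` documentation in the benchmark source; adjust it to your checkout):
cd greptimedb/benchmarks
cargo build --release
# Show all supported arguments.
../target/release/wal_bench -h
# Run with a TOML config instead of repeating individual flags.
../target/release/wal_bench -c config/wal_bench.example.toml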


@@ -1,21 +0,0 @@
# Refers to the documentation of `Args` in `benchmarks/src/wal_bench.rs`.
wal_provider = "kafka"
bootstrap_brokers = ["localhost:9092"]
num_workers = 10
num_topics = 32
num_regions = 1000
num_scrapes = 1000
num_rows = 5
col_types = "ifs"
max_batch_size = "512KB"
linger = "1ms"
backoff_init = "10ms"
backoff_max = "1ms"
backoff_base = 2
backoff_deadline = "3s"
compression = "zstd"
rng_seed = 42
skip_read = false
skip_write = false
random_topics = true
report_metrics = false


@@ -1,326 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![feature(int_roundings)]
use std::fs;
use std::sync::Arc;
use std::time::Instant;
use api::v1::{ColumnDataType, ColumnSchema, SemanticType};
use benchmarks::metrics;
use benchmarks::wal_bench::{Args, Config, Region, WalProvider};
use clap::Parser;
use common_telemetry::info;
use common_wal::config::kafka::common::BackoffConfig;
use common_wal::config::kafka::DatanodeKafkaConfig as KafkaConfig;
use common_wal::config::raft_engine::RaftEngineConfig;
use common_wal::options::{KafkaWalOptions, WalOptions};
use itertools::Itertools;
use log_store::kafka::log_store::KafkaLogStore;
use log_store::raft_engine::log_store::RaftEngineLogStore;
use mito2::wal::Wal;
use prometheus::{Encoder, TextEncoder};
use rand::distributions::{Alphanumeric, DistString};
use rand::rngs::SmallRng;
use rand::SeedableRng;
use rskafka::client::partition::Compression;
use rskafka::client::ClientBuilder;
use store_api::logstore::LogStore;
use store_api::storage::RegionId;
async fn run_benchmarker<S: LogStore>(cfg: &Config, topics: &[String], wal: Arc<Wal<S>>) {
let chunk_size = cfg.num_regions.div_ceil(cfg.num_workers);
let region_chunks = (0..cfg.num_regions)
.map(|id| {
build_region(
id as u64,
topics,
&mut SmallRng::seed_from_u64(cfg.rng_seed),
cfg,
)
})
.chunks(chunk_size as usize)
.into_iter()
.map(|chunk| Arc::new(chunk.collect::<Vec<_>>()))
.collect::<Vec<_>>();
let mut write_elapsed = 0;
let mut read_elapsed = 0;
if !cfg.skip_write {
info!("Benchmarking write ...");
let num_scrapes = cfg.num_scrapes;
let timer = Instant::now();
futures::future::join_all((0..cfg.num_workers).map(|i| {
let wal = wal.clone();
let regions = region_chunks[i as usize].clone();
tokio::spawn(async move {
for _ in 0..num_scrapes {
let mut wal_writer = wal.writer();
regions
.iter()
.for_each(|region| region.add_wal_entry(&mut wal_writer));
wal_writer.write_to_wal().await.unwrap();
}
})
}))
.await;
write_elapsed += timer.elapsed().as_millis();
}
if !cfg.skip_read {
info!("Benchmarking read ...");
let timer = Instant::now();
futures::future::join_all((0..cfg.num_workers).map(|i| {
let wal = wal.clone();
let regions = region_chunks[i as usize].clone();
tokio::spawn(async move {
for region in regions.iter() {
region.replay(&wal).await;
}
})
}))
.await;
read_elapsed = timer.elapsed().as_millis();
}
dump_report(cfg, write_elapsed, read_elapsed);
}
fn build_region(id: u64, topics: &[String], rng: &mut SmallRng, cfg: &Config) -> Region {
let wal_options = match cfg.wal_provider {
WalProvider::Kafka => {
assert!(!topics.is_empty());
WalOptions::Kafka(KafkaWalOptions {
topic: topics.get(id as usize % topics.len()).cloned().unwrap(),
})
}
WalProvider::RaftEngine => WalOptions::RaftEngine,
};
Region::new(
RegionId::from_u64(id),
build_schema(&parse_col_types(&cfg.col_types), rng),
wal_options,
cfg.num_rows,
cfg.rng_seed,
)
}
fn build_schema(col_types: &[ColumnDataType], mut rng: &mut SmallRng) -> Vec<ColumnSchema> {
col_types
.iter()
.map(|col_type| ColumnSchema {
column_name: Alphanumeric.sample_string(&mut rng, 5),
datatype: *col_type as i32,
semantic_type: SemanticType::Field as i32,
datatype_extension: None,
})
.chain(vec![ColumnSchema {
column_name: "ts".to_string(),
datatype: ColumnDataType::TimestampMillisecond as i32,
semantic_type: SemanticType::Tag as i32,
datatype_extension: None,
}])
.collect()
}
fn dump_report(cfg: &Config, write_elapsed: u128, read_elapsed: u128) {
let cost_report = format!(
"write costs: {} ms, read costs: {} ms",
write_elapsed, read_elapsed,
);
let total_written_bytes = metrics::METRIC_WAL_WRITE_BYTES_TOTAL.get() as u128;
let write_throughput = if write_elapsed > 0 {
(total_written_bytes * 1000).div_floor(write_elapsed)
} else {
0
};
let total_read_bytes = metrics::METRIC_WAL_READ_BYTES_TOTAL.get() as u128;
let read_throughput = if read_elapsed > 0 {
(total_read_bytes * 1000).div_floor(read_elapsed)
} else {
0
};
let throughput_report = format!(
"total written bytes: {} bytes, total read bytes: {} bytes, write throuput: {} bytes/s ({} mb/s), read throughput: {} bytes/s ({} mb/s)",
total_written_bytes,
total_read_bytes,
write_throughput,
write_throughput.div_floor(1 << 20),
read_throughput,
read_throughput.div_floor(1 << 20),
);
let metrics_report = if cfg.report_metrics {
let mut buffer = Vec::new();
let encoder = TextEncoder::new();
let metrics = prometheus::gather();
encoder.encode(&metrics, &mut buffer).unwrap();
String::from_utf8(buffer).unwrap()
} else {
String::new()
};
info!(
r#"
Benchmark config:
{cfg:?}
Benchmark report:
{cost_report}
{throughput_report}
{metrics_report}"#
);
}
async fn create_topics(cfg: &Config) -> Vec<String> {
// Creates topics.
let client = ClientBuilder::new(cfg.bootstrap_brokers.clone())
.build()
.await
.unwrap();
let ctrl_client = client.controller_client().unwrap();
let (topics, tasks): (Vec<_>, Vec<_>) = (0..cfg.num_topics)
.map(|i| {
let topic = if cfg.random_topics {
format!(
"greptime_wal_bench_topic_{}_{}",
uuid::Uuid::new_v4().as_u128(),
i
)
} else {
format!("greptime_wal_bench_topic_{}", i)
};
let task = ctrl_client.create_topic(
topic.clone(),
1,
cfg.bootstrap_brokers.len() as i16,
2000,
);
(topic, task)
})
.unzip();
// Must ignore errors since we allow topics being created more than once.
let _ = futures::future::try_join_all(tasks).await;
topics
}
fn parse_compression(comp: &str) -> Compression {
match comp {
"no" => Compression::NoCompression,
"gzip" => Compression::Gzip,
"lz4" => Compression::Lz4,
"snappy" => Compression::Snappy,
"zstd" => Compression::Zstd,
other => unreachable!("Unrecognized compression {other}"),
}
}
fn parse_col_types(col_types: &str) -> Vec<ColumnDataType> {
let parts = col_types.split('x').collect::<Vec<_>>();
assert!(parts.len() <= 2);
let pattern = parts[0];
let repeat = parts
.get(1)
.map(|r| r.parse::<usize>().unwrap())
.unwrap_or(1);
pattern
.chars()
.map(|c| match c {
'i' | 'I' => ColumnDataType::Int64,
'f' | 'F' => ColumnDataType::Float64,
's' | 'S' => ColumnDataType::String,
other => unreachable!("Cannot parse {other} as a column data type"),
})
.cycle()
.take(pattern.len() * repeat)
.collect()
}
fn main() {
// Sets the global logging to INFO and suppress loggings from rskafka other than ERROR and upper ones.
std::env::set_var("UNITTEST_LOG_LEVEL", "info,rskafka=error");
common_telemetry::init_default_ut_logging();
let args = Args::parse();
let cfg = if !args.cfg_file.is_empty() {
toml::from_str(&fs::read_to_string(&args.cfg_file).unwrap()).unwrap()
} else {
Config::from(args)
};
// Validates arguments.
if cfg.num_regions < cfg.num_workers {
panic!("num_regions must be greater than or equal to num_workers");
}
if cfg
.num_workers
.min(cfg.num_topics)
.min(cfg.num_regions)
.min(cfg.num_scrapes)
.min(cfg.max_batch_size.as_bytes() as u32)
.min(cfg.bootstrap_brokers.len() as u32)
== 0
{
panic!("Invalid arguments");
}
tokio::runtime::Builder::new_multi_thread()
.enable_all()
.build()
.unwrap()
.block_on(async {
match cfg.wal_provider {
WalProvider::Kafka => {
let topics = create_topics(&cfg).await;
let kafka_cfg = KafkaConfig {
broker_endpoints: cfg.bootstrap_brokers.clone(),
max_batch_size: cfg.max_batch_size,
linger: cfg.linger,
backoff: BackoffConfig {
init: cfg.backoff_init,
max: cfg.backoff_max,
base: cfg.backoff_base,
deadline: Some(cfg.backoff_deadline),
},
compression: parse_compression(&cfg.compression),
..Default::default()
};
let store = Arc::new(KafkaLogStore::try_new(&kafka_cfg).await.unwrap());
let wal = Arc::new(Wal::new(store));
run_benchmarker(&cfg, &topics, wal).await;
}
WalProvider::RaftEngine => {
// The benchmarker assumes the raft engine directory exists.
let store = RaftEngineLogStore::try_new(
"/tmp/greptimedb/raft-engine-wal".to_string(),
RaftEngineConfig::default(),
)
.await
.map(Arc::new)
.unwrap();
let wal = Arc::new(Wal::new(store));
run_benchmarker(&cfg, &[], wal).await;
}
}
});
}


@@ -1,39 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use lazy_static::lazy_static;
use prometheus::*;
/// Logstore label.
pub const LOGSTORE_LABEL: &str = "logstore";
/// Operation type label.
pub const OPTYPE_LABEL: &str = "optype";
lazy_static! {
/// Counters of bytes of each operation on a logstore.
pub static ref METRIC_WAL_OP_BYTES_TOTAL: IntCounterVec = register_int_counter_vec!(
"greptime_bench_wal_op_bytes_total",
"wal operation bytes total",
&[OPTYPE_LABEL],
)
.unwrap();
/// Counter of bytes of the append_batch operation.
pub static ref METRIC_WAL_WRITE_BYTES_TOTAL: IntCounter = METRIC_WAL_OP_BYTES_TOTAL.with_label_values(
&["write"],
);
/// Counter of bytes of the read operation.
pub static ref METRIC_WAL_READ_BYTES_TOTAL: IntCounter = METRIC_WAL_OP_BYTES_TOTAL.with_label_values(
&["read"],
);
}


@@ -1,361 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::mem::size_of;
use std::sync::atomic::{AtomicI64, AtomicU64, Ordering};
use std::sync::{Arc, Mutex};
use std::time::Duration;
use api::v1::value::ValueData;
use api::v1::{ColumnDataType, ColumnSchema, Mutation, OpType, Row, Rows, Value, WalEntry};
use clap::{Parser, ValueEnum};
use common_base::readable_size::ReadableSize;
use common_wal::options::WalOptions;
use futures::StreamExt;
use mito2::wal::{Wal, WalWriter};
use rand::distributions::{Alphanumeric, DistString, Uniform};
use rand::rngs::SmallRng;
use rand::{Rng, SeedableRng};
use serde::{Deserialize, Serialize};
use store_api::logstore::LogStore;
use store_api::storage::RegionId;
use crate::metrics;
/// The wal provider.
#[derive(Clone, ValueEnum, Default, Debug, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum WalProvider {
#[default]
RaftEngine,
Kafka,
}
#[derive(Parser)]
pub struct Args {
/// The provided configuration file.
/// The example configuration file can be found at `greptimedb/benchmarks/config/wal_bench.example.toml`.
#[clap(long, short = 'c')]
pub cfg_file: String,
/// The wal provider.
#[clap(long, value_enum, default_value_t = WalProvider::default())]
pub wal_provider: WalProvider,
/// The advertised addresses of the kafka brokers.
/// If there're multiple bootstrap brokers, their addresses should be separated by comma, for e.g. "localhost:9092,localhost:9093".
#[clap(long, short = 'b', default_value = "localhost:9092")]
pub bootstrap_brokers: String,
/// The number of workers each running in a dedicated thread.
#[clap(long, default_value_t = num_cpus::get() as u32)]
pub num_workers: u32,
/// The number of kafka topics to be created.
#[clap(long, default_value_t = 32)]
pub num_topics: u32,
/// The number of regions.
#[clap(long, default_value_t = 1000)]
pub num_regions: u32,
/// The number of times each region is scraped.
#[clap(long, default_value_t = 1000)]
pub num_scrapes: u32,
/// The number of rows in each wal entry.
/// Each time a region is scraped, a wal entry containing `num_rows` rows will be produced.
#[clap(long, default_value_t = 5)]
pub num_rows: u32,
/// The column types of the schema for each region.
/// Currently, three column types are supported:
/// - i = ColumnDataType::Int64
/// - f = ColumnDataType::Float64
/// - s = ColumnDataType::String
/// For e.g., "ifs" will be parsed as three columns: i64, f64, and string.
///
/// Additionally, a "x" sign can be provided to repeat the column types for a given number of times.
/// For e.g., "iix2" will be parsed as 4 columns: i64, i64, i64, and i64.
/// This feature is useful if you want to specify many columns.
#[clap(long, default_value = "ifs")]
pub col_types: String,
/// The maximum size of a batch of kafka records.
/// The default value is 512KB.
#[clap(long, default_value = "512KB")]
pub max_batch_size: ReadableSize,
/// The minimum latency the kafka client issues a batch of kafka records.
/// However, a batch of kafka records would be immediately issued if a record cannot be fit into the batch.
#[clap(long, default_value = "1ms")]
pub linger: String,
/// The initial backoff delay of the kafka consumer.
#[clap(long, default_value = "10ms")]
pub backoff_init: String,
/// The maximum backoff delay of the kafka consumer.
#[clap(long, default_value = "1s")]
pub backoff_max: String,
/// The exponential backoff rate of the kafka consumer. The next back off = base * the current backoff.
#[clap(long, default_value_t = 2)]
pub backoff_base: u32,
/// The deadline of backoff. The backoff ends if the total backoff delay reaches the deadline.
#[clap(long, default_value = "3s")]
pub backoff_deadline: String,
/// The client-side compression algorithm for kafka records.
#[clap(long, default_value = "zstd")]
pub compression: String,
/// The seed of random number generators.
#[clap(long, default_value_t = 42)]
pub rng_seed: u64,
/// Skips the read phase, aka. region replay, if set to true.
#[clap(long, default_value_t = false)]
pub skip_read: bool,
/// Skips the write phase if set to true.
#[clap(long, default_value_t = false)]
pub skip_write: bool,
/// Randomly generates topic names if set to true.
/// Useful when you want to run the benchmarker without worrying about the topics created before.
#[clap(long, default_value_t = false)]
pub random_topics: bool,
/// Logs out the gathered prometheus metrics when the benchmarker ends.
#[clap(long, default_value_t = false)]
pub report_metrics: bool,
}
/// Benchmarker config.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Config {
pub wal_provider: WalProvider,
pub bootstrap_brokers: Vec<String>,
pub num_workers: u32,
pub num_topics: u32,
pub num_regions: u32,
pub num_scrapes: u32,
pub num_rows: u32,
pub col_types: String,
pub max_batch_size: ReadableSize,
#[serde(with = "humantime_serde")]
pub linger: Duration,
#[serde(with = "humantime_serde")]
pub backoff_init: Duration,
#[serde(with = "humantime_serde")]
pub backoff_max: Duration,
pub backoff_base: u32,
#[serde(with = "humantime_serde")]
pub backoff_deadline: Duration,
pub compression: String,
pub rng_seed: u64,
pub skip_read: bool,
pub skip_write: bool,
pub random_topics: bool,
pub report_metrics: bool,
}
impl From<Args> for Config {
fn from(args: Args) -> Self {
let cfg = Self {
wal_provider: args.wal_provider,
bootstrap_brokers: args
.bootstrap_brokers
.split(',')
.map(ToString::to_string)
.collect::<Vec<_>>(),
num_workers: args.num_workers.min(num_cpus::get() as u32),
num_topics: args.num_topics,
num_regions: args.num_regions,
num_scrapes: args.num_scrapes,
num_rows: args.num_rows,
col_types: args.col_types,
max_batch_size: args.max_batch_size,
linger: humantime::parse_duration(&args.linger).unwrap(),
backoff_init: humantime::parse_duration(&args.backoff_init).unwrap(),
backoff_max: humantime::parse_duration(&args.backoff_max).unwrap(),
backoff_base: args.backoff_base,
backoff_deadline: humantime::parse_duration(&args.backoff_deadline).unwrap(),
compression: args.compression,
rng_seed: args.rng_seed,
skip_read: args.skip_read,
skip_write: args.skip_write,
random_topics: args.random_topics,
report_metrics: args.report_metrics,
};
cfg
}
}
/// The region used for wal benchmarker.
pub struct Region {
id: RegionId,
schema: Vec<ColumnSchema>,
wal_options: WalOptions,
next_sequence: AtomicU64,
next_entry_id: AtomicU64,
next_timestamp: AtomicI64,
rng: Mutex<Option<SmallRng>>,
num_rows: u32,
}
impl Region {
/// Creates a new region.
pub fn new(
id: RegionId,
schema: Vec<ColumnSchema>,
wal_options: WalOptions,
num_rows: u32,
rng_seed: u64,
) -> Self {
Self {
id,
schema,
wal_options,
next_sequence: AtomicU64::new(1),
next_entry_id: AtomicU64::new(1),
next_timestamp: AtomicI64::new(1655276557000),
rng: Mutex::new(Some(SmallRng::seed_from_u64(rng_seed))),
num_rows,
}
}
/// Scrapes the region and adds the generated entry to wal.
pub fn add_wal_entry<S: LogStore>(&self, wal_writer: &mut WalWriter<S>) {
let mutation = Mutation {
op_type: OpType::Put as i32,
sequence: self
.next_sequence
.fetch_add(self.num_rows as u64, Ordering::Relaxed),
rows: Some(self.build_rows()),
};
let entry = WalEntry {
mutations: vec![mutation],
};
metrics::METRIC_WAL_WRITE_BYTES_TOTAL.inc_by(Self::entry_estimated_size(&entry) as u64);
wal_writer
.add_entry(
self.id,
self.next_entry_id.fetch_add(1, Ordering::Relaxed),
&entry,
&self.wal_options,
)
.unwrap();
}
/// Replays the region.
pub async fn replay<S: LogStore>(&self, wal: &Arc<Wal<S>>) {
let mut wal_stream = wal.scan(self.id, 0, &self.wal_options).unwrap();
while let Some(res) = wal_stream.next().await {
let (_, entry) = res.unwrap();
metrics::METRIC_WAL_READ_BYTES_TOTAL.inc_by(Self::entry_estimated_size(&entry) as u64);
}
}
/// Computes the estimated size in bytes of the entry.
pub fn entry_estimated_size(entry: &WalEntry) -> usize {
let wrapper_size = size_of::<WalEntry>()
+ entry.mutations.capacity() * size_of::<Mutation>()
+ size_of::<Rows>();
let rows = entry.mutations[0].rows.as_ref().unwrap();
let schema_size = rows.schema.capacity() * size_of::<ColumnSchema>()
+ rows
.schema
.iter()
.map(|s| s.column_name.capacity())
.sum::<usize>();
let values_size = (rows.rows.capacity() * size_of::<Row>())
+ rows
.rows
.iter()
.map(|r| r.values.capacity() * size_of::<Value>())
.sum::<usize>();
wrapper_size + schema_size + values_size
}
fn build_rows(&self) -> Rows {
let cols = self
.schema
.iter()
.map(|col_schema| {
let col_data_type = ColumnDataType::try_from(col_schema.datatype).unwrap();
self.build_col(&col_data_type, self.num_rows)
})
.collect::<Vec<_>>();
let rows = (0..self.num_rows)
.map(|i| {
let values = cols.iter().map(|col| col[i as usize].clone()).collect();
Row { values }
})
.collect();
Rows {
schema: self.schema.clone(),
rows,
}
}
fn build_col(&self, col_data_type: &ColumnDataType, num_rows: u32) -> Vec<Value> {
let mut rng_guard = self.rng.lock().unwrap();
let rng = rng_guard.as_mut().unwrap();
match col_data_type {
ColumnDataType::TimestampMillisecond => (0..num_rows)
.map(|_| {
let ts = self.next_timestamp.fetch_add(1000, Ordering::Relaxed);
Value {
value_data: Some(ValueData::TimestampMillisecondValue(ts)),
}
})
.collect(),
ColumnDataType::Int64 => (0..num_rows)
.map(|_| {
let v = rng.sample(Uniform::new(0, 10_000));
Value {
value_data: Some(ValueData::I64Value(v)),
}
})
.collect(),
ColumnDataType::Float64 => (0..num_rows)
.map(|_| {
let v = rng.sample(Uniform::new(0.0, 5000.0));
Value {
value_data: Some(ValueData::F64Value(v)),
}
})
.collect(),
ColumnDataType::String => (0..num_rows)
.map(|_| {
let v = Alphanumeric.sample_string(rng, 10);
Value {
value_data: Some(ValueData::StringValue(v)),
}
})
.collect(),
_ => unreachable!(),
}
}
}


@@ -1,10 +1,16 @@
# Configurations
- [Standalone Mode](#standalone-mode)
- [Distributed Mode](#distributed-mode)
- [Frontend](#frontend)
- [Metasrv](#metasrv)
- [Datanode](#datanode)
## Standalone Mode
{{ toml2docs "./standalone.example.toml" }}
## Cluster Mode
## Distributed Mode
### Frontend


@@ -1,5 +1,11 @@
# Configurations
- [Standalone Mode](#standalone-mode)
- [Distributed Mode](#distributed-mode)
- [Frontend](#frontend)
- [Metasrv](#metasrv)
- [Datanode](#datanode)
## Standalone Mode
| Key | Type | Default | Descriptions |
@@ -7,13 +13,22 @@
| `mode` | String | `standalone` | The running mode of the datanode. It can be `standalone` or `distributed`. |
| `enable_telemetry` | Bool | `true` | Enable telemetry to collect anonymous usage data. |
| `default_timezone` | String | `None` | The default timezone of the server. |
| `runtime` | -- | -- | The runtime options. |
| `runtime.read_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
| `runtime.write_rt_size` | Integer | `8` | The number of threads to execute the runtime for global write operations. |
| `runtime.bg_rt_size` | Integer | `4` | The number of threads to execute the runtime for global background operations. |
| `http` | -- | -- | The HTTP server options. |
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
| `http.timeout` | String | `30s` | HTTP request timeout. |
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>Support the following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`. |
| `http.timeout` | String | `30s` | HTTP request timeout. Set to 0 to disable timeout. |
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
| `grpc` | -- | -- | The gRPC server options. |
| `grpc.addr` | String | `127.0.0.1:4001` | The address to bind the gRPC server. |
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
| `grpc.tls` | -- | -- | gRPC server TLS options, see `mysql.tls` section. |
| `grpc.tls.mode` | String | `disable` | TLS mode. |
| `grpc.tls.cert_path` | String | `None` | Certificate file path. |
| `grpc.tls.key_path` | String | `None` | Private key file path. |
| `grpc.tls.watch` | Bool | `false` | Watch for Certificate and key file change and auto reload.<br/>For now, gRPC tls config does not support auto reload. |
| `mysql` | -- | -- | MySQL server options. |
| `mysql.enable` | Bool | `true` | Whether to enable. |
| `mysql.addr` | String | `127.0.0.1:4002` | The addr to bind the MySQL server. |
@@ -27,7 +42,7 @@
| `postgres.enable` | Bool | `true` | Whether to enable |
| `postgres.addr` | String | `127.0.0.1:4003` | The addr to bind the PostgresSQL server. |
| `postgres.runtime_size` | Integer | `2` | The number of server worker threads. |
| `postgres.tls` | -- | -- | PostgresSQL server TLS options, see `mysql_options.tls` section. |
| `postgres.tls` | -- | -- | PostgresSQL server TLS options, see `mysql.tls` section. |
| `postgres.tls.mode` | String | `disable` | TLS mode. |
| `postgres.tls.cert_path` | String | `None` | Certificate file path. |
| `postgres.tls.key_path` | String | `None` | Private key file path. |
@@ -51,8 +66,7 @@
| `wal.prefill_log_files` | Bool | `false` | Whether to pre-create log files on start up.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.sync_period` | String | `10s` | Duration for fsyncing log files.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.broker_endpoints` | Array | -- | The Kafka broker endpoints.<br/>**It's only used when the provider is `kafka`**. |
| `wal.max_batch_size` | String | `1MB` | The max size of a single producer batch.<br/>Warning: Kafka has a default limit of 1MB per message in a topic.<br/>**It's only used when the provider is `kafka`**. |
| `wal.linger` | String | `200ms` | The linger duration of a kafka batch producer.<br/>**It's only used when the provider is `kafka`**. |
| `wal.max_batch_bytes` | String | `1MB` | The max size of a single producer batch.<br/>Warning: Kafka has a default limit of 1MB per message in a topic.<br/>**It's only used when the provider is `kafka`**. |
| `wal.consumer_wait_timeout` | String | `100ms` | The consumer wait timeout.<br/>**It's only used when the provider is `kafka`**. |
| `wal.backoff_init` | String | `500ms` | The initial backoff delay.<br/>**It's only used when the provider is `kafka`**. |
| `wal.backoff_max` | String | `10s` | The maximum backoff delay.<br/>**It's only used when the provider is `kafka`**. |
@@ -96,6 +110,10 @@
| `region_engine.mito.sst_meta_cache_size` | String | `128MB` | Cache size for SST metadata. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/32 of OS memory with a max limitation of 128MB. |
| `region_engine.mito.vector_cache_size` | String | `512MB` | Cache size for vectors and arrow arrays. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
| `region_engine.mito.page_cache_size` | String | `512MB` | Cache size for pages of SST row groups. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
| `region_engine.mito.enable_experimental_write_cache` | Bool | `false` | Whether to enable the experimental write cache. |
| `region_engine.mito.experimental_write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}/write_cache`. |
| `region_engine.mito.experimental_write_cache_size` | String | `512MB` | Capacity for write cache. |
| `region_engine.mito.experimental_write_cache_ttl` | String | `1h` | TTL for write cache. |
| `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
| `region_engine.mito.scan_parallelism` | Integer | `0` | Parallelism to scan a region (default: 1/4 of cpu cores).<br/>- `0`: using the default value (1/4 of cpu cores).<br/>- `1`: scan in current thread.<br/>- `n`: scan in parallelism n. |
| `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
@@ -127,9 +145,11 @@
| `export_metrics.remote_write` | -- | -- | -- |
| `export_metrics.remote_write.url` | String | `""` | The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`. |
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
| `tracing.tokio_console_addr` | String | `None` | The tokio console address. |
## Cluster Mode
## Distributed Mode
### Frontend
@@ -137,16 +157,26 @@
| --- | -----| ------- | ----------- |
| `mode` | String | `standalone` | The running mode of the datanode. It can be `standalone` or `distributed`. |
| `default_timezone` | String | `None` | The default timezone of the server. |
| `runtime` | -- | -- | The runtime options. |
| `runtime.read_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
| `runtime.write_rt_size` | Integer | `8` | The number of threads to execute the runtime for global write operations. |
| `runtime.bg_rt_size` | Integer | `4` | The number of threads to execute the runtime for global background operations. |
| `heartbeat` | -- | -- | The heartbeat options. |
| `heartbeat.interval` | String | `18s` | Interval for sending heartbeat messages to the metasrv. |
| `heartbeat.retry_interval` | String | `3s` | Interval for retrying to send heartbeat messages to the metasrv. |
| `http` | -- | -- | The HTTP server options. |
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
| `http.timeout` | String | `30s` | HTTP request timeout. |
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>Support the following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`. |
| `http.timeout` | String | `30s` | HTTP request timeout. Set to 0 to disable timeout. |
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
| `grpc` | -- | -- | The gRPC server options. |
| `grpc.addr` | String | `127.0.0.1:4001` | The address to bind the gRPC server. |
| `grpc.hostname` | String | `127.0.0.1` | The hostname advertised to the metasrv,<br/>and used for connections from outside the host |
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
| `grpc.tls` | -- | -- | gRPC server TLS options, see `mysql.tls` section. |
| `grpc.tls.mode` | String | `disable` | TLS mode. |
| `grpc.tls.cert_path` | String | `None` | Certificate file path. |
| `grpc.tls.key_path` | String | `None` | Private key file path. |
| `grpc.tls.watch` | Bool | `false` | Watch for Certificate and key file change and auto reload.<br/>For now, gRPC tls config does not support auto reload. |
| `mysql` | -- | -- | MySQL server options. |
| `mysql.enable` | Bool | `true` | Whether to enable. |
| `mysql.addr` | String | `127.0.0.1:4002` | The addr to bind the MySQL server. |
@@ -160,7 +190,7 @@
| `postgres.enable` | Bool | `true` | Whether to enable |
| `postgres.addr` | String | `127.0.0.1:4003` | The addr to bind the PostgresSQL server. |
| `postgres.runtime_size` | Integer | `2` | The number of server worker threads. |
| `postgres.tls` | -- | -- | PostgresSQL server TLS options, see `mysql_options.tls` section. |
| `postgres.tls` | -- | -- | PostgresSQL server TLS options, see `mysql.tls` section. |
| `postgres.tls.mode` | String | `disable` | TLS mode. |
| `postgres.tls.cert_path` | String | `None` | Certificate file path. |
| `postgres.tls.key_path` | String | `None` | Private key file path. |
@@ -184,7 +214,6 @@
| `meta_client.metadata_cache_tti` | String | `5m` | -- |
| `datanode` | -- | -- | Datanode options. |
| `datanode.client` | -- | -- | Datanode client options. |
| `datanode.client.timeout` | String | `10s` | -- |
| `datanode.client.connect_timeout` | String | `10s` | -- |
| `datanode.client.tcp_nodelay` | Bool | `true` | -- |
| `logging` | -- | -- | The logging options. |
@@ -203,6 +232,8 @@
| `export_metrics.remote_write` | -- | -- | -- |
| `export_metrics.remote_write.url` | String | `""` | The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`. |
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
| `tracing.tokio_console_addr` | String | `None` | The tokio console address. |
### Metasrv
@@ -217,6 +248,10 @@
| `use_memory_store` | Bool | `false` | Store data in memory. |
| `enable_telemetry` | Bool | `true` | Whether to enable greptimedb telemetry. |
| `store_key_prefix` | String | `""` | If it's not empty, the metasrv will store all data with this key prefix. |
| `runtime` | -- | -- | The runtime options. |
| `runtime.read_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
| `runtime.write_rt_size` | Integer | `8` | The number of threads to execute the runtime for global write operations. |
| `runtime.bg_rt_size` | Integer | `4` | The number of threads to execute the runtime for global background operations. |
| `procedure` | -- | -- | Procedure storage options. |
| `procedure.max_retry_times` | Integer | `12` | Procedure max retry time. |
| `procedure.retry_delay` | String | `500ms` | Initial retry delay of procedures, increases exponentially |
@@ -259,6 +294,8 @@
| `export_metrics.remote_write` | -- | -- | -- |
| `export_metrics.remote_write.url` | String | `""` | The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`. |
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
| `tracing` | -- | -- | The tracing options. Only effective when compiled with the `tokio-console` feature. |
| `tracing.tokio_console_addr` | String | `None` | The tokio console address. |
### Datanode
@@ -269,12 +306,28 @@
| `node_id` | Integer | `None` | The datanode identifier and should be unique in the cluster. |
| `require_lease_before_startup` | Bool | `false` | Start services after regions have obtained leases.<br/>It will block the datanode start if it can't receive leases in the heartbeat from metasrv. |
| `init_regions_in_background` | Bool | `false` | Initialize all regions in the background during the startup.<br/>By default, it provides services after all regions have been initialized. |
| `rpc_addr` | String | `127.0.0.1:3001` | The gRPC address of the datanode. |
| `rpc_hostname` | String | `None` | The hostname of the datanode. |
| `rpc_runtime_size` | Integer | `8` | The number of gRPC server worker threads. |
| `rpc_max_recv_message_size` | String | `512MB` | The maximum receive message size for gRPC server. |
| `rpc_max_send_message_size` | String | `512MB` | The maximum send message size for gRPC server. |
| `enable_telemetry` | Bool | `true` | Enable telemetry to collect anonymous usage data. |
| `init_regions_parallelism` | Integer | `16` | Parallelism of initializing regions. |
| `rpc_addr` | String | `None` | Deprecated, use `grpc.addr` instead. |
| `rpc_hostname` | String | `None` | Deprecated, use `grpc.hostname` instead. |
| `rpc_runtime_size` | Integer | `None` | Deprecated, use `grpc.runtime_size` instead. |
| `rpc_max_recv_message_size` | String | `None` | Deprecated, use `grpc.max_recv_message_size` instead. |
| `rpc_max_send_message_size` | String | `None` | Deprecated, use `grpc.max_send_message_size` instead. |
| `grpc` | -- | -- | The gRPC server options. |
| `grpc.addr` | String | `127.0.0.1:3001` | The address to bind the gRPC server. |
| `grpc.hostname` | String | `127.0.0.1` | The hostname advertised to the metasrv,<br/>and used for connections from outside the host |
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
| `grpc.max_recv_message_size` | String | `512MB` | The maximum receive message size for gRPC server. |
| `grpc.max_send_message_size` | String | `512MB` | The maximum send message size for gRPC server. |
| `grpc.tls` | -- | -- | gRPC server TLS options, see `mysql.tls` section. |
| `grpc.tls.mode` | String | `disable` | TLS mode. |
| `grpc.tls.cert_path` | String | `None` | Certificate file path. |
| `grpc.tls.key_path` | String | `None` | Private key file path. |
| `grpc.tls.watch` | Bool | `false` | Watch for certificate and key file changes and reload automatically.<br/>For now, the gRPC TLS config does not support auto reload. |
| `runtime` | -- | -- | The runtime options. |
| `runtime.read_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
| `runtime.write_rt_size` | Integer | `8` | The number of threads to execute the runtime for global write operations. |
| `runtime.bg_rt_size` | Integer | `4` | The number of threads to execute the runtime for global background operations. |
| `heartbeat` | -- | -- | The heartbeat options. |
| `heartbeat.interval` | String | `3s` | Interval for sending heartbeat messages to the metasrv. |
| `heartbeat.retry_interval` | String | `3s` | Interval for retrying to send heartbeat messages to the metasrv. |
@@ -300,8 +353,7 @@
| `wal.prefill_log_files` | Bool | `false` | Whether to pre-create log files on start up.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.sync_period` | String | `10s` | Duration for fsyncing log files.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.broker_endpoints` | Array | -- | The Kafka broker endpoints.<br/>**It's only used when the provider is `kafka`**. |
| `wal.max_batch_size` | String | `1MB` | The max size of a single producer batch.<br/>Warning: Kafka has a default limit of 1MB per message in a topic.<br/>**It's only used when the provider is `kafka`**. |
| `wal.linger` | String | `200ms` | The linger duration of a kafka batch producer.<br/>**It's only used when the provider is `kafka`**. |
| `wal.max_batch_bytes` | String | `1MB` | The max size of a single producer batch.<br/>Warning: Kafka has a default limit of 1MB per message in a topic.<br/>**It's only used when the provider is `kafka`**. |
| `wal.consumer_wait_timeout` | String | `100ms` | The consumer wait timeout.<br/>**It's only used when the provider is `kafka`**. |
| `wal.backoff_init` | String | `500ms` | The initial backoff delay.<br/>**It's only used when the provider is `kafka`**. |
| `wal.backoff_max` | String | `10s` | The maximum backoff delay.<br/>**It's only used when the provider is `kafka`**. |
@@ -339,6 +391,10 @@
| `region_engine.mito.sst_meta_cache_size` | String | `128MB` | Cache size for SST metadata. Setting it to 0 disables the cache.<br/>If not set, it defaults to 1/32 of OS memory, capped at 128MB. |
| `region_engine.mito.vector_cache_size` | String | `512MB` | Cache size for vectors and arrow arrays. Setting it to 0 disables the cache.<br/>If not set, it defaults to 1/16 of OS memory, capped at 512MB. |
| `region_engine.mito.page_cache_size` | String | `512MB` | Cache size for pages of SST row groups. Setting it to 0 disables the cache.<br/>If not set, it defaults to 1/16 of OS memory, capped at 512MB. |
| `region_engine.mito.enable_experimental_write_cache` | Bool | `false` | Whether to enable the experimental write cache. |
| `region_engine.mito.experimental_write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}/write_cache`. |
| `region_engine.mito.experimental_write_cache_size` | String | `512MB` | Capacity for write cache. |
| `region_engine.mito.experimental_write_cache_ttl` | String | `1h` | TTL for write cache. |
| `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
| `region_engine.mito.scan_parallelism` | Integer | `0` | Parallelism for scanning a region (default: 1/4 of CPU cores).<br/>- `0`: use the default value (1/4 of CPU cores).<br/>- `1`: scan in the current thread.<br/>- `n`: scan with parallelism `n`. |
| `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
@@ -370,3 +426,5 @@
| `export_metrics.remote_write` | -- | -- | -- |
| `export_metrics.remote_write.url` | String | `""` | The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`. |
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
| `tracing` | -- | -- | The tracing options. Only effective when compiled with the `tokio-console` feature. |
| `tracing.tokio_console_addr` | String | `None` | The tokio console address. |

View File

@@ -13,24 +13,71 @@ require_lease_before_startup = false
## By default, it provides services after all regions have been initialized.
init_regions_in_background = false
## The gRPC address of the datanode.
## Enable telemetry to collect anonymous usage data.
enable_telemetry = true
## Parallelism of initializing regions.
init_regions_parallelism = 16
## Deprecated, use `grpc.addr` instead.
## +toml2docs:none-default
rpc_addr = "127.0.0.1:3001"
## The hostname of the datanode.
## Deprecated, use `grpc.hostname` instead.
## +toml2docs:none-default
rpc_hostname = "127.0.0.1"
## The number of gRPC server worker threads.
## Deprecated, use `grpc.runtime_size` instead.
## +toml2docs:none-default
rpc_runtime_size = 8
## The maximum receive message size for gRPC server.
## Deprecated, use `grpc.max_recv_message_size` instead.
## +toml2docs:none-default
rpc_max_recv_message_size = "512MB"
## The maximum send message size for gRPC server.
## Deprecated, use `grpc.max_send_message_size` instead.
## +toml2docs:none-default
rpc_max_send_message_size = "512MB"
## Enable telemetry to collect anonymous usage data.
enable_telemetry = true
## The gRPC server options.
[grpc]
## The address to bind the gRPC server.
addr = "127.0.0.1:3001"
## The hostname advertised to the metasrv,
## and used for connections from outside the host
hostname = "127.0.0.1"
## The number of server worker threads.
runtime_size = 8
## The maximum receive message size for gRPC server.
max_recv_message_size = "512MB"
## The maximum send message size for gRPC server.
max_send_message_size = "512MB"
## gRPC server TLS options, see `mysql.tls` section.
[grpc.tls]
## TLS mode.
mode = "disable"
## Certificate file path.
## +toml2docs:none-default
cert_path = ""
## Private key file path.
## +toml2docs:none-default
key_path = ""
## Watch for certificate and key file changes and reload automatically.
## For now, the gRPC TLS config does not support auto reload.
watch = false
## The runtime options.
[runtime]
## The number of threads to execute the runtime for global read operations.
read_rt_size = 8
## The number of threads to execute the runtime for global write operations.
write_rt_size = 8
## The number of threads to execute the runtime for global background operations.
bg_rt_size = 4
## The heartbeat options.
[heartbeat]
@@ -120,11 +167,7 @@ broker_endpoints = ["127.0.0.1:9092"]
## The max size of a single producer batch.
## Warning: Kafka has a default limit of 1MB per message in a topic.
## **It's only used when the provider is `kafka`**.
max_batch_size = "1MB"
## The linger duration of a kafka batch producer.
## **It's only used when the provider is `kafka`**.
linger = "200ms"
max_batch_bytes = "1MB"
## The consumer wait timeout.
## **It's only used when the provider is `kafka`**.
@@ -324,6 +367,18 @@ vector_cache_size = "512MB"
## If not set, it defaults to 1/16 of OS memory, capped at 512MB.
page_cache_size = "512MB"
## Whether to enable the experimental write cache.
enable_experimental_write_cache = false
## File system path for write cache, defaults to `{data_home}/write_cache`.
experimental_write_cache_path = ""
## Capacity for write cache.
experimental_write_cache_size = "512MB"
## TTL for write cache.
experimental_write_cache_ttl = "1h"
## Buffer size for SST writing.
sst_write_buffer_size = "8MB"
@@ -428,3 +483,9 @@ url = ""
## HTTP headers of Prometheus remote-write carry.
headers = { }
## The tracing options. Only effective when compiled with the `tokio-console` feature.
[tracing]
## The tokio console address.
## +toml2docs:none-default
tokio_console_addr = "127.0.0.1"

View File

@@ -5,6 +5,15 @@ mode = "standalone"
## +toml2docs:none-default
default_timezone = "UTC"
## The runtime options.
[runtime]
## The number of threads to execute the runtime for global read operations.
read_rt_size = 8
## The number of threads to execute the runtime for global write operations.
write_rt_size = 8
## The number of threads to execute the runtime for global background operations.
bg_rt_size = 4
## The heartbeat options.
[heartbeat]
## Interval for sending heartbeat messages to the metasrv.
@@ -17,19 +26,40 @@ retry_interval = "3s"
[http]
## The address to bind the HTTP server.
addr = "127.0.0.1:4000"
## HTTP request timeout.
## HTTP request timeout. Set to 0 to disable timeout.
timeout = "30s"
## HTTP request body limit.
## Support the following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
## Set to 0 to disable limit.
body_limit = "64MB"
## The gRPC server options.
[grpc]
## The address to bind the gRPC server.
addr = "127.0.0.1:4001"
## The hostname advertised to the metasrv,
## and used for connections from outside the host
hostname = "127.0.0.1"
## The number of server worker threads.
runtime_size = 8
## gRPC server TLS options, see `mysql.tls` section.
[grpc.tls]
## TLS mode.
mode = "disable"
## Certificate file path.
## +toml2docs:none-default
cert_path = ""
## Private key file path.
## +toml2docs:none-default
key_path = ""
## Watch for certificate and key file changes and reload automatically.
## For now, the gRPC TLS config does not support auto reload.
watch = false
## MySQL server options.
[mysql]
## Whether to enable.
@@ -70,7 +100,7 @@ addr = "127.0.0.1:4003"
## The number of server worker threads.
runtime_size = 2
## PostgreSQL server TLS options, see `mysql_options.tls` section.
## PostgreSQL server TLS options, see `mysql.tls` section.
[postgres.tls]
## TLS mode.
mode = "disable"
@@ -136,7 +166,6 @@ metadata_cache_tti = "5m"
[datanode]
## Datanode client options.
[datanode.client]
timeout = "10s"
connect_timeout = "10s"
tcp_nodelay = true
@@ -186,3 +215,9 @@ url = ""
## HTTP headers of Prometheus remote-write carry.
headers = { }
## The tracing options. Only effective when compiled with the `tokio-console` feature.
[tracing]
## The tokio console address.
## +toml2docs:none-default
tokio_console_addr = "127.0.0.1"

View File

@@ -25,6 +25,15 @@ enable_telemetry = true
## If it's not empty, the metasrv will store all data with this key prefix.
store_key_prefix = ""
## The runtime options.
[runtime]
## The number of threads to execute the runtime for global read operations.
read_rt_size = 8
## The number of threads to execute the runtime for global write operations.
write_rt_size = 8
## The number of threads to execute the runtime for global background operations.
bg_rt_size = 4
## Procedure storage options.
[procedure]
@@ -141,3 +150,9 @@ url = ""
## HTTP headers of Prometheus remote-write carry.
headers = { }
## The tracing options. Only effective when compiled with the `tokio-console` feature.
[tracing]
## The tokio console address.
## +toml2docs:none-default
tokio_console_addr = "127.0.0.1"

View File

@@ -8,14 +8,24 @@ enable_telemetry = true
## +toml2docs:none-default
default_timezone = "UTC"
## The runtime options.
[runtime]
## The number of threads to execute the runtime for global read operations.
read_rt_size = 8
## The number of threads to execute the runtime for global write operations.
write_rt_size = 8
## The number of threads to execute the runtime for global background operations.
bg_rt_size = 4
## The HTTP server options.
[http]
## The address to bind the HTTP server.
addr = "127.0.0.1:4000"
## HTTP request timeout.
## HTTP request timeout. Set to 0 to disable timeout.
timeout = "30s"
## HTTP request body limit.
## Support the following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
## Set to 0 to disable limit.
body_limit = "64MB"
## The gRPC server options.
@@ -25,6 +35,23 @@ addr = "127.0.0.1:4001"
## The number of server worker threads.
runtime_size = 8
## gRPC server TLS options, see `mysql.tls` section.
[grpc.tls]
## TLS mode.
mode = "disable"
## Certificate file path.
## +toml2docs:none-default
cert_path = ""
## Private key file path.
## +toml2docs:none-default
key_path = ""
## Watch for certificate and key file changes and reload automatically.
## For now, the gRPC TLS config does not support auto reload.
watch = false
## MySQL server options.
[mysql]
## Whether to enable.
@@ -65,7 +92,7 @@ addr = "127.0.0.1:4003"
## The number of server worker threads.
runtime_size = 2
## PostgreSQL server TLS options, see `mysql_options.tls` section.
## PostgreSQL server TLS options, see `mysql.tls` section.
[postgres.tls]
## TLS mode.
mode = "disable"
@@ -149,11 +176,7 @@ broker_endpoints = ["127.0.0.1:9092"]
## The max size of a single producer batch.
## Warning: Kafka has a default limit of 1MB per message in a topic.
## **It's only used when the provider is `kafka`**.
max_batch_size = "1MB"
## The linger duration of a kafka batch producer.
## **It's only used when the provider is `kafka`**.
linger = "200ms"
max_batch_bytes = "1MB"
## The consumer wait timeout.
## **It's only used when the provider is `kafka`**.
@@ -367,6 +390,18 @@ vector_cache_size = "512MB"
## If not set, it's default to 1/16 of OS memory with a max limitation of 512MB.
page_cache_size = "512MB"
## Whether to enable the experimental write cache.
enable_experimental_write_cache = false
## File system path for write cache, defaults to `{data_home}/write_cache`.
experimental_write_cache_path = ""
## Capacity for write cache.
experimental_write_cache_size = "512MB"
## TTL for write cache.
experimental_write_cache_ttl = "1h"
## Buffer size for SST writing.
sst_write_buffer_size = "8MB"
@@ -471,3 +506,9 @@ url = ""
## HTTP headers of Prometheus remote-write carry.
headers = { }
## The tracing options. Only effective when compiled with the `tokio-console` feature.
[tracing]
## The tokio console address.
## +toml2docs:none-default
tokio_console_addr = "127.0.0.1"

View File

@@ -0,0 +1,106 @@
/*
* Copyright 2023 Greptime Team
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import * as core from '@actions/core'
import {handleError, obtainClient} from "@/common";
import {context} from "@actions/github";
import {PullRequestEditedEvent, PullRequestEvent, PullRequestOpenedEvent} from "@octokit/webhooks-types";
// @ts-expect-error moduleResolution:nodenext issue 54523
import {RequestError} from "@octokit/request-error";
const needFollowUpDocs = "[x] This PR requires documentation updates."
const labelDocsNotRequired = "docs-not-required"
const labelDocsRequired = "docs-required"
async function main() {
if (!context.payload.pull_request) {
throw new Error(`Only pull request event supported. ${context.eventName} is unsupported.`)
}
const client = obtainClient("GITHUB_TOKEN")
const docsClient = obtainClient("DOCS_REPO_TOKEN")
const payload = context.payload as PullRequestEvent
const { owner, repo, number, actor, title, html_url } = {
owner: payload.pull_request.base.user.login,
repo: payload.pull_request.base.repo.name,
number: payload.pull_request.number,
title: payload.pull_request.title,
html_url: payload.pull_request.html_url,
actor: payload.pull_request.user.login,
}
const followUpDocs = checkPullRequestEvent(payload)
if (followUpDocs) {
core.info("Follow up docs.")
await client.rest.issues.removeLabel({
owner, repo, issue_number: number, name: labelDocsNotRequired,
}).catch((e: RequestError) => {
if (e.status != 404) {
throw e;
}
core.debug(`Label ${labelDocsNotRequired} does not exist.`)
})
await client.rest.issues.addLabels({
owner, repo, issue_number: number, labels: [labelDocsRequired],
})
await docsClient.rest.issues.create({
owner: 'GreptimeTeam',
repo: 'docs',
title: `Update docs for ${title}`,
body: `A document change request is generated from ${html_url}`,
assignee: actor,
}).then((res) => {
core.info(`Created issue ${res.data}`)
})
} else {
core.info("No need to follow up docs.")
await client.rest.issues.removeLabel({
owner, repo, issue_number: number, name: labelDocsRequired
}).catch((e: RequestError) => {
if (e.status != 404) {
throw e;
}
core.debug(`Label ${labelDocsRequired} does not exist.`)
})
await client.rest.issues.addLabels({
owner, repo, issue_number: number, labels: [labelDocsNotRequired],
})
}
}
function checkPullRequestEvent(payload: PullRequestEvent) {
switch (payload.action) {
case "opened":
return checkPullRequestOpenedEvent(payload as PullRequestOpenedEvent)
case "edited":
return checkPullRequestEditedEvent(payload as PullRequestEditedEvent)
default:
throw new Error(`${payload.action} is unsupported.`)
}
}
function checkPullRequestOpenedEvent(event: PullRequestOpenedEvent): boolean {
// @ts-ignore
return event.pull_request.body?.includes(needFollowUpDocs)
}
function checkPullRequestEditedEvent(event: PullRequestEditedEvent): boolean {
const previous = event.changes.body?.from.includes(needFollowUpDocs)
const current = event.pull_request.body?.includes(needFollowUpDocs)
// from docs-not-required to docs-required
return (!previous) && current
}
main().catch(handleError)

View File

@@ -0,0 +1,83 @@
/*
* Copyright 2023 Greptime Team
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import * as core from '@actions/core'
import {handleError, obtainClient} from "@/common"
import {context} from "@actions/github"
import _ from "lodash"
async function main() {
const success = process.env["CI_REPORT_STATUS"] === "true"
core.info(`CI_REPORT_STATUS=${process.env["CI_REPORT_STATUS"]}, resolved to ${success}`)
const client = obtainClient("GITHUB_TOKEN")
const title = `Workflow run '${context.workflow}' failed`
const url = `${process.env["GITHUB_SERVER_URL"]}/${process.env["GITHUB_REPOSITORY"]}/actions/runs/${process.env["GITHUB_RUN_ID"]}`
const failure_comment = `@GreptimeTeam/db-approver\nNew failure: ${url} `
const success_comment = `@GreptimeTeam/db-approver\nBack to success: ${url}`
const {owner, repo} = context.repo
const labels = ['O-ci-failure']
const issues = await client.paginate(client.rest.issues.listForRepo, {
owner,
repo,
labels: labels.join(','),
state: "open",
sort: "created",
direction: "desc",
});
const issue = _.find(issues, (i) => i.title === title);
if (issue) { // exist issue
core.info(`Found previous issue ${issue.html_url}`)
if (!success) {
await client.rest.issues.createComment({
owner,
repo,
issue_number: issue.number,
body: failure_comment,
})
} else {
await client.rest.issues.createComment({
owner,
repo,
issue_number: issue.number,
body: success_comment,
})
await client.rest.issues.update({
owner,
repo,
issue_number: issue.number,
state: "closed",
state_reason: "completed",
})
}
core.setOutput("html_url", issue.html_url)
} else if (!success) { // create new issue for failure
const issue = await client.rest.issues.create({
owner,
repo,
title,
labels,
body: failure_comment,
})
core.info(`Created issue ${issue.data.html_url}`)
core.setOutput("html_url", issue.data.html_url)
}
}
main().catch(handleError)

View File

@@ -7,6 +7,7 @@
"dependencies": {
"@actions/core": "^1.10.1",
"@actions/github": "^6.0.0",
"@octokit/request-error": "^6.1.1",
"@octokit/webhooks-types": "^7.5.1",
"conventional-commit-types": "^3.0.0",
"conventional-commits-parser": "^5.0.0",

cyborg/pnpm-lock.yaml generated
View File

@@ -11,6 +11,9 @@ dependencies:
'@actions/github':
specifier: ^6.0.0
version: 6.0.0
'@octokit/request-error':
specifier: ^6.1.1
version: 6.1.1
'@octokit/webhooks-types':
specifier: ^7.5.1
version: 7.5.1
@@ -359,6 +362,13 @@ packages:
once: 1.4.0
dev: false
/@octokit/request-error@6.1.1:
resolution: {integrity: sha512-1mw1gqT3fR/WFvnoVpY/zUM2o/XkMs/2AszUUG9I69xn0JFLv6PGkPhNk5lbfvROs79wiS0bqiJNxfCZcRJJdg==}
engines: {node: '>= 18'}
dependencies:
'@octokit/types': 13.5.0
dev: false
/@octokit/request@8.4.0:
resolution: {integrity: sha512-9Bb014e+m2TgBeEJGEbdplMVWwPmL1FPtggHQRkV+WVsMggPtEkLKPlcVYm/o8xKLkpJ7B+6N8WfQMtDLX2Dpw==}
engines: {node: '>= 18'}

View File

@@ -0,0 +1,16 @@
FROM ubuntu:22.04
# The binary name of GreptimeDB executable.
# Defaults to "greptime", but sometimes in other projects it might be different.
ARG TARGET_BIN=greptime
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
ca-certificates \
curl
ARG BINARY_PATH
ADD $BINARY_PATH/$TARGET_BIN /greptime/bin/
ENV PATH /greptime/bin/:$PATH
ENTRYPOINT ["greptime"]

View File

@@ -34,7 +34,7 @@ RUN rustup toolchain install ${RUST_TOOLCHAIN}
RUN rustup target add aarch64-linux-android
# Install cargo-ndk
RUN cargo install cargo-ndk
RUN cargo install cargo-ndk@3.5.4
ENV ANDROID_NDK_HOME $NDK_ROOT
# Builder entrypoint.

View File

@@ -0,0 +1,102 @@
x-custom:
initial_cluster_token: &initial_cluster_token "--initial-cluster-token=etcd-cluster"
common_settings: &common_settings
image: quay.io/coreos/etcd:v3.5.10
entrypoint: /usr/local/bin/etcd
services:
etcd0:
<<: *common_settings
container_name: etcd0
ports:
- 2379:2379
- 2380:2380
command:
- --name=etcd0
- --data-dir=/var/lib/etcd
- --initial-advertise-peer-urls=http://etcd0:2380
- --listen-peer-urls=http://0.0.0.0:2380
- --listen-client-urls=http://0.0.0.0:2379
- --advertise-client-urls=http://etcd0:2379
- --heartbeat-interval=250
- --election-timeout=1250
- --initial-cluster=etcd0=http://etcd0:2380
- --initial-cluster-state=new
- *initial_cluster_token
volumes:
- /tmp/greptimedb-cluster-docker-compose/etcd0:/var/lib/etcd
healthcheck:
test: [ "CMD", "etcdctl", "--endpoints=http://etcd0:2379", "endpoint", "health" ]
interval: 5s
timeout: 3s
retries: 5
networks:
- greptimedb
metasrv:
image: docker.io/greptime/greptimedb:latest
container_name: metasrv
ports:
- 3002:3002
command:
- metasrv
- start
- --bind-addr=0.0.0.0:3002
- --server-addr=metasrv:3002
- --store-addrs=etcd0:2379
healthcheck:
test: [ "CMD", "curl", "-f", "http://metasrv:3002/health" ]
interval: 5s
timeout: 3s
retries: 5
depends_on:
etcd0:
condition: service_healthy
networks:
- greptimedb
datanode0:
image: docker.io/greptime/greptimedb:latest
container_name: datanode0
ports:
- 3001:3001
command:
- datanode
- start
- --node-id=0
- --rpc-addr=0.0.0.0:3001
- --rpc-hostname=datanode0:3001
- --metasrv-addr=metasrv:3002
volumes:
- /tmp/greptimedb-cluster-docker-compose/datanode0:/tmp/greptimedb
depends_on:
metasrv:
condition: service_healthy
networks:
- greptimedb
frontend0:
image: docker.io/greptime/greptimedb:latest
container_name: frontend0
ports:
- 4000:4000
- 4001:4001
- 4002:4002
- 4003:4003
command:
- frontend
- start
- --metasrv-addrs=metasrv:3002
- --http-addr=0.0.0.0:4000
- --rpc-addr=0.0.0.0:4001
- --mysql-addr=0.0.0.0:4002
- --postgres-addr=0.0.0.0:4003
depends_on:
metasrv:
condition: service_healthy
networks:
- greptimedb
networks:
greptimedb:
name: greptimedb

View File

@@ -0,0 +1,253 @@
# How to run TSBS Benchmark
This document contains the steps to run TSBS Benchmark. Our results are listed in other files in the same directory.
## Prerequisites
You need the following tools to run TSBS Benchmark:
- Go
- git
- make
- rust (optional, if you want to build the DB from source)
## Build TSBS suite
Clone our fork of TSBS:
```shell
git clone https://github.com/GreptimeTeam/tsbs.git
```
Then build it:
```shell
cd tsbs
make
```
You can check the `bin/` directory for compiled binaries. We will only use some of them.
```shell
ls ./bin/
```
Binaries we will use later:
- `tsbs_generate_data`
- `tsbs_generate_queries`
- `tsbs_load_greptime`
- `tsbs_run_queries_influx`
## Generate test data and queries
The data is generated by `tsbs_generate_data`
```shell
mkdir bench-data
./bin/tsbs_generate_data --use-case="cpu-only" --seed=123 --scale=4000 \
--timestamp-start="2023-06-11T00:00:00Z" \
--timestamp-end="2023-06-14T00:00:00Z" \
--log-interval="10s" --format="influx" \
> ./bench-data/influx-data.lp
```
Here we generate 4000 time series spanning 3 days at a 10s interval. We'll write via the InfluxDB line protocol, so the target format is `influx`.
Queries are generated by `tsbs_generate_queries`. You can change the parameters, but make sure they match the ones used with `tsbs_generate_data`.
```shell
./bin/tsbs_generate_queries \
--use-case="devops" --seed=123 --scale=4000 \
--timestamp-start="2023-06-11T00:00:00Z" \
--timestamp-end="2023-06-14T00:00:01Z" \
--queries=100 \
--query-type cpu-max-all-1 \
--format="greptime" \
> ./bench-data/greptime-queries-cpu-max-all-1.dat
./bin/tsbs_generate_queries \
--use-case="devops" --seed=123 --scale=4000 \
--timestamp-start="2023-06-11T00:00:00Z" \
--timestamp-end="2023-06-14T00:00:01Z" \
--queries=100 \
--query-type cpu-max-all-8 \
--format="greptime" \
> ./bench-data/greptime-queries-cpu-max-all-8.dat
./bin/tsbs_generate_queries \
--use-case="devops" --seed=123 --scale=4000 \
--timestamp-start="2023-06-11T00:00:00Z" \
--timestamp-end="2023-06-14T00:00:01Z" \
--queries=50 \
--query-type double-groupby-1 \
--format="greptime" \
> ./bench-data/greptime-queries-double-groupby-1.dat
./bin/tsbs_generate_queries \
--use-case="devops" --seed=123 --scale=4000 \
--timestamp-start="2023-06-11T00:00:00Z" \
--timestamp-end="2023-06-14T00:00:01Z" \
--queries=50 \
--query-type double-groupby-5 \
--format="greptime" \
> ./bench-data/greptime-queries-double-groupby-5.dat
./bin/tsbs_generate_queries \
--use-case="devops" --seed=123 --scale=4000 \
--timestamp-start="2023-06-11T00:00:00Z" \
--timestamp-end="2023-06-14T00:00:01Z" \
--queries=50 \
--query-type double-groupby-all \
--format="greptime" \
> ./bench-data/greptime-queries-double-groupby-all.dat
./bin/tsbs_generate_queries \
--use-case="devops" --seed=123 --scale=4000 \
--timestamp-start="2023-06-11T00:00:00Z" \
--timestamp-end="2023-06-14T00:00:01Z" \
--queries=50 \
--query-type groupby-orderby-limit \
--format="greptime" \
> ./bench-data/greptime-queries-groupby-orderby-limit.dat
./bin/tsbs_generate_queries \
--use-case="devops" --seed=123 --scale=4000 \
--timestamp-start="2023-06-11T00:00:00Z" \
--timestamp-end="2023-06-14T00:00:01Z" \
--queries=100 \
--query-type high-cpu-1 \
--format="greptime" \
> ./bench-data/greptime-queries-high-cpu-1.dat
./bin/tsbs_generate_queries \
--use-case="devops" --seed=123 --scale=4000 \
--timestamp-start="2023-06-11T00:00:00Z" \
--timestamp-end="2023-06-14T00:00:01Z" \
--queries=50 \
--query-type high-cpu-all \
--format="greptime" \
> ./bench-data/greptime-queries-high-cpu-all.dat
./bin/tsbs_generate_queries \
--use-case="devops" --seed=123 --scale=4000 \
--timestamp-start="2023-06-11T00:00:00Z" \
--timestamp-end="2023-06-14T00:00:01Z" \
--queries=10 \
--query-type lastpoint \
--format="greptime" \
> ./bench-data/greptime-queries-lastpoint.dat
./bin/tsbs_generate_queries \
--use-case="devops" --seed=123 --scale=4000 \
--timestamp-start="2023-06-11T00:00:00Z" \
--timestamp-end="2023-06-14T00:00:01Z" \
--queries=100 \
--query-type single-groupby-1-1-1 \
--format="greptime" \
> ./bench-data/greptime-queries-single-groupby-1-1-1.dat
./bin/tsbs_generate_queries \
--use-case="devops" --seed=123 --scale=4000 \
--timestamp-start="2023-06-11T00:00:00Z" \
--timestamp-end="2023-06-14T00:00:01Z" \
--queries=100 \
--query-type single-groupby-1-1-12 \
--format="greptime" \
> ./bench-data/greptime-queries-single-groupby-1-1-12.dat
./bin/tsbs_generate_queries \
--use-case="devops" --seed=123 --scale=4000 \
--timestamp-start="2023-06-11T00:00:00Z" \
--timestamp-end="2023-06-14T00:00:01Z" \
--queries=100 \
--query-type single-groupby-1-8-1 \
--format="greptime" \
> ./bench-data/greptime-queries-single-groupby-1-8-1.dat
./bin/tsbs_generate_queries \
--use-case="devops" --seed=123 --scale=4000 \
--timestamp-start="2023-06-11T00:00:00Z" \
--timestamp-end="2023-06-14T00:00:01Z" \
--queries=100 \
--query-type single-groupby-5-1-1 \
--format="greptime" \
> ./bench-data/greptime-queries-single-groupby-5-1-1.dat
./bin/tsbs_generate_queries \
--use-case="devops" --seed=123 --scale=4000 \
--timestamp-start="2023-06-11T00:00:00Z" \
--timestamp-end="2023-06-14T00:00:01Z" \
--queries=100 \
--query-type single-groupby-5-1-12 \
--format="greptime" \
> ./bench-data/greptime-queries-single-groupby-5-1-12.dat
./bin/tsbs_generate_queries \
--use-case="devops" --seed=123 --scale=4000 \
--timestamp-start="2023-06-11T00:00:00Z" \
--timestamp-end="2023-06-14T00:00:01Z" \
--queries=100 \
--query-type single-groupby-5-8-1 \
--format="greptime" \
> ./bench-data/greptime-queries-single-groupby-5-8-1.dat
```
## Start GreptimeDB
Refer to our [document](https://docs.greptime.com/getting-started/installation/overview) for how to install and start GreptimeDB, or check this [document](https://docs.greptime.com/contributor-guide/getting-started#compile-and-run) for how to build GreptimeDB from source.
## Write Data
After the DB is started, we can use `tsbs_load_greptime` to test the write performance.
```shell
./bin/tsbs_load_greptime \
--urls=http://localhost:4000 \
--file=./bench-data/influx-data.lp \
--batch-size=3000 \
--gzip=false \
--workers=6
```
The parameters here are only an example; you can choose whatever you like or adjust them to match your target scenario.
Note that if you want to rerun `tsbs_load_greptime`, destroy and restart the DB and clear its previous data first; existing duplicate data will skew both write and query performance.
## Query Data
After the data is imported, you can run the queries. The following script runs all of them; you can also choose a subset of queries to run.
```shell
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-cpu-max-all-1.dat \
--db-name=benchmark \
--urls="http://localhost:4000"
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-cpu-max-all-8.dat \
--db-name=benchmark \
--urls="http://localhost:4000"
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-double-groupby-1.dat \
--db-name=benchmark \
--urls="http://localhost:4000"
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-double-groupby-5.dat \
--db-name=benchmark \
--urls="http://localhost:4000"
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-double-groupby-all.dat \
--db-name=benchmark \
--urls="http://localhost:4000"
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-groupby-orderby-limit.dat \
--db-name=benchmark \
--urls="http://localhost:4000"
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-high-cpu-1.dat \
--db-name=benchmark \
--urls="http://localhost:4000"
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-high-cpu-all.dat \
--db-name=benchmark \
--urls="http://localhost:4000"
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-lastpoint.dat \
--db-name=benchmark \
--urls="http://localhost:4000"
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-single-groupby-1-1-1.dat \
--db-name=benchmark \
--urls="http://localhost:4000"
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-single-groupby-1-1-12.dat \
--db-name=benchmark \
--urls="http://localhost:4000"
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-single-groupby-1-8-1.dat \
--db-name=benchmark \
--urls="http://localhost:4000"
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-single-groupby-5-1-1.dat \
--db-name=benchmark \
--urls="http://localhost:4000"
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-single-groupby-5-1-12.dat \
--db-name=benchmark \
--urls="http://localhost:4000"
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-single-groupby-5-8-1.dat \
--db-name=benchmark \
--urls="http://localhost:4000"
```
Rerunning queries does not require re-importing the data; just execute the corresponding command again.

View File

@@ -23,28 +23,28 @@
## Write performance
| Environment | Ingest rate (rows/s) |
| ------------------ | --------------------- |
| Local | 3695814.64 |
| EC2 c5d.2xlarge | 2987166.64 |
| Environment | Ingest rate (rows/s) |
| --------------- | -------------------- |
| Local | 369581.464 |
| EC2 c5d.2xlarge | 298716.664 |
## Query performance
| Query type | Local (ms) | EC2 c5d.2xlarge (ms) |
| --------------------- | ---------- | ---------------------- |
| cpu-max-all-1 | 30.56 | 54.74 |
| cpu-max-all-8 | 52.69 | 70.50 |
| double-groupby-1 | 664.30 | 1366.63 |
| double-groupby-5 | 1391.26 | 2141.71 |
| double-groupby-all | 2828.94 | 3389.59 |
| groupby-orderby-limit | 718.92 | 1213.90 |
| high-cpu-1 | 29.21 | 52.98 |
| high-cpu-all | 5514.12 | 7194.91 |
| lastpoint | 7571.40 | 9423.41 |
| single-groupby-1-1-1 | 19.09 | 7.77 |
| single-groupby-1-1-12 | 27.28 | 51.64 |
| single-groupby-1-8-1 | 31.85 | 11.64 |
| single-groupby-5-1-1 | 16.14 | 9.67 |
| single-groupby-5-1-12 | 27.21 | 53.62 |
| single-groupby-5-8-1 | 39.62 | 14.96 |
| Query type | Local (ms) | EC2 c5d.2xlarge (ms) |
| --------------------- | ---------- | -------------------- |
| cpu-max-all-1 | 30.56 | 54.74 |
| cpu-max-all-8 | 52.69 | 70.50 |
| double-groupby-1 | 664.30 | 1366.63 |
| double-groupby-5 | 1391.26 | 2141.71 |
| double-groupby-all | 2828.94 | 3389.59 |
| groupby-orderby-limit | 718.92 | 1213.90 |
| high-cpu-1 | 29.21 | 52.98 |
| high-cpu-all | 5514.12 | 7194.91 |
| lastpoint | 7571.40 | 9423.41 |
| single-groupby-1-1-1 | 19.09 | 7.77 |
| single-groupby-1-1-12 | 27.28 | 51.64 |
| single-groupby-1-8-1 | 31.85 | 11.64 |
| single-groupby-5-1-1 | 16.14 | 9.67 |
| single-groupby-5-1-12 | 27.21 | 53.62 |
| single-groupby-5-8-1 | 39.62 | 14.96 |

View File

@@ -0,0 +1,58 @@
# TSBS benchmark - v0.8.0
## Environment
### Local
| | |
| ------ | ---------------------------------- |
| CPU | AMD Ryzen 7 7735HS (8 core 3.2GHz) |
| Memory | 32GB |
| Disk | SOLIDIGM SSDPFKNU010TZ |
| OS | Ubuntu 22.04.2 LTS |
### Amazon EC2
| | |
| ------- | -------------- |
| Machine | c5d.2xlarge |
| CPU | 8 core |
| Memory | 16GB |
| Disk | 50GB (GP3) |
| OS | Ubuntu 22.04.1 |
## Write performance
| Environment | Ingest rate (rows/s) |
| --------------- | -------------------- |
| Local | 315369.66 |
| EC2 c5d.2xlarge | 222148.56 |
## Query performance
| Query type | Local (ms) | EC2 c5d.2xlarge (ms) |
| --------------------- | ---------- | -------------------- |
| cpu-max-all-1 | 24.63 | 15.29 |
| cpu-max-all-8 | 51.69 | 33.53 |
| double-groupby-1 | 673.51 | 1295.38 |
| double-groupby-5 | 1244.93 | 1993.91 |
| double-groupby-all | 2215.44 | 3056.77 |
| groupby-orderby-limit | 754.50 | 1546.49 |
| high-cpu-1 | 19.62 | 11.58 |
| high-cpu-all | 5402.31 | 8011.43 |
| lastpoint | 6756.12 | 9312.67 |
| single-groupby-1-1-1 | 15.70 | 7.67 |
| single-groupby-1-1-12 | 16.72 | 9.29 |
| single-groupby-1-8-1 | 26.72 | 17.97 |
| single-groupby-5-1-1 | 18.17 | 10.09 |
| single-groupby-5-1-12 | 20.04 | 12.37 |
| single-groupby-5-8-1 | 35.63 | 23.13 |
`single-groupby-1-1-1` query throughput
| Environment | Client concurrency | mean time (ms) | qps (queries/sec) |
| --------------- | ------------------ | -------------- | ----------------- |
| Local | 50 | 42.87 | 1165.73 |
| Local | 100 | 89.29 | 1119.38 |
| EC2 c5d.2xlarge | 50 | 69.25 | 721.73 |
| EC2 c5d.2xlarge | 100 | 140.93 | 709.35 |

View File

@@ -1,527 +0,0 @@
# Schema Structs
# Common Schemas
The `datatypes` crate defines the elementary schema struct to describe the metadata.
## ColumnSchema
[ColumnSchema](https://github.com/GreptimeTeam/greptimedb/blob/9fa871a3fad07f583dc1863a509414da393747f8/src/datatypes/src/schema/column_schema.rs#L36) represents the metadata of a column. It is equivalent to arrow's [Field](https://docs.rs/arrow/latest/arrow/datatypes/struct.Field.html) with additional metadata such as default constraint and whether the column is a time index. The time index is the column with a `TIME INDEX` constraint of a table. We can convert the `ColumnSchema` into an arrow `Field` and convert the `Field` back to the `ColumnSchema` without losing metadata.
```rust
pub struct ColumnSchema {
pub name: String,
pub data_type: ConcreteDataType,
is_nullable: bool,
is_time_index: bool,
default_constraint: Option<ColumnDefaultConstraint>,
metadata: Metadata,
}
```
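The round trip looks roughly like the sketch below. This is a minimal sketch, assuming the `TryFrom` conversions between `ColumnSchema` and arrow's `Field` in the `datatypes` crate and the `with_time_index` builder helper; treat the exact signatures as assumptions rather than the authoritative API.
```rust
use arrow::datatypes::Field;
use datatypes::prelude::ConcreteDataType;
use datatypes::schema::ColumnSchema;

fn round_trip() -> datatypes::error::Result<()> {
    // A non-nullable millisecond timestamp column marked as the time index.
    let ts = ColumnSchema::new(
        "ts",
        ConcreteDataType::timestamp_millisecond_datatype(),
        false,
    )
    .with_time_index(true);

    // The extra metadata (time index flag, default constraint) travels in the
    // Field's metadata map, so the reverse conversion can recover it.
    let field = Field::try_from(&ts)?;
    let back = ColumnSchema::try_from(&field)?;
    assert!(back.is_time_index());
    Ok(())
}
```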
## Schema
[Schema](https://github.com/GreptimeTeam/greptimedb/blob/9fa871a3fad07f583dc1863a509414da393747f8/src/datatypes/src/schema.rs#L38) is an ordered sequence of `ColumnSchema`. It is equivalent to arrow's [Schema](https://docs.rs/arrow/latest/arrow/datatypes/struct.Schema.html) with additional metadata, including the index of the time index column and the version of this schema. As with `ColumnSchema`, we can convert our `Schema` to and from arrow's `Schema`.
```rust
use arrow::datatypes::Schema as ArrowSchema;
pub struct Schema {
column_schemas: Vec<ColumnSchema>,
name_to_index: HashMap<String, usize>,
arrow_schema: Arc<ArrowSchema>,
timestamp_index: Option<usize>,
version: u32,
}
pub type SchemaRef = Arc<Schema>;
```
We alias `Arc<Schema>` as `SchemaRef` since it is used frequently. Mostly, we use our `ColumnSchema` and `Schema` structs instead of Arrow's `Field` and `Schema` unless we need to invoke third-party libraries (like DataFusion or ArrowFlight) that rely on Arrow.
## RawSchema
`Schema` contains fields like a map from column names to their indices in the `ColumnSchema` sequence and a cached arrow `Schema`. We can construct these fields from the `ColumnSchema` sequence, so we don't want to serialize them. This is why we don't derive `Serialize` and `Deserialize` for `Schema`. Instead, we introduce a new struct [RawSchema](https://github.com/GreptimeTeam/greptimedb/blob/9fa871a3fad07f583dc1863a509414da393747f8/src/datatypes/src/schema/raw.rs#L24) that keeps all required fields of a `Schema` and derives the serialization traits. To serialize a `Schema`, we convert it into a `RawSchema` first and serialize the `RawSchema`.
```rust
pub struct RawSchema {
pub column_schemas: Vec<ColumnSchema>,
pub timestamp_index: Option<usize>,
pub version: u32,
}
```
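The serialize path then looks like the sketch below, assuming `From<&Schema> for RawSchema` and `TryFrom<RawSchema> for Schema` conversions (the conversion figure at the end of this document shows the `TryFrom` edge); the exact conversion names are assumptions.
```rust
use datatypes::schema::{RawSchema, Schema};

// Sketch only: RawSchema is the serde surface for Schema.
fn persist(schema: &Schema) -> serde_json::Result<String> {
    // Drop the derived fields (name_to_index, the cached arrow schema).
    let raw = RawSchema::from(schema);
    serde_json::to_string(&raw)
}

fn restore(json: &str) -> Result<Schema, Box<dyn std::error::Error>> {
    let raw: RawSchema = serde_json::from_str(json)?;
    // Rebuild the derived fields; this can fail, hence TryFrom.
    Ok(Schema::try_from(raw)?)
}
```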
We want to keep the `Schema` simple and avoid putting too much business-related metadata in it as many different structs or traits rely on it.
# Schema of the Table
A table maintains its schema in [TableMeta](https://github.com/GreptimeTeam/greptimedb/blob/9fa871a3fad07f583dc1863a509414da393747f8/src/table/src/metadata.rs#L97).
```rust
pub struct TableMeta {
pub schema: SchemaRef,
pub primary_key_indices: Vec<usize>,
pub value_indices: Vec<usize>,
// ...
}
```
The order of columns in `TableMeta::schema` is the same as the order specified in the `CREATE TABLE` statement which users use to create this table.
The field `primary_key_indices` stores indices of primary key columns. The field `value_indices` records the indices of value columns (non-primary key and time index, we sometimes call them field columns).
Suppose we create a table with the following SQL
```sql
CREATE TABLE cpu (
ts TIMESTAMP,
host STRING,
usage_user DOUBLE,
usage_system DOUBLE,
datacenter STRING,
TIME INDEX (ts),
PRIMARY KEY(datacenter, host)) ENGINE=mito;
```
Then the table's `TableMeta` may look like this:
```json
{
"schema":{
"column_schemas":[
"ts",
"host",
"usage_user",
"usage_system",
"datacenter"
],
"time_index":0,
"version":0
},
"primary_key_indices":[
4,
1
],
"value_indices":[
2,
3
]
}
```
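To make the relationship between these fields concrete, here is a small illustrative function (not the actual `TableMeta` code) that derives `value_indices` as every column that is neither the time index nor part of the primary key, reproducing the JSON above:
```rust
fn value_indices(num_columns: usize, primary_key: &[usize], time_index: usize) -> Vec<usize> {
    (0..num_columns)
        .filter(|i| *i != time_index && !primary_key.contains(i))
        .collect()
}

fn main() {
    // The `cpu` table above: 5 columns, primary key indices [4, 1], time index 0.
    assert_eq!(value_indices(5, &[4, 1], 0), vec![2, 3]);
}
```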
# Schemas of the storage engine
We split a table into one or more units with the same schema and then store these units in the storage engine. Each unit is a region in the storage engine.
The storage engine maintains schemas of regions in more complicated ways because it
- adds internal columns that are invisible to users to store additional metadata for each row
- provides a data model similar to the key-value model so it organizes columns in a different order
- maintains additional metadata like column id or column family
So the storage engine defines several schema structs:
- RegionSchema
- StoreSchema
- ProjectedSchema
## RegionSchema
A [RegionSchema](https://github.com/GreptimeTeam/greptimedb/blob/9fa871a3fad07f583dc1863a509414da393747f8/src/storage/src/schema/region.rs#L37) describes the schema of a region.
```rust
pub struct RegionSchema {
user_schema: SchemaRef,
store_schema: StoreSchemaRef,
columns: ColumnsMetadataRef,
}
```
Each region reserves some columns called `internal columns` for internal usage:
- `__sequence`, sequence number of a row
- `__op_type`, operation type of a row, such as `PUT` or `DELETE`
- `__version`, user-specified version of a row, reserved but not used. We might remove this in the future
The table engine can't see the `__sequence` and `__op_type` columns, so the `RegionSchema` itself maintains two internal schemas:
- User schema, a `Schema` struct that doesn't have internal columns
- Store schema, a `StoreSchema` struct that has internal columns
The `ColumnsMetadata` struct keeps metadata about all columns, but most of the time we only need the metadata in the user schema and store schema, so we just ignore it here. We may remove this struct in the future.
`RegionSchema` organizes columns in the following order:
```
key columns, timestamp, [__version,] value columns, __sequence, __op_type
```
We can ignore the `__version` column because it is disabled now:
```
key columns, timestamp, value columns, __sequence, __op_type
```
Key columns are columns of a table's primary key. Timestamp is the time index column. A region sorts all rows by key columns, timestamp, sequence, and op type.
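The sort order can be pictured with a small self-contained example; this is illustrative only, not the engine's actual row representation. Deriving `Ord` on a struct compares fields in declaration order, which matches the ordering described above:
```rust
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord)]
struct RowKey {
    keys: Vec<String>, // primary key columns, e.g. (datacenter, host)
    ts: i64,           // time index value
    sequence: u64,     // __sequence
    op_type: u8,       // __op_type
}

fn main() {
    let mut rows = vec![
        RowKey { keys: vec!["dc1".into(), "b".into()], ts: 2, sequence: 1, op_type: 0 },
        RowKey { keys: vec!["dc1".into(), "a".into()], ts: 5, sequence: 3, op_type: 1 },
        RowKey { keys: vec!["dc1".into(), "a".into()], ts: 5, sequence: 2, op_type: 0 },
    ];
    rows.sort();
    // Rows with the same key columns and timestamp are ordered by sequence.
    assert_eq!(rows[0].sequence, 2);
}
```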
So the `RegionSchema` of our `cpu` table above looks like this:
```json
{
"user_schema":[
"datacenter",
"host",
"ts",
"usage_user",
"usage_system"
],
"store_schema":[
"datacenter",
"host",
"ts",
"usage_user",
"usage_system",
"__sequence",
"__op_type"
]
}
```
## StoreSchema
As described above, a [StoreSchema](https://github.com/GreptimeTeam/greptimedb/blob/9fa871a3fad07f583dc1863a509414da393747f8/src/storage/src/schema/store.rs#L36) is a schema that knows all internal columns.
```rust
struct StoreSchema {
columns: Vec<ColumnMetadata>,
schema: SchemaRef,
row_key_end: usize,
user_column_end: usize,
}
```
The columns in the `columns` and `schema` fields have the same order. The `ColumnMetadata` has metadata like the column id, column family id, and comment. The `StoreSchema` also stores this metadata in `StoreSchema::schema`, so we can convert between `StoreSchema` and arrow's `Schema`. We use this feature to persist the `StoreSchema` in the SST, since our SST format is `Parquet`, which can take arrow's `Schema` as its schema.
The `StoreSchema` of the region above is similar to this:
```json
{
"schema":{
"column_schemas":[
"datacenter",
"host",
"ts",
"usage_user",
"usage_system",
"__sequence",
"__op_type"
],
"time_index":2,
"version":0
},
"row_key_end":3,
"user_column_end":5
}
```
The key and timestamp columns together form a row's row key. We put them first so we can use `row_key_end` to get the indices of all row key columns. Similarly, we can use `user_column_end` to get the indices of all user columns (non-internal columns).
```rust
impl StoreSchema {
#[inline]
pub(crate) fn row_key_indices(&self) -> impl Iterator<Item = usize> {
0..self.row_key_end
}
#[inline]
pub(crate) fn value_indices(&self) -> impl Iterator<Item = usize> {
self.row_key_end..self.user_column_end
}
}
```
Another useful property of `StoreSchema` is that we ensure it always contains the key columns, the timestamp column, and the internal columns, because we need them to perform merging, deduplication, and deletion. Projection on a `StoreSchema` only projects value columns.
## ProjectedSchema
To support arbitrary projection, we introduce the [ProjectedSchema](https://github.com/GreptimeTeam/greptimedb/blob/9fa871a3fad07f583dc1863a509414da393747f8/src/storage/src/schema/projected.rs#L106).
```rust
pub struct ProjectedSchema {
projection: Option<Projection>,
schema_to_read: StoreSchemaRef,
projected_user_schema: SchemaRef,
}
```
We need to handle many cases while doing projection:
- The column order of the table and the region differs
- The projection can be in arbitrary order, e.g. `select usage_user, host from cpu` and `select host, usage_user from cpu` have different projection order
- We support `ALTER TABLE` so data files may have different schemas.
### Projection
Let's take an example to see how projection works. Suppose we want to select `ts`, `usage_system` from the `cpu` table.
```sql
CREATE TABLE cpu (
ts TIMESTAMP,
host STRING,
usage_user DOUBLE,
usage_system DOUBLE,
datacenter STRING,
TIME INDEX (ts),
PRIMARY KEY(datacenter, host)) ENGINE=mito;
select ts, usage_system from cpu;
```
The query engine uses the projection `[0, 3]` to scan the table. However, columns in the region have a different order, so the table engine adjusts the projection to `[2, 4]`.
```json
{
"user_schema":[
"datacenter",
"host",
"ts",
"usage_user",
"usage_system"
],
}
```
As you can see, the output order is still `[ts, usage_system]`. This is the schema users can see after projection so we call it `projected user schema`.
But the storage engine also needs to read key columns, a timestamp column, and internal columns. So we maintain a `StoreSchema` after projection in the `ProjectedSchema`.
The `Projection` struct is a helper that computes the projected user schema and the projected store schema.
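As an illustration of the index adjustment (again, not the actual `Projection` code), mapping the table-level projection `[0, 3]` through the table-to-region column mapping yields `[2, 4]`:
```rust
/// Map table column indices to region column indices.
fn adjust_projection(table_to_region: &[usize], projection: &[usize]) -> Vec<usize> {
    projection.iter().map(|&i| table_to_region[i]).collect()
}

fn main() {
    // Table order:  [ts, host, usage_user, usage_system, datacenter]
    // Region order: [datacenter, host, ts, usage_user, usage_system]
    // => ts -> 2, host -> 1, usage_user -> 3, usage_system -> 4, datacenter -> 0
    let table_to_region = [2, 1, 3, 4, 0];
    assert_eq!(adjust_projection(&table_to_region, &[0, 3]), vec![2, 4]);
}
```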
So we can construct the following `ProjectedSchema`:
```json
{
"schema_to_read":{
"schema":{
"column_schemas":[
"datacenter",
"host",
"ts",
"usage_system",
"__sequence",
"__op_type"
],
"time_index":2,
"version":0
},
"row_key_end":3,
"user_column_end":4
},
"projected_user_schema":{
"column_schemas":[
"ts",
"usage_system"
],
"time_index":0
}
}
```
As you can see, `schema_to_read` doesn't contain the column `usage_user`, which is not meant to be read (it is not in the projection).
### ReadAdapter
As mentioned above, we can alter a table so the underlying files (SSTs) and memtables in the storage engine may have different schemas.
To simplify the logic of `ProjectedSchema`, we handle the difference between schemas before projection (constructing the `ProjectedSchema`). We introduce [ReadAdapter](https://github.com/GreptimeTeam/greptimedb/blob/9fa871a3fad07f583dc1863a509414da393747f8/src/storage/src/schema/compat.rs#L90) that adapts rows with different source schemas to the same expected schema.
So we can always use the current `RegionSchema` of the region to construct the `ProjectedSchema`, and then create a `ReadAdapter` for each memtable or SST.
```rust
#[derive(Debug)]
pub struct ReadAdapter {
source_schema: StoreSchemaRef,
dest_schema: ProjectedSchemaRef,
indices_in_result: Vec<Option<usize>>,
is_source_needed: Vec<bool>,
}
```
For each column required by `dest_schema`, `indices_in_result` stores the index of that column in the row read from the source memtable or SST. If the source row doesn't contain that column, the index is `None`.
The field `is_source_needed` stores whether a column in the source memtable or SST is needed.
Suppose we add a new column `usage_idle` to the table `cpu`.
```sql
ALTER TABLE cpu ADD COLUMN usage_idle DOUBLE;
```
The new `StoreSchema` becomes:
```json
{
"schema":{
"column_schemas":[
"datacenter",
"host",
"ts",
"usage_user",
"usage_system",
"usage_idle",
"__sequence",
"__op_type"
],
"time_index":2,
"version":1
},
"row_key_end":3,
"user_column_end":6
}
```
Note that we bump the version of the schema to 1.
Suppose we want to select `ts`, `usage_system`, and `usage_idle`. While reading from the old schema, the storage engine creates a `ReadAdapter` like this:
```json
{
"source_schema":{
"schema":{
"column_schemas":[
"datacenter",
"host",
"ts",
"usage_user",
"usage_system",
"__sequence",
"__op_type"
],
"time_index":2,
"version":0
},
"row_key_end":3,
"user_column_end":5
},
"dest_schema":{
"schema_to_read":{
"schema":{
"column_schemas":[
"datacenter",
"host",
"ts",
"usage_system",
"usage_idle",
"__sequence",
"__op_type"
],
"time_index":2,
"version":1
},
"row_key_end":3,
"user_column_end":5
},
"projected_user_schema":{
"column_schemas":[
"ts",
"usage_system",
"usage_idle"
],
"time_index":0
}
},
"indices_in_result":[
0,
1,
2,
3,
null,
4,
5
],
"is_source_needed":[
true,
true,
true,
false,
true,
true,
true
]
}
```
We don't need to read `usage_user`, so `is_source_needed[3]` is false. The old schema doesn't have the column `usage_idle`, so `indices_in_result[4]` is `null`, and the `ReadAdapter` inserts a null column into the output row so that the output schema still contains `usage_idle`.
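The adaptation step itself can be sketched as a pure function (illustrative, not the actual `ReadAdapter` implementation): for each destination column, either copy the mapped source value or emit a null.
```rust
/// Project a source row into the destination schema; `None` in the mapping
/// means the source lacks that column, so the output gets a null.
fn adapt_row<T: Clone>(source: &[T], indices_in_result: &[Option<usize>]) -> Vec<Option<T>> {
    indices_in_result
        .iter()
        .map(|idx| idx.map(|i| source[i].clone()))
        .collect()
}

fn main() {
    // A source row from the old schema, with `usage_user` already dropped by
    // `is_source_needed`: [datacenter, host, ts, usage_system, __sequence, __op_type]
    let source = ["dc1", "host1", "1686441600000", "0.5", "42", "1"];
    let indices = [Some(0), Some(1), Some(2), Some(3), None, Some(4), Some(5)];
    let dest = adapt_row(&source, &indices);
    // `usage_idle` (destination index 4) comes out as a null column.
    assert!(dest[4].is_none());
}
```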
The figure below shows the relationship between `RegionSchema`, `StoreSchema`, `ProjectedSchema`, and `ReadAdapter`.
```text
┌──────────────────────────────┐
│ │
│ ┌────────────────────┐ │
│ │ store_schema │ │
│ │ │ │
│ │ StoreSchema │ │
│ │ version 1 │ │
│ └────────────────────┘ │
│ │
│ ┌────────────────────┐ │
│ │ user_schema │ │
│ └────────────────────┘ │
│ │
│ RegionSchema │
│ │
└──────────────┬───────────────┘
┌──────────────▼───────────────┐
│ │
│ ┌──────────────────────────┐ │
│ │ schema_to_read │ │
│ │ │ │
│ │ StoreSchema (projected) │ │
│ │ version 1 │ │
│ └──────────────────────────┘ │
┌───┤ ├───┐
│ │ ┌──────────────────────────┐ │ │
│ │ │ projected_user_schema │ │ │
│ │ └──────────────────────────┘ │ │
│ │ │ │
│ │ ProjectedSchema │ │
dest schema │ └──────────────────────────────┘ │ dest schema
│ │
│ │
┌──────▼───────┐ ┌───────▼──────┐
│ │ │ │
│ ReadAdapter │ │ ReadAdapter │
│ │ │ │
└──────▲───────┘ └───────▲──────┘
│ │
│ │
source schema │ │ source schema
│ │
┌───────┴─────────┐ ┌────────┴────────┐
│ │ │ │
│ ┌─────────────┐ │ │ ┌─────────────┐ │
│ │ │ │ │ │ │ │
│ │ StoreSchema │ │ │ │ StoreSchema │ │
│ │ │ │ │ │ │ │
│ │ version 0 │ │ │ │ version 1 │ │
│ │ │ │ │ │ │ │
│ └─────────────┘ │ │ └─────────────┘ │
│ │ │ │
│ SST 0 │ │ SST 1 │
│ │ │ │
└─────────────────┘ └─────────────────┘
```
# Conversion
This figure shows the conversion between schemas:
```text
┌─────────────┐ schema From ┌─────────────┐
│ ├──────────────────┐ ┌────────────────────────────► │
│ TableMeta │ │ │ │ RawSchema │
│ │ │ │ ┌─────────────────────────┤ │
└─────────────┘ │ │ │ TryFrom └─────────────┘
│ │ │
│ │ │
│ │ │
│ │ │
│ │ │
┌───────────────────┐ ┌─────▼──┴──▼──┐ arrow_schema() ┌─────────────────┐
│ │ │ ├─────────────────────► │
│ ColumnsMetadata │ ┌─────► Schema │ │ ArrowSchema ├──┐
│ │ │ │ ◄─────────────────────┤ │ │
└────┬───────────▲──┘ │ └───▲───▲──────┘ TryFrom └─────────────────┘ │
│ │ │ │ │ │
│ │ │ │ └────────────────────────────────────────┐ │
│ │ │ │ │ │
│ columns │ user_schema() │ │ │
│ │ │ │ projected_user_schema() schema() │
│ │ │ │ │ │
│ ┌───┴─────────────┴─┐ │ ┌────────────────────┐ │ │
columns │ │ │ └─────────────────┤ │ │ │ TryFrom
│ │ RegionSchema │ │ ProjectedSchema │ │ │
│ │ ├─────────────────────────► │ │ │
│ └─────────────────┬─┘ ProjectedSchema::new() └──────────────────┬─┘ │ │
│ │ │ │ │
│ │ │ │ │
│ │ │ │ │
│ │ │ │ │
┌────▼────────────────────┐ │ store_schema() ┌────▼───────┴──┐ │
│ │ └─────────────────────────────────────────► │ │
│ Vec<ColumnMetadata> │ │ StoreSchema ◄─────┘
│ ◄──────────────────────────────────────────────┤ │
└─────────────────────────┘ columns └───────────────┘
```

View File

@@ -1,2 +1,2 @@
[toolchain]
channel = "nightly-2024-04-18"
channel = "nightly-2024-04-20"

View File

@@ -30,6 +30,7 @@ pub enum Error {
#[snafu(display("Unknown proto column datatype: {}", datatype))]
UnknownColumnDataType {
datatype: i32,
#[snafu(implicit)]
location: Location,
#[snafu(source)]
error: prost::DecodeError,
@@ -38,12 +39,14 @@ pub enum Error {
#[snafu(display("Failed to create column datatype from {:?}", from))]
IntoColumnDataType {
from: ConcreteDataType,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Failed to convert column default constraint, column: {}", column))]
ConvertColumnDefaultConstraint {
column: String,
#[snafu(implicit)]
location: Location,
source: datatypes::error::Error,
},
@@ -51,6 +54,7 @@ pub enum Error {
#[snafu(display("Invalid column default constraint, column: {}", column))]
InvalidColumnDefaultConstraint {
column: String,
#[snafu(implicit)]
location: Location,
source: datatypes::error::Error,
},

View File

@@ -480,6 +480,8 @@ fn ddl_request_type(request: &DdlRequest) -> &'static str {
Some(Expr::TruncateTable(_)) => "ddl.truncate_table",
Some(Expr::CreateFlow(_)) => "ddl.create_flow",
Some(Expr::DropFlow(_)) => "ddl.drop_flow",
Some(Expr::CreateView(_)) => "ddl.create_view",
Some(Expr::DropView(_)) => "ddl.drop_view",
None => "ddl.empty",
}
}

View File

@@ -34,11 +34,13 @@ pub enum Error {
Io {
#[snafu(source)]
error: std::io::Error,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Auth failed"))]
AuthBackend {
#[snafu(implicit)]
location: Location,
source: BoxedError,
},
@@ -72,7 +74,10 @@ pub enum Error {
},
#[snafu(display("User is not authorized to perform this action"))]
PermissionDenied { location: Location },
PermissionDenied {
#[snafu(implicit)]
location: Location,
},
}
impl ErrorExt for Error {


@@ -30,6 +30,7 @@ pub enum PermissionReq<'a> {
PromStoreWrite,
PromStoreRead,
Otlp,
LogWrite,
}
#[derive(Debug)]


@@ -52,7 +52,10 @@ fn test_permission_checker() {
let sql_result = checker.check_permission(
None,
PermissionReq::SqlStatement(&Statement::ShowDatabases(ShowDatabases::new(ShowKind::All))),
PermissionReq::SqlStatement(&Statement::ShowDatabases(ShowDatabases::new(
ShowKind::All,
false,
))),
);
assert_matches!(sql_result, Ok(PermissionResp::Reject));

src/cache/Cargo.toml (new file)

@@ -0,0 +1,14 @@
[package]
name = "cache"
version.workspace = true
edition.workspace = true
license.workspace = true
[dependencies]
catalog.workspace = true
common-error.workspace = true
common-macro.workspace = true
common-meta.workspace = true
moka.workspace = true
snafu.workspace = true
substrait.workspace = true

src/cache/src/error.rs (new file)

@@ -0,0 +1,44 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use common_error::ext::ErrorExt;
use common_error::status_code::StatusCode;
use common_macro::stack_trace_debug;
use snafu::{Location, Snafu};
#[derive(Snafu)]
#[snafu(visibility(pub))]
#[stack_trace_debug]
pub enum Error {
#[snafu(display("Failed to get cache from cache registry: {}", name))]
CacheRequired {
#[snafu(implicit)]
location: Location,
name: String,
},
}
pub type Result<T> = std::result::Result<T, Error>;
impl ErrorExt for Error {
fn status_code(&self) -> StatusCode {
match self {
Error::CacheRequired { .. } => StatusCode::Internal,
}
}
fn as_any(&self) -> &dyn std::any::Any {
self
}
}

src/cache/src/lib.rs (new file)

@@ -0,0 +1,135 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
pub mod error;
use std::sync::Arc;
use std::time::Duration;
use catalog::kvbackend::new_table_cache;
use common_meta::cache::{
new_table_flownode_set_cache, new_table_info_cache, new_table_name_cache,
new_table_route_cache, new_view_info_cache, CacheRegistry, CacheRegistryBuilder,
LayeredCacheRegistryBuilder,
};
use common_meta::kv_backend::KvBackendRef;
use moka::future::CacheBuilder;
use snafu::OptionExt;
use crate::error::Result;
const DEFAULT_CACHE_MAX_CAPACITY: u64 = 65536;
const DEFAULT_CACHE_TTL: Duration = Duration::from_secs(10 * 60);
const DEFAULT_CACHE_TTI: Duration = Duration::from_secs(5 * 60);
pub const TABLE_INFO_CACHE_NAME: &str = "table_info_cache";
pub const VIEW_INFO_CACHE_NAME: &str = "view_info_cache";
pub const TABLE_NAME_CACHE_NAME: &str = "table_name_cache";
pub const TABLE_CACHE_NAME: &str = "table_cache";
pub const TABLE_FLOWNODE_SET_CACHE_NAME: &str = "table_flownode_set_cache";
pub const TABLE_ROUTE_CACHE_NAME: &str = "table_route_cache";
pub fn build_fundamental_cache_registry(kv_backend: KvBackendRef) -> CacheRegistry {
// Builds table info cache
let cache = CacheBuilder::new(DEFAULT_CACHE_MAX_CAPACITY)
.time_to_live(DEFAULT_CACHE_TTL)
.time_to_idle(DEFAULT_CACHE_TTI)
.build();
let table_info_cache = Arc::new(new_table_info_cache(
TABLE_INFO_CACHE_NAME.to_string(),
cache,
kv_backend.clone(),
));
// Builds table name cache
let cache = CacheBuilder::new(DEFAULT_CACHE_MAX_CAPACITY)
.time_to_live(DEFAULT_CACHE_TTL)
.time_to_idle(DEFAULT_CACHE_TTI)
.build();
let table_name_cache = Arc::new(new_table_name_cache(
TABLE_NAME_CACHE_NAME.to_string(),
cache,
kv_backend.clone(),
));
// Builds table route cache
let cache = CacheBuilder::new(DEFAULT_CACHE_MAX_CAPACITY)
.time_to_live(DEFAULT_CACHE_TTL)
.time_to_idle(DEFAULT_CACHE_TTI)
.build();
let table_route_cache = Arc::new(new_table_route_cache(
TABLE_ROUTE_CACHE_NAME.to_string(),
cache,
kv_backend.clone(),
));
// Builds table flownode set cache
let cache = CacheBuilder::new(DEFAULT_CACHE_MAX_CAPACITY)
.time_to_live(DEFAULT_CACHE_TTL)
.time_to_idle(DEFAULT_CACHE_TTI)
.build();
let table_flownode_set_cache = Arc::new(new_table_flownode_set_cache(
TABLE_FLOWNODE_SET_CACHE_NAME.to_string(),
cache,
kv_backend.clone(),
));
// Builds the view info cache
let cache = CacheBuilder::new(DEFAULT_CACHE_MAX_CAPACITY)
.time_to_live(DEFAULT_CACHE_TTL)
.time_to_idle(DEFAULT_CACHE_TTI)
.build();
let view_info_cache = Arc::new(new_view_info_cache(
VIEW_INFO_CACHE_NAME.to_string(),
cache,
kv_backend.clone(),
));
CacheRegistryBuilder::default()
.add_cache(table_info_cache)
.add_cache(table_name_cache)
.add_cache(table_route_cache)
.add_cache(view_info_cache)
.add_cache(table_flownode_set_cache)
.build()
}
// TODO(weny): Make the cache configurable.
pub fn with_default_composite_cache_registry(
builder: LayeredCacheRegistryBuilder,
) -> Result<LayeredCacheRegistryBuilder> {
let table_info_cache = builder.get().context(error::CacheRequiredSnafu {
name: TABLE_INFO_CACHE_NAME,
})?;
let table_name_cache = builder.get().context(error::CacheRequiredSnafu {
name: TABLE_NAME_CACHE_NAME,
})?;
// Builds table cache
let cache = CacheBuilder::new(DEFAULT_CACHE_MAX_CAPACITY)
.time_to_live(DEFAULT_CACHE_TTL)
.time_to_idle(DEFAULT_CACHE_TTI)
.build();
let table_cache = Arc::new(new_table_cache(
TABLE_CACHE_NAME.to_string(),
cache,
table_info_cache,
table_name_cache,
));
let registry = CacheRegistryBuilder::default()
.add_cache(table_cache)
.build();
Ok(builder.add_cache_registry(registry))
}
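Putting the two helpers together: the fundamental caches are built over the kv backend first, then the composite table cache is layered on top. A sketch of the wiring, mirroring how the test code later in this diff assembles the registry:

```rust
use std::sync::Arc;

use cache::{build_fundamental_cache_registry, with_default_composite_cache_registry};
use common_meta::cache::{
    CacheRegistryBuilder, LayeredCacheRegistryBuilder, LayeredCacheRegistryRef,
};
use common_meta::kv_backend::KvBackendRef;

fn build_caches(backend: KvBackendRef) -> LayeredCacheRegistryRef {
    // Start from an empty layer, add the fundamental caches built over
    // the kv backend, then layer the composite table cache on top; the
    // composite step fails if the fundamental caches are missing.
    let builder = LayeredCacheRegistryBuilder::default()
        .add_cache_registry(CacheRegistryBuilder::default().build())
        .add_cache_registry(build_fundamental_cache_registry(backend));
    Arc::new(
        with_default_composite_cache_registry(builder)
            .expect("fundamental caches were registered above")
            .build(),
    )
}
```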


@@ -16,6 +16,7 @@ arrow.workspace = true
arrow-schema.workspace = true
async-stream.workspace = true
async-trait = "0.1"
bytes.workspace = true
common-catalog.workspace = true
common-config.workspace = true
common-error.workspace = true
@@ -48,8 +49,11 @@ table.workspace = true
tokio.workspace = true
[dev-dependencies]
cache.workspace = true
catalog = { workspace = true, features = ["testing"] }
chrono.workspace = true
common-meta = { workspace = true, features = ["testing"] }
common-query = { workspace = true, features = ["testing"] }
common-test-util.workspace = true
log-store.workspace = true
object-store.workspace = true


@@ -19,10 +19,7 @@ use common_error::ext::{BoxedError, ErrorExt};
use common_error::status_code::StatusCode;
use common_macro::stack_trace_debug;
use datafusion::error::DataFusionError;
use datatypes::prelude::ConcreteDataType;
use snafu::{Location, Snafu};
use table::metadata::TableId;
use tokio::task::JoinError;
#[derive(Snafu)]
#[snafu(visibility(pub))]
@@ -30,12 +27,14 @@ use tokio::task::JoinError;
pub enum Error {
#[snafu(display("Failed to list catalogs"))]
ListCatalogs {
#[snafu(implicit)]
location: Location,
source: BoxedError,
},
#[snafu(display("Failed to list {}'s schemas", catalog))]
ListSchemas {
#[snafu(implicit)]
location: Location,
catalog: String,
source: BoxedError,
@@ -43,6 +42,7 @@ pub enum Error {
#[snafu(display("Failed to list {}.{}'s tables", catalog, schema))]
ListTables {
#[snafu(implicit)]
location: Location,
catalog: String,
schema: String,
@@ -51,78 +51,37 @@ pub enum Error {
#[snafu(display("Failed to list nodes in cluster: {source}"))]
ListNodes {
#[snafu(implicit)]
location: Location,
source: BoxedError,
},
#[snafu(display("Failed to re-compile script due to internal error"))]
CompileScriptInternal {
#[snafu(implicit)]
location: Location,
source: BoxedError,
},
#[snafu(display("Failed to open system catalog table"))]
OpenSystemCatalog {
location: Location,
source: table::error::Error,
},
#[snafu(display("Failed to create system catalog table"))]
CreateSystemCatalog {
location: Location,
source: table::error::Error,
},
#[snafu(display("Failed to create table, table info: {}", table_info))]
CreateTable {
table_info: String,
#[snafu(implicit)]
location: Location,
source: table::error::Error,
},
#[snafu(display("System catalog is not valid: {}", msg))]
SystemCatalog { msg: String, location: Location },
#[snafu(display(
"System catalog table type mismatch, expected: binary, found: {:?}",
data_type,
))]
SystemCatalogTypeMismatch {
data_type: ConcreteDataType,
SystemCatalog {
msg: String,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Invalid system catalog entry type: {:?}", entry_type))]
InvalidEntryType {
entry_type: Option<u8>,
location: Location,
},
#[snafu(display("Invalid system catalog key: {:?}", key))]
InvalidKey {
key: Option<String>,
location: Location,
},
#[snafu(display("Catalog value is not present"))]
EmptyValue { location: Location },
#[snafu(display("Failed to deserialize value"))]
ValueDeserialize {
#[snafu(source)]
error: serde_json::error::Error,
location: Location,
},
#[snafu(display("Table engine not found: {}", engine_name))]
TableEngineNotFound {
engine_name: String,
location: Location,
source: table::error::Error,
},
#[snafu(display("Cannot find catalog by name: {}", catalog_name))]
CatalogNotFound {
catalog_name: String,
#[snafu(implicit)]
location: Location,
},
@@ -130,43 +89,28 @@ pub enum Error {
SchemaNotFound {
catalog: String,
schema: String,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Table `{}` already exists", table))]
TableExists { table: String, location: Location },
TableExists {
table: String,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Table not found: {}", table))]
TableNotExist { table: String, location: Location },
#[snafu(display("Schema {} already exists", schema))]
SchemaExists { schema: String, location: Location },
#[snafu(display("Operation {} not implemented yet", operation))]
Unimplemented {
operation: String,
TableNotExist {
table: String,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Operation {} not supported", op))]
NotSupported { op: String, location: Location },
#[snafu(display("Failed to open table {table_id}"))]
OpenTable {
table_id: TableId,
location: Location,
source: table::error::Error,
},
#[snafu(display("Failed to open table in parallel"))]
ParallelOpenTable {
#[snafu(source)]
error: JoinError,
},
#[snafu(display("Table not found while opening table, table info: {}", table_info))]
TableNotFound {
table_info: String,
#[snafu(display("View info not found: {}", name))]
ViewInfoNotFound {
name: String,
#[snafu(implicit)]
location: Location,
},
@@ -176,59 +120,44 @@ pub enum Error {
#[snafu(display("Failed to find region routes"))]
FindRegionRoutes { source: partition::error::Error },
#[snafu(display("Failed to read system catalog table records"))]
ReadSystemCatalog {
location: Location,
source: common_recordbatch::error::Error,
},
#[snafu(display("Failed to create recordbatch"))]
CreateRecordBatch {
#[snafu(implicit)]
location: Location,
source: common_recordbatch::error::Error,
},
#[snafu(display("Failed to insert table creation record to system catalog"))]
InsertCatalogRecord {
location: Location,
source: table::error::Error,
},
#[snafu(display("Failed to scan system catalog table"))]
SystemCatalogTableScan {
location: Location,
source: table::error::Error,
},
#[snafu(display("Internal error"))]
Internal {
#[snafu(implicit)]
location: Location,
source: BoxedError,
},
#[snafu(display("Failed to upgrade weak catalog manager reference"))]
UpgradeWeakCatalogManagerRef { location: Location },
UpgradeWeakCatalogManagerRef {
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Failed to execute system catalog table scan"))]
SystemCatalogTableScanExec {
#[snafu(display("Failed to decode logical plan for view: {}", name))]
DecodePlan {
name: String,
#[snafu(implicit)]
location: Location,
source: common_query::error::Error,
},
#[snafu(display("Cannot parse catalog value"))]
InvalidCatalogValue {
location: Location,
source: common_catalog::error::Error,
},
#[snafu(display("Failed to perform metasrv operation"))]
Metasrv {
#[snafu(implicit)]
location: Location,
source: meta_client::error::Error,
},
#[snafu(display("Invalid table info in catalog"))]
InvalidTableInfoInCatalog {
#[snafu(implicit)]
location: Location,
source: datatypes::error::Error,
},
@@ -240,29 +169,43 @@ pub enum Error {
Datafusion {
#[snafu(source)]
error: DataFusionError,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Table schema mismatch"))]
TableSchemaMismatch {
location: Location,
source: table::error::Error,
},
#[snafu(display("A generic error has occurred, msg: {}", msg))]
Generic { msg: String, location: Location },
#[snafu(display("Table metadata manager error"))]
TableMetadataManager {
source: common_meta::error::Error,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Get null from table cache, key: {}", key))]
TableCacheNotGet { key: String, location: Location },
#[snafu(display("Failed to get table cache"))]
GetTableCache {
source: common_meta::error::Error,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Failed to get table cache, err: {}", err_msg))]
GetTableCache { err_msg: String },
#[snafu(display("Failed to get view info from cache"))]
GetViewCache {
source: common_meta::error::Error,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Cache not found: {name}"))]
CacheNotFound {
name: String,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Failed to cast the catalog manager"))]
CastManager {
#[snafu(implicit)]
location: Location,
},
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -270,61 +213,43 @@ pub type Result<T> = std::result::Result<T, Error>;
impl ErrorExt for Error {
fn status_code(&self) -> StatusCode {
match self {
Error::InvalidKey { .. }
| Error::SchemaNotFound { .. }
Error::SchemaNotFound { .. }
| Error::CatalogNotFound { .. }
| Error::FindPartitions { .. }
| Error::FindRegionRoutes { .. }
| Error::InvalidEntryType { .. }
| Error::ParallelOpenTable { .. } => StatusCode::Unexpected,
| Error::CacheNotFound { .. }
| Error::CastManager { .. } => StatusCode::Unexpected,
Error::TableNotFound { .. } => StatusCode::TableNotFound,
Error::ViewInfoNotFound { .. } => StatusCode::TableNotFound,
Error::SystemCatalog { .. }
| Error::EmptyValue { .. }
| Error::ValueDeserialize { .. } => StatusCode::StorageUnavailable,
Error::SystemCatalog { .. } => StatusCode::StorageUnavailable,
Error::Generic { .. }
| Error::SystemCatalogTypeMismatch { .. }
| Error::UpgradeWeakCatalogManagerRef { .. } => StatusCode::Internal,
Error::ReadSystemCatalog { source, .. } | Error::CreateRecordBatch { source, .. } => {
source.status_code()
}
Error::InvalidCatalogValue { source, .. } => source.status_code(),
Error::UpgradeWeakCatalogManagerRef { .. } => StatusCode::Internal,
Error::CreateRecordBatch { source, .. } => source.status_code(),
Error::TableExists { .. } => StatusCode::TableAlreadyExists,
Error::TableNotExist { .. } => StatusCode::TableNotFound,
Error::SchemaExists { .. } | Error::TableEngineNotFound { .. } => {
StatusCode::InvalidArguments
}
Error::ListCatalogs { source, .. }
| Error::ListNodes { source, .. }
| Error::ListSchemas { source, .. }
| Error::ListTables { source, .. } => source.status_code(),
Error::OpenSystemCatalog { source, .. }
| Error::CreateSystemCatalog { source, .. }
| Error::InsertCatalogRecord { source, .. }
| Error::OpenTable { source, .. }
| Error::CreateTable { source, .. }
| Error::TableSchemaMismatch { source, .. } => source.status_code(),
Error::CreateTable { source, .. } => source.status_code(),
Error::Metasrv { source, .. } => source.status_code(),
Error::SystemCatalogTableScan { source, .. } => source.status_code(),
Error::SystemCatalogTableScanExec { source, .. } => source.status_code(),
Error::DecodePlan { source, .. } => source.status_code(),
Error::InvalidTableInfoInCatalog { source, .. } => source.status_code(),
Error::CompileScriptInternal { source, .. } | Error::Internal { source, .. } => {
source.status_code()
}
Error::Unimplemented { .. } | Error::NotSupported { .. } => StatusCode::Unsupported,
Error::QueryAccessDenied { .. } => StatusCode::AccessDenied,
Error::Datafusion { .. } => StatusCode::EngineExecuteQuery,
Error::TableMetadataManager { source, .. } => source.status_code(),
Error::TableCacheNotGet { .. } | Error::GetTableCache { .. } => StatusCode::Internal,
Error::GetViewCache { source, .. } | Error::GetTableCache { source, .. } => {
source.status_code()
}
}
}
@@ -356,11 +281,6 @@ mod tests {
.status_code()
);
assert_eq!(
StatusCode::Unexpected,
InvalidKeySnafu { key: None }.build().status_code()
);
assert_eq!(
StatusCode::StorageUnavailable,
Error::SystemCatalog {
@@ -369,19 +289,6 @@ mod tests {
}
.status_code()
);
assert_eq!(
StatusCode::Internal,
Error::SystemCatalogTypeMismatch {
data_type: ConcreteDataType::binary_datatype(),
location: Location::generate(),
}
.status_code()
);
assert_eq!(
StatusCode::StorageUnavailable,
EmptyValueSnafu {}.build().status_code()
);
}
#[test]


@@ -21,11 +21,11 @@ use common_config::Mode;
use common_error::ext::BoxedError;
use common_meta::cluster::{ClusterInfo, NodeInfo, NodeStatus};
use common_meta::peer::Peer;
use common_query::physical_plan::TaskContext;
use common_recordbatch::adapter::RecordBatchStreamAdapter;
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
use common_telemetry::warn;
use common_time::timestamp::Timestamp;
use datafusion::execution::TaskContext;
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;


@@ -20,9 +20,9 @@ use common_catalog::consts::{
SEMANTIC_TYPE_TIME_INDEX,
};
use common_error::ext::BoxedError;
use common_query::physical_plan::TaskContext;
use common_recordbatch::adapter::RecordBatchStreamAdapter;
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
use datafusion::execution::TaskContext;
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;


@@ -17,9 +17,9 @@ use std::sync::{Arc, Weak};
use arrow_schema::SchemaRef as ArrowSchemaRef;
use common_catalog::consts::INFORMATION_SCHEMA_KEY_COLUMN_USAGE_TABLE_ID;
use common_error::ext::BoxedError;
use common_query::physical_plan::TaskContext;
use common_recordbatch::adapter::RecordBatchStreamAdapter;
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
use datafusion::execution::TaskContext;
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;


@@ -17,9 +17,9 @@ use std::sync::Arc;
use arrow_schema::SchemaRef as ArrowSchemaRef;
use common_error::ext::BoxedError;
use common_query::physical_plan::TaskContext;
use common_recordbatch::adapter::RecordBatchStreamAdapter;
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
use datafusion::execution::TaskContext;
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;


@@ -18,10 +18,10 @@ use std::sync::{Arc, Weak};
use arrow_schema::SchemaRef as ArrowSchemaRef;
use common_catalog::consts::INFORMATION_SCHEMA_PARTITIONS_TABLE_ID;
use common_error::ext::BoxedError;
use common_query::physical_plan::TaskContext;
use common_recordbatch::adapter::RecordBatchStreamAdapter;
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
use common_time::datetime::DateTime;
use datafusion::execution::TaskContext;
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;


@@ -14,10 +14,9 @@
use arrow::array::StringArray;
use arrow::compute::kernels::comparison;
use common_query::logical_plan::DfExpr;
use datafusion::common::ScalarValue;
use datafusion::logical_expr::expr::Like;
use datafusion::logical_expr::Operator;
use datafusion::logical_expr::{Expr, Operator};
use datatypes::value::Value;
use store_api::storage::ScanRequest;
@@ -118,12 +117,12 @@ impl Predicate {
}
/// Try to create a predicate from datafusion [`Expr`], return None if fails.
fn from_expr(expr: DfExpr) -> Option<Predicate> {
fn from_expr(expr: Expr) -> Option<Predicate> {
match expr {
// NOT expr
DfExpr::Not(expr) => Some(Predicate::Not(Box::new(Self::from_expr(*expr)?))),
Expr::Not(expr) => Some(Predicate::Not(Box::new(Self::from_expr(*expr)?))),
// expr LIKE pattern
DfExpr::Like(Like {
Expr::Like(Like {
negated,
expr,
pattern,
@@ -131,10 +130,10 @@ impl Predicate {
..
}) if is_column(&expr) && is_string_literal(&pattern) => {
// Safety: ensured by guard
let DfExpr::Column(c) = *expr else {
let Expr::Column(c) = *expr else {
unreachable!();
};
let DfExpr::Literal(ScalarValue::Utf8(Some(pattern))) = *pattern else {
let Expr::Literal(ScalarValue::Utf8(Some(pattern))) = *pattern else {
unreachable!();
};
@@ -147,10 +146,10 @@ impl Predicate {
}
}
// left OP right
DfExpr::BinaryExpr(bin) => match (*bin.left, bin.op, *bin.right) {
Expr::BinaryExpr(bin) => match (*bin.left, bin.op, *bin.right) {
// left == right
(DfExpr::Literal(scalar), Operator::Eq, DfExpr::Column(c))
| (DfExpr::Column(c), Operator::Eq, DfExpr::Literal(scalar)) => {
(Expr::Literal(scalar), Operator::Eq, Expr::Column(c))
| (Expr::Column(c), Operator::Eq, Expr::Literal(scalar)) => {
let Ok(v) = Value::try_from(scalar) else {
return None;
};
@@ -158,8 +157,8 @@ impl Predicate {
Some(Predicate::Eq(c.name, v))
}
// left != right
(DfExpr::Literal(scalar), Operator::NotEq, DfExpr::Column(c))
| (DfExpr::Column(c), Operator::NotEq, DfExpr::Literal(scalar)) => {
(Expr::Literal(scalar), Operator::NotEq, Expr::Column(c))
| (Expr::Column(c), Operator::NotEq, Expr::Literal(scalar)) => {
let Ok(v) = Value::try_from(scalar) else {
return None;
};
@@ -183,14 +182,14 @@ impl Predicate {
_ => None,
},
// [NOT] IN (LIST)
DfExpr::InList(list) => {
Expr::InList(list) => {
match (*list.expr, list.list, list.negated) {
// column [NOT] IN (v1, v2, v3, ...)
(DfExpr::Column(c), list, negated) if is_all_scalars(&list) => {
(Expr::Column(c), list, negated) if is_all_scalars(&list) => {
let mut values = Vec::with_capacity(list.len());
for scalar in list {
// Safety: checked by `is_all_scalars`
let DfExpr::Literal(scalar) = scalar else {
let Expr::Literal(scalar) = scalar else {
unreachable!();
};
@@ -237,12 +236,12 @@ fn like_utf8(s: &str, pattern: &str, case_insensitive: &bool) -> Option<bool> {
Some(booleans.value(0))
}
fn is_string_literal(expr: &DfExpr) -> bool {
matches!(expr, DfExpr::Literal(ScalarValue::Utf8(Some(_))))
fn is_string_literal(expr: &Expr) -> bool {
matches!(expr, Expr::Literal(ScalarValue::Utf8(Some(_))))
}
fn is_column(expr: &DfExpr) -> bool {
matches!(expr, DfExpr::Column(_))
fn is_column(expr: &Expr) -> bool {
matches!(expr, Expr::Column(_))
}
/// A list of predicates
@@ -257,7 +256,7 @@ impl Predicates {
let mut predicates = Vec::with_capacity(request.filters.len());
for filter in &request.filters {
if let Some(predicate) = Predicate::from_expr(filter.df_expr().clone()) {
if let Some(predicate) = Predicate::from_expr(filter.clone()) {
predicates.push(predicate);
}
}
@@ -286,8 +285,8 @@ impl Predicates {
}
/// Returns true when the values are all [`Expr::Literal`].
fn is_all_scalars(list: &[DfExpr]) -> bool {
list.iter().all(|v| matches!(v, DfExpr::Literal(_)))
fn is_all_scalars(list: &[Expr]) -> bool {
list.iter().all(|v| matches!(v, Expr::Literal(_)))
}
#[cfg(test)]
@@ -376,7 +375,7 @@ mod tests {
#[test]
fn test_predicate_like() {
// case insensitive
let expr = DfExpr::Like(Like {
let expr = Expr::Like(Like {
negated: false,
expr: Box::new(column("a")),
pattern: Box::new(string_literal("%abc")),
@@ -403,7 +402,7 @@ mod tests {
assert!(p.eval(&[]).is_none());
// case sensitive
let expr = DfExpr::Like(Like {
let expr = Expr::Like(Like {
negated: false,
expr: Box::new(column("a")),
pattern: Box::new(string_literal("%abc")),
@@ -423,7 +422,7 @@ mod tests {
assert!(p.eval(&[]).is_none());
// not like
let expr = DfExpr::Like(Like {
let expr = Expr::Like(Like {
negated: true,
expr: Box::new(column("a")),
pattern: Box::new(string_literal("%abc")),
@@ -437,15 +436,15 @@ mod tests {
assert!(p.eval(&[]).is_none());
}
fn column(name: &str) -> DfExpr {
DfExpr::Column(Column {
fn column(name: &str) -> Expr {
Expr::Column(Column {
relation: None,
name: name.to_string(),
})
}
fn string_literal(v: &str) -> DfExpr {
DfExpr::Literal(ScalarValue::Utf8(Some(v.to_string())))
fn string_literal(v: &str) -> Expr {
Expr::Literal(ScalarValue::Utf8(Some(v.to_string())))
}
fn match_string_value(v: &Value, expected: &str) -> bool {
@@ -463,14 +462,14 @@ mod tests {
result
}
fn mock_exprs() -> (DfExpr, DfExpr) {
let expr1 = DfExpr::BinaryExpr(BinaryExpr {
fn mock_exprs() -> (Expr, Expr) {
let expr1 = Expr::BinaryExpr(BinaryExpr {
left: Box::new(column("a")),
op: Operator::Eq,
right: Box::new(string_literal("a_value")),
});
let expr2 = DfExpr::BinaryExpr(BinaryExpr {
let expr2 = Expr::BinaryExpr(BinaryExpr {
left: Box::new(column("b")),
op: Operator::NotEq,
right: Box::new(string_literal("b_value")),
@@ -491,17 +490,17 @@ mod tests {
assert!(matches!(&p2, Predicate::NotEq(column, v) if column == "b"
&& match_string_value(v, "b_value")));
let and_expr = DfExpr::BinaryExpr(BinaryExpr {
let and_expr = Expr::BinaryExpr(BinaryExpr {
left: Box::new(expr1.clone()),
op: Operator::And,
right: Box::new(expr2.clone()),
});
let or_expr = DfExpr::BinaryExpr(BinaryExpr {
let or_expr = Expr::BinaryExpr(BinaryExpr {
left: Box::new(expr1.clone()),
op: Operator::Or,
right: Box::new(expr2.clone()),
});
let not_expr = DfExpr::Not(Box::new(expr1.clone()));
let not_expr = Expr::Not(Box::new(expr1.clone()));
let and_p = Predicate::from_expr(and_expr).unwrap();
assert!(matches!(and_p, Predicate::And(left, right) if *left == p1 && *right == p2));
@@ -510,7 +509,7 @@ mod tests {
let not_p = Predicate::from_expr(not_expr).unwrap();
assert!(matches!(not_p, Predicate::Not(p) if *p == p1));
let inlist_expr = DfExpr::InList(InList {
let inlist_expr = Expr::InList(InList {
expr: Box::new(column("a")),
list: vec![string_literal("a1"), string_literal("a2")],
negated: false,
@@ -520,7 +519,7 @@ mod tests {
assert!(matches!(&inlist_p, Predicate::InList(c, values) if c == "a"
&& match_string_values(values, &["a1", "a2"])));
let inlist_expr = DfExpr::InList(InList {
let inlist_expr = Expr::InList(InList {
expr: Box::new(column("a")),
list: vec![string_literal("a1"), string_literal("a2")],
negated: true,
@@ -540,7 +539,7 @@ mod tests {
let (expr1, expr2) = mock_exprs();
let request = ScanRequest {
filters: vec![expr1.into(), expr2.into()],
filters: vec![expr1, expr2],
..Default::default()
};
let predicates = Predicates::from_scan_request(&Some(request));
@@ -578,7 +577,7 @@ mod tests {
let (expr1, expr2) = mock_exprs();
let request = ScanRequest {
filters: vec![expr1.into(), expr2.into()],
filters: vec![expr1, expr2],
..Default::default()
};
let predicates = Predicates::from_scan_request(&Some(request));
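The bulk of this diff is mechanical: `DfExpr` (the re-export from `common_query`) is replaced by `datafusion::logical_expr::Expr`, and `ScanRequest::filters` now stores plain `Expr` values, so the `.df_expr()` unwrapping disappears. A quick sketch of a filter under the new imports; the table and value names are illustrative:

```rust
use datafusion::logical_expr::{col, lit, Expr};

fn main() {
    // A filter like `table_name = 'my_table'`, built straight from the
    // datafusion expression API; `ScanRequest::filters` stores these
    // values directly, so no `.df_expr()` unwrapping is needed anymore.
    let filter: Expr = col("table_name").eq(lit("my_table"));
    assert!(matches!(filter, Expr::BinaryExpr(_)));
}
```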


@@ -19,9 +19,9 @@ use arrow_schema::SchemaRef as ArrowSchemaRef;
use common_catalog::consts::INFORMATION_SCHEMA_REGION_PEERS_TABLE_ID;
use common_error::ext::BoxedError;
use common_meta::rpc::router::RegionRoute;
use common_query::physical_plan::TaskContext;
use common_recordbatch::adapter::RecordBatchStreamAdapter;
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
use datafusion::execution::TaskContext;
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
@@ -31,7 +31,7 @@ use datatypes::value::Value;
use datatypes::vectors::{Int64VectorBuilder, StringVectorBuilder, UInt64VectorBuilder};
use futures::{StreamExt, TryStreamExt};
use snafu::{OptionExt, ResultExt};
use store_api::storage::{ScanRequest, TableId};
use store_api::storage::{RegionId, ScanRequest, TableId};
use table::metadata::TableType;
use super::REGION_PEERS;
@@ -205,8 +205,8 @@ impl InformationSchemaRegionPeersBuilder {
table_ids.into_iter().map(|id| (id, vec![])).collect()
};
for routes in table_routes.values() {
self.add_region_peers(&predicates, routes);
for (table_id, routes) in table_routes {
self.add_region_peers(&predicates, table_id, &routes);
}
}
}
@@ -214,9 +214,14 @@ impl InformationSchemaRegionPeersBuilder {
self.finish()
}
fn add_region_peers(&mut self, predicates: &Predicates, routes: &[RegionRoute]) {
fn add_region_peers(
&mut self,
predicates: &Predicates,
table_id: TableId,
routes: &[RegionRoute],
) {
for route in routes {
let region_id = route.region.id.as_u64();
let region_id = RegionId::new(table_id, route.region.id.region_number()).as_u64();
let peer_id = route.leader_peer.clone().map(|p| p.id);
let peer_addr = route.leader_peer.clone().map(|p| p.addr);
let status = if let Some(status) = route.leader_status {
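The fix reconstructs each region id from the route's table id instead of trusting `route.region.id` alone. This matters because a region id packs two values into one `u64`; a sketch of the packing, assuming the usual high/low 32-bit split behind `RegionId::new` and `region_number()`:

```rust
/// Sketch of the packing `RegionId::new(table_id, region_number)` is
/// assumed to perform: table id in the high 32 bits, region number in
/// the low 32 bits of the resulting u64.
fn region_id_as_u64(table_id: u32, region_number: u32) -> u64 {
    ((table_id as u64) << 32) | region_number as u64
}

fn main() {
    let id = region_id_as_u64(1024, 1);
    assert_eq!(id >> 32, 1024);
    assert_eq!(id & 0xFFFF_FFFF, 1);
}
```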


@@ -17,10 +17,10 @@ use std::sync::Arc;
use arrow_schema::SchemaRef as ArrowSchemaRef;
use common_catalog::consts::INFORMATION_SCHEMA_RUNTIME_METRICS_TABLE_ID;
use common_error::ext::BoxedError;
use common_query::physical_plan::TaskContext;
use common_recordbatch::adapter::RecordBatchStreamAdapter;
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
use common_time::util::current_time_millis;
use datafusion::execution::TaskContext;
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;


@@ -17,9 +17,10 @@ use std::sync::{Arc, Weak};
use arrow_schema::SchemaRef as ArrowSchemaRef;
use common_catalog::consts::INFORMATION_SCHEMA_SCHEMATA_TABLE_ID;
use common_error::ext::BoxedError;
use common_query::physical_plan::TaskContext;
use common_meta::key::schema_name::SchemaNameKey;
use common_recordbatch::adapter::RecordBatchStreamAdapter;
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
use datafusion::execution::TaskContext;
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
@@ -32,15 +33,18 @@ use store_api::storage::{ScanRequest, TableId};
use super::SCHEMATA;
use crate::error::{
CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
CreateRecordBatchSnafu, InternalSnafu, Result, SchemaNotFoundSnafu, TableMetadataManagerSnafu,
UpgradeWeakCatalogManagerRefSnafu,
};
use crate::information_schema::{InformationTable, Predicates};
use crate::information_schema::{utils, InformationTable, Predicates};
use crate::CatalogManager;
pub const CATALOG_NAME: &str = "catalog_name";
pub const SCHEMA_NAME: &str = "schema_name";
const DEFAULT_CHARACTER_SET_NAME: &str = "default_character_set_name";
const DEFAULT_COLLATION_NAME: &str = "default_collation_name";
/// The database options
pub const SCHEMA_OPTS: &str = "options";
const INIT_CAPACITY: usize = 42;
/// The `information_schema.schemata` table implementation.
@@ -74,6 +78,7 @@ impl InformationSchemaSchemata {
false,
),
ColumnSchema::new("sql_path", ConcreteDataType::string_datatype(), true),
ColumnSchema::new(SCHEMA_OPTS, ConcreteDataType::string_datatype(), true),
]))
}
@@ -133,6 +138,7 @@ struct InformationSchemaSchemataBuilder {
charset_names: StringVectorBuilder,
collation_names: StringVectorBuilder,
sql_paths: StringVectorBuilder,
schema_options: StringVectorBuilder,
}
impl InformationSchemaSchemataBuilder {
@@ -150,6 +156,7 @@ impl InformationSchemaSchemataBuilder {
charset_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
collation_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
sql_paths: StringVectorBuilder::with_capacity(INIT_CAPACITY),
schema_options: StringVectorBuilder::with_capacity(INIT_CAPACITY),
}
}
@@ -160,21 +167,50 @@ impl InformationSchemaSchemataBuilder {
.catalog_manager
.upgrade()
.context(UpgradeWeakCatalogManagerRefSnafu)?;
let table_metadata_manager = utils::table_meta_manager(&self.catalog_manager)?;
let predicates = Predicates::from_scan_request(&request);
for schema_name in catalog_manager.schema_names(&catalog_name).await? {
self.add_schema(&predicates, &catalog_name, &schema_name);
let opts = if let Some(table_metadata_manager) = &table_metadata_manager {
let schema_opts = table_metadata_manager
.schema_manager()
.get(SchemaNameKey::new(&catalog_name, &schema_name))
.await
.context(TableMetadataManagerSnafu)?
.context(SchemaNotFoundSnafu {
catalog: &catalog_name,
schema: &schema_name,
})?;
Some(format!("{schema_opts}"))
} else {
None
};
self.add_schema(
&predicates,
&catalog_name,
&schema_name,
opts.as_deref().unwrap_or(""),
);
}
self.finish()
}
fn add_schema(&mut self, predicates: &Predicates, catalog_name: &str, schema_name: &str) {
fn add_schema(
&mut self,
predicates: &Predicates,
catalog_name: &str,
schema_name: &str,
schema_options: &str,
) {
let row = [
(CATALOG_NAME, &Value::from(catalog_name)),
(SCHEMA_NAME, &Value::from(schema_name)),
(DEFAULT_CHARACTER_SET_NAME, &Value::from("utf8")),
(DEFAULT_COLLATION_NAME, &Value::from("utf8_bin")),
(SCHEMA_OPTS, &Value::from(schema_options)),
];
if !predicates.eval(&row) {
@@ -186,6 +222,7 @@ impl InformationSchemaSchemataBuilder {
self.charset_names.push(Some("utf8"));
self.collation_names.push(Some("utf8_bin"));
self.sql_paths.push(None);
self.schema_options.push(Some(schema_options));
}
fn finish(&mut self) -> Result<RecordBatch> {
@@ -195,6 +232,7 @@ impl InformationSchemaSchemataBuilder {
Arc::new(self.charset_names.finish()),
Arc::new(self.collation_names.finish()),
Arc::new(self.sql_paths.finish()),
Arc::new(self.schema_options.finish()),
];
RecordBatch::new(self.schema.clone(), columns).context(CreateRecordBatchSnafu)
}


@@ -17,9 +17,9 @@ use std::sync::{Arc, Weak};
use arrow_schema::SchemaRef as ArrowSchemaRef;
use common_catalog::consts::INFORMATION_SCHEMA_TABLE_CONSTRAINTS_TABLE_ID;
use common_error::ext::BoxedError;
use common_query::physical_plan::TaskContext;
use common_recordbatch::adapter::RecordBatchStreamAdapter;
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
use datafusion::execution::TaskContext;
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;


@@ -17,9 +17,9 @@ use std::sync::{Arc, Weak};
use arrow_schema::SchemaRef as ArrowSchemaRef;
use common_catalog::consts::INFORMATION_SCHEMA_TABLES_TABLE_ID;
use common_error::ext::BoxedError;
use common_query::physical_plan::TaskContext;
use common_recordbatch::adapter::RecordBatchStreamAdapter;
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
use datafusion::execution::TaskContext;
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
@@ -105,7 +105,9 @@ impl InformationTable for InformationSchemaTables {
.make_tables(Some(request))
.await
.map(|x| x.into_df_record_batch())
.map_err(Into::into)
.map_err(|err| {
datafusion::error::DataFusionError::External(format!("{err:?}").into())
})
}),
));
Ok(Box::pin(


@@ -15,6 +15,7 @@
use std::sync::{Arc, Weak};
use common_config::Mode;
use common_meta::key::TableMetadataManagerRef;
use meta_client::client::MetaClient;
use snafu::OptionExt;
@@ -51,3 +52,17 @@ pub fn meta_client(catalog_manager: &Weak<dyn CatalogManager>) -> Result<Option<
Ok(meta_client)
}
/// Try to get the `[TableMetadataManagerRef]` from the `[CatalogManager]` weak reference.
pub fn table_meta_manager(
catalog_manager: &Weak<dyn CatalogManager>,
) -> Result<Option<TableMetadataManagerRef>> {
let catalog_manager = catalog_manager
.upgrade()
.context(UpgradeWeakCatalogManagerRefSnafu)?;
Ok(catalog_manager
.as_any()
.downcast_ref::<KvBackendCatalogManager>()
.map(|manager| manager.table_metadata_manager_ref().clone()))
}
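`table_meta_manager` relies on the `as_any` downcasting idiom: the weak `CatalogManager` is upgraded, then probed for the concrete `KvBackendCatalogManager`; any other implementation simply yields `None` rather than an error. A self-contained sketch of the pattern, with the types reduced to stubs:

```rust
use std::any::Any;
use std::sync::{Arc, Weak};

trait CatalogManager {
    fn as_any(&self) -> &dyn Any;
}

struct KvBackendCatalogManager;

impl CatalogManager for KvBackendCatalogManager {
    fn as_any(&self) -> &dyn Any {
        self
    }
}

/// Upgrade the weak reference, then try to downcast to the concrete
/// manager; a non-kv-backed manager yields `None`.
fn as_kv_backend(manager: &Weak<dyn CatalogManager>) -> Option<Arc<dyn CatalogManager>> {
    let strong = manager.upgrade()?;
    strong
        .as_any()
        .downcast_ref::<KvBackendCatalogManager>()
        .map(|_| strong.clone())
}

fn main() {
    let manager: Arc<dyn CatalogManager> = Arc::new(KvBackendCatalogManager);
    let weak = Arc::downgrade(&manager);
    assert!(as_kv_backend(&weak).is_some());
}
```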


@@ -16,5 +16,7 @@ pub use client::{CachedMetaKvBackend, CachedMetaKvBackendBuilder, MetaKvBackend}
mod client;
mod manager;
mod table_cache;
pub use manager::KvBackendCatalogManager;
pub use table_cache::{new_table_cache, TableCache, TableCacheRef};


@@ -350,6 +350,13 @@ pub struct MetaKvBackend {
pub client: Arc<MetaClient>,
}
impl MetaKvBackend {
/// Constructs a [MetaKvBackend].
pub fn new(client: Arc<MetaClient>) -> MetaKvBackend {
MetaKvBackend { client }
}
}
impl TxnService for MetaKvBackend {
type Error = Error;
}
@@ -450,9 +457,8 @@ mod tests {
use common_meta::kv_backend::{KvBackend, TxnService};
use common_meta::rpc::store::{
BatchDeleteRequest, BatchDeleteResponse, BatchGetRequest, BatchGetResponse,
BatchPutRequest, BatchPutResponse, CompareAndPutRequest, CompareAndPutResponse,
DeleteRangeRequest, DeleteRangeResponse, PutRequest, PutResponse, RangeRequest,
RangeResponse,
BatchPutRequest, BatchPutResponse, DeleteRangeRequest, DeleteRangeResponse, PutRequest,
PutResponse, RangeRequest, RangeResponse,
};
use common_meta::rpc::KeyValue;
use dashmap::DashMap;
@@ -512,13 +518,6 @@ mod tests {
unimplemented!()
}
async fn compare_and_put(
&self,
_req: CompareAndPutRequest,
) -> Result<CompareAndPutResponse, Self::Error> {
unimplemented!()
}
async fn delete_range(
&self,
_req: DeleteRangeRequest,


@@ -15,17 +15,14 @@
use std::any::Any;
use std::collections::BTreeSet;
use std::sync::{Arc, Weak};
use std::time::Duration;
use async_stream::try_stream;
use common_catalog::consts::{
DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, INFORMATION_SCHEMA_NAME, NUMBERS_TABLE_ID,
};
use common_catalog::format_full_table_name;
use common_config::Mode;
use common_error::ext::BoxedError;
use common_meta::cache_invalidator::{CacheInvalidator, Context, MultiCacheInvalidator};
use common_meta::instruction::CacheIdent;
use common_meta::cache::{LayeredCacheRegistryRef, ViewInfoCacheRef};
use common_meta::key::catalog_name::CatalogNameKey;
use common_meta::key::schema_name::SchemaNameKey;
use common_meta::key::table_info::TableInfoValue;
@@ -35,20 +32,20 @@ use common_meta::kv_backend::KvBackendRef;
use futures_util::stream::BoxStream;
use futures_util::{StreamExt, TryStreamExt};
use meta_client::client::MetaClient;
use moka::future::{Cache as AsyncCache, CacheBuilder};
use moka::sync::Cache;
use partition::manager::{PartitionRuleManager, PartitionRuleManagerRef};
use snafu::prelude::*;
use table::dist_table::DistTable;
use table::table::numbers::{NumbersTable, NUMBERS_TABLE_NAME};
use table::table_name::TableName;
use table::TableRef;
use crate::error::Error::{GetTableCache, TableCacheNotGet};
use crate::error::{
InvalidTableInfoInCatalogSnafu, ListCatalogsSnafu, ListSchemasSnafu, ListTablesSnafu, Result,
TableCacheNotGetSnafu, TableMetadataManagerSnafu,
CacheNotFoundSnafu, GetTableCacheSnafu, InvalidTableInfoInCatalogSnafu, ListCatalogsSnafu,
ListSchemasSnafu, ListTablesSnafu, Result, TableMetadataManagerSnafu,
};
use crate::information_schema::InformationSchemaProvider;
use crate::kvbackend::TableCacheRef;
use crate::CatalogManager;
/// Access all existing catalog, schema and tables.
@@ -64,64 +61,27 @@ pub struct KvBackendCatalogManager {
table_metadata_manager: TableMetadataManagerRef,
/// A sub-CatalogManager that handles system tables
system_catalog: SystemCatalog,
table_cache: AsyncCache<String, TableRef>,
}
struct TableCacheInvalidator {
table_cache: AsyncCache<String, TableRef>,
}
impl TableCacheInvalidator {
pub fn new(table_cache: AsyncCache<String, TableRef>) -> Self {
Self { table_cache }
}
}
#[async_trait::async_trait]
impl CacheInvalidator for TableCacheInvalidator {
async fn invalidate(
&self,
_ctx: &Context,
caches: Vec<CacheIdent>,
) -> common_meta::error::Result<()> {
for cache in caches {
if let CacheIdent::TableName(table_name) = cache {
let table_cache_key = format_full_table_name(
&table_name.catalog_name,
&table_name.schema_name,
&table_name.table_name,
);
self.table_cache.invalidate(&table_cache_key).await;
}
}
Ok(())
}
cache_registry: LayeredCacheRegistryRef,
}
const CATALOG_CACHE_MAX_CAPACITY: u64 = 128;
const TABLE_CACHE_MAX_CAPACITY: u64 = 65536;
const TABLE_CACHE_TTL: Duration = Duration::from_secs(10 * 60);
const TABLE_CACHE_TTI: Duration = Duration::from_secs(5 * 60);
impl KvBackendCatalogManager {
pub async fn new(
pub fn new(
mode: Mode,
meta_client: Option<Arc<MetaClient>>,
backend: KvBackendRef,
multi_cache_invalidator: Arc<MultiCacheInvalidator>,
cache_registry: LayeredCacheRegistryRef,
) -> Arc<Self> {
let table_cache: AsyncCache<String, TableRef> = CacheBuilder::new(TABLE_CACHE_MAX_CAPACITY)
.time_to_live(TABLE_CACHE_TTL)
.time_to_idle(TABLE_CACHE_TTI)
.build();
multi_cache_invalidator
.add_invalidator(Arc::new(TableCacheInvalidator::new(table_cache.clone())))
.await;
Arc::new_cyclic(|me| Self {
mode,
meta_client,
partition_manager: Arc::new(PartitionRuleManager::new(backend.clone())),
partition_manager: Arc::new(PartitionRuleManager::new(
backend.clone(),
cache_registry
.get()
.expect("Failed to get table_route_cache"),
)),
table_metadata_manager: Arc::new(TableMetadataManager::new(backend)),
system_catalog: SystemCatalog {
catalog_manager: me.clone(),
@@ -131,7 +91,7 @@ impl KvBackendCatalogManager {
me.clone(),
)),
},
table_cache,
cache_registry,
})
}
@@ -140,6 +100,12 @@ impl KvBackendCatalogManager {
&self.mode
}
pub fn view_info_cache(&self) -> Result<ViewInfoCacheRef> {
self.cache_registry.get().context(CacheNotFoundSnafu {
name: "view_info_cache",
})
}
/// Returns the `[MetaClient]`.
pub fn meta_client(&self) -> Option<Arc<MetaClient>> {
self.meta_client.clone()
@@ -218,7 +184,7 @@ impl CatalogManager for KvBackendCatalogManager {
}
async fn schema_exists(&self, catalog: &str, schema: &str) -> Result<bool> {
if self.system_catalog.schema_exist(schema) {
if self.system_catalog.schema_exists(schema) {
return Ok(true);
}
@@ -230,7 +196,7 @@ impl CatalogManager for KvBackendCatalogManager {
}
async fn table_exists(&self, catalog: &str, schema: &str, table: &str) -> Result<bool> {
if self.system_catalog.table_exist(schema, table) {
if self.system_catalog.table_exists(schema, table) {
return Ok(true);
}
@@ -245,60 +211,29 @@ impl CatalogManager for KvBackendCatalogManager {
async fn table(
&self,
catalog: &str,
schema: &str,
catalog_name: &str,
schema_name: &str,
table_name: &str,
) -> Result<Option<TableRef>> {
if let Some(table) = self.system_catalog.table(catalog, schema, table_name) {
if let Some(table) = self
.system_catalog
.table(catalog_name, schema_name, table_name)
{
return Ok(Some(table));
}
let init = async {
let table_name_key = TableNameKey::new(catalog, schema, table_name);
let Some(table_name_value) = self
.table_metadata_manager
.table_name_manager()
.get(table_name_key)
.await
.context(TableMetadataManagerSnafu)?
else {
return TableCacheNotGetSnafu {
key: table_name_key.to_string(),
}
.fail();
};
let table_id = table_name_value.table_id();
let table_cache: TableCacheRef = self.cache_registry.get().context(CacheNotFoundSnafu {
name: "table_cache",
})?;
let Some(table_info_value) = self
.table_metadata_manager
.table_info_manager()
.get(table_id)
.await
.context(TableMetadataManagerSnafu)?
.map(|v| v.into_inner())
else {
return TableCacheNotGetSnafu {
key: table_name_key.to_string(),
}
.fail();
};
build_table(table_info_value)
};
match self
.table_cache
.try_get_with_by_ref(&format_full_table_name(catalog, schema, table_name), init)
table_cache
.get_by_ref(&TableName {
catalog_name: catalog_name.to_string(),
schema_name: schema_name.to_string(),
table_name: table_name.to_string(),
})
.await
{
Ok(table) => Ok(Some(table)),
Err(err) => match err.as_ref() {
TableCacheNotGet { .. } => Ok(None),
_ => Err(err),
},
}
.map_err(|err| GetTableCache {
err_msg: err.to_string(),
})
.context(GetTableCacheSnafu)
}
fn tables<'a>(&'a self, catalog: &'a str, schema: &'a str) -> BoxStream<'a, Result<TableRef>> {
@@ -382,11 +317,11 @@ impl SystemCatalog {
}
}
fn schema_exist(&self, schema: &str) -> bool {
fn schema_exists(&self, schema: &str) -> bool {
schema == INFORMATION_SCHEMA_NAME
}
fn table_exist(&self, schema: &str, table: &str) -> bool {
fn table_exists(&self, schema: &str, table: &str) -> bool {
if schema == INFORMATION_SCHEMA_NAME {
self.information_schema_provider.table(table).is_some()
} else if schema == DEFAULT_SCHEMA_NAME {
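Note that the constructor keeps using `Arc::new_cyclic` so the embedded `SystemCatalog` can hold a weak pointer back to the manager that owns it. A minimal sketch of that std pattern:

```rust
use std::sync::{Arc, Weak};

struct SystemCatalog {
    manager: Weak<Manager>,
}

struct Manager {
    system_catalog: SystemCatalog,
}

fn main() {
    // `new_cyclic` exposes the weak handle to the Arc being constructed,
    // so a child field can point back at its parent without creating a
    // strong reference cycle.
    let manager = Arc::new_cyclic(|me: &Weak<Manager>| Manager {
        system_catalog: SystemCatalog { manager: me.clone() },
    });
    assert!(manager.system_catalog.manager.upgrade().is_some());
}
```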


@@ -0,0 +1,80 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::sync::Arc;
use common_meta::cache::{CacheContainer, Initializer, TableInfoCacheRef, TableNameCacheRef};
use common_meta::error::{Result as MetaResult, ValueNotExistSnafu};
use common_meta::instruction::CacheIdent;
use futures::future::BoxFuture;
use moka::future::Cache;
use snafu::OptionExt;
use table::dist_table::DistTable;
use table::table_name::TableName;
use table::TableRef;
pub type TableCacheRef = Arc<TableCache>;
/// [TableCache] caches the [TableName] to [TableRef] mapping.
pub type TableCache = CacheContainer<TableName, TableRef, CacheIdent>;
/// Constructs a [TableCache].
pub fn new_table_cache(
name: String,
cache: Cache<TableName, TableRef>,
table_info_cache: TableInfoCacheRef,
table_name_cache: TableNameCacheRef,
) -> TableCache {
let init = init_factory(table_info_cache, table_name_cache);
CacheContainer::new(name, cache, Box::new(invalidator), init, Box::new(filter))
}
fn init_factory(
table_info_cache: TableInfoCacheRef,
table_name_cache: TableNameCacheRef,
) -> Initializer<TableName, TableRef> {
Arc::new(move |table_name| {
let table_info_cache = table_info_cache.clone();
let table_name_cache = table_name_cache.clone();
Box::pin(async move {
let table_id = table_name_cache
.get_by_ref(table_name)
.await?
.context(ValueNotExistSnafu)?;
let table_info = table_info_cache
.get_by_ref(&table_id)
.await?
.context(ValueNotExistSnafu)?;
Ok(Some(DistTable::table(table_info)))
})
})
}
fn invalidator<'a>(
cache: &'a Cache<TableName, TableRef>,
ident: &'a CacheIdent,
) -> BoxFuture<'a, MetaResult<()>> {
Box::pin(async move {
if let CacheIdent::TableName(table_name) = ident {
cache.invalidate(table_name).await
}
Ok(())
})
}
fn filter(ident: &CacheIdent) -> bool {
matches!(ident, CacheIdent::TableName(_))
}
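`CacheContainer` generalizes what the old ad-hoc table cache did by hand: a keyed cache plus an initializer for misses, an invalidator, and a filter that decides which `CacheIdent` events concern this cache. A synchronous, std-only miniature of the same shape; the real container is async and moka-backed:

```rust
use std::collections::HashMap;
use std::hash::Hash;

/// A synchronous miniature of `CacheContainer`: a map, a loader for
/// misses, and invalidation driven by events that pass a filter.
struct MiniCache<K, V, Ident> {
    map: HashMap<K, V>,
    /// Loads a value on cache miss (the `Initializer` above).
    init: Box<dyn Fn(&K) -> Option<V>>,
    /// Decides whether an event concerns this cache (the `filter` above).
    filter: Box<dyn Fn(&Ident) -> bool>,
    /// Extracts the key to evict from a relevant event.
    key_of: Box<dyn Fn(&Ident) -> Option<K>>,
}

impl<K: Eq + Hash + Clone, V: Clone, Ident> MiniCache<K, V, Ident> {
    fn get(&mut self, key: &K) -> Option<V> {
        if let Some(v) = self.map.get(key) {
            return Some(v.clone());
        }
        let v = (self.init)(key)?;
        self.map.insert(key.clone(), v.clone());
        Some(v)
    }

    fn invalidate(&mut self, ident: &Ident) {
        if (self.filter)(ident) {
            if let Some(key) = (self.key_of)(ident) {
                self.map.remove(&key);
            }
        }
    }
}

fn main() {
    let mut cache: MiniCache<String, u32, String> = MiniCache {
        map: HashMap::new(),
        init: Box::new(|name| if name.as_str() == "t1" { Some(1) } else { None }),
        filter: Box::new(|event| event.starts_with("table:")),
        key_of: Box::new(|event| event.strip_prefix("table:").map(str::to_string)),
    };
    assert_eq!(cache.get(&"t1".to_string()), Some(1));
    cache.invalidate(&"table:t1".to_string());
    assert!(cache.map.is_empty());
}
```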


@@ -15,15 +15,25 @@
use std::collections::HashMap;
use std::sync::Arc;
use bytes::Bytes;
use common_catalog::format_full_table_name;
use common_query::logical_plan::SubstraitPlanDecoderRef;
use datafusion::common::{ResolvedTableReference, TableReference};
use datafusion::datasource::provider_as_source;
use datafusion::datasource::view::ViewTable;
use datafusion::datasource::{provider_as_source, TableProvider};
use datafusion::logical_expr::TableSource;
use session::context::QueryContext;
use snafu::{ensure, OptionExt};
use snafu::{ensure, OptionExt, ResultExt};
use table::metadata::TableType;
use table::table::adapter::DfTableProviderAdapter;
mod dummy_catalog;
use dummy_catalog::DummyCatalogList;
use crate::error::{QueryAccessDeniedSnafu, Result, TableNotExistSnafu};
use crate::error::{
CastManagerSnafu, DatafusionSnafu, DecodePlanSnafu, GetViewCacheSnafu, QueryAccessDeniedSnafu,
Result, TableNotExistSnafu, ViewInfoNotFoundSnafu,
};
use crate::kvbackend::KvBackendCatalogManager;
use crate::CatalogManagerRef;
pub struct DfTableSourceProvider {
@@ -32,6 +42,7 @@ pub struct DfTableSourceProvider {
disallow_cross_catalog_query: bool,
default_catalog: String,
default_schema: String,
plan_decoder: SubstraitPlanDecoderRef,
}
impl DfTableSourceProvider {
@@ -39,6 +50,7 @@ impl DfTableSourceProvider {
catalog_manager: CatalogManagerRef,
disallow_cross_catalog_query: bool,
query_ctx: &QueryContext,
plan_decoder: SubstraitPlanDecoderRef,
) -> Self {
Self {
catalog_manager,
@@ -46,6 +58,7 @@ impl DfTableSourceProvider {
resolved_tables: HashMap::new(),
default_catalog: query_ctx.current_catalog().to_owned(),
default_schema: query_ctx.current_schema().to_owned(),
plan_decoder,
}
}
@@ -94,8 +107,39 @@ impl DfTableSourceProvider {
table: format_full_table_name(catalog_name, schema_name, table_name),
})?;
let provider = DfTableProviderAdapter::new(table);
let source = provider_as_source(Arc::new(provider));
let provider: Arc<dyn TableProvider> = if table.table_info().table_type == TableType::View {
let catalog_manager = self
.catalog_manager
.as_any()
.downcast_ref::<KvBackendCatalogManager>()
.context(CastManagerSnafu)?;
let view_info = catalog_manager
.view_info_cache()?
.get(table.table_info().ident.table_id)
.await
.context(GetViewCacheSnafu)?
.context(ViewInfoNotFoundSnafu {
name: &table.table_info().name,
})?;
// Build the catalog list provider for deserialization.
let catalog_list = Arc::new(DummyCatalogList::new(self.catalog_manager.clone()));
let logical_plan = self
.plan_decoder
.decode(Bytes::from(view_info.view_info.clone()), catalog_list, true)
.await
.context(DecodePlanSnafu {
name: &table.table_info().name,
})?;
Arc::new(ViewTable::try_new(logical_plan, None).context(DatafusionSnafu)?)
} else {
Arc::new(DfTableProviderAdapter::new(table))
};
let source = provider_as_source(provider);
let _ = self.resolved_tables.insert(resolved_name, source.clone());
Ok(source)
}
@@ -103,6 +147,7 @@ impl DfTableSourceProvider {
#[cfg(test)]
mod tests {
use common_query::test_util::DummyDecoder;
use session::context::QueryContext;
use super::*;
@@ -112,8 +157,12 @@ mod tests {
fn test_validate_table_ref() {
let query_ctx = &QueryContext::with("greptime", "public");
let table_provider =
DfTableSourceProvider::new(MemoryCatalogManager::with_default_setup(), true, query_ctx);
let table_provider = DfTableSourceProvider::new(
MemoryCatalogManager::with_default_setup(),
true,
query_ctx,
DummyDecoder::arc(),
);
let table_ref = TableReference::bare("table_name");
let result = table_provider.resolve_table_ref(table_ref);
@@ -148,4 +197,99 @@ mod tests {
let table_ref = TableReference::full("greptime", "greptime_private", "columns");
assert!(table_provider.resolve_table_ref(table_ref).is_ok());
}
use std::collections::HashSet;
use arrow::datatypes::{DataType, Field, Schema, SchemaRef};
use cache::{build_fundamental_cache_registry, with_default_composite_cache_registry};
use common_config::Mode;
use common_meta::cache::{CacheRegistryBuilder, LayeredCacheRegistryBuilder};
use common_meta::key::TableMetadataManager;
use common_meta::kv_backend::memory::MemoryKvBackend;
use common_query::error::Result as QueryResult;
use common_query::logical_plan::SubstraitPlanDecoder;
use datafusion::catalog::CatalogProviderList;
use datafusion::logical_expr::builder::LogicalTableSource;
use datafusion::logical_expr::{col, lit, LogicalPlan, LogicalPlanBuilder};
struct MockDecoder;
impl MockDecoder {
pub fn arc() -> Arc<Self> {
Arc::new(MockDecoder)
}
}
#[async_trait::async_trait]
impl SubstraitPlanDecoder for MockDecoder {
async fn decode(
&self,
_message: bytes::Bytes,
_catalog_list: Arc<dyn CatalogProviderList>,
_optimize: bool,
) -> QueryResult<LogicalPlan> {
Ok(mock_plan())
}
}
fn mock_plan() -> LogicalPlan {
let schema = Schema::new(vec![
Field::new("id", DataType::Int32, true),
Field::new("name", DataType::Utf8, true),
]);
let table_source = LogicalTableSource::new(SchemaRef::new(schema));
let projection = None;
let builder =
LogicalPlanBuilder::scan("person", Arc::new(table_source), projection).unwrap();
builder
.filter(col("id").gt(lit(500)))
.unwrap()
.build()
.unwrap()
}
#[tokio::test]
async fn test_resolve_view() {
let query_ctx = &QueryContext::with("greptime", "public");
let backend = Arc::new(MemoryKvBackend::default());
let layered_cache_builder = LayeredCacheRegistryBuilder::default()
.add_cache_registry(CacheRegistryBuilder::default().build());
let fundamental_cache_registry = build_fundamental_cache_registry(backend.clone());
let layered_cache_registry = Arc::new(
with_default_composite_cache_registry(
layered_cache_builder.add_cache_registry(fundamental_cache_registry),
)
.unwrap()
.build(),
);
let catalog_manager = KvBackendCatalogManager::new(
Mode::Standalone,
None,
backend.clone(),
layered_cache_registry,
);
let table_metadata_manager = TableMetadataManager::new(backend);
let mut view_info = common_meta::key::test_utils::new_test_table_info(1024, vec![]);
view_info.table_type = TableType::View;
let logical_plan = vec![1, 2, 3];
// Create view metadata
table_metadata_manager
.create_view_metadata(view_info.clone().into(), logical_plan, HashSet::new())
.await
.unwrap();
let mut table_provider =
DfTableSourceProvider::new(catalog_manager, true, query_ctx, MockDecoder::arc());
// View not found
let table_ref = TableReference::bare("not_exists_view");
assert!(table_provider.resolve_table(table_ref).await.is_err());
let table_ref = TableReference::bare(view_info.name);
let source = table_provider.resolve_table(table_ref).await.unwrap();
assert_eq!(*source.get_logical_plan().unwrap(), mock_plan());
}
}

View File

@@ -0,0 +1,129 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Dummy catalog for region server.
use std::any::Any;
use std::sync::Arc;
use async_trait::async_trait;
use common_catalog::format_full_table_name;
use datafusion::catalog::schema::SchemaProvider;
use datafusion::catalog::{CatalogProvider, CatalogProviderList};
use datafusion::datasource::TableProvider;
use snafu::OptionExt;
use table::table::adapter::DfTableProviderAdapter;
use crate::error::TableNotExistSnafu;
use crate::CatalogManagerRef;
/// Delegates resolving requests to the `[CatalogManager]` unconditionally.
#[derive(Clone)]
pub struct DummyCatalogList {
catalog_manager: CatalogManagerRef,
}
impl DummyCatalogList {
/// Creates a new catalog list with the given catalog manager.
pub fn new(catalog_manager: CatalogManagerRef) -> Self {
Self { catalog_manager }
}
}
impl CatalogProviderList for DummyCatalogList {
fn as_any(&self) -> &dyn Any {
self
}
fn register_catalog(
&self,
_name: String,
_catalog: Arc<dyn CatalogProvider>,
) -> Option<Arc<dyn CatalogProvider>> {
None
}
fn catalog_names(&self) -> Vec<String> {
vec![]
}
fn catalog(&self, catalog_name: &str) -> Option<Arc<dyn CatalogProvider>> {
Some(Arc::new(DummyCatalogProvider {
catalog_name: catalog_name.to_string(),
catalog_manager: self.catalog_manager.clone(),
}))
}
}
/// A dummy catalog provider for [DummyCatalogList].
#[derive(Clone)]
struct DummyCatalogProvider {
catalog_name: String,
catalog_manager: CatalogManagerRef,
}
impl CatalogProvider for DummyCatalogProvider {
fn as_any(&self) -> &dyn Any {
self
}
fn schema_names(&self) -> Vec<String> {
vec![]
}
fn schema(&self, schema_name: &str) -> Option<Arc<dyn SchemaProvider>> {
Some(Arc::new(DummySchemaProvider {
catalog_name: self.catalog_name.clone(),
schema_name: schema_name.to_string(),
catalog_manager: self.catalog_manager.clone(),
}))
}
}
/// A dummy schema provider for [DummyCatalogList].
#[derive(Clone)]
struct DummySchemaProvider {
catalog_name: String,
schema_name: String,
catalog_manager: CatalogManagerRef,
}
#[async_trait]
impl SchemaProvider for DummySchemaProvider {
fn as_any(&self) -> &dyn Any {
self
}
fn table_names(&self) -> Vec<String> {
vec![]
}
async fn table(&self, name: &str) -> datafusion::error::Result<Option<Arc<dyn TableProvider>>> {
let table = self
.catalog_manager
.table(&self.catalog_name, &self.schema_name, name)
.await?
.with_context(|| TableNotExistSnafu {
table: format_full_table_name(&self.catalog_name, &self.schema_name, name),
})?;
let table_provider: Arc<dyn TableProvider> = Arc::new(DfTableProviderAdapter::new(table));
Ok(Some(table_provider))
}
fn table_exist(&self, _name: &str) -> bool {
true
}
}
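A minimal usage sketch (not part of this file) of how the dummy providers chain together; the catalog, schema, and table names are illustrative:
let list = DummyCatalogList::new(catalog_manager);
let catalog = list.catalog("greptime").expect("catalog() always returns Some");
let schema = catalog.schema("public").expect("schema() always returns Some");
// Only this step consults real metadata; a missing table surfaces as a
// TableNotExist error rather than Ok(None).
let table_provider = schema.table("metrics").await?;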

View File

@@ -31,9 +31,11 @@ moka = { workspace = true, features = ["future"] }
parking_lot = "0.12"
prometheus.workspace = true
prost.workspace = true
query.workspace = true
rand.workspace = true
serde_json.workspace = true
snafu.workspace = true
substrait.workspace = true
tokio.workspace = true
tokio-stream = { workspace = true, features = ["net"] }
tonic.workspace = true
@@ -42,7 +44,6 @@ tonic.workspace = true
common-grpc-expr.workspace = true
datanode.workspace = true
derive-new = "0.5"
substrait.workspace = true
tracing = "0.1"
tracing-subscriber = { version = "0.3", features = ["env-filter"] }

View File

@@ -14,14 +14,16 @@
use std::sync::Arc;
use api::v1::flow::flow_client::FlowClient as PbFlowClient;
use api::v1::health_check_client::HealthCheckClient;
use api::v1::prometheus_gateway_client::PrometheusGatewayClient;
use api::v1::region::region_client::RegionClient as PbRegionClient;
use api::v1::HealthCheckRequest;
use arrow_flight::flight_service_client::FlightServiceClient;
use common_grpc::channel_manager::ChannelManager;
use common_grpc::channel_manager::{ChannelConfig, ChannelManager, ClientTlsOption};
use parking_lot::RwLock;
use snafu::{OptionExt, ResultExt};
use tonic::codec::CompressionEncoding;
use tonic::transport::Channel;
use crate::load_balance::{LoadBalance, Loadbalancer};
@@ -86,6 +88,17 @@ impl Client {
Self::with_manager_and_urls(ChannelManager::new(), urls)
}
pub fn with_tls_and_urls<U, A>(urls: A, client_tls: ClientTlsOption) -> Result<Self>
where
U: AsRef<str>,
A: AsRef<[U]>,
{
let channel_config = ChannelConfig::default().client_tls_config(client_tls);
let channel_manager = ChannelManager::with_tls_config(channel_config)
.context(error::CreateTlsChannelSnafu)?;
Ok(Self::with_manager_and_urls(channel_manager, urls))
}
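// Example (not in this diff): building a TLS-enabled client with the new
// entry point. The `ClientTlsOption` field names below are assumptions for
// illustration; the real struct lives in common_grpc::channel_manager and is
// not shown here.
//
// let tls = ClientTlsOption {
//     server_ca_cert_path: "/certs/ca.pem".into(),  // hypothetical field
//     client_cert_path: "/certs/client.pem".into(), // hypothetical field
//     client_key_path: "/certs/client.key".into(),  // hypothetical field
// };
// let client = Client::with_tls_and_urls(["127.0.0.1:4001"], tls)?;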
pub fn with_manager_and_urls<U, A>(channel_manager: ChannelManager, urls: A) -> Self
where
U: AsRef<str>,
@@ -151,24 +164,44 @@ impl Client {
pub fn make_flight_client(&self) -> Result<FlightClient> {
let (addr, channel) = self.find_channel()?;
Ok(FlightClient {
addr,
client: FlightServiceClient::new(channel)
.max_decoding_message_size(self.max_grpc_recv_message_size())
.max_encoding_message_size(self.max_grpc_send_message_size()),
})
let client = FlightServiceClient::new(channel)
.max_decoding_message_size(self.max_grpc_recv_message_size())
.max_encoding_message_size(self.max_grpc_send_message_size())
.accept_compressed(CompressionEncoding::Zstd)
.send_compressed(CompressionEncoding::Zstd);
Ok(FlightClient { addr, client })
}
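// Note (beyond this diff): tonic rejects messages compressed with an encoding
// it was not configured to accept, so the zstd setting above only takes
// effect when the server side opts in as well, e.g. for the generated
// arrow-flight server:
//
// let svc = FlightServiceServer::new(service)
//     .accept_compressed(CompressionEncoding::Zstd)
//     .send_compressed(CompressionEncoding::Zstd);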
pub(crate) fn raw_region_client(&self) -> Result<PbRegionClient<Channel>> {
let (_, channel) = self.find_channel()?;
Ok(PbRegionClient::new(channel)
pub(crate) fn raw_region_client(&self) -> Result<(String, PbRegionClient<Channel>)> {
let (addr, channel) = self.find_channel()?;
let client = PbRegionClient::new(channel)
.max_decoding_message_size(self.max_grpc_recv_message_size())
.max_encoding_message_size(self.max_grpc_send_message_size()))
.max_encoding_message_size(self.max_grpc_send_message_size())
.accept_compressed(CompressionEncoding::Zstd)
.send_compressed(CompressionEncoding::Zstd);
Ok((addr, client))
}
pub(crate) fn raw_flow_client(&self) -> Result<(String, PbFlowClient<Channel>)> {
let (addr, channel) = self.find_channel()?;
let client = PbFlowClient::new(channel)
.max_decoding_message_size(self.max_grpc_recv_message_size())
.max_encoding_message_size(self.max_grpc_send_message_size())
.accept_compressed(CompressionEncoding::Zstd)
.send_compressed(CompressionEncoding::Zstd);
Ok((addr, client))
}
pub fn make_prometheus_gateway_client(&self) -> Result<PrometheusGatewayClient<Channel>> {
let (_, channel) = self.find_channel()?;
Ok(PrometheusGatewayClient::new(channel))
let client = PrometheusGatewayClient::new(channel)
.accept_compressed(CompressionEncoding::Gzip)
.accept_compressed(CompressionEncoding::Zstd)
.send_compressed(CompressionEncoding::Gzip)
.send_compressed(CompressionEncoding::Zstd);
Ok(client)
}
pub async fn health_check(&self) -> Result<()> {

View File

@@ -21,43 +21,45 @@ use common_meta::node_manager::{DatanodeRef, FlownodeRef, NodeManager};
use common_meta::peer::Peer;
use moka::future::{Cache, CacheBuilder};
use crate::flow::FlowRequester;
use crate::region::RegionRequester;
use crate::Client;
pub struct DatanodeClients {
pub struct NodeClients {
channel_manager: ChannelManager,
clients: Cache<Peer, Client>,
}
impl Default for DatanodeClients {
impl Default for NodeClients {
fn default() -> Self {
Self::new(ChannelConfig::new())
}
}
impl Debug for DatanodeClients {
impl Debug for NodeClients {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
f.debug_struct("DatanodeClients")
f.debug_struct("NodeClients")
.field("channel_manager", &self.channel_manager)
.finish()
}
}
#[async_trait::async_trait]
impl NodeManager for DatanodeClients {
impl NodeManager for NodeClients {
async fn datanode(&self, datanode: &Peer) -> DatanodeRef {
let client = self.get_client(datanode).await;
Arc::new(RegionRequester::new(client))
}
async fn flownode(&self, _node: &Peer) -> FlownodeRef {
// TODO(weny): Support it.
unimplemented!()
async fn flownode(&self, flownode: &Peer) -> FlownodeRef {
let client = self.get_client(flownode).await;
Arc::new(FlowRequester::new(client))
}
}
impl DatanodeClients {
impl NodeClients {
pub fn new(config: ChannelConfig) -> Self {
Self {
channel_manager: ChannelManager::with_config(config),

View File

@@ -23,8 +23,6 @@ use api::v1::{
};
use arrow_flight::Ticket;
use async_stream::stream;
use client::error::{ConvertFlightDataSnafu, Error, IllegalFlightMessagesSnafu, ServerSnafu};
use client::{from_grpc_response, Client, Result};
use common_error::ext::{BoxedError, ErrorExt};
use common_grpc::flight::{FlightDecoder, FlightMessage};
use common_query::Output;
@@ -37,7 +35,8 @@ use prost::Message;
use snafu::{ensure, ResultExt};
use tonic::transport::Channel;
pub const DEFAULT_LOOKBACK_STRING: &str = "5m";
use crate::error::{ConvertFlightDataSnafu, Error, IllegalFlightMessagesSnafu, ServerSnafu};
use crate::{from_grpc_response, Client, Result};
#[derive(Clone, Debug, Default)]
pub struct Database {
@@ -105,10 +104,18 @@ impl Database {
self.catalog = catalog.into();
}
pub fn catalog(&self) -> &String {
&self.catalog
}
pub fn set_schema(&mut self, schema: impl Into<String>) {
self.schema = schema.into();
}
pub fn schema(&self) -> &String {
&self.schema
}
pub fn set_timezone(&mut self, timezone: impl Into<String>) {
self.timezone = timezone.into();
}
@@ -156,6 +163,13 @@ impl Database {
.await
}
pub async fn logical_plan(&self, logical_plan: Vec<u8>) -> Result<Output> {
self.do_get(Request::Query(QueryRequest {
query: Some(Query::LogicalPlan(logical_plan)),
}))
.await
}
pub async fn create(&self, expr: CreateTableExpr) -> Result<Output> {
self.do_get(Request::Ddl(DdlRequest {
expr: Some(DdlExpr::CreateTable(expr)),
@@ -269,16 +283,12 @@ struct FlightContext {
#[cfg(test)]
mod tests {
use std::assert_matches::assert_matches;
use api::v1::auth_header::AuthScheme;
use api::v1::{AuthHeader, Basic};
use clap::Parser;
use client::Client;
use cmd::error::Result as CmdResult;
use cmd::options::{GlobalOptions, Options};
use cmd::{cli, standalone, App};
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use super::{Database, FlightContext};
use super::*;
#[test]
fn test_flight_ctx() {
@@ -294,79 +304,11 @@ mod tests {
auth_scheme: Some(basic),
});
assert!(matches!(
assert_matches!(
ctx.auth_header,
Some(AuthHeader {
auth_scheme: Some(AuthScheme::Basic(_)),
})
))
}
#[tokio::test(flavor = "multi_thread")]
async fn test_export_create_table_with_quoted_names() -> CmdResult<()> {
let output_dir = tempfile::tempdir().unwrap();
let standalone = standalone::Command::parse_from([
"standalone",
"start",
"--data-home",
&*output_dir.path().to_string_lossy(),
]);
let Options::Standalone(standalone_opts) =
standalone.load_options(&GlobalOptions::default())?
else {
unreachable!()
};
let mut instance = standalone.build(*standalone_opts).await?;
instance.start().await?;
let client = Client::with_urls(["127.0.0.1:4001"]);
let database = Database::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, client);
database
.sql(r#"CREATE DATABASE "cli.export.create_table";"#)
.await
.unwrap();
database
.sql(
r#"CREATE TABLE "cli.export.create_table"."a.b.c"(
ts TIMESTAMP,
TIME INDEX (ts)
) engine=mito;
"#,
)
.await
.unwrap();
let output_dir = tempfile::tempdir().unwrap();
let cli = cli::Command::parse_from([
"cli",
"export",
"--addr",
"127.0.0.1:4000",
"--output-dir",
&*output_dir.path().to_string_lossy(),
"--target",
"create-table",
]);
let mut cli_app = cli.build().await?;
cli_app.start().await?;
instance.stop().await?;
let output_file = output_dir
.path()
.join("greptime-cli.export.create_table.sql");
let res = std::fs::read_to_string(output_file).unwrap();
let expect = r#"CREATE TABLE IF NOT EXISTS "a.b.c" (
"ts" TIMESTAMP(3) NOT NULL,
TIME INDEX ("ts")
)
ENGINE=mito
;
"#;
assert_eq!(res.trim(), expect.trim());
Ok(())
)
}
}
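For reference, the new logical_plan() method above ships pre-encoded plan bytes through the same do_get path as sql(). A minimal sketch, assuming the plan was already serialized elsewhere (encode_plan is a hypothetical helper, not part of this crate):
let client = Client::with_urls(["127.0.0.1:4001"]);
let database = Database::new("greptime", "public", client);
let plan_bytes: Vec<u8> = encode_plan(&logical_plan)?; // hypothetical helper
let output = database.logical_plan(plan_bytes).await?;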

View File

@@ -18,7 +18,7 @@ use common_error::ext::{BoxedError, ErrorExt};
use common_error::status_code::StatusCode;
use common_error::{GREPTIME_DB_HEADER_ERROR_CODE, GREPTIME_DB_HEADER_ERROR_MSG};
use common_macro::stack_trace_debug;
use snafu::{Location, Snafu};
use snafu::{location, Location, Snafu};
use tonic::{Code, Status};
#[derive(Snafu)]
@@ -26,7 +26,11 @@ use tonic::{Code, Status};
#[stack_trace_debug]
pub enum Error {
#[snafu(display("Illegal Flight messages, reason: {}", reason))]
IllegalFlightMessages { reason: String, location: Location },
IllegalFlightMessages {
reason: String,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Failed to do Flight get, code: {}", tonic_code))]
FlightGet {
@@ -37,47 +41,94 @@ pub enum Error {
#[snafu(display("Failure occurs during handling request"))]
HandleRequest {
#[snafu(implicit)]
location: Location,
source: BoxedError,
},
#[snafu(display("Failed to convert FlightData"))]
ConvertFlightData {
#[snafu(implicit)]
location: Location,
source: common_grpc::Error,
},
#[snafu(display("Column datatype error"))]
ColumnDataType {
#[snafu(implicit)]
location: Location,
source: api::error::Error,
},
#[snafu(display("Illegal GRPC client state: {}", err_msg))]
IllegalGrpcClientState { err_msg: String, location: Location },
IllegalGrpcClientState {
err_msg: String,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Missing required field in protobuf, field: {}", field))]
MissingField { field: String, location: Location },
MissingField {
field: String,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Failed to create gRPC channel, peer address: {}", addr))]
CreateChannel {
addr: String,
#[snafu(implicit)]
location: Location,
source: common_grpc::error::Error,
},
#[snafu(display("Failed to request RegionServer, code: {}", code))]
RegionServer { code: Code, source: BoxedError },
#[snafu(display("Failed to create Tls channel manager"))]
CreateTlsChannel {
#[snafu(implicit)]
location: Location,
source: common_grpc::error::Error,
},
#[snafu(display("Failed to request RegionServer {}, code: {}", addr, code))]
RegionServer {
addr: String,
code: Code,
source: BoxedError,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Failed to request FlowServer {}, code: {}", addr, code))]
FlowServer {
addr: String,
code: Code,
source: BoxedError,
#[snafu(implicit)]
location: Location,
},
// Server error carried in Tonic Status's metadata.
#[snafu(display("{}", msg))]
Server { code: StatusCode, msg: String },
Server {
code: StatusCode,
msg: String,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Illegal Database response: {err_msg}"))]
IllegalDatabaseResponse { err_msg: String },
IllegalDatabaseResponse {
err_msg: String,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Failed to send request with streaming: {}", err_msg))]
ClientStreaming { err_msg: String, location: Location },
ClientStreaming {
err_msg: String,
#[snafu(implicit)]
location: Location,
},
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -94,10 +145,11 @@ impl ErrorExt for Error {
Error::Server { code, .. } => *code,
Error::FlightGet { source, .. }
| Error::HandleRequest { source, .. }
| Error::RegionServer { source, .. } => source.status_code(),
Error::CreateChannel { source, .. } | Error::ConvertFlightData { source, .. } => {
source.status_code()
}
| Error::RegionServer { source, .. }
| Error::FlowServer { source, .. } => source.status_code(),
Error::CreateChannel { source, .. }
| Error::ConvertFlightData { source, .. }
| Error::CreateTlsChannel { source, .. } => source.status_code(),
Error::IllegalGrpcClientState { .. } => StatusCode::Unexpected,
}
}
@@ -128,7 +180,11 @@ impl From<Status> for Error {
let msg = get_metadata_value(&e, GREPTIME_DB_HEADER_ERROR_MSG)
.unwrap_or_else(|| e.message().to_string());
Self::Server { code, msg }
Self::Server {
code,
msg,
location: location!(),
}
}
}
@@ -146,6 +202,9 @@ impl Error {
} | Self::RegionServer {
code: Code::Unavailable,
..
} | Self::RegionServer {
code: Code::Unknown,
..
}
)
}

104 src/client/src/flow.rs Normal file
View File

@@ -0,0 +1,104 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use api::v1::flow::{FlowRequest, FlowResponse};
use api::v1::region::InsertRequests;
use common_error::ext::BoxedError;
use common_meta::node_manager::Flownode;
use snafu::{location, Location, ResultExt};
use crate::error::Result;
use crate::Client;
#[derive(Debug)]
pub struct FlowRequester {
client: Client,
}
#[async_trait::async_trait]
impl Flownode for FlowRequester {
async fn handle(&self, request: FlowRequest) -> common_meta::error::Result<FlowResponse> {
self.handle_inner(request)
.await
.map_err(BoxedError::new)
.context(common_meta::error::ExternalSnafu)
}
async fn handle_inserts(
&self,
request: InsertRequests,
) -> common_meta::error::Result<FlowResponse> {
self.handle_inserts_inner(request)
.await
.map_err(BoxedError::new)
.context(common_meta::error::ExternalSnafu)
}
}
impl FlowRequester {
pub fn new(client: Client) -> Self {
Self { client }
}
async fn handle_inner(&self, request: FlowRequest) -> Result<FlowResponse> {
let (addr, mut client) = self.client.raw_flow_client()?;
let response = client
.handle_create_remove(request)
.await
.map_err(|e| {
let code = e.code();
let err: crate::error::Error = e.into();
crate::error::Error::FlowServer {
addr,
code,
source: BoxedError::new(err),
location: location!(),
}
})?
.into_inner();
Ok(response)
}
async fn handle_inserts_inner(&self, request: InsertRequests) -> Result<FlowResponse> {
let (addr, mut client) = self.client.raw_flow_client()?;
let requests = api::v1::flow::InsertRequests {
requests: request
.requests
.into_iter()
.map(|insert| api::v1::flow::InsertRequest {
region_id: insert.region_id,
rows: insert.rows,
})
.collect(),
};
let response = client
.handle_mirror_request(requests)
.await
.map_err(|e| {
let code = e.code();
let err: crate::error::Error = e.into();
crate::error::Error::FlowServer {
addr,
code,
source: BoxedError::new(err),
location: location!(),
}
})?
.into_inner();
Ok(response)
}
}
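A minimal sketch of how this requester is consumed: NodeClients::flownode() (see the client_manager change above) wraps a cached Client in a FlowRequester behind the common_meta Flownode trait. The peer value is illustrative:
let clients = NodeClients::default();
// `peer` identifies the target flownode (a common_meta::peer::Peer).
let flownode = clients.flownode(&peer).await;
let response = flownode.handle(FlowRequest::default()).await?;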

View File

@@ -12,9 +12,14 @@
// See the License for the specific language governing permissions and
// limitations under the License.
#![feature(assert_matches)]
mod client;
pub mod client_manager;
#[cfg(feature = "testing")]
mod database;
pub mod error;
pub mod flow;
pub mod load_balance;
mod metrics;
pub mod region;
@@ -29,6 +34,8 @@ pub use common_recordbatch::{RecordBatches, SendableRecordBatchStream};
use snafu::OptionExt;
pub use self::client::Client;
#[cfg(feature = "testing")]
pub use self::database::Database;
pub use self::error::{Error, Result};
use crate::error::{IllegalDatabaseResponseSnafu, ServerSnafu};

View File

@@ -15,7 +15,7 @@
use std::sync::Arc;
use api::region::RegionResponse;
use api::v1::region::{QueryRequest, RegionRequest};
use api::v1::region::RegionRequest;
use api::v1::ResponseHeader;
use arc_swap::ArcSwapOption;
use arrow_flight::Ticket;
@@ -26,12 +26,15 @@ use common_error::status_code::StatusCode;
use common_grpc::flight::{FlightDecoder, FlightMessage};
use common_meta::error::{self as meta_error, Result as MetaResult};
use common_meta::node_manager::Datanode;
use common_query::request::QueryRequest;
use common_recordbatch::error::ExternalSnafu;
use common_recordbatch::{RecordBatchStreamWrapper, SendableRecordBatchStream};
use common_telemetry::error;
use common_telemetry::tracing_context::TracingContext;
use prost::Message;
use query::query_engine::DefaultSerializer;
use snafu::{location, Location, OptionExt, ResultExt};
use substrait::{DFLogicalSubstraitConvertor, SubstraitPlan};
use tokio_stream::StreamExt;
use crate::error::{
@@ -63,6 +66,17 @@ impl Datanode for RegionRequester {
}
async fn handle_query(&self, request: QueryRequest) -> MetaResult<SendableRecordBatchStream> {
let plan = DFLogicalSubstraitConvertor
.encode(&request.plan, DefaultSerializer)
.map_err(BoxedError::new)
.context(meta_error::ExternalSnafu)?
.to_vec();
let request = api::v1::region::QueryRequest {
header: request.header,
region_id: request.region_id.as_u64(),
plan,
};
let ticket = Ticket {
ticket: request.encode_to_vec().into(),
};
@@ -177,7 +191,7 @@ impl RegionRequester {
.with_label_values(&[request_type.as_str()])
.start_timer();
let mut client = self.client.raw_region_client()?;
let (addr, mut client) = self.client.raw_region_client()?;
let response = client
.handle(request)
@@ -187,8 +201,10 @@ impl RegionRequester {
let err: error::Error = e.into();
// Uses `Error::RegionServer` instead of `Error::Server`
error::Error::RegionServer {
addr,
code,
source: BoxedError::new(err),
location: location!(),
}
})?
.into_inner();
@@ -272,7 +288,7 @@ mod test {
err_msg: "blabla".to_string(),
}),
}));
let Server { code, msg } = result.unwrap_err() else {
let Server { code, msg, .. } = result.unwrap_err() else {
unreachable!()
};
assert_eq!(code, StatusCode::Internal);
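To make the new encode step in handle_query above explicit: the in-memory request (common_query::request::QueryRequest) carries a DataFusion logical plan, while the wire request (api::v1::region::QueryRequest) carries its substrait encoding. A condensed sketch, with `?` standing in for the BoxedError/context plumbing shown above:
let plan_bytes = DFLogicalSubstraitConvertor
.encode(&request.plan, DefaultSerializer)? // substrait-encoded plan
.to_vec();
let wire_request = api::v1::region::QueryRequest {
header: request.header,
region_id: request.region_id.as_u64(),
plan: plan_bytes,
};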

Some files were not shown because too many files have changed in this diff.