Compare commits

...

228 Commits

Author SHA1 Message Date
Weny Xu
64e3eb9fa2 chore: pick #6415 to release/v0.15 (#6686)
ci: add check-version script to check whether to push the latest image (#6415)

Signed-off-by: liyang <daviderli614@gmail.com>
Co-authored-by: liyang <daviderli614@gmail.com>
2025-08-07 12:57:09 +00:00
Weny Xu
6c57f4b7e4 feat: pick automated metadata recovery feature (#6676)
* feat: persist column ids in table metadata (#6457)

* feat: persist column ids in table metadata

Signed-off-by: WenyXu <wenymedia@gmail.com>

* chore: apply suggestions from CR

Signed-off-by: WenyXu <wenymedia@gmail.com>

---------

Signed-off-by: WenyXu <wenymedia@gmail.com>

* feat: add column metadata to response extensions (#6451)

Signed-off-by: WenyXu <wenymedia@gmail.com>

* refactor(meta): extract `AlterTableExecutor` from `AlterTableProcedure` (#6470)

* refactor(meta): extract `AlterTableExecutor` from `AlterTableProcedure`

Signed-off-by: WenyXu <wenymedia@gmail.com>

* chore: apply suggestions from CR

Signed-off-by: WenyXu <wenymedia@gmail.com>

---------

Signed-off-by: WenyXu <wenymedia@gmail.com>

* refactor(meta): separate validation and execution logic in alter logical tables procedure (#6478)

* refactor(meta): separate validation and execution logic in alter logical tables procedure

Signed-off-by: WenyXu <wenymedia@gmail.com>

* chore: apply suggestions from CR

Signed-off-by: WenyXu <wenymedia@gmail.com>

---------

Signed-off-by: WenyXu <wenymedia@gmail.com>

* fix: fix state transition in create table procedure (#6523)

Signed-off-by: WenyXu <wenymedia@gmail.com>

* feat: add table reconciliation utilities (#6519)

* feat: add table reconciliation utilities

Signed-off-by: WenyXu <wenymedia@gmail.com>

* fix: fix unit tests

Signed-off-by: WenyXu <wenymedia@gmail.com>

* chore: apply suggestions from CR

Signed-off-by: WenyXu <wenymedia@gmail.com>

* chore: apply suggestions from CR

Signed-off-by: WenyXu <wenymedia@gmail.com>

* chore: update comment

Signed-off-by: WenyXu <wenymedia@gmail.com>

---------

Signed-off-by: WenyXu <wenymedia@gmail.com>

* feat: Support ListMetadataRequest to retrieve regions' metadata (#6348)

* feat: support list metadata in region server

Signed-off-by: evenyag <realevenyag@gmail.com>

* test: add test for list region metadata

Signed-off-by: evenyag <realevenyag@gmail.com>

* feat: return null if region not exists

Signed-off-by: evenyag <realevenyag@gmail.com>

* chore: update greptime-proto

Signed-off-by: evenyag <realevenyag@gmail.com>

---------

Signed-off-by: evenyag <realevenyag@gmail.com>

* refactor: support multiple index operations in single alter region request (#6487)

* refactor: support multiple index operations in single alter region request

Signed-off-by: WenyXu <wenymedia@gmail.com>

* chore: apply suggestions from CR

Signed-off-by: WenyXu <wenymedia@gmail.com>

* chore: apply suggestions from CR

Signed-off-by: WenyXu <wenymedia@gmail.com>

* chore: apply suggestions from CR

Signed-off-by: WenyXu <wenymedia@gmail.com>

* chore: update greptime-proto

Signed-off-by: WenyXu <wenymedia@gmail.com>

---------

Signed-off-by: WenyXu <wenymedia@gmail.com>

* feat: implement pause/resume functionality for procedure manager (#6393)

* feat: implement pause/resume functionality for procedure manager

Signed-off-by: WenyXu <wenymedia@gmail.com>

* chore: apply suggestions from CR

Signed-off-by: WenyXu <wenymedia@gmail.com>

* chore: apply suggestions from CR

Signed-off-by: WenyXu <wenymedia@gmail.com>

---------

Signed-off-by: WenyXu <wenymedia@gmail.com>

* feat: move metasrv admin to http server while keeping tonic for backward compatibility (#6466)

* feat: move metasrv admin to http server while keeping tonic for backward compatibility

Signed-off-by: lyang24 <lanqingy93@gmail.com>

* refactor with nest method

Signed-off-by: lyang24 <lanqingy93@gmail.com>

---------

Signed-off-by: lyang24 <lanqingy93@gmail.com>
Co-authored-by: lyang24 <lanqingy@usc.edu>

* feat: allow ignoring nonexistent regions in recovery mode (#6592)

* feat: allow ignoring nonexistent regions

Signed-off-by: WenyXu <wenymedia@gmail.com>

* feat: ignore nonexistent regions during startup in recovery mode

Signed-off-by: WenyXu <wenymedia@gmail.com>

* feat: allow enabling recovery mode via http api

Signed-off-by: WenyXu <wenymedia@gmail.com>

---------

Signed-off-by: WenyXu <wenymedia@gmail.com>

* feat: allow setting next table id via http api (#6597)

* feat: allow reset next table id via http api

Signed-off-by: WenyXu <wenymedia@gmail.com>

* chore: apply suggestions from CR

Signed-off-by: WenyXu <wenymedia@gmail.com>

* chore: apply suggestions from CR

Signed-off-by: WenyXu <wenymedia@gmail.com>

---------

Signed-off-by: WenyXu <wenymedia@gmail.com>

* feat: ignore internal keys in metadata snapshots (#6606)

feat: ignore dumping internal keys

Signed-off-by: WenyXu <wenymedia@gmail.com>

* feat: introduce reconcile table procedure (#6584)

* feat: introduce `SyncColumns`

Signed-off-by: WenyXu <wenymedia@gmail.com>

* feat: introduce reconcile table procedure

Signed-off-by: WenyXu <wenymedia@gmail.com>

* chore: apply suggestions from CR

Signed-off-by: WenyXu <wenymedia@gmail.com>

* chore: apply suggestions from CR

Signed-off-by: WenyXu <wenymedia@gmail.com>

* chore: apply suggestions from CR

Signed-off-by: WenyXu <wenymedia@gmail.com>

* chore: add tests

Signed-off-by: WenyXu <wenymedia@gmail.com>

* chore: add comments

Signed-off-by: WenyXu <wenymedia@gmail.com>

* chore: update proto

Signed-off-by: WenyXu <wenymedia@gmail.com>

* chore: apply suggestions

Signed-off-by: WenyXu <wenymedia@gmail.com>

---------

Signed-off-by: WenyXu <wenymedia@gmail.com>

* feat: introduce reconcile database procedure (#6612)

* feat: introduce reconcile database procedure

Signed-off-by: WenyXu <wenymedia@gmail.com>

* feat: hold the schema lock

Signed-off-by: WenyXu <wenymedia@gmail.com>

* chore: add todo

Signed-off-by: WenyXu <wenymedia@gmail.com>

* chore: update comments

Signed-off-by: WenyXu <wenymedia@gmail.com>

* chore: rename to `fast_fail`

Signed-off-by: WenyXu <wenymedia@gmail.com>

* chore: add logs

Signed-off-by: WenyXu <wenymedia@gmail.com>

---------

Signed-off-by: WenyXu <wenymedia@gmail.com>

* feat: introduce reconcile logical tables procedure (#6588)

* feat: introduce reconcile logical tables procedure

Signed-off-by: WenyXu <wenymedia@gmail.com>

* chore: apply suggestions from CR

Signed-off-by: WenyXu <wenymedia@gmail.com>

* fix: lock logical tables

Signed-off-by: WenyXu <wenymedia@gmail.com>

* chore: apply suggestions from CR

Signed-off-by: WenyXu <wenymedia@gmail.com>

---------

Signed-off-by: WenyXu <wenymedia@gmail.com>

* refactor: remove procedure executor from DDL manager (#6625)

* refactor: remove procedure executor from DDL manager

Signed-off-by: WenyXu <wenymedia@gmail.com>

* chore: clippy

Signed-off-by: WenyXu <wenymedia@gmail.com>

* chore: apply suggestions from CR

Signed-off-by: WenyXu <wenymedia@gmail.com>

---------

Signed-off-by: WenyXu <wenymedia@gmail.com>

* feat: introduce reconcile catalog procedure (#6613)

Signed-off-by: WenyXu <wenymedia@gmail.com>

* feat: introduce reconciliation interface (#6614)

* feat: introduce reconcile interface

Signed-off-by: WenyXu <wenymedia@gmail.com>

* chore: apply suggestions from CR

Signed-off-by: WenyXu <wenymedia@gmail.com>

* chore: apply suggestions from CR

Signed-off-by: WenyXu <wenymedia@gmail.com>

* chore: upgrade proto

Signed-off-by: WenyXu <wenymedia@gmail.com>

---------

Signed-off-by: WenyXu <wenymedia@gmail.com>

* fix: fix sequence peek method to return correct values when sequence is not initialized (#6643)

fix: improve sequence peek method to handle uninitialized sequences

Signed-off-by: WenyXu <wenymedia@gmail.com>

* fix: sequence peek with remote value (#6648)

* fix: sequence peek with remote value

* chore: more ut

* chore: add more ut

* feat: add metrics for reconciliation procedures (#6652)

* feat: add metrics for reconciliation procedures

Signed-off-by: WenyXu <wenymedia@gmail.com>

* refactor: improve error handling

Signed-off-by: WenyXu <wenymedia@gmail.com>

* fix(datanode): handle ignore_nonexistent_region flag in open_all_regions

Signed-off-by: WenyXu <wenymedia@gmail.com>

* chore: apply suggestions from CR

Signed-off-by: WenyXu <wenymedia@gmail.com>

* refactor: merge metrics

Signed-off-by: WenyXu <wenymedia@gmail.com>

* chore: minor refactor

Signed-off-by: WenyXu <wenymedia@gmail.com>

* chore: apply suggestions from CR

Signed-off-by: WenyXu <wenymedia@gmail.com>

---------

Signed-off-by: WenyXu <wenymedia@gmail.com>

* feat(metric-engine): add metadata region cache (#6657)

* feat(metric-engine): add metadata region cache

Signed-off-by: WenyXu <wenymedia@gmail.com>

* feat: use lru

Signed-off-by: WenyXu <wenymedia@gmail.com>

* chore: rename

Signed-off-by: WenyXu <wenymedia@gmail.com>

* chore: rename

Signed-off-by: WenyXu <wenymedia@gmail.com>

* chore: add comments

Signed-off-by: WenyXu <wenymedia@gmail.com>

* chore: default ttl

Signed-off-by: WenyXu <wenymedia@gmail.com>

* chore: longer ttl

Signed-off-by: WenyXu <wenymedia@gmail.com>

---------

Signed-off-by: WenyXu <wenymedia@gmail.com>

* chore: update greptime-proto

Signed-off-by: WenyXu <wenymedia@gmail.com>

* chore: bump version to 0.15.5

Signed-off-by: WenyXu <wenymedia@gmail.com>

---------

Signed-off-by: WenyXu <wenymedia@gmail.com>
Signed-off-by: evenyag <realevenyag@gmail.com>
Signed-off-by: lyang24 <lanqingy93@gmail.com>
Co-authored-by: Yingwen <realevenyag@gmail.com>
Co-authored-by: Lanqing Yang <lanqingy93@gmail.com>
Co-authored-by: lyang24 <lanqingy@usc.edu>
Co-authored-by: jeremyhi <jiachun_feng@proton.me>
2025-08-07 20:16:48 +08:00
zyy17
a7631239c3 fix: unable to record slow query (#6590)
* refactor: add process manager for prometheus query

Signed-off-by: zyy17 <zyylsxm@gmail.com>

* refactor: modify `register_query()` API to accept parsed statement (`catalog::process_manager::QueryStatement`)

Signed-off-by: zyy17 <zyylsxm@gmail.com>

* refactor: add the slow query timer in the `Ticket` of ProcessManager

Signed-off-by: zyy17 <zyylsxm@gmail.com>

* test: add integration tests

Signed-off-by: zyy17 <zyylsxm@gmail.com>

* refactor: add process manager in `do_exec_plan()`

Signed-off-by: zyy17 <zyylsxm@gmail.com>

* tests: add `test_postgres_slow_query` integration test

Signed-off-by: zyy17 <zyylsxm@gmail.com>

* chore: polish the code

Signed-off-by: zyy17 <zyylsxm@gmail.com>

* refactor: create a query ticket and slow query timer if the statement is a query in `query_statement()`

Signed-off-by: zyy17 <zyylsxm@gmail.com>

* fix: sqlness errors

Signed-off-by: zyy17 <zyylsxm@gmail.com>

---------

Signed-off-by: zyy17 <zyylsxm@gmail.com>
2025-08-07 10:15:46 +08:00
evenyag
5fc0c5706c chore: bump version to v0.15.4
Signed-off-by: evenyag <realevenyag@gmail.com>
2025-08-04 22:19:40 +08:00
Ning Sun
4d768b2c31 feat: schema/database support for label_values (#6631)
* feat: initial support for __schema__ in label values

* feat: filter database with matches

* refactor: skip unnecessary check

* fix: resolve schema matcher in label values

* test: add a test case for table not exists

* refactor: add matchop check on db label

* chore: merge main

Signed-off-by: evenyag <realevenyag@gmail.com>
2025-08-04 22:19:40 +08:00
Yingwen
b62f219810 feat: Add option to limit the files reading simultaneously (#6635)
* feat: limits the max number of files to scan at the same time

Signed-off-by: evenyag <realevenyag@gmail.com>

* feat: make max_concurrent_scan_files configurable

Signed-off-by: evenyag <realevenyag@gmail.com>

* feat: reduce concurrent scan files to 128

Signed-off-by: evenyag <realevenyag@gmail.com>

* docs: update config example

Signed-off-by: evenyag <realevenyag@gmail.com>

* test: add test for max_concurrent_scan_files

Signed-off-by: evenyag <realevenyag@gmail.com>

* style: fix clippy

Signed-off-by: evenyag <realevenyag@gmail.com>

* test: update config test

Signed-off-by: evenyag <realevenyag@gmail.com>

---------

Signed-off-by: evenyag <realevenyag@gmail.com>
2025-08-04 22:19:40 +08:00
Ruihang Xia
5d330fad17 feat: absent function in PromQL (#6618)
* feat: absent function in PromQL

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* impl serde

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* sqlness test

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* ai suggests

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* resolve PR comments

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* comment out some tests

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
Signed-off-by: evenyag <realevenyag@gmail.com>
2025-08-04 22:19:40 +08:00
Ruihang Xia
dfdfae1a7b feat: support __schema__ and __database__ in Prom Remote Read (#6610)
* feat: support __schema__ and __database__ in Prom remote R/W

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix integration test

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* revert remote write changes

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* check matcher type

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
Signed-off-by: evenyag <realevenyag@gmail.com>
2025-08-04 22:19:40 +08:00
Ruihang Xia
822f0caf4b fix: only return the __name__ label when there is one (#6629)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
Signed-off-by: evenyag <realevenyag@gmail.com>
2025-08-04 22:19:40 +08:00
yihong
09f3d72d2d fix: close issue #6555 return empty result (#6569)
* fix: close issue #6555 return empty result

Signed-off-by: yihong0618 <zouzou0208@gmail.com>

* fix: only start one instance in one regex sqlness test (#6570)

Signed-off-by: yihong0618 <zouzou0208@gmail.com>

* refactor: refactor partition mod to use PartitionExpr instead of PartitionDef (#6554)

* refactor: refactor partition mod to use PartitionExpr instead of PartitionDef

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* fix snafu

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* Puts expression into PbPartition

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* address comments

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* fix compile

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* update proto

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* add serde test

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* add serde test

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

---------

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* fix: address comments

Signed-off-by: yihong0618 <zouzou0208@gmail.com>

---------

Signed-off-by: yihong0618 <zouzou0208@gmail.com>
Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>
Co-authored-by: Zhenchi <zhongzc_arch@outlook.com>
Signed-off-by: evenyag <realevenyag@gmail.com>
2025-07-24 15:00:32 +08:00
Yingwen
ca0c1282ed chore: bump version to 0.15.3 (#6580)
Signed-off-by: evenyag <realevenyag@gmail.com>
2025-07-24 11:24:07 +08:00
Yingwen
b719c020ba chore: cherry pick #6540, #6550, #6551, #6556, #6563, #6534 to v0.15 branch (#6577)
* feat: add metrics for request wait time and adjust stall metrics (#6540)

* feat: add metric greptime_mito_request_wait_time to observe wait time

Signed-off-by: evenyag <realevenyag@gmail.com>

* feat: add worker to wait time metric

Signed-off-by: evenyag <realevenyag@gmail.com>

* refactor: rename stall gauge to greptime_mito_write_stalling_count

Signed-off-by: evenyag <realevenyag@gmail.com>

* feat: change greptime_mito_write_stall_total to total stalled requests

Signed-off-by: evenyag <realevenyag@gmail.com>

* refactor: merge lazy static blocks

Signed-off-by: evenyag <realevenyag@gmail.com>

---------

Signed-off-by: evenyag <realevenyag@gmail.com>

* fix: estimate mem size for bulk ingester (#6550)

Signed-off-by: evenyag <realevenyag@gmail.com>

* fix: flow mirror cache (#6551)

* fix: invalid cache when flownode change address

Signed-off-by: discord9 <discord9@163.com>

* update comments

Signed-off-by: discord9 <discord9@163.com>

* fix

Signed-off-by: discord9 <discord9@163.com>

* refactor: add log&rename

Signed-off-by: discord9 <discord9@163.com>

* stuff

Signed-off-by: discord9 <discord9@163.com>

---------

Signed-off-by: discord9 <discord9@163.com>
Signed-off-by: evenyag <realevenyag@gmail.com>

* feat: impl timestamp function for promql (#6556)

* feat: impl timestamp function for promql

Signed-off-by: Dennis Zhuang <killme2008@gmail.com>

* chore: style and typo

Signed-off-by: Dennis Zhuang <killme2008@gmail.com>

* fix: test

Signed-off-by: Dennis Zhuang <killme2008@gmail.com>

* docs: update comments

Signed-off-by: Dennis Zhuang <killme2008@gmail.com>

* chore: comment

Signed-off-by: Dennis Zhuang <killme2008@gmail.com>

---------

Signed-off-by: Dennis Zhuang <killme2008@gmail.com>
Signed-off-by: evenyag <realevenyag@gmail.com>

* feat: MergeScan print input (#6563)

* feat: MergeScan print input

Signed-off-by: discord9 <discord9@163.com>

* test: fix ut

Signed-off-by: discord9 <discord9@163.com>

---------

Signed-off-by: discord9 <discord9@163.com>
Signed-off-by: evenyag <realevenyag@gmail.com>

* fix: aggr group by all partition cols use partial commutative (#6534)

* fix: aggr group by all partition cols use partial commutative

Signed-off-by: discord9 <discord9@163.com>

* test: bugged case

Signed-off-by: discord9 <discord9@163.com>

* test: sqlness fix

Signed-off-by: discord9 <discord9@163.com>

* test: more redacted

Signed-off-by: discord9 <discord9@163.com>

* more cases

Signed-off-by: discord9 <discord9@163.com>

* even more test cases

Signed-off-by: discord9 <discord9@163.com>

* join testcase

Signed-off-by: discord9 <discord9@163.com>

* fix: column requirement added in correct location

Signed-off-by: discord9 <discord9@163.com>

* fix test

Signed-off-by: discord9 <discord9@163.com>

* chore: clippy

Signed-off-by: discord9 <discord9@163.com>

* track col reqs per stack

Signed-off-by: discord9 <discord9@163.com>

* fix: continue

Signed-off-by: discord9 <discord9@163.com>

* chore: clippy

Signed-off-by: discord9 <discord9@163.com>

* refactor: test mod

Signed-off-by: discord9 <discord9@163.com>

* test utils

Signed-off-by: discord9 <discord9@163.com>

* test: better test

Signed-off-by: discord9 <discord9@163.com>

* more testcases

Signed-off-by: discord9 <discord9@163.com>

* test limit push down

Signed-off-by: discord9 <discord9@163.com>

* more testcases

Signed-off-by: discord9 <discord9@163.com>

* more testcase

Signed-off-by: discord9 <discord9@163.com>

* more test

Signed-off-by: discord9 <discord9@163.com>

* chore: update sqlness

Signed-off-by: discord9 <discord9@163.com>

* chore: update comments

Signed-off-by: discord9 <discord9@163.com>

* fix: check col reqs from bottom to upper

Signed-off-by: discord9 <discord9@163.com>

* chore: more comment

Signed-off-by: discord9 <discord9@163.com>

* docs: more todo

Signed-off-by: discord9 <discord9@163.com>

* chore: comments

Signed-off-by: discord9 <discord9@163.com>

* test: a new failing test that should be fixed

Signed-off-by: discord9 <discord9@163.com>

* fix: part col alias tracking

Signed-off-by: discord9 <discord9@163.com>

* chore: unused

Signed-off-by: discord9 <discord9@163.com>

* chore: clippy

Signed-off-by: discord9 <discord9@163.com>

* docs: comment

Signed-off-by: discord9 <discord9@163.com>

* more testcase

Signed-off-by: discord9 <discord9@163.com>

* more testcase for step/part aggr combine

Signed-off-by: discord9 <discord9@163.com>

* FIXME: a new bug

Signed-off-by: discord9 <discord9@163.com>

* literally unfixable

Signed-off-by: discord9 <discord9@163.com>

* chore: remove some debug print

Signed-off-by: discord9 <discord9@163.com>

---------

Signed-off-by: discord9 <discord9@163.com>
Signed-off-by: evenyag <realevenyag@gmail.com>

---------

Signed-off-by: evenyag <realevenyag@gmail.com>
Signed-off-by: discord9 <discord9@163.com>
Signed-off-by: Dennis Zhuang <killme2008@gmail.com>
Co-authored-by: fys <40801205+fengys1996@users.noreply.github.com>
Co-authored-by: discord9 <55937128+discord9@users.noreply.github.com>
Co-authored-by: dennis zhuang <killme2008@gmail.com>
2025-07-23 22:29:14 +08:00
Ruihang Xia
717c1d1807 feat: update partial execution metrics (#6499)
* feat: update partial execution metrics

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* send data with metrics in distributed mode

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix clippy

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* only send partial metrics under VERBOSE flag

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* loop to while

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
Signed-off-by: evenyag <realevenyag@gmail.com>
2025-07-23 20:54:33 +08:00
Zhenchi
291f3c89fe fix: row selection intersection removes trailing rows (#6539)
* fix: row selection intersection removes trailing rows

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* fix typos

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

---------

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>
Signed-off-by: evenyag <realevenyag@gmail.com>
2025-07-23 20:54:33 +08:00
discord9
602cc38056 fix: breaking loop when not retryable (#6538)
fix: breaking when not retryable

Signed-off-by: discord9 <discord9@163.com>
Signed-off-by: evenyag <realevenyag@gmail.com>
2025-07-23 20:54:33 +08:00
Lei, HUANG
46b3593021 fix(grpc): check grpc client unavailable (#6488)
* fix/check-grpc-client-unavailable:
 Improve async handling in `greptime_handler.rs`

 - Updated the `DoPut` response handling to use `await` with `result_sender.send` for better asynchronous operation.

Signed-off-by: Lei, HUANG <mrsatangel@gmail.com>

* fix/check-grpc-client-unavailable:
 ### Improve Error Handling in `greptime_handler.rs`

 - Enhanced error handling for the `DoPut` operation by switching from `send` to `try_send` for the `result_sender`.
 - Added specific logging for unreachable clients, including `request_id` in the warning message.

Signed-off-by: Lei, HUANG <mrsatangel@gmail.com>

---------

Signed-off-by: Lei, HUANG <mrsatangel@gmail.com>
Signed-off-by: evenyag <realevenyag@gmail.com>
2025-07-23 20:54:33 +08:00
Yan Tingwang
ff402fd6f6 test: add sqlness test for max execution time (#6517)
* add sqlness test for max_execution_time

Signed-off-by: codephage. <tingwangyan2020@163.com>

* add Pre-line comments SQLNESS PROTOCOL MYSQL

Signed-off-by: codephage. <tingwangyan2020@163.com>

* fix(mysql): support max_execution_time variable

Co-authored-by: evenyag <realevenyag@gmail.com>
Signed-off-by: codephage. <tingwangyan2020@163.com>

* fix: test::test_check & sqlness test mysql

Signed-off-by: codephage. <tingwangyan2020@163.com>

* add sqlness test for max_execution_time

Signed-off-by: codephage. <tingwangyan2020@163.com>

* add Pre-line comments SQLNESS PROTOCOL MYSQL

Signed-off-by: codephage. <tingwangyan2020@163.com>

* fix(mysql): support max_execution_time variable

Co-authored-by: evenyag <realevenyag@gmail.com>
Signed-off-by: codephage. <tingwangyan2020@163.com>

* fix: test::test_check & sqlness test mysql

Signed-off-by: codephage. <tingwangyan2020@163.com>

* chore: Unify the sql style

Signed-off-by: codephage. <tingwangyan2020@163.com>

---------

Signed-off-by: codephage. <tingwangyan2020@163.com>
Co-authored-by: evenyag <realevenyag@gmail.com>
Signed-off-by: evenyag <realevenyag@gmail.com>
2025-07-23 20:54:33 +08:00
Yan Tingwang
b83e6e2b18 fix: add system variable max_execution_time (#6511)
add system variable: max_execution_time

Signed-off-by: codephage. <tingwangyan2020@163.com>
Signed-off-by: evenyag <realevenyag@gmail.com>
2025-07-23 20:54:33 +08:00
discord9
cb74337dbe refactor(flow): faster time window expr (#6495)
* refactor: faster window expr

Signed-off-by: discord9 <discord9@163.com>

* docs: explain fast path

Signed-off-by: discord9 <discord9@163.com>

* chore: rm unwrap

Signed-off-by: discord9 <discord9@163.com>

---------

Signed-off-by: discord9 <discord9@163.com>
Signed-off-by: evenyag <realevenyag@gmail.com>
2025-07-23 20:54:33 +08:00
shuiyisong
32bffbb668 feat: add filter processor to v0.15 (#6516)
feat: add filter processor

Signed-off-by: shuiyisong <xixing.sys@gmail.com>
2025-07-14 17:43:49 +08:00
evenyag
941906dc74 chore: bump version to v0.15.2
Signed-off-by: evenyag <realevenyag@gmail.com>
2025-07-11 00:24:21 +08:00
Ruihang Xia
cbf251d0f0 fix: expand on conditional commutative as well (#6484)
* fix: expand on conditional commutative as well

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
Signed-off-by: discord9 <discord9@163.com>

* update sqlness result

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
Signed-off-by: discord9 <discord9@163.com>

* add logging to figure out test failure

Signed-off-by: discord9 <discord9@163.com>

* revert

Signed-off-by: discord9 <discord9@163.com>

* feat: stream drop record metrics

Signed-off-by: discord9 <discord9@163.com>

* Revert "feat: stream drop record metrics"

This reverts commit 6a16946a5b8ea37557bbb1b600847d24274d6500.

Signed-off-by: discord9 <discord9@163.com>

* feat: stream drop record metrics

Signed-off-by: discord9 <discord9@163.com>

refactor: move logging to drop too

Signed-off-by: discord9 <discord9@163.com>

fix: drop input stream before collect metrics

Signed-off-by: discord9 <discord9@163.com>

* fix: expand differently

Signed-off-by: discord9 <discord9@163.com>

* test: update sqlness

Signed-off-by: discord9 <discord9@163.com>

* chore: more dbg

Signed-off-by: discord9 <discord9@163.com>

* Revert "feat: stream drop record metrics"

This reverts commit 3eda4a2257928d95cf9c1328ae44fae84cfbb017.

Signed-off-by: discord9 <discord9@163.com>

* test: sqlness redacted

Signed-off-by: discord9 <discord9@163.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
Signed-off-by: discord9 <discord9@163.com>
Co-authored-by: discord9 <discord9@163.com>
Signed-off-by: evenyag <realevenyag@gmail.com>
2025-07-11 00:24:21 +08:00
shuiyisong
1519379262 chore: skip calc ts in doc 2 with transform (#6509)
Signed-off-by: shuiyisong <xixing.sys@gmail.com>
Signed-off-by: evenyag <realevenyag@gmail.com>
2025-07-10 22:40:07 +08:00
localhost
4bfe02ec7f chore: remove region id to reduce time series (#6506)
Signed-off-by: evenyag <realevenyag@gmail.com>
2025-07-10 22:40:07 +08:00
Weny Xu
ecacf1333e fix: correctly update partition key indices during alter table operations (#6494)
* fix: correctly update partition key indices in alter table operations

Signed-off-by: WenyXu <wenymedia@gmail.com>

* test: add sqlness tests

Signed-off-by: WenyXu <wenymedia@gmail.com>

---------

Signed-off-by: WenyXu <wenymedia@gmail.com>
Signed-off-by: evenyag <realevenyag@gmail.com>
2025-07-10 22:40:07 +08:00
Yingwen
92fa33c250 fix: range query returns range selector error when table not found (#6481)
* test: add sqlness test for range vector with non-existence metric

Signed-off-by: evenyag <realevenyag@gmail.com>

* fix: handle empty metric for matrix selector

Signed-off-by: evenyag <realevenyag@gmail.com>

* test: update sqlness result

Signed-off-by: evenyag <realevenyag@gmail.com>

* chore: add newline

Signed-off-by: evenyag <realevenyag@gmail.com>

---------

Signed-off-by: evenyag <realevenyag@gmail.com>
2025-07-10 22:40:07 +08:00
shuiyisong
8b2d1a3753 fix: skip nan in prom remote write pipeline (#6489)
Signed-off-by: shuiyisong <xixing.sys@gmail.com>
Signed-off-by: evenyag <realevenyag@gmail.com>
2025-07-10 22:40:07 +08:00
Ning Sun
13401c94e0 feat: allow alternative version string (#6472)
* feat: allow alternative version string

* refactor: rename original version function to verbose_version

Signed-off-by: Ning Sun <sunning@greptime.com>

---------

Signed-off-by: Ning Sun <sunning@greptime.com>
Signed-off-by: evenyag <realevenyag@gmail.com>
2025-07-10 22:40:07 +08:00
shuiyisong
fd637dae47 chore: sort range query return values (#6474)
* chore: sort range query return values

* chore: add comments

* chore: add is_sorted check

* fix: test

Signed-off-by: evenyag <realevenyag@gmail.com>
2025-07-10 22:40:07 +08:00
dennis zhuang
69fac19770 fix: empty statements hang (#6480)
* fix: empty statements hang

Signed-off-by: Dennis Zhuang <killme2008@gmail.com>

* tests: add cases

Signed-off-by: Dennis Zhuang <killme2008@gmail.com>

---------

Signed-off-by: Dennis Zhuang <killme2008@gmail.com>
Signed-off-by: evenyag <realevenyag@gmail.com>
2025-07-10 22:40:07 +08:00
discord9
6435b97314 fix: stricter win sort condition (#6477)
test: sqlness

test: fix sqlness redacted

Signed-off-by: discord9 <discord9@163.com>
Signed-off-by: evenyag <realevenyag@gmail.com>
2025-07-10 22:40:07 +08:00
Weny Xu
726e3909fe fix(metric-engine): handle stale metadata region recovery failures (#6395)
* fix(metric-engine): handle stale metadata region recovery failures

Signed-off-by: WenyXu <wenymedia@gmail.com>

* test: add unit tests

Signed-off-by: WenyXu <wenymedia@gmail.com>

---------

Signed-off-by: WenyXu <wenymedia@gmail.com>
Signed-off-by: evenyag <realevenyag@gmail.com>
2025-07-10 22:40:07 +08:00
evenyag
00d759e828 chore: bump version to v0.15.1
Signed-off-by: evenyag <realevenyag@gmail.com>
2025-07-04 22:53:46 +08:00
Lei, HUANG
0042ea6462 fix: filter empty batch in bulk insert api (#6459)
* fix/filter-empty-batch-in-bulk-insert-api:
 **Add Early Return for Empty Record Batches in `bulk_insert.rs`**

 - Implemented an early return in the `Inserter` implementation to handle cases where `record_batch.num_rows()` is zero, improving efficiency by avoiding unnecessary processing.

Signed-off-by: Lei, HUANG <mrsatangel@gmail.com>

* fix/filter-empty-batch-in-bulk-insert-api:
 **Improve Bulk Insert Handling**

 - **`handle_bulk_insert.rs`**: Added a check to handle cases where the batch has zero rows, immediately returning and sending a success response with zero rows processed.
 - **`bulk_insert.rs`**: Enhanced logic to skip processing for masks that select none, optimizing the bulk insert operation by avoiding unnecessary iterations.

 These changes improve the efficiency and robustness of the bulk insert process by handling edge cases more effectively.

Signed-off-by: Lei, HUANG <mrsatangel@gmail.com>

* fix/filter-empty-batch-in-bulk-insert-api:
 ### Refactor and Error Handling Enhancements

 - **Refactored Timestamp Handling**: Introduced `timestamp_array_to_primitive` function in `timestamp.rs` to streamline conversion of timestamp arrays to primitive arrays, reducing redundancy in `handle_bulk_insert.rs` and `bulk_insert.rs`.
 - **Error Handling**: Added `InconsistentTimestampLength` error in `error.rs` to handle mismatched timestamp column lengths in bulk insert operations.
 - **Bulk Insert Logic**: Updated `handle_bulk_insert.rs` to utilize the new timestamp conversion function and added checks for timestamp length consistency.

Signed-off-by: Lei, HUANG <mrsatangel@gmail.com>

* fix/filter-empty-batch-in-bulk-insert-api:
 **Refactor `bulk_insert.rs` to streamline imports**

 - Simplified import statements by removing unused timestamp-related arrays and data types from the `arrow` crate in `bulk_insert.rs`.

Signed-off-by: Lei, HUANG <mrsatangel@gmail.com>

---------

Signed-off-by: Lei, HUANG <mrsatangel@gmail.com>
Signed-off-by: evenyag <realevenyag@gmail.com>
2025-07-04 22:53:46 +08:00
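The `0042ea6462` entry above describes two guards added to the bulk insert path: return early when a record batch has zero rows, and reject batches whose timestamp column length disagrees with the row count (the `InconsistentTimestampLength` error). A minimal Rust sketch of that guard logic follows; `Batch`, `BulkInsertError`, and `check_batch` are hypothetical names for illustration, not the types from the actual diff.

```rust
/// Hypothetical, simplified stand-in for an Arrow record batch.
#[derive(Debug)]
struct Batch {
    num_rows: usize,
    timestamp_len: usize,
}

/// Mirrors the `InconsistentTimestampLength` error mentioned in the commit message.
#[derive(Debug)]
enum BulkInsertError {
    InconsistentTimestampLength { expected: usize, actual: usize },
}

/// Returns Ok(0) immediately for empty batches and rejects batches whose
/// timestamp column length does not match the row count.
fn check_batch(batch: &Batch) -> Result<usize, BulkInsertError> {
    if batch.num_rows == 0 {
        // Early return: nothing to insert, report zero rows processed.
        return Ok(0);
    }
    if batch.timestamp_len != batch.num_rows {
        return Err(BulkInsertError::InconsistentTimestampLength {
            expected: batch.num_rows,
            actual: batch.timestamp_len,
        });
    }
    Ok(batch.num_rows)
}

fn main() {
    assert_eq!(check_batch(&Batch { num_rows: 0, timestamp_len: 0 }).unwrap(), 0);
    assert!(check_batch(&Batch { num_rows: 3, timestamp_len: 2 }).is_err());
}
```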
Zhenchi
d06450715f fix: add backward compatibility for SkippingIndexOptions deserialization (#6458)
* fix: add backward compatibility for `SkippingIndexOptions` deserialization

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* address comments

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* address comments

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

---------

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>
Signed-off-by: evenyag <realevenyag@gmail.com>
2025-07-04 22:53:46 +08:00
evenyag
8612bb066f chore: fix statement compile errors
Signed-off-by: evenyag <realevenyag@gmail.com>
2025-07-03 23:05:21 +08:00
Yingwen
467593d329 fix: enable max_execution time for other read only statements (#6454)
Also disable the timeout when timeout is 0

Signed-off-by: evenyag <realevenyag@gmail.com>
2025-07-03 23:05:21 +08:00
Ruihang Xia
9e4ae070b2 feat: skip rule checker on ingestion (#6453)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2025-07-03 23:05:21 +08:00
Ruihang Xia
d8261dda51 feat!: point matrix based partition rule checker (#6431)
* bare implementation

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* stateful generator

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* error report

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix remap checkpoint

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* use matrix generator as iterator

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* pre-calculate suffix product

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* update existing test cases

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix clippy

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* sqlness

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix ut

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* clean up

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
Signed-off-by: evenyag <realevenyag@gmail.com>
2025-07-03 23:05:21 +08:00
dennis zhuang
7ab9b335a1 fix: label_replace and label_join functions when used as sub‐expressions (#6443)
* fix: label_replace and label_join functions in expressions

Signed-off-by: Dennis Zhuang <killme2008@gmail.com>

* chore: remove update_fields

Signed-off-by: Dennis Zhuang <killme2008@gmail.com>

* chore: tql eval -> TQL EVAL

Signed-off-by: Dennis Zhuang <killme2008@gmail.com>

* fix: empty regex and not existing source label

Signed-off-by: Dennis Zhuang <killme2008@gmail.com>

* chore: simplify test

Signed-off-by: Dennis Zhuang <killme2008@gmail.com>

* fix: test

Signed-off-by: Dennis Zhuang <killme2008@gmail.com>

* fix: test

Signed-off-by: Dennis Zhuang <killme2008@gmail.com>

---------

Signed-off-by: Dennis Zhuang <killme2008@gmail.com>
Signed-off-by: evenyag <realevenyag@gmail.com>
2025-07-03 23:05:21 +08:00
Ruihang Xia
60835afb47 feat: Collider for playing with PartitionRule (#6399)
* skeleton

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* initial impl and tests

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* refactor and reorganize

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix clippy

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix typo

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add comment

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* error handling

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* explain naming

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
Signed-off-by: evenyag <realevenyag@gmail.com>
2025-07-03 23:05:21 +08:00
fys
aba5bf7431 refactor: avoid adding feature to parameter (#6391)
* refactor: avoid adding feature to parameter

* avoid `cfg(not(feature = ...))` block
2025-07-03 15:48:22 +08:00
Yingwen
7897fe8dbe fix: correct MAX_EXECUTION_TIME timeout calculation (#6444)
* feat: implement statement timeout in frontend instance

Signed-off-by: evenyag <realevenyag@gmail.com>

* fix: fail fast when timeout is 0

Signed-off-by: evenyag <realevenyag@gmail.com>

* fix: update start time

Signed-off-by: evenyag <realevenyag@gmail.com>

---------

Signed-off-by: evenyag <realevenyag@gmail.com>
2025-07-03 10:46:35 +08:00
Ruihang Xia
cc8ec706a1 fix: remap column indices on overriding logical table partitions (#6446)
* fix: remap column indices on overriding logical table partitions

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* sqlness

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* refactor map query

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
Signed-off-by: evenyag <realevenyag@gmail.com>
2025-07-03 10:46:35 +08:00
Weny Xu
7c688718db fix: fix dest_keys chunks bug in TombstoneManager (#6432)
* fix(meta): fix dest_keys_chunks bug in TombstoneManager

Signed-off-by: WenyXu <wenymedia@gmail.com>

* chore: fix typo

Signed-off-by: WenyXu <wenymedia@gmail.com>

* fix: fix sqlness tests

Signed-off-by: WenyXu <wenymedia@gmail.com>

---------

Signed-off-by: WenyXu <wenymedia@gmail.com>
Signed-off-by: evenyag <realevenyag@gmail.com>
2025-07-03 10:46:35 +08:00
shuiyisong
8a0e554e5a feat(pipeline): support Loki API (#6390)
* chore: use schema_info

Signed-off-by: shuiyisong <xixing.sys@gmail.com>

* refactor: abstract loki item generator

Signed-off-by: shuiyisong <xixing.sys@gmail.com>

* chore: introduce middle item

Signed-off-by: shuiyisong <xixing.sys@gmail.com>

* feat: introduce pipeline in loki api

Signed-off-by: shuiyisong <xixing.sys@gmail.com>

* test: add tests

Signed-off-by: shuiyisong <xixing.sys@gmail.com>

* chore: minor update

Signed-off-by: shuiyisong <xixing.sys@gmail.com>

* chore: minor update

Signed-off-by: shuiyisong <xixing.sys@gmail.com>

* chore: update prefix and test

Signed-off-by: shuiyisong <xixing.sys@gmail.com>

* chore: change recursion to loop

Signed-off-by: shuiyisong <xixing.sys@gmail.com>

* fix: cr issue

Signed-off-by: shuiyisong <xixing.sys@gmail.com>

---------

Signed-off-by: shuiyisong <xixing.sys@gmail.com>
Signed-off-by: evenyag <realevenyag@gmail.com>
2025-07-03 10:46:35 +08:00
jeremyhi
80fae1c559 feat: override logical table's partition key indices (#6385)
* feat: Override logical table's partition key indices with physical table's

* chore: by comment

Signed-off-by: evenyag <realevenyag@gmail.com>
2025-07-03 10:46:35 +08:00
Zhenchi
c37c4df20d feat: pick #6416 to release/0.15 (#6445)
* feat: pick #6416 to release/0.15

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* upgrade proto

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

---------

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>
2025-07-02 08:55:54 +00:00
Yingwen
f712c1b356 feat: cherry-pick #6384 #6388 #6396 #6403 #6412 #6405 to 0.15 branch (#6414)
* feat: supports CsvWithNames and CsvWithNamesAndTypes formats (#6384)

* feat: supports CsvWithNames and CsvWithNamesAndTypes formats and object/array types

Signed-off-by: Dennis Zhuang <killme2008@gmail.com>

* test: added and fixed tests

Signed-off-by: Dennis Zhuang <killme2008@gmail.com>

* chore: fix test

Signed-off-by: Dennis Zhuang <killme2008@gmail.com>

* chore: remove comments

Signed-off-by: Dennis Zhuang <killme2008@gmail.com>

* test: add json type csv tests

Signed-off-by: Dennis Zhuang <killme2008@gmail.com>

* chore: remove comment

Co-authored-by: Yingwen <realevenyag@gmail.com>

---------

Signed-off-by: Dennis Zhuang <killme2008@gmail.com>
Co-authored-by: Yingwen <realevenyag@gmail.com>
Signed-off-by: evenyag <realevenyag@gmail.com>

* feat: introduce /v1/health for healthcheck from external (#6388)

Signed-off-by: Ning Sun <sunning@greptime.com>
Signed-off-by: evenyag <realevenyag@gmail.com>

* feat: update dashboard to v0.10.1 (#6396)

Co-authored-by: ZonaHex <ZonaHex@users.noreply.github.com>
Signed-off-by: evenyag <realevenyag@gmail.com>

* fix: complete partial index search results in cache (#6403)

* fix: complete partial index search results in cache

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* polish

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* address comments

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* add initial tests

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* cover issue case

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* TestEnv new -> async

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

---------

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>
Signed-off-by: evenyag <realevenyag@gmail.com>

* fix: skip failing nodes when gathering process info (#6412)

* fix/process-manager-skip-fail-nodes:
 - **Enhance Error Handling in `process_manager.rs`:**
   Improved error handling by adding a warning log for failing nodes in the `list_process` method. This ensures that the process listing continues even if some nodes fail to respond.

 - **Add Error Type Import in `process_manager.rs`:**
   Included the `Error` type from the `error` module to handle errors more effectively within the `ProcessManager` implementation.

Signed-off-by: Lei, HUANG <mrsatangel@gmail.com>

* fix: clippy

Signed-off-by: Lei, HUANG <mrsatangel@gmail.com>

* fix/process-manager-skip-fail-nodes:
 **Enhancements to Debugging and Trait Implementation**

 - **`process_manager.rs`**: Improved logging by adding more detailed error messages when skipping failing nodes.
 - **`selector.rs`**: Enhanced the `FrontendClient` trait by adding the `Debug` trait bound to improve debugging capabilities.

Signed-off-by: Lei, HUANG <mrsatangel@gmail.com>

---------

Signed-off-by: Lei, HUANG <mrsatangel@gmail.com>
Signed-off-by: evenyag <realevenyag@gmail.com>

* refactor: pass pipeline name through http header and get db from query context (#6405)

Signed-off-by: zyy17 <zyylsxm@gmail.com>
Signed-off-by: evenyag <realevenyag@gmail.com>

---------

Signed-off-by: Dennis Zhuang <killme2008@gmail.com>
Signed-off-by: evenyag <realevenyag@gmail.com>
Signed-off-by: Ning Sun <sunning@greptime.com>
Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>
Signed-off-by: Lei, HUANG <mrsatangel@gmail.com>
Signed-off-by: zyy17 <zyylsxm@gmail.com>
Co-authored-by: dennis zhuang <killme2008@gmail.com>
Co-authored-by: Ning Sun <sunng@protonmail.com>
Co-authored-by: ZonaHe <zonahe@qq.com>
Co-authored-by: ZonaHex <ZonaHex@users.noreply.github.com>
Co-authored-by: Zhenchi <zhongzc_arch@outlook.com>
Co-authored-by: Lei, HUANG <6406592+v0y4g3r@users.noreply.github.com>
Co-authored-by: zyy17 <zyylsxm@gmail.com>
2025-06-27 20:11:28 +08:00
shuiyisong
7cd6be41ce feat(pipeline): introduce pipeline doc version 2 for combine-transform (#6360)
* chore: init commit of pipeline doc version v2

Signed-off-by: shuiyisong <xixing.sys@gmail.com>

* chore: remove unused code

Signed-off-by: shuiyisong <xixing.sys@gmail.com>

* chore: remove unused code

Signed-off-by: shuiyisong <xixing.sys@gmail.com>

* chore: add test

Signed-off-by: shuiyisong <xixing.sys@gmail.com>

* chore: add test

Signed-off-by: shuiyisong <xixing.sys@gmail.com>

* chore: compatible with v1 to remain field in the map during transform

Signed-off-by: shuiyisong <xixing.sys@gmail.com>

* refactor: pipeline.exec_mut

Signed-off-by: shuiyisong <xixing.sys@gmail.com>

* fix: typo

Signed-off-by: shuiyisong <xixing.sys@gmail.com>

* chore: change from v2 to 2 in version setting

Signed-off-by: shuiyisong <xixing.sys@gmail.com>

---------

Signed-off-by: shuiyisong <xixing.sys@gmail.com>
2025-06-22 00:58:36 +00:00
ZonaHe
15616d0c43 feat: update dashboard to v0.10.0 (#6368)
Co-authored-by: ZonaHex <ZonaHex@users.noreply.github.com>
2025-06-20 23:48:35 +00:00
dennis zhuang
b43e315c67 fix: test test_tls_file_change_watch (#6366)
* fix: test test_tls_file_change_watch

Signed-off-by: Dennis Zhuang <killme2008@gmail.com>

* fix: cert_path

Signed-off-by: Dennis Zhuang <killme2008@gmail.com>

* fix: test

Signed-off-by: Dennis Zhuang <killme2008@gmail.com>

* chore: revert times

Signed-off-by: Dennis Zhuang <killme2008@gmail.com>

* chore: debug

Signed-off-by: Dennis Zhuang <killme2008@gmail.com>

* fix: times

Signed-off-by: Dennis Zhuang <killme2008@gmail.com>

* chore: remove assertions

Signed-off-by: Dennis Zhuang <killme2008@gmail.com>

* chore: use inspect_err

Signed-off-by: Dennis Zhuang <killme2008@gmail.com>

---------

Signed-off-by: Dennis Zhuang <killme2008@gmail.com>
2025-06-20 16:57:07 +00:00
Yingwen
36ab1ceef7 chore: prints a warning when skip_ssl_validation is true (#6367)
chore: warn when skip_ssl_validation is true

We already log all configs when a node starts.

Signed-off-by: evenyag <realevenyag@gmail.com>
2025-06-20 13:14:50 +00:00
Weny Xu
3fb1b726c6 refactor(cli): simplify metadata command parameters (#6364)
Signed-off-by: WenyXu <wenymedia@gmail.com>
2025-06-20 09:00:21 +00:00
Olexandr88
c423bb31fe docs: added YouTube link to documentation (#6362)
Update README.md
2025-06-20 08:16:54 +00:00
rgidda
e026f766d2 feat(storage): Add skip_ssl_validation option for object storage HTTP client (#6358)
* feat(storage): Add skip_ssl_validation option for object storage HTTP client

Signed-off-by: rgidda <rgidda@hitachivantara.com>

* fix(test): Broken test case for - Add skip_ssl_validation option for object storage HTTP client

Signed-off-by: rgidda <rgidda@hitachivantara.com>

* fix: test

* fix: test

---------

Signed-off-by: rgidda <rgidda@hitachivantara.com>
Co-authored-by: rgidda <rgidda@hitachivantara.com>
Co-authored-by: dennis zhuang <killme2008@gmail.com>
2025-06-20 08:08:19 +00:00
discord9
9d08f2532a feat: dist auto step aggr pushdown (#6268)
* wip: steppable aggr fn

Signed-off-by: discord9 <discord9@163.com>

* poc: step aggr query

Signed-off-by: discord9 <discord9@163.com>

* feat: mvp poc stuff

Signed-off-by: discord9 <discord9@163.com>

* test: sqlness

Signed-off-by: discord9 <discord9@163.com>

* chore: import missing

Signed-off-by: discord9 <discord9@163.com>

* feat: support first/last_value

Signed-off-by: discord9 <discord9@163.com>

* fix: check also include first/last value

Signed-off-by: discord9 <discord9@163.com>

* chore: clean up after rebase

Signed-off-by: discord9 <discord9@163.com>

* feat: optimize yes!

Signed-off-by: discord9 <discord9@163.com>

* fix: alias qualifled

Signed-off-by: discord9 <discord9@163.com>

* test: more testcases

Signed-off-by: discord9 <discord9@163.com>

* chore: qualified column

Signed-off-by: discord9 <discord9@163.com>

* chore: per review

Signed-off-by: discord9 <discord9@163.com>

* fix: case when can push down

Signed-off-by: discord9 <discord9@163.com>

* feat: udd/hll_merge is also the same

Signed-off-by: discord9 <discord9@163.com>

* fix: udd/hll_merge args

Signed-off-by: discord9 <discord9@163.com>

* tests: fix sqlness

Signed-off-by: discord9 <discord9@163.com>

* tests: fix sqlness

Signed-off-by: discord9 <discord9@163.com>

* fix: udd/hll merge steppable

Signed-off-by: discord9 <discord9@163.com>

* chore: per review

Signed-off-by: discord9 <discord9@163.com>

* test: REDACTED

Signed-off-by: discord9 <discord9@163.com>

* refactor: per review

Signed-off-by: discord9 <discord9@163.com>

* refactor: more formal transform action

Signed-off-by: discord9 <discord9@163.com>

* feat: support modify child plan too

Signed-off-by: discord9 <discord9@163.com>

---------

Signed-off-by: discord9 <discord9@163.com>
2025-06-20 07:18:55 +00:00
LFC
e072726ea8 refactor: make scanner creation async (#6349)
* refactor: make scanner creation async

Signed-off-by: luofucong <luofc@foxmail.com>

* resolve PR comments

Signed-off-by: luofucong <luofc@foxmail.com>

---------

Signed-off-by: luofucong <luofc@foxmail.com>
2025-06-20 06:44:49 +00:00
Ning Sun
e78c3e1eaa refactor: make metadata region option opt-in (#6350)
* refactor: make metadata region option opt-in

Signed-off-by: Ning Sun <sunning@greptime.com>

* fix: preserve wal_options for metadata region

Signed-off-by: Ning Sun <sunning@greptime.com>

* Update src/metric-engine/src/engine/create.rs

Co-authored-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ning Sun <sunning@greptime.com>
Co-authored-by: Ruihang Xia <waynestxia@gmail.com>
2025-06-20 03:31:16 +00:00
Weny Xu
89e3c8edab fix(meta): enhance mysql election client with timeouts and reconnection (#6341)
* fix(meta): enhance mysql election client with timeouts and reconnection

Signed-off-by: WenyXu <wenymedia@gmail.com>

* feat: improve MySQL election client lease management and error handling

Signed-off-by: WenyXu <wenymedia@gmail.com>

* chore: adjust timeout configurations for election clients

Signed-off-by: WenyXu <wenymedia@gmail.com>

* chore: remove unused error

Signed-off-by: WenyXu <wenymedia@gmail.com>

* fix: fix unit test

Signed-off-by: WenyXu <wenymedia@gmail.com>

* chore: apply suggestions from CR

Signed-off-by: WenyXu <wenymedia@gmail.com>

* chore: apply suggestions from CR

Signed-off-by: WenyXu <wenymedia@gmail.com>

* chore: apply suggestions from CR

Signed-off-by: WenyXu <wenymedia@gmail.com>

---------

Signed-off-by: WenyXu <wenymedia@gmail.com>
2025-06-19 12:44:23 +00:00
fys
d4826b998d feat: support execute sql in frontend_client (#6355)
* feat: support execute sql in frontend_client

* chore: remove unnecessary clone

* add components for flownode instance

* add feature gate for component

* fix: enterprise feature
2025-06-19 09:47:16 +00:00
Weny Xu
d9faa5c801 feat(cli): add metadata del commands (#6339)
* feat: introduce cli for deleting metadata

Signed-off-by: WenyXu <wenymedia@gmail.com>

* refactor(cli): flatten metadata command structure

Signed-off-by: WenyXu <wenymedia@gmail.com>

* chore: add alias

Signed-off-by: WenyXu <wenymedia@gmail.com>

* refactor(meta): implement logical deletion for CLI tool

Signed-off-by: WenyXu <wenymedia@gmail.com>

* chore: apply suggestions from CR

Signed-off-by: WenyXu <wenymedia@gmail.com>

* chore: apply suggestions from CR

Signed-off-by: WenyXu <wenymedia@gmail.com>

---------

Signed-off-by: WenyXu <wenymedia@gmail.com>
2025-06-19 06:48:00 +00:00
Ning Sun
12c3a3205b chore: security updates (#6351) 2025-06-19 06:43:43 +00:00
Weny Xu
5231505021 fix(metric-engine): properly propagate errors during batch open operation (#6325)
* fix(metric-engine): properly propagate errors during batch open operation

Signed-off-by: WenyXu <wenymedia@gmail.com>

* chore: add comments

Signed-off-by: WenyXu <wenymedia@gmail.com>

---------

Signed-off-by: WenyXu <wenymedia@gmail.com>
2025-06-19 06:37:54 +00:00
Lei, HUANG
6ece560f8c fix: reordered write cause incorrect kv (#6345)
* fix/reordered-write-cause-incorrect-kv:
 - **Enhance Testing in `partition_tree.rs`**: Added comprehensive test functions such as `kv_region_metadata`, `key_values`, and `collect_kvs` to improve the robustness of key-value operations and ensure correct behavior of the `PartitionTreeMemtable`.
 - **Improve Key Handling in `dict.rs`**: Modified `KeyDictBuilder` to handle both full and sparse keys, ensuring correct mapping and insertion. Added a new test `test_builder_finish_with_sparse_key` to validate the handling of sparse keys.

Signed-off-by: Lei, HUANG <mrsatangel@gmail.com>

* fix/reordered-write-cause-incorrect-kv:
 ### Refactor `partition_tree.rs` for Improved Key Handling

 - **Refactored Key Handling**: Simplified the `key_values` function to accept an iterator of keys, removing hardcoded key-value pairs. This change enhances flexibility and reduces redundancy in key management.
 - **Updated Test Cases**: Modified test cases to use the new `key_values` function signature, ensuring they iterate over keys dynamically rather than relying on predefined lists.

 Files affected:
 - `src/mito2/src/memtable/partition_tree.rs`

Signed-off-by: Lei, HUANG <mrsatangel@gmail.com>

* fix/reordered-write-cause-incorrect-kv:
 Enhance Testing in `partition_tree.rs`

 - Added assertions to verify key-value collection after `memtable` and `forked` operations.
 - Refactored key-value writing logic for clarity in `forked` operations.

Signed-off-by: Lei, HUANG <mrsatangel@gmail.com>

---------

Signed-off-by: Lei, HUANG <mrsatangel@gmail.com>
2025-06-19 06:32:40 +00:00
fys
2ab08a8f93 chore(deps): switch greptime-proto to official repository (#6347) 2025-06-18 12:52:46 +00:00
Lei, HUANG
086ae9cdcd chore: print series count after wal replay (#6344)
* chore/print-series-count-after-wal-replay:
 ### Add Series Count Functionality and Logging Enhancements

 - **`time_partition.rs`**: Introduced `series_count` method to calculate the total timeseries count across all time partitions.
 - **`opener.rs`**: Enhanced logging to include the total timeseries replayed during WAL replay.
 - **`version.rs`**: Added `series_count` method to `VersionControlData` for approximating timeseries count in the current version.
 - **`handler.rs`**: Added entry and exit logging for the `sql` function to trace execution flow.

Signed-off-by: Lei, HUANG <mrsatangel@gmail.com>

* chore/print-series-count-after-wal-replay:
 ### Remove Unused Import

 - **File Modified**: `src/servers/src/http/handler.rs`
 - **Change Summary**: Removed the unused `info` import from `common_telemetry`.

Signed-off-by: Lei, HUANG <mrsatangel@gmail.com>

---------

Signed-off-by: Lei, HUANG <mrsatangel@gmail.com>
2025-06-18 12:04:39 +00:00
LFC
6da8e00243 refactor: make finding leader in metasrv client dynamic (#6343)
Signed-off-by: luofucong <luofc@foxmail.com>
2025-06-18 11:29:23 +00:00
Arshdeep
4b04c402b6 fix: add path exist check in copy_table_from (#6182) (#6300)
Signed-off-by: Arshdeep54 <balarsh535@gmail.com>
2025-06-18 09:50:27 +00:00
Lei, HUANG
a59b6c36d2 chore: add metrics for active series and field builders (#6332)
* chore/series-metrics:
 ### Add Metrics for Active Series and Values in Memtable

 - **`simple_bulk_memtable.rs`**: Implemented `Drop` trait for `SimpleBulkMemtable` to decrement `MEMTABLE_ACTIVE_SERIES_COUNT` and `MEMTABLE_ACTIVE_VALUES_COUNT` upon dropping.
 - **`time_series.rs`**:
   - Introduced `SeriesMap` with `Drop` implementation to manage active series and values count.
   - Updated `SeriesSet` and `Iter` to use `SeriesMap`.
   - Added `num_values` method in `Series` to calculate the number of values.
 - **`metrics.rs`**: Added `MEMTABLE_ACTIVE_SERIES_COUNT` and `MEMTABLE_ACTIVE_VALUES_COUNT` metrics to track active series and values in `TimeSeriesMemtable`.

Signed-off-by: Lei, HUANG <mrsatangel@gmail.com>

* chore/series-metrics:
- Add metrics for active series and field builders
- Update dashboard

Signed-off-by: Lei, HUANG <mrsatangel@gmail.com>

* chore/series-metrics:
 **Add Series Count Tracking in Memtables**

 - **`flush.rs`**: Updated `RegionFlushTask` to track and log the series count during memtable flush operations.
 - **`memtable.rs`**: Introduced `series_count` in `MemtableStats` and added a method to retrieve it.
 - **`partition_tree.rs`, `partition.rs`, `tree.rs`**: Implemented series count calculation in `PartitionTreeMemtable` and its components.
 - **`simple_bulk_memtable.rs`, `time_series.rs`**: Integrated series count tracking in `SimpleBulkMemtable` and `TimeSeriesMemtable` implementations.

Signed-off-by: Lei, HUANG <mrsatangel@gmail.com>

* Update src/mito2/src/memtable.rs

Co-authored-by: Yingwen <realevenyag@gmail.com>

---------

Signed-off-by: Lei, HUANG <mrsatangel@gmail.com>
Co-authored-by: Yingwen <realevenyag@gmail.com>
2025-06-18 09:16:45 +00:00
zyy17
f6ce6fe385 fix(jaeger-api): incorrect find_traces() logic and multiple api compatible issues (#6293)
* fix: use `limit` params in jaeger http

Signed-off-by: zyy17 <zyylsxm@gmail.com>

* refactor: only parse `max_duration` and `min_duration` when it's not empty

Signed-off-by: zyy17 <zyylsxm@gmail.com>

* fix: handle the input for empty `limit` string

Signed-off-by: zyy17 <zyylsxm@gmail.com>

* fix: missing the filter for `service_name`

Signed-off-by: zyy17 <zyylsxm@gmail.com>

* test: fix ci errors

Signed-off-by: zyy17 <zyylsxm@gmail.com>

* fix: incorrect behavior of find_traces

Signed-off-by: zyy17 <zyylsxm@gmail.com>

* fix: the logic of `find_traces()`

The correct logic should be:

1. Get all trace ids that match the filters;

2. Get all traces that match the trace ids from the previous query;

Signed-off-by: zyy17 <zyylsxm@gmail.com>

* fix: integration test errors

Signed-off-by: zyy17 <zyylsxm@gmail.com>

* refactor: add `empty_string_as_none`

Signed-off-by: zyy17 <zyylsxm@gmail.com>

* refactor: refine naming

Signed-off-by: zyy17 <zyylsxm@gmail.com>

---------

Signed-off-by: zyy17 <zyylsxm@gmail.com>
2025-06-18 08:01:36 +00:00
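The `f6ce6fe385` entry above spells out the corrected `find_traces()` flow: first collect the trace ids that match the filters, then fetch every span belonging to those ids. A hedged Rust sketch of that two-phase lookup; `query_trace_ids` and `query_spans_by_ids` are assumed helper names, and the in-memory filtering stands in for the real storage queries.

```rust
/// Hypothetical span record; the real Jaeger API handler uses richer types.
#[derive(Debug, Clone)]
struct Span {
    trace_id: String,
    service_name: String,
    duration_ms: u64,
}

/// Phase 1 (assumed helper): trace ids whose spans match the filters, capped by `limit`.
fn query_trace_ids(spans: &[Span], service: &str, limit: usize) -> Vec<String> {
    let mut ids: Vec<String> = spans
        .iter()
        .filter(|s| s.service_name == service)
        .map(|s| s.trace_id.clone())
        .collect();
    ids.sort();
    ids.dedup();
    ids.truncate(limit);
    ids
}

/// Phase 2 (assumed helper): every span belonging to the matched trace ids,
/// not just the spans that matched the filters themselves.
fn query_spans_by_ids<'a>(spans: &'a [Span], ids: &[String]) -> Vec<&'a Span> {
    spans.iter().filter(|s| ids.contains(&s.trace_id)).collect()
}

fn find_traces<'a>(spans: &'a [Span], service: &str, limit: usize) -> Vec<&'a Span> {
    let ids = query_trace_ids(spans, service, limit);
    query_spans_by_ids(spans, &ids)
}

fn main() {
    let spans = vec![
        Span { trace_id: "t1".into(), service_name: "api".into(), duration_ms: 12 },
        Span { trace_id: "t1".into(), service_name: "db".into(), duration_ms: 5 },
        Span { trace_id: "t2".into(), service_name: "api".into(), duration_ms: 30 },
    ];
    // Filtering on "db" matches only one span of trace t1, but the whole trace is returned.
    assert_eq!(find_traces(&spans, "db", 20).len(), 2);
}
```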
Weny Xu
4d4bfb7d8b fix(metric): prevent setting memtable type for metadata region (#6340)
Signed-off-by: WenyXu <wenymedia@gmail.com>
2025-06-18 03:49:22 +00:00
Ruihang Xia
6e1e8f19e6 feat: support setting FORMAT in TQL ANALYZE/VERBOSE (#6327)
* feat: support setting FORMAT in TQL ANALYZE/VERBOSE

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* update sqlness result

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2025-06-18 03:39:12 +00:00
Weny Xu
49cb4da6d2 feat: introduce CLI tool for repairing logical table metadata (#6322)
* feat: introduce logical table metadata repair cli tool

Signed-off-by: WenyXu <wenymedia@gmail.com>

* chore: deps

Signed-off-by: WenyXu <wenymedia@gmail.com>

* refactor: flatten doctor module structure

Signed-off-by: WenyXu <wenymedia@gmail.com>

* chore: apply suggestions from CR

Signed-off-by: WenyXu <wenymedia@gmail.com>

* chore: apply suggestions from CR

Signed-off-by: WenyXu <wenymedia@gmail.com>

* chore: apply suggestions from CR

Signed-off-by: WenyXu <wenymedia@gmail.com>

---------

Signed-off-by: WenyXu <wenymedia@gmail.com>
2025-06-17 13:53:59 +00:00
Lei, HUANG
0d0236ddab fix: revert string builder initial capacity in TimeSeriesMemtable (#6330)
fix/revert-string-builder-initial-capacity:
 ### Update `time_series.rs` Memory Allocation

 - **Reduced StringBuilder Capacity**: Adjusted the initial capacity of `StringBuilder` in `ValueBuilder` from `(256, 4096)` to `(4, 8)` to optimize memory usage in `time_series.rs`.
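
 For context, arrow's `StringBuilder::with_capacity(item_capacity, data_capacity)` pre-allocates both the offsets and the value buffer, so this change shrinks the up-front allocation per column while the builder still grows on demand:

```rust
use arrow::array::StringBuilder;

fn main() {
    // Before: room for 256 strings and 4 KiB of string bytes per builder.
    let _old = StringBuilder::with_capacity(256, 4096);

    // After: start tiny (4 items, 8 bytes); the builder reallocates as values
    // arrive, trading a few reallocations for much lower idle memory per column.
    let _new = StringBuilder::with_capacity(4, 8);
}
```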

Signed-off-by: Lei, HUANG <mrsatangel@gmail.com>
Co-authored-by: Ruihang Xia <waynestxia@gmail.com>
2025-06-17 13:24:52 +00:00
Lei, HUANG
f8edb53b30 fix: carry process id in query ctx (#6335)
fix/carry-process-id-in-query-ctx:
 ### Commit Message

 Enhance query context handling in `instance.rs`

 - Updated `Instance` implementation to include `process_id` in query context initialization, improving traceability and debugging capabilities.

Signed-off-by: Lei, HUANG <mrsatangel@gmail.com>
2025-06-17 12:04:23 +00:00
Lin Yihai
438791b3e4 feat: Add DROP DEFAULT (#6290)
* feat: Add `DROP DEFAULT`

Signed-off-by: Yihai Lin <yihai-lin@foxmail.com>

* chore: use `next_token`

Signed-off-by: Yihai Lin <yihai-lin@foxmail.com>

---------

Signed-off-by: Yihai Lin <yihai-lin@foxmail.com>
2025-06-17 09:33:56 +00:00
discord9
50e4c916e7 chore: clean up unused impl &standalone use mark dirty (#6331)
Signed-off-by: discord9 <discord9@163.com>
2025-06-17 08:18:17 +00:00
Ruihang Xia
16e7f7b64b fix: override logical table's partition column with physical table's (#6326)
* fix: override logical table's partition column with physical table's

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add more sqlness test

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* naming

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2025-06-17 08:00:54 +00:00
localhost
53c4fd478e chore: add skip error for pipeline skip error log (#6318)
* chore: wip

Signed-off-by: paomian <xpaomian@gmail.com>

* chore: add skip error for pipeline skip error log

Signed-off-by: paomian <xpaomian@gmail.com>

* chore: add test and check timestamp must be non null

Signed-off-by: paomian <xpaomian@gmail.com>

* chore: fix test

* chore: fix by pr comment

* fix: typo

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

---------

Signed-off-by: paomian <xpaomian@gmail.com>
Co-authored-by: dennis zhuang <killme2008@gmail.com>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-06-17 06:44:11 +00:00
Lei, HUANG
ecbbd2fbdb feat: handle Ctrl-C command in MySQL client (#6320)
* feat/answer-ctrl-c-in-mysql:
 ## Implement Connection ID-based Query Killing

 ### Key Changes:
 - **Connection ID Management:**
   - Added `connection_id` to `Session` and `QueryContext` in `src/session/src/lib.rs` and `src/session/src/context.rs`.
   - Updated `MysqlInstanceShim` and `MysqlServer` to handle `connection_id` in `src/servers/src/mysql/handler.rs` and `src/servers/src/mysql/server.rs`.

 - **KILL Statement Enhancements:**
   - Introduced `Kill` enum to handle both `ProcessId` and `ConnectionId` in `src/sql/src/statements/kill.rs`.
   - Updated `ParserContext` to parse `KILL QUERY <connection_id>` in `src/sql/src/parser.rs`.
   - Modified `StatementExecutor` to support killing queries by `connection_id` in `src/operator/src/statement/kill.rs`.

 - **Process Management:**
   - Refactored `ProcessManager` to include `connection_id` in `src/catalog/src/process_manager.rs`.
   - Added `kill_local_process` method for local query termination.

 - **Testing:**
   - Added tests for `KILL` statement parsing and execution in `src/sql/src/parser.rs`.
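
 A minimal sketch of the statement shape described under "KILL Statement Enhancements" above; the payload types are assumptions, not the exact definitions:

```rust
/// `KILL '<process_id>'` terminates a query by its cluster-wide process id;
/// `KILL QUERY <connection_id>` terminates whatever that MySQL connection is running.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum Kill {
    ProcessId(String),
    ConnectionId(u32),
}

fn describe(stmt: &Kill) -> String {
    match stmt {
        Kill::ProcessId(id) => format!("kill process {id}"),
        Kill::ConnectionId(conn) => format!("kill query on connection {conn}"),
    }
}
```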

 ### Affected Files:
 - `Cargo.lock`, `Cargo.toml`
 - `src/catalog/src/process_manager.rs`
 - `src/frontend/src/instance.rs`
 - `src/frontend/src/stream_wrapper.rs`
 - `src/operator/src/statement.rs`
 - `src/operator/src/statement/kill.rs`
 - `src/servers/src/mysql/federated.rs`
 - `src/servers/src/mysql/handler.rs`
 - `src/servers/src/mysql/server.rs`
 - `src/servers/src/postgres.rs`
 - `src/session/src/context.rs`
 - `src/session/src/lib.rs`
 - `src/sql/src/parser.rs`
 - `src/sql/src/statements.rs`
 - `src/sql/src/statements/kill.rs`
 - `src/sql/src/statements/statement.rs`

Signed-off-by: Lei, HUANG <mrsatangel@gmail.com>

 Conflicts:
	Cargo.lock
	Cargo.toml

Signed-off-by: Lei, HUANG <mrsatangel@gmail.com>

* feat/answer-ctrl-c-in-mysql:
 ### Enhance Process Management and Execution

 - **`process_manager.rs`**: Added a new method `find_processes_by_connection_id` to filter processes by connection ID, improving process management capabilities.
 - **`kill.rs`**: Refactored the process killing logic to utilize the new `find_processes_by_connection_id` method, streamlining the execution flow and reducing redundant checks.

Signed-off-by: Lei, HUANG <mrsatangel@gmail.com>

* feat/answer-ctrl-c-in-mysql:
 ## Commit Message

 ### Update Process ID Type and Refactor Code

 - **Change Process ID Type**: Updated the process ID type from `u64` to `u32` across multiple files to optimize memory usage. Affected files include `process_manager.rs`, `lib.rs`, `database.rs`, `instance.rs`, `server.rs`, `stream_wrapper.rs`, `kill.rs`, `federated.rs`, `handler.rs`, `server.rs`,
 `postgres.rs`, `mysql_server_test.rs`, `context.rs`, `lib.rs`, and `test_util.rs`.

 - **Remove Connection ID**: Removed the `connection_id` field and related logic from `process_manager.rs`, `lib.rs`, `instance.rs`, `server.rs`, `stream_wrapper.rs`, `kill.rs`, `federated.rs`, `handler.rs`, `server.rs`, `postgres.rs`, `mysql_server_test.rs`, `context.rs`, `lib.rs`, and `test_util.rs` to
 simplify the codebase.

 - **Refactor Process Management**: Refactored process management logic to improve clarity and maintainability in `process_manager.rs`, `kill.rs`, and `handler.rs`.

 - **Enhance MySQL Server Handling**: Improved MySQL server handling by integrating process management in `server.rs` and `mysql_server_test.rs`.

Signed-off-by: Lei, HUANG <mrsatangel@gmail.com>

* feat/answer-ctrl-c-in-mysql:
 ### Add Process Manager to Postgres Server

 - **`src/frontend/src/server.rs`**: Updated server initialization to include `process_manager`.
 - **`src/servers/src/postgres.rs`**: Modified `MakePostgresServerHandler` to accept `process_id` for session creation.
 - **`src/servers/src/postgres/server.rs`**: Integrated `process_manager` into `PostgresServer` for generating `process_id` during connection handling.
 - **`src/servers/tests/postgres/mod.rs`** and **`tests-integration/src/test_util.rs`**: Adjusted test server setup to accommodate optional `process_manager`.

Signed-off-by: Lei, HUANG <mrsatangel@gmail.com>

* feat/answer-ctrl-c-in-mysql:
 Update `greptime-proto` Dependency

 - Updated the `greptime-proto` dependency to a new revision in both `Cargo.lock` and `Cargo.toml`.
   - `Cargo.lock`: Changed source revision from `d75a56e05a87594fe31ad5c48525e9b2124149ba` to `fdcbe5f1c7c467634c90a1fd1a00a784b92a4e80`.
   - `Cargo.toml`: Updated the `greptime-proto` git revision to match the new commit.

Signed-off-by: Lei, HUANG <mrsatangel@gmail.com>

---------

Signed-off-by: Lei, HUANG <mrsatangel@gmail.com>
2025-06-17 06:36:23 +00:00
fys
3e3a12385c refactor: make flownode gRPC services able to be added dynamically (#6323)
chore: enhance the flownode gRPC servers extension
2025-06-17 06:27:41 +00:00
shuiyisong
079daf5db9 feat: support special labels parsing in prom remote write (#6302)
* feat: support special labels parsing in prom remote write

* chore: change __schema__ to __database__

* fix: test

Signed-off-by: shuiyisong <xixing.sys@gmail.com>

* fix: remove the missing type alias

Signed-off-by: shuiyisong <xixing.sys@gmail.com>

* chore: update cr issue

Co-authored-by: Lei, HUANG <6406592+v0y4g3r@users.noreply.github.com>

---------

Signed-off-by: shuiyisong <xixing.sys@gmail.com>
Co-authored-by: Lei, HUANG <6406592+v0y4g3r@users.noreply.github.com>
2025-06-17 03:20:33 +00:00
liyang
56b9ab5279 ci: add pr label workflow (#6316)
* ci: add pr label workflow

Signed-off-by: liyang <daviderli614@gmail.com>

* move permissions to jobs

Signed-off-by: liyang <daviderli614@gmail.com>

* add checkout step

Signed-off-by: liyang <daviderli614@gmail.com>

* add job permissions

Signed-off-by: liyang <daviderli614@gmail.com>

* custom sizes

Signed-off-by: liyang <daviderli614@gmail.com>

* Update .github/workflows/pr-labeling.yaml

Co-authored-by: shuiyisong <113876041+shuiyisong@users.noreply.github.com>

---------

Signed-off-by: liyang <daviderli614@gmail.com>
Co-authored-by: shuiyisong <113876041+shuiyisong@users.noreply.github.com>
2025-06-16 17:26:16 +00:00
Ruihang Xia
be4e0d589e feat: support arbitrary constant expression in PromQL function (#6315)
* refactor holt_winters, predict_linear, quantile, round

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix clippy

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* some sqlness result

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* support some functions

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* make all sqlness cases pass

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix other sqlness cases

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* some refactor

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix clippy

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2025-06-16 15:12:27 +00:00
Yingwen
2a3445c72c fix: ignore missing columns and tables in PromQL (#6285)
* fix: handle table/column not found in or

Signed-off-by: evenyag <realevenyag@gmail.com>

* test: update result

Signed-off-by: evenyag <realevenyag@gmail.com>

* test: drop table after test

Signed-off-by: evenyag <realevenyag@gmail.com>

* test: fix test cases

Signed-off-by: evenyag <realevenyag@gmail.com>

* fix: do not return table not found error in series_query

Signed-off-by: evenyag <realevenyag@gmail.com>

---------

Signed-off-by: evenyag <realevenyag@gmail.com>
2025-06-16 12:15:38 +00:00
Lei, HUANG
9d997d593c feat: bulk support flow batch (#6291)
* feat/bulk-support-flow-batch:
 ### Refactor and Enhance Timestamp Handling in gRPC and Bulk Insert

 - **Refactor Table Handling**:
   - Updated `put_record_batch` method to use `TableRef` instead of `TableId` in `grpc.rs`, `greptime_handler.rs`, and `grpc.rs`.
   - Modified `handle_bulk_insert` to accept `TableRef` and extract `TableId` internally in `bulk_insert.rs`.

 - **Enhance Timestamp Processing**:
   - Added `compute_timestamp_range` function to calculate timestamp range in `bulk_insert.rs`.
   - Introduced error handling for invalid time index types in `error.rs`.

 - **Test Adjustments**:
   - Updated `DummyInstance` implementation in `tests/mod.rs` to align with new method signatures.

Signed-off-by: Lei, HUANG <mrsatangel@gmail.com>

* feat/bulk-support-flow-batch:
 ### Add Dirty Window Handling in Flow Module

 - **Updated `greptime-proto` Dependency**: Updated the `greptime-proto` dependency to a new revision in `Cargo.lock` and `Cargo.toml`.
 - **Flow Module Enhancements**:
   - Added `DirtyWindowRequest` handling in `flow.rs`, `node_manager.rs`, `test_util.rs`, `flownode_impl.rs`, and `server.rs`.
   - Implemented `handle_mark_window_dirty` function to manage dirty time windows.
 - **Bulk Insert Enhancements**:
   - Modified `bulk_insert.rs` to notify flownodes about dirty time windows using `update_flow_dirty_window`.
 - **Removed Unused Imports**: Cleaned up unused imports in `greptime_handler.rs`, `grpc.rs`, and `mod.rs`.

Signed-off-by: Lei, HUANG <mrsatangel@gmail.com>

* feat: mark dirty time window

* feat: metrics

* metrics: more useful metrics for batching mode

* feat/bulk-support-flow-batch:
 **Refactor Timestamp Handling and Update Dependencies**

 - **Dependency Update**: Updated `greptime-proto` dependency in `Cargo.lock` and `Cargo.toml` to a new revision.
 - **Batching Engine Refactor**: Modified `src/flow/src/batching_mode/engine.rs` to replace `dirty_time_ranges` with `timestamps` for improved timestamp handling.
 - **Bulk Insert Refactor**: Updated `src/operator/src/bulk_insert.rs` to refactor timestamp extraction and handling. Replaced `compute_timestamp_range` with `extract_timestamps` and adjusted related logic to handle timestamps directly.

Signed-off-by: Lei, HUANG <mrsatangel@gmail.com>

* feat/bulk-support-flow-batch:
 ### Update Metrics in Batching Mode Engine

 - **Modified Metrics**: Replaced `METRIC_FLOW_BATCHING_ENGINE_BULK_MARK_TIME_WINDOW_RANGE` with `METRIC_FLOW_BATCHING_ENGINE_BULK_MARK_TIME_WINDOW` to track the count of time windows instead of their range.
   - Files affected: `engine.rs`, `metrics.rs`
 - **New Method**: Added `len()` method to `DirtyTimeWindows` to return the number of dirty windows.
   - File affected: `state.rs`

Signed-off-by: Lei, HUANG <mrsatangel@gmail.com>

* feat/bulk-support-flow-batch:
 **Refactor and Enhance Timestamp Handling in `bulk_insert.rs`**

 - **Refactored Timestamp Extraction**: Moved timestamp extraction logic to a new method `maybe_update_flow_dirty_window` to improve code readability and maintainability.
 - **Enhanced Flow Update Logic**: Updated the flow dirty window update mechanism to conditionally notify flownodes only if they are configured, using `table_info` and `record_batch`.
 - **Imports Adjusted**: Updated imports to reflect changes in table metadata handling, replacing `TableId` with `TableInfoRef`.

 Files affected:
 - `src/operator/src/bulk_insert.rs`

Signed-off-by: Lei, HUANG <mrsatangel@gmail.com>

* feat/bulk-support-flow-batch:
 ## Update `handle_mark_window_dirty` Method in `flownode_impl.rs`

 - Replaced `unimplemented!()` with `unreachable!()` in the `handle_mark_window_dirty` method for both `FlowDualEngine` and `StreamingEngine` implementations in `flownode_impl.rs`.

Signed-off-by: Lei, HUANG <mrsatangel@gmail.com>

* feat/bulk-support-flow-batch:
 Update `greptime-proto` Dependency

 - Updated the `greptime-proto` dependency to a new revision in both `Cargo.lock` and `Cargo.toml`.
   - `Cargo.lock`: Changed the source revision from `f0913f179ee1d2ce428f8b85a9ea12b5f69ad636` to `17971523673f4fbc982510d3c9d6647ff642e16f`.
   - `Cargo.toml`: Updated the `greptime-proto` git revision to `17971523673f4fbc982510d3c9d6647ff642e16f`.

Signed-off-by: Lei, HUANG <mrsatangel@gmail.com>

---------

Signed-off-by: Lei, HUANG <mrsatangel@gmail.com>
Co-authored-by: discord9 <discord9@163.com>
2025-06-16 08:19:14 +00:00
Weny Xu
10bf9b11f6 fix: handle corner case in catchup where compacted entry id exceeds region last entry id (#6312)
* fix(mito2): handle corner case in catchup where compacted entry id exceeds region last entry id

Signed-off-by: WenyXu <wenymedia@gmail.com>

* chore: apply suggestions from CR

Signed-off-by: WenyXu <wenymedia@gmail.com>

---------

Signed-off-by: WenyXu <wenymedia@gmail.com>
2025-06-16 06:36:31 +00:00
localhost
f4f8d65a39 fix: event api content type only check type and subtype (#6317)
* fix: event api content type only check type and subtype

Signed-off-by: paomian <xpaomian@gmail.com>

* chore: make clippy happy

Signed-off-by: paomian <xpaomian@gmail.com>

---------

Signed-off-by: paomian <xpaomian@gmail.com>
2025-06-13 18:50:05 +00:00
Lei, HUANG
b31990e881 chore: add connection info to QueryContext (#6319)
chore/add-conn-info-to-query-ctx:
 ### Add Connection Information to Query Context

 - **`src/frontend/src/instance.rs`**: Updated to use `query_ctx.conn_info().to_string()` for connection information instead of a placeholder string.
 - **`src/session/src/context.rs`**: Introduced `conn_info` field in `QueryContext` and added a method `conn_info()` to retrieve it. Updated `QueryContextBuilder` to handle `conn_info`.
 - **`src/session/src/lib.rs`**: Modified `Session` to include `conn_info` in the query context building process.

 These changes enhance the query context by incorporating connection information, allowing for more detailed session management.
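
 A simplified picture of the shape described above, with hypothetical field types:

```rust
use std::net::SocketAddr;

/// Hypothetical connection descriptor; the real type carries whatever the
/// frontend knows about the client.
#[derive(Clone, Debug)]
pub struct ConnInfo {
    pub client_addr: Option<SocketAddr>,
    pub channel: String, // e.g. "mysql", "postgres", "http"
}

impl std::fmt::Display for ConnInfo {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match &self.client_addr {
            Some(addr) => write!(f, "{}[{}]", self.channel, addr),
            None => write!(f, "{}", self.channel),
        }
    }
}

pub struct QueryContext {
    conn_info: ConnInfo,
    // ...other query-context fields elided
}

impl QueryContext {
    /// Accessor used, for example, by the process list to show where a query came from.
    pub fn conn_info(&self) -> &ConnInfo {
        &self.conn_info
    }
}
```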

Signed-off-by: Lei, HUANG <mrsatangel@gmail.com>
2025-06-13 18:42:13 +00:00
Lei, HUANG
6da633e70d feat: support killing process (#6309)
* feat/kill-process:
 ### Add Cancellation Support and Enhance Process Management

 - **Cancellation Handle Implementation**: Introduced `CancellationHandle` in `cancellation_handle.rs` to facilitate cancellation of futures and streams.
 - **Process Management Enhancements**:
   - Updated `ProcessManager` in `process_manager.rs` to support cancellable processes using `CancellableProcess`.
   - Added `kill_process` method for terminating processes.
 - **Stream Wrapper Update**:
   - Replaced `StreamWrapper` with `CancellableStreamWrapper` in `stream_wrapper.rs` and `instance.rs` to handle stream cancellation.
 - **Error Handling**:
   - Added `StreamCancelled` error variant in `error.rs` to handle stream cancellation scenarios.
 - **gRPC Handler Update**:
   - Added `kill_process` gRPC method in `frontend_grpc_handler.rs` to allow external process termination.
 - **Dependency Updates**:
   - Updated `Cargo.lock` and `Cargo.toml` to include `common-base` and `tokio-util`.
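
 One plausible shape for such a handle, built on `tokio_util::sync::CancellationToken` (suggested by the new `tokio-util` dependency; the real implementation may differ):

```rust
use tokio_util::sync::CancellationToken;

/// Hypothetical sketch of a handle the process manager can use to abort a query.
#[derive(Clone, Default)]
pub struct CancellationHandle {
    token: CancellationToken,
}

#[derive(Debug)]
pub struct Cancelled;

impl CancellationHandle {
    pub fn cancel(&self) {
        self.token.cancel();
    }

    pub fn is_cancelled(&self) -> bool {
        self.token.is_cancelled()
    }

    /// Drives `fut` to completion unless the handle is cancelled first.
    pub async fn run_cancellable<F, T>(&self, fut: F) -> Result<T, Cancelled>
    where
        F: std::future::Future<Output = T>,
    {
        tokio::select! {
            _ = self.token.cancelled() => Err(Cancelled),
            out = fut => Ok(out),
        }
    }
}
```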

Signed-off-by: Lei, HUANG <mrsatangel@gmail.com>

* feat/kill-process:
 **Enhancements and Bug Fixes**

 - **Dependency Update**: Updated `greptime-proto` dependency in `Cargo.lock` and `Cargo.toml` to a new revision.
 - **Error Handling Improvements**:
   - Modified error variants in `src/catalog/src/error.rs` and `src/common/frontend/src/error.rs` to improve error messages and handling.
   - Added `FrontendNotFound` error variant for better error specificity.
 - **Process Management Enhancements**:
   - Updated `ProcessManager` in `src/catalog/src/process_manager.rs` to include `kill_process` functionality with server address validation.
   - Enhanced `FrontendClient` trait in `src/common/frontend/src/selector.rs` to support `kill_process` requests.
 - **gRPC Handler Update**:
   - Refactored `FrontendGrpcHandler` in `src/servers/src/grpc/frontend_grpc_handler.rs` to handle `kill_process` requests asynchronously and return process status.

Signed-off-by: Lei, HUANG <mrsatangel@gmail.com>

* feat/kill-process:
 ### Add Kill Process Functionality

 - **`Cargo.lock`, `Cargo.toml`**: Added `common-frontend` as a dependency.
 - **`server.rs`, `builder.rs`, `instance.rs`**: Updated `FrontendInvoker` and `FrontendBuilder` to support process management.
 - **`error.rs`**: Introduced `InvalidProcessId` error for handling invalid process IDs.
 - **`statement.rs`, `kill.rs`**: Implemented `execute_kill` method in `StatementExecutor` to handle the `KILL` statement.
 - **`parser.rs`, `statement.rs`**: Updated SQL parser to recognize and parse the `KILL` statement.

Signed-off-by: Lei, HUANG <mrsatangel@gmail.com>

* feat/kill-process:
 ## Add Cancellation Support to Query Execution

 - **`process_manager.rs`**: Updated `CancellationHandle` initialization to use `default()` method.
 - **`cancellation_handle.rs`**: Implemented `Debug` trait for `CancellationHandle` and added `Cancellation` and `CancellableFuture` structs to support cancellable futures.
 - **`error.rs`**: Introduced `Cancelled` error variant to handle query cancellations.
 - **`instance.rs`**: Integrated `CancellableFuture` to manage query execution with cancellation support.
 - **`stream_wrapper.rs`**: Modified `CancellableStreamWrapper` to use the new `waker()` method for cancellation handling.
 - **`statement.rs`**: Added `#[allow(clippy::too_many_arguments)]` to `StatementExecutor::new` to suppress clippy warnings.

Signed-off-by: Lei, HUANG <mrsatangel@gmail.com>

* feat/kill-process:
 - **Add `MetaClientMissing` Error**: Introduced a new error variant `MetaClientMissing` in `error.rs` to handle missing meta client scenarios.
 - **Refactor Cancellation Handling**: Merged `cancellation_handle.rs` into `cancellation.rs` and updated related logic in `process_manager.rs`, `instance.rs`, and `stream_wrapper.rs`.
 - **Enhance Process Management**: Improved process management logic in `process_manager.rs` to handle process cancellation more effectively.
 - **Update Tests**: Added and updated tests in `cancellation.rs` and `stream_wrapper.rs` to cover new cancellation logic and error handling.
 - **Cargo.toml Update**: Adjusted workspace settings in `Cargo.toml` for `common-frontend`.

Signed-off-by: Lei, HUANG <mrsatangel@gmail.com>

* feat/kill-process:
 - **Add Tests for Process Management**: Introduced multiple async tests in `process_manager.rs` to verify query registration, deregistration, cancellation, and process killing functionalities.
 - **Update Error Message in SQL Parser**: Modified the expected error message in `parser.rs` to clarify the expected token as a "process id string literal".

Signed-off-by: Lei, HUANG <mrsatangel@gmail.com>

* feat/kill-process:
 ### Add Process Count Metrics to Catalog

 - **`metrics.rs`**: Introduced a new metric `PROCESS_LIST_COUNT` to track the count of running processes per catalog using `IntGaugeVec`.
 - **`process_manager.rs`**: Updated `CancellableProcess` to increment and decrement `PROCESS_LIST_COUNT` upon creation and destruction, respectively. Added a `Drop` implementation for `CancellableProcess` to handle metric updates.

Signed-off-by: Lei, HUANG <mrsatangel@gmail.com>

* feat/kill-process:
 ### Fix process removal logic in `process_manager.rs`

 - Corrected the condition for removing an entry from the catalog in `ProcessManager` by using `o.get()` instead of `o.get_mut()`.
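
 The fix hinges on which borrow is taken inside the `Entry` check; roughly, with a hypothetical map shape:

```rust
use std::collections::{hash_map::Entry, HashMap};

/// Removes one process and drops the whole catalog entry once it becomes empty.
fn deregister(
    catalogs: &mut HashMap<String, HashMap<u32, String>>,
    catalog: &str,
    process_id: u32,
) {
    if let Entry::Occupied(mut o) = catalogs.entry(catalog.to_string()) {
        o.get_mut().remove(&process_id);
        // A shared borrow is enough to decide whether to drop the entry.
        if o.get().is_empty() {
            o.remove();
        }
    }
}
```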

Signed-off-by: Lei, HUANG <mrsatangel@gmail.com>

* feat/kill-process:
 - **Error Handling Improvements**:
   - Updated status codes for `Error::FrontendNotFound` and `Error::MetaClientMissing` to `StatusCode::Unexpected` in `src/catalog/src/error.rs`.
   - Changed `InvokeFrontend` error display message and status code in `src/common/frontend/src/error.rs`.
   - Added `ProcessManagerMissing` error in `src/operator/src/error.rs` and updated its handling in `src/operator/src/statement/kill.rs`.

 - **Process Management Enhancements**:
   - Added documentation for `ProcessManager` and `register_query` in `src/catalog/src/process_manager.rs`.
   - Modified `kill_process` response handling in `src/servers/src/grpc/frontend_grpc_handler.rs`.

 - **Cancellation Logic Update**:
   - Improved cancellation logic in `src/common/base/src/cancellation.rs` to use `compare_exchange` for atomic operations.
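
 In spirit, `compare_exchange` turns cancellation into a one-shot transition; a minimal illustration:

```rust
use std::sync::atomic::{AtomicBool, Ordering};

/// Flips the cancelled flag exactly once; returns true only for the caller that
/// actually performed the transition, so side effects (waking the task,
/// bumping a metric) cannot run twice.
fn try_cancel(cancelled: &AtomicBool) -> bool {
    cancelled
        .compare_exchange(false, true, Ordering::AcqRel, Ordering::Acquire)
        .is_ok()
}
```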

Signed-off-by: Lei, HUANG <mrsatangel@gmail.com>

* feat/kill-process:
 ### Add Process Kill Count Metric and Refactor Cancellation Handle

 - **Metrics Update**: Added a new metric `PROCESS_KILL_COUNT` in `metrics.rs` to track the count of completed kill process requests per catalog.
 - **Refactor Cancellation Handle**: Renamed `cancellation_handler` to `cancellation_handle` across multiple files for consistency:
   - `process_manager.rs`
   - `instance.rs`
   - `stream_wrapper.rs`
 - **Process Management**: Updated process management logic in `process_manager.rs` to increment the `PROCESS_KILL_COUNT` metric upon successful process termination.

Signed-off-by: Lei, HUANG <mrsatangel@gmail.com>

* feat/kill-process:
 Update metric description in `metrics.rs`

 - Changed the description of `PROCESS_KILL_COUNT` to reflect the count of killed processes instead of running processes in `metrics.rs`.

Signed-off-by: Lei, HUANG <mrsatangel@gmail.com>

* feat/kill-process:
 Update `greptime-proto` Dependency and Fix Response Field

 - **Updated Dependency**: Changed the `greptime-proto` Git revision in `Cargo.lock` and `Cargo.toml` to `f0913f1`.
 - **Code Fix**: Modified `frontend_grpc_handler.rs` to correct the response field from `found` to `success` in `KillProcessResponse`.

Signed-off-by: Lei, HUANG <mrsatangel@gmail.com>

---------

Signed-off-by: Lei, HUANG <mrsatangel@gmail.com>
2025-06-13 13:30:25 +00:00
zyy17
9633e794c7 fix: always use linux path style in windows platform unit tests (#6314)
Signed-off-by: zyy17 <zyylsxm@gmail.com>
2025-06-13 07:15:53 +00:00
Yingwen
eaf1e1198f refactor: Extract mito codec part into a new crate (#6307)
* chore: add a new crate mito-codec

Signed-off-by: evenyag <realevenyag@gmail.com>

* feat: port necessary mods for primary key codec

Signed-off-by: evenyag <realevenyag@gmail.com>

* refactor: use codec utils in mito-codec

Signed-off-by: evenyag <realevenyag@gmail.com>

* refactor: remove unused mods

Signed-off-by: evenyag <realevenyag@gmail.com>

* style: fix clippy

Signed-off-by: evenyag <realevenyag@gmail.com>

* refactor: remove Partition::is_partition_column()

Signed-off-by: evenyag <realevenyag@gmail.com>

* refactor: remove duplicated test utils

Signed-off-by: evenyag <realevenyag@gmail.com>

* chore: remove unused comment

Signed-off-by: evenyag <realevenyag@gmail.com>

* fix: fix is_partition_column check

Signed-off-by: evenyag <realevenyag@gmail.com>

---------

Signed-off-by: evenyag <realevenyag@gmail.com>
2025-06-13 07:14:29 +00:00
ZonaHe
505bf25505 feat: update dashboard to v0.9.3 (#6311)
Co-authored-by: ZonaHex <ZonaHex@users.noreply.github.com>
2025-06-13 07:13:12 +00:00
Ning Sun
f1b29ece3c feat: process id for session, query context and postgres (#6301)
* feat: process id for session, query context and postgres

Signed-off-by: Ning Sun <sunning@greptime.com>

* feat: add sql functions to retrieve connection/process id

Signed-off-by: Ning Sun <sunning@greptime.com>

---------

Signed-off-by: Ning Sun <sunning@greptime.com>
2025-06-12 16:53:57 +00:00
discord9
74df12e8c0 fix: check for zero parallelism (#6310)
* fix: check for zero parallelism

Signed-off-by: discord9 <discord9@163.com>

* chore: silently use default value

Signed-off-by: discord9 <discord9@163.com>

---------

Signed-off-by: discord9 <discord9@163.com>
2025-06-12 15:58:59 +00:00
discord9
be6a5d2da8 feat: parallelism hint in grpc (#6306)
* feat: parallelism hint in grpc

Signed-off-by: discord9 <discord9@163.com>

* chore: per review

Signed-off-by: discord9 <discord9@163.com>

* chore: comment

Signed-off-by: discord9 <discord9@163.com>

* chore: docs

Signed-off-by: discord9 <discord9@163.com>

---------

Signed-off-by: discord9 <discord9@163.com>
2025-06-12 10:12:45 +00:00
Ruihang Xia
7468a8ab2a feat: organize EXPLAIN ANALYZE VERBOSE's output in JSON format (#6308)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2025-06-12 09:55:53 +00:00
Lei, HUANG
5bb0466ff2 feat: introduce file group in compaction (#6261)
* fix/file-group-in-compaction:
 ### Enhance Compaction Logic with File Grouping

 - **`run.rs`**: Introduced `FileGroup` struct to manage groups of `FileHandle` objects, allowing for more efficient compaction operations. Updated `Ranged` and `Item` trait implementations to work with `FileGroup`.
 - **`test_util.rs`**: Added `new_file_handle_with_sequence` function to support file handles with sequence numbers, enhancing test utilities.
 - **`twcs.rs`**: Modified `TwcsPicker` to utilize `FileGroup` for managing files within windows, improving compaction logic. Updated `Window` struct to use `HashMap` for storing `FileGroup` objects.
 - **`version_util.rs`**: Updated version control utilities to handle sequence numbers in file metadata, aligning with new compaction logic.
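
 A stripped-down view of the grouping idea; this `FileHandle` stand-in keeps only what the sketch needs, and the real structs carry much more state:

```rust
use std::collections::HashMap;

/// Minimal stand-in for mito2's `FileHandle`.
struct FileHandle {
    file_id: u64,
    /// Sequence of the flush/compaction task that produced the file, if any.
    sequence: Option<u64>,
}

/// Files produced by the same compaction task are treated as one compaction unit.
#[derive(Default)]
struct FileGroup {
    files: Vec<FileHandle>,
}

/// A time window in the TWCS picker: sequence number -> files from that task.
#[derive(Default)]
struct Window {
    files: HashMap<Option<u64>, FileGroup>,
}

impl Window {
    fn add_file(&mut self, file: FileHandle) {
        self.files.entry(file.sequence).or_default().files.push(file);
    }
}
```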

Signed-off-by: Lei, HUANG <lhuang@greptime.com>

* fix/file-group-in-compaction:
 ### Add Test for File Group Assignment in TWCS

 - **Enhancements in `twcs.rs`:**
   - Added a new test `test_assign_file_groups_to_windows` to verify the correct assignment of file groups to windows.
   - Enhanced `test_assign_compacting_to_windows` with a new case to ensure files with overlapping time ranges and the same sequence are treated as one `FileGroup`.

Signed-off-by: Lei, HUANG <lhuang@greptime.com>

* fix/file-group-in-compaction:
 **Enhance Compaction Task Documentation and Initialization**

 - **`run.rs`**: Added documentation for `FileGroup` to clarify its role in representing a group of files created by the same compaction task.
 - **`twcs.rs`**: Introduced comments in the `Window` struct to explain the mapping of file sequences to file groups, indicating files created from the same compaction task. Simplified the initialization of the `files` hashmap using `HashMap::from`.

Signed-off-by: Lei, HUANG <mrsatangel@gmail.com>

---------

Signed-off-by: Lei, HUANG <lhuang@greptime.com>
Signed-off-by: Lei, HUANG <mrsatangel@gmail.com>
2025-06-12 09:33:40 +00:00
Ruihang Xia
f6db419afd feat: support using expressions as literal in PromQL (#6297)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2025-06-12 08:18:10 +00:00
Lei, HUANG
05b708ed2e feat: implement process manager and information_schema.process_list (#5865)
* ### Add Process List Management

 - **Error Handling Enhancements**:

* refactor: Update test IP addresses to include ports in ProcessKey

* feat/show-process-list:
 Refactor Process Management in Meta Module

 - Introduced `ProcessManager` for handling process registration and deregistration.
 - Added methods for managing and querying process states, including `register_query`, `deregister_query`, and `list_all_processes`.
 - Removed redundant process management code from the query module.
 - Updated error handling to reflect changes in process management.
 - Enhanced test coverage for process management functionalities.

* chore: rebase main

* add information schema process list table

* integrate process list table to system catalog

* build ProcessManager on frontend and standalone mode

* feat/show-process-list:
 **Add Process Management Enhancements**

 - **`manager.rs`**: Introduced `process_manager` to `SystemCatalog` and `KvBackendCatalogManager` for improved process handling.
 - **`information_schema.rs`**: Updated table insertion logic to conditionally include `PROCESS_LIST`.
 - **`frontend.rs`, `standalone.rs`**: Enhanced `StartCommand` to clone `process_manager` for better resource management.
 - **`instance.rs`, `builder.rs`**: Integrated `ProcessManager` into `Instance` and `FrontendBuilder` to manage queries.

* feat/show-process-list:
 ### Add Process Listing and Error Handling Enhancements

 - **Error Handling**: Introduced a new error variant `ListProcess` in `error.rs` to handle failures when listing running processes.
 - **Process List Implementation**: Enhanced `InformationSchemaProcessList` in `process_list.rs` to track running queries, including defining column names and implementing the `make_process_list` function to build the process list.
 - **Frontend Builder**: Added a `#[allow(clippy::too_many_arguments)]` attribute in `builder.rs` to suppress Clippy warnings for the `FrontendBuilder::new` function.

 These changes improve error handling and process tracking capabilities within the system.

* feat/show-process-list:
 Refactor imports in `process_list.rs`

 - Updated import paths for `Predicates` and `InformationTable` in `process_list.rs` to align with the new module structure.

* feat/show-process-list:
 Refactor process list generation in `process_list.rs`

 - Simplified the process list generation by removing intermediate row storage and directly building vectors.
 - Updated `process_to_row` function to use a mutable vector for current row data, improving memory efficiency.
 - Removed `rows_to_record_batch` function, integrating its logic directly into the main loop for streamlined processing.

* wip: move ProcessManager to catalog crate

* feat/show-process-list:
 - **Refactor Row Construction**: Updated row construction in multiple files to use references for `Value` objects, improving memory efficiency. Affected files include:
   - `cluster_info.rs`
   - `columns.rs`
   - `flows.rs`
   - `key_column_usage.rs`
   - `partitions.rs`
   - `procedure_info.rs`
   - `process_list.rs`
   - `region_peers.rs`
   - `region_statistics.rs`
   - `schemata.rs`
   - `table_constraints.rs`
   - `tables.rs`
   - `views.rs`
   - `pg_class.rs`
   - `pg_database.rs`
   - `pg_namespace.rs`
 - **Remove Unused Code**: Deleted unused functions and error variants related to process management in `process_list.rs` and `error.rs`.
 - **Predicate Evaluation Update**: Modified predicate evaluation functions in `predicate.rs` to work with references, enhancing performance.

* feat/show-process-list:
 ### Implement Process Management Enhancements

 - **Error Handling Enhancements**:
   - Added new error variants `BumpSequence`, `StartReportTask`, `ReportProcess`, and `BuildProcessManager` in `error.rs` to improve error handling for process management tasks.
   - Updated `ErrorExt` implementations to handle new error types.

 - **Process Manager Improvements**:
   - Introduced `ProcessManager` enhancements in `process_manager.rs` to manage process states using `ProcessWithState` and `ProcessState` enums.
   - Implemented periodic task `ReportTask` to report running queries to the KV backend.
   - Modified `register_query` and `deregister_query` methods to use the new state management system.

 - **Testing and Validation**:
   - Updated tests in `process_manager.rs` to validate new process management logic.
   - Replaced `dump` method with `list_all_processes` for listing processes.

 - **Integration with Frontend and Standalone**:
   - Updated `frontend.rs` and `standalone.rs` to handle `ProcessManager` initialization errors using `BuildProcessManager` error variant.

 - **Schema Adjustments**:
   - Modified `process_list.rs` in `system_schema/information_schema` to use the updated process listing method.

 - **Key-Value Conversion**:
   - Added `TryFrom` implementation for converting `Process` to `KeyValue` in `process_list.rs`.

* chore: remove register

* fix: sqlness tests

* merge main

Signed-off-by: Lei, HUANG <lhuang@greptime.com>

* feat/show-process-list:
 - **Update `greptime-proto` Dependency**: Updated the `greptime-proto` dependency in `Cargo.lock` and `Cargo.toml` to a new revision.
 - **Refactor `ProcessManager`**: Simplified the `ProcessManager` implementation by removing the use of `KvBackendRef` and `SequenceRef`, and replaced them with `AtomicU64` and `RwLock` for managing process IDs and catalogs in `process_manager.rs`.
 - **Remove Process List Metadata**: Deleted the `process_list.rs` file and removed related metadata key definitions in `key.rs`.
 - **Update Process List Logic**: Modified the process list logic in `process_list.rs` to use the new `ProcessManager` structure.
 - **Adjust Frontend and Standalone Start Commands**: Updated `frontend.rs` and `standalone.rs` to use the new `ProcessManager` constructor.

Signed-off-by: Lei, HUANG <lhuang@greptime.com>

* feat/show-process-list:
 - **Update `greptime-proto` Dependency**: Updated the `greptime-proto` dependency version in `Cargo.lock` and `Cargo.toml` to a new commit hash.
 - **Refactor Error Handling**: Removed unused error variants and added a new `ParseProcessId` error in `src/catalog/src/error.rs`.
 - **Enhance Process Management**: Introduced `DisplayProcessId` struct for better process ID representation and parsing in `src/catalog/src/process_manager.rs`.
 - **Revise Process List Schema**: Updated the schema and logic for process listing in `src/catalog/src/system_schema/information_schema/process_list.rs` to include new fields like `client` and `frontend`.

Signed-off-by: Lei, HUANG <lhuang@greptime.com>

* feat/show-process-list:
 ### Commit Message

 **Enhancements and Refactoring**

 - **Process Management:**
   - Refactored `ProcessManager` to list local processes with an optional catalog filter in `process_manager.rs`.
   - Updated related tests in `process_manager.rs` and `process_list.rs`.

 - **Client Enhancements:**
   - Added `frontend_client` method in `client.rs` to support gRPC communication with the frontend.

 - **Error Handling:**
   - Extended error handling in `error.rs` to include gRPC and Meta errors.

 - **Frontend Module:**
   - Introduced `selector.rs` for frontend client selection and process listing.
   - Updated `Cargo.toml` to include new dependencies and dev-dependencies.

 - **gRPC Server:**
   - Integrated `FrontendServer` in `builder.rs` for enhanced gRPC server capabilities.

Signed-off-by: Lei, HUANG <lhuang@greptime.com>

* feat/show-process-list:
 ### Commit Message

 **Refactor Process Management and Frontend Integration**

 - **Add `common-frontend` Dependency**:
   - Updated `Cargo.lock`, `Cargo.toml` files to include `common-frontend` as a dependency.

 - **Refactor Process Management**:
   - Moved `ProcessManager` trait and `DisplayProcessId` struct to `common-frontend`.
   - Updated `process_manager.rs` to use `MetaProcessManager` and `ProcessManagerRef`.
   - Removed `ParseProcessId` error variant from `error.rs` in `catalog` and `frontend`.

 - **Frontend gRPC Service**:
   - Added `frontend_grpc_handler.rs` to handle gRPC requests for frontend processes.
   - Updated `grpc.rs` and `builder.rs` to integrate `FrontendGrpcHandler`.

 - **Update Tests**:
   - Modified tests in `process_manager.rs` to align with new `ProcessManager` implementation.

 - **Remove Unused Code**:
   - Removed `DisplayProcessId` and related parsing logic from `process_manager.rs`.

Signed-off-by: Lei, HUANG <lhuang@greptime.com>

* feat/show-process-list:
 ### Add `MetaClientRef` to `MetaProcessManager` and Update Instantiation

 - **Files Modified**:
   - `src/catalog/src/process_manager.rs`
   - `src/cmd/src/frontend.rs`
   - `src/cmd/src/standalone.rs`

 - **Key Changes**:
   - Added `MetaClientRef` as an optional parameter to the `MetaProcessManager::new` method.
   - Updated instantiation of `MetaProcessManager` to include `MetaClientRef` where applicable.

 ### Update `ProcessManagerRef` Usage

 - **Files Modified**:
   - `src/catalog/src/kvbackend/manager.rs`
   - `src/catalog/src/system_schema/information_schema.rs`
   - `src/catalog/src/system_schema/information_schema/process_list.rs`
   - `src/frontend/src/instance.rs`
   - `src/frontend/src/instance/builder.rs`

 - **Key Changes**:
   - Ensured consistent usage of `ProcessManagerRef` across various modules.

Signed-off-by: Lei, HUANG <lhuang@greptime.com>

* feat/show-process-list:
 ## Refactor Process Management

 - **Unified Process Manager**:
   - Replaced `MetaProcessManager` with `ProcessManager` across the codebase.
   - Updated `ProcessManager` to use `Arc` for shared references and introduced a `Ticket` struct for query registration and deregistration.
   - Affected files: `manager.rs`, `process_manager.rs`, `frontend.rs`, `standalone.rs`, `frontend_grpc_handler.rs`, `instance.rs`, `builder.rs`, `cluster.rs`, `standalone.rs`.

 - **Stream Wrapper Implementation**:
   - Added `StreamWrapper` to handle record batch streams with process management.
   - Affected file: `stream_wrapper.rs`.

 - **Test Adjustments**:
   - Updated tests to align with the new `ProcessManager` implementation.
   - Affected file: `tests-integration/src/cluster.rs`, `tests-integration/src/standalone.rs`.

Signed-off-by: Lei, HUANG <lhuang@greptime.com>

* feat/show-process-list:
 ### Add Error Handling and Process Management

 - **Error Handling Enhancements**:
   - Added new error variants `ListProcess` and `CreateChannel` in `error.rs` to handle specific gRPC service invocation failures.
   - Updated error handling in `selector.rs` to use the new error variants for better context and error propagation.

 - **Process Management Integration**:
   - Introduced `process_manager` method in `instance.rs` to access the process manager.
   - Integrated `FrontendGrpcHandler` with process management in `server.rs` to handle gRPC requests related to process management.

 - **gRPC Server Enhancements**:
   - Made `frontend_grpc_handler` public in `grpc.rs` to allow external access and integration with other modules.

Signed-off-by: Lei, HUANG <lhuang@greptime.com>

* feat/show-process-list:
 Update `greptime-proto` dependency and enhance process management

 - **Dependency Update**: Updated `greptime-proto` in `Cargo.lock` and `Cargo.toml` to a new revision.
 - **Process Management**:
   - Modified `process_manager.rs` to include catalog filtering in `list_process`.
   - Updated `frontend_grpc_handler.rs` to handle catalog filtering in `list_process` requests.
 - **System Schema**: Added a TODO comment in `process_list.rs` for future user catalog filtering implementation.

Signed-off-by: Lei, HUANG <lhuang@greptime.com>

* feat/show-process-list:
 - **Update Workspace Dependencies**:
   - Modified `Cargo.toml` files in `src/catalog`, `src/common/frontend`, and `src/servers` to adjust workspace dependencies.

 - **Refactor `ProcessManager` Logic**:
   - Updated `process_manager.rs` to simplify the condition in the `select` method.

 - **Remove Unused Error Variants**:
   - Deleted `BuildProcessManager` error variant from `error.rs` in `src/cmd`.
   - Removed `InvalidProcessKey` error variant from `error.rs` in `src/common/meta`.

 - **Add License Header**:
   - Added Apache License header to `stream_wrapper.rs` in `src/frontend`.

 - **Update Test Results**:
   - Adjusted expected results in `information_schema.result` to reflect changes in the schema.

Signed-off-by: Lei, HUANG <lhuang@greptime.com>

* feat/show-process-list:
 ### Add Error Handling for Process Listing

 - **`src/catalog/src/error.rs`**: Introduced a new error variant `ListProcess` to handle failures in listing frontend nodes.
 - **`src/catalog/src/process_manager.rs`**: Updated `local_processes` and `list_all_processes` methods to return the new error type, adding context for error handling.
 - **`src/catalog/src/system_schema/information_schema/process_list.rs`**: Modified `make_process_list` to propagate errors using the new error handling mechanism.
 - **`src/servers/src/grpc/frontend_grpc_handler.rs`**: Enhanced error handling in the `list_process` method to log errors and return appropriate gRPC status codes.

Signed-off-by: Lei, HUANG <lhuang@greptime.com>

* feat/show-process-list:
 Update `greptime-proto` Dependency and Remove `frontend_client` Method

 - **Cargo.lock** and **Cargo.toml**: Updated the `greptime-proto` dependency to a new revision (`5f6119ac7952878d39dcde0343c4bf828d18ffc8`).
 - **src/client/src/client.rs**: Removed the `frontend_client` method from the `Client` implementation.

Signed-off-by: Lei, HUANG <lhuang@greptime.com>

* feat/show-process-list:
 ### Add Query Registration with Pre-Generated ID

 - **`process_manager.rs`**: Introduced `register_query_with_id` method to allow registering queries with a pre-generated ID. This includes creating a `ProcessInfo` instance and inserting it into the catalog. Added `next_id` method to generate the next process ID.
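
 Pre-generating the id is essentially a counter bump; a minimal sketch with an assumed allocator type:

```rust
use std::sync::atomic::{AtomicU64, Ordering};

struct ProcessIdAllocator {
    next_id: AtomicU64,
}

impl ProcessIdAllocator {
    /// Hands out a process id before the query is registered, so the same id can
    /// be attached to both the query context and the process-list entry.
    fn next_id(&self) -> u64 {
        self.next_id.fetch_add(1, Ordering::Relaxed)
    }
}
```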

Signed-off-by: Lei, HUANG <lhuang@greptime.com>

* feat/show-process-list:
 ### Update Process List Retrieval Method

 - **File**: `process_list.rs`
   - Updated the method for retrieving process lists from `local_processes` to `list_all_processes` to support asynchronous operations.

Signed-off-by: Lei, HUANG <lhuang@greptime.com>

* feat/show-process-list:
 ### Update error handling in `error.rs`

 - Refined status code handling for `CreateChannel` error by delegating to `source.status_code()`.
 - Separated `ListProcess` and `CreateChannel` error handling for clarity.

Signed-off-by: Lei, HUANG <lhuang@greptime.com>

---------

Signed-off-by: Lei, HUANG <lhuang@greptime.com>
2025-06-12 06:55:22 +00:00
Yiran
f4c3950f57 fix: doc links (#6304)
Signed-off-by: Yiran <cuiyiran3@gmail.com>
2025-06-12 03:18:26 +00:00
liyang
88c4409df4 ci: use the new meta backendStorage etcd structure (#6303)
Signed-off-by: liyang <daviderli614@gmail.com>
2025-06-12 03:17:32 +00:00
localhost
c10b8f8474 chore: add failover cache for pipeline table (#6284)
* chore: add second level cache for pipeline table

* chore: change pipeline failover cache name

* chore: add counter metrics for the get-pipeline operation
2025-06-12 03:15:02 +00:00
shuiyisong
041b683a8d refactor: remove PipelineMap and use Value instead (#6278)
* refactor: remove pipeline_map and use value instead

* chore: remove unused comments

* chore: move error to illegal state
2025-06-11 17:02:32 +00:00
Weny Xu
03bb6e4f28 feat(cli): add metadata get commands (#6299)
* refactor(cli): restructure cli modules and commands

Signed-off-by: WenyXu <wenymedia@gmail.com>

* feat(cli): add metadata get commands

Signed-off-by: WenyXu <wenymedia@gmail.com>

* feat(cli): enhance table metadata query capabilities

Signed-off-by: WenyXu <wenymedia@gmail.com>

* refactor: minor refactor

Signed-off-by: WenyXu <wenymedia@gmail.com>

---------

Signed-off-by: WenyXu <wenymedia@gmail.com>
2025-06-11 16:33:36 +00:00
discord9
09e5a6580f chore: silence clippy (#6298)
Signed-off-by: discord9 <discord9@163.com>
2025-06-11 14:32:41 +00:00
Lei, HUANG
f9f905ae14 fix: config docs (#6294)
fix/config-docs:
 Update `config.md` to specify default compression mode

 - Added default value `none` for `grpc.flight_compression` in both frontend and datanode sections of `config/config.md`.

Signed-off-by: Lei, HUANG <lhuang@greptime.com>
2025-06-11 07:31:42 +00:00
Lei, HUANG
1d53dd26ae chore: add option for arrow flight compression mode (#6283)
* chore/enable-flight-encoder:
 ### Add Flight Compression Support

 - **Configuration Updates**:
   - Added `grpc.flight_compression` option to `config/config.md`, `config/datanode.example.toml`, and `config/frontend.example.toml` to specify compression modes for Arrow IPC service.

 - **Code Enhancements**:
   - Updated `FlightEncoder` in `src/common/grpc/src/flight.rs` to support compression modes.
   - Modified `RegionServer` and `DatanodeBuilder` in `src/datanode/src/datanode.rs` and `src/datanode/src/region_server.rs` to handle `FlightCompression`.
   - Integrated `FlightCompression` in `src/servers/src/grpc.rs` and `src/servers/src/grpc/flight.rs` to manage compression settings.

 - **Testing and Integration**:
   - Updated test utilities and integration tests in `tests-integration/src/grpc/flight.rs` and `tests-integration/src/test_util.rs` to include `FlightCompression`.

Signed-off-by: Lei, HUANG <mrsatangel@gmail.com>

* chore/enable-flight-encoder:
 ### Enable Compression in FlightClient

 - **`client.rs`**: Updated `make_flight_client` to accept `send_compression` and `accept_compression` parameters, enabling Zstd compression for sending and receiving messages.
 - **`client_manager.rs`**: Modified `datanode` method to pass compression settings from `ChannelConfig` to `RegionRequester`.
 - **`database.rs`**: Adjusted calls to `make_flight_client` to include compression parameters.
 - **`region.rs`**: Updated `RegionRequester` to store and utilize compression settings.
 - **`frontend.rs`**: Configured `ChannelConfig` to enable compression based on options.
 - **`channel_manager.rs`**: Added `send_compression` and `accept_compression` fields to `ChannelConfig` with default values and updated tests accordingly.

Signed-off-by: Lei, HUANG <lhuang@greptime.com>

* chore/enable-flight-encoder:
 ### Update Compression Defaults and Documentation

 - **Configuration Files**: Updated `datanode.example.toml` and `frontend.example.toml` to include a default setting comment for `flight_compression`, specifying it defaults to `none`.
 - **gRPC Server Code**: Modified `grpc.rs` to set `None` as the default for `FlightCompression` instead of `ArrowIpc`.
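
 Schematically, the default moves from the Arrow IPC mode to no compression; only `None` and `ArrowIpc` are named in these messages, and the derives here are assumptions:

```rust
/// Compression applied to Arrow Flight responses.
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq)]
pub enum FlightCompression {
    /// No compression; the documented default.
    #[default]
    None,
    /// Compress the Arrow IPC payload carried inside Flight messages.
    ArrowIpc,
}
```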

Signed-off-by: Lei, HUANG <lhuang@greptime.com>

---------

Signed-off-by: Lei, HUANG <mrsatangel@gmail.com>
Signed-off-by: Lei, HUANG <lhuang@greptime.com>
2025-06-11 06:54:10 +00:00
localhost
01796c9cc0 chore: org cli sub command (#6265)
* chore: org cli sub command

* chore: make clippy happy

* chore: fix info command not supporting absolute paths

* chore: fix cli test

* Apply suggestions from code review

Co-authored-by: Weny Xu <wenymedia@gmail.com>

* chore: reorganizing the cli tool

* chore: fix limit issue

* chore: add some doc for cli

* chore: format code

---------

Co-authored-by: Weny Xu <wenymedia@gmail.com>
2025-06-11 03:34:56 +00:00
liyang
9469a8f8f2 ci: add signature information when updating downstream repository (#6282)
Signed-off-by: liyang <daviderli614@gmail.com>
2025-06-10 17:18:29 +00:00
Ruihang Xia
2fabe346a1 fix: null value handling on PromQL's join (#6289)
* fix: null value handling on PromQL's join

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* update sqlness comment

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2025-06-10 13:24:09 +00:00
Zhenchi
c26138963e refactor: unify function registry (Part 1) (#6262)
* refactor: unify function registry (Part 1)

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* refactor: simplify via register_scalar

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

---------

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>
2025-06-10 10:11:06 +00:00
jeremyhi
12648f388a feat: refactor grpc options of metasrv (#6275)
* feat: refactor grpc options of metasrv

* fix: unit test

* feat: config update
2025-06-10 01:35:57 +00:00
Weny Xu
2979aa048e fix(meta): enhance postgres election client with timeouts and reconnection (#6276)
* fix(meta): enhance postgres election client with timeouts and reconnection

Signed-off-by: WenyXu <wenymedia@gmail.com>

* chore: apply suggestions from CR

Signed-off-by: WenyXu <wenymedia@gmail.com>

* chore: apply suggestions from CR

Signed-off-by: WenyXu <wenymedia@gmail.com>

---------

Signed-off-by: WenyXu <wenymedia@gmail.com>
2025-06-09 13:51:13 +00:00
Weny Xu
74222c3070 chore: improve CI debugging and resource configuration (#6274)
* chore: describe pods on CI failure

Signed-off-by: WenyXu <wenymedia@gmail.com>

* chore: increase memory limit for main pod template from 2Gi to 3Gi

Signed-off-by: WenyXu <wenymedia@gmail.com>

---------

Signed-off-by: WenyXu <wenymedia@gmail.com>
2025-06-09 11:40:32 +00:00
dennis zhuang
0311db3089 fix: export metrics settings in sample config (#6170)
* fix: export metrics settings in sample config

Signed-off-by: Dennis Zhuang <killme2008@gmail.com>

* chore: forgot update docs

Signed-off-by: Dennis Zhuang <killme2008@gmail.com>

* fix: test loading example configs

Signed-off-by: Dennis Zhuang <killme2008@gmail.com>

---------

Signed-off-by: Dennis Zhuang <killme2008@gmail.com>
2025-06-09 10:53:01 +00:00
zyy17
e434294a0c refactor: support to get trace id with time range (#6236)
* refactor: add time range for getting trace id

* test: add unit test
2025-06-09 08:53:20 +00:00
dennis zhuang
8d2c1b7f6a ci: refactor bump downstream versions workflow and add demo-scene (#6171)
* ci: refactor bump downstream versions and adds demo-scene

Signed-off-by: Dennis Zhuang <killme2008@gmail.com>

* chore: rename

Signed-off-by: Dennis Zhuang <killme2008@gmail.com>

* chore: style

Signed-off-by: Dennis Zhuang <killme2008@gmail.com>

* fix: forgot DEMO_REPO_TOKEN

Signed-off-by: Dennis Zhuang <killme2008@gmail.com>

* fix: demo repo name

Signed-off-by: Dennis Zhuang <killme2008@gmail.com>

---------

Signed-off-by: Dennis Zhuang <killme2008@gmail.com>
2025-06-09 03:19:00 +00:00
Lei, HUANG
c50e84095e feat: disable compression for do_get API (#6254)
* feat/disable-flight-compression:
 ### Commit Summary

 - **Add Compression Control in Flight Encoder**: Introduced a new method `with_compression_disabled` in `FlightEncoder` to allow encoding without compression in `flight.rs`.
 - **Update Flight Stream Initialization**: Modified `FlightRecordBatchStream` to use the new `FlightEncoder::with_compression_disabled` method for initializing the encoder in `stream.rs`.

* feat/disable-flight-compression:
 Remove Unused Import in `flight.rs`

 - Removed the unused import `write_message` from `flight.rs` to clean up the codebase.

* feat/disable-flight-compression:
 ### Disable Compression in Flight Encoder

 - Updated `tests-integration/src/grpc/flight.rs` to use `FlightEncoder::with_compression_disabled()` instead of `FlightEncoder::default()` for encoding `FlightMessage::Schema` and `FlightMessage::RecordBatch`. This change disables compression in the Flight encoder for these operations.

Signed-off-by: Lei, HUANG <lhuang@greptime.com>

* Signed-off-by: Lei, HUANG <lhuang@greptime.com>

* disable flight client compression

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Lei, HUANG <lhuang@greptime.com>
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
Co-authored-by: Ruihang Xia <waynestxia@gmail.com>
2025-06-09 03:02:28 +00:00
Weny Xu
d3d233257d feat: add some metasrv metrics to grafana dashboard (#6264)
* feat: add metasrv dashboard panels

Signed-off-by: WenyXu <wenymedia@gmail.com>

* chore: apply suggestions from CR

Signed-off-by: WenyXu <wenymedia@gmail.com>

---------

Signed-off-by: WenyXu <wenymedia@gmail.com>
2025-06-09 02:41:00 +00:00
zyy17
fdf32a8f46 refactor: respect data_home as root data home directory (#6050)
* refactor: initialize logging dir by using data_home

* chore: remove tail '/' for dir name
2025-06-09 02:31:21 +00:00
Lei, HUANG
69870e2762 fix(mito): use 1day as default time partition duration (#6202)
* fix unit tests

* fix: sqlness

* fix/default-time-window:
 ## Add Helper Functions and Enhance Compaction Tests

 - **Refactor Compaction Logic**: Introduced helper functions `flush` and `compact` in `compaction_test.rs` to streamline compaction operations.
 - **Enhance Compaction Tests**: Added a new test `test_infer_compaction_time_window` in `compaction_test.rs` to verify compaction time window inference.
 - **Testing Improvements**: Added `#[cfg(test)]` attribute to `new_multi_partitions` in `time_partition.rs` to ensure it's only included in test builds.

* fix/default-time-window:
 - **Refactor `TimePartition` Struct**: Removed unnecessary comments regarding `time_range` in `time_partition.rs`.
 - **Enhance `TimePartitions` Functionality**: Added a method `part_duration_or_default` to provide a default partition duration in `time_partition.rs`.
 - **Update SQL Test Cases**: Modified SQL operations and expected results in `scan_big_varchar.result` and `scan_big_varchar.sql` to reflect changes in data manipulation logic.

* fix/default-time-window:
 ### Update Time Partition Default Duration

 - **Refactor Default Duration**: Introduced `INITIAL_TIME_WINDOW` constant to define the default time window duration as `Duration::from_days(1)`. This change replaces multiple instances of the hardcoded default duration across the `time_partition.rs` file.
 - **Files Affected**: `time_partition.rs`

* fix/default-time-window:
 ## Update Partition Duration Handling

 - **`time_partition.rs`**: Refactored `part_duration` to be non-optional, removing `Option` wrapper. Updated logic to use `unwrap_or` with `INITIAL_TIME_WINDOW` where necessary. Adjusted related methods and tests to accommodate this change.
 - **`version.rs` (memtable and region)**: Updated handling of `part_duration` to align with changes in `time_partition.rs`, ensuring consistent use of non-optional `Duration`.

* fix/default-time-window:
 ### Improve Error Context in `time_partition.rs`

 - Enhanced error context message in `time_partition.rs` to provide clearer information on partition time range issues, including bucket size details.

Signed-off-by: Lei, HUANG <lhuang@greptime.com>

---------

Signed-off-by: Lei, HUANG <lhuang@greptime.com>
2025-06-08 16:20:26 +00:00
Logic
f9f4ac1dca feat: Support export cli export to OSS (#6225)
* feat(object_store): add support for Alibaba Cloud OSS

- Implement OSS backend in object_store module
- Add OSS-related options to ExportCommand
- Update build_operator to support OSS
- Modify parse_url to handle OSS schema

Signed-off-by: Logic <zqr10159@dromara.org>

* feat(object_store): add support for Alibaba Cloud OSS

- Implement OSS backend in object_store module
- Add OSS-related options to ExportCommand
- Update build_operator to support OSS
- Modify parse_url to handle OSS schema

Signed-off-by: Logic <zqr10159@dromara.org>

* test(object_store): update OSS backend tests with comprehensive scenarios

- Remove minimal case test for OSS backend
- Update test for OSS backend with all fields valid
- Remove invalid allow_anonymous test case

Signed-off-by: Logic <zqr10159@dromara.org>

* feat(datasource): add support for OSS (Object Storage Service)

- Implement is_supported_in_oss function to check if a key is supported in OSS configuration
- Add build_oss_backend function for creating an OSS backend
- Update requests module to include OSS support check

Signed-off-by: Logic <zqr10159@dromara.org>

* refactor(export): enhance security and logging for sensitive data

- Replace plain strings with SecretString for sensitive information
- Implement masking of sensitive data in SQL logs
- Update handling of S3 and OSS credentials
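
 An illustrative sketch of the masking idea, assuming simple key-based masking; it is not the project's implementation, which reportedly relies on `SecretString`:

```rust
// Mask option values whose keys look credential-like before logging.
fn mask_option(key: &str, value: &str) -> String {
    const SENSITIVE: &[&str] = &["access_key_id", "secret_access_key", "access_key_secret"];
    if SENSITIVE.iter().any(|&k| key.eq_ignore_ascii_case(k)) {
        format!("{key}='******'")
    } else {
        format!("{key}='{value}'")
    }
}

fn main() {
    assert_eq!(mask_option("ACCESS_KEY_ID", "AKIA123"), "ACCESS_KEY_ID='******'");
    assert_eq!(mask_option("region", "us-east-1"), "region='us-east-1'");
}
```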

Signed-off-by: Logic <zqr10159@dromara.org>

* refactor(export): generalize remote storage support and rename options

- Rename `s3_ddl_local_dir` to `ddl_local_dir` for better clarity
- Update comments to support both S3 and OSS remote storage options
- Modify logic to handle remote storage options more generically

Signed-off-by: Logic <zqr10159@dromara.org>

* refactor(export): generalize remote storage support and rename options

- Rename `s3_ddl_local_dir` to `ddl_local_dir` for better clarity
- Update comments to support both S3 and OSS remote storage options
- Modify logic to handle remote storage options more generically

Signed-off-by: Logic <zqr10159@dromara.org>

---------

Signed-off-by: Logic <zqr10159@dromara.org>
2025-06-07 15:39:33 +00:00
zyy17
99e56af98c feat: add the gauge to indicate the CPU and memory limit in the cgroups environment (#6238)
* wip

* feat: add cpu and memory limit gauge

* chore: add some test cases

* docs: polish some docs

* refactor: remove '#[cfg(target_os = "linux")]'

* refactor: add cfg(target_os) in get_cpu_limit() and get_memory_limit()
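
 A standalone sketch of how such a memory-limit value might be read under cgroup v2, assuming the unified hierarchy is mounted at `/sys/fs/cgroup`; this is not the crate's code:

```rust
use std::fs;

// Read the cgroup v2 memory limit; "max" means no limit is set.
fn cgroup_v2_memory_limit() -> Option<u64> {
    let raw = fs::read_to_string("/sys/fs/cgroup/memory.max").ok()?;
    match raw.trim() {
        "max" => None,
        value => value.parse().ok(),
    }
}

fn main() {
    match cgroup_v2_memory_limit() {
        Some(bytes) => println!("cgroup memory limit: {bytes} bytes"),
        None => println!("no cgroup memory limit detected"),
    }
}
```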
2025-06-07 03:09:22 +00:00
shuiyisong
538b5abaae chore: support table suffix in hint (#6223)
* feat: pipeline recognize hints from exec

* chore: rename and add test

* chore: minor improve

* chore: rename and add comments

* fix: typos

* feat: add initial impl for vrl processor

* chore: update processors to allow vrl process

* feat: pipeline recognize hints from exec

* chore: rename and add test

* chore: minor improve

* chore: rename and add comments

* fix: typos

* chore: remove unnecessary clone fn

* chore: group metrics

* chore: use struct in transform output enum

* test: add test for vrl

* fix: leaked conflicts

* chore: merge branch code & add check in compile

* fix: check condition

* fix: check auto-transform timeindex

* chore: support table_suffix in hint

* chore: add test for table suffix in vrl hint

* refactor: change context_opt to a struct
2025-06-06 20:39:10 +00:00
shuiyisong
a2b3ad77df feat(pipeline): vrl processor (#6205)
* feat: pipeline recognize hints from exec

* chore: rename and add test

* chore: minor improve

* chore: rename and add comments

* fix: typos

* feat: add initial impl for vrl processor

* chore: update processors to allow vrl process

* feat: pipeline recognize hints from exec

* chore: rename and add test

* chore: minor improve

* chore: rename and add comments

* fix: typos

* chore: remove unnecessary clone fn

* chore: group metrics

* chore: use struct in transform output enum

* test: add test for vrl

* fix: leaked conflicts

* chore: merge branch code & add check in compile

* fix: check condition
2025-06-06 16:35:19 +00:00
liyang
0eb9e97f79 ci: increase upload s3 retry times (#6263)
ci: increase upload s3 retry times

Signed-off-by: liyang <daviderli614@gmail.com>
2025-06-06 12:42:03 +00:00
Weny Xu
06b1627da5 fix: skip wal replay when opening follower regions (#6234)
* chore: introduce `follower_regions` field for `DatanodeTableValue`

* fix: skip wal replay when opening follower regions

* chore: add enterprise feature gate

* fix: fix unit tests

* feat: improve RegionRoleSet backward compatibility
2025-06-06 07:29:35 +00:00
Weny Xu
0d4f27a699 fix: convert JSON type to JSON string in COPY TABLE TO statement (#6255)
* fix: convert JSON type to JSON string in COPY TABLE TO statement

* chore: apply suggestions from CR

* chore: apply suggestions from CR
2025-06-06 02:23:57 +00:00
Ruihang Xia
c4da8bb69d feat: don't allow creating logical table with partitions (#6249)
* feat: don't allow creating logical table with partitions

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix clippy

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2025-06-05 12:38:47 +00:00
discord9
0bd8856e2f chore: pub flow info (#6253)
* chore: make all flow info's field public

* chore: expose flow_route

* chore: more pub
2025-06-05 12:34:11 +00:00
Lei, HUANG
92c5a9f5f4 chore: allow numeric values in alter statements (#6252)
chore/allow-numberic-values-in-alter:
 ### Commit Message

 Enhance `alter_parser.rs` to Support Numeric Values

 - Updated `parse_string_options` function in `alter_parser.rs` to handle numeric literals in addition to string literals and `NULL` for alter table statements.
 - Added a new test `test_parse_alter_with_numeric_value` in `alter_parser.rs` to verify the parsing of numeric values in alter table options.
2025-06-05 02:16:53 +00:00
Weny Xu
80c5af0ecf fix: ignore incomplete WAL entries during read (#6251)
* fix: ignore incomplete entry

* fix: fix unit tests
2025-06-04 11:16:42 +00:00
LFC
7afb77fd35 fix: add "query" options to standalone (#6248) 2025-06-04 08:47:31 +00:00
discord9
0b9af77fe9 chore: test sleep longer (#6247)
* chore: test sleep longer

* Windows timer resolution is 15.6ms, so the test needs a longer sleep
2025-06-04 08:18:44 +00:00
discord9
cbafb6e00b feat(flow): flow streaming mode in list expr support (#6229)
* feat: flow streaming in list support

* chore: per review

* chore: per review

* fix: expr correct type
2025-06-04 08:05:20 +00:00
LFC
744a754246 fix: add missing features (#6245) 2025-06-04 07:13:39 +00:00
fys
9cd4a2c525 feat: add trigger ddl manager (#6228)
* feat: add trigger ddl manager

* chore: reduce the number of cfg feature code blocks

* upgrade greptime-proto

* chore: upgrade greptime-proto
2025-06-04 06:38:02 +00:00
liyang
180920327b ci: add option to choose whether to upload artifacts to S3 in the development build (#6232)
ci: add option to choose whether to upload artifacts to S3 in the development build
2025-06-04 03:49:53 +00:00
Yingwen
ee4f830be6 fix: do not accommodate fields for multi-value protocol (#6237) 2025-06-04 01:10:52 +00:00
shuiyisong
69975f1f71 feat: pipeline with insert options (#6192)
* feat: pipeline recognize hints from exec

* chore: rename and add test

* chore: minor improve

* chore: rename and add comments

* fix: typos

* chore: remove unnecessary clone fn

* chore: group metrics

* chore: use struct in transform output enum

* chore: update hint prefix
2025-06-03 18:46:48 +00:00
discord9
38cac301f2 refactor(flow): limit the size of query (#6216)
* refactor: not wait for slow query

* chore: clippy

* chore: fmt

* WIP: time range lock

* WIP

* refactor: rm over-complicated query pool

* chore: add more metrics& rm sql from slow query metrics
2025-06-03 12:27:07 +00:00
Yuhan Wang
083c22b90a refactor: extract some common functions and structs in election module (#6172)
* refactor: extract some common functions and structs in election module

* chore: add comments and modify a function name

* chore: add comments and modify a function name

* fix: missing 2 lines in license header

* fix: acqrel

* chore: apply comment suggestions

* Update src/meta-srv/src/election.rs

Co-authored-by: jeremyhi <jiachun_feng@proton.me>

---------

Co-authored-by: jeremyhi <jiachun_feng@proton.me>
2025-06-03 11:31:30 +00:00
Lei, HUANG
fdd164c0fa fix(mito): revert initial builder capacity for TimeSeriesMemtable (#6231)
* fix/initial-builder-cap:
 ### Enhance Series Initialization and Capacity Management

 - **`simple_bulk_memtable.rs`**: Updated the `Series` initialization to use `with_capacity` with a specified capacity of 8192, improving memory management.
 - **`time_series.rs`**: Introduced `with_capacity` method in `Series` to allow custom initial capacity for `ValueBuilder`. Adjusted `INITIAL_BUILDER_CAPACITY` to 16 for more efficient memory usage. Added a new `new` method to maintain backward compatibility.

* fix/initial-builder-cap:
 ### Adjust Memory Allocation in Memtable

 - **`simple_bulk_memtable.rs`**: Reduced the initial capacity of `Series` from 8192 to 1024 to optimize memory usage.
 - **`time_series.rs`**: Decreased `INITIAL_BUILDER_CAPACITY` from 16 to 4 to improve efficiency in vector building.
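
 A sketch of the `new`/`with_capacity` split the commit describes; `Series` here is a stand-in holding a plain `Vec`, not the real memtable builder:

```rust
const INITIAL_BUILDER_CAPACITY: usize = 4;

struct Series {
    values: Vec<i64>,
}

impl Series {
    // Cheap default for ordinary callers.
    fn new() -> Self {
        Self::with_capacity(INITIAL_BUILDER_CAPACITY)
    }

    // Larger preallocation for hot paths that know their size.
    fn with_capacity(capacity: usize) -> Self {
        Self { values: Vec::with_capacity(capacity) }
    }
}

fn main() {
    let hot = Series::with_capacity(1024);
    assert!(hot.values.capacity() >= 1024);
    let cold = Series::new();
    assert!(cold.values.capacity() >= INITIAL_BUILDER_CAPACITY);
}
```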
2025-06-03 08:25:02 +00:00
Zhenchi
078afb2bd6 feat: bloom filter index applier support or eq chain (#6227)
* feat: bloom filter index applier support or eq chain

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* address comments

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

---------

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>
2025-06-03 08:08:19 +00:00
localhost
477e4cc344 chore: add pg mysql be default feature in cli (#6230) 2025-06-03 07:09:26 +00:00
Lei, HUANG
078d83cec2 chore: add some metrics to grafana dashboard (#6169)
* add compaction elapsed time avg and bulk request convert elapsed time to grafana dashboard

* fix: standalone dashboard conversion

* chore: newline

---------

Co-authored-by: Yingwen <realevenyag@gmail.com>
2025-06-03 03:33:11 +00:00
liyang
7705d84d83 docs: fix bad link (#6222)
* docs: fix bad link

* Update how-to-profile-memory.md
2025-06-03 03:19:10 +00:00
dennis zhuang
0d81400bb4 feat: supports select @@session.time_zone (#6212) 2025-06-03 02:32:19 +00:00
Weny Xu
1d7ae66e75 fix: remove stale region failover detectors (#6221)
* fix: remove stale region failover detectors

* fix: fix unit tests
2025-05-30 10:27:06 +00:00
shuiyisong
af6cf999c1 chore: shared pipeline under same catalog with compatibility (#6143)
* chore: support shared pipeline under catalog with compatibility

* test: add test for cross schema ref

* chore: use empty string schema by default

* chore: remove unwrap in the patch

* fix: df check
2025-05-30 07:19:32 +00:00
jeremyhi
54869a1329 chore: clear metadata field after updating metadata (#6215)
chore: clear metadata field after updating metadata
2025-05-30 07:06:39 +00:00
jeremyhi
3104d49434 chore: example of http config in metasrv (#6218)
* chore: example of http config in metasrv

* docs: make config-docs
2025-05-30 03:27:54 +00:00
fys
b4d00fb499 feat: support SQL parsing for trigger show (#6217)
* feat: support SQL parsing for trigger show

* add excludes in licenserc

* refine comment

* fix: typo

* fix: add show/trigger.rs to excludes in licenserc
2025-05-29 12:00:36 +00:00
Ning Sun
4ae6df607b feat: update pgwire to 0.30 (#6209) 2025-05-29 11:47:00 +00:00
Lei, HUANG
183e1dc031 feat(http): lossy string validation in prom remote write (#6213)
* feat/lossy-string-validation-in-prom-remote-write:
 ### Commit Message

 #### Refactor Prometheus Validation Mode

 - **Replace `is_strict_mode` with `PromValidationMode` Enum:**
   - Updated `HttpOptions` and related structures to use `PromValidationMode` enum instead of the boolean `is_strict_mode`.
   - Modified functions and tests to accommodate the new enum, ensuring flexible validation modes (`Strict`, `Lossy`, `Unchecked`).
   - Affected files: `server.rs`, `prom_decode.rs`, `http.rs`, `prom_store.rs`, `prom_row_builder.rs`, `proto.rs`, `prom_store_test.rs`, `test_util.rs`, `http.rs`.

 - **Enhance UTF-8 String Decoding:**
   - Introduced `decode_string` function to handle UTF-8 string decoding based on the selected `PromValidationMode`.
   - Affected files: `proto.rs`, `prom_row_builder.rs`.

 This refactor improves the flexibility and clarity of Prometheus request handling by allowing different validation strategies.
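
 A sketch of the three validation modes, using names from the commit message; the real signatures likely differ, and the truly unchecked path is kept safe here:

```rust
enum PromValidationMode {
    Strict,
    Lossy,
    Unchecked,
}

fn decode_string(bytes: &[u8], mode: PromValidationMode) -> Option<String> {
    match mode {
        // Reject labels/values that are not valid UTF-8.
        PromValidationMode::Strict => String::from_utf8(bytes.to_vec()).ok(),
        // Replace invalid sequences with U+FFFD instead of failing.
        PromValidationMode::Lossy => Some(String::from_utf8_lossy(bytes).into_owned()),
        // The real variant presumably skips validation; checked here for safety.
        PromValidationMode::Unchecked => String::from_utf8(bytes.to_vec()).ok(),
    }
}

fn main() {
    let invalid = [0x66, 0x6f, 0xff];
    assert!(decode_string(&invalid, PromValidationMode::Strict).is_none());
    assert_eq!(
        decode_string(&invalid, PromValidationMode::Lossy).unwrap(),
        "fo\u{fffd}"
    );
    assert!(decode_string(b"ok", PromValidationMode::Unchecked).is_some());
}
```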

* feat/lossy-string-validation-in-prom-remote-write:
 - **Add Prometheus Validation Mode Configuration:**
   - Updated `config/config.md`, `config/frontend.example.toml`, and `config/standalone.example.toml` to include `http.prom_validation_mode` setting for Prometheus remote write requests.

 - **Enhance Benchmarking for Prometheus Requests:**
   - Modified `src/servers/benches/prom_decode.rs` to benchmark different Prometheus validation modes (`Strict`, `Lossy`, `Unchecked`).

 - **Implement and Test String Decoding:**
   - Added `decode_string` function and comprehensive tests in `src/servers/src/proto.rs` to handle string decoding with different validation modes.

* feat/lossy-string-validation-in-prom-remote-write:
 ### Add Histogram Buckets to Metrics

 - **Files Modified**: `src/servers/src/metrics.rs`
 - **Key Changes**:
   - Added specific histogram buckets to `METRIC_MYSQL_QUERY_TIMER`, `METRIC_POSTGRES_QUERY_TIMER`, and `METRIC_SERVER_GRPC_PROM_REQUEST_TIMER` to enhance granularity in query elapsed time metrics.

* feat/lossy-string-validation-in-prom-remote-write:
 ### Update Prometheus Validation Mode Default

 - **Config Documentation**: Updated the default description for `http.prom_validation_mode` to indicate that "strict" is the default option in `config.md`, `frontend.example.toml`, and `standalone.example.toml`.
 - **HTTP Server Implementation**: Changed the default `prom_validation_mode` to `PromValidationMode::Strict` in `src/servers/src/http.rs`.

* feat/lossy-string-validation-in-prom-remote-write:
 **Commit Message:**

 Update Prometheus Validation Mode to Strict

 - Changed `http.prom_validation_mode` from `unchecked` to `strict` in `config.md`, `frontend.example.toml`, and
 `standalone.example.toml` to enforce strict validation of Prometheus remote write requests.
2025-05-29 11:08:57 +00:00
localhost
886c2dba76 chore: fix rds kv backend test (#6214)
* chore: fix rds kv backend test

* Revert "chore: fix rds kv backend test"

This reverts commit 9b5b6bacc0.

* chore: introduce helper macro

---------

Co-authored-by: WenyXu <wenymedia@gmail.com>
2025-05-29 09:12:31 +00:00
Lei, HUANG
4e615e8906 feat(wal): support bulk wal entries (#6178)
* feat/bulk-wal:
 ### Refactor: Simplify Data Handling in LogStore Implementations

 - **`kafka/log_store.rs`, `raft_engine/log_store.rs`, `wal.rs`, `raw_entry_reader.rs`, `logstore.rs`:**
   - Refactored `entry` and `build_entry` functions to accept `Vec<u8>` directly instead of `&mut Vec<u8>`.
   - Removed usage of `std::mem::take` for data handling, simplifying the code and improving readability.
   - Updated test cases to align with the new function signatures.

* feat/bulk-wal:
 ### Add Support for Bulk WAL Entries and Flight Data Encoding

 - **Add `raw_data` field to `BulkPart` and related structs**: Updated `BulkPart` and related structures in `src/mito2/src/memtable/bulk/part.rs`, `src/mito2/src/memtable/simple_bulk_memtable.rs`, `src/mito2/src/memtable/time_partition.rs`, `src/mito2/src/region_write_ctx.rs`,
 `src/mito2/src/worker/handle_bulk_insert.rs`, and `src/store-api/src/region_request.rs` to include a new `raw_data` field for handling Arrow IPC data.
 - **Implement Flight Data Encoding**: Added a new module `flight` in `src/common/test-util/src/flight.rs` to encode record batches to Flight data format.
 - **Update `greptime-proto` dependency**: Changed the revision of the `greptime-proto` dependency in `Cargo.lock` and `Cargo.toml`.
 - **Enhance WAL Writer and Tests**: Modified `src/mito2/src/wal.rs` and related test files to support bulk WAL entries and added tests for encoding and handling bulk data.

* feat/bulk-wal:
 - **Update `greptime-proto` Dependency**: Updated the `greptime-proto` dependency to a new revision in `Cargo.lock` and `Cargo.toml`.
 - **Add `common-grpc` Dependency**: Added `common-grpc` as a dependency in `Cargo.lock` and `src/mito2/Cargo.toml`.
 - **Refactor `BulkPart` Structure**: Removed `num_rows` field and added `num_rows()` method in `src/mito2/src/memtable/bulk/part.rs`. Updated related usages in `src/mito2/src/memtable/simple_bulk_memtable.rs`, `src/mito2/src/memtable/time_partition.rs`, `src/mito2/src/memtable/time_series.rs`,
 `src/mito2/src/region_write_ctx.rs`, and `src/mito2/src/worker/handle_bulk_insert.rs`.
 - **Implement `TryFrom` and `From` for `BulkWalEntry`**: Added implementations for converting between `BulkPart` and `BulkWalEntry` in `src/mito2/src/memtable/bulk/part.rs`.
 - **Handle Bulk Entries in Region Opener**: Added logic to process bulk entries in `src/mito2/src/region/opener.rs`.
 - **Fix `BulkInsertRequest` Handling**: Corrected `region_id` handling in `src/operator/src/bulk_insert.rs` and `src/store-api/src/region_request.rs`.
 - **Add Error Variant for `ConvertBulkWalEntry`**: Added a new error variant in `src/mito2/src/error.rs` for handling bulk WAL entry conversion errors.

* fix: ci

* feat/bulk-wal:
 Add bulk write operation in `opener.rs`

 - Enhanced the region write context by adding a call to `write_bulk()` after `write_memtable()` in `opener.rs`.
 - This change aims to improve the efficiency of writing operations by enabling bulk writes.

* feat/bulk-wal:
 Enhance error handling and metrics in `bulk_insert.rs`

 - Updated `Inserter` to improve error handling by capturing the result of `datanode.handle(request)` and incrementing the `DIST_INGEST_ROW_COUNT` metric with the number of affected rows.

* feat/bulk-wal:
 ### Remove Encode Error Handling for WAL Entries

 - **`error.rs`**: Removed the `EncodeWal` error variant and its associated handling.
 - **`wal.rs`**: Eliminated the `entry_encode_buf` buffer and its usage for encoding WAL entries. Replaced with direct encoding to a vector using `encode_to_vec()`.
2025-05-29 09:10:30 +00:00
dennis zhuang
9afc61f778 feat: supports @@session.time_zone for mysql (#6210)
* feat: supports @@session.time_zone for mysql

* test: assert timezone
2025-05-29 05:26:49 +00:00
fys
d22084e90c feat: support parsing trigger create sql (#6197)
* feat: support parsing trigger create sql

* chore: add context for TryFromInt error

* chore: refine error msg about int convert

* avoid clone of token
2025-05-29 04:14:27 +00:00
Weny Xu
5e9b5d981f chore: fix feature gates for pg and mysql kvbackend (#6211) 2025-05-29 03:58:06 +00:00
Weny Xu
b01fce95a0 fix: remove poison key before retrying procedure on retryable errors (#6189)
* fix(meta): remove poison key before retrying procedure on retriable errors

* refactor: enhance error handling in DDL procedures
2025-05-29 01:17:38 +00:00
Ning Sun
9fbcf9b7e7 chore: switch nix index to 25.05 release (#6181)
chore: switch nix index to 25.05 release
2025-05-29 01:13:35 +00:00
localhost
dc3591655e chore: add metrics for rds kv backend (#6201)
* chore: add metrics for rds kv backend

* chore: make clippy happy

* chore: remove useless rds timer

* chore: remove  in record_rds_sql_execute_elapsed macro

* chore: change some str literal to constant

* chore: fix import issue

* chore: remove impl Display for RangeTemplateType
2025-05-28 13:11:11 +00:00
jeremyhi
aca7ad82b1 chore: correct some CAS ordering args (#6200)
chore: correct some CAS ordering args
2025-05-28 09:21:46 +00:00
yihong
10fa6d8736 docs: nit from github -> GitHub (#6199)
doc: nit from github -> GitHub

Signed-off-by: yihong0618 <zouzou0208@gmail.com>
2025-05-28 07:20:02 +00:00
localhost
92422dafca feat: add CLI tool to export metadata (#6150)
* chore: add tool to export db meta

* chore: add meta restore command

* chore: fmt code

* chore: remove useless error

* chore: support key prefix

* chore: add clean check for meta restore

* chore: add more log for meta restore

* chore: resolve s3 and local file root in command meta-snapshot

* chore: remove the pg mysql features from the build script as they are already in the default feature

* chore: fix by pr comment
2025-05-28 03:18:00 +00:00
discord9
53752e4f6c fix: alter table update table column default (#6155)
* fix: alter table update table column default

* fix: fuzz test also cast default value

* chore: more testcase

* test: non-zero value

* refactor: per review

* tests: unexpected alter result(WIP on fix)

* ub

* ub more

* test: update sqlness
2025-05-27 09:42:27 +00:00
Weny Xu
40bfa98d4b fix(promql): handle field column projection with correct qualifier (#6183)
* fix(promql): handle field column projection with correct qualifier

* test: add sqlness tests
2025-05-27 03:26:23 +00:00
dennis zhuang
49986b03d6 chore: change info to debug for scanning physical table (#6180) 2025-05-26 18:23:31 +00:00
Lei, HUANG
493440a802 refactor: replace FlightMessage with arrow RecordBatch and Schema (#6175)
* refactor/flight-codec:
 ### Refactor and Enhance Schema and RecordBatch Handling

 - **Add `datatypes` Dependency**: Updated `Cargo.lock` and `Cargo.toml` to include the `datatypes` dependency.
 - **Schema Conversion and Error Handling**:
   - Updated `src/client/src/database.rs` and `src/client/src/region.rs` to handle schema conversion using `Arc` and added error handling for schema conversion.
   - Enhanced error handling in `src/client/src/error.rs` and `src/common/grpc/src/error.rs` by adding `ConvertSchema` error and removing unused errors.
 - **FlightMessage and RecordBatch Refactoring**:
   - Refactored `FlightMessage` enum in `src/common/grpc/src/flight.rs` to use `RecordBatch` instead of `Recordbatch`.
   - Updated related functions and tests in `src/common/grpc/benches/bench_flight_decoder.rs`, `src/operator/src/bulk_insert.rs`, `src/servers/src/grpc/flight/stream.rs`, and `tests-integration/src/grpc/flight.rs` to align with the new `FlightMessage` structure.

* refactor/flight-codec:
 Remove `ConvertArrowSchema` Error Variant

 - Removed the `ConvertArrowSchema` error variant from `error.rs`.
 - Updated the `ErrorExt` implementation to exclude `ConvertArrowSchema`.
 - Affected file: `src/common/query/src/error.rs`.

* fix: cr
2025-05-26 10:06:50 +00:00
localhost
77e2fee755 fix: add simple test for rds kv backend (#6167)
* chore: add simple test for rds kv backend

* chore: add test for etcd and mem

* chore: remove etcd simple range test

* chore: add more test case
2025-05-26 06:32:36 +00:00
dennis zhuang
b85429c0f1 fix: set column index can't work in physical table (#6179) 2025-05-26 04:44:05 +00:00
Lei, HUANG
3d942f6763 fix: bulk insert case sensitive (#6165)
* fix/bulk-insert-case-sensitive:
 Add error inspection for gRPC bulk insert in `greptime_handler.rs`

 - Enhanced error handling by adding `inspect_err` to log errors during the `put_record_batch` operation in `greptime_handler.rs`.

* fix: silent error during bulk ingest with uppercase columns
2025-05-24 07:02:42 +00:00
discord9
3901863432 chore: metasrv starting not blocking (#6158)
* chore: metasrv starting not blocking

* chore: fmt

* chore: expose actual bind_addr
2025-05-23 09:53:42 +00:00
Lei, HUANG
27e339f628 perf: optimize bulk encode decode (#6161)
* main:
 **Enhancements to Flight Data Handling and Error Management**

 - **Flight Data Handling:**
   - Added `bytes` dependency in `Cargo.lock` and `Cargo.toml`.
   - Introduced `try_from_schema_bytes` and `try_decode_record_batch` methods in `FlightDecoder` to handle schema and record batch decoding more efficiently in `src/common/grpc/src/flight.rs`.
   - Updated `Inserter` in `src/operator/src/bulk_insert.rs` to utilize schema bytes directly, improving bulk insert operations.

 - **Error Management:**
   - Added `ArrowError` handling in `src/common/grpc/src/error.rs` to manage errors related to Arrow operations.

 - **Region Request Processing:**
   - Modified `make_region_bulk_inserts` in `src/store-api/src/region_request.rs` to use the new `FlightDecoder` methods for decoding Arrow IPC data.

* - **Flight Data Handling:**
 - Added `bytes` dependency in `Cargo.lock` and `Cargo.toml`.
 - Introduced `try_from_schema_bytes` and `try_decode_record_batch` methods in `FlightDecoder` to handle schema and record batch decoding more efficiently in `src/common/grpc/src/flight.rs`.
 - Updated `Inserter` in `src/operator/src/bulk_insert.rs` to utilize schema bytes directly, improving bulk insert operations.
- **Error Management:**
 - Added `ArrowError` handling in `src/common/grpc/src/error.rs` to manage errors related to Arrow operations.
- **Region Request Processing:**
 - Modified `make_region_bulk_inserts` in `src/store-api/src/region_request.rs` to use the new `FlightDecoder` methods for decoding Arrow IPC data.

* perf/optimize-bulk-encode-decode:
 Update `greptime-proto` dependency and refactor error handling

 - **Dependency Update**: Updated the `greptime-proto` dependency to a new revision in `Cargo.lock` and `Cargo.toml`.
 - **Error Handling Refactor**: Removed the `Prost` error variant from `MetadataError` in `src/store-api/src/metadata.rs`.
 - **Error Handling Improvement**: Replaced `unwrap` with `context(FlightCodecSnafu)` for error handling in `make_region_bulk_inserts` function in `src/store-api/src/region_request.rs`.

* fix: clippy

* fix: toml

* perf/optimize-bulk-encode-decode:
 ### Update `Cargo.toml` Dependencies

 - Updated the `bytes` dependency to use the workspace version in `Cargo.toml`.

* perf/optimize-bulk-encode-decode:
 **Fix payload assignment in `bulk_insert.rs`**

 - Corrected the assignment of the `payload` field in the `ArrowIpc` struct within the `Inserter` implementation in `bulk_insert.rs`.

* use main branch proto
2025-05-23 07:22:10 +00:00
discord9
cf2712e6f4 chore: invalidate table flow mapping cache (#6135)
* chore: invalidate table flow mapping

* chore: exists

* fix: invalidate all related keys in kv cache when dropping flow & refactor: per review

* fix: flow not found status code

* chore: rm unused error code

* chore: stuff

* chore: unused
2025-05-23 03:40:10 +00:00
Lei, HUANG
4b71e493f7 feat!: revise compaction picker (#6121)
* - **Refactor `RegionFilePathFactory` to `RegionFilePathProvider`:** Updated references and implementations in `access_layer.rs`, `write_cache.rs`, and related test files to use the new struct name.
 - **Add `max_file_size` support in compaction:** Introduced `max_file_size` option in `PickerOutput`, `SerializedPickerOutput`, and `WriteOptions` in `compactor.rs`, `picker.rs`, `twcs.rs`, and `window.rs`.
 - **Enhance Parquet writing logic:** Modified `parquet.rs` and `parquet/writer.rs` to support optional `max_file_size` and added a test case `test_write_multiple_files` to verify writing multiple files based on size constraints.

 **Refactor Parquet Writer Initialization and File Handling**
 - Updated `ParquetWriter` in `writer.rs` to handle `current_indexer` as an `Option`, allowing for more flexible initialization and management.
 - Introduced `finish_current_file` method to encapsulate logic for completing and transitioning between SST files, improving code clarity and maintainability.
 - Enhanced error handling and logging with `debug` statements for better traceability during file operations.

 - **Removed Output Size Enforcement in `twcs.rs`:**
   - Deleted the `enforce_max_output_size` function and related logic to simplify compaction input handling.

 - **Added Max File Size Option in `parquet.rs`:**
   - Introduced `max_file_size` in `WriteOptions` to control the maximum size of output files.

 - **Refactored Indexer Management in `parquet/writer.rs`:**
   - Changed `current_indexer` from an `Option` to a direct `Indexer` type.
   - Implemented `roll_to_next_file` to handle file transitions when exceeding `max_file_size`.
   - Simplified indexer initialization and management logic.

 - **Refactored SST File Handling**:
   - Introduced `FilePathProvider` trait and its implementations (`WriteCachePathProvider`, `RegionFilePathFactory`) to manage SST and index file paths.
   - Updated `AccessLayer`, `WriteCache`, and `ParquetWriter` to use `FilePathProvider` for path management.
   - Modified `SstWriteRequest` and `SstUploadRequest` to use path providers instead of direct paths.
   - Files affected: `access_layer.rs`, `write_cache.rs`, `parquet.rs`, `writer.rs`.

 - **Enhanced Indexer Management**:
   - Replaced `IndexerBuilder` with `IndexerBuilderImpl` and made it async to support dynamic indexer creation.
   - Updated `ParquetWriter` to handle multiple indexers and file IDs.
   - Files affected: `index.rs`, `parquet.rs`, `writer.rs`.

 - **Removed Redundant File ID Handling**:
   - Removed `file_id` from `SstWriteRequest` and `CompactionOutput`.
   - Updated related logic to dynamically generate file IDs where necessary.
   - Files affected: `compaction.rs`, `flush.rs`, `picker.rs`, `twcs.rs`, `window.rs`.

 - **Test Adjustments**:
   - Updated tests to align with new path and indexer management.
   - Introduced `FixedPathProvider` and `NoopIndexBuilder` for testing purposes.
   - Files affected: `sst_util.rs`, `version_util.rs`, `parquet.rs`.

* chore: rebase main

* feat/multiple-compaction-output:
 ### Add Benchmarking and Refactor Compaction Logic

 - **Benchmarking**: Added a new benchmark `run_bench` in `Cargo.toml` and implemented benchmarks in `benches/run_bench.rs` using Criterion for `find_sorted_runs` and `reduce_runs` functions.
 - **Compaction Module Enhancements**:
   - Made `run.rs` public and refactored the `Ranged` and `Item` traits to be public.
   - Simplified the logic in `find_sorted_runs` and `reduce_runs` by removing `MergeItems` and related functions.
   - Introduced `find_overlapping_items` for identifying overlapping items.
 - **Code Cleanup**: Removed redundant code and tests related to `MergeItems` in `run.rs`.

* feat/multiple-compaction-output:
 ### Enhance Compaction Logic and Add Benchmarks

 - **Compaction Logic Improvements**:
   - Updated `reduce_runs` function in `src/mito2/src/compaction/run.rs` to remove the target parameter and improve the logic for selecting files to merge based on minimum penalty.
   - Enhanced `find_overlapping_items` to handle unsorted inputs and improve overlap detection efficiency.

 - **Benchmark Enhancements**:
   - Added `bench_find_overlapping_items` in `src/mito2/benches/run_bench.rs` to benchmark the new `find_overlapping_items` function.
   - Extended existing benchmarks to include larger data sizes.

 - **Testing Enhancements**:
   - Updated tests in `src/mito2/src/compaction/run.rs` to reflect changes in `reduce_runs` and added new tests for `find_overlapping_items`.

 - **Logging and Debugging**:
   - Improved logging in `src/mito2/src/compaction/twcs.rs` to provide more detailed information about compaction decisions.

* feat/multiple-compaction-output:
 ### Refactor and Enhance Compaction Logic

 - **Refactor `find_overlapping_items` Function**: Changed the function signature to accept slices instead of mutable vectors in `run.rs`.
 - **Rename and Update Struct Fields**: Renamed `penalty` to `size` in `SortedRun` struct and updated related logic in `run.rs`.
 - **Enhance `reduce_runs` Function**: Improved logic to sort runs by size and limit probe runs to 100 in `run.rs`.
 - **Add `merge_seq_files` Function**: Introduced a new function `merge_seq_files` in `run.rs` for merging sequential files.
 - **Modify `TwcsPicker` Logic**: Updated the compaction logic to use `merge_seq_files` when only one run is found in `twcs.rs`.
 - **Remove `enforce_file_num` Function**: Deleted the `enforce_file_num` function and its related test cases in `twcs.rs`.

* feat/multiple-compaction-output:
 ### Enhance Compaction Logic and Testing

 - **Add `merge_seq_files` Functionality**: Implemented the `merge_seq_files` function in `run.rs` to optimize file merging based on scoring systems. Updated
 benchmarks in `run_bench.rs` to include `bench_merge_seq_files`.
 - **Improve Compaction Strategy in `twcs.rs`**: Modified the compaction logic to handle file merging more effectively, considering file size and overlap.
 - **Update Tests**: Enhanced test coverage in `compaction_test.rs` and `append_mode_test.rs` to validate new compaction logic and file merging strategies.
 - **Remove Unused Function**: Deleted `new_file_handles` from `test_util.rs` as it was no longer needed.

* feat/multiple-compaction-output:
 ### Refactor TWCS Compaction Options

 - **Refactor Compaction Logic**: Simplified the TWCS compaction logic by replacing multiple parameters (`max_active_window_runs`, `max_active_window_files`, `max_inactive_window_runs`, `max_inactive_window_files`) with a single `trigger_file_num` parameter in `picker.rs`, `twcs.rs`, and `options.rs`.
 - **Update Tests**: Adjusted test cases to reflect the new compaction logic in `append_mode_test.rs`, `compaction_test.rs`, `filter_deleted_test.rs`, `merge_mode_test.rs`, and various test files under `tests/cases`.
 - **Modify Engine Options**: Updated engine option keys to use `trigger_file_num` in `mito_engine_options.rs` and `region_request.rs`.
 - **Fuzz Testing**: Updated fuzz test generators and translators to accommodate the new compaction parameter in `alter_expr.rs` and related files.

 This refactor aims to streamline the compaction configuration by reducing the number of parameters and simplifying the codebase.

* chore: add trailing space

* fix license header

* feat/revise-compaction-picker:
 **Limit File Processing and Optimize Merge Logic in `run.rs`**

 - Introduced a limit to process a maximum of 100 files in `merge_seq_files` to control time complexity.
 - Adjusted logic to calculate `target_size` and iterate over files using the limited set of files.
 - Updated scoring calculations to use the limited file set, ensuring efficient file merging.

* feat/revise-compaction-picker:
 ### Add Compaction Metrics and Remove Debug Logging

 - **Compaction Metrics**: Introduced new histograms `COMPACTION_INPUT_BYTES` and `COMPACTION_OUTPUT_BYTES` to track compaction input and output file sizes in `metrics.rs`. Updated `compactor.rs` to observe these metrics during the compaction process.
 - **Logging Cleanup**: Removed debug logging of file ranges during the merge process in `twcs.rs`.

* feat/revise-compaction-picker:
 ## Enhance Compaction Logic and Metrics

 - **Compaction Logic Improvements**:
   - Added methods `input_file_size` and `output_file_size` to `MergeOutput` in `compactor.rs` to streamline file size calculations.
   - Updated `Compactor` implementation to use these methods for metrics tracking.
   - Modified `Ranged` trait logic in `run.rs` to improve range comparison.
   - Enhanced test cases in `run.rs` to reflect changes in compaction logic.

 - **Metrics Enhancements**:
   - Changed `COMPACTION_INPUT_BYTES` and `COMPACTION_OUTPUT_BYTES` from histograms to counters in `metrics.rs` for better performance tracking.

 - **Debugging and Logging**:
   - Added detailed logging for compaction pick results in `twcs.rs`.
   - Implemented custom `Debug` trait for `FileMeta` in `file.rs` to improve debugging output.

 - **Testing Enhancements**:
   - Added new test `test_compaction_overlapping_files` in `compaction_test.rs` to verify compaction behavior with overlapping files.
   - Updated `merge_mode_test.rs` to reflect changes in file handling during scans.

* feat/revise-compaction-picker:
 ### Update `FileHandle` Debug Implementation

 - **Refactor Debug Output**: Simplified the `fmt::Debug` implementation for `FileHandle` in `src/mito2/src/sst/file.rs` by consolidating multiple fields into a single `meta` field using `meta_ref()`.
 - **Atomic Operations**: Updated the `deleted` field to use atomic loading with `Ordering::Relaxed`.

* Trigger CI

* feat/revise-compaction-picker:
 **Update compaction logic and default options**

 - **`twcs.rs`**: Enhanced logging for compaction pick results by improving the formatting for better readability.
 - **`options.rs`**: Modified the default `max_output_file_size` in `TwcsOptions` from 2GB to 512MB to optimize file handling and performance.

* feat/revise-compaction-picker:
 Refactor `find_overlapping_items` to use an external result vector

 - Updated `find_overlapping_items` in `src/mito2/src/compaction/run.rs` to accept a mutable result vector instead of returning a new vector, improving memory efficiency.
 - Modified benchmarks in `src/mito2/benches/bench_compaction_picker.rs` to accommodate the new function signature.
 - Adjusted tests in `src/mito2/src/compaction/run.rs` to use the updated function signature, ensuring correct functionality with the new approach.

* feat/revise-compaction-picker:
 Improve file merging logic in `run.rs`

 - Refactor the loop logic in `merge_seq_files` to simplify the iteration over file groups.
 - Adjust the range for `end_idx` to include the endpoint, allowing for more flexible group selection.
 - Remove the condition that skips groups with only one file, enabling more comprehensive processing of file sequences.

* feat/revise-compaction-picker:
 Enhance `find_overlapping_items` with `SortedRun` and Update Tests

 - Refactor `find_overlapping_items` in `src/mito2/src/compaction/run.rs` to utilize the `SortedRun` struct for improved efficiency and clarity.
 - Introduce a `sorted` flag in `SortedRun` to optimize sorting operations.
 - Update test cases in `src/mito2/benches/bench_compaction_picker.rs` to accommodate changes in `find_overlapping_items` by using `SortedRun`.
 - Add `From<Vec<T>>` implementation for `SortedRun` to facilitate easy conversion from vectors.

* feat/revise-compaction-picker:
 **Enhancements in `compaction/run.rs`:**

 - Added `ReadableSize` import to handle size calculations.
 - Modified the logic in `merge_seq_files` to clamp the calculated target size to a maximum of 2GB when `max_file_size` is not provided.

* feat/revise-compaction-picker: Add Default Max Output Size Constant for Compaction

Introduce DEFAULT_MAX_OUTPUT_SIZE constant to define the default maximum compaction output file size as 2GB. Refactor the merge_seq_files function to utilize this constant, ensuring consistent and maintainable code for handling file size limits during compaction.
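
 An illustrative sketch of the overlap search and caller-owned result vector mentioned above; `FileRange` is a hypothetical stand-in for the real file handle type:

```rust
#[derive(Clone, Copy, Debug, PartialEq)]
struct FileRange {
    start: i64,
    end: i64,
}

// Push every file whose time range intersects `probe` into `out`.
fn find_overlapping_items(files: &[FileRange], probe: FileRange, out: &mut Vec<FileRange>) {
    for f in files {
        // Closed ranges overlap iff neither one ends before the other starts.
        if f.start <= probe.end && probe.start <= f.end {
            out.push(*f);
        }
    }
}

fn main() {
    let files = [
        FileRange { start: 0, end: 10 },
        FileRange { start: 5, end: 15 },
        FileRange { start: 20, end: 30 },
    ];
    let mut out = Vec::new();
    find_overlapping_items(&files, FileRange { start: 8, end: 18 }, &mut out);
    assert_eq!(out, vec![files[0], files[1]]);
}
```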
2025-05-23 03:29:08 +00:00
Ruihang Xia
bf496e05cc ci: turn off fail fast strategy (#6157)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2025-05-23 02:38:25 +00:00
zyy17
513ca951ee chore: add the missing v prefix for NEXT_RELEASE_VERSION variable (#6160)
chore: add 'v' prefix for NEXT_RELEASE_VERSION variable
2025-05-22 10:33:14 +00:00
Ruihang Xia
791f530a78 fix: require input ordering in series divide plan (#6148)
* require input ordering in series divide plan

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add sqlness case

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* finalise

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2025-05-22 07:04:25 +00:00
Ning Sun
1de6d8c619 fix: ident value in set search_path (#6153)
* fix: ident value in set search_path

* refactor: remove unneeded clone
2025-05-22 03:58:18 +00:00
discord9
a4d0420727 fix(flow): flow task run interval (#6100)
* fix: always check for shutdown signal in flow
chore: correct log msg for flows that shouldn't exist
feat: use time window size/2 as sleep interval

* chore: better refresh time for slower queries

* chore

* refactor: per review
2025-05-22 03:27:26 +00:00
discord9
fc6300a2ba feat(flow): support prom ql(in tql) in flow (#6063)
* feat: support parse prom ql in create flow

* refactor

* fix: just run tql unmodified

* refactor: determine type faster

* fix: pass original query

* tests: sqlness

* test: fix format&chore

* fix: get raw query

* test: fix sqlness randomness

* chore: what's the box for?

* test: location_to_index

* test: make sqlness more deterministic

* fix: tmp add sleep 1s after flush_flow

* undo test sleep 1s&rm done todo

* chore: more tests
2025-05-22 03:06:09 +00:00
liyang
f55af5838c ci: add issues write permission (#6145)
fixed to: https://github.com/GreptimeTeam/greptimedb/actions/runs/15155518237/job/42610589439
2025-05-21 15:53:01 +00:00
Lei, HUANG
5a0da5b6bb fix: region worker stall metrics (#6149)
fix/stall-metrics:
 Improve stalled request handling in `handle_write.rs`

 - Updated logic to account for both `write_requests` and `bulk_requests` when adjusting `stalled_count`.
 - Modified `reject_region_stalled_requests` and `handle_region_stalled_requests` to correctly subtract the combined length of `requests` and `bulk` from `stalled_count`.
2025-05-21 13:21:50 +00:00
Lei, HUANG
d5f0006864 fix: flaky prom gateway test (#6146)
fix/flaky-prom-gateway-test:
 **Refactor gRPC Test Assertions in `grpc.rs`**

 - Updated test assertions for `test_prom_gateway_query` to improve clarity and maintainability.
 - Replaced direct comparison with expected `PrometheusJsonResponse` objects with individual field assertions.
 - Added sorting for `vector` and `matrix` results to ensure consistent test outcomes.
2025-05-21 09:31:58 +00:00
liyang
ede82331b2 docs: change docker run mount directory (#6142) 2025-05-21 07:05:21 +00:00
Ruihang Xia
56e696bd55 chore: remove stale wal config entries (#6134)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2025-05-20 19:42:09 +00:00
ZonaHe
bc0cdf62ba feat: update dashboard to v0.9.2 (#6140)
Co-authored-by: ZonaHex <ZonaHex@users.noreply.github.com>
2025-05-20 19:41:29 +00:00
Lei, HUANG
eaf7b4b9dd chore: update flush failure metric name and update grafana dashboard (#6138)
* 1. rename the flush failure metric to `greptime_mito_flush_errors_total` for consistency
2. update grafana dashboard to add the following panels:
  - compaction input/output bytes
  - bulk insert handle elapsed time in frontend and region worker
2025-05-20 12:05:54 +00:00
Ruihang Xia
7ae0e150e5 feat: support altering multiple logical table in one remote write request (#6137)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2025-05-20 11:22:38 +00:00
ZonaHe
43c30b55ae feat: update dashboard to v0.9.1 (#6132)
Co-authored-by: sunchanglong <sunchanglong@users.noreply.github.com>
2025-05-20 09:58:44 +00:00
liyang
153e80450a fix: update dev-build image tag (#6136) 2025-05-20 09:08:28 +00:00
jeremyhi
1624dc41c5 chore: reduce unnecessary txns in alter operations (#6133) 2025-05-20 08:29:49 +00:00
Ruihang Xia
300262562b feat: accommodate default column name with pre-created table schema (#6126)
* refactor: prepare_mocked_backend

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* modify request in place

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* apply to influx line protocol

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix typo

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* return on empty alter expr list

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* expose to other write paths

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2025-05-20 07:22:13 +00:00
shuiyisong
b2377d4b87 chore: update toolchain to 2025-05-19 (#6124)
* chore: update toolchain to 2025-05-19

* chore: update nix sha

* chore: rebase main and fix
2025-05-20 04:29:40 +00:00
yinheli
8d36ffb4e1 chore: enable github folder typo check and fix typos (#6128) 2025-05-20 04:20:07 +00:00
Yingwen
955ad644f7 ci: add pull requests permissions to semantic check job (#6130)
* ci: add pull requests permissions

* ci: reduce permissions
2025-05-20 03:33:33 +00:00
localhost
c2e3c3d398 chore: Add more data format support to the pipeline dryrun api. (#6115)
* chore: supporting more data type for pipeline dryrun API

* chore: add docs for parse_dryrun_data

* chore: fix by pr comment

* chore: add user-friendly error message

* chore: change EventPayloadResolver content_type field type from owned to ref

* Apply suggestions from code review

Co-authored-by: shuiyisong <113876041+shuiyisong@users.noreply.github.com>

---------

Co-authored-by: shuiyisong <113876041+shuiyisong@users.noreply.github.com>
2025-05-20 03:29:28 +00:00
Zhenchi
400229c384 feat: introduce index result cache (#6110)
* feat: introduce index result cache

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* Update src/mito2/src/sst/index/inverted_index/applier/builder.rs

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* optimize selector_len

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* address comments

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* address comments

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* address comments

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

---------

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-05-20 01:45:42 +00:00
Ruihang Xia
cd9b6990bf feat: implement clamp_min and clamp_max (#6116)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2025-05-19 21:32:03 +00:00
Ruihang Xia
a56e6e04c2 chore: remove etcd from acknowledgement as not recommended (#6127)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2025-05-19 12:42:30 +00:00
Ning Sun
d324439014 ci: fix release job dependencies (#6125) 2025-05-19 11:48:57 +00:00
discord9
038acda7cd fix: flow update use proper update (#6108)
* fix: flow update use proper update

* refactor: per review

* fix: flow cache

* chore: per copilot review

* refactor: rm flow node id

* refactor: per review

* chore: per review

* refactor: per review

* chore: per review
2025-05-19 11:30:10 +00:00
shuiyisong
a0d89c9ed1 feat: Prometheus remote write with pipeline (#5981)
* chore: update nightly version

* chore: sort lint lines

* chore: minor fix

* chore: update nix

* chore: update toolchain to 2024-04-14

* chore: update toolchain to 2024-04-15

* chore: remove unnecessary test

* chore: do not assert oid in sqlness test

* chore: fix margin issue

* chore: fix cr issues

* chore: fix cr issues

* chore: add pipeline handler to prom state

* chore: add prom series processor to merge function

* chore: add run pipeline in decode

* chore: add channel to pipeline ctx

* chore: add pipeline info to remote write handler

* chore: minor update

* chore: minor update

* chore: add test

* chore: add comment

* refactor: simplify identity pipeline params

* fix: test

* refactor: remove is_prometheus

---------

Co-authored-by: Ning Sun <sunning@greptime.com>
2025-05-19 08:00:59 +00:00
discord9
3a5534722c feat: export to s3 add more options (#6091)
* feat: export to s3 add more options

* chore: rm output dir override logic

* fix: s3 root export data

* feat: use output_dir and s3 at same time

* refactor: per review

* fix: keep same behavior
2025-05-16 20:58:14 +00:00
Ruihang Xia
1010a0c2ad fix: update promql-parser for regex anchor fix (#6117)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2025-05-16 20:33:35 +00:00
Lei, HUANG
f46cdbd66b fix: fast path for single region bulk insert (#6104)
* fix/fast-path-for-single-region-bulk-insert:
 ### Commit Summary

 - **Refactor `try_decode` Method**: Updated the `try_decode` method in `FlightDecoder` to accept a reference to `FlightData` instead of consuming it. This change affects multiple files including `database.rs`, `region.rs`, `flight.rs`, `bulk_insert.rs`, `stream.rs`, and `region_request.rs`.
 - **Optimize Bulk Insert Handling**: Added a fast path for handling bulk inserts when only one region is involved in `bulk_insert.rs`.

* fix/fast-path-for-single-region-bulk-insert:
 Improve `FlightDecoder` usage in tests

 - Updated `try_decode` method calls in `flight.rs` to remove unnecessary references for `d1`, `d2`, and `d3`.
 - Ensured consistency in handling `FlightMessage` variants within test cases.

* fix/fast-path-for-single-region-bulk-insert:
 **Enhancement: Skip Empty Regions in Bulk Insert**

 - Updated `bulk_insert.rs` to improve efficiency by skipping regions without data during the bulk insert process. This change ensures that regions with a `true_count` of zero are not processed, optimizing resource usage and performance.

* fix/fast-path-for-single-region-bulk-insert:
 ### Commit Summary

 - **Refactor `RegionMask` Handling**:
   - Introduced `RegionMask` struct to encapsulate boolean array and selected rows count.
   - Updated methods to use `RegionMask` instead of `BooleanArray` for region selection.
   - Affected files: `bulk_insert.rs`, `multi_dim.rs`, `partition.rs`, `splitter.rs`.

 - **Optimize Region Selection**:
   - Removed unnecessary checks for empty regions in `bulk_insert.rs`.
   - Improved logic for handling default regions in `multi_dim.rs`.

 - **Update Tests**:
   - Modified test cases to accommodate `RegionMask` changes.
   - Affected files: `multi_dim.rs`, `splitter.rs`.
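
 A standalone sketch of the `RegionMask` idea, assuming a plain `Vec<bool>` in place of the Arrow `BooleanArray` the real struct wraps:

```rust
struct RegionMask {
    selected: Vec<bool>,
    selected_rows: usize,
}

impl RegionMask {
    fn new(selected: Vec<bool>) -> Self {
        // Cache the count so empty regions can be skipped without rescanning.
        let selected_rows = selected.iter().filter(|&&s| s).count();
        Self { selected, selected_rows }
    }

    // Regions with no selected rows are skipped during bulk insert.
    fn is_empty(&self) -> bool {
        self.selected_rows == 0
    }
}

fn main() {
    let mask = RegionMask::new(vec![false, true, true]);
    assert_eq!(mask.selected_rows, 2);
    assert_eq!(mask.selected.len(), 3);
    assert!(!mask.is_empty());
}
```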

* fix/fast-path-for-single-region-bulk-insert:
 **Enhancements to MultiDimPartitionRule Logic and Tests**

 - **`multi_dim.rs`**: Improved the logic for selecting rows in `MultiDimPartitionRule` by optimizing the selection process when only one region is present.
 - **Tests**: Added new test cases to verify the behavior of default regions with unselected rows, existing default regions, and scenarios where all rows are selected. These tests ensure robust handling of partition rules and validate the correct assignment of rows to regions.
2025-05-16 20:26:56 +00:00
Weny Xu
864cc117b3 fix: append noop entry when auto topic creation is disabled (#6092)
* feat: improve topic management and add stale records cleanup

* fix: fix unit tests

* chore: apply suggestions from CR

* chore: apply suggestions from CR
2025-05-16 11:26:47 +00:00
Yingwen
0ea9ab385d fix: clean files under the atomic write dir on failure (#6112)
* fix: remove files under atomic dir on failure

* fix: clean atomic dir on download failure

* chore: update comment

* fix: clean up when a write fails without the write cache

* feat: add a TempFileCleaner to clean files on failure

* chore: after merge fix

* chore: more fix

---------

Co-authored-by: discord9 <55937128+discord9@users.noreply.github.com>
Co-authored-by: discord9 <discord9@163.com>
2025-05-16 11:18:11 +00:00
Yingwen
c7e9485534 feat: New scanner SeriesScan to scan by series for querying metrics (#5968)
* chore: basic methods for SeriesScan

* chore: add to scanner enum

* feat: implement scan logic of each partition

* feat: use series scan when distribution is PerSeries

* refactor: remove per series scan from SeqScan

* fix: use series scan in PerSeries distribution

* feat: keep parallelize_scan unchanged

* fix: address compiler errors

* fix: include build merge reader cost to scan cost

* feat: use smallvec

* chore: update comment

* Revert "feat: keep parallelize_scan unchanged"

This reverts commit 96ba00d175.

* assign partition_ranges

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* feat: try send before send

reduce the send timeout to 10ms

* chore: add comments

* fix: add metrics to partition metrics list

* fix: correct scan cost metrics

* chore: reset instant

* fix: scanner metrics init

* chore: display more info in explain

* feat: metrics for send series timeout

* style: fix clippy

* refactor: use ChainedRecordBatchStream to simplify codes

* chore: fix typos

* feat: separate distributor metrics

* feat: remove parallelize hack

* chore: fix warning

* test: add test for series scan

* test: update sqlness test

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
Co-authored-by: Ruihang Xia <waynestxia@gmail.com>
2025-05-16 08:53:24 +00:00
Ruihang Xia
57b53211d9 feat: don't hide atomic write dir (#6109)
* feat: don't hide atomic write dir

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* compatible code

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* Update src/mito2/src/access_layer.rs

Co-authored-by: Yingwen <realevenyag@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
Co-authored-by: Yingwen <realevenyag@gmail.com>
2025-05-16 06:21:13 +00:00
zyy17
01076069a3 chore: modify default slow_query.threshold from 5s to 30s (#6107)
chore: modify slow_query.threshold from 5s to 30s
2025-05-15 20:16:13 +00:00
Ning Sun
73b4b710cd ci: update nix build linker (#6103)
* ci: update nix build linker

* ci: use mold for nix ci
2025-05-15 19:02:58 +00:00
zyy17
14b655ea57 refactor: add SlowQueryRecorder to record slow query in system table and refactor slow query options (#6008)
* refactor: add common-slow-query crate

* refactor: refine the naming

* chore: fix clippy

* chore: fix typo

* chore: separate SlowQueryOptions from Logging

* chore: fix clippy

* chore: fix ci

* chore: refine the code

* chore: update config example

* refactor: use drop() to end the slow query timer

* refactor: move common-slow-query to frontend crate

* chore: polish some code

* refactor: code review

* refactor: add promql_range/promql_step/promql_start/promql_end fields in slow_queries

* refactor: add build_slow_query_logger()

* refactor: turn on slow query on frontend by default
2025-05-15 04:18:48 +00:00
Ruihang Xia
c780746171 perf: avoid some atomic operation on array slice (#6101)
* perf: avoid some atomic operation on array slice

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* finalise

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2025-05-15 02:29:07 +00:00
Weny Xu
1f62c3b545 fix: table metadata collection (#6102)
fix: fix collect metadata
2025-05-14 12:19:54 +00:00
Lei, HUANG
5a9023d6b3 feat(bulk): write to multiple time partitions (#6086)
* add benchmark for splitting according to time partition

* feat/write-to-multiple-time-partitions:
 **Enhancements to Bulk Processing and Time Partitioning**

 - **`part.rs`**: Added `Snafu` to imports and introduced `timestamp_index` in `BulkPart` struct. Implemented `timestamps` method for accessing timestamp columns.
 - **`simple_bulk_memtable.rs`**: Updated tests to include `timestamp_index` initialization.
 - **`time_partition.rs`**: Enhanced `TimePartition` to support partial writes with `write_record_batch_partial`. Implemented `split_record_batch` for filtering records by timestamp range. Added comprehensive tests for `split_record_batch`.
 - **`handle_bulk_insert.rs`**: Modified to retrieve timestamp index and column together, updating `BulkPart` initialization with `timestamp_index`.

* feat/write-to-multiple-time-partitions:
 ### Enhance Time Partitioning Logic

 - **`time_partition.rs`**:
   - Introduced `HashSet` for efficient partition management.
   - Refactored `write_bulk` to handle multiple partitions and added `find_partitions_by_time_range` for identifying existing and missing partitions.
   - Updated `get_or_create_time_partition` to manage partition creation.
   - Added comprehensive tests for partition finding logic, covering various scenarios including overlapping and non-overlapping time ranges.

 - **Tests**:
   - Added `test_find_partitions_by_time_range` to validate new partitioning logic.
   - Updated `test_split_record_batch` to ensure correct record batch splitting behavior.

* feat/write-to-multiple-time-partitions:
 ### Enhance Time Partitioning and Testing in `time_partition.rs`

 - **Time Partitioning Enhancements**:
   - Updated `split_record_batch` to handle multiple timestamp units (`Second`, `Millisecond`, `Microsecond`, `Nanosecond`) by matching on `DataType`.
   - Improved filtering logic for timestamp arrays to support various time units.

 - **Testing Enhancements**:
   - Added `test_write_bulk` to verify writing across multiple partitions and scenarios in `time_partition.rs`.
   - Updated `test_split_record_batch` to use `TimestampMillisecondArray` for testing timestamp partitioning.

 - **Imports and Dependencies**:
   - Added necessary imports for new timestamp array types and testing utilities.

* feat/write-to-multiple-time-partitions:
 ### Refactor and Enhance Time Partition Filtering

 - **Refactor Filtering Logic**: Consolidated the filtering logic for timestamp arrays using macros in `time_partition.rs` and `bench_filter_time_partition.rs`. This reduces code duplication and improves maintainability.
 - **Enhance `BulkPart` Struct**: Made fields in `BulkPart` public to facilitate easier access and manipulation in `memtable.rs` and `part.rs`.
 - **Rename Function**: Renamed `split_record_batch` to `filter_record_batch` for clarity in `time_partition.rs` and `bench_filter_time_partition.rs`.
 - **Add Feature Flag**: Introduced `int_roundings` feature in `lib.rs` to support new functionality.

* refactor tests

* feat/write-to-multiple-time-partitions:
 Improve timestamp handling in `time_partition.rs`

 - Enhanced safety comments for timestamp conversion to ensure clarity.
 - Modified logic to prevent overflow by using `div_euclid` for `bulk_start_sec` and `bulk_end_sec` calculations.
 - Adjusted the `filter_map` logic to correctly compute timestamps using `start_sec` and `part_duration_sec`.
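
 A small sketch of the `div_euclid` bucketing referred to above; the function and its arguments are illustrative, not the project's API:

```rust
// Map a timestamp in seconds to its time-partition bucket. `div_euclid`
// rounds toward negative infinity, so pre-epoch timestamps bucket correctly.
fn bucket_of(ts_sec: i64, part_duration_sec: i64) -> i64 {
    ts_sec.div_euclid(part_duration_sec)
}

fn main() {
    let day = 86_400;
    assert_eq!(bucket_of(0, day), 0);
    assert_eq!(bucket_of(86_399, day), 0);
    assert_eq!(bucket_of(86_400, day), 1);
    // Plain `/` would map -1 to bucket 0; `div_euclid` yields -1.
    assert_eq!(bucket_of(-1, day), -1);
}
```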

* feat/write-to-multiple-time-partitions:
 **Refactor timestamp handling and add utility function**

 - **Refactor `time_partition.rs`:** Simplified timestamp handling by replacing direct type access with a utility function to retrieve the timestamp unit. Improved error handling for timestamp conversion.
 - **Enhance `metadata.rs`:** Added `time_index_type` function to `RegionMetadata` to retrieve the timestamp type of the time index column, ensuring safer and more readable code.

* feat/write-to-multiple-time-partitions:
 Refactor time partition variable names in `time_partition.rs`

 - Renamed variables for clarity: `bulk_start_sec` to `start_bucket` and `bulk_end_sec` to `end_bucket`.
 - Updated related logic to use new variable names for improved readability and maintainability.

* feat/write-to-multiple-time-partitions:
 **Refactor variable names in `time_partition.rs`**

 - Updated variable names from `matching` and `missing` to `matchings` and `missings` for clarity and consistency.
 - Modified function calls and loop iterations to align with the new variable names.
 - Affected file: `src/mito2/src/memtable/time_partition.rs`

* feat/write-to-multiple-time-partitions:
 ### Refactor variable names in `time_partition.rs`

 - Updated variable names for clarity in `time_partition.rs`:
   - Renamed `matchings` to `matching_parts`
   - Renamed `missings` to `missing_parts`
 - Adjusted logic to use new variable names in methods `find_partitions_by_time_range` and `write_record_batch`.

* feat/write-to-multiple-time-partitions:
 ### Enhance Time Partition Handling

 - **`time_partition.rs`**:
   - Added `ArrayRef` to handle timestamp arrays, improving the partitioning logic by allowing more efficient timestamp range checks.
   - Enhanced `find_partitions_by_time_range` to support sparse data and handle different timestamp units (`Second`, `Millisecond`, `Microsecond`, `Nanosecond`).
   - Updated test cases to cover new scenarios, including sparse data and edge cases, ensuring robustness of partition handling.
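
One plausible reading of the sparse-data support mentioned above, sketched under the assumption that only buckets which actually contain rows should get partitions; the function name and bucket width are illustrative:

```rust
use std::collections::BTreeSet;

/// Collect the set of buckets that actually contain at least one row,
/// instead of assuming every bucket between min and max is populated.
fn buckets_with_data(timestamps_sec: &[i64], part_duration_sec: i64) -> BTreeSet<i64> {
    timestamps_sec
        .iter()
        .map(|ts| ts.div_euclid(part_duration_sec))
        .collect()
}

fn main() {
    // Rows only in hour 0 and hour 5: a min/max scan would suggest
    // buckets 0..=5, but only buckets 0 and 5 need partitions.
    let ts = [10, 20, 5 * 3600 + 1];
    let buckets: Vec<i64> = buckets_with_data(&ts, 3600).into_iter().collect();
    assert_eq!(buckets, vec![0, 5]);
}
```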

---------

Co-authored-by: Lei <lei@Leis-MacBook-Pro.local>
2025-05-14 05:09:59 +00:00
Ruihang Xia
209f8371f2 fix: promql regex escape behavior (#6094)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2025-05-13 18:19:17 +00:00
Weny Xu
30f1cbf0bf chore: bump rskafka version (#6090)
* chore: upgrade rskafka

* chore(test): bump kafka version
2025-05-13 11:57:31 +00:00
Ruihang Xia
bbb6f8685e feat: implement commutativity rule for prom-related plans (#5875)
* feat: implement commutativity rule for prom-related plans

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix range manipulate deserializer

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* blocklist in commutativity rule

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* change dictionary type

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* handle partition and ordering

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix clippy

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* update tests

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add rate, increase and delta

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* update sqlness result

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* regexp_replace uses empty string instead of null value

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* update sqlness result

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* update sqlness result

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* update sqlness result again

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2025-05-13 09:06:25 +00:00
Weny Xu
29540b55ee feat(meta): add pusher deregister signal to mailbox receiver (#6072) 2025-05-13 08:04:43 +00:00
Yingwen
ca1641d1c4 feat: implement PlainBatch struct (#6079)
* feat: implement PlainBatch struct

* chore: typo

* style: fix clippy

* feat: assert num columns
2025-05-13 05:56:12 +00:00
omahs
b275793b36 fix: typos (#6084) 2025-05-12 12:12:47 +00:00
discord9
265b144ca2 fix: flownode chose fe randomly&not starve lock (#6077)
* fix: choose frontend randomly

* docs: update comment

* chore: more logs

* fix: ignore inserts until recovering flow is done

* chore: resolve TODO

* fix: rm unused code&set done in correct location

* refactor: speed up create flow
2025-05-12 12:11:28 +00:00
Weny Xu
2ce5631d3c chore: fix clippy error by feature-gating Query import (#6085) 2025-05-12 09:27:29 +00:00
936 changed files with 73331 additions and 21509 deletions


@@ -12,3 +12,6 @@ fetch = true
checkout = true
list_files = true
internal_use_git2 = false
[env]
CARGO_WORKSPACE_DIR = { value = "", relative = true }

2
.github/CODEOWNERS vendored

@@ -4,7 +4,7 @@
* @GreptimeTeam/db-approver
## [Module] Databse Engine
## [Module] Database Engine
/src/index @zhongzc
/src/mito2 @evenyag @v0y4g3r @waynexia
/src/query @evenyag


@@ -52,7 +52,7 @@ runs:
uses: ./.github/actions/build-greptime-binary
with:
base-image: ubuntu
features: servers/dashboard,pg_kvbackend,mysql_kvbackend
features: servers/dashboard
cargo-profile: ${{ inputs.cargo-profile }}
artifacts-dir: greptime-linux-${{ inputs.arch }}-${{ inputs.version }}
version: ${{ inputs.version }}
@@ -70,7 +70,7 @@ runs:
if: ${{ inputs.arch == 'amd64' && inputs.dev-mode == 'false' }} # Builds greptime for centos if the host machine is amd64.
with:
base-image: centos
features: servers/dashboard,pg_kvbackend,mysql_kvbackend
features: servers/dashboard
cargo-profile: ${{ inputs.cargo-profile }}
artifacts-dir: greptime-linux-${{ inputs.arch }}-centos-${{ inputs.version }}
version: ${{ inputs.version }}


@@ -64,11 +64,11 @@ inputs:
upload-max-retry-times:
description: Max retry times for uploading artifacts to S3
required: false
default: "20"
default: "30"
upload-retry-timeout:
description: Timeout for uploading artifacts to S3
required: false
default: "30" # minutes
default: "120" # minutes
runs:
using: composite
steps:


@@ -10,13 +10,13 @@ inputs:
meta-replicas:
default: 2
description: "Number of Metasrv replicas"
image-registry:
image-registry:
default: "docker.io"
description: "Image registry"
image-repository:
image-repository:
default: "greptime/greptimedb"
description: "Image repository"
image-tag:
image-tag:
default: "latest"
description: 'Image tag'
etcd-endpoints:
@@ -32,12 +32,12 @@ runs:
steps:
- name: Install GreptimeDB operator
uses: nick-fields/retry@v3
with:
with:
timeout_minutes: 3
max_attempts: 3
shell: bash
command: |
helm repo add greptime https://greptimeteam.github.io/helm-charts/
helm repo add greptime https://greptimeteam.github.io/helm-charts/
helm repo update
helm upgrade \
--install \
@@ -48,10 +48,10 @@ runs:
--wait-for-jobs
- name: Install GreptimeDB cluster
shell: bash
run: |
run: |
helm upgrade \
--install my-greptimedb \
--set meta.etcdEndpoints=${{ inputs.etcd-endpoints }} \
--set meta.backendStorage.etcd.endpoints=${{ inputs.etcd-endpoints }} \
--set meta.enableRegionFailover=${{ inputs.enable-region-failover }} \
--set image.registry=${{ inputs.image-registry }} \
--set image.repository=${{ inputs.image-repository }} \
@@ -59,7 +59,7 @@ runs:
--set base.podTemplate.main.resources.requests.cpu=50m \
--set base.podTemplate.main.resources.requests.memory=256Mi \
--set base.podTemplate.main.resources.limits.cpu=2000m \
--set base.podTemplate.main.resources.limits.memory=2Gi \
--set base.podTemplate.main.resources.limits.memory=3Gi \
--set frontend.replicas=${{ inputs.frontend-replicas }} \
--set datanode.replicas=${{ inputs.datanode-replicas }} \
--set meta.replicas=${{ inputs.meta-replicas }} \
@@ -72,7 +72,7 @@ runs:
- name: Wait for GreptimeDB
shell: bash
run: |
while true; do
while true; do
PHASE=$(kubectl -n my-greptimedb get gtc my-greptimedb -o jsonpath='{.status.clusterPhase}')
if [ "$PHASE" == "Running" ]; then
echo "Cluster is ready"
@@ -86,10 +86,10 @@ runs:
- name: Print GreptimeDB info
if: always()
shell: bash
run: |
run: |
kubectl get all --show-labels -n my-greptimedb
- name: Describe Nodes
if: always()
shell: bash
run: |
run: |
kubectl describe nodes


@@ -22,7 +22,6 @@ datanode:
[wal]
provider = "kafka"
broker_endpoints = ["kafka.kafka-cluster.svc.cluster.local:9092"]
linger = "2ms"
overwrite_entry_start_id = true
frontend:
configData: |-

15
.github/labeler.yaml vendored Normal file

@@ -0,0 +1,15 @@
ci:
- changed-files:
- any-glob-to-any-file: .github/**
docker:
- changed-files:
- any-glob-to-any-file: docker/**
documentation:
- changed-files:
- any-glob-to-any-file: docs/**
dashboard:
- changed-files:
- any-glob-to-any-file: grafana/**

42
.github/scripts/check-version.sh vendored Executable file

@@ -0,0 +1,42 @@
#!/bin/bash
# Get current version
CURRENT_VERSION=$1
if [ -z "$CURRENT_VERSION" ]; then
echo "Error: Failed to get current version"
exit 1
fi
# Get the latest version from GitHub Releases
API_RESPONSE=$(curl -s "https://api.github.com/repos/GreptimeTeam/greptimedb/releases/latest")
if [ -z "$API_RESPONSE" ] || [ "$(echo "$API_RESPONSE" | jq -r '.message')" = "Not Found" ]; then
echo "Error: Failed to fetch latest version from GitHub"
exit 1
fi
# Get the latest version
LATEST_VERSION=$(echo "$API_RESPONSE" | jq -r '.tag_name')
if [ -z "$LATEST_VERSION" ] || [ "$LATEST_VERSION" = "null" ]; then
echo "Error: No valid version found in GitHub releases"
exit 1
fi
# Cleaned up version number format (removed possible 'v' prefix and -nightly suffix)
CLEAN_CURRENT=$(echo "$CURRENT_VERSION" | sed 's/^v//' | sed 's/-nightly-.*//')
CLEAN_LATEST=$(echo "$LATEST_VERSION" | sed 's/^v//' | sed 's/-nightly-.*//')
echo "Current version: $CLEAN_CURRENT"
echo "Latest release version: $CLEAN_LATEST"
# Use sort -V to compare versions
HIGHER_VERSION=$(printf "%s\n%s" "$CLEAN_CURRENT" "$CLEAN_LATEST" | sort -V | tail -n1)
if [ "$HIGHER_VERSION" = "$CLEAN_CURRENT" ]; then
echo "Current version ($CLEAN_CURRENT) is NEWER than or EQUAL to latest ($CLEAN_LATEST)"
echo "should-push-latest-tag=true" >> $GITHUB_OUTPUT
else
echo "Current version ($CLEAN_CURRENT) is OLDER than latest ($CLEAN_LATEST)"
echo "should-push-latest-tag=false" >> $GITHUB_OUTPUT
fi


@@ -8,7 +8,7 @@ set -e
# - If it's a nightly build, the version is 'nightly-YYYYMMDD-$(git rev-parse --short HEAD)', like 'nightly-20230712-e5b243c'.
# create_version ${GIHUB_EVENT_NAME} ${NEXT_RELEASE_VERSION} ${NIGHTLY_RELEASE_PREFIX}
function create_version() {
# Read from envrionment variables.
# Read from environment variables.
if [ -z "$GITHUB_EVENT_NAME" ]; then
echo "GITHUB_EVENT_NAME is empty" >&2
exit 1
@@ -16,7 +16,8 @@ function create_version() {
if [ -z "$NEXT_RELEASE_VERSION" ]; then
echo "NEXT_RELEASE_VERSION is empty, use version from Cargo.toml" >&2
export NEXT_RELEASE_VERSION=$(grep '^version = ' Cargo.toml | cut -d '"' -f 2 | head -n 1)
# NOTE: Need a `v` prefix for the version string.
export NEXT_RELEASE_VERSION=v$(grep '^version = ' Cargo.toml | cut -d '"' -f 2 | head -n 1)
fi
if [ -z "$NIGHTLY_RELEASE_PREFIX" ]; then


@@ -10,7 +10,7 @@ GREPTIMEDB_IMAGE_TAG=${GREPTIMEDB_IMAGE_TAG:-latest}
ETCD_CHART="oci://registry-1.docker.io/bitnamicharts/etcd"
GREPTIME_CHART="https://greptimeteam.github.io/helm-charts/"
# Ceate a cluster with 1 control-plane node and 5 workers.
# Create a cluster with 1 control-plane node and 5 workers.
function create_kind_cluster() {
cat <<EOF | kind create cluster --name "${CLUSTER}" --image kindest/node:"$KUBERNETES_VERSION" --config=-
kind: Cluster
@@ -68,7 +68,7 @@ function deploy_greptimedb_cluster() {
helm install "$cluster_name" greptime/greptimedb-cluster \
--set image.tag="$GREPTIMEDB_IMAGE_TAG" \
--set meta.etcdEndpoints="etcd.$install_namespace:2379" \
--set meta.backendStorage.etcd.endpoints="etcd.$install_namespace:2379" \
-n "$install_namespace"
# Wait for greptimedb cluster to be ready.
@@ -103,7 +103,7 @@ function deploy_greptimedb_cluster_with_s3_storage() {
helm install "$cluster_name" greptime/greptimedb-cluster -n "$install_namespace" \
--set image.tag="$GREPTIMEDB_IMAGE_TAG" \
--set meta.etcdEndpoints="etcd.$install_namespace:2379" \
--set meta.backendStorage.etcd.endpoints="etcd.$install_namespace:2379" \
--set storage.s3.bucket="$AWS_CI_TEST_BUCKET" \
--set storage.s3.region="$AWS_REGION" \
--set storage.s3.root="$DATA_ROOT" \


@@ -4,7 +4,7 @@ DEV_BUILDER_IMAGE_TAG=$1
update_dev_builder_version() {
if [ -z "$DEV_BUILDER_IMAGE_TAG" ]; then
echo "Error: Should specify the dev-builder image tag"
echo "Error: Should specify the dev-builder image tag"
exit 1
fi
@@ -17,7 +17,7 @@ update_dev_builder_version() {
git checkout -b $BRANCH_NAME
# Update the dev-builder image tag in the Makefile.
gsed -i "s/DEV_BUILDER_IMAGE_TAG ?=.*/DEV_BUILDER_IMAGE_TAG ?= ${DEV_BUILDER_IMAGE_TAG}/g" Makefile
sed -i "s/DEV_BUILDER_IMAGE_TAG ?=.*/DEV_BUILDER_IMAGE_TAG ?= ${DEV_BUILDER_IMAGE_TAG}/g" Makefile
# Commit the changes.
git add Makefile


@@ -30,7 +30,7 @@ update_helm_charts_version() {
# Commit the changes.
git add .
git commit -m "chore: Update GreptimeDB version to ${VERSION}"
git commit -s -m "chore: Update GreptimeDB version to ${VERSION}"
git push origin $BRANCH_NAME
# Create a Pull Request.


@@ -26,7 +26,7 @@ update_homebrew_greptime_version() {
# Commit the changes.
git add .
git commit -m "chore: Update GreptimeDB version to ${VERSION}"
git commit -s -m "chore: Update GreptimeDB version to ${VERSION}"
git push origin $BRANCH_NAME
# Create a Pull Request.


@@ -41,7 +41,7 @@ function upload_artifacts() {
# Updates the latest version information in AWS S3 if UPDATE_VERSION_INFO is true.
function update_version_info() {
if [ "$UPDATE_VERSION_INFO" == "true" ]; then
# If it's the officail release(like v1.0.0, v1.0.1, v1.0.2, etc.), update latest-version.txt.
# If it's the official release(like v1.0.0, v1.0.1, v1.0.2, etc.), update latest-version.txt.
if [[ "$VERSION" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
echo "Updating latest-version.txt"
echo "$VERSION" > latest-version.txt


@@ -55,6 +55,11 @@ on:
description: Build and push images to DockerHub and ACR
required: false
default: true
upload_artifacts_to_s3:
type: boolean
description: Whether upload artifacts to s3
required: false
default: false
cargo_profile:
type: choice
description: The cargo profile to use in building GreptimeDB.
@@ -238,7 +243,7 @@ jobs:
version: ${{ needs.allocate-runners.outputs.version }}
push-latest-tag: false # Don't push the latest tag to registry.
dev-mode: true # Only build the standard images.
- name: Echo Docker image tag to step summary
run: |
echo "## Docker Image Tag" >> $GITHUB_STEP_SUMMARY
@@ -281,7 +286,7 @@ jobs:
aws-cn-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
aws-cn-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
aws-cn-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
upload-to-s3: false
upload-to-s3: ${{ inputs.upload_artifacts_to_s3 }}
dev-mode: true # Only build the standard images(exclude centos images).
push-latest-tag: false # Don't push the latest tag to registry.
update-version-info: false # Don't update the version info in S3.


@@ -195,6 +195,7 @@ jobs:
runs-on: ubuntu-latest
timeout-minutes: 60
strategy:
fail-fast: false
matrix:
target: [ "unstable_fuzz_create_table_standalone" ]
steps:
@@ -222,12 +223,12 @@ jobs:
run: |
sudo apt update && sudo apt install -y libfuzzer-14-dev
cargo install cargo-fuzz cargo-gc-bin --force
- name: Download pre-built binariy
- name: Download pre-built binary
uses: actions/download-artifact@v4
with:
name: bin
path: .
- name: Unzip bianry
- name: Unzip binary
run: |
tar -xvf ./bin.tar.gz
rm ./bin.tar.gz
@@ -249,6 +250,11 @@ jobs:
name: unstable-fuzz-logs
path: /tmp/unstable-greptime/
retention-days: 3
- name: Describe pods
if: failure()
shell: bash
run: |
kubectl describe pod -n my-greptimedb
build-greptime-ci:
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
@@ -275,7 +281,7 @@ jobs:
- name: Install cargo-gc-bin
shell: bash
run: cargo install cargo-gc-bin --force
- name: Build greptime bianry
- name: Build greptime binary
shell: bash
# `cargo gc` will invoke `cargo build` with specified args
run: cargo gc --profile ci -- --bin greptime --features "pg_kvbackend,mysql_kvbackend"
@@ -299,6 +305,7 @@ jobs:
needs: build-greptime-ci
timeout-minutes: 60
strategy:
fail-fast: false
matrix:
target: [ "fuzz_create_table", "fuzz_alter_table", "fuzz_create_database", "fuzz_create_logical_table", "fuzz_alter_logical_table", "fuzz_insert", "fuzz_insert_logical_table" ]
mode:
@@ -328,9 +335,9 @@ jobs:
name: Setup Minio
uses: ./.github/actions/setup-minio
- if: matrix.mode.kafka
name: Setup Kafka cluser
name: Setup Kafka cluster
uses: ./.github/actions/setup-kafka-cluster
- name: Setup Etcd cluser
- name: Setup Etcd cluster
uses: ./.github/actions/setup-etcd-cluster
# Prepares for fuzz tests
- uses: arduino/setup-protoc@v3
@@ -403,6 +410,11 @@ jobs:
shell: bash
run: |
kubectl describe nodes
- name: Describe pod
if: failure()
shell: bash
run: |
kubectl describe pod -n my-greptimedb
- name: Export kind logs
if: failure()
shell: bash
@@ -431,6 +443,7 @@ jobs:
needs: build-greptime-ci
timeout-minutes: 60
strategy:
fail-fast: false
matrix:
target: ["fuzz_migrate_mito_regions", "fuzz_migrate_metric_regions", "fuzz_failover_mito_regions", "fuzz_failover_metric_regions"]
mode:
@@ -475,9 +488,9 @@ jobs:
name: Setup Minio
uses: ./.github/actions/setup-minio
- if: matrix.mode.kafka
name: Setup Kafka cluser
name: Setup Kafka cluster
uses: ./.github/actions/setup-kafka-cluster
- name: Setup Etcd cluser
- name: Setup Etcd cluster
uses: ./.github/actions/setup-etcd-cluster
# Prepares for fuzz tests
- uses: arduino/setup-protoc@v3
@@ -551,6 +564,11 @@ jobs:
shell: bash
run: |
kubectl describe nodes
- name: Describe pods
if: failure()
shell: bash
run: |
kubectl describe pod -n my-greptimedb
- name: Export kind logs
if: failure()
shell: bash
@@ -578,6 +596,7 @@ jobs:
needs: build
runs-on: ${{ matrix.os }}
strategy:
fail-fast: false
matrix:
os: [ ubuntu-latest ]
mode:


@@ -124,9 +124,9 @@ jobs:
fetch-depth: 0
persist-credentials: false
- uses: cachix/install-nix-action@v31
with:
nix_path: nixpkgs=channel:nixos-24.11
- run: nix develop --command cargo build --bin greptime
- run: nix develop --command cargo check --bin greptime
env:
CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=mold"
check-status:
name: Check status

42
.github/workflows/pr-labeling.yaml vendored Normal file

@@ -0,0 +1,42 @@
name: 'PR Labeling'
on:
pull_request_target:
types:
- opened
- synchronize
- reopened
permissions:
contents: read
pull-requests: write
issues: write
jobs:
labeler:
runs-on: ubuntu-latest
steps:
- name: Checkout sources
uses: actions/checkout@v4
- uses: actions/labeler@v5
with:
configuration-path: ".github/labeler.yaml"
repo-token: "${{ secrets.GITHUB_TOKEN }}"
size-label:
runs-on: ubuntu-latest
steps:
- uses: pascalgn/size-label-action@v0.5.5
env:
GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
with:
sizes: >
{
"0": "XS",
"100": "S",
"300": "M",
"1000": "L",
"1500": "XL",
"2000": "XXL"
}


@@ -88,7 +88,7 @@ env:
# Controls whether to run tests, include unit-test, integration-test and sqlness.
DISABLE_RUN_TESTS: ${{ inputs.skip_test || vars.DEFAULT_SKIP_TEST }}
# The scheduled version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-YYYYMMDD', like v0.2.0-nigthly-20230313;
# The scheduled version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-YYYYMMDD', like v0.2.0-nightly-20230313;
NIGHTLY_RELEASE_PREFIX: nightly
jobs:
@@ -110,6 +110,8 @@ jobs:
# The 'version' use as the global tag name of the release workflow.
version: ${{ steps.create-version.outputs.version }}
should-push-latest-tag: ${{ steps.check-version.outputs.should-push-latest-tag }}
steps:
- name: Checkout
uses: actions/checkout@v4
@@ -124,7 +126,7 @@ jobs:
# The create-version will create a global variable named 'version' in the global workflows.
# - If it's a tag push release, the version is the tag name(${{ github.ref_name }});
# - If it's a scheduled release, the version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-$buildTime', like v0.2.0-nigthly-20230313;
# - If it's a scheduled release, the version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-$buildTime', like v0.2.0-nightly-20230313;
# - If it's a manual release, the version is '${{ env.NEXT_RELEASE_VERSION }}-<short-git-sha>-YYYYMMDDSS', like v0.2.0-e5b243c-2023071245;
- name: Create version
id: create-version
@@ -135,6 +137,11 @@ jobs:
GITHUB_REF_NAME: ${{ github.ref_name }}
NIGHTLY_RELEASE_PREFIX: ${{ env.NIGHTLY_RELEASE_PREFIX }}
- name: Check version
id: check-version
run: |
./.github/scripts/check-version.sh "${{ steps.create-version.outputs.version }}"
- name: Allocate linux-amd64 runner
if: ${{ inputs.build_linux_amd64_artifacts || github.event_name == 'push' || github.event_name == 'schedule' }}
uses: ./.github/actions/start-runner
@@ -314,7 +321,7 @@ jobs:
image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
image-registry-password: ${{ secrets.DOCKERHUB_TOKEN }}
version: ${{ needs.allocate-runners.outputs.version }}
push-latest-tag: ${{ github.ref_type == 'tag' && !contains(github.ref_name, 'nightly') && github.event_name != 'schedule' }}
push-latest-tag: ${{ needs.allocate-runners.outputs.should-push-latest-tag == 'true' && github.ref_type == 'tag' && !contains(github.ref_name, 'nightly') && github.event_name != 'schedule' }}
- name: Set build image result
id: set-build-image-result
@@ -361,7 +368,7 @@ jobs:
dev-mode: false
upload-to-s3: true
update-version-info: true
push-latest-tag: ${{ github.ref_type == 'tag' && !contains(github.ref_name, 'nightly') && github.event_name != 'schedule' }}
push-latest-tag: ${{ needs.allocate-runners.outputs.should-push-latest-tag == 'true' && github.ref_type == 'tag' && !contains(github.ref_name, 'nightly') && github.event_name != 'schedule' }}
publish-github-release:
name: Create GitHub release and upload artifacts
@@ -388,7 +395,7 @@ jobs:
### Stop runners ###
# It's very necessary to split the job of releasing runners into 'stop-linux-amd64-runner' and 'stop-linux-arm64-runner'.
# Because we can terminate the specified EC2 instance immediately after the job is finished without uncessary waiting.
# Because we can terminate the specified EC2 instance immediately after the job is finished without unnecessary waiting.
stop-linux-amd64-runner: # It's always run as the last job in the workflow to make sure that the runner is released.
name: Stop linux-amd64 runner
# Only run this job when the runner is allocated.
@@ -441,10 +448,10 @@ jobs:
aws-region: ${{ vars.EC2_RUNNER_REGION }}
github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
bump-doc-version:
name: Bump doc version
bump-downstream-repo-versions:
name: Bump downstream repo versions
if: ${{ github.event_name == 'push' || github.event_name == 'schedule' }}
needs: [publish-github-release]
needs: [allocate-runners, publish-github-release]
runs-on: ubuntu-latest
# Permission reference: https://docs.github.com/en/actions/using-jobs/assigning-permissions-to-jobs
permissions:
@@ -456,41 +463,21 @@ jobs:
fetch-depth: 0
persist-credentials: false
- uses: ./.github/actions/setup-cyborg
- name: Bump doc version
- name: Bump downstream repo versions
working-directory: cyborg
run: pnpm tsx bin/bump-doc-version.ts
env:
VERSION: ${{ needs.allocate-runners.outputs.version }}
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
DOCS_REPO_TOKEN: ${{ secrets.DOCS_REPO_TOKEN }}
bump-website-version:
name: Bump website version
if: ${{ github.event_name == 'push' }}
needs: [publish-github-release]
runs-on: ubuntu-latest
# Permission reference: https://docs.github.com/en/actions/using-jobs/assigning-permissions-to-jobs
permissions:
issues: write # Allows the action to create issues for cyborg.
contents: write # Allows the action to create a release.
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
persist-credentials: false
- uses: ./.github/actions/setup-cyborg
- name: Bump website version
working-directory: cyborg
run: pnpm tsx bin/bump-website-version.ts
run: pnpm tsx bin/bump-versions.ts
env:
TARGET_REPOS: website,docs,demo
VERSION: ${{ needs.allocate-runners.outputs.version }}
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
WEBSITE_REPO_TOKEN: ${{ secrets.WEBSITE_REPO_TOKEN }}
DOCS_REPO_TOKEN: ${{ secrets.DOCS_REPO_TOKEN }}
DEMO_REPO_TOKEN: ${{ secrets.DEMO_REPO_TOKEN }}
bump-helm-charts-version:
name: Bump helm charts version
if: ${{ github.ref_type == 'tag' && !contains(github.ref_name, 'nightly') && github.event_name != 'schedule' }}
needs: [publish-github-release]
needs: [allocate-runners, publish-github-release]
runs-on: ubuntu-latest
permissions:
contents: write
@@ -511,7 +498,7 @@ jobs:
bump-homebrew-greptime-version:
name: Bump homebrew greptime version
if: ${{ github.ref_type == 'tag' && !contains(github.ref_name, 'nightly') && github.event_name != 'schedule' }}
needs: [publish-github-release]
needs: [allocate-runners, publish-github-release]
runs-on: ubuntu-latest
permissions:
contents: write


@@ -14,6 +14,9 @@ concurrency:
jobs:
check:
runs-on: ubuntu-latest
permissions:
pull-requests: write # Add permissions to modify PRs
issues: write
timeout-minutes: 10
steps:
- uses: actions/checkout@v4

3
.gitignore vendored

@@ -58,3 +58,6 @@ tests-fuzz/corpus/
## default data home
greptimedb_data
# github
!/.github


@@ -108,7 +108,7 @@ of what you were trying to do and what went wrong. You can also reach for help i
The core team will be thrilled if you would like to participate in any way you like. When you are stuck, try to ask for help by filing an issue, with a detailed description of what you were trying to do and what went wrong. If you have any questions or if you would like to get involved in our community, please check out:
- [GreptimeDB Community Slack](https://greptime.com/slack)
- [GreptimeDB Github Discussions](https://github.com/GreptimeTeam/greptimedb/discussions)
- [GreptimeDB GitHub Discussions](https://github.com/GreptimeTeam/greptimedb/discussions)
Also, see some extra GreptimeDB content:

1180
Cargo.lock generated

File diff suppressed because it is too large.


@@ -30,6 +30,7 @@ members = [
"src/common/recordbatch",
"src/common/runtime",
"src/common/session",
"src/common/stat",
"src/common/substrait",
"src/common/telemetry",
"src/common/test-util",
@@ -48,6 +49,7 @@ members = [
"src/meta-client",
"src/meta-srv",
"src/metric-engine",
"src/mito-codec",
"src/mito2",
"src/object-store",
"src/operator",
@@ -69,7 +71,7 @@ members = [
resolver = "2"
[workspace.package]
version = "0.15.0"
version = "0.15.5"
edition = "2021"
license = "Apache-2.0"
@@ -79,6 +81,7 @@ clippy.implicit_clone = "warn"
clippy.result_large_err = "allow"
clippy.large_enum_variant = "allow"
clippy.doc_overindented_list_items = "allow"
clippy.uninlined_format_args = "allow"
rust.unknown_lints = "deny"
rust.unexpected_cfgs = { level = "warn", check-cfg = ['cfg(tokio_unstable)'] }
@@ -114,15 +117,15 @@ clap = { version = "4.4", features = ["derive"] }
config = "0.13.0"
crossbeam-utils = "0.8"
dashmap = "6.1"
datafusion = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "e104c7cf62b11dd5fe41461b82514978234326b4" }
datafusion-common = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "e104c7cf62b11dd5fe41461b82514978234326b4" }
datafusion-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "e104c7cf62b11dd5fe41461b82514978234326b4" }
datafusion-functions = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "e104c7cf62b11dd5fe41461b82514978234326b4" }
datafusion-optimizer = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "e104c7cf62b11dd5fe41461b82514978234326b4" }
datafusion-physical-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "e104c7cf62b11dd5fe41461b82514978234326b4" }
datafusion-physical-plan = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "e104c7cf62b11dd5fe41461b82514978234326b4" }
datafusion-sql = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "e104c7cf62b11dd5fe41461b82514978234326b4" }
datafusion-substrait = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "e104c7cf62b11dd5fe41461b82514978234326b4" }
datafusion = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "12c0381babd52c681043957e9d6ee083a03f7646" }
datafusion-common = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "12c0381babd52c681043957e9d6ee083a03f7646" }
datafusion-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "12c0381babd52c681043957e9d6ee083a03f7646" }
datafusion-functions = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "12c0381babd52c681043957e9d6ee083a03f7646" }
datafusion-optimizer = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "12c0381babd52c681043957e9d6ee083a03f7646" }
datafusion-physical-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "12c0381babd52c681043957e9d6ee083a03f7646" }
datafusion-physical-plan = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "12c0381babd52c681043957e9d6ee083a03f7646" }
datafusion-sql = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "12c0381babd52c681043957e9d6ee083a03f7646" }
datafusion-substrait = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "12c0381babd52c681043957e9d6ee083a03f7646" }
deadpool = "0.12"
deadpool-postgres = "0.14"
derive_builder = "0.20"
@@ -131,7 +134,7 @@ etcd-client = "0.14"
fst = "0.4.7"
futures = "0.3"
futures-util = "0.3"
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "7668a882d57ca6a2333146e0574b8f0c9d5008ae" }
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "f3103a8c9b8ce162457d0a3e3ca00d53d1a8bd06" }
hex = "0.4"
http = "1"
humantime = "2.1"
@@ -147,6 +150,7 @@ meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev =
mockall = "0.13"
moka = "0.12"
nalgebra = "0.33"
nix = { version = "0.30.1", default-features = false, features = ["event", "fs", "process"] }
notify = "8.0"
num_cpus = "1.16"
object_store_opendal = "0.50"
@@ -163,7 +167,9 @@ parquet = { version = "54.2", default-features = false, features = ["arrow", "as
paste = "1.0"
pin-project = "1.0"
prometheus = { version = "0.13.3", features = ["process"] }
promql-parser = { version = "0.5.1", features = ["ser"] }
promql-parser = { git = "https://github.com/GreptimeTeam/promql-parser.git", rev = "0410e8b459dda7cb222ce9596f8bf3971bd07bd2", features = [
"ser",
] }
prost = { version = "0.13", features = ["no-recursion-limit"] }
raft-engine = { version = "0.4.1", default-features = false }
rand = "0.9"
@@ -176,7 +182,7 @@ reqwest = { version = "0.12", default-features = false, features = [
"stream",
"multipart",
] }
rskafka = { git = "https://github.com/influxdata/rskafka.git", rev = "75535b5ad9bae4a5dbb582c82e44dfd81ec10105", features = [
rskafka = { git = "https://github.com/influxdata/rskafka.git", rev = "8dbd01ed809f5a791833a594e85b144e36e45820", features = [
"transport-tls",
] }
rstest = "0.25"
@@ -214,6 +220,8 @@ tokio-util = { version = "0.7", features = ["io-util", "compat"] }
toml = "0.8.8"
tonic = { version = "0.12", features = ["tls", "gzip", "zstd"] }
tower = "0.5"
tower-http = "0.6"
tracing = "0.1"
tracing-appender = "0.2"
tracing-subscriber = { version = "0.3", features = ["env-filter", "json", "fmt"] }
typetag = "0.2"
@@ -269,6 +277,7 @@ log-store = { path = "src/log-store" }
meta-client = { path = "src/meta-client" }
meta-srv = { path = "src/meta-srv" }
metric-engine = { path = "src/metric-engine" }
mito-codec = { path = "src/mito-codec" }
mito2 = { path = "src/mito2" }
object-store = { path = "src/object-store" }
operator = { path = "src/operator" }
@@ -284,6 +293,7 @@ query = { path = "src/query" }
servers = { path = "src/servers" }
session = { path = "src/session" }
sql = { path = "src/sql" }
stat = { path = "src/common/stat" }
store-api = { path = "src/store-api" }
substrait = { path = "src/common/substrait" }
table = { path = "src/table" }


@@ -8,7 +8,7 @@ CARGO_BUILD_OPTS := --locked
IMAGE_REGISTRY ?= docker.io
IMAGE_NAMESPACE ?= greptime
IMAGE_TAG ?= latest
DEV_BUILDER_IMAGE_TAG ?= 2025-04-15-1a517ec8-20250428023155
DEV_BUILDER_IMAGE_TAG ?= 2025-05-19-b2377d4b-20250520045554
BUILDX_MULTI_PLATFORM_BUILD ?= false
BUILDX_BUILDER_NAME ?= gtbuilder
BASE_IMAGE ?= ubuntu


@@ -121,7 +121,7 @@ docker pull greptime/greptimedb
```shell
docker run -p 127.0.0.1:4000-4003:4000-4003 \
-v "$(pwd)/greptimedb:/greptimedb_data" \
-v "$(pwd)/greptimedb_data:/greptimedb_data" \
--name greptime --rm \
greptime/greptimedb:latest standalone start \
--http-addr 0.0.0.0:4000 \
@@ -129,7 +129,7 @@ docker run -p 127.0.0.1:4000-4003:4000-4003 \
--mysql-addr 0.0.0.0:4002 \
--postgres-addr 0.0.0.0:4003
```
Dashboard: [http://localhost:4000/dashboard](http://localhost:4000/dashboard)
Dashboard: [http://localhost:4000/dashboard](http://localhost:4000/dashboard)
[Full Install Guide](https://docs.greptime.com/getting-started/installation/overview)
**Troubleshooting:**
@@ -167,7 +167,7 @@ cargo run -- standalone start
## Project Status
> **Status:** Beta.
> **Status:** Beta.
> **GA (v1.0):** Targeted for mid 2025.
- Being used in production by early adopters
@@ -189,7 +189,8 @@ We invite you to engage and contribute!
- [Official Website](https://greptime.com/)
- [Blog](https://greptime.com/blogs/)
- [LinkedIn](https://www.linkedin.com/company/greptime/)
- [Twitter](https://twitter.com/greptime)
- [X (Twitter)](https://X.com/greptime)
- [YouTube](https://www.youtube.com/@greptime)
## License
@@ -197,8 +198,8 @@ GreptimeDB is licensed under the [Apache License 2.0](https://apache.org/license
## Commercial Support
Running GreptimeDB in your organization?
We offer enterprise add-ons, services, training, and consulting.
Running GreptimeDB in your organization?
We offer enterprise add-ons, services, training, and consulting.
[Contact us](https://greptime.com/contactus) for details.
## Contributing
@@ -215,4 +216,3 @@ Special thanks to all contributors! See [AUTHORS.md](https://github.com/Greptime
- [Apache Parquet™](https://parquet.apache.org/) (file storage)
- [Apache Arrow DataFusion™](https://arrow.apache.org/datafusion/) (query engine)
- [Apache OpenDAL™](https://opendal.apache.org/) (data access abstraction)
- [etcd](https://etcd.io/) (meta service)


@@ -27,6 +27,7 @@
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
| `http.enable_cors` | Bool | `true` | HTTP CORS support, it's turned on by default<br/>This allows browser to access http APIs without CORS restrictions |
| `http.cors_allowed_origins` | Array | Unset | Customize allowed origins for HTTP CORS. |
| `http.prom_validation_mode` | String | `strict` | Whether to enable validation for Prometheus remote write requests.<br/>Available options:<br/>- strict: deny invalid UTF-8 strings (default).<br/>- lossy: allow invalid UTF-8 strings, replace invalid characters with REPLACEMENT_CHARACTER(U+FFFD).<br/>- unchecked: do not valid strings. |
| `grpc` | -- | -- | The gRPC server options. |
| `grpc.bind_addr` | String | `127.0.0.1:4001` | The address to bind the gRPC server. |
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
@@ -99,7 +100,7 @@
| `query` | -- | -- | The query engine options. |
| `query.parallelism` | Integer | `0` | Parallelism of the query engine.<br/>Default to 0, which means the number of CPU cores. |
| `storage` | -- | -- | The data storage options. |
| `storage.data_home` | String | `./greptimedb_data/` | The working home directory. |
| `storage.data_home` | String | `./greptimedb_data` | The working home directory. |
| `storage.type` | String | `File` | The storage type used to store the data.<br/>- `File`: the data is stored in the local file system.<br/>- `S3`: the data is stored in the S3 object storage.<br/>- `Gcs`: the data is stored in the Google Cloud Storage.<br/>- `Azblob`: the data is stored in the Azure Blob Storage.<br/>- `Oss`: the data is stored in the Aliyun OSS. |
| `storage.cache_path` | String | Unset | Read cache configuration for object storage such as 'S3' etc, it's configured by default when using object storage. It is recommended to configure it when using object storage for better performance.<br/>A local file directory, defaults to `{data_home}`. An empty string means disabling. |
| `storage.cache_capacity` | String | Unset | The local file cache capacity in bytes. If your disk space is sufficient, it is recommended to set it larger. |
@@ -122,6 +123,7 @@
| `storage.http_client.connect_timeout` | String | `30s` | The timeout for only the connect phase of a http client. |
| `storage.http_client.timeout` | String | `30s` | The total request timeout, applied from when the request starts connecting until the response body has finished.<br/>Also considered a total deadline. |
| `storage.http_client.pool_idle_timeout` | String | `90s` | The timeout for idle sockets being kept-alive. |
| `storage.http_client.skip_ssl_validation` | Bool | `false` | To skip the ssl verification<br/>**Security Notice**: Setting `skip_ssl_validation = true` disables certificate verification, making connections vulnerable to man-in-the-middle attacks. Only use this in development or trusted private networks. |
| `[[region_engine]]` | -- | -- | The region engine options. You can configure multiple region engines. |
| `region_engine.mito` | -- | -- | The Mito engine options. |
| `region_engine.mito.num_workers` | Integer | `8` | Number of region workers. |
@@ -145,6 +147,7 @@
| `region_engine.mito.write_cache_ttl` | String | Unset | TTL for write cache. |
| `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
| `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
| `region_engine.mito.max_concurrent_scan_files` | Integer | `128` | Maximum number of SST files to scan concurrently. |
| `region_engine.mito.allow_stale_entries` | Bool | `false` | Whether to allow stale WAL entries read during replay. |
| `region_engine.mito.min_compaction_interval` | String | `0m` | Minimum time interval between two compactions.<br/>To align with the old behavior, the default value is 0 (no restrictions). |
| `region_engine.mito.index` | -- | -- | The options for index in Mito engine. |
@@ -154,6 +157,7 @@
| `region_engine.mito.index.metadata_cache_size` | String | `64MiB` | Cache size for inverted index metadata. |
| `region_engine.mito.index.content_cache_size` | String | `128MiB` | Cache size for inverted index content. |
| `region_engine.mito.index.content_cache_page_size` | String | `64KiB` | Page size for inverted index content cache. |
| `region_engine.mito.index.result_cache_size` | String | `128MiB` | Cache size for index result. |
| `region_engine.mito.inverted_index` | -- | -- | The options for inverted index in Mito engine. |
| `region_engine.mito.inverted_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.inverted_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
@@ -188,17 +192,18 @@
| `logging.max_log_files` | Integer | `720` | The maximum amount of log files. |
| `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing will be sampled and exported.<br/>Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.<br/>ratio > 1 are treated as 1. Fractions < 0 are treated as 0 |
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
| `logging.slow_query` | -- | -- | The slow query log options. |
| `logging.slow_query.enable` | Bool | `false` | Whether to enable slow query log. |
| `logging.slow_query.threshold` | String | Unset | The threshold of slow query. |
| `logging.slow_query.sample_ratio` | Float | Unset | The sampling ratio of slow query log. The value should be in the range of (0, 1]. |
| `export_metrics` | -- | -- | The datanode can export its metrics and send to Prometheus compatible service (e.g. send to `greptimedb` itself) from remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape. |
| `slow_query` | -- | -- | The slow query log options. |
| `slow_query.enable` | Bool | `false` | Whether to enable slow query log. |
| `slow_query.record_type` | String | Unset | The record type of slow queries. It can be `system_table` or `log`. |
| `slow_query.threshold` | String | Unset | The threshold of slow query. |
| `slow_query.sample_ratio` | Float | Unset | The sampling ratio of slow query log. The value should be in the range of (0, 1]. |
| `export_metrics` | -- | -- | The standalone can export its metrics and send to Prometheus compatible service (e.g. `greptimedb`) from remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape. |
| `export_metrics.enable` | Bool | `false` | whether enable export metrics. |
| `export_metrics.write_interval` | String | `30s` | The interval of export metrics. |
| `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommended to collect metrics generated by itself<br/>You must create the database before enabling it. |
| `export_metrics.self_import.db` | String | Unset | -- |
| `export_metrics.remote_write` | -- | -- | -- |
| `export_metrics.remote_write.url` | String | `""` | The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. |
| `export_metrics.remote_write.url` | String | `""` | The prometheus remote write endpoint that the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. |
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
@@ -224,10 +229,12 @@
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
| `http.enable_cors` | Bool | `true` | HTTP CORS support, it's turned on by default<br/>This allows browser to access http APIs without CORS restrictions |
| `http.cors_allowed_origins` | Array | Unset | Customize allowed origins for HTTP CORS. |
| `http.prom_validation_mode` | String | `strict` | Whether to enable validation for Prometheus remote write requests.<br/>Available options:<br/>- strict: deny invalid UTF-8 strings (default).<br/>- lossy: allow invalid UTF-8 strings, replace invalid characters with REPLACEMENT_CHARACTER(U+FFFD).<br/>- unchecked: do not valid strings. |
| `grpc` | -- | -- | The gRPC server options. |
| `grpc.bind_addr` | String | `127.0.0.1:4001` | The address to bind the gRPC server. |
| `grpc.server_addr` | String | `127.0.0.1:4001` | The address advertised to the metasrv, and used for connections from outside the host.<br/>If left empty or unset, the server will automatically use the IP address of the first network interface<br/>on the host, with the same port number as the one specified in `grpc.bind_addr`. |
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
| `grpc.flight_compression` | String | `arrow_ipc` | Compression mode for frontend side Arrow IPC service. Available options:<br/>- `none`: disable all compression<br/>- `transport`: only enable gRPC transport compression (zstd)<br/>- `arrow_ipc`: only enable Arrow IPC compression (lz4)<br/>- `all`: enable all compression.<br/>Default to `none` |
| `grpc.tls` | -- | -- | gRPC server TLS options, see `mysql.tls` section. |
| `grpc.tls.mode` | String | `disable` | TLS mode. |
| `grpc.tls.cert_path` | String | Unset | Certificate file path. |
@@ -288,17 +295,17 @@
| `logging.max_log_files` | Integer | `720` | The maximum amount of log files. |
| `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing will be sampled and exported.<br/>Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.<br/>ratio > 1 are treated as 1. Fractions < 0 are treated as 0 |
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
| `logging.slow_query` | -- | -- | The slow query log options. |
| `logging.slow_query.enable` | Bool | `false` | Whether to enable slow query log. |
| `logging.slow_query.threshold` | String | Unset | The threshold of slow query. |
| `logging.slow_query.sample_ratio` | Float | Unset | The sampling ratio of slow query log. The value should be in the range of (0, 1]. |
| `export_metrics` | -- | -- | The datanode can export its metrics and send to Prometheus compatible service (e.g. send to `greptimedb` itself) from remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape. |
| `slow_query` | -- | -- | The slow query log options. |
| `slow_query.enable` | Bool | `true` | Whether to enable slow query log. |
| `slow_query.record_type` | String | `system_table` | The record type of slow queries. It can be `system_table` or `log`.<br/>If `system_table` is selected, the slow queries will be recorded in a system table `greptime_private.slow_queries`.<br/>If `log` is selected, the slow queries will be logged in a log file `greptimedb-slow-queries.*`. |
| `slow_query.threshold` | String | `30s` | The threshold of slow query. It can be human readable time string, for example: `10s`, `100ms`, `1s`. |
| `slow_query.sample_ratio` | Float | `1.0` | The sampling ratio of slow query log. The value should be in the range of (0, 1]. For example, `0.1` means 10% of the slow queries will be logged and `1.0` means all slow queries will be logged. |
| `slow_query.ttl` | String | `30d` | The TTL of the `slow_queries` system table. Default is `30d` when `record_type` is `system_table`. |
| `export_metrics` | -- | -- | The frontend can export its metrics and send to Prometheus compatible service (e.g. `greptimedb` itself) from remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape. |
| `export_metrics.enable` | Bool | `false` | whether enable export metrics. |
| `export_metrics.write_interval` | String | `30s` | The interval of export metrics. |
| `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommend to collect metrics generated by itself<br/>You must create the database before enabling it. |
| `export_metrics.self_import.db` | String | Unset | -- |
| `export_metrics.remote_write` | -- | -- | -- |
| `export_metrics.remote_write.url` | String | `""` | The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. |
| `export_metrics.remote_write.url` | String | `""` | The prometheus remote write endpoint that the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. |
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
@@ -308,12 +315,10 @@
| Key | Type | Default | Descriptions |
| --- | -----| ------- | ----------- |
| `data_home` | String | `./greptimedb_data/metasrv/` | The working home directory. |
| `bind_addr` | String | `127.0.0.1:3002` | The bind address of metasrv. |
| `server_addr` | String | `127.0.0.1:3002` | The communication server address for the frontend and datanode to connect to metasrv.<br/>If left empty or unset, the server will automatically use the IP address of the first network interface<br/>on the host, with the same port number as the one specified in `bind_addr`. |
| `data_home` | String | `./greptimedb_data` | The working home directory. |
| `store_addrs` | Array | -- | Store server address default to etcd store.<br/>For postgres store, the format is:<br/>"password=password dbname=postgres user=postgres host=localhost port=5432"<br/>For etcd store, the format is:<br/>"127.0.0.1:2379" |
| `store_key_prefix` | String | `""` | If it's not empty, the metasrv will store all data with this key prefix. |
| `backend` | String | `etcd_store` | The datastore for meta server.<br/>Available values:<br/>- `etcd_store` (default value)<br/>- `memory_store`<br/>- `postgres_store` |
| `backend` | String | `etcd_store` | The datastore for meta server.<br/>Available values:<br/>- `etcd_store` (default value)<br/>- `memory_store`<br/>- `postgres_store`<br/>- `mysql_store` |
| `meta_table_name` | String | `greptime_metakv` | Table name in RDS to store metadata. Effect when using a RDS kvbackend.<br/>**Only used when backend is `postgres_store`.** |
| `meta_election_lock_id` | Integer | `1` | Advisory lock id in PostgreSQL for election. Effect when using PostgreSQL as kvbackend<br/>Only used when backend is `postgres_store`. |
| `selector` | String | `round_robin` | Datanode selector type.<br/>- `round_robin` (default value)<br/>- `lease_based`<br/>- `load_based`<br/>For details, please see "https://docs.greptime.com/developer-guide/metasrv/selector". |
@@ -325,6 +330,16 @@
| `runtime` | -- | -- | The runtime options. |
| `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
| `runtime.compact_rt_size` | Integer | `4` | The number of threads to execute the runtime for global write operations. |
| `grpc` | -- | -- | The gRPC server options. |
| `grpc.bind_addr` | String | `127.0.0.1:3002` | The address to bind the gRPC server. |
| `grpc.server_addr` | String | `127.0.0.1:3002` | The communication server address for the frontend and datanode to connect to metasrv.<br/>If left empty or unset, the server will automatically use the IP address of the first network interface<br/>on the host, with the same port number as the one specified in `bind_addr`. |
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
| `grpc.max_recv_message_size` | String | `512MB` | The maximum receive message size for gRPC server. |
| `grpc.max_send_message_size` | String | `512MB` | The maximum send message size for gRPC server. |
| `http` | -- | -- | The HTTP server options. |
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
| `http.timeout` | String | `0s` | HTTP request timeout. Set to 0 to disable timeout. |
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
| `procedure` | -- | -- | Procedure storage options. |
| `procedure.max_retry_times` | Integer | `12` | Procedure max retry time. |
| `procedure.retry_delay` | String | `500ms` | Initial retry delay of procedures, increases exponentially |
@@ -362,17 +377,11 @@
| `logging.max_log_files` | Integer | `720` | The maximum amount of log files. |
| `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing will be sampled and exported.<br/>Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.<br/>ratio > 1 are treated as 1. Fractions < 0 are treated as 0 |
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
| `logging.slow_query` | -- | -- | The slow query log options. |
| `logging.slow_query.enable` | Bool | `false` | Whether to enable slow query log. |
| `logging.slow_query.threshold` | String | Unset | The threshold of slow query. |
| `logging.slow_query.sample_ratio` | Float | Unset | The sampling ratio of slow query log. The value should be in the range of (0, 1]. |
| `export_metrics` | -- | -- | The datanode can export its metrics and send to Prometheus compatible service (e.g. send to `greptimedb` itself) from remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape. |
| `export_metrics` | -- | -- | The metasrv can export its metrics and send to Prometheus compatible service (e.g. `greptimedb` itself) from remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape. |
| `export_metrics.enable` | Bool | `false` | whether enable export metrics. |
| `export_metrics.write_interval` | String | `30s` | The interval of export metrics. |
| `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommend to collect metrics generated by itself<br/>You must create the database before enabling it. |
| `export_metrics.self_import.db` | String | Unset | -- |
| `export_metrics.remote_write` | -- | -- | -- |
| `export_metrics.remote_write.url` | String | `""` | The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. |
| `export_metrics.remote_write.url` | String | `""` | The prometheus remote write endpoint that the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. |
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
@@ -398,6 +407,7 @@
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
| `grpc.max_recv_message_size` | String | `512MB` | The maximum receive message size for gRPC server. |
| `grpc.max_send_message_size` | String | `512MB` | The maximum send message size for gRPC server. |
| `grpc.flight_compression` | String | `arrow_ipc` | Compression mode for datanode side Arrow IPC service. Available options:<br/>- `none`: disable all compression<br/>- `transport`: only enable gRPC transport compression (zstd)<br/>- `arrow_ipc`: only enable Arrow IPC compression (lz4)<br/>- `all`: enable all compression.<br/>Default to `none` |
| `grpc.tls` | -- | -- | gRPC server TLS options, see `mysql.tls` section. |
| `grpc.tls.mode` | String | `disable` | TLS mode. |
| `grpc.tls.cert_path` | String | Unset | Certificate file path. |
@@ -440,7 +450,7 @@
| `query` | -- | -- | The query engine options. |
| `query.parallelism` | Integer | `0` | Parallelism of the query engine.<br/>Default to 0, which means the number of CPU cores. |
| `storage` | -- | -- | The data storage options. |
| `storage.data_home` | String | `./greptimedb_data/` | The working home directory. |
| `storage.data_home` | String | `./greptimedb_data` | The working home directory. |
| `storage.type` | String | `File` | The storage type used to store the data.<br/>- `File`: the data is stored in the local file system.<br/>- `S3`: the data is stored in the S3 object storage.<br/>- `Gcs`: the data is stored in the Google Cloud Storage.<br/>- `Azblob`: the data is stored in the Azure Blob Storage.<br/>- `Oss`: the data is stored in the Aliyun OSS. |
| `storage.cache_path` | String | Unset | Read cache configuration for object storage such as 'S3' etc, it's configured by default when using object storage. It is recommended to configure it when using object storage for better performance.<br/>A local file directory, defaults to `{data_home}`. An empty string means disabling. |
| `storage.cache_capacity` | String | Unset | The local file cache capacity in bytes. If your disk space is sufficient, it is recommended to set it larger. |
@@ -463,6 +473,7 @@
| `storage.http_client.connect_timeout` | String | `30s` | The timeout for only the connect phase of a http client. |
| `storage.http_client.timeout` | String | `30s` | The total request timeout, applied from when the request starts connecting until the response body has finished.<br/>Also considered a total deadline. |
| `storage.http_client.pool_idle_timeout` | String | `90s` | The timeout for idle sockets being kept-alive. |
| `storage.http_client.skip_ssl_validation` | Bool | `false` | To skip the ssl verification<br/>**Security Notice**: Setting `skip_ssl_validation = true` disables certificate verification, making connections vulnerable to man-in-the-middle attacks. Only use this in development or trusted private networks. |
| `[[region_engine]]` | -- | -- | The region engine options. You can configure multiple region engines. |
| `region_engine.mito` | -- | -- | The Mito engine options. |
| `region_engine.mito.num_workers` | Integer | `8` | Number of region workers. |
@@ -486,6 +497,7 @@
| `region_engine.mito.write_cache_ttl` | String | Unset | TTL for write cache. |
| `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
| `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
| `region_engine.mito.max_concurrent_scan_files` | Integer | `128` | Maximum number of SST files to scan concurrently. |
| `region_engine.mito.allow_stale_entries` | Bool | `false` | Whether to allow stale WAL entries read during replay. |
| `region_engine.mito.min_compaction_interval` | String | `0m` | Minimum time interval between two compactions.<br/>To align with the old behavior, the default value is 0 (no restrictions). |
| `region_engine.mito.index` | -- | -- | The options for index in Mito engine. |
@@ -495,6 +507,7 @@
| `region_engine.mito.index.metadata_cache_size` | String | `64MiB` | Cache size for inverted index metadata. |
| `region_engine.mito.index.content_cache_size` | String | `128MiB` | Cache size for inverted index content. |
| `region_engine.mito.index.content_cache_page_size` | String | `64KiB` | Page size for inverted index content cache. |
| `region_engine.mito.index.result_cache_size` | String | `128MiB` | Cache size for index result. |
| `region_engine.mito.inverted_index` | -- | -- | The options for inverted index in Mito engine. |
| `region_engine.mito.inverted_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.inverted_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
@@ -529,17 +542,11 @@
| `logging.max_log_files` | Integer | `720` | The maximum amount of log files. |
| `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing will be sampled and exported.<br/>Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.<br/>ratio > 1 are treated as 1. Fractions < 0 are treated as 0 |
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
| `logging.slow_query` | -- | -- | The slow query log options. |
| `logging.slow_query.enable` | Bool | `false` | Whether to enable slow query log. |
| `logging.slow_query.threshold` | String | Unset | The threshold of slow query. |
| `logging.slow_query.sample_ratio` | Float | Unset | The sampling ratio of slow query log. The value should be in the range of (0, 1]. |
| `export_metrics` | -- | -- | The datanode can export its metrics and send to Prometheus compatible service (e.g. send to `greptimedb` itself) from remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape. |
| `export_metrics` | -- | -- | The datanode can export its metrics and send to Prometheus compatible service (e.g. `greptimedb` itself) from remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape. |
| `export_metrics.enable` | Bool | `false` | whether enable export metrics. |
| `export_metrics.write_interval` | String | `30s` | The interval of export metrics. |
| `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommend to collect metrics generated by itself<br/>You must create the database before enabling it. |
| `export_metrics.self_import.db` | String | Unset | -- |
| `export_metrics.remote_write` | -- | -- | -- |
| `export_metrics.remote_write.url` | String | `""` | The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. |
| `export_metrics.remote_write.url` | String | `""` | The prometheus remote write endpoint that the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. |
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
@@ -585,9 +592,5 @@
| `logging.max_log_files` | Integer | `720` | The maximum amount of log files. |
| `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing will be sampled and exported.<br/>Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.<br/>ratio > 1 are treated as 1. Fractions < 0 are treated as 0 |
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
| `logging.slow_query` | -- | -- | The slow query log options. |
| `logging.slow_query.enable` | Bool | `false` | Whether to enable slow query log. |
| `logging.slow_query.threshold` | String | Unset | The threshold of slow query. |
| `logging.slow_query.sample_ratio` | Float | Unset | The sampling ratio of slow query log. The value should be in the range of (0, 1]. |
| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
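As a reading aid for the `storage.*` rows above, here is a minimal sketch of enabling the local read cache for an object-storage backend. The cache directory and capacity values are hypothetical examples, and bucket/credential settings are omitted.

```toml
# Hypothetical sketch only: enable the local read cache using the
# `storage.cache_path` / `storage.cache_capacity` options documented above.
[storage]
data_home = "./greptimedb_data"
type = "S3"
# Local directory for the read cache; defaults to `{data_home}` when unset.
cache_path = "./greptimedb_data/object_cache"
# The cache capacity; size it generously if local disk space allows.
cache_capacity = "10GiB"
```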

View File

@@ -44,6 +44,13 @@ runtime_size = 8
max_recv_message_size = "512MB"
## The maximum send message size for gRPC server.
max_send_message_size = "512MB"
## Compression mode for datanode side Arrow IPC service. Available options:
## - `none`: disable all compression
## - `transport`: only enable gRPC transport compression (zstd)
## - `arrow_ipc`: only enable Arrow IPC compression (lz4)
## - `all`: enable all compression.
## Default to `none`
flight_compression = "arrow_ipc"
## gRPC server TLS options, see `mysql.tls` section.
[grpc.tls]
@@ -252,7 +259,7 @@ parallelism = 0
## The data storage options.
[storage]
## The working home directory.
data_home = "./greptimedb_data/"
data_home = "./greptimedb_data"
## The storage type used to store the data.
## - `File`: the data is stored in the local file system.
@@ -360,6 +367,10 @@ timeout = "30s"
## The timeout for idle sockets being kept-alive.
pool_idle_timeout = "90s"
## Whether to skip SSL certificate verification.
## **Security Notice**: Setting `skip_ssl_validation = true` disables certificate verification, making connections vulnerable to man-in-the-middle attacks. Only use this in development or trusted private networks.
skip_ssl_validation = false
# Custom storage options
# [[storage.providers]]
# name = "S3"
@@ -463,6 +474,9 @@ sst_write_buffer_size = "8MB"
## Capacity of the channel to send data from parallel scan tasks to the main task.
parallel_scan_channel_size = 32
## Maximum number of SST files to scan concurrently.
max_concurrent_scan_files = 128
## Whether to allow stale WAL entries read during replay.
allow_stale_entries = false
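The two scan-related options in this hunk control how aggressively Mito parallelizes reads. A sketch restating the defaults shown above (not a tuning recommendation):

```toml
# Sketch using the documented defaults; adjust per workload.
[[region_engine]]
[region_engine.mito]
# Capacity of the channel from parallel scan tasks to the main task.
parallel_scan_channel_size = 32
# Upper bound on SST files scanned concurrently (new in this change).
max_concurrent_scan_files = 128
```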
@@ -499,6 +513,9 @@ content_cache_size = "128MiB"
## Page size for inverted index content cache.
content_cache_page_size = "64KiB"
## Cache size for index result.
result_cache_size = "128MiB"
## The options for inverted index in Mito engine.
[region_engine.mito.inverted_index]
@@ -632,37 +649,16 @@ max_log_files = 720
[logging.tracing_sample_ratio]
default_ratio = 1.0
## The slow query log options.
[logging.slow_query]
## Whether to enable slow query log.
enable = false
## The threshold of slow query.
## @toml2docs:none-default
threshold = "10s"
## The sampling ratio of slow query log. The value should be in the range of (0, 1].
## @toml2docs:none-default
sample_ratio = 1.0
## The datanode can export its metrics and send to Prometheus compatible service (e.g. send to `greptimedb` itself) from remote-write API.
## The datanode can export its metrics and send to Prometheus compatible service (e.g. `greptimedb` itself) from remote-write API.
## This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
[export_metrics]
## whether enable export metrics.
enable = false
## The interval of export metrics.
write_interval = "30s"
## For `standalone` mode, `self_import` is recommend to collect metrics generated by itself
## You must create the database before enabling it.
[export_metrics.self_import]
## @toml2docs:none-default
db = "greptime_metrics"
[export_metrics.remote_write]
## The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
## The prometheus remote write endpoint that the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
url = ""
## HTTP headers of Prometheus remote-write carry.
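For reference, a hedged sketch of self-exporting datanode metrics over the Prometheus remote-write API, based only on the options shown in this file; the target database must already exist.

```toml
# Sketch based on the documented options; create `greptime_metrics` first.
[export_metrics]
enable = true
write_interval = "30s"

[export_metrics.remote_write]
url = "http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics"
```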

View File

@@ -100,19 +100,6 @@ max_log_files = 720
[logging.tracing_sample_ratio]
default_ratio = 1.0
## The slow query log options.
[logging.slow_query]
## Whether to enable slow query log.
enable = false
## The threshold of slow query.
## @toml2docs:none-default
threshold = "10s"
## The sampling ratio of slow query log. The value should be in the range of (0, 1].
## @toml2docs:none-default
sample_ratio = 1.0
## The tracing options. Only effect when compiled with `tokio-console` feature.
#+ [tracing]
## The tokio console address.

View File

@@ -37,6 +37,12 @@ enable_cors = true
## Customize allowed origins for HTTP CORS.
## @toml2docs:none-default
cors_allowed_origins = ["https://example.com"]
## Whether to enable validation for Prometheus remote write requests.
## Available options:
## - strict: deny invalid UTF-8 strings (default).
## - lossy: allow invalid UTF-8 strings, replace invalid characters with REPLACEMENT_CHARACTER(U+FFFD).
## - unchecked: do not validate strings.
prom_validation_mode = "strict"
## The gRPC server options.
[grpc]
@@ -48,6 +54,13 @@ bind_addr = "127.0.0.1:4001"
server_addr = "127.0.0.1:4001"
## The number of server worker threads.
runtime_size = 8
## Compression mode for frontend side Arrow IPC service. Available options:
## - `none`: disable all compression
## - `transport`: only enable gRPC transport compression (zstd)
## - `arrow_ipc`: only enable Arrow IPC compression (lz4)
## - `all`: enable all compression.
## Default to `none`
flight_compression = "arrow_ipc"
## gRPC server TLS options, see `mysql.tls` section.
[grpc.tls]
@@ -223,36 +236,34 @@ max_log_files = 720
default_ratio = 1.0
## The slow query log options.
[logging.slow_query]
[slow_query]
## Whether to enable slow query log.
enable = false
enable = true
## The threshold of slow query.
## @toml2docs:none-default
threshold = "10s"
## The record type of slow queries. It can be `system_table` or `log`.
## If `system_table` is selected, the slow queries will be recorded in a system table `greptime_private.slow_queries`.
## If `log` is selected, the slow queries will be logged in a log file `greptimedb-slow-queries.*`.
record_type = "system_table"
## The sampling ratio of slow query log. The value should be in the range of (0, 1].
## @toml2docs:none-default
## The threshold of slow query. It can be a human-readable time string, for example: `10s`, `100ms`, `1s`.
threshold = "30s"
## The sampling ratio of slow query log. The value should be in the range of (0, 1]. For example, `0.1` means 10% of the slow queries will be logged and `1.0` means all slow queries will be logged.
sample_ratio = 1.0
## The datanode can export its metrics and send to Prometheus compatible service (e.g. send to `greptimedb` itself) from remote-write API.
## The TTL of the `slow_queries` system table. Default is `30d` when `record_type` is `system_table`.
ttl = "30d"
## The frontend can export its metrics and send to Prometheus compatible service (e.g. `greptimedb` itself) from remote-write API.
## This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
[export_metrics]
## whether enable export metrics.
enable = false
## The interval of export metrics.
write_interval = "30s"
## For `standalone` mode, `self_import` is recommend to collect metrics generated by itself
## You must create the database before enabling it.
[export_metrics.self_import]
## @toml2docs:none-default
db = "greptime_metrics"
[export_metrics.remote_write]
## The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
## The prometheus remote write endpoint that the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
url = ""
## HTTP headers of Prometheus remote-write carry.
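Putting the frontend changes above together, the slow query options now live in a top-level `[slow_query]` section rather than `[logging.slow_query]`. A sketch using the defaults shown in this diff:

```toml
[slow_query]
enable = true
# `system_table` records slow queries in `greptime_private.slow_queries`;
# `log` writes them to `greptimedb-slow-queries.*` log files instead.
record_type = "system_table"
threshold = "30s"
# 1.0 logs every slow query; 0.1 would sample 10% of them.
sample_ratio = 1.0
# TTL of the `slow_queries` system table (applies when record_type = "system_table").
ttl = "30d"
```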

View File

@@ -1,13 +1,5 @@
## The working home directory.
data_home = "./greptimedb_data/metasrv/"
## The bind address of metasrv.
bind_addr = "127.0.0.1:3002"
## The communication server address for the frontend and datanode to connect to metasrv.
## If left empty or unset, the server will automatically use the IP address of the first network interface
## on the host, with the same port number as the one specified in `bind_addr`.
server_addr = "127.0.0.1:3002"
data_home = "./greptimedb_data"
## Store server address default to etcd store.
## For postgres store, the format is:
@@ -24,6 +16,7 @@ store_key_prefix = ""
## - `etcd_store` (default value)
## - `memory_store`
## - `postgres_store`
## - `mysql_store`
backend = "etcd_store"
## Table name in RDS to store metadata. Effect when using a RDS kvbackend.
@@ -67,6 +60,32 @@ node_max_idle_time = "24hours"
## The number of threads to execute the runtime for global write operations.
#+ compact_rt_size = 4
## The gRPC server options.
[grpc]
## The address to bind the gRPC server.
bind_addr = "127.0.0.1:3002"
## The communication server address for the frontend and datanode to connect to metasrv.
## If left empty or unset, the server will automatically use the IP address of the first network interface
## on the host, with the same port number as the one specified in `bind_addr`.
server_addr = "127.0.0.1:3002"
## The number of server worker threads.
runtime_size = 8
## The maximum receive message size for gRPC server.
max_recv_message_size = "512MB"
## The maximum send message size for gRPC server.
max_send_message_size = "512MB"
## The HTTP server options.
[http]
## The address to bind the HTTP server.
addr = "127.0.0.1:4000"
## HTTP request timeout. Set to 0 to disable timeout.
timeout = "0s"
## HTTP request body limit.
## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
## Set to 0 to disable limit.
body_limit = "64MB"
## Procedure storage options.
[procedure]
@@ -218,37 +237,16 @@ max_log_files = 720
[logging.tracing_sample_ratio]
default_ratio = 1.0
## The slow query log options.
[logging.slow_query]
## Whether to enable slow query log.
enable = false
## The threshold of slow query.
## @toml2docs:none-default
threshold = "10s"
## The sampling ratio of slow query log. The value should be in the range of (0, 1].
## @toml2docs:none-default
sample_ratio = 1.0
## The datanode can export its metrics and send to Prometheus compatible service (e.g. send to `greptimedb` itself) from remote-write API.
## The metasrv can export its metrics and send to Prometheus compatible service (e.g. `greptimedb` itself) from remote-write API.
## This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
[export_metrics]
## whether enable export metrics.
enable = false
## The interval of export metrics.
write_interval = "30s"
## For `standalone` mode, `self_import` is recommend to collect metrics generated by itself
## You must create the database before enabling it.
[export_metrics.self_import]
## @toml2docs:none-default
db = "greptime_metrics"
[export_metrics.remote_write]
## The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
## The prometheus remote write endpoint that the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
url = ""
## HTTP headers of Prometheus remote-write carry.
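The metasrv changes above move the server addresses under a new `[grpc]` section, add an `[http]` section, and add `mysql_store` as a metadata backend option. A minimal sketch restating the defaults shown in this file:

```toml
data_home = "./greptimedb_data"
# `etcd_store` (default), `memory_store`, `postgres_store`, or `mysql_store`.
backend = "etcd_store"

[grpc]
bind_addr = "127.0.0.1:3002"
server_addr = "127.0.0.1:3002"
runtime_size = 8
max_recv_message_size = "512MB"
max_send_message_size = "512MB"

[http]
addr = "127.0.0.1:4000"
timeout = "0s"
body_limit = "64MB"
```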

View File

@@ -43,6 +43,13 @@ enable_cors = true
## @toml2docs:none-default
cors_allowed_origins = ["https://example.com"]
## Whether to enable validation for Prometheus remote write requests.
## Available options:
## - strict: deny invalid UTF-8 strings (default).
## - lossy: allow invalid UTF-8 strings, replace invalid characters with REPLACEMENT_CHARACTER(U+FFFD).
## - unchecked: do not validate strings.
prom_validation_mode = "strict"
## The gRPC server options.
[grpc]
## The address to bind the gRPC server.
@@ -343,7 +350,7 @@ parallelism = 0
## The data storage options.
[storage]
## The working home directory.
data_home = "./greptimedb_data/"
data_home = "./greptimedb_data"
## The storage type used to store the data.
## - `File`: the data is stored in the local file system.
@@ -451,6 +458,10 @@ timeout = "30s"
## The timeout for idle sockets being kept-alive.
pool_idle_timeout = "90s"
## Whether to skip SSL certificate verification.
## **Security Notice**: Setting `skip_ssl_validation = true` disables certificate verification, making connections vulnerable to man-in-the-middle attacks. Only use this in development or trusted private networks.
skip_ssl_validation = false
# Custom storage options
# [[storage.providers]]
# name = "S3"
@@ -554,6 +565,9 @@ sst_write_buffer_size = "8MB"
## Capacity of the channel to send data from parallel scan tasks to the main task.
parallel_scan_channel_size = 32
## Maximum number of SST files to scan concurrently.
max_concurrent_scan_files = 128
## Whether to allow stale WAL entries read during replay.
allow_stale_entries = false
@@ -590,6 +604,9 @@ content_cache_size = "128MiB"
## Page size for inverted index content cache.
content_cache_page_size = "64KiB"
## Cache size for index result.
result_cache_size = "128MiB"
## The options for inverted index in Mito engine.
[region_engine.mito.inverted_index]
@@ -724,25 +741,27 @@ max_log_files = 720
default_ratio = 1.0
## The slow query log options.
[logging.slow_query]
[slow_query]
## Whether to enable slow query log.
enable = false
#+ enable = false
## The record type of slow queries. It can be `system_table` or `log`.
## @toml2docs:none-default
#+ record_type = "system_table"
## The threshold of slow query.
## @toml2docs:none-default
threshold = "10s"
#+ threshold = "10s"
## The sampling ratio of slow query log. The value should be in the range of (0, 1].
## @toml2docs:none-default
sample_ratio = 1.0
#+ sample_ratio = 1.0
## The datanode can export its metrics and send to Prometheus compatible service (e.g. send to `greptimedb` itself) from remote-write API.
## The standalone can export its metrics and send to Prometheus compatible service (e.g. `greptimedb`) from remote-write API.
## This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
[export_metrics]
## whether enable export metrics.
enable = false
## The interval of export metrics.
write_interval = "30s"
@@ -753,7 +772,7 @@ write_interval = "30s"
db = "greptime_metrics"
[export_metrics.remote_write]
## The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
## The prometheus remote write endpoint that the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
url = ""
## HTTP headers of Prometheus remote-write carry.
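The `prom_validation_mode` option added to both the frontend and standalone configs sits in the `[http]` section. A sketch of the new option, assuming the usual default HTTP address:

```toml
[http]
addr = "127.0.0.1:4000"
# `strict` (default): reject invalid UTF-8 in remote-write requests;
# `lossy`: replace invalid bytes with U+FFFD; `unchecked`: skip validation.
prom_validation_mode = "strict"
```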

View File

@@ -1,75 +0,0 @@
/*
* Copyright 2023 Greptime Team
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import * as core from "@actions/core";
import {obtainClient} from "@/common";
async function triggerWorkflow(workflowId: string, version: string) {
const docsClient = obtainClient("DOCS_REPO_TOKEN")
try {
await docsClient.rest.actions.createWorkflowDispatch({
owner: "GreptimeTeam",
repo: "docs",
workflow_id: workflowId,
ref: "main",
inputs: {
version,
},
});
console.log(`Successfully triggered ${workflowId} workflow with version ${version}`);
} catch (error) {
core.setFailed(`Failed to trigger workflow: ${error.message}`);
}
}
function determineWorkflow(version: string): [string, string] {
// Check if it's a nightly version
if (version.includes('nightly')) {
return ['bump-nightly-version.yml', version];
}
const parts = version.split('.');
if (parts.length !== 3) {
throw new Error('Invalid version format');
}
// If patch version (last number) is 0, it's a major version
// Return only major.minor version
if (parts[2] === '0') {
return ['bump-version.yml', `${parts[0]}.${parts[1]}`];
}
// Otherwise it's a patch version, use full version
return ['bump-patch-version.yml', version];
}
const version = process.env.VERSION;
if (!version) {
core.setFailed("VERSION environment variable is required");
process.exit(1);
}
// Remove 'v' prefix if exists
const cleanVersion = version.startsWith('v') ? version.slice(1) : version;
try {
const [workflowId, apiVersion] = determineWorkflow(cleanVersion);
triggerWorkflow(workflowId, apiVersion);
} catch (error) {
core.setFailed(`Error processing version: ${error.message}`);
process.exit(1);
}

cyborg/bin/bump-versions.ts Normal file
View File

@@ -0,0 +1,156 @@
/*
* Copyright 2023 Greptime Team
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import * as core from "@actions/core";
import {obtainClient} from "@/common";
interface RepoConfig {
tokenEnv: string;
repo: string;
workflowLogic: (version: string) => [string, string] | null;
}
const REPO_CONFIGS: Record<string, RepoConfig> = {
website: {
tokenEnv: "WEBSITE_REPO_TOKEN",
repo: "website",
workflowLogic: (version: string) => {
// Skip nightly versions for website
if (version.includes('nightly')) {
console.log('Nightly version detected for website, skipping workflow trigger.');
return null;
}
return ['bump-patch-version.yml', version];
}
},
demo: {
tokenEnv: "DEMO_REPO_TOKEN",
repo: "demo-scene",
workflowLogic: (version: string) => {
// Skip nightly versions for demo
if (version.includes('nightly')) {
console.log('Nightly version detected for demo, skipping workflow trigger.');
return null;
}
return ['bump-patch-version.yml', version];
}
},
docs: {
tokenEnv: "DOCS_REPO_TOKEN",
repo: "docs",
workflowLogic: (version: string) => {
// Check if it's a nightly version
if (version.includes('nightly')) {
return ['bump-nightly-version.yml', version];
}
const parts = version.split('.');
if (parts.length !== 3) {
throw new Error('Invalid version format');
}
// If patch version (last number) is 0, it's a major version
// Return only major.minor version
if (parts[2] === '0') {
return ['bump-version.yml', `${parts[0]}.${parts[1]}`];
}
// Otherwise it's a patch version, use full version
return ['bump-patch-version.yml', version];
}
}
};
async function triggerWorkflow(repoConfig: RepoConfig, workflowId: string, version: string) {
const client = obtainClient(repoConfig.tokenEnv);
try {
await client.rest.actions.createWorkflowDispatch({
owner: "GreptimeTeam",
repo: repoConfig.repo,
workflow_id: workflowId,
ref: "main",
inputs: {
version,
},
});
console.log(`Successfully triggered ${workflowId} workflow for ${repoConfig.repo} with version ${version}`);
} catch (error) {
core.setFailed(`Failed to trigger workflow for ${repoConfig.repo}: ${error.message}`);
throw error;
}
}
async function processRepo(repoName: string, version: string) {
const repoConfig = REPO_CONFIGS[repoName];
if (!repoConfig) {
throw new Error(`Unknown repository: ${repoName}`);
}
try {
const workflowResult = repoConfig.workflowLogic(version);
if (workflowResult === null) {
// Skip this repo (e.g., nightly version for website)
return;
}
const [workflowId, apiVersion] = workflowResult;
await triggerWorkflow(repoConfig, workflowId, apiVersion);
} catch (error) {
core.setFailed(`Error processing ${repoName} with version ${version}: ${error.message}`);
throw error;
}
}
async function main() {
const version = process.env.VERSION;
if (!version) {
core.setFailed("VERSION environment variable is required");
process.exit(1);
}
// Remove 'v' prefix if exists
const cleanVersion = version.startsWith('v') ? version.slice(1) : version;
// Get target repositories from environment variable
// Default to website and docs if not specified
const targetRepos = process.env.TARGET_REPOS?.split(',').map(repo => repo.trim()) || ['website', 'docs'];
console.log(`Processing version ${cleanVersion} for repositories: ${targetRepos.join(', ')}`);
const errors: string[] = [];
// Process each repository
for (const repo of targetRepos) {
try {
await processRepo(repo, cleanVersion);
} catch (error) {
errors.push(`${repo}: ${error.message}`);
}
}
if (errors.length > 0) {
core.setFailed(`Failed to process some repositories: ${errors.join('; ')}`);
process.exit(1);
}
console.log('All repositories processed successfully');
}
// Execute main function
main().catch((error) => {
core.setFailed(`Unexpected error: ${error.message}`);
process.exit(1);
});

View File

@@ -1,57 +0,0 @@
/*
* Copyright 2023 Greptime Team
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import * as core from "@actions/core";
import {obtainClient} from "@/common";
async function triggerWorkflow(workflowId: string, version: string) {
const websiteClient = obtainClient("WEBSITE_REPO_TOKEN")
try {
await websiteClient.rest.actions.createWorkflowDispatch({
owner: "GreptimeTeam",
repo: "website",
workflow_id: workflowId,
ref: "main",
inputs: {
version,
},
});
console.log(`Successfully triggered ${workflowId} workflow with version ${version}`);
} catch (error) {
core.setFailed(`Failed to trigger workflow: ${error.message}`);
}
}
const version = process.env.VERSION;
if (!version) {
core.setFailed("VERSION environment variable is required");
process.exit(1);
}
// Remove 'v' prefix if exists
const cleanVersion = version.startsWith('v') ? version.slice(1) : version;
if (cleanVersion.includes('nightly')) {
console.log('Nightly version detected, skipping workflow trigger.');
process.exit(0);
}
try {
triggerWorkflow('bump-patch-version.yml', cleanVersion);
} catch (error) {
core.setFailed(`Error processing version: ${error.message}`);
process.exit(1);
}

View File

@@ -11,6 +11,6 @@ And database will reply with something like:
Log Level changed from Some("info") to "trace,flow=debug"%
```
The data is a string in the format of `global_level,module1=level1,module2=level2,...` that follow the same rule of `RUST_LOG`.
The data is a string in the format of `global_level,module1=level1,module2=level2,...` that follows the same rule of `RUST_LOG`.
The module is the module name of the log, and the level is the log level. The log level can be one of the following: `trace`, `debug`, `info`, `warn`, `error`, `off`(case insensitive).

View File

@@ -14,7 +14,7 @@ impl SqlQueryHandler for Instance {
```
Normally, when a SQL query arrives at GreptimeDB, the `do_query` method will be called. After some parsing work, the SQL
will be feed into `StatementExecutor`:
will be fed into `StatementExecutor`:
```rust
// in Frontend Instance:
@@ -27,7 +27,7 @@ an example.
Now, what if the statements should be handled differently for GreptimeDB Standalone and Cluster? You can see there's
a `SqlStatementExecutor` field in `StatementExecutor`. Each GreptimeDB Standalone and Cluster has its own implementation
of `SqlStatementExecutor`. If you are going to implement the statements differently in the two mode (
of `SqlStatementExecutor`. If you are going to implement the statements differently in the two modes (
like `CREATE TABLE`), you have to implement them in their own `SqlStatementExecutor`s.
Summarize as the diagram below:

View File

@@ -1,6 +1,6 @@
# Profile memory usage of GreptimeDB
This crate provides an easy approach to dump memory profiling info. A set of ready to use scripts is provided in [docs/how-to/memory-profile-scripts](docs/how-to/memory-profile-scripts).
This crate provides an easy approach to dump memory profiling info. A set of ready to use scripts is provided in [docs/how-to/memory-profile-scripts](./memory-profile-scripts/scripts).
## Prerequisites
### jemalloc

View File

@@ -1,8 +1,8 @@
Currently, our query engine is based on DataFusion, so all aggregate function is executed by DataFusion, through its UDAF interface. You can find DataFusion's UDAF example [here](https://github.com/apache/arrow-datafusion/blob/arrow2/datafusion-examples/examples/simple_udaf.rs). Basically, we provide the same way as DataFusion to write aggregate functions: both are centered in a struct called "Accumulator" to accumulates states along the way in aggregation.
However, DataFusion's UDAF implementation has a huge restriction, that it requires user to provide a concrete "Accumulator". Take `Median` aggregate function for example, to aggregate a `u32` datatype column, you have to write a `MedianU32`, and use `SELECT MEDIANU32(x)` in SQL. `MedianU32` cannot be used to aggregate a `i32` datatype column. Or, there's another way: you can use a special type that can hold all kinds of data (like our `Value` enum or Arrow's `ScalarValue`), and `match` all the way up to do aggregate calculations. It might work, though rather tedious. (But I think it's DataFusion's prefer way to write UDAF.)
However, DataFusion's UDAF implementation has a huge restriction, that it requires user to provide a concrete "Accumulator". Take `Median` aggregate function for example, to aggregate a `u32` datatype column, you have to write a `MedianU32`, and use `SELECT MEDIANU32(x)` in SQL. `MedianU32` cannot be used to aggregate a `i32` datatype column. Or, there's another way: you can use a special type that can hold all kinds of data (like our `Value` enum or Arrow's `ScalarValue`), and `match` all the way up to do aggregate calculations. It might work, though rather tedious. (But I think it's DataFusion's preferred way to write UDAF.)
So is there a way we can make an aggregate function that automatically match the input data's type? For example, a `Median` aggregator that can work on both `u32` column and `i32`? The answer is yes until we found a way to bypassing DataFusion's restriction, a restriction that DataFusion simply don't pass the input data's type when creating an Accumulator.
So is there a way we can make an aggregate function that automatically match the input data's type? For example, a `Median` aggregator that can work on both `u32` column and `i32`? The answer is yes until we find a way to bypass DataFusion's restriction, a restriction that DataFusion simply doesn't pass the input data's type when creating an Accumulator.
> There's an example in `my_sum_udaf_example.rs`, take that as quick start.
@@ -16,7 +16,7 @@ You must first define a struct that will be used to create your accumulator. For
struct MySumAccumulatorCreator {}
```
Attribute macro `#[as_aggr_func_creator]` and derive macro `#[derive(Debug, AggrFuncTypeStore)]` must both annotated on the struct. They work together to provide a storage of aggregate function's input data types, which are needed for creating generic accumulator later.
Attribute macro `#[as_aggr_func_creator]` and derive macro `#[derive(Debug, AggrFuncTypeStore)]` must both be annotated on the struct. They work together to provide a storage of aggregate function's input data types, which are needed for creating generic accumulator later.
> Note that the `as_aggr_func_creator` macro will add fields to the struct, so the struct cannot be defined as an empty struct without field like `struct Foo;`, neither as a new type like `struct Foo(bar)`.
@@ -32,11 +32,11 @@ pub trait AggregateFunctionCreator: Send + Sync + Debug {
You can use input data's type in methods that return output type and state types (just invoke `input_types()`).
The output type is aggregate function's output data's type. For example, `SUM` aggregate function's output type is `u64` for a `u32` datatype column. The state types are accumulator's internal states' types. Take `AVG` aggregate function on a `i32` column as example, it's state types are `i64` (for sum) and `u64` (for count).
The output type is aggregate function's output data's type. For example, `SUM` aggregate function's output type is `u64` for a `u32` datatype column. The state types are accumulator's internal states' types. Take `AVG` aggregate function on a `i32` column as example, its state types are `i64` (for sum) and `u64` (for count).
The `creator` function is where you define how an accumulator (that will be used in DataFusion) is created. You define "how" to create the accumulator (instead of "what" to create), using the input data's type as arguments. With input datatype known, you can create accumulator generically.
# 2. Impl `Accumulator` trait for you accumulator.
# 2. Impl `Accumulator` trait for your accumulator.
The accumulator is where you store the aggregate calculation states and evaluate a result. You must impl `Accumulator` trait for it. The trait's definition is:
@@ -49,7 +49,7 @@ pub trait Accumulator: Send + Sync + Debug {
}
```
The DataFusion basically execute aggregate like this:
The DataFusion basically executes aggregate like this:
1. Partitioning all input data for aggregate. Create an accumulator for each part.
2. Call `update_batch` on each accumulator with partitioned data, to let you update your aggregate calculation.
@@ -57,16 +57,16 @@ The DataFusion basically execute aggregate like this:
4. Call `merge_batch` to merge all accumulator's internal state to one.
5. Execute `evaluate` on the chosen one to get the final calculation result.
Once you know the meaning of each method, you can easily write your accumulator. You can refer to `Median` accumulator or `SUM` accumulator defined in file `my_sum_udaf_example.rs` for more details.
Once you know the meaning of each method, you can easily write your accumulator. You can refer to `Median` accumulator or `SUM` accumulator defined in file `my_sum_udaf_example.rs` for more details.
# 3. Register your aggregate function to our query engine.
You can call `register_aggregate_function` method in query engine to register your aggregate function. To do that, you have to new an instance of struct `AggregateFunctionMeta`. The struct has three fields, first is the name of your aggregate function's name. The function name is case-sensitive due to DataFusion's restriction. We strongly recommend using lowercase for your name. If you have to use uppercase name, wrap your aggregate function with quotation marks. For example, if you define an aggregate function named "my_aggr", you can use "`SELECT MY_AGGR(x)`"; if you define "my_AGGR", you have to use "`SELECT "my_AGGR"(x)`".
The second field is arg_counts ,the count of the arguments. Like accumulator `percentile`, calculating the p_number of the column. We need to input the value of column and the value of p to cacalate, and so the count of the arguments is two.
The second field is arg_counts ,the count of the arguments. Like accumulator `percentile`, calculating the p_number of the column. We need to input the value of column and the value of p to calculate, and so the count of the arguments is two.
The third field is a function about how to create your accumulator creator that you defined in step 1 above. Create creator, that's a bit intertwined, but it is how we make DataFusion use a newly created aggregate function each time it executes a SQL, preventing the stored input types from affecting each other. The key detail can be starting looking at our `DfContextProviderAdapter` struct's `get_aggregate_meta` method.
# (Optional) 4. Make your aggregate function automatically registered.
If you've written a great aggregate function that want to let everyone use it, you can make it automatically registered to our query engine at start time. It's quick simple, just refer to the `AggregateFunctions::register` function in `common/function/src/scalars/aggregate/mod.rs`.
If you've written a great aggregate function that wants to let everyone use it, you can make it automatically register to our query engine at start time. It's quick and simple, just refer to the `AggregateFunctions::register` function in `common/function/src/scalars/aggregate/mod.rs`.

View File

@@ -3,7 +3,7 @@
This document introduces how to write fuzz tests in GreptimeDB.
## What is a fuzz test
Fuzz test is tool that leverage deterministic random generation to assist in finding bugs. The goal of fuzz tests is to identify inputs generated by the fuzzer that cause system panics, crashes, or unexpected behaviors to occur. And we are using the [cargo-fuzz](https://github.com/rust-fuzz/cargo-fuzz) to run our fuzz test targets.
Fuzz test is tool that leverages deterministic random generation to assist in finding bugs. The goal of fuzz tests is to identify inputs generated by the fuzzer that cause system panics, crashes, or unexpected behaviors to occur. And we are using the [cargo-fuzz](https://github.com/rust-fuzz/cargo-fuzz) to run our fuzz test targets.
## Why we need them
- Find bugs by leveraging random generation

flake.lock generated
View File

@@ -41,16 +41,16 @@
},
"nixpkgs": {
"locked": {
"lastModified": 1745487689,
"narHash": "sha256-FQoi3R0NjQeBAsEOo49b5tbDPcJSMWc3QhhaIi9eddw=",
"lastModified": 1748162331,
"narHash": "sha256-rqc2RKYTxP3tbjA+PB3VMRQNnjesrT0pEofXQTrMsS8=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "5630cf13cceac06cefe9fc607e8dfa8fb342dde3",
"rev": "7c43f080a7f28b2774f3b3f43234ca11661bf334",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixos-24.11",
"ref": "nixos-25.05",
"repo": "nixpkgs",
"type": "github"
}

View File

@@ -2,7 +2,7 @@
description = "Development environment flake";
inputs = {
nixpkgs.url = "github:NixOS/nixpkgs/nixos-24.11";
nixpkgs.url = "github:NixOS/nixpkgs/nixos-25.05";
fenix = {
url = "github:nix-community/fenix";
inputs.nixpkgs.follows = "nixpkgs";
@@ -21,7 +21,7 @@
lib = nixpkgs.lib;
rustToolchain = fenix.packages.${system}.fromToolchainName {
name = (lib.importTOML ./rust-toolchain.toml).toolchain.channel;
sha256 = "sha256-arzEYlWLGGYeOhECHpBxQd2joZ4rPKV3qLNnZ+eql6A=";
sha256 = "sha256-tJJr8oqX3YD+ohhPK7jlt/7kvKBnBqJVjYtoFr520d4=";
};
in
{
@@ -51,6 +51,7 @@
];
LD_LIBRARY_PATH = pkgs.lib.makeLibraryPath buildInputs;
NIX_HARDENING_ENABLE = "";
};
});
}

View File

@@ -9,7 +9,7 @@ We highly recommend using the self-monitoring feature provided by [GreptimeDB Op
- **Metrics Dashboards**
- `dashboards/metrics/cluster/dashboard.json`: The Grafana dashboard for the GreptimeDB cluster. Read the [dashboard.md](./dashboards/metrics/cluster/dashboard.md) for more details.
- `dashboards/metrics/standalone/dashboard.json`: The Grafana dashboard for the standalone GreptimeDB instance. **It's generated from the `cluster/dashboard.json` by removing the instance filter through the `make dashboards` command**. Read the [dashboard.md](./dashboards/metrics/standalone/dashboard.md) for more details.
- **Logs Dashboard**
@@ -83,7 +83,7 @@ If you use the [Helm Chart](https://github.com/GreptimeTeam/helm-charts) to depl
- `monitoring.enabled=true`: Deploys a standalone GreptimeDB instance dedicated to monitoring the cluster;
- `grafana.enabled=true`: Deploys Grafana and automatically imports the monitoring dashboard;
The standalone GreptimeDB instance will collect metrics from your cluster, and the dashboard will be available in the Grafana UI. For detailed deployment instructions, please refer to our [Kubernetes deployment guide](https://docs.greptime.com/nightly/user-guide/deployments/deploy-on-kubernetes/getting-started).
The standalone GreptimeDB instance will collect metrics from your cluster, and the dashboard will be available in the Grafana UI. For detailed deployment instructions, please refer to our [Kubernetes deployment guide](https://docs.greptime.com/user-guide/deployments-administration/deploy-on-kubernetes/getting-started).
### Self-host Prometheus and import dashboards manually

File diff suppressed because it is too large

View File

@@ -46,6 +46,7 @@
| Ingest Rows per Instance | `sum by(instance, pod)(rate(greptime_table_operator_ingest_rows{instance=~"$frontend"}[$__rate_interval]))` | `timeseries` | Ingestion rate by row as in each frontend | `prometheus` | `rowsps` | `[{{instance}}]-[{{pod}}]` |
| Region Call QPS per Instance | `sum by(instance, pod, request_type) (rate(greptime_grpc_region_request_count{instance=~"$frontend"}[$__rate_interval]))` | `timeseries` | Region Call QPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-[{{request_type}}]` |
| Region Call P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, request_type) (rate(greptime_grpc_region_request_bucket{instance=~"$frontend"}[$__rate_interval])))` | `timeseries` | Region Call P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{request_type}}]` |
| Frontend Handle Bulk Insert Elapsed Time | `sum by(instance, pod, stage) (rate(greptime_table_operator_handle_bulk_insert_sum[$__rate_interval]))/sum by(instance, pod, stage) (rate(greptime_table_operator_handle_bulk_insert_count[$__rate_interval]))`<br/>`histogram_quantile(0.99, sum by(instance, pod, stage, le) (rate(greptime_table_operator_handle_bulk_insert_bucket[$__rate_interval])))` | `timeseries` | Per-stage time for frontend to handle bulk insert requests | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-AVG` |
# Mito Engine
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
| --- | --- | --- | --- | --- | --- | --- |
@@ -59,7 +60,7 @@
| Read Stage P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, stage) (rate(greptime_mito_read_stage_elapsed_bucket{instance=~"$datanode"}[$__rate_interval])))` | `timeseries` | Read Stage P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]` |
| Write Stage P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, stage) (rate(greptime_mito_write_stage_elapsed_bucket{instance=~"$datanode"}[$__rate_interval])))` | `timeseries` | Write Stage P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]` |
| Compaction OPS per Instance | `sum by(instance, pod) (rate(greptime_mito_compaction_total_elapsed_count{instance=~"$datanode"}[$__rate_interval]))` | `timeseries` | Compaction OPS per Instance. | `prometheus` | `ops` | `[{{ instance }}]-[{{pod}}]` |
| Compaction P99 per Instance by Stage | `histogram_quantile(0.99, sum by(instance, pod, le, stage) (rate(greptime_mito_compaction_stage_elapsed_bucket{instance=~"$datanode"}[$__rate_interval])))` | `timeseries` | Compaction latency by stage | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-p99` |
| Compaction Elapsed Time per Instance by Stage | `histogram_quantile(0.99, sum by(instance, pod, le, stage) (rate(greptime_mito_compaction_stage_elapsed_bucket{instance=~"$datanode"}[$__rate_interval])))`<br/>`sum by(instance, pod, stage) (rate(greptime_mito_compaction_stage_elapsed_sum{instance=~"$datanode"}[$__rate_interval]))/sum by(instance, pod, stage) (rate(greptime_mito_compaction_stage_elapsed_count{instance=~"$datanode"}[$__rate_interval]))` | `timeseries` | Compaction latency by stage | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-p99` |
| Compaction P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le,stage) (rate(greptime_mito_compaction_total_elapsed_bucket{instance=~"$datanode"}[$__rate_interval])))` | `timeseries` | Compaction P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-compaction` |
| WAL write size | `histogram_quantile(0.95, sum by(le,instance, pod) (rate(raft_engine_write_size_bucket[$__rate_interval])))`<br/>`histogram_quantile(0.99, sum by(le,instance,pod) (rate(raft_engine_write_size_bucket[$__rate_interval])))`<br/>`sum by (instance, pod)(rate(raft_engine_write_size_sum[$__rate_interval]))` | `timeseries` | Write-ahead logs write size as bytes. This chart includes stats of p95 and p99 size by instance, total WAL write rate. | `prometheus` | `bytes` | `[{{instance}}]-[{{pod}}]-req-size-p95` |
| Cached Bytes per Instance | `greptime_mito_cache_bytes{instance=~"$datanode"}` | `timeseries` | Cached Bytes per Instance. | `prometheus` | `decbytes` | `[{{instance}}]-[{{pod}}]-[{{type}}]` |
@@ -67,6 +68,10 @@
| WAL sync duration seconds | `histogram_quantile(0.99, sum by(le, type, node, instance, pod) (rate(raft_engine_sync_log_duration_seconds_bucket[$__rate_interval])))` | `timeseries` | Raft engine (local disk) log store sync latency, p99 | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-p99` |
| Log Store op duration seconds | `histogram_quantile(0.99, sum by(le,logstore,optype,instance, pod) (rate(greptime_logstore_op_elapsed_bucket[$__rate_interval])))` | `timeseries` | Write-ahead log operations latency at p99 | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{logstore}}]-[{{optype}}]-p99` |
| Inflight Flush | `greptime_mito_inflight_flush_count` | `timeseries` | Ongoing flush task count | `prometheus` | `none` | `[{{instance}}]-[{{pod}}]` |
| Compaction Input/Output Bytes | `sum by(instance, pod) (greptime_mito_compaction_input_bytes)`<br/>`sum by(instance, pod) (greptime_mito_compaction_output_bytes)` | `timeseries` | Compaction input and output bytes | `prometheus` | `bytes` | `[{{instance}}]-[{{pod}}]-input` |
| Region Worker Handle Bulk Insert Requests | `histogram_quantile(0.95, sum by(le,instance, stage, pod) (rate(greptime_region_worker_handle_write_bucket[$__rate_interval])))`<br/>`sum by(instance, stage, pod) (rate(greptime_region_worker_handle_write_sum[$__rate_interval]))/sum by(instance, stage, pod) (rate(greptime_region_worker_handle_write_count[$__rate_interval]))` | `timeseries` | Per-stage elapsed time for region worker to handle bulk insert region requests. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-P95` |
| Active Series and Field Builders Count | `sum by(instance, pod) (greptime_mito_memtable_active_series_count)`<br/>`sum by(instance, pod) (greptime_mito_memtable_field_builder_count)` | `timeseries` | Active series count and field builder count in memtables | `prometheus` | `none` | `[{{instance}}]-[{{pod}}]-series` |
| Region Worker Convert Requests | `histogram_quantile(0.95, sum by(le, instance, stage, pod) (rate(greptime_datanode_convert_region_request_bucket[$__rate_interval])))`<br/>`sum by(le,instance, stage, pod) (rate(greptime_datanode_convert_region_request_sum[$__rate_interval]))/sum by(le,instance, stage, pod) (rate(greptime_datanode_convert_region_request_count[$__rate_interval]))` | `timeseries` | Per-stage elapsed time for region worker to decode requests. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-P95` |
# OpenDAL
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
| --- | --- | --- | --- | --- | --- | --- |
@@ -84,9 +89,19 @@
# Metasrv
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
| --- | --- | --- | --- | --- | --- | --- |
| Region migration datanode | `greptime_meta_region_migration_stat{datanode_type="src"}`<br/>`greptime_meta_region_migration_stat{datanode_type="desc"}` | `state-timeline` | Counter of region migration by source and destination | `prometheus` | `none` | `from-datanode-{{datanode_id}}` |
| Region migration error | `greptime_meta_region_migration_error` | `timeseries` | Counter of region migration error | `prometheus` | `none` | `__auto` |
| Datanode load | `greptime_datanode_load` | `timeseries` | Gauge of load information of each datanode, collected via heartbeat between datanode and metasrv. This information is for metasrv to schedule workloads. | `prometheus` | `none` | `__auto` |
| Region migration datanode | `greptime_meta_region_migration_stat{datanode_type="src"}`<br/>`greptime_meta_region_migration_stat{datanode_type="desc"}` | `status-history` | Counter of region migration by source and destination | `prometheus` | -- | `from-datanode-{{datanode_id}}` |
| Region migration error | `greptime_meta_region_migration_error` | `timeseries` | Counter of region migration error | `prometheus` | `none` | `{{pod}}-{{state}}-{{error_type}}` |
| Datanode load | `greptime_datanode_load` | `timeseries` | Gauge of load information of each datanode, collected via heartbeat between datanode and metasrv. This information is for metasrv to schedule workloads. | `prometheus` | `binBps` | `Datanode-{{datanode_id}}-writeload` |
| Rate of SQL Executions (RDS) | `rate(greptime_meta_rds_pg_sql_execute_elapsed_ms_count[$__rate_interval])` | `timeseries` | Displays the rate of SQL executions processed by the Meta service using the RDS backend. | `prometheus` | `none` | `{{pod}} {{op}} {{type}} {{result}} ` |
| SQL Execution Latency (RDS) | `histogram_quantile(0.90, sum by(pod, op, type, result, le) (rate(greptime_meta_rds_pg_sql_execute_elapsed_ms_bucket[$__rate_interval])))` | `timeseries` | Measures the response time of SQL executions via the RDS backend. | `prometheus` | `ms` | `{{pod}} {{op}} {{type}} {{result}} p90` |
| Handler Execution Latency | `histogram_quantile(0.90, sum by(pod, le, name) (rate(greptime_meta_handler_execute_bucket[$__rate_interval])))` | `timeseries` | Shows latency of Meta handlers by pod and handler name, useful for monitoring handler performance and detecting latency spikes.<br/> | `prometheus` | `s` | `{{pod}} {{name}} p90` |
| Heartbeat Packet Size | `histogram_quantile(0.9, sum by(pod, le) (greptime_meta_heartbeat_stat_memory_size_bucket))` | `timeseries` | Shows p90 heartbeat message sizes, helping track network usage and identify anomalies in heartbeat payload.<br/> | `prometheus` | `bytes` | `{{pod}}` |
| Meta Heartbeat Receive Rate | `rate(greptime_meta_heartbeat_rate[$__rate_interval])` | `timeseries` | Rate of heartbeat messages received by metasrv. | `prometheus` | `s` | `{{pod}}` |
| Meta KV Ops Latency | `histogram_quantile(0.99, sum by(pod, le, op, target) (greptime_meta_kv_request_elapsed_bucket))` | `timeseries` | P99 latency of metasrv KV backend requests, by operation and target. | `prometheus` | `s` | `{{pod}}-{{op}} p99` |
| Rate of meta KV Ops | `rate(greptime_meta_kv_request_elapsed_count[$__rate_interval])` | `timeseries` | Rate of metasrv KV backend requests, by operation. | `prometheus` | `none` | `{{pod}}-{{op}} p99` |
| DDL Latency | `histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_create_tables_bucket))`<br/>`histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_create_table))`<br/>`histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_create_view))`<br/>`histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_create_flow))`<br/>`histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_drop_table))`<br/>`histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_alter_table))` | `timeseries` | P90 latency of metasrv DDL procedures (create/drop/alter table, view and flow), by step. | `prometheus` | `s` | `CreateLogicalTables-{{step}} p90` |
# Flownode
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
| --- | --- | --- | --- | --- | --- | --- |

View File

@@ -371,6 +371,21 @@ groups:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{request_type}}]'
- title: 'Frontend Handle Bulk Insert Elapsed Time '
type: timeseries
description: Per-stage time for frontend to handle bulk insert requests
unit: s
queries:
- expr: sum by(instance, pod, stage) (rate(greptime_table_operator_handle_bulk_insert_sum[$__rate_interval]))/sum by(instance, pod, stage) (rate(greptime_table_operator_handle_bulk_insert_count[$__rate_interval]))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{stage}}]-AVG'
- expr: histogram_quantile(0.99, sum by(instance, pod, stage, le) (rate(greptime_table_operator_handle_bulk_insert_bucket[$__rate_interval])))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{stage}}]-P95'
- title: Mito Engine
panels:
- title: Request OPS per Instance
@@ -472,7 +487,7 @@ groups:
type: prometheus
uid: ${metrics}
legendFormat: '[{{ instance }}]-[{{pod}}]'
- title: Compaction P99 per Instance by Stage
- title: Compaction Elapsed Time per Instance by Stage
type: timeseries
description: Compaction latency by stage
unit: s
@@ -482,6 +497,11 @@ groups:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{stage}}]-p99'
- expr: sum by(instance, pod, stage) (rate(greptime_mito_compaction_stage_elapsed_sum{instance=~"$datanode"}[$__rate_interval]))/sum by(instance, pod, stage) (rate(greptime_mito_compaction_stage_elapsed_count{instance=~"$datanode"}[$__rate_interval]))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{stage}}]-avg'
- title: Compaction P99 per Instance
type: timeseries
description: Compaction P99 per Instance.
@@ -562,6 +582,66 @@ groups:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]'
- title: Compaction Input/Output Bytes
type: timeseries
description: Compaction input and output bytes
unit: bytes
queries:
- expr: sum by(instance, pod) (greptime_mito_compaction_input_bytes)
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-input'
- expr: sum by(instance, pod) (greptime_mito_compaction_output_bytes)
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-output'
- title: Region Worker Handle Bulk Insert Requests
type: timeseries
description: Per-stage elapsed time for region worker to handle bulk insert region requests.
unit: s
queries:
- expr: histogram_quantile(0.95, sum by(le,instance, stage, pod) (rate(greptime_region_worker_handle_write_bucket[$__rate_interval])))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{stage}}]-P95'
- expr: sum by(instance, stage, pod) (rate(greptime_region_worker_handle_write_sum[$__rate_interval]))/sum by(instance, stage, pod) (rate(greptime_region_worker_handle_write_count[$__rate_interval]))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{stage}}]-AVG'
- title: Active Series and Field Builders Count
type: timeseries
description: Active series count and field builder count in memtables
unit: none
queries:
- expr: sum by(instance, pod) (greptime_mito_memtable_active_series_count)
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-series'
- expr: sum by(instance, pod) (greptime_mito_memtable_field_builder_count)
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-field_builders'
- title: Region Worker Convert Requests
type: timeseries
description: Per-stage elapsed time for region worker to decode requests.
unit: s
queries:
- expr: histogram_quantile(0.95, sum by(le, instance, stage, pod) (rate(greptime_datanode_convert_region_request_bucket[$__rate_interval])))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{stage}}]-P95'
- expr: sum by(le,instance, stage, pod) (rate(greptime_datanode_convert_region_request_sum[$__rate_interval]))/sum by(le,instance, stage, pod) (rate(greptime_datanode_convert_region_request_count[$__rate_interval]))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{stage}}]-AVG'
- title: OpenDAL
panels:
- title: QPS per Instance
@@ -676,9 +756,8 @@ groups:
- title: Metasrv
panels:
- title: Region migration datanode
type: state-timeline
type: status-history
description: Counter of region migration by source and destination
unit: none
queries:
- expr: greptime_meta_region_migration_stat{datanode_type="src"}
datasource:
@@ -699,17 +778,127 @@ groups:
datasource:
type: prometheus
uid: ${metrics}
legendFormat: __auto
legendFormat: '{{pod}}-{{state}}-{{error_type}}'
- title: Datanode load
type: timeseries
description: Gauge of load information of each datanode, collected via heartbeat between datanode and metasrv. This information is for metasrv to schedule workloads.
unit: none
unit: binBps
queries:
- expr: greptime_datanode_load
datasource:
type: prometheus
uid: ${metrics}
legendFormat: __auto
legendFormat: Datanode-{{datanode_id}}-writeload
- title: Rate of SQL Executions (RDS)
type: timeseries
description: Displays the rate of SQL executions processed by the Meta service using the RDS backend.
unit: none
queries:
- expr: rate(greptime_meta_rds_pg_sql_execute_elapsed_ms_count[$__rate_interval])
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '{{pod}} {{op}} {{type}} {{result}} '
- title: SQL Execution Latency (RDS)
type: timeseries
description: 'Measures the response time of SQL executions via the RDS backend. '
unit: ms
queries:
- expr: histogram_quantile(0.90, sum by(pod, op, type, result, le) (rate(greptime_meta_rds_pg_sql_execute_elapsed_ms_bucket[$__rate_interval])))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '{{pod}} {{op}} {{type}} {{result}} p90'
- title: Handler Execution Latency
type: timeseries
description: |
Shows latency of Meta handlers by pod and handler name, useful for monitoring handler performance and detecting latency spikes.
unit: s
queries:
- expr: |-
histogram_quantile(0.90, sum by(pod, le, name) (
rate(greptime_meta_handler_execute_bucket[$__rate_interval])
))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '{{pod}} {{name}} p90'
- title: Heartbeat Packet Size
type: timeseries
description: |
Shows p90 heartbeat message sizes, helping track network usage and identify anomalies in heartbeat payload.
unit: bytes
queries:
- expr: histogram_quantile(0.9, sum by(pod, le) (greptime_meta_heartbeat_stat_memory_size_bucket))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '{{pod}}'
- title: Meta Heartbeat Receive Rate
type: timeseries
description: Rate of heartbeat messages received by metasrv from datanodes, per pod.
unit: s
queries:
- expr: rate(greptime_meta_heartbeat_rate[$__rate_interval])
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '{{pod}}'
- title: Meta KV Ops Latency
type: timeseries
description: P99 latency of metasrv KV backend requests, by pod, operation and target.
unit: s
queries:
- expr: histogram_quantile(0.99, sum by(pod, le, op, target) (greptime_meta_kv_request_elapsed_bucket))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '{{pod}}-{{op}} p99'
- title: Rate of meta KV Ops
type: timeseries
description: Request rate of metasrv KV backend operations, by pod and operation.
unit: none
queries:
- expr: rate(greptime_meta_kv_request_elapsed_count[$__rate_interval])
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '{{pod}}-{{op}}'
- title: DDL Latency
type: timeseries
description: P90 latency of metasrv DDL procedures (create/drop/alter), by procedure step.
unit: s
queries:
- expr: histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_create_tables_bucket))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: CreateLogicalTables-{{step}} p90
- expr: histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_create_table))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: CreateTable-{{step}} p90
- expr: histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_create_view))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: CreateView-{{step}} p90
- expr: histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_create_flow))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: CreateFlow-{{step}} p90
- expr: histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_drop_table))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: DropTable-{{step}} p90
- expr: histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_alter_table))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: AlterTable-{{step}} p90
- title: Flownode
panels:
- title: Flow Ingest / Output Rate

File diff suppressed because it is too large

View File

@@ -46,6 +46,7 @@
| Ingest Rows per Instance | `sum by(instance, pod)(rate(greptime_table_operator_ingest_rows{}[$__rate_interval]))` | `timeseries` | Ingestion rate by row as in each frontend | `prometheus` | `rowsps` | `[{{instance}}]-[{{pod}}]` |
| Region Call QPS per Instance | `sum by(instance, pod, request_type) (rate(greptime_grpc_region_request_count{}[$__rate_interval]))` | `timeseries` | Region Call QPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-[{{request_type}}]` |
| Region Call P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, request_type) (rate(greptime_grpc_region_request_bucket{}[$__rate_interval])))` | `timeseries` | Region Call P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{request_type}}]` |
| Frontend Handle Bulk Insert Elapsed Time | `sum by(instance, pod, stage) (rate(greptime_table_operator_handle_bulk_insert_sum[$__rate_interval]))/sum by(instance, pod, stage) (rate(greptime_table_operator_handle_bulk_insert_count[$__rate_interval]))`<br/>`histogram_quantile(0.99, sum by(instance, pod, stage, le) (rate(greptime_table_operator_handle_bulk_insert_bucket[$__rate_interval])))` | `timeseries` | Per-stage time for frontend to handle bulk insert requests | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-AVG` |
# Mito Engine
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
| --- | --- | --- | --- | --- | --- | --- |
@@ -59,7 +60,7 @@
| Read Stage P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, stage) (rate(greptime_mito_read_stage_elapsed_bucket{}[$__rate_interval])))` | `timeseries` | Read Stage P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]` |
| Write Stage P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, stage) (rate(greptime_mito_write_stage_elapsed_bucket{}[$__rate_interval])))` | `timeseries` | Write Stage P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]` |
| Compaction OPS per Instance | `sum by(instance, pod) (rate(greptime_mito_compaction_total_elapsed_count{}[$__rate_interval]))` | `timeseries` | Compaction OPS per Instance. | `prometheus` | `ops` | `[{{ instance }}]-[{{pod}}]` |
| Compaction P99 per Instance by Stage | `histogram_quantile(0.99, sum by(instance, pod, le, stage) (rate(greptime_mito_compaction_stage_elapsed_bucket{}[$__rate_interval])))` | `timeseries` | Compaction latency by stage | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-p99` |
| Compaction Elapsed Time per Instance by Stage | `histogram_quantile(0.99, sum by(instance, pod, le, stage) (rate(greptime_mito_compaction_stage_elapsed_bucket{}[$__rate_interval])))`<br/>`sum by(instance, pod, stage) (rate(greptime_mito_compaction_stage_elapsed_sum{}[$__rate_interval]))/sum by(instance, pod, stage) (rate(greptime_mito_compaction_stage_elapsed_count{}[$__rate_interval]))` | `timeseries` | Compaction latency by stage | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-p99` |
| Compaction P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le,stage) (rate(greptime_mito_compaction_total_elapsed_bucket{}[$__rate_interval])))` | `timeseries` | Compaction P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-compaction` |
| WAL write size | `histogram_quantile(0.95, sum by(le,instance, pod) (rate(raft_engine_write_size_bucket[$__rate_interval])))`<br/>`histogram_quantile(0.99, sum by(le,instance,pod) (rate(raft_engine_write_size_bucket[$__rate_interval])))`<br/>`sum by (instance, pod)(rate(raft_engine_write_size_sum[$__rate_interval]))` | `timeseries` | Write-ahead logs write size as bytes. This chart includes stats of p95 and p99 size by instance, total WAL write rate. | `prometheus` | `bytes` | `[{{instance}}]-[{{pod}}]-req-size-p95` |
| Cached Bytes per Instance | `greptime_mito_cache_bytes{}` | `timeseries` | Cached Bytes per Instance. | `prometheus` | `decbytes` | `[{{instance}}]-[{{pod}}]-[{{type}}]` |
@@ -67,6 +68,10 @@
| WAL sync duration seconds | `histogram_quantile(0.99, sum by(le, type, node, instance, pod) (rate(raft_engine_sync_log_duration_seconds_bucket[$__rate_interval])))` | `timeseries` | Raft engine (local disk) log store sync latency, p99 | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-p99` |
| Log Store op duration seconds | `histogram_quantile(0.99, sum by(le,logstore,optype,instance, pod) (rate(greptime_logstore_op_elapsed_bucket[$__rate_interval])))` | `timeseries` | Write-ahead log operations latency at p99 | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{logstore}}]-[{{optype}}]-p99` |
| Inflight Flush | `greptime_mito_inflight_flush_count` | `timeseries` | Ongoing flush task count | `prometheus` | `none` | `[{{instance}}]-[{{pod}}]` |
| Compaction Input/Output Bytes | `sum by(instance, pod) (greptime_mito_compaction_input_bytes)`<br/>`sum by(instance, pod) (greptime_mito_compaction_output_bytes)` | `timeseries` | Compaction input/output bytes | `prometheus` | `bytes` | `[{{instance}}]-[{{pod}}]-input` |
| Region Worker Handle Bulk Insert Requests | `histogram_quantile(0.95, sum by(le,instance, stage, pod) (rate(greptime_region_worker_handle_write_bucket[$__rate_interval])))`<br/>`sum by(instance, stage, pod) (rate(greptime_region_worker_handle_write_sum[$__rate_interval]))/sum by(instance, stage, pod) (rate(greptime_region_worker_handle_write_count[$__rate_interval]))` | `timeseries` | Per-stage elapsed time for region worker to handle bulk insert region requests. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-P95` |
| Active Series and Field Builders Count | `sum by(instance, pod) (greptime_mito_memtable_active_series_count)`<br/>`sum by(instance, pod) (greptime_mito_memtable_field_builder_count)` | `timeseries` | Active time series and field builder counts in mito memtables | `prometheus` | `none` | `[{{instance}}]-[{{pod}}]-series` |
| Region Worker Convert Requests | `histogram_quantile(0.95, sum by(le, instance, stage, pod) (rate(greptime_datanode_convert_region_request_bucket[$__rate_interval])))`<br/>`sum by(le,instance, stage, pod) (rate(greptime_datanode_convert_region_request_sum[$__rate_interval]))/sum by(le,instance, stage, pod) (rate(greptime_datanode_convert_region_request_count[$__rate_interval]))` | `timeseries` | Per-stage elapsed time for region worker to decode requests. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-P95` |
# OpenDAL
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
| --- | --- | --- | --- | --- | --- | --- |
@@ -84,9 +89,19 @@
# Metasrv
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
| --- | --- | --- | --- | --- | --- | --- |
| Region migration datanode | `greptime_meta_region_migration_stat{datanode_type="src"}`<br/>`greptime_meta_region_migration_stat{datanode_type="desc"}` | `state-timeline` | Counter of region migration by source and destination | `prometheus` | `none` | `from-datanode-{{datanode_id}}` |
| Region migration error | `greptime_meta_region_migration_error` | `timeseries` | Counter of region migration error | `prometheus` | `none` | `__auto` |
| Datanode load | `greptime_datanode_load` | `timeseries` | Gauge of load information of each datanode, collected via heartbeat between datanode and metasrv. This information is for metasrv to schedule workloads. | `prometheus` | `none` | `__auto` |
| Region migration datanode | `greptime_meta_region_migration_stat{datanode_type="src"}`<br/>`greptime_meta_region_migration_stat{datanode_type="desc"}` | `status-history` | Counter of region migration by source and destination | `prometheus` | -- | `from-datanode-{{datanode_id}}` |
| Region migration error | `greptime_meta_region_migration_error` | `timeseries` | Counter of region migration error | `prometheus` | `none` | `{{pod}}-{{state}}-{{error_type}}` |
| Datanode load | `greptime_datanode_load` | `timeseries` | Gauge of load information of each datanode, collected via heartbeat between datanode and metasrv. This information is for metasrv to schedule workloads. | `prometheus` | `binBps` | `Datanode-{{datanode_id}}-writeload` |
| Rate of SQL Executions (RDS) | `rate(greptime_meta_rds_pg_sql_execute_elapsed_ms_count[$__rate_interval])` | `timeseries` | Displays the rate of SQL executions processed by the Meta service using the RDS backend. | `prometheus` | `none` | `{{pod}} {{op}} {{type}} {{result}} ` |
| SQL Execution Latency (RDS) | `histogram_quantile(0.90, sum by(pod, op, type, result, le) (rate(greptime_meta_rds_pg_sql_execute_elapsed_ms_bucket[$__rate_interval])))` | `timeseries` | Measures the response time of SQL executions via the RDS backend. | `prometheus` | `ms` | `{{pod}} {{op}} {{type}} {{result}} p90` |
| Handler Execution Latency | `histogram_quantile(0.90, sum by(pod, le, name) (rate(greptime_meta_handler_execute_bucket[$__rate_interval])))` | `timeseries` | Shows latency of Meta handlers by pod and handler name, useful for monitoring handler performance and detecting latency spikes. | `prometheus` | `s` | `{{pod}} {{name}} p90` |
| Heartbeat Packet Size | `histogram_quantile(0.9, sum by(pod, le) (greptime_meta_heartbeat_stat_memory_size_bucket))` | `timeseries` | Shows p90 heartbeat message sizes, helping track network usage and identify anomalies in heartbeat payload. | `prometheus` | `bytes` | `{{pod}}` |
| Meta Heartbeat Receive Rate | `rate(greptime_meta_heartbeat_rate[$__rate_interval])` | `timeseries` | Rate of heartbeat messages received by metasrv from datanodes, per pod. | `prometheus` | `s` | `{{pod}}` |
| Meta KV Ops Latency | `histogram_quantile(0.99, sum by(pod, le, op, target) (greptime_meta_kv_request_elapsed_bucket))` | `timeseries` | P99 latency of metasrv KV backend requests, by pod, operation and target. | `prometheus` | `s` | `{{pod}}-{{op}} p99` |
| Rate of meta KV Ops | `rate(greptime_meta_kv_request_elapsed_count[$__rate_interval])` | `timeseries` | Request rate of metasrv KV backend operations, by pod and operation. | `prometheus` | `none` | `{{pod}}-{{op}}` |
| DDL Latency | `histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_create_tables_bucket))`<br/>`histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_create_table))`<br/>`histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_create_view))`<br/>`histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_create_flow))`<br/>`histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_drop_table))`<br/>`histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_alter_table))` | `timeseries` | P90 latency of metasrv DDL procedures (create/drop/alter), by procedure step. | `prometheus` | `s` | `CreateLogicalTables-{{step}} p90` |
# Flownode
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
| --- | --- | --- | --- | --- | --- | --- |

View File

@@ -371,6 +371,21 @@ groups:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{request_type}}]'
- title: 'Frontend Handle Bulk Insert Elapsed Time '
type: timeseries
description: Per-stage time for frontend to handle bulk insert requests
unit: s
queries:
- expr: sum by(instance, pod, stage) (rate(greptime_table_operator_handle_bulk_insert_sum[$__rate_interval]))/sum by(instance, pod, stage) (rate(greptime_table_operator_handle_bulk_insert_count[$__rate_interval]))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{stage}}]-AVG'
- expr: histogram_quantile(0.99, sum by(instance, pod, stage, le) (rate(greptime_table_operator_handle_bulk_insert_bucket[$__rate_interval])))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{stage}}]-P95'
- title: Mito Engine
panels:
- title: Request OPS per Instance
@@ -472,7 +487,7 @@ groups:
type: prometheus
uid: ${metrics}
legendFormat: '[{{ instance }}]-[{{pod}}]'
- title: Compaction P99 per Instance by Stage
- title: Compaction Elapsed Time per Instance by Stage
type: timeseries
description: Compaction latency by stage
unit: s
@@ -482,6 +497,11 @@ groups:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{stage}}]-p99'
- expr: sum by(instance, pod, stage) (rate(greptime_mito_compaction_stage_elapsed_sum{}[$__rate_interval]))/sum by(instance, pod, stage) (rate(greptime_mito_compaction_stage_elapsed_count{}[$__rate_interval]))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{stage}}]-avg'
- title: Compaction P99 per Instance
type: timeseries
description: Compaction P99 per Instance.
@@ -562,6 +582,66 @@ groups:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]'
- title: Compaction Input/Output Bytes
type: timeseries
description: Compaction input/output bytes
unit: bytes
queries:
- expr: sum by(instance, pod) (greptime_mito_compaction_input_bytes)
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-input'
- expr: sum by(instance, pod) (greptime_mito_compaction_output_bytes)
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-output'
- title: Region Worker Handle Bulk Insert Requests
type: timeseries
description: Per-stage elapsed time for region worker to handle bulk insert region requests.
unit: s
queries:
- expr: histogram_quantile(0.95, sum by(le,instance, stage, pod) (rate(greptime_region_worker_handle_write_bucket[$__rate_interval])))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{stage}}]-P95'
- expr: sum by(instance, stage, pod) (rate(greptime_region_worker_handle_write_sum[$__rate_interval]))/sum by(instance, stage, pod) (rate(greptime_region_worker_handle_write_count[$__rate_interval]))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{stage}}]-AVG'
- title: Active Series and Field Builders Count
type: timeseries
description: Active time series and field builder counts in mito memtables
unit: none
queries:
- expr: sum by(instance, pod) (greptime_mito_memtable_active_series_count)
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-series'
- expr: sum by(instance, pod) (greptime_mito_memtable_field_builder_count)
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-field_builders'
- title: Region Worker Convert Requests
type: timeseries
description: Per-stage elapsed time for region worker to decode requests.
unit: s
queries:
- expr: histogram_quantile(0.95, sum by(le, instance, stage, pod) (rate(greptime_datanode_convert_region_request_bucket[$__rate_interval])))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{stage}}]-P95'
- expr: sum by(le,instance, stage, pod) (rate(greptime_datanode_convert_region_request_sum[$__rate_interval]))/sum by(le,instance, stage, pod) (rate(greptime_datanode_convert_region_request_count[$__rate_interval]))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{stage}}]-AVG'
- title: OpenDAL
panels:
- title: QPS per Instance
@@ -676,9 +756,8 @@ groups:
- title: Metasrv
panels:
- title: Region migration datanode
type: state-timeline
type: status-history
description: Counter of region migration by source and destination
unit: none
queries:
- expr: greptime_meta_region_migration_stat{datanode_type="src"}
datasource:
@@ -699,17 +778,127 @@ groups:
datasource:
type: prometheus
uid: ${metrics}
legendFormat: __auto
legendFormat: '{{pod}}-{{state}}-{{error_type}}'
- title: Datanode load
type: timeseries
description: Gauge of load information of each datanode, collected via heartbeat between datanode and metasrv. This information is for metasrv to schedule workloads.
unit: none
unit: binBps
queries:
- expr: greptime_datanode_load
datasource:
type: prometheus
uid: ${metrics}
legendFormat: __auto
legendFormat: Datanode-{{datanode_id}}-writeload
- title: Rate of SQL Executions (RDS)
type: timeseries
description: Displays the rate of SQL executions processed by the Meta service using the RDS backend.
unit: none
queries:
- expr: rate(greptime_meta_rds_pg_sql_execute_elapsed_ms_count[$__rate_interval])
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '{{pod}} {{op}} {{type}} {{result}} '
- title: SQL Execution Latency (RDS)
type: timeseries
description: 'Measures the response time of SQL executions via the RDS backend. '
unit: ms
queries:
- expr: histogram_quantile(0.90, sum by(pod, op, type, result, le) (rate(greptime_meta_rds_pg_sql_execute_elapsed_ms_bucket[$__rate_interval])))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '{{pod}} {{op}} {{type}} {{result}} p90'
- title: Handler Execution Latency
type: timeseries
description: |
Shows latency of Meta handlers by pod and handler name, useful for monitoring handler performance and detecting latency spikes.
unit: s
queries:
- expr: |-
histogram_quantile(0.90, sum by(pod, le, name) (
rate(greptime_meta_handler_execute_bucket[$__rate_interval])
))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '{{pod}} {{name}} p90'
- title: Heartbeat Packet Size
type: timeseries
description: |
Shows p90 heartbeat message sizes, helping track network usage and identify anomalies in heartbeat payload.
unit: bytes
queries:
- expr: histogram_quantile(0.9, sum by(pod, le) (greptime_meta_heartbeat_stat_memory_size_bucket))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '{{pod}}'
- title: Meta Heartbeat Receive Rate
type: timeseries
description: Rate of heartbeat messages received by metasrv from datanodes, per pod.
unit: s
queries:
- expr: rate(greptime_meta_heartbeat_rate[$__rate_interval])
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '{{pod}}'
- title: Meta KV Ops Latency
type: timeseries
description: P99 latency of metasrv KV backend requests, by pod, operation and target.
unit: s
queries:
- expr: histogram_quantile(0.99, sum by(pod, le, op, target) (greptime_meta_kv_request_elapsed_bucket))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '{{pod}}-{{op}} p99'
- title: Rate of meta KV Ops
type: timeseries
description: Request rate of metasrv KV backend operations, by pod and operation.
unit: none
queries:
- expr: rate(greptime_meta_kv_request_elapsed_count[$__rate_interval])
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '{{pod}}-{{op}}'
- title: DDL Latency
type: timeseries
description: P90 latency of metasrv DDL procedures (create/drop/alter), by procedure step.
unit: s
queries:
- expr: histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_create_tables_bucket))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: CreateLogicalTables-{{step}} p90
- expr: histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_create_table))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: CreateTable-{{step}} p90
- expr: histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_create_view))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: CreateView-{{step}} p90
- expr: histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_create_flow))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: CreateFlow-{{step}} p90
- expr: histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_drop_table))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: DropTable-{{step}} p90
- expr: histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_alter_table))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: AlterTable-{{step}} p90
- title: Flownode
panels:
- title: Flow Ingest / Output Rate

View File

@@ -6,7 +6,7 @@ DAC_IMAGE=ghcr.io/zyy17/dac:20250423-522bd35
remove_instance_filters() {
# Remove the instance filters for the standalone dashboards.
sed 's/instance=~\\"$datanode\\",//; s/instance=~\\"$datanode\\"//; s/instance=~\\"$frontend\\",//; s/instance=~\\"$frontend\\"//; s/instance=~\\"$metasrv\\",//; s/instance=~\\"$metasrv\\"//; s/instance=~\\"$flownode\\",//; s/instance=~\\"$flownode\\"//;' $CLUSTER_DASHBOARD_DIR/dashboard.json > $STANDALONE_DASHBOARD_DIR/dashboard.json
sed -E 's/instance=~\\"(\$datanode|\$frontend|\$metasrv|\$flownode)\\",?//g' "$CLUSTER_DASHBOARD_DIR/dashboard.json" > "$STANDALONE_DASHBOARD_DIR/dashboard.json"
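# The single extended regex above collapses the per-role substitutions: it strips the
# instance=~\"$datanode\", \"$frontend\", \"$metasrv\" and \"$flownode\" label matchers,
# with or without a trailing comma, in one pass.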
}
generate_intermediate_dashboards_and_docs() {

View File

@@ -26,6 +26,13 @@ excludes = [
"src/common/base/src/secrets.rs",
"src/servers/src/repeated_field.rs",
"src/servers/src/http/test_helpers.rs",
# enterprise
"src/common/meta/src/rpc/ddl/trigger.rs",
"src/operator/src/expr_helper/trigger.rs",
"src/sql/src/statements/create/trigger.rs",
"src/sql/src/statements/show/trigger.rs",
"src/sql/src/parsers/create_parser/trigger.rs",
"src/sql/src/parsers/show_parser/trigger.rs",
]
[properties]

View File

@@ -1,2 +1,2 @@
[toolchain]
channel = "nightly-2025-04-15"
channel = "nightly-2025-05-19"

View File

@@ -22,6 +22,7 @@ use greptime_proto::v1::region::RegionResponse as RegionResponseV1;
pub struct RegionResponse {
pub affected_rows: AffectedRows,
pub extensions: HashMap<String, Vec<u8>>,
pub metadata: Vec<u8>,
}
impl RegionResponse {
@@ -29,6 +30,7 @@ impl RegionResponse {
Self {
affected_rows: region_response.affected_rows as _,
extensions: region_response.extensions,
metadata: region_response.metadata,
}
}
@@ -37,6 +39,16 @@ impl RegionResponse {
Self {
affected_rows,
extensions: Default::default(),
metadata: Vec::new(),
}
}
/// Creates one response with metadata.
pub fn from_metadata(metadata: Vec<u8>) -> Self {
Self {
affected_rows: 0,
extensions: Default::default(),
metadata,
}
}
}
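A minimal sketch of how the new `metadata` field might be used, assuming only the struct and constructors shown in the diff above; the import path is a guess for illustration:

```rust
// Hypothetical usage sketch for the new `metadata` field; the import path below is an
// assumption and may not match the actual crate layout.
use common_meta::rpc::region::RegionResponse;

/// Builds a metadata-only response: zero affected rows, no extensions, only the
/// already-encoded region metadata bytes.
fn metadata_only_response(encoded_metadata: Vec<u8>) -> RegionResponse {
    RegionResponse::from_metadata(encoded_metadata)
}

/// On the caller side, an empty buffer can be treated as "no metadata attached".
fn region_metadata(resp: &RegionResponse) -> Option<&[u8]> {
    (!resp.metadata.is_empty()).then_some(resp.metadata.as_slice())
}
```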

View File

@@ -24,7 +24,7 @@ use greptime_proto::v1::{
};
use snafu::ResultExt;
use crate::error::{self, Result};
use crate::error::{self, ConvertColumnDefaultConstraintSnafu, Result};
use crate::helper::ColumnDataTypeWrapper;
use crate::v1::{ColumnDef, ColumnOptions, SemanticType};
@@ -77,6 +77,48 @@ pub fn try_as_column_schema(column_def: &ColumnDef) -> Result<ColumnSchema> {
})
}
/// Tries to construct a `ColumnDef` from the given `ColumnSchema`.
///
/// TODO(weny): Add tests for this function.
pub fn try_as_column_def(column_schema: &ColumnSchema, is_primary_key: bool) -> Result<ColumnDef> {
let column_datatype =
ColumnDataTypeWrapper::try_from(column_schema.data_type.clone()).map(|w| w.to_parts())?;
let semantic_type = if column_schema.is_time_index() {
SemanticType::Timestamp
} else if is_primary_key {
SemanticType::Tag
} else {
SemanticType::Field
} as i32;
let comment = column_schema
.metadata()
.get(COMMENT_KEY)
.cloned()
.unwrap_or_default();
let default_constraint = match column_schema.default_constraint() {
None => vec![],
Some(v) => v
.clone()
.try_into()
.context(ConvertColumnDefaultConstraintSnafu {
column: &column_schema.name,
})?,
};
let options = options_from_column_schema(column_schema);
Ok(ColumnDef {
name: column_schema.name.clone(),
data_type: column_datatype.0 as i32,
is_nullable: column_schema.is_nullable(),
default_constraint,
semantic_type,
comment,
datatype_extension: column_datatype.1,
options,
})
}
/// Constructs a `ColumnOptions` from the given `ColumnSchema`.
pub fn options_from_column_schema(column_schema: &ColumnSchema) -> Option<ColumnOptions> {
let mut options = ColumnOptions::default();
@@ -226,18 +268,20 @@ mod tests {
assert!(options.is_none());
let mut schema = ColumnSchema::new("test", ConcreteDataType::string_datatype(), true)
.with_fulltext_options(FulltextOptions {
enable: true,
analyzer: FulltextAnalyzer::English,
case_sensitive: false,
backend: FulltextBackend::Bloom,
})
.with_fulltext_options(FulltextOptions::new_unchecked(
true,
FulltextAnalyzer::English,
false,
FulltextBackend::Bloom,
10240,
0.01,
))
.unwrap();
schema.set_inverted_index(true);
let options = options_from_column_schema(&schema).unwrap();
assert_eq!(
options.options.get(FULLTEXT_GRPC_KEY).unwrap(),
"{\"enable\":true,\"analyzer\":\"English\",\"case-sensitive\":false,\"backend\":\"bloom\"}"
"{\"enable\":true,\"analyzer\":\"English\",\"case-sensitive\":false,\"backend\":\"bloom\",\"granularity\":10240,\"false-positive-rate-in-10000\":100}"
);
assert_eq!(
options.options.get(INVERTED_INDEX_GRPC_KEY).unwrap(),
@@ -247,16 +291,18 @@ mod tests {
#[test]
fn test_options_with_fulltext() {
let fulltext = FulltextOptions {
enable: true,
analyzer: FulltextAnalyzer::English,
case_sensitive: false,
backend: FulltextBackend::Bloom,
};
let fulltext = FulltextOptions::new_unchecked(
true,
FulltextAnalyzer::English,
false,
FulltextBackend::Bloom,
10240,
0.01,
);
let options = options_from_fulltext(&fulltext).unwrap().unwrap();
assert_eq!(
options.options.get(FULLTEXT_GRPC_KEY).unwrap(),
"{\"enable\":true,\"analyzer\":\"English\",\"case-sensitive\":false,\"backend\":\"bloom\"}"
"{\"enable\":true,\"analyzer\":\"English\",\"case-sensitive\":false,\"backend\":\"bloom\",\"granularity\":10240,\"false-positive-rate-in-10000\":100}"
);
}
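A small usage sketch for the new `try_as_column_def` helper, reusing the `ColumnSchema` constructor that appears in the tests above; the module paths are assumptions for illustration:

```rust
// Hypothetical sketch; `ColumnSchema::new(name, data_type, is_nullable)` and
// `ConcreteDataType::string_datatype()` are taken from the tests above, while the
// `use` paths are assumed.
use datatypes::prelude::ConcreteDataType;
use datatypes::schema::ColumnSchema;

fn build_tag_column_def() {
    // A nullable string column that participates in the primary key.
    let schema = ColumnSchema::new("host", ConcreteDataType::string_datatype(), true);

    // Not a time index and `is_primary_key == true`, so the helper marks it as a Tag.
    let column_def = try_as_column_def(&schema, true).expect("convertible column");
    assert_eq!(column_def.name, "host");
    assert!(column_def.is_nullable);
}
```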

View File

@@ -17,8 +17,10 @@ arrow-schema.workspace = true
async-stream.workspace = true
async-trait.workspace = true
bytes.workspace = true
common-base.workspace = true
common-catalog.workspace = true
common-error.workspace = true
common-frontend.workspace = true
common-macro.workspace = true
common-meta.workspace = true
common-procedure.workspace = true
@@ -41,6 +43,8 @@ moka = { workspace = true, features = ["future", "sync"] }
partition.workspace = true
paste.workspace = true
prometheus.workspace = true
promql-parser.workspace = true
rand.workspace = true
rustc-hash.workspace = true
serde_json.workspace = true
session.workspace = true

View File

@@ -277,6 +277,26 @@ pub enum Error {
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Failed to invoke frontend services"))]
InvokeFrontend {
source: common_frontend::error::Error,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Meta client is not provided"))]
MetaClientMissing {
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Failed to find frontend node: {}", addr))]
FrontendNotFound {
addr: String,
#[snafu(implicit)]
location: Location,
},
}
impl Error {
@@ -345,6 +365,10 @@ impl ErrorExt for Error {
Error::GetViewCache { source, .. } | Error::GetTableCache { source, .. } => {
source.status_code()
}
Error::InvokeFrontend { source, .. } => source.status_code(),
Error::FrontendNotFound { .. } | Error::MetaClientMissing { .. } => {
StatusCode::Unexpected
}
}
}

View File

@@ -16,8 +16,8 @@ use api::v1::meta::ProcedureStatus;
use common_error::ext::BoxedError;
use common_meta::cluster::{ClusterInfo, NodeInfo};
use common_meta::datanode::RegionStat;
use common_meta::ddl::{ExecutorContext, ProcedureExecutor};
use common_meta::key::flow::flow_state::FlowStat;
use common_meta::procedure_executor::{ExecutorContext, ProcedureExecutor};
use common_meta::rpc::procedure;
use common_procedure::{ProcedureInfo, ProcedureState};
use meta_client::MetaClientRef;

View File

@@ -22,11 +22,13 @@ use common_catalog::consts::{
PG_CATALOG_NAME,
};
use common_error::ext::BoxedError;
use common_meta::cache::{LayeredCacheRegistryRef, ViewInfoCacheRef};
use common_meta::cache::{
LayeredCacheRegistryRef, TableRoute, TableRouteCacheRef, ViewInfoCacheRef,
};
use common_meta::key::catalog_name::CatalogNameKey;
use common_meta::key::flow::FlowMetadataManager;
use common_meta::key::schema_name::SchemaNameKey;
use common_meta::key::table_info::TableInfoValue;
use common_meta::key::table_info::{TableInfoManager, TableInfoValue};
use common_meta::key::table_name::TableNameKey;
use common_meta::key::{TableMetadataManager, TableMetadataManagerRef};
use common_meta::kv_backend::KvBackendRef;
@@ -37,6 +39,7 @@ use moka::sync::Cache;
use partition::manager::{PartitionRuleManager, PartitionRuleManagerRef};
use session::context::{Channel, QueryContext};
use snafu::prelude::*;
use store_api::metric_engine_consts::METRIC_ENGINE_NAME;
use table::dist_table::DistTable;
use table::metadata::TableId;
use table::table::numbers::{NumbersTable, NUMBERS_TABLE_NAME};
@@ -51,6 +54,7 @@ use crate::error::{
};
use crate::information_schema::{InformationExtensionRef, InformationSchemaProvider};
use crate::kvbackend::TableCacheRef;
use crate::process_manager::ProcessManagerRef;
use crate::system_schema::pg_catalog::PGCatalogProvider;
use crate::system_schema::SystemSchemaProvider;
use crate::CatalogManager;
@@ -84,6 +88,7 @@ impl KvBackendCatalogManager {
backend: KvBackendRef,
cache_registry: LayeredCacheRegistryRef,
procedure_manager: Option<ProcedureManagerRef>,
process_manager: Option<ProcessManagerRef>,
) -> Arc<Self> {
Arc::new_cyclic(|me| Self {
information_extension,
@@ -102,12 +107,14 @@ impl KvBackendCatalogManager {
DEFAULT_CATALOG_NAME.to_string(),
me.clone(),
Arc::new(FlowMetadataManager::new(backend.clone())),
process_manager.clone(),
)),
pg_catalog_provider: Arc::new(PGCatalogProvider::new(
DEFAULT_CATALOG_NAME.to_string(),
me.clone(),
)),
backend,
process_manager,
},
cache_registry,
procedure_manager,
@@ -136,6 +143,61 @@ impl KvBackendCatalogManager {
pub fn procedure_manager(&self) -> Option<ProcedureManagerRef> {
self.procedure_manager.clone()
}
// Override logical table's partition key indices with physical table's.
async fn override_logical_table_partition_key_indices(
table_route_cache: &TableRouteCacheRef,
table_info_manager: &TableInfoManager,
table: TableRef,
) -> Result<TableRef> {
// If the table is not a metric table, return the table directly.
if table.table_info().meta.engine != METRIC_ENGINE_NAME {
return Ok(table);
}
if let Some(table_route_value) = table_route_cache
.get(table.table_info().table_id())
.await
.context(TableMetadataManagerSnafu)?
&& let TableRoute::Logical(logical_route) = &*table_route_value
&& let Some(physical_table_info_value) = table_info_manager
.get(logical_route.physical_table_id())
.await
.context(TableMetadataManagerSnafu)?
{
let mut new_table_info = (*table.table_info()).clone();
// Remap partition key indices from physical table to logical table
new_table_info.meta.partition_key_indices = physical_table_info_value
.table_info
.meta
.partition_key_indices
.iter()
.filter_map(|&physical_index| {
// Get the column name from the physical table using the physical index
physical_table_info_value
.table_info
.meta
.schema
.column_schemas
.get(physical_index)
.and_then(|physical_column| {
// Find the corresponding index in the logical table schema
new_table_info
.meta
.schema
.column_index_by_name(physical_column.name.as_str())
})
})
.collect();
let new_table = DistTable::table(Arc::new(new_table_info));
return Ok(new_table);
}
Ok(table)
}
}
#[async_trait::async_trait]
@@ -262,16 +324,28 @@ impl CatalogManager for KvBackendCatalogManager {
let table_cache: TableCacheRef = self.cache_registry.get().context(CacheNotFoundSnafu {
name: "table_cache",
})?;
if let Some(table) = table_cache
let table = table_cache
.get_by_ref(&TableName {
catalog_name: catalog_name.to_string(),
schema_name: schema_name.to_string(),
table_name: table_name.to_string(),
})
.await
.context(GetTableCacheSnafu)?
{
return Ok(Some(table));
.context(GetTableCacheSnafu)?;
if let Some(table) = table {
let table_route_cache: TableRouteCacheRef =
self.cache_registry.get().context(CacheNotFoundSnafu {
name: "table_route_cache",
})?;
return Self::override_logical_table_partition_key_indices(
&table_route_cache,
self.table_metadata_manager.table_info_manager(),
table,
)
.await
.map(Some);
}
if channel == Channel::Postgres {
@@ -284,7 +358,7 @@ impl CatalogManager for KvBackendCatalogManager {
}
}
return Ok(None);
Ok(None)
}
async fn tables_by_ids(
@@ -336,8 +410,20 @@ impl CatalogManager for KvBackendCatalogManager {
let catalog = catalog.to_string();
let schema = schema.to_string();
let semaphore = Arc::new(Semaphore::new(CONCURRENCY));
let table_route_cache: Result<TableRouteCacheRef> =
self.cache_registry.get().context(CacheNotFoundSnafu {
name: "table_route_cache",
});
common_runtime::spawn_global(async move {
let table_route_cache = match table_route_cache {
Ok(table_route_cache) => table_route_cache,
Err(e) => {
let _ = tx.send(Err(e)).await;
return;
}
};
let table_id_stream = metadata_manager
.table_name_manager()
.tables(&catalog, &schema)
@@ -364,6 +450,7 @@ impl CatalogManager for KvBackendCatalogManager {
let metadata_manager = metadata_manager.clone();
let tx = tx.clone();
let semaphore = semaphore.clone();
let table_route_cache = table_route_cache.clone();
common_runtime::spawn_global(async move {
// we don't explicitly close the semaphore so just ignore the potential error.
let _ = semaphore.acquire().await;
@@ -381,6 +468,16 @@ impl CatalogManager for KvBackendCatalogManager {
};
for table in table_info_values.into_values().map(build_table) {
let table = if let Ok(table) = table {
Self::override_logical_table_partition_key_indices(
&table_route_cache,
metadata_manager.table_info_manager(),
table,
)
.await
} else {
table
};
if tx.send(table).await.is_err() {
return;
}
@@ -419,6 +516,7 @@ struct SystemCatalog {
information_schema_provider: Arc<InformationSchemaProvider>,
pg_catalog_provider: Arc<PGCatalogProvider>,
backend: KvBackendRef,
process_manager: Option<ProcessManagerRef>,
}
impl SystemCatalog {
@@ -486,6 +584,7 @@ impl SystemCatalog {
catalog.to_string(),
self.catalog_manager.clone(),
Arc::new(FlowMetadataManager::new(self.backend.clone())),
self.process_manager.clone(),
))
});
information_schema_provider.table(table_name)
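The core of `override_logical_table_partition_key_indices` is a name-based index remap: each physical partition-key index is resolved to a column name on the physical table, then looked up in the logical table's schema, and indices with no logical counterpart are dropped. A standalone sketch of just that mapping over plain column-name lists (a hypothetical helper, not part of the diff):

```rust
/// Hypothetical helper mirroring the remapping logic above: map partition-key indices
/// defined on a physical table onto a logical table that shares column names but may
/// order (or only contain a subset of) them differently.
fn remap_partition_key_indices(
    physical_columns: &[&str],
    physical_partition_key_indices: &[usize],
    logical_columns: &[&str],
) -> Vec<usize> {
    physical_partition_key_indices
        .iter()
        .filter_map(|&physical_index| {
            // Resolve the column name on the physical side...
            physical_columns.get(physical_index).and_then(|name| {
                // ...and find the same name in the logical schema, skipping misses.
                logical_columns.iter().position(|c| c == name)
            })
        })
        .collect()
}

fn remap_example() {
    let physical = ["ts", "host", "dc"];
    let logical = ["host", "ts"];
    // Physical partition keys are `host` and `dc`; only `host` exists in the logical table.
    assert_eq!(remap_partition_key_indices(&physical, &[1, 2], &logical), vec![0]);
}
```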

View File

@@ -14,6 +14,7 @@
#![feature(assert_matches)]
#![feature(try_blocks)]
#![feature(let_chains)]
use std::any::Any;
use std::fmt::{Debug, Formatter};
@@ -40,6 +41,7 @@ pub mod information_schema {
pub use crate::system_schema::information_schema::*;
}
pub mod process_manager;
pub mod table_source;
#[async_trait::async_trait]

View File

@@ -356,6 +356,7 @@ impl MemoryCatalogManager {
catalog,
Arc::downgrade(self) as Weak<dyn CatalogManager>,
Arc::new(FlowMetadataManager::new(Arc::new(MemoryKvBackend::new()))),
None, // we don't need a ProcessManager on the region server.
);
let information_schema = information_schema_provider.tables().clone();

View File

@@ -34,4 +34,20 @@ lazy_static! {
register_histogram!("greptime_catalog_kv_get", "catalog kv get").unwrap();
pub static ref METRIC_CATALOG_KV_BATCH_GET: Histogram =
register_histogram!("greptime_catalog_kv_batch_get", "catalog kv batch get").unwrap();
/// Count of running process in each catalog.
pub static ref PROCESS_LIST_COUNT: IntGaugeVec = register_int_gauge_vec!(
"greptime_process_list_count",
"Running process count per catalog",
&["catalog"]
)
.unwrap();
/// Count of killed process in each catalog.
pub static ref PROCESS_KILL_COUNT: IntCounterVec = register_int_counter_vec!(
"greptime_process_kill_count",
"Completed kill process requests count",
&["catalog"]
)
.unwrap();
}

View File

@@ -0,0 +1,639 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::hash_map::Entry;
use std::collections::HashMap;
use std::fmt::{Debug, Display, Formatter};
use std::sync::atomic::{AtomicU32, Ordering};
use std::sync::{Arc, RwLock};
use std::time::{Duration, Instant, UNIX_EPOCH};
use api::v1::frontend::{KillProcessRequest, ListProcessRequest, ProcessInfo};
use common_base::cancellation::CancellationHandle;
use common_frontend::selector::{FrontendSelector, MetaClientSelector};
use common_frontend::slow_query_event::SlowQueryEvent;
use common_telemetry::{debug, error, info, warn};
use common_time::util::current_time_millis;
use meta_client::MetaClientRef;
use promql_parser::parser::EvalStmt;
use rand::random;
use session::context::QueryContextRef;
use snafu::{ensure, OptionExt, ResultExt};
use sql::statements::statement::Statement;
use tokio::sync::mpsc::Sender;
use crate::error;
use crate::metrics::{PROCESS_KILL_COUNT, PROCESS_LIST_COUNT};
pub type ProcessId = u32;
pub type ProcessManagerRef = Arc<ProcessManager>;
/// Query process manager.
pub struct ProcessManager {
/// Local frontend server address,
server_addr: String,
/// Next process id for local queries.
next_id: AtomicU32,
/// Running process per catalog.
catalogs: RwLock<HashMap<String, HashMap<ProcessId, CancellableProcess>>>,
/// Frontend selector to locate frontend nodes.
frontend_selector: Option<MetaClientSelector>,
}
/// Represents a parsed query statement, functionally equivalent to [query::parser::QueryStatement].
/// This enum is defined here to avoid cyclic dependencies with the query parser module.
#[derive(Debug, Clone)]
pub enum QueryStatement {
Sql(Statement),
Promql(EvalStmt),
}
impl Display for QueryStatement {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
match self {
QueryStatement::Sql(stmt) => write!(f, "{}", stmt),
QueryStatement::Promql(eval_stmt) => write!(f, "{}", eval_stmt),
}
}
}
impl ProcessManager {
/// Create a [ProcessManager] instance with server address and kv client.
pub fn new(server_addr: String, meta_client: Option<MetaClientRef>) -> Self {
let frontend_selector = meta_client.map(MetaClientSelector::new);
Self {
server_addr,
next_id: Default::default(),
catalogs: Default::default(),
frontend_selector,
}
}
}
impl ProcessManager {
/// Registers a submitted query. Use the provided id if present.
#[must_use]
pub fn register_query(
self: &Arc<Self>,
catalog: String,
schemas: Vec<String>,
query: String,
client: String,
query_id: Option<ProcessId>,
_slow_query_timer: Option<SlowQueryTimer>,
) -> Ticket {
let id = query_id.unwrap_or_else(|| self.next_id.fetch_add(1, Ordering::Relaxed));
let process = ProcessInfo {
id,
catalog: catalog.clone(),
schemas,
query,
start_timestamp: current_time_millis(),
client,
frontend: self.server_addr.clone(),
};
let cancellation_handle = Arc::new(CancellationHandle::default());
let cancellable_process = CancellableProcess::new(cancellation_handle.clone(), process);
self.catalogs
.write()
.unwrap()
.entry(catalog.clone())
.or_default()
.insert(id, cancellable_process);
Ticket {
catalog,
manager: self.clone(),
id,
cancellation_handle,
_slow_query_timer,
}
}
/// Generates the next process id.
pub fn next_id(&self) -> u32 {
self.next_id.fetch_add(1, Ordering::Relaxed)
}
/// De-register a query from process list.
pub fn deregister_query(&self, catalog: String, id: ProcessId) {
if let Entry::Occupied(mut o) = self.catalogs.write().unwrap().entry(catalog) {
let process = o.get_mut().remove(&id);
debug!("Deregister process: {:?}", process);
if o.get().is_empty() {
o.remove();
}
}
}
/// List local running processes in given catalog.
pub fn local_processes(&self, catalog: Option<&str>) -> error::Result<Vec<ProcessInfo>> {
let catalogs = self.catalogs.read().unwrap();
let result = if let Some(catalog) = catalog {
if let Some(catalogs) = catalogs.get(catalog) {
catalogs.values().map(|p| p.process.clone()).collect()
} else {
vec![]
}
} else {
catalogs
.values()
.flat_map(|v| v.values().map(|p| p.process.clone()))
.collect()
};
Ok(result)
}
pub async fn list_all_processes(
&self,
catalog: Option<&str>,
) -> error::Result<Vec<ProcessInfo>> {
let mut processes = vec![];
if let Some(remote_frontend_selector) = self.frontend_selector.as_ref() {
let frontends = remote_frontend_selector
.select(|node| node.peer.addr != self.server_addr)
.await
.context(error::InvokeFrontendSnafu)?;
for mut f in frontends {
let result = f
.list_process(ListProcessRequest {
catalog: catalog.unwrap_or_default().to_string(),
})
.await
.context(error::InvokeFrontendSnafu);
match result {
Ok(resp) => {
processes.extend(resp.processes);
}
Err(e) => {
warn!(e; "Skipping failing node: {:?}", f)
}
}
}
}
processes.extend(self.local_processes(catalog)?);
Ok(processes)
}
/// Kills query with provided catalog and id.
pub async fn kill_process(
&self,
server_addr: String,
catalog: String,
id: ProcessId,
) -> error::Result<bool> {
if server_addr == self.server_addr {
self.kill_local_process(catalog, id).await
} else {
let mut nodes = self
.frontend_selector
.as_ref()
.context(error::MetaClientMissingSnafu)?
.select(|node| node.peer.addr == server_addr)
.await
.context(error::InvokeFrontendSnafu)?;
ensure!(
!nodes.is_empty(),
error::FrontendNotFoundSnafu { addr: server_addr }
);
let request = KillProcessRequest {
server_addr,
catalog,
process_id: id,
};
nodes[0]
.kill_process(request)
.await
.context(error::InvokeFrontendSnafu)?;
Ok(true)
}
}
/// Kills local query with provided catalog and id.
pub async fn kill_local_process(&self, catalog: String, id: ProcessId) -> error::Result<bool> {
if let Some(catalogs) = self.catalogs.write().unwrap().get_mut(&catalog) {
if let Some(process) = catalogs.remove(&id) {
process.handle.cancel();
info!(
"Killed process, catalog: {}, id: {:?}",
process.process.catalog, process.process.id
);
PROCESS_KILL_COUNT.with_label_values(&[&catalog]).inc();
Ok(true)
} else {
debug!("Failed to kill process, id not found: {}", id);
Ok(false)
}
} else {
debug!("Failed to kill process, catalog not found: {}", catalog);
Ok(false)
}
}
}
pub struct Ticket {
pub(crate) catalog: String,
pub(crate) manager: ProcessManagerRef,
pub(crate) id: ProcessId,
pub cancellation_handle: Arc<CancellationHandle>,
_slow_query_timer: Option<SlowQueryTimer>,
}
impl Drop for Ticket {
fn drop(&mut self) {
self.manager
.deregister_query(std::mem::take(&mut self.catalog), self.id);
}
}
struct CancellableProcess {
handle: Arc<CancellationHandle>,
process: ProcessInfo,
}
impl Drop for CancellableProcess {
fn drop(&mut self) {
PROCESS_LIST_COUNT
.with_label_values(&[&self.process.catalog])
.dec();
}
}
impl CancellableProcess {
fn new(handle: Arc<CancellationHandle>, process: ProcessInfo) -> Self {
PROCESS_LIST_COUNT
.with_label_values(&[&process.catalog])
.inc();
Self { handle, process }
}
}
impl Debug for CancellableProcess {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
f.debug_struct("CancellableProcess")
.field("cancelled", &self.handle.is_cancelled())
.field("process", &self.process)
.finish()
}
}
/// SlowQueryTimer is used to log slow query when it's dropped.
/// In drop(), it will check if the query is slow and send the slow query event to the handler.
pub struct SlowQueryTimer {
start: Instant,
stmt: QueryStatement,
query_ctx: QueryContextRef,
threshold: Option<Duration>,
sample_ratio: Option<f64>,
tx: Sender<SlowQueryEvent>,
}
impl SlowQueryTimer {
pub fn new(
stmt: QueryStatement,
query_ctx: QueryContextRef,
threshold: Option<Duration>,
sample_ratio: Option<f64>,
tx: Sender<SlowQueryEvent>,
) -> Self {
Self {
start: Instant::now(),
stmt,
query_ctx,
threshold,
sample_ratio,
tx,
}
}
}
impl SlowQueryTimer {
fn send_slow_query_event(&self, elapsed: Duration, threshold: Duration) {
let mut slow_query_event = SlowQueryEvent {
cost: elapsed.as_millis() as u64,
threshold: threshold.as_millis() as u64,
query: "".to_string(),
query_ctx: self.query_ctx.clone(),
// The following fields are only used for PromQL queries.
is_promql: false,
promql_range: None,
promql_step: None,
promql_start: None,
promql_end: None,
};
match &self.stmt {
QueryStatement::Promql(stmt) => {
slow_query_event.is_promql = true;
slow_query_event.query = stmt.expr.to_string();
slow_query_event.promql_step = Some(stmt.interval.as_millis() as u64);
let start = stmt
.start
.duration_since(UNIX_EPOCH)
.unwrap_or_default()
.as_millis() as i64;
let end = stmt
.end
.duration_since(UNIX_EPOCH)
.unwrap_or_default()
.as_millis() as i64;
slow_query_event.promql_range = Some((end - start) as u64);
slow_query_event.promql_start = Some(start);
slow_query_event.promql_end = Some(end);
}
QueryStatement::Sql(stmt) => {
slow_query_event.query = stmt.to_string();
}
}
// Send SlowQueryEvent to the handler.
if let Err(e) = self.tx.try_send(slow_query_event) {
error!(e; "Failed to send slow query event");
}
}
}
impl Drop for SlowQueryTimer {
fn drop(&mut self) {
if let Some(threshold) = self.threshold {
// Calculate the elapsed duration since the timer was created.
let elapsed = self.start.elapsed();
if elapsed > threshold {
if let Some(ratio) = self.sample_ratio {
// Only capture a portion of slow queries based on sample_ratio.
// Generate a random number in [0, 1) and compare it with sample_ratio.
if ratio >= 1.0 || random::<f64>() <= ratio {
self.send_slow_query_event(elapsed, threshold);
}
} else {
// Captures all slow queries if sample_ratio is not set.
self.send_slow_query_event(elapsed, threshold);
}
}
}
}
}
#[cfg(test)]
mod tests {
use std::sync::Arc;
use crate::process_manager::ProcessManager;
#[tokio::test]
async fn test_register_query() {
let process_manager = Arc::new(ProcessManager::new("127.0.0.1:8000".to_string(), None));
let ticket = process_manager.clone().register_query(
"public".to_string(),
vec!["test".to_string()],
"SELECT * FROM table".to_string(),
"".to_string(),
None,
None,
);
let running_processes = process_manager.local_processes(None).unwrap();
assert_eq!(running_processes.len(), 1);
assert_eq!(&running_processes[0].frontend, "127.0.0.1:8000");
assert_eq!(running_processes[0].id, ticket.id);
assert_eq!(&running_processes[0].query, "SELECT * FROM table");
drop(ticket);
assert_eq!(process_manager.local_processes(None).unwrap().len(), 0);
}
#[tokio::test]
async fn test_register_query_with_custom_id() {
let process_manager = Arc::new(ProcessManager::new("127.0.0.1:8000".to_string(), None));
let custom_id = 12345;
let ticket = process_manager.clone().register_query(
"public".to_string(),
vec!["test".to_string()],
"SELECT * FROM table".to_string(),
"client1".to_string(),
Some(custom_id),
None,
);
assert_eq!(ticket.id, custom_id);
let running_processes = process_manager.local_processes(None).unwrap();
assert_eq!(running_processes.len(), 1);
assert_eq!(running_processes[0].id, custom_id);
assert_eq!(&running_processes[0].client, "client1");
}
#[tokio::test]
async fn test_multiple_queries_same_catalog() {
let process_manager = Arc::new(ProcessManager::new("127.0.0.1:8000".to_string(), None));
let ticket1 = process_manager.clone().register_query(
"public".to_string(),
vec!["schema1".to_string()],
"SELECT * FROM table1".to_string(),
"client1".to_string(),
None,
None,
);
let ticket2 = process_manager.clone().register_query(
"public".to_string(),
vec!["schema2".to_string()],
"SELECT * FROM table2".to_string(),
"client2".to_string(),
None,
None,
);
let running_processes = process_manager.local_processes(Some("public")).unwrap();
assert_eq!(running_processes.len(), 2);
// Verify both processes are present
let ids: Vec<u32> = running_processes.iter().map(|p| p.id).collect();
assert!(ids.contains(&ticket1.id));
assert!(ids.contains(&ticket2.id));
}
#[tokio::test]
async fn test_multiple_catalogs() {
let process_manager = Arc::new(ProcessManager::new("127.0.0.1:8000".to_string(), None));
let _ticket1 = process_manager.clone().register_query(
"catalog1".to_string(),
vec!["schema1".to_string()],
"SELECT * FROM table1".to_string(),
"client1".to_string(),
None,
None,
);
let _ticket2 = process_manager.clone().register_query(
"catalog2".to_string(),
vec!["schema2".to_string()],
"SELECT * FROM table2".to_string(),
"client2".to_string(),
None,
None,
);
// Test listing processes for specific catalog
let catalog1_processes = process_manager.local_processes(Some("catalog1")).unwrap();
assert_eq!(catalog1_processes.len(), 1);
assert_eq!(&catalog1_processes[0].catalog, "catalog1");
let catalog2_processes = process_manager.local_processes(Some("catalog2")).unwrap();
assert_eq!(catalog2_processes.len(), 1);
assert_eq!(&catalog2_processes[0].catalog, "catalog2");
// Test listing all processes
let all_processes = process_manager.local_processes(None).unwrap();
assert_eq!(all_processes.len(), 2);
}
#[tokio::test]
async fn test_deregister_query() {
let process_manager = Arc::new(ProcessManager::new("127.0.0.1:8000".to_string(), None));
let ticket = process_manager.clone().register_query(
"public".to_string(),
vec!["test".to_string()],
"SELECT * FROM table".to_string(),
"client1".to_string(),
None,
None,
);
assert_eq!(process_manager.local_processes(None).unwrap().len(), 1);
process_manager.deregister_query("public".to_string(), ticket.id);
assert_eq!(process_manager.local_processes(None).unwrap().len(), 0);
}
#[tokio::test]
async fn test_cancellation_handle() {
let process_manager = Arc::new(ProcessManager::new("127.0.0.1:8000".to_string(), None));
let ticket = process_manager.clone().register_query(
"public".to_string(),
vec!["test".to_string()],
"SELECT * FROM table".to_string(),
"client1".to_string(),
None,
None,
);
assert!(!ticket.cancellation_handle.is_cancelled());
ticket.cancellation_handle.cancel();
assert!(ticket.cancellation_handle.is_cancelled());
}
#[tokio::test]
async fn test_kill_local_process() {
let process_manager = Arc::new(ProcessManager::new("127.0.0.1:8000".to_string(), None));
let ticket = process_manager.clone().register_query(
"public".to_string(),
vec!["test".to_string()],
"SELECT * FROM table".to_string(),
"client1".to_string(),
None,
None,
);
assert!(!ticket.cancellation_handle.is_cancelled());
let killed = process_manager
.kill_process(
"127.0.0.1:8000".to_string(),
"public".to_string(),
ticket.id,
)
.await
.unwrap();
assert!(killed);
assert_eq!(process_manager.local_processes(None).unwrap().len(), 0);
}
#[tokio::test]
async fn test_kill_nonexistent_process() {
let process_manager = Arc::new(ProcessManager::new("127.0.0.1:8000".to_string(), None));
let killed = process_manager
.kill_process("127.0.0.1:8000".to_string(), "public".to_string(), 999)
.await
.unwrap();
assert!(!killed);
}
#[tokio::test]
async fn test_kill_process_nonexistent_catalog() {
let process_manager = Arc::new(ProcessManager::new("127.0.0.1:8000".to_string(), None));
let killed = process_manager
.kill_process("127.0.0.1:8000".to_string(), "nonexistent".to_string(), 1)
.await
.unwrap();
assert!(!killed);
}
#[tokio::test]
async fn test_process_info_fields() {
let process_manager = Arc::new(ProcessManager::new("127.0.0.1:8000".to_string(), None));
let _ticket = process_manager.clone().register_query(
"test_catalog".to_string(),
vec!["schema1".to_string(), "schema2".to_string()],
"SELECT COUNT(*) FROM users WHERE age > 18".to_string(),
"test_client".to_string(),
Some(42),
None,
);
let processes = process_manager.local_processes(None).unwrap();
assert_eq!(processes.len(), 1);
let process = &processes[0];
assert_eq!(process.id, 42);
assert_eq!(&process.catalog, "test_catalog");
assert_eq!(process.schemas, vec!["schema1", "schema2"]);
assert_eq!(&process.query, "SELECT COUNT(*) FROM users WHERE age > 18");
assert_eq!(&process.client, "test_client");
assert_eq!(&process.frontend, "127.0.0.1:8000");
assert!(process.start_timestamp > 0);
}
#[tokio::test]
async fn test_ticket_drop_deregisters_process() {
let process_manager = Arc::new(ProcessManager::new("127.0.0.1:8000".to_string(), None));
{
let _ticket = process_manager.clone().register_query(
"public".to_string(),
vec!["test".to_string()],
"SELECT * FROM table".to_string(),
"client1".to_string(),
None,
None,
);
// Process should be registered
assert_eq!(process_manager.local_processes(None).unwrap().len(), 1);
} // ticket goes out of scope here
// Process should be automatically deregistered
assert_eq!(process_manager.local_processes(None).unwrap().len(), 0);
}
}
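The sampling decision in `SlowQueryTimer::drop` can be isolated into a tiny predicate: no configured `sample_ratio` records every slow query, a ratio of 1.0 or higher also records everything, and anything lower records with that probability. A hypothetical standalone sketch of the rule:

```rust
use rand::random;

/// Mirrors the sampling branch of `SlowQueryTimer::drop` above. `None` means sampling
/// is not configured, so every slow query is recorded.
fn should_record_slow_query(sample_ratio: Option<f64>) -> bool {
    match sample_ratio {
        None => true,
        // `ratio >= 1.0` short-circuits the RNG; otherwise record with probability `ratio`.
        Some(ratio) => ratio >= 1.0 || random::<f64>() <= ratio,
    }
}
```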

View File

@@ -19,6 +19,7 @@ mod information_memory_table;
pub mod key_column_usage;
mod partitions;
mod procedure_info;
mod process_list;
pub mod region_peers;
mod region_statistics;
mod runtime_metrics;
@@ -42,6 +43,7 @@ use common_recordbatch::SendableRecordBatchStream;
use datatypes::schema::SchemaRef;
use lazy_static::lazy_static;
use paste::paste;
use process_list::InformationSchemaProcessList;
use store_api::storage::{ScanRequest, TableId};
use table::metadata::TableType;
use table::TableRef;
@@ -50,6 +52,7 @@ use views::InformationSchemaViews;
use self::columns::InformationSchemaColumns;
use crate::error::{Error, Result};
use crate::process_manager::ProcessManagerRef;
use crate::system_schema::information_schema::cluster_info::InformationSchemaClusterInfo;
use crate::system_schema::information_schema::flows::InformationSchemaFlows;
use crate::system_schema::information_schema::information_memory_table::get_schema_columns;
@@ -113,6 +116,7 @@ macro_rules! setup_memory_table {
pub struct InformationSchemaProvider {
catalog_name: String,
catalog_manager: Weak<dyn CatalogManager>,
process_manager: Option<ProcessManagerRef>,
flow_metadata_manager: Arc<FlowMetadataManager>,
tables: HashMap<String, TableRef>,
}
@@ -207,6 +211,10 @@ impl SystemSchemaProviderInner for InformationSchemaProvider {
self.catalog_manager.clone(),
),
) as _),
PROCESS_LIST => self
.process_manager
.as_ref()
.map(|p| Arc::new(InformationSchemaProcessList::new(p.clone())) as _),
_ => None,
}
}
@@ -217,11 +225,13 @@ impl InformationSchemaProvider {
catalog_name: String,
catalog_manager: Weak<dyn CatalogManager>,
flow_metadata_manager: Arc<FlowMetadataManager>,
process_manager: Option<ProcessManagerRef>,
) -> Self {
let mut provider = Self {
catalog_name,
catalog_manager,
flow_metadata_manager,
process_manager,
tables: HashMap::new(),
};
@@ -277,6 +287,9 @@ impl InformationSchemaProvider {
self.build_table(TABLE_CONSTRAINTS).unwrap(),
);
tables.insert(FLOWS.to_string(), self.build_table(FLOWS).unwrap());
if let Some(process_list) = self.build_table(PROCESS_LIST) {
tables.insert(PROCESS_LIST.to_string(), process_list);
}
// Add memory tables
for name in MEMORY_TABLES.iter() {
tables.insert((*name).to_string(), self.build_table(name).expect(name));
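
A minimal wiring sketch for the change above (the constructor signature is taken from the hunk, and only types already imported in this module are used; the caller that owns the catalog manager, flow metadata manager and process manager handles is not shown in this diff and is assumed):

fn build_information_schema_provider(
    catalog_name: String,
    catalog_manager: Weak<dyn CatalogManager>,
    flow_metadata_manager: Arc<FlowMetadataManager>,
    process_manager: Option<ProcessManagerRef>,
) -> InformationSchemaProvider {
    // When no ProcessManager is configured, build_table(PROCESS_LIST) returns None
    // and the process_list table is simply not registered.
    InformationSchemaProvider::new(
        catalog_name,
        catalog_manager,
        flow_metadata_manager,
        process_manager,
    )
}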

View File

@@ -0,0 +1,189 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::sync::Arc;
use common_catalog::consts::INFORMATION_SCHEMA_PROCESS_LIST_TABLE_ID;
use common_error::ext::BoxedError;
use common_frontend::DisplayProcessId;
use common_recordbatch::adapter::RecordBatchStreamAdapter;
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
use common_time::util::current_time_millis;
use common_time::{Duration, Timestamp};
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
use datatypes::prelude::ConcreteDataType as CDT;
use datatypes::scalars::ScalarVectorBuilder;
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
use datatypes::value::Value;
use datatypes::vectors::{
DurationMillisecondVectorBuilder, StringVectorBuilder, TimestampMillisecondVectorBuilder,
VectorRef,
};
use snafu::ResultExt;
use store_api::storage::{ScanRequest, TableId};
use crate::error::{self, InternalSnafu};
use crate::information_schema::Predicates;
use crate::process_manager::ProcessManagerRef;
use crate::system_schema::information_schema::InformationTable;
/// Column names of `information_schema.process_list`
const ID: &str = "id";
const CATALOG: &str = "catalog";
const SCHEMAS: &str = "schemas";
const QUERY: &str = "query";
const CLIENT: &str = "client";
const FRONTEND: &str = "frontend";
const START_TIMESTAMP: &str = "start_timestamp";
const ELAPSED_TIME: &str = "elapsed_time";
/// `information_schema.process_list` table implementation that tracks running
/// queries in the current cluster.
pub struct InformationSchemaProcessList {
schema: SchemaRef,
process_manager: ProcessManagerRef,
}
impl InformationSchemaProcessList {
pub fn new(process_manager: ProcessManagerRef) -> Self {
Self {
schema: Self::schema(),
process_manager,
}
}
fn schema() -> SchemaRef {
Arc::new(Schema::new(vec![
ColumnSchema::new(ID, CDT::string_datatype(), false),
ColumnSchema::new(CATALOG, CDT::string_datatype(), false),
ColumnSchema::new(SCHEMAS, CDT::string_datatype(), false),
ColumnSchema::new(QUERY, CDT::string_datatype(), false),
ColumnSchema::new(CLIENT, CDT::string_datatype(), false),
ColumnSchema::new(FRONTEND, CDT::string_datatype(), false),
ColumnSchema::new(
START_TIMESTAMP,
CDT::timestamp_millisecond_datatype(),
false,
),
ColumnSchema::new(ELAPSED_TIME, CDT::duration_millisecond_datatype(), false),
]))
}
}
impl InformationTable for InformationSchemaProcessList {
fn table_id(&self) -> TableId {
INFORMATION_SCHEMA_PROCESS_LIST_TABLE_ID
}
fn table_name(&self) -> &'static str {
"process_list"
}
fn schema(&self) -> SchemaRef {
self.schema.clone()
}
fn to_stream(&self, request: ScanRequest) -> error::Result<SendableRecordBatchStream> {
let process_manager = self.process_manager.clone();
let stream = Box::pin(DfRecordBatchStreamAdapter::new(
self.schema.arrow_schema().clone(),
futures::stream::once(async move {
make_process_list(process_manager, request)
.await
.map(RecordBatch::into_df_record_batch)
.map_err(|e| datafusion::error::DataFusionError::External(Box::new(e)))
}),
));
Ok(Box::pin(
RecordBatchStreamAdapter::try_new(stream)
.map_err(BoxedError::new)
.context(InternalSnafu)?,
))
}
}
/// Build running process list.
async fn make_process_list(
process_manager: ProcessManagerRef,
request: ScanRequest,
) -> error::Result<RecordBatch> {
let predicates = Predicates::from_scan_request(&Some(request));
let current_time = current_time_millis();
// todo(hl): find a way to extract user catalog to filter queries from other users.
let queries = process_manager.list_all_processes(None).await?;
let mut id_builder = StringVectorBuilder::with_capacity(queries.len());
let mut catalog_builder = StringVectorBuilder::with_capacity(queries.len());
let mut schemas_builder = StringVectorBuilder::with_capacity(queries.len());
let mut query_builder = StringVectorBuilder::with_capacity(queries.len());
let mut client_builder = StringVectorBuilder::with_capacity(queries.len());
let mut frontend_builder = StringVectorBuilder::with_capacity(queries.len());
let mut start_time_builder = TimestampMillisecondVectorBuilder::with_capacity(queries.len());
let mut elapsed_time_builder = DurationMillisecondVectorBuilder::with_capacity(queries.len());
for process in queries {
let display_id = DisplayProcessId {
server_addr: process.frontend.to_string(),
id: process.id,
}
.to_string();
let schemas = process.schemas.join(",");
let id = Value::from(display_id);
let catalog = Value::from(process.catalog);
let schemas = Value::from(schemas);
let query = Value::from(process.query);
let client = Value::from(process.client);
let frontend = Value::from(process.frontend);
let start_timestamp = Value::from(Timestamp::new_millisecond(process.start_timestamp));
let elapsed_time = Value::from(Duration::new_millisecond(
current_time - process.start_timestamp,
));
let row = [
(ID, &id),
(CATALOG, &catalog),
(SCHEMAS, &schemas),
(QUERY, &query),
(CLIENT, &client),
(FRONTEND, &frontend),
(START_TIMESTAMP, &start_timestamp),
(ELAPSED_TIME, &elapsed_time),
];
if predicates.eval(&row) {
id_builder.push(id.as_string().as_deref());
catalog_builder.push(catalog.as_string().as_deref());
schemas_builder.push(schemas.as_string().as_deref());
query_builder.push(query.as_string().as_deref());
client_builder.push(client.as_string().as_deref());
frontend_builder.push(frontend.as_string().as_deref());
start_time_builder.push(start_timestamp.as_timestamp().map(|t| t.value().into()));
elapsed_time_builder.push(elapsed_time.as_duration().map(|d| d.value().into()));
}
}
RecordBatch::new(
InformationSchemaProcessList::schema(),
vec![
Arc::new(id_builder.finish()) as VectorRef,
Arc::new(catalog_builder.finish()) as VectorRef,
Arc::new(schemas_builder.finish()) as VectorRef,
Arc::new(query_builder.finish()) as VectorRef,
Arc::new(client_builder.finish()) as VectorRef,
Arc::new(frontend_builder.finish()) as VectorRef,
Arc::new(start_time_builder.finish()) as VectorRef,
Arc::new(elapsed_time_builder.finish()) as VectorRef,
],
)
.context(error::CreateRecordBatchSnafu)
}
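
A minimal usage sketch (it uses only names defined or imported above, and assumes `ScanRequest::default()` is available; the ProcessManagerRef comes from the caller): the table is turned into a record batch stream with an empty scan request, yielding one row per tracked query.

fn scan_all_processes(
    process_manager: ProcessManagerRef,
) -> error::Result<SendableRecordBatchStream> {
    let table = InformationSchemaProcessList::new(process_manager);
    // A default ScanRequest carries no predicates, so every running query is returned.
    table.to_stream(ScanRequest::default())
}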

View File

@@ -47,3 +47,4 @@ pub const VIEWS: &str = "views";
pub const FLOWS: &str = "flows";
pub const PROCEDURE_INFO: &str = "procedure_info";
pub const REGION_STATISTICS: &str = "region_statistics";
pub const PROCESS_LIST: &str = "process_list";

View File

@@ -328,6 +328,7 @@ mod tests {
backend.clone(),
layered_cache_registry,
None,
None,
);
let table_metadata_manager = TableMetadataManager::new(backend);
let mut view_info = common_meta::key::test_utils::new_test_table_info(1024, vec![]);

View File

@@ -5,13 +5,18 @@ edition.workspace = true
license.workspace = true
[features]
pg_kvbackend = ["common-meta/pg_kvbackend"]
mysql_kvbackend = ["common-meta/mysql_kvbackend"]
default = [
"pg_kvbackend",
"mysql_kvbackend",
]
pg_kvbackend = ["common-meta/pg_kvbackend", "meta-srv/pg_kvbackend"]
mysql_kvbackend = ["common-meta/mysql_kvbackend", "meta-srv/mysql_kvbackend"]
[lints]
workspace = true
[dependencies]
async-stream.workspace = true
async-trait.workspace = true
auth.workspace = true
base64.workspace = true
@@ -43,11 +48,10 @@ etcd-client.workspace = true
futures.workspace = true
humantime.workspace = true
meta-client.workspace = true
meta-srv.workspace = true
nu-ansi-term = "0.46"
opendal = { version = "0.51.1", features = [
"services-fs",
"services-s3",
] }
object-store.workspace = true
operator.workspace = true
query.workspace = true
rand.workspace = true
reqwest.workspace = true
@@ -63,6 +67,7 @@ tokio.workspace = true
tracing-appender.workspace = true
[dev-dependencies]
common-meta = { workspace = true, features = ["testing"] }
common-version.workspace = true
serde.workspace = true
tempfile.workspace = true

View File

@@ -58,6 +58,7 @@ where
info!("{desc}, average operation cost: {cost:.2} ms");
}
/// Command to benchmark table metadata operations.
#[derive(Debug, Default, Parser)]
pub struct BenchTableMetadataCommand {
#[clap(long)]
@@ -159,6 +160,7 @@ fn create_table_info(table_id: TableId, table_name: TableName) -> RawTableInfo {
options: Default::default(),
region_numbers: (1..=100).collect(),
partition_key_indices: vec![],
column_ids: vec![],
};
RawTableInfo {

src/cli/src/data.rs (new file, 39 lines)
View File

@@ -0,0 +1,39 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
mod export;
mod import;
use clap::Subcommand;
use common_error::ext::BoxedError;
use crate::data::export::ExportCommand;
use crate::data::import::ImportCommand;
use crate::Tool;
/// Command for data operations including exporting data from and importing data into GreptimeDB.
#[derive(Subcommand)]
pub enum DataCommand {
Export(ExportCommand),
Import(ImportCommand),
}
impl DataCommand {
pub async fn build(&self) -> std::result::Result<Box<dyn Tool>, BoxedError> {
match self {
DataCommand::Export(cmd) => cmd.build().await,
DataCommand::Import(cmd) => cmd.build().await,
}
}
}

View File

@@ -19,10 +19,12 @@ use std::time::Duration;
use async_trait::async_trait;
use clap::{Parser, ValueEnum};
use common_base::secrets::{ExposeSecret, SecretString};
use common_error::ext::BoxedError;
use common_telemetry::{debug, error, info};
use opendal::layers::LoggingLayer;
use opendal::{services, Operator};
use object_store::layers::LoggingLayer;
use object_store::services::Oss;
use object_store::{services, ObjectStore};
use serde_json::Value;
use snafu::{OptionExt, ResultExt};
use tokio::sync::Semaphore;
@@ -48,6 +50,7 @@ enum ExportTarget {
All,
}
/// Command for exporting data from GreptimeDB.
#[derive(Debug, Default, Parser)]
pub struct ExportCommand {
/// Server address to connect
@@ -110,11 +113,26 @@ pub struct ExportCommand {
#[clap(long)]
s3: bool,
/// If both `ddl_local_dir` and remote storage (s3/oss) are set, `ddl_local_dir` is used only for
/// the exported SQL files, while the data is exported to remote storage.
///
/// Note that `ddl_local_dir` writes SQL files to the **LOCAL** file system; this is useful when the
/// export client has no direct access to remote storage.
///
/// If remote storage is set but `ddl_local_dir` is not, both SQL files and data are exported to remote storage.
#[clap(long)]
ddl_local_dir: Option<String>,
/// The s3 bucket name
/// if s3 is set, this is required
#[clap(long)]
s3_bucket: Option<String>,
/// The s3 root path
/// optional; if set, it is used as the path prefix under the bucket
#[clap(long)]
s3_root: Option<String>,
/// The s3 endpoint
/// if s3 is set, this is required
#[clap(long)]
@@ -134,6 +152,30 @@ pub struct ExportCommand {
/// if s3 is set, this is required
#[clap(long)]
s3_region: Option<String>,
/// Whether to export data to oss
#[clap(long)]
oss: bool,
/// The oss bucket name
/// if oss is set, this is required
#[clap(long)]
oss_bucket: Option<String>,
/// The oss endpoint
/// if oss is set, this is required
#[clap(long)]
oss_endpoint: Option<String>,
/// The oss access key id
/// if oss is set, this is required
#[clap(long)]
oss_access_key_id: Option<String>,
/// The oss access key secret
/// if oss is set, this is required
#[clap(long)]
oss_access_key_secret: Option<String>,
}
impl ExportCommand {
@@ -147,7 +189,7 @@ impl ExportCommand {
{
return Err(BoxedError::new(S3ConfigNotSetSnafu {}.build()));
}
if !self.s3 && self.output_dir.is_none() {
if !self.s3 && !self.oss && self.output_dir.is_none() {
return Err(BoxedError::new(OutputDirNotSetSnafu {}.build()));
}
let (catalog, schema) =
@@ -172,11 +214,32 @@ impl ExportCommand {
start_time: self.start_time.clone(),
end_time: self.end_time.clone(),
s3: self.s3,
ddl_local_dir: self.ddl_local_dir.clone(),
s3_bucket: self.s3_bucket.clone(),
s3_root: self.s3_root.clone(),
s3_endpoint: self.s3_endpoint.clone(),
s3_access_key: self.s3_access_key.clone(),
s3_secret_key: self.s3_secret_key.clone(),
// Wrap sensitive values in SecretString
s3_access_key: self
.s3_access_key
.as_ref()
.map(|k| SecretString::from(k.clone())),
s3_secret_key: self
.s3_secret_key
.as_ref()
.map(|k| SecretString::from(k.clone())),
s3_region: self.s3_region.clone(),
oss: self.oss,
oss_bucket: self.oss_bucket.clone(),
oss_endpoint: self.oss_endpoint.clone(),
// Wrap sensitive values in SecretString
oss_access_key_id: self
.oss_access_key_id
.as_ref()
.map(|k| SecretString::from(k.clone())),
oss_access_key_secret: self
.oss_access_key_secret
.as_ref()
.map(|k| SecretString::from(k.clone())),
}))
}
}
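
The credential handling above follows one pattern throughout this change: values are wrapped in SecretString at the CLI boundary and only unwrapped with expose_secret() at the point of use. A small stand-alone sketch of that pattern (the function and its argument are illustrative, not part of this change):

use common_base::secrets::{ExposeSecret, SecretString};

fn wrap_and_use(raw_key: Option<String>) {
    // Keep the credential wrapped so Debug/log output cannot leak it.
    let wrapped: Option<SecretString> = raw_key.map(SecretString::from);
    if let Some(key) = wrapped.as_ref() {
        // expose_secret() is the single place where the raw value becomes visible.
        let _raw = key.expose_secret();
    }
}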
@@ -192,21 +255,30 @@ pub struct Export {
start_time: Option<String>,
end_time: Option<String>,
s3: bool,
ddl_local_dir: Option<String>,
s3_bucket: Option<String>,
s3_root: Option<String>,
s3_endpoint: Option<String>,
s3_access_key: Option<String>,
s3_secret_key: Option<String>,
// Changed to SecretString for sensitive data
s3_access_key: Option<SecretString>,
s3_secret_key: Option<SecretString>,
s3_region: Option<String>,
oss: bool,
oss_bucket: Option<String>,
oss_endpoint: Option<String>,
// Changed to SecretString for sensitive data
oss_access_key_id: Option<SecretString>,
oss_access_key_secret: Option<SecretString>,
}
impl Export {
fn catalog_path(&self) -> PathBuf {
if self.s3 {
if self.s3 || self.oss {
PathBuf::from(&self.catalog)
} else if let Some(dir) = &self.output_dir {
PathBuf::from(dir).join(&self.catalog)
} else {
unreachable!("catalog_path: output_dir must be set when not using s3")
unreachable!("catalog_path: output_dir must be set when not using remote storage")
}
}
@@ -364,7 +436,7 @@ impl Export {
let timer = Instant::now();
let db_names = self.get_db_names().await?;
let db_count = db_names.len();
let operator = self.build_operator().await?;
let operator = self.build_prefer_fs_operator().await?;
for schema in db_names {
let create_database = self
@@ -394,7 +466,7 @@ impl Export {
let semaphore = Arc::new(Semaphore::new(self.parallelism));
let db_names = self.get_db_names().await?;
let db_count = db_names.len();
let operator = Arc::new(self.build_operator().await?);
let operator = Arc::new(self.build_prefer_fs_operator().await?);
let mut tasks = Vec::with_capacity(db_names.len());
for schema in db_names {
@@ -408,7 +480,7 @@ impl Export {
.await?;
// Create directory if needed for file system storage
if !export_self.s3 {
if !export_self.s3 && !export_self.oss {
let db_dir = format!("{}/{}/", export_self.catalog, schema);
operator.create_dir(&db_dir).await.context(OpenDalSnafu)?;
}
@@ -451,21 +523,45 @@ impl Export {
Ok(())
}
async fn build_operator(&self) -> Result<Operator> {
async fn build_operator(&self) -> Result<ObjectStore> {
if self.s3 {
self.build_s3_operator().await
} else if self.oss {
self.build_oss_operator().await
} else {
self.build_fs_operator().await
}
}
async fn build_s3_operator(&self) -> Result<Operator> {
let mut builder = services::S3::default().root("").bucket(
/// Builds an operator, preferring the local file system (`ddl_local_dir`) when it is set.
async fn build_prefer_fs_operator(&self) -> Result<ObjectStore> {
if (self.s3 || self.oss) && self.ddl_local_dir.is_some() {
let root = self.ddl_local_dir.as_ref().unwrap().clone();
let op = ObjectStore::new(services::Fs::default().root(&root))
.context(OpenDalSnafu)?
.layer(LoggingLayer::default())
.finish();
Ok(op)
} else if self.s3 {
self.build_s3_operator().await
} else if self.oss {
self.build_oss_operator().await
} else {
self.build_fs_operator().await
}
}
async fn build_s3_operator(&self) -> Result<ObjectStore> {
let mut builder = services::S3::default().bucket(
self.s3_bucket
.as_ref()
.expect("s3_bucket must be provided when s3 is enabled"),
);
if let Some(root) = self.s3_root.as_ref() {
builder = builder.root(root);
}
if let Some(endpoint) = self.s3_endpoint.as_ref() {
builder = builder.endpoint(endpoint);
}
@@ -475,27 +571,51 @@ impl Export {
}
if let Some(key_id) = self.s3_access_key.as_ref() {
builder = builder.access_key_id(key_id);
builder = builder.access_key_id(key_id.expose_secret());
}
if let Some(secret_key) = self.s3_secret_key.as_ref() {
builder = builder.secret_access_key(secret_key);
builder = builder.secret_access_key(secret_key.expose_secret());
}
let op = Operator::new(builder)
let op = ObjectStore::new(builder)
.context(OpenDalSnafu)?
.layer(LoggingLayer::default())
.finish();
Ok(op)
}
async fn build_fs_operator(&self) -> Result<Operator> {
async fn build_oss_operator(&self) -> Result<ObjectStore> {
let mut builder = Oss::default()
.bucket(self.oss_bucket.as_ref().expect("oss_bucket must be set"))
.endpoint(
self.oss_endpoint
.as_ref()
.expect("oss_endpoint must be set"),
);
// Use expose_secret() to access the actual secret value
if let Some(key_id) = self.oss_access_key_id.as_ref() {
builder = builder.access_key_id(key_id.expose_secret());
}
if let Some(secret_key) = self.oss_access_key_secret.as_ref() {
builder = builder.access_key_secret(secret_key.expose_secret());
}
let op = ObjectStore::new(builder)
.context(OpenDalSnafu)?
.layer(LoggingLayer::default())
.finish();
Ok(op)
}
async fn build_fs_operator(&self) -> Result<ObjectStore> {
let root = self
.output_dir
.as_ref()
.context(OutputDirNotSetSnafu)?
.clone();
let op = Operator::new(services::Fs::default().root(&root))
let op = ObjectStore::new(services::Fs::default().root(&root))
.context(OpenDalSnafu)?
.layer(LoggingLayer::default())
.finish();
@@ -509,6 +629,7 @@ impl Export {
let db_count = db_names.len();
let mut tasks = Vec::with_capacity(db_count);
let operator = Arc::new(self.build_operator().await?);
let fs_first_operator = Arc::new(self.build_prefer_fs_operator().await?);
let with_options = build_with_options(&self.start_time, &self.end_time);
for schema in db_names {
@@ -516,12 +637,13 @@ impl Export {
let export_self = self.clone();
let with_options_clone = with_options.clone();
let operator = operator.clone();
let fs_first_operator = fs_first_operator.clone();
tasks.push(async move {
let _permit = semaphore_moved.acquire().await.unwrap();
// Create directory if not using S3
if !export_self.s3 {
// Create directory if not using remote storage
if !export_self.s3 && !export_self.oss {
let db_dir = format!("{}/{}/", export_self.catalog, schema);
operator.create_dir(&db_dir).await.context(OpenDalSnafu)?;
}
@@ -533,7 +655,11 @@ impl Export {
r#"COPY DATABASE "{}"."{}" TO '{}' WITH ({}){};"#,
export_self.catalog, schema, path, with_options_clone, connection_part
);
info!("Executing sql: {sql}");
// Log SQL command but mask sensitive information
let safe_sql = export_self.mask_sensitive_sql(&sql);
info!("Executing sql: {}", safe_sql);
export_self.database_client.sql_in_public(&sql).await?;
info!(
"Finished exporting {}.{} data to {}",
@@ -549,7 +675,7 @@ impl Export {
let copy_from_path = export_self.get_file_path(&schema, "copy_from.sql");
export_self
.write_to_storage(
&operator,
&fs_first_operator,
&copy_from_path,
copy_database_from_sql.into_bytes(),
)
@@ -573,6 +699,29 @@ impl Export {
Ok(())
}
/// Mask sensitive information in SQL commands for safe logging
fn mask_sensitive_sql(&self, sql: &str) -> String {
let mut masked_sql = sql.to_string();
// Mask S3 credentials
if let Some(access_key) = &self.s3_access_key {
masked_sql = masked_sql.replace(access_key.expose_secret(), "[REDACTED]");
}
if let Some(secret_key) = &self.s3_secret_key {
masked_sql = masked_sql.replace(secret_key.expose_secret(), "[REDACTED]");
}
// Mask OSS credentials
if let Some(access_key_id) = &self.oss_access_key_id {
masked_sql = masked_sql.replace(access_key_id.expose_secret(), "[REDACTED]");
}
if let Some(access_key_secret) = &self.oss_access_key_secret {
masked_sql = masked_sql.replace(access_key_secret.expose_secret(), "[REDACTED]");
}
masked_sql
}
fn get_file_path(&self, schema: &str, file_name: &str) -> String {
format!("{}/{}/{}", self.catalog, schema, file_name)
}
@@ -580,8 +729,20 @@ impl Export {
fn format_output_path(&self, file_path: &str) -> String {
if self.s3 {
format!(
"s3://{}/{}",
"s3://{}{}/{}",
self.s3_bucket.as_ref().unwrap_or(&String::new()),
if let Some(root) = &self.s3_root {
format!("/{}", root)
} else {
String::new()
},
file_path
)
} else if self.oss {
format!(
"oss://{}/{}/{}",
self.oss_bucket.as_ref().unwrap_or(&String::new()),
self.catalog,
file_path
)
} else {
@@ -595,19 +756,27 @@ impl Export {
async fn write_to_storage(
&self,
op: &Operator,
op: &ObjectStore,
file_path: &str,
content: Vec<u8>,
) -> Result<()> {
op.write(file_path, content).await.context(OpenDalSnafu)
op.write(file_path, content)
.await
.context(OpenDalSnafu)
.map(|_| ())
}
fn get_storage_params(&self, schema: &str) -> (String, String) {
if self.s3 {
let s3_path = format!(
"s3://{}/{}/{}/",
"s3://{}{}/{}/{}/",
// Safety: s3_bucket is required when s3 is enabled
self.s3_bucket.as_ref().unwrap(),
if let Some(root) = &self.s3_root {
format!("/{}", root)
} else {
String::new()
},
self.catalog,
schema
);
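// Illustrative example (values hypothetical): with bucket "my-bucket", root "backup",
// catalog "greptime" and schema "public", this yields "s3://my-bucket/backup/greptime/public/".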
@@ -620,15 +789,36 @@ impl Export {
};
// Safety: All s3 options are required
// Use expose_secret() to access the actual secret values
let connection_options = format!(
"ACCESS_KEY_ID='{}', SECRET_ACCESS_KEY='{}', REGION='{}'{}",
self.s3_access_key.as_ref().unwrap(),
self.s3_secret_key.as_ref().unwrap(),
self.s3_access_key.as_ref().unwrap().expose_secret(),
self.s3_secret_key.as_ref().unwrap().expose_secret(),
self.s3_region.as_ref().unwrap(),
endpoint_option
);
(s3_path, format!(" CONNECTION ({})", connection_options))
} else if self.oss {
let oss_path = format!(
"oss://{}/{}/{}/",
self.oss_bucket.as_ref().unwrap(),
self.catalog,
schema
);
let endpoint_option = if let Some(endpoint) = self.oss_endpoint.as_ref() {
format!(", ENDPOINT='{}'", endpoint)
} else {
String::new()
};
let connection_options = format!(
"ACCESS_KEY_ID='{}', ACCESS_KEY_SECRET='{}'{}",
self.oss_access_key_id.as_ref().unwrap().expose_secret(),
self.oss_access_key_secret.as_ref().unwrap().expose_secret(),
endpoint_option
);
(oss_path, format!(" CONNECTION ({})", connection_options))
} else {
(
self.catalog_path()

View File

@@ -40,6 +40,7 @@ enum ImportTarget {
All,
}
/// Command to import data from a directory into a GreptimeDB instance.
#[derive(Debug, Default, Parser)]
pub struct ImportCommand {
/// Server address to connect

View File

@@ -17,7 +17,10 @@ use std::any::Any;
use common_error::ext::{BoxedError, ErrorExt};
use common_error::status_code::StatusCode;
use common_macro::stack_trace_debug;
use common_meta::peer::Peer;
use object_store::Error as ObjectStoreError;
use snafu::{Location, Snafu};
use store_api::storage::TableId;
#[derive(Snafu)]
#[snafu(visibility(pub))]
@@ -29,6 +32,7 @@ pub enum Error {
location: Location,
msg: String,
},
#[snafu(display("Failed to create default catalog and schema"))]
InitMetadata {
#[snafu(implicit)]
@@ -71,6 +75,20 @@ pub enum Error {
source: common_meta::error::Error,
},
#[snafu(display("Failed to get table metadata"))]
TableMetadata {
#[snafu(implicit)]
location: Location,
source: common_meta::error::Error,
},
#[snafu(display("Unexpected error: {}", msg))]
Unexpected {
msg: String,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Missing config, msg: {}", msg))]
MissingConfig {
msg: String,
@@ -220,23 +238,81 @@ pub enum Error {
location: Location,
},
#[snafu(display("Table not found: {table_id}"))]
TableNotFound {
table_id: TableId,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("OpenDAL operator failed"))]
OpenDal {
#[snafu(implicit)]
location: Location,
#[snafu(source)]
error: opendal::Error,
error: ObjectStoreError,
},
#[snafu(display("S3 config need be set"))]
S3ConfigNotSet {
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Output directory not set"))]
OutputDirNotSet {
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Empty store addresses"))]
EmptyStoreAddrs {
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Unsupported memory backend"))]
UnsupportedMemoryBackend {
#[snafu(implicit)]
location: Location,
},
#[snafu(display("File path invalid: {}", msg))]
InvalidFilePath {
msg: String,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Invalid arguments: {}", msg))]
InvalidArguments {
msg: String,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Failed to init backend"))]
InitBackend {
#[snafu(implicit)]
location: Location,
#[snafu(source)]
error: ObjectStoreError,
},
#[snafu(display("Covert column schemas to defs failed"))]
CovertColumnSchemasToDefs {
#[snafu(implicit)]
location: Location,
source: operator::error::Error,
},
#[snafu(display("Failed to send request to datanode: {}", peer))]
SendRequestToDatanode {
peer: Peer,
#[snafu(implicit)]
location: Location,
source: common_meta::error::Error,
},
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -244,9 +320,9 @@ pub type Result<T> = std::result::Result<T, Error>;
impl ErrorExt for Error {
fn status_code(&self) -> StatusCode {
match self {
Error::InitMetadata { source, .. } | Error::InitDdlManager { source, .. } => {
source.status_code()
}
Error::InitMetadata { source, .. }
| Error::InitDdlManager { source, .. }
| Error::TableMetadata { source, .. } => source.status_code(),
Error::MissingConfig { .. }
| Error::LoadLayeredConfig { .. }
@@ -255,8 +331,14 @@ impl ErrorExt for Error {
| Error::ConnectEtcd { .. }
| Error::CreateDir { .. }
| Error::EmptyResult { .. }
| Error::InvalidFilePath { .. }
| Error::UnsupportedMemoryBackend { .. }
| Error::InvalidArguments { .. }
| Error::ParseProxyOpts { .. } => StatusCode::InvalidArguments,
Error::CovertColumnSchemasToDefs { source, .. } => source.status_code(),
Error::SendRequestToDatanode { source, .. } => source.status_code(),
Error::StartProcedureManager { source, .. }
| Error::StopProcedureManager { source, .. } => source.status_code(),
Error::StartWalOptionsAllocator { source, .. } => source.status_code(),
@@ -264,6 +346,7 @@ impl ErrorExt for Error {
Error::ParseSql { source, .. } | Error::PlanStatement { source, .. } => {
source.status_code()
}
Error::Unexpected { .. } => StatusCode::Unexpected,
Error::SerdeJson { .. }
| Error::FileIo { .. }
@@ -272,14 +355,16 @@ impl ErrorExt for Error {
| Error::BuildClient { .. } => StatusCode::Unexpected,
Error::Other { source, .. } => source.status_code(),
Error::OpenDal { .. } => StatusCode::Internal,
Error::S3ConfigNotSet { .. } => StatusCode::InvalidArguments,
Error::OutputDirNotSet { .. } => StatusCode::InvalidArguments,
Error::OpenDal { .. } | Error::InitBackend { .. } => StatusCode::Internal,
Error::S3ConfigNotSet { .. }
| Error::OutputDirNotSet { .. }
| Error::EmptyStoreAddrs { .. } => StatusCode::InvalidArguments,
Error::BuildRuntime { source, .. } => source.status_code(),
Error::CacheRequired { .. } | Error::BuildCacheRegistry { .. } => StatusCode::Internal,
Error::MetaClientInit { source, .. } => source.status_code(),
Error::TableNotFound { .. } => StatusCode::TableNotFound,
Error::SchemaNotFound { .. } => StatusCode::DatabaseNotFound,
}
}

View File

@@ -13,10 +13,10 @@
// limitations under the License.
mod bench;
mod data;
mod database;
pub mod error;
mod export;
mod import;
mod metadata;
use async_trait::async_trait;
use clap::Parser;
@@ -25,8 +25,8 @@ pub use database::DatabaseClient;
use error::Result;
pub use crate::bench::BenchTableMetadataCommand;
pub use crate::export::ExportCommand;
pub use crate::import::ImportCommand;
pub use crate::data::DataCommand;
pub use crate::metadata::MetadataCommand;
#[async_trait]
pub trait Tool: Send + Sync {

src/cli/src/metadata.rs (new file, 52 lines)
View File

@@ -0,0 +1,52 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
mod common;
mod control;
mod repair;
mod snapshot;
mod utils;
use clap::Subcommand;
use common_error::ext::BoxedError;
use crate::metadata::control::{DelCommand, GetCommand};
use crate::metadata::repair::RepairLogicalTablesCommand;
use crate::metadata::snapshot::SnapshotCommand;
use crate::Tool;
/// Command for managing metadata operations,
/// including saving and restoring metadata snapshots,
/// controlling metadata operations, and diagnosing and repairing metadata.
#[derive(Subcommand)]
pub enum MetadataCommand {
#[clap(subcommand)]
Snapshot(SnapshotCommand),
#[clap(subcommand)]
Get(GetCommand),
#[clap(subcommand)]
Del(DelCommand),
RepairLogicalTables(RepairLogicalTablesCommand),
}
impl MetadataCommand {
pub async fn build(&self) -> Result<Box<dyn Tool>, BoxedError> {
match self {
MetadataCommand::Snapshot(cmd) => cmd.build().await,
MetadataCommand::RepairLogicalTables(cmd) => cmd.build().await,
MetadataCommand::Get(cmd) => cmd.build().await,
MetadataCommand::Del(cmd) => cmd.build().await,
}
}
}

View File

@@ -0,0 +1,116 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::sync::Arc;
use clap::Parser;
use common_error::ext::BoxedError;
use common_meta::kv_backend::chroot::ChrootKvBackend;
use common_meta::kv_backend::etcd::EtcdStore;
use common_meta::kv_backend::KvBackendRef;
use meta_srv::bootstrap::create_etcd_client;
use meta_srv::metasrv::BackendImpl;
use crate::error::{EmptyStoreAddrsSnafu, UnsupportedMemoryBackendSnafu};
#[derive(Debug, Default, Parser)]
pub(crate) struct StoreConfig {
/// The endpoint of the store, one of etcd, postgres or mysql.
///
/// For postgres store, the format is:
/// "password=password dbname=postgres user=postgres host=localhost port=5432"
///
/// For etcd store, the format is:
/// "127.0.0.1:2379"
///
/// For mysql store, the format is:
/// "mysql://user:password@ip:port/dbname"
#[clap(long, alias = "store-addr", value_delimiter = ',', num_args = 1..)]
store_addrs: Vec<String>,
/// The maximum number of operations in a transaction. Only used when using [etcd-store].
#[clap(long, default_value = "128")]
max_txn_ops: usize,
/// The metadata store backend.
#[clap(long, value_enum, default_value = "etcd-store")]
backend: BackendImpl,
/// The key prefix of the metadata store.
#[clap(long, default_value = "")]
store_key_prefix: String,
/// The table name in RDS to store metadata. Only used when using [postgres-store] or [mysql-store].
#[cfg(any(feature = "pg_kvbackend", feature = "mysql_kvbackend"))]
#[clap(long, default_value = common_meta::kv_backend::DEFAULT_META_TABLE_NAME)]
meta_table_name: String,
}
impl StoreConfig {
/// Builds a [`KvBackendRef`] from the store configuration.
pub async fn build(&self) -> Result<KvBackendRef, BoxedError> {
let max_txn_ops = self.max_txn_ops;
let store_addrs = &self.store_addrs;
if store_addrs.is_empty() {
EmptyStoreAddrsSnafu.fail().map_err(BoxedError::new)
} else {
let kvbackend = match self.backend {
BackendImpl::EtcdStore => {
let etcd_client = create_etcd_client(store_addrs)
.await
.map_err(BoxedError::new)?;
Ok(EtcdStore::with_etcd_client(etcd_client, max_txn_ops))
}
#[cfg(feature = "pg_kvbackend")]
BackendImpl::PostgresStore => {
let table_name = &self.meta_table_name;
let pool = meta_srv::bootstrap::create_postgres_pool(store_addrs)
.await
.map_err(BoxedError::new)?;
Ok(common_meta::kv_backend::rds::PgStore::with_pg_pool(
pool,
table_name,
max_txn_ops,
)
.await
.map_err(BoxedError::new)?)
}
#[cfg(feature = "mysql_kvbackend")]
BackendImpl::MysqlStore => {
let table_name = &self.meta_table_name;
let pool = meta_srv::bootstrap::create_mysql_pool(store_addrs)
.await
.map_err(BoxedError::new)?;
Ok(common_meta::kv_backend::rds::MySqlStore::with_mysql_pool(
pool,
table_name,
max_txn_ops,
)
.await
.map_err(BoxedError::new)?)
}
BackendImpl::MemoryStore => UnsupportedMemoryBackendSnafu
.fail()
.map_err(BoxedError::new),
};
if self.store_key_prefix.is_empty() {
kvbackend
} else {
let chroot_kvbackend =
ChrootKvBackend::new(self.store_key_prefix.as_bytes().to_vec(), kvbackend?);
Ok(Arc::new(chroot_kvbackend))
}
}
}
}
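
A hedged usage sketch (flag names follow clap's default kebab-case renaming of the fields above; the enclosing subcommand and binary name are not part of this diff):

use clap::Parser;
use common_error::ext::BoxedError;
use common_meta::kv_backend::KvBackendRef;

// Parse store flags and build the kv backend; with these flags it targets etcd.
async fn connect_metadata_store() -> Result<KvBackendRef, BoxedError> {
    let cfg = StoreConfig::parse_from([
        "store",
        "--store-addrs",
        "127.0.0.1:2379,127.0.0.1:2380",
        "--backend",
        "etcd-store",
    ]);
    cfg.build().await
}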

View File

@@ -12,11 +12,11 @@
// See the License for the specific language governing permissions and
// limitations under the License.
mod geo_path;
mod hll;
mod uddsketch_state;
mod del;
mod get;
#[cfg(test)]
mod test_utils;
mod utils;
pub use geo_path::{GeoPathAccumulator, GEO_PATH_NAME};
pub(crate) use hll::HllStateType;
pub use hll::{HllState, HLL_MERGE_NAME, HLL_NAME};
pub use uddsketch_state::{UddSketchState, UDDSKETCH_MERGE_NAME, UDDSKETCH_STATE_NAME};
pub(crate) use del::DelCommand;
pub(crate) use get::GetCommand;

View File

@@ -0,0 +1,42 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
mod key;
mod table;
use clap::Subcommand;
use common_error::ext::BoxedError;
use crate::metadata::control::del::key::DelKeyCommand;
use crate::metadata::control::del::table::DelTableCommand;
use crate::Tool;
/// The prefix of the tombstone keys.
pub(crate) const CLI_TOMBSTONE_PREFIX: &str = "__cli_tombstone/";
/// Subcommand for deleting metadata from the metadata store.
#[derive(Subcommand)]
pub enum DelCommand {
Key(DelKeyCommand),
Table(DelTableCommand),
}
impl DelCommand {
pub async fn build(&self) -> Result<Box<dyn Tool>, BoxedError> {
match self {
DelCommand::Key(cmd) => cmd.build().await,
DelCommand::Table(cmd) => cmd.build().await,
}
}
}

View File

@@ -0,0 +1,132 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use async_trait::async_trait;
use clap::Parser;
use common_error::ext::BoxedError;
use common_meta::key::tombstone::TombstoneManager;
use common_meta::kv_backend::KvBackendRef;
use common_meta::rpc::store::RangeRequest;
use crate::metadata::common::StoreConfig;
use crate::metadata::control::del::CLI_TOMBSTONE_PREFIX;
use crate::Tool;
/// Delete key-value pairs logically from the metadata store.
#[derive(Debug, Default, Parser)]
pub struct DelKeyCommand {
/// The key to delete from the metadata store.
key: String,
/// Delete key-value pairs with the given prefix.
#[clap(long)]
prefix: bool,
#[clap(flatten)]
store: StoreConfig,
}
impl DelKeyCommand {
pub async fn build(&self) -> Result<Box<dyn Tool>, BoxedError> {
let kv_backend = self.store.build().await?;
Ok(Box::new(DelKeyTool {
key: self.key.to_string(),
prefix: self.prefix,
key_deleter: KeyDeleter::new(kv_backend),
}))
}
}
struct KeyDeleter {
kv_backend: KvBackendRef,
tombstone_manager: TombstoneManager,
}
impl KeyDeleter {
fn new(kv_backend: KvBackendRef) -> Self {
Self {
kv_backend: kv_backend.clone(),
tombstone_manager: TombstoneManager::new_with_prefix(kv_backend, CLI_TOMBSTONE_PREFIX),
}
}
async fn delete(&self, key: &str, prefix: bool) -> Result<usize, BoxedError> {
let mut req = RangeRequest::default().with_keys_only();
if prefix {
req = req.with_prefix(key.as_bytes());
} else {
req = req.with_key(key.as_bytes());
}
let resp = self.kv_backend.range(req).await.map_err(BoxedError::new)?;
let keys = resp.kvs.iter().map(|kv| kv.key.clone()).collect::<Vec<_>>();
self.tombstone_manager
.create(keys)
.await
.map_err(BoxedError::new)
}
}
struct DelKeyTool {
key: String,
prefix: bool,
key_deleter: KeyDeleter,
}
#[async_trait]
impl Tool for DelKeyTool {
async fn do_work(&self) -> Result<(), BoxedError> {
let deleted = self.key_deleter.delete(&self.key, self.prefix).await?;
// Print the number of deleted keys.
println!("{}", deleted);
Ok(())
}
}
#[cfg(test)]
mod tests {
use std::sync::Arc;
use common_meta::kv_backend::chroot::ChrootKvBackend;
use common_meta::kv_backend::memory::MemoryKvBackend;
use common_meta::kv_backend::{KvBackend, KvBackendRef};
use common_meta::rpc::store::RangeRequest;
use crate::metadata::control::del::key::KeyDeleter;
use crate::metadata::control::del::CLI_TOMBSTONE_PREFIX;
use crate::metadata::control::test_utils::put_key;
#[tokio::test]
async fn test_delete_keys() {
let kv_backend = Arc::new(MemoryKvBackend::new()) as KvBackendRef;
let key_deleter = KeyDeleter::new(kv_backend.clone());
put_key(&kv_backend, "foo", "bar").await;
put_key(&kv_backend, "foo/bar", "baz").await;
put_key(&kv_backend, "foo/baz", "qux").await;
let deleted = key_deleter.delete("foo", true).await.unwrap();
assert_eq!(deleted, 3);
let deleted = key_deleter.delete("foo/bar", false).await.unwrap();
assert_eq!(deleted, 0);
let chroot = ChrootKvBackend::new(CLI_TOMBSTONE_PREFIX.as_bytes().to_vec(), kv_backend);
let req = RangeRequest::default().with_prefix(b"foo");
let resp = chroot.range(req).await.unwrap();
assert_eq!(resp.kvs.len(), 3);
assert_eq!(resp.kvs[0].key, b"foo");
assert_eq!(resp.kvs[0].value, b"bar");
assert_eq!(resp.kvs[1].key, b"foo/bar");
assert_eq!(resp.kvs[1].value, b"baz");
assert_eq!(resp.kvs[2].key, b"foo/baz");
assert_eq!(resp.kvs[2].value, b"qux");
}
}

View File

@@ -0,0 +1,235 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use async_trait::async_trait;
use clap::Parser;
use client::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_catalog::format_full_table_name;
use common_error::ext::BoxedError;
use common_meta::ddl::utils::get_region_wal_options;
use common_meta::key::table_name::TableNameManager;
use common_meta::key::TableMetadataManager;
use common_meta::kv_backend::KvBackendRef;
use store_api::storage::TableId;
use crate::error::{InvalidArgumentsSnafu, TableNotFoundSnafu};
use crate::metadata::common::StoreConfig;
use crate::metadata::control::del::CLI_TOMBSTONE_PREFIX;
use crate::metadata::control::utils::get_table_id_by_name;
use crate::Tool;
/// Delete table metadata logically from the metadata store.
#[derive(Debug, Default, Parser)]
pub struct DelTableCommand {
/// The table id to delete from the metadata store.
#[clap(long)]
table_id: Option<u32>,
/// The table name to delete from the metadata store.
#[clap(long)]
table_name: Option<String>,
/// The schema name of the table.
#[clap(long, default_value = DEFAULT_SCHEMA_NAME)]
schema_name: String,
/// The catalog name of the table.
#[clap(long, default_value = DEFAULT_CATALOG_NAME)]
catalog_name: String,
#[clap(flatten)]
store: StoreConfig,
}
impl DelTableCommand {
fn validate(&self) -> Result<(), BoxedError> {
if matches!(
(&self.table_id, &self.table_name),
(Some(_), Some(_)) | (None, None)
) {
return Err(BoxedError::new(
InvalidArgumentsSnafu {
msg: "You must specify either --table-id or --table-name.",
}
.build(),
));
}
Ok(())
}
}
impl DelTableCommand {
pub async fn build(&self) -> Result<Box<dyn Tool>, BoxedError> {
self.validate()?;
let kv_backend = self.store.build().await?;
Ok(Box::new(DelTableTool {
table_id: self.table_id,
table_name: self.table_name.clone(),
schema_name: self.schema_name.clone(),
catalog_name: self.catalog_name.clone(),
table_name_manager: TableNameManager::new(kv_backend.clone()),
table_metadata_deleter: TableMetadataDeleter::new(kv_backend),
}))
}
}
struct DelTableTool {
table_id: Option<u32>,
table_name: Option<String>,
schema_name: String,
catalog_name: String,
table_name_manager: TableNameManager,
table_metadata_deleter: TableMetadataDeleter,
}
#[async_trait]
impl Tool for DelTableTool {
async fn do_work(&self) -> Result<(), BoxedError> {
let table_id = if let Some(table_name) = &self.table_name {
let catalog_name = &self.catalog_name;
let schema_name = &self.schema_name;
let Some(table_id) = get_table_id_by_name(
&self.table_name_manager,
catalog_name,
schema_name,
table_name,
)
.await?
else {
println!(
"Table({}) not found",
format_full_table_name(catalog_name, schema_name, table_name)
);
return Ok(());
};
table_id
} else {
// Safety: we have validated that table_id or table_name is not None
self.table_id.unwrap()
};
self.table_metadata_deleter.delete(table_id).await?;
println!("Table({}) deleted", table_id);
Ok(())
}
}
struct TableMetadataDeleter {
table_metadata_manager: TableMetadataManager,
}
impl TableMetadataDeleter {
fn new(kv_backend: KvBackendRef) -> Self {
Self {
table_metadata_manager: TableMetadataManager::new_with_custom_tombstone_prefix(
kv_backend,
CLI_TOMBSTONE_PREFIX,
),
}
}
async fn delete(&self, table_id: TableId) -> Result<(), BoxedError> {
let (table_info, table_route) = self
.table_metadata_manager
.get_full_table_info(table_id)
.await
.map_err(BoxedError::new)?;
let Some(table_info) = table_info else {
return Err(BoxedError::new(TableNotFoundSnafu { table_id }.build()));
};
let Some(table_route) = table_route else {
return Err(BoxedError::new(TableNotFoundSnafu { table_id }.build()));
};
let physical_table_id = self
.table_metadata_manager
.table_route_manager()
.get_physical_table_id(table_id)
.await
.map_err(BoxedError::new)?;
let table_name = table_info.table_name();
let region_wal_options = get_region_wal_options(
&self.table_metadata_manager,
&table_route,
physical_table_id,
)
.await
.map_err(BoxedError::new)?;
self.table_metadata_manager
.delete_table_metadata(table_id, &table_name, &table_route, &region_wal_options)
.await
.map_err(BoxedError::new)?;
Ok(())
}
}
#[cfg(test)]
mod tests {
use std::collections::HashMap;
use std::sync::Arc;
use common_error::ext::ErrorExt;
use common_error::status_code::StatusCode;
use common_meta::key::table_route::TableRouteValue;
use common_meta::key::TableMetadataManager;
use common_meta::kv_backend::chroot::ChrootKvBackend;
use common_meta::kv_backend::memory::MemoryKvBackend;
use common_meta::kv_backend::{KvBackend, KvBackendRef};
use common_meta::rpc::store::RangeRequest;
use crate::metadata::control::del::table::TableMetadataDeleter;
use crate::metadata::control::del::CLI_TOMBSTONE_PREFIX;
use crate::metadata::control::test_utils::prepare_physical_table_metadata;
#[tokio::test]
async fn test_delete_table_not_found() {
let kv_backend = Arc::new(MemoryKvBackend::new()) as KvBackendRef;
let table_metadata_deleter = TableMetadataDeleter::new(kv_backend);
let table_id = 1;
let err = table_metadata_deleter.delete(table_id).await.unwrap_err();
assert_eq!(err.status_code(), StatusCode::TableNotFound);
}
#[tokio::test]
async fn test_delete_table_metadata() {
let kv_backend = Arc::new(MemoryKvBackend::new());
let table_metadata_manager = TableMetadataManager::new(kv_backend.clone());
let table_id = 1024;
let (table_info, table_route) = prepare_physical_table_metadata("my_table", table_id).await;
table_metadata_manager
.create_table_metadata(
table_info,
TableRouteValue::Physical(table_route),
HashMap::new(),
)
.await
.unwrap();
let total_keys = kv_backend.len();
assert!(total_keys > 0);
let table_metadata_deleter = TableMetadataDeleter::new(kv_backend.clone());
table_metadata_deleter.delete(table_id).await.unwrap();
// Check the tombstone keys are deleted
let chroot =
ChrootKvBackend::new(CLI_TOMBSTONE_PREFIX.as_bytes().to_vec(), kv_backend.clone());
let req = RangeRequest::default().with_range(vec![0], vec![0]);
let resp = chroot.range(req).await.unwrap();
assert_eq!(resp.kvs.len(), total_keys);
}
}

View File

@@ -0,0 +1,247 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::cmp::min;
use async_trait::async_trait;
use clap::{Parser, Subcommand};
use client::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_catalog::format_full_table_name;
use common_error::ext::BoxedError;
use common_meta::key::table_info::TableInfoKey;
use common_meta::key::table_route::TableRouteKey;
use common_meta::key::TableMetadataManager;
use common_meta::kv_backend::KvBackendRef;
use common_meta::range_stream::{PaginationStream, DEFAULT_PAGE_SIZE};
use common_meta::rpc::store::RangeRequest;
use futures::TryStreamExt;
use crate::error::InvalidArgumentsSnafu;
use crate::metadata::common::StoreConfig;
use crate::metadata::control::utils::{decode_key_value, get_table_id_by_name, json_fromatter};
use crate::Tool;
/// Get metadata from the metadata store.
#[derive(Subcommand)]
pub enum GetCommand {
Key(GetKeyCommand),
Table(GetTableCommand),
}
impl GetCommand {
pub async fn build(&self) -> Result<Box<dyn Tool>, BoxedError> {
match self {
GetCommand::Key(cmd) => cmd.build().await,
GetCommand::Table(cmd) => cmd.build().await,
}
}
}
/// Get key-value pairs from the metadata store.
#[derive(Debug, Default, Parser)]
pub struct GetKeyCommand {
/// The key to get from the metadata store.
#[clap(default_value = "")]
key: String,
/// Whether to perform a prefix query. If true, returns all key-value pairs where the key starts with the given prefix.
#[clap(long, default_value = "false")]
prefix: bool,
/// The maximum number of key-value pairs to return. If 0, returns all key-value pairs.
#[clap(long, default_value = "0")]
limit: u64,
#[clap(flatten)]
store: StoreConfig,
}
impl GetKeyCommand {
pub async fn build(&self) -> Result<Box<dyn Tool>, BoxedError> {
let kvbackend = self.store.build().await?;
Ok(Box::new(GetKeyTool {
kvbackend,
key: self.key.clone(),
prefix: self.prefix,
limit: self.limit,
}))
}
}
struct GetKeyTool {
kvbackend: KvBackendRef,
key: String,
prefix: bool,
limit: u64,
}
#[async_trait]
impl Tool for GetKeyTool {
async fn do_work(&self) -> Result<(), BoxedError> {
let mut req = RangeRequest::default();
if self.prefix {
req = req.with_prefix(self.key.as_bytes());
} else {
req = req.with_key(self.key.as_bytes());
}
let page_size = if self.limit > 0 {
min(self.limit as usize, DEFAULT_PAGE_SIZE)
} else {
DEFAULT_PAGE_SIZE
};
let pagination_stream =
PaginationStream::new(self.kvbackend.clone(), req, page_size, decode_key_value);
let mut stream = Box::pin(pagination_stream.into_stream());
let mut counter = 0;
while let Some((key, value)) = stream.try_next().await.map_err(BoxedError::new)? {
print!("{}\n{}\n", key, value);
counter += 1;
if self.limit > 0 && counter >= self.limit {
break;
}
}
Ok(())
}
}
/// Get table metadata from the metadata store via table id or table name.
#[derive(Debug, Default, Parser)]
pub struct GetTableCommand {
/// Get table metadata by table id.
#[clap(long)]
table_id: Option<u32>,
/// Get table metadata by table name.
#[clap(long)]
table_name: Option<String>,
/// The schema name of the table.
#[clap(long, default_value = DEFAULT_SCHEMA_NAME)]
schema_name: String,
/// The catalog name of the table.
#[clap(long, default_value = DEFAULT_CATALOG_NAME)]
catalog_name: String,
/// Pretty print the output.
#[clap(long, default_value = "false")]
pretty: bool,
#[clap(flatten)]
store: StoreConfig,
}
impl GetTableCommand {
pub fn validate(&self) -> Result<(), BoxedError> {
if matches!(
(&self.table_id, &self.table_name),
(Some(_), Some(_)) | (None, None)
) {
return Err(BoxedError::new(
InvalidArgumentsSnafu {
msg: "You must specify either --table-id or --table-name.",
}
.build(),
));
}
Ok(())
}
}
struct GetTableTool {
kvbackend: KvBackendRef,
table_id: Option<u32>,
table_name: Option<String>,
schema_name: String,
catalog_name: String,
pretty: bool,
}
#[async_trait]
impl Tool for GetTableTool {
async fn do_work(&self) -> Result<(), BoxedError> {
let table_metadata_manager = TableMetadataManager::new(self.kvbackend.clone());
let table_name_manager = table_metadata_manager.table_name_manager();
let table_info_manager = table_metadata_manager.table_info_manager();
let table_route_manager = table_metadata_manager.table_route_manager();
let table_id = if let Some(table_name) = &self.table_name {
let catalog_name = &self.catalog_name;
let schema_name = &self.schema_name;
let Some(table_id) =
get_table_id_by_name(table_name_manager, catalog_name, schema_name, table_name)
.await?
else {
println!(
"Table({}) not found",
format_full_table_name(catalog_name, schema_name, table_name)
);
return Ok(());
};
table_id
} else {
// Safety: we have validated that table_id or table_name is not None
self.table_id.unwrap()
};
let table_info = table_info_manager
.get(table_id)
.await
.map_err(BoxedError::new)?;
if let Some(table_info) = table_info {
println!(
"{}\n{}",
TableInfoKey::new(table_id),
json_fromatter(self.pretty, &*table_info)
);
} else {
println!("Table info not found");
}
let table_route = table_route_manager
.table_route_storage()
.get(table_id)
.await
.map_err(BoxedError::new)?;
if let Some(table_route) = table_route {
println!(
"{}\n{}",
TableRouteKey::new(table_id),
json_fromatter(self.pretty, &table_route)
);
} else {
println!("Table route not found");
}
Ok(())
}
}
impl GetTableCommand {
pub async fn build(&self) -> Result<Box<dyn Tool>, BoxedError> {
self.validate()?;
let kvbackend = self.store.build().await?;
Ok(Box::new(GetTableTool {
kvbackend,
table_id: self.table_id,
table_name: self.table_name.clone(),
schema_name: self.schema_name.clone(),
catalog_name: self.catalog_name.clone(),
pretty: self.pretty,
}))
}
}
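
A hedged end-to-end sketch (flag names follow the clap derives above; how the real CLI dispatches to this command is outside this diff):

use clap::Parser;
use common_error::ext::BoxedError;

// Look up a table's info and route by name against an etcd-backed metadata store.
async fn print_table_metadata() -> Result<(), BoxedError> {
    let cmd = GetTableCommand::parse_from([
        "table",
        "--table-name",
        "my_table",
        "--store-addrs",
        "127.0.0.1:2379",
    ]);
    let tool = cmd.build().await?;
    // Prints the TableInfoKey/TableRouteKey entries, or a "not found" message.
    tool.do_work().await
}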

View File

@@ -0,0 +1,51 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use common_meta::ddl::test_util::test_create_physical_table_task;
use common_meta::key::table_route::PhysicalTableRouteValue;
use common_meta::kv_backend::KvBackendRef;
use common_meta::peer::Peer;
use common_meta::rpc::router::{Region, RegionRoute};
use common_meta::rpc::store::PutRequest;
use store_api::storage::{RegionId, TableId};
use table::metadata::RawTableInfo;
/// Puts a key-value pair into the kv backend.
pub async fn put_key(kv_backend: &KvBackendRef, key: &str, value: &str) {
let put_req = PutRequest::new()
.with_key(key.as_bytes())
.with_value(value.as_bytes());
kv_backend.put(put_req).await.unwrap();
}
/// Prepares the physical table metadata for testing.
///
/// Returns the table info and the table route.
pub async fn prepare_physical_table_metadata(
table_name: &str,
table_id: TableId,
) -> (RawTableInfo, PhysicalTableRouteValue) {
let mut create_physical_table_task = test_create_physical_table_task(table_name);
let table_route = PhysicalTableRouteValue::new(vec![RegionRoute {
region: Region {
id: RegionId::new(table_id, 1),
..Default::default()
},
leader_peer: Some(Peer::empty(1)),
..Default::default()
}]);
create_physical_table_task.set_table_id(table_id);
(create_physical_table_task.table_info, table_route)
}

View File

@@ -0,0 +1,57 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use common_error::ext::BoxedError;
use common_meta::error::Result as CommonMetaResult;
use common_meta::key::table_name::{TableNameKey, TableNameManager};
use common_meta::rpc::KeyValue;
use serde::Serialize;
use store_api::storage::TableId;
/// Decodes a key-value pair into a string.
pub fn decode_key_value(kv: KeyValue) -> CommonMetaResult<(String, String)> {
let key = String::from_utf8_lossy(&kv.key).to_string();
let value = String::from_utf8_lossy(&kv.value).to_string();
Ok((key, value))
}
/// Formats a value as a JSON string.
pub fn json_fromatter<T>(pretty: bool, value: &T) -> String
where
T: Serialize,
{
if pretty {
serde_json::to_string_pretty(value).unwrap()
} else {
serde_json::to_string(value).unwrap()
}
}
/// Gets the table id by table name.
pub async fn get_table_id_by_name(
table_name_manager: &TableNameManager,
catalog_name: &str,
schema_name: &str,
table_name: &str,
) -> Result<Option<TableId>, BoxedError> {
let table_name_key = TableNameKey::new(catalog_name, schema_name, table_name);
let Some(table_name_value) = table_name_manager
.get(table_name_key)
.await
.map_err(BoxedError::new)?
else {
return Ok(None);
};
Ok(Some(table_name_value.table_id()))
}
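
Illustration (not part of the diff): a small sketch of `json_fromatter` applied to an arbitrary serializable value; the `Example` struct is hypothetical.

// Hypothetical serializable value used only for this sketch.
#[derive(Serialize)]
struct Example {
    table_id: u32,
    name: String,
}

#[allow(clippy::print_stdout)]
fn print_example(pretty: bool) {
    let value = Example {
        table_id: 1024,
        name: "my_table".to_string(),
    };
    // Compact JSON when `pretty` is false, indented JSON otherwise.
    println!("{}", json_fromatter(pretty, &value));
}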

View File

@@ -0,0 +1,368 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
mod alter_table;
mod create_table;
use std::sync::Arc;
use std::time::Duration;
use async_trait::async_trait;
use clap::Parser;
use client::api::v1::CreateTableExpr;
use client::client_manager::NodeClients;
use client::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_error::ext::{BoxedError, ErrorExt};
use common_error::status_code::StatusCode;
use common_grpc::channel_manager::ChannelConfig;
use common_meta::error::Error as CommonMetaError;
use common_meta::key::TableMetadataManager;
use common_meta::kv_backend::KvBackendRef;
use common_meta::node_manager::NodeManagerRef;
use common_meta::peer::Peer;
use common_meta::rpc::router::{find_leaders, RegionRoute};
use common_telemetry::{error, info, warn};
use futures::TryStreamExt;
use snafu::{ensure, ResultExt};
use store_api::storage::TableId;
use crate::error::{
InvalidArgumentsSnafu, Result, SendRequestToDatanodeSnafu, TableMetadataSnafu, UnexpectedSnafu,
};
use crate::metadata::common::StoreConfig;
use crate::metadata::utils::{FullTableMetadata, IteratorInput, TableMetadataIterator};
use crate::Tool;
/// Repair metadata of logical tables.
#[derive(Debug, Default, Parser)]
pub struct RepairLogicalTablesCommand {
/// The names of the tables to repair.
#[clap(long, value_delimiter = ',', alias = "table-name")]
table_names: Vec<String>,
/// The ids of the tables to repair.
#[clap(long, value_delimiter = ',', alias = "table-id")]
table_ids: Vec<TableId>,
/// The schema of the tables to repair.
#[clap(long, default_value = DEFAULT_SCHEMA_NAME)]
schema_name: String,
/// The catalog of the tables to repair.
#[clap(long, default_value = DEFAULT_CATALOG_NAME)]
catalog_name: String,
/// Whether to fail fast if any repair operation fails.
#[clap(long)]
fail_fast: bool,
#[clap(flatten)]
store: StoreConfig,
/// The timeout for client requests to the datanode.
#[clap(long, default_value_t = 30)]
client_timeout_secs: u64,
/// The timeout for the client to connect to the datanode.
#[clap(long, default_value_t = 3)]
client_connect_timeout_secs: u64,
}
impl RepairLogicalTablesCommand {
fn validate(&self) -> Result<()> {
ensure!(
!self.table_names.is_empty() || !self.table_ids.is_empty(),
InvalidArgumentsSnafu {
msg: "You must specify --table-names or --table-ids.",
}
);
Ok(())
}
}
impl RepairLogicalTablesCommand {
pub async fn build(&self) -> std::result::Result<Box<dyn Tool>, BoxedError> {
self.validate().map_err(BoxedError::new)?;
let kv_backend = self.store.build().await?;
let node_client_channel_config = ChannelConfig::new()
.timeout(Duration::from_secs(self.client_timeout_secs))
.connect_timeout(Duration::from_secs(self.client_connect_timeout_secs));
let node_manager = Arc::new(NodeClients::new(node_client_channel_config));
Ok(Box::new(RepairTool {
table_names: self.table_names.clone(),
table_ids: self.table_ids.clone(),
schema_name: self.schema_name.clone(),
catalog_name: self.catalog_name.clone(),
fail_fast: self.fail_fast,
kv_backend,
node_manager,
}))
}
}
struct RepairTool {
table_names: Vec<String>,
table_ids: Vec<TableId>,
schema_name: String,
catalog_name: String,
fail_fast: bool,
kv_backend: KvBackendRef,
node_manager: NodeManagerRef,
}
#[async_trait]
impl Tool for RepairTool {
async fn do_work(&self) -> std::result::Result<(), BoxedError> {
self.repair_tables().await.map_err(BoxedError::new)
}
}
impl RepairTool {
fn generate_iterator_input(&self) -> Result<IteratorInput> {
if !self.table_names.is_empty() {
let table_names = &self.table_names;
let catalog = &self.catalog_name;
let schema_name = &self.schema_name;
let table_names = table_names
.iter()
.map(|table_name| {
(
catalog.to_string(),
schema_name.to_string(),
table_name.to_string(),
)
})
.collect::<Vec<_>>();
return Ok(IteratorInput::new_table_names(table_names));
} else if !self.table_ids.is_empty() {
return Ok(IteratorInput::new_table_ids(self.table_ids.clone()));
};
InvalidArgumentsSnafu {
msg: "You must specify --table-names or --table-id.",
}
.fail()
}
async fn repair_tables(&self) -> Result<()> {
let input = self.generate_iterator_input()?;
let mut table_metadata_iterator =
Box::pin(TableMetadataIterator::new(self.kv_backend.clone(), input).into_stream());
let table_metadata_manager = TableMetadataManager::new(self.kv_backend.clone());
let mut skipped_table = 0;
let mut success_table = 0;
while let Some(full_table_metadata) = table_metadata_iterator.try_next().await? {
let full_table_name = full_table_metadata.full_table_name();
if !full_table_metadata.is_metric_engine() {
warn!(
"Skipping repair for non-metric engine table: {}",
full_table_name
);
skipped_table += 1;
continue;
}
if full_table_metadata.is_physical_table() {
warn!("Skipping repair for physical table: {}", full_table_name);
skipped_table += 1;
continue;
}
let (physical_table_id, physical_table_route) = table_metadata_manager
.table_route_manager()
.get_physical_table_route(full_table_metadata.table_id)
.await
.context(TableMetadataSnafu)?;
if let Err(err) = self
.repair_table(
&full_table_metadata,
physical_table_id,
&physical_table_route.region_routes,
)
.await
{
error!(
err;
"Failed to repair table: {}, skipped table: {}",
full_table_name,
skipped_table,
);
if self.fail_fast {
return Err(err);
}
} else {
success_table += 1;
}
}
info!(
"Repair logical tables result: {} tables repaired, {} tables skipped",
success_table, skipped_table
);
Ok(())
}
async fn alter_table_on_datanodes(
&self,
full_table_metadata: &FullTableMetadata,
physical_region_routes: &[RegionRoute],
) -> Result<Vec<(Peer, CommonMetaError)>> {
let logical_table_id = full_table_metadata.table_id;
let alter_table_expr = alter_table::generate_alter_table_expr_for_all_columns(
&full_table_metadata.table_info,
)?;
let node_manager = self.node_manager.clone();
let mut failed_peers = Vec::new();
info!(
"Sending alter table requests to all datanodes for table: {}, number of regions:{}.",
full_table_metadata.full_table_name(),
physical_region_routes.len()
);
let leaders = find_leaders(physical_region_routes);
for peer in &leaders {
let alter_table_request = alter_table::make_alter_region_request_for_peer(
logical_table_id,
&alter_table_expr,
peer,
physical_region_routes,
)?;
let datanode = node_manager.datanode(peer).await;
if let Err(err) = datanode.handle(alter_table_request).await {
failed_peers.push((peer.clone(), err));
}
}
Ok(failed_peers)
}
async fn create_table_on_datanode(
&self,
create_table_expr: &CreateTableExpr,
logical_table_id: TableId,
physical_table_id: TableId,
peer: &Peer,
physical_region_routes: &[RegionRoute],
) -> Result<()> {
let node_manager = self.node_manager.clone();
let datanode = node_manager.datanode(peer).await;
let create_table_request = create_table::make_create_region_request_for_peer(
logical_table_id,
physical_table_id,
create_table_expr,
peer,
physical_region_routes,
)?;
datanode
.handle(create_table_request)
.await
.with_context(|_| SendRequestToDatanodeSnafu { peer: peer.clone() })?;
Ok(())
}
async fn repair_table(
&self,
full_table_metadata: &FullTableMetadata,
physical_table_id: TableId,
physical_region_routes: &[RegionRoute],
) -> Result<()> {
let full_table_name = full_table_metadata.full_table_name();
// First, we send alter table requests with all columns to all datanodes.
let failed_peers = self
.alter_table_on_datanodes(full_table_metadata, physical_region_routes)
.await?;
if failed_peers.is_empty() {
info!(
"All alter table requests sent successfully for table: {}",
full_table_name
);
return Ok(());
}
warn!(
"Sending alter table requests to datanodes for table: {} failed for the datanodes: {:?}",
full_table_name,
failed_peers.iter().map(|(peer, _)| peer.id).collect::<Vec<_>>()
);
let create_table_expr =
create_table::generate_create_table_expr(&full_table_metadata.table_info)?;
let mut errors = Vec::new();
for (peer, err) in failed_peers {
if err.status_code() != StatusCode::RegionNotFound {
error!(
err;
"Sending alter table requests to datanode: {} for table: {} failed",
peer.id,
full_table_name,
);
continue;
}
info!(
"Region not found for table: {}, datanode: {}, trying to create the logical table on that datanode",
full_table_name,
peer.id
);
// If the alter table request fails for any datanode, we attempt to create the table on that datanode
// as a fallback mechanism to ensure table consistency across the cluster.
if let Err(err) = self
.create_table_on_datanode(
&create_table_expr,
full_table_metadata.table_id,
physical_table_id,
&peer,
physical_region_routes,
)
.await
{
error!(
err;
"Failed to create table on datanode: {} for table: {}",
peer.id, full_table_name
);
errors.push(err);
if self.fail_fast {
break;
}
} else {
info!(
"Created table on datanode: {} for table: {}",
peer.id, full_table_name
);
}
}
if !errors.is_empty() {
return UnexpectedSnafu {
msg: format!(
"Failed to create table on datanodes for table: {}",
full_table_name,
),
}
.fail();
}
Ok(())
}
}
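
Illustration (not part of the diff): since `RepairLogicalTablesCommand` derives clap's `Parser`, it can be built from argument strings for a quick sanity check. The leading token is a placeholder program name, the table names are examples, and the flattened `StoreConfig` flags are omitted on the assumption that they have defaults.

// Hypothetical sketch of parsing repair arguments with `Parser::parse_from`.
fn example_repair_args() -> RepairLogicalTablesCommand {
    RepairLogicalTablesCommand::parse_from([
        "repair-logical-tables",
        "--table-names",
        "t1,t2",
        "--schema-name",
        "public",
        "--fail-fast",
    ])
}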

View File

@@ -0,0 +1,84 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use client::api::v1::alter_table_expr::Kind;
use client::api::v1::region::{region_request, AlterRequests, RegionRequest, RegionRequestHeader};
use client::api::v1::{AddColumn, AddColumns, AlterTableExpr};
use common_meta::ddl::alter_logical_tables::make_alter_region_request;
use common_meta::peer::Peer;
use common_meta::rpc::router::{find_leader_regions, RegionRoute};
use operator::expr_helper::column_schemas_to_defs;
use snafu::ResultExt;
use store_api::storage::{RegionId, TableId};
use table::metadata::RawTableInfo;
use crate::error::{CovertColumnSchemasToDefsSnafu, Result};
/// Generates alter table expression for all columns.
pub fn generate_alter_table_expr_for_all_columns(
table_info: &RawTableInfo,
) -> Result<AlterTableExpr> {
let schema = &table_info.meta.schema;
let mut alter_table_expr = AlterTableExpr {
catalog_name: table_info.catalog_name.to_string(),
schema_name: table_info.schema_name.to_string(),
table_name: table_info.name.to_string(),
..Default::default()
};
let primary_keys = table_info
.meta
.primary_key_indices
.iter()
.map(|i| schema.column_schemas[*i].name.clone())
.collect::<Vec<_>>();
let add_columns = column_schemas_to_defs(schema.column_schemas.clone(), &primary_keys)
.context(CovertColumnSchemasToDefsSnafu)?;
alter_table_expr.kind = Some(Kind::AddColumns(AddColumns {
add_columns: add_columns
.into_iter()
.map(|col| AddColumn {
column_def: Some(col),
location: None,
add_if_not_exists: true,
})
.collect(),
}));
Ok(alter_table_expr)
}
/// Makes an alter region request for a peer.
pub fn make_alter_region_request_for_peer(
logical_table_id: TableId,
alter_table_expr: &AlterTableExpr,
peer: &Peer,
region_routes: &[RegionRoute],
) -> Result<RegionRequest> {
let regions_on_this_peer = find_leader_regions(region_routes, peer);
let mut requests = Vec::with_capacity(regions_on_this_peer.len());
for region_number in &regions_on_this_peer {
let region_id = RegionId::new(logical_table_id, *region_number);
let request = make_alter_region_request(region_id, alter_table_expr);
requests.push(request);
}
Ok(RegionRequest {
header: Some(RegionRequestHeader::default()),
body: Some(region_request::Body::Alters(AlterRequests { requests })),
})
}
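
Illustration (not part of the diff): a hedged sketch of how the generated expression could be inspected; because every column is added with `add_if_not_exists = true`, replaying the request on a datanode that already has the columns is harmless.

// Counts the columns that the repair expression would (re)add.
fn count_add_columns(table_info: &RawTableInfo) -> Result<usize> {
    let expr = generate_alter_table_expr_for_all_columns(table_info)?;
    match expr.kind {
        Some(Kind::AddColumns(add_columns)) => Ok(add_columns.add_columns.len()),
        _ => Ok(0),
    }
}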

View File

@@ -0,0 +1,89 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::HashMap;
use client::api::v1::region::{region_request, CreateRequests, RegionRequest, RegionRequestHeader};
use client::api::v1::CreateTableExpr;
use common_meta::ddl::create_logical_tables::create_region_request_builder;
use common_meta::ddl::utils::region_storage_path;
use common_meta::peer::Peer;
use common_meta::rpc::router::{find_leader_regions, RegionRoute};
use operator::expr_helper::column_schemas_to_defs;
use snafu::ResultExt;
use store_api::storage::{RegionId, TableId};
use table::metadata::RawTableInfo;
use crate::error::{CovertColumnSchemasToDefsSnafu, Result};
/// Generates a `CreateTableExpr` from a `RawTableInfo`.
pub fn generate_create_table_expr(table_info: &RawTableInfo) -> Result<CreateTableExpr> {
let schema = &table_info.meta.schema;
let primary_keys = table_info
.meta
.primary_key_indices
.iter()
.map(|i| schema.column_schemas[*i].name.clone())
.collect::<Vec<_>>();
let timestamp_index = schema.timestamp_index.as_ref().unwrap();
let time_index = schema.column_schemas[*timestamp_index].name.clone();
let column_defs = column_schemas_to_defs(schema.column_schemas.clone(), &primary_keys)
.context(CovertColumnSchemasToDefsSnafu)?;
let table_options = HashMap::from(&table_info.meta.options);
Ok(CreateTableExpr {
catalog_name: table_info.catalog_name.to_string(),
schema_name: table_info.schema_name.to_string(),
table_name: table_info.name.to_string(),
desc: String::default(),
column_defs,
time_index,
primary_keys,
create_if_not_exists: true,
table_options,
table_id: None,
engine: table_info.meta.engine.to_string(),
})
}
/// Makes a create region request for a peer.
pub fn make_create_region_request_for_peer(
logical_table_id: TableId,
physical_table_id: TableId,
create_table_expr: &CreateTableExpr,
peer: &Peer,
region_routes: &[RegionRoute],
) -> Result<RegionRequest> {
let regions_on_this_peer = find_leader_regions(region_routes, peer);
let mut requests = Vec::with_capacity(regions_on_this_peer.len());
let request_builder =
create_region_request_builder(create_table_expr, physical_table_id).unwrap();
let catalog = &create_table_expr.catalog_name;
let schema = &create_table_expr.schema_name;
let storage_path = region_storage_path(catalog, schema);
for region_number in &regions_on_this_peer {
let region_id = RegionId::new(logical_table_id, *region_number);
let region_request =
request_builder.build_one(region_id, storage_path.clone(), &HashMap::new());
requests.push(region_request);
}
Ok(RegionRequest {
header: Some(RegionRequestHeader::default()),
body: Some(region_request::Body::Creates(CreateRequests { requests })),
})
}
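
Illustration (not part of the diff): a short sketch of regenerating the create expression; `create_if_not_exists` keeps the fallback create path idempotent on datanodes that already host the logical regions.

// Regenerates the create expression and double-checks its idempotence flag.
fn regenerate_create_expr(table_info: &RawTableInfo) -> Result<CreateTableExpr> {
    let expr = generate_create_table_expr(table_info)?;
    debug_assert!(expr.create_if_not_exists);
    Ok(expr)
}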

View File

@@ -0,0 +1,363 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::path::Path;
use async_trait::async_trait;
use clap::{Parser, Subcommand};
use common_base::secrets::{ExposeSecret, SecretString};
use common_error::ext::BoxedError;
use common_meta::snapshot::MetadataSnapshotManager;
use object_store::services::{Fs, S3};
use object_store::ObjectStore;
use snafu::{OptionExt, ResultExt};
use crate::error::{InvalidFilePathSnafu, OpenDalSnafu, S3ConfigNotSetSnafu};
use crate::metadata::common::StoreConfig;
use crate::Tool;
/// Subcommand for metadata snapshot operations, including saving snapshots, restoring from snapshots, and viewing snapshot information.
#[derive(Subcommand)]
pub enum SnapshotCommand {
/// Save a snapshot of the current metadata state to a specified location.
Save(SaveCommand),
/// Restore metadata from a snapshot.
Restore(RestoreCommand),
/// Explore metadata from a snapshot.
Info(InfoCommand),
}
impl SnapshotCommand {
pub async fn build(&self) -> Result<Box<dyn Tool>, BoxedError> {
match self {
SnapshotCommand::Save(cmd) => cmd.build().await,
SnapshotCommand::Restore(cmd) => cmd.build().await,
SnapshotCommand::Info(cmd) => cmd.build().await,
}
}
}
// TODO(qtang): Abstract a generic S3 config shared by the export/import/restore meta snapshot tools.
#[derive(Debug, Default, Parser)]
struct S3Config {
/// Whether to use S3 as the output destination. Defaults to false.
#[clap(long, default_value = "false")]
s3: bool,
/// The s3 bucket name.
#[clap(long)]
s3_bucket: Option<String>,
/// The s3 region.
#[clap(long)]
s3_region: Option<String>,
/// The s3 access key.
#[clap(long)]
s3_access_key: Option<SecretString>,
/// The s3 secret key.
#[clap(long)]
s3_secret_key: Option<SecretString>,
/// The S3 endpoint. If not set, the default endpoint for the region is used automatically.
#[clap(long)]
s3_endpoint: Option<String>,
}
impl S3Config {
pub fn build(&self, root: &str) -> Result<Option<ObjectStore>, BoxedError> {
if !self.s3 {
Ok(None)
} else {
if self.s3_region.is_none()
|| self.s3_access_key.is_none()
|| self.s3_secret_key.is_none()
|| self.s3_bucket.is_none()
{
return S3ConfigNotSetSnafu.fail().map_err(BoxedError::new);
}
// Safety: unwrap is safe because the options have been checked above.
let mut config = S3::default()
.bucket(self.s3_bucket.as_ref().unwrap())
.region(self.s3_region.as_ref().unwrap())
.access_key_id(self.s3_access_key.as_ref().unwrap().expose_secret())
.secret_access_key(self.s3_secret_key.as_ref().unwrap().expose_secret());
if !root.is_empty() && root != "." {
config = config.root(root);
}
if let Some(endpoint) = &self.s3_endpoint {
config = config.endpoint(endpoint);
}
Ok(Some(
ObjectStore::new(config)
.context(OpenDalSnafu)
.map_err(BoxedError::new)?
.finish(),
))
}
}
}
/// Export metadata snapshot tool.
/// This tool exports a metadata snapshot from etcd, PostgreSQL, or MySQL,
/// dumping it to a local file or an S3 bucket.
/// The snapshot file is in binary format.
#[derive(Debug, Default, Parser)]
pub struct SaveCommand {
/// The store configuration.
#[clap(flatten)]
store: StoreConfig,
/// The s3 config.
#[clap(flatten)]
s3_config: S3Config,
/// The name of the target snapshot file. The file extension is added automatically.
#[clap(long, default_value = "metadata_snapshot")]
file_name: String,
/// The directory to store the snapshot file.
/// If the target output is an S3 bucket, this is the root directory in the bucket.
/// If the target output is a local file, this is the local directory.
#[clap(long, default_value = "")]
output_dir: String,
}
fn create_local_file_object_store(root: &str) -> Result<ObjectStore, BoxedError> {
let root = if root.is_empty() { "." } else { root };
let object_store = ObjectStore::new(Fs::default().root(root))
.context(OpenDalSnafu)
.map_err(BoxedError::new)?
.finish();
Ok(object_store)
}
impl SaveCommand {
pub async fn build(&self) -> Result<Box<dyn Tool>, BoxedError> {
let kvbackend = self.store.build().await?;
let output_dir = &self.output_dir;
let object_store = self.s3_config.build(output_dir).map_err(BoxedError::new)?;
if let Some(store) = object_store {
let tool = MetaSnapshotTool {
inner: MetadataSnapshotManager::new(kvbackend, store),
target_file: self.file_name.clone(),
};
Ok(Box::new(tool))
} else {
let object_store = create_local_file_object_store(output_dir)?;
let tool = MetaSnapshotTool {
inner: MetadataSnapshotManager::new(kvbackend, object_store),
target_file: self.file_name.clone(),
};
Ok(Box::new(tool))
}
}
}
struct MetaSnapshotTool {
inner: MetadataSnapshotManager,
target_file: String,
}
#[async_trait]
impl Tool for MetaSnapshotTool {
async fn do_work(&self) -> std::result::Result<(), BoxedError> {
self.inner
.dump("", &self.target_file)
.await
.map_err(BoxedError::new)?;
Ok(())
}
}
/// Restore metadata from a snapshot file.
///
/// This command restores the metadata state from a previously saved snapshot.
/// The snapshot can be loaded from either a local file system or an S3 bucket,
/// depending on the provided configuration.
#[derive(Debug, Default, Parser)]
pub struct RestoreCommand {
/// The store configuration.
#[clap(flatten)]
store: StoreConfig,
/// The s3 config.
#[clap(flatten)]
s3_config: S3Config,
/// The name of the target snapshot file.
#[clap(long, default_value = "metadata_snapshot.metadata.fb")]
file_name: String,
/// The directory containing the snapshot file to restore.
#[clap(long, default_value = ".")]
input_dir: String,
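/// Whether to restore the metadata snapshot even if the target store is not clean.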
#[clap(long, default_value = "false")]
force: bool,
}
impl RestoreCommand {
pub async fn build(&self) -> Result<Box<dyn Tool>, BoxedError> {
let kvbackend = self.store.build().await?;
let input_dir = &self.input_dir;
let object_store = self.s3_config.build(input_dir).map_err(BoxedError::new)?;
if let Some(store) = object_store {
let tool = MetaRestoreTool::new(
MetadataSnapshotManager::new(kvbackend, store),
self.file_name.clone(),
self.force,
);
Ok(Box::new(tool))
} else {
let object_store = create_local_file_object_store(input_dir)?;
let tool = MetaRestoreTool::new(
MetadataSnapshotManager::new(kvbackend, object_store),
self.file_name.clone(),
self.force,
);
Ok(Box::new(tool))
}
}
}
struct MetaRestoreTool {
inner: MetadataSnapshotManager,
source_file: String,
force: bool,
}
impl MetaRestoreTool {
pub fn new(inner: MetadataSnapshotManager, source_file: String, force: bool) -> Self {
Self {
inner,
source_file,
force,
}
}
}
#[async_trait]
impl Tool for MetaRestoreTool {
async fn do_work(&self) -> std::result::Result<(), BoxedError> {
let clean = self
.inner
.check_target_source_clean()
.await
.map_err(BoxedError::new)?;
if clean {
common_telemetry::info!(
"The target source is clean, we will restore the metadata snapshot."
);
self.inner
.restore(&self.source_file)
.await
.map_err(BoxedError::new)?;
Ok(())
} else if !self.force {
common_telemetry::warn!(
"The target source is not clean, if you want to restore the metadata snapshot forcefully, please use --force option."
);
Ok(())
} else {
common_telemetry::info!("The target source is not clean, We will restore the metadata snapshot with --force.");
self.inner
.restore(&self.source_file)
.await
.map_err(BoxedError::new)?;
Ok(())
}
}
}
/// Explore metadata from a snapshot file.
///
/// This command allows filtering the metadata by a specific key and limiting the number of results.
/// It prints the filtered metadata to the console.
#[derive(Debug, Default, Parser)]
pub struct InfoCommand {
/// The s3 config.
#[clap(flatten)]
s3_config: S3Config,
/// The name of the target snapshot file. The file extension is added automatically.
#[clap(long, default_value = "metadata_snapshot")]
file_name: String,
/// The query string to filter the metadata.
#[clap(long, default_value = "*")]
inspect_key: String,
/// The limit of the metadata to query.
#[clap(long)]
limit: Option<usize>,
}
struct MetaInfoTool {
inner: ObjectStore,
source_file: String,
inspect_key: String,
limit: Option<usize>,
}
#[async_trait]
impl Tool for MetaInfoTool {
#[allow(clippy::print_stdout)]
async fn do_work(&self) -> std::result::Result<(), BoxedError> {
let result = MetadataSnapshotManager::info(
&self.inner,
&self.source_file,
&self.inspect_key,
self.limit,
)
.await
.map_err(BoxedError::new)?;
for item in result {
println!("{}", item);
}
Ok(())
}
}
impl InfoCommand {
fn decide_object_store_root_for_local_store(
file_path: &str,
) -> Result<(&str, &str), BoxedError> {
let path = Path::new(file_path);
let parent = path
.parent()
.and_then(|p| p.to_str())
.context(InvalidFilePathSnafu { msg: file_path })
.map_err(BoxedError::new)?;
let file_name = path
.file_name()
.and_then(|f| f.to_str())
.context(InvalidFilePathSnafu { msg: file_path })
.map_err(BoxedError::new)?;
let root = if parent.is_empty() { "." } else { parent };
Ok((root, file_name))
}
pub async fn build(&self) -> Result<Box<dyn Tool>, BoxedError> {
let object_store = self.s3_config.build("").map_err(BoxedError::new)?;
if let Some(store) = object_store {
let tool = MetaInfoTool {
inner: store,
source_file: self.file_name.clone(),
inspect_key: self.inspect_key.clone(),
limit: self.limit,
};
Ok(Box::new(tool))
} else {
let (root, file_name) =
Self::decide_object_store_root_for_local_store(&self.file_name)?;
let object_store = create_local_file_object_store(root)?;
let tool = MetaInfoTool {
inner: object_store,
source_file: file_name.to_string(),
inspect_key: self.inspect_key.clone(),
limit: self.limit,
};
Ok(Box::new(tool))
}
}
}
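
Illustration (not part of the diff): the root/file-name split applied to a local snapshot path can be sketched with `std::path` directly; the path below is hypothetical.

// Mirrors the split used for local files: the parent directory becomes the
// object store root ("." when the path has no parent), and the file name is kept as-is.
fn split_local_path_example() {
    let path = Path::new("backups/metadata_snapshot.metadata.fb");
    let parent = path.parent().and_then(|p| p.to_str()).unwrap_or("");
    let file_name = path.file_name().and_then(|f| f.to_str()).unwrap();
    let root = if parent.is_empty() { "." } else { parent };
    assert_eq!(root, "backups");
    assert_eq!(file_name, "metadata_snapshot.metadata.fb");
}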

View File

@@ -0,0 +1,178 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::VecDeque;
use async_stream::try_stream;
use common_catalog::consts::METRIC_ENGINE;
use common_catalog::format_full_table_name;
use common_meta::key::table_name::TableNameKey;
use common_meta::key::table_route::TableRouteValue;
use common_meta::key::TableMetadataManager;
use common_meta::kv_backend::KvBackendRef;
use futures::Stream;
use snafu::{OptionExt, ResultExt};
use store_api::storage::TableId;
use table::metadata::RawTableInfo;
use crate::error::{Result, TableMetadataSnafu, UnexpectedSnafu};
/// The input for the iterator.
pub enum IteratorInput {
TableIds(VecDeque<TableId>),
TableNames(VecDeque<(String, String, String)>),
}
impl IteratorInput {
/// Creates a new iterator input from a list of table ids.
pub fn new_table_ids(table_ids: Vec<TableId>) -> Self {
Self::TableIds(table_ids.into())
}
/// Creates a new iterator input from a list of table names.
pub fn new_table_names(table_names: Vec<(String, String, String)>) -> Self {
Self::TableNames(table_names.into())
}
}
/// An iterator for retrieving table metadata from the metadata store.
///
/// This struct provides functionality to iterate over table metadata based on
/// either [`TableId`] and their associated regions or fully qualified table names.
pub struct TableMetadataIterator {
input: IteratorInput,
table_metadata_manager: TableMetadataManager,
}
/// The full table metadata.
pub struct FullTableMetadata {
pub table_id: TableId,
pub table_info: RawTableInfo,
pub table_route: TableRouteValue,
}
impl FullTableMetadata {
/// Returns true if it's [TableRouteValue::Physical].
pub fn is_physical_table(&self) -> bool {
self.table_route.is_physical()
}
/// Returns true if it's a metric engine table.
pub fn is_metric_engine(&self) -> bool {
self.table_info.meta.engine == METRIC_ENGINE
}
/// Returns the full table name.
pub fn full_table_name(&self) -> String {
format_full_table_name(
&self.table_info.catalog_name,
&self.table_info.schema_name,
&self.table_info.name,
)
}
}
impl TableMetadataIterator {
pub fn new(kvbackend: KvBackendRef, input: IteratorInput) -> Self {
let table_metadata_manager = TableMetadataManager::new(kvbackend);
Self {
input,
table_metadata_manager,
}
}
/// Returns the next table metadata.
///
/// This method handles two types of inputs:
/// - TableIds: Returns metadata for a specific [`TableId`].
/// - TableNames: Returns metadata for a table identified by its full name (catalog.schema.table).
///
/// Returns `None` when there are no more tables to process.
pub async fn next(&mut self) -> Result<Option<FullTableMetadata>> {
match &mut self.input {
IteratorInput::TableIds(table_ids) => {
if let Some(table_id) = table_ids.pop_front() {
let full_table_metadata = self.get_table_metadata(table_id).await?;
return Ok(Some(full_table_metadata));
}
}
IteratorInput::TableNames(table_names) => {
if let Some(full_table_name) = table_names.pop_front() {
let table_id = self.get_table_id_by_name(full_table_name).await?;
let full_table_metadata = self.get_table_metadata(table_id).await?;
return Ok(Some(full_table_metadata));
}
}
}
Ok(None)
}
/// Converts the iterator into a stream of table metadata.
pub fn into_stream(mut self) -> impl Stream<Item = Result<FullTableMetadata>> {
try_stream!({
while let Some(full_table_metadata) = self.next().await? {
yield full_table_metadata;
}
})
}
async fn get_table_id_by_name(
&mut self,
(catalog_name, schema_name, table_name): (String, String, String),
) -> Result<TableId> {
let key = TableNameKey::new(&catalog_name, &schema_name, &table_name);
let table_id = self
.table_metadata_manager
.table_name_manager()
.get(key)
.await
.context(TableMetadataSnafu)?
.with_context(|| UnexpectedSnafu {
msg: format!(
"Table not found: {}",
format_full_table_name(&catalog_name, &schema_name, &table_name)
),
})?
.table_id();
Ok(table_id)
}
async fn get_table_metadata(&mut self, table_id: TableId) -> Result<FullTableMetadata> {
let (table_info, table_route) = self
.table_metadata_manager
.get_full_table_info(table_id)
.await
.context(TableMetadataSnafu)?;
let table_info = table_info
.with_context(|| UnexpectedSnafu {
msg: format!("Table info not found for table id: {table_id}"),
})?
.into_inner()
.table_info;
let table_route = table_route
.with_context(|| UnexpectedSnafu {
msg: format!("Table route not found for table id: {table_id}"),
})?
.into_inner();
Ok(FullTableMetadata {
table_id,
table_info,
table_route,
})
}
}
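
Illustration (not part of the diff): a hedged sketch of driving the iterator as a stream for a couple of table ids, assuming a `KvBackendRef` has already been built (for example via `StoreConfig`); the ids are hypothetical.

// Collects the full names of the given tables by streaming their metadata.
async fn collect_full_table_names(kv_backend: KvBackendRef) -> Result<Vec<String>> {
    use futures::TryStreamExt;

    let input = IteratorInput::new_table_ids(vec![1024, 1025]);
    let mut stream = Box::pin(TableMetadataIterator::new(kv_backend, input).into_stream());
    let mut names = Vec::new();
    while let Some(meta) = stream.try_next().await? {
        names.push(meta.full_table_name());
    }
    Ok(names)
}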

View File

@@ -25,6 +25,7 @@ common-meta.workspace = true
common-query.workspace = true
common-recordbatch.workspace = true
common-telemetry.workspace = true
datatypes.workspace = true
enum_dispatch = "0.3"
futures.workspace = true
futures-util.workspace = true

View File

@@ -162,14 +162,23 @@ impl Client {
.as_bytes() as usize
}
pub fn make_flight_client(&self) -> Result<FlightClient> {
pub fn make_flight_client(
&self,
send_compression: bool,
accept_compression: bool,
) -> Result<FlightClient> {
let (addr, channel) = self.find_channel()?;
let client = FlightServiceClient::new(channel)
let mut client = FlightServiceClient::new(channel)
.max_decoding_message_size(self.max_grpc_recv_message_size())
.max_encoding_message_size(self.max_grpc_send_message_size())
.accept_compressed(CompressionEncoding::Zstd)
.send_compressed(CompressionEncoding::Zstd);
.max_encoding_message_size(self.max_grpc_send_message_size());
// todo(hl): support compression methods.
if send_compression {
client = client.send_compressed(CompressionEncoding::Zstd);
}
if accept_compression {
client = client.accept_compressed(CompressionEncoding::Zstd);
}
Ok(FlightClient { addr, client })
}
@@ -178,9 +187,7 @@ impl Client {
let (addr, channel) = self.find_channel()?;
let client = PbRegionClient::new(channel)
.max_decoding_message_size(self.max_grpc_recv_message_size())
.max_encoding_message_size(self.max_grpc_send_message_size())
.accept_compressed(CompressionEncoding::Zstd)
.send_compressed(CompressionEncoding::Zstd);
.max_encoding_message_size(self.max_grpc_send_message_size());
Ok((addr, client))
}

View File

@@ -49,7 +49,16 @@ impl NodeManager for NodeClients {
async fn datanode(&self, datanode: &Peer) -> DatanodeRef {
let client = self.get_client(datanode).await;
Arc::new(RegionRequester::new(client))
let ChannelConfig {
send_compression,
accept_compression,
..
} = self.channel_manager.config();
Arc::new(RegionRequester::new(
client,
*send_compression,
*accept_compression,
))
}
async fn flownode(&self, flownode: &Peer) -> FlownodeRef {

View File

@@ -14,6 +14,7 @@
use std::pin::Pin;
use std::str::FromStr;
use std::sync::Arc;
use api::v1::auth_header::AuthScheme;
use api::v1::ddl_request::Expr as DdlExpr;
@@ -35,7 +36,7 @@ use common_grpc::flight::do_put::DoPutResponse;
use common_grpc::flight::{FlightDecoder, FlightMessage};
use common_query::Output;
use common_recordbatch::error::ExternalSnafu;
use common_recordbatch::RecordBatchStreamWrapper;
use common_recordbatch::{RecordBatch, RecordBatchStreamWrapper};
use common_telemetry::tracing_context::W3cTrace;
use common_telemetry::{error, warn};
use futures::future;
@@ -49,7 +50,7 @@ use crate::error::{
ConvertFlightDataSnafu, Error, FlightGetSnafu, IllegalFlightMessagesSnafu,
InvalidTonicMetadataValueSnafu, ServerSnafu,
};
use crate::{from_grpc_response, Client, Result};
use crate::{error, from_grpc_response, Client, Result};
type FlightDataStream = Pin<Box<dyn Stream<Item = FlightData> + Send>>;
@@ -210,12 +211,18 @@ impl Database {
retries += 1;
warn!("Retrying {} times with error = {:?}", retries, err);
continue;
} else {
error!(
err; "Failed to send request to grpc handle, retries = {}, not retryable error, aborting",
retries
);
return Err(err.into());
}
}
(Err(err), false) => {
error!(
"Failed to send request to grpc handle after {} retries, error = {:?}",
retries, err
err; "Failed to send request to grpc handle after {} retries",
retries,
);
return Err(err.into());
}
@@ -286,7 +293,7 @@ impl Database {
let mut request = tonic::Request::new(request);
Self::put_hints(request.metadata_mut(), hints)?;
let mut client = self.client.make_flight_client()?;
let mut client = self.client.make_flight_client(false, false)?;
let response = client.mut_inner().do_get(request).await.or_else(|e| {
let tonic_code = e.code();
@@ -315,7 +322,7 @@ impl Database {
let mut flight_message_stream = flight_data_stream.map(move |flight_data| {
flight_data
.map_err(Error::from)
.and_then(|data| decoder.try_decode(data).context(ConvertFlightDataSnafu))
.and_then(|data| decoder.try_decode(&data).context(ConvertFlightDataSnafu))
});
let Some(first_flight_message) = flight_message_stream.next().await else {
@@ -337,20 +344,30 @@ impl Database {
);
Ok(Output::new_with_affected_rows(rows))
}
FlightMessage::Recordbatch(_) | FlightMessage::Metrics(_) => {
FlightMessage::RecordBatch(_) | FlightMessage::Metrics(_) => {
IllegalFlightMessagesSnafu {
reason: "The first flight message cannot be a RecordBatch or Metrics message",
}
.fail()
}
FlightMessage::Schema(schema) => {
let schema = Arc::new(
datatypes::schema::Schema::try_from(schema)
.context(error::ConvertSchemaSnafu)?,
);
let schema_cloned = schema.clone();
let stream = Box::pin(stream!({
while let Some(flight_message) = flight_message_stream.next().await {
let flight_message = flight_message
.map_err(BoxedError::new)
.context(ExternalSnafu)?;
match flight_message {
FlightMessage::Recordbatch(record_batch) => yield Ok(record_batch),
FlightMessage::RecordBatch(arrow_batch) => {
yield RecordBatch::try_from_df_record_batch(
schema_cloned.clone(),
arrow_batch,
)
}
FlightMessage::Metrics(_) => {}
FlightMessage::AffectedRows(_) | FlightMessage::Schema(_) => {
yield IllegalFlightMessagesSnafu {reason: format!("A Schema message must be succeeded exclusively by a set of RecordBatch messages, flight_message: {:?}", flight_message)}
@@ -398,7 +415,7 @@ impl Database {
MetadataValue::from_str(db_to_put).context(InvalidTonicMetadataValueSnafu)?,
);
let mut client = self.client.make_flight_client()?;
let mut client = self.client.make_flight_client(false, false)?;
let response = client.mut_inner().do_put(request).await?;
let response = response
.into_inner()

View File

@@ -117,6 +117,13 @@ pub enum Error {
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Failed to convert Schema"))]
ConvertSchema {
#[snafu(implicit)]
location: Location,
source: datatypes::error::Error,
},
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -137,6 +144,7 @@ impl ErrorExt for Error {
| Error::CreateTlsChannel { source, .. } => source.status_code(),
Error::IllegalGrpcClientState { .. } => StatusCode::Unexpected,
Error::InvalidTonicMetadataValue { .. } => StatusCode::InvalidArguments,
Error::ConvertSchema { source, .. } => source.status_code(),
}
}

View File

@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use api::v1::flow::{FlowRequest, FlowResponse};
use api::v1::flow::{DirtyWindowRequest, DirtyWindowRequests, FlowRequest, FlowResponse};
use api::v1::region::InsertRequests;
use common_error::ext::BoxedError;
use common_meta::node_manager::Flownode;
@@ -44,6 +44,16 @@ impl Flownode for FlowRequester {
.map_err(BoxedError::new)
.context(common_meta::error::ExternalSnafu)
}
async fn handle_mark_window_dirty(
&self,
req: DirtyWindowRequest,
) -> common_meta::error::Result<FlowResponse> {
self.handle_mark_window_dirty(req)
.await
.map_err(BoxedError::new)
.context(common_meta::error::ExternalSnafu)
}
}
impl FlowRequester {
@@ -91,4 +101,20 @@ impl FlowRequester {
.into_inner();
Ok(response)
}
async fn handle_mark_window_dirty(&self, req: DirtyWindowRequest) -> Result<FlowResponse> {
let (addr, mut client) = self.client.raw_flow_client()?;
let response = client
.handle_mark_dirty_time_window(DirtyWindowRequests {
requests: vec![req],
})
.await
.or_else(|e| {
let code = e.code();
let err: crate::error::Error = e.into();
Err(BoxedError::new(err)).context(FlowServerSnafu { addr, code })
})?
.into_inner();
Ok(response)
}
}

View File

@@ -28,7 +28,7 @@ use common_meta::error::{self as meta_error, Result as MetaResult};
use common_meta::node_manager::Datanode;
use common_query::request::QueryRequest;
use common_recordbatch::error::ExternalSnafu;
use common_recordbatch::{RecordBatchStreamWrapper, SendableRecordBatchStream};
use common_recordbatch::{RecordBatch, RecordBatchStreamWrapper, SendableRecordBatchStream};
use common_telemetry::error;
use common_telemetry::tracing_context::TracingContext;
use prost::Message;
@@ -46,6 +46,8 @@ use crate::{metrics, Client, Error};
#[derive(Debug)]
pub struct RegionRequester {
client: Client,
send_compression: bool,
accept_compression: bool,
}
#[async_trait]
@@ -55,6 +57,7 @@ impl Datanode for RegionRequester {
if err.should_retry() {
meta_error::Error::RetryLater {
source: BoxedError::new(err),
clean_poisons: false,
}
} else {
meta_error::Error::External {
@@ -88,12 +91,18 @@ impl Datanode for RegionRequester {
}
impl RegionRequester {
pub fn new(client: Client) -> Self {
Self { client }
pub fn new(client: Client, send_compression: bool, accept_compression: bool) -> Self {
Self {
client,
send_compression,
accept_compression,
}
}
pub async fn do_get_inner(&self, ticket: Ticket) -> Result<SendableRecordBatchStream> {
let mut flight_client = self.client.make_flight_client()?;
let mut flight_client = self
.client
.make_flight_client(self.send_compression, self.accept_compression)?;
let response = flight_client
.mut_inner()
.do_get(ticket)
@@ -125,7 +134,7 @@ impl RegionRequester {
let mut flight_message_stream = flight_data_stream.map(move |flight_data| {
flight_data
.map_err(Error::from)
.and_then(|data| decoder.try_decode(data).context(ConvertFlightDataSnafu))
.and_then(|data| decoder.try_decode(&data).context(ConvertFlightDataSnafu))
});
let Some(first_flight_message) = flight_message_stream.next().await else {
@@ -146,18 +155,78 @@ impl RegionRequester {
let tracing_context = TracingContext::from_current_span();
let schema = Arc::new(
datatypes::schema::Schema::try_from(schema).context(error::ConvertSchemaSnafu)?,
);
let schema_cloned = schema.clone();
let stream = Box::pin(stream!({
let _span = tracing_context.attach(common_telemetry::tracing::info_span!(
"poll_flight_data_stream"
));
while let Some(flight_message) = flight_message_stream.next().await {
let flight_message = flight_message
.map_err(BoxedError::new)
.context(ExternalSnafu)?;
let mut buffered_message: Option<FlightMessage> = None;
let mut stream_ended = false;
while !stream_ended {
// get the next message from the buffered message or read from the flight message stream
let flight_message_item = if let Some(msg) = buffered_message.take() {
Some(Ok(msg))
} else {
flight_message_stream.next().await
};
let flight_message = match flight_message_item {
Some(Ok(message)) => message,
Some(Err(e)) => {
yield Err(BoxedError::new(e)).context(ExternalSnafu);
break;
}
None => break,
};
match flight_message {
FlightMessage::Recordbatch(record_batch) => yield Ok(record_batch),
FlightMessage::RecordBatch(record_batch) => {
let result_to_yield = RecordBatch::try_from_df_record_batch(
schema_cloned.clone(),
record_batch,
);
// Get the next message from the stream; normally it should be a Metrics message.
if let Some(next_flight_message_result) = flight_message_stream.next().await
{
match next_flight_message_result {
Ok(FlightMessage::Metrics(s)) => {
let m = serde_json::from_str(&s).ok().map(Arc::new);
metrics_ref.swap(m);
}
Ok(FlightMessage::RecordBatch(rb)) => {
// It is unexpectedly not a Metrics message, so buffer this record batch
// and yield it in the next iteration.
buffered_message = Some(FlightMessage::RecordBatch(rb));
}
Ok(_) => {
yield IllegalFlightMessagesSnafu {
reason: "A RecordBatch message can only be succeeded by a Metrics message or another RecordBatch message"
}
.fail()
.map_err(BoxedError::new)
.context(ExternalSnafu);
break;
}
Err(e) => {
yield Err(BoxedError::new(e)).context(ExternalSnafu);
break;
}
}
} else {
// the stream has ended
stream_ended = true;
}
yield result_to_yield;
}
FlightMessage::Metrics(s) => {
// Handles the case where a Metrics message arrives after other messages.
let m = serde_json::from_str(&s).ok().map(Arc::new);
metrics_ref.swap(m);
break;

View File

@@ -10,7 +10,13 @@ name = "greptime"
path = "src/bin/greptime.rs"
[features]
default = ["servers/pprof", "servers/mem-prof"]
default = [
"servers/pprof",
"servers/mem-prof",
"meta-srv/pg_kvbackend",
"meta-srv/mysql_kvbackend",
]
enterprise = ["common-meta/enterprise", "frontend/enterprise", "meta-srv/enterprise"]
tokio-console = ["common-telemetry/tokio-console"]
[lints]
@@ -74,6 +80,7 @@ servers.workspace = true
session.workspace = true
similar-asserts.workspace = true
snafu.workspace = true
stat.workspace = true
store-api.workspace = true
substrait.workspace = true
table.workspace = true

View File

@@ -20,11 +20,11 @@ use cmd::error::{InitTlsProviderSnafu, Result};
use cmd::options::GlobalOptions;
use cmd::{cli, datanode, flownode, frontend, metasrv, standalone, App};
use common_base::Plugins;
use common_version::version;
use common_version::{verbose_version, version};
use servers::install_ring_crypto_provider;
#[derive(Parser)]
#[command(name = "greptime", author, version, long_version = version(), about)]
#[command(name = "greptime", author, version, long_version = verbose_version(), about)]
#[command(propagate_version = true)]
pub(crate) struct Command {
#[clap(subcommand)]
@@ -143,10 +143,8 @@ async fn start(cli: Command) -> Result<()> {
}
fn setup_human_panic() {
human_panic::setup_panic!(
human_panic::Metadata::new("GreptimeDB", env!("CARGO_PKG_VERSION"))
.homepage("https://github.com/GreptimeTeam/greptimedb/discussions")
);
human_panic::setup_panic!(human_panic::Metadata::new("GreptimeDB", version())
.homepage("https://github.com/GreptimeTeam/greptimedb/discussions"));
common_telemetry::set_panic_hook();
}

View File

@@ -76,6 +76,7 @@ impl Command {
&opts,
&TracingOptions::default(),
None,
None,
);
let tool = self.cmd.build().await.context(error::BuildCliSnafu)?;
@@ -145,6 +146,7 @@ mod tests {
let output_dir = tempfile::tempdir().unwrap();
let cli = cli::Command::parse_from([
"cli",
"data",
"export",
"--addr",
"127.0.0.1:4000",

View File

@@ -14,12 +14,13 @@
pub mod builder;
use std::path::Path;
use std::time::Duration;
use async_trait::async_trait;
use clap::Parser;
use common_config::Configurable;
use common_telemetry::logging::TracingOptions;
use common_telemetry::logging::{TracingOptions, DEFAULT_LOGGING_DIR};
use common_telemetry::{info, warn};
use common_wal::config::DatanodeWalConfig;
use datanode::datanode::Datanode;
@@ -248,6 +249,14 @@ impl StartCommand {
raft_engine_config.dir.replace(wal_dir.clone());
}
// If the logging dir is not set, use the default logs dir in the data home.
if opts.logging.dir.is_empty() {
opts.logging.dir = Path::new(&opts.storage.data_home)
.join(DEFAULT_LOGGING_DIR)
.to_string_lossy()
.to_string();
}
if let Some(http_addr) = &self.http_addr {
opts.http.addr.clone_from(http_addr);
}

View File

@@ -19,7 +19,7 @@ use catalog::kvbackend::MetaKvBackend;
use common_base::Plugins;
use common_meta::cache::LayeredCacheRegistryBuilder;
use common_telemetry::info;
use common_version::{short_version, version};
use common_version::{short_version, verbose_version};
use datanode::datanode::DatanodeBuilder;
use datanode::service::DatanodeServiceBuilder;
use meta_client::MetaClientType;
@@ -28,7 +28,7 @@ use tracing_appender::non_blocking::WorkerGuard;
use crate::datanode::{DatanodeOptions, Instance, APP_NAME};
use crate::error::{MetaClientInitSnafu, MissingConfigSnafu, Result, StartDatanodeSnafu};
use crate::log_versions;
use crate::{create_resource_limit_metrics, log_versions};
/// Builder for Datanode instance.
pub struct InstanceBuilder {
@@ -64,9 +64,11 @@ impl InstanceBuilder {
&dn_opts.logging,
&dn_opts.tracing,
dn_opts.node_id.map(|x| x.to_string()),
None,
);
log_versions(version(), short_version(), APP_NAME);
log_versions(verbose_version(), short_version(), APP_NAME);
create_resource_limit_metrics(APP_NAME);
plugins::setup_datanode_plugins(plugins, &opts.plugins, dn_opts)
.await
@@ -91,6 +93,7 @@ impl InstanceBuilder {
MetaClientType::Datanode { member_id },
meta_client_options,
Some(&plugins),
None,
)
.await
.context(MetaClientInitSnafu)?;

View File

@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use std::path::Path;
use std::sync::Arc;
use std::time::Duration;
@@ -21,7 +22,7 @@ use catalog::kvbackend::{CachedKvBackendBuilder, KvBackendCatalogManager, MetaKv
use clap::Parser;
use client::client_manager::NodeClients;
use common_base::Plugins;
use common_config::Configurable;
use common_config::{Configurable, DEFAULT_DATA_HOME};
use common_grpc::channel_manager::ChannelConfig;
use common_meta::cache::{CacheRegistryBuilder, LayeredCacheRegistryBuilder};
use common_meta::heartbeat::handler::invalidate_table_cache::InvalidateCacheHandler;
@@ -30,8 +31,8 @@ use common_meta::heartbeat::handler::HandlerGroupExecutor;
use common_meta::key::flow::FlowMetadataManager;
use common_meta::key::TableMetadataManager;
use common_telemetry::info;
use common_telemetry::logging::TracingOptions;
use common_version::{short_version, version};
use common_telemetry::logging::{TracingOptions, DEFAULT_LOGGING_DIR};
use common_version::{short_version, verbose_version};
use flow::{
get_flow_auth_options, FlownodeBuilder, FlownodeInstance, FlownodeServiceBuilder,
FrontendClient, FrontendInvoker,
@@ -45,7 +46,7 @@ use crate::error::{
MissingConfigSnafu, Result, ShutdownFlownodeSnafu, StartFlownodeSnafu,
};
use crate::options::{GlobalOptions, GreptimeOptions};
use crate::{log_versions, App};
use crate::{create_resource_limit_metrics, log_versions, App};
pub const APP_NAME: &str = "greptime-flownode";
@@ -54,14 +55,32 @@ type FlownodeOptions = GreptimeOptions<flow::FlownodeOptions>;
pub struct Instance {
flownode: FlownodeInstance,
// The components of flownode, which make it easier to expand based
// on the components.
#[cfg(feature = "enterprise")]
components: Components,
// Keep the logging guard to prevent the worker from being dropped.
_guard: Vec<WorkerGuard>,
}
#[cfg(feature = "enterprise")]
pub struct Components {
pub catalog_manager: catalog::CatalogManagerRef,
pub fe_client: Arc<FrontendClient>,
pub kv_backend: common_meta::kv_backend::KvBackendRef,
}
impl Instance {
pub fn new(flownode: FlownodeInstance, guard: Vec<WorkerGuard>) -> Self {
pub fn new(
flownode: FlownodeInstance,
#[cfg(feature = "enterprise")] components: Components,
guard: Vec<WorkerGuard>,
) -> Self {
Self {
flownode,
#[cfg(feature = "enterprise")]
components,
_guard: guard,
}
}
@@ -74,6 +93,11 @@ impl Instance {
pub fn flownode_mut(&mut self) -> &mut FlownodeInstance {
&mut self.flownode
}
#[cfg(feature = "enterprise")]
pub fn components(&self) -> &Components {
&self.components
}
}
#[async_trait::async_trait]
@@ -186,6 +210,14 @@ impl StartCommand {
opts.logging.dir.clone_from(dir);
}
// If the logging dir is not set, use the default logs dir in the data home.
if opts.logging.dir.is_empty() {
opts.logging.dir = Path::new(DEFAULT_DATA_HOME)
.join(DEFAULT_LOGGING_DIR)
.to_string_lossy()
.to_string();
}
if global_options.log_level.is_some() {
opts.logging.level.clone_from(&global_options.log_level);
}
@@ -244,8 +276,11 @@ impl StartCommand {
&opts.component.logging,
&opts.component.tracing,
opts.component.node_id.map(|x| x.to_string()),
None,
);
log_versions(version(), short_version(), APP_NAME);
log_versions(verbose_version(), short_version(), APP_NAME);
create_resource_limit_metrics(APP_NAME);
info!("Flownode start command: {:#?}", self);
info!("Flownode options: {:#?}", opts);
@@ -271,6 +306,7 @@ impl StartCommand {
MetaClientType::Flownode { member_id },
meta_config,
None,
None,
)
.await
.context(MetaClientInitSnafu)?;
@@ -311,6 +347,7 @@ impl StartCommand {
cached_meta_backend.clone(),
layered_cache_registry.clone(),
None,
None,
);
let table_metadata_manager =
@@ -336,19 +373,20 @@ impl StartCommand {
let flow_auth_header = get_flow_auth_options(&opts).context(StartFlownodeSnafu)?;
let frontend_client =
FrontendClient::from_meta_client(meta_client.clone(), flow_auth_header);
let frontend_client = Arc::new(frontend_client);
let flownode_builder = FlownodeBuilder::new(
opts.clone(),
plugins,
table_metadata_manager,
catalog_manager.clone(),
flow_metadata_manager,
Arc::new(frontend_client),
frontend_client.clone(),
)
.with_heartbeat_task(heartbeat_task);
let mut flownode = flownode_builder.build().await.context(StartFlownodeSnafu)?;
let services = FlownodeServiceBuilder::new(&opts)
.with_grpc_server(flownode.flownode_server().clone())
.with_default_grpc_server(flownode.flownode_server())
.enable_http_service()
.build()
.context(StartFlownodeSnafu)?;
@@ -380,6 +418,16 @@ impl StartCommand {
.set_frontend_invoker(invoker)
.await;
Ok(Instance::new(flownode, guard))
#[cfg(feature = "enterprise")]
let components = Components {
catalog_manager: catalog_manager.clone(),
fe_client: frontend_client,
kv_backend: cached_meta_backend,
};
#[cfg(not(feature = "enterprise"))]
return Ok(Instance::new(flownode, guard));
#[cfg(feature = "enterprise")]
Ok(Instance::new(flownode, components, guard))
}
}

Some files were not shown because too many files have changed in this diff.