Compare commits

...

234 Commits

Author SHA1 Message Date
Yingwen
506dc20765 fix: last non null iter not init (#4687) 2024-09-06 04:13:23 +00:00
Lei, HUANG
114772ba87 chore: bump version v0.9.3 (#4684) 2024-09-06 02:31:41 +00:00
liyang
89a3da8a3a chore(dockerfile): remove mysql and postgresql clients in greptimedb image (#4685) 2024-09-05 16:00:53 +00:00
jeremyhi
8814695b58 feat: invalidate cache via invalidator on region migration (#4682)
feat: invalidate table via invalidator on region migration
2024-09-05 06:15:38 +00:00
Lanqing Yang
86cef648cd feat: add more spans to mito engine (#4643)
feat: add more spans to mito engine
2024-09-05 06:13:22 +00:00
Ning Sun
e476e36647 feat: add geohash and h3 as built-in functions (#4656)
* feat: add built-in functions h3 and geohash

* tests: add sqlness tests for geo functions

* doc: correct h3 comment

* fix: lint error

* fix: toml format

* refactor: address review comments

* test: add more sqlness cases

* Apply suggestions from code review

Co-authored-by: Ruihang Xia <waynestxia@gmail.com>

---------

Co-authored-by: Ruihang Xia <waynestxia@gmail.com>
2024-09-05 04:42:29 +00:00
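The new built-ins expose geospatial encodings over latitude/longitude values. As a rough illustration of what a geohash function computes, here is a minimal standalone encoder (this is not GreptimeDB's implementation, and the function name and signature are assumptions for illustration only):

```rust
// Standard geohash base32 alphabet.
const BASE32: &[u8] = b"0123456789bcdefghjkmnpqrstuvwxyz";

/// Minimal geohash encoder: interleaves longitude/latitude bisection bits and
/// maps every 5 bits to one base32 character.
fn geohash(lat: f64, lon: f64, len: usize) -> String {
    let mut lat_rng = (-90.0f64, 90.0f64);
    let mut lon_rng = (-180.0f64, 180.0f64);
    let (mut out, mut bit, mut ch) = (String::new(), 0u8, 0usize);
    let mut even = true; // even bits refine longitude, odd bits refine latitude
    while out.len() < len {
        let (rng, val) = if even { (&mut lon_rng, lon) } else { (&mut lat_rng, lat) };
        let mid = (rng.0 + rng.1) / 2.0;
        ch <<= 1;
        if val > mid {
            ch |= 1;
            rng.0 = mid;
        } else {
            rng.1 = mid;
        }
        even = !even;
        bit += 1;
        if bit == 5 {
            out.push(BASE32[ch] as char);
            bit = 0;
            ch = 0;
        }
    }
    out
}

fn main() {
    // Well-known reference coordinate; its 7-character geohash is "u4pruyd".
    println!("{}", geohash(57.64911, 10.40744, 7));
}
```

The H3 functions added alongside are conceptually similar but map coordinates onto Uber's hexagonal hierarchical grid instead of a base32 rectangle hierarchy.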
shuiyisong
4781b327f3 fix: ref to auth err (#4681)
* fix: ref to auth err

* fix: typo
2024-09-05 04:05:39 +00:00
LFC
3e4a69017d build: add mysql and postgresql clients to greptimedb image (#4677) 2024-09-04 11:38:47 +00:00
LFC
d43e31c7ed feat: schedule compaction when adding sst files by editing region (#4648)
* feat: schedule compaction when adding sst files by editing region

* add minimum time interval for two successive compactions

* resolve PR comments
2024-09-04 10:10:07 +00:00
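The second bullet adds a minimum time interval between two successive compactions so that frequent region edits do not trigger back-to-back compaction runs. The throttling idea can be sketched as follows (type and field names are assumptions for illustration, not the actual mito2 code):

```rust
use std::time::{Duration, Instant};

/// Hypothetical throttle: only allow a new compaction if enough time has
/// passed since the previous one was scheduled.
struct CompactionThrottle {
    last_run: Option<Instant>,
    min_interval: Duration,
}

impl CompactionThrottle {
    fn should_schedule(&mut self, now: Instant) -> bool {
        if let Some(last) = self.last_run {
            if now.duration_since(last) < self.min_interval {
                // Too soon after the previous compaction: skip this trigger.
                return false;
            }
        }
        self.last_run = Some(now);
        true
    }
}

fn main() {
    let mut throttle = CompactionThrottle {
        last_run: None,
        min_interval: Duration::from_secs(60),
    };
    let t0 = Instant::now();
    assert!(throttle.should_schedule(t0));
    assert!(!throttle.should_schedule(t0 + Duration::from_secs(10)));
    assert!(throttle.should_schedule(t0 + Duration::from_secs(61)));
}
```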
discord9
19e2a9d44b feat: change log level dynamically (#4653)
* feat: add dyn_log handle

* feat: use reload handle

* chore: per review
2024-09-04 07:54:50 +00:00
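The "reload handle" mentioned above is the standard mechanism in the Rust tracing ecosystem for changing log levels without a restart. A minimal sketch of that pattern using tracing-subscriber's reload layer (generic crate usage, assuming that is the mechanism; the HTTP handler wiring in the PR is omitted):

```rust
use tracing_subscriber::{filter::LevelFilter, fmt, prelude::*, reload};

fn main() {
    // Wrap the level filter in a reloadable layer and keep the handle.
    let (filter, handle) = reload::Layer::new(LevelFilter::INFO);
    tracing_subscriber::registry()
        .with(filter)
        .with(fmt::Layer::default())
        .init();

    tracing::debug!("suppressed: the global level is INFO");

    // Later, e.g. from an admin endpoint, raise verbosity at runtime.
    handle
        .modify(|f| *f = LevelFilter::DEBUG)
        .expect("reload log filter");

    tracing::debug!("visible: the level was reloaded to DEBUG");
}
```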
zyy17
8453df1392 refactor: make init_global_logging() clean and add log_format (#4657)
refactor: refine the code logic of init_global_logging and add json output format
2024-09-04 03:04:51 +00:00
Ruihang Xia
8ca35a4a1a fix: use number of partitions as parallelism in region scanner (#4669)
* fix: use number of partitions as parallelism in region scanner

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add sqlness

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

Co-authored-by: Lei HUANG <mrsatangel@gmail.com>

* order by ts

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* debug print time range

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
Co-authored-by: Lei HUANG <mrsatangel@gmail.com>
2024-09-03 13:42:38 +00:00
Ruihang Xia
93f202694c refactor: remove unused error variants (#4666)
* add python script

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* remove unused errors

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix all negative cases

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* setup CI

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add license header

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-09-03 13:19:38 +00:00
Weny Xu
b52e3c694a chore(ci): set etcd resources limits (#4665) 2024-09-03 07:25:23 +00:00
dennis zhuang
a612b67470 feat: supports name in object storage config (#4630)
* feat: supports name in object storage config

* fix: integration test

* fix: integration test

* fix: update sample config

* fix: config api test
2024-09-03 07:02:55 +00:00
jeremyhi
9b03940e03 chore: refactor metadata key value trait (#4664) 2024-09-03 07:00:24 +00:00
jeremyhi
8d6cd8ae16 feat: export import database (#4654)
* feat: export database create sql

* feat: import create database

* Update src/cmd/src/cli/export.rs

Co-authored-by: Weny Xu <wenymedia@gmail.com>

* Update src/cmd/src/cli/import.rs

Co-authored-by: Weny Xu <wenymedia@gmail.com>

* Update src/cmd/src/error.rs

Co-authored-by: Ruihang Xia <waynestxia@gmail.com>

* chore: make show create fail fast

---------

Co-authored-by: Weny Xu <wenymedia@gmail.com>
Co-authored-by: Ruihang Xia <waynestxia@gmail.com>
2024-09-03 04:45:25 +00:00
dennis zhuang
8f4ec536de feat: grpc writing supports TTL hint (#4651) 2024-09-03 02:15:01 +00:00
zyy17
f0e2d6e663 fix: use 'target' for 'actions-rust-lang/setup-rust-toolchain' to fix cross build failed (#4661) 2024-09-02 06:11:12 +00:00
Weny Xu
306bd25c64 fix: expose missing options for initializing regions (#4660)
* fix: expose `init_regions_in_background` and `init_regions_parallelism` opts

* fix: ci
2024-09-02 03:11:18 +00:00
zyy17
ddafcc678c ci: disable macos integration test and some minor refactoring (#4658) 2024-09-02 03:06:17 +00:00
Weny Xu
2564b5daee fix: correct otlp endpoint formatting (#4646) 2024-09-02 02:59:50 +00:00
Lei, HUANG
37dcf34bb9 fix(mito): avoid caching empty batches in row group (#4652)
* fix: avoid caching empty batches in row group

* fix: clippy

* Update tests/cases/standalone/common/select/last_value.sql

* fix: sqlness
2024-09-02 02:43:00 +00:00
Yingwen
8eda36bfe3 feat: remove files from the write cache in purger (#4655)
* feat: remove files from the write cache in purger

* chore: fix typo
2024-08-31 04:19:52 +00:00
Ruihang Xia
68b59e0e5e feat: remove the requirement that partition column must be PK (#4647)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-08-31 03:16:01 +00:00
Ruihang Xia
a37aeb2814 feat: initialize partition range from ScanInput (#4635)
* feat: initialize partition range from ScanInput

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* use num_rows instead

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add todo

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* setup unordered scan

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* Update src/mito2/src/read/scan_region.rs

Co-authored-by: jeremyhi <jiachun_feng@proton.me>

* leave unordered scan unchanged

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
Co-authored-by: jeremyhi <jiachun_feng@proton.me>
2024-08-30 07:30:37 +00:00
jeremyhi
f641c562c2 feat: show create database (#4642)
* feat: show create database

* feat: add sqlness test

* chore: reorder mod and use

* feat: show create schema

* Update src/frontend/src/instance.rs
2024-08-30 03:58:11 +00:00
Lanqing Yang
9286e963e7 chore: adding heartbeat sent/recv counts in greptimedb nodes (#4624)
obs: adding heartbeat sent/recv counts in greptimedb nodes
2024-08-30 03:57:16 +00:00
LFC
8ea4f67e4b refactor: reduce a object store "stat" call (#4645) 2024-08-30 03:31:19 +00:00
jeremyhi
5e4bac2633 feat: import cli tool (#4639)
* feat: import create tables

* feat: import database

* fix: export view schema
2024-08-29 09:32:21 +00:00
LFC
d45b04180c feat: pre-download the ingested sst (#4636)
* refactor: pre-read the ingested sst file in object store to fill the local cache to accelerate first query

* feat: pre-download the ingested SST from remote to accelerate following reads

* resolve PR comments

* resolve PR comments
2024-08-29 08:36:41 +00:00
discord9
8c8499ce53 perf(flow): Map&Reduce Operator use batch to reduce alloc (#4567)
* feat: partial impl mfp

* feat: eval batch inner

* chore: fmt

* feat: mfp eval_batch

* WIP

* feat: Collection generic over row&Batch

* feat: render source batch

* chore: chore

* feat: render mfp batch

* feat: render reduce batch(WIP)

* feat(WIP): render reduce

* feat: reduce batch

* feat: render sink batch

* feat: render constant batch

* chore: error handling& mfp batch test

* test: mfp batch

* chore: rm import

* test: render reduce batch

* chore: add TODO

* chore: per bot review

* refactor: per review

* chore: cmt

* chore: rename

* docs: update no panic
2024-08-29 07:28:13 +00:00
Weny Xu
79f40a762b fix: set selector_result_cache_size in unit test again (#4641) 2024-08-29 07:14:40 +00:00
jeremyhi
b062d8515d feat: copy database ignores view and temporary tables (#4640)
feat: copy database ignores views and temporary tables
2024-08-29 06:17:51 +00:00
discord9
9f9c1dab60 feat(flow): use DataFusion's optimizer (#4489)
* feat: use datafusion optimization

refactor: mv `sql_to_flow_plan` elsewhere

feat(WIP): use df optimization

WIP analyzer rule

feat(WIP): avg expander

fix: transform avg expander

fix: avg expand

feat: names from substrait

fix: avg rewrite

test: update `test_avg`&`test_avg_group_by`

test: fix `test_sum`

test: fix some tests

chore: remove unused flow plan transform

feat: tumble expander

test: update tests

* chore: clippy

* fix: tumble lose `group expr`

* test: sqlness test update

* test: rm unused cast

* test: simplify sqlness

* refactor: per review

* chore: after rebase

* fix: remove an outdated test

* test: add comment

* fix: report error when not literal

* chore: update sqlness test after rebase

* refactor: per review
2024-08-29 02:52:00 +00:00
dennis zhuang
841e66c810 fix: config api and export metrics default database (#4633) 2024-08-28 14:28:49 +00:00
shuiyisong
d1c635085c chore: modify grafana config to accord with version 9 (#4634)
chore: update grafana config to accord with version 9
2024-08-28 12:53:35 +00:00
Weny Xu
47657ebbc8 feat: replay WAL entries respect index (#4565)
* feat(log_store): use new `Consumer`

* feat: add `from_peer_id`

* feat: read WAL entries respect index

* test: add test for `build_region_wal_index_iterator`

* fix: keep the handle

* fix: incorrect last index

* fix: replay last entry id may be greater than expected

* chore: remove unused code

* chore: apply suggestions from CR

* chore: rename `datanode_id` to `location_id`

* chore: rename `from_peer_id` to `location_id`

* chore: rename `from_peer_id` to `location_id`

* chore: apply suggestions from CR
2024-08-28 11:37:18 +00:00
Ruihang Xia
64ae32def0 feat: remove some redundant clone/conversion on constructing MergeScan stream (#4632)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-08-28 08:52:09 +00:00
Weny Xu
744946957e fix: set selector_result_cache_size in unit test (#4631) 2024-08-28 07:24:17 +00:00
Ruihang Xia
d5455db2d5 fix: update properties on updating partitions (#4627)
* fix: update properties on updating partitions

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add unit test and handle insufficient ranges

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-08-28 07:06:01 +00:00
Yingwen
28bf549907 fix: fallback to window size in manifest (#4629) 2024-08-28 06:43:56 +00:00
zyy17
4ea412249a ci: add check-builder-rust-version job in release and change release-dev-builder-images trigger condition (#4615) 2024-08-27 16:59:01 +00:00
zyy17
eacc7bc471 refactor: add app in greptime_app_version metric (#4626)
refactor: add app in greptime_app_version metric
2024-08-27 11:19:01 +00:00
Ruihang Xia
b72d3bc71d build(deps): bump backon to 1.0 (#4625)
* build(deps): bump backon to 1.0

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix clippy

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-08-27 09:38:12 +00:00
Ning Sun
0b102ef846 ci: improve toolchain resolution in ci (#4614)
* ci: improve toolchain resolution in ci

* fix: yaml format
2024-08-27 07:46:51 +00:00
liyang
e404e9dafc chore: setting docker authentication in dev-build image (#4623) 2024-08-27 03:49:53 +00:00
liyang
63a442632e fix: failed to get version (#4622) 2024-08-26 15:33:30 +00:00
liyang
d39bafcfbd fix: change toolchain file name (#4621) 2024-08-26 13:04:06 +00:00
liyang
1717445ebe fix: failed to get github sha (#4620) 2024-08-26 11:42:07 +00:00
liyang
55d65da24d ci: add push dev-build images to aws ecr (#4618)
* ci: add push dev-build images to aws ecr

* chore: use the toolchain file to generate the dev-build image tag

* chore: change dev-build version

* Update .github/workflows/release-dev-builder-images.yaml

Co-authored-by: zyy17 <zyylsxm@gmail.com>

---------

Co-authored-by: zyy17 <zyylsxm@gmail.com>
2024-08-26 09:36:55 +00:00
Weny Xu
3297d5f657 feat: allow skipping topic creation (#4616)
* feat: introduce `create_topics` opt

* feat: allow skipping topic creation

* chore: refine docs

* chore: apply suggestions from CR
2024-08-26 08:34:27 +00:00
Ning Sun
d6865911ee feat: add postgres response for trasaction related statements (#4562)
* feat: add postgres fixtures WIP

* feat: implement more postgres fixtures

* feat: add compatibility for transaction/set transaction/show transaction

* fix: improve regex for set transaction
2024-08-26 08:09:21 +00:00
dennis zhuang
63f2463273 feat!: impl admin command (#4600)
* feat: impl admin statement parser

* feat: introduce AsyncFunction and implements it for admin functions

* feat: execute admin functions

* fix: license header

* fix: panic in test

* chore: fixed by code review
2024-08-26 07:53:40 +00:00
Ruihang Xia
da337a9635 perf: accelerate scatter query (#4607)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-08-26 03:03:30 +00:00
fys
3973d6b01f chore: optimize common_version build (#4611) 2024-08-23 12:36:28 +00:00
discord9
2c731c76ad chore: add stats feature for jemalloc-ctl (#4610) 2024-08-23 11:18:30 +00:00
ozewr
40e7b58c80 feat: refactoring LruCacheLayer with list_with_metakey and concurrent_stat_in_list (#4596)
* use list_with_metakey and concurrent_stat_in_list

* change concurrent in recover_cache like before

* remove stat function

* use 8 concurrent

* use const value

* fmt code

* Apply suggestions from code review

---------

Co-authored-by: ozewr <l19ht@google.com>
Co-authored-by: Weny Xu <wenymedia@gmail.com>
2024-08-23 03:22:00 +00:00
zyy17
5177717f71 refactor: add fallback_to_local region option (#4578)
* refactor: add 'fallback_to_local_compaction' region option

* refactor: use 'fallback_to_local'
2024-08-23 03:09:43 +00:00
Weny Xu
8d61e6fe49 chore: bump rskafka to 75535b (#4608) 2024-08-23 03:05:52 +00:00
Ruihang Xia
a3b8d2fe8f chore: bump rust toolchain to 2024-08-21 (#4606)
* chore: bump rust toolchain to 2024-08-22

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix clippy

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* update workflow

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* try 20240606

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-08-22 15:38:10 +00:00
Ning Sun
863ee073a9 chore: add commercial support section (#4601)
doc: add commercial support section
2024-08-22 12:03:20 +00:00
Weny Xu
25cd61b310 chore: upgrade toolchain to nightly-2024-08-07 (#4549)
* chore: upgrade toolchain to `nightly-2024-08-07`

* chore(ci): upgrade toolchain

* fix: fix unit test
2024-08-22 11:02:18 +00:00
fys
3517c13192 fix: incremental compilation always compile the common-version crate (#4605)
fix: wrong cargo:rerun
2024-08-22 11:00:33 +00:00
Ruihang Xia
b9cedf2c1a perf: optimize series divide algo (#4603)
* perf: optimize series divide algo

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* remove dead code

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-08-22 09:16:36 +00:00
LFC
883c5bc5b0 refactor: skip checking the existence of the SST files (#4602)
refactor: skip checking the existence of the SST files when region is directly edited
2024-08-22 08:32:27 +00:00
Yingwen
d628079f4c feat: collect filters metrics for scanners (#4591)
* feat: collect filter metrics

* refactor: reuse ReaderFilterMetrics

* feat: record read rows from parquet by type

* feat: unordered scan observe rows

also fix read type

* chore: rename label
2024-08-22 03:22:05 +00:00
Weny Xu
0025fa6ec7 chore: bump opendal version to 0.49 (#4587)
* chore: bump opendal version to 0.49

* chore: apply suggestions from CR

* Update src/object-store/src/util.rs

Co-authored-by: Yingwen <realevenyag@gmail.com>

---------

Co-authored-by: Yingwen <realevenyag@gmail.com>
2024-08-22 03:05:36 +00:00
Lanqing Yang
ff04109ee6 docs: add example configs introduced by pg_kvbackend (#4573)
chore: add example configs introduced by pg_kvbackend
2024-08-22 01:52:02 +00:00
Yingwen
9c1704d4cb docs: move v0.9.1 benchmark report to tsbs dir (#4598)
* docs: move v0.9.1 benchmark report to tsbs dir

* docs: add newlines
2024-08-21 09:31:05 +00:00
Yingwen
a12a905578 chore: disable ttl for write cache by default (#4595)
* chore: remove default write cache ttl

* docs: update example config

* chore: fix ci
2024-08-21 08:38:38 +00:00
shuiyisong
449236360d docs: log benchmark (#4597)
* chore: add log benchmark stuff

* chore: minor update
2024-08-21 07:12:32 +00:00
localhost
bf16422cee fix: pipeline prepare loop break detects a conditional error (#4593) 2024-08-21 06:20:09 +00:00
Ran Joe
9db08dbbe0 refactor(mito2): reduce duplicate IndexOutput struct (#4592)
* refactor(mito2): reduce duplicate IndexOutput struct

* docs(mito2): add index output note
2024-08-20 12:30:17 +00:00
fys
9d885fa0c2 chore: bump tikv-jemalloc* to "0.6" (#4590)
chore: bump tikv-jemalloc* to "0.6"
2024-08-20 09:08:21 +00:00
Ruihang Xia
b25a2b117e feat: remove sql in error desc (#4589)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-08-20 06:37:30 +00:00
fys
6fccff4810 chore: keep symbol table in nightly profile (#4588)
chore: keep symbol table in nightly profile
2024-08-20 02:27:31 +00:00
ozewr
30af78700f feat: Implement the Buf to avoid extra memory allocation (#4585)
* feat: Implement the Buf to avoid extra memory allocation

* fmt toml

* fmt code

* mv entry.into_buffer to raw_entry_buffer

* less reuse opendal

* remove todo #4065

* Update src/mito2/src/wal/entry_reader.rs

Co-authored-by: Weny Xu <wenymedia@gmail.com>

* fmt code

---------

Co-authored-by: ozewr <l19ht@google.com>
Co-authored-by: Weny Xu <wenymedia@gmail.com>
2024-08-19 12:11:08 +00:00
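For context, the bytes::Buf trait only requires remaining, chunk, and advance, so implementing it directly on an owned entry buffer lets readers consume bytes in place instead of copying them into a fresh Vec<u8> first. A generic sketch of the technique (the type name echoes the raw_entry_buffer bullet above, but the body is illustrative rather than the actual implementation):

```rust
use bytes::Buf;

/// Illustrative wrapper: an owned byte buffer plus a read cursor, exposed
/// through `bytes::Buf` so consumers can read it without extra allocation.
struct RawEntryBuffer {
    data: Vec<u8>,
    pos: usize,
}

impl Buf for RawEntryBuffer {
    fn remaining(&self) -> usize {
        self.data.len() - self.pos
    }

    fn chunk(&self) -> &[u8] {
        &self.data[self.pos..]
    }

    fn advance(&mut self, cnt: usize) {
        assert!(cnt <= self.remaining(), "advance past end of buffer");
        self.pos += cnt;
    }
}

fn main() {
    let mut buf = RawEntryBuffer { data: vec![1, 2, 3, 4], pos: 0 };
    // Consumers read through the `Buf` trait; no intermediate Vec is built.
    assert_eq!(buf.get_u16(), 0x0102);
    assert_eq!(buf.remaining(), 2);
}
```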
Ruihang Xia
8de11a0e34 perf: set simple filter on primary key columns to exact filter (#4564)
* perf: set simple filter on primary key columns to exact filter

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add sqlness test

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix typo

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix sqlness

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-08-19 09:07:35 +00:00
Lei, HUANG
975b8c69e5 fix(sqlness): redact all volatile text (#4583)
Add SQLNESS replacements for RoundRobinBatch and region patterns
2024-08-19 08:04:54 +00:00
Weny Xu
8036b44347 chore: setup kafka before downloading binary step (#4582) 2024-08-19 06:44:33 +00:00
Zhenchi
4c72b3f3fe chore: bump version to v0.9.2 (#4581)
chore: bump version to 0.9.2

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>
2024-08-19 06:11:36 +00:00
Weny Xu
76dc906574 feat(log_store): introduce the CollectionTask (#4530)
* feat: introduce the `CollectionTask`

* feat: add config of index collector

* chore: remove unused code

* feat: truncate indexes

* chore: apply suggestions from CR

* chore: update config examples

* refactor: retrieve latest offset while dumping indexes

* chore: print warn
2024-08-19 03:48:35 +00:00
Ran Joe
2a73e0937f fix(common_version): short_version with empty branch (#4572) 2024-08-19 03:14:49 +00:00
Zhenchi
c8de8b80f4 fix(fulltext-index): single segment is not sufficient for >50M rows SST (#4552)
* fix(fulltext-index): single segment is not sufficient for a >50M rows SST

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* fix: update doc comment

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

---------

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>
2024-08-16 09:14:33 +00:00
LFC
ec59ce5c9a feat: able to handle concurrent region edit requests (#4569)
* feat: able to handle concurrent region edit requests

* resolve PR comments
2024-08-16 03:29:03 +00:00
liyang
f578155602 feat: add GcsConfig credential field (#4568) 2024-08-16 03:11:20 +00:00
Weny Xu
d1472782d0 chore(log_store): remove redundant metrics (#4570)
chore(log_store): remove unused metrics
2024-08-16 02:23:21 +00:00
Lanqing Yang
93be81c041 feat: implement postgres kvbackend (#4421) 2024-08-14 22:49:32 +00:00
discord9
2c3fccb516 feat(flow): add eval_batch for ScalarExpr (#4551)
* refactor: better perf flow

* feat(WIP): batching proc

* feat: UnaryFunc::eval_batch untested

* feat: BinaryFunc::eval_batch untested

* feat: VariadicFunc::eval_batch untested

* feat: literal eval_batch

* refactor: move DfScalarFunc to separate file

* chore: remove unused imports

* feat: eval_batch df func&ifthen

* chore: remove unused file

* refactor: use Batch type

* chore: remove unused

* chore: remove a done TODO

* refactor: per review

* chore: import

* refactor: eval_batch if then

* chore: typo
2024-08-14 11:29:30 +00:00
Lei, HUANG
c1b1be47ba fix: append table stats (#4561)
* fix: append table stats

* fix: clippy
2024-08-14 09:01:42 +00:00
Weny Xu
0f85037024 chore: remove unused code (#4559) 2024-08-14 06:55:54 +00:00
discord9
f88705080b chore: set topic to 3 for sqlness test (#4560) 2024-08-14 06:32:26 +00:00
discord9
cbb06cd0c6 feat(flow): add some metrics (#4539)
* feat: add some metrics

* fix: tmp rate limiter

* feat: add task count metrics

* refactor: use bounded channel anyway

* refactor: better metrics
2024-08-14 03:23:49 +00:00
discord9
b59a93dfbc chore: Helper function to convert Vec<Value> to VectorRef (#4546)
* chore: `try_from_row_into_vector` helper

* test: try_from_row

* refactor: simplify with builder

* fix: decimal set precision & scale

* refactor: more simplify

* refactor: use ref
2024-08-14 03:11:44 +00:00
localhost
202c730363 perf: Optimizing pipeline performance (#4390)
* chore: improve pipeline performance

* chore: use arc to improve time type

* chore: improve pipeline coerce

* chore: add vec refactor

* chore: add vec pp

* chore: improve pipeline

* inprocess

* chore: set log ingester use new pipeline

* chore: fix some error by pr comment

* chore: fix typo

* chore: use enum_dispatch to simplify code

* chore: some minor fix

* chore: format code

* chore: update by pr comment

* chore: fix typo

* chore: make clippy happy

* chore: fix by pr comment

* chore: remove epoch and date process add new timestamp process

* chore: add more test for pipeline

* chore: restore epoch and date processor

* chore: compatibility issue

* chore: fix by pr comment

* chore: move the evaluation out of the loop

* chore: fix by pr comment

* chore: fix dissect output key filter

* chore: fix transform output greptime value has order error

* chore: keep pipeline transform output order

* chore: revert tests

* chore: simplify pipeline prepare implementation

* chore: add test for timestamp pipeline processor

* chore: make clippy happy

* chore: replace is_some check to match

---------

Co-authored-by: shuiyisong <xixing.sys@gmail.com>
2024-08-13 11:32:04 +00:00
zyy17
63e1892dc1 refactor(plugin): add SetupPlugin and StartPlugin error (#4554) 2024-08-13 11:22:48 +00:00
Lei, HUANG
216bce6973 perf: count(*) for append-only tables (#4545)
* feat: support fast count(*) for append-only tables

* fix: total_rows stats in time series memtable

* fix: sqlness result changes for SinglePartitionScanner -> StreamScanAdapter

* fix: some cr comments
2024-08-13 09:27:50 +00:00
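The fast path works because an append-only table never deletes or overwrites rows, so count(*) can be answered from row-count statistics instead of scanning data. Schematically (struct and field names below are assumptions, not the mito2 types):

```rust
/// Per-SST statistics; real engines keep such counts in file metadata.
struct FileStats {
    num_rows: u64,
}

/// For append-only data, the total row count is just the sum of the
/// per-file counts plus whatever is still in the memtable.
fn fast_count(sst_files: &[FileStats], memtable_rows: u64) -> u64 {
    sst_files.iter().map(|f| f.num_rows).sum::<u64>() + memtable_rows
}

fn main() {
    let files = [FileStats { num_rows: 1_000 }, FileStats { num_rows: 2_500 }];
    assert_eq!(fast_count(&files, 42), 3_542);
}
```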
Yingwen
4466fee580 docs: update grafana readme (#4550)
* docs: update grafana readme

* docs: simplify example
2024-08-13 08:45:06 +00:00
shuiyisong
5aa4c70057 chore: update validator signature (#4548) 2024-08-13 08:06:12 +00:00
Yingwen
72a1732fb4 docs: Adds more panels to grafana dashboards (#4540)
* docs: update standalone grafana

* docs: add more panels to grafana dashboards

* docs: replace source name

* docs: bump dashboard version

* docs: update hit rate expr

* docs: greptime_pod to instance, add panels for cache
2024-08-13 06:29:28 +00:00
Weny Xu
c821d21111 feat(log_store): introduce the IndexCollector (#4461)
* feat: introduce the IndexCollector

* refactor: separate BackgroundProducerWorker code into files

* feat: introduce index related operations

* feat: introduce the `GlobalIndexCollector`

* refactor: move collector to index mod

* refactor: refactor `GlobalIndexCollector`

* chore: remove unused collector.rs

* chore: add comments

* chore: add comments

* chore: apply suggestions from CR

* chore: apply suggestions from CR
2024-08-13 06:15:24 +00:00
Weny Xu
2e2eacf3b2 feat: add SASL and TLS config for Kafka client (#4536)
* feat: add SASL and TLS config

* feat: add SASL/PLAIN and TLS config for Kafka client

* chore: use `ring`

* feat: support SASL SCRAM-SHA-256 and SCRAM-SHA-512

* fix: correct unit test

* test: add integration test

* chore: apply suggestions from CR

* refactor: introduce `KafkaConnectionConfig`

* chore: refine toml examples

* docs: add missing fields

* chore: refine examples

* feat: allow no server ca cert

* chore: refine examples

* chore: fix clippy

* feat: load system ca certs

* chore: fmt toml

* chore: unpin version

* Update src/common/wal/src/error.rs

Co-authored-by: Lei, HUANG <6406592+v0y4g3r@users.noreply.github.com>

---------

Co-authored-by: Lei, HUANG <6406592+v0y4g3r@users.noreply.github.com>
2024-08-12 12:27:11 +00:00
Ruihang Xia
9bcaeaaa0e refactor: reuse aligned ts array in range manipulate exec (#4535)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-08-12 06:26:11 +00:00
Weny Xu
90cfe276b4 chore: upload kind logs (#4544) 2024-08-12 05:01:13 +00:00
JohnsonLee
6694d2a930 fix: change the type of oid in pg_namespace to u32 (#4541)
* fix:  change the type of oid in pg_namespace to u32

* fix: header and correct logic of update oid
2024-08-10 15:06:14 +00:00
Ning Sun
9532ffb954 fix: configuration example for selector (#4532)
* fix: configuration example for selector

* docs: update config docs

* test: update unit tests for configuration in meta
2024-08-09 09:51:05 +00:00
Weny Xu
665b7e5c6e perf: merge small byte ranges for optimized fetching (#4520) 2024-08-09 08:17:54 +00:00
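The optimization coalesces nearby byte ranges so that one larger object-store request replaces many tiny ones. The core idea can be sketched like this (function name and gap threshold are illustrative, not the actual API):

```rust
use std::ops::Range;

/// Merge byte ranges whose gap is at most `max_gap`, trading a little
/// over-read for far fewer remote fetches.
fn merge_ranges(mut ranges: Vec<Range<u64>>, max_gap: u64) -> Vec<Range<u64>> {
    ranges.sort_by_key(|r| r.start);
    let mut merged: Vec<Range<u64>> = Vec::new();
    for r in ranges {
        if let Some(last) = merged.last_mut() {
            if r.start <= last.end + max_gap {
                // Close enough to the previous range: extend it instead of
                // issuing another small read.
                last.end = last.end.max(r.end);
                continue;
            }
        }
        merged.push(r);
    }
    merged
}

fn main() {
    let merged = merge_ranges(vec![0..10, 12..20, 100..110], 4);
    assert_eq!(merged, vec![0..20, 100..110]);
}
```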
Weny Xu
27d9aa0f3b fix: rollback only if dropping the metric physical table fails (#4525)
* fix: rollback only if dropping the metric physical table fails

* chore: apply suggestions from CR
2024-08-09 08:01:11 +00:00
discord9
8f3293d4fb fix: larger stack size in debug mode (#4521)
* fix: larger stack size in debug mode

* chore: typo

* chore: clippy

* chore: per review

* chore: rename thread

* chore: per review

* refactor: better looking cfg

* chore: async main entry
2024-08-09 07:01:20 +00:00
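Debug builds use much larger stack frames than release builds, so the fix runs the real entry point on a thread with an enlarged stack when debug assertions are enabled. The usual shape of that pattern looks roughly like this (sizes and names here are illustrative, not the exact values chosen in the PR):

```rust
fn main() {
    // Pick a bigger stack only for debug builds.
    #[cfg(debug_assertions)]
    const STACK_SIZE: usize = 8 * 1024 * 1024;
    #[cfg(not(debug_assertions))]
    const STACK_SIZE: usize = 2 * 1024 * 1024;

    std::thread::Builder::new()
        .name("main".to_string())
        .stack_size(STACK_SIZE)
        .spawn(real_main)
        .expect("spawn main thread")
        .join()
        .expect("join main thread");
}

fn real_main() {
    println!("running with an enlarged stack in debug mode");
}
```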
LFC
7dd20b0348 chore: make mysql server version changeable (#4531) 2024-08-09 03:43:43 +00:00
zyy17
4c1a3f29c0 ci: download the latest stable released version by default and do some small refactoring (#4529)
refactor: download the latest stable released version by default and do some small refactoring
2024-08-08 07:46:09 +00:00
Jeremyhi
0d70961448 feat: change the default selector to RoundRobin (#4528)
* feat: change the default selector to rr

* Update src/meta-srv/src/selector.rs

* fix: unit test
2024-08-08 04:58:20 +00:00
LFC
a75cfaa516 chore: update snafu to make clippy happy (#4507)
* chore: update snafu to make clippy happy

* fix ci
2024-08-07 16:12:00 +00:00
Lei, HUANG
aa3f53f08a fix: install script (#4527)
fix: install script always installs v0.9.0-nightly-20240709 instead of the latest nightly
2024-08-07 14:07:32 +00:00
Ruihang Xia
8f0959fa9f fix: fix incorrect result of topk with cte (#4523)
* fix: fix incorrect result of topk with cte

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* update sqlness

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* clean up cargo toml

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-08-07 09:13:38 +00:00
Weny Xu
4a3982ca60 chore: use configData (#4522)
* chore: use `configData`

* chore: add an empty line
2024-08-07 07:43:04 +00:00
Yingwen
559219496d ci: fix windows temp path (#4518) 2024-08-06 13:53:12 +00:00
LFC
685aa7dd8f ci: squeeze some disk space for complex fuzz tests (#4519)
* ci: squeeze some disk space for complex fuzz tests

* Update .github/workflows/develop.yml

Co-authored-by: Lei, HUANG <6406592+v0y4g3r@users.noreply.github.com>

---------

Co-authored-by: Lei, HUANG <6406592+v0y4g3r@users.noreply.github.com>
2024-08-06 11:52:34 +00:00
Lei, HUANG
be5364a056 chore: support swcs as the short name for strict window compaction (#4517) 2024-08-06 07:38:07 +00:00
Weny Xu
a25d9f736f chore: set default otlp_endpoint (#4508)
* chore: set default `otlp_endpoint`

* fix: fix ci
2024-08-06 06:48:14 +00:00
dependabot[bot]
2cd4a78f17 build(deps): bump zerovec from 0.10.2 to 0.10.4 (#4335)
Bumps [zerovec](https://github.com/unicode-org/icu4x) from 0.10.2 to 0.10.4.
- [Release notes](https://github.com/unicode-org/icu4x/releases)
- [Changelog](https://github.com/unicode-org/icu4x/blob/main/CHANGELOG.md)
- [Commits](https://github.com/unicode-org/icu4x/commits/ind/zerovec@0.10.4)

---
updated-dependencies:
- dependency-name: zerovec
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Weny Xu <wenymedia@gmail.com>
2024-08-06 00:40:03 +00:00
dependabot[bot]
188e182d75 build(deps): bump zerovec-derive from 0.10.2 to 0.10.3 (#4346)
Bumps [zerovec-derive](https://github.com/unicode-org/icu4x) from 0.10.2 to 0.10.3.
- [Release notes](https://github.com/unicode-org/icu4x/releases)
- [Changelog](https://github.com/unicode-org/icu4x/blob/main/CHANGELOG.md)
- [Commits](https://github.com/unicode-org/icu4x/commits/ind/zerovec-derive@0.10.3)

---
updated-dependencies:
- dependency-name: zerovec-derive
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Weny Xu <wenymedia@gmail.com>
2024-08-05 23:58:30 +00:00
Yingwen
d64cc79ab4 docs: add v0.9.1 bench result (#4511) 2024-08-05 16:53:32 +00:00
discord9
e6cc4df8c8 feat: flow recreate on reboot (#4509)
* feat: flow reboot clean

* refactor: per review

* refactor: per review

* test: sqlness flow reboot
2024-08-05 13:57:48 +00:00
LFC
803780030d fix: too large shadow-rs consts (#4506) 2024-08-05 07:05:14 +00:00
Weny Xu
79f10d0415 chore: reduce fuzz tests in CI (#4505) 2024-08-05 06:56:41 +00:00
Weny Xu
3937e67694 feat: introduce new kafka topic consumer respecting WAL index (#4424)
* feat: introduce new kafka topic consumer respecting WAL index

* chore: fmt

* chore: fmt toml

* chore: add comments

* feat: merge close ranges

* fix: fix unit tests

* chore: fix typos

* chore: use loop

* chore: use unstable sort

* chore: use gt instead of gte

* chore: add comments

* chore: rename to `current_entry_id`

* chore: apply suggestions from CR

* chore: apply suggestions from CR

* refactor: minor refactor

* chore: apply suggestions from CR
2024-08-05 06:56:25 +00:00
Weny Xu
4c93fe6c2d chore: bump rust-postgres to 0.7.11 (#4504) 2024-08-05 04:26:46 +00:00
LFC
c4717abb68 chore: bump shadow-rs version to set the path to find the correct git repo (#4494) 2024-08-05 02:24:12 +00:00
shuiyisong
3b701d8f5e test: more on processors (#4493)
* test: add date test

* test: add epoch test

* test: add letter test and complete some others

* test: add urlencoding test

* chore: typo
2024-08-04 08:29:31 +00:00
Weny Xu
cb4cffe636 chore: bump opendal version to 0.48 (#4499) 2024-08-04 00:46:04 +00:00
Ruihang Xia
cc7f33c90c fix(tql): avoid unwrap on parsing tql query (#4502)
* fix(tql): avoid unwrap on parsing tql query

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add unit test

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-08-03 20:58:53 +00:00
Ruihang Xia
fe1cfbf2b3 fix: partition column with mixed quoted and unquoted idents (#4491)
* fix: partition column with mixed quoted and unquoted idents

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* update error message

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-08-02 09:06:31 +00:00
Yingwen
ded874da04 feat: enlarge default page cache size (#4490) 2024-08-02 07:24:20 +00:00
Lei, HUANG
fe2d29a2a0 chore: bump version v0.9.1 (#4486)
Update package versions to 0.9.1

 - Bump version for multiple packages from 0.9.0 to 0.9.1 in Cargo.lock
2024-08-02 07:10:05 +00:00
Yingwen
b388829a96 fix: avoid total size overflow (#4487)
feat: avoid total size overflow
2024-08-02 06:16:37 +00:00
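To illustrate the overflow class being avoided (the actual code paths differ): when summing sizes reported as u64, a saturating or checked addition keeps a bad input from silently wrapping the running total.

```rust
fn main() {
    // Sketch only: accumulate sizes with saturating arithmetic so the total
    // cannot wrap around on overflow.
    let sizes: [u64; 3] = [u64::MAX - 10, 100, 7];
    let total = sizes.iter().fold(0u64, |acc, s| acc.saturating_add(*s));
    assert_eq!(total, u64::MAX);
}
```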
zyy17
8e7c027bf5 ci: make docker image args configurable from env vars (#4484)
refactor: make docker image args configurable from env vars
2024-08-02 03:17:09 +00:00
Lei, HUANG
9d5d7c1f9a feat(compaction): add file number limits to TWCS compaction (#4481)
* Add file number limits to TWCS compaction

 - Introduce `max_active_window_files` and `max_inactive_window_files` to `TwcsOptions`.

* Add max active/inactive window files options to mito engine config

* Add Debug derive to TwcsPicker and implement max file enforcement logging in TWCS compaction

* fix: clippy
2024-08-01 12:42:09 +00:00
ZonaHe
efe5eeef14 feat: update dashboard to v0.5.4 (#4483)
Co-authored-by: ZonaHex <ZonaHex@users.noreply.github.com>
2024-08-01 12:19:38 +00:00
Ruihang Xia
ca54b05be3 feat: time poll elapsed for RegionScan plan (#4482)
* feat: time poll elapsed for RegionScan plan

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* also record await time

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-08-01 12:19:15 +00:00
Jeremyhi
d67314789c feat: export all schemas and data at once in export tool (#4478)
* feat: export all schemas and data at once

* feat: introduce export all to export schemas and data at once

* feat: default value for target

* feat: refactor export target

* chore: fix unit test
2024-08-01 09:14:44 +00:00
Yingwen
6c4b8b63a5 fix: notify flush receiver after write buffer is released (#4476)
* fix: notify the worker after write buffer is released

* feat: worker level region count
2024-08-01 07:15:36 +00:00
Jeremyhi
62a0defd63 feat: improve extract hints (#4479) 2024-08-01 07:06:13 +00:00
Jeremyhi
291d9d55a4 feat: hint options for gRPC insert (#4454)
* feat: hint options for gRPC insert

* chore: unit test for extract_hints

* feat: add integration test for grpc hint

* test: add integration test for hints
2024-08-01 02:59:38 +00:00
Weny Xu
90301a6250 fix: generate unique timestamp for inserting tests (#4472) 2024-07-31 12:19:43 +00:00
shuiyisong
c66d3090b6 fix: prometheus api only returns 200 (#4471)
fix: prometheus api returns http status other than 200
2024-07-31 07:42:50 +00:00
dennis zhuang
656050722c fix: overflow when parsing default value with negative numbers (#4459)
* fix: overflow when parsing default value with negative numbers

* test: adds sqlness test
2024-07-31 07:41:49 +00:00
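The overflow class here is worth spelling out: if a parser handles the sign and the magnitude separately, the most negative value of a signed type cannot be represented before negation. A standalone illustration (not the actual parser code):

```rust
fn main() {
    let literal = "-9223372036854775808"; // i64::MIN

    // Naive approach: strip the sign, parse the magnitude, negate afterwards.
    // The magnitude is one larger than i64::MAX, so this already fails.
    let magnitude = literal.trim_start_matches('-').parse::<i64>();
    assert!(magnitude.is_err());

    // Safe approach: let the integer parser see the sign.
    let value: i64 = literal.parse().unwrap();
    assert_eq!(value, i64::MIN);
}
```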
Ning Sun
b741a7181b feat: track channels with query context and w/rcu (#4448)
* feat: add source channel to meter recorders

* feat: provide channel for query context

* fix: testing and extension get for query context

* chore: revert cargo toml structure changes

* fix: querycontext modification for prometheus and pipeline

* chore: switch git dependency to main branches

* chore: remove TODO

* refactor: rename other to unknown

---------

Co-authored-by: shuiyisong <113876041+shuiyisong@users.noreply.github.com>
2024-07-31 07:30:50 +00:00
Weny Xu
dd23d47743 chore(ci): bring back chaos tests (#4456)
* Revert "chore: temporarily disable fuzz chaos tests (#4457)"

This reverts commit f0c953f84a.

* chore: update config

* Update .github/actions/setup-greptimedb-cluster/with-remote-wal.yaml

Co-authored-by: Ruihang Xia <waynestxia@gmail.com>

---------

Co-authored-by: Ruihang Xia <waynestxia@gmail.com>
2024-07-31 07:29:31 +00:00
Ran Miller
80aaa7725e docs(contributing): replace expired links (#4468) 2024-07-31 06:11:30 +00:00
Ran Miller
c24de8b908 refactor(servers): improve postgres error message (#4463)
* refactor(servers): improve postgres error message

* refactor(servers): remove numerical representation of ErrorSeverity
2024-07-31 06:06:15 +00:00
Yingwen
f382a7695f perf: reduce lock scope and improve log (#4453)
* feat: refine logs for scan

* feat: improve build parts and unordered scan metrics

* feat: change to debug log

* fix: release lock before reading part

* test: replace region id

* test: fix sqlness

* chore: add todo

Co-authored-by: dennis zhuang <killme2008@gmail.com>

---------

Co-authored-by: dennis zhuang <killme2008@gmail.com>
2024-07-31 04:07:34 +00:00
Jeremyhi
1ea43da9ea feat: default export catalog name (#4464)
* feat: default export catalog name

* chore: default catalog name
2024-07-31 03:39:39 +00:00
dennis zhuang
6113f46284 docs: tweak readme (#4465) 2024-07-31 02:35:29 +00:00
LFC
6d8a502430 chore: add more metrics about parquet and cache (#4410)
* chore: add more metrics about parquet and cache

* resolve PR comments

* resolve PR comments

* resolve PR comments

* resolve PR comments
2024-07-30 12:01:49 +00:00
Ruihang Xia
2d992f4f12 fix: check_partition uses unqualified name (#4452)
* fix: check_partition uses unqualified name

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* update sqlness result

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-07-30 11:28:28 +00:00
Ruihang Xia
7daf24c47f feat: remove dedicated runtime for grpc, mysql and pg protocols (#4436)
* feat: remove dedicated runtime for grpc, mysql and pg protocols

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* remove other runtimes

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* spawn compact task into compact_runtime

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* refine naming

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* Update src/servers/tests/mysql/mysql_server_test.rs

Co-authored-by: Zhenchi <zhongzc_arch@outlook.com>

* fix clippy

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* turn off the fuzz test matrix fail-fast option

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* chore: update rt config for ci tests

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
Co-authored-by: Zhenchi <zhongzc_arch@outlook.com>
Co-authored-by: Weny Xu <wenymedia@gmail.com>
2024-07-30 06:17:58 +00:00
shuiyisong
567f5105bf fix: missing pre_write check on prometheus remote write (#4460)
fix: missing pre_write check on prometheus remote write
2024-07-30 04:55:19 +00:00
Yingwen
78962015dd ci: keep sqlness log by default (#4449)
* ci: keep sqlness log by default

* chore: not preserve state in makefile by default

* ci: use make
2024-07-29 17:11:24 +00:00
taobo
1138f32af9 feat: support setting time range in Copy From statement (#4405)
* feat: support setting time range in Copy From statement

* test: add batch_filter_test

* fix: ts data type inconsistent error

* test: add sqlness test for copy from with statement

* fix: sqlness result error

* fix: cr comments
2024-07-29 16:55:19 +00:00
shuiyisong
53fc14a50b fix: use status code to http status mapping in error IntoResponse (#4455) 2024-07-29 16:37:04 +00:00
Ruihang Xia
1895a5478b feat: track prometheus HTTP API's query latency (#4458)
* feat: track prometheus HTTP API's query latency

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* update grafana config

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* chore: Update src/servers/src/metrics.rs

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
Co-authored-by: Yingwen <realevenyag@gmail.com>
2024-07-29 15:00:09 +00:00
Weny Xu
f0c953f84a chore: temporarily disable fuzz chaos tests (#4457) 2024-07-29 13:23:40 +00:00
zyy17
1a38f36d2d refactor!: Remove Mode from FrontendOptions (#4401)
refactor: remove `Mode` from `FrontendOptions`

Signed-off-by: zyy17 <zyylsxm@gmail.com>
2024-07-29 06:57:01 +00:00
Zhenchi
cb94bd45d3 fix(fulltext-search): prune rows in row group forget to take remainder (#4447)
* fix(fulltext-search): prune rows in row group forget to take remainder

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* test: add unit test

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

---------

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>
2024-07-29 06:20:07 +00:00
Ning Sun
b298b35b3b feat: show root cause and db name on the error line (#4442)
* feat: show root cause on the error line

* feat: show root error for grpc

* feat: add error information for http error

* feat: add db information on error mysql/postgres logs
2024-07-29 03:59:42 +00:00
Weny Xu
164232e073 fix: use heartbeat runtime instead of background runtime (#4445) 2024-07-29 03:29:30 +00:00
JohnsonLee
9a5fa49955 feat: support pg_namespace, pg_class and related psql command (#4428)
* feat: add function 'pg_catalog.pg_table_is_visible'

* feat: add 'pg_class' and 'pg_namespace', now we can run '\d' and '\dt'!

* refactor: move memory_table::tables to utils::tables

* refactor: move out predicate to system_schema to reuse it

* feat: predicates pushdown

* test: add pg_namespace, pg_class related sqlness test

* fix: typos and license header

* fix: sqlness test

* refactor: use `expect` instead of `unwrap` here

* refactor: remove the `information_schema::utils` mod

* doc: make the comment in pg_get_userbyid more precise

* doc: add TODO and comment in pg_catalog

* fix: typo

* fix: sqlness

* doc: change to comment on PGClassBuilder to TODO
2024-07-28 12:04:54 +00:00
dennis zhuang
92d6d4e64a docs: update project status (#4440)
* docs: update project status

* docs: update project status
2024-07-27 05:24:09 +00:00
discord9
021ec7b6ac feat(flow): flush_flow function (#4416)
* refactor: df err variant

* WIP

* chore: update proto version

* chore: revert mistaken rust-toolchain

* feat(WIP): added FlowService to QueryEngine

* refactor: move flow service to operator

* refactor: flush use flow name not id

* refactor: use full path in macro

* feat: flush flow

* feat: impl flush flow

* chore: remove unused

* chore: meaningful response

* chore: remove unused

* chore: clippy

* fix: flush_flow with proper blocking

* test: sqlness tests added back for flow

* test: better predicate for flush_flow

* refactor: rwlock

* fix: flush lock

* fix: flush lock write then drop

* test: add a new flow sqlness test

* fix: sqlness testcase

* chore: style

---------

Co-authored-by: dennis zhuang <killme2008@gmail.com>
2024-07-26 23:04:13 +00:00
dennis zhuang
0710e6ff36 fix: remove to_timezone function (#4439)
fix: remove to_timezone, it doesn't make sense
2024-07-26 07:40:07 +00:00
dennis zhuang
db3a07804e fix: information_schema tables and views column value (#4438) 2024-07-26 07:39:58 +00:00
Lei, HUANG
bdd3d2d9ce chore: add dynamic cache size adjustment for InvertedIndexConfig (#4433)
* Add dynamic cache size adjustment for InvertedIndexConfig

* Increase cache sizes in integration tests for HTTP

 - Updated `metadata_cache_size` from 32MiB to 64MiB

* Remove cache size settings from config and update drop_lines_with_inconsistent_results function to handle them

* Add cache size configurations for inverted index metadata and content

 - Introduced `metadata_cache_size` with a default of 64MiB.
 - Introduced `content_cache_size` with a default of 128MiB.

* Add cache size configuration options for Mito engine's inverted index
2024-07-26 03:36:20 +00:00
zyy17
b81d3a28e6 refactor: add RetryInterceptor to print detailed error (#4434) 2024-07-25 11:52:28 +00:00
Weny Xu
89b86c87a2 chore: add docs for config file (#4432) 2024-07-25 08:11:10 +00:00
Lei, HUANG
0b0ed03ee6 fix(metrics): RowGroupLastRowCachedReader metrics (#4418)
Refactor cache hit/miss logic and update metrics in mito2:

 - Simplify cache retrieval logic in CacheManager by removing inline update_hit_miss function call.
 - Add separate functions for incrementing cache hit and miss metrics.
 - Update RowGroupLastRowCachedReader to use new cache hit/miss functions and refactor to new helper methods for creating Hit and Miss variants.
2024-07-25 06:45:43 +00:00
dennis zhuang
ea4a71b387 docs: update readme (#4431) 2024-07-25 06:17:45 +00:00
dennis zhuang
4cd5ec7769 docs: update readme (#4430) 2024-07-25 02:42:18 +00:00
Ruihang Xia
c8f4a85720 chore: update grafana dashboard to reflect recent metric changes (#4417)
* chore: update grafana dashboard to reflect recent metric changes

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* chore: add a blank line at the end

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
Co-authored-by: dennis zhuang <killme2008@gmail.com>
2024-07-24 20:05:44 +00:00
discord9
024dac8171 chore: add a compile cfg for python in cmd package (#4406)
* chore: add a compile cfg for python

* fix: feature gate additive turn off default features in workspace&add cfg in place

* chore: remove unused in different cfg
2024-07-24 20:03:53 +00:00
Ran Miller
918be099cd docs(common_error): format enum StatusCode docs (#4427)
* fix: format comments end with . symbol
* docs: add comment for RegionReadonly
* fix: comment error for DatabaseAlreadyExists
2024-07-24 15:54:35 +00:00
Zhenchi
91dbac4141 fix(fulltext-index): clean up 0-value timer (#4423)
Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>
2024-07-24 15:03:36 +00:00
Ran Miller
e935bf7574 refactor: Remove PhysicalOptimizer and LogicalOptimizer trait (#4426)
* refactor(query): Remove LogicalOptimizer trait

* refactor(query): Remove PhysicalOptimizer trait
2024-07-24 13:01:44 +00:00
Ran Miller
f7872654cc refactor(query): Remove PhysicalPlanner trait (#4412) 2024-07-24 03:06:46 +00:00
shuiyisong
547730a467 chore: add metrics for log ingestion (#4411)
* chore: add metrics for log ingestion

* chore: record result as well
2024-07-23 08:05:11 +00:00
Ning Sun
49f22f0fc5 fix: add back AuthBackend which is required by custom auth backend (#4409) 2024-07-23 05:35:29 +00:00
zyy17
2ae2a6674e refactor: add get_storage_path() and get_catalog_and_schema() (#4397)
refactor: add get_storage_path() and get_catalog_and_schema()
2024-07-20 01:55:48 +00:00
Lei, HUANG
c8cf3b1677 fix(wal): handle WAL deletion on region drop (#4400)
Add LogStore trait bound to RegionWorkerLoop and handle WAL deletion on region drop.
2024-07-19 13:24:10 +00:00
Yingwen
7aae19aa8b fix: dictionary key type use u32 (#4396)
* fix: dictionary key type use u32

* fix: fix error while reading content

* fix: bulk memtable dictionary type
2024-07-19 09:51:29 +00:00
Jeremyhi
b90267dd80 feat: export database data (#4382)
* feat: export database data

* feat: export data with time range

* feat: refactor the data dir

* feat: by comment
2024-07-19 09:29:45 +00:00
discord9
9fa9156bde feat: FLOWS table in information_schema&SHOW FLOWS (#4386)
* feat(WIP): flow info table

refactor: better err handling&log

feat: add flow metadata to info schema provider

feat(WIP): info_schema.flows

feat: info_schema.flows table

* fix: err after rebase

* fix: wrong comparison op

* feat: SHOW FLOWS&tests

* refactor: per review

* chore: unused

* refactor: json error

* chore: per review

* test: sqlness

* chore: rm inline error

* refactor: per review
2024-07-19 09:29:36 +00:00
zyy17
ce900e850a fix: user provider can't be configured by config file or environment variables (#4398) 2024-07-19 08:41:29 +00:00
zyy17
5274c5a407 refactor: add &mut Plugins argument in plugins setup api and remove unnecessary mut (#4389)
refactor: add '&mut Plugins' argument in plugins setup api and remove unnecessary mut

Signed-off-by: zyy17 <zyylsxm@gmail.com>
2024-07-19 08:12:06 +00:00
Yingwen
0b13ac6e16 ci: disable auto review (#4387) 2024-07-18 08:03:37 +00:00
shuiyisong
8ab6136d1c chore: support pattern as pipeline key name (#4368)
* chore: add pattern to processor key name

* fix: typo

* refactor: test
2024-07-18 03:32:26 +00:00
Weny Xu
e39f49fe56 fix: ensure keep alive is completed in time (#4349)
* fix: ensure keep alive is completed in time

* chore: apply suggestions from CR

* chore: use write runtime

* refactor: set META_LEASE_SECS to 5

* chore: set etcd replicas to 1

* chore: apply suggestions from CR

* chore: apply suggestions from CR

* fix: set `MissedTickBehavior::Delay`

* chore: apply suggestions from CR
2024-07-17 06:14:45 +00:00
discord9
c595a56ac8 test(flow): ignore flow tests for now (#4377)
* tests: ignore flow tests for now

* chore: typo

* test: remove file altogether

* fix: last_value sqlness test regex

* fix: last_value part REDACTED
2024-07-16 10:07:21 +00:00
Yingwen
d6c7b848da chore: bump version to v0.9.0 (#4376)
* chore: bump version to v0.9.0

* chore: format
2024-07-16 07:50:45 +00:00
Lei, HUANG
2010a2a33d feat: Add caching for last row reader and expose cache manager (#4375)
* Add caching for last row reader and expose cache manager
 - Implement `RowGroupLastRowCachedReader` to handle cache hits and misses for last row reads.

* Add projection field to SelectorResultValue and refactor RowGroupLastRowReader

 - Introduced `projection` field in `SelectorResultValue` to store projection indices.
2024-07-16 07:13:39 +00:00
dennis zhuang
be3ea0fae7 feat: improve datafusion external error and mysql error (#4362)
* feat: improve datafusion external error and mysql error

* chore: address CR comments and fix tests

---------

Co-authored-by: evenyag <realevenyag@gmail.com>
2024-07-16 07:01:09 +00:00
Lei, HUANG
7b28da277d refactor: LastRowReader to use LastRowSelector (#4374)
Refactor LastRowReader to use LastRowSelector
 - Replaced `last_batch` in `LastRowReader` with `LastRowSelector`.
2024-07-16 03:47:41 +00:00
discord9
b2c5f8eefa test: more sleep when flow insert makes it serial (#4373)
* tests: more sleep

* fix: wait more time

* fix: update result file
2024-07-16 03:36:31 +00:00
JohnsonLee
072d7c2022 feat: introduce 'pg_catalog.pg_type' (#4332)
* WIP: pg_catalog

* refactor: move memory_table to crate public level to reuse it in pgcatalog

* refactor: new system_schema mod to manage implementation of information_schema and pg_catalog

* feat: pg_catalog.pg_type

* fix: remove unused code to avoid warning

* test: add pg_catalog sqlness test

* feat: pg_catalog_cache in system_catalog

* fix: integration test

* test: rollback unit test

* refactor: mix pg_catalog table_id with old ones

* fix: add todo information

* tests: rerun sqlness

---------

Co-authored-by: johnsonlee <johnsonlee@localhost.localdomain>
2024-07-15 17:41:08 +00:00
Yingwen
7900367433 test: replace unstable output of last value test (#4371)
test: replace unstable output
2024-07-15 16:51:16 +00:00
Lei, HUANG
9fbc4ba649 feat: add PruneReader for optimized row filtering (#4370)
* Add PruneReader for optimized row filtering and error handling

 - Introduced `PruneReader` to replace `RowGroupReader` for optimized row filtering.

* Make ReaderMetrics fields public for external access

* Add row selection support to SeqScan and FileRange readers

 - Updated `SeqScan::build_part_sources` to accept an optional `TimeSeriesRowSelector`.

* Refactor `scan_region.rs` to remove unnecessary cloning of `series_row_selector`. Enhance `file_range.rs` by adding `select_all` method to check if all rows in a row group are selected, and update the logic in `reader` method to use `LastRowReader` only when all rows are
 selected and no DELETE operations are present.

* Enhance PruneReader and ParquetReader with reset functionality and metrics handling; made the Source enum in prune.rs public.

* chore: Update src/mito2/src/sst/parquet/reader.rs

---------

Co-authored-by: Yingwen <realevenyag@gmail.com>
2024-07-15 14:23:34 +00:00
Yingwen
2e7b12c344 feat: add a cache for last value result in row group (#4369)
* feat: add selector result cache to cache manager

* feat: expose config
2024-07-15 12:33:36 +00:00
discord9
2b912d93fb feat: flow perf&fix df func call (#4347)
* feat: flow perf&fix df func call

feat: make source sender `send` non-blocking

feat: better control of flow worker freq

feat: support transform interval

fix: const folding df func args&tests

tests: update cast const fold

chore: adjust flow work's freq

refactor: batch split

feat: adaptive run freq flow worker&check for errors

chore: better debug log

* refactor: per review

* chore: per zc's review

* chore: per bot review

* chore: remove some completed `TODO`s

* docs: add comments for a test
2024-07-15 09:20:04 +00:00
Zhenchi
04ac0c8da0 feat(fulltext_index): integrate full-text indexer with parquet reader (#4348)
* feat(fulltext_index): integrate full-text indexer with parquet reader

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* disable reload

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* address comments

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* fix: range allow exceeding total row

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* test: unit tests in index

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* test: prune row groups

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* chore: rename creator

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* test: sst fulltext index

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* chore: address comment

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

---------

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>
2024-07-15 08:14:44 +00:00
dennis zhuang
64cad4e891 feat: tweak error and status codes (#4359)
* feat: tweak status codes

* fix: typo

* fix: by cr comments
2024-07-15 07:50:16 +00:00
Yingwen
20d9c0a345 fix: scan hint checks order asc (#4365)
* fix: order by asc check

* feat: print selector in explain

* test: move last_value opt test to standalone

* test: sqlness remove space

* test: update regex for datetime

* test: fix partitioning

* chore: update comment

Co-authored-by: Ruihang Xia <waynestxia@gmail.com>

---------

Co-authored-by: Ruihang Xia <waynestxia@gmail.com>
2024-07-15 07:49:14 +00:00
Lanqing Yang
9501318ce5 feat: support show views statement (#4360)
This commit enables the SHOW VIEWS statement, which displays a list of view names.
2024-07-15 07:24:27 +00:00
Ning Sun
b8bd8456f0 fix: remove path label for cache store (#4336)
* fix: remove path label for cache store

* fix: ignore path label for intermediatemanager

* refactor: remove unused object store
2024-07-15 03:34:19 +00:00
tison
4b8b04ffa2 chore: update project slogan (#4361)
* chore: update project slogan

Signed-off-by: tison <wander4096@gmail.com>

* keep in oneline

Signed-off-by: tison <wander4096@gmail.com>

* Apply suggestions from code review

Co-authored-by: dennis zhuang <killme2008@gmail.com>

* Update README.md

---------

Signed-off-by: tison <wander4096@gmail.com>
Co-authored-by: dennis zhuang <killme2008@gmail.com>
2024-07-15 03:02:44 +00:00
Lanqing Yang
15ac8116ea feat: adding information_schema.views table (#4342)
This commit introduces the information_schema.views table. The VIEWS table provides information about views in databases.
2024-07-14 09:50:19 +00:00
Zhenchi
377a513690 feat(index): distinguish different types of index metrics (#4337)
Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>
2024-07-14 08:03:09 +00:00
Yingwen
5a1732279b feat: Implement reader that returns the last row of each series (#4354)
* feat: last row reader

* feat: scan use last row reader

* test: test last row selector

* chore: update comment
2024-07-12 14:40:06 +00:00
Ruihang Xia
16075ada67 feat: impl optimizer rule to handle last_value case (#4357)
* feat: impl optimizer rule to handle last_value case

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* rename file

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* update sqlness result

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* Update src/query/src/optimizer/scan_hint.rs

Co-authored-by: Jeremyhi <jiachun_feng@proton.me>

* split methods

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
Co-authored-by: Jeremyhi <jiachun_feng@proton.me>
2024-07-12 14:36:23 +00:00
shuiyisong
67dfdd6c61 feat: support text/plain format for log ingestion (#4300)
* feat: support text/plain format of log input

* refactor: pipeline query and delete using dataframe api

* chore: minor refactor

* refactor: skip jsonify when processing plain text

* refactor: support array(string) as pipeline engine input
2024-07-12 09:17:15 +00:00
irenjj
9f2d53c3df refactor: Remove the StandaloneKafkaConfig struct (#4253)
* refactor: Remove the StandaloneKafkaConfig struct

* remove the redundant assignment

* remove redundant struct

* simplify replication_factor

* add KafkaTopicConfig

* fix check

* fix check

* fix check

* add flatten with

* revert config.md

* fix test params

* fix test param

* fix missing params when provider is kafka

* remove unused files

* remove with prefix

* fix doc

* fix test

* fix clippy
2024-07-12 08:17:18 +00:00
Weny Xu
05c7d3eb42 docs(config): add enable_region_failover option to configuration (#4355)
docs(config): Add enable_region_failover option to configuration
2024-07-12 08:09:25 +00:00
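A small sketch of turning the documented option on. Only the option name `enable_region_failover` comes from the commit; the file name and the top-level placement of the key are assumptions, so check the generated config.md for the exact section:

# Append the option to a metasrv configuration file (placement is an assumption).
cat >> metasrv.toml <<'EOF'
# Fail over regions from an unreachable datanode to healthy ones.
enable_region_failover = true
EOF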
Ruihang Xia
63acc30ce7 perf: fine-tuned plan steps (#4258)
* perf: fine-tuned plan steps

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix clippy

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* handle explain plan

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* handle explain plan again

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix clippy

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-07-12 06:56:13 +00:00
tison
285ffc5850 fix: build info should use build time env var (#4343)
* fix: build info should use build time env var

Signed-off-by: tison <wander4096@gmail.com>

* catch up

Signed-off-by: tison <wander4096@gmail.com>

* fixup lifetime

Signed-off-by: tison <wander4096@gmail.com>

* fixup

Signed-off-by: tison <wander4096@gmail.com>

* fix more

Signed-off-by: tison <wander4096@gmail.com>

---------

Signed-off-by: tison <wander4096@gmail.com>
2024-07-12 03:08:30 +00:00
dennis zhuang
ab22bbac84 feat: impl drop view (#4231)
* feat: impl drop view

* fix: metric name

* fix: comments

* test: add DropViewProcedure test

* test: drop view meets a table

* test: update sqlness tests by drop view

* feat: apply suggestion from AI

* chore: apply suggestion

Co-authored-by: Jeremyhi <jiachun_feng@proton.me>

* chore: apply suggestion

Co-authored-by: Jeremyhi <jiachun_feng@proton.me>

* chore: apply suggestion

Co-authored-by: Jeremyhi <jiachun_feng@proton.me>

* fix: TYPE_NAME for DropFlowProcedure

---------

Co-authored-by: Jeremyhi <jiachun_feng@proton.me>
2024-07-11 19:53:54 +00:00
Weny Xu
7ad248d6f6 fix(config): enable file engine by default (#4345)
* fix: enable file engine by default

* fix: fix tests
2024-07-11 17:35:18 +00:00
Ning Sun
50e4539667 fix: permission denied is 403 (#4350) 2024-07-11 17:16:17 +00:00
Zhenchi
da1ea253ba perf(puffin): not to stage uncompressed blob (#4333)
* feat(puffin): not to stage blob

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* feat: back with compressed blob

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

---------

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>
2024-07-11 12:32:07 +00:00
Lei, HUANG
da0c840261 feat: customize copy to parquet parameter (#4328)
* feat/copy-to-parquet-parameter: enhance the Parquet writer with column-wise configuration; introduce a `column_wise_config` function to customize per-column properties in the Parquet writer.

* feat/copy-to-parquet-parameter: enhance Parquet file format handling for specific data types; add the `ConcreteDataType` import to support data-type-specific handling.

* feat/copy-to-parquet-parameter: refactor the Parquet file format configuration.

* feat/copy-to-parquet-parameter: enhance Parquet file format handling for timestamp columns; disable dictionary encoding and set `DELTA_BINARY_PACKED` encoding for timestamp columns in the Parquet file format configuration.

* feat/copy-to-parquet-parameter: disable dictionary encoding for timestamp columns in the Parquet writer (to optimize for increasing timestamp data) and update the default `max_active_window_runs` in `TwcsOptions`.

* feat/copy-to-parquet-parameter: update compaction settings in tests:

 - `test_compaction_region` now includes the new compaction options `compaction.type`, `compaction.twcs.max_active_window_runs`, and `compaction.twcs.max_inactive_window_runs`.

 - `test_merge_mode_compaction` uses `compaction.twcs.max_active_window_runs` and `compaction.twcs.max_inactive_window_runs` instead of `max_active_window_files` and `max_inactive_window_files`.
2024-07-10 07:52:30 +00:00
zyy17
20417e646a ci: add flownode in docker compose (#4306) 2024-07-10 06:37:21 +00:00
Ruihang Xia
9271b3b7bd docs: remove cargo test workspace command (#4325)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-07-10 06:36:44 +00:00
Ruihang Xia
374cfe74bf fix(sqlness): relax start time regex to match various precisions (#4326)
* fix(sqlness): relax start time regex to match various precisions

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* exclude ip pattern

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-07-10 06:35:55 +00:00
Weny Xu
52a9a748a1 revert: lz4 compression (#4329)
* Revert "test: revert lz4 compression"

This reverts commit 180dda13fa.

* refactor: remove compression field
2024-07-10 04:24:40 +00:00
dennis zhuang
33ed745049 feat: show create view and creating view with columns (#4086)
* feat: parse column names when creating view

* feat: save the view definition into view info

* feat: supports view columns and show create view

* feat: save plan columns for validation

* fix: typo

* chore: comments and style

* chore: apply suggestions

* test: assert CreateView display result

* chore: style

Co-authored-by: Weny Xu <wenymedia@gmail.com>

* chore: avoid the clone

Co-authored-by: Weny Xu <wenymedia@gmail.com>

* fix: compile error after rebasing

---------

Co-authored-by: Weny Xu <wenymedia@gmail.com>
2024-07-09 18:35:11 +00:00
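Combining this commit with the DROP VIEW support from #4231 above, a view's lifecycle can be exercised end to end. The endpoint and the monitor/cpu_view names are placeholders; the explicit column list is the syntax this commit adds:

# Create a view with explicit column names, inspect its stored definition, then drop it.
mysql -h 127.0.0.1 -P 4002 -e "CREATE VIEW cpu_view (ts, cpu) AS SELECT ts, cpu_usage FROM monitor;"
mysql -h 127.0.0.1 -P 4002 -e "SHOW CREATE VIEW cpu_view;"
mysql -h 127.0.0.1 -P 4002 -e "DROP VIEW cpu_view;"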
Yingwen
458e5d7e66 feat: add TimeSeriesRowSelector hint (#4327)
* feat: Add TimeSeriesRowSelector

* feat: scan allow specify series row selector

* chore: Update comment
2024-07-09 12:29:47 +00:00
765 changed files with 40410 additions and 14615 deletions

.coderabbit.yaml (new file, 15 lines)
View File

@@ -0,0 +1,15 @@
# yaml-language-server: $schema=https://coderabbit.ai/integrations/schema.v2.json
language: "en-US"
early_access: false
reviews:
profile: "chill"
request_changes_workflow: false
high_level_summary: true
poem: true
review_status: true
collapse_walkthrough: false
auto_review:
enabled: false
drafts: false
chat:
auto_reply: true

View File

@@ -14,10 +14,11 @@ GT_AZBLOB_CONTAINER=AZBLOB container
GT_AZBLOB_ACCOUNT_NAME=AZBLOB account name
GT_AZBLOB_ACCOUNT_KEY=AZBLOB account key
GT_AZBLOB_ENDPOINT=AZBLOB endpoint
# Settings for gcs test
GT_GCS_BUCKET = GCS bucket
# Settings for gcs test
GT_GCS_BUCKET = GCS bucket
GT_GCS_SCOPE = GCS scope
GT_GCS_CREDENTIAL_PATH = GCS credential path
GT_GCS_CREDENTIAL_PATH = GCS credential path
GT_GCS_CREDENTIAL = GCS credential
GT_GCS_ENDPOINT = GCS end point
# Settings for kafka wal test
GT_KAFKA_ENDPOINTS = localhost:9092

View File

@@ -17,6 +17,12 @@ inputs:
description: Enable dev mode, only build standard greptime
required: false
default: "false"
image-namespace:
description: Image Namespace
required: true
image-registry:
description: Image Registry
required: true
working-dir:
description: Working directory to build the artifacts
required: false
@@ -31,8 +37,8 @@ runs:
run: |
cd ${{ inputs.working-dir }} && \
make run-it-in-container BUILD_JOBS=4 \
IMAGE_NAMESPACE=i8k6a5e1/greptime \
IMAGE_REGISTRY=public.ecr.aws
IMAGE_NAMESPACE=${{ inputs.image-namespace }} \
IMAGE_REGISTRY=${{ inputs.image-registry }}
- name: Upload sqlness logs
if: ${{ failure() && inputs.disable-run-tests == 'false' }} # Only upload logs when the integration tests failed.
@@ -51,8 +57,8 @@ runs:
artifacts-dir: greptime-linux-${{ inputs.arch }}-pyo3-${{ inputs.version }}
version: ${{ inputs.version }}
working-dir: ${{ inputs.working-dir }}
image-registry: public.ecr.aws
image-namespace: i8k6a5e1/greptime
image-registry: ${{ inputs.image-registry }}
image-namespace: ${{ inputs.image-namespace }}
- name: Build greptime without pyo3
if: ${{ inputs.dev-mode == 'false' }}
@@ -64,8 +70,8 @@ runs:
artifacts-dir: greptime-linux-${{ inputs.arch }}-${{ inputs.version }}
version: ${{ inputs.version }}
working-dir: ${{ inputs.working-dir }}
image-registry: public.ecr.aws
image-namespace: i8k6a5e1/greptime
image-registry: ${{ inputs.image-registry }}
image-namespace: ${{ inputs.image-namespace }}
- name: Clean up the target directory # Clean up the target directory for the centos7 base image, or it will still use the objects of last build.
shell: bash
@@ -82,8 +88,8 @@ runs:
artifacts-dir: greptime-linux-${{ inputs.arch }}-centos-${{ inputs.version }}
version: ${{ inputs.version }}
working-dir: ${{ inputs.working-dir }}
image-registry: public.ecr.aws
image-namespace: i8k6a5e1/greptime
image-registry: ${{ inputs.image-registry }}
image-namespace: ${{ inputs.image-namespace }}
- name: Build greptime on android base image
uses: ./.github/actions/build-greptime-binary
@@ -94,5 +100,5 @@ runs:
version: ${{ inputs.version }}
working-dir: ${{ inputs.working-dir }}
build-android-artifacts: true
image-registry: public.ecr.aws
image-namespace: i8k6a5e1/greptime
image-registry: ${{ inputs.image-registry }}
image-namespace: ${{ inputs.image-namespace }}

View File

@@ -4,9 +4,6 @@ inputs:
arch:
description: Architecture to build
required: true
rust-toolchain:
description: Rust toolchain to use
required: true
cargo-profile:
description: Cargo profile to build
required: true
@@ -43,10 +40,9 @@ runs:
brew install protobuf
- name: Install rust toolchain
uses: dtolnay/rust-toolchain@master
uses: actions-rust-lang/setup-rust-toolchain@v1
with:
toolchain: ${{ inputs.rust-toolchain }}
targets: ${{ inputs.arch }}
target: ${{ inputs.arch }}
- name: Start etcd # For integration tests.
if: ${{ inputs.disable-run-tests == 'false' }}
@@ -62,12 +58,13 @@ runs:
# Get proper backtraces in mac Sonoma. Currently there's an issue with the new
# linker that prevents backtraces from getting printed correctly.
#
# <https://github.com/rust-lang/rust/issues/113783>
# <https://github.com/rust-lang/rust/issues/113783>
- name: Run integration tests
if: ${{ inputs.disable-run-tests == 'false' }}
shell: bash
env:
env:
CARGO_BUILD_RUSTFLAGS: "-Clink-arg=-Wl,-ld_classic"
SQLNESS_OPTS: "--preserve-state"
run: |
make test sqlness-test
@@ -81,7 +78,7 @@ runs:
- name: Build greptime binary
shell: bash
env:
env:
CARGO_BUILD_RUSTFLAGS: "-Clink-arg=-Wl,-ld_classic"
run: |
make build \

View File

@@ -4,9 +4,6 @@ inputs:
arch:
description: Architecture to build
required: true
rust-toolchain:
description: Rust toolchain to use
required: true
cargo-profile:
description: Cargo profile to build
required: true
@@ -28,10 +25,9 @@ runs:
- uses: arduino/setup-protoc@v3
- name: Install rust toolchain
uses: dtolnay/rust-toolchain@master
uses: actions-rust-lang/setup-rust-toolchain@v1
with:
toolchain: ${{ inputs.rust-toolchain }}
targets: ${{ inputs.arch }}
target: ${{ inputs.arch }}
components: llvm-tools-preview
- name: Rust Cache
@@ -40,7 +36,7 @@ runs:
- name: Install Python
uses: actions/setup-python@v5
with:
python-version: '3.10'
python-version: "3.10"
- name: Install PyArrow Package
shell: pwsh
@@ -62,13 +58,14 @@ runs:
env:
RUSTUP_WINDOWS_PATH_ADD_BIN: 1 # Workaround for https://github.com/nextest-rs/nextest/issues/1493
RUST_BACKTRACE: 1
SQLNESS_OPTS: "--preserve-state"
- name: Upload sqlness logs
if: ${{ failure() }} # Only upload logs when the integration tests failed.
uses: actions/upload-artifact@v4
with:
name: sqlness-logs
path: /tmp/greptime-*.log
path: C:\Users\RUNNER~1\AppData\Local\Temp\sqlness*
retention-days: 3
- name: Build greptime binary

View File

@@ -2,7 +2,7 @@ name: Setup Etcd cluster
description: Deploy Etcd cluster on Kubernetes
inputs:
etcd-replicas:
default: 3
default: 1
description: "Etcd replicas"
namespace:
default: "etcd-cluster"
@@ -18,6 +18,8 @@ runs:
--set replicaCount=${{ inputs.etcd-replicas }} \
--set resources.requests.cpu=50m \
--set resources.requests.memory=128Mi \
--set resources.limits.cpu=1000m \
--set resources.limits.memory=2Gi \
--set auth.rbac.create=false \
--set auth.rbac.token.enabled=false \
--set persistence.size=2Gi \

View File

@@ -1,18 +1,13 @@
meta:
config: |-
configData: |-
[runtime]
read_rt_size = 8
write_rt_size = 8
bg_rt_size = 8
global_rt_size = 4
datanode:
config: |-
configData: |-
[runtime]
read_rt_size = 8
write_rt_size = 8
bg_rt_size = 8
global_rt_size = 4
compact_rt_size = 2
frontend:
config: |-
configData: |-
[runtime]
read_rt_size = 8
write_rt_size = 8
bg_rt_size = 8
global_rt_size = 4

View File

@@ -1,29 +1,24 @@
meta:
config: |-
configData: |-
[runtime]
read_rt_size = 8
write_rt_size = 8
bg_rt_size = 8
global_rt_size = 4
[datanode]
[datanode.client]
timeout = "60s"
datanode:
config: |-
configData: |-
[runtime]
read_rt_size = 8
write_rt_size = 8
bg_rt_size = 8
global_rt_size = 4
compact_rt_size = 2
[storage]
cache_path = "/data/greptimedb/s3cache"
cache_capacity = "256MB"
frontend:
config: |-
configData: |-
[runtime]
read_rt_size = 8
write_rt_size = 8
bg_rt_size = 8
global_rt_size = 4
[meta_client]
ddl_timeout = "60s"

View File

@@ -1,25 +1,20 @@
meta:
config: |-
configData: |-
[runtime]
read_rt_size = 8
write_rt_size = 8
bg_rt_size = 8
global_rt_size = 4
[datanode]
[datanode.client]
timeout = "60s"
datanode:
config: |-
configData: |-
[runtime]
read_rt_size = 8
write_rt_size = 8
bg_rt_size = 8
global_rt_size = 4
compact_rt_size = 2
frontend:
config: |-
configData: |-
[runtime]
read_rt_size = 8
write_rt_size = 8
bg_rt_size = 8
global_rt_size = 4
[meta_client]
ddl_timeout = "60s"

View File

@@ -1,9 +1,7 @@
meta:
config: |-
configData: |-
[runtime]
read_rt_size = 8
write_rt_size = 8
bg_rt_size = 8
global_rt_size = 4
[wal]
provider = "kafka"
@@ -15,22 +13,19 @@ meta:
[datanode.client]
timeout = "60s"
datanode:
config: |-
configData: |-
[runtime]
read_rt_size = 8
write_rt_size = 8
bg_rt_size = 8
global_rt_size = 4
compact_rt_size = 2
[wal]
provider = "kafka"
broker_endpoints = ["kafka.kafka-cluster.svc.cluster.local:9092"]
linger = "2ms"
frontend:
config: |-
configData: |-
[runtime]
read_rt_size = 8
write_rt_size = 8
bg_rt_size = 8
global_rt_size = 4
[meta_client]
ddl_timeout = "60s"
@@ -43,3 +38,8 @@ objectStorage:
credentials:
accessKeyId: rootuser
secretAccessKey: rootpass123
remoteWal:
enabled: true
kafka:
brokerEndpoints:
- "kafka.kafka-cluster.svc.cluster.local:9092"

View File

@@ -0,0 +1,30 @@
name: Setup PostgreSQL
description: Deploy PostgreSQL on Kubernetes
inputs:
postgres-replicas:
default: 1
description: "Number of PostgreSQL replicas"
namespace:
default: "postgres-namespace"
postgres-version:
default: "14.2"
description: "PostgreSQL version"
storage-size:
default: "1Gi"
description: "Storage size for PostgreSQL"
runs:
using: composite
steps:
- name: Install PostgreSQL
shell: bash
run: |
helm upgrade \
--install postgresql oci://registry-1.docker.io/bitnamicharts/postgresql \
--set replicaCount=${{ inputs.postgres-replicas }} \
--set image.tag=${{ inputs.postgres-version }} \
--set persistence.size=${{ inputs.storage-size }} \
--set postgresql.username=greptimedb \
--set postgresql.password=admin \
--create-namespace \
-n ${{ inputs.namespace }}

View File

@@ -38,7 +38,7 @@ runs:
steps:
- name: Configure AWS credentials
if: startsWith(inputs.runner, 'ec2')
uses: aws-actions/configure-aws-credentials@v2
uses: aws-actions/configure-aws-credentials@v4
with:
aws-access-key-id: ${{ inputs.aws-access-key-id }}
aws-secret-access-key: ${{ inputs.aws-secret-access-key }}

View File

@@ -25,7 +25,7 @@ runs:
steps:
- name: Configure AWS credentials
if: ${{ inputs.label && inputs.ec2-instance-id }}
uses: aws-actions/configure-aws-credentials@v2
uses: aws-actions/configure-aws-credentials@v4
with:
aws-access-key-id: ${{ inputs.aws-access-key-id }}
aws-secret-access-key: ${{ inputs.aws-secret-access-key }}

View File

@@ -12,9 +12,6 @@ on:
name: Build API docs
env:
RUST_TOOLCHAIN: nightly-2024-04-20
jobs:
apidoc:
runs-on: ubuntu-20.04
@@ -23,9 +20,7 @@ jobs:
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1
- run: cargo doc --workspace --no-deps --document-private-items
- run: |
cat <<EOF > target/doc/index.html

View File

@@ -177,6 +177,8 @@ jobs:
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
dev-mode: true # Only build the standard greptime binary.
working-dir: ${{ env.CHECKOUT_GREPTIMEDB_PATH }}
image-registry: ${{ vars.ECR_IMAGE_REGISTRY }}
image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }}
build-linux-arm64-artifacts:
name: Build linux-arm64 artifacts
@@ -206,6 +208,8 @@ jobs:
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
dev-mode: true # Only build the standard greptime binary.
working-dir: ${{ env.CHECKOUT_GREPTIMEDB_PATH }}
image-registry: ${{ vars.ECR_IMAGE_REGISTRY }}
image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }}
release-images-to-dockerhub:
name: Build and push images to DockerHub

View File

@@ -29,9 +29,6 @@ concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
cancel-in-progress: true
env:
RUST_TOOLCHAIN: nightly-2024-04-20
jobs:
check-typos-and-docs:
name: Check typos and docs
@@ -64,9 +61,7 @@ jobs:
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
@@ -82,9 +77,7 @@ jobs:
timeout-minutes: 60
steps:
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@master
with:
toolchain: stable
- uses: actions-rust-lang/setup-rust-toolchain@v1
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
@@ -107,9 +100,7 @@ jobs:
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1
- uses: Swatinem/rust-cache@v2
with:
# Shares across multiple jobs
@@ -141,16 +132,27 @@ jobs:
runs-on: ubuntu-latest
timeout-minutes: 60
strategy:
fail-fast: false
matrix:
target: [ "fuzz_create_table", "fuzz_alter_table", "fuzz_create_database", "fuzz_create_logical_table", "fuzz_alter_logical_table", "fuzz_insert", "fuzz_insert_logical_table" ]
steps:
- name: Remove unused software
run: |
echo "Disk space before:"
df -h
[[ -d /usr/share/dotnet ]] && sudo rm -rf /usr/share/dotnet
[[ -d /usr/local/lib/android ]] && sudo rm -rf /usr/local/lib/android
[[ -d /opt/ghc ]] && sudo rm -rf /opt/ghc
[[ -d /opt/hostedtoolcache/CodeQL ]] && sudo rm -rf /opt/hostedtoolcache/CodeQL
sudo docker image prune --all --force
sudo docker builder prune -a
echo "Disk space after:"
df -h
- uses: actions/checkout@v4
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
@@ -168,7 +170,7 @@ jobs:
name: bins
path: .
- name: Unzip binaries
run: |
run: |
tar -xvf ./bins.tar.gz
rm ./bins.tar.gz
- name: Run GreptimeDB
@@ -192,13 +194,23 @@ jobs:
matrix:
target: [ "unstable_fuzz_create_table_standalone" ]
steps:
- name: Remove unused software
run: |
echo "Disk space before:"
df -h
[[ -d /usr/share/dotnet ]] && sudo rm -rf /usr/share/dotnet
[[ -d /usr/local/lib/android ]] && sudo rm -rf /usr/local/lib/android
[[ -d /opt/ghc ]] && sudo rm -rf /opt/ghc
[[ -d /opt/hostedtoolcache/CodeQL ]] && sudo rm -rf /opt/hostedtoolcache/CodeQL
sudo docker image prune --all --force
sudo docker builder prune -a
echo "Disk space after:"
df -h
- uses: actions/checkout@v4
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
@@ -249,9 +261,7 @@ jobs:
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1
- uses: Swatinem/rust-cache@v2
with:
# Shares across multiple jobs
@@ -262,7 +272,7 @@ jobs:
- name: Build greptime bianry
shell: bash
# `cargo gc` will invoke `cargo build` with specified args
run: cargo gc --profile ci -- --bin greptime
run: cargo gc --profile ci -- --bin greptime
- name: Pack greptime binary
shell: bash
run: |
@@ -276,7 +286,7 @@ jobs:
artifacts-dir: bin
version: current
distributed-fuzztest:
distributed-fuzztest:
name: Fuzz Test (Distributed, ${{ matrix.mode.name }}, ${{ matrix.target }})
runs-on: ubuntu-latest
needs: build-greptime-ci
@@ -284,24 +294,24 @@ jobs:
strategy:
matrix:
target: [ "fuzz_create_table", "fuzz_alter_table", "fuzz_create_database", "fuzz_create_logical_table", "fuzz_alter_logical_table", "fuzz_insert", "fuzz_insert_logical_table" ]
mode:
- name: "Disk"
minio: false
kafka: false
values: "with-disk.yaml"
- name: "Minio"
minio: true
kafka: false
values: "with-minio.yaml"
- name: "Minio with Cache"
minio: true
kafka: false
values: "with-minio-and-cache.yaml"
mode:
- name: "Remote WAL"
minio: true
kafka: true
values: "with-remote-wal.yaml"
steps:
- name: Remove unused software
run: |
echo "Disk space before:"
df -h
[[ -d /usr/share/dotnet ]] && sudo rm -rf /usr/share/dotnet
[[ -d /usr/local/lib/android ]] && sudo rm -rf /usr/local/lib/android
[[ -d /opt/ghc ]] && sudo rm -rf /opt/ghc
[[ -d /opt/hostedtoolcache/CodeQL ]] && sudo rm -rf /opt/hostedtoolcache/CodeQL
sudo docker image prune --all --force
sudo docker builder prune -a
echo "Disk space after:"
df -h
- uses: actions/checkout@v4
- name: Setup Kind
uses: ./.github/actions/setup-kind
@@ -313,13 +323,13 @@ jobs:
uses: ./.github/actions/setup-kafka-cluster
- name: Setup Etcd cluser
uses: ./.github/actions/setup-etcd-cluster
- name: Setup Postgres cluser
uses: ./.github/actions/setup-postgres-cluster
# Prepares for fuzz tests
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
@@ -389,12 +399,12 @@ jobs:
- name: Describe Nodes
if: failure()
shell: bash
run: |
kubectl describe nodes
run: |
kubectl describe nodes
- name: Export kind logs
if: failure()
shell: bash
run: |
run: |
kind export logs /tmp/kind
- name: Upload logs
if: failure()
@@ -406,13 +416,13 @@ jobs:
- name: Delete cluster
if: success()
shell: bash
run: |
run: |
kind delete cluster
docker stop $(docker ps -a -q)
docker rm $(docker ps -a -q)
docker system prune -f
distributed-fuzztest-with-chaos:
distributed-fuzztest-with-chaos:
name: Fuzz Test with Chaos (Distributed, ${{ matrix.mode.name }}, ${{ matrix.target }})
runs-on: ubuntu-latest
needs: build-greptime-ci
@@ -420,12 +430,24 @@ jobs:
strategy:
matrix:
target: ["fuzz_migrate_mito_regions", "fuzz_failover_mito_regions", "fuzz_failover_metric_regions"]
mode:
mode:
- name: "Remote WAL"
minio: true
kafka: true
values: "with-remote-wal.yaml"
steps:
- name: Remove unused software
run: |
echo "Disk space before:"
df -h
[[ -d /usr/share/dotnet ]] && sudo rm -rf /usr/share/dotnet
[[ -d /usr/local/lib/android ]] && sudo rm -rf /usr/local/lib/android
[[ -d /opt/ghc ]] && sudo rm -rf /opt/ghc
[[ -d /opt/hostedtoolcache/CodeQL ]] && sudo rm -rf /opt/hostedtoolcache/CodeQL
sudo docker image prune --all --force
sudo docker builder prune -a
echo "Disk space after:"
df -h
- uses: actions/checkout@v4
- name: Setup Kind
uses: ./.github/actions/setup-kind
@@ -439,13 +461,13 @@ jobs:
uses: ./.github/actions/setup-kafka-cluster
- name: Setup Etcd cluser
uses: ./.github/actions/setup-etcd-cluster
- name: Setup Postgres cluser
uses: ./.github/actions/setup-postgres-cluster
# Prepares for fuzz tests
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
@@ -516,12 +538,12 @@ jobs:
- name: Describe Nodes
if: failure()
shell: bash
run: |
kubectl describe nodes
run: |
kubectl describe nodes
- name: Export kind logs
if: failure()
shell: bash
run: |
run: |
kind export logs /tmp/kind
- name: Upload logs
if: failure()
@@ -533,7 +555,7 @@ jobs:
- name: Delete cluster
if: success()
shell: bash
run: |
run: |
kind delete cluster
docker stop $(docker ps -a -q)
docker rm $(docker ps -a -q)
@@ -556,6 +578,10 @@ jobs:
timeout-minutes: 60
steps:
- uses: actions/checkout@v4
- if: matrix.mode.kafka
name: Setup kafka server
working-directory: tests-integration/fixtures/kafka
run: docker compose -f docker-compose-standalone.yml up -d --wait
- name: Download pre-built binaries
uses: actions/download-artifact@v4
with:
@@ -563,10 +589,6 @@ jobs:
path: .
- name: Unzip binaries
run: tar -xvf ./bins.tar.gz
- if: matrix.mode.kafka
name: Setup kafka server
working-directory: tests-integration/fixtures/kafka
run: docker compose -f docker-compose-standalone.yml up -d --wait
- name: Run sqlness
run: RUST_BACKTRACE=1 ./bins/sqlness-runner ${{ matrix.mode.opts }} -c ./tests/cases --bins-dir ./bins --preserve-state
- name: Upload sqlness logs
@@ -586,17 +608,16 @@ jobs:
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: dtolnay/rust-toolchain@master
- uses: actions-rust-lang/setup-rust-toolchain@v1
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
components: rustfmt
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
# Shares across multiple jobs
shared-key: "check-rust-fmt"
- name: Run cargo fmt
run: cargo fmt --all -- --check
- name: Check format
run: make fmt-check
clippy:
name: Clippy
@@ -607,9 +628,8 @@ jobs:
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: dtolnay/rust-toolchain@master
- uses: actions-rust-lang/setup-rust-toolchain@v1
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
components: clippy
- name: Rust Cache
uses: Swatinem/rust-cache@v2
@@ -633,9 +653,8 @@ jobs:
with:
version: "14.0"
- name: Install toolchain
uses: dtolnay/rust-toolchain@master
uses: actions-rust-lang/setup-rust-toolchain@v1
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
components: llvm-tools-preview
- name: Rust Cache
uses: Swatinem/rust-cache@v2
@@ -665,6 +684,9 @@ jobs:
- name: Setup minio
working-directory: tests-integration/fixtures/minio
run: docker compose -f docker-compose-standalone.yml up -d --wait
- name: Setup postgres server
working-directory: tests-integration/fixtures/postgres
run: docker compose -f docker-compose-standalone.yml up -d --wait
- name: Run nextest cases
run: cargo llvm-cov nextest --workspace --lcov --output-path lcov.info -F pyo3_backend -F dashboard
env:
@@ -681,7 +703,9 @@ jobs:
GT_MINIO_REGION: us-west-2
GT_MINIO_ENDPOINT_URL: http://127.0.0.1:9000
GT_ETCD_ENDPOINTS: http://127.0.0.1:2379
GT_POSTGRES_ENDPOINTS: postgres://greptimedb:admin@127.0.0.1:5432/postgres
GT_KAFKA_ENDPOINTS: 127.0.0.1:9092
GT_KAFKA_SASL_ENDPOINTS: 127.0.0.1:9093
UNITTEST_LOG_DIR: "__unittest_logs"
- name: Codecov upload
uses: codecov/codecov-action@v4

View File

@@ -154,6 +154,8 @@ jobs:
cargo-profile: ${{ env.CARGO_PROFILE }}
version: ${{ needs.allocate-runners.outputs.version }}
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
image-registry: ${{ vars.ECR_IMAGE_REGISTRY }}
image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }}
build-linux-arm64-artifacts:
name: Build linux-arm64 artifacts
@@ -173,6 +175,8 @@ jobs:
cargo-profile: ${{ env.CARGO_PROFILE }}
version: ${{ needs.allocate-runners.outputs.version }}
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
image-registry: ${{ vars.ECR_IMAGE_REGISTRY }}
image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }}
release-images-to-dockerhub:
name: Build and push images to DockerHub

View File

@@ -9,9 +9,6 @@ concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
cancel-in-progress: true
env:
RUST_TOOLCHAIN: nightly-2024-04-20
permissions:
issues: write
@@ -33,6 +30,13 @@ jobs:
aws-region: ${{ vars.AWS_CI_TEST_BUCKET_REGION }}
aws-access-key-id: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }}
- name: Upload sqlness logs
if: failure()
uses: actions/upload-artifact@v4
with:
name: sqlness-logs-kind
path: /tmp/kind/
retention-days: 3
sqlness-windows:
name: Sqlness tests on Windows
@@ -45,19 +49,19 @@ jobs:
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1
- name: Rust Cache
uses: Swatinem/rust-cache@v2
- name: Run sqlness
run: cargo sqlness
run: make sqlness-test
env:
SQLNESS_OPTS: "--preserve-state"
- name: Upload sqlness logs
if: always()
if: failure()
uses: actions/upload-artifact@v4
with:
name: sqlness-logs
path: /tmp/greptime-*.log
path: C:\Users\RUNNER~1\AppData\Local\Temp\sqlness*
retention-days: 3
test-on-windows:
@@ -76,9 +80,8 @@ jobs:
with:
version: "14.0"
- name: Install Rust toolchain
uses: dtolnay/rust-toolchain@master
uses: actions-rust-lang/setup-rust-toolchain@v1
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
components: llvm-tools-preview
- name: Rust Cache
uses: Swatinem/rust-cache@v2
@@ -109,11 +112,7 @@ jobs:
check-status:
name: Check status
needs: [
sqlness-test,
sqlness-windows,
test-on-windows,
]
needs: [sqlness-test, sqlness-windows, test-on-windows]
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
runs-on: ubuntu-20.04
outputs:
@@ -127,9 +126,7 @@ jobs:
notification:
if: ${{ github.repository == 'GreptimeTeam/greptimedb' && always() }} # Not requiring successful dependent jobs, always run.
name: Send notification to Greptime team
needs: [
check-status
]
needs: [check-status]
runs-on: ubuntu-20.04
env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}

View File

@@ -1,12 +1,14 @@
name: Release dev-builder images
on:
push:
branches:
- main
paths:
- rust-toolchain.toml
- 'docker/dev-builder/**'
workflow_dispatch: # Allows you to run this workflow manually.
inputs:
version:
description: Version of the dev-builder
required: false
default: latest
release_dev_builder_ubuntu_image:
type: boolean
description: Release dev-builder-ubuntu image
@@ -28,22 +30,103 @@ jobs:
name: Release dev builder images
if: ${{ inputs.release_dev_builder_ubuntu_image || inputs.release_dev_builder_centos_image || inputs.release_dev_builder_android_image }} # Only manually trigger this job.
runs-on: ubuntu-20.04-16-cores
outputs:
version: ${{ steps.set-version.outputs.version }}
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Configure build image version
id: set-version
shell: bash
run: |
commitShortSHA=`echo ${{ github.sha }} | cut -c1-8`
buildTime=`date +%Y%m%d%H%M%S`
BUILD_VERSION="$commitShortSHA-$buildTime"
RUST_TOOLCHAIN_VERSION=$(cat rust-toolchain.toml | grep -Eo '[0-9]{4}-[0-9]{2}-[0-9]{2}')
IMAGE_VERSION="${RUST_TOOLCHAIN_VERSION}-${BUILD_VERSION}"
echo "VERSION=${IMAGE_VERSION}" >> $GITHUB_ENV
echo "version=$IMAGE_VERSION" >> $GITHUB_OUTPUT
- name: Build and push dev builder images
uses: ./.github/actions/build-dev-builder-images
with:
version: ${{ inputs.version }}
version: ${{ env.VERSION }}
dockerhub-image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
dockerhub-image-registry-token: ${{ secrets.DOCKERHUB_TOKEN }}
build-dev-builder-ubuntu: ${{ inputs.release_dev_builder_ubuntu_image }}
build-dev-builder-centos: ${{ inputs.release_dev_builder_centos_image }}
build-dev-builder-android: ${{ inputs.release_dev_builder_android_image }}
release-dev-builder-images-ecr:
name: Release dev builder images to AWS ECR
runs-on: ubuntu-20.04
needs: [
release-dev-builder-images
]
steps:
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@v4
with:
aws-access-key-id: ${{ secrets.AWS_ECR_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_ECR_SECRET_ACCESS_KEY }}
aws-region: ${{ vars.ECR_REGION }}
- name: Login to Amazon ECR
id: login-ecr-public
uses: aws-actions/amazon-ecr-login@v2
env:
AWS_REGION: ${{ vars.ECR_REGION }}
with:
registry-type: public
- name: Push dev-builder-ubuntu image
shell: bash
if: ${{ inputs.release_dev_builder_ubuntu_image }}
run: |
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
quay.io/skopeo/stable:latest \
copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-ubuntu:${{ needs.release-dev-builder-images.outputs.version }} \
docker://${{ vars.ECR_IMAGE_REGISTRY }}/${{ vars.ECR_IMAGE_NAMESPACE }}/dev-builder-ubuntu:${{ needs.release-dev-builder-images.outputs.version }}
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
quay.io/skopeo/stable:latest \
copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-ubuntu:latest \
docker://${{ vars.ECR_IMAGE_REGISTRY }}/${{ vars.ECR_IMAGE_NAMESPACE }}/dev-builder-ubuntu:latest
- name: Push dev-builder-centos image
shell: bash
if: ${{ inputs.release_dev_builder_centos_image }}
run: |
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
quay.io/skopeo/stable:latest \
copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-centos:${{ needs.release-dev-builder-images.outputs.version }} \
docker://${{ vars.ECR_IMAGE_REGISTRY }}/${{ vars.ECR_IMAGE_NAMESPACE }}/dev-builder-centos:${{ needs.release-dev-builder-images.outputs.version }}
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
quay.io/skopeo/stable:latest \
copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-centos:latest \
docker://${{ vars.ECR_IMAGE_REGISTRY }}/${{ vars.ECR_IMAGE_NAMESPACE }}/dev-builder-centos:latest
- name: Push dev-builder-android image
shell: bash
if: ${{ inputs.release_dev_builder_android_image }}
run: |
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
quay.io/skopeo/stable:latest \
copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-android:${{ needs.release-dev-builder-images.outputs.version }} \
docker://${{ vars.ECR_IMAGE_REGISTRY }}/${{ vars.ECR_IMAGE_NAMESPACE }}/dev-builder-android:${{ needs.release-dev-builder-images.outputs.version }}
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
quay.io/skopeo/stable:latest \
copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-android:latest \
docker://${{ vars.ECR_IMAGE_REGISTRY }}/${{ vars.ECR_IMAGE_NAMESPACE }}/dev-builder-android:latest
release-dev-builder-images-cn: # Note: Be careful issue: https://github.com/containers/skopeo/issues/1874 and we decide to use the latest stable skopeo container.
name: Release dev builder images to CN region
runs-on: ubuntu-20.04
@@ -51,35 +134,39 @@ jobs:
release-dev-builder-images
]
steps:
- name: Login to AliCloud Container Registry
uses: docker/login-action@v3
with:
registry: ${{ vars.ACR_IMAGE_REGISTRY }}
username: ${{ secrets.ALICLOUD_USERNAME }}
password: ${{ secrets.ALICLOUD_PASSWORD }}
- name: Push dev-builder-ubuntu image
shell: bash
if: ${{ inputs.release_dev_builder_ubuntu_image }}
env:
DST_REGISTRY_USERNAME: ${{ secrets.ALICLOUD_USERNAME }}
DST_REGISTRY_PASSWORD: ${{ secrets.ALICLOUD_PASSWORD }}
run: |
docker run quay.io/skopeo/stable:latest copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-ubuntu:${{ inputs.version }} \
--dest-creds "$DST_REGISTRY_USERNAME":"$DST_REGISTRY_PASSWORD" \
docker://${{ vars.ACR_IMAGE_REGISTRY }}/${{ vars.IMAGE_NAMESPACE }}/dev-builder-ubuntu:${{ inputs.version }}
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
quay.io/skopeo/stable:latest \
copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-ubuntu:${{ needs.release-dev-builder-images.outputs.version }} \
docker://${{ vars.ACR_IMAGE_REGISTRY }}/${{ vars.IMAGE_NAMESPACE }}/dev-builder-ubuntu:${{ needs.release-dev-builder-images.outputs.version }}
- name: Push dev-builder-centos image
shell: bash
if: ${{ inputs.release_dev_builder_centos_image }}
env:
DST_REGISTRY_USERNAME: ${{ secrets.ALICLOUD_USERNAME }}
DST_REGISTRY_PASSWORD: ${{ secrets.ALICLOUD_PASSWORD }}
run: |
docker run quay.io/skopeo/stable:latest copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-centos:${{ inputs.version }} \
--dest-creds "$DST_REGISTRY_USERNAME":"$DST_REGISTRY_PASSWORD" \
docker://${{ vars.ACR_IMAGE_REGISTRY }}/${{ vars.IMAGE_NAMESPACE }}/dev-builder-centos:${{ inputs.version }}
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
quay.io/skopeo/stable:latest \
copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-centos:${{ needs.release-dev-builder-images.outputs.version }} \
docker://${{ vars.ACR_IMAGE_REGISTRY }}/${{ vars.IMAGE_NAMESPACE }}/dev-builder-centos:${{ needs.release-dev-builder-images.outputs.version }}
- name: Push dev-builder-android image
shell: bash
if: ${{ inputs.release_dev_builder_android_image }}
env:
DST_REGISTRY_USERNAME: ${{ secrets.ALICLOUD_USERNAME }}
DST_REGISTRY_PASSWORD: ${{ secrets.ALICLOUD_PASSWORD }}
run: |
docker run quay.io/skopeo/stable:latest copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-android:${{ inputs.version }} \
--dest-creds "$DST_REGISTRY_USERNAME":"$DST_REGISTRY_PASSWORD" \
docker://${{ vars.ACR_IMAGE_REGISTRY }}/${{ vars.IMAGE_NAMESPACE }}/dev-builder-android:${{ inputs.version }}
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
quay.io/skopeo/stable:latest \
copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-android:${{ needs.release-dev-builder-images.outputs.version }} \
docker://${{ vars.ACR_IMAGE_REGISTRY }}/${{ vars.IMAGE_NAMESPACE }}/dev-builder-android:${{ needs.release-dev-builder-images.outputs.version }}

View File

@@ -33,6 +33,7 @@ on:
description: The runner uses to build linux-arm64 artifacts
default: ec2-c6g.4xlarge-arm64
options:
- ubuntu-2204-32-cores-arm
- ec2-c6g.xlarge-arm64 # 4C8G
- ec2-c6g.2xlarge-arm64 # 8C16G
- ec2-c6g.4xlarge-arm64 # 16C32G
@@ -82,7 +83,6 @@ on:
# Use env variables to control all the release process.
env:
# The arguments of building greptime.
RUST_TOOLCHAIN: nightly-2024-04-20
CARGO_PROFILE: nightly
# Controls whether to run tests, include unit-test, integration-test and sqlness.
@@ -91,7 +91,7 @@ env:
# The scheduled version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-YYYYMMDD', like v0.2.0-nigthly-20230313;
NIGHTLY_RELEASE_PREFIX: nightly
# Note: The NEXT_RELEASE_VERSION should be modified manually by every formal release.
NEXT_RELEASE_VERSION: v0.9.0
NEXT_RELEASE_VERSION: v0.10.0
# Permission reference: https://docs.github.com/en/actions/using-jobs/assigning-permissions-to-jobs
permissions:
@@ -123,6 +123,11 @@ jobs:
with:
fetch-depth: 0
- name: Check Rust toolchain version
shell: bash
run: |
./scripts/check-builder-rust-version.sh
# The create-version will create a global variable named 'version' in the global workflows.
# - If it's a tag push release, the version is the tag name(${{ github.ref_name }});
# - If it's a scheduled release, the version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-$buildTime', like v0.2.0-nigthly-20230313;
@@ -183,6 +188,8 @@ jobs:
cargo-profile: ${{ env.CARGO_PROFILE }}
version: ${{ needs.allocate-runners.outputs.version }}
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
image-registry: ${{ vars.ECR_IMAGE_REGISTRY }}
image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }}
build-linux-arm64-artifacts:
name: Build linux-arm64 artifacts
@@ -202,6 +209,8 @@ jobs:
cargo-profile: ${{ env.CARGO_PROFILE }}
version: ${{ needs.allocate-runners.outputs.version }}
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
image-registry: ${{ vars.ECR_IMAGE_REGISTRY }}
image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }}
build-macos-artifacts:
name: Build macOS artifacts
@@ -240,11 +249,11 @@ jobs:
- uses: ./.github/actions/build-macos-artifacts
with:
arch: ${{ matrix.arch }}
rust-toolchain: ${{ env.RUST_TOOLCHAIN }}
cargo-profile: ${{ env.CARGO_PROFILE }}
features: ${{ matrix.features }}
version: ${{ needs.allocate-runners.outputs.version }}
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
# We decide to disable the integration tests on macOS because it's unnecessary and time-consuming.
disable-run-tests: true
artifacts-dir: ${{ matrix.artifacts-dir-prefix }}-${{ needs.allocate-runners.outputs.version }}
- name: Set build macos result
@@ -283,7 +292,6 @@ jobs:
- uses: ./.github/actions/build-windows-artifacts
with:
arch: ${{ matrix.arch }}
rust-toolchain: ${{ env.RUST_TOOLCHAIN }}
cargo-profile: ${{ env.CARGO_PROFILE }}
features: ${{ matrix.features }}
version: ${{ needs.allocate-runners.outputs.version }}

View File

@@ -14,7 +14,7 @@ Follow our [README](https://github.com/GreptimeTeam/greptimedb#readme) to get th
It can feel intimidating to contribute to a complex project, but it can also be exciting and fun. These general notes will help everyone participate in this communal activity.
- Follow the [Code of Conduct](https://github.com/GreptimeTeam/greptimedb/blob/main/CODE_OF_CONDUCT.md)
- Follow the [Code of Conduct](https://github.com/GreptimeTeam/.github/blob/main/.github/CODE_OF_CONDUCT.md)
- Small changes make huge differences. We will happily accept a PR making a single character change if it helps move forward. Don't wait to have everything working.
- Check the closed issues before opening your issue.
- Try to follow the existing style of the code.
@@ -30,7 +30,7 @@ Pull requests are great, but we accept all kinds of other help if you like. Such
## Code of Conduct
Also, there are things that we are not looking for because they don't match the goals of the product or benefit the community. Please read [Code of Conduct](https://github.com/GreptimeTeam/greptimedb/blob/main/CODE_OF_CONDUCT.md); we hope everyone can keep good manners and become an honored member.
Also, there are things that we are not looking for because they don't match the goals of the product or benefit the community. Please read [Code of Conduct](https://github.com/GreptimeTeam/.github/blob/main/.github/CODE_OF_CONDUCT.md); we hope everyone can keep good manners and become an honored member.
## License
@@ -55,7 +55,7 @@ GreptimeDB uses the [Apache 2.0 license](https://github.com/GreptimeTeam/greptim
- To ensure that community is free and confident in its ability to use your contributions, please sign the Contributor License Agreement (CLA) which will be incorporated in the pull request process.
- Make sure all files have proper license header (running `docker run --rm -v $(pwd):/github/workspace ghcr.io/korandoru/hawkeye-native:v3 format` from the project root).
- Make sure all your codes are formatted and follow the [coding style](https://pingcap.github.io/style-guide/rust/) and [style guide](docs/style-guide.md).
- Make sure all unit tests are passed (using `cargo test --workspace` or [nextest](https://nexte.st/index.html) `cargo nextest run`).
- Make sure all unit tests are passed using [nextest](https://nexte.st/index.html) `cargo nextest run`.
- Make sure all clippy warnings are fixed (you can check it locally by running `cargo clippy --workspace --all-targets -- -D warnings`).
#### `pre-commit` Hooks
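The pre-submission checklist above reduces to a short command sequence. The commands are quoted from the CONTRIBUTING.md diff itself; running them from the project root is the only assumption:

# License headers, unit tests via nextest, and clippy, as required before opening a PR.
docker run --rm -v $(pwd):/github/workspace ghcr.io/korandoru/hawkeye-native:v3 format
cargo nextest run
cargo clippy --workspace --all-targets -- -D warnings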

Cargo.lock (generated, 842 lines changed)

File diff suppressed because it is too large.

View File

@@ -64,7 +64,7 @@ members = [
resolver = "2"
[workspace.package]
version = "0.8.2"
version = "0.9.3"
edition = "2021"
license = "Apache-2.0"
@@ -77,6 +77,7 @@ clippy.readonly_write_lock = "allow"
rust.unknown_lints = "deny"
# Remove this after https://github.com/PyO3/pyo3/issues/4094
rust.non_local_definitions = "allow"
rust.unexpected_cfgs = { level = "warn", check-cfg = ['cfg(tokio_unstable)'] }
[workspace.dependencies]
# We turn off default-features for some dependencies here so the workspaces which inherit them can
@@ -104,27 +105,27 @@ clap = { version = "4.4", features = ["derive"] }
config = "0.13.0"
crossbeam-utils = "0.8"
dashmap = "5.4"
datafusion = { git = "https://github.com/apache/datafusion.git", rev = "729b356ef543ffcda6813c7b5373507a04ae0109" }
datafusion-common = { git = "https://github.com/apache/datafusion.git", rev = "729b356ef543ffcda6813c7b5373507a04ae0109" }
datafusion-expr = { git = "https://github.com/apache/datafusion.git", rev = "729b356ef543ffcda6813c7b5373507a04ae0109" }
datafusion-functions = { git = "https://github.com/apache/datafusion.git", rev = "729b356ef543ffcda6813c7b5373507a04ae0109" }
datafusion-optimizer = { git = "https://github.com/apache/datafusion.git", rev = "729b356ef543ffcda6813c7b5373507a04ae0109" }
datafusion-physical-expr = { git = "https://github.com/apache/datafusion.git", rev = "729b356ef543ffcda6813c7b5373507a04ae0109" }
datafusion-physical-plan = { git = "https://github.com/apache/datafusion.git", rev = "729b356ef543ffcda6813c7b5373507a04ae0109" }
datafusion-sql = { git = "https://github.com/apache/datafusion.git", rev = "729b356ef543ffcda6813c7b5373507a04ae0109" }
datafusion-substrait = { git = "https://github.com/apache/datafusion.git", rev = "729b356ef543ffcda6813c7b5373507a04ae0109" }
datafusion = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
datafusion-common = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
datafusion-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
datafusion-functions = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
datafusion-optimizer = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
datafusion-physical-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
datafusion-physical-plan = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
datafusion-sql = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
datafusion-substrait = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
derive_builder = "0.12"
dotenv = "0.15"
etcd-client = { version = "0.13" }
fst = "0.4.7"
futures = "0.3"
futures-util = "0.3"
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "b83f00958fe4cbc77b85b7407bca206e98bdc845" }
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "c437b55725b7f5224fe9d46db21072b4a682ee4b" }
humantime = "2.1"
humantime-serde = "1.1"
itertools = "0.10"
lazy_static = "1.4"
meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "80b72716dcde47ec4161478416a5c6c21343364d" }
meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "80eb97c24c88af4dd9a86f8bbaf50e741d4eb8cd" }
mockall = "0.11.4"
moka = "0.12"
notify = "6.1"
@@ -151,14 +152,18 @@ reqwest = { version = "0.12", default-features = false, features = [
"stream",
"multipart",
] }
rskafka = "0.5"
rskafka = { git = "https://github.com/influxdata/rskafka.git", rev = "75535b5ad9bae4a5dbb582c82e44dfd81ec10105", features = [
"transport-tls",
] }
rstest = "0.21"
rstest_reuse = "0.7"
rust_decimal = "1.33"
rustc-hash = "2.0"
schemars = "0.8"
serde = { version = "1.0", features = ["derive"] }
serde_json = { version = "1.0", features = ["float_roundtrip"] }
serde_with = "3"
shadow-rs = "0.31"
smallvec = { version = "1", features = ["serde"] }
snafu = "0.8"
sysinfo = "0.30"
@@ -169,6 +174,7 @@ sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "5
strum = { version = "0.25", features = ["derive"] }
tempfile = "3"
tokio = { version = "1.36", features = ["full"] }
tokio-postgres = "0.7"
tokio-stream = { version = "0.1" }
tokio-util = { version = "0.7", features = ["io-util", "compat"] }
toml = "0.8.8"
@@ -183,7 +189,7 @@ auth = { path = "src/auth" }
cache = { path = "src/cache" }
catalog = { path = "src/catalog" }
client = { path = "src/client" }
cmd = { path = "src/cmd" }
cmd = { path = "src/cmd", default-features = false }
common-base = { path = "src/common/base" }
common-catalog = { path = "src/common/catalog" }
common-config = { path = "src/common/config" }
@@ -213,7 +219,7 @@ datanode = { path = "src/datanode" }
datatypes = { path = "src/datatypes" }
file-engine = { path = "src/file-engine" }
flow = { path = "src/flow" }
frontend = { path = "src/frontend" }
frontend = { path = "src/frontend", default-features = false }
index = { path = "src/index" }
log-store = { path = "src/log-store" }
meta-client = { path = "src/meta-client" }
@@ -238,14 +244,14 @@ table = { path = "src/table" }
[workspace.dependencies.meter-macros]
git = "https://github.com/GreptimeTeam/greptime-meter.git"
rev = "80b72716dcde47ec4161478416a5c6c21343364d"
rev = "80eb97c24c88af4dd9a86f8bbaf50e741d4eb8cd"
[profile.release]
debug = 1
[profile.nightly]
inherits = "release"
strip = true
strip = "debuginfo"
lto = "thin"
debug = false
incremental = false
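With the workspace now pinned to fork revisions of DataFusion (see the dependency changes above), one way to double-check which revision a build actually resolves is to invert the dependency tree for that crate. This is a generic cargo invocation, not something prescribed by the diff:

# Show which workspace members pull in datafusion and from which source/revision.
cargo tree -i datafusion | head -n 20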

View File

@@ -8,6 +8,7 @@ CARGO_BUILD_OPTS := --locked
IMAGE_REGISTRY ?= docker.io
IMAGE_NAMESPACE ?= greptime
IMAGE_TAG ?= latest
DEV_BUILDER_IMAGE_TAG ?= 2024-06-06-b4b105ad-20240827021230
BUILDX_MULTI_PLATFORM_BUILD ?= false
BUILDX_BUILDER_NAME ?= gtbuilder
BASE_IMAGE ?= ubuntu
@@ -15,6 +16,7 @@ RUST_TOOLCHAIN ?= $(shell cat rust-toolchain.toml | grep channel | cut -d'"' -f2
CARGO_REGISTRY_CACHE ?= ${HOME}/.cargo/registry
ARCH := $(shell uname -m | sed 's/x86_64/amd64/' | sed 's/aarch64/arm64/')
OUTPUT_DIR := $(shell if [ "$(RELEASE)" = "true" ]; then echo "release"; elif [ ! -z "$(CARGO_PROFILE)" ]; then echo "$(CARGO_PROFILE)" ; else echo "debug"; fi)
SQLNESS_OPTS ?=
# The arguments for running integration tests.
ETCD_VERSION ?= v3.5.9
@@ -76,7 +78,7 @@ build: ## Build debug version greptime.
build-by-dev-builder: ## Build greptime by dev-builder.
docker run --network=host \
-v ${PWD}:/greptimedb -v ${CARGO_REGISTRY_CACHE}:/root/.cargo/registry \
-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:latest \
-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:${DEV_BUILDER_IMAGE_TAG} \
make build \
CARGO_EXTENSION="${CARGO_EXTENSION}" \
CARGO_PROFILE=${CARGO_PROFILE} \
@@ -90,7 +92,7 @@ build-by-dev-builder: ## Build greptime by dev-builder.
build-android-bin: ## Build greptime binary for android.
docker run --network=host \
-v ${PWD}:/greptimedb -v ${CARGO_REGISTRY_CACHE}:/root/.cargo/registry \
-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-android:latest \
-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-android:${DEV_BUILDER_IMAGE_TAG} \
make build \
CARGO_EXTENSION="ndk --platform 23 -t aarch64-linux-android" \
CARGO_PROFILE=release \
@@ -104,8 +106,8 @@ build-android-bin: ## Build greptime binary for android.
strip-android-bin: build-android-bin ## Strip greptime binary for android.
docker run --network=host \
-v ${PWD}:/greptimedb \
-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-android:latest \
bash -c '$${NDK_ROOT}/toolchains/llvm/prebuilt/linux-x86_64/bin/llvm-strip /greptimedb/target/aarch64-linux-android/release/greptime'
-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-android:${DEV_BUILDER_IMAGE_TAG} \
bash -c '$${NDK_ROOT}/toolchains/llvm/prebuilt/linux-x86_64/bin/llvm-strip --strip-debug /greptimedb/target/aarch64-linux-android/release/greptime'
.PHONY: clean
clean: ## Clean the project.
@@ -144,7 +146,7 @@ dev-builder: multi-platform-buildx ## Build dev-builder image.
docker buildx build --builder ${BUILDX_BUILDER_NAME} \
--build-arg="RUST_TOOLCHAIN=${RUST_TOOLCHAIN}" \
-f docker/dev-builder/${BASE_IMAGE}/Dockerfile \
-t ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:${IMAGE_TAG} ${BUILDX_MULTI_PLATFORM_BUILD_OPTS} .
-t ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:${DEV_BUILDER_IMAGE_TAG} ${BUILDX_MULTI_PLATFORM_BUILD_OPTS} .
.PHONY: multi-platform-buildx
multi-platform-buildx: ## Create buildx multi-platform builder.
@@ -161,7 +163,7 @@ nextest: ## Install nextest tools.
.PHONY: sqlness-test
sqlness-test: ## Run sqlness test.
cargo sqlness
cargo sqlness ${SQLNESS_OPTS}
# Run fuzz test ${FUZZ_TARGET}.
RUNS ?= 1
@@ -172,7 +174,7 @@ fuzz:
.PHONY: fuzz-ls
fuzz-ls:
cargo fuzz list --fuzz-dir tests-fuzz
cargo fuzz list --fuzz-dir tests-fuzz
.PHONY: check
check: ## Cargo check all the targets.
@@ -189,6 +191,7 @@ fix-clippy: ## Fix clippy violations.
.PHONY: fmt-check
fmt-check: ## Check code format.
cargo fmt --all -- --check
python3 scripts/check-snafu.py
.PHONY: start-etcd
start-etcd: ## Start single node etcd for testing purpose.
@@ -202,13 +205,17 @@ stop-etcd: ## Stop single node etcd for testing purpose.
run-it-in-container: start-etcd ## Run integration tests in dev-builder.
docker run --network=host \
-v ${PWD}:/greptimedb -v ${CARGO_REGISTRY_CACHE}:/root/.cargo/registry -v /tmp:/tmp \
-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:latest \
-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:${DEV_BUILDER_IMAGE_TAG} \
make test sqlness-test BUILD_JOBS=${BUILD_JOBS}
.PHONY: run-cluster-with-etcd
run-cluster-with-etcd: ## Run greptime cluster with etcd in docker-compose.
.PHONY: start-cluster
start-cluster: ## Start the greptimedb cluster with etcd by using docker compose.
docker compose -f ./docker/docker-compose/cluster-with-etcd.yaml up
.PHONY: stop-cluster
stop-cluster: ## Stop the greptimedb cluster that created by docker compose.
docker compose -f ./docker/docker-compose/cluster-with-etcd.yaml stop
##@ Docs
config-docs: ## Generate configuration documentation from toml files.
docker run --rm \
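The Makefile changes above pin a dev-builder image tag, forward extra sqlness options, and add cluster helper targets. A short usage sketch: the tag value is the default pinned in the Makefile, and the build and cluster targets assume Docker is available locally.

# Run sqlness with extra runner options, now forwarded through SQLNESS_OPTS.
make sqlness-test SQLNESS_OPTS="--preserve-state"

# Check formatting (now also runs scripts/check-snafu.py).
make fmt-check

# Build through the pinned dev-builder image instead of :latest.
make build-by-dev-builder DEV_BUILDER_IMAGE_TAG=2024-06-06-b4b105ad-20240827021230

# Bring the local cluster with etcd up and tear it down again.
make start-cluster
make stop-cluster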

View File

@@ -6,12 +6,12 @@
</picture>
</p>
<h1 align="center">Cloud-scale, Fast and Efficient Time Series Database</h1>
<h2 align="center">Unified Time Series Database for Metrics, Logs, and Events</h2>
<div align="center">
<h3 align="center">
<a href="https://greptime.com/product/cloud">GreptimeCloud</a> |
<a href="https://docs.greptime.com/">User guide</a> |
<a href="https://docs.greptime.com/">User Guide</a> |
<a href="https://greptimedb.rs/">API Docs</a> |
<a href="https://github.com/GreptimeTeam/greptimedb/issues/3412">Roadmap 2024</a>
</h4>
@@ -50,24 +50,23 @@
## Introduction
**GreptimeDB** is an open-source time-series database focusing on efficiency, scalability, and analytical capabilities.
Designed to work on infrastructure of the cloud era, GreptimeDB benefits users with its elasticity and commodity storage, offering a fast and cost-effective **alternative to InfluxDB** and a **long-term storage for Prometheus**.
**GreptimeDB** is an open-source unified time-series database for **Metrics**, **Logs**, and **Events** (also **Traces** in plan). You can gain real-time insights from Edge to Cloud at any scale.
## Why GreptimeDB
Our core developers have been building time-series data platforms for years. Based on our best-practices, GreptimeDB is born to give you:
* **Easy horizontal scaling**
* **Unified all kinds of time series**
Seamless scalability from a standalone binary at edge to a robust, highly available distributed cluster in cloud, with a transparent experience for both developers and administrators.
GreptimeDB treats all time series as contextual events with timestamp, and thus unifies the processing of metrics, logs, and events. It supports analyzing metrics, logs, and events with SQL and PromQL, and doing streaming with continuous aggregation.
* **Analyzing time-series data**
* **Cloud-Edge collaboration**
Query your time-series data with SQL and PromQL. Use Python scripts to facilitate complex analytical tasks.
GreptimeDB can be deployed on ARM architecture-compatible Android/Linux systems as well as cloud environments from various vendors. Both sides run the same software, providing identical APIs and control planes, so your application can run at the edge or on the cloud without modification, and data synchronization also becomes extremely easy and efficient.
* **Cloud-native distributed database**
Fully open-source distributed cluster architecture that harnesses the power of cloud-native elastic computing resources.
By leveraging object storage (S3 and others), separating compute and storage, scaling stateless compute nodes arbitrarily, GreptimeDB implements seamless scalability. It also supports cross-cloud deployment with a built-in unified data access layer over different object storages.
* **Performant and Cost-effective**
@@ -105,10 +104,10 @@ Read more about [Installation](https://docs.greptime.com/getting-started/install
## Getting Started
* [Quickstart](https://docs.greptime.com/getting-started/quick-start/overview)
* [Write Data](https://docs.greptime.com/user-guide/clients/overview)
* [Query Data](https://docs.greptime.com/user-guide/query-data/overview)
* [Operations](https://docs.greptime.com/user-guide/operations/overview)
* [Quickstart](https://docs.greptime.com/getting-started/quick-start)
* [User Guide](https://docs.greptime.com/user-guide/overview)
* [Demos](https://github.com/GreptimeTeam/demo-scene)
* [FAQ](https://docs.greptime.com/faq-and-others/faq)
## Build
@@ -151,9 +150,10 @@ Our official Grafana dashboard is available at [grafana](grafana/README.md) dire
## Project Status
The current version has not yet reached General Availability version standards.
In line with our Greptime 2024 Roadmap, we plan to achieve a production-level
version with the update to v1.0 in August. [[Join Force]](https://github.com/GreptimeTeam/greptimedb/issues/3412)
The current version has not yet reached the standards for General Availability.
According to our Greptime 2024 Roadmap, we aim to achieve a production-level version with the release of v1.0 by the end of 2024. [Join Us](https://github.com/GreptimeTeam/greptimedb/issues/3412)
We welcome you to test and use GreptimeDB. Some users have already adopted it in their production environments. If you're interested in trying it out, please use the latest stable release available.
## Community
@@ -172,6 +172,13 @@ In addition, you may:
- Connect us with [Linkedin](https://www.linkedin.com/company/greptime/)
- Follow us on [Twitter](https://twitter.com/greptime)
## Commercial Support
If you are running GreptimeDB OSS in your organization, we offer additional
enterprise add-ons, installation services, training, and consulting. [Contact
us](https://greptime.com/contactus) and we will reach out to you with more
details about our commercial license.
## License
GreptimeDB uses the [Apache License 2.0](https://apache.org/licenses/LICENSE-2.0.txt) to strike a balance between

View File

@@ -15,10 +15,11 @@
| `mode` | String | `standalone` | The running mode of the datanode. It can be `standalone` or `distributed`. |
| `enable_telemetry` | Bool | `true` | Enable telemetry to collect anonymous usage data. |
| `default_timezone` | String | `None` | The default timezone of the server. |
| `init_regions_in_background` | Bool | `false` | Initialize all regions in the background during the startup.<br/>By default, it provides services after all regions have been initialized. |
| `init_regions_parallelism` | Integer | `16` | Parallelism of initializing regions. |
| `runtime` | -- | -- | The runtime options. |
| `runtime.read_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
| `runtime.write_rt_size` | Integer | `8` | The number of threads to execute the runtime for global write operations. |
| `runtime.bg_rt_size` | Integer | `4` | The number of threads to execute the runtime for global background operations. |
| `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
| `runtime.compact_rt_size` | Integer | `4` | The number of threads to execute the runtime for global write operations. |
| `http` | -- | -- | The HTTP server options. |
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
| `http.timeout` | String | `30s` | HTTP request timeout. Set to 0 to disable timeout. |
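To make the options introduced above concrete, here is a minimal TOML sketch assembled from the defaults listed in this table; it is only a sketch, and the comments paraphrase the key names rather than quote the generated descriptions.

```toml
## Initialize all regions during startup before serving (the default),
## or set to true to initialize regions in the background.
init_regions_in_background = false
## Parallelism of initializing regions.
init_regions_parallelism = 16

## The runtime options: `global_rt_size` and `compact_rt_size` replace the former
## `read_rt_size`/`write_rt_size`/`bg_rt_size` keys.
[runtime]
global_rt_size = 8
compact_rt_size = 4
```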
@@ -68,6 +69,12 @@
| `wal.prefill_log_files` | Bool | `false` | Whether to pre-create log files on start up.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.sync_period` | String | `10s` | Duration for fsyncing log files.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.broker_endpoints` | Array | -- | The Kafka broker endpoints.<br/>**It's only used when the provider is `kafka`**. |
| `wal.auto_create_topics` | Bool | `true` | Automatically create topics for WAL.<br/>Set to `true` to automatically create topics for WAL.<br/>Otherwise, use topics named `topic_name_prefix_[0..num_topics)` |
| `wal.num_topics` | Integer | `64` | Number of topics.<br/>**It's only used when the provider is `kafka`**. |
| `wal.selector_type` | String | `round_robin` | Topic selector type.<br/>Available selector types:<br/>- `round_robin` (default)<br/>**It's only used when the provider is `kafka`**. |
| `wal.topic_name_prefix` | String | `greptimedb_wal_topic` | A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`.<br/>e.g., greptimedb_wal_topic_0, greptimedb_wal_topic_1.<br/>**It's only used when the provider is `kafka`**. |
| `wal.replication_factor` | Integer | `1` | Expected number of replicas of each partition.<br/>**It's only used when the provider is `kafka`**. |
| `wal.create_topic_timeout` | String | `30s` | Above which a topic creation operation will be cancelled.<br/>**It's only used when the provider is `kafka`**. |
| `wal.max_batch_bytes` | String | `1MB` | The max size of a single producer batch.<br/>Warning: Kafka has a default limit of 1MB per message in a topic.<br/>**It's only used when the provider is `kafka`**. |
| `wal.consumer_wait_timeout` | String | `100ms` | The consumer wait timeout.<br/>**It's only used when the provider is `kafka`**. |
| `wal.backoff_init` | String | `500ms` | The initial backoff delay.<br/>**It's only used when the provider is `kafka`**. |
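A hedged sketch of the new Kafka WAL topic settings, using the defaults from the table above; these keys only take effect when the WAL provider is `kafka`.

```toml
[wal]
provider = "kafka"
## The Kafka broker endpoints.
broker_endpoints = ["127.0.0.1:9092"]
## Create WAL topics automatically; otherwise topics named
## `topic_name_prefix_[0..num_topics)` must already exist.
auto_create_topics = true
## Number of topics.
num_topics = 64
## Topic selector type (`round_robin` is the documented default).
selector_type = "round_robin"
## Topics are named by concatenating this prefix and the topic id,
## e.g. greptimedb_wal_topic_0, greptimedb_wal_topic_1.
topic_name_prefix = "greptimedb_wal_topic"
## Expected number of replicas of each partition.
replication_factor = 1
```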
@@ -94,6 +101,7 @@
| `storage.account_key` | String | `None` | The account key of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
| `storage.scope` | String | `None` | The scope of the google cloud storage.<br/>**It's only used when the storage type is `Gcs`**. |
| `storage.credential_path` | String | `None` | The credential path of the google cloud storage.<br/>**It's only used when the storage type is `Gcs`**. |
| `storage.credential` | String | `None` | The credential of the google cloud storage.<br/>**It's only used when the storage type is `Gcs`**. |
| `storage.container` | String | `None` | The container of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
| `storage.sas_token` | String | `None` | The sas token of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
| `storage.endpoint` | String | `None` | The endpoint of the S3 service.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
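The new `storage.credential` option sits next to the existing GCS keys; a sketch with placeholder values, mirroring the commented example later in this diff:

```toml
[storage]
type = "Gcs"
bucket = "greptimedb"
root = "data"
scope = "test"
## Path to a GCS credential file (placeholder value).
credential_path = "123456"
## Newly added: the base64-encoded GCS credential itself (placeholder value).
credential = "base64-credential"
endpoint = "https://storage.googleapis.com"
```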
@@ -111,15 +119,17 @@
| `region_engine.mito.global_write_buffer_reject_size` | String | `2GB` | Global write buffer size threshold to reject write requests. If not set, it's default to 2 times of `global_write_buffer_size` |
| `region_engine.mito.sst_meta_cache_size` | String | `128MB` | Cache size for SST metadata. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/32 of OS memory with a max limitation of 128MB. |
| `region_engine.mito.vector_cache_size` | String | `512MB` | Cache size for vectors and arrow arrays. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
| `region_engine.mito.page_cache_size` | String | `512MB` | Cache size for pages of SST row groups. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
| `region_engine.mito.page_cache_size` | String | `512MB` | Cache size for pages of SST row groups. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/8 of OS memory. |
| `region_engine.mito.selector_result_cache_size` | String | `512MB` | Cache size for time series selector (e.g. `last_value()`). Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
| `region_engine.mito.enable_experimental_write_cache` | Bool | `false` | Whether to enable the experimental write cache. |
| `region_engine.mito.experimental_write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}/write_cache`. |
| `region_engine.mito.experimental_write_cache_size` | String | `512MB` | Capacity for write cache. |
| `region_engine.mito.experimental_write_cache_ttl` | String | `1h` | TTL for write cache. |
| `region_engine.mito.experimental_write_cache_ttl` | String | `None` | TTL for write cache. |
| `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
| `region_engine.mito.scan_parallelism` | Integer | `0` | Parallelism to scan a region (default: 1/4 of cpu cores).<br/>- `0`: using the default value (1/4 of cpu cores).<br/>- `1`: scan in current thread.<br/>- `n`: scan in parallelism n. |
| `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
| `region_engine.mito.allow_stale_entries` | Bool | `false` | Whether to allow stale WAL entries read during replay. |
| `region_engine.mito.min_compaction_interval` | String | `0m` | Minimum time interval between two compactions.<br/>To align with the old behavior, the default value is 0 (no restrictions). |
| `region_engine.mito.index` | -- | -- | The options for index in Mito engine. |
| `region_engine.mito.index.aux_path` | String | `""` | Auxiliary directory path for the index in filesystem, used to store intermediate files for<br/>creating the index and staging files for searching the index, defaults to `{data_home}/index_intermediate`.<br/>The default name for this directory is `index_intermediate` for backward compatibility.<br/><br/>This path contains two subdirectories:<br/>- `__intm`: for storing intermediate files used during creating index.<br/>- `staging`: for storing staging files used during searching index. |
| `region_engine.mito.index.staging_size` | String | `2GB` | The max capacity of the staging directory. |
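For the Mito options touched here, a minimal sketch under `[[region_engine]]` using the documented defaults; `experimental_write_cache_ttl` no longer has a default, so it is shown commented out with the `8h` value used elsewhere in this diff.

```toml
[[region_engine]]
[region_engine.mito]
## Cache size for pages of SST row groups; if unset it now defaults to 1/8 of OS memory.
page_cache_size = "512MB"
## Whether to enable the experimental write cache.
enable_experimental_write_cache = false
## Capacity for the write cache.
experimental_write_cache_size = "512MB"
## TTL for the write cache (no default; uncomment to set one).
# experimental_write_cache_ttl = "8h"
## Minimum time interval between two compactions; 0 keeps the old unrestricted behavior.
min_compaction_interval = "0m"
```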
@@ -129,6 +139,8 @@
| `region_engine.mito.inverted_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.inverted_index.mem_threshold_on_create` | String | `auto` | Memory threshold for performing an external sort during index creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold |
| `region_engine.mito.inverted_index.intermediate_path` | String | `""` | Deprecated, use `region_engine.mito.index.aux_path` instead. |
| `region_engine.mito.inverted_index.metadata_cache_size` | String | `64MiB` | Cache size for inverted index metadata. |
| `region_engine.mito.inverted_index.content_cache_size` | String | `128MiB` | Cache size for inverted index content. |
| `region_engine.mito.fulltext_index` | -- | -- | The options for full-text index in Mito engine. |
| `region_engine.mito.fulltext_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.fulltext_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
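The two new inverted-index cache options added in this hunk, sketched with their documented defaults:

```toml
[region_engine.mito.inverted_index]
## Whether to apply the index on query: `auto` (default) or `disable`.
apply_on_query = "auto"
## Cache size for inverted index metadata (new option).
metadata_cache_size = "64MiB"
## Cache size for inverted index content (new option).
content_cache_size = "128MiB"
```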
@@ -139,21 +151,23 @@
| `region_engine.mito.memtable.index_max_keys_per_shard` | Integer | `8192` | The max number of keys in one shard.<br/>Only available for `partition_tree` memtable. |
| `region_engine.mito.memtable.data_freeze_threshold` | Integer | `32768` | The max rows of data inside the actively writing buffer in one shard.<br/>Only available for `partition_tree` memtable. |
| `region_engine.mito.memtable.fork_dictionary_bytes` | String | `1GiB` | Max dictionary bytes.<br/>Only available for `partition_tree` memtable. |
| `region_engine.file` | -- | -- | Enable the file engine. |
| `logging` | -- | -- | The logging options. |
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. |
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
| `logging.level` | String | `None` | The log level. Can be `info`/`debug`/`warn`/`error`. |
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
| `logging.otlp_endpoint` | String | `None` | The OTLP tracing endpoint. |
| `logging.otlp_endpoint` | String | `http://localhost:4317` | The OTLP tracing endpoint. |
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
| `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. |
| `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing will be sampled and exported.<br/>Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.<br/>ratio > 1 are treated as 1. Fractions < 0 are treated as 0 |
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
| `export_metrics` | -- | -- | The datanode can export its metrics and send to Prometheus compatible service (e.g. send to `greptimedb` itself) from remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape. |
| `export_metrics.enable` | Bool | `false` | whether enable export metrics. |
| `export_metrics.write_interval` | String | `30s` | The interval of export metrics. |
| `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommended to collect metrics generated by itself |
| `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommended to collect metrics generated by itself<br/>You must create the database before enabling it. |
| `export_metrics.self_import.db` | String | `None` | -- |
| `export_metrics.remote_write` | -- | -- | -- |
| `export_metrics.remote_write.url` | String | `""` | The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`. |
| `export_metrics.remote_write.url` | String | `""` | The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. |
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
| `tracing.tokio_console_addr` | String | `None` | The tokio console address. |
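A sketch of a `[logging]` section reflecting the changes above: an empty `dir` disables file logging, `otlp_endpoint` now defaults to the local OTLP gRPC endpoint, and `log_format` selects `text` or `json`. Values mirror the documented defaults.

```toml
[logging]
## Directory for log files; set to "" to disable writing logs to files.
dir = "/tmp/greptimedb/logs"
## The log level: `info`/`debug`/`warn`/`error`.
level = "info"
## Enable OTLP tracing.
enable_otlp_tracing = false
## The OTLP tracing endpoint (new default).
otlp_endpoint = "http://localhost:4317"
## Whether to append logs to stdout.
append_stdout = true
## The log format: `text` or `json`.
log_format = "text"
```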
@@ -165,12 +179,10 @@
| Key | Type | Default | Descriptions |
| --- | -----| ------- | ----------- |
| `mode` | String | `standalone` | The running mode of the datanode. It can be `standalone` or `distributed`. |
| `default_timezone` | String | `None` | The default timezone of the server. |
| `runtime` | -- | -- | The runtime options. |
| `runtime.read_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
| `runtime.write_rt_size` | Integer | `8` | The number of threads to execute the runtime for global write operations. |
| `runtime.bg_rt_size` | Integer | `4` | The number of threads to execute the runtime for global background operations. |
| `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
| `runtime.compact_rt_size` | Integer | `4` | The number of threads to execute the runtime for global write operations. |
| `heartbeat` | -- | -- | The heartbeat options. |
| `heartbeat.interval` | String | `18s` | Interval for sending heartbeat messages to the metasrv. |
| `heartbeat.retry_interval` | String | `3s` | Interval for retrying to send heartbeat messages to the metasrv. |
@@ -227,20 +239,21 @@
| `datanode.client.connect_timeout` | String | `10s` | -- |
| `datanode.client.tcp_nodelay` | Bool | `true` | -- |
| `logging` | -- | -- | The logging options. |
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. |
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
| `logging.level` | String | `None` | The log level. Can be `info`/`debug`/`warn`/`error`. |
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
| `logging.otlp_endpoint` | String | `None` | The OTLP tracing endpoint. |
| `logging.otlp_endpoint` | String | `http://localhost:4317` | The OTLP tracing endpoint. |
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
| `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. |
| `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing will be sampled and exported.<br/>Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.<br/>ratio > 1 are treated as 1. Fractions < 0 are treated as 0 |
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
| `export_metrics` | -- | -- | The datanode can export its metrics and send to Prometheus compatible service (e.g. send to `greptimedb` itself) from remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape. |
| `export_metrics.enable` | Bool | `false` | whether enable export metrics. |
| `export_metrics.write_interval` | String | `30s` | The interval of export metrics. |
| `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommended to collect metrics generated by itself |
| `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommended to collect metrics generated by itself<br/>You must create the database before enabling it. |
| `export_metrics.self_import.db` | String | `None` | -- |
| `export_metrics.remote_write` | -- | -- | -- |
| `export_metrics.remote_write.url` | String | `""` | The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`. |
| `export_metrics.remote_write.url` | String | `""` | The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. |
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
| `tracing.tokio_console_addr` | String | `None` | The tokio console address. |
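A hedged sketch of the `export_metrics` choices documented above: `self_import` for standalone mode (the target database, now `greptime_metrics` by default, must exist beforehand) versus Prometheus remote-write; in practice only one of the two sub-sections would normally be configured.

```toml
[export_metrics]
## Whether to export metrics.
enable = false
## The interval of exporting metrics.
write_interval = "30s"

## Standalone mode: import metrics into a local database that already exists.
[export_metrics.self_import]
db = "greptime_metrics"

## Alternatively, push metrics to a Prometheus remote-write endpoint.
[export_metrics.remote_write]
url = "http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics"
```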
@@ -253,35 +266,37 @@
| `data_home` | String | `/tmp/metasrv/` | The working home directory. |
| `bind_addr` | String | `127.0.0.1:3002` | The bind address of metasrv. |
| `server_addr` | String | `127.0.0.1:3002` | The communication server address for frontend and datanode to connect to metasrv, "127.0.0.1:3002" by default for localhost. |
| `store_addr` | String | `127.0.0.1:2379` | Etcd server address. |
| `selector` | String | `lease_based` | Datanode selector type.<br/>- `lease_based` (default value).<br/>- `load_based`<br/>For details, please see "https://docs.greptime.com/developer-guide/metasrv/selector". |
| `store_addr` | String | `127.0.0.1:2379` | Store server address default to etcd store. |
| `selector` | String | `round_robin` | Datanode selector type.<br/>- `round_robin` (default value)<br/>- `lease_based`<br/>- `load_based`<br/>For details, please see "https://docs.greptime.com/developer-guide/metasrv/selector". |
| `use_memory_store` | Bool | `false` | Store data in memory. |
| `enable_telemetry` | Bool | `true` | Whether to enable greptimedb telemetry. |
| `store_key_prefix` | String | `""` | If it's not empty, the metasrv will store all data with this key prefix. |
| `enable_region_failover` | Bool | `false` | Whether to enable region failover.<br/>This feature is only available on GreptimeDB running on cluster mode and<br/>- Using Remote WAL<br/>- Using shared storage (e.g., s3). |
| `backend` | String | `EtcdStore` | The datastore for meta server. |
| `runtime` | -- | -- | The runtime options. |
| `runtime.read_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
| `runtime.write_rt_size` | Integer | `8` | The number of threads to execute the runtime for global write operations. |
| `runtime.bg_rt_size` | Integer | `4` | The number of threads to execute the runtime for global background operations. |
| `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
| `runtime.compact_rt_size` | Integer | `4` | The number of threads to execute the runtime for global write operations. |
| `procedure` | -- | -- | Procedure storage options. |
| `procedure.max_retry_times` | Integer | `12` | Procedure max retry time. |
| `procedure.retry_delay` | String | `500ms` | Initial retry delay of procedures, increases exponentially |
| `procedure.max_metadata_value_size` | String | `1500KiB` | Auto split large value<br/>GreptimeDB procedure uses etcd as the default metadata storage backend.<br/>The etcd the maximum size of any request is 1.5 MiB<br/>1500KiB = 1536KiB (1.5MiB) - 36KiB (reserved size of key)<br/>Comments out the `max_metadata_value_size`, for don't split large value (no limit). |
| `failure_detector` | -- | -- | -- |
| `failure_detector.threshold` | Float | `8.0` | -- |
| `failure_detector.min_std_deviation` | String | `100ms` | -- |
| `failure_detector.acceptable_heartbeat_pause` | String | `10000ms` | -- |
| `failure_detector.first_heartbeat_estimate` | String | `1000ms` | -- |
| `failure_detector.threshold` | Float | `8.0` | The threshold value used by the failure detector to determine failure conditions. |
| `failure_detector.min_std_deviation` | String | `100ms` | The minimum standard deviation of the heartbeat intervals, used to calculate acceptable variations. |
| `failure_detector.acceptable_heartbeat_pause` | String | `10000ms` | The acceptable pause duration between heartbeats, used to determine if a heartbeat interval is acceptable. |
| `failure_detector.first_heartbeat_estimate` | String | `1000ms` | The initial estimate of the heartbeat interval used by the failure detector. |
| `datanode` | -- | -- | Datanode options. |
| `datanode.client` | -- | -- | Datanode client options. |
| `datanode.client.timeout` | String | `10s` | -- |
| `datanode.client.connect_timeout` | String | `10s` | -- |
| `datanode.client.tcp_nodelay` | Bool | `true` | -- |
| `datanode.client.timeout` | String | `10s` | Operation timeout. |
| `datanode.client.connect_timeout` | String | `10s` | Connect server timeout. |
| `datanode.client.tcp_nodelay` | Bool | `true` | `TCP_NODELAY` option for accepted connections. |
| `wal` | -- | -- | -- |
| `wal.provider` | String | `raft_engine` | -- |
| `wal.broker_endpoints` | Array | -- | The broker endpoints of the Kafka cluster. |
| `wal.num_topics` | Integer | `64` | Number of topics to be created upon start. |
| `wal.auto_create_topics` | Bool | `true` | Automatically create topics for WAL.<br/>Set to `true` to automatically create topics for WAL.<br/>Otherwise, use topics named `topic_name_prefix_[0..num_topics)` |
| `wal.num_topics` | Integer | `64` | Number of topics. |
| `wal.selector_type` | String | `round_robin` | Topic selector type.<br/>Available selector types:<br/>- `round_robin` (default) |
| `wal.topic_name_prefix` | String | `greptimedb_wal_topic` | A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`. |
| `wal.topic_name_prefix` | String | `greptimedb_wal_topic` | A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`.<br/>e.g., greptimedb_wal_topic_0, greptimedb_wal_topic_1. |
| `wal.replication_factor` | Integer | `1` | Expected number of replicas of each partition. |
| `wal.create_topic_timeout` | String | `30s` | Above which a topic creation operation will be cancelled. |
| `wal.backoff_init` | String | `500ms` | The initial backoff for kafka clients. |
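Pulling the metasrv changes above into one sketch: the default `selector` is now `round_robin`, a `backend` key names the metadata store, and the failure-detector and datanode-client options gain descriptions. Values are the documented defaults.

```toml
## Store server address, defaulting to an etcd store.
store_addr = "127.0.0.1:2379"
## Datanode selector type: `round_robin` (default), `lease_based`, or `load_based`.
selector = "round_robin"
## The datastore for the meta server.
backend = "EtcdStore"

[failure_detector]
## Threshold used by the failure detector to decide that a node has failed.
threshold = 8.0
## Minimum standard deviation of heartbeat intervals.
min_std_deviation = "100ms"
## Acceptable pause between heartbeats.
acceptable_heartbeat_pause = "10000ms"
## Initial estimate of the heartbeat interval.
first_heartbeat_estimate = "1000ms"

[datanode.client]
## Operation timeout.
timeout = "10s"
## Connect timeout.
connect_timeout = "10s"
## `TCP_NODELAY` option for accepted connections.
tcp_nodelay = true
```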
@@ -289,20 +304,21 @@
| `wal.backoff_base` | Integer | `2` | Exponential backoff rate, i.e. next backoff = base * current backoff. |
| `wal.backoff_deadline` | String | `5mins` | Stop reconnecting if the total wait time reaches the deadline. If this config is missing, the reconnecting won't terminate. |
| `logging` | -- | -- | The logging options. |
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. |
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
| `logging.level` | String | `None` | The log level. Can be `info`/`debug`/`warn`/`error`. |
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
| `logging.otlp_endpoint` | String | `None` | The OTLP tracing endpoint. |
| `logging.otlp_endpoint` | String | `http://localhost:4317` | The OTLP tracing endpoint. |
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
| `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. |
| `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing will be sampled and exported.<br/>Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.<br/>ratio > 1 are treated as 1. Fractions < 0 are treated as 0 |
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
| `export_metrics` | -- | -- | The datanode can export its metrics and send to Prometheus compatible service (e.g. send to `greptimedb` itself) from remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape. |
| `export_metrics.enable` | Bool | `false` | whether enable export metrics. |
| `export_metrics.write_interval` | String | `30s` | The interval of export metrics. |
| `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommended to collect metrics generated by itself |
| `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommended to collect metrics generated by itself<br/>You must create the database before enabling it. |
| `export_metrics.self_import.db` | String | `None` | -- |
| `export_metrics.remote_write` | -- | -- | -- |
| `export_metrics.remote_write.url` | String | `""` | The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`. |
| `export_metrics.remote_write.url` | String | `""` | The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. |
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
| `tracing.tokio_console_addr` | String | `None` | The tokio console address. |
@@ -323,6 +339,10 @@
| `rpc_runtime_size` | Integer | `None` | Deprecated, use `grpc.runtime_size` instead. |
| `rpc_max_recv_message_size` | String | `None` | Deprecated, use `grpc.rpc_max_recv_message_size` instead. |
| `rpc_max_send_message_size` | String | `None` | Deprecated, use `grpc.rpc_max_send_message_size` instead. |
| `http` | -- | -- | The HTTP server options. |
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
| `http.timeout` | String | `30s` | HTTP request timeout. Set to 0 to disable timeout. |
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
| `grpc` | -- | -- | The gRPC server options. |
| `grpc.addr` | String | `127.0.0.1:3001` | The address to bind the gRPC server. |
| `grpc.hostname` | String | `127.0.0.1` | The hostname advertised to the metasrv,<br/>and used for connections from outside the host |
@@ -335,9 +355,8 @@
| `grpc.tls.key_path` | String | `None` | Private key file path. |
| `grpc.tls.watch` | Bool | `false` | Watch for Certificate and key file change and auto reload.<br/>For now, gRPC tls config does not support auto reload. |
| `runtime` | -- | -- | The runtime options. |
| `runtime.read_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
| `runtime.write_rt_size` | Integer | `8` | The number of threads to execute the runtime for global write operations. |
| `runtime.bg_rt_size` | Integer | `4` | The number of threads to execute the runtime for global background operations. |
| `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
| `runtime.compact_rt_size` | Integer | `4` | The number of threads to execute the runtime for global write operations. |
| `heartbeat` | -- | -- | The heartbeat options. |
| `heartbeat.interval` | String | `3s` | Interval for sending heartbeat messages to the metasrv. |
| `heartbeat.retry_interval` | String | `3s` | Interval for retrying to send heartbeat messages to the metasrv. |
@@ -369,6 +388,8 @@
| `wal.backoff_max` | String | `10s` | The maximum backoff delay.<br/>**It's only used when the provider is `kafka`**. |
| `wal.backoff_base` | Integer | `2` | The exponential backoff rate, i.e. next backoff = base * current backoff.<br/>**It's only used when the provider is `kafka`**. |
| `wal.backoff_deadline` | String | `5mins` | The deadline of retries.<br/>**It's only used when the provider is `kafka`**. |
| `wal.create_index` | Bool | `true` | Whether to enable WAL index creation.<br/>**It's only used when the provider is `kafka`**. |
| `wal.dump_index_interval` | String | `60s` | The interval for dumping WAL indexes.<br/>**It's only used when the provider is `kafka`**. |
| `storage` | -- | -- | The data storage options. |
| `storage.data_home` | String | `/tmp/greptimedb/` | The working home directory. |
| `storage.type` | String | `File` | The storage type used to store the data.<br/>- `File`: the data is stored in the local file system.<br/>- `S3`: the data is stored in the S3 object storage.<br/>- `Gcs`: the data is stored in the Google Cloud Storage.<br/>- `Azblob`: the data is stored in the Azure Blob Storage.<br/>- `Oss`: the data is stored in the Aliyun OSS. |
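The two new WAL-index keys for the datanode, sketched with their defaults; like the other Kafka options they only apply when the provider is `kafka`.

```toml
[wal]
provider = "kafka"
## Whether to enable WAL index creation.
create_index = true
## The interval for dumping WAL indexes.
dump_index_interval = "60s"
```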
@@ -383,6 +404,7 @@
| `storage.account_key` | String | `None` | The account key of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
| `storage.scope` | String | `None` | The scope of the google cloud storage.<br/>**It's only used when the storage type is `Gcs`**. |
| `storage.credential_path` | String | `None` | The credential path of the google cloud storage.<br/>**It's only used when the storage type is `Gcs`**. |
| `storage.credential` | String | `None` | The credential of the google cloud storage.<br/>**It's only used when the storage type is `Gcs`**. |
| `storage.container` | String | `None` | The container of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
| `storage.sas_token` | String | `None` | The sas token of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
| `storage.endpoint` | String | `None` | The endpoint of the S3 service.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
@@ -400,15 +422,17 @@
| `region_engine.mito.global_write_buffer_reject_size` | String | `2GB` | Global write buffer size threshold to reject write requests. If not set, it's default to 2 times of `global_write_buffer_size` |
| `region_engine.mito.sst_meta_cache_size` | String | `128MB` | Cache size for SST metadata. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/32 of OS memory with a max limitation of 128MB. |
| `region_engine.mito.vector_cache_size` | String | `512MB` | Cache size for vectors and arrow arrays. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
| `region_engine.mito.page_cache_size` | String | `512MB` | Cache size for pages of SST row groups. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
| `region_engine.mito.page_cache_size` | String | `512MB` | Cache size for pages of SST row groups. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/8 of OS memory. |
| `region_engine.mito.selector_result_cache_size` | String | `512MB` | Cache size for time series selector (e.g. `last_value()`). Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
| `region_engine.mito.enable_experimental_write_cache` | Bool | `false` | Whether to enable the experimental write cache. |
| `region_engine.mito.experimental_write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}/write_cache`. |
| `region_engine.mito.experimental_write_cache_size` | String | `512MB` | Capacity for write cache. |
| `region_engine.mito.experimental_write_cache_ttl` | String | `1h` | TTL for write cache. |
| `region_engine.mito.experimental_write_cache_ttl` | String | `None` | TTL for write cache. |
| `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
| `region_engine.mito.scan_parallelism` | Integer | `0` | Parallelism to scan a region (default: 1/4 of cpu cores).<br/>- `0`: using the default value (1/4 of cpu cores).<br/>- `1`: scan in current thread.<br/>- `n`: scan in parallelism n. |
| `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
| `region_engine.mito.allow_stale_entries` | Bool | `false` | Whether to allow stale WAL entries read during replay. |
| `region_engine.mito.min_compaction_interval` | String | `0m` | Minimum time interval between two compactions.<br/>To align with the old behavior, the default value is 0 (no restrictions). |
| `region_engine.mito.index` | -- | -- | The options for index in Mito engine. |
| `region_engine.mito.index.aux_path` | String | `""` | Auxiliary directory path for the index in filesystem, used to store intermediate files for<br/>creating the index and staging files for searching the index, defaults to `{data_home}/index_intermediate`.<br/>The default name for this directory is `index_intermediate` for backward compatibility.<br/><br/>This path contains two subdirectories:<br/>- `__intm`: for storing intermediate files used during creating index.<br/>- `staging`: for storing staging files used during searching index. |
| `region_engine.mito.index.staging_size` | String | `2GB` | The max capacity of the staging directory. |
@@ -428,21 +452,23 @@
| `region_engine.mito.memtable.index_max_keys_per_shard` | Integer | `8192` | The max number of keys in one shard.<br/>Only available for `partition_tree` memtable. |
| `region_engine.mito.memtable.data_freeze_threshold` | Integer | `32768` | The max rows of data inside the actively writing buffer in one shard.<br/>Only available for `partition_tree` memtable. |
| `region_engine.mito.memtable.fork_dictionary_bytes` | String | `1GiB` | Max dictionary bytes.<br/>Only available for `partition_tree` memtable. |
| `region_engine.file` | -- | -- | Enable the file engine. |
| `logging` | -- | -- | The logging options. |
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. |
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
| `logging.level` | String | `None` | The log level. Can be `info`/`debug`/`warn`/`error`. |
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
| `logging.otlp_endpoint` | String | `None` | The OTLP tracing endpoint. |
| `logging.otlp_endpoint` | String | `http://localhost:4317` | The OTLP tracing endpoint. |
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
| `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. |
| `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing will be sampled and exported.<br/>Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.<br/>ratio > 1 are treated as 1. Fractions < 0 are treated as 0 |
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
| `export_metrics` | -- | -- | The datanode can export its metrics and send to Prometheus compatible service (e.g. send to `greptimedb` itself) from remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape. |
| `export_metrics.enable` | Bool | `false` | whether enable export metrics. |
| `export_metrics.write_interval` | String | `30s` | The interval of export metrics. |
| `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommended to collect metrics generated by itself |
| `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommended to collect metrics generated by itself<br/>You must create the database before enabling it. |
| `export_metrics.self_import.db` | String | `None` | -- |
| `export_metrics.remote_write` | -- | -- | -- |
| `export_metrics.remote_write.url` | String | `""` | The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`. |
| `export_metrics.remote_write.url` | String | `""` | The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. |
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
| `tracing.tokio_console_addr` | String | `None` | The tokio console address. |
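For completeness, the `[tracing]` block referenced in these tables only takes effect when GreptimeDB is compiled with the `tokio-console` feature; the address below is a hypothetical example, since no default is documented.

```toml
## Only effective when compiled with the `tokio-console` feature.
[tracing]
## The tokio console address (no documented default; example value only).
tokio_console_addr = "127.0.0.1:6669"
```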
@@ -474,11 +500,12 @@
| `heartbeat.interval` | String | `3s` | Interval for sending heartbeat messages to the metasrv. |
| `heartbeat.retry_interval` | String | `3s` | Interval for retrying to send heartbeat messages to the metasrv. |
| `logging` | -- | -- | The logging options. |
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. |
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
| `logging.level` | String | `None` | The log level. Can be `info`/`debug`/`warn`/`error`. |
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
| `logging.otlp_endpoint` | String | `None` | The OTLP tracing endpoint. |
| `logging.otlp_endpoint` | String | `http://localhost:4317` | The OTLP tracing endpoint. |
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
| `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. |
| `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing will be sampled and exported.<br/>Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.<br/>ratio > 1 are treated as 1. Fractions < 0 are treated as 0 |
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |

View File

@@ -39,6 +39,18 @@ rpc_max_recv_message_size = "512MB"
## +toml2docs:none-default
rpc_max_send_message_size = "512MB"
## The HTTP server options.
[http]
## The address to bind the HTTP server.
addr = "127.0.0.1:4000"
## HTTP request timeout. Set to 0 to disable timeout.
timeout = "30s"
## HTTP request body limit.
## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
## Set to 0 to disable limit.
body_limit = "64MB"
## The gRPC server options.
[grpc]
## The address to bind the gRPC server.
@@ -73,11 +85,9 @@ watch = false
## The runtime options.
[runtime]
## The number of threads to execute the runtime for global read operations.
read_rt_size = 8
global_rt_size = 8
## The number of threads to execute the runtime for global write operations.
write_rt_size = 8
## The number of threads to execute the runtime for global background operations.
bg_rt_size = 4
compact_rt_size = 4
## The heartbeat options.
[heartbeat]
@@ -189,6 +199,32 @@ backoff_base = 2
## **It's only used when the provider is `kafka`**.
backoff_deadline = "5mins"
## Whether to enable WAL index creation.
## **It's only used when the provider is `kafka`**.
create_index = true
## The interval for dumping WAL indexes.
## **It's only used when the provider is `kafka`**.
dump_index_interval = "60s"
# The Kafka SASL configuration.
# **It's only used when the provider is `kafka`**.
# Available SASL mechanisms:
# - `PLAIN`
# - `SCRAM-SHA-256`
# - `SCRAM-SHA-512`
# [wal.sasl]
# type = "SCRAM-SHA-512"
# username = "user_kafka"
# password = "secret"
# The Kafka TLS configuration.
# **It's only used when the provider is `kafka`**.
# [wal.tls]
# server_ca_cert_path = "/path/to/server_cert"
# client_cert_path = "/path/to/client_cert"
# client_key_path = "/path/to/key"
# Example of using S3 as the storage.
# [storage]
# type = "S3"
@@ -225,6 +261,7 @@ backoff_deadline = "5mins"
# root = "data"
# scope = "test"
# credential_path = "123456"
# credential = "base64-credential"
# endpoint = "https://storage.googleapis.com"
## The data storage options.
@@ -296,6 +333,11 @@ scope = "test"
## +toml2docs:none-default
credential_path = "test"
## The credential of the google cloud storage.
## **It's only used when the storage type is `Gcs`**.
## +toml2docs:none-default
credential = "base64-credential"
## The container of the azure account.
## **It's only used when the storage type is `Azblob`**.
## +toml2docs:none-default
@@ -318,9 +360,23 @@ region = "us-west-2"
# Custom storage options
# [[storage.providers]]
# name = "S3"
# type = "S3"
# bucket = "greptimedb"
# root = "data"
# access_key_id = "test"
# secret_access_key = "123456"
# endpoint = "https://s3.amazonaws.com"
# region = "us-west-2"
# [[storage.providers]]
# name = "Gcs"
# type = "Gcs"
# bucket = "greptimedb"
# root = "data"
# scope = "test"
# credential_path = "123456"
# credential = "base64-credential"
# endpoint = "https://storage.googleapis.com"
## The region engine options. You can configure multiple region engines.
[[region_engine]]
@@ -364,9 +420,13 @@ sst_meta_cache_size = "128MB"
vector_cache_size = "512MB"
## Cache size for pages of SST row groups. Setting it to 0 to disable the cache.
## If not set, it's default to 1/16 of OS memory with a max limitation of 512MB.
## If not set, it's default to 1/8 of OS memory.
page_cache_size = "512MB"
## Cache size for time series selector (e.g. `last_value()`). Setting it to 0 to disable the cache.
## If not set, it's default to 1/16 of OS memory with a max limitation of 512MB.
selector_result_cache_size = "512MB"
## Whether to enable the experimental write cache.
enable_experimental_write_cache = false
@@ -377,7 +437,8 @@ experimental_write_cache_path = ""
experimental_write_cache_size = "512MB"
## TTL for write cache.
experimental_write_cache_ttl = "1h"
## +toml2docs:none-default
experimental_write_cache_ttl = "8h"
## Buffer size for SST writing.
sst_write_buffer_size = "8MB"
@@ -394,6 +455,10 @@ parallel_scan_channel_size = 32
## Whether to allow stale WAL entries read during replay.
allow_stale_entries = false
## Minimum time interval between two compactions.
## To align with the old behavior, the default value is 0 (no restrictions).
min_compaction_interval = "0m"
## The options for index in Mito engine.
[region_engine.mito.index]
@@ -478,9 +543,13 @@ data_freeze_threshold = 32768
## Only available for `partition_tree` memtable.
fork_dictionary_bytes = "1GiB"
[[region_engine]]
## Enable the file engine.
[region_engine.file]
## The logging options.
[logging]
## The directory to store the log files.
## The directory to store the log files. If set to empty, logs will not be written to files.
dir = "/tmp/greptimedb/logs"
## The log level. Can be `info`/`debug`/`warn`/`error`.
@@ -491,12 +560,14 @@ level = "info"
enable_otlp_tracing = false
## The OTLP tracing endpoint.
## +toml2docs:none-default
otlp_endpoint = ""
otlp_endpoint = "http://localhost:4317"
## Whether to append logs to stdout.
append_stdout = true
## The log format. Can be `text`/`json`.
log_format = "text"
## The percentage of tracing will be sampled and exported.
## Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.
## ratio > 1 are treated as 1. Fractions < 0 are treated as 0
@@ -514,12 +585,13 @@ enable = false
write_interval = "30s"
## For `standalone` mode, `self_import` is recommended to collect metrics generated by itself
## You must create the database before enabling it.
[export_metrics.self_import]
## +toml2docs:none-default
db = "information_schema"
db = "greptime_metrics"
[export_metrics.remote_write]
## The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`.
## The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
url = ""
## HTTP headers of Prometheus remote-write carry.

View File

@@ -59,7 +59,7 @@ retry_interval = "3s"
## The logging options.
[logging]
## The directory to store the log files.
## The directory to store the log files. If set to empty, logs will not be written to files.
dir = "/tmp/greptimedb/logs"
## The log level. Can be `info`/`debug`/`warn`/`error`.
@@ -70,12 +70,14 @@ level = "info"
enable_otlp_tracing = false
## The OTLP tracing endpoint.
## +toml2docs:none-default
otlp_endpoint = ""
otlp_endpoint = "http://localhost:4317"
## Whether to append logs to stdout.
append_stdout = true
## The log format. Can be `text`/`json`.
log_format = "text"
## The percentage of tracing will be sampled and exported.
## Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.
## ratio > 1 are treated as 1. Fractions < 0 are treated as 0

View File

@@ -1,6 +1,3 @@
## The running mode of the datanode. It can be `standalone` or `distributed`.
mode = "standalone"
## The default timezone of the server.
## +toml2docs:none-default
default_timezone = "UTC"
@@ -8,11 +5,9 @@ default_timezone = "UTC"
## The runtime options.
[runtime]
## The number of threads to execute the runtime for global read operations.
read_rt_size = 8
global_rt_size = 8
## The number of threads to execute the runtime for global write operations.
write_rt_size = 8
## The number of threads to execute the runtime for global background operations.
bg_rt_size = 4
compact_rt_size = 4
## The heartbeat options.
[heartbeat]
@@ -171,7 +166,7 @@ tcp_nodelay = true
## The logging options.
[logging]
## The directory to store the log files.
## The directory to store the log files. If set to empty, logs will not be written to files.
dir = "/tmp/greptimedb/logs"
## The log level. Can be `info`/`debug`/`warn`/`error`.
@@ -182,12 +177,14 @@ level = "info"
enable_otlp_tracing = false
## The OTLP tracing endpoint.
## +toml2docs:none-default
otlp_endpoint = ""
otlp_endpoint = "http://localhost:4317"
## Whether to append logs to stdout.
append_stdout = true
## The log format. Can be `text`/`json`.
log_format = "text"
## The percentage of tracing will be sampled and exported.
## Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.
## ratio > 1 are treated as 1. Fractions < 0 are treated as 0
@@ -205,12 +202,13 @@ enable = false
write_interval = "30s"
## For `standalone` mode, `self_import` is recommended to collect metrics generated by itself
## You must create the database before enabling it.
[export_metrics.self_import]
## +toml2docs:none-default
db = "information_schema"
db = "greptime_metrics"
[export_metrics.remote_write]
## The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`.
## The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
url = ""
## HTTP headers of Prometheus remote-write carry.

View File

@@ -7,14 +7,15 @@ bind_addr = "127.0.0.1:3002"
## The communication server address for frontend and datanode to connect to metasrv, "127.0.0.1:3002" by default for localhost.
server_addr = "127.0.0.1:3002"
## Etcd server address.
## Store server address default to etcd store.
store_addr = "127.0.0.1:2379"
## Datanode selector type.
## - `lease_based` (default value).
## - `round_robin` (default value)
## - `lease_based`
## - `load_based`
## For details, please see "https://docs.greptime.com/developer-guide/metasrv/selector".
selector = "lease_based"
selector = "round_robin"
## Store data in memory.
use_memory_store = false
@@ -25,14 +26,21 @@ enable_telemetry = true
## If it's not empty, the metasrv will store all data with this key prefix.
store_key_prefix = ""
## Whether to enable region failover.
## This feature is only available on GreptimeDB running on cluster mode and
## - Using Remote WAL
## - Using shared storage (e.g., s3).
enable_region_failover = false
## The datastore for meta server.
backend = "EtcdStore"
## The runtime options.
[runtime]
## The number of threads to execute the runtime for global read operations.
read_rt_size = 8
global_rt_size = 8
## The number of threads to execute the runtime for global write operations.
write_rt_size = 8
## The number of threads to execute the runtime for global background operations.
bg_rt_size = 4
compact_rt_size = 4
## Procedure storage options.
[procedure]
@@ -52,17 +60,32 @@ max_metadata_value_size = "1500KiB"
# Failure detectors options.
[failure_detector]
## The threshold value used by the failure detector to determine failure conditions.
threshold = 8.0
## The minimum standard deviation of the heartbeat intervals, used to calculate acceptable variations.
min_std_deviation = "100ms"
## The acceptable pause duration between heartbeats, used to determine if a heartbeat interval is acceptable.
acceptable_heartbeat_pause = "10000ms"
## The initial estimate of the heartbeat interval used by the failure detector.
first_heartbeat_estimate = "1000ms"
## Datanode options.
[datanode]
## Datanode client options.
[datanode.client]
## Operation timeout.
timeout = "10s"
## Connect server timeout.
connect_timeout = "10s"
## `TCP_NODELAY` option for accepted connections.
tcp_nodelay = true
[wal]
@@ -76,7 +99,12 @@ provider = "raft_engine"
## The broker endpoints of the Kafka cluster.
broker_endpoints = ["127.0.0.1:9092"]
## Number of topics to be created upon start.
## Automatically create topics for WAL.
## Set to `true` to automatically create topics for WAL.
## Otherwise, use topics named `topic_name_prefix_[0..num_topics)`
auto_create_topics = true
## Number of topics.
num_topics = 64
## Topic selector type.
@@ -85,6 +113,7 @@ num_topics = 64
selector_type = "round_robin"
## A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`.
## e.g., greptimedb_wal_topic_0, greptimedb_wal_topic_1.
topic_name_prefix = "greptimedb_wal_topic"
## Expected number of replicas of each partition.
@@ -104,9 +133,27 @@ backoff_base = 2
## Stop reconnecting if the total wait time reaches the deadline. If this config is missing, the reconnecting won't terminate.
backoff_deadline = "5mins"
# The Kafka SASL configuration.
# **It's only used when the provider is `kafka`**.
# Available SASL mechanisms:
# - `PLAIN`
# - `SCRAM-SHA-256`
# - `SCRAM-SHA-512`
# [wal.sasl]
# type = "SCRAM-SHA-512"
# username = "user_kafka"
# password = "secret"
# The Kafka TLS configuration.
# **It's only used when the provider is `kafka`**.
# [wal.tls]
# server_ca_cert_path = "/path/to/server_cert"
# client_cert_path = "/path/to/client_cert"
# client_key_path = "/path/to/key"
## The logging options.
[logging]
## The directory to store the log files.
## The directory to store the log files. If set to empty, logs will not be written to files.
dir = "/tmp/greptimedb/logs"
## The log level. Can be `info`/`debug`/`warn`/`error`.
@@ -117,12 +164,14 @@ level = "info"
enable_otlp_tracing = false
## The OTLP tracing endpoint.
## +toml2docs:none-default
otlp_endpoint = ""
otlp_endpoint = "http://localhost:4317"
## Whether to append logs to stdout.
append_stdout = true
## The log format. Can be `text`/`json`.
log_format = "text"
## The percentage of tracing will be sampled and exported.
## Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.
## ratio > 1 are treated as 1. Fractions < 0 are treated as 0
@@ -140,12 +189,13 @@ enable = false
write_interval = "30s"
## For `standalone` mode, `self_import` is recommended to collect metrics generated by itself
## You must create the database before enabling it.
[export_metrics.self_import]
## +toml2docs:none-default
db = "information_schema"
db = "greptime_metrics"
[export_metrics.remote_write]
## The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`.
## The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
url = ""
## HTTP headers of Prometheus remote-write carry.

View File

@@ -8,14 +8,19 @@ enable_telemetry = true
## +toml2docs:none-default
default_timezone = "UTC"
## Initialize all regions in the background during the startup.
## By default, it provides services after all regions have been initialized.
init_regions_in_background = false
## Parallelism of initializing regions.
init_regions_parallelism = 16
## The runtime options.
[runtime]
## The number of threads to execute the runtime for global read operations.
read_rt_size = 8
global_rt_size = 8
## The number of threads to execute the runtime for global write operations.
write_rt_size = 8
## The number of threads to execute the runtime for global background operations.
bg_rt_size = 4
compact_rt_size = 4
## The HTTP server options.
[http]
@@ -173,6 +178,34 @@ sync_period = "10s"
## **It's only used when the provider is `kafka`**.
broker_endpoints = ["127.0.0.1:9092"]
## Automatically create topics for WAL.
## Set to `true` to automatically create topics for WAL.
## Otherwise, use topics named `topic_name_prefix_[0..num_topics)`
auto_create_topics = true
## Number of topics.
## **It's only used when the provider is `kafka`**.
num_topics = 64
## Topic selector type.
## Available selector types:
## - `round_robin` (default)
## **It's only used when the provider is `kafka`**.
selector_type = "round_robin"
## A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`.
## e.g., greptimedb_wal_topic_0, greptimedb_wal_topic_1.
## **It's only used when the provider is `kafka`**.
topic_name_prefix = "greptimedb_wal_topic"
## Expected number of replicas of each partition.
## **It's only used when the provider is `kafka`**.
replication_factor = 1
## Above which a topic creation operation will be cancelled.
## **It's only used when the provider is `kafka`**.
create_topic_timeout = "30s"
## The max size of a single producer batch.
## Warning: Kafka has a default limit of 1MB per message in a topic.
## **It's only used when the provider is `kafka`**.
@@ -198,6 +231,24 @@ backoff_base = 2
## **It's only used when the provider is `kafka`**.
backoff_deadline = "5mins"
# The Kafka SASL configuration.
# **It's only used when the provider is `kafka`**.
# Available SASL mechanisms:
# - `PLAIN`
# - `SCRAM-SHA-256`
# - `SCRAM-SHA-512`
# [wal.sasl]
# type = "SCRAM-SHA-512"
# username = "user_kafka"
# password = "secret"
# The Kafka TLS configuration.
# **It's only used when the provider is `kafka`**.
# [wal.tls]
# server_ca_cert_path = "/path/to/server_cert"
# client_cert_path = "/path/to/client_cert"
# client_key_path = "/path/to/key"
## Metadata storage options.
[metadata_store]
## Kv file size in bytes.
@@ -248,6 +299,7 @@ retry_delay = "500ms"
# root = "data"
# scope = "test"
# credential_path = "123456"
# credential = "base64-credential"
# endpoint = "https://storage.googleapis.com"
## The data storage options.
@@ -319,6 +371,11 @@ scope = "test"
## +toml2docs:none-default
credential_path = "test"
## The credential of the google cloud storage.
## **It's only used when the storage type is `Gcs`**.
## +toml2docs:none-default
credential = "base64-credential"
## The container of the azure account.
## **It's only used when the storage type is `Azblob`**.
## +toml2docs:none-default
@@ -341,9 +398,23 @@ region = "us-west-2"
# Custom storage options
# [[storage.providers]]
# name = "S3"
# type = "S3"
# bucket = "greptimedb"
# root = "data"
# access_key_id = "test"
# secret_access_key = "123456"
# endpoint = "https://s3.amazonaws.com"
# region = "us-west-2"
# [[storage.providers]]
# name = "Gcs"
# type = "Gcs"
# bucket = "greptimedb"
# root = "data"
# scope = "test"
# credential_path = "123456"
# credential = "base64-credential"
# endpoint = "https://storage.googleapis.com"
## The region engine options. You can configure multiple region engines.
[[region_engine]]
@@ -387,9 +458,13 @@ sst_meta_cache_size = "128MB"
vector_cache_size = "512MB"
## Cache size for pages of SST row groups. Setting it to 0 to disable the cache.
## If not set, it's default to 1/16 of OS memory with a max limitation of 512MB.
## If not set, it's default to 1/8 of OS memory.
page_cache_size = "512MB"
## Cache size for time series selector (e.g. `last_value()`). Setting it to 0 to disable the cache.
## If not set, it's default to 1/16 of OS memory with a max limitation of 512MB.
selector_result_cache_size = "512MB"
## Whether to enable the experimental write cache.
enable_experimental_write_cache = false
@@ -400,7 +475,8 @@ experimental_write_cache_path = ""
experimental_write_cache_size = "512MB"
## TTL for write cache.
experimental_write_cache_ttl = "1h"
## +toml2docs:none-default
experimental_write_cache_ttl = "8h"
## Buffer size for SST writing.
sst_write_buffer_size = "8MB"
@@ -417,6 +493,10 @@ parallel_scan_channel_size = 32
## Whether to allow stale WAL entries read during replay.
allow_stale_entries = false
## Minimum time interval between two compactions.
## To align with the old behavior, the default value is 0 (no restrictions).
min_compaction_interval = "0m"
## The options for index in Mito engine.
[region_engine.mito.index]
@@ -459,6 +539,12 @@ mem_threshold_on_create = "auto"
## Deprecated, use `region_engine.mito.index.aux_path` instead.
intermediate_path = ""
## Cache size for inverted index metadata.
metadata_cache_size = "64MiB"
## Cache size for inverted index content.
content_cache_size = "128MiB"
## The options for full-text index in Mito engine.
[region_engine.mito.fulltext_index]
@@ -501,9 +587,13 @@ data_freeze_threshold = 32768
## Only available for `partition_tree` memtable.
fork_dictionary_bytes = "1GiB"
[[region_engine]]
## Enable the file engine.
[region_engine.file]
## The logging options.
[logging]
## The directory to store the log files.
## The directory to store the log files. If set to empty, logs will not be written to files.
dir = "/tmp/greptimedb/logs"
## The log level. Can be `info`/`debug`/`warn`/`error`.
@@ -514,12 +604,14 @@ level = "info"
enable_otlp_tracing = false
## The OTLP tracing endpoint.
## +toml2docs:none-default
otlp_endpoint = ""
otlp_endpoint = "http://localhost:4317"
## Whether to append logs to stdout.
append_stdout = true
## The log format. Can be `text`/`json`.
log_format = "text"
## The percentage of tracing will be sampled and exported.
## Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.
## ratio > 1 are treated as 1. Fractions < 0 are treated as 0
@@ -537,12 +629,13 @@ enable = false
write_interval = "30s"
## For `standalone` mode, `self_import` is recommended to collect metrics generated by itself
## You must create the database before enabling it.
[export_metrics.self_import]
## +toml2docs:none-default
db = "information_schema"
db = "greptime_metrics"
[export_metrics.remote_write]
## The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`.
## The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
url = ""
## HTTP headers of Prometheus remote-write carry.

View File

@@ -1,12 +1,13 @@
x-custom:
initial_cluster_token: &initial_cluster_token "--initial-cluster-token=etcd-cluster"
common_settings: &common_settings
image: quay.io/coreos/etcd:v3.5.10
etcd_initial_cluster_token: &etcd_initial_cluster_token "--initial-cluster-token=etcd-cluster"
etcd_common_settings: &etcd_common_settings
image: "${ETCD_REGISTRY:-quay.io}/${ETCD_NAMESPACE:-coreos}/etcd:${ETCD_VERSION:-v3.5.10}"
entrypoint: /usr/local/bin/etcd
greptimedb_image: &greptimedb_image "${GREPTIMEDB_REGISTRY:-docker.io}/${GREPTIMEDB_NAMESPACE:-greptime}/greptimedb:${GREPTIMEDB_VERSION:-latest}"
services:
etcd0:
<<: *common_settings
<<: *etcd_common_settings
container_name: etcd0
ports:
- 2379:2379
@@ -22,7 +23,7 @@ services:
- --election-timeout=1250
- --initial-cluster=etcd0=http://etcd0:2380
- --initial-cluster-state=new
- *initial_cluster_token
- *etcd_initial_cluster_token
volumes:
- /tmp/greptimedb-cluster-docker-compose/etcd0:/var/lib/etcd
healthcheck:
@@ -34,7 +35,7 @@ services:
- greptimedb
metasrv:
image: docker.io/greptime/greptimedb:latest
image: *greptimedb_image
container_name: metasrv
ports:
- 3002:3002
@@ -56,10 +57,11 @@ services:
- greptimedb
datanode0:
image: docker.io/greptime/greptimedb:latest
image: *greptimedb_image
container_name: datanode0
ports:
- 3001:3001
- 5000:5000
command:
- datanode
- start
@@ -67,8 +69,14 @@ services:
- --rpc-addr=0.0.0.0:3001
- --rpc-hostname=datanode0:3001
- --metasrv-addrs=metasrv:3002
- --http-addr=0.0.0.0:5000
volumes:
- /tmp/greptimedb-cluster-docker-compose/datanode0:/tmp/greptimedb
healthcheck:
test: [ "CMD", "curl", "-f", "http://datanode0:5000/health" ]
interval: 5s
timeout: 3s
retries: 5
depends_on:
metasrv:
condition: service_healthy
@@ -76,7 +84,7 @@ services:
- greptimedb
frontend0:
image: docker.io/greptime/greptimedb:latest
image: *greptimedb_image
container_name: frontend0
ports:
- 4000:4000
@@ -91,8 +99,31 @@ services:
- --rpc-addr=0.0.0.0:4001
- --mysql-addr=0.0.0.0:4002
- --postgres-addr=0.0.0.0:4003
healthcheck:
test: [ "CMD", "curl", "-f", "http://frontend0:4000/health" ]
interval: 5s
timeout: 3s
retries: 5
depends_on:
metasrv:
datanode0:
condition: service_healthy
networks:
- greptimedb
flownode0:
image: *greptimedb_image
container_name: flownode0
ports:
- 4004:4004
command:
- flownode
- start
- --node-id=0
- --metasrv-addrs=metasrv:3002
- --rpc-addr=0.0.0.0:4004
- --rpc-hostname=flownode0:4004
depends_on:
frontend0:
condition: service_healthy
networks:
- greptimedb

View File

@@ -0,0 +1,51 @@
# Log benchmark configuration
This repo holds the configuration we used to benchmark GreptimeDB, Clickhouse and Elasticsearch.
Here are the versions of the databases we used in the benchmark:
| name | version |
| :------------ | :--------- |
| GreptimeDB | v0.9.2 |
| Clickhouse | 24.9.1.219 |
| Elasticsearch | 8.15.0 |
## Structured model vs Unstructured model
We divide the test into two parts, using the structured model and the unstructured model respectively. You can also see the difference in the create table statements.
__Structured model__
The log data is pre-processed into columns by vector. For example, an insert request looks like the following:
```SQL
INSERT INTO test_table (bytes, http_version, ip, method, path, status, user, timestamp) VALUES ()
```
The goal is to test string/text support in each database. In real scenarios this means the data source (or log producer) has separate fields defined, or has already processed the raw input.
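For instance, a fully populated request could look like the sketch below (the values are illustrative only and are not taken from the benchmark data set; at run time vector fills them in):
```SQL
-- Hypothetical example row for the structured table defined in create_table.sql.
INSERT INTO test_table (bytes, http_version, ip, method, path, status, user, timestamp)
VALUES (1024, 'HTTP/1.1', '127.0.0.1', 'GET', '/user/booperbot124', 200, 'CrucifiX', '2024-08-16 04:30:44.000');
```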
__Unstructured model__
The log data is inserted as one long string, and then we build a full-text index on these strings. For example, an insert request looks like the following:
```SQL
INSERT INTO test_table (message, timestamp) VALUES ()
```
The goal is to test fuzzy search performance for each database. In real scenarios this means the log is produced by some kind of middleware and inserted directly into the database.
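For instance, a request carrying one raw Apache-common log line could look like this (the message content is illustrative only, not taken from the benchmark output):
```SQL
-- Hypothetical example row for the unstructured table; the whole log line goes into `message`.
INSERT INTO test_table (message, timestamp)
VALUES ('127.0.0.1 - CrucifiX [16/Aug/2024:04:30:44 +0000] "OPTION /user/booperbot124 HTTP/1.1" 401 1024', '2024-08-16 04:30:44.000');
```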
## Creating tables
See [here](./create_table.sql) for the GreptimeDB and Clickhouse create table statements.
The mapping for Elasticsearch is created automatically.
## Vector Configuration
We use vector to generate random log data and send insert requests to the databases.
Please refer to [structured config](./structured_vector.toml) and [unstructured config](./unstructured_vector.toml) for detailed configuration.
## SQLs and payloads
Please refer to [SQL query](./query.sql) for GreptimeDB and Clickhouse, and [query payload](./query.md) for Elasticsearch.
## Steps to reproduce
0. Decide whether to run the structured model test or the unstructured model test.
1. Build the vector binary (see vector's config files for the specific branch) and the database binaries accordingly.
2. Create the tables in GreptimeDB and Clickhouse in advance.
3. Run vector to insert data.
4. When data insertion is finished, run queries against each database. Note: you'll need to update the time range values after data insertion; see the example below.
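For GreptimeDB, the ingested time range can be read back after insertion as shown in query.sql, and the two values then go into the `timestamp between ... and ...` predicates:
```SQL
-- Get the full ingested time range, then substitute the results into the queries.
select min(timestamp)::bigint from test_table;
select max(timestamp)::bigint from test_table;
```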
## Additional notes
- You can tune GreptimeDB's configuration to get better performance.
- You can set up GreptimeDB to use S3 as storage, see [here](https://docs.greptime.com/user-guide/operations/configuration/#storage-options).

View File

@@ -0,0 +1,56 @@
-- GreptimeDB create table clause
-- structured test, use vector to pre-process log data into fields
CREATE TABLE IF NOT EXISTS `test_table` (
`bytes` Int64 NULL,
`http_version` STRING NULL,
`ip` STRING NULL,
`method` STRING NULL,
`path` STRING NULL,
`status` SMALLINT UNSIGNED NULL,
`user` STRING NULL,
`timestamp` TIMESTAMP(3) NOT NULL,
PRIMARY KEY (`user`, `path`, `status`),
TIME INDEX (`timestamp`)
)
ENGINE=mito
WITH(
append_mode = 'true'
);
-- unstructured test, build fulltext index on message column
CREATE TABLE IF NOT EXISTS `test_table` (
`message` STRING NULL FULLTEXT WITH(analyzer = 'English', case_sensitive = 'false'),
`timestamp` TIMESTAMP(3) NOT NULL,
TIME INDEX (`timestamp`)
)
ENGINE=mito
WITH(
append_mode = 'true'
);
-- Clickhouse create table clause
-- structured test
CREATE TABLE IF NOT EXISTS test_table
(
bytes UInt64 NOT NULL,
http_version String NOT NULL,
ip String NOT NULL,
method String NOT NULL,
path String NOT NULL,
status UInt8 NOT NULL,
user String NOT NULL,
timestamp String NOT NULL,
)
ENGINE = MergeTree()
ORDER BY (user, path, status);
-- unstructured test
SET allow_experimental_full_text_index = true;
CREATE TABLE IF NOT EXISTS test_table
(
message String,
timestamp String,
INDEX inv_idx(message) TYPE full_text(0) GRANULARITY 1
)
ENGINE = MergeTree()
ORDER BY tuple();

View File

@@ -0,0 +1,199 @@
# Query URL and payload for Elasticsearch
## Count
URL: `http://127.0.0.1:9200/_count`
## Query by timerange
URL: `http://127.0.0.1:9200/_search`
You can use the following payload to get the full timerange first.
```JSON
{"size":0,"aggs":{"max_timestamp":{"max":{"field":"timestamp"}},"min_timestamp":{"min":{"field":"timestamp"}}}}
```
And then use this payload to query by timerange.
```JSON
{
"from": 0,
"size": 1000,
"query": {
"range": {
"timestamp": {
"gte": "2024-08-16T04:30:44.000Z",
"lte": "2024-08-16T04:51:52.000Z"
}
}
}
}
```
## Query by condition
URL: `http://127.0.0.1:9200/_search`
### Structured payload
```JSON
{
"from": 0,
"size": 10000,
"query": {
"bool": {
"must": [
{
"term": {
"user.keyword": "CrucifiX"
}
},
{
"term": {
"method.keyword": "OPTION"
}
},
{
"term": {
"path.keyword": "/user/booperbot124"
}
},
{
"term": {
"http_version.keyword": "HTTP/1.1"
}
},
{
"term": {
"status": "401"
}
}
]
}
}
}
```
### Unstructured payload
```JSON
{
"from": 0,
"size": 10000,
"query": {
"bool": {
"must": [
{
"match_phrase": {
"message": "CrucifiX"
}
},
{
"match_phrase": {
"message": "OPTION"
}
},
{
"match_phrase": {
"message": "/user/booperbot124"
}
},
{
"match_phrase": {
"message": "HTTP/1.1"
}
},
{
"match_phrase": {
"message": "401"
}
}
]
}
}
}
```
## Query by condition and timerange
URL: `http://127.0.0.1:9200/_search`
### Structured payload
```JSON
{
"size": 10000,
"query": {
"bool": {
"must": [
{
"term": {
"user.keyword": "CrucifiX"
}
},
{
"term": {
"method.keyword": "OPTION"
}
},
{
"term": {
"path.keyword": "/user/booperbot124"
}
},
{
"term": {
"http_version.keyword": "HTTP/1.1"
}
},
{
"term": {
"status": "401"
}
},
{
"range": {
"timestamp": {
"gte": "2024-08-19T07:03:37.383Z",
"lte": "2024-08-19T07:24:58.883Z"
}
}
}
]
}
}
}
```
### Unstructured payload
```JSON
{
"size": 10000,
"query": {
"bool": {
"must": [
{
"match_phrase": {
"message": "CrucifiX"
}
},
{
"match_phrase": {
"message": "OPTION"
}
},
{
"match_phrase": {
"message": "/user/booperbot124"
}
},
{
"match_phrase": {
"message": "HTTP/1.1"
}
},
{
"match_phrase": {
"message": "401"
}
},
{
"range": {
"timestamp": {
"gte": "2024-08-19T05:16:17.099Z",
"lte": "2024-08-19T05:46:02.722Z"
}
}
}
]
}
}
}
```

View File

@@ -0,0 +1,50 @@
-- Structured query for GreptimeDB and Clickhouse
-- query count
select count(*) from test_table;
-- query by timerange. Note: place the timestamp range in the where clause
-- GreptimeDB
-- you can use `select max(timestamp)::bigint from test_table;` and `select min(timestamp)::bigint from test_table;`
-- to get the full timestamp range
select * from test_table where timestamp between 1723710843619 and 1723711367588;
-- Clickhouse
-- you can use `select max(timestamp) from test_table;` and `select min(timestamp) from test_table;`
-- to get the full timestamp range
select * from test_table where timestamp between '2024-08-16T03:58:46Z' and '2024-08-16T04:03:50Z';
-- query by condition
SELECT * FROM test_table WHERE user = 'CrucifiX' and method = 'OPTION' and path = '/user/booperbot124' and http_version = 'HTTP/1.1' and status = 401;
-- query by condition and timerange
-- GreptimeDB
SELECT * FROM test_table WHERE user = "CrucifiX" and method = "OPTION" and path = "/user/booperbot124" and http_version = "HTTP/1.1" and status = 401
and timestamp between 1723774396760 and 1723774788760;
-- Clickhouse
SELECT * FROM test_table WHERE user = 'CrucifiX' and method = 'OPTION' and path = '/user/booperbot124' and http_version = 'HTTP/1.1' and status = 401
and timestamp between '2024-08-16T03:58:46Z' and '2024-08-16T04:03:50Z';
-- Unstructured query for GreptimeDB and Clickhouse
-- query by condition
-- GreptimeDB
SELECT * FROM test_table WHERE MATCHES(message, "+CrucifiX +OPTION +/user/booperbot124 +HTTP/1.1 +401");
-- Clickhouse
SELECT * FROM test_table WHERE (message LIKE '%CrucifiX%')
AND (message LIKE '%OPTION%')
AND (message LIKE '%/user/booperbot124%')
AND (message LIKE '%HTTP/1.1%')
AND (message LIKE '%401%');
-- query by condition and timerange
-- GreptimeDB
SELECT * FROM test_table WHERE MATCHES(message, "+CrucifiX +OPTION +/user/booperbot124 +HTTP/1.1 +401")
and timestamp between 1723710843619 and 1723711367588;
-- Clickhouse
SELECT * FROM test_table WHERE (message LIKE '%CrucifiX%')
AND (message LIKE '%OPTION%')
AND (message LIKE '%/user/booperbot124%')
AND (message LIKE '%HTTP/1.1%')
AND (message LIKE '%401%')
AND timestamp between '2024-08-15T10:25:26.524000000Z' AND '2024-08-15T10:31:31.746000000Z';

View File

@@ -0,0 +1,57 @@
# Please note we use a patched branch to build vector
# https://github.com/shuiyisong/vector/tree/chore/greptime_log_ingester_logitem
[sources.demo_logs]
type = "demo_logs"
format = "apache_common"
# interval value = 1 / rps
# say you want to insert at 20k/s, that is 1 / 20000 = 0.00005
# set to 0 to run as fast as possible
interval = 0
# total rows to insert
count = 100000000
lines = [ "line1" ]
[transforms.parse_logs]
type = "remap"
inputs = ["demo_logs"]
source = '''
. = parse_regex!(.message, r'^(?P<ip>\S+) - (?P<user>\S+) \[(?P<timestamp>[^\]]+)\] "(?P<method>\S+) (?P<path>\S+) (?P<http_version>\S+)" (?P<status>\d+) (?P<bytes>\d+)$')
# Convert timestamp to a standard format
.timestamp = parse_timestamp!(.timestamp, format: "%d/%b/%Y:%H:%M:%S %z")
# Convert status and bytes to integers
.status = to_int!(.status)
.bytes = to_int!(.bytes)
'''
[sinks.sink_greptime_logs]
type = "greptimedb_logs"
# The table to insert into
table = "test_table"
pipeline_name = "demo_pipeline"
compression = "none"
inputs = [ "parse_logs" ]
endpoint = "http://127.0.0.1:4000"
# Batch size for each insertion
batch.max_events = 4000
[sinks.clickhouse]
type = "clickhouse"
inputs = [ "parse_logs" ]
database = "default"
endpoint = "http://127.0.0.1:8123"
format = "json_each_row"
# The table to insert into
table = "test_table"
[sinks.sink_elasticsearch]
type = "elasticsearch"
inputs = [ "parse_logs" ]
api_version = "auto"
compression = "none"
doc_type = "_doc"
endpoints = [ "http://127.0.0.1:9200" ]
id_key = "id"
mode = "bulk"

View File

@@ -0,0 +1,43 @@
# Please note we use a patched branch to build vector
# https://github.com/shuiyisong/vector/tree/chore/greptime_log_ingester_ft
[sources.demo_logs]
type = "demo_logs"
format = "apache_common"
# interval value = 1 / rps
# say you want to insert at 20k/s, that is 1 / 20000 = 0.00005
# set to 0 to run as fast as possible
interval = 0
# total rows to insert
count = 100000000
lines = [ "line1" ]
[sinks.sink_greptime_logs]
type = "greptimedb_logs"
# The table to insert into
table = "test_table"
pipeline_name = "demo_pipeline"
compression = "none"
inputs = [ "demo_logs" ]
endpoint = "http://127.0.0.1:4000"
# Batch size for each insertion
batch.max_events = 500
[sinks.clickhouse]
type = "clickhouse"
inputs = [ "demo_logs" ]
database = "default"
endpoint = "http://127.0.0.1:8123"
format = "json_each_row"
# The table to insert into
table = "test_table"
[sinks.sink_elasticsearch]
type = "elasticsearch"
inputs = [ "demo_logs" ]
api_version = "auto"
compression = "none"
doc_type = "_doc"
endpoints = [ "http://127.0.0.1:9200" ]
id_key = "id"
mode = "bulk"

View File

@@ -0,0 +1,58 @@
# TSBS benchmark - v0.9.1
## Environment
### Local
| | |
| ------ | ---------------------------------- |
| CPU | AMD Ryzen 7 7735HS (8 core 3.2GHz) |
| Memory | 32GB |
| Disk | SOLIDIGM SSDPFKNU010TZ |
| OS | Ubuntu 22.04.2 LTS |
### Amazon EC2
| | |
| ------- | ----------------------- |
| Machine | c5d.2xlarge |
| CPU | 8 core |
| Memory | 16GB |
| Disk | 100GB (GP3) |
| OS | Ubuntu Server 24.04 LTS |
## Write performance
| Environment | Ingest rate (rows/s) |
| --------------- | -------------------- |
| Local | 387697.68 |
| EC2 c5d.2xlarge | 234620.19 |
## Query performance
| Query type | Local (ms) | EC2 c5d.2xlarge (ms) |
| --------------------- | ---------- | -------------------- |
| cpu-max-all-1 | 21.14 | 14.75 |
| cpu-max-all-8 | 36.79 | 30.69 |
| double-groupby-1 | 529.02 | 987.85 |
| double-groupby-5 | 1064.53 | 1455.95 |
| double-groupby-all | 1625.33 | 2143.96 |
| groupby-orderby-limit | 529.19 | 1353.49 |
| high-cpu-1 | 12.09 | 8.24 |
| high-cpu-all | 3619.47 | 5312.82 |
| lastpoint | 224.91 | 576.06 |
| single-groupby-1-1-1 | 10.82 | 6.01 |
| single-groupby-1-1-12 | 11.16 | 7.42 |
| single-groupby-1-8-1 | 13.50 | 10.20 |
| single-groupby-5-1-1 | 11.99 | 6.70 |
| single-groupby-5-1-12 | 13.17 | 8.72 |
| single-groupby-5-8-1 | 16.01 | 12.07 |
`single-groupby-1-1-1` query throughput
| Environment | Client concurrency | mean time (ms) | qps (queries/sec) |
| --------------- | ------------------ | -------------- | ----------------- |
| Local | 50 | 33.04 | 1511.74 |
| Local | 100 | 67.70 | 1476.14 |
| EC2 c5d.2xlarge | 50 | 61.93 | 806.97 |
| EC2 c5d.2xlarge | 100 | 126.31 | 791.40 |

View File

@@ -105,7 +105,7 @@ use tests_fuzz::utils::{init_greptime_connections, Connections};
fuzz_target!(|input: FuzzInput| {
common_telemetry::init_default_ut_logging();
common_runtime::block_on_write(async {
common_runtime::block_on_global(async {
let Connections { mysql } = init_greptime_connections().await;
let mut rng = ChaChaRng::seed_from_u64(input.seed);
let columns = rng.gen_range(2..30);

View File

@@ -25,7 +25,7 @@ Please ensure the following configuration before importing the dashboard into Gr
__1. Prometheus scrape config__
Assign `greptime_pod` label to each host target. We use this label to identify each node instance.
Configure Prometheus to scrape the cluster.
```yml
# example config
@@ -34,27 +34,15 @@ Assign `greptime_pod` label to each host target. We use this label to identify e
scrape_configs:
- job_name: metasrv
static_configs:
- targets: ['<ip>:<port>']
labels:
greptime_pod: metasrv
- targets: ['<metasrv-ip>:<port>']
- job_name: datanode
static_configs:
- targets: ['<ip>:<port>']
labels:
greptime_pod: datanode1
- targets: ['<ip>:<port>']
labels:
greptime_pod: datanode2
- targets: ['<ip>:<port>']
labels:
greptime_pod: datanode3
- targets: ['<datanode0-ip>:<port>', '<datanode1-ip>:<port>', '<datanode2-ip>:<port>']
- job_name: frontend
static_configs:
- targets: ['<ip>:<port>']
labels:
greptime_pod: frontend
- targets: ['<frontend-ip>:<port>']
```
__2. Grafana config__
@@ -63,4 +51,4 @@ Create a Prometheus data source in Grafana before using this dashboard. We use `
### Usage
Use `datasource` or `greptime_pod` on the upper-left corner to filter data from certain node.
Use `datasource` or `instance` on the upper-left corner to filter data from certain node.

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -1,2 +1,3 @@
[toolchain]
channel = "nightly-2024-04-20"
channel = "nightly-2024-06-06"

View File

@@ -0,0 +1,42 @@
#!/usr/bin/env bash
set -e
RUST_TOOLCHAIN_VERSION_FILE="rust-toolchain.toml"
DEV_BUILDER_UBUNTU_REGISTRY="docker.io"
DEV_BUILDER_UBUNTU_NAMESPACE="greptime"
DEV_BUILDER_UBUNTU_NAME="dev-builder-ubuntu"
function check_rust_toolchain_version() {
DEV_BUILDER_IMAGE_TAG=$(grep "DEV_BUILDER_IMAGE_TAG ?= " Makefile | cut -d= -f2 | sed 's/^[ \t]*//')
if [ -z "$DEV_BUILDER_IMAGE_TAG" ]; then
echo "Error: No DEV_BUILDER_IMAGE_TAG found in Makefile"
exit 1
fi
DEV_BUILDER_UBUNTU_IMAGE="$DEV_BUILDER_UBUNTU_REGISTRY/$DEV_BUILDER_UBUNTU_NAMESPACE/$DEV_BUILDER_UBUNTU_NAME:$DEV_BUILDER_IMAGE_TAG"
CURRENT_VERSION=$(grep -Eo '[0-9]{4}-[0-9]{2}-[0-9]{2}' "$RUST_TOOLCHAIN_VERSION_FILE")
if [ -z "$CURRENT_VERSION" ]; then
echo "Error: No rust toolchain version found in $RUST_TOOLCHAIN_VERSION_FILE"
exit 1
fi
RUST_TOOLCHAIN_VERSION_IN_BUILDER=$(docker run "$DEV_BUILDER_UBUNTU_IMAGE" rustc --version | grep -Eo '[0-9]{4}-[0-9]{2}-[0-9]{2}')
if [ -z "$RUST_TOOLCHAIN_VERSION_IN_BUILDER" ]; then
echo "Error: No rustc version found in $DEV_BUILDER_UBUNTU_IMAGE"
exit 1
fi
# Compare the version and the difference should be less than 1 day.
current_rust_toolchain_seconds=$(date -d "$CURRENT_VERSION" +%s)
rust_toolchain_in_dev_builder_ubuntu_seconds=$(date -d "$RUST_TOOLCHAIN_VERSION_IN_BUILDER" +%s)
date_diff=$(( (current_rust_toolchain_seconds - rust_toolchain_in_dev_builder_ubuntu_seconds) / 86400 ))
if [ $date_diff -gt 1 ]; then
echo "Error: The rust toolchain '$RUST_TOOLCHAIN_VERSION_IN_BUILDER' in builder '$DEV_BUILDER_UBUNTU_IMAGE' maybe outdated, please update it to '$CURRENT_VERSION'"
exit 1
fi
}
check_rust_toolchain_version

scripts/check-snafu.py
View File

@@ -0,0 +1,69 @@
# Copyright 2023 Greptime Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
def find_rust_files(directory):
error_files = []
other_rust_files = []
for root, _, files in os.walk(directory):
for file in files:
if file == "error.rs":
error_files.append(os.path.join(root, file))
elif file.endswith(".rs"):
other_rust_files.append(os.path.join(root, file))
return error_files, other_rust_files
def extract_branch_names(file_content):
pattern = re.compile(r"#\[snafu\(display\([^\)]*\)\)\]\s*(\w+)\s*\{")
return pattern.findall(file_content)
def check_snafu_in_files(branch_name, rust_files):
branch_name_snafu = f"{branch_name}Snafu"
for rust_file in rust_files:
with open(rust_file, "r") as file:
content = file.read()
if branch_name_snafu in content:
return True
return False
def main():
error_files, other_rust_files = find_rust_files(".")
branch_names = []
for error_file in error_files:
with open(error_file, "r") as file:
content = file.read()
branch_names.extend(extract_branch_names(content))
unused_snafu = [
branch_name
for branch_name in branch_names
if not check_snafu_in_files(branch_name, other_rust_files)
]
for name in unused_snafu:
print(name)
if unused_snafu:
raise SystemExit(1)
if __name__ == "__main__":
main()

View File

@@ -1,62 +1,72 @@
#!/bin/sh
#!/usr/bin/env bash
set -ue
OS_TYPE=
ARCH_TYPE=
# Set the GitHub token to avoid GitHub API rate limit.
# You can run with `GITHUB_TOKEN`:
# GITHUB_TOKEN=<your_token> ./scripts/install.sh
GITHUB_TOKEN=${GITHUB_TOKEN:-}
VERSION=${1:-latest}
GITHUB_ORG=GreptimeTeam
GITHUB_REPO=greptimedb
BIN=greptime
get_os_type() {
os_type="$(uname -s)"
function get_os_type() {
os_type="$(uname -s)"
case "$os_type" in
case "$os_type" in
Darwin)
OS_TYPE=darwin
;;
OS_TYPE=darwin
;;
Linux)
OS_TYPE=linux
;;
OS_TYPE=linux
;;
*)
echo "Error: Unknown OS type: $os_type"
exit 1
esac
echo "Error: Unknown OS type: $os_type"
exit 1
esac
}
get_arch_type() {
arch_type="$(uname -m)"
function get_arch_type() {
arch_type="$(uname -m)"
case "$arch_type" in
case "$arch_type" in
arm64)
ARCH_TYPE=arm64
;;
ARCH_TYPE=arm64
;;
aarch64)
ARCH_TYPE=arm64
;;
ARCH_TYPE=arm64
;;
x86_64)
ARCH_TYPE=amd64
;;
ARCH_TYPE=amd64
;;
amd64)
ARCH_TYPE=amd64
;;
ARCH_TYPE=amd64
;;
*)
echo "Error: Unknown CPU type: $arch_type"
exit 1
esac
echo "Error: Unknown CPU type: $arch_type"
exit 1
esac
}
get_os_type
get_arch_type
if [ -n "${OS_TYPE}" ] && [ -n "${ARCH_TYPE}" ]; then
# Use the latest nightly version.
function download_artifact() {
if [ -n "${OS_TYPE}" ] && [ -n "${ARCH_TYPE}" ]; then
# Use the latest stable released version.
# GitHub API reference: https://docs.github.com/en/rest/releases/releases?apiVersion=2022-11-28#get-the-latest-release.
if [ "${VERSION}" = "latest" ]; then
VERSION=$(curl -s -XGET "https://api.github.com/repos/${GITHUB_ORG}/${GITHUB_REPO}/releases" | grep tag_name | grep nightly | cut -d: -f 2 | sed 's/.*"\(.*\)".*/\1/' | uniq | sort -r | head -n 1)
if [ -z "${VERSION}" ]; then
echo "Failed to get the latest version."
exit 1
# To avoid other tools dependency, we choose to use `curl` to get the version metadata and parsed by `sed`.
VERSION=$(curl -sL \
-H "Accept: application/vnd.github+json" \
-H "X-GitHub-Api-Version: 2022-11-28" \
${GITHUB_TOKEN:+-H "Authorization: Bearer $GITHUB_TOKEN"} \
"https://api.github.com/repos/${GITHUB_ORG}/${GITHUB_REPO}/releases/latest" | sed -n 's/.*"tag_name": "\([^"]*\)".*/\1/p')
if [ -z "${VERSION}" ]; then
echo "Failed to get the latest stable released version."
exit 1
fi
fi
@@ -73,4 +83,9 @@ if [ -n "${OS_TYPE}" ] && [ -n "${ARCH_TYPE}" ]; then
rm -r "${PACKAGE_NAME%.tar.gz}" && \
echo "Run './${BIN} --help' to get started"
fi
fi
fi
}
get_os_type
get_arch_type
download_artifact

View File

@@ -38,10 +38,11 @@ pub enum Error {
location: Location,
},
#[snafu(display("Auth failed"))]
#[snafu(display("Authentication source failure"))]
AuthBackend {
#[snafu(implicit)]
location: Location,
#[snafu(source)]
source: BoxedError,
},
@@ -87,7 +88,7 @@ impl ErrorExt for Error {
Error::IllegalParam { .. } => StatusCode::InvalidArguments,
Error::FileWatch { .. } => StatusCode::InvalidArguments,
Error::InternalState { .. } => StatusCode::Unexpected,
Error::Io { .. } => StatusCode::Internal,
Error::Io { .. } => StatusCode::StorageUnavailable,
Error::AuthBackend { .. } => StatusCode::Internal,
Error::UserNotFound { .. } => StatusCode::UserNotFound,

View File

@@ -13,9 +13,11 @@
// limitations under the License.
use common_base::secrets::ExposeSecret;
use common_error::ext::BoxedError;
use snafu::{OptionExt, ResultExt};
use crate::error::{
AccessDeniedSnafu, Result, UnsupportedPasswordTypeSnafu, UserNotFoundSnafu,
AccessDeniedSnafu, AuthBackendSnafu, Result, UnsupportedPasswordTypeSnafu, UserNotFoundSnafu,
UserPasswordMismatchSnafu,
};
use crate::user_info::DefaultUserInfo;
@@ -49,6 +51,19 @@ impl MockUserProvider {
info.schema.clone_into(&mut self.schema);
info.username.clone_into(&mut self.username);
}
// this is a deliberate function to ref AuthBackendSnafu
// so that it won't get deleted in the future
pub fn ref_auth_backend_snafu(&self) -> Result<()> {
let none_option = None;
none_option
.context(UserNotFoundSnafu {
username: "no_user".to_string(),
})
.map_err(BoxedError::new)
.context(AuthBackendSnafu)
}
}
#[async_trait::async_trait]

View File

@@ -18,6 +18,7 @@ use std::sync::Arc;
use api::v1::greptime_request::Request;
use auth::error::Error::InternalState;
use auth::error::InternalStateSnafu;
use auth::{PermissionChecker, PermissionCheckerRef, PermissionReq, PermissionResp, UserInfoRef};
use sql::statements::show::{ShowDatabases, ShowKind};
use sql::statements::statement::Statement;
@@ -33,9 +34,10 @@ impl PermissionChecker for DummyPermissionChecker {
match req {
PermissionReq::GrpcRequest(_) => Ok(PermissionResp::Allow),
PermissionReq::SqlStatement(_) => Ok(PermissionResp::Reject),
_ => Err(InternalState {
_ => InternalStateSnafu {
msg: "testing".to_string(),
}),
}
.fail(),
}
}
}

View File

@@ -34,7 +34,7 @@ pub type Result<T> = std::result::Result<T, Error>;
impl ErrorExt for Error {
fn status_code(&self) -> StatusCode {
match self {
Error::CacheRequired { .. } => StatusCode::Internal,
Error::CacheRequired { .. } => StatusCode::Unexpected,
}
}

View File

@@ -40,6 +40,7 @@ moka = { workspace = true, features = ["future", "sync"] }
partition.workspace = true
paste = "1.0"
prometheus.workspace = true
rustc-hash.workspace = true
serde_json.workspace = true
session.workspace = true
snafu.workspace = true

View File

@@ -18,6 +18,7 @@ use std::fmt::Debug;
use common_error::ext::{BoxedError, ErrorExt};
use common_error::status_code::StatusCode;
use common_macro::stack_trace_debug;
use common_query::error::datafusion_status_code;
use datafusion::error::DataFusionError;
use snafu::{Location, Snafu};
@@ -56,6 +57,31 @@ pub enum Error {
source: BoxedError,
},
#[snafu(display("Failed to list flows in catalog {catalog}"))]
ListFlows {
#[snafu(implicit)]
location: Location,
catalog: String,
source: BoxedError,
},
#[snafu(display("Flow info not found: {flow_name} in catalog {catalog_name}"))]
FlowInfoNotFound {
flow_name: String,
catalog_name: String,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Can't convert value to json, input={input}"))]
Json {
input: String,
#[snafu(source)]
error: serde_json::error::Error,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Failed to re-compile script due to internal error"))]
CompileScriptInternal {
#[snafu(implicit)]
@@ -71,13 +97,6 @@ pub enum Error {
source: table::error::Error,
},
#[snafu(display("System catalog is not valid: {}", msg))]
SystemCatalog {
msg: String,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Cannot find catalog by name: {}", catalog_name))]
CatalogNotFound {
catalog_name: String,
@@ -114,6 +133,18 @@ pub enum Error {
location: Location,
},
#[snafu(display(
"View plan columns changed from: {} to: {}",
origin_names,
actual_names
))]
ViewPlanColumnsChanged {
origin_names: String,
actual_names: String,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Failed to find table partitions"))]
FindPartitions { source: partition::error::Error },
@@ -148,13 +179,6 @@ pub enum Error {
source: common_query::error::Error,
},
#[snafu(display("Failed to perform metasrv operation"))]
Metasrv {
#[snafu(implicit)]
location: Location,
source: meta_client::error::Error,
},
#[snafu(display("Invalid table info in catalog"))]
InvalidTableInfoInCatalog {
#[snafu(implicit)]
@@ -173,6 +197,14 @@ pub enum Error {
location: Location,
},
#[snafu(display("Failed to project view columns"))]
ProjectViewColumns {
#[snafu(source)]
error: DataFusionError,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Table metadata manager error"))]
TableMetadataManager {
source: common_meta::error::Error,
@@ -208,6 +240,21 @@ pub enum Error {
},
}
impl Error {
pub fn should_fail(&self) -> bool {
use Error::*;
matches!(
self,
GetViewCache { .. }
| ViewInfoNotFound { .. }
| DecodePlan { .. }
| ViewPlanColumnsChanged { .. }
| ProjectViewColumns { .. }
)
}
}
pub type Result<T> = std::result::Result<T, Error>;
impl ErrorExt for Error {
@@ -218,11 +265,14 @@ impl ErrorExt for Error {
| Error::FindPartitions { .. }
| Error::FindRegionRoutes { .. }
| Error::CacheNotFound { .. }
| Error::CastManager { .. } => StatusCode::Unexpected,
| Error::CastManager { .. }
| Error::Json { .. } => StatusCode::Unexpected,
Error::ViewPlanColumnsChanged { .. } => StatusCode::InvalidArguments,
Error::ViewInfoNotFound { .. } => StatusCode::TableNotFound,
Error::SystemCatalog { .. } => StatusCode::StorageUnavailable,
Error::FlowInfoNotFound { .. } => StatusCode::FlowNotFound,
Error::UpgradeWeakCatalogManagerRef { .. } => StatusCode::Internal,
@@ -232,11 +282,11 @@ impl ErrorExt for Error {
Error::ListCatalogs { source, .. }
| Error::ListNodes { source, .. }
| Error::ListSchemas { source, .. }
| Error::ListTables { source, .. } => source.status_code(),
| Error::ListTables { source, .. }
| Error::ListFlows { source, .. } => source.status_code(),
Error::CreateTable { source, .. } => source.status_code(),
Error::Metasrv { source, .. } => source.status_code(),
Error::DecodePlan { source, .. } => source.status_code(),
Error::InvalidTableInfoInCatalog { source, .. } => source.status_code(),
@@ -245,7 +295,8 @@ impl ErrorExt for Error {
}
Error::QueryAccessDenied { .. } => StatusCode::AccessDenied,
Error::Datafusion { .. } => StatusCode::EngineExecuteQuery,
Error::Datafusion { error, .. } => datafusion_status_code::<Self>(error, None),
Error::ProjectViewColumns { .. } => StatusCode::EngineExecuteQuery,
Error::TableMetadataManager { source, .. } => source.status_code(),
Error::GetViewCache { source, .. } | Error::GetTableCache { source, .. } => {
source.status_code()
@@ -260,7 +311,7 @@ impl ErrorExt for Error {
impl From<Error> for DataFusionError {
fn from(e: Error) -> Self {
DataFusionError::Internal(e.to_string())
DataFusionError::External(Box::new(e))
}
}
@@ -270,27 +321,6 @@ mod tests {
use super::*;
#[test]
pub fn test_error_status_code() {
assert_eq!(
StatusCode::TableAlreadyExists,
Error::TableExists {
table: "some_table".to_string(),
location: Location::generate(),
}
.status_code()
);
assert_eq!(
StatusCode::StorageUnavailable,
Error::SystemCatalog {
msg: String::default(),
location: Location::generate(),
}
.status_code()
);
}
#[test]
pub fn test_errors_to_datafusion_error() {
let e: DataFusionError = Error::TableExists {
@@ -299,7 +329,7 @@ mod tests {
}
.into();
match e {
DataFusionError::Internal(_) => {}
DataFusionError::External(_) => {}
_ => {
panic!("catalog error should be converted to DataFusionError::Internal")
}

View File

@@ -20,8 +20,8 @@ use std::time::Duration;
use common_error::ext::BoxedError;
use common_meta::cache_invalidator::KvCacheInvalidator;
use common_meta::error::Error::{CacheNotGet, GetKvCache};
use common_meta::error::{CacheNotGetSnafu, Error, ExternalSnafu, Result};
use common_meta::error::Error::CacheNotGet;
use common_meta::error::{CacheNotGetSnafu, Error, ExternalSnafu, GetKvCacheSnafu, Result};
use common_meta::kv_backend::{KvBackend, KvBackendRef, TxnService};
use common_meta::rpc::store::{
BatchDeleteRequest, BatchDeleteResponse, BatchGetRequest, BatchGetResponse, BatchPutRequest,
@@ -282,8 +282,11 @@ impl KvBackend for CachedMetaKvBackend {
_ => Err(e),
},
}
.map_err(|e| GetKvCache {
err_msg: e.to_string(),
.map_err(|e| {
GetKvCacheSnafu {
err_msg: e.to_string(),
}
.build()
});
// "cache.invalidate_key" and "cache.try_get_with_by_ref" are not mutually exclusive. So we need

View File

@@ -19,11 +19,13 @@ use std::sync::{Arc, Weak};
use async_stream::try_stream;
use common_catalog::consts::{
DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, INFORMATION_SCHEMA_NAME, NUMBERS_TABLE_ID,
PG_CATALOG_NAME,
};
use common_config::Mode;
use common_error::ext::BoxedError;
use common_meta::cache::{LayeredCacheRegistryRef, ViewInfoCacheRef};
use common_meta::key::catalog_name::CatalogNameKey;
use common_meta::key::flow::FlowMetadataManager;
use common_meta::key::schema_name::SchemaNameKey;
use common_meta::key::table_info::TableInfoValue;
use common_meta::key::table_name::TableNameKey;
@@ -46,6 +48,8 @@ use crate::error::{
};
use crate::information_schema::InformationSchemaProvider;
use crate::kvbackend::TableCacheRef;
use crate::system_schema::pg_catalog::PGCatalogProvider;
use crate::system_schema::SystemSchemaProvider;
use crate::CatalogManager;
/// Access all existing catalog, schema and tables.
@@ -82,14 +86,21 @@ impl KvBackendCatalogManager {
.get()
.expect("Failed to get table_route_cache"),
)),
table_metadata_manager: Arc::new(TableMetadataManager::new(backend)),
table_metadata_manager: Arc::new(TableMetadataManager::new(backend.clone())),
system_catalog: SystemCatalog {
catalog_manager: me.clone(),
catalog_cache: Cache::new(CATALOG_CACHE_MAX_CAPACITY),
pg_catalog_cache: Cache::new(CATALOG_CACHE_MAX_CAPACITY),
information_schema_provider: Arc::new(InformationSchemaProvider::new(
DEFAULT_CATALOG_NAME.to_string(),
me.clone(),
Arc::new(FlowMetadataManager::new(backend.clone())),
)),
pg_catalog_provider: Arc::new(PGCatalogProvider::new(
DEFAULT_CATALOG_NAME.to_string(),
me.clone(),
)),
backend,
},
cache_registry,
})
@@ -295,30 +306,41 @@ fn build_table(table_info_value: TableInfoValue) -> Result<TableRef> {
/// Existing system tables:
/// - public.numbers
/// - information_schema.{tables}
/// - pg_catalog.{tables}
#[derive(Clone)]
struct SystemCatalog {
catalog_manager: Weak<KvBackendCatalogManager>,
catalog_cache: Cache<String, Arc<InformationSchemaProvider>>,
pg_catalog_cache: Cache<String, Arc<PGCatalogProvider>>,
// system_schema_provider for default catalog
information_schema_provider: Arc<InformationSchemaProvider>,
pg_catalog_provider: Arc<PGCatalogProvider>,
backend: KvBackendRef,
}
impl SystemCatalog {
// TODO(j0hn50n133): remove the duplicated hard-coded table names logic
fn schema_names(&self) -> Vec<String> {
vec![INFORMATION_SCHEMA_NAME.to_string()]
vec![
INFORMATION_SCHEMA_NAME.to_string(),
PG_CATALOG_NAME.to_string(),
]
}
fn table_names(&self, schema: &str) -> Vec<String> {
if schema == INFORMATION_SCHEMA_NAME {
self.information_schema_provider.table_names()
} else if schema == DEFAULT_SCHEMA_NAME {
vec![NUMBERS_TABLE_NAME.to_string()]
} else {
vec![]
match schema {
INFORMATION_SCHEMA_NAME => self.information_schema_provider.table_names(),
PG_CATALOG_NAME => self.pg_catalog_provider.table_names(),
DEFAULT_SCHEMA_NAME => {
vec![NUMBERS_TABLE_NAME.to_string()]
}
_ => vec![],
}
}
fn schema_exists(&self, schema: &str) -> bool {
schema == INFORMATION_SCHEMA_NAME
schema == INFORMATION_SCHEMA_NAME || schema == PG_CATALOG_NAME
}
fn table_exists(&self, schema: &str, table: &str) -> bool {
@@ -326,6 +348,8 @@ impl SystemCatalog {
self.information_schema_provider.table(table).is_some()
} else if schema == DEFAULT_SCHEMA_NAME {
table == NUMBERS_TABLE_NAME
} else if schema == PG_CATALOG_NAME {
self.pg_catalog_provider.table(table).is_some()
} else {
false
}
@@ -338,9 +362,23 @@ impl SystemCatalog {
Arc::new(InformationSchemaProvider::new(
catalog.to_string(),
self.catalog_manager.clone(),
Arc::new(FlowMetadataManager::new(self.backend.clone())),
))
});
information_schema_provider.table(table_name)
} else if schema == PG_CATALOG_NAME {
if catalog == DEFAULT_CATALOG_NAME {
self.pg_catalog_provider.table(table_name)
} else {
let pg_catalog_provider =
self.pg_catalog_cache.get_with_by_ref(catalog, move || {
Arc::new(PGCatalogProvider::new(
catalog.to_string(),
self.catalog_manager.clone(),
))
});
pg_catalog_provider.table(table_name)
}
} else if schema == DEFAULT_SCHEMA_NAME && table_name == NUMBERS_TABLE_NAME {
Some(NumbersTable::table(NUMBERS_TABLE_ID))
} else {

View File

@@ -28,12 +28,16 @@ use table::TableRef;
use crate::error::Result;
pub mod error;
pub mod information_schema;
pub mod kvbackend;
pub mod memory;
mod metrics;
pub mod table_source;
pub mod system_schema;
pub mod information_schema {
// TODO(j0hn50n133): re-export to make it compatible with the legacy code, migrate to the new path later
pub use crate::system_schema::information_schema::*;
}
pub mod table_source;
#[async_trait::async_trait]
pub trait CatalogManager: Send + Sync {
fn as_any(&self) -> &dyn Any;

View File

@@ -20,14 +20,18 @@ use std::sync::{Arc, RwLock, Weak};
use async_stream::{stream, try_stream};
use common_catalog::build_db_string;
use common_catalog::consts::{
DEFAULT_CATALOG_NAME, DEFAULT_PRIVATE_SCHEMA_NAME, DEFAULT_SCHEMA_NAME, INFORMATION_SCHEMA_NAME,
DEFAULT_CATALOG_NAME, DEFAULT_PRIVATE_SCHEMA_NAME, DEFAULT_SCHEMA_NAME,
INFORMATION_SCHEMA_NAME, PG_CATALOG_NAME,
};
use common_meta::key::flow::FlowMetadataManager;
use common_meta::kv_backend::memory::MemoryKvBackend;
use futures_util::stream::BoxStream;
use snafu::OptionExt;
use table::TableRef;
use crate::error::{CatalogNotFoundSnafu, Result, SchemaNotFoundSnafu, TableExistsSnafu};
use crate::information_schema::InformationSchemaProvider;
use crate::system_schema::SystemSchemaProvider;
use crate::{CatalogManager, DeregisterTableRequest, RegisterSchemaRequest, RegisterTableRequest};
type SchemaEntries = HashMap<String, HashMap<String, TableRef>>;
@@ -173,6 +177,12 @@ impl MemoryCatalogManager {
schema: DEFAULT_PRIVATE_SCHEMA_NAME.to_string(),
})
.unwrap();
manager
.register_schema_sync(RegisterSchemaRequest {
catalog: DEFAULT_CATALOG_NAME.to_string(),
schema: PG_CATALOG_NAME.to_string(),
})
.unwrap();
manager
.register_schema_sync(RegisterSchemaRequest {
catalog: DEFAULT_CATALOG_NAME.to_string(),
@@ -196,7 +206,7 @@ impl MemoryCatalogManager {
}
fn catalog_exist_sync(&self, catalog: &str) -> Result<bool> {
Ok(self.catalogs.read().unwrap().get(catalog).is_some())
Ok(self.catalogs.read().unwrap().contains_key(catalog))
}
/// Registers a catalog if it does not exist and returns false if the schema exists.
@@ -290,6 +300,7 @@ impl MemoryCatalogManager {
let information_schema_provider = InformationSchemaProvider::new(
catalog,
Arc::downgrade(self) as Weak<dyn CatalogManager>,
Arc::new(FlowMetadataManager::new(Arc::new(MemoryKvBackend::new()))),
);
let information_schema = information_schema_provider.tables().clone();

View File

@@ -0,0 +1,166 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
pub mod information_schema;
mod memory_table;
pub mod pg_catalog;
mod predicate;
mod utils;
use std::collections::HashMap;
use std::sync::Arc;
use common_error::ext::BoxedError;
use common_recordbatch::{RecordBatchStreamWrapper, SendableRecordBatchStream};
use datatypes::schema::SchemaRef;
use futures_util::StreamExt;
use snafu::ResultExt;
use store_api::data_source::DataSource;
use store_api::storage::ScanRequest;
use table::error::{SchemaConversionSnafu, TablesRecordBatchSnafu};
use table::metadata::{
FilterPushDownType, TableId, TableInfoBuilder, TableInfoRef, TableMetaBuilder, TableType,
};
use table::{Table, TableRef};
use crate::error::Result;
pub trait SystemSchemaProvider {
/// Returns a map of [TableRef] in information schema.
fn tables(&self) -> &HashMap<String, TableRef>;
/// Returns the [TableRef] by table name.
fn table(&self, name: &str) -> Option<TableRef> {
self.tables().get(name).cloned()
}
/// Returns table names in the order of table id.
fn table_names(&self) -> Vec<String> {
let mut tables = self.tables().values().clone().collect::<Vec<_>>();
tables.sort_by(|t1, t2| {
t1.table_info()
.table_id()
.partial_cmp(&t2.table_info().table_id())
.unwrap()
});
tables
.into_iter()
.map(|t| t.table_info().name.clone())
.collect()
}
}
trait SystemSchemaProviderInner {
fn catalog_name(&self) -> &str;
fn schema_name() -> &'static str;
fn build_table(&self, name: &str) -> Option<TableRef> {
self.system_table(name).map(|table| {
let table_info = Self::table_info(self.catalog_name().to_string(), &table);
let filter_pushdown = FilterPushDownType::Inexact;
let data_source = Arc::new(SystemTableDataSource::new(table));
let table = Table::new(table_info, filter_pushdown, data_source);
Arc::new(table)
})
}
fn system_table(&self, name: &str) -> Option<SystemTableRef>;
fn table_info(catalog_name: String, table: &SystemTableRef) -> TableInfoRef {
let table_meta = TableMetaBuilder::default()
.schema(table.schema())
.primary_key_indices(vec![])
.next_column_id(0)
.build()
.unwrap();
let table_info = TableInfoBuilder::default()
.table_id(table.table_id())
.name(table.table_name().to_string())
.catalog_name(catalog_name)
.schema_name(Self::schema_name().to_string())
.meta(table_meta)
.table_type(table.table_type())
.build()
.unwrap();
Arc::new(table_info)
}
}
pub(crate) trait SystemTable {
fn table_id(&self) -> TableId;
fn table_name(&self) -> &'static str;
fn schema(&self) -> SchemaRef;
fn to_stream(&self, request: ScanRequest) -> Result<SendableRecordBatchStream>;
fn table_type(&self) -> TableType {
TableType::Temporary
}
}
pub(crate) type SystemTableRef = Arc<dyn SystemTable + Send + Sync>;
struct SystemTableDataSource {
table: SystemTableRef,
}
impl SystemTableDataSource {
fn new(table: SystemTableRef) -> Self {
Self { table }
}
fn try_project(&self, projection: &[usize]) -> std::result::Result<SchemaRef, BoxedError> {
let schema = self
.table
.schema()
.try_project(projection)
.context(SchemaConversionSnafu)
.map_err(BoxedError::new)?;
Ok(Arc::new(schema))
}
}
impl DataSource for SystemTableDataSource {
fn get_stream(
&self,
request: ScanRequest,
) -> std::result::Result<SendableRecordBatchStream, BoxedError> {
let projection = request.projection.clone();
let projected_schema = match &projection {
Some(projection) => self.try_project(projection)?,
None => self.table.schema(),
};
let stream = self
.table
.to_stream(request)
.map_err(BoxedError::new)
.context(TablesRecordBatchSnafu)
.map_err(BoxedError::new)?
.map(move |batch| match &projection {
Some(p) => batch.and_then(|b| b.try_project(p)),
None => batch,
});
let stream = RecordBatchStreamWrapper {
schema: projected_schema,
stream: Box::pin(stream),
output_ordering: None,
metrics: Default::default(),
};
Ok(Box::pin(stream))
}
}

View File

@@ -14,50 +14,49 @@
mod cluster_info;
pub mod columns;
pub mod flows;
mod information_memory_table;
pub mod key_column_usage;
mod memory_table;
mod partitions;
mod predicate;
mod region_peers;
mod runtime_metrics;
pub mod schemata;
mod table_constraints;
mod table_names;
pub mod tables;
pub(crate) mod utils;
mod views;
use std::collections::HashMap;
use std::sync::{Arc, Weak};
use common_catalog::consts::{self, DEFAULT_CATALOG_NAME, INFORMATION_SCHEMA_NAME};
use common_error::ext::BoxedError;
use common_recordbatch::{RecordBatchStreamWrapper, SendableRecordBatchStream};
use common_meta::key::flow::FlowMetadataManager;
use common_recordbatch::SendableRecordBatchStream;
use datatypes::schema::SchemaRef;
use futures_util::StreamExt;
use lazy_static::lazy_static;
use paste::paste;
pub(crate) use predicate::Predicates;
use snafu::ResultExt;
use store_api::data_source::DataSource;
use store_api::storage::{ScanRequest, TableId};
use table::error::{SchemaConversionSnafu, TablesRecordBatchSnafu};
use table::metadata::{
FilterPushDownType, TableInfoBuilder, TableInfoRef, TableMetaBuilder, TableType,
};
use table::{Table, TableRef};
use table::metadata::TableType;
use table::TableRef;
pub use table_names::*;
use views::InformationSchemaViews;
use self::columns::InformationSchemaColumns;
use super::{SystemSchemaProviderInner, SystemTable, SystemTableRef};
use crate::error::Result;
use crate::information_schema::cluster_info::InformationSchemaClusterInfo;
use crate::information_schema::key_column_usage::InformationSchemaKeyColumnUsage;
use crate::information_schema::memory_table::{get_schema_columns, MemoryTable};
use crate::information_schema::partitions::InformationSchemaPartitions;
use crate::information_schema::region_peers::InformationSchemaRegionPeers;
use crate::information_schema::runtime_metrics::InformationSchemaMetrics;
use crate::information_schema::schemata::InformationSchemaSchemata;
use crate::information_schema::table_constraints::InformationSchemaTableConstraints;
use crate::information_schema::tables::InformationSchemaTables;
use crate::system_schema::information_schema::cluster_info::InformationSchemaClusterInfo;
use crate::system_schema::information_schema::flows::InformationSchemaFlows;
use crate::system_schema::information_schema::information_memory_table::get_schema_columns;
use crate::system_schema::information_schema::key_column_usage::InformationSchemaKeyColumnUsage;
use crate::system_schema::information_schema::partitions::InformationSchemaPartitions;
use crate::system_schema::information_schema::region_peers::InformationSchemaRegionPeers;
use crate::system_schema::information_schema::runtime_metrics::InformationSchemaMetrics;
use crate::system_schema::information_schema::schemata::InformationSchemaSchemata;
use crate::system_schema::information_schema::table_constraints::InformationSchemaTableConstraints;
use crate::system_schema::information_schema::tables::InformationSchemaTables;
use crate::system_schema::memory_table::MemoryTable;
pub(crate) use crate::system_schema::predicate::Predicates;
use crate::system_schema::SystemSchemaProvider;
use crate::CatalogManager;
lazy_static! {
@@ -106,107 +105,26 @@ macro_rules! setup_memory_table {
pub struct InformationSchemaProvider {
catalog_name: String,
catalog_manager: Weak<dyn CatalogManager>,
flow_metadata_manager: Arc<FlowMetadataManager>,
tables: HashMap<String, TableRef>,
}
impl InformationSchemaProvider {
pub fn new(catalog_name: String, catalog_manager: Weak<dyn CatalogManager>) -> Self {
let mut provider = Self {
catalog_name,
catalog_manager,
tables: HashMap::new(),
};
provider.build_tables();
provider
}
/// Returns table names in the order of table id.
pub fn table_names(&self) -> Vec<String> {
let mut tables = self.tables.values().clone().collect::<Vec<_>>();
tables.sort_by(|t1, t2| {
t1.table_info()
.table_id()
.partial_cmp(&t2.table_info().table_id())
.unwrap()
});
tables
.into_iter()
.map(|t| t.table_info().name.clone())
.collect()
}
/// Returns a map of [TableRef] in information schema.
pub fn tables(&self) -> &HashMap<String, TableRef> {
impl SystemSchemaProvider for InformationSchemaProvider {
fn tables(&self) -> &HashMap<String, TableRef> {
assert!(!self.tables.is_empty());
&self.tables
}
/// Returns the [TableRef] by table name.
pub fn table(&self, name: &str) -> Option<TableRef> {
self.tables.get(name).cloned()
}
impl SystemSchemaProviderInner for InformationSchemaProvider {
fn catalog_name(&self) -> &str {
&self.catalog_name
}
fn schema_name() -> &'static str {
INFORMATION_SCHEMA_NAME
}
fn build_tables(&mut self) {
let mut tables = HashMap::new();
// SECURITY NOTE:
// Carefully consider the tables that may expose sensitive cluster configurations,
// authentication details, and other critical information.
// Only put these tables under `greptime` catalog to prevent info leak.
if self.catalog_name == DEFAULT_CATALOG_NAME {
tables.insert(
RUNTIME_METRICS.to_string(),
self.build_table(RUNTIME_METRICS).unwrap(),
);
tables.insert(
BUILD_INFO.to_string(),
self.build_table(BUILD_INFO).unwrap(),
);
tables.insert(
REGION_PEERS.to_string(),
self.build_table(REGION_PEERS).unwrap(),
);
tables.insert(
CLUSTER_INFO.to_string(),
self.build_table(CLUSTER_INFO).unwrap(),
);
}
tables.insert(TABLES.to_string(), self.build_table(TABLES).unwrap());
tables.insert(SCHEMATA.to_string(), self.build_table(SCHEMATA).unwrap());
tables.insert(COLUMNS.to_string(), self.build_table(COLUMNS).unwrap());
tables.insert(
KEY_COLUMN_USAGE.to_string(),
self.build_table(KEY_COLUMN_USAGE).unwrap(),
);
tables.insert(
TABLE_CONSTRAINTS.to_string(),
self.build_table(TABLE_CONSTRAINTS).unwrap(),
);
// Add memory tables
for name in MEMORY_TABLES.iter() {
tables.insert((*name).to_string(), self.build_table(name).expect(name));
}
self.tables = tables;
}
fn build_table(&self, name: &str) -> Option<TableRef> {
self.information_table(name).map(|table| {
let table_info = Self::table_info(self.catalog_name.clone(), &table);
let filter_pushdown = FilterPushDownType::Inexact;
let data_source = Arc::new(InformationTableDataSource::new(table));
let table = Table::new(table_info, filter_pushdown, data_source);
Arc::new(table)
})
}
fn information_table(&self, name: &str) -> Option<InformationTableRef> {
fn system_table(&self, name: &str) -> Option<SystemTableRef> {
match name.to_ascii_lowercase().as_str() {
TABLES => Some(Arc::new(InformationSchemaTables::new(
self.catalog_name.clone(),
@@ -262,27 +180,83 @@ impl InformationSchemaProvider {
CLUSTER_INFO => Some(Arc::new(InformationSchemaClusterInfo::new(
self.catalog_manager.clone(),
)) as _),
VIEWS => Some(Arc::new(InformationSchemaViews::new(
self.catalog_name.clone(),
self.catalog_manager.clone(),
)) as _),
FLOWS => Some(Arc::new(InformationSchemaFlows::new(
self.catalog_name.clone(),
self.flow_metadata_manager.clone(),
)) as _),
_ => None,
}
}
}
fn table_info(catalog_name: String, table: &InformationTableRef) -> TableInfoRef {
let table_meta = TableMetaBuilder::default()
.schema(table.schema())
.primary_key_indices(vec![])
.next_column_id(0)
.build()
.unwrap();
let table_info = TableInfoBuilder::default()
.table_id(table.table_id())
.name(table.table_name().to_string())
.catalog_name(catalog_name)
.schema_name(INFORMATION_SCHEMA_NAME.to_string())
.meta(table_meta)
.table_type(table.table_type())
.build()
.unwrap();
Arc::new(table_info)
impl InformationSchemaProvider {
pub fn new(
catalog_name: String,
catalog_manager: Weak<dyn CatalogManager>,
flow_metadata_manager: Arc<FlowMetadataManager>,
) -> Self {
let mut provider = Self {
catalog_name,
catalog_manager,
flow_metadata_manager,
tables: HashMap::new(),
};
provider.build_tables();
provider
}
fn build_tables(&mut self) {
let mut tables = HashMap::new();
// SECURITY NOTE:
// Carefully consider the tables that may expose sensitive cluster configurations,
// authentication details, and other critical information.
// Only put these tables under `greptime` catalog to prevent info leak.
if self.catalog_name == DEFAULT_CATALOG_NAME {
tables.insert(
RUNTIME_METRICS.to_string(),
self.build_table(RUNTIME_METRICS).unwrap(),
);
tables.insert(
BUILD_INFO.to_string(),
self.build_table(BUILD_INFO).unwrap(),
);
tables.insert(
REGION_PEERS.to_string(),
self.build_table(REGION_PEERS).unwrap(),
);
tables.insert(
CLUSTER_INFO.to_string(),
self.build_table(CLUSTER_INFO).unwrap(),
);
}
tables.insert(TABLES.to_string(), self.build_table(TABLES).unwrap());
tables.insert(VIEWS.to_string(), self.build_table(VIEWS).unwrap());
tables.insert(SCHEMATA.to_string(), self.build_table(SCHEMATA).unwrap());
tables.insert(COLUMNS.to_string(), self.build_table(COLUMNS).unwrap());
tables.insert(
KEY_COLUMN_USAGE.to_string(),
self.build_table(KEY_COLUMN_USAGE).unwrap(),
);
tables.insert(
TABLE_CONSTRAINTS.to_string(),
self.build_table(TABLE_CONSTRAINTS).unwrap(),
);
tables.insert(FLOWS.to_string(), self.build_table(FLOWS).unwrap());
// Add memory tables
for name in MEMORY_TABLES.iter() {
tables.insert((*name).to_string(), self.build_table(name).expect(name));
}
self.tables = tables;
}
}
@@ -300,57 +274,28 @@ trait InformationTable {
}
}
type InformationTableRef = Arc<dyn InformationTable + Send + Sync>;
struct InformationTableDataSource {
table: InformationTableRef,
}
impl InformationTableDataSource {
fn new(table: InformationTableRef) -> Self {
Self { table }
// Provide compatibility for legacy `information_schema` code.
impl<T> SystemTable for T
where
T: InformationTable,
{
fn table_id(&self) -> TableId {
InformationTable::table_id(self)
}
fn try_project(&self, projection: &[usize]) -> std::result::Result<SchemaRef, BoxedError> {
let schema = self
.table
.schema()
.try_project(projection)
.context(SchemaConversionSnafu)
.map_err(BoxedError::new)?;
Ok(Arc::new(schema))
}
}
impl DataSource for InformationTableDataSource {
fn get_stream(
&self,
request: ScanRequest,
) -> std::result::Result<SendableRecordBatchStream, BoxedError> {
let projection = request.projection.clone();
let projected_schema = match &projection {
Some(projection) => self.try_project(projection)?,
None => self.table.schema(),
};
let stream = self
.table
.to_stream(request)
.map_err(BoxedError::new)
.context(TablesRecordBatchSnafu)
.map_err(BoxedError::new)?
.map(move |batch| match &projection {
Some(p) => batch.and_then(|b| b.try_project(p)),
None => batch,
});
let stream = RecordBatchStreamWrapper {
schema: projected_schema,
stream: Box::pin(stream),
output_ordering: None,
metrics: Default::default(),
};
Ok(Box::pin(stream))
fn table_name(&self) -> &'static str {
InformationTable::table_name(self)
}
fn schema(&self) -> SchemaRef {
InformationTable::schema(self)
}
fn table_type(&self) -> TableType {
InformationTable::table_type(self)
}
fn to_stream(&self, request: ScanRequest) -> Result<SendableRecordBatchStream> {
InformationTable::to_stream(self, request)
}
}
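// A minimal sketch (not part of the original diff) of what the blanket impl above buys:
// any legacy `InformationTable` can be passed to code that only knows about `SystemTable`,
// without a hand-written adapter. `describe_system_table` is a hypothetical helper.
fn describe_system_table<T: SystemTable>(table: &T) -> (TableId, &'static str) {
(table.table_id(), table.table_name())
}
// e.g. `describe_system_table(&InformationSchemaViews::new(...))` compiles solely because of
// the blanket `impl<T: InformationTable> SystemTable for T`.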


@@ -41,7 +41,8 @@ use store_api::storage::{ScanRequest, TableId};
use super::CLUSTER_INFO;
use crate::error::{CreateRecordBatchSnafu, InternalSnafu, ListNodesSnafu, Result};
use crate::information_schema::{utils, InformationTable, Predicates};
use crate::system_schema::information_schema::{InformationTable, Predicates};
use crate::system_schema::utils;
use crate::CatalogManager;
const PEER_ID: &str = "peer_id";


@@ -0,0 +1,305 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::sync::Arc;
use common_catalog::consts::INFORMATION_SCHEMA_FLOW_TABLE_ID;
use common_error::ext::BoxedError;
use common_meta::key::flow::flow_info::FlowInfoValue;
use common_meta::key::flow::FlowMetadataManager;
use common_meta::key::FlowId;
use common_recordbatch::adapter::RecordBatchStreamAdapter;
use common_recordbatch::{DfSendableRecordBatchStream, RecordBatch, SendableRecordBatchStream};
use datafusion::execution::TaskContext;
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
use datatypes::prelude::ConcreteDataType as CDT;
use datatypes::scalars::ScalarVectorBuilder;
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
use datatypes::value::Value;
use datatypes::vectors::{Int64VectorBuilder, StringVectorBuilder, UInt32VectorBuilder, VectorRef};
use futures::TryStreamExt;
use snafu::{OptionExt, ResultExt};
use store_api::storage::{ScanRequest, TableId};
use crate::error::{
CreateRecordBatchSnafu, FlowInfoNotFoundSnafu, InternalSnafu, JsonSnafu, ListFlowsSnafu, Result,
};
use crate::information_schema::{Predicates, FLOWS};
use crate::system_schema::information_schema::InformationTable;
const INIT_CAPACITY: usize = 42;
// Column names of `information_schema.flows`.
// The primary key is (flow_name, flow_id, table_catalog).
pub const FLOW_NAME: &str = "flow_name";
pub const FLOW_ID: &str = "flow_id";
pub const TABLE_CATALOG: &str = "table_catalog";
pub const FLOW_DEFINITION: &str = "flow_definition";
pub const COMMENT: &str = "comment";
pub const EXPIRE_AFTER: &str = "expire_after";
pub const SOURCE_TABLE_IDS: &str = "source_table_ids";
pub const SINK_TABLE_NAME: &str = "sink_table_name";
pub const FLOWNODE_IDS: &str = "flownode_ids";
pub const OPTIONS: &str = "options";
/// The `information_schema.flows` table provides information about flows in databases.
pub(super) struct InformationSchemaFlows {
schema: SchemaRef,
catalog_name: String,
flow_metadata_manager: Arc<FlowMetadataManager>,
}
impl InformationSchemaFlows {
pub(super) fn new(
catalog_name: String,
flow_metadata_manager: Arc<FlowMetadataManager>,
) -> Self {
Self {
schema: Self::schema(),
catalog_name,
flow_metadata_manager,
}
}
/// Complex fields (including [`SOURCE_TABLE_IDS`], [`FLOWNODE_IDS`] and [`OPTIONS`]) are serialized to JSON strings for now.
/// TODO(discord9): use a better way to store complex fields, such as a JSON type
pub(crate) fn schema() -> SchemaRef {
Arc::new(Schema::new(
vec![
(FLOW_NAME, CDT::string_datatype(), false),
(FLOW_ID, CDT::uint32_datatype(), false),
(TABLE_CATALOG, CDT::string_datatype(), false),
(FLOW_DEFINITION, CDT::string_datatype(), false),
(COMMENT, CDT::string_datatype(), true),
(EXPIRE_AFTER, CDT::int64_datatype(), true),
(SOURCE_TABLE_IDS, CDT::string_datatype(), true),
(SINK_TABLE_NAME, CDT::string_datatype(), false),
(FLOWNODE_IDS, CDT::string_datatype(), true),
(OPTIONS, CDT::string_datatype(), true),
]
.into_iter()
.map(|(name, ty, nullable)| ColumnSchema::new(name, ty, nullable))
.collect(),
))
}
fn builder(&self) -> InformationSchemaFlowsBuilder {
InformationSchemaFlowsBuilder::new(
self.schema.clone(),
self.catalog_name.clone(),
&self.flow_metadata_manager,
)
}
}
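// A minimal sketch (not part of the original diff) of the JSON serialization described above
// for the complex fields: `add_flow` below serializes them with `serde_json`, so a hypothetical
// `source_table_ids` of [1024, 1025] is stored as the string "[1024,1025]".
#[cfg(test)]
mod complex_field_serialization_sketch {
#[test]
fn source_table_ids_render_as_json_strings() {
let source_table_ids: Vec<u32> = vec![1024, 1025];
assert_eq!(
serde_json::to_string(&source_table_ids).unwrap(),
"[1024,1025]"
);
}
}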
impl InformationTable for InformationSchemaFlows {
fn table_id(&self) -> TableId {
INFORMATION_SCHEMA_FLOW_TABLE_ID
}
fn table_name(&self) -> &'static str {
FLOWS
}
fn schema(&self) -> SchemaRef {
self.schema.clone()
}
fn to_stream(&self, request: ScanRequest) -> Result<SendableRecordBatchStream> {
let schema = self.schema.arrow_schema().clone();
let mut builder = self.builder();
let stream = Box::pin(DfRecordBatchStreamAdapter::new(
schema,
futures::stream::once(async move {
builder
.make_flows(Some(request))
.await
.map(|x| x.into_df_record_batch())
.map_err(|err| datafusion::error::DataFusionError::External(Box::new(err)))
}),
));
Ok(Box::pin(
RecordBatchStreamAdapter::try_new(stream)
.map_err(BoxedError::new)
.context(InternalSnafu)?,
))
}
}
/// Builds the `information_schema.FLOWS` table row by row
///
/// Columns are based on [`FlowInfoValue`].
struct InformationSchemaFlowsBuilder {
schema: SchemaRef,
catalog_name: String,
flow_metadata_manager: Arc<FlowMetadataManager>,
flow_names: StringVectorBuilder,
flow_ids: UInt32VectorBuilder,
table_catalogs: StringVectorBuilder,
raw_sqls: StringVectorBuilder,
comments: StringVectorBuilder,
expire_afters: Int64VectorBuilder,
source_table_id_groups: StringVectorBuilder,
sink_table_names: StringVectorBuilder,
flownode_id_groups: StringVectorBuilder,
option_groups: StringVectorBuilder,
}
impl InformationSchemaFlowsBuilder {
fn new(
schema: SchemaRef,
catalog_name: String,
flow_metadata_manager: &Arc<FlowMetadataManager>,
) -> Self {
Self {
schema,
catalog_name,
flow_metadata_manager: flow_metadata_manager.clone(),
flow_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
flow_ids: UInt32VectorBuilder::with_capacity(INIT_CAPACITY),
table_catalogs: StringVectorBuilder::with_capacity(INIT_CAPACITY),
raw_sqls: StringVectorBuilder::with_capacity(INIT_CAPACITY),
comments: StringVectorBuilder::with_capacity(INIT_CAPACITY),
expire_afters: Int64VectorBuilder::with_capacity(INIT_CAPACITY),
source_table_id_groups: StringVectorBuilder::with_capacity(INIT_CAPACITY),
sink_table_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
flownode_id_groups: StringVectorBuilder::with_capacity(INIT_CAPACITY),
option_groups: StringVectorBuilder::with_capacity(INIT_CAPACITY),
}
}
/// Construct the `information_schema.flows` virtual table
async fn make_flows(&mut self, request: Option<ScanRequest>) -> Result<RecordBatch> {
let catalog_name = self.catalog_name.clone();
let predicates = Predicates::from_scan_request(&request);
let flow_info_manager = self.flow_metadata_manager.clone();
// TODO(discord9): use `AsyncIterator` once it's stable-ish
let mut stream = flow_info_manager
.flow_name_manager()
.flow_names(&catalog_name)
.await;
while let Some((flow_name, flow_id)) = stream
.try_next()
.await
.map_err(BoxedError::new)
.context(ListFlowsSnafu {
catalog: &catalog_name,
})?
{
let flow_info = flow_info_manager
.flow_info_manager()
.get(flow_id.flow_id())
.await
.map_err(BoxedError::new)
.context(InternalSnafu)?
.context(FlowInfoNotFoundSnafu {
catalog_name: catalog_name.to_string(),
flow_name: flow_name.to_string(),
})?;
self.add_flow(&predicates, flow_id.flow_id(), flow_info)?;
}
self.finish()
}
fn add_flow(
&mut self,
predicates: &Predicates,
flow_id: FlowId,
flow_info: FlowInfoValue,
) -> Result<()> {
let row = [
(FLOW_NAME, &Value::from(flow_info.flow_name().to_string())),
(FLOW_ID, &Value::from(flow_id)),
(
TABLE_CATALOG,
&Value::from(flow_info.catalog_name().to_string()),
),
];
if !predicates.eval(&row) {
return Ok(());
}
self.flow_names.push(Some(flow_info.flow_name()));
self.flow_ids.push(Some(flow_id));
self.table_catalogs.push(Some(flow_info.catalog_name()));
self.raw_sqls.push(Some(flow_info.raw_sql()));
self.comments.push(Some(flow_info.comment()));
self.expire_afters.push(flow_info.expire_after());
self.source_table_id_groups.push(Some(
&serde_json::to_string(flow_info.source_table_ids()).context(JsonSnafu {
input: format!("{:?}", flow_info.source_table_ids()),
})?,
));
self.sink_table_names
.push(Some(&flow_info.sink_table_name().to_string()));
self.flownode_id_groups.push(Some(
&serde_json::to_string(flow_info.flownode_ids()).context({
JsonSnafu {
input: format!("{:?}", flow_info.flownode_ids()),
}
})?,
));
self.option_groups
.push(Some(&serde_json::to_string(flow_info.options()).context(
JsonSnafu {
input: format!("{:?}", flow_info.options()),
},
)?));
Ok(())
}
fn finish(&mut self) -> Result<RecordBatch> {
let columns: Vec<VectorRef> = vec![
Arc::new(self.flow_names.finish()),
Arc::new(self.flow_ids.finish()),
Arc::new(self.table_catalogs.finish()),
Arc::new(self.raw_sqls.finish()),
Arc::new(self.comments.finish()),
Arc::new(self.expire_afters.finish()),
Arc::new(self.source_table_id_groups.finish()),
Arc::new(self.sink_table_names.finish()),
Arc::new(self.flownode_id_groups.finish()),
Arc::new(self.option_groups.finish()),
];
RecordBatch::new(self.schema.clone(), columns).context(CreateRecordBatchSnafu)
}
}
impl DfPartitionStream for InformationSchemaFlows {
fn schema(&self) -> &arrow_schema::SchemaRef {
self.schema.arrow_schema()
}
fn execute(&self, _: Arc<TaskContext>) -> DfSendableRecordBatchStream {
let schema: Arc<arrow_schema::Schema> = self.schema.arrow_schema().clone();
let mut builder = self.builder();
Box::pin(DfRecordBatchStreamAdapter::new(
schema,
futures::stream::once(async move {
builder
.make_flows(None)
.await
.map(|x| x.into_df_record_batch())
.map_err(Into::into)
}),
))
}
}

View File

@@ -15,17 +15,19 @@
use std::sync::Arc;
use common_catalog::consts::{METRIC_ENGINE, MITO_ENGINE};
use datatypes::prelude::{ConcreteDataType, VectorRef};
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
use datatypes::vectors::{Int64Vector, StringVector};
use datatypes::schema::{Schema, SchemaRef};
use datatypes::vectors::{Int64Vector, StringVector, VectorRef};
use crate::information_schema::table_names::*;
use super::table_names::*;
use crate::system_schema::utils::tables::{
bigint_column, datetime_column, string_column, string_columns,
};
const NO_VALUE: &str = "NO";
/// Finds the schema and columns by table name; only valid for memory tables.
/// Safety: the caller MUST ensure the table schema exists; it panics otherwise.
pub fn get_schema_columns(table_name: &str) -> (SchemaRef, Vec<VectorRef>) {
pub(super) fn get_schema_columns(table_name: &str) -> (SchemaRef, Vec<VectorRef>) {
let (column_schemas, columns): (_, Vec<VectorRef>) = match table_name {
COLUMN_PRIVILEGES => (
string_columns(&[
@@ -80,7 +82,7 @@ pub fn get_schema_columns(table_name: &str) -> (SchemaRef, Vec<VectorRef>) {
"GIT_BRANCH",
"GIT_COMMIT",
"GIT_COMMIT_SHORT",
"GIT_DIRTY",
"GIT_CLEAN",
"PKG_VERSION",
]),
vec![
@@ -89,7 +91,7 @@ pub fn get_schema_columns(table_name: &str) -> (SchemaRef, Vec<VectorRef>) {
Arc::new(StringVector::from(vec![build_info
.commit_short
.to_string()])),
Arc::new(StringVector::from(vec![build_info.dirty.to_string()])),
Arc::new(StringVector::from(vec![build_info.clean.to_string()])),
Arc::new(StringVector::from(vec![build_info.version.to_string()])),
],
)
@@ -414,50 +416,3 @@ pub fn get_schema_columns(table_name: &str) -> (SchemaRef, Vec<VectorRef>) {
(Arc::new(Schema::new(column_schemas)), columns)
}
fn string_columns(names: &[&'static str]) -> Vec<ColumnSchema> {
names.iter().map(|name| string_column(name)).collect()
}
fn string_column(name: &str) -> ColumnSchema {
ColumnSchema::new(
str::to_lowercase(name),
ConcreteDataType::string_datatype(),
false,
)
}
fn bigint_column(name: &str) -> ColumnSchema {
ColumnSchema::new(
str::to_lowercase(name),
ConcreteDataType::int64_datatype(),
false,
)
}
fn datetime_column(name: &str) -> ColumnSchema {
ColumnSchema::new(
str::to_lowercase(name),
ConcreteDataType::datetime_datatype(),
false,
)
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_string_columns() {
let columns = ["a", "b", "c"];
let column_schemas = string_columns(&columns);
assert_eq!(3, column_schemas.len());
for (i, name) in columns.iter().enumerate() {
let cs = column_schemas.get(i).unwrap();
assert_eq!(*name, cs.name);
assert_eq!(ConcreteDataType::string_datatype(), cs.data_type);
}
}
}


@@ -35,7 +35,7 @@ use super::KEY_COLUMN_USAGE;
use crate::error::{
CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
};
use crate::information_schema::{InformationTable, Predicates};
use crate::system_schema::information_schema::{InformationTable, Predicates};
use crate::CatalogManager;
pub const CONSTRAINT_SCHEMA: &str = "constraint_schema";


@@ -44,8 +44,8 @@ use crate::error::{
CreateRecordBatchSnafu, FindPartitionsSnafu, InternalSnafu, Result,
UpgradeWeakCatalogManagerRefSnafu,
};
use crate::information_schema::{InformationTable, Predicates};
use crate::kvbackend::KvBackendCatalogManager;
use crate::system_schema::information_schema::{InformationTable, Predicates};
use crate::CatalogManager;
const TABLE_CATALOG: &str = "table_catalog";


@@ -39,8 +39,8 @@ use crate::error::{
CreateRecordBatchSnafu, FindRegionRoutesSnafu, InternalSnafu, Result,
UpgradeWeakCatalogManagerRefSnafu,
};
use crate::information_schema::{InformationTable, Predicates};
use crate::kvbackend::KvBackendCatalogManager;
use crate::system_schema::information_schema::{InformationTable, Predicates};
use crate::CatalogManager;
const REGION_ID: &str = "region_id";


@@ -36,7 +36,8 @@ use crate::error::{
CreateRecordBatchSnafu, InternalSnafu, Result, TableMetadataManagerSnafu,
UpgradeWeakCatalogManagerRefSnafu,
};
use crate::information_schema::{utils, InformationTable, Predicates};
use crate::system_schema::information_schema::{InformationTable, Predicates};
use crate::system_schema::utils;
use crate::CatalogManager;
pub const CATALOG_NAME: &str = "catalog_name";


@@ -43,3 +43,5 @@ pub const PARTITIONS: &str = "partitions";
pub const REGION_PEERS: &str = "region_peers";
pub const TABLE_CONSTRAINTS: &str = "table_constraints";
pub const CLUSTER_INFO: &str = "cluster_info";
pub const VIEWS: &str = "views";
pub const FLOWS: &str = "flows";


@@ -38,7 +38,7 @@ use super::TABLES;
use crate::error::{
CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
};
use crate::information_schema::{InformationTable, Predicates};
use crate::system_schema::information_schema::{InformationTable, Predicates};
use crate::CatalogManager;
pub const TABLE_CATALOG: &str = "table_catalog";
@@ -143,9 +143,7 @@ impl InformationTable for InformationSchemaTables {
.make_tables(Some(request))
.await
.map(|x| x.into_df_record_batch())
.map_err(|err| {
datafusion::error::DataFusionError::External(format!("{err:?}").into())
})
.map_err(|err| datafusion::error::DataFusionError::External(Box::new(err)))
}),
));
Ok(Box::pin(
@@ -300,7 +298,7 @@ impl InformationSchemaTablesBuilder {
self.data_free.push(Some(0));
self.auto_increment.push(Some(0));
self.row_format.push(Some("Fixed"));
self.table_collation.push(None);
self.table_collation.push(Some("utf8_bin"));
self.update_time.push(None);
self.check_time.push(None);


@@ -0,0 +1,288 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::sync::{Arc, Weak};
use arrow_schema::SchemaRef as ArrowSchemaRef;
use common_catalog::consts::INFORMATION_SCHEMA_VIEW_TABLE_ID;
use common_error::ext::BoxedError;
use common_recordbatch::adapter::RecordBatchStreamAdapter;
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
use datafusion::execution::TaskContext;
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
use datatypes::prelude::{ConcreteDataType, ScalarVectorBuilder, VectorRef};
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
use datatypes::value::Value;
use datatypes::vectors::StringVectorBuilder;
use futures::TryStreamExt;
use snafu::{OptionExt, ResultExt};
use store_api::storage::{ScanRequest, TableId};
use table::metadata::TableType;
use super::VIEWS;
use crate::error::{
CastManagerSnafu, CreateRecordBatchSnafu, GetViewCacheSnafu, InternalSnafu, Result,
UpgradeWeakCatalogManagerRefSnafu, ViewInfoNotFoundSnafu,
};
use crate::kvbackend::KvBackendCatalogManager;
use crate::system_schema::information_schema::{InformationTable, Predicates};
use crate::CatalogManager;
const INIT_CAPACITY: usize = 42;
pub const TABLE_CATALOG: &str = "table_catalog";
pub const TABLE_SCHEMA: &str = "table_schema";
pub const TABLE_NAME: &str = "table_name";
pub const VIEW_DEFINITION: &str = "view_definition";
pub const CHECK_OPTION: &str = "check_option";
pub const IS_UPDATABLE: &str = "is_updatable";
pub const DEFINER: &str = "definer";
pub const SECURITY_TYPE: &str = "security_type";
pub const CHARACTER_SET_CLIENT: &str = "character_set_client";
pub const COLLATION_CONNECTION: &str = "collation_connection";
/// The `information_schema.views` table provides information about views in databases.
pub(super) struct InformationSchemaViews {
schema: SchemaRef,
catalog_name: String,
catalog_manager: Weak<dyn CatalogManager>,
}
impl InformationSchemaViews {
pub(super) fn new(catalog_name: String, catalog_manager: Weak<dyn CatalogManager>) -> Self {
Self {
schema: Self::schema(),
catalog_name,
catalog_manager,
}
}
pub(crate) fn schema() -> SchemaRef {
Arc::new(Schema::new(vec![
ColumnSchema::new(TABLE_CATALOG, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(TABLE_SCHEMA, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(TABLE_NAME, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(VIEW_DEFINITION, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(CHECK_OPTION, ConcreteDataType::string_datatype(), true),
ColumnSchema::new(IS_UPDATABLE, ConcreteDataType::string_datatype(), true),
ColumnSchema::new(DEFINER, ConcreteDataType::string_datatype(), true),
ColumnSchema::new(SECURITY_TYPE, ConcreteDataType::string_datatype(), true),
ColumnSchema::new(
CHARACTER_SET_CLIENT,
ConcreteDataType::string_datatype(),
true,
),
ColumnSchema::new(
COLLATION_CONNECTION,
ConcreteDataType::string_datatype(),
true,
),
]))
}
fn builder(&self) -> InformationSchemaViewsBuilder {
InformationSchemaViewsBuilder::new(
self.schema.clone(),
self.catalog_name.clone(),
self.catalog_manager.clone(),
)
}
}
impl InformationTable for InformationSchemaViews {
fn table_id(&self) -> TableId {
INFORMATION_SCHEMA_VIEW_TABLE_ID
}
fn table_name(&self) -> &'static str {
VIEWS
}
fn schema(&self) -> SchemaRef {
self.schema.clone()
}
fn to_stream(&self, request: ScanRequest) -> Result<SendableRecordBatchStream> {
let schema = self.schema.arrow_schema().clone();
let mut builder = self.builder();
let stream = Box::pin(DfRecordBatchStreamAdapter::new(
schema,
futures::stream::once(async move {
builder
.make_views(Some(request))
.await
.map(|x| x.into_df_record_batch())
.map_err(|err| datafusion::error::DataFusionError::External(Box::new(err)))
}),
));
Ok(Box::pin(
RecordBatchStreamAdapter::try_new(stream)
.map_err(BoxedError::new)
.context(InternalSnafu)?,
))
}
}
/// Builds the `information_schema.VIEWS` table row by row
///
/// Columns are based on <https://dev.mysql.com/doc/refman/8.4/en/information-schema-views-table.html>
struct InformationSchemaViewsBuilder {
schema: SchemaRef,
catalog_name: String,
catalog_manager: Weak<dyn CatalogManager>,
catalog_names: StringVectorBuilder,
schema_names: StringVectorBuilder,
table_names: StringVectorBuilder,
view_definitions: StringVectorBuilder,
check_options: StringVectorBuilder,
is_updatable: StringVectorBuilder,
definer: StringVectorBuilder,
security_type: StringVectorBuilder,
character_set_client: StringVectorBuilder,
collation_connection: StringVectorBuilder,
}
impl InformationSchemaViewsBuilder {
fn new(
schema: SchemaRef,
catalog_name: String,
catalog_manager: Weak<dyn CatalogManager>,
) -> Self {
Self {
schema,
catalog_name,
catalog_manager,
catalog_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
schema_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
table_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
view_definitions: StringVectorBuilder::with_capacity(INIT_CAPACITY),
check_options: StringVectorBuilder::with_capacity(INIT_CAPACITY),
is_updatable: StringVectorBuilder::with_capacity(INIT_CAPACITY),
definer: StringVectorBuilder::with_capacity(INIT_CAPACITY),
security_type: StringVectorBuilder::with_capacity(INIT_CAPACITY),
character_set_client: StringVectorBuilder::with_capacity(INIT_CAPACITY),
collation_connection: StringVectorBuilder::with_capacity(INIT_CAPACITY),
}
}
/// Construct the `information_schema.views` virtual table
async fn make_views(&mut self, request: Option<ScanRequest>) -> Result<RecordBatch> {
let catalog_name = self.catalog_name.clone();
let catalog_manager = self
.catalog_manager
.upgrade()
.context(UpgradeWeakCatalogManagerRefSnafu)?;
let predicates = Predicates::from_scan_request(&request);
let view_info_cache = catalog_manager
.as_any()
.downcast_ref::<KvBackendCatalogManager>()
.context(CastManagerSnafu)?
.view_info_cache()?;
for schema_name in catalog_manager.schema_names(&catalog_name).await? {
let mut stream = catalog_manager.tables(&catalog_name, &schema_name);
while let Some(table) = stream.try_next().await? {
let table_info = table.table_info();
if table_info.table_type == TableType::View {
let view_info = view_info_cache
.get(table_info.ident.table_id)
.await
.context(GetViewCacheSnafu)?
.context(ViewInfoNotFoundSnafu {
name: &table_info.name,
})?;
self.add_view(
&predicates,
&catalog_name,
&schema_name,
&table_info.name,
&view_info.definition,
)
}
}
}
self.finish()
}
fn add_view(
&mut self,
predicates: &Predicates,
catalog_name: &str,
schema_name: &str,
table_name: &str,
definition: &str,
) {
let row = [
(TABLE_CATALOG, &Value::from(catalog_name)),
(TABLE_SCHEMA, &Value::from(schema_name)),
(TABLE_NAME, &Value::from(table_name)),
];
if !predicates.eval(&row) {
return;
}
self.catalog_names.push(Some(catalog_name));
self.schema_names.push(Some(schema_name));
self.table_names.push(Some(table_name));
self.view_definitions.push(Some(definition));
self.check_options.push(None);
// Views are not updatable: statements such as UPDATE, DELETE, and INSERT are illegal and are rejected.
self.is_updatable.push(Some("NO"));
self.definer.push(None);
self.security_type.push(None);
self.character_set_client.push(Some("utf8"));
self.collation_connection.push(Some("utf8_bin"));
}
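// A hedged illustration (not part of the original diff) of the predicate pushdown used in
// `add_view` above: for a query such as
// `SELECT * FROM information_schema.views WHERE table_schema = 'public'`,
// `Predicates::from_scan_request` captures the `table_schema = 'public'` filter, and
// `predicates.eval(&row)` returns false for any (TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME)
// row whose schema is not 'public', so that row is skipped before any column is built.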
fn finish(&mut self) -> Result<RecordBatch> {
let columns: Vec<VectorRef> = vec![
Arc::new(self.catalog_names.finish()),
Arc::new(self.schema_names.finish()),
Arc::new(self.table_names.finish()),
Arc::new(self.view_definitions.finish()),
Arc::new(self.check_options.finish()),
Arc::new(self.is_updatable.finish()),
Arc::new(self.definer.finish()),
Arc::new(self.security_type.finish()),
Arc::new(self.character_set_client.finish()),
Arc::new(self.collation_connection.finish()),
];
RecordBatch::new(self.schema.clone(), columns).context(CreateRecordBatchSnafu)
}
}
impl DfPartitionStream for InformationSchemaViews {
fn schema(&self) -> &ArrowSchemaRef {
self.schema.arrow_schema()
}
fn execute(&self, _: Arc<TaskContext>) -> DfSendableRecordBatchStream {
let schema = self.schema.arrow_schema().clone();
let mut builder = self.builder();
Box::pin(DfRecordBatchStreamAdapter::new(
schema,
futures::stream::once(async move {
builder
.make_views(None)
.await
.map(|x| x.into_df_record_batch())
.map_err(Into::into)
}),
))
}
}


@@ -12,7 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
mod tables;
mod table_columns;
use std::sync::Arc;
use arrow_schema::SchemaRef as ArrowSchemaRef;
@@ -27,22 +28,21 @@ use datatypes::schema::SchemaRef;
use datatypes::vectors::VectorRef;
use snafu::ResultExt;
use store_api::storage::{ScanRequest, TableId};
pub use tables::get_schema_columns;
use super::SystemTable;
use crate::error::{CreateRecordBatchSnafu, InternalSnafu, Result};
use crate::information_schema::InformationTable;
/// A memory table with specified schema and columns.
pub(super) struct MemoryTable {
table_id: TableId,
table_name: &'static str,
schema: SchemaRef,
columns: Vec<VectorRef>,
pub(crate) struct MemoryTable {
pub(crate) table_id: TableId,
pub(crate) table_name: &'static str,
pub(crate) schema: SchemaRef,
pub(crate) columns: Vec<VectorRef>,
}
impl MemoryTable {
/// Creates a memory table with table id, name, schema and columns.
pub(super) fn new(
pub fn new(
table_id: TableId,
table_name: &'static str,
schema: SchemaRef,
@@ -56,12 +56,54 @@ impl MemoryTable {
}
}
fn builder(&self) -> MemoryTableBuilder {
pub fn builder(&self) -> MemoryTableBuilder {
MemoryTableBuilder::new(self.schema.clone(), self.columns.clone())
}
}
impl InformationTable for MemoryTable {
pub(crate) struct MemoryTableBuilder {
schema: SchemaRef,
columns: Vec<VectorRef>,
}
impl MemoryTableBuilder {
fn new(schema: SchemaRef, columns: Vec<VectorRef>) -> Self {
Self { schema, columns }
}
/// Construct the `information_schema.{table_name}` virtual table
pub async fn memory_records(&mut self) -> Result<RecordBatch> {
if self.columns.is_empty() {
RecordBatch::new_empty(self.schema.clone()).context(CreateRecordBatchSnafu)
} else {
RecordBatch::new(self.schema.clone(), std::mem::take(&mut self.columns))
.context(CreateRecordBatchSnafu)
}
}
}
impl DfPartitionStream for MemoryTable {
fn schema(&self) -> &ArrowSchemaRef {
self.schema.arrow_schema()
}
fn execute(&self, _: Arc<TaskContext>) -> DfSendableRecordBatchStream {
let schema = self.schema.arrow_schema().clone();
let mut builder = self.builder();
Box::pin(DfRecordBatchStreamAdapter::new(
schema,
futures::stream::once(async move {
builder
.memory_records()
.await
.map(|x| x.into_df_record_batch())
.map_err(Into::into)
}),
))
}
}
impl SystemTable for MemoryTable {
fn table_id(&self) -> TableId {
self.table_id
}
@@ -95,48 +137,6 @@ impl InformationTable for MemoryTable {
}
}
struct MemoryTableBuilder {
schema: SchemaRef,
columns: Vec<VectorRef>,
}
impl MemoryTableBuilder {
fn new(schema: SchemaRef, columns: Vec<VectorRef>) -> Self {
Self { schema, columns }
}
/// Construct the `information_schema.{table_name}` virtual table
async fn memory_records(&mut self) -> Result<RecordBatch> {
if self.columns.is_empty() {
RecordBatch::new_empty(self.schema.clone()).context(CreateRecordBatchSnafu)
} else {
RecordBatch::new(self.schema.clone(), std::mem::take(&mut self.columns))
.context(CreateRecordBatchSnafu)
}
}
}
impl DfPartitionStream for MemoryTable {
fn schema(&self) -> &ArrowSchemaRef {
self.schema.arrow_schema()
}
fn execute(&self, _: Arc<TaskContext>) -> DfSendableRecordBatchStream {
let schema = self.schema.arrow_schema().clone();
let mut builder = self.builder();
Box::pin(DfRecordBatchStreamAdapter::new(
schema,
futures::stream::once(async move {
builder
.memory_records()
.await
.map(|x| x.into_df_record_batch())
.map_err(Into::into)
}),
))
}
}
#[cfg(test)]
mod tests {
use std::sync::Arc;
@@ -147,6 +147,7 @@ mod tests {
use datatypes::vectors::StringVector;
use super::*;
use crate::system_schema::SystemTable;
#[tokio::test]
async fn test_memory_table() {
@@ -166,8 +167,8 @@ mod tests {
);
assert_eq!(42, table.table_id());
assert_eq!("test", table.table_name());
assert_eq!(schema, InformationTable::schema(&table));
assert_eq!("test", table.table_name);
assert_eq!(schema, SystemTable::schema(&table));
let stream = table.to_stream(ScanRequest::default()).unwrap();
@@ -196,7 +197,7 @@ mod tests {
assert_eq!(42, table.table_id());
assert_eq!("test", table.table_name());
assert_eq!(schema, InformationTable::schema(&table));
assert_eq!(schema, SystemTable::schema(&table));
let stream = table.to_stream(ScanRequest::default()).unwrap();


@@ -0,0 +1,50 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
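/// Declares one `Vec` per listed column name and fills each of them from a slice of row
/// tuples, so a memory table's columns can be written down as a compact row-oriented table
/// (see the test below for a usage example).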
#[macro_export]
macro_rules! memory_table_cols{
([$($colname:ident),*], $t:expr) => {
let t = &$t;
$(
let mut $colname = Vec::with_capacity(t.len());
)*
paste::paste!{
for &($([<r_ $colname>]),*) in t {
$(
$colname.push([<r_ $colname>]);
)*
}
}
};
}
#[cfg(test)]
mod tests {
#[test]
fn test_memory_table_columns() {
memory_table_cols!(
[oid, typname, typlen],
[
(1, "String", -1),
(2, "Binary", -1),
(3, "Time", 8),
(4, "Datetime", 8)
]
);
assert_eq!(&oid[..], &[1, 2, 3, 4]);
assert_eq!(&typname[..], &["String", "Binary", "Time", "Datetime"]);
assert_eq!(&typlen[..], &[-1, -1, 8, 8]);
}
}


@@ -0,0 +1,144 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
mod pg_catalog_memory_table;
mod pg_class;
mod pg_namespace;
mod table_names;
use std::collections::HashMap;
use std::sync::{Arc, Weak};
use common_catalog::consts::{self, PG_CATALOG_NAME};
use datatypes::schema::ColumnSchema;
use lazy_static::lazy_static;
use paste::paste;
use pg_catalog_memory_table::get_schema_columns;
use pg_class::PGClass;
use pg_namespace::PGNamespace;
use table::TableRef;
pub use table_names::*;
use self::pg_namespace::oid_map::{PGNamespaceOidMap, PGNamespaceOidMapRef};
use super::memory_table::MemoryTable;
use super::utils::tables::u32_column;
use super::{SystemSchemaProvider, SystemSchemaProviderInner, SystemTableRef};
use crate::CatalogManager;
lazy_static! {
static ref MEMORY_TABLES: &'static [&'static str] = &[table_names::PG_TYPE];
}
/// The column name for the OID column.
/// The OID column is a unique identifier of type u32 for each object in the database.
const OID_COLUMN_NAME: &str = "oid";
fn oid_column() -> ColumnSchema {
u32_column(OID_COLUMN_NAME)
}
/// [`PGCatalogProvider`] is the provider for the schema named `pg_catalog`; it is not a catalog itself.
pub struct PGCatalogProvider {
catalog_name: String,
catalog_manager: Weak<dyn CatalogManager>,
tables: HashMap<String, TableRef>,
// Workaround to store mapping of schema_name to a numeric id
namespace_oid_map: PGNamespaceOidMapRef,
}
impl SystemSchemaProvider for PGCatalogProvider {
fn tables(&self) -> &HashMap<String, TableRef> {
assert!(!self.tables.is_empty());
&self.tables
}
}
// TODO(j0hn50n133): not sure whether this should be deduplicated with `information_schema`.
macro_rules! setup_memory_table {
($name: expr) => {
paste! {
{
let (schema, columns) = get_schema_columns($name);
Some(Arc::new(MemoryTable::new(
consts::[<PG_CATALOG_ $name _TABLE_ID>],
$name,
schema,
columns
)) as _)
}
}
};
}
impl PGCatalogProvider {
pub fn new(catalog_name: String, catalog_manager: Weak<dyn CatalogManager>) -> Self {
let mut provider = Self {
catalog_name,
catalog_manager,
tables: HashMap::new(),
namespace_oid_map: Arc::new(PGNamespaceOidMap::new()),
};
provider.build_tables();
provider
}
fn build_tables(&mut self) {
// SECURITY NOTE:
// Must follow the same security rules as [`InformationSchemaProvider::build_tables`].
let mut tables = HashMap::new();
// TODO(J0HN50N133): model the table name as an enum type to get rid of expect/unwrap here.
// It's safe to unwrap here because we are sure that the constants have been handled correctly inside system_table.
for name in MEMORY_TABLES.iter() {
tables.insert(name.to_string(), self.build_table(name).expect(name));
}
tables.insert(
PG_NAMESPACE.to_string(),
self.build_table(PG_NAMESPACE).expect(PG_NAMESPACE),
);
tables.insert(
PG_CLASS.to_string(),
self.build_table(PG_CLASS).expect(PG_CLASS),
);
self.tables = tables;
}
}
impl SystemSchemaProviderInner for PGCatalogProvider {
fn schema_name() -> &'static str {
PG_CATALOG_NAME
}
fn system_table(&self, name: &str) -> Option<SystemTableRef> {
match name {
table_names::PG_TYPE => setup_memory_table!(PG_TYPE),
table_names::PG_NAMESPACE => Some(Arc::new(PGNamespace::new(
self.catalog_name.clone(),
self.catalog_manager.clone(),
self.namespace_oid_map.clone(),
))),
table_names::PG_CLASS => Some(Arc::new(PGClass::new(
self.catalog_name.clone(),
self.catalog_manager.clone(),
self.namespace_oid_map.clone(),
))),
_ => None,
}
}
fn catalog_name(&self) -> &str {
&self.catalog_name
}
}


@@ -0,0 +1,69 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::sync::Arc;
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
use datatypes::vectors::{Int16Vector, StringVector, UInt32Vector, VectorRef};
use super::oid_column;
use super::table_names::PG_TYPE;
use crate::memory_table_cols;
use crate::system_schema::utils::tables::{i16_column, string_column};
fn pg_type_schema_columns() -> (Vec<ColumnSchema>, Vec<VectorRef>) {
// TODO(j0hn50n133): acquire this information from `DataType` instead of hardcoding it to avoid regression.
memory_table_cols!(
[oid, typname, typlen],
[
(1, "String", -1),
(2, "Binary", -1),
(3, "Int8", 1),
(4, "Int16", 2),
(5, "Int32", 4),
(6, "Int64", 8),
(7, "UInt8", 1),
(8, "UInt16", 2),
(9, "UInt32", 4),
(10, "UInt64", 8),
(11, "Float32", 4),
(12, "Float64", 8),
(13, "Decimal", 16),
(14, "Date", 4),
(15, "DateTime", 8),
(16, "Timestamp", 8),
(17, "Time", 8),
(18, "Duration", 8),
(19, "Interval", 16),
(20, "List", -1),
]
);
(
// Not quite identical to PostgreSQL; we only follow pg's definition of these columns.
vec![oid_column(), string_column("typname"), i16_column("typlen")],
vec![
Arc::new(UInt32Vector::from_vec(oid)), // oid
Arc::new(StringVector::from(typname)),
Arc::new(Int16Vector::from_vec(typlen)), // typlen in bytes
],
)
}
pub(super) fn get_schema_columns(table_name: &str) -> (SchemaRef, Vec<VectorRef>) {
let (column_schemas, columns): (_, Vec<VectorRef>) = match table_name {
PG_TYPE => pg_type_schema_columns(),
_ => unreachable!("Unknown table in pg_catalog: {}", table_name),
};
(Arc::new(Schema::new(column_schemas)), columns)
}


@@ -0,0 +1,263 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::sync::{Arc, Weak};
use arrow_schema::SchemaRef as ArrowSchemaRef;
use common_catalog::consts::PG_CATALOG_PG_CLASS_TABLE_ID;
use common_error::ext::BoxedError;
use common_recordbatch::adapter::RecordBatchStreamAdapter;
use common_recordbatch::{DfSendableRecordBatchStream, RecordBatch};
use datafusion::execution::TaskContext;
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
use datatypes::scalars::ScalarVectorBuilder;
use datatypes::schema::{Schema, SchemaRef};
use datatypes::value::Value;
use datatypes::vectors::{StringVectorBuilder, UInt32VectorBuilder, VectorRef};
use futures::TryStreamExt;
use snafu::{OptionExt, ResultExt};
use store_api::storage::ScanRequest;
use table::metadata::TableType;
use super::pg_namespace::oid_map::PGNamespaceOidMapRef;
use super::{OID_COLUMN_NAME, PG_CLASS};
use crate::error::{
CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
};
use crate::information_schema::Predicates;
use crate::system_schema::utils::tables::{string_column, u32_column};
use crate::system_schema::SystemTable;
use crate::CatalogManager;
// === column name ===
pub const RELNAME: &str = "relname";
pub const RELNAMESPACE: &str = "relnamespace";
pub const RELKIND: &str = "relkind";
pub const RELOWNER: &str = "relowner";
// === enum value of relkind ===
pub const RELKIND_TABLE: &str = "r";
pub const RELKIND_VIEW: &str = "v";
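// These single-character codes mirror PostgreSQL's `pg_class.relkind` values, where
// 'r' denotes an ordinary table and 'v' a view.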
/// The initial capacity of the vector builders.
const INIT_CAPACITY: usize = 42;
/// The dummy owner id for the namespace.
const DUMMY_OWNER_ID: u32 = 0;
/// The `pg_catalog.pg_class` table implementation.
pub(super) struct PGClass {
schema: SchemaRef,
catalog_name: String,
catalog_manager: Weak<dyn CatalogManager>,
// Workaround to convert schema_name to a numeric id
namespace_oid_map: PGNamespaceOidMapRef,
}
impl PGClass {
pub(super) fn new(
catalog_name: String,
catalog_manager: Weak<dyn CatalogManager>,
namespace_oid_map: PGNamespaceOidMapRef,
) -> Self {
Self {
schema: Self::schema(),
catalog_name,
catalog_manager,
namespace_oid_map,
}
}
fn schema() -> SchemaRef {
Arc::new(Schema::new(vec![
u32_column(OID_COLUMN_NAME),
string_column(RELNAME),
u32_column(RELNAMESPACE),
string_column(RELKIND),
u32_column(RELOWNER),
]))
}
fn builder(&self) -> PGClassBuilder {
PGClassBuilder::new(
self.schema.clone(),
self.catalog_name.clone(),
self.catalog_manager.clone(),
self.namespace_oid_map.clone(),
)
}
}
impl SystemTable for PGClass {
fn table_id(&self) -> table::metadata::TableId {
PG_CATALOG_PG_CLASS_TABLE_ID
}
fn table_name(&self) -> &'static str {
PG_CLASS
}
fn schema(&self) -> SchemaRef {
self.schema.clone()
}
fn to_stream(
&self,
request: ScanRequest,
) -> Result<common_recordbatch::SendableRecordBatchStream> {
let schema = self.schema.arrow_schema().clone();
let mut builder = self.builder();
let stream = Box::pin(DfRecordBatchStreamAdapter::new(
schema,
futures::stream::once(async move {
builder
.make_class(Some(request))
.await
.map(|x| x.into_df_record_batch())
.map_err(Into::into)
}),
));
Ok(Box::pin(
RecordBatchStreamAdapter::try_new(stream)
.map_err(BoxedError::new)
.context(InternalSnafu)?,
))
}
}
impl DfPartitionStream for PGClass {
fn schema(&self) -> &ArrowSchemaRef {
self.schema.arrow_schema()
}
fn execute(&self, _: Arc<TaskContext>) -> DfSendableRecordBatchStream {
let schema = self.schema.arrow_schema().clone();
let mut builder = self.builder();
Box::pin(DfRecordBatchStreamAdapter::new(
schema,
futures::stream::once(async move {
builder
.make_class(None)
.await
.map(|x| x.into_df_record_batch())
.map_err(Into::into)
}),
))
}
}
/// Builds the `pg_catalog.pg_class` table row by row
/// TODO(J0HN50N133): `relowner` is always the [`DUMMY_OWNER_ID`] because we don't have a user system yet.
/// Once we have one, make it the actual owner of the table.
struct PGClassBuilder {
schema: SchemaRef,
catalog_name: String,
catalog_manager: Weak<dyn CatalogManager>,
namespace_oid_map: PGNamespaceOidMapRef,
oid: UInt32VectorBuilder,
relname: StringVectorBuilder,
relnamespace: UInt32VectorBuilder,
relkind: StringVectorBuilder,
relowner: UInt32VectorBuilder,
}
impl PGClassBuilder {
fn new(
schema: SchemaRef,
catalog_name: String,
catalog_manager: Weak<dyn CatalogManager>,
namespace_oid_map: PGNamespaceOidMapRef,
) -> Self {
Self {
schema,
catalog_name,
catalog_manager,
namespace_oid_map,
oid: UInt32VectorBuilder::with_capacity(INIT_CAPACITY),
relname: StringVectorBuilder::with_capacity(INIT_CAPACITY),
relnamespace: UInt32VectorBuilder::with_capacity(INIT_CAPACITY),
relkind: StringVectorBuilder::with_capacity(INIT_CAPACITY),
relowner: UInt32VectorBuilder::with_capacity(INIT_CAPACITY),
}
}
async fn make_class(&mut self, request: Option<ScanRequest>) -> Result<RecordBatch> {
let catalog_name = self.catalog_name.clone();
let catalog_manager = self
.catalog_manager
.upgrade()
.context(UpgradeWeakCatalogManagerRefSnafu)?;
let predicates = Predicates::from_scan_request(&request);
for schema_name in catalog_manager.schema_names(&catalog_name).await? {
let mut stream = catalog_manager.tables(&catalog_name, &schema_name);
while let Some(table) = stream.try_next().await? {
let table_info = table.table_info();
self.add_class(
&predicates,
table_info.table_id(),
&schema_name,
&table_info.name,
if table_info.table_type == TableType::View {
RELKIND_VIEW
} else {
RELKIND_TABLE
},
);
}
}
self.finish()
}
fn add_class(
&mut self,
predicates: &Predicates,
oid: u32,
schema: &str,
table: &str,
kind: &str,
) {
let namespace_oid = self.namespace_oid_map.get_oid(schema);
let row = [
(OID_COLUMN_NAME, &Value::from(oid)),
(RELNAMESPACE, &Value::from(schema)),
(RELNAME, &Value::from(table)),
(RELKIND, &Value::from(kind)),
(RELOWNER, &Value::from(DUMMY_OWNER_ID)),
];
if !predicates.eval(&row) {
return;
}
self.oid.push(Some(oid));
self.relnamespace.push(Some(namespace_oid));
self.relname.push(Some(table));
self.relkind.push(Some(kind));
self.relowner.push(Some(DUMMY_OWNER_ID));
}
fn finish(&mut self) -> Result<RecordBatch> {
let columns: Vec<VectorRef> = vec![
Arc::new(self.oid.finish()),
Arc::new(self.relname.finish()),
Arc::new(self.relnamespace.finish()),
Arc::new(self.relkind.finish()),
Arc::new(self.relowner.finish()),
];
RecordBatch::new(self.schema.clone(), columns).context(CreateRecordBatchSnafu)
}
}


@@ -0,0 +1,206 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
pub(super) mod oid_map;
use std::sync::{Arc, Weak};
use arrow_schema::SchemaRef as ArrowSchemaRef;
use common_catalog::consts::PG_CATALOG_PG_NAMESPACE_TABLE_ID;
use common_error::ext::BoxedError;
use common_recordbatch::adapter::RecordBatchStreamAdapter;
use common_recordbatch::{DfSendableRecordBatchStream, RecordBatch, SendableRecordBatchStream};
use datafusion::execution::TaskContext;
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
use datatypes::scalars::ScalarVectorBuilder;
use datatypes::schema::{Schema, SchemaRef};
use datatypes::value::Value;
use datatypes::vectors::{StringVectorBuilder, UInt32VectorBuilder, VectorRef};
use snafu::{OptionExt, ResultExt};
use store_api::storage::ScanRequest;
use super::{PGNamespaceOidMapRef, OID_COLUMN_NAME, PG_NAMESPACE};
use crate::error::{
CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
};
use crate::information_schema::Predicates;
use crate::system_schema::utils::tables::{string_column, u32_column};
use crate::system_schema::SystemTable;
use crate::CatalogManager;
/// The `pg_catalog.pg_namespace` table implementation.
/// A namespace corresponds to a schema in GreptimeDB.
const NSPNAME: &str = "nspname";
const INIT_CAPACITY: usize = 42;
pub(super) struct PGNamespace {
schema: SchemaRef,
catalog_name: String,
catalog_manager: Weak<dyn CatalogManager>,
// Workaround to convert schema_name to a numeric id
oid_map: PGNamespaceOidMapRef,
}
impl PGNamespace {
pub(super) fn new(
catalog_name: String,
catalog_manager: Weak<dyn CatalogManager>,
oid_map: PGNamespaceOidMapRef,
) -> Self {
Self {
schema: Self::schema(),
catalog_name,
catalog_manager,
oid_map,
}
}
fn schema() -> SchemaRef {
Arc::new(Schema::new(vec![
// TODO(J0HN50N133): we do not have a numeric schema id, so the schema name is mapped to an oid as a workaround. Use a proper schema id once we have it.
u32_column(OID_COLUMN_NAME),
string_column(NSPNAME),
]))
}
fn builder(&self) -> PGNamespaceBuilder {
PGNamespaceBuilder::new(
self.schema.clone(),
self.catalog_name.clone(),
self.catalog_manager.clone(),
self.oid_map.clone(),
)
}
}
impl SystemTable for PGNamespace {
fn schema(&self) -> SchemaRef {
self.schema.clone()
}
fn table_id(&self) -> table::metadata::TableId {
PG_CATALOG_PG_NAMESPACE_TABLE_ID
}
fn table_name(&self) -> &'static str {
PG_NAMESPACE
}
fn to_stream(&self, request: ScanRequest) -> Result<SendableRecordBatchStream> {
let schema = self.schema.arrow_schema().clone();
let mut builder = self.builder();
let stream = Box::pin(DfRecordBatchStreamAdapter::new(
schema,
futures::stream::once(async move {
builder
.make_namespace(Some(request))
.await
.map(|x| x.into_df_record_batch())
.map_err(Into::into)
}),
));
Ok(Box::pin(
RecordBatchStreamAdapter::try_new(stream)
.map_err(BoxedError::new)
.context(InternalSnafu)?,
))
}
}
impl DfPartitionStream for PGNamespace {
fn schema(&self) -> &ArrowSchemaRef {
self.schema.arrow_schema()
}
fn execute(&self, _: Arc<TaskContext>) -> DfSendableRecordBatchStream {
let schema = self.schema.arrow_schema().clone();
let mut builder = self.builder();
Box::pin(DfRecordBatchStreamAdapter::new(
schema,
futures::stream::once(async move {
builder
.make_namespace(None)
.await
.map(|x| x.into_df_record_batch())
.map_err(Into::into)
}),
))
}
}
/// Builds the `pg_catalog.pg_namespace` table row by row
/// `oid` is derived from the schema name as a workaround since we don't have a numeric schema id.
/// `nspname` is the schema name.
struct PGNamespaceBuilder {
schema: SchemaRef,
catalog_name: String,
catalog_manager: Weak<dyn CatalogManager>,
namespace_oid_map: PGNamespaceOidMapRef,
oid: UInt32VectorBuilder,
nspname: StringVectorBuilder,
}
impl PGNamespaceBuilder {
fn new(
schema: SchemaRef,
catalog_name: String,
catalog_manager: Weak<dyn CatalogManager>,
namespace_oid_map: PGNamespaceOidMapRef,
) -> Self {
Self {
schema,
catalog_name,
catalog_manager,
namespace_oid_map,
oid: UInt32VectorBuilder::with_capacity(INIT_CAPACITY),
nspname: StringVectorBuilder::with_capacity(INIT_CAPACITY),
}
}
/// Construct the `pg_catalog.pg_namespace` virtual table
async fn make_namespace(&mut self, request: Option<ScanRequest>) -> Result<RecordBatch> {
let catalog_name = self.catalog_name.clone();
let catalog_manager = self
.catalog_manager
.upgrade()
.context(UpgradeWeakCatalogManagerRefSnafu)?;
let predicates = Predicates::from_scan_request(&request);
for schema_name in catalog_manager.schema_names(&catalog_name).await? {
self.add_namespace(&predicates, &schema_name);
}
self.finish()
}
fn finish(&mut self) -> Result<RecordBatch> {
let columns: Vec<VectorRef> =
vec![Arc::new(self.oid.finish()), Arc::new(self.nspname.finish())];
RecordBatch::new(self.schema.clone(), columns).context(CreateRecordBatchSnafu)
}
fn add_namespace(&mut self, predicates: &Predicates, schema_name: &str) {
let oid = self.namespace_oid_map.get_oid(schema_name);
let row = [
(OID_COLUMN_NAME, &Value::from(oid)),
(NSPNAME, &Value::from(schema_name)),
];
if !predicates.eval(&row) {
return;
}
self.oid.push(Some(oid));
self.nspname.push(Some(schema_name));
}
}


@@ -0,0 +1,100 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::hash::BuildHasher;
use std::sync::Arc;
use dashmap::DashMap;
use rustc_hash::FxSeededState;
pub type PGNamespaceOidMapRef = Arc<PGNamespaceOidMap>;
// Workaround to convert a schema name to a numeric id;
// remove this when we have numeric schema ids in GreptimeDB.
pub struct PGNamespaceOidMap {
oid_map: DashMap<String, u32>,
// Rust uses SipHash by default, which provides resistance against DoS attacks but
// produces different hash values on each GreptimeDB instance, causing the sqlness
// tests to fail. We need a deterministic hash here to provide, on a best-effort basis,
// the same oid for the same schema name; DoS attacks aren't a concern here.
hasher: FxSeededState,
}
impl PGNamespaceOidMap {
pub fn new() -> Self {
Self {
oid_map: DashMap::new(),
hasher: FxSeededState::with_seed(0), // PLEASE DO NOT MODIFY THIS SEED VALUE!!!
}
}
fn oid_is_used(&self, oid: u32) -> bool {
self.oid_map.iter().any(|e| *e.value() == oid)
}
pub fn get_oid(&self, schema_name: &str) -> u32 {
if let Some(oid) = self.oid_map.get(schema_name) {
*oid
} else {
let mut oid = self.hasher.hash_one(schema_name) as u32;
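// If the hashed oid is already taken by another schema, keep re-hashing the previous
// candidate until a free oid is found.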
while self.oid_is_used(oid) {
oid = self.hasher.hash_one(oid) as u32;
}
self.oid_map.insert(schema_name.to_string(), oid);
oid
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn oid_is_stable() {
let oid_map_1 = PGNamespaceOidMap::new();
let oid_map_2 = PGNamespaceOidMap::new();
let schema = "schema";
let oid = oid_map_1.get_oid(schema);
// oid stays stable within the same instance
assert_eq!(oid, oid_map_1.get_oid(schema));
// oid stays stable across different instances
assert_eq!(oid, oid_map_2.get_oid(schema));
}
#[test]
fn oid_collision() {
let oid_map = PGNamespaceOidMap::new();
let key1 = "3178510";
let key2 = "4215648";
// the two keys produce a hash collision
assert_eq!(
oid_map.hasher.hash_one(key1) as u32,
oid_map.hasher.hash_one(key2) as u32
);
// insert them into oid_map
let oid1 = oid_map.get_oid(key1);
let oid2 = oid_map.get_oid(key2);
// they should still get different oids
assert_ne!(oid1, oid2);
}
}


@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
fn main() {
common_version::setup_build_info();
}
pub const PG_DATABASE: &str = "pg_databases";
pub const PG_NAMESPACE: &str = "pg_namespace";
pub const PG_CLASS: &str = "pg_class";
pub const PG_TYPE: &str = "pg_type";


@@ -25,7 +25,7 @@ type ColumnName = String;
/// We only support these simple predicates currently.
/// TODO(dennis): support more predicate types.
#[derive(Clone, PartialEq, Eq, Debug)]
enum Predicate {
pub(crate) enum Predicate {
Eq(ColumnName, Value),
Like(ColumnName, String, bool),
NotEq(ColumnName, Value),


@@ -12,6 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
pub mod tables;
use std::sync::{Arc, Weak};
use common_config::Mode;


@@ -0,0 +1,79 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use datatypes::prelude::ConcreteDataType;
use datatypes::schema::ColumnSchema;
pub fn string_columns(names: &[&'static str]) -> Vec<ColumnSchema> {
names.iter().map(|name| string_column(name)).collect()
}
pub fn string_column(name: &str) -> ColumnSchema {
ColumnSchema::new(
str::to_lowercase(name),
ConcreteDataType::string_datatype(),
false,
)
}
pub fn u32_column(name: &str) -> ColumnSchema {
ColumnSchema::new(
str::to_lowercase(name),
ConcreteDataType::uint32_datatype(),
false,
)
}
pub fn i16_column(name: &str) -> ColumnSchema {
ColumnSchema::new(
str::to_lowercase(name),
ConcreteDataType::int16_datatype(),
false,
)
}
pub fn bigint_column(name: &str) -> ColumnSchema {
ColumnSchema::new(
str::to_lowercase(name),
ConcreteDataType::int64_datatype(),
false,
)
}
pub fn datetime_column(name: &str) -> ColumnSchema {
ColumnSchema::new(
str::to_lowercase(name),
ConcreteDataType::datetime_datatype(),
false,
)
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_string_columns() {
let columns = ["a", "b", "c"];
let column_schemas = string_columns(&columns);
assert_eq!(3, column_schemas.len());
for (i, name) in columns.iter().enumerate() {
let cs = column_schemas.get(i).unwrap();
assert_eq!(*name, cs.name);
assert_eq!(ConcreteDataType::string_datatype(), cs.data_type);
}
}
}


@@ -17,21 +17,24 @@ use std::sync::Arc;
use bytes::Bytes;
use common_catalog::format_full_table_name;
use common_query::logical_plan::SubstraitPlanDecoderRef;
use common_query::logical_plan::{rename_logical_plan_columns, SubstraitPlanDecoderRef};
use datafusion::common::{ResolvedTableReference, TableReference};
use datafusion::datasource::view::ViewTable;
use datafusion::datasource::{provider_as_source, TableProvider};
use datafusion::logical_expr::TableSource;
use itertools::Itertools;
use session::context::QueryContext;
use snafu::{ensure, OptionExt, ResultExt};
use table::metadata::TableType;
use table::table::adapter::DfTableProviderAdapter;
mod dummy_catalog;
use dummy_catalog::DummyCatalogList;
use table::TableRef;
use crate::error::{
CastManagerSnafu, DatafusionSnafu, DecodePlanSnafu, GetViewCacheSnafu, QueryAccessDeniedSnafu,
Result, TableNotExistSnafu, ViewInfoNotFoundSnafu,
CastManagerSnafu, DatafusionSnafu, DecodePlanSnafu, GetViewCacheSnafu, ProjectViewColumnsSnafu,
QueryAccessDeniedSnafu, Result, TableNotExistSnafu, ViewInfoNotFoundSnafu,
ViewPlanColumnsChangedSnafu,
};
use crate::kvbackend::KvBackendCatalogManager;
use crate::CatalogManagerRef;
@@ -43,6 +46,7 @@ pub struct DfTableSourceProvider {
default_catalog: String,
default_schema: String,
plan_decoder: SubstraitPlanDecoderRef,
enable_ident_normalization: bool,
}
impl DfTableSourceProvider {
@@ -51,6 +55,7 @@ impl DfTableSourceProvider {
disallow_cross_catalog_query: bool,
query_ctx: &QueryContext,
plan_decoder: SubstraitPlanDecoderRef,
enable_ident_normalization: bool,
) -> Self {
Self {
catalog_manager,
@@ -59,6 +64,7 @@ impl DfTableSourceProvider {
default_catalog: query_ctx.current_catalog().to_owned(),
default_schema: query_ctx.current_schema(),
plan_decoder,
enable_ident_normalization,
}
}
@@ -108,32 +114,7 @@ impl DfTableSourceProvider {
})?;
let provider: Arc<dyn TableProvider> = if table.table_info().table_type == TableType::View {
let catalog_manager = self
.catalog_manager
.as_any()
.downcast_ref::<KvBackendCatalogManager>()
.context(CastManagerSnafu)?;
let view_info = catalog_manager
.view_info_cache()?
.get(table.table_info().ident.table_id)
.await
.context(GetViewCacheSnafu)?
.context(ViewInfoNotFoundSnafu {
name: &table.table_info().name,
})?;
// Build the catalog list provider for deserialization.
let catalog_list = Arc::new(DummyCatalogList::new(self.catalog_manager.clone()));
let logical_plan = self
.plan_decoder
.decode(Bytes::from(view_info.view_info.clone()), catalog_list, true)
.await
.context(DecodePlanSnafu {
name: &table.table_info().name,
})?;
Arc::new(ViewTable::try_new(logical_plan, None).context(DatafusionSnafu)?)
self.create_view_provider(&table).await?
} else {
Arc::new(DfTableProviderAdapter::new(table))
};
@@ -143,6 +124,80 @@ impl DfTableSourceProvider {
let _ = self.resolved_tables.insert(resolved_name, source.clone());
Ok(source)
}
async fn create_view_provider(&self, table: &TableRef) -> Result<Arc<dyn TableProvider>> {
let catalog_manager = self
.catalog_manager
.as_any()
.downcast_ref::<KvBackendCatalogManager>()
.context(CastManagerSnafu)?;
let view_info = catalog_manager
.view_info_cache()?
.get(table.table_info().ident.table_id)
.await
.context(GetViewCacheSnafu)?
.context(ViewInfoNotFoundSnafu {
name: &table.table_info().name,
})?;
// Build the catalog list provider for deserialization.
let catalog_list = Arc::new(DummyCatalogList::new(self.catalog_manager.clone()));
let logical_plan = self
.plan_decoder
.decode(Bytes::from(view_info.view_info.clone()), catalog_list, true)
.await
.context(DecodePlanSnafu {
name: &table.table_info().name,
})?;
let columns: Vec<_> = view_info.columns.iter().map(|c| c.as_str()).collect();
let original_plan_columns: Vec<_> =
view_info.plan_columns.iter().map(|c| c.as_str()).collect();
let plan_columns: Vec<_> = logical_plan
.schema()
.columns()
.into_iter()
.map(|c| c.name)
.collect();
// Only check the number of columns, because substrait doesn't include aliases currently.
// See https://github.com/apache/datafusion/issues/10815#issuecomment-2158666881
// and https://github.com/apache/datafusion/issues/6489
// TODO(dennis): check column names
ensure!(
original_plan_columns.len() == plan_columns.len(),
ViewPlanColumnsChangedSnafu {
origin_names: original_plan_columns.iter().join(","),
actual_names: plan_columns.iter().join(","),
}
);
// We have to do the `columns` projection here, because
// substrait doesn't include aliases for either tables or columns:
// https://github.com/apache/datafusion/issues/10815#issuecomment-2158666881
let logical_plan = if !columns.is_empty() {
rename_logical_plan_columns(
self.enable_ident_normalization,
logical_plan,
plan_columns
.iter()
.map(|c| c.as_str())
.zip(columns.into_iter())
.collect(),
)
.context(ProjectViewColumnsSnafu)?
} else {
logical_plan
};
Ok(Arc::new(
ViewTable::try_new(logical_plan, Some(view_info.definition.to_string()))
.context(DatafusionSnafu)?,
))
}
}
#[cfg(test)]
@@ -162,6 +217,7 @@ mod tests {
true,
query_ctx,
DummyDecoder::arc(),
true,
);
let table_ref = TableReference::bare("table_name");
@@ -277,12 +333,19 @@ mod tests {
let logical_plan = vec![1, 2, 3];
// Create view metadata
table_metadata_manager
.create_view_metadata(view_info.clone().into(), logical_plan, HashSet::new())
.create_view_metadata(
view_info.clone().into(),
logical_plan,
HashSet::new(),
vec!["a".to_string(), "b".to_string()],
vec!["id".to_string(), "name".to_string()],
"definition".to_string(),
)
.await
.unwrap();
let mut table_provider =
DfTableSourceProvider::new(catalog_manager, true, query_ctx, MockDecoder::arc());
DfTableSourceProvider::new(catalog_manager, true, query_ctx, MockDecoder::arc(), true);
// View not found
let table_ref = TableReference::bare("not_exists_view");
@@ -290,6 +353,12 @@ mod tests {
let table_ref = TableReference::bare(view_info.name);
let source = table_provider.resolve_table(table_ref).await.unwrap();
assert_eq!(*source.get_logical_plan().unwrap(), mock_plan());
assert_eq!(
r#"
Projection: person.id AS a, person.name AS b
Filter: person.id > Int32(500)
TableScan: person"#,
format!("\n{:?}", source.get_logical_plan().unwrap())
);
}
}

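Because the decoded substrait plan loses the view's column aliases, the new create_view_provider re-applies the stored column names by projecting the plan. Below is a minimal, hypothetical sketch of that idea using plain DataFusion APIs; it is not GreptimeDB's rename_logical_plan_columns helper, and the function name and argument shape are assumptions.

// Minimal sketch (assumption: plain DataFusion, not the crate's real helper):
// wrap a decoded plan in a projection that renames each (old, new) column pair.
use datafusion::error::Result;
use datafusion::logical_expr::{col, LogicalPlan, LogicalPlanBuilder};

fn project_with_aliases(plan: LogicalPlan, renames: &[(&str, &str)]) -> Result<LogicalPlan> {
    let exprs = renames.iter().map(|(old, new)| col(*old).alias(*new));
    LogicalPlanBuilder::from(plan).project(exprs)?.build()
}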
View File

@@ -49,4 +49,4 @@ tracing-subscriber = { version = "0.3", features = ["env-filter"] }
[dev-dependencies.substrait_proto]
package = "substrait"
version = "0.17"
version = "0.37"

View File

@@ -33,9 +33,13 @@ use common_telemetry::tracing_context::W3cTrace;
use futures_util::StreamExt;
use prost::Message;
use snafu::{ensure, ResultExt};
use tonic::metadata::AsciiMetadataKey;
use tonic::transport::Channel;
use crate::error::{ConvertFlightDataSnafu, Error, IllegalFlightMessagesSnafu, ServerSnafu};
use crate::error::{
ConvertFlightDataSnafu, Error, FlightGetSnafu, IllegalFlightMessagesSnafu, InvalidAsciiSnafu,
ServerSnafu,
};
use crate::{from_grpc_response, Client, Result};
#[derive(Clone, Debug, Default)]
@@ -88,7 +92,7 @@ impl Database {
///
/// - the name of database when using GreptimeDB standalone or cluster
/// - the name provided by GreptimeCloud or other multi-tenant GreptimeDB
/// environment
/// environment
pub fn new_with_dbname(dbname: impl Into<String>, client: Client) -> Self {
Self {
catalog: String::default(),
@@ -130,6 +134,36 @@ impl Database {
self.handle(Request::Inserts(requests)).await
}
pub async fn insert_with_hints(
&self,
requests: InsertRequests,
hints: &[(&str, &str)],
) -> Result<u32> {
let mut client = make_database_client(&self.client)?.inner;
let request = self.to_rpc_request(Request::Inserts(requests));
let mut request = tonic::Request::new(request);
let metadata = request.metadata_mut();
for (key, value) in hints {
let key = AsciiMetadataKey::from_bytes(format!("x-greptime-hint-{}", key).as_bytes())
.map_err(|_| {
InvalidAsciiSnafu {
value: key.to_string(),
}
.build()
})?;
let value = value.parse().map_err(|_| {
InvalidAsciiSnafu {
value: value.to_string(),
}
.build()
})?;
metadata.insert(key, value);
}
let response = client.handle(request).await?.into_inner();
from_grpc_response(response)
}
async fn handle(&self, request: Request) -> Result<u32> {
let mut client = make_database_client(&self.client)?.inner;
let request = self.to_rpc_request(request);
@@ -192,16 +226,18 @@ impl Database {
let mut client = self.client.make_flight_client()?;
let response = client.mut_inner().do_get(request).await.map_err(|e| {
let response = client.mut_inner().do_get(request).await.or_else(|e| {
let tonic_code = e.code();
let e: Error = e.into();
let code = e.status_code();
let msg = e.to_string();
let error = Error::FlightGet {
tonic_code,
addr: client.addr().to_string(),
source: BoxedError::new(ServerSnafu { code, msg }.build()),
};
let error =
Err(BoxedError::new(ServerSnafu { code, msg }.build())).with_context(|_| {
FlightGetSnafu {
addr: client.addr().to_string(),
tonic_code,
}
});
error!(
"Failed to do Flight get, addr: {}, code: {}, source: {:?}",
client.addr(),

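The new insert_with_hints forwards per-request hints as gRPC metadata headers. The standalone sketch below shows the underlying tonic pattern; the x-greptime-hint- prefix comes from the code above, while the helper name and its stringly-typed error handling are assumptions.

// Sketch of attaching ASCII metadata headers to a tonic request (assumed
// helper, not part of the client crate). Header names and values must be ASCII.
use tonic::metadata::{AsciiMetadataKey, AsciiMetadataValue};

fn with_hint_headers<T>(msg: T, hints: &[(&str, &str)]) -> Result<tonic::Request<T>, String> {
    let mut request = tonic::Request::new(msg);
    let metadata = request.metadata_mut();
    for (key, value) in hints {
        let key = AsciiMetadataKey::from_bytes(format!("x-greptime-hint-{key}").as_bytes())
            .map_err(|e| e.to_string())?;
        let value: AsciiMetadataValue = value
            .parse()
            .map_err(|_| format!("non-ascii hint value: {value}"))?;
        metadata.insert(key, value);
    }
    Ok(request)
}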
View File

@@ -39,13 +39,6 @@ pub enum Error {
source: BoxedError,
},
#[snafu(display("Failure occurs during handling request"))]
HandleRequest {
#[snafu(implicit)]
location: Location,
source: BoxedError,
},
#[snafu(display("Failed to convert FlightData"))]
ConvertFlightData {
#[snafu(implicit)]
@@ -53,13 +46,6 @@ pub enum Error {
source: common_grpc::Error,
},
#[snafu(display("Column datatype error"))]
ColumnDataType {
#[snafu(implicit)]
location: Location,
source: api::error::Error,
},
#[snafu(display("Illegal GRPC client state: {}", err_msg))]
IllegalGrpcClientState {
err_msg: String,
@@ -123,9 +109,9 @@ pub enum Error {
location: Location,
},
#[snafu(display("Failed to send request with streaming: {}", err_msg))]
ClientStreaming {
err_msg: String,
#[snafu(display("Failed to parse ascii string: {}", value))]
InvalidAscii {
value: String,
#[snafu(implicit)]
location: Location,
},
@@ -137,20 +123,19 @@ impl ErrorExt for Error {
fn status_code(&self) -> StatusCode {
match self {
Error::IllegalFlightMessages { .. }
| Error::ColumnDataType { .. }
| Error::MissingField { .. }
| Error::IllegalDatabaseResponse { .. }
| Error::ClientStreaming { .. } => StatusCode::Internal,
| Error::IllegalDatabaseResponse { .. } => StatusCode::Internal,
Error::Server { code, .. } => *code,
Error::FlightGet { source, .. }
| Error::HandleRequest { source, .. }
| Error::RegionServer { source, .. }
| Error::FlowServer { source, .. } => source.status_code(),
Error::CreateChannel { source, .. }
| Error::ConvertFlightData { source, .. }
| Error::CreateTlsChannel { source, .. } => source.status_code(),
Error::IllegalGrpcClientState { .. } => StatusCode::Unexpected,
Error::InvalidAscii { .. } => StatusCode::InvalidArguments,
}
}

View File

@@ -16,9 +16,9 @@ use api::v1::flow::{FlowRequest, FlowResponse};
use api::v1::region::InsertRequests;
use common_error::ext::BoxedError;
use common_meta::node_manager::Flownode;
use snafu::{location, Location, ResultExt};
use snafu::ResultExt;
use crate::error::Result;
use crate::error::{FlowServerSnafu, Result};
use crate::Client;
#[derive(Debug)]
@@ -57,15 +57,10 @@ impl FlowRequester {
let response = client
.handle_create_remove(request)
.await
.map_err(|e| {
.or_else(|e| {
let code = e.code();
let err: crate::error::Error = e.into();
crate::error::Error::FlowServer {
addr,
code,
source: BoxedError::new(err),
location: location!(),
}
Err(BoxedError::new(err)).context(FlowServerSnafu { addr, code })
})?
.into_inner();
Ok(response)
@@ -88,15 +83,10 @@ impl FlowRequester {
let response = client
.handle_mirror_request(requests)
.await
.map_err(|e| {
.or_else(|e| {
let code = e.code();
let err: crate::error::Error = e.into();
crate::error::Error::FlowServer {
addr,
code,
source: BoxedError::new(err),
location: location!(),
}
Err(BoxedError::new(err)).context(FlowServerSnafu { addr, code })
})?
.into_inner();
Ok(response)

View File

@@ -33,13 +33,13 @@ use common_telemetry::error;
use common_telemetry::tracing_context::TracingContext;
use prost::Message;
use query::query_engine::DefaultSerializer;
use snafu::{location, Location, OptionExt, ResultExt};
use snafu::{location, OptionExt, ResultExt};
use substrait::{DFLogicalSubstraitConvertor, SubstraitPlan};
use tokio_stream::StreamExt;
use crate::error::{
self, ConvertFlightDataSnafu, IllegalDatabaseResponseSnafu, IllegalFlightMessagesSnafu,
MissingFieldSnafu, Result, ServerSnafu,
self, ConvertFlightDataSnafu, FlightGetSnafu, IllegalDatabaseResponseSnafu,
IllegalFlightMessagesSnafu, MissingFieldSnafu, Result, ServerSnafu,
};
use crate::{metrics, Client, Error};
@@ -103,11 +103,14 @@ impl RegionRequester {
let e: error::Error = e.into();
let code = e.status_code();
let msg = e.to_string();
let error = Error::FlightGet {
tonic_code,
addr: flight_client.addr().to_string(),
source: BoxedError::new(ServerSnafu { code, msg }.build()),
};
let error = ServerSnafu { code, msg }
.fail::<()>()
.map_err(BoxedError::new)
.with_context(|_| FlightGetSnafu {
tonic_code,
addr: flight_client.addr().to_string(),
})
.unwrap_err();
error!(
e; "Failed to do Flight get, addr: {}, code: {}",
flight_client.addr(),

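Several hunks above replace hand-built error values with snafu context selectors (.context(...) / .with_context(...) over a boxed source). The standalone sketch below shows the same pattern with a made-up error type; FlightGetError and the std::io::Error source are stand-ins, not the crate's real errors.

// Stand-in error type demonstrating the snafu context-selector pattern.
use snafu::{ResultExt, Snafu};

#[derive(Debug, Snafu)]
#[snafu(display("Failed to do Flight get, addr: {addr}"))]
struct FlightGetError {
    addr: String,
    source: std::io::Error,
}

// Wrap a lower-level error with the FlightGet context, mirroring the
// `.context(FlightGetSnafu { .. }).unwrap_err()` usage above.
fn wrap_flight_error(addr: &str, e: std::io::Error) -> FlightGetError {
    Err::<(), _>(e)
        .context(FlightGetSnafu { addr })
        .unwrap_err()
}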
View File

@@ -10,7 +10,9 @@ name = "greptime"
path = "src/bin/greptime.rs"
[features]
default = ["python"]
tokio-console = ["common-telemetry/tokio-console"]
python = ["frontend/python"]
[lints]
workspace = true
@@ -47,9 +49,9 @@ either = "1.8"
etcd-client.workspace = true
file-engine.workspace = true
flow.workspace = true
frontend.workspace = true
frontend = { workspace = true, default-features = false }
futures.workspace = true
human-panic = "1.2.2"
human-panic = "2.0"
lazy_static.workspace = true
meta-client.workspace = true
meta-srv.workspace = true
@@ -78,17 +80,15 @@ tonic.workspace = true
tracing-appender = "0.2"
[target.'cfg(not(windows))'.dependencies]
tikv-jemallocator = "0.5"
tikv-jemallocator = "0.6"
[dev-dependencies]
client = { workspace = true, features = ["testing"] }
common-test-util.workspace = true
common-version.workspace = true
serde.workspace = true
temp-env = "0.3"
tempfile.workspace = true
[target.'cfg(not(windows))'.dev-dependencies]
rexpect = "0.5"
[build-dependencies]
common-version.workspace = true

View File

@@ -21,7 +21,7 @@ use cmd::{cli, datanode, flownode, frontend, metasrv, standalone, App};
use common_version::version;
#[derive(Parser)]
#[command(name = "greptime", author, version, long_version = version!(), about)]
#[command(name = "greptime", author, version, long_version = version(), about)]
#[command(propagate_version = true)]
pub(crate) struct Command {
#[clap(subcommand)]
@@ -62,8 +62,37 @@ enum SubCommand {
#[global_allocator]
static ALLOC: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc;
#[cfg(debug_assertions)]
fn main() -> Result<()> {
use snafu::ResultExt;
// Set the stack size to 8MB for the thread so it wouldn't overflow on large stack usage in debug mode
// see https://github.com/GreptimeTeam/greptimedb/pull/4317
// and https://github.com/rust-lang/rust/issues/34283
std::thread::Builder::new()
.name("main_spawn".to_string())
.stack_size(8 * 1024 * 1024)
.spawn(|| {
{
tokio::runtime::Builder::new_multi_thread()
.thread_stack_size(8 * 1024 * 1024)
.enable_all()
.build()
.expect("Failed building the Runtime")
.block_on(main_body())
}
})
.context(cmd::error::SpawnThreadSnafu)?
.join()
.expect("Couldn't join on the associated thread")
}
#[cfg(not(debug_assertions))]
#[tokio::main]
async fn main() -> Result<()> {
main_body().await
}
async fn main_body() -> Result<()> {
setup_human_panic();
start(Command::parse()).await
}
@@ -110,13 +139,10 @@ async fn start(cli: Command) -> Result<()> {
}
fn setup_human_panic() {
let metadata = human_panic::Metadata {
version: env!("CARGO_PKG_VERSION").into(),
name: "GreptimeDB".into(),
authors: Default::default(),
homepage: "https://github.com/GreptimeTeam/greptimedb/discussions".into(),
};
human_panic::setup_panic!(metadata);
human_panic::setup_panic!(
human_panic::Metadata::new("GreptimeDB", env!("CARGO_PKG_VERSION"))
.homepage("https://github.com/GreptimeTeam/greptimedb/discussions")
);
common_telemetry::set_panic_hook();
}

View File

@@ -21,6 +21,8 @@ mod export;
mod helper;
// Wait for https://github.com/GreptimeTeam/greptimedb/issues/2373
mod database;
mod import;
#[allow(unused)]
mod repl;
@@ -32,6 +34,7 @@ pub use repl::Repl;
use tracing_appender::non_blocking::WorkerGuard;
use self::export::ExportCommand;
use crate::cli::import::ImportCommand;
use crate::error::Result;
use crate::options::GlobalOptions;
use crate::App;
@@ -114,6 +117,7 @@ enum SubCommand {
// Attach(AttachCommand),
Bench(BenchTableMetadataCommand),
Export(ExportCommand),
Import(ImportCommand),
}
impl SubCommand {
@@ -122,6 +126,7 @@ impl SubCommand {
// SubCommand::Attach(cmd) => cmd.build().await,
SubCommand::Bench(cmd) => cmd.build(guard).await,
SubCommand::Export(cmd) => cmd.build(guard).await,
SubCommand::Import(cmd) => cmd.build(guard).await,
}
}
}

src/cmd/src/cli/database.rs (new file, 119 lines)
View File

@@ -0,0 +1,119 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use base64::engine::general_purpose;
use base64::Engine;
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use serde_json::Value;
use servers::http::greptime_result_v1::GreptimedbV1Response;
use servers::http::GreptimeQueryOutput;
use snafu::ResultExt;
use crate::error::{HttpQuerySqlSnafu, Result, SerdeJsonSnafu};
pub(crate) struct DatabaseClient {
addr: String,
catalog: String,
auth_header: Option<String>,
}
impl DatabaseClient {
pub fn new(addr: String, catalog: String, auth_basic: Option<String>) -> Self {
let auth_header = if let Some(basic) = auth_basic {
let encoded = general_purpose::STANDARD.encode(basic);
Some(format!("basic {}", encoded))
} else {
None
};
Self {
addr,
catalog,
auth_header,
}
}
pub async fn sql_in_public(&self, sql: &str) -> Result<Option<Vec<Vec<Value>>>> {
self.sql(sql, DEFAULT_SCHEMA_NAME).await
}
/// Execute a SQL query against the given schema.
pub async fn sql(&self, sql: &str, schema: &str) -> Result<Option<Vec<Vec<Value>>>> {
let url = format!("http://{}/v1/sql", self.addr);
let params = [
("db", format!("{}-{}", self.catalog, schema)),
("sql", sql.to_string()),
];
let mut request = reqwest::Client::new()
.post(&url)
.form(&params)
.header("Content-Type", "application/x-www-form-urlencoded");
if let Some(ref auth) = self.auth_header {
request = request.header("Authorization", auth);
}
let response = request.send().await.with_context(|_| HttpQuerySqlSnafu {
reason: format!("bad url: {}", url),
})?;
let response = response
.error_for_status()
.with_context(|_| HttpQuerySqlSnafu {
reason: format!("query failed: {}", sql),
})?;
let text = response.text().await.with_context(|_| HttpQuerySqlSnafu {
reason: "cannot get response text".to_string(),
})?;
let body = serde_json::from_str::<GreptimedbV1Response>(&text).context(SerdeJsonSnafu)?;
Ok(body.output().first().and_then(|output| match output {
GreptimeQueryOutput::Records(records) => Some(records.rows().clone()),
GreptimeQueryOutput::AffectedRows(_) => None,
}))
}
}
/// Split a `catalog-schema` database string at `-`.
pub(crate) fn split_database(database: &str) -> Result<(String, Option<String>)> {
let (catalog, schema) = match database.split_once('-') {
Some((catalog, schema)) => (catalog, schema),
None => (DEFAULT_CATALOG_NAME, database),
};
if schema == "*" {
Ok((catalog.to_string(), None))
} else {
Ok((catalog.to_string(), Some(schema.to_string())))
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_split_database() {
let result = split_database("catalog-schema").unwrap();
assert_eq!(result, ("catalog".to_string(), Some("schema".to_string())));
let result = split_database("schema").unwrap();
assert_eq!(result, ("greptime".to_string(), Some("schema".to_string())));
let result = split_database("catalog-*").unwrap();
assert_eq!(result, ("catalog".to_string(), None));
let result = split_database("*").unwrap();
assert_eq!(result, ("greptime".to_string(), None));
}
}

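For context, here is a hypothetical use of the new DatabaseClient; the address, catalog, and credentials are made up, and only the constructor and sql_in_public shown above are relied on.

// Hypothetical usage of DatabaseClient (address and credentials are assumptions).
async fn list_databases() -> Result<Option<Vec<Vec<serde_json::Value>>>> {
    let client = DatabaseClient::new(
        "127.0.0.1:4000".to_string(),
        "greptime".to_string(),
        Some("user:password".to_string()),
    );
    // Runs against the `public` schema of the given catalog.
    client.sql_in_public("SHOW DATABASES").await
}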
View File

@@ -13,18 +13,13 @@
// limitations under the License.
use std::collections::HashSet;
use std::path::Path;
use std::path::PathBuf;
use std::sync::Arc;
use async_trait::async_trait;
use base64::engine::general_purpose;
use base64::Engine;
use clap::{Parser, ValueEnum};
use client::DEFAULT_SCHEMA_NAME;
use common_telemetry::{debug, error, info, warn};
use common_telemetry::{debug, error, info};
use serde_json::Value;
use servers::http::greptime_result_v1::GreptimedbV1Response;
use servers::http::GreptimeQueryOutput;
use snafu::{OptionExt, ResultExt};
use tokio::fs::File;
use tokio::io::{AsyncWriteExt, BufWriter};
@@ -32,21 +27,21 @@ use tokio::sync::Semaphore;
use tokio::time::Instant;
use tracing_appender::non_blocking::WorkerGuard;
use crate::cli::{Instance, Tool};
use crate::error::{
EmptyResultSnafu, Error, FileIoSnafu, HttpQuerySqlSnafu, InvalidDatabaseNameSnafu, Result,
SerdeJsonSnafu,
};
use crate::cli::database::DatabaseClient;
use crate::cli::{database, Instance, Tool};
use crate::error::{EmptyResultSnafu, Error, FileIoSnafu, Result, SchemaNotFoundSnafu};
type TableReference = (String, String, String);
#[derive(Debug, Default, Clone, ValueEnum)]
enum ExportTarget {
/// Corresponding to `SHOW CREATE TABLE`
/// Export all table schemas, corresponding to `SHOW CREATE TABLE`.
Schema,
/// Export all table data, corresponding to `COPY DATABASE TO`.
Data,
/// Export all table schemas and data at once.
#[default]
CreateTable,
/// Corresponding to `EXPORT TABLE`
TableData,
All,
}
#[derive(Debug, Default, Parser)]
@@ -72,34 +67,41 @@ pub struct ExportCommand {
max_retry: usize,
/// Things to export
#[clap(long, short = 't', value_enum)]
#[clap(long, short = 't', value_enum, default_value = "all")]
target: ExportTarget,
/// basic authentication for connecting to the server
/// A half-open time range: [start_time, end_time).
/// The start of the time range (time-index column) for data export.
#[clap(long)]
start_time: Option<String>,
/// A half-open time range: [start_time, end_time).
/// The end of the time range (time-index column) for data export.
#[clap(long)]
end_time: Option<String>,
/// The basic authentication for connecting to the server
#[clap(long)]
auth_basic: Option<String>,
}
impl ExportCommand {
pub async fn build(&self, guard: Vec<WorkerGuard>) -> Result<Instance> {
let (catalog, schema) = split_database(&self.database)?;
let (catalog, schema) = database::split_database(&self.database)?;
let auth_header = if let Some(basic) = &self.auth_basic {
let encoded = general_purpose::STANDARD.encode(basic);
Some(format!("basic {}", encoded))
} else {
None
};
let database_client =
DatabaseClient::new(self.addr.clone(), catalog.clone(), self.auth_basic.clone());
Ok(Instance::new(
Box::new(Export {
addr: self.addr.clone(),
catalog,
schema,
database_client,
output_dir: self.output_dir.clone(),
parallelism: self.export_jobs,
target: self.target.clone(),
auth_header,
start_time: self.start_time.clone(),
end_time: self.end_time.clone(),
}),
guard,
))
@@ -107,76 +109,59 @@ impl ExportCommand {
}
pub struct Export {
addr: String,
catalog: String,
schema: Option<String>,
database_client: DatabaseClient,
output_dir: String,
parallelism: usize,
target: ExportTarget,
auth_header: Option<String>,
start_time: Option<String>,
end_time: Option<String>,
}
impl Export {
/// Execute one single sql query.
async fn sql(&self, sql: &str) -> Result<Option<Vec<Vec<Value>>>> {
let url = format!(
"http://{}/v1/sql?db={}-{}&sql={}",
self.addr,
self.catalog,
self.schema.as_deref().unwrap_or(DEFAULT_SCHEMA_NAME),
sql
);
fn catalog_path(&self) -> PathBuf {
PathBuf::from(&self.output_dir).join(&self.catalog)
}
let mut request = reqwest::Client::new()
.get(&url)
.header("Content-Type", "application/x-www-form-urlencoded");
if let Some(ref auth) = self.auth_header {
request = request.header("Authorization", auth);
}
async fn get_db_names(&self) -> Result<Vec<String>> {
let db_names = self.all_db_names().await?;
let Some(schema) = &self.schema else {
return Ok(db_names);
};
let response = request.send().await.with_context(|_| HttpQuerySqlSnafu {
reason: format!("bad url: {}", url),
})?;
let response = response
.error_for_status()
.with_context(|_| HttpQuerySqlSnafu {
reason: format!("query failed: {}", sql),
})?;
let text = response.text().await.with_context(|_| HttpQuerySqlSnafu {
reason: "cannot get response text".to_string(),
})?;
let body = serde_json::from_str::<GreptimedbV1Response>(&text).context(SerdeJsonSnafu)?;
Ok(body.output().first().and_then(|output| match output {
GreptimeQueryOutput::Records(records) => Some(records.rows().clone()),
GreptimeQueryOutput::AffectedRows(_) => None,
}))
// Check if the schema exists
db_names
.into_iter()
.find(|db_name| db_name.to_lowercase() == schema.to_lowercase())
.map(|name| vec![name])
.context(SchemaNotFoundSnafu {
catalog: &self.catalog,
schema,
})
}
/// Iterate over all db names.
///
/// Newbie: `db_name` is catalog + schema.
async fn iter_db_names(&self) -> Result<Vec<(String, String)>> {
if let Some(schema) = &self.schema {
Ok(vec![(self.catalog.clone(), schema.clone())])
} else {
let result = self.sql("show databases").await?;
let Some(records) = result else {
EmptyResultSnafu.fail()?
async fn all_db_names(&self) -> Result<Vec<String>> {
let records = self
.database_client
.sql_in_public("SHOW DATABASES")
.await?
.context(EmptyResultSnafu)?;
let mut result = Vec::with_capacity(records.len());
for value in records {
let Value::String(schema) = &value[0] else {
unreachable!()
};
let mut result = Vec::with_capacity(records.len());
for value in records {
let serde_json::Value::String(schema) = &value[0] else {
unreachable!()
};
if schema == common_catalog::consts::INFORMATION_SCHEMA_NAME {
continue;
}
result.push((self.catalog.clone(), schema.clone()));
if schema == common_catalog::consts::INFORMATION_SCHEMA_NAME {
continue;
}
Ok(result)
if schema == common_catalog::consts::PG_CATALOG_NAME {
continue;
}
result.push(schema.clone());
}
Ok(result)
}
/// Return a list of [`TableReference`] to be exported.
@@ -185,22 +170,29 @@ impl Export {
&self,
catalog: &str,
schema: &str,
) -> Result<(Vec<TableReference>, Vec<TableReference>)> {
) -> Result<(
Vec<TableReference>,
Vec<TableReference>,
Vec<TableReference>,
)> {
// Puts all metric table first
let sql = format!(
"select table_catalog, table_schema, table_name from \
information_schema.columns where column_name = '__tsid' \
and table_catalog = \'{catalog}\' and table_schema = \'{schema}\'"
"SELECT table_catalog, table_schema, table_name \
FROM information_schema.columns \
WHERE column_name = '__tsid' \
and table_catalog = \'{catalog}\' \
and table_schema = \'{schema}\'"
);
let result = self.sql(&sql).await?;
let Some(records) = result else {
EmptyResultSnafu.fail()?
};
let records = self
.database_client
.sql_in_public(&sql)
.await?
.context(EmptyResultSnafu)?;
let mut metric_physical_tables = HashSet::with_capacity(records.len());
for value in records {
let mut t = Vec::with_capacity(3);
for v in &value {
let serde_json::Value::String(value) = v else {
let Value::String(value) = v else {
unreachable!()
};
t.push(value);
@@ -208,93 +200,144 @@ impl Export {
metric_physical_tables.insert((t[0].clone(), t[1].clone(), t[2].clone()));
}
// TODO: SQL injection hurts
let sql = format!(
"select table_catalog, table_schema, table_name from \
information_schema.tables where table_type = \'BASE TABLE\' \
and table_catalog = \'{catalog}\' and table_schema = \'{schema}\'",
"SELECT table_catalog, table_schema, table_name, table_type \
FROM information_schema.tables \
WHERE (table_type = \'BASE TABLE\' OR table_type = \'VIEW\') \
and table_catalog = \'{catalog}\' \
and table_schema = \'{schema}\'",
);
let result = self.sql(&sql).await?;
let Some(records) = result else {
EmptyResultSnafu.fail()?
};
let records = self
.database_client
.sql_in_public(&sql)
.await?
.context(EmptyResultSnafu)?;
debug!("Fetched table list: {:?}", records);
debug!("Fetched table/view list: {:?}", records);
if records.is_empty() {
return Ok((vec![], vec![]));
return Ok((vec![], vec![], vec![]));
}
let mut remaining_tables = Vec::with_capacity(records.len());
let mut views = Vec::new();
for value in records {
let mut t = Vec::with_capacity(3);
let mut t = Vec::with_capacity(4);
for v in &value {
let serde_json::Value::String(value) = v else {
let Value::String(value) = v else {
unreachable!()
};
t.push(value);
}
let table = (t[0].clone(), t[1].clone(), t[2].clone());
let table_type = t[3].as_str();
// Ignores the physical table
if !metric_physical_tables.contains(&table) {
remaining_tables.push(table);
if table_type == "VIEW" {
views.push(table);
} else {
remaining_tables.push(table);
}
}
}
Ok((
metric_physical_tables.into_iter().collect(),
remaining_tables,
views,
))
}
async fn show_create_table(&self, catalog: &str, schema: &str, table: &str) -> Result<String> {
let sql = format!(
r#"show create table "{}"."{}"."{}""#,
catalog, schema, table
);
let result = self.sql(&sql).await?;
let Some(records) = result else {
EmptyResultSnafu.fail()?
async fn show_create(
&self,
show_type: &str,
catalog: &str,
schema: &str,
table: Option<&str>,
) -> Result<String> {
let sql = match table {
Some(table) => format!(
r#"SHOW CREATE {} "{}"."{}"."{}""#,
show_type, catalog, schema, table
),
None => format!(r#"SHOW CREATE {} "{}"."{}""#, show_type, catalog, schema),
};
let serde_json::Value::String(create_table) = &records[0][1] else {
let records = self
.database_client
.sql_in_public(&sql)
.await?
.context(EmptyResultSnafu)?;
let Value::String(create) = &records[0][1] else {
unreachable!()
};
Ok(format!("{};\n", create_table))
Ok(format!("{};\n", create))
}
async fn export_create_database(&self) -> Result<()> {
let timer = Instant::now();
let db_names = self.get_db_names().await?;
let db_count = db_names.len();
for schema in db_names {
let db_dir = self.catalog_path().join(format!("{schema}/"));
tokio::fs::create_dir_all(&db_dir)
.await
.context(FileIoSnafu)?;
let file = db_dir.join("create_database.sql");
let mut file = File::create(file).await.context(FileIoSnafu)?;
let create_database = self
.show_create("DATABASE", &self.catalog, &schema, None)
.await?;
file.write_all(create_database.as_bytes())
.await
.context(FileIoSnafu)?;
}
let elapsed = timer.elapsed();
info!("Success {db_count} jobs, cost: {elapsed:?}");
Ok(())
}
async fn export_create_table(&self) -> Result<()> {
let timer = Instant::now();
let semaphore = Arc::new(Semaphore::new(self.parallelism));
let db_names = self.iter_db_names().await?;
let db_names = self.get_db_names().await?;
let db_count = db_names.len();
let mut tasks = Vec::with_capacity(db_names.len());
for (catalog, schema) in db_names {
for schema in db_names {
let semaphore_moved = semaphore.clone();
tasks.push(async move {
let _permit = semaphore_moved.acquire().await.unwrap();
let (metric_physical_tables, remaining_tables) =
self.get_table_list(&catalog, &schema).await?;
let table_count = metric_physical_tables.len() + remaining_tables.len();
tokio::fs::create_dir_all(&self.output_dir)
let (metric_physical_tables, remaining_tables, views) =
self.get_table_list(&self.catalog, &schema).await?;
let table_count =
metric_physical_tables.len() + remaining_tables.len() + views.len();
let db_dir = self.catalog_path().join(format!("{schema}/"));
tokio::fs::create_dir_all(&db_dir)
.await
.context(FileIoSnafu)?;
let output_file =
Path::new(&self.output_dir).join(format!("{catalog}-{schema}.sql"));
let mut file = File::create(output_file).await.context(FileIoSnafu)?;
let file = db_dir.join("create_tables.sql");
let mut file = File::create(file).await.context(FileIoSnafu)?;
for (c, s, t) in metric_physical_tables.into_iter().chain(remaining_tables) {
match self.show_create_table(&c, &s, &t).await {
Err(e) => {
error!(e; r#"Failed to export table "{}"."{}"."{}""#, c, s, t)
}
Ok(create_table) => {
file.write_all(create_table.as_bytes())
.await
.context(FileIoSnafu)?;
}
}
let create_table = self.show_create("TABLE", &c, &s, Some(&t)).await?;
file.write_all(create_table.as_bytes())
.await
.context(FileIoSnafu)?;
}
info!("finished exporting {catalog}.{schema} with {table_count} tables",);
for (c, s, v) in views {
let create_view = self.show_create("VIEW", &c, &s, Some(&v)).await?;
file.write_all(create_view.as_bytes())
.await
.context(FileIoSnafu)?;
}
info!(
"Finished exporting {}.{schema} with {table_count} table schemas to path: {}",
self.catalog,
db_dir.to_string_lossy()
);
Ok::<(), Error>(())
});
}
@@ -305,92 +348,87 @@ impl Export {
.filter(|r| match r {
Ok(_) => true,
Err(e) => {
error!(e; "export job failed");
error!(e; "export schema job failed");
false
}
})
.count();
let elapsed = timer.elapsed();
info!("Success {success}/{db_count} jobs, cost: {:?}", elapsed);
info!("Success {success}/{db_count} jobs, cost: {elapsed:?}");
Ok(())
}
async fn export_table_data(&self) -> Result<()> {
async fn export_database_data(&self) -> Result<()> {
let timer = Instant::now();
let semaphore = Arc::new(Semaphore::new(self.parallelism));
let db_names = self.iter_db_names().await?;
let db_names = self.get_db_names().await?;
let db_count = db_names.len();
let mut tasks = Vec::with_capacity(db_names.len());
for (catalog, schema) in db_names {
let mut tasks = Vec::with_capacity(db_count);
for schema in db_names {
let semaphore_moved = semaphore.clone();
tasks.push(async move {
let _permit = semaphore_moved.acquire().await.unwrap();
tokio::fs::create_dir_all(&self.output_dir)
let db_dir = self.catalog_path().join(format!("{schema}/"));
tokio::fs::create_dir_all(&db_dir)
.await
.context(FileIoSnafu)?;
let output_dir = Path::new(&self.output_dir).join(format!("{catalog}-{schema}/"));
// Ignores metric physical tables
let (metrics_tables, table_list) = self.get_table_list(&catalog, &schema).await?;
for (_, _, table_name) in metrics_tables {
warn!("Ignores metric physical table: {table_name}");
}
for (catalog_name, schema_name, table_name) in table_list {
// copy table to
let sql = format!(
r#"Copy "{}"."{}"."{}" TO '{}{}.parquet' WITH (format='parquet');"#,
catalog_name,
schema_name,
table_name,
output_dir.to_str().unwrap(),
table_name,
);
info!("Executing sql: {sql}");
self.sql(&sql).await?;
}
info!("Finished exporting {catalog}.{schema} data");
// export copy from sql
let dir_filenames = match output_dir.read_dir() {
Ok(dir) => dir,
Err(_) => {
warn!("empty database {catalog}.{schema}");
return Ok(());
let with_options = match (&self.start_time, &self.end_time) {
(Some(start_time), Some(end_time)) => {
format!(
"WITH (FORMAT='parquet', start_time='{}', end_time='{}')",
start_time, end_time
)
}
(Some(start_time), None) => {
format!("WITH (FORMAT='parquet', start_time='{}')", start_time)
}
(None, Some(end_time)) => {
format!("WITH (FORMAT='parquet', end_time='{}')", end_time)
}
(None, None) => "WITH (FORMAT='parquet')".to_string(),
};
let copy_from_file =
Path::new(&self.output_dir).join(format!("{catalog}-{schema}_copy_from.sql"));
let sql = format!(
r#"COPY DATABASE "{}"."{}" TO '{}' {};"#,
self.catalog,
schema,
db_dir.to_str().unwrap(),
with_options
);
info!("Executing sql: {sql}");
self.database_client.sql_in_public(&sql).await?;
info!(
"Finished exporting {}.{schema} data into path: {}",
self.catalog,
db_dir.to_string_lossy()
);
// The exported COPY FROM SQL, to be used at import time
let copy_from_file = db_dir.join("copy_from.sql");
let mut writer =
BufWriter::new(File::create(copy_from_file).await.context(FileIoSnafu)?);
for table_file in dir_filenames {
let table_file = table_file.unwrap();
let table_name = table_file
.file_name()
.into_string()
.unwrap()
.replace(".parquet", "");
writer
.write(
format!(
"copy {} from '{}' with (format='parquet');\n",
table_name,
table_file.path().to_str().unwrap()
)
.as_bytes(),
)
.await
.context(FileIoSnafu)?;
}
let copy_database_from_sql = format!(
r#"COPY DATABASE "{}"."{}" FROM '{}' WITH (FORMAT='parquet');"#,
self.catalog,
schema,
db_dir.to_str().unwrap()
);
writer
.write(copy_database_from_sql.as_bytes())
.await
.context(FileIoSnafu)?;
writer.flush().await.context(FileIoSnafu)?;
info!("finished exporting {catalog}.{schema} copy_from.sql");
info!("Finished exporting {}.{schema} copy_from.sql", self.catalog);
Ok::<(), Error>(())
});
})
}
let success = futures::future::join_all(tasks)
@@ -399,13 +437,14 @@ impl Export {
.filter(|r| match r {
Ok(_) => true,
Err(e) => {
error!(e; "export job failed");
error!(e; "export database job failed");
false
}
})
.count();
let elapsed = timer.elapsed();
info!("Success {success}/{db_count} jobs, costs: {:?}", elapsed);
info!("Success {success}/{db_count} jobs, costs: {elapsed:?}");
Ok(())
}
@@ -415,26 +454,20 @@ impl Export {
impl Tool for Export {
async fn do_work(&self) -> Result<()> {
match self.target {
ExportTarget::CreateTable => self.export_create_table().await,
ExportTarget::TableData => self.export_table_data().await,
ExportTarget::Schema => {
self.export_create_database().await?;
self.export_create_table().await
}
ExportTarget::Data => self.export_database_data().await,
ExportTarget::All => {
self.export_create_database().await?;
self.export_create_table().await?;
self.export_database_data().await
}
}
}
}
/// Split at `-`.
fn split_database(database: &str) -> Result<(String, Option<String>)> {
let (catalog, schema) = database
.split_once('-')
.with_context(|| InvalidDatabaseNameSnafu {
database: database.to_string(),
})?;
if schema == "*" {
Ok((catalog.to_string(), None))
} else {
Ok((catalog.to_string(), Some(schema.to_string())))
}
}
#[cfg(test)]
mod tests {
use clap::Parser;
@@ -487,7 +520,7 @@ mod tests {
"--output-dir",
&*output_dir.path().to_string_lossy(),
"--target",
"create-table",
"schema",
]);
let mut cli_app = cli.build(LoggingOptions::default()).await?;
cli_app.start().await?;
@@ -496,7 +529,9 @@ mod tests {
let output_file = output_dir
.path()
.join("greptime-cli.export.create_table.sql");
.join("greptime")
.join("cli.export.create_table")
.join("create_tables.sql");
let res = std::fs::read_to_string(output_file).unwrap();
let expect = r#"CREATE TABLE IF NOT EXISTS "a.b.c" (
"ts" TIMESTAMP(3) NOT NULL,

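The data export now emits a single COPY DATABASE ... TO statement per schema, with optional start_time/end_time options forming a half-open range. The small test-style illustration below shows the shape of the generated SQL; the schema name, output path, and timestamps are assumptions.

#[test]
fn copy_database_statement_shape() {
    // Illustration only: assumed schema, output path, and half-open time range.
    let (schema, db_dir) = ("public", "/tmp/greptimedb-backup/greptime/public/");
    let sql = format!(
        r#"COPY DATABASE "greptime"."{schema}" TO '{db_dir}' WITH (FORMAT='parquet', start_time='2024-09-01 00:00:00', end_time='2024-09-02 00:00:00');"#
    );
    assert!(sql.starts_with(r#"COPY DATABASE "greptime"."public""#));
}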
src/cmd/src/cli/import.rs (new file, 218 lines)
View File

@@ -0,0 +1,218 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::path::PathBuf;
use std::sync::Arc;
use async_trait::async_trait;
use clap::{Parser, ValueEnum};
use common_catalog::consts::DEFAULT_SCHEMA_NAME;
use common_telemetry::{error, info, warn};
use snafu::{OptionExt, ResultExt};
use tokio::sync::Semaphore;
use tokio::time::Instant;
use tracing_appender::non_blocking::WorkerGuard;
use crate::cli::database::DatabaseClient;
use crate::cli::{database, Instance, Tool};
use crate::error::{Error, FileIoSnafu, Result, SchemaNotFoundSnafu};
#[derive(Debug, Default, Clone, ValueEnum)]
enum ImportTarget {
/// Import all table schemas into the database.
Schema,
/// Import all table data into the database.
Data,
/// Import all table schemas and data at once.
#[default]
All,
}
#[derive(Debug, Default, Parser)]
pub struct ImportCommand {
/// Server address to connect
#[clap(long)]
addr: String,
/// Directory of the data. E.g.: /tmp/greptimedb-backup
#[clap(long)]
input_dir: String,
/// The name of the catalog to import.
#[clap(long, default_value = "greptime-*")]
database: String,
/// Parallelism of the import.
#[clap(long, short = 'j', default_value = "1")]
import_jobs: usize,
/// Max retry times for each job.
#[clap(long, default_value = "3")]
max_retry: usize,
/// Things to import
#[clap(long, short = 't', value_enum, default_value = "all")]
target: ImportTarget,
/// The basic authentication for connecting to the server
#[clap(long)]
auth_basic: Option<String>,
}
impl ImportCommand {
pub async fn build(&self, guard: Vec<WorkerGuard>) -> Result<Instance> {
let (catalog, schema) = database::split_database(&self.database)?;
let database_client =
DatabaseClient::new(self.addr.clone(), catalog.clone(), self.auth_basic.clone());
Ok(Instance::new(
Box::new(Import {
catalog,
schema,
database_client,
input_dir: self.input_dir.clone(),
parallelism: self.import_jobs,
target: self.target.clone(),
}),
guard,
))
}
}
pub struct Import {
catalog: String,
schema: Option<String>,
database_client: DatabaseClient,
input_dir: String,
parallelism: usize,
target: ImportTarget,
}
impl Import {
async fn import_create_table(&self) -> Result<()> {
// Use the default db to create the other dbs
self.do_sql_job("create_database.sql", Some(DEFAULT_SCHEMA_NAME))
.await?;
self.do_sql_job("create_tables.sql", None).await
}
async fn import_database_data(&self) -> Result<()> {
self.do_sql_job("copy_from.sql", None).await
}
async fn do_sql_job(&self, filename: &str, exec_db: Option<&str>) -> Result<()> {
let timer = Instant::now();
let semaphore = Arc::new(Semaphore::new(self.parallelism));
let db_names = self.get_db_names().await?;
let db_count = db_names.len();
let mut tasks = Vec::with_capacity(db_count);
for schema in db_names {
let semaphore_moved = semaphore.clone();
tasks.push(async move {
let _permit = semaphore_moved.acquire().await.unwrap();
let database_input_dir = self.catalog_path().join(&schema);
let sql_file = database_input_dir.join(filename);
let sql = tokio::fs::read_to_string(sql_file)
.await
.context(FileIoSnafu)?;
if sql.is_empty() {
info!("Empty `{filename}` {database_input_dir:?}");
} else {
let db = exec_db.unwrap_or(&schema);
self.database_client.sql(&sql, db).await?;
info!("Imported `{filename}` for database {schema}");
}
Ok::<(), Error>(())
})
}
let success = futures::future::join_all(tasks)
.await
.into_iter()
.filter(|r| match r {
Ok(_) => true,
Err(e) => {
error!(e; "import {filename} job failed");
false
}
})
.count();
let elapsed = timer.elapsed();
info!("Success {success}/{db_count} `{filename}` jobs, cost: {elapsed:?}");
Ok(())
}
fn catalog_path(&self) -> PathBuf {
PathBuf::from(&self.input_dir).join(&self.catalog)
}
async fn get_db_names(&self) -> Result<Vec<String>> {
let db_names = self.all_db_names().await?;
let Some(schema) = &self.schema else {
return Ok(db_names);
};
// Check if the schema exists
db_names
.into_iter()
.find(|db_name| db_name.to_lowercase() == schema.to_lowercase())
.map(|name| vec![name])
.context(SchemaNotFoundSnafu {
catalog: &self.catalog,
schema,
})
}
// Get all database names in the input directory.
// The directory structure should be like:
// /tmp/greptimedb-backup
// ├── greptime-1
// │ ├── db1
// │ └── db2
async fn all_db_names(&self) -> Result<Vec<String>> {
let mut db_names = vec![];
let path = self.catalog_path();
let mut entries = tokio::fs::read_dir(path).await.context(FileIoSnafu)?;
while let Some(entry) = entries.next_entry().await.context(FileIoSnafu)? {
let path = entry.path();
if path.is_dir() {
let db_name = match path.file_name() {
Some(name) => name.to_string_lossy().to_string(),
None => {
warn!("Failed to get the file name of {:?}", path);
continue;
}
};
db_names.push(db_name);
}
}
Ok(db_names)
}
}
#[async_trait]
impl Tool for Import {
async fn do_work(&self) -> Result<()> {
match self.target {
ImportTarget::Schema => self.import_create_table().await,
ImportTarget::Data => self.import_database_data().await,
ImportTarget::All => {
self.import_create_table().await?;
self.import_database_data().await
}
}
}
}

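A hypothetical command-line invocation for the new import tool, parsed with clap; the address and directory are made up, while the flag names follow the ImportCommand fields above.

// Hypothetical clap invocation (assumed values); flags mirror ImportCommand.
use clap::Parser;

fn sample_import_command() -> ImportCommand {
    ImportCommand::parse_from([
        "import",
        "--addr", "127.0.0.1:4000",
        "--input-dir", "/tmp/greptimedb-backup",
        "--database", "greptime-*",
        "--target", "schema",
    ])
}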
View File

@@ -34,7 +34,6 @@ use common_telemetry::debug;
use either::Either;
use meta_client::client::MetaClientBuilder;
use query::datafusion::DatafusionQueryEngine;
use query::logical_optimizer::LogicalOptimizer;
use query::parser::QueryLanguageParser;
use query::plan::LogicalPlan;
use query::query_engine::{DefaultSerializer, QueryEngineState};
@@ -289,6 +288,7 @@ async fn create_query_engine(meta_addr: &str) -> Result<DatafusionQueryEngine> {
None,
None,
None,
None,
false,
plugins.clone(),
));

View File

@@ -18,6 +18,7 @@ use std::time::Duration;
use async_trait::async_trait;
use catalog::kvbackend::MetaKvBackend;
use clap::Parser;
use common_base::Plugins;
use common_config::Configurable;
use common_telemetry::logging::TracingOptions;
use common_telemetry::{info, warn};
@@ -266,13 +267,14 @@ impl StartCommand {
&opts.component.tracing,
opts.component.node_id.map(|x| x.to_string()),
);
log_versions(version!(), short_version!());
log_versions(version(), short_version(), APP_NAME);
info!("Datanode start command: {:#?}", self);
info!("Datanode options: {:#?}", opts);
let mut opts = opts.component;
let plugins = plugins::setup_datanode_plugins(&mut opts)
let opts = opts.component;
let mut plugins = Plugins::new();
plugins::setup_datanode_plugins(&mut plugins, &opts)
.await
.context(StartDatanodeSnafu)?;
@@ -338,7 +340,7 @@ mod tests {
mode = "distributed"
enable_memory_catalog = false
node_id = 42
rpc_addr = "127.0.0.1:4001"
rpc_hostname = "192.168.0.1"
[grpc]
@@ -365,7 +367,7 @@ mod tests {
mode = "distributed"
enable_memory_catalog = false
node_id = 42
[grpc]
addr = "127.0.0.1:3001"
hostname = "127.0.0.1"

View File

@@ -31,13 +31,6 @@ pub enum Error {
source: common_meta::error::Error,
},
#[snafu(display("Failed to iter stream"))]
IterStream {
#[snafu(implicit)]
location: Location,
source: common_meta::error::Error,
},
#[snafu(display("Failed to init DDL manager"))]
InitDdlManager {
#[snafu(implicit)]
@@ -237,13 +230,6 @@ pub enum Error {
location: Location,
},
#[snafu(display("Failed to start catalog manager"))]
StartCatalogManager {
#[snafu(implicit)]
location: Location,
source: catalog::error::Error,
},
#[snafu(display("Failed to connect to Etcd at {etcd_addr}"))]
ConnectEtcd {
etcd_addr: String,
@@ -253,14 +239,6 @@ pub enum Error {
location: Location,
},
#[snafu(display("Failed to connect server at {addr}"))]
ConnectServer {
addr: String,
source: client::error::Error,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Failed to serde json"))]
SerdeJson {
#[snafu(source)]
@@ -278,12 +256,6 @@ pub enum Error {
location: Location,
},
#[snafu(display("Expect data from output, but got another thing"))]
NotDataFromOutput {
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Empty result from output"))]
EmptyResult {
#[snafu(implicit)]
@@ -298,13 +270,6 @@ pub enum Error {
error: std::io::Error,
},
#[snafu(display("Invalid database name: {}", database))]
InvalidDatabaseName {
#[snafu(implicit)]
location: Location,
database: String,
},
#[snafu(display("Failed to create directory {}", dir))]
CreateDir {
dir: String,
@@ -312,6 +277,12 @@ pub enum Error {
error: std::io::Error,
},
#[snafu(display("Failed to spawn thread"))]
SpawnThread {
#[snafu(source)]
error: std::io::Error,
},
#[snafu(display("Other error"))]
Other {
source: BoxedError,
@@ -347,13 +318,12 @@ pub enum Error {
source: meta_client::error::Error,
},
#[snafu(display("Tonic transport error: {error:?} with msg: {msg:?}"))]
TonicTransport {
#[snafu(display("Cannot find schema {schema} in catalog {catalog}"))]
SchemaNotFound {
catalog: String,
schema: String,
#[snafu(implicit)]
location: Location,
#[snafu(source)]
error: tonic::transport::Error,
msg: Option<String>,
},
}
@@ -371,21 +341,18 @@ impl ErrorExt for Error {
Error::BuildMetaServer { source, .. } => source.status_code(),
Error::UnsupportedSelectorType { source, .. } => source.status_code(),
Error::IterStream { source, .. }
| Error::InitMetadata { source, .. }
| Error::InitDdlManager { source, .. } => source.status_code(),
Error::InitMetadata { source, .. } | Error::InitDdlManager { source, .. } => {
source.status_code()
}
Error::ConnectServer { source, .. } => source.status_code(),
Error::MissingConfig { .. }
| Error::LoadLayeredConfig { .. }
| Error::IllegalConfig { .. }
| Error::InvalidReplCommand { .. }
| Error::InitTimezone { .. }
| Error::ConnectEtcd { .. }
| Error::NotDataFromOutput { .. }
| Error::CreateDir { .. }
| Error::EmptyResult { .. }
| Error::InvalidDatabaseName { .. } => StatusCode::InvalidArguments,
| Error::EmptyResult { .. } => StatusCode::InvalidArguments,
Error::StartProcedureManager { source, .. }
| Error::StopProcedureManager { source, .. } => source.status_code(),
@@ -401,9 +368,10 @@ impl ErrorExt for Error {
source.status_code()
}
Error::SubstraitEncodeLogicalPlan { source, .. } => source.status_code(),
Error::StartCatalogManager { source, .. } => source.status_code(),
Error::SerdeJson { .. } | Error::FileIo { .. } => StatusCode::Unexpected,
Error::SerdeJson { .. } | Error::FileIo { .. } | Error::SpawnThread { .. } => {
StatusCode::Unexpected
}
Error::Other { source, .. } => source.status_code(),
@@ -414,7 +382,7 @@ impl ErrorExt for Error {
source.status_code()
}
Error::MetaClientInit { source, .. } => source.status_code(),
Error::TonicTransport { .. } => StatusCode::Internal,
Error::SchemaNotFound { .. } => StatusCode::DatabaseNotFound,
}
}

View File

@@ -24,6 +24,7 @@ use common_grpc::channel_manager::ChannelConfig;
use common_meta::cache::{CacheRegistryBuilder, LayeredCacheRegistryBuilder};
use common_meta::heartbeat::handler::parse_mailbox_message::ParseMailboxMessageHandler;
use common_meta::heartbeat::handler::HandlerGroupExecutor;
use common_meta::key::flow::FlowMetadataManager;
use common_meta::key::TableMetadataManager;
use common_telemetry::info;
use common_telemetry::logging::TracingOptions;
@@ -214,7 +215,7 @@ impl StartCommand {
&opts.component.tracing,
opts.component.node_id.map(|x| x.to_string()),
);
log_versions(version!(), short_version!());
log_versions(version(), short_version(), APP_NAME);
info!("Flownode start command: {:#?}", self);
info!("Flownode options: {:#?}", opts);
@@ -296,11 +297,13 @@ impl StartCommand {
Arc::new(executor),
);
let flow_metadata_manager = Arc::new(FlowMetadataManager::new(cached_meta_backend.clone()));
let flownode_builder = FlownodeBuilder::new(
opts,
Plugins::new(),
table_metadata_manager,
catalog_manager.clone(),
flow_metadata_manager,
)
.with_heartbeat_task(heartbeat_task);

View File

@@ -20,6 +20,7 @@ use cache::{build_fundamental_cache_registry, with_default_composite_cache_regis
use catalog::kvbackend::{CachedMetaKvBackendBuilder, KvBackendCatalogManager, MetaKvBackend};
use clap::Parser;
use client::client_manager::NodeClients;
use common_base::Plugins;
use common_config::Configurable;
use common_grpc::channel_manager::ChannelConfig;
use common_meta::cache::{CacheRegistryBuilder, LayeredCacheRegistryBuilder};
@@ -242,10 +243,11 @@ impl StartCommand {
.get_or_insert_with(MetaClientOptions::default)
.metasrv_addrs
.clone_from(metasrv_addrs);
opts.mode = Mode::Distributed;
}
opts.user_provider.clone_from(&self.user_provider);
if let Some(user_provider) = &self.user_provider {
opts.user_provider = Some(user_provider.clone());
}
Ok(())
}
@@ -259,14 +261,14 @@ impl StartCommand {
&opts.component.tracing,
opts.component.node_id.clone(),
);
log_versions(version!(), short_version!());
log_versions(version(), short_version(), APP_NAME);
info!("Frontend start command: {:#?}", self);
info!("Frontend options: {:#?}", opts);
let mut opts = opts.component;
#[allow(clippy::unnecessary_mut_passed)]
let plugins = plugins::setup_frontend_plugins(&mut opts)
let opts = opts.component;
let mut plugins = Plugins::new();
plugins::setup_frontend_plugins(&mut plugins, &opts)
.await
.context(StartFrontendSnafu)?;
@@ -314,7 +316,7 @@ impl StartCommand {
);
let catalog_manager = KvBackendCatalogManager::new(
opts.mode,
Mode::Distributed,
Some(meta_client.clone()),
cached_meta_backend.clone(),
layered_cache_registry.clone(),
@@ -445,7 +447,6 @@ mod tests {
let fe_opts = command.load_options(&Default::default()).unwrap().component;
assert_eq!(Mode::Distributed, fe_opts.mode);
assert_eq!("127.0.0.1:4000".to_string(), fe_opts.http.addr);
assert_eq!(Duration::from_secs(30), fe_opts.http.timeout);
@@ -458,7 +459,7 @@ mod tests {
#[tokio::test]
async fn test_try_from_start_command_to_anymap() {
let mut fe_opts = frontend::frontend::FrontendOptions {
let fe_opts = frontend::frontend::FrontendOptions {
http: HttpOptions {
disable_dashboard: false,
..Default::default()
@@ -467,8 +468,10 @@ mod tests {
..Default::default()
};
#[allow(clippy::unnecessary_mut_passed)]
let plugins = plugins::setup_frontend_plugins(&mut fe_opts).await.unwrap();
let mut plugins = Plugins::new();
plugins::setup_frontend_plugins(&mut plugins, &fe_opts)
.await
.unwrap();
let provider = plugins.get::<UserProviderRef>().unwrap();
let result = provider

Some files were not shown because too many files have changed in this diff.