Compare commits


216 Commits

Author SHA1 Message Date
liyang
4b580f4037 feat: release binary to aws s3 (#1881) 2023-07-04 22:33:35 +08:00
Weny Xu
ee16262b45 feat: add create table procedure (#1845)
* feat: add create table procedure

* feat: change table_info type from vec u8 to RawTableInfo

* feat: return create table status

* fix: fix uncaught error

* refactor: use a notifier to respond to callers

* chore: apply suggestions from CR

* chore: apply suggestions from CR

* chore: add comment

* chore: apply suggestions from CR

* refactor: make CreateMetadata step after DatanodeCreateTable step
2023-07-04 22:24:43 +08:00
Yingwen
f37b394f1a fix: check table existence in create table procedure (#1880)
* fix: check table existence in table procedures

* fix: use correct error variant

* chore: address review comments

* chore: address comments

* test: change error code
2023-07-04 22:01:27 +08:00
Eugene Tolbakov
ccee60f37d feat(http_body_limit): add initial support for DefaultBodyLimit (#1860)
* feat(http_body_limit): add initial support for DefaultBodyLimit

* fix: address CR suggestions

* fix: adjust the const for default http body limit

* fix: adjust the toml_str for the test

* fix: address CR suggestions

* fix: body_limit units in example config toml files

* fix: address clippy suggestions
2023-07-04 20:56:56 +08:00
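The commit above wires axum's DefaultBodyLimit into the HTTP server; a later commit in this range enlarges the default limit to 64M. As a rough illustration only (route path, handler and constant names are hypothetical, not GreptimeDB's actual code), the layer is applied like this:

```rust
use axum::{extract::DefaultBodyLimit, routing::post, Router};

// Hypothetical constant; the 64 MiB figure mirrors the later "enlarge default body limit" commit.
const DEFAULT_BODY_LIMIT: usize = 64 * 1024 * 1024;

async fn handle_sql(body: String) -> String {
    // Placeholder handler; the real server parses and executes the request body.
    body
}

fn router() -> Router {
    Router::new()
        .route("/v1/sql", post(handle_sql))
        // Reject request bodies larger than the configured limit.
        .layer(DefaultBodyLimit::max(DEFAULT_BODY_LIMIT))
}
```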
Ruihang Xia
bee8323bae chore: bump sqlness to 0.5.0 (#1877)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-07-04 19:49:12 +08:00
Weny Xu
000df8cf1e feat: add ddl client (#1856)
* feat: add ddl client

* chore: apply suggestions from CR

* chore: apply suggestions from CR
2023-07-04 19:32:02 +08:00
Yingwen
884731a2c8 chore: initialize mito2 crate (#1875) 2023-07-04 17:55:00 +08:00
shuiyisong
2922c25a16 chore: stop caching None in CachedMetaKvBackend (#1871)
* chore: don't cache None

* fix: test case

* chore: add comment

* chore: minor rewrite
2023-07-04 17:17:48 +08:00
Lei, HUANG
4dec06ec86 chore: bump version 0.3.2 (#1876)
bump version 0.3.2
2023-07-04 17:04:27 +08:00
Lei, HUANG
3b6f70cde3 feat: initial twcs impl (#1851)
* feat: initial twcs impl

* chore: rename SimplePicker to LeveledPicker

* rename some structs

* Remove Compaction strategy

* make compaction picker a trait object

* make compaction picker configurable for every region

* chore: add some test for ttl

* add some tests

* fix: some style issues in cr

* feat: enable twcs when creating tables

* feat: allow config time window when creating tables

* fix: some cr comments
2023-07-04 16:42:27 +08:00
Yingwen
b8e92292d2 feat: Implement a new scan mode using a chain reader (#1857)
* feat: add log

* feat: print more info

* feat: use chain reader

* fix: panic on getting first range

* fix: prev not updated

* fix: reverse readers and iter backward

* chore: don't print windows in log

* feat: consider memtable range

Also fix the issue of using an incorrect comparison method to sort time ranges.

* fix: merge memtable window with sst's

* feat: add use_chain_reader option

* feat: skip empty memtables

* chore: change log level

* fix: memtable range not ordered

* style: fix clippy

* chore: address review comments

* chore: print region id in log
2023-07-04 16:01:34 +08:00
Ruihang Xia
746fe8b4fe fix: use mark-deletion for system catalog (#1874)
* fix: use mark-deletion for system catalog

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix the default value

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* clean tables

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-07-04 16:00:39 +08:00
JeremyHi
20f2fc4a2a feat: add leader kv store cache for metadata (#1853)
* feat: add leader kv store cache for metadata

* refactor: create cache internal

* fix: race condition

* fix: race condition on read
2023-07-04 15:49:42 +08:00
Yingwen
2ef84f64f1 feat(servers): enlarge default body limit to 64M (#1873) 2023-07-04 07:13:14 +00:00
fys
451cc02d8d chore: add feature for metrics-process, default enable (#1870)
chore: add feature for metrics process, default enable
2023-07-04 13:28:33 +08:00
Lei, HUANG
b466ef6cb6 fix: libz dependency (#1867) 2023-07-03 10:08:53 +00:00
LFC
5b42e15105 refactor: add TableInfoKey and TableRegionKey (#1865)
* refactor: add TableInfoKey and TableRegionKey

* refactor: move KvBackend to common-meta

* fix: resolve PR comments
2023-07-03 18:01:20 +08:00
shuiyisong
e1bb7acfe5 fix: return err msg if use wrong database in MySQL (#1866) 2023-07-03 17:31:09 +08:00
Lei, HUANG
2c0c4672b4 feat: support building binary for centos7 (#1863)
feat: support building binary for centos7
2023-07-03 14:13:55 +08:00
Cao Zhengjia
e54415e723 feat: Make heartbeat intervals configurable in Frontend and Datanode (#1864)
* update frontend options and config

* fix format
2023-07-03 12:08:47 +08:00
Ruihang Xia
783a794060 fix: break CI again 🥲 (#1859)
* fix information schema case

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* disable -Wunused_result lint

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-06-30 20:01:14 +08:00
Vanish
563f6e05e2 feat: remove all the manifests in drop_region. (#1834)
* feat: drop_region delete manifest file

* chore: remove redundant code

* chore: fmt

* chore: clippy

* chore: clippy

* feat: support delete_all in manifest.

* chore: CR

* test: test_drop_basic, test_drop_reopen

* chore: cr

* fix: typo

* chore: cr
2023-06-30 17:42:11 +08:00
Ruihang Xia
25cb667470 fix: sort unstable sqlness result (#1858)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-06-30 09:25:24 +00:00
Ruihang Xia
c77b94650c refactor: remove Table::scan method (#1855)
* remove scan method

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* clean up

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-06-30 12:13:14 +08:00
Ruihang Xia
605776f49c feat: support bool operator with other computation (#1844)
* add some cases

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix sqlness test

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* impl atan2 and power

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix instant manipulator

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix clippy

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-06-29 19:23:54 +08:00
Ruihang Xia
d45e7b7480 refactor: build parquet file stream from ParquetExec (#1852)
* refactor: build parquet file stream from ParquetExec

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* rename sqlness case

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-06-29 19:19:31 +08:00
JeremyHi
2b3ca1309a feat: table_routes util (#1849) 2023-06-29 16:47:56 +08:00
Weny Xu
acfa229641 chore: bump orc-rust to 0319acd (#1847) 2023-06-29 10:45:05 +08:00
JeremyHi
7e23dd7714 feat: http api for node-lease (#1843)
* feat: add node-lease http api

* revert: show_create.result
2023-06-29 09:34:54 +08:00
Lei, HUANG
559d1f73a2 feat: push all possible filters down to parquet exec (#1839)
* feat: push all possible filters down to parquet exec

* fix: project

* test: add ut for DatafusionArrowPredicate

* fix: according to CR comments
2023-06-28 20:14:37 +08:00
JeremyHi
bc33fdc8ef feat: save node lease into memory (#1841)
* feat: lease secs = 5

* feat: set lease data into memory of leader

* fix: ignore stale heartbeat

* Update src/meta-srv/src/election.rs

Co-authored-by: LFC <bayinamine@gmail.com>

---------

Co-authored-by: LFC <bayinamine@gmail.com>
2023-06-28 11:54:06 +08:00
Lei, HUANG
f287d3115b chore: replace result assertions (#1840)
* s/assert!\((.*)\.is_ok\(\)\);/\1.unwrap\(\);/g

* s/assert!\((.*)\.is_some\(\)\);/\1.unwrap\(\);/g
2023-06-27 19:14:48 +08:00
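The two commit messages above are literal sed substitutions. Expressed in Rust, the rewrite they perform looks roughly like this (values are hypothetical):

```rust
// Before: a failing test only reports "assertion failed: result.is_ok()".
// After: unwrap() panics with the Debug form of the error (or of the unexpected None),
// so the test output shows the actual cause.
fn check(result: Result<u32, String>, maybe: Option<u32>) {
    // assert!(result.is_ok());   // old style
    // assert!(maybe.is_some());  // old style
    result.unwrap();
    maybe.unwrap();
}
```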
Ruihang Xia
b737a240de fix: add sqlness tests for some promql function (#1838)
* correct range manipulate exec fmt text

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix partition requirement

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix udf signature

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* finalise

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* ignore unstable ordered result

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add nan value test

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-06-27 19:05:26 +08:00
fys
99f0479bd2 feat: improve influxdb v2 api compatibility (#1831)
* feat: support influxdb v2 api

* cr
2023-06-27 18:21:51 +08:00
fys
313121f2ae fix: block when stream insert (#1835)
* fix: stream insert blocking

* fix: example link

* chore: Increase the default channel size "1024" -> "65536"
2023-06-27 16:57:03 +08:00
LFC
fcff66e039 chore: deny unused results (#1825)
* chore: deny unused results

* rebase
2023-06-27 15:33:53 +08:00
shuiyisong
03057cab6c feat: physical plan wrapper (#1837)
* test: add physical plan wrapper trait

* test: add plugins to datanode initialization

* test: add plugins to datanode initialization

* chore: add metrics method

* chore: update meter-core version

* chore: remove unused code

* chore: impl metrics method on df execution plan adapter

* chore: minor comment fix

* chore: add retry in create table

* chore: shrink keep lease handler buffer

* chore: add etcd batch size warn

* chore: try shrink

* Revert "chore: try shrink"

This reverts commit 0361b51670.

* chore: add create table backup time

* add metrics in some interfaces

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* calc elapsed time and rows

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* chore: remove timer in scan batch

* chore: add back stream metrics wrapper

* chore: add timer to ready poll

* chore: minor update

* chore: try using df_plan.metrics()

* chore: remove table scan timer

* chore: remove scan timer

* chore: add debug log

* Revert "chore: add debug log"

This reverts commit 672a0138fd.

* chore: use batch size as row count

* chore: use batch size as row count

* chore: tune code for pr

* chore: rename to physical plan wrapper

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
Co-authored-by: Ruihang Xia <waynestxia@gmail.com>
2023-06-27 14:04:04 +08:00
Weny Xu
dcfce49cff refactor(datanode): move Instance heartbeat task to Datanode struct (#1832)
* refactor(datanode): move Instance heartbeat to Datanode struct

* chore: apply suggestions from CR

* fix: start heartbeat task after instance starts
2023-06-27 12:32:20 +08:00
JeremyHi
78b07996b1 feat: txn for meta (#1828)
* feat: txn for meta kvstore

* feat: txn

* chore: add unit test

* chore: more test

* chore: more test

* Update src/meta-srv/src/service/store/memory.rs

Co-authored-by: LFC <bayinamine@gmail.com>

* chore: by cr

---------

Co-authored-by: LFC <bayinamine@gmail.com>
2023-06-26 17:12:48 +08:00
dennis zhuang
034564fd27 feat: make blob(binary) type working (#1818)
* feat: test blob type

* feat: make blob type working

* chore: comment

* Update src/sql/src/statements/insert.rs

Co-authored-by: Lei, HUANG <6406592+v0y4g3r@users.noreply.github.com>

* chore: by CR comments

* fix: comment

* Update src/sql/src/statements/insert.rs

Co-authored-by: Ruihang Xia <waynestxia@gmail.com>

* Update src/sql/src/statements/insert.rs

Co-authored-by: Ruihang Xia <waynestxia@gmail.com>

* fix: test

---------

Co-authored-by: Lei, HUANG <6406592+v0y4g3r@users.noreply.github.com>
Co-authored-by: Ruihang Xia <waynestxia@gmail.com>
2023-06-26 08:49:04 +00:00
Ruihang Xia
a95f8767a8 refactor: merge catalog provider & schema provider into catalog manager (#1803)
* move  to expr_factory

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* move configs into service_config

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* move GrpcQueryHandler into distributed.rs

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix compile and test in catalog sub-crate

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* clean up

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix table-procedure compile and test

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix query compile and tests

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix datanode compile and tests

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix catalog/query/script/servers compile

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix frontend compile

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix nextest except information_schema

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* support information_schema

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix sqlness test

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix merge errors

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* remove other structs

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* clean up

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix format

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* change deregister_table's return type to empty tuple

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-06-26 15:08:59 +08:00
Eugene Tolbakov
964d26e415 fix: docker build for aarch64 (#1826) 2023-06-25 18:29:00 +09:00
Yingwen
fd412b7b07 refactor!: Uses table id to locate tables in table engines (#1817)
* refactor: add table_id to get_table()/table_exists()

* refactor: Add table_id to alter table request

* refactor: Add table id to DropTableRequest

* refactor: add table id to DropTableRequest

* refactor: Use table id as key for the tables map

* refactor: use table id as file engine's map key

* refactor: Remove table reference from engine's get_table/table_exists

* style: remove unused imports

* feat!: Add table id to TableRegionalValue

* style: fix clippy

* chore: add comments and logs
2023-06-25 15:05:20 +08:00
Weny Xu
223cf31409 feat: support to copy from orc format (#1814)
* feat: support to copy from orc format

* test: add copy from orc test

* chore: add license header

* refactor: remove unimplemented macro

* chore: apply suggestions from CR

* chore: bump orc-rust to 0.2.3
2023-06-25 14:07:16 +08:00
Ruihang Xia
62f660e439 feat: implement metrics for Scan plan (#1812)
* add metrics in some interfaces

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* calc elapsed time and rows

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-06-25 14:06:50 +08:00
Lei, HUANG
0fb18245b8 fix: docker build (#1822) 2023-06-25 11:05:46 +08:00
Weny Xu
caed6879e6 refactor: remove redundant code (#1821) 2023-06-25 10:56:31 +08:00
Yingwen
5ab0747092 test(storage): wait task before checking scheduled task num (#1811) 2023-06-21 18:04:34 +08:00
Ruihang Xia
b1ccc7ef5d fix: prevent filter pushdown in distributed planner (#1806)
* fix: prevent filter pushdown in distributed planner

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix metadata

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-06-21 16:25:50 +08:00
Lei, HUANG
d1b5ce0d35 chore: check catalog deregister result (#1810)
* chore: check deregister result and return error on failure

* refactor: SystemCatalog::deregister_table returns Result<()>
2023-06-21 08:09:11 +00:00
Lei, HUANG
a314993ab4 chore: change logstore default config (#1809) 2023-06-21 07:34:24 +00:00
LFC
fa522bc579 fix: drop region alive countdown tasks when deregistering table (#1808) 2023-06-21 14:49:32 +08:00
Lei, HUANG
5335203360 feat: support cross compilation to aarch64 linux (#1802) 2023-06-21 14:08:45 +08:00
Ruihang Xia
23bf55a265 fix: __field__ matcher on single value column (#1805)
* fix error text and field_column_names

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add sqlness test

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add empty line

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* improve style

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-06-21 10:59:58 +08:00
Eugene Tolbakov
3b91fc2c64 feat: add initial implementation for status endpoint (#1789)
* feat: add initial implementation for status endpoint

* feat(status_endpoint): add more data to response

* feat(status_endpoint): use build data env vars

* feat(status_endpoint): add simple test

* fix(status_endpoint): adjust the toml indentation
2023-06-21 10:50:08 +08:00
LFC
6205616301 fix: filter table regional values with the current node id (#1800) 2023-06-20 19:17:35 +08:00
JeremyHi
e47ef1f0d2 chore: minor fix (#1801) 2023-06-20 11:03:52 +00:00
Lei, HUANG
16c1ee2618 feat: incremental database backup (#1240)
* feat: incremental database backup

* chore: rebase develop

* chore: move backup to StatementExecutor

* feat: copy database parser

* chore: remove some todos

* chore: use timestamp string instead of i64 string

* fix: typo
2023-06-20 18:26:55 +08:00
JeremyHi
323e2aed07 feat: deal with more than 128 txn (#1799) 2023-06-20 17:56:45 +08:00
LFC
cbc2620a59 feat: start region alive keepers (#1796)
* feat: start region alive keepers
2023-06-20 15:45:29 +08:00
JeremyHi
4fdee5ea3c feat: deal with node epoch (#1795)
* feat: deal with node epoch

* feat: dn send node_epoch

* Update src/meta-srv/src/handler/persist_stats_handler.rs

Co-authored-by: dennis zhuang <killme2008@gmail.com>

* Update src/meta-srv/src/service/store/ext.rs

Co-authored-by: dennis zhuang <killme2008@gmail.com>

* chore: by cr

---------

Co-authored-by: dennis zhuang <killme2008@gmail.com>
2023-06-20 07:07:05 +00:00
dennis zhuang
30472cebae feat: prepare supports caching logical plan and infering param types (#1776)
* feat: change do_describe function signature

* feat: infer param type and cache logical plan for mysql prepared statements

* fix: convert_value

* fix: forgot helper

* chore: comments

* fix: typo

* test: add more tests and test date, datatime in mysql

* chore: fix CR comments

* chore: add location

* chore: by CR comments

* Update tests-integration/tests/sql.rs

Co-authored-by: Ruihang Xia <waynestxia@gmail.com>

* chore: remove the trace

---------

Co-authored-by: Ruihang Xia <waynestxia@gmail.com>
2023-06-20 04:07:28 +00:00
Ruihang Xia
903f02bf10 ci: optimize release progress (#1794)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-06-20 11:39:53 +08:00
JeremyHi
1703e93e15 feat: add handler execution timer (#1791)
* feat: add handler execution timer

* fix: by cr
2023-06-20 11:25:13 +08:00
LFC
2dd86b686f feat: extend region leases in Metasrv (#1784)
* feat: extend region leases in Metasrv

* fix: resolve PR comments
2023-06-19 19:55:59 +08:00
LFC
128c6ec98c feat: region alive keeper in Datanode (#1780) 2023-06-19 14:50:33 +08:00
Lei, HUANG
960b84262b fix: abort parquet writer (#1785)
* fix: sst file size

* fix: avoid creating file when no row's been written

* chore: rename tests

* fix: some clippy issues

* fix: some cr comments
2023-06-19 03:19:31 +00:00
Lei, HUANG
69854c07c5 fix: wait for compaction task to finish (#1783) 2023-06-16 16:45:06 +08:00
JeremyHi
1eeb5b4330 feat: disable_region_failover option for metasrv (#1777) 2023-06-15 16:26:27 +08:00
LFC
9b3037fe97 feat: a countdown task for closing region in Datanode (#1775) 2023-06-14 15:50:21 +08:00
dennis zhuang
09747ea206 feat: use DataFrame to replace SQL for Prometheus remote read (#1774)
* feat: debug QueryEngineState

* feat: impl read_table to create DataFrame for a table

* fix: clippy warnings

* feat: use DataFrame to handle prometheus remote read queries

* Update src/frontend/src/instance/prometheus.rs

Co-authored-by: LFC <bayinamine@gmail.com>

* chore: CR comments

---------

Co-authored-by: LFC <bayinamine@gmail.com>
2023-06-14 07:39:28 +00:00
Lei, HUANG
fb35e09072 chore: fix compaction caused race condition (#1767)
fix: unit tests. For real, this time.
2023-06-13 21:03:09 +08:00
Weny Xu
803940cfa4 feat: enable azblob tests (#1765)
* feat: enable azblob tests

* fix: add missing arg
2023-06-13 07:44:57 +00:00
Weny Xu
420ae054b3 chore: add debug log for heartbeat (#1770) 2023-06-13 07:43:26 +00:00
Lei, HUANG
0f1e061f24 fix: compile issue on develop and workaround to fix failing tests cau… (#1771)
* fix: compile issue on develop and workaround to fix failing tests caused by logstore file lock

* Apply suggestions from code review

Co-authored-by: JeremyHi <jiachun_feng@proton.me>

---------

Co-authored-by: JeremyHi <jiachun_feng@proton.me>
2023-06-13 07:30:16 +00:00
Lei, HUANG
7961de25ad feat: persist compaction time window (#1757)
* feat: persist compaction time window

* refactor: remove useless compaction window fields

* chore: revert some useless change

* fix: some CR comments

* fix: comment out unstable sqlness test

* revert commented sqlness
2023-06-13 10:15:42 +08:00
Lei, HUANG
f7d98e533b chore: fix compaction caused race condition (#1759)
* fix: set max_files_in_l0 in unit tests to avoid compaction

* refactor: pass whole EngineConfig

* fix: comment out unstable sqlness test

* revert commented sqlness
2023-06-12 11:19:42 +00:00
Ruihang Xia
b540d640cf fix: unstable order with union operation (#1763)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-06-12 18:16:24 +08:00
Eugene Tolbakov
51a4d660b7 feat(to_unixtime): add timestamp types as arguments (#1632)
* feat(to_unixtime): add timestamp types as arguments

* feat(to_unixtime): change the return type

* feat(to_unixtime): address code review issues

* feat(to_unixtime): fix fmt issue
2023-06-12 17:21:49 +08:00
Ruihang Xia
1b2381502e fix: bring EnforceSorting rule forward (#1754)
* fix: bring EnforceSorting rule forward

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* remove duplicated rules

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* wrap remove logic into a method

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-06-12 07:29:08 +00:00
Yingwen
0e937be3f5 fix(storage): Use region_write_buffer_size as default value (#1760) 2023-06-12 15:05:17 +08:00
Weny Xu
564c183607 chore: make MetaKvBackend public (#1761) 2023-06-12 14:13:26 +08:00
Ruihang Xia
8c78368374 refactor: replace #[snafu(backtrace)] with Location (#1753)
* remove snafu backtrace

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* clean up

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-06-12 11:55:33 +08:00
Lei, HUANG
67c16dd631 feat: optimize some parquet writer parameter (#1758) 2023-06-12 11:46:45 +08:00
Lei, HUANG
ddcee052b2 fix: order by optimization (#1748)
* add some debug log

* fix: use lazy parquet reader in MitoTable::scan_to_stream to avoid IO in plan stage

* fix: unit tests

* fix: order-by optimization

* add some tests

* fix: move metric names to metrics.rs

* fix: some cr comments
2023-06-12 11:45:43 +08:00
王听正
7efcf868d5 refactor: Remove MySQL related options from Datanode (#1756)
* refactor: Remove MySQL related options from Datanode

remove mysql_addr and mysql_runtime_size in datanode.rs, remove command line argument mysql_addr in cmd/src/datanode.rs

#1739

* feat: remove --mysql-addr from command line

in pre-commit, sqlness cannot find --mysql-addr because we removed it

issue#1739

* refactor: remove --mysql-addr from command line

in pre-commit, sqlness cannot find --mysql-addr because we removed it

issue#1739
2023-06-12 11:00:24 +08:00
dennis zhuang
f08f726bec test: s3 manifest (#1755)
* feat: change default manifest options

* test: s3 manifest

* feat: revert checkpoint_margin to 10

* Update src/object-store/src/test_util.rs

Co-authored-by: Lei, HUANG <6406592+v0y4g3r@users.noreply.github.com>

---------

Co-authored-by: Lei, HUANG <6406592+v0y4g3r@users.noreply.github.com>
2023-06-09 10:28:41 +00:00
Ning Sun
7437820bdc ci: correct data type for input and event check (#1752) 2023-06-09 13:59:56 +08:00
Lei, HUANG
910c950717 fix: jemalloc error does not implement Error (#1747) 2023-06-09 04:00:50 +00:00
Zou Wei
f91cd250f8 feat: make version() show greptime info. (#1749)
* feat: impl get_version() to return greptime info.

* fix: refactor test case.
2023-06-09 11:38:52 +08:00
Yingwen
115d9eea8d chore: Log version and arguments (#1744) 2023-06-09 11:38:08 +08:00
Ning Sun
bc8f236806 ci: fix using env in job.if context (#1751) 2023-06-09 11:28:29 +08:00
Yiran
fdbda51c25 chore: update document links in README.md (#1745) 2023-06-09 10:05:24 +08:00
Ning Sun
e184826353 ci: allow triggering nightly release manually (#1746)
ci: allow triggering nightly manually
2023-06-09 10:04:44 +08:00
Yingwen
5b8e54e60e feat: Add HTTP API for cpu profiling (#1694)
* chore: print source error in mem-prof

* feat(common-pprof): add pprof crate

* feat(servers): Add pprof handler to router

refactor the mem_prof handler to avoid checking feature while
registering router

* feat(servers): pprof handler support different output type

* docs(common-pprof): Add readme

* feat(common-pprof): Build guard using code in pprof-rs's example

* feat(common-pprof): use prost

* feat: don't add timeout to perf api

* feat: add feature pprof

* feat: update readme

* test: fix tests

* feat: close region in TestBase

* feat(pprof): address comments
2023-06-07 15:25:16 +08:00
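The commit above adds a CPU-profiling HTTP API built on pprof-rs, following pprof-rs's own example as the message notes. A minimal sketch of such a handler (assuming the pprof crate with its flamegraph feature; the sampling rate, window and output format here are illustrative, not the actual common-pprof API):

```rust
use std::time::Duration;

// Sample the current process for a short window and render a flamegraph.
// An HTTP handler would typically run this on a blocking thread (e.g. spawn_blocking).
fn cpu_profile_flamegraph(seconds: u64) -> Result<Vec<u8>, pprof::Error> {
    let guard = pprof::ProfilerGuard::new(99)?; // 99 Hz sampling
    std::thread::sleep(Duration::from_secs(seconds)); // profiling window
    let report = guard.report().build()?;
    let mut body = Vec::new();
    report.flamegraph(&mut body)?; // protobuf output via prost is another option, as the commit notes
    Ok(body)
}
```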
Lei, HUANG
8cda1635cc feat: make jemalloc the default allocator (#1733)
* feat: add jemalloc metrics

* fix: dep format
2023-06-06 12:11:22 +00:00
Lei, HUANG
f63ddb57c3 fix: parquet time range predicate panic (#1735)
fix: parquet reader should use store schema to build time range predicate
2023-06-06 19:11:45 +08:00
fys
d2a8fd9890 feat: add route admin api in metasrv (#1734)
* feat: add route admin api in metasrv

* fix: add license
2023-06-06 18:00:02 +08:00
LFC
91026a6820 chore: clean up some of my todos (#1723)
* chore: clean up some of my todos

* fix: ci
2023-06-06 17:25:04 +08:00
Ruihang Xia
7a60bfec2a fix: empty result type on prom query endpoint (#1732)
* adjust return type

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add test case

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-06-06 15:40:54 +08:00
Niwaka
a103614fd2 feat: support /api/v1/series for Prometheus (#1620)
* feat: support /api/v1/series for Prometheus

* chore: error handling

* feat: update tests
2023-06-06 10:29:16 +08:00
Yingwen
1b4976b077 feat: Adds some metrics for write path and flush (#1726)
* feat: more metrics

* feat: Add preprocess elapsed

* chore(storage): rename metric

* test: fix tests
2023-06-05 21:35:44 +08:00
Lei, HUANG
166fb8871e chore: bump greptimedb version 0.4.0 (#1724) 2023-06-05 18:41:53 +08:00
Yingwen
466f258266 feat(servers): collect samples by metric (#1706) 2023-06-03 17:17:52 +08:00
Ruihang Xia
94228285a7 feat: convert values to vector directly (#1704)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-06-03 12:41:13 +08:00
JeremyHi
3d7185749d feat: insert with stream (#1703)
* feat: insert with stream

* chore: by CR
2023-06-03 03:58:00 +00:00
LFC
5004cf6d9a feat: make grpc insert requests in a batch (#1687)
* feat: make Prometheus remote write in a batch

* rebase

* fix: resolve PR comments

* fix: resolve PR comments

* fix: resolve PR comments
2023-06-02 09:06:48 +00:00
Ruihang Xia
8e69aef973 feat: serialize/deserialize support for PromQL plans (#1684)
* implement serializer

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix clippy and CR comments

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix compile error

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* register registry

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* enable promql plan for dist planner

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-06-02 16:14:05 +08:00
Ruihang Xia
2615718999 feat: merge scan for distributed execution (#1660)
* generate exec plan

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* move DatanodeClients to client crate

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* wip MergeScanExec::to_stream

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix compile errors

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix default catalog

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix expand order of new stage

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* move sqlness cases containing plans out of the common dir

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* refactor information schema to allow duplicated scan call

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix: ignore two cases due to substrait

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* reorganise sqlness common cases

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix typos

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* redact round robin partition number

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* Apply suggestions from code review

Co-authored-by: Yingwen <realevenyag@gmail.com>

* skip transforming projection

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* update sqlness result

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* revert common/order

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix clippy

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* Update src/query/src/dist_plan/merge_scan.rs

Co-authored-by: JeremyHi <jiachun_feng@proton.me>

* update sqlness result

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* update sqlness result again

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* resolve CR comments

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* ignore region failover IT

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* update sqlness result again and again

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* unignore some tests about projection

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* enable failover tests

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
Co-authored-by: Yingwen <realevenyag@gmail.com>
Co-authored-by: JeremyHi <jiachun_feng@proton.me>
2023-06-02 06:42:54 +00:00
fys
fe6e3daf81 fix: failed to insert data with u8 (#1701)
* fix: failed to insert data with u8 field

* remove unused code

* fix cr
2023-06-02 06:01:59 +00:00
ZonaHe
b7e1778ada feat: update dashboard to v0.2.6 (#1700)
Co-authored-by: ZonaHex <ZonaHex@users.noreply.github.com>
2023-06-02 13:26:07 +08:00
Lei, HUANG
ccd666aa9b fix: avoid writing manifest and wal if no files are actually flushed (#1698)
* fix: avoid writing manifest and wal if no files are actually flushed

* fix: simplify log
2023-06-02 13:16:59 +08:00
JeremyHi
2aa442c86d feat: exists API for KVStore (#1695)
* feat: exists API for kv

* chore: add unit test
2023-06-02 12:35:04 +08:00
Weny Xu
f811ae4665 fix: enable region failover test (#1699)
fix: fix region failover test
2023-06-02 12:05:37 +08:00
Ruihang Xia
e5b6f8654a feat: optimizer rule to pass expected output ordering hint (#1675)
* move type conversion rule into optimizer dir

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* implement order_hint rule

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add test

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix clippy

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* it works!

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix clippy

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* use column name instead

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix clippy

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* accomplish test case

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* update lock file

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-06-02 03:43:51 +00:00
Ruihang Xia
ff6d11ddc7 chore: ignore symbol link target file (#1696)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-06-02 10:42:44 +08:00
Ruihang Xia
878c6bf75a fix: do not alias relation before join (#1693)
* fix: do not alias relation before join

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* Update src/promql/src/error.rs

Co-authored-by: dennis zhuang <killme2008@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
Co-authored-by: Lei, HUANG <6406592+v0y4g3r@users.noreply.github.com>
Co-authored-by: dennis zhuang <killme2008@gmail.com>
2023-06-01 14:24:37 +00:00
LFC
ce440606a9 fix: sqlness failed due to region failover wrongly kicks in for dropp… (#1690)
fix: sqlness failed due to region failover wrongly kicking in for dropped or renamed table
2023-06-01 21:47:47 +08:00
fys
5fd7250dca fix: invalidate route cache on renaming table (#1691)
* fix: sqlness test

* remove unnecessary clone

* fix cr
2023-06-01 20:43:31 +08:00
Ruihang Xia
5a5e88353c fix: do not change timestamp index column while planning aggr (#1688)
* fix: do not change timestamp index column while planning aggr

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* remove println

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-06-01 20:17:18 +08:00
Ruihang Xia
ef15de5f17 ci: always upload sqlness log (#1692)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-06-01 20:01:26 +08:00
fys
86adac1532 chore: reject table creation when partitions exceeds peer number (#1654)
* chore: table creation is rejected when partition_num exceeds peer_num

* chore: modify no_active_datanode error msg

* fix: ut

* fix sqlness test and add limit for select peer in region_failover

* upgrade greptime-proto

* self cr

* fix: cargo sqlness

* chore: add table info in select ctx for failover

* fix sqlness
2023-06-01 09:05:17 +00:00
Ning Sun
e7a410573b test: fix sqlx compatibility and adds integration test for sqlx (#1686)
* test: fix sqlx compatibility and adds integration test for sqlx

* test: correct insert statements
2023-06-01 15:43:13 +08:00
Yingwen
548f0d1e2a feat: Add app version metric (#1685)
* feat: Add app version metric

* chore: use greptimedb instead of greptime
2023-06-01 14:31:08 +08:00
Zheming Li
5467ea496f feat: Add column supports at first or after the existing columns (#1621)
* feat: Add column supports at first or after the existing columns

* Update src/common/query/Cargo.toml

---------

Co-authored-by: dennis zhuang <killme2008@gmail.com>
2023-06-01 02:13:00 +00:00
Yingwen
70e17ead68 fix: Print source error in subprocedure failure message (#1683)
* fix: print source error in subprocedure failed error

* feat: print source error in subprocedure failure message
2023-06-01 09:51:31 +08:00
dennis zhuang
ae8203fafa fix: prepare statement doesn't support insert clause (#1680)
* fix: insert clause doesn't support prepare statement

* fix: manifest dir

* fix: format

* fix: temp path
2023-05-31 20:14:58 +08:00
Ruihang Xia
ac3666b841 chore(deps): bump arrow/parquet to 40.0, datafusion to the latest HEAD (#1677)
* fix compile error

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* remove deprecated substrait

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix clippy

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* update deps

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* downgrade opendal to 0.33.1

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* change finish's impl

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* update test results

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* ignore failing cases

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-05-31 18:55:02 +08:00
Weny Xu
0460f3ae30 test: add write test for region failover (#1673)
* test: add write test for region failover

* test: add written data assertion after failover

* test: support more storage types
2023-05-31 15:42:00 +08:00
Yingwen
9d179802b8 feat: Add a global TTL option for all tables (#1679)
* feat: Add a global TTL option for all tables

* docs: update config examples

* chore: print start command and options when standalone/frontend starts
2023-05-31 15:36:25 +08:00
Lei, HUANG
72b6bd11f7 feat: adapt window reader to order rules (#1671)
* feat: adapt window reader to order rules

* fix: add asc sort test case
2023-05-31 03:36:17 +00:00
Xuanwo
6b08a5f94e chore: Bump OpenDAL to v0.36 (#1678)
* chore: Bump OpenDAL to v0.36

Signed-off-by: Xuanwo <github@xuanwo.io>

* Fix

Signed-off-by: Xuanwo <github@xuanwo.io>

---------

Signed-off-by: Xuanwo <github@xuanwo.io>
2023-05-31 11:12:40 +08:00
dennis zhuang
00104bef76 feat: supports CTE query (#1674)
* feat: supports CTE query

* test: move cte test to standalone
2023-05-30 12:08:49 +00:00
Zou Wei
ae81c7329d feat: support azblob storage. (#1659)
* feat: support azblob storage.

* test: add some tests.

* refactor: use if-let.
2023-05-30 19:59:38 +08:00
Yingwen
c5f6d7c99a refactor: update proto and rename incorrect region_id fields (#1670) 2023-05-30 15:19:04 +09:00
Weny Xu
bb1b71bcf0 feat: acquire table_id from region_id (#1656)
feat: acquire table_id from region_id
2023-05-30 03:36:47 +00:00
Weny Xu
a4b884406a feat: add invalidate cache step (#1658)
* feat: add invalidate cache step

* refactor: refactor TableIdent

* chore: apply suggestions from CR
2023-05-30 11:17:59 +08:00
dennis zhuang
ab5dfd31ec feat: sql dialect for different protocols (#1631)
* feat: add SqlDialect to query context

* feat: use session in postgres handlers

* chore: refactor sql dialect

* feat: use different dialects for different sql protocols

* feat: adds GreptimeDbDialect

* refactor: replace GenericDialect with GreptimeDbDialect

* feat: save user info to session

* fix: compile error

* fix: test
2023-05-30 09:52:35 +08:00
Yingwen
563ce59071 feat: Add request type and result code to grpc metrics (#1664) 2023-05-30 09:51:08 +08:00
LFC
51b23664f7 feat: update table metadata in lock (#1634)
* feat: use a distributed lock to guard against concurrent updating of table metadata in the region failover procedure

* fix: resolve PR comments

* fix: resolve PR comments
2023-05-30 08:59:14 +08:00
Ruihang Xia
9e21632f23 fix: clippy warning (#1669)
* fix: clippy warning

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* restore the removed common sqlness cases

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-05-30 08:55:24 +08:00
Ruihang Xia
b27c569ae0 refactor: add scan_to_stream() to Table trait to postpone the stream generation (#1639)
* add scan_to_stream to Table

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* impl parquet stream

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* reorganise adapters

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* implement scan_to_stream for mito table

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* clean up

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add location info

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix: table scan

* UT pass

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* impl project record batch

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix information schema

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix clippy

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* resolve CR comments

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* remove one todo

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix errors generated by merge commit

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add output_ordering method to record batch stream

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix rustfmt

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* enhance error types

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
Co-authored-by: Lei, HUANG <mrsatangel@gmail.com>
2023-05-29 20:03:47 +08:00
Weny Xu
0eaae634fa fix: invalidate table route cache (#1663) 2023-05-29 18:49:23 +08:00
JeremyHi
8b9b5a0d3a feat: broadcast with mailbox (#1661)
feat: broadcast with mailbox
2023-05-29 15:11:50 +08:00
Lei, HUANG
78fab08b51 feat: window inferer (#1648)
* feat: window inferer

* doc: add some doc

* test: add a long missing unit test case for windowed reader

* add more tests

* fix: some CR comments
2023-05-29 14:41:00 +08:00
Weny Xu
d072947ef2 refactor: move code out of loop (#1657) 2023-05-27 13:31:13 +08:00
Weny Xu
4094907c09 fix: fix type casting issue (#1652)
* fix: fix type casting issue

* chore: apply suggestion from CR
2023-05-27 00:17:56 +08:00
Ruihang Xia
0da94930d5 feat: impl literal only PromQL query (#1641)
* refactor EmptyMetric to accept expr

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* impl literal only query

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add empty line

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* clean up

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* support literal on HTTP gateway

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix clippy (again)

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-05-26 23:27:03 +08:00
fys
f0a519b71b chore: reduce the number of requests for meta (#1647) 2023-05-26 17:25:18 +08:00
Yingwen
89366ba939 refactor: Holds histogram in the timer to avoid clone labels if possible (#1653)
* feat: use Histogram struct to impl Timer

* fix: fix compile errors

* feat: downgrade metrics-process

* fix: compiler errors
2023-05-26 17:12:03 +08:00
Yingwen
c042723fc9 feat: Record process metrics (#1646)
* feat(servers): Export process metrics

* chore: update metrics related deps to get the process-metrics printed

The latest process-metrics crate depends on metrics 0.21, while we use metrics
0.20. This causes the process-metrics crate not to record the metrics
when using the metrics macros.
2023-05-26 11:51:01 +08:00
Weny Xu
732784d3f8 feat: support to load missing region (#1651)
* feat: support to load missing region

* Update src/mito/src/table.rs

Co-authored-by: Lei, HUANG <6406592+v0y4g3r@users.noreply.github.com>

---------

Co-authored-by: Lei, HUANG <6406592+v0y4g3r@users.noreply.github.com>
2023-05-26 03:30:46 +00:00
Ning Sun
332b3677ac feat: add metrics for ingested row count (#1645) 2023-05-26 10:57:27 +08:00
Weny Xu
6cd634b105 fix: fix typo (#1649) 2023-05-26 10:24:12 +08:00
Yinnan Yao
cd1ccb110b fix: install python3-pip in Dockerfile (#1644)
When I use docker build to build the image, I get an error that pip is missing. Install python3-pip in the Dockerfile.

Fixes: #1643

Signed-off-by: yaoyinnan <yaoyinnan@foxmail.com>
2023-05-25 23:00:39 +08:00
Weny Xu
953793143b feat: add invalidate table cache handler (#1633)
* feat: add invalidate table cache handler

* feat: setup invalidate table cache handler for frontend

* test: add test for invalidate table cache handler

* chore: apply suggestions from CR

* chore: apply suggestions from CR

* fix: fix report_interval unit
2023-05-25 17:45:45 +08:00
Yingwen
8a7998cd25 feat(servers): Add metrics based on axum's example (#1638)
Log on error
2023-05-25 17:31:48 +09:00
LFC
eb24bab5df refactor: set the filters for testing logs (#1637)
minor: set the filters for testing logs
2023-05-25 11:07:57 +08:00
fys
8f9e9686fe chore: add metrics for table route getting (#1636)
chore: add metrics for getting table_route
2023-05-25 10:02:59 +08:00
shuiyisong
61a32d1b9c chore: add boxed error for custom error map (#1635)
* chore: add boxed error for custom error map

* chore: fix typo

* chore: add comment & update display msg

* chore: change name to other error
2023-05-24 12:54:52 +00:00
Weny Xu
74a6517bd0 refactor: move the common part of the heartbeat response handler to common (#1627)
* refactor: move heartbeat response handler to common

* chore: apply suggestions from CR
2023-05-24 07:55:06 +00:00
fys
fa4a497d75 feat: add cache for catalog kv backend (#1592)
* feat: add kvbackend cache

* fix: cargo fmt
2023-05-24 15:07:29 +08:00
Ning Sun
ddca0307d1 feat: more configurable logging levels (#1630)
* feat: make logging level more configurable

* chore: resolve lint warnings

* fix: correct default level for h2

* chore: update text copy
2023-05-24 14:47:41 +08:00
Weny Xu
3dc45f1c13 feat: implement CloseRegionHandler (#1569)
* feat: implement CloseRegionHandler

* feat: register heartbeat response handlers

* test: add tests for heartbeat response handlers

* fix: drop table does not release regions

* chore: apply suggestion from CR

* fix: fix close region issue

* chore: apply suggestion from CR

* chore: apply suggestion from CR

* chore: apply suggestion from CR

* chore: apply suggestion from CR

* chore: apply suggestion from CR

* chore: apply suggestion from CR

* chore: apply suggestion from CR

* chore: modify method name and add log

* refactor: refactor HeartbeatResponseHandler

* chore: apply suggestion from CR

* refactor: remove close method from Region trait

* chore: apply suggestion from CR

* chore: remove PartialEq from CloseTableResult

* chore: apply suggestion from CR
2023-05-23 15:44:27 +08:00
dennis zhuang
7c55783e53 feat!: reorganize the storage layout (#1609)
* feat: adds data_home to DataOptions

* refactor: split out object store stuffs from datanode instance

* feat: move data_home into FileConfig

* refactor: object storage layers

* feat: adds datanode path to procedure paths

* feat: temp commit

* refactor: clean code

* fix: forgot files

* fix: forgot files

* Update src/common/test-util/src/ports.rs

Co-authored-by: Yingwen <realevenyag@gmail.com>

* Update tests/runner/src/env.rs

Co-authored-by: Yingwen <realevenyag@gmail.com>

* fix: compile error

* chore: cr comments

* fix: dependencies order in cargo

* fix: data path in test

---------

Co-authored-by: Yingwen <realevenyag@gmail.com>
2023-05-23 13:58:26 +08:00
shuiyisong
5b304fa692 chore: add grpc query interceptor (#1626) 2023-05-23 13:57:54 +08:00
Weny Xu
9f67ad8bce fix: fix the issue where closed regions are not released (#1596)
* fix: fix close region issue

* chore: apply suggestion from CR

* chore: apply suggestion from CR

* chore: apply suggestion from CR

* chore: apply suggestion from CR

* refactor: remove close method from Region trait

* chore: remove PartialEq from CloseTableResult
2023-05-23 11:40:12 +08:00
Weny Xu
e646490d16 chore: fix code styling (#1623) 2023-05-23 10:09:34 +08:00
JeremyHi
1225edb065 refactor: move rpc's commons to common-meta (#1625) 2023-05-23 10:07:24 +08:00
Lei, HUANG
8e7ec4626b refactor: remove useless error (#1624)
* refactor: remove useless

* fix: remove useless error variant
2023-05-22 22:55:27 +08:00
LFC
f64527da22 feat: region failover procedure (#1558)
* feat: region failover procedure
2023-05-22 19:54:52 +08:00
Yingwen
6dbceb1ad5 feat: Trigger flush based on global write buffer size (#1585)
* feat(storage): Add AllocTracker

* feat(storage): flush request wip

* feat(storage): support global write buffer size

* fix(storage): Test and fix size based strategy

* test(storage): Test AllocTracker

* test(storage): Test pick_by_write_buffer_full

* docs: Add flush config example

* test(storage): Test schedule_engine_flush

* feat(storage): Add metrics for write buffer size

* chore(flush): Add log when triggering flush by global buffer

* chore(storage): track allocation in update_stats
2023-05-22 19:00:30 +08:00
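The commit above introduces an AllocTracker and a global write buffer size that triggers flushes. A rough, illustrative sketch of the idea (not the actual storage-engine types):

```rust
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;

/// Tracks bytes allocated by memtable writes against a shared counter.
#[derive(Default)]
struct AllocTracker {
    bytes_allocated: AtomicUsize,
}

impl AllocTracker {
    fn on_allocate(&self, bytes: usize) {
        self.bytes_allocated.fetch_add(bytes, Ordering::Relaxed);
    }

    fn bytes_allocated(&self) -> usize {
        self.bytes_allocated.load(Ordering::Relaxed)
    }
}

/// A flush is requested once the global write buffer crosses the configured limit.
fn should_flush(tracker: &Arc<AllocTracker>, global_write_buffer_size: usize) -> bool {
    tracker.bytes_allocated() >= global_write_buffer_size
}
```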
Ning Sun
067c5ee7ce feat: time_zone variable for mysql connections (#1607)
* feat: add timezone info to query context

* feat: parse mysql compatible time zone string

* feat: add method to timestamp for rendering timezone aware string

* feat: use timezone from session for time string rendering

* refactor: use querycontectref

* feat: implement session/timezone variable read/write

* style: resolve toml format

* test: update tests

* Apply suggestions from code review

Co-authored-by: dennis zhuang <killme2008@gmail.com>

* Update src/session/src/context.rs

Co-authored-by: dennis zhuang <killme2008@gmail.com>

* refactor: address review issues

---------

Co-authored-by: dennis zhuang <killme2008@gmail.com>
2023-05-22 18:30:23 +08:00
Yingwen
32ad358323 fix(table-procedure): Open table in RegisterCatalog state (#1617)
* fix(table-procedure): on_register_catalog should use open_table

* test: Test recover RegisterCatalog state

* test: Fix subprocedure does not execute in test

* feat(mito): adjust procedure log level

* refactor: rename execute_parent_procedure

execute_parent_procedure -> execute_until_suspended_or_done
2023-05-22 17:54:02 +08:00
Chuanle Chen
77497ca46a feat: support /api/v1/label/<label_name>/values from Prometheus (#1604)
* feat: support `/api/v1/label/<label_name>/values` from Prometheus

* chore: apply CR

* chore: apply CR
2023-05-22 07:24:12 +00:00
JeremyHi
e5a215de46 chore: truncate route-table (#1619) 2023-05-22 14:54:40 +08:00
Ruihang Xia
e5aad0f607 feat: distributed planner basic (#1599)
* basic skeleton

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* clean up

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* change QueryEngineState's constructor

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* install extension planner

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* tidy

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-05-22 11:48:03 +08:00
QuenKar
edf6c0bf48 refactor: add "table engine" to datanode heartbeat. (#1616)
refactor:add "table engine" to datanode heartbeat.
2023-05-22 10:09:32 +08:00
Ruihang Xia
c3eeda7d84 refactor(frontend): adjust code structure (#1615)
* move  to expr_factory

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* move configs into service_config

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* move GrpcQueryHandler into distributed.rs

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-05-20 02:09:20 +08:00
Lei, HUANG
82f2b34f4d fix: wal replay ignore manifest entries (#1612)
* fix: wal replay ignore manifest entries

* test: add ut
2023-05-19 18:12:44 +08:00
Vanish
8764ce7845 feat: add delete WAL in drop_region (#1577)
* feat: add delete WAL in drop_region

* chore: fix typo err.

* feat: mark all SSTs deleted and remove the region from StorageEngine's region map.

* test: add test_drop_region for StorageEngine.

* chore: make clippy happy

* fix: fix conflict

* chore: CR.

* chore: CR

* chore: fix clippy

* fix: temp file life time
2023-05-18 18:02:34 +08:00
localhost
d76ddc575f fix: meta admin API get catalog table name error (#1603) 2023-05-18 14:27:40 +08:00
Weny Xu
68dfea0cfd fix: fix datanode cannot start while failing to open tables (#1601) 2023-05-17 20:56:13 +08:00
fys
57c02af55b feat: change default selector in meta from "LeaseBased" to "LoadBased" (#1598)
* feat: change default selector from "LeaseBased" to "LoadBased"

* fix: ut
2023-05-17 17:48:13 +08:00
Lei, HUANG
e8c2222a76 feat: add WindowedReader (#1532)
* feat: add WindowedReader

* fix: some cr comments

* feat: filter memtable by timestamp range

* fix: add source in error variants

* fix: some CR comments

* refactor: filter memtable in MapIterWrapper

* fix: clippy
2023-05-17 17:34:29 +08:00
JeremyHi
eb95a9e78b fix: sequence out of range (#1597) 2023-05-17 14:43:54 +08:00
zyy17
4920836021 refactor: support parsing env list (#1595)
* refactor: support parsing env list

* refactor: set 'multiple = true' for metasrv_addr cli option and remove duplicated parsing
2023-05-17 14:37:08 +08:00
Huaijin
715e1a321f feat: implement /api/v1/labels for prometheus (#1580)
* feat: implement /api/v1/labels for prometheus

* fix: only gather match[]

* chore: fix typo

* chore: fix typo

* chore: change style

* fix: suggestion

* fix: suggestion

* chore: typo

* fix: fmt

* fix: add more test
2023-05-17 03:56:22 +00:00
localhost
a6ec79ee30 chore: add a uniform prefix to the metrics using the official recommendation of (#1590) 2023-05-17 11:08:49 +08:00
Lei, HUANG
e70d49b9cf feat: memtable stats (#1591)
* feat: memtable stats

* chore: add tests for timestamp subtraction

* feat: add `Value:as_timestamp` method
2023-05-17 11:07:07 +08:00
Weny Xu
ca75a7b744 fix: remove region number validation (#1593)
* fix: remove region number validation

* Update src/mito/src/engine.rs

Co-authored-by: dennis zhuang <killme2008@gmail.com>

---------

Co-authored-by: dennis zhuang <killme2008@gmail.com>
2023-05-17 09:23:56 +08:00
localhost
3330957896 chore: add fmt for statement query (#1588)
* chore: add fmt for statement query

* chore: add test for query display
2023-05-16 16:14:11 +08:00
WU Jingdi
fb1ac0cb9c feat: support user config manifest compression (#1579)
* feat: support user config manifest compression

* chore: change style

* chore: enhance test
2023-05-16 11:02:59 +08:00
Niwaka
856ab5bea7 feat: make RepeatedTask invoke remove_outdated_meta method (#1578)
* feat: make RepeatedTask invoke remove_outdated_meta method

* fix: typo

* chore: improve error message
2023-05-16 10:21:35 +08:00
Eugene Tolbakov
122bd5f0ab feat(tql): add initial implementation for explain & analyze (#1427)
* feat(tql): resolve conflicts after merge, formatting and clippy issues, add sqlness tests, adjust explain with start, end, step

* feat(tql): adjust sqlness assertions
2023-05-16 07:28:24 +08:00
Ruihang Xia
2fd1075c4f fix: uses nextest in the Release CI (#1582)
* fix: uses nextest in the Release CI

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* install nextest

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* Update Makefile

Co-authored-by: zyy17 <zyylsxm@gmail.com>

* update workflow yaml

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
Co-authored-by: zyy17 <zyylsxm@gmail.com>
2023-05-15 21:09:09 +08:00
fys
027707d969 feat: support frontend-meta heartbeat (#1555)
* feat: support frontend heartbeat

* fix: typo "reponse" -> "response"

* add ut

* enable start heartbeat task

* chore: frontend id is specified by metasrv, not in the frontend startup parameter

* fix typo

* self-cr

* cr

* cr

* cr

* remove unnecessary headers

* use the member id in the header as the node id
2023-05-15 09:54:45 +00:00
Yingwen
8d54d40b21 feat: Add FlushPicker to flush regions periodically (#1559)
* feat: Add FlushPicker

* feat(storage): Add close to StorageEngine

* style(storage): fix clippy

* feat(storage): Close regions in StorageEngine::close

* chore(storage): Clear requests on scheduler stop

* test(storage): Test flush picker

* feat(storage): Add metrics for auto flush

* feat(storage): Add flush reason and record it in metrics

* feat: Expose flush config

docs(config): Update config example

* refactor(storage): Run auto flush task in FlushScheduler

* refactor(storage): Add FlushItem trait to make FlushPicker easy to test
2023-05-15 17:29:28 +08:00
Ning Sun
497b1f9dc9 feat: metrics for storage engine (#1574)
* feat: add storage engine region count gauge

* test: remove catalog metrics because we can't get a correct number

* feat: add metrics for log store write and compaction

* fix: address review issues
2023-05-15 15:22:00 +08:00
LFC
4ae0b5e185 test: move instances tests to "tests-integration" (#1573)
* test: move standalone and distributed instances tests from "frontend" crate to "tests-integration"

* fix: resolve PR comments
2023-05-15 12:00:43 +08:00
Lei, HUANG
cfcfc72681 refactor: remove version column (#1576) 2023-05-15 11:03:37 +08:00
Weny Xu
66903d42e1 feat: implement OpenTableHandler (#1567)
* feat: implement OpenTableHandler

* chore: apply suggestion from CR

* chore: apply suggestion from CR
2023-05-15 10:47:28 +08:00
zyy17
4fc173acf0 refactor: support layered configuration (#1535)
* refactor: add a layered configuration by using config-rs

Signed-off-by: zyy17 <zyylsxm@gmail.com>

* refactor: add 'env_var_prefix' for 'load_options()' and remove duplicate default construction in frontend

* refactor: add test_config_precedence_order in standalone

* refactor: add 'test_config_precedence_order()' test case in metasrv

* refactor: add 'test_config_precedence_order()' test case in datanode

* refactor: refine the naming '*_env_var_*' -> '*_env_vars_*'

* refactor: fix clippy error

* refactor: refine error naming 'LoadConfig' -> 'LoadLayeredConfig' and add Location

* refactor: move 'env_vars_prefix' to clap options

* fix: use '__' as envrionment variables separator and simplify load_layered_options()

* refactor: derive 'Default' for StartCommand and use default function to simplify the test cases

* fix: clippy error

* chore: update comments

* chore(deps): update deps info

* refactor(naming): 'env_vars_prefix' -> 'env_prefix'

* refactor: simplify the code

* refactor: change some argument type of 'load_layered_options()'

* refactor: simplify the code

* refactor: remove unnecessary 'clone()'

* refactor: add 'GREPTIMEDB_*' prefix for env_prefix

* refactor: modify configuration precedence order: cli > config file > environment variables > default values

---------

Signed-off-by: zyy17 <zyylsxm@gmail.com>
2023-05-13 22:37:47 +08:00
Huaijin
f9a4326461 fix: unwrap() None in NULL value exist multi-field table during prometheus query_range (#1571)
* fix: NULL value in multi-field table meet error in prometheus query_range

* fix: suggestion

* chore: change style
2023-05-12 17:36:03 +08:00
Ning Sun
4151d7a8ea fix: allow cross-schema query on information_schema (#1568) 2023-05-11 16:54:28 +08:00
LFC
a4e106380b fix: refreshing Dashboard returns 404 (#1562)
* fix: refreshing Dashboard returns 404

* fix: refreshing Dashboard returns 404
2023-05-11 15:08:20 +08:00
Ruihang Xia
7a310cb056 docs: rfc of distributed planner (#1554)
* docs: rfc of distributed planner

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* Update docs/rfcs/2023-05-09-distributed-planner.md

Co-authored-by: LFC <bayinamine@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
Co-authored-by: dennis zhuang <killme2008@gmail.com>
Co-authored-by: LFC <bayinamine@gmail.com>
2023-05-11 14:45:32 +08:00
LFC
8fef32f8ef feat: enable tokio console in cluster mode (#1512)
* feat: enable tokio console subscriber

* fix: resolve PR comments

* fix: resolve PR comments

* fix: resolve PR comments
2023-05-11 14:35:06 +08:00
Ning Sun
8c85fdec29 fix: correct schema/table count in catalog metrics (#1565) 2023-05-11 14:20:42 +08:00
ZonaHe
84f6b46437 feat: update dashboard to v0.2.5 (#1563)
Co-authored-by: ZonaHex <ZonaHex@users.noreply.github.com>
2023-05-11 13:55:42 +08:00
Weny Xu
44aef6fcbd feat(datanode): implement the heartbeat response handler (#1547)
* feat(datanode): implement instruction handler

* chore: apply suggestion from CR

* refactor: refactor heartbeat response handler
2023-05-11 09:27:13 +08:00
JeremyHi
7a9dd5f0c8 feat: ignore mailbox message into stat (#1560) 2023-05-10 18:06:04 +08:00
WU Jingdi
486bb2ee8e feat: Compress manifest and checkpoint (#1497)
* feat: Compress manifest and checkpoint

* refactor: use file extension to infer compression type

* chore: apply suggestions from CR

* Update src/storage/src/manifest/storage.rs

Co-authored-by: Yingwen <realevenyag@gmail.com>

* chore: CR advices

* chore: Fix bugs, strengthen test

* chore: Fix CR, strengthen test

---------

Co-authored-by: dennis zhuang <killme2008@gmail.com>
Co-authored-by: Yingwen <realevenyag@gmail.com>
2023-05-10 07:53:06 +00:00
Weny Xu
020c55e260 refactor: change mailbox_messages to mailbox_message (#1557) 2023-05-10 07:17:11 +00:00
Yingwen
ee3e1dbdaa feat: Use LocalScheduler framework to implement FlushScheduler (#1531)
* test: simplify countdownlatch

* feat: impl Drop for LocalScheduler

* feat(storage): Impl FlushRequest and FlushHandler

* feat(storage): Use scheduler to handle flush job

* chore(storage): remove unused code

* feat(storage): Use new type pattern for RegionMap

* feat(storage): Remove on_success callback

* feat(storage): Address CR comments and add some metrics to flush
2023-05-10 07:16:51 +00:00
dennis zhuang
aa0c5b888c docs: update readme (#1549)
* docs: update readme

* Update README.md

Co-authored-by: Ning Sun <classicning@gmail.com>

* chore: cr comments

* chore: cr comments

---------

Co-authored-by: Ning Sun <classicning@gmail.com>
2023-05-10 14:36:07 +08:00
732 changed files with 44196 additions and 16820 deletions

View File

@@ -20,6 +20,3 @@ out/
# Rust
target/
# Git
.git

View File

@@ -9,3 +9,9 @@ GT_OSS_BUCKET=OSS bucket
GT_OSS_ACCESS_KEY_ID=OSS access key id
GT_OSS_ACCESS_KEY=OSS access key
GT_OSS_ENDPOINT=OSS endpoint
# Settings for azblob test
GT_AZBLOB_CONTAINER=AZBLOB container
GT_AZBLOB_ACCOUNT_NAME=AZBLOB account name
GT_AZBLOB_ACCOUNT_KEY=AZBLOB account key
GT_AZBLOB_ENDPOINT=AZBLOB endpoint

View File

@@ -141,6 +141,7 @@ jobs:
- name: Run sqlness
run: cargo sqlness && ls /tmp
- name: Upload sqlness logs
if: always()
uses: actions/upload-artifact@v3
with:
name: sqlness-logs

View File

@@ -7,25 +7,35 @@ on:
- cron: '0 0 * * 1'
# Manually trigger only builds binaries.
workflow_dispatch:
inputs:
dry_run:
description: 'Skip docker push and release steps'
type: boolean
default: true
skip_test:
description: 'Do not run tests during build'
type: boolean
default: false
name: Release
env:
RUST_TOOLCHAIN: nightly-2023-05-03
SCHEDULED_BUILD_VERSION_PREFIX: v0.3.0
SCHEDULED_BUILD_VERSION_PREFIX: v0.4.0
SCHEDULED_PERIOD: nightly
CARGO_PROFILE: nightly
# Controls whether to run tests, include unit-test, integration-test and sqlness.
DISABLE_RUN_TESTS: false
DISABLE_RUN_TESTS: ${{ inputs.skip_test || false }}
jobs:
build-macos:
name: Build macOS binary
strategy:
fail-fast: false
matrix:
# The file format is greptime-<os>-<arch>
include:
@@ -84,13 +94,14 @@ jobs:
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
targets: ${{ matrix.arch }}
- name: Install latest nextest release
uses: taiki-e/install-action@nextest
- name: Output package versions
run: protoc --version ; cargo version ; rustc --version ; gcc --version ; g++ --version
# - name: Run tests
# if: env.DISABLE_RUN_TESTS == 'false'
# run: make unit-test integration-test sqlness-test
- name: Run tests
if: env.DISABLE_RUN_TESTS == 'false'
run: make test sqlness-test
- name: Run cargo build
if: contains(matrix.arch, 'darwin') || contains(matrix.opts, 'pyo3_backend') == false
@@ -116,9 +127,25 @@ jobs:
name: ${{ matrix.file }}.sha256sum
path: target/${{ matrix.arch }}/${{ env.CARGO_PROFILE }}/${{ matrix.file }}.sha256sum
- name: Configure tag
shell: bash
if: github.event_name == 'push'
run: |
VERSION=${{ github.ref_name }}
echo "TAG=${VERSION:1}" >> $GITHUB_ENV
- name: Upload to S3
run: |
aws s3 sync target/${{ matrix.arch }}/${{ env.CARGO_PROFILE }} s3://${{ secrets.GREPTIMEDB_RELEASE_BUCKET_NAME }}/releases/${TAG}
env:
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
AWS_DEFAULT_REGION: ${{ secrets.AWS_CN_REGION }}
build-linux:
name: Build linux binary
strategy:
fail-fast: false
matrix:
# The file format is greptime-<os>-<arch>
include:
@@ -200,13 +227,14 @@ jobs:
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
targets: ${{ matrix.arch }}
- name: Install latest nextest release
uses: taiki-e/install-action@nextest
- name: Output package versions
run: protoc --version ; cargo version ; rustc --version ; gcc --version ; g++ --version
- name: Run tests
if: env.DISABLE_RUN_TESTS == 'false'
run: make unit-test integration-test sqlness-test
run: make test sqlness-test
- name: Run cargo build
if: contains(matrix.arch, 'darwin') || contains(matrix.opts, 'pyo3_backend') == false
@@ -275,11 +303,26 @@ jobs:
name: ${{ matrix.file }}.sha256sum
path: target/${{ matrix.arch }}/${{ env.CARGO_PROFILE }}/${{ matrix.file }}.sha256sum
- name: Configure tag
shell: bash
if: github.event_name == 'push'
run: |
VERSION=${{ github.ref_name }}
echo "TAG=${VERSION:1}" >> $GITHUB_ENV
- name: Upload to S3
run: |
aws s3 sync target/${{ matrix.arch }}/${{ env.CARGO_PROFILE }} s3://${{ secrets.GREPTIMEDB_RELEASE_BUCKET_NAME }}/releases/${TAG}
env:
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
AWS_DEFAULT_REGION: ${{ secrets.AWS_CN_REGION }}
docker:
name: Build docker image
needs: [build-linux, build-macos]
runs-on: ubuntu-latest
if: github.repository == 'GreptimeTeam/greptimedb' && github.event_name != 'workflow_dispatch'
if: github.repository == 'GreptimeTeam/greptimedb' && !(inputs.dry_run || false)
steps:
- name: Checkout sources
uses: actions/checkout@v3
@@ -292,7 +335,7 @@ jobs:
- name: Configure scheduled build image tag # the tag would be ${SCHEDULED_BUILD_VERSION_PREFIX}-YYYYMMDD-${SCHEDULED_PERIOD}
shell: bash
if: github.event_name == 'schedule'
if: github.event_name != 'push'
run: |
buildTime=`date "+%Y%m%d"`
SCHEDULED_BUILD_VERSION=${{ env.SCHEDULED_BUILD_VERSION_PREFIX }}-$buildTime-${{ env.SCHEDULED_PERIOD }}
@@ -300,7 +343,7 @@ jobs:
- name: Configure tag # If the release tag is v0.1.0, then the image version tag will be 0.1.0.
shell: bash
if: github.event_name != 'schedule'
if: github.event_name == 'push'
run: |
VERSION=${{ github.ref_name }}
echo "IMAGE_TAG=${VERSION:1}" >> $GITHUB_ENV
@@ -365,7 +408,7 @@ jobs:
# Release artifacts only when all the artifacts are built successfully.
needs: [build-linux, build-macos, docker]
runs-on: ubuntu-latest
if: github.repository == 'GreptimeTeam/greptimedb' && github.event_name != 'workflow_dispatch'
if: github.repository == 'GreptimeTeam/greptimedb' && !(inputs.dry_run || false)
steps:
- name: Checkout sources
uses: actions/checkout@v3
@@ -375,7 +418,7 @@ jobs:
- name: Configure scheduled build version # the version would be ${SCHEDULED_BUILD_VERSION_PREFIX}-${SCHEDULED_PERIOD}-YYYYMMDD, like v0.2.0-nigthly-20230313.
shell: bash
if: github.event_name == 'schedule'
if: github.event_name != 'push'
run: |
buildTime=`date "+%Y%m%d"`
SCHEDULED_BUILD_VERSION=${{ env.SCHEDULED_BUILD_VERSION_PREFIX }}-${{ env.SCHEDULED_PERIOD }}-$buildTime
@@ -393,13 +436,13 @@ jobs:
fi
- name: Create scheduled build git tag
if: github.event_name == 'schedule'
if: github.event_name != 'push'
run: |
git tag ${{ env.SCHEDULED_BUILD_VERSION }}
- name: Publish scheduled release # configure the different release title and tags.
uses: ncipollo/release-action@v1
if: github.event_name == 'schedule'
if: github.event_name != 'push'
with:
name: "Release ${{ env.SCHEDULED_BUILD_VERSION }}"
prerelease: ${{ env.prerelease }}
@@ -411,7 +454,7 @@ jobs:
- name: Publish release
uses: ncipollo/release-action@v1
if: github.event_name != 'schedule'
if: github.event_name == 'push'
with:
name: "${{ github.ref_name }}"
prerelease: ${{ env.prerelease }}
@@ -424,7 +467,7 @@ jobs:
name: Push docker image to alibaba cloud container registry
needs: [docker]
runs-on: ubuntu-latest
if: github.repository == 'GreptimeTeam/greptimedb' && github.event_name != 'workflow_dispatch'
if: github.repository == 'GreptimeTeam/greptimedb' && !(inputs.dry_run || false)
continue-on-error: true
steps:
- name: Checkout sources
@@ -445,7 +488,7 @@ jobs:
- name: Configure scheduled build image tag # the tag would be ${SCHEDULED_BUILD_VERSION_PREFIX}-YYYYMMDD-${SCHEDULED_PERIOD}
shell: bash
if: github.event_name == 'schedule'
if: github.event_name != 'push'
run: |
buildTime=`date "+%Y%m%d"`
SCHEDULED_BUILD_VERSION=${{ env.SCHEDULED_BUILD_VERSION_PREFIX }}-$buildTime-${{ env.SCHEDULED_PERIOD }}
@@ -453,7 +496,7 @@ jobs:
- name: Configure tag # If the release tag is v0.1.0, then the image version tag will be 0.1.0.
shell: bash
if: github.event_name != 'schedule'
if: github.event_name == 'push'
run: |
VERSION=${{ github.ref_name }}
echo "IMAGE_TAG=${VERSION:1}" >> $GITHUB_ENV

.gitignore vendored
View File

@@ -1,6 +1,8 @@
# Generated by Cargo
# will have compiled files and executables
/target/
# also ignore if it's a symbolic link
/target
# Remove Cargo.lock from gitignore if creating an executable, leave it for libraries
# More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html
@@ -39,3 +41,8 @@ benchmarks/data
# dashboard files
!/src/servers/dashboard/VERSION
/src/servers/dashboard/*
# Vscode workspace
*.code-workspace
venv/

Cargo.lock generated

File diff suppressed because it is too large.

View File

@@ -14,8 +14,10 @@ members = [
"src/common/grpc",
"src/common/grpc-expr",
"src/common/mem-prof",
"src/common/meta",
"src/common/procedure",
"src/common/procedure-test",
"src/common/pprof",
"src/common/query",
"src/common/recordbatch",
"src/common/runtime",
@@ -31,6 +33,7 @@ members = [
"src/meta-client",
"src/meta-srv",
"src/mito",
"src/mito2",
"src/object-store",
"src/partition",
"src/promql",
@@ -48,47 +51,49 @@ members = [
]
[workspace.package]
version = "0.2.0"
version = "0.3.2"
edition = "2021"
license = "Apache-2.0"
[workspace.dependencies]
arrow = { version = "37.0" }
arrow-array = "37.0"
arrow-flight = "37.0"
arrow-schema = { version = "37.0", features = ["serde"] }
arrow = { version = "40.0" }
arrow-array = "40.0"
arrow-flight = "40.0"
arrow-schema = { version = "40.0", features = ["serde"] }
async-stream = "0.3"
async-trait = "0.1"
chrono = { version = "0.4", features = ["serde"] }
# TODO(ruihang): use arrow-datafusion when it contains https://github.com/apache/arrow-datafusion/pull/6032
datafusion = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "b14f7a9ffe91257fc3d2a5d654f2a1a14a8fc793" }
datafusion-common = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "b14f7a9ffe91257fc3d2a5d654f2a1a14a8fc793" }
datafusion-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "b14f7a9ffe91257fc3d2a5d654f2a1a14a8fc793" }
datafusion-optimizer = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "b14f7a9ffe91257fc3d2a5d654f2a1a14a8fc793" }
datafusion-physical-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "b14f7a9ffe91257fc3d2a5d654f2a1a14a8fc793" }
datafusion-sql = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "b14f7a9ffe91257fc3d2a5d654f2a1a14a8fc793" }
datafusion-substrait = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "b14f7a9ffe91257fc3d2a5d654f2a1a14a8fc793" }
datafusion = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "63e52dde9e44cac4b1f6c6e6b6bf6368ba3bd323" }
datafusion-common = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "63e52dde9e44cac4b1f6c6e6b6bf6368ba3bd323" }
datafusion-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "63e52dde9e44cac4b1f6c6e6b6bf6368ba3bd323" }
datafusion-optimizer = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "63e52dde9e44cac4b1f6c6e6b6bf6368ba3bd323" }
datafusion-physical-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "63e52dde9e44cac4b1f6c6e6b6bf6368ba3bd323" }
datafusion-sql = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "63e52dde9e44cac4b1f6c6e6b6bf6368ba3bd323" }
datafusion-substrait = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "63e52dde9e44cac4b1f6c6e6b6bf6368ba3bd323" }
futures = "0.3"
futures-util = "0.3"
parquet = "37.0"
greptime-proto = { git = "https://github.com/WenyXu/greptime-proto.git", rev = "1eda4691a5d2c8ffc463d48ca2317905ba7e4b2d" }
itertools = "0.10"
parquet = "40.0"
paste = "1.0"
prost = "0.11"
rand = "0.8"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
snafu = { version = "0.7", features = ["backtraces"] }
sqlparser = "0.33"
sqlparser = "0.34"
tempfile = "3"
tokio = { version = "1.24.2", features = ["full"] }
tokio = { version = "1.28", features = ["full"] }
tokio-util = { version = "0.7", features = ["io-util", "compat"] }
tonic = { version = "0.9", features = ["tls"] }
uuid = { version = "1", features = ["serde", "v4", "fast-rng"] }
metrics = "0.20"
meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "f0798c4c648d89f51abe63e870919c75dd463199" }
meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "abbd357c1e193cd270ea65ee7652334a150b628f" }
[workspace.dependencies.meter-macros]
git = "https://github.com/GreptimeTeam/greptime-meter.git"
rev = "f0798c4c648d89f51abe63e870919c75dd463199"
rev = "abbd357c1e193cd270ea65ee7652334a150b628f"
[profile.release]
debug = true

Cross.toml Normal file
View File

@@ -0,0 +1,7 @@
[build]
pre-build = [
"dpkg --add-architecture $CROSS_DEB_ARCH",
"apt update && apt install -y unzip zlib1g-dev zlib1g-dev:$CROSS_DEB_ARCH",
"curl -LO https://github.com/protocolbuffers/protobuf/releases/download/v3.15.8/protoc-3.15.8-linux-x86_64.zip && unzip protoc-3.15.8-linux-x86_64.zip -d /usr/",
"chmod a+x /usr/bin/protoc && chmod -R a+rx /usr/include/google",
]

View File

@@ -33,13 +33,12 @@ docker-image: ## Build docker image.
##@ Test
.PHONY: unit-test
unit-test: ## Run unit test.
cargo test --workspace
test: nextest ## Run unit and integration tests.
cargo nextest run --retries 3
.PHONY: integration-test
integration-test: ## Run integation test.
cargo test integration
.PHONY: nextest ## Install nextest tools.
nextest:
cargo --list | grep nextest || cargo install cargo-nextest --locked
.PHONY: sqlness-test
sqlness-test: ## Run sqlness test.

View File

@@ -100,64 +100,22 @@ Or if you built from docker:
docker run -p 4002:4002 -v "$(pwd):/tmp/greptimedb" greptime/greptimedb standalone start
```
For more startup options, greptimedb's **distributed mode** and information
about Kubernetes deployment, check our [docs](https://docs.greptime.com/).
Please see [the online document site](https://docs.greptime.com/getting-started/overview#install-greptimedb) for more installation options and [operations info](https://docs.greptime.com/user-guide/operations/overview).
### Connect
### Get started
1. Connect to GreptimeDB via standard [MySQL
client](https://dev.mysql.com/downloads/mysql/):
Read the [complete getting started guide](https://docs.greptime.com/getting-started/overview#connect) on our [official document site](https://docs.greptime.com/).
```
# The standalone instance listen on port 4002 by default.
mysql -h 127.0.0.1 -P 4002
```
2. Create table:
```SQL
CREATE TABLE monitor (
host STRING,
ts TIMESTAMP,
cpu DOUBLE DEFAULT 0,
memory DOUBLE,
TIME INDEX (ts),
PRIMARY KEY(host)) ENGINE=mito WITH(regions=1);
```
3. Insert some data:
```SQL
INSERT INTO monitor(host, cpu, memory, ts) VALUES ('host1', 66.6, 1024, 1660897955000);
INSERT INTO monitor(host, cpu, memory, ts) VALUES ('host2', 77.7, 2048, 1660897956000);
INSERT INTO monitor(host, cpu, memory, ts) VALUES ('host3', 88.8, 4096, 1660897957000);
```
4. Query the data:
```SQL
SELECT * FROM monitor;
```
```TEXT
+-------+--------------------------+------+--------+
| host | ts | cpu | memory |
+-------+--------------------------+------+--------+
| host1 | 2022-08-19 16:32:35+0800 | 66.6 | 1024 |
| host2 | 2022-08-19 16:32:36+0800 | 77.7 | 2048 |
| host3 | 2022-08-19 16:32:37+0800 | 88.8 | 4096 |
+-------+--------------------------+------+--------+
3 rows in set (0.03 sec)
```
You can always cleanup test database by removing `/tmp/greptimedb`.
To write and query data, GreptimeDB is compatible with multiple [protocols and clients](https://docs.greptime.com/user-guide/client/overview).
## Resources
### Installation
- [Pre-built Binaries](https://github.com/GreptimeTeam/greptimedb/releases):
For Linux and macOS, you can easily download pre-built binaries that are ready to use. In most cases, downloading the version without PyO3 is sufficient. However, if you plan to run scripts in CPython (and use Python packages like NumPy and Pandas), you will need to download the version with PyO3 and install a Python with the same version as the Python in the PyO3 version. We recommend using virtualenv for the installation process to manage multiple Python versions.
- [Pre-built Binaries](https://greptime.com/download):
For Linux and macOS, you can easily download pre-built binaries including official releases and nightly builds that are ready to use.
In most cases, downloading the version without PyO3 is sufficient. However, if you plan to run scripts in CPython (and use Python packages like NumPy and Pandas), you will need to download the version with PyO3 and install a Python with the same version as the Python in the PyO3 version.
We recommend using virtualenv for the installation process to manage multiple Python versions.
- [Docker Images](https://hub.docker.com/r/greptime/greptimedb)(**recommended**): pre-built
Docker images, this is the easiest way to try GreptimeDB. By default it runs CPython script with `pyo3_backend` enabled.
- [`gtctl`](https://github.com/GreptimeTeam/gtctl): the command-line tool for
@@ -165,7 +123,7 @@ You can always cleanup test database by removing `/tmp/greptimedb`.
### Documentation
- GreptimeDB [User Guide](https://docs.greptime.com/user-guide/concepts.html)
- GreptimeDB [User Guide](https://docs.greptime.com/user-guide/concepts/overview)
- GreptimeDB [Developer
Guide](https://docs.greptime.com/developer-guide/overview.html)
- GreptimeDB [internal code document](https://greptimedb.rs)

View File

@@ -9,6 +9,6 @@ arrow.workspace = true
clap = { version = "4.0", features = ["derive"] }
client = { path = "../src/client" }
indicatif = "0.17.1"
itertools = "0.10.5"
itertools.workspace = true
parquet.workspace = true
tokio.workspace = true

View File

@@ -26,7 +26,9 @@ use arrow::datatypes::{DataType, Float64Type, Int64Type};
use arrow::record_batch::RecordBatch;
use clap::Parser;
use client::api::v1::column::Values;
use client::api::v1::{Column, ColumnDataType, ColumnDef, CreateTableExpr, InsertRequest};
use client::api::v1::{
Column, ColumnDataType, ColumnDef, CreateTableExpr, InsertRequest, InsertRequests,
};
use client::{Client, Database, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use indicatif::{MultiProgress, ProgressBar, ProgressStyle};
use parquet::arrow::arrow_reader::ParquetRecordBatchReaderBuilder;
@@ -107,8 +109,12 @@ async fn write_data(
columns,
row_count,
};
let requests = InsertRequests {
inserts: vec![request],
};
let now = Instant::now();
db.insert(request).await.unwrap();
let _ = db.insert(requests).await.unwrap();
let elapsed = now.elapsed();
total_rpc_elapsed_ms += elapsed.as_millis();
progress_bar.inc(row_count as _);
@@ -364,26 +370,23 @@ fn create_table_expr() -> CreateTableExpr {
primary_keys: vec!["VendorID".to_string()],
create_if_not_exists: false,
table_options: Default::default(),
region_ids: vec![0],
region_numbers: vec![0],
table_id: None,
engine: "mito".to_string(),
}
}
fn query_set() -> HashMap<String, String> {
let mut ret = HashMap::new();
ret.insert(
"count_all".to_string(),
format!("SELECT COUNT(*) FROM {TABLE_NAME};"),
);
ret.insert(
"fare_amt_by_passenger".to_string(),
format!("SELECT passenger_count, MIN(fare_amount), MAX(fare_amount), SUM(fare_amount) FROM {TABLE_NAME} GROUP BY passenger_count")
);
ret
HashMap::from([
(
"count_all".to_string(),
format!("SELECT COUNT(*) FROM {TABLE_NAME};"),
),
(
"fare_amt_by_passenger".to_string(),
format!("SELECT passenger_count, MIN(fare_amount), MAX(fare_amount), SUM(fare_amount) FROM {TABLE_NAME} GROUP BY passenger_count"),
)
])
}
async fn do_write(args: &Args, db: &Database) {
@@ -408,7 +411,8 @@ async fn do_write(args: &Args, db: &Database) {
let db = db.clone();
let mpb = multi_progress_bar.clone();
let pb_style = progress_bar_style.clone();
write_jobs.spawn(async move { write_data(batch_size, &db, path, mpb, pb_style).await });
let _ = write_jobs
.spawn(async move { write_data(batch_size, &db, path, mpb, pb_style).await });
}
}
while write_jobs.join_next().await.is_some() {
@@ -417,7 +421,8 @@ async fn do_write(args: &Args, db: &Database) {
let db = db.clone();
let mpb = multi_progress_bar.clone();
let pb_style = progress_bar_style.clone();
write_jobs.spawn(async move { write_data(batch_size, &db, path, mpb, pb_style).await });
let _ = write_jobs
.spawn(async move { write_data(batch_size, &db, path, mpb, pb_style).await });
}
}
}

View File

@@ -10,6 +10,8 @@ rpc_addr = "127.0.0.1:3001"
rpc_hostname = "127.0.0.1"
# The number of gRPC server worker threads, 8 by default.
rpc_runtime_size = 8
# Interval for sending heartbeat messages to the Metasrv in milliseconds, 5000 by default.
heartbeat_interval_millis = 5000
# Metasrv client options.
[meta_client_options]
@@ -24,9 +26,10 @@ tcp_nodelay = true
# WAL options, see `standalone.example.toml`.
[wal]
dir = "/tmp/greptimedb/wal"
file_size = "1GB"
purge_threshold = "50GB"
# WAL data directory
# dir = "/tmp/greptimedb/wal"
file_size = "256MB"
purge_threshold = "4GB"
purge_interval = "10m"
read_batch_size = 128
sync_write = false
@@ -34,7 +37,9 @@ sync_write = false
# Storage options, see `standalone.example.toml`.
[storage]
type = "File"
data_dir = "/tmp/greptimedb/data/"
data_home = "/tmp/greptimedb/"
# TTL for all tables. Disabled by default.
# global_ttl = "7d"
# Compaction options, see `standalone.example.toml`.
[storage.compaction]
@@ -48,16 +53,31 @@ max_purge_tasks = 32
# Create a checkpoint every <checkpoint_margin> actions.
checkpoint_margin = 10
# Region manifest logs and checkpoints gc execution duration
gc_duration = '30s'
gc_duration = '10m'
# Whether to try creating a manifest checkpoint on region opening
checkpoint_on_startup = false
# Storage flush options
[storage.flush]
# Max inflight flush tasks.
max_flush_tasks = 8
# Default write buffer size for a region.
region_write_buffer_size = "32MB"
# Interval to check whether a region needs flush.
picker_schedule_interval = "5m"
# Interval to auto flush a region if it has not flushed yet.
auto_flush_interval = "1h"
# Global write buffer size for all regions.
global_write_buffer_size = "1GB"
# Procedure storage options, see `standalone.example.toml`.
[procedure]
max_retry_times = 3
retry_delay = "500ms"
# Log options, see `standalone.example.toml`
[logging]
dir = "/tmp/greptimedb/logs"
level = "info"
# Log options
# [logging]
# Specify logs directory.
# dir = "/tmp/greptimedb/logs"
# Specify the log level [info | debug | error | warn]
# level = "info"

View File

@@ -1,10 +1,15 @@
# Node running mode, see `standalone.example.toml`.
mode = "distributed"
# Interval for sending heartbeat task to the Metasrv in milliseconds, 5000 by default.
heartbeat_interval_millis = 5000
# Interval for retry sending heartbeat task in milliseconds, 5000 by default.
retry_interval_millis = 5000
# HTTP server options, see `standalone.example.toml`.
[http_options]
addr = "127.0.0.1:4000"
timeout = "30s"
body_limit = "64MB"
# gRPC server options, see `standalone.example.toml`.
[grpc_options]
@@ -58,6 +63,6 @@ connect_timeout_millis = 5000
tcp_nodelay = true
# Log options, see `standalone.example.toml`
[logging]
dir = "/tmp/greptimedb/logs"
level = "info"
# [logging]
# dir = "/tmp/greptimedb/logs"
# level = "info"

View File

@@ -15,6 +15,6 @@ selector = "LeaseBased"
use_memory_store = false
# Log options, see `standalone.example.toml`
[logging]
dir = "/tmp/greptimedb/logs"
level = "info"
# [logging]
# dir = "/tmp/greptimedb/logs"
# level = "info"

View File

@@ -9,6 +9,9 @@ enable_memory_catalog = false
addr = "127.0.0.1:4000"
# HTTP request timeout, 30s by default.
timeout = "30s"
# HTTP request body limit, 64MB by default.
# the following units are supported: B, KB, KiB, MB, MiB, GB, GiB, TB, TiB, PB, PiB
body_limit = "64MB"
# gRPC server options.
[grpc_options]
@@ -78,12 +81,12 @@ addr = "127.0.0.1:4004"
# WAL options.
[wal]
# WAL data directory.
dir = "/tmp/greptimedb/wal"
# WAL data directory
# dir = "/tmp/greptimedb/wal"
# WAL file size in bytes.
file_size = "1GB"
# WAL purge threshold in bytes.
purge_threshold = "50GB"
file_size = "256MB"
# WAL purge threshold.
purge_threshold = "4GB"
# WAL purge interval in seconds.
purge_interval = "10m"
# WAL read batch size.
@@ -96,7 +99,9 @@ sync_write = false
# Storage type.
type = "File"
# Data directory, "/tmp/greptimedb/data" by default.
data_dir = "/tmp/greptimedb/data/"
data_home = "/tmp/greptimedb/"
# TTL for all tables. Disabled by default.
# global_ttl = "7d"
# Compaction options.
[storage.compaction]
@@ -113,10 +118,23 @@ max_purge_tasks = 32
# Create a checkpoint every <checkpoint_margin> actions.
checkpoint_margin = 10
# Region manifest logs and checkpoints gc execution duration
gc_duration = '30s'
gc_duration = '10m'
# Whether to try creating a manifest checkpoint on region opening
checkpoint_on_startup = false
# Storage flush options
[storage.flush]
# Max inflight flush tasks.
max_flush_tasks = 8
# Default write buffer size for a region.
region_write_buffer_size = "32MB"
# Interval to check whether a region needs flush.
picker_schedule_interval = "5m"
# Interval to auto flush a region if it has not flushed yet.
auto_flush_interval = "1h"
# Global write buffer size for all regions.
global_write_buffer_size = "1GB"
# Procedure storage options.
[procedure]
# Procedure max retry time.
@@ -125,8 +143,8 @@ max_retry_times = 3
retry_delay = "500ms"
# Log options
[logging]
# [logging]
# Specify logs directory.
dir = "/tmp/greptimedb/logs"
# dir = "/tmp/greptimedb/logs"
# Specify the log level [info | debug | error | warn]
level = "debug"
# level = "info"

View File

@@ -8,11 +8,14 @@ RUN apt-get update && apt-get install -y \
libssl-dev \
protobuf-compiler \
curl \
git \
build-essential \
pkg-config \
python3 \
python3-dev \
&& pip install pyarrow
python3-pip \
&& pip3 install --upgrade pip \
&& pip3 install pyarrow
# Install Rust.
SHELL ["/bin/bash", "-c"]

View File

@@ -0,0 +1,29 @@
FROM centos:7
ENV LANG en_US.utf8
WORKDIR /greptimedb
RUN sed -e 's|^mirrorlist=|#mirrorlist=|g' \
-e 's|^#baseurl=http://mirror.centos.org/centos|baseurl=http://mirrors.tuna.tsinghua.edu.cn/centos|g' \
-i.bak \
/etc/yum.repos.d/CentOS-*.repo
# Install dependencies
RUN ulimit -n 1024000 && yum groupinstall -y 'Development Tools'
RUN yum install -y epel-release \
openssl \
openssl-devel \
centos-release-scl \
rh-python38 \
rh-python38-python-devel
# Install protoc
RUN curl -LO https://github.com/protocolbuffers/protobuf/releases/download/v3.15.8/protoc-3.15.8-linux-x86_64.zip
RUN unzip protoc-3.15.8-linux-x86_64.zip -d /usr/local/
# Install Rust
SHELL ["/bin/bash", "-c"]
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
ENV PATH /opt/rh/rh-python38/root/usr/bin:/usr/local/bin:/root/.cargo/bin/:$PATH
CMD ["cargo", "build", "--release"]

View File

@@ -8,6 +8,7 @@ RUN apt-get update && apt-get install -y \
libssl-dev \
protobuf-compiler \
curl \
git \
build-essential \
pkg-config \
wget

View File

@@ -0,0 +1,137 @@
---
Feature Name: distributed-planner
Tracking Issue: TBD
Date: 2023-05-09
Author: "Ruihang Xia <waynestxia@gmail.com>"
---
Distributed Planner
-------------------
# Summary
Enhance the logical planner to be aware of the distributed, multi-region table topology, so that execution follows a "push computation down" model rather than the current "pull data up" manner.
# Motivation
Querying distributively leverages GreptimeDB's architecture to process datasets that exceed the capacity of a single node, or to accelerate query execution by running it in parallel. This task includes two sub-tasks:
- Be able to transform the plan so that as much computation as possible is pushed down to the data source.
- Be able to handle pipeline breakers (like `Join` or `Sort`) across multiple computation nodes.
This is a relatively complex topic. To keep this RFC focused, I'll concentrate on the first sub-task.
# Details
## Background: Partition and Region
GreptimeDB supports table partitioning, where the partition rule is set during table creation. Each partition can be further divided into one or more physical storage units known as "regions". Both partitions and regions are divided based on rows:
``` text
┌────────────────────────────────────┐
│ │
│ Table │
│ │
└─────┬────────────┬────────────┬────┘
│ │ │
│ │ │
┌─────▼────┐ ┌─────▼────┐ ┌─────▼────┐
│ Region 1 │ │ Region 2 │ │ Region 3 │
└──────────┘ └──────────┘ └──────────┘
Row 1~10 Row 11~20 Row 21~30
```
Generally speaking, the region is the minimum unit of data distribution, and we can also use it as the unit for distributing computation. This greatly simplifies the routing logic of the distributed planner: computation is always scheduled to the node that currently opens the corresponding region. It is also easy to scale out more compute nodes, since GreptimeDB's data is persisted on a shared storage backend like S3, but that is a bit beyond the scope of this topic.
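To make the routing idea concrete, here is a minimal, self-contained sketch (hypothetical types, not GreptimeDB's actual partition API) of mapping a row to the region that owns it under a row-range rule roughly mirroring the figure above:
``` rust
/// A region owns a half-open row range, mirroring the figure above.
struct Region {
    id: u32,
    start: u64, // inclusive
    end: u64,   // exclusive
}

/// Route a row number to the region that stores it, if any.
fn route(regions: &[Region], row: u64) -> Option<u32> {
    regions
        .iter()
        .find(|r| r.start <= row && row < r.end)
        .map(|r| r.id)
}

fn main() {
    let regions = vec![
        Region { id: 1, start: 0, end: 10 },
        Region { id: 2, start: 10, end: 20 },
        Region { id: 3, start: 20, end: 30 },
    ];
    assert_eq!(route(&regions, 15), Some(2));
    assert_eq!(route(&regions, 42), None);
}
```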
## Background: Commutativity
Commutativity is an attribute that describes whether two operations can exchange their application order: $P1(P2(R)) \Leftrightarrow P2(P1(R))$. If the equation holds, we can transform one expression into another form without changing its result. This is useful for rewriting SQL expressions, and is the theoretical basis of this RFC.
Take this SQL as an example
``` sql
SELECT a FROM t WHERE a > 10;
```
Since projection and filter are commutative (todo: latex), it can be translated into either of the following two equivalent plan trees:
```text
┌─────────────┐ ┌─────────────┐
│Projection(a)│ │Filter(a>10) │
└──────▲──────┘ └──────▲──────┘
│ │
┌──────┴──────┐ ┌──────┴──────┐
│Filter(a>10) │ │Projection(a)│
└──────▲──────┘ └──────▲──────┘
│ │
┌──────┴──────┐ ┌──────┴──────┐
│ TableScan │ │ TableScan │
└─────────────┘ └─────────────┘
```
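The rewrite itself is mechanical. The following is a toy sketch, using a hypothetical three-variant plan enum rather than DataFusion's real types, that swaps a projection with the filter beneath it:
``` rust
/// A toy logical plan, just enough to show the swap.
#[derive(Debug)]
enum Plan {
    TableScan,
    Filter { pred: String, input: Box<Plan> },
    Projection { cols: Vec<String>, input: Box<Plan> },
}

/// Rewrite `Projection(Filter(x))` into `Filter(Projection(x))`.
/// This is only valid because projection and filter commute (assuming the
/// filter predicate references projected columns).
fn swap_projection_filter(plan: Plan) -> Plan {
    match plan {
        Plan::Projection { cols, input } => match *input {
            Plan::Filter { pred, input } => Plan::Filter {
                pred,
                input: Box::new(Plan::Projection { cols, input }),
            },
            other => Plan::Projection { cols, input: Box::new(other) },
        },
        other => other,
    }
}

fn main() {
    // SELECT a FROM t WHERE a > 10, written as Projection over Filter.
    let plan = Plan::Projection {
        cols: vec!["a".to_string()],
        input: Box::new(Plan::Filter {
            pred: "a > 10".to_string(),
            input: Box::new(Plan::TableScan),
        }),
    };
    // Prints the equivalent Filter-over-Projection tree.
    println!("{:?}", swap_projection_filter(plan));
}
```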
## Merge Operation
This RFC proposes adding a new expression node, `MergeScan`, to merge results from several regions in the frontend. It wraps the abstraction of remote data and execution, and exposes a `TableScan` interface to the upper level.
``` text
┌───────┼───────┐
│ │ │
│ ┌──┴──┐ │
│ └──▲──┘ │
│ │ │
│ ┌──┴──┐ │
│ └──▲──┘ │ ┌─────────────────────────────┐
│ │ │ │ │
│ ┌────┴────┐ │ │ ┌──────────┐ ┌───┐ ┌───┐ │
│ │MergeScan◄──┼────┤ │ Region 1 │ │ │ .. │ │ │
│ └─────────┘ │ │ └──────────┘ └───┘ └───┘ │
│ │ │ │
└─Frontend──────┘ └─Remote-Sources──────────────┘
```
This merge operation simply chains all the underlying remote data sources and returns `RecordBatch`es, just like a coalesce op. Each remote source is a gRPC query to a datanode via the substrait logical plan interface; the plan it carries is transformed and split from the original query that arrives at the frontend.
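A minimal sketch of this chaining behaviour is shown below, with stand-in types instead of the real gRPC sources and Arrow `RecordBatch`:
``` rust
/// Stand-in for an Arrow RecordBatch.
#[derive(Debug)]
struct RecordBatch(Vec<i64>);

/// Each remote source is, in the real design, a gRPC query to a datanode
/// carrying a substrait plan; here it is just a set of prefetched batches.
struct RegionSource {
    batches: Vec<RecordBatch>,
}

/// MergeScan: expose one TableScan-like stream over all region sources by
/// simply chaining them, like a coalesce operator.
fn merge_scan(sources: Vec<RegionSource>) -> impl Iterator<Item = RecordBatch> {
    sources.into_iter().flat_map(|s| s.batches.into_iter())
}

fn main() {
    let sources = vec![
        RegionSource { batches: vec![RecordBatch(vec![1, 2]), RecordBatch(vec![3])] },
        RegionSource { batches: vec![RecordBatch(vec![4, 5])] },
    ];
    for batch in merge_scan(sources) {
        println!("{batch:?}");
    }
}
```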
## Commutativity of MergeScan
The position of `MergeScan` is the key to the distributed plan. The closer it sits to the underlying `TableScan`, the less computation is taken by the datanodes, so the goal is to pull `MergeScan` up as far as possible. "Pull up" means exchanging `MergeScan` with its parent node in the plan tree, which requires checking the commutativity between the existing expression nodes and `MergeScan`. All the possibilities fall into five categories (see the sketch after this list):
- Commutative: $P1(P2(R)) \Leftrightarrow P2(P1(R))$
  - filter
  - projection
  - operations that match the partition key
- Partial Commutative: $P1(P2(R)) \Leftrightarrow P1(P2(P1(R)))$
  - $min(R) \rightarrow min(MERGE(min(R)))$
  - $max(R) \rightarrow max(MERGE(max(R)))$
- Conditional Commutative: $P1(P2(R)) \Leftrightarrow P3(P2(P1(R)))$
  - $count(R) \rightarrow sum(count(R))$
- Transformed Commutative: $P1(P2(R)) \Leftrightarrow P1(P3(R)) \Leftrightarrow P3(P1(R))$
  - $avg(R) \rightarrow sum(R)/count(R)$
- Non-commutative
  - sort
  - join
  - percentile
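These categories could be encoded as a plain enum plus a classification function over plan nodes. The following is only an illustrative sketch with hypothetical names, not the final API:
``` rust
/// How a node interacts with MergeScan when we try to pull MergeScan above it.
enum Commutativity {
    Commutative,
    /// P1(P2(R)) <=> P1(P2(P1(R))), e.g. min/max.
    PartialCommutative,
    /// P1(P2(R)) <=> P3(P2(P1(R))), e.g. count -> sum(count).
    ConditionalCommutative,
    /// P1 is rewritten into other operators first, e.g. avg -> sum/count.
    TransformedCommutative,
    NonCommutative,
}

/// Toy plan node kinds, just enough to drive the classification.
#[allow(dead_code)]
enum Node {
    Filter,
    Projection,
    Min,
    Max,
    Count,
    Avg,
    Sort,
    Join,
    Percentile,
}

/// Decide whether MergeScan can be exchanged with `node`.
fn classify(node: &Node) -> Commutativity {
    match node {
        Node::Filter | Node::Projection => Commutativity::Commutative,
        Node::Min | Node::Max => Commutativity::PartialCommutative,
        Node::Count => Commutativity::ConditionalCommutative,
        Node::Avg => Commutativity::TransformedCommutative,
        Node::Sort | Node::Join | Node::Percentile => Commutativity::NonCommutative,
    }
}

fn main() {
    assert!(matches!(classify(&Node::Filter), Commutativity::Commutative));
    assert!(matches!(classify(&Node::Sort), Commutativity::NonCommutative));
}
```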
## Steps to plan
After establishing the set of commutative relations for all expressions, we can begin transforming the logical plan. There are four steps:
- Add a merge node before table scan
- Evaluate commutativity in a bottom-up way, stop at the first non-commutative node
- Divide the TableScan to scan over partitions
- Execute
First, insert the `MergeScan` on top of the bottom `TableScan` node. Then examine commutativity, starting from the `MergeScan` node, and transform the plan tree based on the result, stopping at the first non-commutative node.
``` text
┌─────────────┐ ┌─────────────┐
│ Sort │ │ Sort │
└──────▲──────┘ └──────▲──────┘
│ │
┌─────────────┐ ┌──────┴──────┐ ┌──────┴──────┐
│ Sort │ │Projection(a)│ │ MergeScan │
└──────▲──────┘ └──────▲──────┘ └──────▲──────┘
│ │ │
┌──────┴──────┐ ┌──────┴──────┐ ┌──────┴──────┐
│Projection(a)│ │ MergeScan │ │Projection(a)│
└──────▲──────┘ └──────▲──────┘ └──────▲──────┘
│ │ │
┌──────┴──────┐ ┌──────┴──────┐ ┌──────┴──────┐
│ TableScan │ │ TableScan │ │ TableScan │
└─────────────┘ └─────────────┘ └─────────────┘
(a) (b) (c)
```
Then, in the physical planning phase, convert the sub-tree below `MergeScan` into a remote query request and dispatch it to all the regions, and let `MergeScan` receive the results and feed them to its parent node.
To keep the overall complexity down, any error in the procedure fails the entire query and cancels all other parts.
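The sketch below walks the four steps above on a linear (no-branch) plan shaped like figure (a)–(c), with illustrative node names; the per-region split and remote execution are out of scope here:
``` rust
/// Toy node kinds; the plan is a Vec ordered bottom-up (index 0 = TableScan).
#[allow(dead_code)]
#[derive(Debug, PartialEq)]
enum Node {
    TableScan,
    MergeScan,
    Projection,
    Filter,
    Sort,
}

/// Which nodes MergeScan can be exchanged with.
fn commutes_with_merge(node: &Node) -> bool {
    matches!(node, Node::Projection | Node::Filter)
}

fn plan_distributed(mut plan: Vec<Node>) -> Vec<Node> {
    // Step 1: insert a MergeScan right above the TableScan.
    plan.insert(1, Node::MergeScan);
    // Step 2: pull MergeScan up while its parent commutes with it,
    // stopping at the first non-commutative node (e.g. Sort).
    let mut i = 1;
    while i + 1 < plan.len() && commutes_with_merge(&plan[i + 1]) {
        plan.swap(i, i + 1);
        i += 1;
    }
    // Steps 3 and 4 (split the scan per region and execute remotely)
    // happen during physical planning and are omitted.
    plan
}

fn main() {
    let plan = vec![Node::TableScan, Node::Projection, Node::Sort];
    // Everything below MergeScan is shipped to the datanodes.
    assert_eq!(
        plan_distributed(plan),
        vec![Node::TableScan, Node::Projection, Node::MergeScan, Node::Sort]
    );
}
```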
# Alternatives
## Spill
If we only consider the ability to process large datasets, we could enable DataFusion's spill capability to temporarily persist intermediate data to disk, like "swap" memory. But this would lead to very slow performance and very large write amplification.
# Future Work
As described in the `Motivation` section, we can further explore the distributed planner at the physical execution level, by introducing mechanisms like Spark's shuffle to improve parallelism and reduce the stages of intermediate pipeline breakers.

View File

@@ -10,7 +10,7 @@ common-base = { path = "../common/base" }
common-error = { path = "../common/error" }
common-time = { path = "../common/time" }
datatypes = { path = "../datatypes" }
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "e8abf8241c908448dce595399e89c89a40d048bd" }
greptime-proto.workspace = true
prost.workspace = true
snafu = { version = "0.7", features = ["backtraces"] }
tonic.workspace = true

View File

@@ -41,7 +41,7 @@ pub enum Error {
))]
ConvertColumnDefaultConstraint {
column: String,
#[snafu(backtrace)]
location: Location,
source: datatypes::error::Error,
},
@@ -52,7 +52,7 @@ pub enum Error {
))]
InvalidColumnDefaultConstraint {
column: String,
#[snafu(backtrace)]
location: Location,
source: datatypes::error::Error,
},
}

View File

@@ -18,6 +18,10 @@ use datatypes::prelude::ConcreteDataType;
use datatypes::types::TimestampType;
use datatypes::value::Value;
use datatypes::vectors::VectorRef;
use greptime_proto::v1::ddl_request::Expr;
use greptime_proto::v1::greptime_request::Request;
use greptime_proto::v1::query_request::Query;
use greptime_proto::v1::{DdlRequest, QueryRequest};
use snafu::prelude::*;
use crate::error::{self, Result};
@@ -224,6 +228,38 @@ pub fn push_vals(column: &mut Column, origin_count: usize, vector: VectorRef) {
column.null_mask = null_mask.into_vec();
}
/// Returns the type name of the [Request].
pub fn request_type(request: &Request) -> &'static str {
match request {
Request::Inserts(_) => "inserts",
Request::Query(query_req) => query_request_type(query_req),
Request::Ddl(ddl_req) => ddl_request_type(ddl_req),
Request::Delete(_) => "delete",
}
}
/// Returns the type name of the [QueryRequest].
fn query_request_type(request: &QueryRequest) -> &'static str {
match request.query {
Some(Query::Sql(_)) => "query.sql",
Some(Query::LogicalPlan(_)) => "query.logical_plan",
Some(Query::PromRangeQuery(_)) => "query.prom_range",
None => "query.empty",
}
}
/// Returns the type name of the [DdlRequest].
fn ddl_request_type(request: &DdlRequest) -> &'static str {
match request.expr {
Some(Expr::CreateDatabase(_)) => "ddl.create_database",
Some(Expr::CreateTable(_)) => "ddl.create_table",
Some(Expr::Alter(_)) => "ddl.alter",
Some(Expr::DropTable(_)) => "ddl.drop_table",
Some(Expr::FlushTable(_)) => "ddl.flush_table",
None => "ddl.empty",
}
}
#[cfg(test)]
mod tests {
use std::sync::Arc;

View File

@@ -23,4 +23,5 @@ pub mod prometheus {
pub mod v1;
pub use greptime_proto;
pub use prost::DecodeError;

View File

@@ -4,6 +4,9 @@ version.workspace = true
edition.workspace = true
license.workspace = true
[features]
testing = []
[dependencies]
api = { path = "../api" }
arc-swap = "1.0"
@@ -14,6 +17,7 @@ backoff = { version = "0.4", features = ["tokio"] }
common-catalog = { path = "../common/catalog" }
common-error = { path = "../common/error" }
common-grpc = { path = "../common/grpc" }
common-meta = { path = "../common/meta" }
common-query = { path = "../common/query" }
common-recordbatch = { path = "../common/recordbatch" }
common-runtime = { path = "../common/runtime" }
@@ -28,6 +32,7 @@ key-lock = "0.1"
lazy_static = "1.4"
meta-client = { path = "../meta-client" }
metrics.workspace = true
moka = { version = "0.11", features = ["future"] }
parking_lot = "0.12"
regex = "1.6"
serde = "1.0"
@@ -35,10 +40,12 @@ serde_json = "1.0"
session = { path = "../session" }
snafu = { version = "0.7", features = ["backtraces"] }
storage = { path = "../storage" }
store-api = { path = "../store-api" }
table = { path = "../table" }
tokio.workspace = true
[dev-dependencies]
catalog = { path = ".", features = ["testing"] }
common-test-util = { path = "../common/test-util" }
chrono.workspace = true
log-store = { path = "../log-store" }

View File

@@ -32,18 +32,18 @@ pub enum Error {
source
))]
CompileScriptInternal {
#[snafu(backtrace)]
location: Location,
source: BoxedError,
},
#[snafu(display("Failed to open system catalog table, source: {}", source))]
OpenSystemCatalog {
#[snafu(backtrace)]
location: Location,
source: table::error::Error,
},
#[snafu(display("Failed to create system catalog table, source: {}", source))]
CreateSystemCatalog {
#[snafu(backtrace)]
location: Location,
source: table::error::Error,
},
@@ -54,7 +54,7 @@ pub enum Error {
))]
CreateTable {
table_info: String,
#[snafu(backtrace)]
location: Location,
source: table::error::Error,
},
@@ -94,7 +94,7 @@ pub enum Error {
#[snafu(display("Table engine not found: {}, source: {}", engine_name, source))]
TableEngineNotFound {
engine_name: String,
#[snafu(backtrace)]
location: Location,
source: table::error::Error,
},
@@ -132,7 +132,7 @@ pub enum Error {
#[snafu(display("Failed to open table, table info: {}, source: {}", table_info, source))]
OpenTable {
table_info: String,
#[snafu(backtrace)]
location: Location,
source: table::error::Error,
},
@@ -147,13 +147,13 @@ pub enum Error {
#[snafu(display("Failed to read system catalog table records"))]
ReadSystemCatalog {
#[snafu(backtrace)]
location: Location,
source: common_recordbatch::error::Error,
},
#[snafu(display("Failed to create recordbatch, source: {}", source))]
CreateRecordBatch {
#[snafu(backtrace)]
location: Location,
source: common_recordbatch::error::Error,
},
@@ -162,7 +162,7 @@ pub enum Error {
source
))]
InsertCatalogRecord {
#[snafu(backtrace)]
location: Location,
source: table::error::Error,
},
@@ -173,7 +173,7 @@ pub enum Error {
))]
DeregisterTable {
request: DeregisterTableRequest,
#[snafu(backtrace)]
location: Location,
source: table::error::Error,
},
@@ -182,69 +182,49 @@ pub enum Error {
#[snafu(display("Failed to scan system catalog table, source: {}", source))]
SystemCatalogTableScan {
#[snafu(backtrace)]
location: Location,
source: table::error::Error,
},
#[snafu(display("Failure during SchemaProvider operation, source: {}", source))]
SchemaProviderOperation {
#[snafu(backtrace)]
source: BoxedError,
},
#[snafu(display("{source}"))]
Internal {
#[snafu(backtrace)]
location: Location,
source: BoxedError,
},
#[snafu(display(
"Failed to upgrade weak catalog manager reference. location: {}",
location
))]
UpgradeWeakCatalogManagerRef { location: Location },
#[snafu(display("Failed to execute system catalog table scan, source: {}", source))]
SystemCatalogTableScanExec {
#[snafu(backtrace)]
location: Location,
source: common_query::error::Error,
},
#[snafu(display("Cannot parse catalog value, source: {}", source))]
InvalidCatalogValue {
#[snafu(backtrace)]
location: Location,
source: common_catalog::error::Error,
},
#[snafu(display("Failed to perform metasrv operation, source: {}", source))]
MetaSrv {
#[snafu(backtrace)]
location: Location,
source: meta_client::error::Error,
},
#[snafu(display("Invalid table info in catalog, source: {}", source))]
InvalidTableInfoInCatalog {
#[snafu(backtrace)]
location: Location,
source: datatypes::error::Error,
},
#[snafu(display("Failed to serialize or deserialize catalog entry: {}", source))]
CatalogEntrySerde {
#[snafu(backtrace)]
source: common_catalog::error::Error,
},
#[snafu(display("Illegal access to catalog: {} and schema: {}", catalog, schema))]
QueryAccessDenied { catalog: String, schema: String },
#[snafu(display(
"Failed to get region stats, catalog: {}, schema: {}, table: {}, source: {}",
catalog,
schema,
table,
source
))]
RegionStats {
catalog: String,
schema: String,
table: String,
#[snafu(backtrace)]
source: table::error::Error,
},
#[snafu(display("Invalid system table definition: {err_msg}"))]
InvalidSystemTableDef { err_msg: String, location: Location },
@@ -257,9 +237,18 @@ pub enum Error {
#[snafu(display("Table schema mismatch, source: {}", source))]
TableSchemaMismatch {
#[snafu(backtrace)]
location: Location,
source: table::error::Error,
},
#[snafu(display("A generic error has occurred, msg: {}", msg))]
Generic { msg: String, location: Location },
#[snafu(display("Table metadata manager error: {}", source))]
TableMetadataManager {
source: common_meta::error::Error,
location: Location,
},
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -280,14 +269,14 @@ impl ErrorExt for Error {
| Error::EmptyValue { .. }
| Error::ValueDeserialize { .. } => StatusCode::StorageUnavailable,
Error::SystemCatalogTypeMismatch { .. } => StatusCode::Internal,
Error::Generic { .. }
| Error::SystemCatalogTypeMismatch { .. }
| Error::UpgradeWeakCatalogManagerRef { .. } => StatusCode::Internal,
Error::ReadSystemCatalog { source, .. } | Error::CreateRecordBatch { source } => {
source.status_code()
}
Error::InvalidCatalogValue { source, .. } | Error::CatalogEntrySerde { source } => {
Error::ReadSystemCatalog { source, .. } | Error::CreateRecordBatch { source, .. } => {
source.status_code()
}
Error::InvalidCatalogValue { source, .. } => source.status_code(),
Error::TableExists { .. } => StatusCode::TableAlreadyExists,
Error::TableNotExist { .. } => StatusCode::TableNotFound,
@@ -301,21 +290,21 @@ impl ErrorExt for Error {
| Error::OpenTable { source, .. }
| Error::CreateTable { source, .. }
| Error::DeregisterTable { source, .. }
| Error::RegionStats { source, .. }
| Error::TableSchemaMismatch { source } => source.status_code(),
| Error::TableSchemaMismatch { source, .. } => source.status_code(),
Error::MetaSrv { source, .. } => source.status_code(),
Error::SystemCatalogTableScan { source } => source.status_code(),
Error::SystemCatalogTableScanExec { source } => source.status_code(),
Error::InvalidTableInfoInCatalog { source } => source.status_code(),
Error::SystemCatalogTableScan { source, .. } => source.status_code(),
Error::SystemCatalogTableScanExec { source, .. } => source.status_code(),
Error::InvalidTableInfoInCatalog { source, .. } => source.status_code(),
Error::CompileScriptInternal { source }
| Error::SchemaProviderOperation { source }
| Error::Internal { source } => source.status_code(),
Error::CompileScriptInternal { source, .. } | Error::Internal { source, .. } => {
source.status_code()
}
Error::Unimplemented { .. } | Error::NotSupported { .. } => StatusCode::Unsupported,
Error::QueryAccessDenied { .. } => StatusCode::AccessDenied,
Error::Datafusion { .. } => StatusCode::EngineExecuteQuery,
Error::TableMetadataManager { source, .. } => source.status_code(),
}
}

View File

@@ -67,6 +67,7 @@ pub fn build_schema_prefix(catalog_name: impl AsRef<str>) -> String {
format!("{SCHEMA_KEY_PREFIX}-{}-", catalog_name.as_ref())
}
/// Global table info has only one key across all datanodes so it does not have `node_id` field.
pub fn build_table_global_prefix(
catalog_name: impl AsRef<str>,
schema_name: impl AsRef<str>,
@@ -78,6 +79,7 @@ pub fn build_table_global_prefix(
)
}
/// Regional table info varies between datanode, so it contains a `node_id` field.
pub fn build_table_regional_prefix(
catalog_name: impl AsRef<str>,
schema_name: impl AsRef<str>,
@@ -91,7 +93,7 @@ pub fn build_table_regional_prefix(
}
/// Table global info has only one key across all datanodes so it does not have `node_id` field.
#[derive(Clone)]
#[derive(Clone, Hash, Eq, PartialEq)]
pub struct TableGlobalKey {
pub catalog_name: String,
pub schema_name: String,
@@ -124,6 +126,14 @@ impl TableGlobalKey {
table_name: captures[3].to_string(),
})
}
pub fn to_raw_key(&self) -> Vec<u8> {
self.to_string().into_bytes()
}
pub fn try_from_raw_key(key: &[u8]) -> Result<Self, Error> {
Self::parse(String::from_utf8_lossy(key))
}
}
/// Table global info contains necessary info for a datanode to create table regions, including
@@ -141,6 +151,10 @@ impl TableGlobalValue {
pub fn table_id(&self) -> TableId {
self.table_info.ident.table_id
}
pub fn engine(&self) -> &str {
&self.table_info.meta.engine
}
}
/// Table regional info that varies between datanode, so it contains a `node_id` field.
@@ -189,6 +203,9 @@ impl TableRegionalKey {
/// region ids allocated by metasrv.
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct TableRegionalValue {
// We can remove the `Option` from the table id once all regional values
// stored in meta have table ids.
pub table_id: Option<TableId>,
pub version: TableVersion,
pub regions_ids: Vec<u32>,
pub engine_name: Option<String>,
@@ -375,6 +392,6 @@ mod tests {
#[test]
fn test_table_global_value_compatibility() {
let s = r#"{"node_id":1,"regions_id_map":{"1":[0]},"table_info":{"ident":{"table_id":1098,"version":1},"name":"container_cpu_limit","desc":"Created on insertion","catalog_name":"greptime","schema_name":"dd","meta":{"schema":{"column_schemas":[{"name":"container_id","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"container_name","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"docker_image","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"host","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"image_name","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"image_tag","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"interval","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"runtime","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"short_image","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"type","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"dd_value","data_type":{"Float64":{}},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"ts","data_type":{"Timestamp":{"Millisecond":null}},"is_nullable":false,"is_time_index":true,"default_constraint":null,"metadata":{"greptime:time_index":"true"}},{"name":"git.repository_url","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}}],"timestamp_index":11,"version":1},"primary_key_indices":[0,1,2,3,4,5,6,7,8,9,12],"value_indices":[10,11],"engine":"mito","next_column_id":12,"region_numbers":[],"engine_options":{},"options":{},"created_on":"1970-01-01T00:00:00Z"},"table_type":"Base"}}"#;
TableGlobalValue::parse(s).unwrap();
let _ = TableGlobalValue::parse(s).unwrap();
}
}

View File

@@ -16,87 +16,122 @@ mod columns;
mod tables;
use std::any::Any;
use std::sync::Arc;
use std::sync::{Arc, Weak};
use async_trait::async_trait;
use datafusion::datasource::streaming::{PartitionStream, StreamingTable};
use common_error::prelude::BoxedError;
use common_recordbatch::{RecordBatchStreamAdaptor, SendableRecordBatchStream};
use datatypes::schema::SchemaRef;
use futures_util::StreamExt;
use snafu::ResultExt;
use table::table::adapter::TableAdapter;
use table::TableRef;
use store_api::storage::ScanRequest;
use table::error::{SchemaConversionSnafu, TablesRecordBatchSnafu};
use table::{Result as TableResult, Table, TableRef};
use self::columns::InformationSchemaColumns;
use crate::error::{DatafusionSnafu, Result, TableSchemaMismatchSnafu};
use crate::error::Result;
use crate::information_schema::tables::InformationSchemaTables;
use crate::{CatalogProviderRef, SchemaProvider};
use crate::CatalogManager;
const TABLES: &str = "tables";
const COLUMNS: &str = "columns";
pub(crate) struct InformationSchemaProvider {
pub struct InformationSchemaProvider {
catalog_name: String,
catalog_provider: CatalogProviderRef,
tables: Vec<String>,
catalog_manager: Weak<dyn CatalogManager>,
}
impl InformationSchemaProvider {
pub(crate) fn new(catalog_name: String, catalog_provider: CatalogProviderRef) -> Self {
pub fn new(catalog_name: String, catalog_manager: Weak<dyn CatalogManager>) -> Self {
Self {
catalog_name,
catalog_provider,
tables: vec![TABLES.to_string(), COLUMNS.to_string()],
catalog_manager,
}
}
}
#[async_trait]
impl SchemaProvider for InformationSchemaProvider {
fn as_any(&self) -> &dyn Any {
self
}
async fn table_names(&self) -> Result<Vec<String>> {
Ok(self.tables.clone())
}
async fn table(&self, name: &str) -> Result<Option<TableRef>> {
let table = match name.to_ascii_lowercase().as_ref() {
TABLES => {
let inner = Arc::new(InformationSchemaTables::new(
self.catalog_name.clone(),
self.catalog_provider.clone(),
));
Arc::new(
StreamingTable::try_new(inner.schema().clone(), vec![inner]).with_context(
|_| DatafusionSnafu {
msg: format!("Failed to get InformationSchema table '{name}'"),
},
)?,
)
}
COLUMNS => {
let inner = Arc::new(InformationSchemaColumns::new(
self.catalog_name.clone(),
self.catalog_provider.clone(),
));
Arc::new(
StreamingTable::try_new(inner.schema().clone(), vec![inner]).with_context(
|_| DatafusionSnafu {
msg: format!("Failed to get InformationSchema table '{name}'"),
},
)?,
)
}
impl InformationSchemaProvider {
pub fn table(&self, name: &str) -> Result<Option<TableRef>> {
let stream_builder = match name.to_ascii_lowercase().as_ref() {
TABLES => Arc::new(InformationSchemaTables::new(
self.catalog_name.clone(),
self.catalog_manager.clone(),
)) as _,
COLUMNS => Arc::new(InformationSchemaColumns::new(
self.catalog_name.clone(),
self.catalog_manager.clone(),
)) as _,
_ => {
return Ok(None);
}
};
let table = TableAdapter::new(table).context(TableSchemaMismatchSnafu)?;
Ok(Some(Arc::new(table)))
}
async fn table_exist(&self, name: &str) -> Result<bool> {
let normalized_name = name.to_ascii_lowercase();
Ok(self.tables.contains(&normalized_name))
Ok(Some(Arc::new(InformationTable::new(stream_builder))))
}
}
// TODO(ruihang): make it a more generic trait:
// https://github.com/GreptimeTeam/greptimedb/pull/1639#discussion_r1205001903
pub trait InformationStreamBuilder: Send + Sync {
fn to_stream(&self) -> Result<SendableRecordBatchStream>;
fn schema(&self) -> SchemaRef;
}
pub struct InformationTable {
stream_builder: Arc<dyn InformationStreamBuilder>,
}
impl InformationTable {
pub fn new(stream_builder: Arc<dyn InformationStreamBuilder>) -> Self {
Self { stream_builder }
}
}
#[async_trait]
impl Table for InformationTable {
fn as_any(&self) -> &dyn Any {
self
}
fn schema(&self) -> SchemaRef {
self.stream_builder.schema()
}
fn table_info(&self) -> table::metadata::TableInfoRef {
unreachable!("Should not call table_info() of InformationTable directly")
}
async fn scan_to_stream(&self, request: ScanRequest) -> TableResult<SendableRecordBatchStream> {
let projection = request.projection;
let projected_schema = if let Some(projection) = &projection {
Arc::new(
self.schema()
.try_project(projection)
.context(SchemaConversionSnafu)?,
)
} else {
self.schema()
};
let stream = self
.stream_builder
.to_stream()
.map_err(BoxedError::new)
.context(TablesRecordBatchSnafu)?
.map(move |batch| {
batch.and_then(|batch| {
if let Some(projection) = &projection {
batch.try_project(projection)
} else {
Ok(batch)
}
})
});
let stream = RecordBatchStreamAdaptor {
schema: projected_schema,
stream: Box::pin(stream),
output_ordering: None,
};
Ok(Box::pin(stream))
}
}

View File

@@ -12,14 +12,16 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use std::sync::Arc;
use std::sync::{Arc, Weak};
use arrow_schema::SchemaRef as ArrowSchemaRef;
use common_catalog::consts::{
SEMANTIC_TYPE_FIELD, SEMANTIC_TYPE_PRIMARY_KEY, SEMANTIC_TYPE_TIME_INDEX,
};
use common_error::prelude::BoxedError;
use common_query::physical_plan::TaskContext;
use common_recordbatch::RecordBatch;
use common_recordbatch::adapter::RecordBatchStreamAdapter;
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
use datafusion::datasource::streaming::PartitionStream as DfPartitionStream;
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
@@ -27,15 +29,18 @@ use datatypes::prelude::{ConcreteDataType, DataType};
use datatypes::scalars::ScalarVectorBuilder;
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
use datatypes::vectors::{StringVectorBuilder, VectorRef};
use snafu::ResultExt;
use snafu::{OptionExt, ResultExt};
use crate::error::{CreateRecordBatchSnafu, Result};
use crate::CatalogProviderRef;
use super::InformationStreamBuilder;
use crate::error::{
CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
};
use crate::CatalogManager;
pub(super) struct InformationSchemaColumns {
schema: SchemaRef,
catalog_name: String,
catalog_provider: CatalogProviderRef,
catalog_manager: Weak<dyn CatalogManager>,
}
const TABLE_CATALOG: &str = "table_catalog";
@@ -46,7 +51,7 @@ const DATA_TYPE: &str = "data_type";
const SEMANTIC_TYPE: &str = "semantic_type";
impl InformationSchemaColumns {
pub(super) fn new(catalog_name: String, catalog_provider: CatalogProviderRef) -> Self {
pub(super) fn new(catalog_name: String, catalog_manager: Weak<dyn CatalogManager>) -> Self {
let schema = Arc::new(Schema::new(vec![
ColumnSchema::new(TABLE_CATALOG, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(TABLE_SCHEMA, ConcreteDataType::string_datatype(), false),
@@ -58,7 +63,7 @@ impl InformationSchemaColumns {
Self {
schema,
catalog_name,
catalog_provider,
catalog_manager,
}
}
@@ -66,15 +71,41 @@ impl InformationSchemaColumns {
InformationSchemaColumnsBuilder::new(
self.schema.clone(),
self.catalog_name.clone(),
self.catalog_provider.clone(),
self.catalog_manager.clone(),
)
}
}
impl InformationStreamBuilder for InformationSchemaColumns {
fn schema(&self) -> SchemaRef {
self.schema.clone()
}
fn to_stream(&self) -> Result<SendableRecordBatchStream> {
let schema = self.schema.arrow_schema().clone();
let mut builder = self.builder();
let stream = Box::pin(DfRecordBatchStreamAdapter::new(
schema,
futures::stream::once(async move {
builder
.make_tables()
.await
.map(|x| x.into_df_record_batch())
.map_err(Into::into)
}),
));
Ok(Box::pin(
RecordBatchStreamAdapter::try_new(stream)
.map_err(BoxedError::new)
.context(InternalSnafu)?,
))
}
}
struct InformationSchemaColumnsBuilder {
schema: SchemaRef,
catalog_name: String,
catalog_provider: CatalogProviderRef,
catalog_manager: Weak<dyn CatalogManager>,
catalog_names: StringVectorBuilder,
schema_names: StringVectorBuilder,
@@ -85,11 +116,15 @@ struct InformationSchemaColumnsBuilder {
}
impl InformationSchemaColumnsBuilder {
fn new(schema: SchemaRef, catalog_name: String, catalog_provider: CatalogProviderRef) -> Self {
fn new(
schema: SchemaRef,
catalog_name: String,
catalog_manager: Weak<dyn CatalogManager>,
) -> Self {
Self {
schema,
catalog_name,
catalog_provider,
catalog_manager,
catalog_names: StringVectorBuilder::with_capacity(42),
schema_names: StringVectorBuilder::with_capacity(42),
table_names: StringVectorBuilder::with_capacity(42),
@@ -102,11 +137,23 @@ impl InformationSchemaColumnsBuilder {
/// Construct the `information_schema.tables` virtual table
async fn make_tables(&mut self) -> Result<RecordBatch> {
let catalog_name = self.catalog_name.clone();
let catalog_manager = self
.catalog_manager
.upgrade()
.context(UpgradeWeakCatalogManagerRefSnafu)?;
for schema_name in self.catalog_provider.schema_names().await? {
let Some(schema) = self.catalog_provider.schema(&schema_name).await? else { continue };
for table_name in schema.table_names().await? {
let Some(table) = schema.table(&table_name).await? else { continue };
for schema_name in catalog_manager.schema_names(&catalog_name).await? {
if !catalog_manager
.schema_exist(&catalog_name, &schema_name)
.await?
{
continue;
}
for table_name in catalog_manager
.table_names(&catalog_name, &schema_name)
.await?
{
let Some(table) = catalog_manager.table(&catalog_name, &schema_name, &table_name).await? else { continue };
let keys = &table.table_info().meta.primary_key_indices;
let schema = table.schema();
for (idx, column) in schema.column_schemas().iter().enumerate() {
@@ -168,7 +215,7 @@ impl DfPartitionStream for InformationSchemaColumns {
}
fn execute(&self, _: Arc<TaskContext>) -> DfSendableRecordBatchStream {
let schema = self.schema().clone();
let schema = self.schema.arrow_schema().clone();
let mut builder = self.builder();
Box::pin(DfRecordBatchStreamAdapter::new(
schema,


@@ -12,32 +12,37 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use std::sync::Arc;
use std::sync::{Arc, Weak};
use arrow_schema::SchemaRef as ArrowSchemaRef;
use common_catalog::consts::INFORMATION_SCHEMA_NAME;
use common_error::prelude::BoxedError;
use common_query::physical_plan::TaskContext;
use common_recordbatch::RecordBatch;
use common_recordbatch::adapter::RecordBatchStreamAdapter;
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
use datafusion::datasource::streaming::PartitionStream as DfPartitionStream;
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
use datatypes::prelude::{ConcreteDataType, ScalarVectorBuilder, VectorRef};
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
use datatypes::vectors::{StringVectorBuilder, UInt32VectorBuilder};
use snafu::ResultExt;
use snafu::{OptionExt, ResultExt};
use table::metadata::TableType;
use crate::error::{CreateRecordBatchSnafu, Result};
use crate::CatalogProviderRef;
use crate::error::{
CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
};
use crate::information_schema::InformationStreamBuilder;
use crate::CatalogManager;
pub(super) struct InformationSchemaTables {
schema: SchemaRef,
catalog_name: String,
catalog_provider: CatalogProviderRef,
catalog_manager: Weak<dyn CatalogManager>,
}
impl InformationSchemaTables {
pub(super) fn new(catalog_name: String, catalog_provider: CatalogProviderRef) -> Self {
pub(super) fn new(catalog_name: String, catalog_manager: Weak<dyn CatalogManager>) -> Self {
let schema = Arc::new(Schema::new(vec![
ColumnSchema::new("table_catalog", ConcreteDataType::string_datatype(), false),
ColumnSchema::new("table_schema", ConcreteDataType::string_datatype(), false),
@@ -49,7 +54,7 @@ impl InformationSchemaTables {
Self {
schema,
catalog_name,
catalog_provider,
catalog_manager,
}
}
@@ -57,18 +62,44 @@ impl InformationSchemaTables {
InformationSchemaTablesBuilder::new(
self.schema.clone(),
self.catalog_name.clone(),
self.catalog_provider.clone(),
self.catalog_manager.clone(),
)
}
}
impl InformationStreamBuilder for InformationSchemaTables {
fn schema(&self) -> SchemaRef {
self.schema.clone()
}
fn to_stream(&self) -> Result<SendableRecordBatchStream> {
let schema = self.schema.arrow_schema().clone();
let mut builder = self.builder();
let stream = Box::pin(DfRecordBatchStreamAdapter::new(
schema,
futures::stream::once(async move {
builder
.make_tables()
.await
.map(|x| x.into_df_record_batch())
.map_err(Into::into)
}),
));
Ok(Box::pin(
RecordBatchStreamAdapter::try_new(stream)
.map_err(BoxedError::new)
.context(InternalSnafu)?,
))
}
}
/// Builds the `information_schema.TABLE` table row by row
///
/// Columns are based on <https://www.postgresql.org/docs/current/infoschema-columns.html>
struct InformationSchemaTablesBuilder {
schema: SchemaRef,
catalog_name: String,
catalog_provider: CatalogProviderRef,
catalog_manager: Weak<dyn CatalogManager>,
catalog_names: StringVectorBuilder,
schema_names: StringVectorBuilder,
@@ -79,11 +110,15 @@ struct InformationSchemaTablesBuilder {
}
impl InformationSchemaTablesBuilder {
fn new(schema: SchemaRef, catalog_name: String, catalog_provider: CatalogProviderRef) -> Self {
fn new(
schema: SchemaRef,
catalog_name: String,
catalog_manager: Weak<dyn CatalogManager>,
) -> Self {
Self {
schema,
catalog_name,
catalog_provider,
catalog_manager,
catalog_names: StringVectorBuilder::with_capacity(42),
schema_names: StringVectorBuilder::with_capacity(42),
table_names: StringVectorBuilder::with_capacity(42),
@@ -96,15 +131,27 @@ impl InformationSchemaTablesBuilder {
/// Construct the `information_schema.tables` virtual table
async fn make_tables(&mut self) -> Result<RecordBatch> {
let catalog_name = self.catalog_name.clone();
let catalog_manager = self
.catalog_manager
.upgrade()
.context(UpgradeWeakCatalogManagerRefSnafu)?;
for schema_name in self.catalog_provider.schema_names().await? {
for schema_name in catalog_manager.schema_names(&catalog_name).await? {
if schema_name == INFORMATION_SCHEMA_NAME {
continue;
}
if !catalog_manager
.schema_exist(&catalog_name, &schema_name)
.await?
{
continue;
}
let Some(schema) = self.catalog_provider.schema(&schema_name).await? else { continue };
for table_name in schema.table_names().await? {
let Some(table) = schema.table(&table_name).await? else { continue };
for table_name in catalog_manager
.table_names(&catalog_name, &schema_name)
.await?
{
let Some(table) = catalog_manager.table(&catalog_name, &schema_name, &table_name).await? else { continue };
let table_info = table.table_info();
self.add_table(
&catalog_name,
@@ -160,7 +207,7 @@ impl DfPartitionStream for InformationSchemaTables {
}
fn execute(&self, _: Arc<TaskContext>) -> DfSendableRecordBatchStream {
let schema = self.schema().clone();
let schema = self.schema.arrow_schema().clone();
let mut builder = self.builder();
Box::pin(DfRecordBatchStreamAdapter::new(
schema,


@@ -12,9 +12,12 @@
// See the License for the specific language governing permissions and
// limitations under the License.
#![feature(trait_upcasting)]
#![feature(assert_matches)]
#![feature(try_blocks)]
use std::any::Any;
use std::collections::HashMap;
use std::fmt::{Debug, Formatter};
use std::sync::Arc;
@@ -27,65 +30,46 @@ use table::requests::CreateTableRequest;
use table::TableRef;
use crate::error::{CreateTableSnafu, Result};
pub use crate::schema::{SchemaProvider, SchemaProviderRef};
pub mod error;
pub mod helper;
pub(crate) mod information_schema;
pub mod information_schema;
pub mod local;
mod metrics;
pub mod remote;
pub mod schema;
pub mod system;
pub mod table_source;
pub mod tables;
/// Represents a catalog, comprising a number of named schemas.
#[async_trait::async_trait]
pub trait CatalogProvider: Sync + Send {
/// Returns the catalog provider as [`Any`](std::any::Any)
/// so that it can be downcast to a specific implementation.
fn as_any(&self) -> &dyn Any;
/// Retrieves the list of available schema names in this catalog.
async fn schema_names(&self) -> Result<Vec<String>>;
/// Registers schema to this catalog.
async fn register_schema(
&self,
name: String,
schema: SchemaProviderRef,
) -> Result<Option<SchemaProviderRef>>;
/// Retrieves a specific schema from the catalog by name, provided it exists.
async fn schema(&self, name: &str) -> Result<Option<SchemaProviderRef>>;
}
pub type CatalogProviderRef = Arc<dyn CatalogProvider>;
#[async_trait::async_trait]
pub trait CatalogManager: Send + Sync {
fn as_any(&self) -> &dyn Any;
/// Starts a catalog manager.
async fn start(&self) -> Result<()>;
async fn register_catalog(
&self,
name: String,
catalog: CatalogProviderRef,
) -> Result<Option<CatalogProviderRef>>;
/// Registers a table within given catalog/schema to catalog manager,
/// returns whether the table registered.
async fn register_table(&self, request: RegisterTableRequest) -> Result<bool>;
/// Deregisters a table within given catalog/schema to catalog manager,
/// returns whether the table deregistered.
async fn deregister_table(&self, request: DeregisterTableRequest) -> Result<bool>;
/// Registers a catalog to catalog manager, returns whether the catalog existed before.
async fn register_catalog(&self, name: String) -> Result<bool>;
/// Registers a schema with catalog name and schema name. Returns whether the
/// schema is registered.
///
/// # Errors
///
/// This method will/should fail if the catalog does not exist
async fn register_schema(&self, request: RegisterSchemaRequest) -> Result<bool>;
/// Registers a table within given catalog/schema to catalog manager,
/// returns whether the table registered.
///
/// # Errors
///
/// This method will/should fail if the catalog or schema does not exist
async fn register_table(&self, request: RegisterTableRequest) -> Result<bool>;
/// Deregisters a table within given catalog/schema to catalog manager
async fn deregister_table(&self, request: DeregisterTableRequest) -> Result<()>;
/// Rename a table to [RenameTableRequest::new_table_name], returns whether the table is renamed.
async fn rename_table(&self, request: RenameTableRequest) -> Result<bool>;
@@ -95,9 +79,15 @@ pub trait CatalogManager: Send + Sync {
async fn catalog_names(&self) -> Result<Vec<String>>;
async fn catalog(&self, catalog: &str) -> Result<Option<CatalogProviderRef>>;
async fn schema_names(&self, catalog: &str) -> Result<Vec<String>>;
async fn schema(&self, catalog: &str, schema: &str) -> Result<Option<SchemaProviderRef>>;
async fn table_names(&self, catalog: &str, schema: &str) -> Result<Vec<String>>;
async fn catalog_exist(&self, catalog: &str) -> Result<bool>;
async fn schema_exist(&self, catalog: &str, schema: &str) -> Result<bool>;
async fn table_exist(&self, catalog: &str, schema: &str, table: &str) -> Result<bool>;
/// Returns the table by catalog, schema and table name.
async fn table(
@@ -106,8 +96,6 @@ pub trait CatalogManager: Send + Sync {
schema: &str,
table_name: &str,
) -> Result<Option<TableRef>>;
fn as_any(&self) -> &dyn Any;
}
pub type CatalogManagerRef = Arc<dyn CatalogManager>;
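
Taken together, the reshaped trait replaces provider objects with name-based calls. The following sketch (not part of this change) walks the new registration path against the in-memory manager; the crate paths and the table id are illustrative assumptions.

use std::sync::Arc;

use catalog::local::MemoryCatalogManager;
use catalog::{CatalogManager, RegisterSchemaRequest, RegisterTableRequest};
use table::table::numbers::NumbersTable;

async fn register_example() {
    let manager = MemoryCatalogManager::default();
    // Catalogs are now registered by name only; the bool reports prior existence.
    let _ = manager
        .register_catalog("my_catalog".to_string())
        .await
        .unwrap();
    let _ = manager
        .register_schema(RegisterSchemaRequest {
            catalog: "my_catalog".to_string(),
            schema: "my_schema".to_string(),
        })
        .await
        .unwrap();
    // Table registration carries the full coordinates plus the table id.
    let _ = manager
        .register_table(RegisterTableRequest {
            catalog: "my_catalog".to_string(),
            schema: "my_schema".to_string(),
            table_name: "numbers".to_string(),
            table_id: 42, // arbitrary id for the sketch
            table: Arc::new(NumbersTable::new(42)),
        })
        .await
        .unwrap();
    // Lookups and existence checks go straight through the manager.
    assert!(manager
        .table_exist("my_catalog", "my_schema", "numbers")
        .await
        .unwrap());
}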
@@ -167,14 +155,6 @@ pub struct RegisterSchemaRequest {
pub schema: String,
}
pub trait CatalogProviderFactory {
fn create(&self, catalog_name: String) -> CatalogProviderRef;
}
pub trait SchemaProviderFactory {
fn create(&self, catalog_name: String, schema_name: String) -> SchemaProviderRef;
}
pub(crate) async fn handle_system_table_request<'a, M: CatalogManager>(
manager: &'a M,
engine: TableEngineRef,
@@ -200,7 +180,7 @@ pub(crate) async fn handle_system_table_request<'a, M: CatalogManager>(
table_name,
),
})?;
manager
let _ = manager
.register_table(RegisterTableRequest {
catalog: catalog_name.clone(),
schema: schema_name.clone(),
@@ -231,19 +211,17 @@ pub async fn datanode_stat(catalog_manager: &CatalogManagerRef) -> (u64, Vec<Reg
let Ok(catalog_names) = catalog_manager.catalog_names().await else { return (region_number, region_stats) };
for catalog_name in catalog_names {
let Ok(Some(catalog)) = catalog_manager.catalog(&catalog_name).await else { continue };
let Ok(schema_names) = catalog.schema_names().await else { continue };
let Ok(schema_names) = catalog_manager.schema_names(&catalog_name).await else { continue };
for schema_name in schema_names {
let Ok(Some(schema)) = catalog.schema(&schema_name).await else { continue };
let Ok(table_names) = schema.table_names().await else { continue };
let Ok(table_names) = catalog_manager.table_names(&catalog_name,&schema_name).await else { continue };
for table_name in table_names {
let Ok(Some(table)) = schema.table(&table_name).await else { continue };
let Ok(Some(table)) = catalog_manager.table(&catalog_name, &schema_name, &table_name).await else { continue };
let region_numbers = &table.table_info().meta.region_numbers;
region_number += region_numbers.len() as u64;
let engine = &table.table_info().meta.engine;
match table.region_stats() {
Ok(stats) => {
let stats = stats.into_iter().map(|stat| RegionStat {
@@ -254,6 +232,7 @@ pub async fn datanode_stat(catalog_manager: &CatalogManagerRef) -> (u64, Vec<Reg
table_name: table_name.clone(),
}),
approximate_bytes: stat.disk_usage_bytes as i64,
attrs: HashMap::from([("engine_name".to_owned(), engine.clone())]),
..Default::default()
});


@@ -16,6 +16,4 @@ pub mod manager;
pub mod memory;
pub use manager::LocalCatalogManager;
pub use memory::{
new_memory_catalog_list, MemoryCatalogManager, MemoryCatalogProvider, MemorySchemaProvider,
};
pub use memory::{new_memory_catalog_manager, MemoryCatalogManager};


@@ -18,7 +18,8 @@ use std::sync::Arc;
use common_catalog::consts::{
DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, INFORMATION_SCHEMA_NAME, MIN_USER_TABLE_ID,
MITO_ENGINE, SYSTEM_CATALOG_NAME, SYSTEM_CATALOG_TABLE_NAME,
MITO_ENGINE, NUMBERS_TABLE_ID, SYSTEM_CATALOG_NAME, SYSTEM_CATALOG_TABLE_ID,
SYSTEM_CATALOG_TABLE_NAME,
};
use common_catalog::format_full_table_name;
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
@@ -32,7 +33,7 @@ use table::engine::manager::TableEngineManagerRef;
use table::engine::EngineContext;
use table::metadata::TableId;
use table::requests::OpenTableRequest;
use table::table::numbers::NumbersTable;
use table::table::numbers::{NumbersTable, NUMBERS_TABLE_NAME};
use table::table::TableIdProvider;
use table::TableRef;
@@ -42,16 +43,16 @@ use crate::error::{
SystemCatalogTypeMismatchSnafu, TableEngineNotFoundSnafu, TableExistsSnafu, TableNotExistSnafu,
TableNotFoundSnafu,
};
use crate::local::memory::{MemoryCatalogManager, MemoryCatalogProvider, MemorySchemaProvider};
use crate::information_schema::InformationSchemaProvider;
use crate::local::memory::MemoryCatalogManager;
use crate::system::{
decode_system_catalog, Entry, SystemCatalogTable, TableEntry, ENTRY_TYPE_INDEX, KEY_INDEX,
VALUE_INDEX,
};
use crate::tables::SystemCatalog;
use crate::{
handle_system_table_request, CatalogManager, CatalogProviderRef, DeregisterTableRequest,
handle_system_table_request, CatalogManager, CatalogManagerRef, DeregisterTableRequest,
RegisterSchemaRequest, RegisterSystemTableRequest, RegisterTableRequest, RenameTableRequest,
SchemaProviderRef,
};
/// A `CatalogManager` consists of a system catalog and a bunch of user catalogs.
@@ -74,11 +75,11 @@ impl LocalCatalogManager {
engine_name: MITO_ENGINE,
})?;
let table = SystemCatalogTable::new(engine.clone()).await?;
let memory_catalog_list = crate::local::memory::new_memory_catalog_list()?;
let memory_catalog_manager = crate::local::memory::new_memory_catalog_manager()?;
let system_catalog = Arc::new(SystemCatalog::new(table));
Ok(Self {
system: system_catalog,
catalogs: memory_catalog_list,
catalogs: memory_catalog_manager,
engine_manager,
next_table_id: AtomicU32::new(MIN_USER_TABLE_ID),
init_lock: Mutex::new(false),
@@ -116,26 +117,47 @@ impl LocalCatalogManager {
}
async fn init_system_catalog(&self) -> Result<()> {
let system_schema = Arc::new(MemorySchemaProvider::new());
system_schema.register_table_sync(
SYSTEM_CATALOG_TABLE_NAME.to_string(),
self.system.information_schema.system.clone(),
)?;
let system_catalog = Arc::new(MemoryCatalogProvider::new());
system_catalog.register_schema_sync(INFORMATION_SCHEMA_NAME.to_string(), system_schema)?;
self.catalogs
.register_catalog_sync(SYSTEM_CATALOG_NAME.to_string(), system_catalog)?;
// register SystemCatalogTable
let _ = self
.catalogs
.register_catalog_sync(SYSTEM_CATALOG_NAME.to_string())?;
let _ = self.catalogs.register_schema_sync(RegisterSchemaRequest {
catalog: SYSTEM_CATALOG_NAME.to_string(),
schema: INFORMATION_SCHEMA_NAME.to_string(),
})?;
let register_table_req = RegisterTableRequest {
catalog: SYSTEM_CATALOG_NAME.to_string(),
schema: INFORMATION_SCHEMA_NAME.to_string(),
table_name: SYSTEM_CATALOG_TABLE_NAME.to_string(),
table_id: SYSTEM_CATALOG_TABLE_ID,
table: self.system.information_schema.system.clone(),
};
let _ = self.catalogs.register_table(register_table_req).await?;
let default_catalog = Arc::new(MemoryCatalogProvider::new());
let default_schema = Arc::new(MemorySchemaProvider::new());
// register default catalog and default schema
let _ = self
.catalogs
.register_catalog_sync(DEFAULT_CATALOG_NAME.to_string())?;
let _ = self.catalogs.register_schema_sync(RegisterSchemaRequest {
catalog: DEFAULT_CATALOG_NAME.to_string(),
schema: DEFAULT_SCHEMA_NAME.to_string(),
})?;
// Add numbers table for test
let table = Arc::new(NumbersTable::default());
default_schema.register_table_sync("numbers".to_string(), table)?;
let numbers_table = Arc::new(NumbersTable::default());
let register_number_table_req = RegisterTableRequest {
catalog: DEFAULT_CATALOG_NAME.to_string(),
schema: DEFAULT_SCHEMA_NAME.to_string(),
table_name: NUMBERS_TABLE_NAME.to_string(),
table_id: NUMBERS_TABLE_ID,
table: numbers_table,
};
let _ = self
.catalogs
.register_table(register_number_table_req)
.await?;
default_catalog.register_schema_sync(DEFAULT_SCHEMA_NAME.to_string(), default_schema)?;
self.catalogs
.register_catalog_sync(DEFAULT_CATALOG_NAME.to_string(), default_catalog)?;
Ok(())
}
@@ -207,30 +229,26 @@ impl LocalCatalogManager {
for entry in entries {
match entry {
Entry::Catalog(c) => {
self.catalogs.register_catalog_if_absent(
c.catalog_name.clone(),
Arc::new(MemoryCatalogProvider::new()),
);
let _ = self
.catalogs
.register_catalog_if_absent(c.catalog_name.clone());
info!("Register catalog: {}", c.catalog_name);
}
Entry::Schema(s) => {
self.catalogs
.catalog(&s.catalog_name)
.await?
.context(CatalogNotFoundSnafu {
catalog_name: &s.catalog_name,
})?
.register_schema(
s.schema_name.clone(),
Arc::new(MemorySchemaProvider::new()),
)
.await?;
let req = RegisterSchemaRequest {
catalog: s.catalog_name.clone(),
schema: s.schema_name.clone(),
};
let _ = self.catalogs.register_schema_sync(req)?;
info!("Registered schema: {:?}", s);
}
Entry::Table(t) => {
max_table_id = max_table_id.max(t.table_id);
if t.is_deleted {
continue;
}
self.open_and_register_table(&t).await?;
info!("Registered table: {:?}", t);
max_table_id = max_table_id.max(t.table_id);
}
}
}
@@ -245,27 +263,16 @@ impl LocalCatalogManager {
}
async fn open_and_register_table(&self, t: &TableEntry) -> Result<()> {
let catalog =
self.catalogs
.catalog(&t.catalog_name)
.await?
.context(CatalogNotFoundSnafu {
catalog_name: &t.catalog_name,
})?;
let schema = catalog
.schema(&t.schema_name)
.await?
.context(SchemaNotFoundSnafu {
catalog: &t.catalog_name,
schema: &t.schema_name,
})?;
self.check_catalog_schema_exist(&t.catalog_name, &t.schema_name)
.await?;
let context = EngineContext {};
let request = OpenTableRequest {
let open_request = OpenTableRequest {
catalog_name: t.catalog_name.clone(),
schema_name: t.schema_name.clone(),
table_name: t.table_name.clone(),
table_id: t.table_id,
region_numbers: vec![0],
};
let engine = self
.engine_manager
@@ -274,8 +281,8 @@ impl LocalCatalogManager {
engine_name: &t.engine,
})?;
let option = engine
.open_table(&context, request)
let table_ref = engine
.open_table(&context, open_request)
.await
.with_context(|_| OpenTableSnafu {
table_info: format!(
@@ -290,7 +297,48 @@ impl LocalCatalogManager {
),
})?;
schema.register_table(t.table_name.clone(), option).await?;
let register_request = RegisterTableRequest {
catalog: t.catalog_name.clone(),
schema: t.schema_name.clone(),
table_name: t.table_name.clone(),
table_id: t.table_id,
table: table_ref,
};
let _ = self.catalogs.register_table(register_request).await?;
Ok(())
}
async fn check_state(&self) -> Result<()> {
let started = self.init_lock.lock().await;
ensure!(
*started,
IllegalManagerStateSnafu {
msg: "Catalog manager not started",
}
);
Ok(())
}
async fn check_catalog_schema_exist(
&self,
catalog_name: &str,
schema_name: &str,
) -> Result<()> {
if !self.catalogs.catalog_exist(catalog_name).await? {
return CatalogNotFoundSnafu { catalog_name }.fail()?;
}
if !self
.catalogs
.schema_exist(catalog_name, schema_name)
.await?
{
return SchemaNotFoundSnafu {
catalog: catalog_name,
schema: schema_name,
}
.fail()?;
}
Ok(())
}
}
@@ -311,34 +359,21 @@ impl CatalogManager for LocalCatalogManager {
}
async fn register_table(&self, request: RegisterTableRequest) -> Result<bool> {
let started = self.init_lock.lock().await;
self.check_state().await?;
ensure!(
*started,
IllegalManagerStateSnafu {
msg: "Catalog manager not started",
}
);
let catalog_name = request.catalog.clone();
let schema_name = request.schema.clone();
let catalog_name = &request.catalog;
let schema_name = &request.schema;
let catalog = self
.catalogs
.catalog(catalog_name)
.await?
.context(CatalogNotFoundSnafu { catalog_name })?;
let schema = catalog
.schema(schema_name)
.await?
.with_context(|| SchemaNotFoundSnafu {
catalog: catalog_name,
schema: schema_name,
})?;
self.check_catalog_schema_exist(&catalog_name, &schema_name)
.await?;
{
let _lock = self.register_lock.lock().await;
if let Some(existing) = schema.table(&request.table_name).await? {
if let Some(existing) = self
.catalogs
.table(&request.catalog, &request.schema, &request.table_name)
.await?
{
if existing.table_info().ident.table_id != request.table_id {
error!(
"Unexpected table register request: {:?}, existing: {:?}",
@@ -347,8 +382,8 @@ impl CatalogManager for LocalCatalogManager {
);
return TableExistsSnafu {
table: format_full_table_name(
catalog_name,
schema_name,
&catalog_name,
&schema_name,
&request.table_name,
),
}
@@ -357,24 +392,25 @@ impl CatalogManager for LocalCatalogManager {
// Try to register table with same table id, just ignore.
Ok(false)
} else {
let engine = request.table.table_info().meta.engine.to_string();
// table does not exist
self.system
let engine = request.table.table_info().meta.engine.to_string();
let table_name = request.table_name.clone();
let table_id = request.table_id;
let _ = self.catalogs.register_table(request).await?;
let _ = self
.system
.register_table(
catalog_name.clone(),
schema_name.clone(),
request.table_name.clone(),
request.table_id,
table_name,
table_id,
engine,
)
.await?;
schema
.register_table(request.table_name, request.table)
.await?;
increment_gauge!(
crate::metrics::METRIC_CATALOG_MANAGER_TABLE_COUNT,
1.0,
&[crate::metrics::db_label(catalog_name, schema_name)],
&[crate::metrics::db_label(&catalog_name, &schema_name)],
);
Ok(true)
}
@@ -382,41 +418,27 @@ impl CatalogManager for LocalCatalogManager {
}
async fn rename_table(&self, request: RenameTableRequest) -> Result<bool> {
let started = self.init_lock.lock().await;
ensure!(
*started,
IllegalManagerStateSnafu {
msg: "Catalog manager not started",
}
);
self.check_state().await?;
let catalog_name = &request.catalog;
let schema_name = &request.schema;
let catalog = self
.catalogs
.catalog(catalog_name)
.await?
.context(CatalogNotFoundSnafu { catalog_name })?;
let schema = catalog
.schema(schema_name)
.await?
.with_context(|| SchemaNotFoundSnafu {
catalog: catalog_name,
schema: schema_name,
})?;
let _lock = self.register_lock.lock().await;
self.check_catalog_schema_exist(catalog_name, schema_name)
.await?;
ensure!(
!schema.table_exist(&request.new_table_name).await?,
self.catalogs
.table(catalog_name, schema_name, &request.new_table_name)
.await?
.is_none(),
TableExistsSnafu {
table: &request.new_table_name
}
);
let old_table = schema
.table(&request.table_name)
let _lock = self.register_lock.lock().await;
let old_table = self
.catalogs
.table(catalog_name, schema_name, &request.table_name)
.await?
.context(TableNotExistSnafu {
table: &request.table_name,
@@ -424,7 +446,8 @@ impl CatalogManager for LocalCatalogManager {
let engine = old_table.table_info().meta.engine.to_string();
// rename table in system catalog
self.system
let _ = self
.system
.register_table(
catalog_name.clone(),
schema_name.clone(),
@@ -434,18 +457,11 @@ impl CatalogManager for LocalCatalogManager {
)
.await?;
let renamed = schema
.rename_table(&request.table_name, request.new_table_name.clone())
.await
.is_ok();
Ok(renamed)
self.catalogs.rename_table(request).await
}
async fn deregister_table(&self, request: DeregisterTableRequest) -> Result<bool> {
{
let started = *self.init_lock.lock().await;
ensure!(started, IllegalManagerStateSnafu { msg: "not started" });
}
async fn deregister_table(&self, request: DeregisterTableRequest) -> Result<()> {
self.check_state().await?;
{
let _ = self.register_lock.lock().await;
@@ -466,61 +482,46 @@ impl CatalogManager for LocalCatalogManager {
.ident
.table_id;
if !self.system.deregister_table(&request, table_id).await? {
return Ok(false);
}
self.system.deregister_table(&request, table_id).await?;
self.catalogs.deregister_table(request).await
}
}
async fn register_schema(&self, request: RegisterSchemaRequest) -> Result<bool> {
let started = self.init_lock.lock().await;
ensure!(
*started,
IllegalManagerStateSnafu {
msg: "Catalog manager not started",
}
);
self.check_state().await?;
let catalog_name = &request.catalog;
let schema_name = &request.schema;
let catalog = self
.catalogs
.catalog(catalog_name)
.await?
.context(CatalogNotFoundSnafu { catalog_name })?;
if !self.catalogs.catalog_exist(catalog_name).await? {
return CatalogNotFoundSnafu { catalog_name }.fail()?;
}
{
let _lock = self.register_lock.lock().await;
ensure!(
catalog.schema(schema_name).await?.is_none(),
!self
.catalogs
.schema_exist(catalog_name, schema_name)
.await?,
SchemaExistsSnafu {
schema: schema_name,
}
);
self.system
.register_schema(request.catalog, schema_name.clone())
let _ = self
.system
.register_schema(request.catalog.clone(), schema_name.clone())
.await?;
catalog
.register_schema(request.schema, Arc::new(MemorySchemaProvider::new()))
.await?;
Ok(true)
self.catalogs.register_schema_sync(request)
}
}
async fn register_system_table(&self, request: RegisterSystemTableRequest) -> Result<()> {
self.check_state().await?;
let catalog_name = request.create_table_request.catalog_name.clone();
let schema_name = request.create_table_request.schema_name.clone();
ensure!(
!*self.init_lock.lock().await,
IllegalManagerStateSnafu {
msg: "Catalog manager already started",
}
);
let mut sys_table_requests = self.system_table_requests.lock().await;
sys_table_requests.push(request);
increment_gauge!(
@@ -531,15 +532,8 @@ impl CatalogManager for LocalCatalogManager {
Ok(())
}
async fn schema(&self, catalog: &str, schema: &str) -> Result<Option<SchemaProviderRef>> {
self.catalogs
.catalog(catalog)
.await?
.context(CatalogNotFoundSnafu {
catalog_name: catalog,
})?
.schema(schema)
.await
async fn schema_exist(&self, catalog: &str, schema: &str) -> Result<bool> {
self.catalogs.schema_exist(catalog, schema).await
}
async fn table(
@@ -548,39 +542,44 @@ impl CatalogManager for LocalCatalogManager {
schema_name: &str,
table_name: &str,
) -> Result<Option<TableRef>> {
let catalog = self
.catalogs
.catalog(catalog_name)
.await?
.context(CatalogNotFoundSnafu { catalog_name })?;
let schema = catalog
.schema(schema_name)
.await?
.with_context(|| SchemaNotFoundSnafu {
catalog: catalog_name,
schema: schema_name,
})?;
schema.table(table_name).await
if schema_name == INFORMATION_SCHEMA_NAME {
let manager: CatalogManagerRef = self.catalogs.clone() as _;
let provider =
InformationSchemaProvider::new(catalog_name.to_string(), Arc::downgrade(&manager));
return provider.table(table_name);
}
self.catalogs
.table(catalog_name, schema_name, table_name)
.await
}
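
With the INFORMATION_SCHEMA_NAME branch above, information_schema tables are synthesized per request instead of being registered at startup. A hedged lookup sketch follows; the catalog name is only an example and the imports are assumed paths.

use catalog::local::LocalCatalogManager;
use catalog::CatalogManager;

async fn lookup_information_schema(manager: &LocalCatalogManager) {
    let table = manager
        .table("greptime", "information_schema", "columns")
        .await
        .unwrap();
    // The virtual table is built from the live catalog state at call time.
    assert!(table.is_some());
}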
async fn catalog(&self, catalog: &str) -> Result<Option<CatalogProviderRef>> {
async fn catalog_exist(&self, catalog: &str) -> Result<bool> {
if catalog.eq_ignore_ascii_case(SYSTEM_CATALOG_NAME) {
Ok(Some(self.system.clone()))
Ok(true)
} else {
self.catalogs.catalog(catalog).await
self.catalogs.catalog_exist(catalog).await
}
}
async fn table_exist(&self, catalog: &str, schema: &str, table: &str) -> Result<bool> {
self.catalogs.table_exist(catalog, schema, table).await
}
async fn catalog_names(&self) -> Result<Vec<String>> {
self.catalogs.catalog_names().await
}
async fn register_catalog(
&self,
name: String,
catalog: CatalogProviderRef,
) -> Result<Option<CatalogProviderRef>> {
self.catalogs.register_catalog(name, catalog).await
async fn schema_names(&self, catalog_name: &str) -> Result<Vec<String>> {
self.catalogs.schema_names(catalog_name).await
}
async fn table_names(&self, catalog_name: &str, schema_name: &str) -> Result<Vec<String>> {
self.catalogs.table_names(catalog_name, schema_name).await
}
async fn register_catalog(&self, name: String) -> Result<bool> {
self.catalogs.register_catalog(name).await
}
fn as_any(&self) -> &dyn Any {
@@ -606,6 +605,7 @@ mod tests {
table_name: "T1".to_string(),
table_id: 1,
engine: MITO_ENGINE.to_string(),
is_deleted: false,
}),
Entry::Catalog(CatalogEntry {
catalog_name: "C2".to_string(),
@@ -627,6 +627,7 @@ mod tests {
table_name: "T2".to_string(),
table_id: 2,
engine: MITO_ENGINE.to_string(),
is_deleted: false,
}),
];
let res = LocalCatalogManager::sort_entries(vec);


@@ -18,29 +18,27 @@ use std::collections::HashMap;
use std::sync::atomic::{AtomicU32, Ordering};
use std::sync::{Arc, RwLock};
use async_trait::async_trait;
use common_catalog::consts::MIN_USER_TABLE_ID;
use common_telemetry::error;
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, MIN_USER_TABLE_ID};
use metrics::{decrement_gauge, increment_gauge};
use snafu::{ensure, OptionExt};
use snafu::OptionExt;
use table::metadata::TableId;
use table::table::TableIdProvider;
use table::TableRef;
use crate::error::{
self, CatalogNotFoundSnafu, Result, SchemaNotFoundSnafu, TableExistsSnafu, TableNotFoundSnafu,
CatalogNotFoundSnafu, Result, SchemaNotFoundSnafu, TableExistsSnafu, TableNotFoundSnafu,
};
use crate::schema::SchemaProvider;
use crate::{
CatalogManager, CatalogProvider, CatalogProviderRef, DeregisterTableRequest,
RegisterSchemaRequest, RegisterSystemTableRequest, RegisterTableRequest, RenameTableRequest,
SchemaProviderRef,
CatalogManager, DeregisterTableRequest, RegisterSchemaRequest, RegisterSystemTableRequest,
RegisterTableRequest, RenameTableRequest,
};
type SchemaEntries = HashMap<String, HashMap<String, TableRef>>;
/// Simple in-memory list of catalogs
pub struct MemoryCatalogManager {
/// Collection of catalogs containing schemas and ultimately Tables
pub catalogs: RwLock<HashMap<String, CatalogProviderRef>>,
pub catalogs: RwLock<HashMap<String, SchemaEntries>>,
pub table_id: AtomicU32,
}
@@ -50,13 +48,14 @@ impl Default for MemoryCatalogManager {
table_id: AtomicU32::new(MIN_USER_TABLE_ID),
catalogs: Default::default(),
};
let default_catalog = Arc::new(MemoryCatalogProvider::new());
manager
.register_catalog_sync("greptime".to_string(), default_catalog.clone())
.unwrap();
default_catalog
.register_schema_sync("public".to_string(), Arc::new(MemorySchemaProvider::new()))
.unwrap();
let catalog = HashMap::from([(DEFAULT_SCHEMA_NAME.to_string(), HashMap::new())]);
let _ = manager
.catalogs
.write()
.unwrap()
.insert(DEFAULT_CATALOG_NAME.to_string(), catalog);
manager
}
}
@@ -76,82 +75,79 @@ impl CatalogManager for MemoryCatalogManager {
}
async fn register_table(&self, request: RegisterTableRequest) -> Result<bool> {
let schema = self
.catalog(&request.catalog)
.context(CatalogNotFoundSnafu {
catalog_name: &request.catalog,
})?
.schema(&request.schema)
.await?
.context(SchemaNotFoundSnafu {
catalog: &request.catalog,
schema: &request.schema,
})?;
let catalog = request.catalog.clone();
let schema = request.schema.clone();
let result = self.register_table_sync(request);
increment_gauge!(
crate::metrics::METRIC_CATALOG_MANAGER_TABLE_COUNT,
1.0,
&[crate::metrics::db_label(&request.catalog, &request.schema)],
&[crate::metrics::db_label(&catalog, &schema)],
);
schema
.register_table(request.table_name, request.table)
.await
.map(|v| v.is_none())
result
}
async fn rename_table(&self, request: RenameTableRequest) -> Result<bool> {
let catalog = self
.catalog(&request.catalog)
.context(CatalogNotFoundSnafu {
catalog_name: &request.catalog,
})?;
let schema =
catalog
.schema(&request.schema)
.await?
.with_context(|| SchemaNotFoundSnafu {
catalog: &request.catalog,
schema: &request.schema,
})?;
Ok(schema
.rename_table(&request.table_name, request.new_table_name)
.await
.is_ok())
}
async fn deregister_table(&self, request: DeregisterTableRequest) -> Result<bool> {
let schema = self
.catalog(&request.catalog)
.context(CatalogNotFoundSnafu {
let mut catalogs = self.catalogs.write().unwrap();
let schema = catalogs
.get_mut(&request.catalog)
.with_context(|| CatalogNotFoundSnafu {
catalog_name: &request.catalog,
})?
.schema(&request.schema)
.await?
.get_mut(&request.schema)
.with_context(|| SchemaNotFoundSnafu {
catalog: &request.catalog,
schema: &request.schema,
})?;
decrement_gauge!(
crate::metrics::METRIC_CATALOG_MANAGER_TABLE_COUNT,
1.0,
&[crate::metrics::db_label(&request.catalog, &request.schema)],
);
schema
.deregister_table(&request.table_name)
.await
.map(|v| v.is_some())
// check old and new table names
if !schema.contains_key(&request.table_name) {
return TableNotFoundSnafu {
table_info: request.table_name.to_string(),
}
.fail()?;
}
if schema.contains_key(&request.new_table_name) {
return TableExistsSnafu {
table: &request.new_table_name,
}
.fail();
}
let table = schema.remove(&request.table_name).unwrap();
let _ = schema.insert(request.new_table_name, table);
Ok(true)
}
async fn deregister_table(&self, request: DeregisterTableRequest) -> Result<()> {
let mut catalogs = self.catalogs.write().unwrap();
let schema = catalogs
.get_mut(&request.catalog)
.with_context(|| CatalogNotFoundSnafu {
catalog_name: &request.catalog,
})?
.get_mut(&request.schema)
.with_context(|| SchemaNotFoundSnafu {
catalog: &request.catalog,
schema: &request.schema,
})?;
let result = schema.remove(&request.table_name);
if result.is_some() {
decrement_gauge!(
crate::metrics::METRIC_CATALOG_MANAGER_TABLE_COUNT,
1.0,
&[crate::metrics::db_label(&request.catalog, &request.schema)],
);
}
Ok(())
}
async fn register_schema(&self, request: RegisterSchemaRequest) -> Result<bool> {
let catalog = self
.catalog(&request.catalog)
.context(CatalogNotFoundSnafu {
catalog_name: &request.catalog,
})?;
catalog
.register_schema(request.schema, Arc::new(MemorySchemaProvider::new()))
.await?;
increment_gauge!(crate::metrics::METRIC_CATALOG_MANAGER_SCHEMA_COUNT, 1.0);
Ok(true)
let registered = self.register_schema_sync(request)?;
if registered {
increment_gauge!(crate::metrics::METRIC_CATALOG_MANAGER_SCHEMA_COUNT, 1.0);
}
Ok(registered)
}
async fn register_system_table(&self, _request: RegisterSystemTableRequest) -> Result<()> {
@@ -159,12 +155,16 @@ impl CatalogManager for MemoryCatalogManager {
Ok(())
}
async fn schema(&self, catalog: &str, schema: &str) -> Result<Option<SchemaProviderRef>> {
if let Some(c) = self.catalog(catalog) {
c.schema(schema).await
} else {
Ok(None)
}
async fn schema_exist(&self, catalog: &str, schema: &str) -> Result<bool> {
Ok(self
.catalogs
.read()
.unwrap()
.get(catalog)
.with_context(|| CatalogNotFoundSnafu {
catalog_name: catalog,
})?
.contains_key(schema))
}
async fn table(
@@ -173,27 +173,73 @@ impl CatalogManager for MemoryCatalogManager {
schema: &str,
table_name: &str,
) -> Result<Option<TableRef>> {
let Some(catalog) = self
.catalog(catalog) else { return Ok(None)};
let Some(s) = catalog.schema(schema).await? else { return Ok(None) };
s.table(table_name).await
let result = try {
self.catalogs
.read()
.unwrap()
.get(catalog)?
.get(schema)?
.get(table_name)
.cloned()?
};
Ok(result)
}
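
The `try { ... }` expression above relies on the nightly `try_blocks` feature enabled in this crate's lib.rs. For reference, the same nested-map lookup can be written on stable Rust with Option combinators; the sketch below re-declares the SchemaEntries alias so it stands alone.

use std::collections::HashMap;

use table::TableRef;

type SchemaEntries = HashMap<String, HashMap<String, TableRef>>;

// Equivalent lookup without `try_blocks`: each missing level short-circuits to None.
fn lookup_table(
    catalogs: &HashMap<String, SchemaEntries>,
    catalog: &str,
    schema: &str,
    table: &str,
) -> Option<TableRef> {
    catalogs
        .get(catalog)
        .and_then(|schemas| schemas.get(schema))
        .and_then(|tables| tables.get(table))
        .cloned()
}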
async fn catalog(&self, catalog: &str) -> Result<Option<CatalogProviderRef>> {
Ok(self.catalogs.read().unwrap().get(catalog).cloned())
async fn catalog_exist(&self, catalog: &str) -> Result<bool> {
Ok(self.catalogs.read().unwrap().get(catalog).is_some())
}
async fn table_exist(&self, catalog: &str, schema: &str, table: &str) -> Result<bool> {
let catalogs = self.catalogs.read().unwrap();
Ok(catalogs
.get(catalog)
.with_context(|| CatalogNotFoundSnafu {
catalog_name: catalog,
})?
.get(schema)
.with_context(|| SchemaNotFoundSnafu { catalog, schema })?
.contains_key(table))
}
async fn catalog_names(&self) -> Result<Vec<String>> {
Ok(self.catalogs.read().unwrap().keys().cloned().collect())
}
async fn register_catalog(
&self,
name: String,
catalog: CatalogProviderRef,
) -> Result<Option<CatalogProviderRef>> {
increment_gauge!(crate::metrics::METRIC_CATALOG_MANAGER_CATALOG_COUNT, 1.0);
self.register_catalog_sync(name, catalog)
async fn schema_names(&self, catalog_name: &str) -> Result<Vec<String>> {
Ok(self
.catalogs
.read()
.unwrap()
.get(catalog_name)
.with_context(|| CatalogNotFoundSnafu { catalog_name })?
.keys()
.cloned()
.collect())
}
async fn table_names(&self, catalog_name: &str, schema_name: &str) -> Result<Vec<String>> {
Ok(self
.catalogs
.read()
.unwrap()
.get(catalog_name)
.with_context(|| CatalogNotFoundSnafu { catalog_name })?
.get(schema_name)
.with_context(|| SchemaNotFoundSnafu {
catalog: catalog_name,
schema: schema_name,
})?
.keys()
.cloned()
.collect())
}
async fn register_catalog(&self, name: String) -> Result<bool> {
let registered = self.register_catalog_sync(name)?;
if registered {
increment_gauge!(crate::metrics::METRIC_CATALOG_MANAGER_CATALOG_COUNT, 1.0);
}
Ok(registered)
}
fn as_any(&self) -> &dyn Any {
@@ -202,206 +248,78 @@ impl CatalogManager for MemoryCatalogManager {
}
impl MemoryCatalogManager {
/// Registers a catalog and return `None` if no catalog with the same name was already
/// registered, or `Some` with the previously registered catalog.
pub fn register_catalog_if_absent(
&self,
name: String,
catalog: CatalogProviderRef,
) -> Option<CatalogProviderRef> {
/// Registers a catalog and returns whether the catalog already exists
pub fn register_catalog_if_absent(&self, name: String) -> bool {
let mut catalogs = self.catalogs.write().unwrap();
let entry = catalogs.entry(name);
match entry {
Entry::Occupied(v) => Some(v.get().clone()),
Entry::Occupied(_) => true,
Entry::Vacant(v) => {
v.insert(catalog);
None
let _ = v.insert(HashMap::new());
false
}
}
}
pub fn register_catalog_sync(
&self,
name: String,
catalog: CatalogProviderRef,
) -> Result<Option<CatalogProviderRef>> {
pub fn register_catalog_sync(&self, name: String) -> Result<bool> {
let mut catalogs = self.catalogs.write().unwrap();
Ok(catalogs.insert(name, catalog))
Ok(catalogs.insert(name, HashMap::new()).is_some())
}
fn catalog(&self, catalog_name: &str) -> Option<CatalogProviderRef> {
self.catalogs.read().unwrap().get(catalog_name).cloned()
}
}
impl Default for MemoryCatalogProvider {
fn default() -> Self {
Self::new()
}
}
/// Simple in-memory implementation of a catalog.
pub struct MemoryCatalogProvider {
schemas: RwLock<HashMap<String, Arc<dyn SchemaProvider>>>,
}
impl MemoryCatalogProvider {
/// Instantiates a new MemoryCatalogProvider with an empty collection of schemas.
pub fn new() -> Self {
Self {
schemas: RwLock::new(HashMap::new()),
pub fn register_schema_sync(&self, request: RegisterSchemaRequest) -> Result<bool> {
let mut catalogs = self.catalogs.write().unwrap();
let catalog = catalogs
.get_mut(&request.catalog)
.with_context(|| CatalogNotFoundSnafu {
catalog_name: &request.catalog,
})?;
if catalog.contains_key(&request.schema) {
return Ok(false);
}
let _ = catalog.insert(request.schema, HashMap::new());
Ok(true)
}
pub fn schema_names_sync(&self) -> Result<Vec<String>> {
let schemas = self.schemas.read().unwrap();
Ok(schemas.keys().cloned().collect())
}
pub fn register_table_sync(&self, request: RegisterTableRequest) -> Result<bool> {
let mut catalogs = self.catalogs.write().unwrap();
let schema = catalogs
.get_mut(&request.catalog)
.with_context(|| CatalogNotFoundSnafu {
catalog_name: &request.catalog,
})?
.get_mut(&request.schema)
.with_context(|| SchemaNotFoundSnafu {
catalog: &request.catalog,
schema: &request.schema,
})?;
pub fn register_schema_sync(
&self,
name: String,
schema: SchemaProviderRef,
) -> Result<Option<SchemaProviderRef>> {
let mut schemas = self.schemas.write().unwrap();
ensure!(
!schemas.contains_key(&name),
error::SchemaExistsSnafu { schema: &name }
);
increment_gauge!(crate::metrics::METRIC_CATALOG_MANAGER_SCHEMA_COUNT, 1.0);
Ok(schemas.insert(name, schema))
}
pub fn schema_sync(&self, name: &str) -> Result<Option<Arc<dyn SchemaProvider>>> {
let schemas = self.schemas.read().unwrap();
Ok(schemas.get(name).cloned())
}
}
#[async_trait::async_trait]
impl CatalogProvider for MemoryCatalogProvider {
fn as_any(&self) -> &dyn Any {
self
}
async fn schema_names(&self) -> Result<Vec<String>> {
self.schema_names_sync()
}
async fn register_schema(
&self,
name: String,
schema: SchemaProviderRef,
) -> Result<Option<SchemaProviderRef>> {
self.register_schema_sync(name, schema)
}
async fn schema(&self, name: &str) -> Result<Option<Arc<dyn SchemaProvider>>> {
self.schema_sync(name)
}
}
/// Simple in-memory implementation of a schema.
pub struct MemorySchemaProvider {
tables: RwLock<HashMap<String, TableRef>>,
}
impl MemorySchemaProvider {
/// Instantiates a new MemorySchemaProvider with an empty collection of tables.
pub fn new() -> Self {
Self {
tables: RwLock::new(HashMap::new()),
}
}
pub fn register_table_sync(&self, name: String, table: TableRef) -> Result<Option<TableRef>> {
let mut tables = self.tables.write().unwrap();
if let Some(existing) = tables.get(name.as_str()) {
// if table with the same name but different table id exists, then it's a fatal bug
if existing.table_info().ident.table_id != table.table_info().ident.table_id {
error!(
"Unexpected table register: {:?}, existing: {:?}",
table.table_info(),
existing.table_info()
);
return TableExistsSnafu { table: name }.fail()?;
if schema.contains_key(&request.table_name) {
return TableExistsSnafu {
table: &request.table_name,
}
Ok(Some(existing.clone()))
} else {
Ok(tables.insert(name, table))
.fail();
}
Ok(schema.insert(request.table_name, request.table).is_none())
}
pub fn rename_table_sync(&self, name: &str, new_name: String) -> Result<TableRef> {
let mut tables = self.tables.write().unwrap();
let Some(table) = tables.remove(name) else {
return TableNotFoundSnafu {
table_info: name.to_string(),
}
.fail()?;
#[cfg(any(test, feature = "testing"))]
pub fn new_with_table(table: TableRef) -> Self {
let manager = Self::default();
let request = RegisterTableRequest {
catalog: DEFAULT_CATALOG_NAME.to_string(),
schema: DEFAULT_SCHEMA_NAME.to_string(),
table_name: table.table_info().name.clone(),
table_id: table.table_info().ident.table_id,
table,
};
let e = match tables.entry(new_name) {
Entry::Vacant(e) => e,
Entry::Occupied(e) => {
return TableExistsSnafu { table: e.key() }.fail();
}
};
e.insert(table.clone());
Ok(table)
}
pub fn table_exist_sync(&self, name: &str) -> Result<bool> {
let tables = self.tables.read().unwrap();
Ok(tables.contains_key(name))
}
pub fn deregister_table_sync(&self, name: &str) -> Result<Option<TableRef>> {
let mut tables = self.tables.write().unwrap();
Ok(tables.remove(name))
}
}
impl Default for MemorySchemaProvider {
fn default() -> Self {
Self::new()
}
}
#[async_trait]
impl SchemaProvider for MemorySchemaProvider {
fn as_any(&self) -> &dyn Any {
self
}
async fn table_names(&self) -> Result<Vec<String>> {
let tables = self.tables.read().unwrap();
Ok(tables.keys().cloned().collect())
}
async fn table(&self, name: &str) -> Result<Option<TableRef>> {
let tables = self.tables.read().unwrap();
Ok(tables.get(name).cloned())
}
async fn register_table(&self, name: String, table: TableRef) -> Result<Option<TableRef>> {
self.register_table_sync(name, table)
}
async fn rename_table(&self, name: &str, new_name: String) -> Result<TableRef> {
self.rename_table_sync(name, new_name)
}
async fn deregister_table(&self, name: &str) -> Result<Option<TableRef>> {
self.deregister_table_sync(name)
}
async fn table_exist(&self, name: &str) -> Result<bool> {
self.table_exist_sync(name)
let _ = manager.register_table_sync(request).unwrap();
manager
}
}
/// Create a memory catalog list contains a numbers table for test
pub fn new_memory_catalog_list() -> Result<Arc<MemoryCatalogManager>> {
pub fn new_memory_catalog_manager() -> Result<Arc<MemoryCatalogManager>> {
Ok(Arc::new(MemoryCatalogManager::default()))
}
@@ -410,88 +328,99 @@ mod tests {
use common_catalog::consts::*;
use common_error::ext::ErrorExt;
use common_error::prelude::StatusCode;
use table::table::numbers::NumbersTable;
use table::table::numbers::{NumbersTable, NUMBERS_TABLE_NAME};
use super::*;
#[tokio::test]
async fn test_new_memory_catalog_list() {
let catalog_list = new_memory_catalog_list().unwrap();
let default_catalog = CatalogManager::catalog(&*catalog_list, DEFAULT_CATALOG_NAME)
.await
.unwrap()
.unwrap();
let catalog_list = new_memory_catalog_manager().unwrap();
let default_schema = default_catalog
.schema(DEFAULT_SCHEMA_NAME)
.await
.unwrap()
.unwrap();
let register_request = RegisterTableRequest {
catalog: DEFAULT_CATALOG_NAME.to_string(),
schema: DEFAULT_SCHEMA_NAME.to_string(),
table_name: NUMBERS_TABLE_NAME.to_string(),
table_id: NUMBERS_TABLE_ID,
table: Arc::new(NumbersTable::default()),
};
default_schema
.register_table("numbers".to_string(), Arc::new(NumbersTable::default()))
let _ = catalog_list.register_table(register_request).await.unwrap();
let table = catalog_list
.table(
DEFAULT_CATALOG_NAME,
DEFAULT_SCHEMA_NAME,
NUMBERS_TABLE_NAME,
)
.await
.unwrap();
let table = default_schema.table("numbers").await.unwrap();
assert!(table.is_some());
assert!(default_schema.table("not_exists").await.unwrap().is_none());
}
#[tokio::test]
async fn test_mem_provider() {
let provider = MemorySchemaProvider::new();
let table_name = "numbers";
assert!(!provider.table_exist_sync(table_name).unwrap());
provider.deregister_table_sync(table_name).unwrap();
let test_table = NumbersTable::default();
// register table successfully
assert!(provider
.register_table_sync(table_name.to_string(), Arc::new(test_table))
let _ = table.unwrap();
assert!(catalog_list
.table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, "not_exists")
.await
.unwrap()
.is_none());
assert!(provider.table_exist_sync(table_name).unwrap());
let other_table = NumbersTable::new(12);
let result = provider.register_table_sync(table_name.to_string(), Arc::new(other_table));
let err = result.err().unwrap();
assert_eq!(StatusCode::TableAlreadyExists, err.status_code());
}
#[tokio::test]
async fn test_mem_provider_rename_table() {
let provider = MemorySchemaProvider::new();
let table_name = "num";
assert!(!provider.table_exist_sync(table_name).unwrap());
let test_table: TableRef = Arc::new(NumbersTable::default());
async fn test_mem_manager_rename_table() {
let catalog = MemoryCatalogManager::default();
let table_name = "test_table";
assert!(!catalog
.table_exist(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, table_name)
.await
.unwrap());
// register test table
assert!(provider
.register_table_sync(table_name.to_string(), test_table.clone())
.unwrap()
.is_none());
assert!(provider.table_exist_sync(table_name).unwrap());
let table_id = 2333;
let register_request = RegisterTableRequest {
catalog: DEFAULT_CATALOG_NAME.to_string(),
schema: DEFAULT_SCHEMA_NAME.to_string(),
table_name: table_name.to_string(),
table_id,
table: Arc::new(NumbersTable::new(table_id)),
};
assert!(catalog.register_table(register_request).await.unwrap());
assert!(catalog
.table_exist(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, table_name)
.await
.unwrap());
// rename test table
let new_table_name = "numbers";
provider
.rename_table_sync(table_name, new_table_name.to_string())
.unwrap();
let new_table_name = "test_table_renamed";
let rename_request = RenameTableRequest {
catalog: DEFAULT_CATALOG_NAME.to_string(),
schema: DEFAULT_SCHEMA_NAME.to_string(),
table_name: table_name.to_string(),
new_table_name: new_table_name.to_string(),
table_id,
};
let _ = catalog.rename_table(rename_request).await.unwrap();
// test old table name not exist
assert!(!provider.table_exist_sync(table_name).unwrap());
provider.deregister_table_sync(table_name).unwrap();
assert!(!catalog
.table_exist(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, table_name)
.await
.unwrap());
// test new table name exists
assert!(provider.table_exist_sync(new_table_name).unwrap());
let registered_table = provider.table(new_table_name).await.unwrap().unwrap();
assert_eq!(
registered_table.table_info().ident.table_id,
test_table.table_info().ident.table_id
);
assert!(catalog
.table_exist(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, new_table_name)
.await
.unwrap());
let registered_table = catalog
.table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, new_table_name)
.await
.unwrap()
.unwrap();
assert_eq!(registered_table.table_info().ident.table_id, table_id);
let other_table = Arc::new(NumbersTable::new(2));
let result = provider
.register_table(new_table_name.to_string(), other_table)
.await;
let dup_register_request = RegisterTableRequest {
catalog: DEFAULT_CATALOG_NAME.to_string(),
schema: DEFAULT_SCHEMA_NAME.to_string(),
table_name: new_table_name.to_string(),
table_id: table_id + 1,
table: Arc::new(NumbersTable::new(table_id + 1)),
};
let result = catalog.register_table(dup_register_request).await;
let err = result.err().unwrap();
assert_eq!(StatusCode::TableAlreadyExists, err.status_code());
}
@@ -499,16 +428,11 @@ mod tests {
#[tokio::test]
async fn test_catalog_rename_table() {
let catalog = MemoryCatalogManager::default();
let schema = catalog
.schema(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME)
.await
.unwrap()
.unwrap();
// register table
let table_name = "num";
let table_id = 2333;
let table: TableRef = Arc::new(NumbersTable::new(table_id));
// register table
let register_table_req = RegisterTableRequest {
catalog: DEFAULT_CATALOG_NAME.to_string(),
schema: DEFAULT_SCHEMA_NAME.to_string(),
@@ -517,7 +441,11 @@ mod tests {
table,
};
assert!(catalog.register_table(register_table_req).await.unwrap());
assert!(schema.table_exist(table_name).await.unwrap());
assert!(catalog
.table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, table_name)
.await
.unwrap()
.is_some());
// rename table
let new_table_name = "numbers_new";
@@ -529,8 +457,16 @@ mod tests {
table_id,
};
assert!(catalog.rename_table(rename_table_req).await.unwrap());
assert!(!schema.table_exist(table_name).await.unwrap());
assert!(schema.table_exist(new_table_name).await.unwrap());
assert!(catalog
.table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, table_name)
.await
.unwrap()
.is_none());
assert!(catalog
.table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, new_table_name)
.await
.unwrap()
.is_some());
let registered_table = catalog
.table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, new_table_name)
@@ -543,50 +479,42 @@ mod tests {
#[test]
pub fn test_register_if_absent() {
let list = MemoryCatalogManager::default();
assert!(list
.register_catalog_if_absent(
"test_catalog".to_string(),
Arc::new(MemoryCatalogProvider::new())
)
.is_none());
list.register_catalog_if_absent(
"test_catalog".to_string(),
Arc::new(MemoryCatalogProvider::new()),
)
.unwrap();
list.as_any()
.downcast_ref::<MemoryCatalogManager>()
.unwrap();
assert!(!list.register_catalog_if_absent("test_catalog".to_string(),));
assert!(list.register_catalog_if_absent("test_catalog".to_string()));
}
#[tokio::test]
pub async fn test_catalog_deregister_table() {
let catalog = MemoryCatalogManager::default();
let schema = catalog
.schema(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME)
.await
.unwrap()
.unwrap();
let table_name = "foo_table";
let register_table_req = RegisterTableRequest {
catalog: DEFAULT_CATALOG_NAME.to_string(),
schema: DEFAULT_SCHEMA_NAME.to_string(),
table_name: "numbers".to_string(),
table_name: table_name.to_string(),
table_id: 2333,
table: Arc::new(NumbersTable::default()),
};
catalog.register_table(register_table_req).await.unwrap();
assert!(schema.table_exist("numbers").await.unwrap());
let _ = catalog.register_table(register_table_req).await.unwrap();
assert!(catalog
.table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, table_name)
.await
.unwrap()
.is_some());
let deregister_table_req = DeregisterTableRequest {
catalog: DEFAULT_CATALOG_NAME.to_string(),
schema: DEFAULT_SCHEMA_NAME.to_string(),
table_name: "numbers".to_string(),
table_name: table_name.to_string(),
};
catalog
.deregister_table(deregister_table_req)
.await
.unwrap();
assert!(!schema.table_exist("numbers").await.unwrap());
assert!(catalog
.table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, table_name)
.await
.unwrap()
.is_none());
}
}


@@ -20,6 +20,9 @@ pub(crate) const METRIC_CATALOG_MANAGER_CATALOG_COUNT: &str = "catalog.catalog_c
pub(crate) const METRIC_CATALOG_MANAGER_SCHEMA_COUNT: &str = "catalog.schema_count";
pub(crate) const METRIC_CATALOG_MANAGER_TABLE_COUNT: &str = "catalog.table_count";
pub(crate) const METRIC_CATALOG_KV_REMOTE_GET: &str = "catalog.kv.get.remote";
pub(crate) const METRIC_CATALOG_KV_GET: &str = "catalog.kv.get";
#[inline]
pub(crate) fn db_label(catalog: &str, schema: &str) -> (&'static str, String) {
(METRIC_DB_LABEL, build_db_string(catalog, schema))


@@ -12,78 +12,40 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use std::fmt::Debug;
use std::pin::Pin;
use std::sync::Arc;
pub use client::MetaKvBackend;
use futures::Stream;
use futures_util::StreamExt;
pub use manager::{RemoteCatalogManager, RemoteCatalogProvider, RemoteSchemaProvider};
use crate::error::Error;
pub use client::{CachedMetaKvBackend, MetaKvBackend};
pub use manager::RemoteCatalogManager;
mod client;
mod manager;
#[derive(Debug, Clone)]
pub struct Kv(pub Vec<u8>, pub Vec<u8>);
pub type ValueIter<'a, E> = Pin<Box<dyn Stream<Item = Result<Kv, E>> + Send + 'a>>;
#[cfg(feature = "testing")]
pub mod mock;
pub mod region_alive_keeper;
#[async_trait::async_trait]
pub trait KvBackend: Send + Sync {
fn range<'a, 'b>(&'a self, key: &[u8]) -> ValueIter<'b, Error>
where
'a: 'b;
async fn set(&self, key: &[u8], val: &[u8]) -> Result<(), Error>;
/// Compares and sets the value of a key. `expect` is the expected current value: if the backend's
/// value currently associated with the key equals `expect`, the value is updated to `val`.
///
/// - If the compare-and-set operation successfully updates the value, this method returns `Ok(Ok(()))`.
/// - If the associated value is not the same as `expect`, nothing is updated and an
/// `Ok(Err(Option<Vec<u8>>))` is returned; the inner `Option<Vec<u8>>` carries the key's current value.
/// - If any error happens during the operation, an `Err(Error)` is returned.
async fn compare_and_set(
&self,
key: &[u8],
expect: &[u8],
val: &[u8],
) -> Result<Result<(), Option<Vec<u8>>>, Error>;
async fn delete_range(&self, key: &[u8], end: &[u8]) -> Result<(), Error>;
async fn delete(&self, key: &[u8]) -> Result<(), Error> {
self.delete_range(key, &[]).await
}
/// The default `get` is implemented on top of the `range` method.
async fn get(&self, key: &[u8]) -> Result<Option<Kv>, Error> {
let mut iter = self.range(key);
while let Some(r) = iter.next().await {
let kv = r?;
if kv.0 == key {
return Ok(Some(kv));
}
}
return Ok(None);
}
pub trait KvCacheInvalidator: Send + Sync {
async fn invalidate_key(&self, key: &[u8]);
}
pub type KvBackendRef = Arc<dyn KvBackend>;
pub type KvCacheInvalidatorRef = Arc<dyn KvCacheInvalidator>;
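// A small sketch (not part of the diff; `try_cas` is a hypothetical helper) of how a caller is
// expected to unpack the nested result documented on `compare_and_set` above.
async fn try_cas(backend: &KvBackendRef, key: &[u8], expect: &[u8], val: &[u8]) -> Result<bool, Error> {
    match backend.compare_and_set(key, expect, val).await? {
        // Inner Ok: the stored value matched `expect` and was replaced by `val`.
        Ok(()) => Ok(true),
        // Inner Err: nothing was written; the payload is the key's current value
        // (`None` means the key does not exist yet).
        Err(_current) => Ok(false),
    }
}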
#[cfg(test)]
mod tests {
use async_stream::stream;
use std::any::Any;
use super::*;
use async_stream::stream;
use common_meta::kv_backend::{Kv, KvBackend, ValueIter};
use crate::error::Error;
struct MockKvBackend {}
#[async_trait::async_trait]
impl KvBackend for MockKvBackend {
type Error = Error;
fn range<'a, 'b>(&'a self, _key: &[u8]) -> ValueIter<'b, Error>
where
'a: 'b,
@@ -114,17 +76,29 @@ mod tests {
async fn delete_range(&self, _key: &[u8], _end: &[u8]) -> Result<(), Error> {
unimplemented!()
}
async fn move_value(&self, _from_key: &[u8], _to_key: &[u8]) -> Result<(), Error> {
unimplemented!()
}
fn as_any(&self) -> &dyn Any {
self
}
}
#[tokio::test]
async fn test_get() {
let backend = MockKvBackend {};
let result = backend.get(0.to_string().as_bytes()).await;
assert_eq!(0.to_string().as_bytes(), result.unwrap().unwrap().0);
let result = backend.get(1.to_string().as_bytes()).await;
assert_eq!(1.to_string().as_bytes(), result.unwrap().unwrap().0);
let result = backend.get(2.to_string().as_bytes()).await;
assert_eq!(2.to_string().as_bytes(), result.unwrap().unwrap().0);
let result = backend.get(3.to_string().as_bytes()).await;
assert!(result.unwrap().is_none());
}


@@ -12,17 +12,167 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use std::any::Any;
use std::fmt::Debug;
use std::sync::Arc;
use std::time::Duration;
use async_stream::stream;
use common_telemetry::info;
use common_error::prelude::BoxedError;
use common_meta::error::Error::{CacheNotGet, GetKvCache};
use common_meta::error::{CacheNotGetSnafu, Error, MetaSrvSnafu, Result};
use common_meta::kv_backend::{Kv, KvBackend, KvBackendRef, ValueIter};
use common_meta::rpc::store::{
CompareAndPutRequest, DeleteRangeRequest, MoveValueRequest, PutRequest, RangeRequest,
};
use common_telemetry::{info, timer};
use meta_client::client::MetaClient;
use meta_client::rpc::{CompareAndPutRequest, DeleteRangeRequest, PutRequest, RangeRequest};
use snafu::ResultExt;
use moka::future::{Cache, CacheBuilder};
use snafu::{OptionExt, ResultExt};
use super::KvCacheInvalidator;
use crate::metrics::{METRIC_CATALOG_KV_GET, METRIC_CATALOG_KV_REMOTE_GET};
const CACHE_MAX_CAPACITY: u64 = 10000;
const CACHE_TTL_SECOND: u64 = 10 * 60;
const CACHE_TTI_SECOND: u64 = 5 * 60;
pub type CacheBackendRef = Arc<Cache<Vec<u8>, Kv>>;
pub struct CachedMetaKvBackend {
kv_backend: KvBackendRef,
cache: CacheBackendRef,
}
#[async_trait::async_trait]
impl KvBackend for CachedMetaKvBackend {
type Error = Error;
fn range<'a, 'b>(&'a self, key: &[u8]) -> ValueIter<'b, Error>
where
'a: 'b,
{
self.kv_backend.range(key)
}
async fn get(&self, key: &[u8]) -> Result<Option<Kv>> {
let _timer = timer!(METRIC_CATALOG_KV_GET);
let init = async {
let _timer = timer!(METRIC_CATALOG_KV_REMOTE_GET);
self.kv_backend.get(key).await.map(|val| {
val.with_context(|| CacheNotGetSnafu {
key: String::from_utf8_lossy(key),
})
})?
};
// currently moka doesn't have `optionally_try_get_with_by_ref`
// TODO(fys): change to moka method when available
// https://github.com/moka-rs/moka/issues/254
match self.cache.try_get_with_by_ref(key, init).await {
Ok(val) => Ok(Some(val)),
Err(e) => match e.as_ref() {
CacheNotGet { .. } => Ok(None),
_ => Err(e),
},
}
.map_err(|e| GetKvCache {
err_msg: e.to_string(),
})
}
async fn set(&self, key: &[u8], val: &[u8]) -> Result<()> {
let ret = self.kv_backend.set(key, val).await;
if ret.is_ok() {
self.invalidate_key(key).await;
}
ret
}
async fn delete(&self, key: &[u8]) -> Result<()> {
let ret = self.kv_backend.delete_range(key, &[]).await;
if ret.is_ok() {
self.invalidate_key(key).await;
}
ret
}
async fn delete_range(&self, _key: &[u8], _end: &[u8]) -> Result<()> {
// TODO(fys): implement it
unimplemented!()
}
async fn compare_and_set(
&self,
key: &[u8],
expect: &[u8],
val: &[u8],
) -> Result<std::result::Result<(), Option<Vec<u8>>>> {
let ret = self.kv_backend.compare_and_set(key, expect, val).await;
if ret.is_ok() {
self.invalidate_key(key).await;
}
ret
}
async fn move_value(&self, from_key: &[u8], to_key: &[u8]) -> Result<()> {
let ret = self.kv_backend.move_value(from_key, to_key).await;
if ret.is_ok() {
self.invalidate_key(from_key).await;
self.invalidate_key(to_key).await;
}
ret
}
fn as_any(&self) -> &dyn Any {
self
}
}
#[async_trait::async_trait]
impl KvCacheInvalidator for CachedMetaKvBackend {
async fn invalidate_key(&self, key: &[u8]) {
self.cache.invalidate(key).await
}
}
impl CachedMetaKvBackend {
pub fn new(client: Arc<MetaClient>) -> Self {
let cache = Arc::new(
CacheBuilder::new(CACHE_MAX_CAPACITY)
.time_to_live(Duration::from_secs(CACHE_TTL_SECOND))
.time_to_idle(Duration::from_secs(CACHE_TTI_SECOND))
.build(),
);
let kv_backend = Arc::new(MetaKvBackend { client });
Self { kv_backend, cache }
}
pub fn wrap(kv_backend: KvBackendRef) -> Self {
let cache = Arc::new(
CacheBuilder::new(CACHE_MAX_CAPACITY)
.time_to_live(Duration::from_secs(CACHE_TTL_SECOND))
.time_to_idle(Duration::from_secs(CACHE_TTI_SECOND))
.build(),
);
Self { kv_backend, cache }
}
pub fn cache(&self) -> &CacheBackendRef {
&self.cache
}
}
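// A minimal sketch (not part of the diff; `load_from_remote`, `cached_get` and `cached_set` are
// hypothetical stand-ins, and the snippet reuses the `moka` and `Duration` imports already at the
// top of this file) of the read-through pattern `CachedMetaKvBackend::get` uses above: a cache
// miss triggers a single remote load, an absent key is signalled through a sentinel error so
// "not found" is never cached, and writers invalidate the key after a successful mutation.
async fn load_from_remote(_key: &str) -> Result<Option<Vec<u8>>, String> {
    Ok(None) // pretend the key is absent on the remote side
}

async fn cached_get(cache: &Cache<String, Vec<u8>>, key: &str) -> Result<Option<Vec<u8>>, String> {
    let init = async {
        load_from_remote(key)
            .await?
            .ok_or_else(|| format!("not found: {key}"))
    };
    match cache.try_get_with_by_ref(key, init).await {
        Ok(val) => Ok(Some(val)),
        // moka wraps the init error in an `Arc`; the "not found" sentinel maps back to `Ok(None)`,
        // while any other load error is surfaced to the caller.
        Err(e) if e.starts_with("not found") => Ok(None),
        Err(e) => Err(e.to_string()),
    }
}

async fn cached_set(cache: &Cache<String, Vec<u8>>, key: &str, _val: Vec<u8>) -> Result<(), String> {
    // A real implementation would write to the remote store first (as `set` does above),
    // and only invalidate the cached entry once that write succeeded.
    cache.invalidate(key).await;
    Ok(())
}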
use crate::error::{Error, MetaSrvSnafu};
use crate::remote::{Kv, KvBackend, ValueIter};
#[derive(Debug)]
pub struct MetaKvBackend {
pub client: Arc<MetaClient>,
@@ -33,6 +183,8 @@ pub struct MetaKvBackend {
/// comparing to `Accessor`'s list and get method.
#[async_trait::async_trait]
impl KvBackend for MetaKvBackend {
type Error = Error;
fn range<'a, 'b>(&'a self, key: &[u8]) -> ValueIter<'b, Error>
where
'a: 'b,
@@ -43,6 +195,7 @@ impl KvBackend for MetaKvBackend {
.client
.range(RangeRequest::new().with_prefix(key))
.await
.map_err(BoxedError::new)
.context(MetaSrvSnafu)?;
let kvs = resp.take_kvs();
for mut kv in kvs.into_iter() {
@@ -51,11 +204,12 @@ impl KvBackend for MetaKvBackend {
}))
}
async fn get(&self, key: &[u8]) -> Result<Option<Kv>, Error> {
async fn get(&self, key: &[u8]) -> Result<Option<Kv>> {
let mut response = self
.client
.range(RangeRequest::new().with_key(key))
.await
.map_err(BoxedError::new)
.context(MetaSrvSnafu)?;
Ok(response
.take_kvs()
@@ -63,17 +217,27 @@ impl KvBackend for MetaKvBackend {
.map(|kv| Kv(kv.take_key(), kv.take_value())))
}
async fn set(&self, key: &[u8], val: &[u8]) -> Result<(), Error> {
async fn set(&self, key: &[u8], val: &[u8]) -> Result<()> {
let req = PutRequest::new()
.with_key(key.to_vec())
.with_value(val.to_vec());
let _ = self.client.put(req).await.context(MetaSrvSnafu)?;
let _ = self
.client
.put(req)
.await
.map_err(BoxedError::new)
.context(MetaSrvSnafu)?;
Ok(())
}
async fn delete_range(&self, key: &[u8], end: &[u8]) -> Result<(), Error> {
async fn delete_range(&self, key: &[u8], end: &[u8]) -> Result<()> {
let req = DeleteRangeRequest::new().with_range(key.to_vec(), end.to_vec());
let resp = self.client.delete_range(req).await.context(MetaSrvSnafu)?;
let resp = self
.client
.delete_range(req)
.await
.map_err(BoxedError::new)
.context(MetaSrvSnafu)?;
info!(
"Delete range, key: {}, end: {}, deleted: {}",
String::from_utf8_lossy(key),
@@ -89,7 +253,7 @@ impl KvBackend for MetaKvBackend {
key: &[u8],
expect: &[u8],
val: &[u8],
) -> Result<Result<(), Option<Vec<u8>>>, Error> {
) -> Result<std::result::Result<(), Option<Vec<u8>>>> {
let request = CompareAndPutRequest::new()
.with_key(key.to_vec())
.with_expect(expect.to_vec())
@@ -98,6 +262,7 @@ impl KvBackend for MetaKvBackend {
.client
.compare_and_put(request)
.await
.map_err(BoxedError::new)
.context(MetaSrvSnafu)?;
if response.is_success() {
Ok(Ok(()))
@@ -105,4 +270,19 @@ impl KvBackend for MetaKvBackend {
Ok(Err(response.take_prev_kv().map(|v| v.value().to_vec())))
}
}
async fn move_value(&self, from_key: &[u8], to_key: &[u8]) -> Result<()> {
let req = MoveValueRequest::new(from_key, to_key);
let _ = self
.client
.move_value(req)
.await
.map_err(BoxedError::new)
.context(MetaSrvSnafu)?;
Ok(())
}
fn as_any(&self) -> &dyn Any {
self
}
}

File diff suppressed because it is too large.


@@ -0,0 +1,119 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::HashMap;
use std::sync::{Arc, RwLock as StdRwLock};
use common_recordbatch::RecordBatch;
use datatypes::data_type::ConcreteDataType;
use datatypes::schema::{ColumnSchema, Schema};
use datatypes::vectors::StringVector;
use table::engine::{CloseTableResult, EngineContext, TableEngine};
use table::metadata::TableId;
use table::requests::{
AlterTableRequest, CloseTableRequest, CreateTableRequest, DropTableRequest, OpenTableRequest,
};
use table::test_util::MemTable;
use table::TableRef;
#[derive(Default)]
pub struct MockTableEngine {
tables: StdRwLock<HashMap<TableId, TableRef>>,
}
#[async_trait::async_trait]
impl TableEngine for MockTableEngine {
fn name(&self) -> &str {
"MockTableEngine"
}
/// Create a table with only one column
async fn create_table(
&self,
_ctx: &EngineContext,
request: CreateTableRequest,
) -> table::Result<TableRef> {
let table_id = request.id;
let schema = Arc::new(Schema::new(vec![ColumnSchema::new(
"name",
ConcreteDataType::string_datatype(),
true,
)]));
let data = vec![Arc::new(StringVector::from(vec!["a", "b", "c"])) as _];
let record_batch = RecordBatch::new(schema, data).unwrap();
let table: TableRef = Arc::new(MemTable::new_with_catalog(
&request.table_name,
record_batch,
table_id,
request.catalog_name,
request.schema_name,
vec![0],
)) as Arc<_>;
let mut tables = self.tables.write().unwrap();
let _ = tables.insert(table_id, table.clone() as TableRef);
Ok(table)
}
async fn open_table(
&self,
_ctx: &EngineContext,
request: OpenTableRequest,
) -> table::Result<Option<TableRef>> {
Ok(self.tables.read().unwrap().get(&request.table_id).cloned())
}
async fn alter_table(
&self,
_ctx: &EngineContext,
_request: AlterTableRequest,
) -> table::Result<TableRef> {
unimplemented!()
}
fn get_table(
&self,
_ctx: &EngineContext,
table_id: TableId,
) -> table::Result<Option<TableRef>> {
Ok(self.tables.read().unwrap().get(&table_id).cloned())
}
fn table_exists(&self, _ctx: &EngineContext, table_id: TableId) -> bool {
self.tables.read().unwrap().contains_key(&table_id)
}
async fn drop_table(
&self,
_ctx: &EngineContext,
_request: DropTableRequest,
) -> table::Result<bool> {
unimplemented!()
}
async fn close_table(
&self,
_ctx: &EngineContext,
request: CloseTableRequest,
) -> table::Result<CloseTableResult> {
let _ = self.tables.write().unwrap().remove(&request.table_id);
Ok(CloseTableResult::Released(vec![]))
}
async fn close(&self) -> table::Result<()> {
Ok(())
}
}
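// A short usage sketch (not part of the diff): it drives the mock engine above end to end,
// reusing the CreateTableRequest field list that appears verbatim in the region_alive_keeper
// tests later in this diff. The module and test names are invented; tokio's test macro is
// assumed to be available, as it is in the other test modules of this crate.
#[cfg(test)]
mod mock_engine_usage {
    use datatypes::schema::RawSchema;
    use table::requests::TableOptions;

    use super::*;

    #[tokio::test]
    async fn create_then_lookup() {
        let engine = MockTableEngine::default();
        let ctx = EngineContext::default();
        let request = CreateTableRequest {
            id: 42,
            catalog_name: "greptime".to_string(),
            schema_name: "public".to_string(),
            table_name: "demo".to_string(),
            desc: None,
            schema: RawSchema {
                column_schemas: vec![],
                timestamp_index: None,
                version: 0,
            },
            region_numbers: vec![0],
            primary_key_indices: vec![],
            create_if_not_exists: false,
            table_options: TableOptions::default(),
            engine: "MockTableEngine".to_string(),
        };
        let _ = engine.create_table(&ctx, request).await.unwrap();
        // Tables are indexed by TableId in this mock.
        assert!(engine.table_exists(&ctx, 42));
        assert!(engine.get_table(&ctx, 42).unwrap().is_some());
    }
}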


@@ -0,0 +1,822 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::HashMap;
use std::future::Future;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use async_trait::async_trait;
use common_meta::error::InvalidProtoMsgSnafu;
use common_meta::heartbeat::handler::{
HandleControl, HeartbeatResponseHandler, HeartbeatResponseHandlerContext,
};
use common_meta::ident::TableIdent;
use common_meta::RegionIdent;
use common_telemetry::{debug, error, info, warn};
use snafu::{OptionExt, ResultExt};
use store_api::storage::RegionNumber;
use table::engine::manager::TableEngineManagerRef;
use table::engine::{CloseTableResult, EngineContext, TableEngineRef};
use table::requests::CloseTableRequest;
use table::TableRef;
use tokio::sync::{mpsc, oneshot, Mutex};
use tokio::task::JoinHandle;
use tokio::time::{Duration, Instant};
use crate::error::{Result, TableEngineNotFoundSnafu};
/// [RegionAliveKeepers] manages all [RegionAliveKeeper] in a scope of tables.
pub struct RegionAliveKeepers {
table_engine_manager: TableEngineManagerRef,
keepers: Arc<Mutex<HashMap<TableIdent, Arc<RegionAliveKeeper>>>>,
heartbeat_interval_millis: u64,
started: AtomicBool,
/// The epoch when [RegionAliveKeepers] is created. It's used to get a monotonically non-decreasing
/// elapsed time when submitting heartbeats to Metasrv (because [Instant] is monotonically
/// non-decreasing). Each heartbeat request carries the duration since this epoch, and that
/// duration acts as a common reference point for the regions' keep-alive leases.
epoch: Instant,
}
impl RegionAliveKeepers {
pub fn new(
table_engine_manager: TableEngineManagerRef,
heartbeat_interval_millis: u64,
) -> Self {
Self {
table_engine_manager,
keepers: Arc::new(Mutex::new(HashMap::new())),
heartbeat_interval_millis,
started: AtomicBool::new(false),
epoch: Instant::now(),
}
}
pub async fn find_keeper(&self, table_ident: &TableIdent) -> Option<Arc<RegionAliveKeeper>> {
self.keepers.lock().await.get(table_ident).cloned()
}
pub async fn register_table(&self, table_ident: TableIdent, table: TableRef) -> Result<()> {
let keeper = self.find_keeper(&table_ident).await;
if keeper.is_some() {
return Ok(());
}
let table_engine = self
.table_engine_manager
.engine(&table_ident.engine)
.context(TableEngineNotFoundSnafu {
engine_name: &table_ident.engine,
})?;
let keeper = Arc::new(RegionAliveKeeper::new(
table_engine,
table_ident.clone(),
self.heartbeat_interval_millis,
));
for r in table.table_info().meta.region_numbers.iter() {
keeper.register_region(*r).await;
}
let mut keepers = self.keepers.lock().await;
let _ = keepers.insert(table_ident.clone(), keeper.clone());
if self.started.load(Ordering::Relaxed) {
keeper.start().await;
info!("RegionAliveKeeper for table {table_ident} is started!");
} else {
info!("RegionAliveKeeper for table {table_ident} is registered but not started yet!");
}
Ok(())
}
pub async fn deregister_table(
&self,
table_ident: &TableIdent,
) -> Option<Arc<RegionAliveKeeper>> {
self.keepers.lock().await.remove(table_ident).map(|x| {
info!("Deregister RegionAliveKeeper for table {table_ident}");
x
})
}
pub async fn register_region(&self, region_ident: &RegionIdent) {
let table_ident = &region_ident.table_ident;
let Some(keeper) = self.find_keeper(table_ident).await else {
// Alive keeper could be affected by lagging msg, just warn and ignore.
warn!("Alive keeper for region {region_ident} is not found!");
return;
};
keeper.register_region(region_ident.region_number).await
}
pub async fn deregister_region(&self, region_ident: &RegionIdent) {
let table_ident = &region_ident.table_ident;
let Some(keeper) = self.find_keeper(table_ident).await else {
// Alive keeper could be affected by lagging msg, just warn and ignore.
warn!("Alive keeper for region {region_ident} is not found!");
return;
};
let _ = keeper.deregister_region(region_ident.region_number).await;
}
pub async fn start(&self) {
let keepers = self.keepers.lock().await;
for keeper in keepers.values() {
keeper.start().await;
}
self.started.store(true, Ordering::Relaxed);
info!(
"RegionAliveKeepers for tables {:?} are started!",
keepers.keys().map(|x| x.to_string()).collect::<Vec<_>>(),
);
}
pub fn epoch(&self) -> Instant {
self.epoch
}
}
#[async_trait]
impl HeartbeatResponseHandler for RegionAliveKeepers {
fn is_acceptable(&self, ctx: &HeartbeatResponseHandlerContext) -> bool {
!ctx.response.region_leases.is_empty()
}
async fn handle(
&self,
ctx: &mut HeartbeatResponseHandlerContext,
) -> common_meta::error::Result<HandleControl> {
let leases = ctx.response.region_leases.drain(..).collect::<Vec<_>>();
for lease in leases {
let table_ident: TableIdent = match lease
.table_ident
.context(InvalidProtoMsgSnafu {
err_msg: "'table_ident' is missing in RegionLease",
})
.and_then(|x| x.try_into())
{
Ok(x) => x,
Err(e) => {
error!(e; "");
continue;
}
};
let Some(keeper) = self.keepers.lock().await.get(&table_ident).cloned() else {
// Alive keeper could be affected by lagging msg, just warn and ignore.
warn!("Alive keeper for table {table_ident} is not found!");
continue;
};
let start_instant = self.epoch + Duration::from_millis(lease.duration_since_epoch);
let deadline = start_instant + Duration::from_secs(lease.lease_seconds);
keeper.keep_lived(lease.regions, deadline).await;
}
Ok(HandleControl::Continue)
}
}
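// A small sketch (not part of the diff; `lease_deadline` is a hypothetical helper, reusing the
// `tokio::time` imports at the top of this file) of the deadline arithmetic `handle` applies
// above: Metasrv echoes back the elapsed time the Datanode reported (measured from `epoch`), and
// the lease deadline is that reported instant plus the granted lease length.
fn lease_deadline(epoch: Instant, duration_since_epoch_millis: u64, lease_seconds: u64) -> Instant {
    let reported_at = epoch + Duration::from_millis(duration_since_epoch_millis);
    reported_at + Duration::from_secs(lease_seconds)
}
// For example, a 100-second lease granted for a heartbeat sent 5 seconds after the epoch resets
// the region's countdown to `epoch + 105s`, regardless of how long the round trip took.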
/// [RegionAliveKeeper] starts a countdown for each region in a table. When a deadline is reached,
/// the region will be closed.
/// The deadline is controlled by Metasrv. It works like a "lease" for regions: a Datanode submits its
/// opened regions to Metasrv in heartbeats. If Metasrv decides a region may keep residing on this
/// Datanode, it "extends" the region's "lease" by sending a new deadline for [RegionAliveKeeper] to
/// count down to.
pub struct RegionAliveKeeper {
table_engine: TableEngineRef,
table_ident: TableIdent,
countdown_task_handles: Arc<Mutex<HashMap<RegionNumber, Arc<CountdownTaskHandle>>>>,
heartbeat_interval_millis: u64,
started: AtomicBool,
}
impl RegionAliveKeeper {
fn new(
table_engine: TableEngineRef,
table_ident: TableIdent,
heartbeat_interval_millis: u64,
) -> Self {
Self {
table_engine,
table_ident,
countdown_task_handles: Arc::new(Mutex::new(HashMap::new())),
heartbeat_interval_millis,
started: AtomicBool::new(false),
}
}
async fn find_handle(&self, region: &RegionNumber) -> Option<Arc<CountdownTaskHandle>> {
self.countdown_task_handles
.lock()
.await
.get(region)
.cloned()
}
async fn register_region(&self, region: RegionNumber) {
if self.find_handle(&region).await.is_some() {
return;
}
let countdown_task_handles = Arc::downgrade(&self.countdown_task_handles);
let on_task_finished = async move {
if let Some(x) = countdown_task_handles.upgrade() {
let _ = x.lock().await.remove(&region);
} // Else the countdown task handles map could be dropped because the keeper is dropped.
};
let handle = Arc::new(CountdownTaskHandle::new(
self.table_engine.clone(),
self.table_ident.clone(),
region,
|| on_task_finished,
));
let mut handles = self.countdown_task_handles.lock().await;
let _ = handles.insert(region, handle.clone());
if self.started.load(Ordering::Relaxed) {
handle.start(self.heartbeat_interval_millis).await;
info!(
"Region alive countdown for region {region} in table {} is started!",
self.table_ident
);
} else {
info!(
"Region alive countdown for region {region} in table {} is registered but not started yet!",
self.table_ident
);
}
}
async fn deregister_region(&self, region: RegionNumber) -> Option<Arc<CountdownTaskHandle>> {
self.countdown_task_handles
.lock()
.await
.remove(&region)
.map(|x| {
info!(
"Deregister alive countdown for region {region} in table {}",
self.table_ident
);
x
})
}
async fn start(&self) {
let handles = self.countdown_task_handles.lock().await;
for handle in handles.values() {
handle.start(self.heartbeat_interval_millis).await;
}
self.started.store(true, Ordering::Relaxed);
info!(
"Region alive countdowns for regions {:?} in table {} are started!",
handles.keys().copied().collect::<Vec<_>>(),
self.table_ident
);
}
async fn keep_lived(&self, designated_regions: Vec<RegionNumber>, deadline: Instant) {
for region in designated_regions {
if let Some(handle) = self.find_handle(&region).await {
handle.reset_deadline(deadline).await;
}
// Else the region alive keeper might be triggered by lagging messages, we can safely ignore it.
}
}
pub async fn deadline(&self, region: RegionNumber) -> Option<Instant> {
let mut deadline = None;
if let Some(handle) = self.find_handle(&region).await {
let (s, r) = oneshot::channel();
if handle.tx.send(CountdownCommand::Deadline(s)).await.is_ok() {
deadline = r.await.ok()
}
}
deadline
}
}
#[derive(Debug)]
enum CountdownCommand {
Start(u64),
Reset(Instant),
Deadline(oneshot::Sender<Instant>),
}
struct CountdownTaskHandle {
tx: mpsc::Sender<CountdownCommand>,
handler: JoinHandle<()>,
table_ident: TableIdent,
region: RegionNumber,
}
impl CountdownTaskHandle {
/// Creates a new [CountdownTaskHandle] and starts the countdown task.
/// # Params
/// - `on_task_finished`: a callback invoked when the task finishes. Note that it will not be
/// invoked if the task is cancelled (by dropping the handle): we only want something meaningful
/// to be done when the task truly finishes, e.g. deregistering the handle from the map, and
/// dropping the handle does not necessarily mean the task has finished.
fn new<Fut>(
table_engine: TableEngineRef,
table_ident: TableIdent,
region: RegionNumber,
on_task_finished: impl FnOnce() -> Fut + Send + 'static,
) -> Self
where
Fut: Future<Output = ()> + Send,
{
let (tx, rx) = mpsc::channel(1024);
let mut countdown_task = CountdownTask {
table_engine,
table_ident: table_ident.clone(),
region,
rx,
};
let handler = common_runtime::spawn_bg(async move {
countdown_task.run().await;
on_task_finished().await;
});
Self {
tx,
handler,
table_ident,
region,
}
}
async fn start(&self, heartbeat_interval_millis: u64) {
if let Err(e) = self
.tx
.send(CountdownCommand::Start(heartbeat_interval_millis))
.await
{
warn!(
"Failed to start region alive keeper countdown: {e}. \
Maybe the task is stopped due to region been closed."
);
}
}
async fn reset_deadline(&self, deadline: Instant) {
if let Err(e) = self.tx.send(CountdownCommand::Reset(deadline)).await {
warn!(
"Failed to reset region alive keeper deadline: {e}. \
Maybe the task is stopped due to region been closed."
);
}
}
}
impl Drop for CountdownTaskHandle {
fn drop(&mut self) {
debug!(
"Aborting region alive countdown task for region {} in table {}",
self.region, self.table_ident,
);
self.handler.abort();
}
}
struct CountdownTask {
table_engine: TableEngineRef,
table_ident: TableIdent,
region: RegionNumber,
rx: mpsc::Receiver<CountdownCommand>,
}
impl CountdownTask {
async fn run(&mut self) {
// 30 years. See `Instant::far_future`.
let far_future = Instant::now() + Duration::from_secs(86400 * 365 * 30);
// Make sure the alive countdown is not gonna happen before heartbeat task is started (the
// "start countdown" command will be sent from heartbeat task).
let countdown = tokio::time::sleep_until(far_future);
tokio::pin!(countdown);
let region = &self.region;
let table_ident = &self.table_ident;
loop {
tokio::select! {
command = self.rx.recv() => {
match command {
Some(CountdownCommand::Start(heartbeat_interval_millis)) => {
// Set the first deadline 4 heartbeats from now (roughly 20 seconds from now if the heartbeat
// interval is set to the default 5 seconds), to make Datanode and Metasrv more tolerant of
// network or other jitters during startup.
let first_deadline = Instant::now() + Duration::from_millis(heartbeat_interval_millis) * 4;
countdown.set(tokio::time::sleep_until(first_deadline));
},
Some(CountdownCommand::Reset(deadline)) => {
if countdown.deadline() < deadline {
debug!(
"Reset deadline of region {region} of table {table_ident} to approximately {} seconds later",
(deadline - Instant::now()).as_secs_f32(),
);
countdown.set(tokio::time::sleep_until(deadline));
}
// Else the countdown could be either:
// - not started yet;
// - during startup protection;
// - received a lagging heartbeat message.
// All can be safely ignored.
},
None => {
info!(
"The handle of countdown task for region {region} of table {table_ident} \
is dropped, RegionAliveKeeper out."
);
break;
},
Some(CountdownCommand::Deadline(tx)) => {
let _ = tx.send(countdown.deadline());
}
}
}
() = &mut countdown => {
let result = self.close_region().await;
warn!(
"Region {region} of table {table_ident} is closed, result: {result:?}. \
RegionAliveKeeper out.",
);
break;
}
}
}
}
async fn close_region(&self) -> CloseTableResult {
let ctx = EngineContext::default();
let region = self.region;
let table_ident = &self.table_ident;
loop {
let request = CloseTableRequest {
catalog_name: table_ident.catalog.clone(),
schema_name: table_ident.schema.clone(),
table_name: table_ident.table.clone(),
table_id: table_ident.table_id,
region_numbers: vec![region],
flush: true,
};
match self.table_engine.close_table(&ctx, request).await {
Ok(result) => return result,
// If the region fails to close, retry immediately. Maybe we should panic instead?
Err(e) => error!(e;
"Failed to close region {region} of table {table_ident}. \
For the integrity of data, retry closing and retry without wait.",
),
}
}
}
}
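// A condensed sketch (not part of the diff; the `Countdown` type is invented, reusing the
// `tokio::time` imports at the top of this file) of the deadline rule `CountdownTask::run` applies
// above: the deadline starts in the far future, `start` arms it at 4 heartbeat intervals from now,
// and `reset` only ever moves it further out, so lagging or duplicated lease messages can never
// shorten an already granted lease.
struct Countdown {
    deadline: Instant,
}
impl Countdown {
    fn unarmed() -> Self {
        // Roughly "never", mirroring the 30-year far future used above.
        Self {
            deadline: Instant::now() + Duration::from_secs(86400 * 365 * 30),
        }
    }
    fn start(&mut self, heartbeat_interval_millis: u64) {
        // Startup protection: the first deadline is 4 heartbeat intervals away.
        self.deadline = Instant::now() + Duration::from_millis(heartbeat_interval_millis) * 4;
    }
    fn reset(&mut self, new_deadline: Instant) {
        // Ignore deadlines that would move the countdown backwards.
        if self.deadline < new_deadline {
            self.deadline = new_deadline;
        }
    }
}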
#[cfg(test)]
mod test {
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use api::v1::meta::{HeartbeatResponse, RegionLease};
use common_meta::heartbeat::mailbox::HeartbeatMailbox;
use datatypes::schema::RawSchema;
use table::engine::manager::MemoryTableEngineManager;
use table::engine::TableEngine;
use table::requests::{CreateTableRequest, TableOptions};
use table::test_util::EmptyTable;
use super::*;
use crate::remote::mock::MockTableEngine;
async fn prepare_keepers() -> (TableIdent, RegionAliveKeepers) {
let table_engine = Arc::new(MockTableEngine::default());
let table_engine_manager = Arc::new(MemoryTableEngineManager::new(table_engine));
let keepers = RegionAliveKeepers::new(table_engine_manager, 5000);
let catalog = "my_catalog";
let schema = "my_schema";
let table = "my_table";
let table_ident = TableIdent {
catalog: catalog.to_string(),
schema: schema.to_string(),
table: table.to_string(),
table_id: 1,
engine: "MockTableEngine".to_string(),
};
let table = Arc::new(EmptyTable::new(CreateTableRequest {
id: 1,
catalog_name: catalog.to_string(),
schema_name: schema.to_string(),
table_name: table.to_string(),
desc: None,
schema: RawSchema {
column_schemas: vec![],
timestamp_index: None,
version: 0,
},
region_numbers: vec![1, 2, 3],
primary_key_indices: vec![],
create_if_not_exists: false,
table_options: TableOptions::default(),
engine: "MockTableEngine".to_string(),
}));
keepers
.register_table(table_ident.clone(), table)
.await
.unwrap();
assert!(keepers.keepers.lock().await.contains_key(&table_ident));
(table_ident, keepers)
}
#[tokio::test(flavor = "multi_thread")]
async fn test_handle_heartbeat_response() {
let (table_ident, keepers) = prepare_keepers().await;
keepers.start().await;
let startup_protection_until = Instant::now() + Duration::from_secs(21);
let duration_since_epoch = (Instant::now() - keepers.epoch).as_millis() as _;
let lease_seconds = 100;
let response = HeartbeatResponse {
region_leases: vec![RegionLease {
table_ident: Some(table_ident.clone().into()),
regions: vec![1, 3], // Not extending region 2's lease time.
duration_since_epoch,
lease_seconds,
}],
..Default::default()
};
let keep_alive_until = keepers.epoch
+ Duration::from_millis(duration_since_epoch)
+ Duration::from_secs(lease_seconds);
let (tx, _) = mpsc::channel(8);
let mailbox = Arc::new(HeartbeatMailbox::new(tx));
let mut ctx = HeartbeatResponseHandlerContext::new(mailbox, response);
assert!(keepers.handle(&mut ctx).await.unwrap() == HandleControl::Continue);
// sleep to wait for background task spawned in `handle`
tokio::time::sleep(Duration::from_secs(1)).await;
async fn test(
keeper: &Arc<RegionAliveKeeper>,
region_number: RegionNumber,
startup_protection_until: Instant,
keep_alive_until: Instant,
is_kept_live: bool,
) {
let deadline = keeper.deadline(region_number).await.unwrap();
if is_kept_live {
assert!(deadline > startup_protection_until && deadline == keep_alive_until);
} else {
assert!(deadline <= startup_protection_until);
}
}
let keeper = &keepers
.keepers
.lock()
.await
.get(&table_ident)
.cloned()
.unwrap();
// Test that regions 1 and 3 are kept alive: their deadlines are updated to the desired instant.
test(keeper, 1, startup_protection_until, keep_alive_until, true).await;
test(keeper, 3, startup_protection_until, keep_alive_until, true).await;
// Test that region 2 is not kept alive. Its deadline is not updated: still within the startup protection period.
test(keeper, 2, startup_protection_until, keep_alive_until, false).await;
}
#[tokio::test(flavor = "multi_thread")]
async fn test_region_alive_keepers() {
let (table_ident, keepers) = prepare_keepers().await;
keepers
.register_region(&RegionIdent {
cluster_id: 1,
datanode_id: 1,
table_ident: table_ident.clone(),
region_number: 4,
})
.await;
keepers.start().await;
for keeper in keepers.keepers.lock().await.values() {
let regions = {
let handles = keeper.countdown_task_handles.lock().await;
handles.keys().copied().collect::<Vec<_>>()
};
for region in regions {
// assert countdown tasks are started
let deadline = keeper.deadline(region).await.unwrap();
assert!(deadline <= Instant::now() + Duration::from_secs(20));
}
}
keepers
.deregister_region(&RegionIdent {
cluster_id: 1,
datanode_id: 1,
table_ident: table_ident.clone(),
region_number: 1,
})
.await;
let mut regions = keepers
.find_keeper(&table_ident)
.await
.unwrap()
.countdown_task_handles
.lock()
.await
.keys()
.copied()
.collect::<Vec<_>>();
regions.sort();
assert_eq!(regions, vec![2, 3, 4]);
let keeper = keepers.deregister_table(&table_ident).await.unwrap();
assert!(Arc::try_unwrap(keeper).is_ok(), "keeper is not dropped");
assert!(keepers.keepers.lock().await.is_empty());
}
#[tokio::test(flavor = "multi_thread")]
async fn test_region_alive_keeper() {
let table_engine = Arc::new(MockTableEngine::default());
let table_ident = TableIdent {
catalog: "my_catalog".to_string(),
schema: "my_schema".to_string(),
table: "my_table".to_string(),
table_id: 1024,
engine: "mito".to_string(),
};
let keeper = RegionAliveKeeper::new(table_engine, table_ident, 1000);
let region = 1;
assert!(keeper.find_handle(&region).await.is_none());
keeper.register_region(region).await;
let _ = keeper.find_handle(&region).await.unwrap();
let ten_seconds_later = || Instant::now() + Duration::from_secs(10);
keeper.keep_lived(vec![1, 2, 3], ten_seconds_later()).await;
assert!(keeper.find_handle(&2).await.is_none());
assert!(keeper.find_handle(&3).await.is_none());
let far_future = Instant::now() + Duration::from_secs(86400 * 365 * 29);
// assert that if the keeper is not started, keep_lived has no effect
assert!(keeper.deadline(region).await.unwrap() > far_future);
keeper.start().await;
keeper.keep_lived(vec![1, 2, 3], ten_seconds_later()).await;
// assert keep_lived works if keeper is started
assert!(keeper.deadline(region).await.unwrap() <= ten_seconds_later());
let handle = keeper.deregister_region(region).await.unwrap();
assert!(Arc::try_unwrap(handle).is_ok(), "handle is not dropped");
assert!(keeper.find_handle(&region).await.is_none());
}
#[tokio::test(flavor = "multi_thread")]
async fn test_countdown_task_handle() {
let table_engine = Arc::new(MockTableEngine::default());
let table_ident = TableIdent {
catalog: "my_catalog".to_string(),
schema: "my_schema".to_string(),
table: "my_table".to_string(),
table_id: 1024,
engine: "mito".to_string(),
};
let finished = Arc::new(AtomicBool::new(false));
let finished_clone = finished.clone();
let handle = CountdownTaskHandle::new(
table_engine.clone(),
table_ident.clone(),
1,
|| async move { finished_clone.store(true, Ordering::Relaxed) },
);
let tx = handle.tx.clone();
// assert countdown task is running
tx.send(CountdownCommand::Start(5000)).await.unwrap();
assert!(!finished.load(Ordering::Relaxed));
drop(handle);
tokio::time::sleep(Duration::from_secs(1)).await;
// assert countdown task is stopped
assert!(tx
.try_send(CountdownCommand::Reset(
Instant::now() + Duration::from_secs(10)
))
.is_err());
// assert `on_task_finished` is not called (because the task is aborted by the handle's drop)
assert!(!finished.load(Ordering::Relaxed));
let finished = Arc::new(AtomicBool::new(false));
let finished_clone = finished.clone();
let handle = CountdownTaskHandle::new(table_engine, table_ident, 1, || async move {
finished_clone.store(true, Ordering::Relaxed)
});
handle.tx.send(CountdownCommand::Start(100)).await.unwrap();
tokio::time::sleep(Duration::from_secs(1)).await;
// assert `on_task_finished` is called when task is finished normally
assert!(finished.load(Ordering::Relaxed));
}
#[tokio::test(flavor = "multi_thread")]
async fn test_countdown_task_run() {
let ctx = &EngineContext::default();
let catalog = "my_catalog";
let schema = "my_schema";
let table = "my_table";
let table_id = 1;
let request = CreateTableRequest {
id: table_id,
catalog_name: catalog.to_string(),
schema_name: schema.to_string(),
table_name: table.to_string(),
desc: None,
schema: RawSchema {
column_schemas: vec![],
timestamp_index: None,
version: 0,
},
region_numbers: vec![],
primary_key_indices: vec![],
create_if_not_exists: false,
table_options: TableOptions::default(),
engine: "mito".to_string(),
};
let table_engine = Arc::new(MockTableEngine::default());
let _ = table_engine.create_table(ctx, request).await.unwrap();
let table_ident = TableIdent {
catalog: catalog.to_string(),
schema: schema.to_string(),
table: table.to_string(),
table_id,
engine: "mito".to_string(),
};
let (tx, rx) = mpsc::channel(10);
let mut task = CountdownTask {
table_engine: table_engine.clone(),
table_ident,
region: 1,
rx,
};
let _handle = common_runtime::spawn_bg(async move {
task.run().await;
});
async fn deadline(tx: &mpsc::Sender<CountdownCommand>) -> Instant {
let (s, r) = oneshot::channel();
tx.send(CountdownCommand::Deadline(s)).await.unwrap();
r.await.unwrap()
}
// if countdown task is not started, its deadline is set to far future
assert!(deadline(&tx).await > Instant::now() + Duration::from_secs(86400 * 365 * 29));
// start countdown in 250ms * 4 = 1s
tx.send(CountdownCommand::Start(250)).await.unwrap();
// assert deadline is correctly set
assert!(deadline(&tx).await <= Instant::now() + Duration::from_secs(1));
// reset countdown in 1.5s
tx.send(CountdownCommand::Reset(
Instant::now() + Duration::from_millis(1500),
))
.await
.unwrap();
// the table should still exist before the deadline is reached
assert!(table_engine.table_exists(ctx, table_id));
// sleep past the 1.5s deadline, sparing 500ms for the task to close the table
tokio::time::sleep(Duration::from_millis(2000)).await;
// assert the table is closed after the deadline is reached
assert!(!table_engine.table_exists(ctx, table_id));
}
}


@@ -1,69 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::any::Any;
use std::sync::Arc;
use async_trait::async_trait;
use table::TableRef;
use crate::error::{NotSupportedSnafu, Result};
/// Represents a schema, comprising a number of named tables.
#[async_trait]
pub trait SchemaProvider: Sync + Send {
/// Returns the schema provider as [`Any`](std::any::Any)
/// so that it can be downcast to a specific implementation.
fn as_any(&self) -> &dyn Any;
/// Retrieves the list of available table names in this schema.
async fn table_names(&self) -> Result<Vec<String>>;
/// Retrieves a specific table from the schema by name, provided it exists.
async fn table(&self, name: &str) -> Result<Option<TableRef>>;
/// If supported by the implementation, adds a new table to this schema.
/// If a table of the same name existed before, it returns "Table already exists" error.
async fn register_table(&self, name: String, _table: TableRef) -> Result<Option<TableRef>> {
NotSupportedSnafu {
op: format!("register_table({name}, <table>)"),
}
.fail()
}
/// If supported by the implementation, renames an existing table from this schema and returns it.
/// If no table of that name exists, returns "Table not found" error.
async fn rename_table(&self, name: &str, new_name: String) -> Result<TableRef> {
NotSupportedSnafu {
op: format!("rename_table({name}, {new_name})"),
}
.fail()
}
/// If supported by the implementation, removes an existing table from this schema and returns it.
/// If no table of that name exists, returns Ok(None).
async fn deregister_table(&self, name: &str) -> Result<Option<TableRef>> {
NotSupportedSnafu {
op: format!("deregister_table({name})"),
}
.fail()
}
/// If supported by the implementation, checks whether a table exists in the schema provider.
/// Returns false if there is no matching table in the schema provider,
/// and true otherwise.
async fn table_exist(&self, name: &str) -> Result<bool>;
}
pub type SchemaProviderRef = Arc<dyn SchemaProvider>;


@@ -20,8 +20,6 @@ use common_catalog::consts::{
DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, INFORMATION_SCHEMA_NAME, MITO_ENGINE,
SYSTEM_CATALOG_NAME, SYSTEM_CATALOG_TABLE_ID, SYSTEM_CATALOG_TABLE_NAME,
};
use common_query::logical_plan::Expr;
use common_query::physical_plan::{PhysicalPlanRef, SessionContext};
use common_recordbatch::SendableRecordBatchStream;
use common_telemetry::debug;
use common_time::util;
@@ -30,12 +28,13 @@ use datatypes::schema::{ColumnSchema, RawSchema, SchemaRef};
use datatypes::vectors::{BinaryVector, TimestampMillisecondVector, UInt8Vector};
use serde::{Deserialize, Serialize};
use snafu::{ensure, OptionExt, ResultExt};
use store_api::storage::ScanRequest;
use table::engine::{EngineContext, TableEngineRef};
use table::metadata::{TableId, TableInfoRef};
use table::requests::{
CreateTableRequest, DeleteRequest, InsertRequest, OpenTableRequest, TableOptions,
};
use table::{Table, TableRef};
use table::{Result as TableResult, Table, TableRef};
use crate::error::{
self, CreateSystemCatalogSnafu, EmptyValueSnafu, Error, InvalidEntryTypeSnafu, InvalidKeySnafu,
@@ -59,17 +58,12 @@ impl Table for SystemCatalogTable {
self.0.schema()
}
async fn scan(
&self,
projection: Option<&Vec<usize>>,
filters: &[Expr],
limit: Option<usize>,
) -> table::Result<PhysicalPlanRef> {
self.0.scan(projection, filters, limit).await
async fn scan_to_stream(&self, request: ScanRequest) -> TableResult<SendableRecordBatchStream> {
self.0.scan_to_stream(request).await
}
/// Insert values into table.
async fn insert(&self, request: InsertRequest) -> table::error::Result<usize> {
async fn insert(&self, request: InsertRequest) -> TableResult<usize> {
self.0.insert(request).await
}
@@ -77,7 +71,7 @@ impl Table for SystemCatalogTable {
self.0.table_info()
}
async fn delete(&self, request: DeleteRequest) -> table::Result<usize> {
async fn delete(&self, request: DeleteRequest) -> TableResult<usize> {
self.0.delete(request).await
}
@@ -93,6 +87,7 @@ impl SystemCatalogTable {
schema_name: INFORMATION_SCHEMA_NAME.to_string(),
table_name: SYSTEM_CATALOG_TABLE_NAME.to_string(),
table_id: SYSTEM_CATALOG_TABLE_ID,
region_numbers: vec![0],
};
let schema = build_system_catalog_schema();
let ctx = EngineContext::default();
@@ -130,14 +125,17 @@ impl SystemCatalogTable {
/// Create a stream of all entries inside system catalog table
pub async fn records(&self) -> Result<SendableRecordBatchStream> {
let full_projection = None;
let ctx = SessionContext::new();
let scan = self
.scan(full_projection, &[], None)
let scan_req = ScanRequest {
sequence: None,
projection: full_projection,
filters: vec![],
output_ordering: None,
limit: None,
};
let stream = self
.scan_to_stream(scan_req)
.await
.context(error::SystemCatalogTableScanSnafu)?;
let stream = scan
.execute(0, ctx.task_ctx())
.context(error::SystemCatalogTableScanExecSnafu)?;
Ok(stream)
}
}
@@ -205,38 +203,50 @@ pub fn build_table_insert_request(
build_insert_request(
EntryType::Table,
entry_key.as_bytes(),
serde_json::to_string(&TableEntryValue { table_name, engine })
.unwrap()
.as_bytes(),
serde_json::to_string(&TableEntryValue {
table_name,
engine,
is_deleted: false,
})
.unwrap()
.as_bytes(),
)
}
pub(crate) fn build_table_deletion_request(
request: &DeregisterTableRequest,
table_id: TableId,
) -> DeleteRequest {
let table_key = format_table_entry_key(&request.catalog, &request.schema, table_id);
DeleteRequest {
key_column_values: build_primary_key_columns(EntryType::Table, table_key.as_bytes()),
}
) -> InsertRequest {
let entry_key = format_table_entry_key(&request.catalog, &request.schema, table_id);
build_insert_request(
EntryType::Table,
entry_key.as_bytes(),
serde_json::to_string(&TableEntryValue {
table_name: "".to_string(),
engine: "".to_string(),
is_deleted: true,
})
.unwrap()
.as_bytes(),
)
}
fn build_primary_key_columns(entry_type: EntryType, key: &[u8]) -> HashMap<String, VectorRef> {
let mut m = HashMap::with_capacity(3);
m.insert(
"entry_type".to_string(),
Arc::new(UInt8Vector::from_slice([entry_type as u8])) as _,
);
m.insert(
"key".to_string(),
Arc::new(BinaryVector::from_slice(&[key])) as _,
);
// Timestamp in key part is intentionally left to 0
m.insert(
"timestamp".to_string(),
Arc::new(TimestampMillisecondVector::from_slice([0])) as _,
);
m
HashMap::from([
(
"entry_type".to_string(),
Arc::new(UInt8Vector::from_slice([entry_type as u8])) as VectorRef,
),
(
"key".to_string(),
Arc::new(BinaryVector::from_slice(&[key])) as VectorRef,
),
(
"timestamp".to_string(),
// Timestamp in the key part is intentionally left as 0
Arc::new(TimestampMillisecondVector::from_slice([0])) as VectorRef,
),
])
}
pub fn build_schema_insert_request(catalog_name: String, schema_name: String) -> InsertRequest {
@@ -256,18 +266,18 @@ pub fn build_insert_request(entry_type: EntryType, key: &[u8], value: &[u8]) ->
let mut columns_values = HashMap::with_capacity(6);
columns_values.extend(primary_key_columns.into_iter());
columns_values.insert(
let _ = columns_values.insert(
"value".to_string(),
Arc::new(BinaryVector::from_slice(&[value])) as _,
);
let now = util::current_time_millis();
columns_values.insert(
let _ = columns_values.insert(
"gmt_created".to_string(),
Arc::new(TimestampMillisecondVector::from_slice([now])) as _,
);
columns_values.insert(
let _ = columns_values.insert(
"gmt_modified".to_string(),
Arc::new(TimestampMillisecondVector::from_slice([now])) as _,
);
@@ -337,6 +347,7 @@ pub fn decode_system_catalog(
table_name: table_meta.table_name,
table_id,
engine: table_meta.engine,
is_deleted: table_meta.is_deleted,
}))
}
}
@@ -393,6 +404,7 @@ pub struct TableEntry {
pub table_name: String,
pub table_id: TableId,
pub engine: String,
pub is_deleted: bool,
}
#[derive(Debug, Serialize, Deserialize, PartialEq, Eq)]
@@ -401,12 +413,19 @@ pub struct TableEntryValue {
#[serde(default = "mito_engine")]
pub engine: String,
#[serde(default = "not_deleted")]
pub is_deleted: bool,
}
fn mito_engine() -> String {
MITO_ENGINE.to_string()
}
fn not_deleted() -> bool {
false
}
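// A minimal sketch (not part of the diff; the module and test names are invented) of what the
// serde defaults above buy us: a value written before this change, which carries neither `engine`
// nor `is_deleted`, still deserializes with the defaults, while a tombstone shaped like the one
// produced for a mark-deletion round-trips with `is_deleted: true`.
#[cfg(test)]
mod entry_value_defaults {
    use super::*;

    #[test]
    fn legacy_and_tombstone_values() {
        // Legacy entry: the missing fields fall back to MITO_ENGINE and `false`.
        let legacy: TableEntryValue = serde_json::from_str(r#"{"table_name":"t"}"#).unwrap();
        assert_eq!(legacy.engine, MITO_ENGINE);
        assert!(!legacy.is_deleted);

        // Tombstone entry, shaped like the one built by `build_table_deletion_request`.
        let tombstone = TableEntryValue {
            table_name: "".to_string(),
            engine: "".to_string(),
            is_deleted: true,
        };
        let json = serde_json::to_string(&tombstone).unwrap();
        assert!(serde_json::from_str::<TableEntryValue>(&json).unwrap().is_deleted);
    }
}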
#[cfg(test)]
mod tests {
use common_recordbatch::RecordBatches;
@@ -476,14 +495,13 @@ mod tests {
}
#[test]
#[should_panic]
pub fn test_decode_mismatch() {
decode_system_catalog(
assert!(decode_system_catalog(
Some(EntryType::Table as u8),
Some("some_catalog.some_schema.42".as_bytes()),
None,
)
.unwrap();
.is_err());
}
#[test]
@@ -498,7 +516,7 @@ mod tests {
let dir = create_temp_dir("system-table-test");
let store_dir = dir.path().to_string_lossy();
let mut builder = object_store::services::Fs::default();
builder.root(&store_dir);
let _ = builder.root(&store_dir);
let object_store = ObjectStore::new(builder).unwrap().finish();
let noop_compaction_scheduler = Arc::new(NoopCompactionScheduler::default());
let table_engine = Arc::new(MitoEngine::new(
@@ -508,7 +526,8 @@ mod tests {
Arc::new(NoopLogStore::default()),
object_store.clone(),
noop_compaction_scheduler,
),
)
.unwrap(),
object_store,
));
(dir, table_engine)
@@ -565,6 +584,7 @@ mod tests {
table_name: "my_table".to_string(),
table_id: 1,
engine: MITO_ENGINE.to_string(),
is_deleted: false,
});
assert_eq!(entry, expected);
@@ -576,11 +596,11 @@ mod tests {
},
1,
);
let result = catalog_table.delete(table_deletion).await.unwrap();
let result = catalog_table.insert(table_deletion).await.unwrap();
assert_eq!(result, 1);
let records = catalog_table.records().await.unwrap();
let batches = RecordBatches::try_collect(records).await.unwrap().take();
assert_eq!(batches.len(), 0);
assert_eq!(batches.len(), 1);
}
}


@@ -24,10 +24,7 @@ use session::context::QueryContext;
use snafu::{ensure, OptionExt};
use table::table::adapter::DfTableProviderAdapter;
use crate::error::{
CatalogNotFoundSnafu, QueryAccessDeniedSnafu, Result, SchemaNotFoundSnafu, TableNotExistSnafu,
};
use crate::information_schema::InformationSchemaProvider;
use crate::error::{QueryAccessDeniedSnafu, Result, TableNotExistSnafu};
use crate::CatalogManagerRef;
pub struct DfTableSourceProvider {
@@ -62,7 +59,8 @@ impl DfTableSourceProvider {
TableReference::Bare { .. } => (),
TableReference::Partial { schema, .. } => {
ensure!(
schema.as_ref() == self.default_schema,
schema.as_ref() == self.default_schema
|| schema.as_ref() == INFORMATION_SCHEMA_NAME,
QueryAccessDeniedSnafu {
catalog: &self.default_catalog,
schema: schema.as_ref(),
@@ -74,7 +72,8 @@ impl DfTableSourceProvider {
} => {
ensure!(
catalog.as_ref() == self.default_catalog
&& schema.as_ref() == self.default_schema,
&& (schema.as_ref() == self.default_schema
|| schema.as_ref() == INFORMATION_SCHEMA_NAME),
QueryAccessDeniedSnafu {
catalog: catalog.as_ref(),
schema: schema.as_ref()
@@ -102,41 +101,18 @@ impl DfTableSourceProvider {
let schema_name = table_ref.schema.as_ref();
let table_name = table_ref.table.as_ref();
let schema = if schema_name != INFORMATION_SCHEMA_NAME {
let catalog = self
.catalog_manager
.catalog(catalog_name)
.await?
.context(CatalogNotFoundSnafu { catalog_name })?;
catalog
.schema(schema_name)
.await?
.context(SchemaNotFoundSnafu {
catalog: catalog_name,
schema: schema_name,
})?
} else {
let catalog_provider = self
.catalog_manager
.catalog(catalog_name)
.await?
.context(CatalogNotFoundSnafu { catalog_name })?;
Arc::new(InformationSchemaProvider::new(
catalog_name.to_string(),
catalog_provider,
))
};
let table = schema
.table(table_name)
let table = self
.catalog_manager
.table(catalog_name, schema_name, table_name)
.await?
.with_context(|| TableNotExistSnafu {
table: format_full_table_name(catalog_name, schema_name, table_name),
})?;
let table = DfTableProviderAdapter::new(table);
let table = provider_as_source(Arc::new(table));
self.resolved_tables.insert(resolved_name, table.clone());
Ok(table)
let provider = DfTableProviderAdapter::new(table);
let source = provider_as_source(Arc::new(provider));
let _ = self.resolved_tables.insert(resolved_name, source.clone());
Ok(source)
}
}
@@ -160,14 +136,14 @@ mod tests {
table: Cow::Borrowed("table_name"),
};
let result = table_provider.resolve_table_ref(table_ref);
assert!(result.is_ok());
let _ = result.unwrap();
let table_ref = TableReference::Partial {
schema: Cow::Borrowed("public"),
table: Cow::Borrowed("table_name"),
};
let result = table_provider.resolve_table_ref(table_ref);
assert!(result.is_ok());
let _ = result.unwrap();
let table_ref = TableReference::Partial {
schema: Cow::Borrowed("wrong_schema"),
@@ -182,7 +158,7 @@ mod tests {
table: Cow::Borrowed("table_name"),
};
let result = table_provider.resolve_table_ref(table_ref);
assert!(result.is_ok());
let _ = result.unwrap();
let table_ref = TableReference::Full {
catalog: Cow::Borrowed("wrong_catalog"),
@@ -191,5 +167,25 @@ mod tests {
};
let result = table_provider.resolve_table_ref(table_ref);
assert!(result.is_err());
let table_ref = TableReference::Partial {
schema: Cow::Borrowed("information_schema"),
table: Cow::Borrowed("columns"),
};
let _ = table_provider.resolve_table_ref(table_ref).unwrap();
let table_ref = TableReference::Full {
catalog: Cow::Borrowed("greptime"),
schema: Cow::Borrowed("information_schema"),
table: Cow::Borrowed("columns"),
};
let _ = table_provider.resolve_table_ref(table_ref).unwrap();
let table_ref = TableReference::Full {
catalog: Cow::Borrowed("dummy"),
schema: Cow::Borrowed("information_schema"),
table: Cow::Borrowed("columns"),
};
assert!(table_provider.resolve_table_ref(table_ref).is_err());
}
}

View File

@@ -14,49 +14,24 @@
// The `tables` table in the system catalog keeps a record of all tables created by the user.
use std::any::Any;
use std::sync::Arc;
use async_trait::async_trait;
use common_catalog::consts::{INFORMATION_SCHEMA_NAME, SYSTEM_CATALOG_TABLE_NAME};
use common_telemetry::logging;
use snafu::ResultExt;
use table::metadata::TableId;
use table::{Table, TableRef};
use table::Table;
use crate::error::{self, Error, InsertCatalogRecordSnafu, Result as CatalogResult};
use crate::error::{self, InsertCatalogRecordSnafu, Result as CatalogResult};
use crate::system::{
build_schema_insert_request, build_table_deletion_request, build_table_insert_request,
SystemCatalogTable,
};
use crate::{CatalogProvider, DeregisterTableRequest, SchemaProvider, SchemaProviderRef};
use crate::DeregisterTableRequest;
pub struct InformationSchema {
pub system: Arc<SystemCatalogTable>,
}
#[async_trait]
impl SchemaProvider for InformationSchema {
fn as_any(&self) -> &dyn Any {
self
}
async fn table_names(&self) -> Result<Vec<String>, Error> {
Ok(vec![SYSTEM_CATALOG_TABLE_NAME.to_string()])
}
async fn table(&self, name: &str) -> Result<Option<TableRef>, Error> {
if name.eq_ignore_ascii_case(SYSTEM_CATALOG_TABLE_NAME) {
Ok(Some(self.system.clone()))
} else {
Ok(None)
}
}
async fn table_exist(&self, name: &str) -> Result<bool, Error> {
Ok(name.eq_ignore_ascii_case(SYSTEM_CATALOG_TABLE_NAME))
}
}
pub struct SystemCatalog {
pub information_schema: Arc<InformationSchema>,
}
@@ -91,12 +66,21 @@ impl SystemCatalog {
&self,
request: &DeregisterTableRequest,
table_id: TableId,
) -> CatalogResult<bool> {
) -> CatalogResult<()> {
self.information_schema
.system
.delete(build_table_deletion_request(request, table_id))
.insert(build_table_deletion_request(request, table_id))
.await
.map(|x| x == 1)
.map(|x| {
if x != 1 {
let table = common_catalog::format_full_table_name(
&request.catalog,
&request.schema,
&request.table_name
);
logging::warn!("Failed to delete table record from information_schema, unexpected returned result: {x}, table: {table}");
}
})
.with_context(|_| error::DeregisterTableSnafu {
request: request.clone(),
})
@@ -115,30 +99,3 @@ impl SystemCatalog {
.context(InsertCatalogRecordSnafu)
}
}
#[async_trait::async_trait]
impl CatalogProvider for SystemCatalog {
fn as_any(&self) -> &dyn Any {
self
}
async fn schema_names(&self) -> Result<Vec<String>, Error> {
Ok(vec![INFORMATION_SCHEMA_NAME.to_string()])
}
async fn register_schema(
&self,
_name: String,
_schema: SchemaProviderRef,
) -> Result<Option<SchemaProviderRef>, Error> {
panic!("System catalog does not support registering schema!")
}
async fn schema(&self, name: &str) -> Result<Option<Arc<dyn SchemaProvider>>, Error> {
if name.eq_ignore_ascii_case(INFORMATION_SCHEMA_NAME) {
Ok(Some(self.information_schema.clone()))
} else {
Ok(None)
}
}
}


@@ -1,255 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::btree_map::Entry;
use std::collections::{BTreeMap, HashMap};
use std::fmt::{Display, Formatter};
use std::str::FromStr;
use std::sync::Arc;
use async_stream::stream;
use catalog::error::Error;
use catalog::helper::{CatalogKey, CatalogValue, SchemaKey, SchemaValue};
use catalog::remote::{Kv, KvBackend, ValueIter};
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_recordbatch::RecordBatch;
use common_telemetry::logging::info;
use datatypes::data_type::ConcreteDataType;
use datatypes::schema::{ColumnSchema, Schema};
use datatypes::vectors::StringVector;
use serde::Serializer;
use table::engine::{EngineContext, TableEngine, TableReference};
use table::metadata::TableId;
use table::requests::{AlterTableRequest, CreateTableRequest, DropTableRequest, OpenTableRequest};
use table::test_util::MemTable;
use table::TableRef;
use tokio::sync::RwLock;
pub struct MockKvBackend {
map: RwLock<BTreeMap<Vec<u8>, Vec<u8>>>,
}
impl Default for MockKvBackend {
fn default() -> Self {
let mut map = BTreeMap::default();
let catalog_value = CatalogValue {}.as_bytes().unwrap();
let schema_value = SchemaValue {}.as_bytes().unwrap();
let default_catalog_key = CatalogKey {
catalog_name: DEFAULT_CATALOG_NAME.to_string(),
}
.to_string();
let default_schema_key = SchemaKey {
catalog_name: DEFAULT_CATALOG_NAME.to_string(),
schema_name: DEFAULT_SCHEMA_NAME.to_string(),
}
.to_string();
// create default catalog and schema
map.insert(default_catalog_key.into(), catalog_value);
map.insert(default_schema_key.into(), schema_value);
let map = RwLock::new(map);
Self { map }
}
}
impl Display for MockKvBackend {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
futures::executor::block_on(async {
let map = self.map.read().await;
for (k, v) in map.iter() {
f.serialize_str(&String::from_utf8_lossy(k))?;
f.serialize_str(" -> ")?;
f.serialize_str(&String::from_utf8_lossy(v))?;
f.serialize_str("\n")?;
}
Ok(())
})
}
}
#[async_trait::async_trait]
impl KvBackend for MockKvBackend {
fn range<'a, 'b>(&'a self, key: &[u8]) -> ValueIter<'b, Error>
where
'a: 'b,
{
let prefix = key.to_vec();
let prefix_string = String::from_utf8_lossy(&prefix).to_string();
Box::pin(stream!({
let maps = self.map.read().await.clone();
for (k, v) in maps.range(prefix.clone()..) {
let key_string = String::from_utf8_lossy(k).to_string();
let matches = key_string.starts_with(&prefix_string);
if matches {
yield Ok(Kv(k.clone(), v.clone()))
} else {
info!("Stream finished");
return;
}
}
}))
}
async fn set(&self, key: &[u8], val: &[u8]) -> Result<(), Error> {
let mut map = self.map.write().await;
map.insert(key.to_vec(), val.to_vec());
Ok(())
}
async fn compare_and_set(
&self,
key: &[u8],
expect: &[u8],
val: &[u8],
) -> Result<Result<(), Option<Vec<u8>>>, Error> {
let mut map = self.map.write().await;
let existing = map.entry(key.to_vec());
match existing {
Entry::Vacant(e) => {
if expect.is_empty() {
e.insert(val.to_vec());
Ok(Ok(()))
} else {
Ok(Err(None))
}
}
Entry::Occupied(mut existing) => {
if existing.get() == expect {
existing.insert(val.to_vec());
Ok(Ok(()))
} else {
Ok(Err(Some(existing.get().clone())))
}
}
}
}
async fn delete_range(&self, key: &[u8], end: &[u8]) -> Result<(), Error> {
let start = key.to_vec();
let end = end.to_vec();
let range = start..end;
let mut map = self.map.write().await;
map.retain(|k, _| !range.contains(k));
Ok(())
}
}
#[derive(Default)]
pub struct MockTableEngine {
tables: RwLock<HashMap<String, TableRef>>,
}
#[async_trait::async_trait]
impl TableEngine for MockTableEngine {
fn name(&self) -> &str {
"MockTableEngine"
}
/// Create a table with only one column
async fn create_table(
&self,
_ctx: &EngineContext,
request: CreateTableRequest,
) -> table::Result<TableRef> {
let table_name = request.table_name.clone();
let catalog_name = request.catalog_name.clone();
let schema_name = request.schema_name.clone();
let default_table_id = "0".to_owned();
let table_id = TableId::from_str(
request
.table_options
.extra_options
.get("table_id")
.unwrap_or(&default_table_id),
)
.unwrap();
let schema = Arc::new(Schema::new(vec![ColumnSchema::new(
"name",
ConcreteDataType::string_datatype(),
true,
)]));
let data = vec![Arc::new(StringVector::from(vec!["a", "b", "c"])) as _];
let record_batch = RecordBatch::new(schema, data).unwrap();
let table: TableRef = Arc::new(MemTable::new_with_catalog(
&table_name,
record_batch,
table_id,
catalog_name,
schema_name,
vec![0],
)) as Arc<_>;
let mut tables = self.tables.write().await;
tables.insert(table_name, table.clone() as TableRef);
Ok(table)
}
async fn open_table(
&self,
_ctx: &EngineContext,
request: OpenTableRequest,
) -> table::Result<Option<TableRef>> {
Ok(self.tables.read().await.get(&request.table_name).cloned())
}
async fn alter_table(
&self,
_ctx: &EngineContext,
_request: AlterTableRequest,
) -> table::Result<TableRef> {
unimplemented!()
}
fn get_table(
&self,
_ctx: &EngineContext,
table_ref: &TableReference,
) -> table::Result<Option<TableRef>> {
futures::executor::block_on(async {
Ok(self
.tables
.read()
.await
.get(&table_ref.to_string())
.cloned())
})
}
fn table_exists(&self, _ctx: &EngineContext, table_ref: &TableReference) -> bool {
futures::executor::block_on(async {
self.tables
.read()
.await
.contains_key(&table_ref.to_string())
})
}
async fn drop_table(
&self,
_ctx: &EngineContext,
_request: DropTableRequest,
) -> table::Result<bool> {
unimplemented!()
}
async fn close(&self) -> table::Result<()> {
Ok(())
}
}

View File

@@ -14,32 +14,46 @@
#![feature(assert_matches)]
mod mock;
#[cfg(test)]
mod tests {
use std::assert_matches::assert_matches;
use std::collections::HashSet;
use std::sync::Arc;
use std::time::Duration;
use catalog::helper::{CatalogKey, CatalogValue, SchemaKey, SchemaValue};
use catalog::remote::{
KvBackend, KvBackendRef, RemoteCatalogManager, RemoteCatalogProvider, RemoteSchemaProvider,
};
use catalog::{CatalogManager, RegisterTableRequest};
use catalog::remote::mock::MockTableEngine;
use catalog::remote::region_alive_keeper::RegionAliveKeepers;
use catalog::remote::{CachedMetaKvBackend, RemoteCatalogManager};
use catalog::{CatalogManager, RegisterSchemaRequest, RegisterTableRequest};
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, MITO_ENGINE};
use common_meta::ident::TableIdent;
use common_meta::kv_backend::memory::MemoryKvBackend;
use common_meta::kv_backend::KvBackend;
use datatypes::schema::RawSchema;
use futures_util::StreamExt;
use table::engine::manager::{MemoryTableEngineManager, TableEngineManagerRef};
use table::engine::{EngineContext, TableEngineRef};
use table::requests::CreateTableRequest;
use table::test_util::EmptyTable;
use tokio::time::Instant;
use crate::mock::{MockKvBackend, MockTableEngine};
struct TestingComponents {
catalog_manager: Arc<RemoteCatalogManager>,
table_engine_manager: TableEngineManagerRef,
region_alive_keepers: Arc<RegionAliveKeepers>,
}
impl TestingComponents {
fn table_engine(&self) -> TableEngineRef {
self.table_engine_manager.engine(MITO_ENGINE).unwrap()
}
}
#[tokio::test]
async fn test_backend() {
common_telemetry::init_default_ut_logging();
let backend = MockKvBackend::default();
let backend = MemoryKvBackend::default();
let default_catalog_key = CatalogKey {
catalog_name: DEFAULT_CATALOG_NAME.to_string(),
@@ -68,7 +82,7 @@ mod tests {
let mut res = HashSet::new();
while let Some(r) = iter.next().await {
let kv = r.unwrap();
res.insert(String::from_utf8_lossy(&kv.0).to_string());
let _ = res.insert(String::from_utf8_lossy(&kv.0).to_string());
}
assert_eq!(
vec!["__c-greptime".to_string()],
@@ -76,49 +90,97 @@ mod tests {
);
}
async fn prepare_components(
node_id: u64,
) -> (
KvBackendRef,
TableEngineRef,
Arc<RemoteCatalogManager>,
TableEngineManagerRef,
) {
let backend = Arc::new(MockKvBackend::default()) as KvBackendRef;
#[tokio::test]
async fn test_cached_backend() {
let backend = CachedMetaKvBackend::wrap(Arc::new(MemoryKvBackend::default()));
let default_catalog_key = CatalogKey {
catalog_name: DEFAULT_CATALOG_NAME.to_string(),
}
.to_string();
backend
.set(
default_catalog_key.as_bytes(),
&CatalogValue {}.as_bytes().unwrap(),
)
.await
.unwrap();
let ret = backend.get(b"__c-greptime").await.unwrap();
let _ = ret.unwrap();
let _ = backend
.compare_and_set(
b"__c-greptime",
&CatalogValue {}.as_bytes().unwrap(),
b"123",
)
.await
.unwrap();
let ret = backend.get(b"__c-greptime").await.unwrap();
assert_eq!(&b"123"[..], &(ret.as_ref().unwrap().1));
let _ = backend.set(b"__c-greptime", b"1234").await;
let ret = backend.get(b"__c-greptime").await.unwrap();
assert_eq!(&b"1234"[..], &(ret.as_ref().unwrap().1));
backend.delete(b"__c-greptime").await.unwrap();
let ret = backend.get(b"__c-greptime").await.unwrap();
assert!(ret.is_none());
}
async fn prepare_components(node_id: u64) -> TestingComponents {
let backend = Arc::new(MemoryKvBackend::default());
backend.set(b"__c-greptime", b"").await.unwrap();
backend.set(b"__s-greptime-public", b"").await.unwrap();
let cached_backend = Arc::new(CachedMetaKvBackend::wrap(backend));
let table_engine = Arc::new(MockTableEngine::default());
let engine_manager = Arc::new(MemoryTableEngineManager::alias(
MITO_ENGINE.to_string(),
table_engine.clone(),
));
let catalog_manager =
RemoteCatalogManager::new(engine_manager.clone(), node_id, backend.clone());
catalog_manager.start().await.unwrap();
(
backend,
table_engine,
Arc::new(catalog_manager),
engine_manager as Arc<_>,
)
));
let region_alive_keepers = Arc::new(RegionAliveKeepers::new(engine_manager.clone(), 5000));
let catalog_manager = RemoteCatalogManager::new(
engine_manager.clone(),
node_id,
cached_backend.clone(),
region_alive_keepers.clone(),
);
catalog_manager.start().await.unwrap();
TestingComponents {
catalog_manager: Arc::new(catalog_manager),
table_engine_manager: engine_manager,
region_alive_keepers,
}
}
#[tokio::test]
async fn test_remote_catalog_default() {
common_telemetry::init_default_ut_logging();
let node_id = 42;
let (_, _, catalog_manager, _) = prepare_components(node_id).await;
let TestingComponents {
catalog_manager, ..
} = prepare_components(node_id).await;
assert_eq!(
vec![DEFAULT_CATALOG_NAME.to_string()],
catalog_manager.catalog_names().await.unwrap()
);
let default_catalog = catalog_manager
.catalog(DEFAULT_CATALOG_NAME)
.await
.unwrap()
.unwrap();
assert_eq!(
vec![DEFAULT_SCHEMA_NAME.to_string()],
default_catalog.schema_names().await.unwrap()
catalog_manager
.schema_names(DEFAULT_CATALOG_NAME)
.await
.unwrap()
);
}
@@ -126,14 +188,16 @@ mod tests {
async fn test_remote_catalog_register_nonexistent() {
common_telemetry::init_default_ut_logging();
let node_id = 42;
let (_, table_engine, catalog_manager, _) = prepare_components(node_id).await;
let components = prepare_components(node_id).await;
// register a new table with a nonexistent catalog
let catalog_name = "nonexistent_catalog".to_string();
let schema_name = "nonexistent_schema".to_string();
let table_name = "fail_table".to_string();
// this schema has no effect
let table_schema = RawSchema::new(vec![]);
let table = table_engine
let table = components
.table_engine()
.create_table(
&EngineContext {},
CreateTableRequest {
@@ -159,7 +223,7 @@ mod tests {
table_id: 1,
table,
};
let res = catalog_manager.register_table(reg_req).await;
let res = components.catalog_manager.register_table(reg_req).await;
// because nonexistent_catalog does not exist yet.
assert_matches!(
@@ -171,23 +235,16 @@ mod tests {
#[tokio::test]
async fn test_register_table() {
let node_id = 42;
let (_, table_engine, catalog_manager, _) = prepare_components(node_id).await;
let default_catalog = catalog_manager
.catalog(DEFAULT_CATALOG_NAME)
.await
.unwrap()
.unwrap();
let components = prepare_components(node_id).await;
assert_eq!(
vec![DEFAULT_SCHEMA_NAME.to_string()],
default_catalog.schema_names().await.unwrap()
components
.catalog_manager
.schema_names(DEFAULT_CATALOG_NAME)
.await
.unwrap()
);
let default_schema = default_catalog
.schema(DEFAULT_SCHEMA_NAME)
.await
.unwrap()
.unwrap();
// register a new table under the default catalog and schema
let catalog_name = DEFAULT_CATALOG_NAME.to_string();
let schema_name = DEFAULT_SCHEMA_NAME.to_string();
@@ -195,7 +252,8 @@ mod tests {
let table_id = 1;
// this schema has no effect
let table_schema = RawSchema::new(vec![]);
let table = table_engine
let table = components
.table_engine()
.create_table(
&EngineContext {},
CreateTableRequest {
@@ -221,40 +279,51 @@ mod tests {
table_id,
table,
};
assert!(catalog_manager.register_table(reg_req).await.unwrap());
assert!(components
.catalog_manager
.register_table(reg_req)
.await
.unwrap());
assert_eq!(
vec![table_name],
default_schema.table_names().await.unwrap()
components
.catalog_manager
.table_names(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME)
.await
.unwrap()
);
}
#[tokio::test]
async fn test_register_catalog_schema_table() {
let node_id = 42;
let (backend, table_engine, catalog_manager, engine_manager) =
prepare_components(node_id).await;
let components = prepare_components(node_id).await;
let catalog_name = "test_catalog".to_string();
let schema_name = "nonexistent_schema".to_string();
let catalog = Arc::new(RemoteCatalogProvider::new(
catalog_name.clone(),
backend.clone(),
engine_manager.clone(),
node_id,
));
// register catalog to catalog manager
CatalogManager::register_catalog(&*catalog_manager, catalog_name.clone(), catalog)
assert!(components
.catalog_manager
.register_catalog(catalog_name.clone())
.await
.unwrap();
.is_ok());
assert_eq!(
HashSet::<String>::from_iter(
vec![DEFAULT_CATALOG_NAME.to_string(), catalog_name.clone()].into_iter()
),
HashSet::from_iter(catalog_manager.catalog_names().await.unwrap().into_iter())
HashSet::from_iter(
components
.catalog_manager
.catalog_names()
.await
.unwrap()
.into_iter()
)
);
let table_to_register = table_engine
let table_to_register = components
.table_engine()
.create_table(
&EngineContext {},
CreateTableRequest {
@@ -283,41 +352,128 @@ mod tests {
};
// this register will fail since schema does not exist yet
assert_matches!(
catalog_manager
components
.catalog_manager
.register_table(reg_req.clone())
.await
.unwrap_err(),
catalog::error::Error::SchemaNotFound { .. }
);
let new_catalog = catalog_manager
.catalog(&catalog_name)
let register_schema_request = RegisterSchemaRequest {
catalog: catalog_name.to_string(),
schema: schema_name.to_string(),
};
assert!(components
.catalog_manager
.register_schema(register_schema_request)
.await
.unwrap()
.expect("catalog should exist since it's already registered");
let schema = Arc::new(RemoteSchemaProvider::new(
catalog_name.clone(),
schema_name.clone(),
node_id,
engine_manager,
backend.clone(),
));
let prev = new_catalog
.register_schema(schema_name.clone(), schema.clone())
.expect("Register schema should not fail"));
assert!(components
.catalog_manager
.register_table(reg_req)
.await
.expect("Register schema should not fail");
assert!(prev.is_none());
assert!(catalog_manager.register_table(reg_req).await.unwrap());
.unwrap());
assert_eq!(
HashSet::from([schema_name.clone()]),
new_catalog
.schema_names()
components
.catalog_manager
.schema_names(&catalog_name)
.await
.unwrap()
.into_iter()
.collect()
)
}
#[tokio::test]
async fn test_register_table_before_and_after_region_alive_keeper_started() {
let components = prepare_components(42).await;
let catalog_manager = &components.catalog_manager;
let region_alive_keepers = &components.region_alive_keepers;
let table_before = TableIdent {
catalog: DEFAULT_CATALOG_NAME.to_string(),
schema: DEFAULT_SCHEMA_NAME.to_string(),
table: "table_before".to_string(),
table_id: 1,
engine: MITO_ENGINE.to_string(),
};
let request = RegisterTableRequest {
catalog: table_before.catalog.clone(),
schema: table_before.schema.clone(),
table_name: table_before.table.clone(),
table_id: table_before.table_id,
table: Arc::new(EmptyTable::new(CreateTableRequest {
id: table_before.table_id,
catalog_name: table_before.catalog.clone(),
schema_name: table_before.schema.clone(),
table_name: table_before.table.clone(),
desc: None,
schema: RawSchema::new(vec![]),
region_numbers: vec![0],
primary_key_indices: vec![],
create_if_not_exists: false,
table_options: Default::default(),
engine: MITO_ENGINE.to_string(),
})),
};
assert!(catalog_manager.register_table(request).await.unwrap());
let keeper = region_alive_keepers
.find_keeper(&table_before)
.await
.unwrap();
let deadline = keeper.deadline(0).await.unwrap();
let far_future = Instant::now() + Duration::from_secs(86400 * 365 * 29);
// assert region alive countdown is not started
assert!(deadline > far_future);
region_alive_keepers.start().await;
let table_after = TableIdent {
catalog: DEFAULT_CATALOG_NAME.to_string(),
schema: DEFAULT_SCHEMA_NAME.to_string(),
table: "table_after".to_string(),
table_id: 2,
engine: MITO_ENGINE.to_string(),
};
let request = RegisterTableRequest {
catalog: table_after.catalog.clone(),
schema: table_after.schema.clone(),
table_name: table_after.table.clone(),
table_id: table_after.table_id,
table: Arc::new(EmptyTable::new(CreateTableRequest {
id: table_after.table_id,
catalog_name: table_after.catalog.clone(),
schema_name: table_after.schema.clone(),
table_name: table_after.table.clone(),
desc: None,
schema: RawSchema::new(vec![]),
region_numbers: vec![0],
primary_key_indices: vec![],
create_if_not_exists: false,
table_options: Default::default(),
engine: MITO_ENGINE.to_string(),
})),
};
assert!(catalog_manager.register_table(request).await.unwrap());
let keeper = region_alive_keepers
.find_keeper(&table_after)
.await
.unwrap();
let deadline = keeper.deadline(0).await.unwrap();
// assert countdown is started for the table registered after [RegionAliveKeepers] started
assert!(deadline <= Instant::now() + Duration::from_secs(20));
let keeper = region_alive_keepers
.find_keeper(&table_before)
.await
.unwrap();
let deadline = keeper.deadline(0).await.unwrap();
// assert countdown is started for the table registered before [RegionAliveKeepers] started, too
assert!(deadline <= Instant::now() + Duration::from_secs(20));
}
}

View File

@@ -4,6 +4,9 @@ version.workspace = true
edition.workspace = true
license.workspace = true
[features]
testing = []
[dependencies]
api = { path = "../api" }
arrow-flight.workspace = true
@@ -16,21 +19,25 @@ common-grpc-expr = { path = "../common/grpc-expr" }
common-query = { path = "../common/query" }
common-recordbatch = { path = "../common/recordbatch" }
common-time = { path = "../common/time" }
common-meta = { path = "../common/meta" }
common-telemetry = { path = "../common/telemetry" }
datafusion.workspace = true
datatypes = { path = "../datatypes" }
enum_dispatch = "0.3"
futures-util.workspace = true
moka = { version = "0.9", features = ["future"] }
parking_lot = "0.12"
prost.workspace = true
rand.workspace = true
snafu.workspace = true
tokio-stream = { version = "0.1", features = ["net"] }
tokio.workspace = true
tonic.workspace = true
[dev-dependencies]
datanode = { path = "../datanode" }
derive-new = "0.5"
substrait = { path = "../common/substrait" }
tokio.workspace = true
tracing = "0.1"
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
prost.workspace = true

View File

@@ -63,7 +63,7 @@ async fn run() {
create_if_not_exists: false,
table_options: Default::default(),
table_id: Some(TableId { id: 1024 }),
region_ids: vec![0],
region_numbers: vec![0],
engine: MITO_ENGINE.to_string(),
};

View File

@@ -0,0 +1,183 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use api::v1::column::*;
use api::v1::*;
use client::{Client, Database, DEFAULT_SCHEMA_NAME};
use derive_new::new;
use tracing::{error, info};
fn main() {
tracing::subscriber::set_global_default(tracing_subscriber::FmtSubscriber::builder().finish())
.unwrap();
run();
}
#[tokio::main]
async fn run() {
let greptimedb_endpoint =
std::env::var("GREPTIMEDB_ENDPOINT").unwrap_or_else(|_| "localhost:4001".to_owned());
let greptimedb_dbname =
std::env::var("GREPTIMEDB_DBNAME").unwrap_or_else(|_| DEFAULT_SCHEMA_NAME.to_owned());
let grpc_client = Client::with_urls(vec![&greptimedb_endpoint]);
let client = Database::new_with_dbname(greptimedb_dbname, grpc_client);
let stream_inserter = client.streaming_inserter().unwrap();
if let Err(e) = stream_inserter
.insert(vec![to_insert_request(weather_records_1())])
.await
{
error!("Error: {e}");
}
if let Err(e) = stream_inserter
.insert(vec![to_insert_request(weather_records_2())])
.await
{
error!("Error: {e}");
}
let result = stream_inserter.finish().await;
match result {
Ok(rows) => {
info!("Rows written: {rows}");
}
Err(e) => {
error!("Error: {e}");
}
};
}
#[derive(new)]
struct WeatherRecord {
timestamp_millis: i64,
collector: String,
temperature: f32,
humidity: i32,
}
fn weather_records_1() -> Vec<WeatherRecord> {
vec![
WeatherRecord::new(1686109527000, "c1".to_owned(), 26.4, 15),
WeatherRecord::new(1686023127000, "c1".to_owned(), 29.3, 20),
WeatherRecord::new(1685936727000, "c1".to_owned(), 31.8, 13),
WeatherRecord::new(1686109527000, "c2".to_owned(), 20.4, 67),
WeatherRecord::new(1686023127000, "c2".to_owned(), 18.0, 74),
WeatherRecord::new(1685936727000, "c2".to_owned(), 19.2, 81),
]
}
fn weather_records_2() -> Vec<WeatherRecord> {
vec![
WeatherRecord::new(1686109527001, "c3".to_owned(), 26.4, 15),
WeatherRecord::new(1686023127002, "c3".to_owned(), 29.3, 20),
WeatherRecord::new(1685936727003, "c3".to_owned(), 31.8, 13),
WeatherRecord::new(1686109527004, "c4".to_owned(), 20.4, 67),
WeatherRecord::new(1686023127005, "c4".to_owned(), 18.0, 74),
WeatherRecord::new(1685936727006, "c4".to_owned(), 19.2, 81),
]
}
/// Transposes the given weather records into columns and bundles them into an
/// `InsertRequest`.
///
/// Data structure:
///
/// - `ts`: a timestamp column
/// - `collector`: a tag column
/// - `temperature`: a value field of f32
/// - `humidity`: a value field of i32
///
fn to_insert_request(records: Vec<WeatherRecord>) -> InsertRequest {
let rows = records.len();
// transpose the records into per-column vectors
let (timestamp_millis, collectors, temp, humidity) = records.into_iter().fold(
(
Vec::with_capacity(rows),
Vec::with_capacity(rows),
Vec::with_capacity(rows),
Vec::with_capacity(rows),
),
|mut acc, rec| {
acc.0.push(rec.timestamp_millis);
acc.1.push(rec.collector);
acc.2.push(rec.temperature);
acc.3.push(rec.humidity);
acc
},
);
let columns = vec![
// timestamp column: `ts`
Column {
column_name: "ts".to_owned(),
values: Some(column::Values {
ts_millisecond_values: timestamp_millis,
..Default::default()
}),
semantic_type: SemanticType::Timestamp as i32,
datatype: ColumnDataType::TimestampMillisecond as i32,
..Default::default()
},
// tag column: collectors
Column {
column_name: "collector".to_owned(),
values: Some(column::Values {
string_values: collectors.into_iter().collect(),
..Default::default()
}),
semantic_type: SemanticType::Tag as i32,
datatype: ColumnDataType::String as i32,
..Default::default()
},
// field column: temperature
Column {
column_name: "temperature".to_owned(),
values: Some(column::Values {
f32_values: temp,
..Default::default()
}),
semantic_type: SemanticType::Field as i32,
datatype: ColumnDataType::Float32 as i32,
..Default::default()
},
// field column: humidity
Column {
column_name: "humidity".to_owned(),
values: Some(column::Values {
i32_values: humidity,
..Default::default()
}),
semantic_type: SemanticType::Field as i32,
datatype: ColumnDataType::Int32 as i32,
..Default::default()
},
];
InsertRequest {
table_name: "weather_demo".to_owned(),
columns,
row_count: rows as u32,
..Default::default()
}
}
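// Illustrative only (not part of this diff): the one-shot (non-streaming)
// counterpart of the streaming example above. `Database::insert` now takes a
// batched `InsertRequests` (see the client changes later in this diff). This
// helper is not wired into `main`; it only sketches the call shape.
#[allow(dead_code)]
async fn insert_once(client: &Database) {
    let requests = InsertRequests {
        inserts: vec![to_insert_request(weather_records_1())],
    };
    match client.insert(requests).await {
        Ok(rows) => info!("Rows written: {rows}"),
        Err(e) => error!("Error: {e}"),
    }
}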

View File

@@ -165,7 +165,7 @@ impl Client {
pub async fn health_check(&self) -> Result<()> {
let (_, channel) = self.find_channel()?;
let mut client = HealthCheckClient::new(channel);
client.health_check(HealthCheckRequest {}).await?;
let _ = client.health_check(HealthCheckRequest {}).await?;
Ok(())
}
}

View File

@@ -12,32 +12,60 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use std::fmt::{Debug, Formatter};
use std::sync::{Arc, Mutex};
use std::time::Duration;
use client::Client;
use common_grpc::channel_manager::ChannelManager;
use meta_client::rpc::Peer;
use common_grpc::channel_manager::{ChannelConfig, ChannelManager};
use common_meta::peer::Peer;
use common_telemetry::info;
use moka::future::{Cache, CacheBuilder};
use crate::Client;
pub struct DatanodeClients {
channel_manager: ChannelManager,
clients: Cache<Peer, Client>,
started: Arc<Mutex<bool>>,
}
impl Default for DatanodeClients {
fn default() -> Self {
let config = ChannelConfig::new().timeout(Duration::from_secs(8));
Self {
channel_manager: ChannelManager::new(),
channel_manager: ChannelManager::with_config(config),
clients: CacheBuilder::new(1024)
.time_to_live(Duration::from_secs(30 * 60))
.time_to_idle(Duration::from_secs(5 * 60))
.build(),
started: Arc::new(Mutex::new(false)),
}
}
}
impl Debug for DatanodeClients {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
f.debug_struct("DatanodeClients")
.field("channel_manager", &self.channel_manager)
.finish()
}
}
impl DatanodeClients {
pub(crate) async fn get_client(&self, datanode: &Peer) -> Client {
pub fn start(&self) {
let mut started = self.started.lock().unwrap();
if *started {
return;
}
self.channel_manager.start_channel_recycle();
info!("Datanode clients manager is started!");
*started = true;
}
pub async fn get_client(&self, datanode: &Peer) -> Client {
self.clients
.get_with_by_ref(datanode, async move {
Client::with_manager_and_urls(
@@ -48,8 +76,8 @@ impl DatanodeClients {
.await
}
#[cfg(test)]
pub(crate) async fn insert_client(&self, datanode: Peer, client: Client) {
#[cfg(feature = "testing")]
pub async fn insert_client(&self, datanode: Peer, client: Client) {
self.clients.insert(datanode, client).await
}
}
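// Illustrative only (not part of this diff): the intended call order for the
// manager above, with the datanode `peer` supplied by the caller (for example
// from a table-route lookup; the function itself is hypothetical). `start` is
// guarded by the `started` flag, so repeated calls only spin up channel
// recycling once; `get_client` then returns a per-peer cached `Client`.
#[allow(dead_code)]
async fn datanode_clients_sketch(peer: &Peer) -> Client {
    let clients = DatanodeClients::default();
    clients.start();
    clients.start(); // no-op: channel recycling is already running
    clients.get_client(peer).await
}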

View File

@@ -18,7 +18,7 @@ use api::v1::greptime_request::Request;
use api::v1::query_request::Query;
use api::v1::{
greptime_response, AffectedRows, AlterExpr, AuthHeader, CreateTableExpr, DdlRequest,
DeleteRequest, DropTableExpr, FlushTableExpr, GreptimeRequest, InsertRequest, PromRangeQuery,
DeleteRequest, DropTableExpr, FlushTableExpr, GreptimeRequest, InsertRequests, PromRangeQuery,
QueryRequest, RequestHeader,
};
use arrow_flight::{FlightData, Ticket};
@@ -33,7 +33,7 @@ use snafu::{ensure, ResultExt};
use crate::error::{
ConvertFlightDataSnafu, IllegalDatabaseResponseSnafu, IllegalFlightMessagesSnafu,
};
use crate::{error, metrics, Client, Result};
use crate::{error, metrics, Client, Result, StreamInserter};
#[derive(Clone, Debug, Default)]
pub struct Database {
@@ -56,8 +56,9 @@ impl Database {
Self {
catalog: catalog.into(),
schema: schema.into(),
dbname: "".to_string(),
client,
..Default::default()
ctx: FlightContext::default(),
}
}
@@ -70,9 +71,11 @@ impl Database {
/// environment
pub fn new_with_dbname(dbname: impl Into<String>, client: Client) -> Self {
Self {
catalog: "".to_string(),
schema: "".to_string(),
dbname: dbname.into(),
client,
..Default::default()
ctx: FlightContext::default(),
}
}
@@ -106,9 +109,29 @@ impl Database {
});
}
pub async fn insert(&self, request: InsertRequest) -> Result<u32> {
pub async fn insert(&self, requests: InsertRequests) -> Result<u32> {
let _timer = timer!(metrics::METRIC_GRPC_INSERT);
self.handle(Request::Insert(request)).await
self.handle(Request::Inserts(requests)).await
}
pub fn streaming_inserter(&self) -> Result<StreamInserter> {
self.streaming_inserter_with_channel_size(65536)
}
pub fn streaming_inserter_with_channel_size(
&self,
channel_size: usize,
) -> Result<StreamInserter> {
let client = self.client.make_database_client()?.inner;
let stream_inserter = StreamInserter::new(
client,
self.dbname().to_string(),
self.ctx.auth_header.clone(),
channel_size,
);
Ok(stream_inserter)
}
pub async fn delete(&self, request: DeleteRequest) -> Result<u32> {
@@ -118,15 +141,7 @@ impl Database {
async fn handle(&self, request: Request) -> Result<u32> {
let mut client = self.client.make_database_client()?.inner;
let request = GreptimeRequest {
header: Some(RequestHeader {
catalog: self.catalog.clone(),
schema: self.schema.clone(),
authorization: self.ctx.auth_header.clone(),
dbname: self.dbname.clone(),
}),
request: Some(request),
};
let request = self.to_rpc_request(request);
let response = client
.handle(request)
.await?
@@ -139,6 +154,19 @@ impl Database {
Ok(value)
}
#[inline]
fn to_rpc_request(&self, request: Request) -> GreptimeRequest {
GreptimeRequest {
header: Some(RequestHeader {
catalog: self.catalog.clone(),
schema: self.schema.clone(),
authorization: self.ctx.auth_header.clone(),
dbname: self.dbname.clone(),
}),
request: Some(request),
}
}
pub async fn sql(&self, sql: &str) -> Result<Output> {
let _timer = timer!(metrics::METRIC_GRPC_SQL);
self.do_get(Request::Query(QueryRequest {
@@ -209,22 +237,13 @@ impl Database {
async fn do_get(&self, request: Request) -> Result<Output> {
// FIXME(paomian): some labels should be added for metrics
let _timer = timer!(metrics::METRIC_GRPC_DO_GET);
let request = GreptimeRequest {
header: Some(RequestHeader {
catalog: self.catalog.clone(),
schema: self.schema.clone(),
authorization: self.ctx.auth_header.clone(),
dbname: self.dbname.clone(),
}),
request: Some(request),
};
let request = self.to_rpc_request(request);
let request = Ticket {
ticket: request.encode_to_vec().into(),
};
let mut client = self.client.make_flight_client()?;
// TODO(LFC): Streaming get flight data.
let flight_data: Vec<FlightData> = client
.mut_inner()
.do_get(request)

View File

@@ -34,13 +34,13 @@ pub enum Error {
#[snafu(display("Failed to convert FlightData, source: {}", source))]
ConvertFlightData {
#[snafu(backtrace)]
location: Location,
source: common_grpc::Error,
},
#[snafu(display("Column datatype error, source: {}", source))]
ColumnDataType {
#[snafu(backtrace)]
location: Location,
source: api::error::Error,
},
@@ -57,7 +57,7 @@ pub enum Error {
))]
CreateChannel {
addr: String,
#[snafu(backtrace)]
location: Location,
source: common_grpc::error::Error,
},
@@ -67,6 +67,9 @@ pub enum Error {
#[snafu(display("Illegal Database response: {err_msg}"))]
IllegalDatabaseResponse { err_msg: String },
#[snafu(display("Failed to send request with streaming: {}", err_msg))]
ClientStreaming { err_msg: String, location: Location },
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -77,11 +80,12 @@ impl ErrorExt for Error {
Error::IllegalFlightMessages { .. }
| Error::ColumnDataType { .. }
| Error::MissingField { .. }
| Error::IllegalDatabaseResponse { .. } => StatusCode::Internal,
| Error::IllegalDatabaseResponse { .. }
| Error::ClientStreaming { .. } => StatusCode::Internal,
Error::Server { code, .. } => *code,
Error::FlightGet { source, .. } => source.status_code(),
Error::CreateChannel { source, .. } | Error::ConvertFlightData { source } => {
Error::CreateChannel { source, .. } | Error::ConvertFlightData { source, .. } => {
source.status_code()
}
Error::IllegalGrpcClientState { .. } => StatusCode::Unexpected,

View File

@@ -13,10 +13,12 @@
// limitations under the License.
mod client;
pub mod client_manager;
mod database;
mod error;
pub mod load_balance;
mod metrics;
mod stream_insert;
pub use api;
pub use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
@@ -24,3 +26,4 @@ pub use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
pub use self::client::Client;
pub use self::database::Database;
pub use self::error::{Error, Result};
pub use self::stream_insert::StreamInserter;

View File

@@ -60,7 +60,7 @@ mod tests {
let random = Random;
for _ in 0..100 {
let peer = random.get_peer(&peers).unwrap();
all.contains(peer);
assert!(all.contains(peer));
}
}
}

View File

@@ -0,0 +1,115 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use api::v1::greptime_database_client::GreptimeDatabaseClient;
use api::v1::greptime_request::Request;
use api::v1::{
greptime_response, AffectedRows, AuthHeader, GreptimeRequest, GreptimeResponse, InsertRequest,
InsertRequests, RequestHeader,
};
use snafu::OptionExt;
use tokio::sync::mpsc;
use tokio::task::JoinHandle;
use tokio_stream::wrappers::ReceiverStream;
use tonic::transport::Channel;
use tonic::{Response, Status};
use crate::error::{self, IllegalDatabaseResponseSnafu, Result};
/// A handle that provides methods for streaming data inserts.
///
/// [`StreamInserter`] cannot be constructed directly via `StreamInserter::new`
/// (the constructor is crate-private); obtain one from a `Database` instead:
///
/// ```ignore
/// let grpc_client = Client::with_urls(vec!["127.0.0.1:4002"]);
/// let client = Database::new_with_dbname("db_name", grpc_client);
/// let stream_inserter = client.streaming_inserter().unwrap();
/// ```
///
/// For a complete usage example, see
/// [stream_ingest.rs](https://github.com/GreptimeTeam/greptimedb/blob/develop/src/client/examples/stream_ingest.rs).
pub struct StreamInserter {
sender: mpsc::Sender<GreptimeRequest>,
auth_header: Option<AuthHeader>,
dbname: String,
join: JoinHandle<std::result::Result<Response<GreptimeResponse>, Status>>,
}
impl StreamInserter {
pub(crate) fn new(
mut client: GreptimeDatabaseClient<Channel>,
dbname: String,
auth_header: Option<AuthHeader>,
channel_size: usize,
) -> StreamInserter {
let (send, recv) = tokio::sync::mpsc::channel(channel_size);
let join: JoinHandle<std::result::Result<Response<GreptimeResponse>, Status>> =
tokio::spawn(async move {
let recv_stream = ReceiverStream::new(recv);
client.handle_requests(recv_stream).await
});
StreamInserter {
sender: send,
auth_header,
dbname,
join,
}
}
pub async fn insert(&self, requests: Vec<InsertRequest>) -> Result<()> {
let inserts = InsertRequests { inserts: requests };
let request = self.to_rpc_request(Request::Inserts(inserts));
self.sender.send(request).await.map_err(|e| {
error::ClientStreamingSnafu {
err_msg: e.to_string(),
}
.build()
})
}
pub async fn finish(self) -> Result<u32> {
drop(self.sender);
let response = self.join.await.unwrap()?;
let response = response
.into_inner()
.response
.context(IllegalDatabaseResponseSnafu {
err_msg: "GreptimeResponse is empty",
})?;
let greptime_response::Response::AffectedRows(AffectedRows { value }) = response;
Ok(value)
}
fn to_rpc_request(&self, request: Request) -> GreptimeRequest {
GreptimeRequest {
header: Some(RequestHeader {
authorization: self.auth_header.clone(),
dbname: self.dbname.clone(),
..Default::default()
}),
request: Some(request),
}
}
}
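// Illustrative only (not part of this diff): a minimal sketch of driving the
// bounded channel above, assuming `Database` is in scope via `crate::Database`
// and that `batches` is prepared by the caller (both are assumptions of this
// sketch). With a small channel size, `insert` awaits once `channel_size`
// requests are queued ahead of the background sender task, which gives
// natural backpressure instead of unbounded buffering.
#[allow(dead_code)]
async fn bounded_stream_insert_sketch(
    db: &crate::Database,
    batches: Vec<Vec<InsertRequest>>,
) -> Result<u32> {
    let inserter = db.streaming_inserter_with_channel_size(8)?;
    for batch in batches {
        inserter.insert(batch).await?;
    }
    // Dropping the sender half closes the stream; the server responds with the
    // total number of affected rows.
    inserter.finish().await
}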

View File

@@ -10,7 +10,9 @@ name = "greptime"
path = "src/bin/greptime.rs"
[features]
mem-prof = ["tikv-jemallocator", "tikv-jemalloc-ctl"]
default = ["metrics-process"]
tokio-console = ["common-telemetry/tokio-console"]
metrics-process = ["servers/metrics-process"]
[dependencies]
anymap = "1.0.0-beta.2"
@@ -24,32 +26,32 @@ common-recordbatch = { path = "../common/recordbatch" }
common-telemetry = { path = "../common/telemetry", features = [
"deadlock_detection",
] }
config = "0.13"
datanode = { path = "../datanode" }
either = "1.8"
frontend = { path = "../frontend" }
futures.workspace = true
meta-client = { path = "../meta-client" }
meta-srv = { path = "../meta-srv" }
metrics.workspace = true
nu-ansi-term = "0.46"
partition = { path = "../partition" }
query = { path = "../query" }
rustyline = "10.1"
serde.workspace = true
servers = { path = "../servers" }
session = { path = "../session" }
snafu.workspace = true
substrait = { path = "../common/substrait" }
tikv-jemalloc-ctl = { version = "0.5", optional = true }
tikv-jemallocator = { version = "0.5", optional = true }
tikv-jemallocator = "0.5"
tokio.workspace = true
toml = "0.5"
[dev-dependencies]
common-test-util = { path = "../common/test-util" }
rexpect = "0.5"
temp-env = "0.3"
serde.workspace = true
toml = "0.5"
[build-dependencies]
build-data = "0.1.3"
build-data = "0.1.4"

View File

@@ -18,6 +18,10 @@ fn main() {
"cargo:rustc-env=GIT_COMMIT={}",
build_data::get_git_commit().unwrap_or_else(|_| DEFAULT_VALUE.to_string())
);
println!(
"cargo:rustc-env=GIT_COMMIT_SHORT={}",
build_data::get_git_commit_short().unwrap_or_else(|_| DEFAULT_VALUE.to_string())
);
println!(
"cargo:rustc-env=GIT_BRANCH={}",
build_data::get_git_branch().unwrap_or_else(|_| DEFAULT_VALUE.to_string())

View File

@@ -20,7 +20,8 @@ use clap::Parser;
use cmd::error::Result;
use cmd::options::{Options, TopLevelOptions};
use cmd::{cli, datanode, frontend, metasrv, standalone};
use common_telemetry::logging::{error, info};
use common_telemetry::logging::{error, info, TracingOptions};
use metrics::gauge;
#[derive(Parser)]
#[clap(name = "greptimedb", version = print_version())]
@@ -31,6 +32,10 @@ struct Command {
log_level: Option<String>,
#[clap(subcommand)]
subcmd: SubCommand,
#[cfg(feature = "tokio-console")]
#[clap(long)]
tokio_console_addr: Option<String>,
}
pub enum Application {
@@ -42,13 +47,13 @@ pub enum Application {
}
impl Application {
async fn run(&mut self) -> Result<()> {
async fn start(&mut self) -> Result<()> {
match self {
Application::Datanode(instance) => instance.run().await,
Application::Frontend(instance) => instance.run().await,
Application::Metasrv(instance) => instance.run().await,
Application::Standalone(instance) => instance.run().await,
Application::Cli(instance) => instance.run().await,
Application::Datanode(instance) => instance.start().await,
Application::Frontend(instance) => instance.start().await,
Application::Metasrv(instance) => instance.start().await,
Application::Standalone(instance) => instance.start().await,
Application::Cli(instance) => instance.start().await,
}
}
@@ -159,28 +164,63 @@ fn print_version() -> &'static str {
)
}
#[cfg(feature = "mem-prof")]
fn short_version() -> &'static str {
env!("CARGO_PKG_VERSION")
}
// {app_name}-{branch_name}-{commit_short}
// The branch name (tag) of a release build already contains the short
// version, so the full version does not concatenate it again explicitly.
fn full_version() -> &'static str {
concat!(
"greptimedb-",
env!("GIT_BRANCH"),
"-",
env!("GIT_COMMIT_SHORT")
)
}
fn log_env_flags() {
info!("command line arguments");
for argument in std::env::args() {
info!("argument: {}", argument);
}
}
#[global_allocator]
static ALLOC: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc;
#[tokio::main]
async fn main() -> Result<()> {
let cmd = Command::parse();
// TODO(dennis):
// 1. add ip/port to app
let app_name = &cmd.subcmd.to_string();
let opts = cmd.load_options()?;
let logging_opts = opts.logging_options();
let tracing_opts = TracingOptions {
#[cfg(feature = "tokio-console")]
tokio_console_addr: cmd.tokio_console_addr.clone(),
};
common_telemetry::set_panic_hook();
common_telemetry::init_default_metrics_recorder();
let _guard = common_telemetry::init_global_logging(app_name, logging_opts);
let _guard = common_telemetry::init_global_logging(app_name, logging_opts, tracing_opts);
// Report app version as gauge.
gauge!("app_version", 1.0, "short_version" => short_version(), "version" => full_version());
// Log version and argument flags.
info!(
"short_version: {}, full_version: {}",
short_version(),
full_version()
);
log_env_flags();
let mut app = cmd.build(opts).await?;
tokio::select! {
result = app.run() => {
result = app.start() => {
if let Err(err) = result {
error!(err; "Fatal error occurs!");
}

View File

@@ -28,7 +28,7 @@ pub struct Instance {
}
impl Instance {
pub async fn run(&mut self) -> Result<()> {
pub async fn start(&mut self) -> Result<()> {
self.repl.run().await
}
@@ -53,8 +53,8 @@ impl Command {
if let Some(dir) = top_level_opts.log_dir {
logging_opts.dir = dir;
}
if let Some(level) = top_level_opts.log_level {
logging_opts.level = level;
if top_level_opts.log_level.is_some() {
logging_opts.level = top_level_opts.log_level;
}
Ok(Options::Cli(Box::new(logging_opts)))
}
@@ -107,7 +107,7 @@ mod tests {
let opts = cmd.load_options(TopLevelOptions::default()).unwrap();
let logging_opts = opts.logging_options();
assert_eq!("/tmp/greptimedb/logs", logging_opts.dir);
assert_eq!("info", logging_opts.level);
assert!(logging_opts.level.is_none());
assert!(!logging_opts.enable_jaeger_tracing);
}
@@ -129,6 +129,6 @@ mod tests {
.unwrap();
let logging_opts = opts.logging_options();
assert_eq!("/tmp/greptimedb/test/logs", logging_opts.dir);
assert_eq!("debug", logging_opts.level);
assert_eq!("debug", logging_opts.level.as_ref().unwrap());
}
}

View File

@@ -16,15 +16,16 @@ use std::path::PathBuf;
use std::sync::Arc;
use std::time::Instant;
use catalog::remote::MetaKvBackend;
use catalog::remote::CachedMetaKvBackend;
use client::client_manager::DatanodeClients;
use client::{Client, Database, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_base::Plugins;
use common_error::prelude::ErrorExt;
use common_query::Output;
use common_recordbatch::RecordBatches;
use common_telemetry::logging;
use either::Either;
use frontend::catalog::FrontendCatalogManager;
use frontend::datanode::DatanodeClients;
use meta_client::client::MetaClientBuilder;
use partition::manager::PartitionRuleManager;
use partition::route::TableRoutes;
@@ -107,7 +108,7 @@ impl Repl {
Ok(ref line) => {
let request = line.trim();
self.rl.add_history_entry(request.to_string());
let _ = self.rl.add_history_entry(request.to_string());
request.try_into()
}
@@ -136,7 +137,7 @@ impl Repl {
}
}
ReplCommand::Sql { sql } => {
self.execute_sql(sql).await;
let _ = self.execute_sql(sql).await;
}
ReplCommand::Exit => {
return Ok(());
@@ -253,9 +254,7 @@ async fn create_query_engine(meta_addr: &str) -> Result<DatafusionQueryEngine> {
.context(StartMetaClientSnafu)?;
let meta_client = Arc::new(meta_client);
let backend = Arc::new(MetaKvBackend {
client: meta_client.clone(),
});
let cached_meta_backend = Arc::new(CachedMetaKvBackend::new(meta_client.clone()));
let table_routes = Arc::new(TableRoutes::new(meta_client));
let partition_manager = Arc::new(PartitionRuleManager::new(table_routes));
@@ -263,11 +262,19 @@ async fn create_query_engine(meta_addr: &str) -> Result<DatafusionQueryEngine> {
let datanode_clients = Arc::new(DatanodeClients::default());
let catalog_list = Arc::new(FrontendCatalogManager::new(
backend,
cached_meta_backend.clone(),
cached_meta_backend,
partition_manager,
datanode_clients,
));
let state = Arc::new(QueryEngineState::new(catalog_list, Default::default()));
let plugins: Arc<Plugins> = Default::default();
let state = Arc::new(QueryEngineState::new(
catalog_list,
false,
None,
None,
plugins.clone(),
));
Ok(DatafusionQueryEngine::new(state))
Ok(DatafusionQueryEngine::new(state, plugins))
}

View File

@@ -23,14 +23,13 @@ use snafu::ResultExt;
use crate::error::{MissingConfigSnafu, Result, ShutdownDatanodeSnafu, StartDatanodeSnafu};
use crate::options::{Options, TopLevelOptions};
use crate::toml_loader;
pub struct Instance {
datanode: Datanode,
}
impl Instance {
pub async fn run(&mut self) -> Result<()> {
pub async fn start(&mut self) -> Result<()> {
self.datanode.start().await.context(StartDatanodeSnafu)
}
@@ -85,61 +84,54 @@ struct StartCommand {
rpc_addr: Option<String>,
#[clap(long)]
rpc_hostname: Option<String>,
#[clap(long)]
mysql_addr: Option<String>,
#[clap(long)]
metasrv_addr: Option<String>,
#[clap(long, multiple = true, value_delimiter = ',')]
metasrv_addr: Option<Vec<String>>,
#[clap(short, long)]
config_file: Option<String>,
#[clap(long)]
data_dir: Option<String>,
data_home: Option<String>,
#[clap(long)]
wal_dir: Option<String>,
#[clap(long)]
http_addr: Option<String>,
#[clap(long)]
http_timeout: Option<u64>,
#[clap(long, default_value = "GREPTIMEDB_DATANODE")]
env_prefix: String,
}
impl StartCommand {
fn load_options(&self, top_level_opts: TopLevelOptions) -> Result<Options> {
let mut opts: DatanodeOptions = if let Some(path) = &self.config_file {
toml_loader::from_file!(path)?
} else {
DatanodeOptions::default()
};
let mut opts: DatanodeOptions = Options::load_layered_options(
self.config_file.as_deref(),
self.env_prefix.as_ref(),
DatanodeOptions::env_list_keys(),
)?;
if let Some(dir) = top_level_opts.log_dir {
opts.logging.dir = dir;
}
if let Some(level) = top_level_opts.log_level {
opts.logging.level = level;
if top_level_opts.log_level.is_some() {
opts.logging.level = top_level_opts.log_level;
}
if let Some(addr) = self.rpc_addr.clone() {
opts.rpc_addr = addr;
if let Some(addr) = &self.rpc_addr {
opts.rpc_addr = addr.clone();
}
if self.rpc_hostname.is_some() {
opts.rpc_hostname = self.rpc_hostname.clone();
}
if let Some(addr) = self.mysql_addr.clone() {
opts.mysql_addr = addr;
}
if let Some(node_id) = self.node_id {
opts.node_id = Some(node_id);
}
if let Some(meta_addr) = self.metasrv_addr.clone() {
if let Some(metasrv_addrs) = &self.metasrv_addr {
opts.meta_client_options
.get_or_insert_with(MetaClientOptions::default)
.metasrv_addrs = meta_addr
.split(',')
.map(&str::trim)
.map(&str::to_string)
.collect::<_>();
.metasrv_addrs = metasrv_addrs.clone();
opts.mode = Mode::Distributed;
}
@@ -150,16 +142,20 @@ impl StartCommand {
.fail();
}
if let Some(data_dir) = self.data_dir.clone() {
opts.storage.store = ObjectStoreConfig::File(FileConfig { data_dir });
if let Some(data_home) = &self.data_home {
opts.storage.store = ObjectStoreConfig::File(FileConfig {
data_home: data_home.clone(),
});
}
if let Some(wal_dir) = self.wal_dir.clone() {
opts.wal.dir = wal_dir;
if let Some(wal_dir) = &self.wal_dir {
opts.wal.dir = Some(wal_dir.clone());
}
if let Some(http_addr) = self.http_addr.clone() {
opts.http_opts.addr = http_addr
if let Some(http_addr) = &self.http_addr {
opts.http_opts.addr = http_addr.clone();
}
if let Some(http_timeout) = self.http_timeout {
opts.http_opts.timeout = Duration::from_secs(http_timeout)
}
@@ -174,7 +170,9 @@ impl StartCommand {
logging::info!("Datanode start command: {:#?}", self);
logging::info!("Datanode options: {:#?}", opts);
let datanode = Datanode::new(opts).await.context(StartDatanodeSnafu)?;
let datanode = Datanode::new(opts, Default::default())
.await
.context(StartDatanodeSnafu)?;
Ok(Instance { datanode })
}
@@ -191,6 +189,7 @@ mod tests {
use servers::Mode;
use super::*;
use crate::options::ENV_VAR_SEP;
#[test]
fn test_read_from_config_file() {
@@ -202,8 +201,6 @@ mod tests {
rpc_addr = "127.0.0.1:3001"
rpc_hostname = "127.0.0.1"
rpc_runtime_size = 8
mysql_addr = "127.0.0.1:4406"
mysql_runtime_size = 2
[meta_client_options]
metasrv_addrs = ["127.0.0.1:3002"]
@@ -212,7 +209,7 @@ mod tests {
tcp_nodelay = true
[wal]
dir = "/tmp/greptimedb/wal"
dir = "/other/wal"
file_size = "1GB"
purge_threshold = "50GB"
purge_interval = "10m"
@@ -221,7 +218,7 @@ mod tests {
[storage]
type = "File"
data_dir = "/tmp/greptimedb/data/"
data_home = "/tmp/greptimedb/"
[storage.compaction]
max_inflight_tasks = 3
@@ -232,6 +229,7 @@ mod tests {
checkpoint_margin = 9
gc_duration = '7s'
checkpoint_on_startup = true
compress = true
[logging]
level = "debug"
@@ -248,10 +246,9 @@ mod tests {
cmd.load_options(TopLevelOptions::default()).unwrap() else { unreachable!() };
assert_eq!("127.0.0.1:3001".to_string(), options.rpc_addr);
assert_eq!("127.0.0.1:4406".to_string(), options.mysql_addr);
assert_eq!(2, options.mysql_runtime_size);
assert_eq!(Some(42), options.node_id);
assert_eq!("/other/wal", options.wal.dir.unwrap());
assert_eq!(Duration::from_secs(600), options.wal.purge_interval);
assert_eq!(1024 * 1024 * 1024, options.wal.file_size.0);
assert_eq!(1024 * 1024 * 1024 * 50, options.wal.purge_threshold.0);
@@ -270,11 +267,12 @@ mod tests {
assert!(tcp_nodelay);
match &options.storage.store {
ObjectStoreConfig::File(FileConfig { data_dir, .. }) => {
assert_eq!("/tmp/greptimedb/data/", data_dir)
ObjectStoreConfig::File(FileConfig { data_home, .. }) => {
assert_eq!("/tmp/greptimedb/", data_home)
}
ObjectStoreConfig::S3 { .. } => unreachable!(),
ObjectStoreConfig::Oss { .. } => unreachable!(),
ObjectStoreConfig::Azblob { .. } => unreachable!(),
};
assert_eq!(
@@ -291,11 +289,12 @@ mod tests {
checkpoint_margin: Some(9),
gc_duration: Some(Duration::from_secs(7)),
checkpoint_on_startup: true,
compress: true
},
options.storage.manifest,
);
assert_eq!("debug".to_string(), options.logging.level);
assert_eq!("debug", options.logging.level.unwrap());
assert_eq!("/tmp/greptimedb/test/logs".to_string(), options.logging.dir);
}
@@ -310,7 +309,7 @@ mod tests {
if let Options::Datanode(opt) = (StartCommand {
node_id: Some(42),
metasrv_addr: Some("127.0.0.1:3002".to_string()),
metasrv_addr: Some(vec!["127.0.0.1:3002".to_string()]),
..Default::default()
})
.load_options(TopLevelOptions::default())
@@ -320,19 +319,19 @@ mod tests {
}
assert!((StartCommand {
metasrv_addr: Some("127.0.0.1:3002".to_string()),
metasrv_addr: Some(vec!["127.0.0.1:3002".to_string()]),
..Default::default()
})
.load_options(TopLevelOptions::default())
.is_err());
// Providing node_id but leaving metasrv_addr absent is ok since metasrv_addr has a default value
(StartCommand {
assert!((StartCommand {
node_id: Some(42),
..Default::default()
})
.load_options(TopLevelOptions::default())
.unwrap();
.is_ok());
}
#[test]
@@ -348,6 +347,126 @@ mod tests {
let logging_opt = options.logging_options();
assert_eq!("/tmp/greptimedb/test/logs", logging_opt.dir);
assert_eq!("debug", logging_opt.level);
assert_eq!("debug", logging_opt.level.as_ref().unwrap());
}
#[test]
fn test_config_precedence_order() {
let mut file = create_named_temp_file();
let toml_str = r#"
mode = "distributed"
enable_memory_catalog = false
node_id = 42
rpc_addr = "127.0.0.1:3001"
rpc_hostname = "127.0.0.1"
rpc_runtime_size = 8
[meta_client_options]
timeout_millis = 3000
connect_timeout_millis = 5000
tcp_nodelay = true
[wal]
file_size = "1GB"
purge_threshold = "50GB"
purge_interval = "10m"
read_batch_size = 128
sync_write = false
[storage]
type = "File"
data_home = "/tmp/greptimedb/"
[storage.compaction]
max_inflight_tasks = 3
max_files_in_level0 = 7
max_purge_tasks = 32
[storage.manifest]
checkpoint_on_startup = true
[logging]
level = "debug"
dir = "/tmp/greptimedb/test/logs"
"#;
write!(file, "{}", toml_str).unwrap();
let env_prefix = "DATANODE_UT";
temp_env::with_vars(
vec![
(
// storage.manifest.gc_duration = 9s
vec![
env_prefix.to_string(),
"storage".to_uppercase(),
"manifest".to_uppercase(),
"gc_duration".to_uppercase(),
]
.join(ENV_VAR_SEP),
Some("9s"),
),
(
// storage.compaction.max_purge_tasks = 99
vec![
env_prefix.to_string(),
"storage".to_uppercase(),
"compaction".to_uppercase(),
"max_purge_tasks".to_uppercase(),
]
.join(ENV_VAR_SEP),
Some("99"),
),
(
// meta_client_options.metasrv_addrs = 127.0.0.1:3001,127.0.0.1:3002,127.0.0.1:3003
vec![
env_prefix.to_string(),
"meta_client_options".to_uppercase(),
"metasrv_addrs".to_uppercase(),
]
.join(ENV_VAR_SEP),
Some("127.0.0.1:3001,127.0.0.1:3002,127.0.0.1:3003"),
),
],
|| {
let command = StartCommand {
config_file: Some(file.path().to_str().unwrap().to_string()),
wal_dir: Some("/other/wal/dir".to_string()),
env_prefix: env_prefix.to_string(),
..Default::default()
};
let Options::Datanode(opts) =
command.load_options(TopLevelOptions::default()).unwrap() else {unreachable!()};
// Should be read from env, env > default values.
assert_eq!(
opts.storage.manifest.gc_duration,
Some(Duration::from_secs(9))
);
assert_eq!(
opts.meta_client_options.unwrap().metasrv_addrs,
vec![
"127.0.0.1:3001".to_string(),
"127.0.0.1:3002".to_string(),
"127.0.0.1:3003".to_string()
]
);
// Should be read from config file, config file > env > default values.
assert_eq!(opts.storage.compaction.max_purge_tasks, 32);
// Should be read from cli, cli > config file > env > default values.
assert_eq!(opts.wal.dir.unwrap(), "/other/wal/dir");
// Should be default value.
assert_eq!(
opts.storage.manifest.checkpoint_margin,
DatanodeOptions::default()
.storage
.manifest
.checkpoint_margin
);
},
);
}
}

View File

@@ -15,6 +15,7 @@
use std::any::Any;
use common_error::prelude::*;
use config::ConfigError;
use rustyline::error::ReadlineError;
use snafu::Location;
@@ -23,59 +24,46 @@ use snafu::Location;
pub enum Error {
#[snafu(display("Failed to start datanode, source: {}", source))]
StartDatanode {
#[snafu(backtrace)]
location: Location,
source: datanode::error::Error,
},
#[snafu(display("Failed to shutdown datanode, source: {}", source))]
ShutdownDatanode {
#[snafu(backtrace)]
location: Location,
source: datanode::error::Error,
},
#[snafu(display("Failed to start frontend, source: {}", source))]
StartFrontend {
#[snafu(backtrace)]
location: Location,
source: frontend::error::Error,
},
#[snafu(display("Failed to shutdown frontend, source: {}", source))]
ShutdownFrontend {
#[snafu(backtrace)]
location: Location,
source: frontend::error::Error,
},
#[snafu(display("Failed to build meta server, source: {}", source))]
BuildMetaServer {
#[snafu(backtrace)]
location: Location,
source: meta_srv::error::Error,
},
#[snafu(display("Failed to start meta server, source: {}", source))]
StartMetaServer {
#[snafu(backtrace)]
location: Location,
source: meta_srv::error::Error,
},
#[snafu(display("Failed to shutdown meta server, source: {}", source))]
ShutdownMetaServer {
#[snafu(backtrace)]
location: Location,
source: meta_srv::error::Error,
},
#[snafu(display("Failed to read config file: {}, source: {}", path, source))]
ReadConfig {
path: String,
source: std::io::Error,
location: Location,
},
#[snafu(display("Failed to parse config, source: {}", source))]
ParseConfig {
source: toml::de::Error,
location: Location,
},
#[snafu(display("Missing config, msg: {}", msg))]
MissingConfig { msg: String, location: Location },
@@ -84,14 +72,14 @@ pub enum Error {
#[snafu(display("Illegal auth config: {}", source))]
IllegalAuthConfig {
#[snafu(backtrace)]
location: Location,
source: servers::auth::Error,
},
#[snafu(display("Unsupported selector type, {} source: {}", selector_type, source))]
UnsupportedSelectorType {
selector_type: String,
#[snafu(backtrace)]
location: Location,
source: meta_srv::error::Error,
},
@@ -113,46 +101,58 @@ pub enum Error {
#[snafu(display("Failed to request database, sql: {sql}, source: {source}"))]
RequestDatabase {
sql: String,
#[snafu(backtrace)]
location: Location,
source: client::Error,
},
#[snafu(display("Failed to collect RecordBatches, source: {source}"))]
CollectRecordBatches {
#[snafu(backtrace)]
location: Location,
source: common_recordbatch::error::Error,
},
#[snafu(display("Failed to pretty print Recordbatches, source: {source}"))]
PrettyPrintRecordBatches {
#[snafu(backtrace)]
location: Location,
source: common_recordbatch::error::Error,
},
#[snafu(display("Failed to start Meta client, source: {}", source))]
StartMetaClient {
#[snafu(backtrace)]
location: Location,
source: meta_client::error::Error,
},
#[snafu(display("Failed to parse SQL: {}, source: {}", sql, source))]
ParseSql {
sql: String,
#[snafu(backtrace)]
location: Location,
source: query::error::Error,
},
#[snafu(display("Failed to plan statement, source: {}", source))]
PlanStatement {
#[snafu(backtrace)]
location: Location,
source: query::error::Error,
},
#[snafu(display("Failed to encode logical plan in substrait, source: {}", source))]
SubstraitEncodeLogicalPlan {
#[snafu(backtrace)]
location: Location,
source: substrait::error::Error,
},
#[snafu(display("Failed to load layered config, source: {}", source))]
LoadLayeredConfig {
source: ConfigError,
location: Location,
},
#[snafu(display("Failed to start catalog manager, source: {}", source))]
StartCatalogManager {
location: Location,
source: catalog::error::Error,
},
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -160,31 +160,29 @@ pub type Result<T> = std::result::Result<T, Error>;
impl ErrorExt for Error {
fn status_code(&self) -> StatusCode {
match self {
Error::StartDatanode { source } => source.status_code(),
Error::StartFrontend { source } => source.status_code(),
Error::ShutdownDatanode { source } => source.status_code(),
Error::ShutdownFrontend { source } => source.status_code(),
Error::StartMetaServer { source } => source.status_code(),
Error::ShutdownMetaServer { source } => source.status_code(),
Error::BuildMetaServer { source } => source.status_code(),
Error::StartDatanode { source, .. } => source.status_code(),
Error::StartFrontend { source, .. } => source.status_code(),
Error::ShutdownDatanode { source, .. } => source.status_code(),
Error::ShutdownFrontend { source, .. } => source.status_code(),
Error::StartMetaServer { source, .. } => source.status_code(),
Error::ShutdownMetaServer { source, .. } => source.status_code(),
Error::BuildMetaServer { source, .. } => source.status_code(),
Error::UnsupportedSelectorType { source, .. } => source.status_code(),
Error::ReadConfig { .. } | Error::ParseConfig { .. } | Error::MissingConfig { .. } => {
StatusCode::InvalidArguments
}
Error::IllegalConfig { .. } | Error::InvalidReplCommand { .. } => {
StatusCode::InvalidArguments
}
Error::IllegalAuthConfig { .. } => StatusCode::InvalidArguments,
Error::MissingConfig { .. }
| Error::LoadLayeredConfig { .. }
| Error::IllegalConfig { .. }
| Error::InvalidReplCommand { .. }
| Error::IllegalAuthConfig { .. } => StatusCode::InvalidArguments,
Error::ReplCreation { .. } | Error::Readline { .. } => StatusCode::Internal,
Error::RequestDatabase { source, .. } => source.status_code(),
Error::CollectRecordBatches { source } | Error::PrettyPrintRecordBatches { source } => {
Error::CollectRecordBatches { source, .. }
| Error::PrettyPrintRecordBatches { source, .. } => source.status_code(),
Error::StartMetaClient { source, .. } => source.status_code(),
Error::ParseSql { source, .. } | Error::PlanStatement { source, .. } => {
source.status_code()
}
Error::StartMetaClient { source } => source.status_code(),
Error::ParseSql { source, .. } | Error::PlanStatement { source } => {
source.status_code()
}
Error::SubstraitEncodeLogicalPlan { source } => source.status_code(),
Error::SubstraitEncodeLogicalPlan { source, .. } => source.status_code(),
Error::StartCatalogManager { source, .. } => source.status_code(),
}
}

View File

@@ -16,30 +16,31 @@ use std::sync::Arc;
use clap::Parser;
use common_base::Plugins;
use common_telemetry::logging;
use frontend::frontend::FrontendOptions;
use frontend::grpc::GrpcOptions;
use frontend::influxdb::InfluxdbOptions;
use frontend::instance::{FrontendInstance, Instance as FeInstance};
use frontend::mysql::MysqlOptions;
use frontend::opentsdb::OpentsdbOptions;
use frontend::postgres::PostgresOptions;
use frontend::prom::PromOptions;
use frontend::service_config::{InfluxdbOptions, PromOptions};
use meta_client::MetaClientOptions;
use servers::auth::UserProviderRef;
use servers::tls::{TlsMode, TlsOption};
use servers::{auth, Mode};
use snafu::ResultExt;
use crate::error::{self, IllegalAuthConfigSnafu, Result};
use crate::error::{self, IllegalAuthConfigSnafu, Result, StartCatalogManagerSnafu};
use crate::options::{Options, TopLevelOptions};
use crate::toml_loader;
pub struct Instance {
frontend: FeInstance,
}
impl Instance {
pub async fn run(&mut self) -> Result<()> {
pub async fn start(&mut self) -> Result<()> {
self.frontend
.catalog_manager()
.start()
.await
.context(StartCatalogManagerSnafu)?;
self.frontend
.start()
.await
@@ -89,7 +90,7 @@ impl SubCommand {
}
}
#[derive(Debug, Parser)]
#[derive(Debug, Default, Parser)]
pub struct StartCommand {
#[clap(long)]
http_addr: Option<String>,
@@ -107,8 +108,8 @@ pub struct StartCommand {
config_file: Option<String>,
#[clap(short, long)]
influxdb_enable: Option<bool>,
#[clap(long)]
metasrv_addr: Option<String>,
#[clap(long, multiple = true, value_delimiter = ',')]
metasrv_addr: Option<Vec<String>>,
#[clap(long)]
tls_mode: Option<TlsMode>,
#[clap(long)]
@@ -119,31 +120,36 @@ pub struct StartCommand {
user_provider: Option<String>,
#[clap(long)]
disable_dashboard: Option<bool>,
#[clap(long, default_value = "GREPTIMEDB_FRONTEND")]
env_prefix: String,
}
impl StartCommand {
fn load_options(&self, top_level_opts: TopLevelOptions) -> Result<Options> {
let mut opts: FrontendOptions = if let Some(path) = &self.config_file {
toml_loader::from_file!(path)?
} else {
FrontendOptions::default()
};
let mut opts: FrontendOptions = Options::load_layered_options(
self.config_file.as_deref(),
self.env_prefix.as_ref(),
FrontendOptions::env_list_keys(),
)?;
if let Some(dir) = top_level_opts.log_dir {
opts.logging.dir = dir;
}
if let Some(level) = top_level_opts.log_level {
opts.logging.level = level;
if top_level_opts.log_level.is_some() {
opts.logging.level = top_level_opts.log_level;
}
let tls_option = TlsOption::new(
let tls_opts = TlsOption::new(
self.tls_mode.clone(),
self.tls_cert_path.clone(),
self.tls_key_path.clone(),
);
if let Some(addr) = self.http_addr.clone() {
opts.http_options.get_or_insert_with(Default::default).addr = addr;
if let Some(addr) = &self.http_addr {
if let Some(http_opts) = &mut opts.http_options {
http_opts.addr = addr.clone()
}
}
if let Some(disable_dashboard) = self.disable_dashboard {
@@ -152,47 +158,44 @@ impl StartCommand {
.disable_dashboard = disable_dashboard;
}
if let Some(addr) = self.grpc_addr.clone() {
opts.grpc_options = Some(GrpcOptions {
addr,
..Default::default()
});
if let Some(addr) = &self.grpc_addr {
if let Some(grpc_opts) = &mut opts.grpc_options {
grpc_opts.addr = addr.clone()
}
}
if let Some(addr) = self.mysql_addr.clone() {
opts.mysql_options = Some(MysqlOptions {
addr,
tls: tls_option.clone(),
..Default::default()
});
if let Some(addr) = &self.mysql_addr {
if let Some(mysql_opts) = &mut opts.mysql_options {
mysql_opts.addr = addr.clone();
mysql_opts.tls = tls_opts.clone();
}
}
if let Some(addr) = self.prom_addr.clone() {
opts.prom_options = Some(PromOptions { addr });
if let Some(addr) = &self.prom_addr {
opts.prom_options = Some(PromOptions { addr: addr.clone() });
}
if let Some(addr) = self.postgres_addr.clone() {
opts.postgres_options = Some(PostgresOptions {
addr,
tls: tls_option,
..Default::default()
});
if let Some(addr) = &self.postgres_addr {
if let Some(postgres_opts) = &mut opts.postgres_options {
postgres_opts.addr = addr.clone();
postgres_opts.tls = tls_opts;
}
}
if let Some(addr) = self.opentsdb_addr.clone() {
opts.opentsdb_options = Some(OpentsdbOptions {
addr,
..Default::default()
});
if let Some(addr) = &self.opentsdb_addr {
if let Some(opentsdb_addr) = &mut opts.opentsdb_options {
opentsdb_addr.addr = addr.clone();
}
}
if let Some(enable) = self.influxdb_enable {
opts.influxdb_options = Some(InfluxdbOptions { enable });
}
if let Some(metasrv_addr) = self.metasrv_addr.clone() {
if let Some(metasrv_addrs) = &self.metasrv_addr {
opts.meta_client_options
.get_or_insert_with(MetaClientOptions::default)
.metasrv_addrs = metasrv_addr
.split(',')
.map(&str::trim)
.map(&str::to_string)
.collect::<Vec<_>>();
.metasrv_addrs = metasrv_addrs.clone();
opts.mode = Mode::Distributed;
}
@@ -200,6 +203,9 @@ impl StartCommand {
}
async fn build(self, opts: FrontendOptions) -> Result<Instance> {
logging::info!("Frontend start command: {:#?}", self);
logging::info!("Frontend options: {:#?}", opts);
let plugins = Arc::new(load_frontend_plugins(&self.user_provider)?);
let mut instance = FeInstance::try_new_distributed(&opts, plugins.clone())
@@ -230,34 +236,35 @@ mod tests {
use std::io::Write;
use std::time::Duration;
use common_base::readable_size::ReadableSize;
use common_test_util::temp_dir::create_named_temp_file;
use frontend::service_config::GrpcOptions;
use servers::auth::{Identity, Password, UserProviderRef};
use super::*;
use crate::options::ENV_VAR_SEP;
#[test]
fn test_try_from_start_command() {
let command = StartCommand {
http_addr: Some("127.0.0.1:1234".to_string()),
grpc_addr: None,
prom_addr: Some("127.0.0.1:4444".to_string()),
mysql_addr: Some("127.0.0.1:5678".to_string()),
postgres_addr: Some("127.0.0.1:5432".to_string()),
opentsdb_addr: Some("127.0.0.1:4321".to_string()),
influxdb_enable: Some(false),
config_file: None,
metasrv_addr: None,
tls_mode: None,
tls_cert_path: None,
tls_key_path: None,
user_provider: None,
disable_dashboard: Some(false),
..Default::default()
};
let Options::Frontend(opts) =
command.load_options(TopLevelOptions::default()).unwrap() else { unreachable!() };
assert_eq!(opts.http_options.as_ref().unwrap().addr, "127.0.0.1:1234");
assert_eq!(
ReadableSize::mb(64),
opts.http_options.as_ref().unwrap().body_limit
);
assert_eq!(opts.mysql_options.as_ref().unwrap().addr, "127.0.0.1:5678");
assert_eq!(
opts.postgres_options.as_ref().unwrap().addr,
@@ -299,7 +306,8 @@ mod tests {
[http_options]
addr = "127.0.0.1:4000"
timeout = "30s"
body_limit = "2GB"
[logging]
level = "debug"
dir = "/tmp/greptimedb/test/logs"
@@ -307,20 +315,9 @@ mod tests {
write!(file, "{}", toml_str).unwrap();
let command = StartCommand {
http_addr: None,
grpc_addr: None,
mysql_addr: None,
prom_addr: None,
postgres_addr: None,
opentsdb_addr: None,
influxdb_enable: None,
config_file: Some(file.path().to_str().unwrap().to_string()),
metasrv_addr: None,
tls_mode: None,
tls_cert_path: None,
tls_key_path: None,
user_provider: None,
disable_dashboard: Some(false),
..Default::default()
};
let Options::Frontend(fe_opts) =
@@ -335,62 +332,40 @@ mod tests {
fe_opts.http_options.as_ref().unwrap().timeout
);
assert_eq!("debug".to_string(), fe_opts.logging.level);
assert_eq!(
ReadableSize::gb(2),
fe_opts.http_options.as_ref().unwrap().body_limit
);
assert_eq!("debug", fe_opts.logging.level.as_ref().unwrap());
assert_eq!("/tmp/greptimedb/test/logs".to_string(), fe_opts.logging.dir);
}
#[tokio::test]
async fn test_try_from_start_command_to_anymap() {
let command = StartCommand {
http_addr: None,
grpc_addr: None,
mysql_addr: None,
prom_addr: None,
postgres_addr: None,
opentsdb_addr: None,
influxdb_enable: None,
config_file: None,
metasrv_addr: None,
tls_mode: None,
tls_cert_path: None,
tls_key_path: None,
user_provider: Some("static_user_provider:cmd:test=test".to_string()),
disable_dashboard: Some(false),
..Default::default()
};
let plugins = load_frontend_plugins(&command.user_provider);
assert!(plugins.is_ok());
let plugins = plugins.unwrap();
let provider = plugins.get::<UserProviderRef>();
assert!(provider.is_some());
let provider = provider.unwrap();
let provider = plugins.get::<UserProviderRef>().unwrap();
let result = provider
.authenticate(
Identity::UserId("test", None),
Password::PlainText("test".to_string().into()),
)
.await;
assert!(result.is_ok());
let _ = result.unwrap();
}
#[test]
fn test_top_level_options() {
let cmd = StartCommand {
http_addr: None,
grpc_addr: None,
mysql_addr: None,
prom_addr: None,
postgres_addr: None,
opentsdb_addr: None,
influxdb_enable: None,
config_file: None,
metasrv_addr: None,
tls_mode: None,
tls_cert_path: None,
tls_key_path: None,
user_provider: None,
disable_dashboard: Some(false),
..Default::default()
};
let options = cmd
@@ -402,6 +377,116 @@ mod tests {
let logging_opt = options.logging_options();
assert_eq!("/tmp/greptimedb/test/logs", logging_opt.dir);
assert_eq!("debug", logging_opt.level);
assert_eq!("debug", logging_opt.level.as_ref().unwrap());
}
#[test]
fn test_config_precedence_order() {
let mut file = create_named_temp_file();
let toml_str = r#"
mode = "distributed"
[http_options]
addr = "127.0.0.1:4000"
[meta_client_options]
timeout_millis = 3000
connect_timeout_millis = 5000
tcp_nodelay = true
[mysql_options]
addr = "127.0.0.1:4002"
"#;
write!(file, "{}", toml_str).unwrap();
let env_prefix = "FRONTEND_UT";
temp_env::with_vars(
vec![
(
// mysql_options.addr = 127.0.0.1:14002
vec![
env_prefix.to_string(),
"mysql_options".to_uppercase(),
"addr".to_uppercase(),
]
.join(ENV_VAR_SEP),
Some("127.0.0.1:14002"),
),
(
// mysql_options.runtime_size = 11
vec![
env_prefix.to_string(),
"mysql_options".to_uppercase(),
"runtime_size".to_uppercase(),
]
.join(ENV_VAR_SEP),
Some("11"),
),
(
// http_options.addr = 127.0.0.1:24000
vec![
env_prefix.to_string(),
"http_options".to_uppercase(),
"addr".to_uppercase(),
]
.join(ENV_VAR_SEP),
Some("127.0.0.1:24000"),
),
(
// meta_client_options.metasrv_addrs = 127.0.0.1:3001,127.0.0.1:3002,127.0.0.1:3003
vec![
env_prefix.to_string(),
"meta_client_options".to_uppercase(),
"metasrv_addrs".to_uppercase(),
]
.join(ENV_VAR_SEP),
Some("127.0.0.1:3001,127.0.0.1:3002,127.0.0.1:3003"),
),
],
|| {
let command = StartCommand {
config_file: Some(file.path().to_str().unwrap().to_string()),
http_addr: Some("127.0.0.1:14000".to_string()),
env_prefix: env_prefix.to_string(),
..Default::default()
};
let top_level_opts = TopLevelOptions {
log_dir: None,
log_level: Some("error".to_string()),
};
let Options::Frontend(fe_opts) =
command.load_options(top_level_opts).unwrap() else {unreachable!()};
// Should be read from env, env > default values.
assert_eq!(fe_opts.mysql_options.as_ref().unwrap().runtime_size, 11);
assert_eq!(
fe_opts.meta_client_options.unwrap().metasrv_addrs,
vec![
"127.0.0.1:3001".to_string(),
"127.0.0.1:3002".to_string(),
"127.0.0.1:3003".to_string()
]
);
// Should be read from config file, config file > env > default values.
assert_eq!(
fe_opts.mysql_options.as_ref().unwrap().addr,
"127.0.0.1:4002"
);
// Should be read from cli, cli > config file > env > default values.
assert_eq!(
fe_opts.http_options.as_ref().unwrap().addr,
"127.0.0.1:14000"
);
// Should be default value.
assert_eq!(
fe_opts.grpc_options.as_ref().unwrap().addr,
GrpcOptions::default().addr
);
},
);
}
}
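
The StartCommand diff above replaces the hand-rolled comma splitting of `--metasrv-addr` with clap's own `value_delimiter`, so the flag now parses straight into a `Vec<String>`. A minimal sketch of just that flag, with the attributes copied from the diff (the struct and program names here are invented for illustration):

use clap::Parser;

#[derive(Debug, Default, Parser)]
struct DemoCommand {
    // clap splits the raw value on commas before it ever reaches the command.
    #[clap(long, multiple = true, value_delimiter = ',')]
    metasrv_addr: Option<Vec<String>>,
}

fn main() {
    let cmd = DemoCommand::parse_from(["demo", "--metasrv-addr", "127.0.0.1:3001,127.0.0.1:3002"]);
    assert_eq!(
        cmd.metasrv_addr,
        Some(vec!["127.0.0.1:3001".to_string(), "127.0.0.1:3002".to_string()])
    );
}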

View File

@@ -21,4 +21,3 @@ pub mod frontend;
pub mod metasrv;
pub mod options;
pub mod standalone;
mod toml_loader;

View File

@@ -20,16 +20,15 @@ use meta_srv::bootstrap::MetaSrvInstance;
use meta_srv::metasrv::MetaSrvOptions;
use snafu::ResultExt;
use crate::error::Result;
use crate::error::{self, Result};
use crate::options::{Options, TopLevelOptions};
use crate::{error, toml_loader};
pub struct Instance {
instance: MetaSrvInstance,
}
impl Instance {
pub async fn run(&mut self) -> Result<()> {
pub async fn start(&mut self) -> Result<()> {
self.instance
.start()
.await
@@ -79,7 +78,7 @@ impl SubCommand {
}
}
#[derive(Debug, Parser)]
#[derive(Debug, Default, Parser)]
struct StartCommand {
#[clap(long)]
bind_addr: Option<String>,
@@ -94,48 +93,57 @@ struct StartCommand {
#[clap(long)]
use_memory_store: bool,
#[clap(long)]
disable_region_failover: bool,
#[clap(long)]
http_addr: Option<String>,
#[clap(long)]
http_timeout: Option<u64>,
#[clap(long, default_value = "GREPTIMEDB_METASRV")]
env_prefix: String,
}
impl StartCommand {
fn load_options(&self, top_level_opts: TopLevelOptions) -> Result<Options> {
let mut opts: MetaSrvOptions = if let Some(path) = &self.config_file {
toml_loader::from_file!(path)?
} else {
MetaSrvOptions::default()
};
let mut opts: MetaSrvOptions = Options::load_layered_options(
self.config_file.as_deref(),
self.env_prefix.as_ref(),
None,
)?;
if let Some(dir) = top_level_opts.log_dir {
opts.logging.dir = dir;
}
if let Some(level) = top_level_opts.log_level {
opts.logging.level = level;
if top_level_opts.log_level.is_some() {
opts.logging.level = top_level_opts.log_level;
}
if let Some(addr) = self.bind_addr.clone() {
opts.bind_addr = addr;
if let Some(addr) = &self.bind_addr {
opts.bind_addr = addr.clone();
}
if let Some(addr) = self.server_addr.clone() {
opts.server_addr = addr;
if let Some(addr) = &self.server_addr {
opts.server_addr = addr.clone();
}
if let Some(addr) = self.store_addr.clone() {
opts.store_addr = addr;
if let Some(addr) = &self.store_addr {
opts.store_addr = addr.clone();
}
if let Some(selector_type) = &self.selector {
opts.selector = selector_type[..]
.try_into()
.context(error::UnsupportedSelectorTypeSnafu { selector_type })?;
}
if self.use_memory_store {
opts.use_memory_store = true;
opts.use_memory_store = self.use_memory_store;
opts.disable_region_failover = self.disable_region_failover;
if let Some(http_addr) = &self.http_addr {
opts.http_opts.addr = http_addr.clone();
}
if let Some(http_addr) = self.http_addr.clone() {
opts.http_opts.addr = http_addr;
}
if let Some(http_timeout) = self.http_timeout {
opts.http_opts.timeout = Duration::from_secs(http_timeout);
}
@@ -167,6 +175,7 @@ mod tests {
use meta_srv::selector::SelectorType;
use super::*;
use crate::options::ENV_VAR_SEP;
#[test]
fn test_read_from_cmd() {
@@ -174,11 +183,8 @@ mod tests {
bind_addr: Some("127.0.0.1:3002".to_string()),
server_addr: Some("127.0.0.1:3002".to_string()),
store_addr: Some("127.0.0.1:2380".to_string()),
config_file: None,
selector: Some("LoadBased".to_string()),
use_memory_store: false,
http_addr: None,
http_timeout: None,
..Default::default()
};
let Options::Metasrv(options) =
@@ -206,14 +212,8 @@ mod tests {
write!(file, "{}", toml_str).unwrap();
let cmd = StartCommand {
bind_addr: None,
server_addr: None,
store_addr: None,
selector: None,
config_file: Some(file.path().to_str().unwrap().to_string()),
use_memory_store: false,
http_addr: None,
http_timeout: None,
..Default::default()
};
let Options::Metasrv(options) =
@@ -223,7 +223,7 @@ mod tests {
assert_eq!("127.0.0.1:2379".to_string(), options.store_addr);
assert_eq!(15, options.datanode_lease_secs);
assert_eq!(SelectorType::LeaseBased, options.selector);
assert_eq!("debug".to_string(), options.logging.level);
assert_eq!("debug", options.logging.level.as_ref().unwrap());
assert_eq!("/tmp/greptimedb/test/logs".to_string(), options.logging.dir);
}
@@ -233,11 +233,8 @@ mod tests {
bind_addr: Some("127.0.0.1:3002".to_string()),
server_addr: Some("127.0.0.1:3002".to_string()),
store_addr: Some("127.0.0.1:2380".to_string()),
config_file: None,
selector: Some("LoadBased".to_string()),
use_memory_store: false,
http_addr: None,
http_timeout: None,
..Default::default()
};
let options = cmd
@@ -249,6 +246,74 @@ mod tests {
let logging_opt = options.logging_options();
assert_eq!("/tmp/greptimedb/test/logs", logging_opt.dir);
assert_eq!("debug", logging_opt.level);
assert_eq!("debug", logging_opt.level.as_ref().unwrap());
}
#[test]
fn test_config_precedence_order() {
let mut file = create_named_temp_file();
let toml_str = r#"
server_addr = "127.0.0.1:3002"
datanode_lease_secs = 15
selector = "LeaseBased"
use_memory_store = false
[http_options]
addr = "127.0.0.1:4000"
[logging]
level = "debug"
dir = "/tmp/greptimedb/test/logs"
"#;
write!(file, "{}", toml_str).unwrap();
let env_prefix = "METASRV_UT";
temp_env::with_vars(
vec![
(
// bind_addr = 127.0.0.1:14002
vec![env_prefix.to_string(), "bind_addr".to_uppercase()].join(ENV_VAR_SEP),
Some("127.0.0.1:14002"),
),
(
// server_addr = 127.0.0.1:13002
vec![env_prefix.to_string(), "server_addr".to_uppercase()].join(ENV_VAR_SEP),
Some("127.0.0.1:13002"),
),
(
// http_options.addr = 127.0.0.1:24000
vec![
env_prefix.to_string(),
"http_options".to_uppercase(),
"addr".to_uppercase(),
]
.join(ENV_VAR_SEP),
Some("127.0.0.1:24000"),
),
],
|| {
let command = StartCommand {
http_addr: Some("127.0.0.1:14000".to_string()),
config_file: Some(file.path().to_str().unwrap().to_string()),
env_prefix: env_prefix.to_string(),
..Default::default()
};
let Options::Metasrv(opts) =
command.load_options(TopLevelOptions::default()).unwrap() else {unreachable!()};
// Should be read from env, env > default values.
assert_eq!(opts.bind_addr, "127.0.0.1:14002");
// Should be read from config file, config file > env > default values.
assert_eq!(opts.server_addr, "127.0.0.1:3002");
// Should be read from cli, cli > config file > env > default values.
assert_eq!(opts.http_opts.addr, "127.0.0.1:14000");
// Should be default value.
assert_eq!(opts.store_addr, "127.0.0.1:2379");
},
);
}
}
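
Both the frontend and metasrv precedence tests build environment-variable names by uppercasing the nested option keys and joining them with the dunder separator. A standalone sketch of that naming rule (the `env_key` helper is illustrative, not part of the codebase):

const ENV_VAR_SEP: &str = "__";

// ("GREPTIMEDB_METASRV", ["http_options", "addr"]) -> "GREPTIMEDB_METASRV__HTTP_OPTIONS__ADDR"
fn env_key(prefix: &str, segments: &[&str]) -> String {
    std::iter::once(prefix.to_string())
        .chain(segments.iter().map(|s| s.to_uppercase()))
        .collect::<Vec<_>>()
        .join(ENV_VAR_SEP)
}

fn main() {
    assert_eq!(
        env_key("GREPTIMEDB_METASRV", &["http_options", "addr"]),
        "GREPTIMEDB_METASRV__HTTP_OPTIONS__ADDR"
    );
}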

View File

@@ -11,10 +11,19 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use common_telemetry::logging::LoggingOptions;
use config::{Config, Environment, File, FileFormat};
use datanode::datanode::DatanodeOptions;
use frontend::frontend::FrontendOptions;
use meta_srv::metasrv::MetaSrvOptions;
use serde::{Deserialize, Serialize};
use snafu::ResultExt;
use crate::error::{LoadLayeredConfigSnafu, Result};
pub const ENV_VAR_SEP: &str = "__";
pub const ENV_LIST_SEP: &str = ",";
pub struct MixOptions {
pub fe_opts: FrontendOptions,
@@ -30,6 +39,12 @@ pub enum Options {
Cli(Box<LoggingOptions>),
}
#[derive(Clone, Debug, Default)]
pub struct TopLevelOptions {
pub log_dir: Option<String>,
pub log_level: Option<String>,
}
impl Options {
pub fn logging_options(&self) -> &LoggingOptions {
match self {
@@ -40,10 +55,220 @@ impl Options {
Options::Cli(opts) => opts,
}
}
/// Loads the configuration from multiple sources and merges them.
/// The precedence order is: config file > environment variables > default values.
/// `env_prefix` is the prefix of the environment variables, e.g. "FRONTEND__xxx".
/// The function uses a dunder (double underscore), `__`, as the separator in environment variable names; for example,
/// `DATANODE__STORAGE__MANIFEST__CHECKPOINT_MARGIN` maps to the `DatanodeOptions.storage.manifest.checkpoint_margin` field of the configuration.
/// `list_keys` is the list of keys that should be parsed as lists; for example, pass `Some(&["meta_client_options.metasrv_addrs"])` to parse `GREPTIMEDB_METASRV__META_CLIENT_OPTIONS__METASRV_ADDRS` as a list.
/// The function uses a comma `,` as the separator for list values, for example: `127.0.0.1:3001,127.0.0.1:3002,127.0.0.1:3003`.
pub fn load_layered_options<'de, T: Serialize + Deserialize<'de> + Default>(
config_file: Option<&str>,
env_prefix: &str,
list_keys: Option<&[&str]>,
) -> Result<T> {
let default_opts = T::default();
let env_source = {
let mut env = Environment::default();
if !env_prefix.is_empty() {
env = env.prefix(env_prefix);
}
if let Some(list_keys) = list_keys {
env = env.list_separator(ENV_LIST_SEP);
for key in list_keys {
env = env.with_list_parse_key(key);
}
}
env.try_parsing(true)
.separator(ENV_VAR_SEP)
.ignore_empty(true)
};
// Add default values and environment variables as the sources of the configuration.
let mut layered_config = Config::builder()
.add_source(Config::try_from(&default_opts).context(LoadLayeredConfigSnafu)?)
.add_source(env_source);
// Add config file as the source of the configuration if it is specified.
if let Some(config_file) = config_file {
layered_config = layered_config.add_source(File::new(config_file, FileFormat::Toml));
}
let opts = layered_config
.build()
.context(LoadLayeredConfigSnafu)?
.try_deserialize()
.context(LoadLayeredConfigSnafu)?;
Ok(opts)
}
}
#[derive(Clone, Debug, Default)]
pub struct TopLevelOptions {
pub log_dir: Option<String>,
pub log_level: Option<String>,
#[cfg(test)]
mod tests {
use std::io::Write;
use std::time::Duration;
use common_test_util::temp_dir::create_named_temp_file;
use datanode::datanode::{DatanodeOptions, ObjectStoreConfig};
use super::*;
#[test]
fn test_load_layered_options() {
let mut file = create_named_temp_file();
let toml_str = r#"
mode = "distributed"
enable_memory_catalog = false
rpc_addr = "127.0.0.1:3001"
rpc_hostname = "127.0.0.1"
rpc_runtime_size = 8
mysql_addr = "127.0.0.1:4406"
mysql_runtime_size = 2
[meta_client_options]
timeout_millis = 3000
connect_timeout_millis = 5000
tcp_nodelay = true
[wal]
dir = "/tmp/greptimedb/wal"
file_size = "1GB"
purge_threshold = "50GB"
purge_interval = "10m"
read_batch_size = 128
sync_write = false
[storage.compaction]
max_inflight_tasks = 3
max_files_in_level0 = 7
max_purge_tasks = 32
[logging]
level = "debug"
dir = "/tmp/greptimedb/test/logs"
"#;
write!(file, "{}", toml_str).unwrap();
let env_prefix = "DATANODE_UT";
temp_env::with_vars(
// The following environment variables are layered beneath the config file: they take effect for keys the file does not set.
vec![
(
// storage.manifest.checkpoint_margin = 99
vec![
env_prefix.to_string(),
"storage".to_uppercase(),
"manifest".to_uppercase(),
"checkpoint_margin".to_uppercase(),
]
.join(ENV_VAR_SEP),
Some("99"),
),
(
// storage.type = S3
vec![
env_prefix.to_string(),
"storage".to_uppercase(),
"type".to_uppercase(),
]
.join(ENV_VAR_SEP),
Some("S3"),
),
(
// storage.bucket = mybucket
vec![
env_prefix.to_string(),
"storage".to_uppercase(),
"bucket".to_uppercase(),
]
.join(ENV_VAR_SEP),
Some("mybucket"),
),
(
// storage.manifest.gc_duration = 42s
vec![
env_prefix.to_string(),
"storage".to_uppercase(),
"manifest".to_uppercase(),
"gc_duration".to_uppercase(),
]
.join(ENV_VAR_SEP),
Some("42s"),
),
(
// storage.manifest.checkpoint_on_startup = true
vec![
env_prefix.to_string(),
"storage".to_uppercase(),
"manifest".to_uppercase(),
"checkpoint_on_startup".to_uppercase(),
]
.join(ENV_VAR_SEP),
Some("true"),
),
(
// wal.dir = /other/wal/dir
vec![
env_prefix.to_string(),
"wal".to_uppercase(),
"dir".to_uppercase(),
]
.join(ENV_VAR_SEP),
Some("/other/wal/dir"),
),
(
// meta_client_options.metasrv_addrs = 127.0.0.1:3001,127.0.0.1:3002,127.0.0.1:3003
vec![
env_prefix.to_string(),
"meta_client_options".to_uppercase(),
"metasrv_addrs".to_uppercase(),
]
.join(ENV_VAR_SEP),
Some("127.0.0.1:3001,127.0.0.1:3002,127.0.0.1:3003"),
),
],
|| {
let opts: DatanodeOptions = Options::load_layered_options(
Some(file.path().to_str().unwrap()),
env_prefix,
DatanodeOptions::env_list_keys(),
)
.unwrap();
// Check the configs from environment variables.
assert_eq!(opts.storage.manifest.checkpoint_margin, Some(99));
match opts.storage.store {
ObjectStoreConfig::S3(s3_config) => {
assert_eq!(s3_config.bucket, "mybucket".to_string());
}
_ => panic!("unexpected store type"),
}
assert_eq!(
opts.storage.manifest.gc_duration,
Some(Duration::from_secs(42))
);
assert!(opts.storage.manifest.checkpoint_on_startup);
assert_eq!(
opts.meta_client_options.unwrap().metasrv_addrs,
vec![
"127.0.0.1:3001".to_string(),
"127.0.0.1:3002".to_string(),
"127.0.0.1:3003".to_string()
]
);
// Should be the values from config file, not environment variables.
assert_eq!(opts.wal.dir.unwrap(), "/tmp/greptimedb/wal");
// Should be default values.
assert_eq!(opts.node_id, None);
},
);
}
}
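
Stripped of GreptimeDB's concrete option types, `load_layered_options` above is the standard layering pattern of the `config` crate: defaults first, then environment variables, then the optional TOML file, with later sources overriding earlier ones. A simplified, self-contained sketch (the `AppConfig` type and `APP` prefix are invented for illustration; the builder calls mirror the diff):

use config::{Config, Environment, File, FileFormat};
use serde::{Deserialize, Serialize};

#[derive(Debug, Default, Serialize, Deserialize)]
#[serde(default)]
struct AppConfig {
    addr: String,
    runtime_size: usize,
}

fn load(config_file: Option<&str>) -> Result<AppConfig, config::ConfigError> {
    // Defaults first, then environment variables, then (optionally) the TOML file;
    // sources added later take precedence over sources added earlier.
    let mut builder = Config::builder()
        .add_source(Config::try_from(&AppConfig::default())?)
        .add_source(
            Environment::default()
                .prefix("APP")
                .separator("__")
                .try_parsing(true)
                .ignore_empty(true),
        );
    if let Some(path) = config_file {
        builder = builder.add_source(File::new(path, FileFormat::Toml));
    }
    builder.build()?.try_deserialize()
}

fn main() {
    // e.g. APP__ADDR=0.0.0.0:4000 ./app -> addr comes from the environment,
    // unless a config file sets it.
    println!("{:?}", load(None).unwrap());
}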

View File

@@ -21,14 +21,11 @@ use common_telemetry::logging::LoggingOptions;
use datanode::datanode::{Datanode, DatanodeOptions, ProcedureConfig, StorageConfig, WalConfig};
use datanode::instance::InstanceRef;
use frontend::frontend::FrontendOptions;
use frontend::grpc::GrpcOptions;
use frontend::influxdb::InfluxdbOptions;
use frontend::instance::{FrontendInstance, Instance as FeInstance};
use frontend::mysql::MysqlOptions;
use frontend::opentsdb::OpentsdbOptions;
use frontend::postgres::PostgresOptions;
use frontend::prom::PromOptions;
use frontend::prometheus::PrometheusOptions;
use frontend::service_config::{
GrpcOptions, InfluxdbOptions, MysqlOptions, OpentsdbOptions, PostgresOptions, PromOptions,
PrometheusOptions,
};
use serde::{Deserialize, Serialize};
use servers::http::HttpOptions;
use servers::tls::{TlsMode, TlsOption};
@@ -41,7 +38,6 @@ use crate::error::{
};
use crate::frontend::load_frontend_plugins;
use crate::options::{MixOptions, Options, TopLevelOptions};
use crate::toml_loader;
#[derive(Parser)]
pub struct Command {
@@ -136,6 +132,7 @@ impl StandaloneOptions {
prom_options: self.prom_options,
meta_client_options: None,
logging: self.logging,
..Default::default()
}
}
@@ -156,7 +153,7 @@ pub struct Instance {
}
impl Instance {
pub async fn run(&mut self) -> Result<()> {
pub async fn start(&mut self) -> Result<()> {
// Start the datanode instance before starting services, to avoid requests coming in before internal components are started.
self.datanode
.start_instance()
@@ -184,7 +181,7 @@ impl Instance {
}
}
#[derive(Debug, Parser)]
#[derive(Debug, Default, Parser)]
struct StartCommand {
#[clap(long)]
http_addr: Option<String>,
@@ -212,43 +209,46 @@ struct StartCommand {
tls_key_path: Option<String>,
#[clap(long)]
user_provider: Option<String>,
#[clap(long, default_value = "GREPTIMEDB_STANDALONE")]
env_prefix: String,
}
impl StartCommand {
fn load_options(&self, top_level_options: TopLevelOptions) -> Result<Options> {
let enable_memory_catalog = self.enable_memory_catalog;
let config_file = &self.config_file;
let mut opts: StandaloneOptions = if let Some(path) = config_file {
toml_loader::from_file!(path)?
} else {
StandaloneOptions::default()
};
let mut opts: StandaloneOptions = Options::load_layered_options(
self.config_file.as_deref(),
self.env_prefix.as_ref(),
None,
)?;
opts.enable_memory_catalog = enable_memory_catalog;
opts.enable_memory_catalog = self.enable_memory_catalog;
let mut fe_opts = opts.clone().frontend_options();
let mut logging = opts.logging.clone();
let dn_opts = opts.datanode_options();
opts.mode = Mode::Standalone;
if let Some(dir) = top_level_options.log_dir {
logging.dir = dir;
}
if let Some(level) = top_level_options.log_level {
logging.level = level;
opts.logging.dir = dir;
}
fe_opts.mode = Mode::Standalone;
if let Some(addr) = self.http_addr.clone() {
fe_opts.http_options = Some(HttpOptions {
addr,
..Default::default()
});
if top_level_options.log_level.is_some() {
opts.logging.level = top_level_options.log_level;
}
if let Some(addr) = self.rpc_addr.clone() {
let tls_opts = TlsOption::new(
self.tls_mode.clone(),
self.tls_cert_path.clone(),
self.tls_key_path.clone(),
);
if let Some(addr) = &self.http_addr {
if let Some(http_opts) = &mut opts.http_options {
http_opts.addr = addr.clone()
}
}
if let Some(addr) = &self.rpc_addr {
// The frontend gRPC addr must not conflict with the datanode's default gRPC addr.
let datanode_grpc_addr = DatanodeOptions::default().rpc_addr;
if addr == datanode_grpc_addr {
if addr.eq(&datanode_grpc_addr) {
return IllegalConfigSnafu {
msg: format!(
"gRPC listen address conflicts with datanode reserved gRPC addr: {datanode_grpc_addr}",
@@ -256,56 +256,42 @@ impl StartCommand {
}
.fail();
}
fe_opts.grpc_options = Some(GrpcOptions {
addr,
..Default::default()
});
if let Some(grpc_opts) = &mut opts.grpc_options {
grpc_opts.addr = addr.clone()
}
}
if let Some(addr) = self.mysql_addr.clone() {
fe_opts.mysql_options = Some(MysqlOptions {
addr,
..Default::default()
})
if let Some(addr) = &self.mysql_addr {
if let Some(mysql_opts) = &mut opts.mysql_options {
mysql_opts.addr = addr.clone();
mysql_opts.tls = tls_opts.clone();
}
}
if let Some(addr) = self.prom_addr.clone() {
fe_opts.prom_options = Some(PromOptions { addr })
if let Some(addr) = &self.prom_addr {
opts.prom_options = Some(PromOptions { addr: addr.clone() })
}
if let Some(addr) = self.postgres_addr.clone() {
fe_opts.postgres_options = Some(PostgresOptions {
addr,
..Default::default()
})
if let Some(addr) = &self.postgres_addr {
if let Some(postgres_opts) = &mut opts.postgres_options {
postgres_opts.addr = addr.clone();
postgres_opts.tls = tls_opts;
}
}
if let Some(addr) = self.opentsdb_addr.clone() {
fe_opts.opentsdb_options = Some(OpentsdbOptions {
addr,
..Default::default()
});
if let Some(addr) = &self.opentsdb_addr {
if let Some(opentsdb_addr) = &mut opts.opentsdb_options {
opentsdb_addr.addr = addr.clone();
}
}
if self.influxdb_enable {
fe_opts.influxdb_options = Some(InfluxdbOptions { enable: true });
opts.influxdb_options = Some(InfluxdbOptions { enable: true });
}
let tls_option = TlsOption::new(
self.tls_mode.clone(),
self.tls_cert_path.clone(),
self.tls_key_path.clone(),
);
if let Some(mut mysql_options) = fe_opts.mysql_options {
mysql_options.tls = tls_option.clone();
fe_opts.mysql_options = Some(mysql_options);
}
if let Some(mut postgres_options) = fe_opts.postgres_options {
postgres_options.tls = tls_option;
fe_opts.postgres_options = Some(postgres_options);
}
let fe_opts = opts.clone().frontend_options();
let logging = opts.logging.clone();
let dn_opts = opts.datanode_options();
Ok(Options::Standalone(Box::new(MixOptions {
fe_opts,
@@ -317,12 +303,13 @@ impl StartCommand {
async fn build(self, fe_opts: FrontendOptions, dn_opts: DatanodeOptions) -> Result<Instance> {
let plugins = Arc::new(load_frontend_plugins(&self.user_provider)?);
info!("Standalone start command: {:#?}", self);
info!(
"Standalone frontend options: {:#?}, datanode options: {:#?}",
fe_opts, dn_opts
);
let datanode = Datanode::new(dn_opts.clone())
let datanode = Datanode::new(dn_opts.clone(), Default::default())
.await
.context(StartDatanodeSnafu)?;
@@ -351,46 +338,35 @@ async fn build_frontend(
#[cfg(test)]
mod tests {
use std::default::Default;
use std::io::Write;
use std::time::Duration;
use common_base::readable_size::ReadableSize;
use common_test_util::temp_dir::create_named_temp_file;
use servers::auth::{Identity, Password, UserProviderRef};
use servers::Mode;
use super::*;
use crate::options::ENV_VAR_SEP;
#[tokio::test]
async fn test_try_from_start_command_to_anymap() {
let command = StartCommand {
http_addr: None,
rpc_addr: None,
prom_addr: None,
mysql_addr: None,
postgres_addr: None,
opentsdb_addr: None,
config_file: None,
influxdb_enable: false,
enable_memory_catalog: false,
tls_mode: None,
tls_cert_path: None,
tls_key_path: None,
user_provider: Some("static_user_provider:cmd:test=test".to_string()),
..Default::default()
};
let plugins = load_frontend_plugins(&command.user_provider);
assert!(plugins.is_ok());
let plugins = plugins.unwrap();
let provider = plugins.get::<UserProviderRef>();
assert!(provider.is_some());
let provider = provider.unwrap();
let provider = plugins.get::<UserProviderRef>().unwrap();
let result = provider
.authenticate(
Identity::UserId("test", None),
Password::PlainText("test".to_string().into()),
)
.await;
assert!(result.is_ok());
let _ = result.unwrap();
}
#[test]
@@ -434,6 +410,7 @@ mod tests {
[http_options]
addr = "127.0.0.1:4000"
timeout = "30s"
body_limit = "128MB"
[logging]
level = "debug"
@@ -441,19 +418,9 @@ mod tests {
"#;
write!(file, "{}", toml_str).unwrap();
let cmd = StartCommand {
http_addr: None,
rpc_addr: None,
prom_addr: None,
mysql_addr: None,
postgres_addr: None,
opentsdb_addr: None,
config_file: Some(file.path().to_str().unwrap().to_string()),
influxdb_enable: false,
enable_memory_catalog: false,
tls_mode: None,
tls_cert_path: None,
tls_key_path: None,
user_provider: Some("static_user_provider:cmd:test=test".to_string()),
..Default::default()
};
let Options::Standalone(options) = cmd.load_options(TopLevelOptions::default()).unwrap() else {unreachable!()};
@@ -469,6 +436,10 @@ mod tests {
Duration::from_secs(30),
fe_opts.http_options.as_ref().unwrap().timeout
);
assert_eq!(
ReadableSize::mb(128),
fe_opts.http_options.as_ref().unwrap().body_limit
);
assert_eq!(
"127.0.0.1:4001".to_string(),
fe_opts.grpc_options.unwrap().addr
@@ -484,7 +455,7 @@ mod tests {
);
assert!(fe_opts.influxdb_options.as_ref().unwrap().enable);
assert_eq!("/tmp/greptimedb/test/wal", dn_opts.wal.dir);
assert_eq!("/tmp/greptimedb/test/wal", dn_opts.wal.dir.unwrap());
match &dn_opts.storage.store {
datanode::datanode::ObjectStoreConfig::S3(s3_config) => {
assert_eq!(
@@ -497,26 +468,15 @@ mod tests {
}
}
assert_eq!("debug".to_string(), logging_opts.level);
assert_eq!("debug", logging_opts.level.as_ref().unwrap());
assert_eq!("/tmp/greptimedb/test/logs".to_string(), logging_opts.dir);
}
#[test]
fn test_top_level_options() {
let cmd = StartCommand {
http_addr: None,
rpc_addr: None,
prom_addr: None,
mysql_addr: None,
postgres_addr: None,
opentsdb_addr: None,
config_file: None,
influxdb_enable: false,
enable_memory_catalog: false,
tls_mode: None,
tls_cert_path: None,
tls_key_path: None,
user_provider: Some("static_user_provider:cmd:test=test".to_string()),
..Default::default()
};
let Options::Standalone(opts) = cmd
@@ -529,6 +489,94 @@ mod tests {
};
assert_eq!("/tmp/greptimedb/test/logs", opts.logging.dir);
assert_eq!("debug", opts.logging.level);
assert_eq!("debug", opts.logging.level.unwrap());
}
#[test]
fn test_config_precedence_order() {
let mut file = create_named_temp_file();
let toml_str = r#"
mode = "standalone"
[http_options]
addr = "127.0.0.1:4000"
[logging]
level = "debug"
"#;
write!(file, "{}", toml_str).unwrap();
let env_prefix = "STANDALONE_UT";
temp_env::with_vars(
vec![
(
// logging.dir = /other/log/dir
vec![
env_prefix.to_string(),
"logging".to_uppercase(),
"dir".to_uppercase(),
]
.join(ENV_VAR_SEP),
Some("/other/log/dir"),
),
(
// logging.level = info
vec![
env_prefix.to_string(),
"logging".to_uppercase(),
"level".to_uppercase(),
]
.join(ENV_VAR_SEP),
Some("info"),
),
(
// http_options.addr = 127.0.0.1:24000
vec![
env_prefix.to_string(),
"http_options".to_uppercase(),
"addr".to_uppercase(),
]
.join(ENV_VAR_SEP),
Some("127.0.0.1:24000"),
),
],
|| {
let command = StartCommand {
config_file: Some(file.path().to_str().unwrap().to_string()),
http_addr: Some("127.0.0.1:14000".to_string()),
env_prefix: env_prefix.to_string(),
..Default::default()
};
let top_level_opts = TopLevelOptions {
log_dir: None,
log_level: None,
};
let Options::Standalone(opts) =
command.load_options(top_level_opts).unwrap() else {unreachable!()};
// Should be read from env, env > default values.
assert_eq!(opts.logging.dir, "/other/log/dir");
// Should be read from config file, config file > env > default values.
assert_eq!(opts.logging.level.as_ref().unwrap(), "debug");
// Should be read from cli, cli > config file > env > default values.
assert_eq!(
opts.fe_opts.http_options.as_ref().unwrap().addr,
"127.0.0.1:14000"
);
assert_eq!(
ReadableSize::mb(64),
opts.fe_opts.http_options.as_ref().unwrap().body_limit
);
// Should be default value.
assert_eq!(
opts.fe_opts.grpc_options.unwrap().addr,
GrpcOptions::default().addr
);
},
);
}
}

View File

@@ -1,94 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
macro_rules! from_file {
($path: expr) => {
toml::from_str(
&std::fs::read_to_string($path)
.context(crate::error::ReadConfigSnafu { path: $path })?,
)
.context(crate::error::ParseConfigSnafu)
};
}
pub(crate) use from_file;
#[cfg(test)]
mod tests {
use std::fs::File;
use std::io::Write;
use common_test_util::temp_dir::create_temp_dir;
use serde::{Deserialize, Serialize};
use snafu::ResultExt;
use super::*;
use crate::error::Result;
#[derive(Clone, PartialEq, Debug, Deserialize, Serialize)]
#[serde(default)]
struct MockConfig {
path: String,
port: u32,
host: String,
}
impl Default for MockConfig {
fn default() -> Self {
Self {
path: "test".to_string(),
port: 0,
host: "localhost".to_string(),
}
}
}
#[test]
fn test_from_file() -> Result<()> {
let config = MockConfig {
path: "/tmp".to_string(),
port: 999,
host: "greptime.test".to_string(),
};
let dir = create_temp_dir("test_from_file");
let test_file = format!("{}/test.toml", dir.path().to_str().unwrap());
let s = toml::to_string(&config).unwrap();
assert!(s.contains("host") && s.contains("path") && s.contains("port"));
let mut file = File::create(&test_file).unwrap();
file.write_all(s.as_bytes()).unwrap();
let loaded_config: MockConfig = from_file!(&test_file)?;
assert_eq!(loaded_config, config);
// Only host in file
let mut file = File::create(&test_file).unwrap();
file.write_all("host='greptime.test'\n".as_bytes()).unwrap();
let loaded_config: MockConfig = from_file!(&test_file)?;
assert_eq!(loaded_config.host, "greptime.test");
assert_eq!(loaded_config.port, 0);
assert_eq!(loaded_config.path, "test");
// Truncate the file.
let file = File::create(&test_file).unwrap();
file.set_len(0).unwrap();
let loaded_config: MockConfig = from_file!(&test_file)?;
assert_eq!(loaded_config, MockConfig::default());
Ok(())
}
}

View File

@@ -27,10 +27,10 @@ mod tests {
impl Repl {
fn send_line(&mut self, line: &str) {
self.repl.send_line(line).unwrap();
let _ = self.repl.send_line(line).unwrap();
// read a line to consume the prompt
self.read_line();
let _ = self.read_line();
}
fn read_line(&mut self) -> String {
@@ -51,7 +51,7 @@ mod tests {
#[ignore]
#[test]
fn test_repl() {
let data_dir = create_temp_dir("data");
let data_home = create_temp_dir("data");
let wal_dir = create_temp_dir("wal");
let mut bin_path = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
@@ -65,7 +65,7 @@ mod tests {
"start",
"--rpc-addr=0.0.0.0:4321",
"--node-id=1",
&format!("--data-dir={}", data_dir.path().display()),
&format!("--data-home={}", data_home.path().display()),
&format!("--wal-dir={}", wal_dir.path().display()),
])
.stdout(Stdio::null())
@@ -76,7 +76,7 @@ mod tests {
std::thread::sleep(Duration::from_secs(3));
let mut repl_cmd = Command::new("./greptime");
repl_cmd.current_dir(bin_path).args([
let _ = repl_cmd.current_dir(bin_path).args([
"--log-level=off",
"cli",
"attach",
@@ -105,7 +105,7 @@ mod tests {
test_select(repl);
datanode.kill().unwrap();
datanode.wait().unwrap();
let _ = datanode.wait().unwrap();
}
fn test_create_database(repl: &mut Repl) {

View File

@@ -15,6 +15,7 @@
pub mod bit_vec;
pub mod buffer;
pub mod bytes;
pub mod paths;
#[allow(clippy::all)]
pub mod readable_size;
@@ -40,7 +41,7 @@ impl Plugins {
}
pub fn insert<T: 'static + Send + Sync>(&self, value: T) {
self.lock().insert(value);
let _ = self.lock().insert(value);
}
pub fn get<T: 'static + Send + Sync + Clone>(&self) -> Option<T> {

View File

@@ -0,0 +1,25 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Path constants for table engines, cluster states and the WAL.
/// All paths are relative to `data_home` (file storage) or the root path (S3, OSS, etc.).
/// WAL dir for local file storage
pub const WAL_DIR: &str = "wal/";
/// Data dir for table engines
pub const DATA_DIR: &str = "data/";
/// Cluster state dir
pub const CLUSTER_DIR: &str = "cluster/";
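
For instance, with a file-storage `data_home` of `/var/lib/greptimedb`, the WAL ends up under `/var/lib/greptimedb/wal/`. A trivial, illustrative helper (not part of the crate) that resolves one of these constants against a configured home:

fn resolve(data_home: &str, sub_dir: &str) -> String {
    format!("{}/{}", data_home.trim_end_matches('/'), sub_dir)
}

// resolve("/var/lib/greptimedb", WAL_DIR) == "/var/lib/greptimedb/wal/"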

View File

@@ -119,7 +119,7 @@ mod tests {
#[test]
fn [<test_read_write_ $num_ty _from_vec_buffer>]() {
let mut buf = vec![];
assert!(buf.[<write_ $num_ty _le>]($num_ty::MAX).is_ok());
let _ = buf.[<write_ $num_ty _le>]($num_ty::MAX).unwrap();
assert_eq!($num_ty::MAX, buf.as_slice().[<read_ $num_ty _le>]().unwrap());
}
}
@@ -132,7 +132,7 @@ mod tests {
#[test]
pub fn test_peek_write_from_vec_buffer() {
let mut buf: Vec<u8> = vec![];
assert!(buf.write_from_slice("hello".as_bytes()).is_ok());
buf.write_from_slice("hello".as_bytes()).unwrap();
let mut slice = buf.as_slice();
assert_eq!(104, slice.peek_u8_le().unwrap());
slice.advance_by(1);

View File

@@ -27,6 +27,8 @@ pub const MAX_SYS_TABLE_ID: u32 = MIN_USER_TABLE_ID - 1;
pub const SYSTEM_CATALOG_TABLE_ID: u32 = 0;
/// scripts table id
pub const SCRIPTS_TABLE_ID: u32 = 1;
/// numbers table id
pub const NUMBERS_TABLE_ID: u32 = 2;
pub const MITO_ENGINE: &str = "mito";
pub const IMMUTABLE_FILE_ENGINE: &str = "file";

View File

@@ -36,9 +36,6 @@ pub enum Error {
location: Location,
source: serde_json::error::Error,
},
#[snafu(display("Failed to parse node id: {}", key))]
ParseNodeId { key: String, location: Location },
}
impl ErrorExt for Error {
@@ -47,7 +44,6 @@ impl ErrorExt for Error {
Error::InvalidCatalog { .. }
| Error::DeserializeCatalogEntryValue { .. }
| Error::SerializeCatalogEntryValue { .. } => StatusCode::Unexpected,
Error::ParseNodeId { .. } => StatusCode::InvalidArguments,
}
}

View File

@@ -24,11 +24,13 @@ datafusion.workspace = true
derive_builder = "0.12"
futures.workspace = true
object-store = { path = "../../object-store" }
orc-rust = { git = "https://github.com/WenyXu/orc-rs.git", rev = "0319acd32456e403c20f135cc012441a76852605" }
regex = "1.7"
snafu.workspace = true
tokio.workspace = true
tokio-util.workspace = true
url = "2.3"
paste = "1.0"
[dev-dependencies]
common-test-util = { path = "../test-util" }

View File

@@ -12,24 +12,26 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use std::future::Future;
use arrow::record_batch::RecordBatch;
use async_trait::async_trait;
use datafusion::parquet::format::FileMetaData;
use object_store::Writer;
use snafu::{OptionExt, ResultExt};
use tokio::io::{AsyncWrite, AsyncWriteExt};
use tokio_util::compat::Compat;
use crate::error::{self, Result};
use crate::share_buffer::SharedBuffer;
pub struct BufferedWriter<T, U> {
writer: T,
/// None stands for [`BufferedWriter`] closed.
pub struct LazyBufferedWriter<T, U, F> {
path: String,
writer_factory: F,
writer: Option<T>,
/// `None` means this [`LazyBufferedWriter`] has been closed.
encoder: Option<U>,
buffer: SharedBuffer,
rows_written: usize,
bytes_written: u64,
flushed: bool,
threshold: usize,
}
@@ -42,58 +44,79 @@ pub trait ArrowWriterCloser {
async fn close(mut self) -> Result<FileMetaData>;
}
pub type DefaultBufferedWriter<E> = BufferedWriter<Compat<Writer>, E>;
impl<T: AsyncWrite + Send + Unpin, U: DfRecordBatchEncoder + ArrowWriterCloser>
BufferedWriter<T, U>
impl<
T: AsyncWrite + Send + Unpin,
U: DfRecordBatchEncoder + ArrowWriterCloser,
F: FnMut(String) -> Fut,
Fut: Future<Output = Result<T>>,
> LazyBufferedWriter<T, U, F>
{
/// Closes the `LazyBufferedWriter`, flushing all buffered data to the underlying storage
/// only if any rows have been written.
pub async fn close_with_arrow_writer(mut self) -> Result<(FileMetaData, u64)> {
let encoder = self
.encoder
.take()
.context(error::BufferedWriterClosedSnafu)?;
let metadata = encoder.close().await?;
let written = self.try_flush(true).await?;
// Use `rows_written` to keep track of whether any rows have been written.
// If no rows have been written, we can simply close the underlying
// writer without flushing, so that no file is actually created.
if self.rows_written != 0 {
self.bytes_written += self.try_flush(true).await?;
}
// It's important to shut down the writer: shutdown flushes all pending writes.
self.close().await?;
Ok((metadata, written))
self.close_inner_writer().await?;
Ok((metadata, self.bytes_written))
}
}
impl<T: AsyncWrite + Send + Unpin, U: DfRecordBatchEncoder> BufferedWriter<T, U> {
pub async fn close(&mut self) -> Result<()> {
self.writer.shutdown().await.context(error::AsyncWriteSnafu)
impl<
T: AsyncWrite + Send + Unpin,
U: DfRecordBatchEncoder,
F: FnMut(String) -> Fut,
Fut: Future<Output = Result<T>>,
> LazyBufferedWriter<T, U, F>
{
/// Closes the writer without flushing the buffer data.
pub async fn close_inner_writer(&mut self) -> Result<()> {
if let Some(writer) = &mut self.writer {
writer.shutdown().await.context(error::AsyncWriteSnafu)?;
}
Ok(())
}
pub fn new(threshold: usize, buffer: SharedBuffer, encoder: U, writer: T) -> Self {
pub fn new(
threshold: usize,
buffer: SharedBuffer,
encoder: U,
path: impl AsRef<str>,
writer_factory: F,
) -> Self {
Self {
path: path.as_ref().to_string(),
threshold,
writer,
encoder: Some(encoder),
buffer,
rows_written: 0,
bytes_written: 0,
flushed: false,
writer_factory,
writer: None,
}
}
pub fn bytes_written(&self) -> u64 {
self.bytes_written
}
pub async fn write(&mut self, batch: &RecordBatch) -> Result<()> {
let encoder = self
.encoder
.as_mut()
.context(error::BufferedWriterClosedSnafu)?;
encoder.write(batch)?;
self.try_flush(false).await?;
self.rows_written += batch.num_rows();
self.bytes_written += self.try_flush(false).await?;
Ok(())
}
pub fn flushed(&self) -> bool {
self.flushed
}
pub async fn try_flush(&mut self, all: bool) -> Result<u64> {
let mut bytes_written: u64 = 0;
@@ -106,7 +129,8 @@ impl<T: AsyncWrite + Send + Unpin, U: DfRecordBatchEncoder> BufferedWriter<T, U>
};
let size = chunk.len();
self.writer
self.maybe_init_writer()
.await?
.write_all(&chunk)
.await
.context(error::AsyncWriteSnafu)?;
@@ -117,22 +141,27 @@ impl<T: AsyncWrite + Send + Unpin, U: DfRecordBatchEncoder> BufferedWriter<T, U>
if all {
bytes_written += self.try_flush_all().await?;
}
self.flushed = bytes_written > 0;
self.bytes_written += bytes_written;
Ok(bytes_written)
}
/// Lazily initializes the underlying file writer, so it is only created when data is actually flushed.
async fn maybe_init_writer(&mut self) -> Result<&mut T> {
if let Some(ref mut writer) = self.writer {
Ok(writer)
} else {
let writer = (self.writer_factory)(self.path.clone()).await?;
Ok(self.writer.insert(writer))
}
}
async fn try_flush_all(&mut self) -> Result<u64> {
let remain = self.buffer.buffer.lock().unwrap().split();
let size = remain.len();
self.writer
self.maybe_init_writer()
.await?
.write_all(&remain)
.await
.context(error::AsyncWriteSnafu)?;
Ok(size as u64)
}
}
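
The heart of this refactor is that `maybe_init_writer` creates the underlying writer lazily, so closing a `LazyBufferedWriter` that never received rows never touches the backing store. A stripped-down, self-contained sketch of that pattern (all names here are illustrative, not the crate's API):

use std::future::Future;

struct Lazy<T, F> {
    factory: F,
    inner: Option<T>,
}

impl<T, F, Fut> Lazy<T, F>
where
    F: FnMut() -> Fut,
    Fut: Future<Output = std::io::Result<T>>,
{
    fn new(factory: F) -> Self {
        Self { factory, inner: None }
    }

    // Create the inner value on first use and reuse it afterwards.
    async fn get_or_init(&mut self) -> std::io::Result<&mut T> {
        if self.inner.is_none() {
            self.inner = Some((self.factory)().await?);
        }
        Ok(self.inner.as_mut().expect("initialized above"))
    }
}

#[tokio::main]
async fn main() -> std::io::Result<()> {
    let mut lazy = Lazy::new(|| async { Ok::<_, std::io::Error>(Vec::<u8>::new()) });
    // Nothing is created until the first call below.
    lazy.get_or_init().await?.extend_from_slice(b"hello");
    assert_eq!(lazy.get_or_init().await?.as_slice(), b"hello");
    Ok(())
}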

View File

@@ -17,9 +17,10 @@ use std::io;
use std::str::FromStr;
use async_compression::tokio::bufread::{BzDecoder, GzipDecoder, XzDecoder, ZstdDecoder};
use async_compression::tokio::write;
use bytes::Bytes;
use futures::Stream;
use tokio::io::{AsyncRead, BufReader};
use tokio::io::{AsyncRead, AsyncWriteExt, BufReader};
use tokio_util::io::{ReaderStream, StreamReader};
use crate::error::{self, Error, Result};
@@ -73,37 +74,107 @@ impl CompressionType {
!matches!(self, &Self::Uncompressed)
}
pub fn convert_async_read<T: AsyncRead + Unpin + Send + 'static>(
&self,
s: T,
) -> Box<dyn AsyncRead + Unpin + Send> {
pub const fn file_extension(&self) -> &'static str {
match self {
CompressionType::Gzip => Box::new(GzipDecoder::new(BufReader::new(s))),
CompressionType::Bzip2 => Box::new(BzDecoder::new(BufReader::new(s))),
CompressionType::Xz => Box::new(XzDecoder::new(BufReader::new(s))),
CompressionType::Zstd => Box::new(ZstdDecoder::new(BufReader::new(s))),
CompressionType::Uncompressed => Box::new(s),
}
}
pub fn convert_stream<T: Stream<Item = io::Result<Bytes>> + Unpin + Send + 'static>(
&self,
s: T,
) -> Box<dyn Stream<Item = io::Result<Bytes>> + Send + Unpin> {
match self {
CompressionType::Gzip => {
Box::new(ReaderStream::new(GzipDecoder::new(StreamReader::new(s))))
}
CompressionType::Bzip2 => {
Box::new(ReaderStream::new(BzDecoder::new(StreamReader::new(s))))
}
CompressionType::Xz => {
Box::new(ReaderStream::new(XzDecoder::new(StreamReader::new(s))))
}
CompressionType::Zstd => {
Box::new(ReaderStream::new(ZstdDecoder::new(StreamReader::new(s))))
}
CompressionType::Uncompressed => Box::new(s),
Self::Gzip => "gz",
Self::Bzip2 => "bz2",
Self::Xz => "xz",
Self::Zstd => "zst",
Self::Uncompressed => "",
}
}
}
macro_rules! impl_compression_type {
($(($enum_item:ident, $prefix:ident)),*) => {
paste::item! {
impl CompressionType {
pub async fn encode(&self, content: impl AsRef<[u8]>) -> io::Result<Vec<u8>> {
match self {
$(
CompressionType::$enum_item => {
let mut buffer = Vec::with_capacity(content.as_ref().len());
let mut encoder = write::[<$prefix Encoder>]::new(&mut buffer);
encoder.write_all(content.as_ref()).await?;
encoder.shutdown().await?;
Ok(buffer)
}
)*
CompressionType::Uncompressed => Ok(content.as_ref().to_vec()),
}
}
pub async fn decode(&self, content: impl AsRef<[u8]>) -> io::Result<Vec<u8>> {
match self {
$(
CompressionType::$enum_item => {
let mut buffer = Vec::with_capacity(content.as_ref().len() * 2);
let mut encoder = write::[<$prefix Decoder>]::new(&mut buffer);
encoder.write_all(content.as_ref()).await?;
encoder.shutdown().await?;
Ok(buffer)
}
)*
CompressionType::Uncompressed => Ok(content.as_ref().to_vec()),
}
}
pub fn convert_async_read<T: AsyncRead + Unpin + Send + 'static>(
&self,
s: T,
) -> Box<dyn AsyncRead + Unpin + Send> {
match self {
$(CompressionType::$enum_item => Box::new([<$prefix Decoder>]::new(BufReader::new(s))),)*
CompressionType::Uncompressed => Box::new(s),
}
}
pub fn convert_stream<T: Stream<Item = io::Result<Bytes>> + Unpin + Send + 'static>(
&self,
s: T,
) -> Box<dyn Stream<Item = io::Result<Bytes>> + Send + Unpin> {
match self {
$(CompressionType::$enum_item => Box::new(ReaderStream::new([<$prefix Decoder>]::new(StreamReader::new(s)))),)*
CompressionType::Uncompressed => Box::new(s),
}
}
}
#[cfg(test)]
mod tests {
use super::CompressionType;
$(
#[tokio::test]
async fn [<test_ $enum_item:lower _compression>]() {
let string = "foo_bar".as_bytes().to_vec();
let compress = CompressionType::$enum_item
.encode(&string)
.await
.unwrap();
let decompress = CompressionType::$enum_item
.decode(&compress)
.await
.unwrap();
assert_eq!(decompress, string);
})*
#[tokio::test]
async fn test_uncompression() {
let string = "foo_bar".as_bytes().to_vec();
let compress = CompressionType::Uncompressed
.encode(&string)
.await
.unwrap();
let decompress = CompressionType::Uncompressed
.decode(&compress)
.await
.unwrap();
assert_eq!(decompress, string);
}
}
}
};
}
impl_compression_type!((Gzip, Gzip), (Bzip2, Bz), (Xz, Xz), (Zstd, Zstd));
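
The macro above stamps out matching `encode`/`decode` helpers (plus the reader conversions and per-codec tests) for every supported codec on top of async-compression. A standalone round trip using the same crates, with gzip chosen arbitrarily:

use async_compression::tokio::bufread::GzipDecoder;
use async_compression::tokio::write::GzipEncoder;
use tokio::io::{AsyncReadExt, AsyncWriteExt, BufReader};

#[tokio::main]
async fn main() -> std::io::Result<()> {
    // Compress into an in-memory buffer via the writer-side encoder.
    let mut compressed = Vec::new();
    let mut encoder = GzipEncoder::new(&mut compressed);
    encoder.write_all(b"foo_bar").await?;
    encoder.shutdown().await?; // flushes the gzip trailer

    // Decompress through the bufread-side decoder, as convert_async_read does.
    let mut decoder = GzipDecoder::new(BufReader::new(compressed.as_slice()));
    let mut decompressed = Vec::new();
    let _ = decoder.read_to_end(&mut decompressed).await?;
    assert_eq!(decompressed, b"foo_bar");
    Ok(())
}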

View File

@@ -41,9 +41,6 @@ pub enum Error {
#[snafu(display("empty host: {}", url))]
EmptyHostPath { url: String, location: Location },
#[snafu(display("Invalid path: {}", path))]
InvalidPath { path: String, location: Location },
#[snafu(display("Invalid url: {}, error :{}", url, source))]
InvalidUrl {
url: String,
@@ -51,18 +48,18 @@ pub enum Error {
location: Location,
},
#[snafu(display("Failed to decompression, source: {}", source))]
Decompression {
source: object_store::Error,
location: Location,
},
#[snafu(display("Failed to build backend, source: {}", source))]
BuildBackend {
source: object_store::Error,
location: Location,
},
#[snafu(display("Failed to build orc reader, source: {}", source))]
OrcReader {
location: Location,
source: orc_rust::error::Error,
},
#[snafu(display("Failed to read object from path: {}, source: {}", path, source))]
ReadObject {
path: String,
@@ -148,9 +145,6 @@ pub enum Error {
location: Location,
},
#[snafu(display("Missing required field: {}", name))]
MissingRequiredField { name: String, location: Location },
#[snafu(display("Buffered writer closed"))]
BufferedWriterClosed { location: Location },
}
@@ -173,20 +167,18 @@ impl ErrorExt for Error {
| InvalidConnection { .. }
| InvalidUrl { .. }
| EmptyHostPath { .. }
| InvalidPath { .. }
| InferSchema { .. }
| ReadParquetSnafu { .. }
| ParquetToSchema { .. }
| ParseFormat { .. }
| MergeSchema { .. }
| MissingRequiredField { .. } => StatusCode::InvalidArguments,
| MergeSchema { .. } => StatusCode::InvalidArguments,
Decompression { .. }
| JoinHandle { .. }
JoinHandle { .. }
| ReadRecordBatch { .. }
| WriteRecordBatch { .. }
| EncodeRecordBatch { .. }
| BufferedWriterClosed { .. } => StatusCode::Unexpected,
| BufferedWriterClosed { .. }
| OrcReader { .. } => StatusCode::Unexpected,
}
}
@@ -197,17 +189,16 @@ impl ErrorExt for Error {
fn location_opt(&self) -> Option<common_error::snafu::Location> {
use Error::*;
match self {
OrcReader { location, .. } => Some(*location),
BuildBackend { location, .. } => Some(*location),
ReadObject { location, .. } => Some(*location),
ListObjects { location, .. } => Some(*location),
InferSchema { location, .. } => Some(*location),
ReadParquetSnafu { location, .. } => Some(*location),
ParquetToSchema { location, .. } => Some(*location),
Decompression { location, .. } => Some(*location),
JoinHandle { location, .. } => Some(*location),
ParseFormat { location, .. } => Some(*location),
MergeSchema { location, .. } => Some(*location),
MissingRequiredField { location, .. } => Some(*location),
WriteObject { location, .. } => Some(*location),
ReadRecordBatch { location, .. } => Some(*location),
WriteRecordBatch { location, .. } => Some(*location),
@@ -217,7 +208,6 @@ impl ErrorExt for Error {
UnsupportedBackendProtocol { location, .. } => Some(*location),
EmptyHostPath { location, .. } => Some(*location),
InvalidPath { location, .. } => Some(*location),
InvalidUrl { location, .. } => Some(*location),
InvalidConnection { location, .. } => Some(*location),
UnsupportedCompressionType { location, .. } => Some(*location),

View File

@@ -14,6 +14,7 @@
pub mod csv;
pub mod json;
pub mod orc;
pub mod parquet;
#[cfg(test)]
pub mod tests;
@@ -35,12 +36,12 @@ use datafusion::physical_plan::SendableRecordBatchStream;
use futures::StreamExt;
use object_store::ObjectStore;
use snafu::ResultExt;
use tokio_util::compat::FuturesAsyncWriteCompatExt;
use self::csv::CsvFormat;
use self::json::JsonFormat;
use self::orc::OrcFormat;
use self::parquet::ParquetFormat;
use crate::buffered_writer::{BufferedWriter, DfRecordBatchEncoder};
use crate::buffered_writer::{DfRecordBatchEncoder, LazyBufferedWriter};
use crate::compression::CompressionType;
use crate::error::{self, Result};
use crate::share_buffer::SharedBuffer;
@@ -57,6 +58,18 @@ pub enum Format {
Csv(CsvFormat),
Json(JsonFormat),
Parquet(ParquetFormat),
Orc(OrcFormat),
}
impl Format {
pub fn suffix(&self) -> &'static str {
match self {
Format::Csv(_) => ".csv",
Format::Json(_) => ".json",
Format::Parquet(_) => ".parquet",
&Format::Orc(_) => ".orc",
}
}
}
impl TryFrom<&HashMap<String, String>> for Format {
@@ -72,6 +85,7 @@ impl TryFrom<&HashMap<String, String>> for Format {
"CSV" => Ok(Self::Csv(CsvFormat::try_from(options)?)),
"JSON" => Ok(Self::Json(JsonFormat::try_from(options)?)),
"PARQUET" => Ok(Self::Parquet(ParquetFormat::default())),
"ORC" => Ok(Self::Orc(OrcFormat)),
_ => error::UnsupportedFormatSnafu { format: &format }.fail(),
}
}
@@ -110,6 +124,7 @@ impl ArrowDecoder for arrow::csv::reader::Decoder {
}
}
#[allow(deprecated)]
impl ArrowDecoder for arrow::json::RawDecoder {
fn decode(&mut self, buf: &[u8]) -> result::Result<usize, ArrowError> {
self.decode(buf)
@@ -180,15 +195,14 @@ pub async fn stream_to_file<T: DfRecordBatchEncoder, U: Fn(SharedBuffer) -> T>(
threshold: usize,
encoder_factory: U,
) -> Result<usize> {
let writer = store
.writer(path)
.await
.context(error::WriteObjectSnafu { path })?
.compat_write();
let buffer = SharedBuffer::with_capacity(threshold);
let encoder = encoder_factory(buffer.clone());
let mut writer = BufferedWriter::new(threshold, buffer, encoder, writer);
let mut writer = LazyBufferedWriter::new(threshold, buffer, encoder, path, |path| async {
store
.writer(&path)
.await
.context(error::WriteObjectSnafu { path })
});
let mut rows = 0;
@@ -199,9 +213,8 @@ pub async fn stream_to_file<T: DfRecordBatchEncoder, U: Fn(SharedBuffer) -> T>(
}
// Flushes all pending writes
writer.try_flush(true).await?;
writer.close().await?;
let _ = writer.try_flush(true).await?;
writer.close_inner_writer().await?;
Ok(rows)
}

View File

@@ -17,6 +17,7 @@ use std::str::FromStr;
use std::sync::Arc;
use arrow::csv;
#[allow(deprecated)]
use arrow::csv::reader::infer_reader_schema as infer_csv_schema;
use arrow::record_batch::RecordBatch;
use arrow_schema::{Schema, SchemaRef};
@@ -113,8 +114,7 @@ pub struct CsvConfig {
impl CsvConfig {
fn builder(&self) -> csv::ReaderBuilder {
let mut builder = csv::ReaderBuilder::new()
.with_schema(self.file_schema.clone())
let mut builder = csv::ReaderBuilder::new(self.file_schema.clone())
.with_delimiter(self.delimiter)
.with_batch_size(self.batch_size)
.has_header(self.has_header);
@@ -160,6 +160,7 @@ impl FileOpener for CsvOpener {
}
}
#[allow(deprecated)]
#[async_trait]
impl FileFormat for CsvFormat {
async fn infer_schema(&self, store: &ObjectStore, path: &str) -> Result<Schema> {
@@ -290,20 +291,20 @@ mod tests {
#[test]
fn test_try_from() {
let mut map = HashMap::new();
let map = HashMap::new();
let format: CsvFormat = CsvFormat::try_from(&map).unwrap();
assert_eq!(format, CsvFormat::default());
map.insert(
FORMAT_SCHEMA_INFER_MAX_RECORD.to_string(),
"2000".to_string(),
);
map.insert(FORMAT_COMPRESSION_TYPE.to_string(), "zstd".to_string());
map.insert(FORMAT_DELIMITER.to_string(), b'\t'.to_string());
map.insert(FORMAT_HAS_HEADER.to_string(), "false".to_string());
let map = HashMap::from([
(
FORMAT_SCHEMA_INFER_MAX_RECORD.to_string(),
"2000".to_string(),
),
(FORMAT_COMPRESSION_TYPE.to_string(), "zstd".to_string()),
(FORMAT_DELIMITER.to_string(), b'\t'.to_string()),
(FORMAT_HAS_HEADER.to_string(), "false".to_string()),
]);
let format = CsvFormat::try_from(&map).unwrap();
assert_eq!(

View File

@@ -20,6 +20,7 @@ use std::sync::Arc;
use arrow::datatypes::SchemaRef;
use arrow::json::reader::{infer_json_schema_from_iterator, ValueIter};
use arrow::json::writer::LineDelimited;
#[allow(deprecated)]
use arrow::json::{self, RawReaderBuilder};
use arrow::record_batch::RecordBatch;
use arrow_schema::Schema;
@@ -129,6 +130,7 @@ impl JsonOpener {
}
}
#[allow(deprecated)]
impl FileOpener for JsonOpener {
fn open(&self, meta: FileMeta) -> DataFusionResult<FileOpenFuture> {
open_with_decoder(
@@ -159,8 +161,7 @@ pub async fn stream_to_json(
impl DfRecordBatchEncoder for json::Writer<SharedBuffer, LineDelimited> {
fn write(&mut self, batch: &RecordBatch) -> Result<()> {
self.write(batch.clone())
.context(error::WriteRecordBatchSnafu)
self.write(batch).context(error::WriteRecordBatchSnafu)
}
}
@@ -213,18 +214,18 @@ mod tests {
#[test]
fn test_try_from() {
let mut map = HashMap::new();
let map = HashMap::new();
let format = JsonFormat::try_from(&map).unwrap();
assert_eq!(format, JsonFormat::default());
map.insert(
FORMAT_SCHEMA_INFER_MAX_RECORD.to_string(),
"2000".to_string(),
);
map.insert(FORMAT_COMPRESSION_TYPE.to_string(), "zstd".to_string());
let map = HashMap::from([
(
FORMAT_SCHEMA_INFER_MAX_RECORD.to_string(),
"2000".to_string(),
),
(FORMAT_COMPRESSION_TYPE.to_string(), "zstd".to_string()),
]);
let format = JsonFormat::try_from(&map).unwrap();
assert_eq!(

View File

@@ -0,0 +1,102 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::pin::Pin;
use std::task::{Context, Poll};
use arrow_schema::{Schema, SchemaRef};
use async_trait::async_trait;
use datafusion::arrow::record_batch::RecordBatch as DfRecordBatch;
use datafusion::error::{DataFusionError, Result as DfResult};
use datafusion::physical_plan::RecordBatchStream;
use futures::Stream;
use object_store::ObjectStore;
use orc_rust::arrow_reader::{create_arrow_schema, Cursor};
use orc_rust::async_arrow_reader::ArrowStreamReader;
pub use orc_rust::error::Error as OrcError;
use orc_rust::reader::Reader;
use snafu::ResultExt;
use tokio::io::{AsyncRead, AsyncSeek};
use crate::error::{self, Result};
use crate::file_format::FileFormat;
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub struct OrcFormat;
pub async fn new_orc_cursor<R: AsyncRead + AsyncSeek + Unpin + Send + 'static>(
reader: R,
) -> Result<Cursor<R>> {
let reader = Reader::new_async(reader)
.await
.context(error::OrcReaderSnafu)?;
let cursor = Cursor::root(reader).context(error::OrcReaderSnafu)?;
Ok(cursor)
}
pub async fn new_orc_stream_reader<R: AsyncRead + AsyncSeek + Unpin + Send + 'static>(
reader: R,
) -> Result<ArrowStreamReader<R>> {
let cursor = new_orc_cursor(reader).await?;
Ok(ArrowStreamReader::new(cursor, None))
}
pub async fn infer_orc_schema<R: AsyncRead + AsyncSeek + Unpin + Send + 'static>(
reader: R,
) -> Result<Schema> {
let cursor = new_orc_cursor(reader).await?;
Ok(create_arrow_schema(&cursor))
}
pub struct OrcArrowStreamReaderAdapter<T: AsyncRead + AsyncSeek + Unpin + Send + 'static> {
stream: ArrowStreamReader<T>,
}
impl<T: AsyncRead + AsyncSeek + Unpin + Send + 'static> OrcArrowStreamReaderAdapter<T> {
pub fn new(stream: ArrowStreamReader<T>) -> Self {
Self { stream }
}
}
impl<T: AsyncRead + AsyncSeek + Unpin + Send + 'static> RecordBatchStream
for OrcArrowStreamReaderAdapter<T>
{
fn schema(&self) -> SchemaRef {
self.stream.schema()
}
}
impl<T: AsyncRead + AsyncSeek + Unpin + Send + 'static> Stream for OrcArrowStreamReaderAdapter<T> {
type Item = DfResult<DfRecordBatch>;
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
let batch = futures::ready!(Pin::new(&mut self.stream).poll_next(cx))
.map(|r| r.map_err(|e| DataFusionError::External(Box::new(e))));
Poll::Ready(batch)
}
}
#[async_trait]
impl FileFormat for OrcFormat {
async fn infer_schema(&self, store: &ObjectStore, path: &str) -> Result<Schema> {
let reader = store
.reader(path)
.await
.context(error::ReadObjectSnafu { path })?;
let schema = infer_orc_schema(reader).await?;
Ok(schema)
}
}
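For orientation, here is a minimal usage sketch of the new ORC pieces above. It assumes the module is exposed as `common_datasource::file_format::orc` and that an `ObjectStore` handle is already built; that module path and the `read_orc` helper are assumptions for illustration, not part of this change.

```rust
use common_datasource::file_format::orc::{
    new_orc_stream_reader, OrcArrowStreamReaderAdapter, OrcFormat,
};
use common_datasource::file_format::FileFormat;
use futures::TryStreamExt;
use object_store::ObjectStore;

// Hypothetical helper: infer the Arrow schema of an ORC object, then stream its batches.
async fn read_orc(store: &ObjectStore, path: &str) -> Result<(), Box<dyn std::error::Error>> {
    // Schema inference goes through the new `FileFormat` impl for `OrcFormat`.
    let schema = OrcFormat.infer_schema(store, path).await?;
    println!("inferred schema: {schema:?}");

    // The async object-store reader satisfies `AsyncRead + AsyncSeek`, so it can feed the
    // ORC stream reader; the adapter then exposes a DataFusion-style `RecordBatch` stream.
    let reader = store.reader(path).await?;
    let stream = new_orc_stream_reader(reader).await?;
    let mut batches = OrcArrowStreamReaderAdapter::new(stream);
    while let Some(batch) = batches.try_next().await? {
        println!("read {} rows", batch.num_rows());
    }
    Ok(())
}
```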


@@ -20,7 +20,7 @@ use crate::error::{BuildBackendSnafu, Result};
pub fn build_fs_backend(root: &str) -> Result<ObjectStore> {
let mut builder = Fs::default();
builder.root(root);
let _ = builder.root(root);
let object_store = ObjectStore::new(builder)
.context(BuildBackendSnafu)?
.finish();


@@ -34,28 +34,26 @@ pub fn build_s3_backend(
) -> Result<ObjectStore> {
let mut builder = S3::default();
builder.root(path);
builder.bucket(host);
let _ = builder.root(path).bucket(host);
if let Some(endpoint) = connection.get(ENDPOINT_URL) {
builder.endpoint(endpoint);
let _ = builder.endpoint(endpoint);
}
if let Some(region) = connection.get(REGION) {
builder.region(region);
let _ = builder.region(region);
}
if let Some(key_id) = connection.get(ACCESS_KEY_ID) {
builder.access_key_id(key_id);
let _ = builder.access_key_id(key_id);
}
if let Some(key) = connection.get(SECRET_ACCESS_KEY) {
builder.secret_access_key(key);
let _ = builder.secret_access_key(key);
}
if let Some(session_token) = connection.get(SESSION_TOKEN) {
builder.security_token(session_token);
let _ = builder.security_token(session_token);
}
if let Some(enable_str) = connection.get(ENABLE_VIRTUAL_HOST_STYLE) {
@@ -69,7 +67,7 @@ pub fn build_s3_backend(
.build()
})?;
if enable {
builder.enable_virtual_host_style();
let _ = builder.enable_virtual_host_style();
}
}


@@ -55,7 +55,7 @@ pub fn format_schema(schema: Schema) -> Vec<String> {
pub fn test_store(root: &str) -> ObjectStore {
let mut builder = Fs::default();
builder.root(root);
let _ = builder.root(root);
ObjectStore::new(builder).unwrap().finish()
}
@@ -64,7 +64,7 @@ pub fn test_tmp_store(root: &str) -> (ObjectStore, TempDir) {
let dir = create_temp_dir(root);
let mut builder = Fs::default();
builder.root("/");
let _ = builder.root("/");
(ObjectStore::new(builder).unwrap().finish(), dir)
}
@@ -113,14 +113,14 @@ pub async fn setup_stream_to_json_test(origin_path: &str, threshold: impl Fn(usi
let output_path = format!("{}/{}", dir.path().display(), "output");
stream_to_json(
assert!(stream_to_json(
Box::pin(stream),
tmp_store.clone(),
&output_path,
threshold(size),
)
.await
.unwrap();
.is_ok());
let written = tmp_store.read(&output_path).await.unwrap();
let origin = store.read(origin_path).await.unwrap();
@@ -155,14 +155,14 @@ pub async fn setup_stream_to_csv_test(origin_path: &str, threshold: impl Fn(usiz
let output_path = format!("{}/{}", dir.path().display(), "output");
stream_to_csv(
assert!(stream_to_csv(
Box::pin(stream),
tmp_store.clone(),
&output_path,
threshold(size),
)
.await
.unwrap();
.is_ok());
let written = tmp_store.read(&output_path).await.unwrap();
let origin = store.read(origin_path).await.unwrap();


@@ -0,0 +1,6 @@
hostname,environment,usage_user,usage_system,usage_idle,usage_nice,usage_iowait,usage_irq,usage_softirq,usage_steal,usage_guest,usage_guest_nice,ts
host_0,test,32,58,36,72,61,21,53,12,59,72,2023-04-01T00:00:00+00:00
host_1,staging,12,32,50,84,19,73,38,37,72,2,2023-04-01T00:00:00+00:00
host_2,test,98,5,40,95,64,39,21,63,53,94,2023-04-01T00:00:00+00:00
host_3,test,98,95,7,48,99,67,14,86,36,23,2023-04-01T00:00:00+00:00
host_4,test,32,44,11,53,64,9,17,39,20,7,2023-04-01T00:00:00+00:00


@@ -0,0 +1,11 @@
## Generate ORC data
```bash
python3 -m venv venv
venv/bin/pip install -U pip
venv/bin/pip install -U pyorc
./venv/bin/python write.py
cargo test
```

Binary file not shown.


@@ -0,0 +1,103 @@
# Copyright 2023 Greptime Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import datetime
import pyorc
data = {
"double_a": [1.0, 2.0, 3.0, 4.0, 5.0],
"a": [1.0, 2.0, None, 4.0, 5.0],
"b": [True, False, None, True, False],
"str_direct": ["a", "cccccc", None, "ddd", "ee"],
"d": ["a", "bb", None, "ccc", "ddd"],
"e": ["ddd", "cc", None, "bb", "a"],
"f": ["aaaaa", "bbbbb", None, "ccccc", "ddddd"],
"int_short_repeated": [5, 5, None, 5, 5],
"int_neg_short_repeated": [-5, -5, None, -5, -5],
"int_delta": [1, 2, None, 4, 5],
"int_neg_delta": [5, 4, None, 2, 1],
"int_direct": [1, 6, None, 3, 2],
"int_neg_direct": [-1, -6, None, -3, -2],
"bigint_direct": [1, 6, None, 3, 2],
"bigint_neg_direct": [-1, -6, None, -3, -2],
"bigint_other": [5, -5, 1, 5, 5],
"utf8_increase": ["a", "bb", "ccc", "dddd", "eeeee"],
"utf8_decrease": ["eeeee", "dddd", "ccc", "bb", "a"],
"timestamp_simple": [datetime.datetime(2023, 4, 1, 20, 15, 30, 2000), datetime.datetime.fromtimestamp(int('1629617204525777000')/1000000000), datetime.datetime(2023, 1, 1), datetime.datetime(2023, 2, 1), datetime.datetime(2023, 3, 1)],
"date_simple": [datetime.date(2023, 4, 1), datetime.date(2023, 3, 1), datetime.date(2023, 1, 1), datetime.date(2023, 2, 1), datetime.date(2023, 3, 1)]
}
def infer_schema(data):
schema = "struct<"
for key, value in data.items():
dt = type(value[0])
if dt == float:
dt = "float"
elif dt == int:
dt = "int"
elif dt == bool:
dt = "boolean"
elif dt == str:
dt = "string"
elif key.startswith("timestamp"):
dt = "timestamp"
elif key.startswith("date"):
dt = "date"
else:
print(key,value,dt)
raise NotImplementedError
if key.startswith("double"):
dt = "double"
if key.startswith("bigint"):
dt = "bigint"
schema += key + ":" + dt + ","
schema = schema[:-1] + ">"
return schema
def _write(
schema: str,
data,
file_name: str,
compression=pyorc.CompressionKind.NONE,
dict_key_size_threshold=0.0,
):
output = open(file_name, "wb")
writer = pyorc.Writer(
output,
schema,
dict_key_size_threshold=dict_key_size_threshold,
# use a small number to ensure that compression crosses value boundaries
compression_block_size=32,
compression=compression,
)
num_rows = len(list(data.values())[0])
for x in range(num_rows):
row = tuple(values[x] for values in data.values())
writer.write(row)
writer.close()
with open(file_name, "rb") as f:
reader = pyorc.Reader(f)
list(reader)
_write(
infer_schema(data),
data,
"test.orc",
)


@@ -33,6 +33,8 @@ pub enum StatusCode {
Internal = 1003,
/// Invalid arguments.
InvalidArguments = 1004,
/// The task is cancelled.
Cancelled = 1005,
// ====== End of common status code ================
// ====== Begin of SQL related status code =========
@@ -100,6 +102,7 @@ impl StatusCode {
| StatusCode::Unsupported
| StatusCode::Unexpected
| StatusCode::InvalidArguments
| StatusCode::Cancelled
| StatusCode::InvalidSyntax
| StatusCode::PlanQuery
| StatusCode::EngineExecuteQuery
@@ -125,6 +128,7 @@ impl StatusCode {
| StatusCode::Unsupported
| StatusCode::Unexpected
| StatusCode::Internal
| StatusCode::Cancelled
| StatusCode::PlanQuery
| StatusCode::EngineExecuteQuery
| StatusCode::StorageUnavailable
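A small sketch of how the new variant might be consumed downstream; the `status_to_http` helper and the `common_error::status_code` path are assumptions for illustration only.

```rust
use common_error::status_code::StatusCode;

// Hypothetical mapping: surface cancellation to clients distinctly instead of
// folding it into a generic internal error.
fn status_to_http(code: StatusCode) -> u16 {
    match code {
        StatusCode::Success => 200,
        StatusCode::InvalidArguments | StatusCode::InvalidSyntax => 400,
        // The task was cancelled before it completed.
        StatusCode::Cancelled => 499,
        _ => 500,
    }
}
```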


@@ -8,6 +8,8 @@ license.workspace = true
proc-macro = true
[dependencies]
common-telemetry = { path = "../telemetry" }
backtrace = "0.3"
quote = "1.0"
syn = "1.0"
proc-macro2 = "1.0"


@@ -15,11 +15,13 @@
mod range_fn;
use proc_macro::TokenStream;
use quote::{quote, quote_spanned};
use quote::{quote, quote_spanned, ToTokens};
use range_fn::process_range_fn;
use syn::parse::Parser;
use syn::spanned::Spanned;
use syn::{parse_macro_input, DeriveInput, ItemStruct};
use syn::{
parse_macro_input, AttributeArgs, DeriveInput, ItemFn, ItemStruct, Lit, Meta, NestedMeta,
};
/// Make the struct implement the trait [AggrFuncTypeStore], which is necessary when writing a UDAF.
/// This derive macro is expected to be used along with the attribute macro [as_aggr_func_creator].
@@ -114,3 +116,109 @@ pub fn as_aggr_func_creator(_args: TokenStream, input: TokenStream) -> TokenStre
pub fn range_fn(args: TokenStream, input: TokenStream) -> TokenStream {
process_range_fn(args, input)
}
/// Attribute macro that prints the caller of the annotated function.
/// The caller is printed as its file name and the call-site line number.
///
/// This macro works by injecting the tracking code as the first statement of the annotated
/// function body. The tracking code uses [backtrace-rs](https://crates.io/crates/backtrace) to
/// resolve the callers, so the annotated crate must also depend on the `backtrace` crate.
///
/// # Arguments
/// - `depth`: The max depth of call stack to print. Optional, defaults to 1.
///
/// # Example
/// ```rust, ignore
///
/// #[print_caller(depth = 3)]
/// fn foo() {}
/// ```
#[proc_macro_attribute]
pub fn print_caller(args: TokenStream, input: TokenStream) -> TokenStream {
let mut depth = 1;
let args = parse_macro_input!(args as AttributeArgs);
for meta in args.iter() {
if let NestedMeta::Meta(Meta::NameValue(name_value)) = meta {
let ident = name_value
.path
.get_ident()
.expect("Expected an ident!")
.to_string();
if ident == "depth" {
let Lit::Int(i) = &name_value.lit else { panic!("Expected 'depth' to be a valid int!") };
depth = i.base10_parse::<usize>().expect("Invalid 'depth' value");
break;
}
}
}
let tokens: TokenStream = quote! {
{
let curr_file = file!();
let bt = backtrace::Backtrace::new();
let call_stack = bt
.frames()
.iter()
.skip_while(|f| {
!f.symbols().iter().any(|s| {
s.filename()
.map(|p| p.ends_with(curr_file))
.unwrap_or(false)
})
})
.skip(1)
.take(#depth);
let call_stack = call_stack
.map(|f| {
f.symbols()
.iter()
.map(|s| {
let filename = s
.filename()
.map(|p| format!("{:?}", p))
.unwrap_or_else(|| "unknown".to_string());
let lineno = s
.lineno()
.map(|l| format!("{}", l))
.unwrap_or_else(|| "unknown".to_string());
format!("filename: {}, lineno: {}", filename, lineno)
})
.collect::<Vec<String>>()
.join(", ")
})
.collect::<Vec<_>>();
match call_stack.len() {
0 => common_telemetry::info!("unable to find call stack"),
1 => common_telemetry::info!("caller: {}", call_stack[0]),
_ => {
let mut s = String::new();
s.push_str("[\n");
for e in call_stack {
s.push_str("\t");
s.push_str(&e);
s.push_str("\n");
}
s.push_str("]");
common_telemetry::info!("call stack: {}", s)
}
}
}
}
.into();
let stmt = match syn::parse(tokens) {
Ok(stmt) => stmt,
Err(e) => return e.into_compile_error().into(),
};
let mut item = parse_macro_input!(input as ItemFn);
item.block.stmts.insert(0, stmt);
item.into_token_stream().into()
}
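To show the intended call pattern, here is a hedged usage sketch. The crate name `common_macro` and the `flush` function are assumptions; per the docs above, the annotated crate must also depend on `backtrace` (and on `common-telemetry`, which the injected code logs through).

```rust
use common_macro::print_caller;

// Hypothetical function: the attribute injects code at the top of the body that walks
// the backtrace and logs up to `depth` callers.
#[print_caller(depth = 2)]
fn flush(region_id: u64) {
    let _ = region_id; // real work would go here
}

fn main() {
    // Each call logs the caller's file name and line number via `common_telemetry::info!`
    // before the body of `flush` runs.
    flush(42);
}
```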


@@ -22,7 +22,7 @@ struct Foo {}
#[test]
#[allow(clippy::extra_unused_type_parameters)]
fn test_derive() {
Foo::default();
let _ = Foo::default();
assert_fields!(Foo: input_types);
assert_impl_all!(Foo: std::fmt::Debug, Default, AggrFuncTypeStore);
}


@@ -158,19 +158,19 @@ mod test {
fn test_update_batch() {
// test update empty batch, expect not updating anything
let mut argmax = Argmax::<i32>::default();
assert!(argmax.update_batch(&[]).is_ok());
argmax.update_batch(&[]).unwrap();
assert_eq!(Value::Null, argmax.evaluate().unwrap());
// test update one not-null value
let mut argmax = Argmax::<i32>::default();
let v: Vec<VectorRef> = vec![Arc::new(Int32Vector::from(vec![Some(42)]))];
assert!(argmax.update_batch(&v).is_ok());
argmax.update_batch(&v).unwrap();
assert_eq!(Value::from(0_u64), argmax.evaluate().unwrap());
// test update one null value
let mut argmax = Argmax::<i32>::default();
let v: Vec<VectorRef> = vec![Arc::new(Int32Vector::from(vec![Option::<i32>::None]))];
assert!(argmax.update_batch(&v).is_ok());
argmax.update_batch(&v).unwrap();
assert_eq!(Value::Null, argmax.evaluate().unwrap());
// test update no null-value batch
@@ -180,7 +180,7 @@ mod test {
Some(1),
Some(3),
]))];
assert!(argmax.update_batch(&v).is_ok());
argmax.update_batch(&v).unwrap();
assert_eq!(Value::from(2_u64), argmax.evaluate().unwrap());
// test update null-value batch
@@ -190,7 +190,7 @@ mod test {
None,
Some(4),
]))];
assert!(argmax.update_batch(&v).is_ok());
argmax.update_batch(&v).unwrap();
assert_eq!(Value::from(2_u64), argmax.evaluate().unwrap());
// test update with constant vector
@@ -199,7 +199,7 @@ mod test {
Arc::new(Int32Vector::from_vec(vec![4])),
10,
))];
assert!(argmax.update_batch(&v).is_ok());
argmax.update_batch(&v).unwrap();
assert_eq!(Value::from(0_u64), argmax.evaluate().unwrap());
}
}


@@ -166,19 +166,19 @@ mod test {
fn test_update_batch() {
// test update empty batch, expect not updating anything
let mut argmin = Argmin::<i32>::default();
assert!(argmin.update_batch(&[]).is_ok());
argmin.update_batch(&[]).unwrap();
assert_eq!(Value::Null, argmin.evaluate().unwrap());
// test update one not-null value
let mut argmin = Argmin::<i32>::default();
let v: Vec<VectorRef> = vec![Arc::new(Int32Vector::from(vec![Some(42)]))];
assert!(argmin.update_batch(&v).is_ok());
argmin.update_batch(&v).unwrap();
assert_eq!(Value::from(0_u32), argmin.evaluate().unwrap());
// test update one null value
let mut argmin = Argmin::<i32>::default();
let v: Vec<VectorRef> = vec![Arc::new(Int32Vector::from(vec![Option::<i32>::None]))];
assert!(argmin.update_batch(&v).is_ok());
argmin.update_batch(&v).unwrap();
assert_eq!(Value::Null, argmin.evaluate().unwrap());
// test update no null-value batch
@@ -188,7 +188,7 @@ mod test {
Some(1),
Some(3),
]))];
assert!(argmin.update_batch(&v).is_ok());
argmin.update_batch(&v).unwrap();
assert_eq!(Value::from(0_u32), argmin.evaluate().unwrap());
// test update null-value batch
@@ -198,7 +198,7 @@ mod test {
None,
Some(4),
]))];
assert!(argmin.update_batch(&v).is_ok());
argmin.update_batch(&v).unwrap();
assert_eq!(Value::from(0_u32), argmin.evaluate().unwrap());
// test update with constant vector
@@ -207,7 +207,7 @@ mod test {
Arc::new(Int32Vector::from_vec(vec![4])),
10,
))];
assert!(argmin.update_batch(&v).is_ok());
argmin.update_batch(&v).unwrap();
assert_eq!(Value::from(0_u32), argmin.evaluate().unwrap());
}
}


@@ -192,20 +192,20 @@ mod test {
fn test_update_batch() {
// test update empty batch, expect not updating anything
let mut diff = Diff::<i32, i64>::default();
assert!(diff.update_batch(&[]).is_ok());
diff.update_batch(&[]).unwrap();
assert!(diff.values.is_empty());
assert_eq!(Value::Null, diff.evaluate().unwrap());
// test update one not-null value
let mut diff = Diff::<i32, i64>::default();
let v: Vec<VectorRef> = vec![Arc::new(Int32Vector::from(vec![Some(42)]))];
assert!(diff.update_batch(&v).is_ok());
diff.update_batch(&v).unwrap();
assert_eq!(Value::Null, diff.evaluate().unwrap());
// test update one null value
let mut diff = Diff::<i32, i64>::default();
let v: Vec<VectorRef> = vec![Arc::new(Int32Vector::from(vec![Option::<i32>::None]))];
assert!(diff.update_batch(&v).is_ok());
diff.update_batch(&v).unwrap();
assert_eq!(Value::Null, diff.evaluate().unwrap());
// test update no null-value batch
@@ -216,7 +216,7 @@ mod test {
Some(2),
]))];
let values = vec![Value::from(2_i64), Value::from(1_i64)];
assert!(diff.update_batch(&v).is_ok());
diff.update_batch(&v).unwrap();
assert_eq!(
Value::List(ListValue::new(
Some(Box::new(values)),
@@ -234,7 +234,7 @@ mod test {
Some(4),
]))];
let values = vec![Value::from(5_i64), Value::from(1_i64)];
assert!(diff.update_batch(&v).is_ok());
diff.update_batch(&v).unwrap();
assert_eq!(
Value::List(ListValue::new(
Some(Box::new(values)),
@@ -250,7 +250,7 @@ mod test {
4,
))];
let values = vec![Value::from(0_i64), Value::from(0_i64), Value::from(0_i64)];
assert!(diff.update_batch(&v).is_ok());
diff.update_batch(&v).unwrap();
assert_eq!(
Value::List(ListValue::new(
Some(Box::new(values)),


@@ -188,19 +188,19 @@ mod test {
fn test_update_batch() {
// test update empty batch, expect not updating anything
let mut mean = Mean::<i32>::default();
assert!(mean.update_batch(&[]).is_ok());
mean.update_batch(&[]).unwrap();
assert_eq!(Value::Null, mean.evaluate().unwrap());
// test update one not-null value
let mut mean = Mean::<i32>::default();
let v: Vec<VectorRef> = vec![Arc::new(Int32Vector::from(vec![Some(42)]))];
assert!(mean.update_batch(&v).is_ok());
mean.update_batch(&v).unwrap();
assert_eq!(Value::from(42.0_f64), mean.evaluate().unwrap());
// test update one null value
let mut mean = Mean::<i32>::default();
let v: Vec<VectorRef> = vec![Arc::new(Int32Vector::from(vec![Option::<i32>::None]))];
assert!(mean.update_batch(&v).is_ok());
mean.update_batch(&v).unwrap();
assert_eq!(Value::Null, mean.evaluate().unwrap());
// test update no null-value batch
@@ -210,7 +210,7 @@ mod test {
Some(1),
Some(2),
]))];
assert!(mean.update_batch(&v).is_ok());
mean.update_batch(&v).unwrap();
assert_eq!(Value::from(0.6666666666666666), mean.evaluate().unwrap());
// test update null-value batch
@@ -221,7 +221,7 @@ mod test {
Some(3),
Some(4),
]))];
assert!(mean.update_batch(&v).is_ok());
mean.update_batch(&v).unwrap();
assert_eq!(Value::from(1.6666666666666667), mean.evaluate().unwrap());
// test update with constant vector
@@ -230,7 +230,7 @@ mod test {
Arc::new(Int32Vector::from_vec(vec![4])),
10,
))];
assert!(mean.update_batch(&v).is_ok());
mean.update_batch(&v).unwrap();
assert_eq!(Value::from(4.0), mean.evaluate().unwrap());
}
}


@@ -299,7 +299,7 @@ mod test {
fn test_update_batch() {
// test update empty batch, expect not updating anything
let mut percentile = Percentile::<i32>::default();
assert!(percentile.update_batch(&[]).is_ok());
percentile.update_batch(&[]).unwrap();
assert!(percentile.not_greater.is_empty());
assert!(percentile.greater.is_empty());
assert_eq!(Value::Null, percentile.evaluate().unwrap());
@@ -310,7 +310,7 @@ mod test {
Arc::new(Int32Vector::from(vec![Some(42)])),
Arc::new(Float64Vector::from(vec![Some(100.0_f64)])),
];
assert!(percentile.update_batch(&v).is_ok());
percentile.update_batch(&v).unwrap();
assert_eq!(Value::from(42.0_f64), percentile.evaluate().unwrap());
// test update one null value
@@ -319,7 +319,7 @@ mod test {
Arc::new(Int32Vector::from(vec![Option::<i32>::None])),
Arc::new(Float64Vector::from(vec![Some(100.0_f64)])),
];
assert!(percentile.update_batch(&v).is_ok());
percentile.update_batch(&v).unwrap();
assert_eq!(Value::Null, percentile.evaluate().unwrap());
// test update no null-value batch
@@ -332,7 +332,7 @@ mod test {
Some(100.0_f64),
])),
];
assert!(percentile.update_batch(&v).is_ok());
percentile.update_batch(&v).unwrap();
assert_eq!(Value::from(2_f64), percentile.evaluate().unwrap());
// test update null-value batch
@@ -346,7 +346,7 @@ mod test {
Some(100.0_f64),
])),
];
assert!(percentile.update_batch(&v).is_ok());
percentile.update_batch(&v).unwrap();
assert_eq!(Value::from(4_f64), percentile.evaluate().unwrap());
// test update with constant vector
@@ -358,7 +358,7 @@ mod test {
)),
Arc::new(Float64Vector::from(vec![Some(100.0_f64), Some(100.0_f64)])),
];
assert!(percentile.update_batch(&v).is_ok());
percentile.update_batch(&v).unwrap();
assert_eq!(Value::from(4_f64), percentile.evaluate().unwrap());
// test left border
@@ -371,7 +371,7 @@ mod test {
Some(0.0_f64),
])),
];
assert!(percentile.update_batch(&v).is_ok());
percentile.update_batch(&v).unwrap();
assert_eq!(Value::from(-1.0_f64), percentile.evaluate().unwrap());
// test medium
@@ -384,7 +384,7 @@ mod test {
Some(50.0_f64),
])),
];
assert!(percentile.update_batch(&v).is_ok());
percentile.update_batch(&v).unwrap();
assert_eq!(Value::from(1.0_f64), percentile.evaluate().unwrap());
// test right border
@@ -397,7 +397,7 @@ mod test {
Some(100.0_f64),
])),
];
assert!(percentile.update_batch(&v).is_ok());
percentile.update_batch(&v).unwrap();
assert_eq!(Value::from(2.0_f64), percentile.evaluate().unwrap());
// the following is the result of numpy.percentile
@@ -414,7 +414,7 @@ mod test {
Some(40.0_f64),
])),
];
assert!(percentile.update_batch(&v).is_ok());
percentile.update_batch(&v).unwrap();
assert_eq!(Value::from(6.400000000_f64), percentile.evaluate().unwrap());
// the following is the result of numpy.percentile
@@ -430,7 +430,7 @@ mod test {
Some(95.0_f64),
])),
];
assert!(percentile.update_batch(&v).is_ok());
percentile.update_batch(&v).unwrap();
assert_eq!(
Value::from(9.700_000_000_000_001_f64),
percentile.evaluate().unwrap()


@@ -267,7 +267,7 @@ mod test {
fn test_update_batch() {
// test update empty batch, expect not updating anything
let mut polyval = Polyval::<i32, i64>::default();
assert!(polyval.update_batch(&[]).is_ok());
polyval.update_batch(&[]).unwrap();
assert!(polyval.values.is_empty());
assert_eq!(Value::Null, polyval.evaluate().unwrap());
@@ -277,7 +277,7 @@ mod test {
Arc::new(Int32Vector::from(vec![Some(3)])),
Arc::new(Int64Vector::from(vec![Some(2_i64)])),
];
assert!(polyval.update_batch(&v).is_ok());
polyval.update_batch(&v).unwrap();
assert_eq!(Value::Int64(3), polyval.evaluate().unwrap());
// test update one null value
@@ -286,7 +286,7 @@ mod test {
Arc::new(Int32Vector::from(vec![Option::<i32>::None])),
Arc::new(Int64Vector::from(vec![Some(2_i64)])),
];
assert!(polyval.update_batch(&v).is_ok());
polyval.update_batch(&v).unwrap();
assert_eq!(Value::Null, polyval.evaluate().unwrap());
// test update no null-value batch
@@ -299,7 +299,7 @@ mod test {
Some(2_i64),
])),
];
assert!(polyval.update_batch(&v).is_ok());
polyval.update_batch(&v).unwrap();
assert_eq!(Value::Int64(13), polyval.evaluate().unwrap());
// test update null-value batch
@@ -313,7 +313,7 @@ mod test {
Some(2_i64),
])),
];
assert!(polyval.update_batch(&v).is_ok());
polyval.update_batch(&v).unwrap();
assert_eq!(Value::Int64(13), polyval.evaluate().unwrap());
// test update with constant vector
@@ -325,7 +325,7 @@ mod test {
)),
Arc::new(Int64Vector::from(vec![Some(5_i64), Some(5_i64)])),
];
assert!(polyval.update_batch(&v).is_ok());
polyval.update_batch(&v).unwrap();
assert_eq!(Value::Int64(24), polyval.evaluate().unwrap());
}
}

Some files were not shown because too many files have changed in this diff.