Compare commits

...

243 Commits

Author SHA1 Message Date
Weny Xu
d57b144b2f chore: change test_remove_outdated_meta_task sleep time to 40ms (#2620)
chore: change test_remove_outdated_meta_task sleep time to 300ms
2023-10-18 11:33:35 +00:00
WU Jingdi
46e106bcc3 feat: allow nested range expr in Range Query (#2557)
* feat: enable range expr nesting

* fix: change range expr rewrite format

* chore: organize range query tests

* chore: change range expr name(e.g. MAX(v) RANGE 5s FILL 6)

* chore: add range query test

* chore: fix code advice

* chore: fix ca
2023-10-18 07:03:26 +00:00
localhost
a7507a2b12 chore: change telemetry report url to resolve connectivity issues (#2608)
chore: change otel report url to resolve connectivity issues
2023-10-18 06:58:54 +00:00
Wei
5b8e5066a0 refactor: make ReadableSize more readable. (#2614)
* refactor: ReadableSize is readable.

* docs: Update src/common/base/src/readable_size.rs

---------

Co-authored-by: Yingwen <realevenyag@gmail.com>
2023-10-18 06:32:50 +00:00
Weny Xu
dcd481e6a4 feat: stop the procedure manager if a new leader is elected (#2576)
* feat: stop the procedure manager if a new leader is elected

* chore: apply suggestions from CR

* chore: apply suggestions

* chore: apply suggestions from CR

* feat: add should_report to GreptimeDBTelemetry

Signed-off-by: WenyXu <wenymedia@gmail.com>

* refactor: refactor subscribing leader change loop

---------

Signed-off-by: WenyXu <wenymedia@gmail.com>
2023-10-18 06:12:28 +00:00
zyy17
3217b56cc1 ci: release new version '0.4.0' -> '0.4.1' (#2611) 2023-10-17 07:33:41 +00:00
shuiyisong
eccad647d0 chore: add export data to migrate tool (#2610)
* chore: add export data to migrate tool

* chore: export copy from sql too
2023-10-17 06:33:58 +00:00
Yun Chen
829db8c5c1 fix!: align frontend cmd name to rpc_* (#2609)
fix: align frontend cmd name to rpc_*
2023-10-17 06:18:18 +00:00
Ruihang Xia
9056c3a6aa feat: implement greptime cli export (#2535)
* feat: implement greptime cli export

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix clippy

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* read information schema

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* parse database name from cli params

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-10-17 01:56:52 +00:00
ZhangJian He
d9e7b898a3 feat: add walconfig dir back (#2606)
Signed-off-by: ZhangJian He <shoothzj@gmail.com>
2023-10-16 11:26:06 +00:00
zyy17
59d4081f7a ci: correct image name of dev build (#2603) 2023-10-16 03:54:44 +00:00
zyy17
6e87ac0a0e ci: refine release-cn-artifacts action (#2600)
* ci: add copy-image.sh and upload-artifacts-to-s3.sh

* ci: remove unused options in dev build

* ci: use 'upload-artifacts-to-s3.sh' and 'copy-image.sh' in release-cn-artifacts action

* refactor: refine copy-image.sh
2023-10-13 17:04:06 +08:00
shuiyisong
d89cfd0d4d fix: auth in standalone mode (#2591)
chore: user_provider in standalone mode
2023-10-13 08:37:58 +00:00
Yingwen
8a0054aa89 fix: make nyc-taxi bench work again (#2599)
* fix: invalid requests created by nyc-taxi

* feat: add timestamp to table name

* style: fix clippy

* chore: re-export deps for client

* fix: wait result

* chore: no need to define a prefix constant
2023-10-13 08:16:26 +00:00
Yun Chen
f859932745 fix: convert to ReadableSize & Durations (#2594)
* fix: convert to ReadableSize & Durations

* fix: change more grpc sender/recv message size to ReadableSize

fix: format

fix: cargo fmt

fix: change cmd test to use durations

fix: revert metaclient change

fix: convert default fields in meta client options

fix: human serde meta client durations

* fix: remove millisecond postfix in heartbeat option

* fix: humantime serde on heartbeat

* fix: update config example

* fix: update integration test config

* fix: address pr comments

* fix: fix pr comment on default annotation
2023-10-13 03:28:29 +00:00
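
The commit above replaces raw integers with human-readable sizes and humantime-style durations in the configuration. A minimal sketch of the general pattern, assuming the `serde`, `humantime_serde`, and `toml` crates; the field names are illustrative, not GreptimeDB's actual option names, and the project's real `ReadableSize` type lives in `src/common/base/src/readable_size.rs`:

```rust
use std::time::Duration;

use serde::Deserialize;

#[derive(Debug, Deserialize)]
struct ExampleOptions {
    // A ReadableSize-like value such as "512MB"; kept as a String here so the
    // sketch stays self-contained instead of depending on the project's type.
    max_recv_message_size: String,
    // humantime_serde parses strings like "3s" or "500ms" into a Duration.
    #[serde(with = "humantime_serde")]
    connect_timeout: Duration,
}

fn main() {
    let config = r#"
        max_recv_message_size = "512MB"
        connect_timeout = "3s"
    "#;
    let opts: ExampleOptions = toml::from_str(config).unwrap();
    assert_eq!(opts.connect_timeout, Duration::from_secs(3));
    println!("{opts:?}");
}
```
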
Ruihang Xia
9a8fc08e6a docs(benchmark): update 0.4.0 tsbs result (#2597)
* docs(benchmark): update 0.4.0 tsbs result

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix typo

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-10-13 03:08:14 +00:00
Ruihang Xia
825e4beead build(ci): pin linux runner to ubuntu-20.04 (#2586)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-10-12 18:08:05 +08:00
zyy17
0a23b40321 ci: downgrade builder version: ubuntu 22.04 -> ubuntu 20.04 for compatibility with older glibc versions (>=2.31) (#2592) 2023-10-12 16:46:25 +08:00
Ruihang Xia
cf6ef0a30d chore(cli): deregister cli attach command (#2589)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-10-12 08:11:17 +00:00
dennis zhuang
65a659d136 fix: ensure data_home directory created (#2588)
fix: ensure data_home directory created before creating metadata store, #2587
2023-10-12 07:32:55 +00:00
Ruihang Xia
62bcb45787 feat!: change config name from kv_store to metadata_store (#2585)
feat: change config name from kv_store to metadata_store

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-10-12 06:55:09 +00:00
zyy17
94f3542a4f ci: fix skopeo running errors (#2581)
ci: fix skopeo auth error
2023-10-12 06:13:56 +00:00
LFC
fc3bc5327d ci: release Windows artifacts (#2574)
* ci: release Windows artifacts

* ci: release Windows artifacts
2023-10-12 14:10:59 +08:00
Ning Sun
9e33ddceea ci: run windows tests every night instead of every commit (#2577)
ci: move windows ci to nightly-ci
2023-10-12 02:53:42 +00:00
zyy17
c9bdf4ff9f ci: refine the process of releasing dev-builder images (#2580)
* fix: fix error of releasing android builder image

* fix: run skopeo error

* ci: add 'release-dev-builder-images-cn' job

* ci: add 'disable_building_images'

* fix: add vars

* ci: use skopeo container

* ci: update opts default values
2023-10-12 02:41:54 +00:00
dennis zhuang
0a9972aa9a fix: cache capacity unit in sample config (#2575) 2023-10-11 11:02:39 +00:00
zyy17
76d5b710c8 ci: add more options for releasing dev-builder images (#2573) 2023-10-11 16:24:50 +08:00
zyy17
fe02366ce6 fix: remove unused options and add 'build-android-artifacts' (#2572) 2023-10-11 15:32:58 +08:00
zyy17
d7aeb369a6 refactor: add new action 'release-cn-artifacts' (#2554)
* refactor: add new action 'release-cn-artifacts'

* refactor: refine naming: 'release-artifacts' -> 'publish-github-release'

Signed-off-by: zyy17 <zyylsxm@gmail.com>

---------

Signed-off-by: zyy17 <zyylsxm@gmail.com>
2023-10-11 03:42:04 +00:00
zyy17
9284bb7a2b ci: separate the job of building dev-builder images (#2569) 2023-10-11 11:09:53 +08:00
liyang
e23dd5a44f fix: fix to readme document link (#2566) 2023-10-11 02:45:43 +00:00
zyy17
c60b59adc8 chore: add the steps of building android binary (#2567) 2023-10-11 02:31:11 +00:00
Lei, HUANG
c9c2b3c91f fix: revert memtable pk rb cache to rwlock (#2565)
* fix: revert memtable pk rb cache to rwlock

* feat: refine
2023-10-10 20:51:05 +08:00
Yingwen
7f75190fce chore: update Cargo.lock (#2564) 2023-10-10 16:28:50 +08:00
Yingwen
0a394c73a2 chore: bump version to 0.4.0 (#2563) 2023-10-10 16:16:15 +08:00
JeremyHi
ae95f23e05 feat: add metrics for region server (#2552)
* feat: add metrics for region server

* fix: add comment and remove unused code
2023-10-10 07:40:16 +00:00
Lei, HUANG
6b39f5923d feat: add compaction metrics (#2560)
* feat: add compaction metrics

* feat: add compaction request total count

* fix: CR comments
2023-10-10 07:38:39 +00:00
JeremyHi
ed725d030f fix: support multi addrs while using etcd (#2562)
fix: support multi addrs while using etcd
2023-10-10 07:30:48 +00:00
Wei
4fe7e162af fix: human_time mismatch (#2558)
* fix: human_time mismatch.

* fix: add comment
2023-10-10 07:22:12 +00:00
Yingwen
8a5ef826b9 fix(mito): Do not write to memtables if writing wal is failed (#2561)
* feat: add writes total metrics

* fix: don't write memtable if write ctx is failed

* feat: write rows metrics
2023-10-10 06:55:57 +00:00
Ruihang Xia
07be50403e feat: add basic metrics to query (#2559)
* add metrics to merge scan

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* count series in promql

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* tweak label name

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* tweak label name

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* document metric label

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-10-10 06:55:25 +00:00
Lei, HUANG
8bdef9a348 feat: memtable filter push down (#2539)
* feat: memtable support filter pushdown to prune primary keys

* fix: switch to next time series when pk not selected

* fix: allow predicate evaluation failure

* fix: some clippy warnings

* fix: panic when no primary key in schema

* feat: cache decoded record batch for primary key

* refactor: use arcswap instead of rwlock

* fix: format toml
2023-10-10 04:03:10 +00:00
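
One bullet in the commit above swaps an RwLock-guarded cache for arc-swap. A minimal, self-contained sketch of that pattern (assuming the `arc-swap` crate; the cache contents are hypothetical and not mito2's actual types): readers take a lock-free snapshot, writers build a new value and swap it in atomically.

```rust
use std::sync::Arc;

use arc_swap::ArcSwap;

fn main() {
    // Hypothetical cache: (encoded primary key, decoded representation) pairs.
    let cache: ArcSwap<Vec<(Vec<u8>, String)>> = ArcSwap::from_pointee(Vec::new());

    // Reader path: a cheap, lock-free load of the current snapshot.
    assert!(cache.load().is_empty());

    // Writer path: clone the current value, modify it, and atomically publish it.
    let mut next = cache.load_full().as_ref().clone();
    next.push((b"pk-1".to_vec(), "decoded record batch".to_string()));
    cache.store(Arc::new(next));

    assert_eq!(cache.load().len(), 1);
}
```
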
Yingwen
d4577e7372 feat(mito): add metrics to mito engine (#2556)
* feat: allow discarding a timer

* feat: flush metrics

* feat: flush bytes and region count metrics

* refactor: add as_str to get static string

* feat: add handle request elapsed metrics

* feat: add some write related metrics

* style: fix clippy
2023-10-10 03:53:17 +00:00
dennis zhuang
88f26673f0 fix: adds back http_timeout for frontend subcommand (#2555) 2023-10-10 03:05:16 +00:00
Baasit
19f300fc5a feat: renaming kv directory to metadata (#2549)
* fix: renamed kv directory to metadata directory

* fix: changed function name

* fix: changed function name
2023-10-09 11:43:17 +00:00
Weny Xu
cc83764331 fix: check table exists before allocating table id (#2546)
* fix: check table exists before allocating table_id

* chore: apply suggestions from CR
2023-10-09 11:40:10 +00:00
Yingwen
81aa7a4caf chore(mito): change default batch size/row group size (#2550) 2023-10-09 11:10:12 +00:00
Yingwen
d68dd1f3eb fix: schema validation is skipped once we need to fill a column (#2548)
* test: test different order

* test: add tests for missing and invalid columns

* fix: do not skip schema validation while missing columns

* chore: use field_columns()

* test: add tests for different column order
2023-10-09 09:20:51 +00:00
Lei, HUANG
9b3470b049 feat: android image builder dockerfile (#2541)
* feat: android image builder dockerfile

* feat: add building android dev-builder to ci config file

* fix: add build arg

* feat: use makefile to build image and add strip command
2023-10-09 09:10:14 +00:00
Weny Xu
8cc862ff8a refactor: refactor cache invalidator (#2540) 2023-10-09 08:19:18 +00:00
Weny Xu
81ccb58fb4 refactor!: compare with origin bytes during the transactions (#2538)
* refactor: compare with origin bytes during the transaction

* refactor: use serialize_str instead

* Update src/common/meta/src/key.rs

Co-authored-by: JeremyHi <jiachun_feng@proton.me>

* chore: apply suggestions from CR

---------

Co-authored-by: JeremyHi <jiachun_feng@proton.me>
2023-10-09 08:17:19 +00:00
Weny Xu
ce3c10a86e refactor: de/encode protobuf-encoded byte array with base64 (#2545) 2023-10-09 05:31:44 +00:00
shuiyisong
007f7ba03c refactor: extract plugins crate (#2487)
* chore: move frontend plugins fn

* chore: move datanode plugins to fn

* chore: add opt plugins

* chore: add plugins to meta-srv

* chore: setup meta plugins, wait for router extension

* chore: try use configurator for grpc too

* chore: minor fix fmt

* chore: minor fix fmt

* chore: add start meta_srv for hook

* chore: merge develop

* chore: minor fix

* chore: replace Arc<Plugins> with PluginsRef

* chore: fix header

* chore: remove empty file

* chore: modify comments

* chore: remove PluginsRef type alias

* chore: remove `OptPlugins`
2023-10-09 04:54:27 +00:00
Weny Xu
dfe68a7e0b refactor: check push result out of loop (#2511)
* refactor: check push result out of loop

* chore: apply suggestions from CR
2023-10-09 02:49:48 +00:00
Ruihang Xia
d5e4fcaaff feat: dist plan optimize part 2 (#2543)
* allow udf and scalar fn

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* put CountWildcardRule before dist planner

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* bump datafusion to fix first_value/last_value

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* update sqlness result

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* use retain instead

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-10-09 02:18:36 +00:00
Yingwen
17b385a985 fix: compiler errors under pprof and mem-prof features (#2537)
* fix: compiler errors under pprof feature

* fix: compiler errors under mem-prof feature
2023-10-08 08:28:45 +00:00
shuiyisong
067917845f fix: carry dbname from frontend to datanode (#2520)
* chore: add dbname in region request header for tracking purpose

* chore: fix handle read

* chore: add write meter

* chore: add meter-core to dep

* chore: add converter between RegionRequestHeader and QueryContext & update proto version
2023-10-08 06:30:23 +00:00
Weny Xu
a680133acc feat: enable no delay for mysql, opentsdb, http (#2530)
* refactor: enable no delay for mysql, opentsdb, http

* Apply suggestions from code review

Co-authored-by: Yingwen <realevenyag@gmail.com>

---------

Co-authored-by: Yingwen <realevenyag@gmail.com>
2023-10-08 06:19:52 +00:00
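
For context on the commit above, "no delay" refers to the TCP_NODELAY socket option, which disables Nagle's algorithm so small protocol responses are flushed immediately instead of being buffered. A standard-library-only sketch, not the servers' actual setup code:

```rust
use std::net::{TcpListener, TcpStream};

fn main() -> std::io::Result<()> {
    // Bind an ephemeral listener just so the connect below has a peer.
    let listener = TcpListener::bind("127.0.0.1:0")?;
    let addr = listener.local_addr()?;

    let stream = TcpStream::connect(addr)?;
    // TCP_NODELAY: send small writes right away rather than waiting to coalesce them.
    stream.set_nodelay(true)?;
    assert!(stream.nodelay()?);
    Ok(())
}
```
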
Yingwen
0593c3bde3 fix(mito): pruning for mito2 (#2525)
* fix: pruning for mito2

* chore: refactor projection parameters; add some tests; customize row group size for each flush task.

* chore: pass whole RegionFlushRequest

---------

Co-authored-by: Lei, HUANG <mrsatangel@gmail.com>
2023-10-08 03:45:15 +00:00
Lei, HUANG
0292445476 fix: timestamp range filter (#2533)
* fix: timestamp range filter

* fix: rebase develop

* fix: some style issues
2023-10-08 03:29:02 +00:00
dennis zhuang
ff15bc41d6 feat: improve object storage cache (#2522)
* feat: refactor object storage cache with moka

* chore: minor fixes

* fix: concurrent issues and invalidate cache after write/delete

* chore: minor changes

* fix: cargo lock

* refactor: rename

* chore: change DEFAULT_OBJECT_STORE_CACHE_SIZE to 256MiB

* fix: typo

* chore: style

* fix: toml format

* chore: toml

* fix: toml format

* Update src/object-store/src/layers/lru_cache/read_cache.rs

Co-authored-by: Ruihang Xia <waynestxia@gmail.com>

* chore: update Cargo.toml

Co-authored-by: Yingwen <realevenyag@gmail.com>

* chore: update src/object-store/Cargo.toml

Co-authored-by: Yingwen <realevenyag@gmail.com>

* chore: refactor and apply suggestions

* fix: typo

* feat: adds back allow list for caching

* chore: cr suggestion

Co-authored-by: Yingwen <realevenyag@gmail.com>

* chore: cr suggestion

Co-authored-by: Yingwen <realevenyag@gmail.com>

* refactor: wrap inner Accessor with Arc

* chore: remove run_pending_task in read and write path

* chore: the arc is unnecessary

---------

Co-authored-by: Ruihang Xia <waynestxia@gmail.com>
Co-authored-by: Yingwen <realevenyag@gmail.com>
2023-10-08 03:27:49 +00:00
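
The commit above rebuilds the object-storage read cache on top of the `moka` crate. A minimal sketch of a byte-weighted moka cache; the key format and the 256 MiB figure mirror the commit's description and are illustrative, not the project's exact code:

```rust
use moka::sync::Cache;

fn main() {
    // Capacity is counted in bytes via the weigher, so max_capacity acts like a
    // DEFAULT_OBJECT_STORE_CACHE_SIZE-style limit (256 MiB here).
    let cache: Cache<String, Vec<u8>> = Cache::builder()
        .max_capacity(256 * 1024 * 1024)
        .weigher(|_key: &String, value: &Vec<u8>| value.len() as u32)
        .build();

    let key = "s3://bucket/data.parquet:0-4096".to_string();
    cache.insert(key.clone(), vec![0u8; 4096]);
    assert!(cache.get(&key).is_some());

    // After a write or delete to the underlying object, the cached range is
    // dropped, matching the commit's "invalidate cache after write/delete".
    cache.invalidate(&key);
}
```
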
Yingwen
657542c0b8 feat(mito): Cache repeated vector for tags (#2523)
* feat: add vector_cache to CacheManager

* feat: cache repeated vectors

* feat: skip decoding pk if output doesn't contain tags

* test: add TestRegionMetadataBuilder

* test: test ProjectionMapper

* test: test vector cache

* test: test projection mapper convert

* style: fix clippy

* feat: do not cache vector if it is too large

* docs: update comment
2023-10-07 11:36:00 +00:00
Ning Sun
0ad3fb6040 fix: mysql timezone settings (#2534)
* fix: restore time zone settings for mysql

* test: add integration test for time zone

* test: fix unit test for check
2023-10-07 10:21:32 +00:00
Bamboo1
b44e39f897 feat: the schema of RegionMetadata is not output during debug (#2498)
* feat: the schema of RegionMetadata is not output during debug because column_metadatas contains duplicate information

Signed-off-by: ZhuZiyi <zyzhu2001@gmail.com>

* feat: the id_to_index of RegionMetadata is not output during debug

Signed-off-by: ZhuZiyi <zyzhu2001@gmail.com>

* feat: add debug trait

Signed-off-by: ZhuZiyi <zyzhu2001@gmail.com>

* feat: use default debug in ConcreteDataType

Signed-off-by: ZhuZiyi <zyzhu2001@gmail.com>

* chore: add std::fmt

Signed-off-by: ZhuZiyi <zyzhu2001@gmail.com>

* test: add debug trait test

Signed-off-by: ZhuZiyi <zyzhu2001@gmail.com>

* chore: typo

Signed-off-by: ZhuZiyi <zyzhu2001@gmail.com>

* chore: resolve conversation

Signed-off-by: ZhuZiyi <zyzhu2001@gmail.com>

* chore: format

Signed-off-by: ZhuZiyi <zyzhu2001@gmail.com>

* fix: test bug

Signed-off-by: ZhuZiyi <zyzhu2001@gmail.com>

---------

Signed-off-by: ZhuZiyi <zyzhu2001@gmail.com>
2023-10-07 08:01:54 +00:00
Weny Xu
f50f2a84a9 fix: open region missing options (#2473)
* fix: open region missing options

* refactor: remove redundant clone

* chore: apply suggestions from CR

* chore: apply suggestions

* chore: apply suggestions

* test: add test for initialize_region_server

* feat: introduce RegionInfo
2023-10-07 07:17:16 +00:00
Yingwen
fe783c7c1f perf(mito): Use a heap to merge batches for the same key (#2521)
* feat: merge by heap

* fix: fix heap order

* feat: avoid pop/push next and refactor some functions

* feat: replace merge_batches and fixe tests

* test: add test that a key is deleted

* fix: skip empty batch

* style: clippy

* chore: fix typos
2023-10-07 02:56:08 +00:00
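
The heap-based merge in the commit above is the classic k-way merge. A generic, self-contained sketch of the idea, with plain integers standing in for batches that share a primary key (this is not mito2's actual reader code):

```rust
use std::cmp::Reverse;
use std::collections::BinaryHeap;

// Merge several individually sorted sequences into one sorted output using a
// min-heap keyed by each sequence's next element.
fn merge_sorted(inputs: &[Vec<u64>]) -> Vec<u64> {
    let mut heap = BinaryHeap::new();
    for (src, seq) in inputs.iter().enumerate() {
        if let Some(&first) = seq.first() {
            heap.push(Reverse((first, src, 0usize)));
        }
    }

    let mut merged = Vec::new();
    while let Some(Reverse((value, src, pos))) = heap.pop() {
        merged.push(value);
        if let Some(&next) = inputs[src].get(pos + 1) {
            heap.push(Reverse((next, src, pos + 1)));
        }
    }
    merged
}

fn main() {
    let merged = merge_sorted(&[vec![1, 4, 7], vec![2, 5], vec![3, 6]]);
    assert_eq!(merged, vec![1, 2, 3, 4, 5, 6, 7]);
}
```
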
Weny Xu
00fe7d104e feat: enable tcp no_delay by default for internal services (#2527) 2023-10-07 02:35:28 +00:00
Zhenchi
201acd152d fix: missing file engine with default options (#2519)
Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>
2023-09-28 10:25:12 +00:00
Niwaka
04dbd835a1 feat: support greatest function (#2490)
* feat: support greatest function

* feat: make greatest take date_type as input

* fix: move sqlness test into common/function/time.sql

* fix: avoid using unwrap

* fix: use downcast

* refactor: simplify arrow cast
2023-09-28 10:25:09 +00:00
Wenjie0329
e3d333258b docs: add event banner (#2518) 2023-09-28 08:08:43 +00:00
Ruihang Xia
10ecc30817 feat: pushdown aggr, limit and sort plan (#2495)
* check partition for aggr plan

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* handle empty partition rule

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* remove CheckPartition option

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add test

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* update some valid sqlness result

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* opt-out promql plan and update sqlness

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix limit

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix insert select subquery

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* update unit test result

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix typo

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* Update src/query/src/dist_plan/analyzer.rs

Co-authored-by: Yingwen <realevenyag@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
Co-authored-by: Yingwen <realevenyag@gmail.com>
2023-09-28 06:35:45 +00:00
JeremyHi
52ac093110 fix: drop table 0 rows affected (#2515) 2023-09-28 06:21:18 +00:00
Zhenchi
1f1d72bdb8 feat: defensively specify limit parameter for file stream (#2517)
Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>
2023-09-28 06:14:27 +00:00
Zhenchi
7edafc3407 feat: push down filters to region engine (#2513)
feat: pushdown filters to region engine

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>
2023-09-27 13:50:44 +00:00
LFC
ccd6de8d6b fix: allow .(dot) literal in table name (#2483)
* fix: allow `.`(dot) literal in table name

* fix: resolve PR comments
2023-09-27 11:50:07 +00:00
shuiyisong
ee8d472aae chore: tune return msg (#2506)
* chore: test return msg

* fix: test_child_error

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* chore: fix test

* chore: minor fix grpc return value

* chore: format return msg

* chore: use root error as return value

* chore: fix empty err display

* chore: iter through external error

* chore: remove err msg

* chore: remove unused field

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
Co-authored-by: Ruihang Xia <waynestxia@gmail.com>
2023-09-27 10:40:25 +00:00
Weny Xu
9282e59a3b fix: re-create heartbeat stream ASAP (#2499)
* chore: set default connect_timeout_millis to 1000

* fix: re-create heartbeat stream ASAP

* chore: apply suggestions
2023-09-27 04:00:16 +00:00
Ruihang Xia
fbe2f2df46 refactor: simplify warn! and error! macros (#2503)
* refactor: simplify the error! and warn! macros

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* support display format

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* err.msg to err

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-09-27 03:07:03 +00:00
Yingwen
db6ceda5f0 fix(mito): fix region drop task runs multiple times but never cleans the dir (#2504)
fix: fix region drop task runs multiple times but never cleans the directory
2023-09-27 02:58:17 +00:00
Ruihang Xia
e352fb4495 fix: check for table scan before expanding (#2491)
* fix: check for table scan before expanding

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* change assert_ok to unwrap

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix clippy warning

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* update sqlness result

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* don't skip dml

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* uncomment ignored tests

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-09-26 12:12:08 +00:00
Yingwen
a6116bb866 feat(mito): Add cache manager (#2488)
* feat: add cache manager

* feat: add cache to reader builder

* feat: add AsyncFileReaderCache

* feat: Impl AsyncFileReaderCache

* chore: move moka dep to workspace

* feat: add moka cache to the manager

* feat: implement parquet meta cache

* test: test cache manager

* feat: consider vec size

* style: fix clippy

* test: fix config api test

* feat: divide cache

* test: test disabling meta cache

* test: fix config api test

* feat: remove meta cache if file is purged
2023-09-26 11:46:19 +00:00
Ruihang Xia
515ce825bd feat: stack trace style debug print for error (#2489)
* impl macro stack_trace_debug

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* manually mark external error

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* ignore warnings

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix clippy warnings

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* use debug print

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* simplify the error and warn macro

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix ut

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add docs

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* replace snafu backtrace with location

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-09-26 11:23:21 +00:00
Vanish
7fc9604735 feat: distribute truncate table in region server (#2414)
* feat: distribute truncate table

* chore: add metrics for truncate table

* test: add sqlness test

* chore: cr

* test: add multi truncate

* chore: add trace id to the header
2023-09-26 11:14:14 +00:00
Zhenchi
a4282415f7 fix: convert datetime to chrono datetime (#2497)
* fix: convert datetime to chrono datetime

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* chore: typo

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* fix the bad fix

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

---------

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>
2023-09-26 09:04:12 +00:00
Zhenchi
0bf26642a4 feat: re-support query engine execute dml (#2484)
* feat: re-support query engine execute dml

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* chore: remove region_number in InsertRequest

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* chore: add doc comments

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

---------

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>
2023-09-26 08:37:04 +00:00
Weny Xu
230a3026ad fix: dn doesn't have a chance to send a heartbeat to the new leader (#2471)
* refactor: set meta leader lease secs to 3s

* fix: correct default heartbeat interval

* refactor: ask meta leader in parallel

* feat: configure heartbeat client timeout to 500ms

* fix: trigger to send heartbeat immediately after fail

* fix: fix clippy
2023-09-26 05:05:38 +00:00
Wei
54e506a494 refactor: datetime time unit (#2469)
* refactor: datetime time unit

* Update src/common/time/src/datetime.rs

Co-authored-by: Yingwen <realevenyag@gmail.com>

* chore: cr.

---------

Co-authored-by: Yingwen <realevenyag@gmail.com>
2023-09-25 10:00:56 +00:00
Yingwen
7ecfaa240f refactor(mito): remove #[allow(dead_code)] (#2479) 2023-09-25 09:20:00 +00:00
LFC
c0f080df26 fix: print root cause error message to user facing interface (#2486) 2023-09-25 08:44:49 +00:00
Niwaka
f9351e4fb5 chore: add integration test for issue2437 (#2481) 2023-09-25 06:23:16 +00:00
zyy17
00272d53cc chore: fix typo (#2477) 2023-09-24 06:47:14 +00:00
JeremyHi
7310ec0bb3 chore: refactor options (#2476) 2023-09-24 02:12:33 +00:00
Yingwen
73842f10e7 fix(mito): normalize region dir in RegionOpener (#2475)
fix: normalize region dir in RegionOpener
2023-09-23 10:06:00 +00:00
Yingwen
32d1d68441 fix(mito): reset is_sorted to true after the merger finishes one series (#2474)
fix: reset is_sorted flag to true after the merger finishes one series
2023-09-23 10:05:34 +00:00
Ning Sun
ffa729cdf5 feat: implement storage for OTLP histogram (#2282)
* feat: implement new histogram data model

* feat:  use prometheus table format for histogram

* refactor: remove duplicated code

* fix: histogram tag column

* fix: use accumulated count in buckets

* refactor: using row based protocol for otlp WIP

* refactor: use row based writer for otlp.

Also updated row writer for owned keys

* refactor: use row writers for otlp

* test: add integration tests for histogram

* refactor: change le column name
2023-09-23 07:59:14 +00:00
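
One detail worth unpacking from the commit above is the "accumulated count in buckets" bullet: Prometheus-format histograms store a cumulative count per upper bound (the `le` label), while OTLP delivers per-bucket counts, so the counts must be summed during conversion. A small illustrative sketch of that conversion, not the repository's OTLP row-writer code:

```rust
// Turn per-bucket counts into cumulative counts keyed by the `le` upper bound.
fn to_cumulative(upper_bounds: &[f64], bucket_counts: &[u64]) -> Vec<(f64, u64)> {
    let mut running_total = 0u64;
    upper_bounds
        .iter()
        .zip(bucket_counts)
        .map(|(le, count)| {
            running_total += count;
            (*le, running_total)
        })
        .collect()
}

fn main() {
    // Buckets (-inf, 1], (1, 5], (5, 10] observed 3, 2 and 1 times respectively.
    let cumulative = to_cumulative(&[1.0, 5.0, 10.0], &[3, 2, 1]);
    assert_eq!(cumulative, vec![(1.0, 3), (5.0, 5), (10.0, 6)]);
}
```
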
JeremyHi
9d0de25bff chore: typo (#2470) 2023-09-22 09:47:34 +00:00
Wei
aef9e7bfc3 refactor: not allowed int64 type as time index (#2460)
* refactor: remove is_timestamp_compatible.

* chore: fmt

* refactor: remove int64 to timestamp match

* chore

* chore: apply suggestions from code review

Co-authored-by: dennis zhuang <killme2008@gmail.com>

* chore: fmt

---------

Co-authored-by: dennis zhuang <killme2008@gmail.com>
2023-09-22 06:28:02 +00:00
Yingwen
c6e95ffe63 fix(mito): compaction scheduler schedules more tasks than expected (#2466)
* test: test on_compaction_finished

* fix: avoid submitting the same region to compact

* feat: persist and recover compaction time window

* test: fix test

* test: sort like result
2023-09-22 06:13:12 +00:00
Yingwen
c9f8b9c7c3 feat: update proto and remove create_if_not_exists (#2467) 2023-09-22 03:24:49 +00:00
Baasit
688e64632d feat: support for show full tables (#2410)
* feat: added show tables command

* fix(tests): fixed parser and statement unit tests

* chore: implemented display trait for table type

* fix: handled no tabletype and error for unsupported command in show database

* chore: removed full as a show kind, instead as a show option

* chore(tests): fixed failing test and added more tests for show full

* chore: refactored table types to use filters

* fix: changed table_type to tables
2023-09-22 02:34:57 +00:00
JeremyHi
8e5eaf5472 chore: remove unused region_stats method from table (#2458)
chore: remove unused region_status method from table
2023-09-22 02:27:29 +00:00
LinFeng
621c6f371b feat: limit grpc message size (#2459)
* feat: add two grpc config options

Those options are for:
* Limit receiving (decoding) message size
* Limit sending (encoding) message size

* test: add integration tests for message size limit
2023-09-22 02:07:46 +00:00
Ruihang Xia
4c7ad44605 refactor: remove SqlStatementExecutor (#2464)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-09-22 01:57:48 +00:00
Weny Xu
6306aeabf0 chore: bump opendal to 0.40 (#2465) 2023-09-21 14:25:23 +00:00
JeremyHi
40781ec754 fix: test on windows (#2462)
* fix: test on windows

* fix: fix windows root

* fix: use relative path instead of root

* fix: remove incorrect replace

* fix: fix tests

---------

Co-authored-by: WenyXu <wenymedia@gmail.com>
2023-09-21 10:57:56 +00:00
zyy17
c7b490e1a0 ci: expand upload retry timeout (#2461) 2023-09-21 10:02:15 +00:00
Ruihang Xia
e3f53a8060 fix: add slash after generated region_dir (#2463)
* fix: add slash after generated region_dir

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* update ut

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-09-21 07:46:05 +00:00
Weny Xu
580d11b1e1 refactor: unify table metadata cache invalidator (#2449)
* refactor: unify table metadata cache invalidator

* chore: apply from suggestions
2023-09-21 03:45:49 +00:00
shuiyisong
20f4f7971a refactor: remove source and location in snafu display (#2428)
* refactor: remove source pt 1

* refactor: remove source pt 2

* refactor: remove source pt 3

* refactor: remove location pt 1

* refactor: remove location pt 2

* chore: remove rustc files

* chore: fix error case

* chore: fix test

* chore: fix test

* chore: fix cr issue

Co-authored-by: fys <40801205+fengys1996@users.noreply.github.com>

---------

Co-authored-by: fys <40801205+fengys1996@users.noreply.github.com>
2023-09-21 02:55:24 +00:00
dennis zhuang
9863e501f1 test: revert ignored tests (#2455) 2023-09-21 02:33:18 +00:00
zyy17
df0877111e ci: make upload-to-s3 configurable(for now, it's false) (#2456) 2023-09-20 14:12:54 +00:00
dennis zhuang
23cc7d82e5 feat: supports binary data type (#2454) 2023-09-20 12:53:19 +00:00
Ruihang Xia
34d6288945 feat: bring back sqlness and integration tests (#2448)
* enable integration test

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* update sqlness result

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* disable sqlness region failover

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* enable sqlness in CI

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* sort unstable result

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* set require_lease_before_startup to true

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix: fix inconsistent cache

* replace windows path chars

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* ignore some integration cases in windows

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* Revert "ignore some integration cases in windows"

This reverts commit 122478b7c1.

* disable windows for now

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix: fix close region bug in RegionHeartbeatResponseHandler

* disable failover tests

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
Co-authored-by: WenyXu <wenymedia@gmail.com>
2023-09-20 09:17:30 +00:00
JeremyHi
567fbad647 chore: type alias typo (#2452)
chore: typo
2023-09-20 07:53:35 +00:00
Ruihang Xia
a5c499572c feat: open region in parallel (#2451)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-09-20 07:40:17 +00:00
JeremyHi
ca50ba5dc4 fix: remark region as inactive on leader changed (#2446)
* fix: remark region as inactive on leader changed

* chore: by comment
2023-09-20 06:37:27 +00:00
Yingwen
17e560c909 feat(mito): Allow to retry create request and alter request (#2447)
* feat: RegionMetadataBuilder allow adding/dropping columns multiple times

* test: test add if not exists/drop if exists

* feat: change validator and add need_alter

* test: fix tests and test need_alter

* test: test alter retry

* feat: open before create

* style: fix clippy
2023-09-20 06:36:46 +00:00
Weny Xu
339e12c64a fix: fix alter table verification (#2437)
* fix: fix verify alter

* refactor: move AlterTable UpdateMetadata to last step

* refactor: send region request in parallel

* Update src/table/src/metadata.rs

Co-authored-by: LFC <990479+MichaelScofield@users.noreply.github.com>

* Update src/table/src/metadata.rs

Co-authored-by: JeremyHi <jiachun_feng@proton.me>

---------

Co-authored-by: LFC <990479+MichaelScofield@users.noreply.github.com>
Co-authored-by: JeremyHi <jiachun_feng@proton.me>
2023-09-19 13:40:48 +00:00
Ruihang Xia
0f79ccab31 refactor: remove the old mito engine (#2443)
Co-authored-by: Even Yag <realevenyag@gmail.com>
2023-09-19 09:30:13 +00:00
Yingwen
7b606ed289 feat(mito): make use of options in RegionCreate/OpenRequest (#2436)
* refactor: move RegionOptions to options mod

* refactor: define compaction strategy in region/options.rs

* feat: use duration for time window

* refactor: rename CompactionStrategy to CompactionOptions

* feat: use serde to parse options

* feat: parse options

* feat: set options on creation/opening

* test: test create/open with options

* chore: remove todo

* feat: get compaction ttl and options from RegionOptions

* style: fix clippy

* chore: Remove unused engine_options

* style: fix clippy

* chore: remove todo
2023-09-19 09:06:09 +00:00
Weny Xu
1fb2d95c5f fix: fix open region missing path (#2441)
* fix: fix open region missing path

* fix: correct log

* chore: apply suggestions from CR

* fix: fix tests
2023-09-19 08:50:59 +00:00
Wei
8ee62a7d90 fix: parse i64 to different kinds of timestamp (#2440)
* feat: support i64 to multi-timestamp.

* chore: fmt
2023-09-19 08:26:25 +00:00
Ruihang Xia
802229de87 fix: type cast bugs found by sqlness (#2438)
* update valid results

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* accomplish datatype

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* cast null

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix unit tests

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-09-19 08:20:41 +00:00
Zhenchi
deac284973 refactor: RegionRequestHandler -> RegionQueryHandler (#2439)
* refactor: RegionRequestHandler -> RegionQueryHandler

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* refactor: rename FrontendRegionQueryHandler

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* chore: make RegionInvoker private

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

---------

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>
2023-09-19 08:19:58 +00:00
Wei
5805e8d4b6 feat: type conversion between Values (#2394)
* feat: add cast() in datatype trait.

* feat: add cast for primitive type

* feat: add unit test cases

* test: add datetime/time cases.

* refactor: time_type cast function.

* chore: typos.

* refactor code.

* feat: add can_cast_type func.

* chore: rename cast to try_cast

* feat: impl cast_with_opt

* chore: pub use cast_with_opt

* chore: add timezone for test

* Update src/common/time/src/date.rs

Co-authored-by: dennis zhuang <killme2008@gmail.com>

* chore: duration type

---------

Co-authored-by: dennis zhuang <killme2008@gmail.com>
2023-09-18 14:25:38 +00:00
dennis zhuang
342cc0a4c4 fix: compile error after updating protos (#2435) 2023-09-18 12:12:39 +00:00
Weny Xu
df6c79a378 fix: check version before alter region (#2433)
* fix: check version before alter region

* chore: apply suggestions from CR

* Update src/mito2/src/worker/handle_alter.rs

Co-authored-by: dennis zhuang <killme2008@gmail.com>

---------

Co-authored-by: dennis zhuang <killme2008@gmail.com>
2023-09-18 11:49:26 +00:00
dennis zhuang
5566f34bd1 feat: make scripts table work again (#2420)
* feat: make scripts table work again

* chore: typo

* fix: license header

* Update src/table/src/metadata.rs

Co-authored-by: Ruihang Xia <waynestxia@gmail.com>

* chore: cr comments

---------

Co-authored-by: Ruihang Xia <waynestxia@gmail.com>
2023-09-18 11:43:21 +00:00
Wei
14e6998d41 feat: impl duration datatype and vectors (#2180)
* feat: impl datatype, vector traits for duration.

* feat: duration and grpc.

* test: add unit test cases.

* chore: style and test case.

* fix: update greptime-proto version and helper.rs

* chore: fix type name.

* Update src/datatypes/src/data_type.rs

Co-authored-by: Yingwen <realevenyag@gmail.com>

* chore: cr.

* chore: fix greptime-proto

---------

Co-authored-by: Yingwen <realevenyag@gmail.com>
2023-09-18 11:28:06 +00:00
Weny Xu
43476e1ff9 refactor: rename coordination to require_lease_before_startup (#2431) 2023-09-18 11:07:42 +00:00
Weny Xu
c42cce57ca fix: fix incorrect matches (#2430)
* fix: fix incorrect matches

* fix: fix incorrect status code
2023-09-18 10:53:32 +00:00
Zhenchi
d6d46378a1 test: fix some integration tests (#2432)
* test: fix some integration tests

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* test: add timezone setting

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

---------

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>
2023-09-18 10:52:14 +00:00
Ruihang Xia
fbbf3978d9 fix: render comment in SHOW CREATE TABLE (#2427)
* feat: add comment field to ColumnDef

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix sqlness case

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-09-18 10:51:10 +00:00
dennis zhuang
b0c56a3e23 feat: type alias (#2331)
* fix: remove location from error msg

* feat: adds transformer for sqlparser statements

* feat: supports type alias

* fix: typo

* fix: license header

* test: adds timestamp_types test

* refactor: transform

* fix: rebase develop and fix tests

* fix: compile error

* chore: delete src/datanode/src/sql/create_external.rs
2023-09-18 09:43:02 +00:00
Zhenchi
73af1368bd test: more integration test cases for external table (#2429)
Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>
2023-09-18 09:21:51 +00:00
Weny Xu
2c3ff90dbc feat: start services after first heartbeat response processed (#2424)
* feat: start services after first heartbeat response processed

* refactor: watch changes in RegionAliveKeeper

* feat: add coordination to DatanodeOptions

* chore: apply suggestions from CR

* chore: apply suggestions from CR

* chore: enable coordination in sqlness
2023-09-18 08:49:26 +00:00
Zhenchi
3a39215f11 feat: migrate file engine from table to region (#2365)
* feat: migrate file engine from table to region

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* Update src/file-engine/src/engine.rs

Co-authored-by: Ruihang Xia <waynestxia@gmail.com>

* feat: specify ts index for file engine

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* feat: handle time index for external table

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* fix: some integration tests

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* feat: add file schema and table schema compatibility

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* feat: compatible file schema to region schema

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* feat: add error msg

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* fix: simplify close

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* fix: implement set_writable

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* fix: tests-integration compilation

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* fix: address comments

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

---------

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>
Co-authored-by: Ruihang Xia <waynestxia@gmail.com>
Co-authored-by: zhongzc <zhongzc@zhongzcs-MacBook-Pro.local>
2023-09-18 08:02:43 +00:00
JeremyHi
e7e254cd11 feat: all distributed time together (#2423) 2023-09-17 15:18:52 +00:00
Yingwen
4a82926d72 docs: fix cargo doc errors and warnings (#2421)
* docs: fix cargo doc warnings and errors

* docs: fix warnings

* docs: fix warnings

* chore: rm src/common/function-macro/src/lib.rs
2023-09-17 11:45:15 +00:00
Ruihang Xia
92824d1c66 fix: update several sqlness results (#2422)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-09-17 11:33:40 +00:00
Yingwen
55ae5e5b66 feat(mito): Implements compaction scheduler (#2413)
* feat: allow multiple waiters in compaction request

* feat: compaction status wip

* feat: track region status in compaction scheduler

* feat: impl compaction scheduler

* feat: call compaction scheduler

* feat: remove status if nothing to compact

* feat: schedule compaction after flush

* feat: set compacting to false after compaction finished

* refactor: flush status only needs region id and version control

* refactor: schedule_compaction don't need region as argument

* test: test flush/scheduler for empty requests

* test: trigger compaction in test

* feat: notify scheduler on truncated

* chore: Apply suggestions from code review

Co-authored-by: JeremyHi <jiachun_feng@proton.me>

---------

Co-authored-by: JeremyHi <jiachun_feng@proton.me>
2023-09-17 09:15:11 +00:00
Ruihang Xia
693e8de83a feat: scope spawned task with trace id (#2419)
* feat: scope spawned task with trace id

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix test

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-09-17 09:05:28 +00:00
JeremyHi
542e863ecc fix: missing datanode id on keep lease (#2415) 2023-09-17 07:57:17 +00:00
Ruihang Xia
49310acea1 refactor: rename common-function-macro subcrate (#2418)
* rename common-function-macro to common-macro

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* put impl into their own file

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-09-17 07:56:41 +00:00
Weny Xu
5b08e03944 feat: sync regions between RegionServer and RegionAliveKeeper (#2417)
* feat: sync regions between RegionServer and RegionAliveKeeper

* Apply suggestions from code review

Co-authored-by: JeremyHi <jiachun_feng@proton.me>

* refactor: rename event name

* chore: apply suggestions

---------

Co-authored-by: JeremyHi <jiachun_feng@proton.me>
2023-09-17 07:55:44 +00:00
JeremyHi
98a40bae95 feat!: unify naming with options (#2416) 2023-09-17 07:24:57 +00:00
JeremyHi
342a6d071f feat: heartbeat request with header (#2412)
* feat: heartbeat request with header

* chore: frontend send heartbeat with a longer interval
2023-09-16 09:56:41 +00:00
Baasit
0a692aafb0 feat: clap wrapper around sqlness (#2400)
* feat: wrapped sqlness with clap to provide nice interface

* fix: added spaces and changed -f flag to bool
2023-09-16 08:53:08 +00:00
dennis zhuang
627c5b7419 feat: move table operations from frontend to operator crate (#2411)
* feat: move table operations from frontend to operator crate

* chore: blank line

* fix: toml format

* chore: move constants
2023-09-16 07:58:45 +00:00
Ruihang Xia
5e35087b67 fix: generate region path with given prefix (#2409)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-09-16 03:23:36 +00:00
Ruihang Xia
c149c123c3 feat: reopen corresponding regions on starting datanode (#2399)
* separate config and datanode impl

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* partial implement of fetching region id list

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* reopen all regions on starting region server

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* update sqlness & assign default datanode id

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* set writable on lease

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* apply cr suggs.

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* Update src/datanode/src/datanode.rs

Co-authored-by: Yingwen <realevenyag@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
Co-authored-by: dennis zhuang <killme2008@gmail.com>
Co-authored-by: Yingwen <realevenyag@gmail.com>
2023-09-15 13:30:20 +00:00
Vanish
0bd6b9bb39 feat: implement truncate region for mito2 (#2335)
* feat: implement truncate region for mito2.

* chore: add license header and fix typos

* Update src/mito2/src/worker/handle_truncate.rs

Co-authored-by: Yingwen <realevenyag@gmail.com>

* cr

* chore: consider the flush task being executed before truncating the region.

* test

* feat: check flush and compaction tasks

* chore: remove useless changes

* Update src/mito2/src/manifest/action.rs

Co-authored-by: Yingwen <realevenyag@gmail.com>

* Update src/mito2/src/worker/handle_flush.rs

Co-authored-by: Yingwen <realevenyag@gmail.com>

* chore: CR, consider sequence number

* test: use EventListener to test the flush task during truncate

* fix: fix listener error

* Update src/mito2/src/engine/truncate_test.rs

Co-authored-by: Yingwen <realevenyag@gmail.com>

* chore: cr

* fix: remove set None

* Update src/mito2/src/region/version.rs

Co-authored-by: dennis zhuang <killme2008@gmail.com>

* Update src/mito2/src/worker/handle_flush.rs

Co-authored-by: dennis zhuang <killme2008@gmail.com>

* Update src/mito2/src/worker/handle_truncate.rs

Co-authored-by: dennis zhuang <killme2008@gmail.com>

* doc: add some doc for FlushTruncateListener and RegionTruncate

---------

Co-authored-by: Yingwen <realevenyag@gmail.com>
Co-authored-by: dennis zhuang <killme2008@gmail.com>
2023-09-15 13:20:01 +00:00
Bamboo1
6aec30a1a8 feat: reserve internal column (#2396)
* feat: reserve internal column

Signed-off-by: ZhuZiyi <zyzhu2001@gmail.com>

* test: add function test

Signed-off-by: ZhuZiyi <zyzhu2001@gmail.com>

* fix: spell typos

Signed-off-by: ZhuZiyi <zyzhu2001@gmail.com>

* chore: resolve conversation

Signed-off-by: ZhuZiyi <zyzhu2001@gmail.com>

---------

Signed-off-by: ZhuZiyi <zyzhu2001@gmail.com>
2023-09-15 11:19:40 +00:00
LFC
a688760563 fix: validate partition columns (#2393)
fix: partition column must belong to primary keys or equal the time index
2023-09-15 10:07:32 +00:00
LFC
4b13c88752 fix: resolve more integration tests (#2406)
* fix: resolve more integration tests

* Update tests-integration/tests/http.rs

Co-authored-by: Weny Xu <wenymedia@gmail.com>

---------

Co-authored-by: Weny Xu <wenymedia@gmail.com>
2023-09-15 09:43:16 +00:00
JeremyHi
9572b1edbb feat: region storage path (#2404)
* feat: region storage path

* Update src/common/meta/src/key/datanode_table.rs

Co-authored-by: Weny Xu <wenymedia@gmail.com>

* chore: by cr

* feat: upgrade proto

---------

Co-authored-by: Weny Xu <wenymedia@gmail.com>
2023-09-15 09:07:54 +00:00
dennis zhuang
43e3c94fd1 refactor: catalog managers (#2405)
* feat: rename catalog::local to catalog::memory

* refactor: catalog managers

* chore: license header
2023-09-15 08:48:14 +00:00
Ruihang Xia
364b99a14c fix: enable ignored promql unit tests (#2403)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-09-15 07:37:30 +00:00
shuiyisong
a8ae386a57 chore: add #[serde(default)] to new added engine field (#2402)
chore: add serde default to new field
2023-09-15 07:11:57 +00:00
LFC
fe5679e77e refactor: remove table ident (#2368)
* refactor:
1. remove TableIdent, use TableId directly
2. use the latest greptime-proto
3. independently invalidate table id cache and table name cache

* rebase

* fix: resolve PR comments

* fix: resolve PR comments
2023-09-15 05:14:40 +00:00
JeremyHi
8e70b9e982 feat: remove deprecated metadata keys (#2398)
* feat: remove deprecated metadata keys

* feat: this time, weny indeed said [removes it]
2023-09-15 02:11:21 +00:00
JeremyHi
d1adb915bf feat: set readonly first when deregister region (#2391)
* feat: set readonly first when deregister region

* revert distxxx
2023-09-14 12:12:38 +00:00
Yingwen
a84a8ad04f fix: alter table procedure panics while renaming table (#2397)
* fix: procedure panic on renaming table

* test: fix test_insert_and_select invalid arguments

* test: fix test_standalone_insert_and_query using wrong semantic type

* test: fix test_distributed_insert_delete_and_query semantic type
2023-09-14 11:50:00 +00:00
JeremyHi
7bb8a5999c feat!: add engine name to DatanodeTableValue (#2395)
* feat: add engine name to DatanodeTableValue

* fix: by cr
2023-09-14 09:50:35 +00:00
Yingwen
26992d58cd chore: decrease mutable write buffer limit (#2390)
* chore: set mutable limit to half of the global write buffer size

* refactor: put handle_flush_finished after handle_flush_request

* refactor: rename tests.rs to basic_test.rs

* style: fmt code
2023-09-14 08:24:14 +00:00
Ruihang Xia
47bf300869 fix: update sqlness result for order_by (#2389)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-09-14 07:28:40 +00:00
Yingwen
a7df5a7c9a fix(mito): incorrect field index in ProjectionMapper (#2388)
* chore: update todo comments

* test: add test for projection

* fix: panics when projecting fields

* chore: remove todos
2023-09-14 04:15:15 +00:00
Yingwen
d4ae8a6fed feat(mito): Add writable flag to region (#2349)
* feat: add writable flag to region.

* refactor: rename MitoEngine to MitoEngine::scanner

* feat: add set_writable() to RegionEngine

* feat: check whether region is writable

* feat: make set_writable sync

* test: test set_writable

* docs: update comments

* feat: send result on compaction failure

* refactor: wrap output sender in new type

* feat: on failure

* refactor: use get_region_or/writable_region_or

* refactor: remove send_result

* feat: notify waiters on flush scheduler drop

* test: fix tests

* fix: only alter writable region
2023-09-14 02:45:30 +00:00
Yingwen
da54a0c139 fix: alter table procedure forgets to update next column id (#2385)
* feat: add more info to error messages

* feat: store next column id in procedure

* fix: update next column id for table info

* test: fix add col test

* chore: remove location from invalid request error

* test: update test

* test: fix test
2023-09-14 02:06:57 +00:00
Ruihang Xia
cc7eb3d317 fix: querying temporary table (#2387)
* fix information schema

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* remove log

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-09-13 14:30:56 +00:00
Weny Xu
93f3048f4f refactor: migrate OpenDal to 0.39 (#2383)
* chore: bump opendal to 7d552

* refactor: migrate OpenDal to 0.39

* chore: apply suggestions from CR
2023-09-13 12:43:53 +00:00
LFC
d08b05c963 fix: make test-integration able to compile (#2384)
* fix: make test-integration able to compile

* chore: fmt toml

---------

Co-authored-by: WenyXu <wenymedia@gmail.com>
2023-09-13 12:42:46 +00:00
JeremyHi
f76aa278fd feat: atomic metadata (#2366)
* feat: atomic creating metadata

* chore: exist exists

* chore: license header

* chore: weny never say that

* feat: add put_conditionally to kv_backend
2023-09-13 10:51:05 +00:00
JeremyHi
6f4779b474 feat: engine name in heartbeat (#2377) 2023-09-13 09:10:10 +00:00
Ruihang Xia
de723d9c1c fix: update sqlness result in distributed mode (#2381)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-09-13 09:07:55 +00:00
dennis zhuang
7448e975c2 chore: change error messages (#2379)
* chore: change error messages

* chore: remove locaton in table not found error msg
2023-09-13 08:21:03 +00:00
dennis zhuang
3f97a0d285 fix: gRPC max message size limitation (#2375)
* fix: gRPC max message size limitation

* chore: don't set max_encoding_message_size
2023-09-13 08:13:49 +00:00
Ruihang Xia
60bdf9685f feat: use the latest command line options for sqlness runner (#2371)
feat: use the latest command line options

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-09-13 03:38:43 +00:00
Ruihang Xia
9c76d2cf54 feat: convert sql number to values with target type (#2370)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-09-13 11:14:42 +08:00
Weny Xu
1a7268186b chore: bump raft-engine to 22dfb4 (#2360) 2023-09-12 07:57:15 -05:00
Ruihang Xia
eeecce4623 refactor: remove table procedure (#2359)
remove table procedure

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-09-12 07:57:15 -05:00
Ruihang Xia
1ad5f6e5d5 refactor: system tables in FrontendCatalogManager (#2358)
* rename method names

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* remove system table, table engine, register/deregister

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add system catalog

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* run nextest

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* some documents

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix: fix clippy

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
Co-authored-by: WenyXu <wenymedia@gmail.com>
2023-09-12 07:57:15 -05:00
Yingwen
46eca5026e fix(mito): Stores and recovers flushed sequence (#2355)
* test: add test for reopen

* feat: last entry id starts from flushed entry id

* fix: store flushed sequence and recover it from manifest

* test: check sequence in alter test

* test: more tests for alter
2023-09-12 07:57:15 -05:00
Weny Xu
912341e4fa fix: fix start issues under standalone mode (#2352)
* fix: fix standalone starts

* chore: bump raft-engine to 571462e

* refactor: remove MetadataService
2023-09-12 07:57:15 -05:00
JeremyHi
80c5d52015 feat: stop region server (#2356)
* feat: stop region server

* fix: close region first
2023-09-12 07:57:15 -05:00
Zhenchi
4af126eb1b feat: consolidate Insert request related partitioning and distributed processing operations into Inserter (#2346)
* refactor: RegionRequest as param of RegionRequestHandler.handle

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* feat: partition insert & delete reqs for both standalone and distributed mode

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* chore: nit change

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* fix: wrong function name

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* feat: do request in inserter & deleter

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* feat: remove RegionRequestHandler.handle

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* refactor: rename table_creator

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* chore: nit change

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* refactor: address comments

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* chore: nit change

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

---------

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>
2023-09-12 07:57:15 -05:00
LFC
fe954b78a2 refactor: system tables in new region server (#2344)
refactor: invert the dependency between system tables and catalog manager
2023-09-12 07:57:15 -05:00
JeremyHi
3cab6de391 feat: filter out empty heartbeat req (#2345)
* feat: filter out empty heartbeat request

* fix: big mistake
2023-09-12 07:57:15 -05:00
Yingwen
606ee43f1d feat(mito): Implement skeleton for alteration (#2343)
* feat: impl handle_alter wip

* refactor: move send_result to worker.rs

* feat: skeleton for handle_alter_request

* feat: write requests should wait for alteration

* feat: define alter request

* chore: no warnings

* fix: remove memtables after flush

* chore: update comments and impl add_write_request_to_pending

* feat: add schema version to RegionMetadata

* feat: impl alter_schema/can_alter_directly

* chore: use send_result

* test: pull next_batch again

* feat: convert pb AlterRequest to RegionAlterRequest

* feat: validate alter request

* feat: validate request and alter metadata

* feat: allow none location

* test: test alter

* fix: recover files and flushed entry id from manifest

* test: test alter

* chore: change comments and variables

* chore: fix compiler errors

* feat: add is_empty() to MemtableVersion

* test: fix metadata alter test

* fix: Compaction picker doesn't notify waiters if it returns None

* chore: address CR comments

* test: add tests for alter request

* refactor: use send_result
2023-09-12 07:57:15 -05:00
Lei, HUANG
3331e3158c feat(mito2): compaction (#2317)
* feat: compaction component

* feat: mito2 compaction

* Avoid building time range predicates when merging SST files, since in TWCS we don't enforce a strict time window.

* fix: some CR comments

* minor: change CompactionRequest::senders to an option

* chore: handle compaction finish error

* feat: integrate compaction into region worker

* chore: rebase upstream

* fix: Some CR comments

* chore: Apply suggestions from code review

* style: fix clippy

---------

Co-authored-by: Yingwen <realevenyag@gmail.com>
2023-09-12 07:57:15 -05:00
Weny Xu
a4604afde5 refactor: rename NEXT_TABLE_ROUTE_PREFIX to TABLE_ROUTE_PREFIX (#2348)
* refactor: rename NEXT_TABLE_ROUTE_PREFIX to TABLE_ROUTE_PREFIX

* chore: apply suggestions from CR
2023-09-12 07:57:15 -05:00
Weny Xu
f386329e29 refactor: introduce DdlTaskExecutor and refactor statement executor (#2341)
* feat: add kv store option

* refactor: refactor statement executor

* refactor: refactor standalone table creator

* chore: apply suggestions from CR

* chore: apply suggestions from CR

* refactor: move ShowCreateTable and CreateDatabase to StatementExecutor

* fix: fix RegionDistribution

* feat: build standalone

* chore: apply suggestions from CR

* chore: apply suggestions from CR

* chore: apply suggestions from CR
2023-09-12 07:57:15 -05:00
Yingwen
3f6d557b8d feat: Implements a reader to make schema compatible (#2326)
* docs: update comment

* feat: Add compat reader to SeqScan

* feat: add struct to compat pk and fields

* refactor: remove unused fields from ParquetReader

* feat: compat framework

* feat: Implement CompatPrimaryKey and CompatFields

* feat: implement compat reader

* feat: Test compat reader

* test: test compat reader

* feat: add more checks to concat

* style: fix clippy

* test: more tests for compat reader

* test: test reader with projection
2023-09-12 07:57:15 -05:00
Ruihang Xia
6215f124f7 refactor: remove datanode instance (#2342)
* pass nextest

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* remove deadcode

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* rename region_alive_keepers

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-09-12 07:57:15 -05:00
LFC
1d83c942a9 refactor: script table creation (#2340)
* refactor:
1. remove method `register_system_table` from CatalogManager
2. the creation of ScriptTable (as a system table) is removed from CatalogManager. Instead, the ScriptTable is created when the Frontend instance is starting, by calling the Frontend instance's gRPC handler.

* rebase
2023-09-12 07:57:15 -05:00
Ruihang Xia
f287a5db9f feat: adapt region keep aliver for region server (#2333)
* basic impl

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* refactor, collapse one layer

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add test

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* remove old heartbeat handler impls

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* remove old region alive keeper

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* remove remote catalog manager

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* global replace

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* test countdown task

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-09-12 07:57:15 -05:00
Zhenchi
dac6b2e80a feat(frontend): migrate delete to region server (#2329)
* feat(frontend): migrate delete to region server

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* feat: add more checks and trim columns

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* refactor: RegionRequestHandler.handle returns AffectedRows

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

---------

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>
2023-09-12 07:57:15 -05:00
Yingwen
1e44e86d81 feat(mito): Stall write requests and add more flush tests (#2322)
* feat: impl reject write

* feat: sanitize reject size

* feat: add should_stall to WriteBufferManager

* feat: stall requests

* test: mock WriteBufferManager

* feat: add new_with_manager for test and remove object_store from inner

* feat: add an event listener for tests

* feat: Use listener to test flush

* refactor: add flush_test.rs

* style: fix clippy

* feat: test write stall

* test: test flush empty
2023-09-12 07:57:15 -05:00
JeremyHi
56691ff03b refactor: mailbox timeout (#2330)
refactor: Optimize the timeout mechanism of the mailbox
2023-09-12 07:57:15 -05:00
Weny Xu
e4de63625f refactor: refactor raft engine backend and state store (#2336)
* refactor: remove redundant code

* refactor: refactor RaftEngineBackend Error to common_meta::error::Error

* refactor: refactor state store

* chore: apply suggestions from CR
2023-09-12 07:57:15 -05:00
Ruihang Xia
4b2b59c31b refactor: clean unnecessary disabled lints (#2338)
* clean manifest

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* clean engine

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* clean region

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* clean access_layer

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* clean manifest manager

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* clean row_converter

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* clean scheduler

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* clean worker

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-09-12 07:57:15 -05:00
Weny Xu
2ee2d29085 refactor: move Sequence to common meta (#2337) 2023-09-12 07:57:15 -05:00
Yingwen
c3f6529178 fix: improve error message in validate_proto_value (#2328)
* fix: correct error message in validate_proto_value()

* fix: print location in InvalidRequest error

* style: format
2023-09-12 07:57:15 -05:00
Ruihang Xia
eb7116ab56 feat: read/write works in distributed mode 🎉 (#2327)
* add do_get method to RegionRequestHandler

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* move RegionRequestHandler to client crate

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* use RegionRequestHandler in MergeScan

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* minor fix

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* ignore tests

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix format

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-09-12 07:57:15 -05:00
Zhenchi
5f7d48f107 feat(frontend): reorg insert converters and introduce stmt_to_region (#2324)
* feat(frontend): reorg insert converters and introduce stmt_to_region

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* chore: shorten import path

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* fix: add check for column count

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* fix: clippy

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

---------

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>
2023-09-12 07:57:15 -05:00
LFC
711e27d9fa feat: distributed alter table in region server (#2311)
* feat: distributed alter table in region server

* rebase
2023-09-12 07:57:15 -05:00
Weny Xu
922e342b63 refactor: refactor ddl manager (#2306)
* refactor: refactor ddl manager

* chore: apply suggestions from CR
2023-09-12 07:57:15 -05:00
Zhenchi
7dde9ce3ce feat(frontend): migrate insert to region server (#2318)
* feat(frontend): migrate insert to region server

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* refactor: move converter to Inserter

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* chore: rename convert function

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* fix: address comments

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* fix: address comments

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* fix: add span id

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* fix: compilation

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* retrigger action

* retrigger action

---------

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>
2023-09-12 07:57:15 -05:00
Yingwen
3eccb36047 feat: avoid using vector to get default value (#2323) 2023-09-12 07:57:15 -05:00
Ruihang Xia
f71aa373c1 feat: start datanode with config (#2312)
* remove memory-catalog and procedure

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* derive serde for MitoConfig

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* start datanode with configs

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* remove dir in WalConfig

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add rename field attr

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add stupid duplicated mito config

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* remove wrong import

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* weird compile error

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-09-12 07:57:15 -05:00
Ruihang Xia
50fca2400e feat: adapt methods from RegionEngine for MitoEngine (#2315)
* feat: adapt methods from RegionEngine for MitoEngine

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* minor fixes

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-09-12 07:57:15 -05:00
JeremyHi
920763d7dd feat: add metric and manage tool for InactiveRegionKey (#2313)
* feat: add metric and manage tool for InactiveRegionKey

* chore: by review comment
2023-09-12 07:57:15 -05:00
dennis zhuang
a3d5931fca feat: unify all protocol options (#2316)
* feat: unify all protocol options

* feat: adds enable to example configs

* chore: style

Co-authored-by: JeremyHi <jiachun_feng@proton.me>

---------

Co-authored-by: JeremyHi <jiachun_feng@proton.me>
2023-09-12 07:57:15 -05:00
dennis zhuang
b1599ad3a5 fix: can't add new columns as primary key (#2310) 2023-09-12 07:57:15 -05:00
dennis zhuang
38697e0c4d feat: build http client for cloud object storage (#2314)
* feat: build http client for s3/oss/azblob storages

* chore: style

* fix: test

* fix: cargo toml fmt
2023-09-12 07:57:15 -05:00
Yingwen
50220f8f04 feat: Impl write buffer manager for mito2 (#2309)
* feat: add write buffer manager to builder

* feat: impl WriteBufferManager

* feat: impl MemtableVersion::mutable_usage

* chore: Address CR comments

Co-authored-by: JeremyHi <jiachun_feng@proton.me>

* refactor: rename mutable_limitation to mutable_limit

---------

Co-authored-by: JeremyHi <jiachun_feng@proton.me>
2023-09-12 07:57:15 -05:00
Niwaka
3504d8254e fix: unused table options (#2267)
* fix: unused table options keys

* refactor: simplify validate table options

* chore: Add newlines

---------

Co-authored-by: Yingwen <realevenyag@gmail.com>
2023-09-12 07:57:15 -05:00
dennis zhuang
fad58835bf fix: don't raise an error when manifest directory is not created (#2308)
* fix: don't raise an error when manifest directory is not created

* chore: apply suggestion

Co-authored-by: Lei, HUANG <6406592+v0y4g3r@users.noreply.github.com>

---------

Co-authored-by: Lei, HUANG <6406592+v0y4g3r@users.noreply.github.com>
2023-09-12 07:57:15 -05:00
Lei, HUANG
43fdff3639 feat: remove memtable request (#2307)
* refactor: remove scan request from memtable API

* docs: Update comment

---------

Co-authored-by: Yingwen <realevenyag@gmail.com>
2023-09-12 07:57:15 -05:00
Weny Xu
271f80daad fix: LoadBase Selector cannot follow the region distribution rules (#2259)
* fix: LoadBase Selector cannot follow the region distribution rules

* chore: apply suggestions from CR
2023-09-12 07:57:15 -05:00
Lei, HUANG
36231a5d50 feat(mito2): add alloc_tracker for memtable (#2266)
* feat: add alloc_tracker for memtable

* chore: integrate WriteBufferManager
2023-09-12 07:57:15 -05:00
JeremyHi
a7fa40e16d fix: filter out outdated heartbeat (#2303)
* fix: filter out outdated heartbeat, #1707

* feat: reorder handlers

* refactor: disableXXX to enableXXX

* feat: make full use of region leases to facilitate failover

* chore: minor refactor

* chore: by comment

* feat: logging on inactive/active
2023-09-12 07:57:15 -05:00
Yingwen
648b2ae293 feat(mito): Flush region (#2291)
* chore: call handle_flush_request

* feat: alias SchedulerRef and clean scheduler on drop

* feat: add scheduler to workers

* feat: remove RegionMemtableStats

* feat: pick regions to flush

* feat: add more fields to region flush task

* feat: smallvec workspace dep

* feat: Use list to hold immutable memtables

* feat: flush job wip

* feat: use access layer to read write sst

* feat: flush memtables to l0

* feat: write manifest

* feat: schedule next flush on success

* feat: schedule flush on success and failure

* feat: add purger to region

* feat: apply edit after flush

* feat: collect stats for SSTs

* feat: manual flush

* test: test flush and fix manifest test

* feat: remove flush scheduler job limit

* fix: typo

* style: clippy

* feat: clean flushed files on failure

* chore: address CR comment

* refactor: Use put_rows

* feat: Clean flush scheduler on drop

* feat: remove region flush status on drop and close

* chore: address CR comment
2023-09-12 07:57:15 -05:00
Weny Xu
fa5e3b94d3 refactor: refactor ddl procedure (#2304) 2023-09-12 07:57:15 -05:00
Weny Xu
4818887e38 refactor: refactor DistInstance (#2305) 2023-09-12 07:57:15 -05:00
Ruihang Xia
eddff17523 feat: drop region in mito2 (#2286)
* basic impl

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* check in opening region

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix typo

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add test

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix clippy

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix typo again

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* Update src/mito2/src/worker/handle_drop.rs

Co-authored-by: JeremyHi <jiachun_feng@proton.me>

* remove file in order

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix remove logic

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* use scan to list files

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
Co-authored-by: JeremyHi <jiachun_feng@proton.me>
2023-09-12 07:57:15 -05:00
Weny Xu
c839ed271c refactor: ddl context (#2301)
* refactor: ddl context

* refactor: remove unused code

* chore: apply suggestions from CR
2023-09-12 07:57:15 -05:00
JeremyHi
4d2cae4174 refactor: inactive node manager (#2300)
refactor: use region_id instead of table&region_num in InactiveNodeManager
2023-09-12 07:57:15 -05:00
Yingwen
b234733c61 feat(mito): Support deleting rows in mito2 (#2275)
* feat: check delete request

* test: test delete and overwrite
2023-09-12 07:57:15 -05:00
Lei, HUANG
9691d19601 feat: impl kv backend for raft engine (#2280)
* feat: kv backend on raft-engine

* feat: raft-engine kvbackend

* fix: toml

* fix: some review comments

* chore: optimize delete

* fix: lift lock in batch_delete
2023-09-12 07:57:15 -05:00
LFC
ff3881f0e1 feat: drop distributed Mito2 table (#2260)
* feat: drop distributed Mito2 table

* rebase develop

* fix: resolve PR comments

* fix: resolve PR comments
2023-09-12 07:57:15 -05:00
JeremyHi
fa542f6e93 feat: new heartbeat (#2299) 2023-09-12 07:57:15 -05:00
Zhenchi
d6c82867d5 refactor: remove the most Table impls (#2274)
Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>
Co-authored-by: Ruihang Xia <waynestxia@gmail.com>
2023-09-12 07:57:15 -05:00
Lei, HUANG
86d56f71ef fix: flume bug (#2298)
fix: flume
2023-09-12 07:57:15 -05:00
Zhenchi
b42d343ae6 feat(frontend): unify column inserter and row inserter (#2293)
* refactor: unify inserter

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* feat(frontend): unify column inserter and row inserter

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* fix: remove redundant clone

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* fix: move empty check ahead

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* feat: add more logs

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* fix: leading license

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* adjust indent

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

---------

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>
2023-09-12 07:57:15 -05:00
Yingwen
365e557e7a feat(mito): Integrate access layer and file purger to region (#2296)
* feat: alias SchedulerRef and clean scheduler on drop

* feat: add scheduler to workers

* feat: use access layer to read write sst

* feat: add purger to region

* refactor: allow getting region_dir from AccessLayer

* feat: add scheduler to FlushScheduler

* feat: getter for object store

* chore: fix typo

Co-authored-by: Ruihang Xia <waynestxia@gmail.com>

---------

Co-authored-by: Ruihang Xia <waynestxia@gmail.com>
2023-09-12 07:57:15 -05:00
Ruihang Xia
46d171d341 chore: bump greptime-proto to replace region_dir (#2290)
chore: bump greptime-proto to replace region_dir

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-09-12 07:57:15 -05:00
Ruihang Xia
718246ea1a feat: implement heartbeat for region server (#2279)
* retrieve region stats from region server

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* implement heartbeat handler

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* start datanode with region server

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* remove comment

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* disable non-unit test

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* implement heartbeat task

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-09-12 07:57:15 -05:00
JeremyHi
58d07e0e62 feat: v04 rm unused exprs (#2285)
* feat: rm compact and flush exprs

* refactor: continue to rm compact and flush
2023-09-12 07:57:15 -05:00
dennis zhuang
db89235474 feat: only allow timestamp type as time index (#2281)
* feat: only allow timestamp data type as time index

* test: update sqltest cases, todo: need some fixes

* fix: sqlness tests

* fix: forgot adding back cte test

* chore: style
2023-09-12 07:57:15 -05:00
shuiyisong
6e593401f7 refactor: collecting memory usage during scan (#2353)
* chore: try custom metrics

* chore: fix header

* chore: minor change
2023-09-12 15:52:57 +08:00
Yingwen
466fbaca5d fix: panic in try_into_vector() (#2351) 2023-09-11 19:06:31 +08:00
ZonaHe
de966af83b feat: update dashboard to v0.3.3 (#2339)
Co-authored-by: ZonaHex <ZonaHex@users.noreply.github.com>
2023-09-06 19:19:56 +08:00
905 changed files with 44666 additions and 38872 deletions

View File

@@ -1,93 +0,0 @@
name: Build and push dev-builder image
description: Build and push dev-builder image to DockerHub and ACR
inputs:
dockerhub-image-registry:
description: The dockerhub image registry to store the images
required: false
default: docker.io
dockerhub-image-registry-username:
description: The dockerhub username to login to the image registry
required: true
dockerhub-image-registry-token:
description: The dockerhub token to login to the image registry
required: true
dockerhub-image-namespace:
description: The dockerhub namespace of the image registry to store the images
required: false
default: greptime
acr-image-registry:
description: The ACR image registry to store the images
required: true
acr-image-registry-username:
description: The ACR username to login to the image registry
required: true
acr-image-registry-password:
description: The ACR password to login to the image registry
required: true
acr-image-namespace:
description: The ACR namespace of the image registry to store the images
required: false
default: greptime
version:
description: Version of the dev-builder
required: false
default: latest
runs:
using: composite
steps:
- name: Login to Dockerhub
uses: docker/login-action@v2
with:
registry: ${{ inputs.dockerhub-image-registry }}
username: ${{ inputs.dockerhub-image-registry-username }}
password: ${{ inputs.dockerhub-image-registry-token }}
- name: Build and push ubuntu dev builder image to dockerhub
shell: bash
run:
make dev-builder \
BASE_IMAGE=ubuntu \
BUILDX_MULTI_PLATFORM_BUILD=true \
IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \
IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
IMAGE_TAG=${{ inputs.version }}
- name: Build and push centos dev builder image to dockerhub
shell: bash
run:
make dev-builder \
BASE_IMAGE=centos \
BUILDX_MULTI_PLATFORM_BUILD=true \
IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \
IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
IMAGE_TAG=${{ inputs.version }}
- name: Login to ACR
uses: docker/login-action@v2
continue-on-error: true
with:
registry: ${{ inputs.acr-image-registry }}
username: ${{ inputs.acr-image-registry-username }}
password: ${{ inputs.acr-image-registry-password }}
- name: Build and push ubuntu dev builder image to ACR
shell: bash
continue-on-error: true
run: # buildx will cache the images that already built, so it will not take long time to build the images again.
make dev-builder \
BASE_IMAGE=ubuntu \
BUILDX_MULTI_PLATFORM_BUILD=true \
IMAGE_REGISTRY=${{ inputs.acr-image-registry }} \
IMAGE_NAMESPACE=${{ inputs.acr-image-namespace }} \
IMAGE_TAG=${{ inputs.version }}
- name: Build and push centos dev builder image to ACR
shell: bash
continue-on-error: true
run: # buildx will cache the images that already built, so it will not take long time to build the images again.
make dev-builder \
BASE_IMAGE=centos \
BUILDX_MULTI_PLATFORM_BUILD=true \
IMAGE_REGISTRY=${{ inputs.acr-image-registry }} \
IMAGE_NAMESPACE=${{ inputs.acr-image-namespace }} \
IMAGE_TAG=${{ inputs.version }}

View File

@@ -0,0 +1,76 @@
name: Build and push dev-builder images
description: Build and push dev-builder images to DockerHub and ACR
inputs:
dockerhub-image-registry:
description: The dockerhub image registry to store the images
required: false
default: docker.io
dockerhub-image-registry-username:
description: The dockerhub username to login to the image registry
required: true
dockerhub-image-registry-token:
description: The dockerhub token to login to the image registry
required: true
dockerhub-image-namespace:
description: The dockerhub namespace of the image registry to store the images
required: false
default: greptime
version:
description: Version of the dev-builder
required: false
default: latest
build-dev-builder-ubuntu:
description: Build dev-builder-ubuntu image
required: false
default: 'true'
build-dev-builder-centos:
description: Build dev-builder-centos image
required: false
default: 'true'
build-dev-builder-android:
description: Build dev-builder-android image
required: false
default: 'true'
runs:
using: composite
steps:
- name: Login to Dockerhub
uses: docker/login-action@v2
with:
registry: ${{ inputs.dockerhub-image-registry }}
username: ${{ inputs.dockerhub-image-registry-username }}
password: ${{ inputs.dockerhub-image-registry-token }}
- name: Build and push dev-builder-ubuntu image
shell: bash
if: ${{ inputs.build-dev-builder-ubuntu == 'true' }}
run: |
make dev-builder \
BASE_IMAGE=ubuntu \
BUILDX_MULTI_PLATFORM_BUILD=true \
IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \
IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
IMAGE_TAG=${{ inputs.version }}
- name: Build and push dev-builder-centos image
shell: bash
if: ${{ inputs.build-dev-builder-centos == 'true' }}
run: |
make dev-builder \
BASE_IMAGE=centos \
BUILDX_MULTI_PLATFORM_BUILD=true \
IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \
IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
IMAGE_TAG=${{ inputs.version }}
- name: Build and push dev-builder-android image # Only build image for amd64 platform.
shell: bash
if: ${{ inputs.build-dev-builder-android == 'true' }}
run: |
make dev-builder \
BASE_IMAGE=android \
IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \
IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
IMAGE_TAG=${{ inputs.version }} && \
docker push ${{ inputs.dockerhub-image-registry }}/${{ inputs.dockerhub-image-namespace }}/dev-builder-android:${{ inputs.version }}
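
For reference, the composite action above ultimately shells out to `make dev-builder`; a minimal local sketch of the ubuntu variant, using the action's default values (docker.io / greptime / latest) as placeholders:

# Hedged sketch only; registry, namespace, and tag are the action's defaults, not fixed values.
make dev-builder \
  BASE_IMAGE=ubuntu \
  BUILDX_MULTI_PLATFORM_BUILD=true \
  IMAGE_REGISTRY=docker.io \
  IMAGE_NAMESPACE=greptime \
  IMAGE_TAG=latest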

View File

@@ -16,35 +16,20 @@ inputs:
version:
description: Version of the artifact
required: true
release-to-s3-bucket:
description: S3 bucket to store released artifacts
required: true
aws-access-key-id:
description: AWS access key id
required: true
aws-secret-access-key:
description: AWS secret access key
required: true
aws-region:
description: AWS region
required: true
upload-to-s3:
description: Upload to S3
required: false
default: 'true'
upload-latest-artifacts:
description: Upload the latest artifacts to S3
required: false
default: 'true'
working-dir:
description: Working directory to build the artifacts
required: false
default: .
build-android-artifacts:
description: Build android artifacts
required: false
default: 'false'
runs:
using: composite
steps:
- name: Build greptime binary
shell: bash
if: ${{ inputs.build-android-artifacts == 'false' }}
run: |
cd ${{ inputs.working-dir }} && \
make build-by-dev-builder \
@@ -54,14 +39,25 @@ runs:
- name: Upload artifacts
uses: ./.github/actions/upload-artifacts
if: ${{ inputs.build-android-artifacts == 'false' }}
with:
artifacts-dir: ${{ inputs.artifacts-dir }}
target-file: ./target/${{ inputs.cargo-profile }}/greptime
version: ${{ inputs.version }}
release-to-s3-bucket: ${{ inputs.release-to-s3-bucket }}
aws-access-key-id: ${{ inputs.aws-access-key-id }}
aws-secret-access-key: ${{ inputs.aws-secret-access-key }}
aws-region: ${{ inputs.aws-region }}
upload-to-s3: ${{ inputs.upload-to-s3 }}
upload-latest-artifacts: ${{ inputs.upload-latest-artifacts }}
working-dir: ${{ inputs.working-dir }}
# TODO(zyy17): We can remove the build-android-artifacts flag in the future.
- name: Build greptime binary
shell: bash
if: ${{ inputs.build-android-artifacts == 'true' }}
run: |
cd ${{ inputs.working-dir }} && make strip-android-bin
- name: Upload android artifacts
uses: ./.github/actions/upload-artifacts
if: ${{ inputs.build-android-artifacts == 'true' }}
with:
artifacts-dir: ${{ inputs.artifacts-dir }}
target-file: ./target/aarch64-linux-android/release/greptime
version: ${{ inputs.version }}
working-dir: ${{ inputs.working-dir }}

View File

@@ -13,30 +13,10 @@ inputs:
disable-run-tests:
description: Disable running integration tests
required: true
release-to-s3-bucket:
description: S3 bucket to store released artifacts
required: true
aws-access-key-id:
description: AWS access key id
required: true
aws-secret-access-key:
description: AWS secret access key
required: true
aws-region:
description: AWS region
required: true
dev-mode:
description: Enable dev mode, only build standard greptime
required: false
default: 'false'
upload-to-s3:
description: Upload to S3
required: false
default: 'true'
upload-latest-artifacts:
description: Upload the latest artifacts to S3
required: false
default: 'true'
working-dir:
description: Working directory to build the artifacts
required: false
@@ -68,12 +48,6 @@ runs:
cargo-profile: ${{ inputs.cargo-profile }}
artifacts-dir: greptime-linux-${{ inputs.arch }}-pyo3-${{ inputs.version }}
version: ${{ inputs.version }}
release-to-s3-bucket: ${{ inputs.release-to-s3-bucket }}
aws-access-key-id: ${{ inputs.aws-access-key-id }}
aws-secret-access-key: ${{ inputs.aws-secret-access-key }}
aws-region: ${{ inputs.aws-region }}
upload-to-s3: ${{ inputs.upload-to-s3 }}
upload-latest-artifacts: ${{ inputs.upload-latest-artifacts }}
working-dir: ${{ inputs.working-dir }}
- name: Build greptime without pyo3
@@ -85,12 +59,6 @@ runs:
cargo-profile: ${{ inputs.cargo-profile }}
artifacts-dir: greptime-linux-${{ inputs.arch }}-${{ inputs.version }}
version: ${{ inputs.version }}
release-to-s3-bucket: ${{ inputs.release-to-s3-bucket }}
aws-access-key-id: ${{ inputs.aws-access-key-id }}
aws-secret-access-key: ${{ inputs.aws-secret-access-key }}
aws-region: ${{ inputs.aws-region }}
upload-to-s3: ${{ inputs.upload-to-s3 }}
upload-latest-artifacts: ${{ inputs.upload-latest-artifacts }}
working-dir: ${{ inputs.working-dir }}
- name: Clean up the target directory # Clean up the target directory for the centos7 base image, or it will still use the objects of the last build.
@@ -107,10 +75,14 @@ runs:
cargo-profile: ${{ inputs.cargo-profile }}
artifacts-dir: greptime-linux-${{ inputs.arch }}-centos-${{ inputs.version }}
version: ${{ inputs.version }}
release-to-s3-bucket: ${{ inputs.release-to-s3-bucket }}
aws-access-key-id: ${{ inputs.aws-access-key-id }}
aws-secret-access-key: ${{ inputs.aws-secret-access-key }}
aws-region: ${{ inputs.aws-region }}
upload-to-s3: ${{ inputs.upload-to-s3 }}
upload-latest-artifacts: ${{ inputs.upload-latest-artifacts }}
working-dir: ${{ inputs.working-dir }}
- name: Build greptime on android base image
uses: ./.github/actions/build-greptime-binary
if: ${{ inputs.arch == 'amd64' && inputs.dev-mode == 'false' }} # Only build android base image on amd64.
with:
base-image: android
artifacts-dir: greptime-android-arm64-${{ inputs.version }}
version: ${{ inputs.version }}
working-dir: ${{ inputs.working-dir }}
build-android-artifacts: true

View File

@@ -19,21 +19,9 @@ inputs:
disable-run-tests:
description: Disable running integration tests
required: true
release-to-s3-bucket:
description: S3 bucket to store released artifacts
required: true
artifacts-dir:
description: Directory to store artifacts
required: true
aws-access-key-id:
description: AWS access key id
required: true
aws-secret-access-key:
description: AWS secret access key
required: true
aws-region:
description: AWS region
required: true
runs:
using: composite
steps:
@@ -99,7 +87,3 @@ runs:
artifacts-dir: ${{ inputs.artifacts-dir }}
target-file: target/${{ inputs.arch }}/${{ inputs.cargo-profile }}/greptime
version: ${{ inputs.version }}
release-to-s3-bucket: ${{ inputs.release-to-s3-bucket }}
aws-access-key-id: ${{ inputs.aws-access-key-id }}
aws-secret-access-key: ${{ inputs.aws-secret-access-key }}
aws-region: ${{ inputs.aws-region }}

View File

@@ -0,0 +1,80 @@
name: Build Windows artifacts
description: Build Windows artifacts
inputs:
arch:
description: Architecture to build
required: true
rust-toolchain:
description: Rust toolchain to use
required: true
cargo-profile:
description: Cargo profile to build
required: true
features:
description: Cargo features to build
required: true
version:
description: Version of the artifact
required: true
disable-run-tests:
description: Disable running integration tests
required: true
artifacts-dir:
description: Directory to store artifacts
required: true
runs:
using: composite
steps:
- uses: arduino/setup-protoc@v1
- name: Install rust toolchain
uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ inputs.rust-toolchain }}
targets: ${{ inputs.arch }}
components: llvm-tools-preview
- name: Rust Cache
uses: Swatinem/rust-cache@v2
- name: Install Python
uses: actions/setup-python@v4
with:
python-version: '3.10'
- name: Install PyArrow Package
shell: pwsh
run: pip install pyarrow
- name: Install WSL distribution
uses: Vampire/setup-wsl@v2
with:
distribution: Ubuntu-22.04
- name: Install latest nextest release # For integration tests.
if: ${{ inputs.disable-run-tests == 'false' }}
uses: taiki-e/install-action@nextest
- name: Run integration tests
if: ${{ inputs.disable-run-tests == 'false' }}
shell: pwsh
run: make test sqlness-test
- name: Upload sqlness logs
if: ${{ failure() }} # Only upload logs when the integration tests failed.
uses: actions/upload-artifact@v3
with:
name: sqlness-logs
path: ${{ runner.temp }}/greptime-*.log
retention-days: 3
- name: Build greptime binary
shell: pwsh
run: cargo build --profile ${{ inputs.cargo-profile }} --features ${{ inputs.features }} --target ${{ inputs.arch }}
- name: Upload artifacts
uses: ./.github/actions/upload-artifacts
with:
artifacts-dir: ${{ inputs.artifacts-dir }}
target-file: target/${{ inputs.arch }}/${{ inputs.cargo-profile }}/greptime
version: ${{ inputs.version }}
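
As a rough local approximation of the "Build greptime binary" step above, with placeholder values standing in for the action's inputs (the real profile, features, and target come from the calling workflow):

# Placeholder values for illustration; substitute the inputs your workflow passes.
cargo build --profile release --features dashboard --target x86_64-pc-windows-msvc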

View File

@@ -1,5 +1,5 @@
name: Release artifacts
description: Release artifacts
name: Publish GitHub release
description: Publish GitHub release
inputs:
version:
description: Version to release

View File

@@ -0,0 +1,138 @@
name: Release CN artifacts
description: Release artifacts to CN region
inputs:
src-image-registry:
description: The source image registry to store the images
required: true
default: docker.io
src-image-namespace:
description: The namespace of the source image registry to store the images
required: true
default: greptime
src-image-name:
description: The name of the source image
required: false
default: greptimedb
dst-image-registry:
description: The destination image registry to store the images
required: true
dst-image-namespace:
description: The namespace of the destination image registry to store the images
required: true
default: greptime
dst-image-registry-username:
description: The username to login to the image registry
required: true
dst-image-registry-password:
description: The password to login to the image registry
required: true
version:
description: Version of the artifact
required: true
dev-mode:
description: Enable dev mode, only push standard greptime
required: false
default: 'false'
push-latest-tag:
description: Whether to push the latest tag of the image
required: false
default: 'true'
aws-cn-s3-bucket:
description: S3 bucket to store released artifacts in CN region
required: true
aws-cn-access-key-id:
description: AWS access key id in CN region
required: true
aws-cn-secret-access-key:
description: AWS secret access key in CN region
required: true
aws-cn-region:
description: AWS region in CN
required: true
upload-to-s3:
description: Upload to S3
required: false
default: 'true'
artifacts-dir:
description: Directory to store artifacts
required: false
default: 'artifacts'
update-version-info:
description: Update the version info in S3
required: false
default: 'true'
upload-max-retry-times:
description: Max retry times for uploading artifacts to S3
required: false
default: "20"
upload-retry-timeout:
description: Timeout for uploading artifacts to S3
required: false
default: "30" # minutes
runs:
using: composite
steps:
- name: Download artifacts
uses: actions/download-artifact@v3
with:
path: ${{ inputs.artifacts-dir }}
- name: Release artifacts to cn region
uses: nick-invision/retry@v2
if: ${{ inputs.upload-to-s3 == 'true' }}
env:
AWS_ACCESS_KEY_ID: ${{ inputs.aws-cn-access-key-id }}
AWS_SECRET_ACCESS_KEY: ${{ inputs.aws-cn-secret-access-key }}
AWS_DEFAULT_REGION: ${{ inputs.aws-cn-region }}
UPDATE_VERSION_INFO: ${{ inputs.update-version-info }}
with:
max_attempts: ${{ inputs.upload-max-retry-times }}
timeout_minutes: ${{ inputs.upload-retry-timeout }}
command: |
./.github/scripts/upload-artifacts-to-s3.sh \
${{ inputs.artifacts-dir }} \
${{ inputs.version }} \
${{ inputs.aws-cn-s3-bucket }}
- name: Push greptimedb image from Dockerhub to ACR
shell: bash
env:
DST_REGISTRY_USERNAME: ${{ inputs.dst-image-registry-username }}
DST_REGISTRY_PASSWORD: ${{ inputs.dst-image-registry-password }}
run: |
./.github/scripts/copy-image.sh \
${{ inputs.src-image-registry }}/${{ inputs.src-image-namespace }}/${{ inputs.src-image-name }}:${{ inputs.version }} \
${{ inputs.dst-image-registry }}/${{ inputs.dst-image-namespace }}
- name: Push latest greptimedb image from Dockerhub to ACR
shell: bash
if: ${{ inputs.push-latest-tag == 'true' }}
env:
DST_REGISTRY_USERNAME: ${{ inputs.dst-image-registry-username }}
DST_REGISTRY_PASSWORD: ${{ inputs.dst-image-registry-password }}
run: |
./.github/scripts/copy-image.sh \
${{ inputs.src-image-registry }}/${{ inputs.src-image-namespace }}/${{ inputs.src-image-name }}:latest \
${{ inputs.dst-image-registry }}/${{ inputs.dst-image-namespace }}
- name: Push greptimedb-centos image from DockerHub to ACR
shell: bash
if: ${{ inputs.dev-mode == 'false' }}
env:
DST_REGISTRY_USERNAME: ${{ inputs.dst-image-registry-username }}
DST_REGISTRY_PASSWORD: ${{ inputs.dst-image-registry-password }}
run: |
./.github/scripts/copy-image.sh \
${{ inputs.src-image-registry }}/${{ inputs.src-image-namespace }}/${{ inputs.src-image-name }}-centos:latest \
${{ inputs.dst-image-registry }}/${{ inputs.dst-image-namespace }}
- name: Push greptimedb-centos image from DockerHub to ACR
shell: bash
if: ${{ inputs.dev-mode == 'false' && inputs.push-latest-tag == 'true' }}
env:
DST_REGISTRY_USERNAME: ${{ inputs.dst-image-registry-username }}
DST_REGISTRY_PASSWORD: ${{ inputs.dst-image-registry-password }}
run: |
./.github/scripts/copy-image.sh \
${{ inputs.src-image-registry }}/${{ inputs.src-image-namespace }}/${{ inputs.src-image-name }}-centos:latest \
${{ inputs.dst-image-registry }}/${{ inputs.dst-image-namespace }}

View File

@@ -10,34 +10,6 @@ inputs:
version:
description: Version of the artifact
required: true
release-to-s3-bucket:
description: S3 bucket to store released artifacts
required: true
aws-access-key-id:
description: AWS access key id
required: true
aws-secret-access-key:
description: AWS secret access key
required: true
aws-region:
description: AWS region
required: true
upload-to-s3:
description: Upload to S3
required: false
default: 'true'
upload-latest-artifacts:
description: Upload the latest artifacts to S3
required: false
default: 'true'
upload-max-retry-times:
description: Max retry times for uploading artifacts to S3
required: false
default: "20"
upload-retry-timeout:
description: Timeout for uploading artifacts to S3
required: false
default: "10" # minutes
working-dir:
description: Working directory to upload the artifacts
required: false
@@ -61,9 +33,21 @@ runs:
working-directory: ${{ inputs.working-dir }}
shell: bash
run: |
tar -zcvf ${{ inputs.artifacts-dir }}.tar.gz ${{ inputs.artifacts-dir }} && \
tar -zcvf ${{ inputs.artifacts-dir }}.tar.gz ${{ inputs.artifacts-dir }}
- name: Calculate checksum
if: runner.os != 'Windows'
working-directory: ${{ inputs.working-dir }}
shell: bash
run: |
echo $(shasum -a 256 ${{ inputs.artifacts-dir }}.tar.gz | cut -f1 -d' ') > ${{ inputs.artifacts-dir }}.sha256sum
- name: Calculate checksum on Windows
if: runner.os == 'Windows'
working-directory: ${{ inputs.working-dir }}
shell: pwsh
run: Get-FileHash ${{ inputs.artifacts-dir }}.tar.gz -Algorithm SHA256 | select -ExpandProperty Hash > ${{ inputs.artifacts-dir }}.sha256sum
# Note: The artifacts will be double zip compressed (related issue: https://github.com/actions/upload-artifact/issues/39).
# However, when we use 'actions/download-artifact@v3' to download the artifacts, it will be automatically unzipped.
- name: Upload artifacts
@@ -77,49 +61,3 @@ runs:
with:
name: ${{ inputs.artifacts-dir }}.sha256sum
path: ${{ inputs.working-dir }}/${{ inputs.artifacts-dir }}.sha256sum
- name: Upload artifacts to S3
if: ${{ inputs.upload-to-s3 == 'true' }}
uses: nick-invision/retry@v2
env:
AWS_ACCESS_KEY_ID: ${{ inputs.aws-access-key-id }}
AWS_SECRET_ACCESS_KEY: ${{ inputs.aws-secret-access-key }}
AWS_DEFAULT_REGION: ${{ inputs.aws-region }}
with:
max_attempts: ${{ inputs.upload-max-retry-times }}
timeout_minutes: ${{ inputs.upload-retry-timeout }}
# The bucket layout will be:
# releases/greptimedb
# ├── v0.1.0
# │ ├── greptime-darwin-amd64-pyo3-v0.1.0.sha256sum
# │ └── greptime-darwin-amd64-pyo3-v0.1.0.tar.gz
# └── v0.2.0
# ├── greptime-darwin-amd64-pyo3-v0.2.0.sha256sum
# └── greptime-darwin-amd64-pyo3-v0.2.0.tar.gz
command: |
cd ${{ inputs.working-dir }} && \
aws s3 cp \
${{ inputs.artifacts-dir }}.tar.gz \
s3://${{ inputs.release-to-s3-bucket }}/releases/greptimedb/${{ inputs.version }}/${{ inputs.artifacts-dir }}.tar.gz && \
aws s3 cp \
${{ inputs.artifacts-dir }}.sha256sum \
s3://${{ inputs.release-to-s3-bucket }}/releases/greptimedb/${{ inputs.version }}/${{ inputs.artifacts-dir }}.sha256sum
- name: Upload latest artifacts to S3
if: ${{ inputs.upload-to-s3 == 'true' && inputs.upload-latest-artifacts == 'true' }} # We'll also upload the latest artifacts to S3 in the scheduled and formal release.
uses: nick-invision/retry@v2
env:
AWS_ACCESS_KEY_ID: ${{ inputs.aws-access-key-id }}
AWS_SECRET_ACCESS_KEY: ${{ inputs.aws-secret-access-key }}
AWS_DEFAULT_REGION: ${{ inputs.aws-region }}
with:
max_attempts: ${{ inputs.upload-max-retry-times }}
timeout_minutes: ${{ inputs.upload-retry-timeout }}
command: |
cd ${{ inputs.working-dir }} && \
aws s3 cp \
${{ inputs.artifacts-dir }}.tar.gz \
s3://${{ inputs.release-to-s3-bucket }}/releases/greptimedb/latest/${{ inputs.artifacts-dir }}.tar.gz && \
aws s3 cp \
${{ inputs.artifacts-dir }}.sha256sum \
s3://${{ inputs.release-to-s3-bucket }}/releases/greptimedb/latest/${{ inputs.artifacts-dir }}.sha256sum
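
A note on the checksum steps added above: the .sha256sum file holds only the bare hex digest, so a consumer verifies it by recomputing the hash rather than feeding the file to `shasum -c`. A minimal sketch, with a hypothetical artifact name:

# Hypothetical artifact name; adjust to the tarball you downloaded.
ARTIFACT=greptime-linux-amd64-v0.4.0
expected=$(cat "${ARTIFACT}.sha256sum")
actual=$(shasum -a 256 "${ARTIFACT}.tar.gz" | cut -f1 -d' ')
[ "$expected" = "$actual" ] && echo "checksum OK" || echo "checksum mismatch"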

47
.github/scripts/copy-image.sh vendored Executable file
View File

@@ -0,0 +1,47 @@
#!/usr/bin/env bash
set -e
set -o pipefail
SRC_IMAGE=$1
DST_REGISTRY=$2
SKOPEO_STABLE_IMAGE="quay.io/skopeo/stable:latest"
# Check if necessary variables are set.
function check_vars() {
for var in DST_REGISTRY_USERNAME DST_REGISTRY_PASSWORD DST_REGISTRY SRC_IMAGE; do
if [ -z "${!var}" ]; then
echo "$var is not set or empty."
echo "Usage: DST_REGISTRY_USERNAME=<your-dst-registry-username> DST_REGISTRY_PASSWORD=<your-dst-registry-password> $0 <dst-registry> <src-image>"
exit 1
fi
done
}
# Copies images from DockerHub to the destination registry.
function copy_images_from_dockerhub() {
# Check if docker is installed.
if ! command -v docker &> /dev/null; then
echo "docker is not installed. Please install docker to continue."
exit 1
fi
# Extract the name and tag of the source image.
IMAGE_NAME=$(echo "$SRC_IMAGE" | sed "s/.*\///")
echo "Copying $SRC_IMAGE to $DST_REGISTRY/$IMAGE_NAME"
docker run "$SKOPEO_STABLE_IMAGE" copy -a docker://"$SRC_IMAGE" \
--dest-creds "$DST_REGISTRY_USERNAME":"$DST_REGISTRY_PASSWORD" \
docker://"$DST_REGISTRY/$IMAGE_NAME"
}
function main() {
check_vars
copy_images_from_dockerhub
}
# Usage example:
# DST_REGISTRY_USERNAME=123 DST_REGISTRY_PASSWORD=456 \
# ./copy-image.sh greptime/greptimedb:v0.4.0 greptime-registry.cn-hangzhou.cr.aliyuncs.com
main
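
The `docker run` above is simply a containerized skopeo invocation; copying one tag by hand would look roughly like the following, where the image tag, destination registry, and credentials are placeholders rather than values taken from this repository's workflows:

# Hedged sketch of the underlying skopeo copy; all names and credentials are placeholders.
docker run quay.io/skopeo/stable:latest copy -a \
  docker://greptime/greptimedb:v0.4.0 \
  --dest-creds "my-user:my-token" \
  docker://example-registry.example.com/greptime/greptimedb:v0.4.0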

102
.github/scripts/upload-artifacts-to-s3.sh vendored Executable file
View File

@@ -0,0 +1,102 @@
#!/usr/bin/env bash
set -e
set -o pipefail
ARTIFACTS_DIR=$1
VERSION=$2
AWS_S3_BUCKET=$3
RELEASE_DIRS="releases/greptimedb"
GREPTIMEDB_REPO="GreptimeTeam/greptimedb"
# Check if necessary variables are set.
function check_vars() {
for var in AWS_S3_BUCKET VERSION ARTIFACTS_DIR; do
if [ -z "${!var}" ]; then
echo "$var is not set or empty."
echo "Usage: $0 <artifacts-dir> <version> <aws-s3-bucket>"
exit 1
fi
done
}
# Uploads artifacts to AWS S3 bucket.
function upload_artifacts() {
# The bucket layout will be:
# releases/greptimedb
# ├── latest-version.txt
# ├── latest-nightly-version.txt
# ├── v0.1.0
# │ ├── greptime-darwin-amd64-pyo3-v0.1.0.sha256sum
# │ └── greptime-darwin-amd64-pyo3-v0.1.0.tar.gz
# └── v0.2.0
# ├── greptime-darwin-amd64-pyo3-v0.2.0.sha256sum
# └── greptime-darwin-amd64-pyo3-v0.2.0.tar.gz
find "$ARTIFACTS_DIR" -type f \( -name "*.tar.gz" -o -name "*.sha256sum" \) | while IFS= read -r file; do
aws s3 cp \
"$file" "s3://$AWS_S3_BUCKET/$RELEASE_DIRS/$VERSION/$(basename "$file")"
done
}
# Updates the latest version information in AWS S3 if UPDATE_VERSION_INFO is true.
function update_version_info() {
if [ "$UPDATE_VERSION_INFO" == "true" ]; then
# If it's the official release (like v1.0.0, v1.0.1, v1.0.2, etc.), update latest-version.txt.
if [[ "$VERSION" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
echo "Updating latest-version.txt"
echo "$VERSION" > latest-version.txt
aws s3 cp \
latest-version.txt "s3://$AWS_S3_BUCKET/$RELEASE_DIRS/latest-version.txt"
fi
# If it's the nightly release, update latest-nightly-version.txt.
if [[ "$VERSION" == *"nightly"* ]]; then
echo "Updating latest-nightly-version.txt"
echo "$VERSION" > latest-nightly-version.txt
aws s3 cp \
latest-nightly-version.txt "s3://$AWS_S3_BUCKET/$RELEASE_DIRS/latest-nightly-version.txt"
fi
fi
}
# Downloads artifacts from Github if DOWNLOAD_ARTIFACTS_FROM_GITHUB is true.
function download_artifacts_from_github() {
if [ "$DOWNLOAD_ARTIFACTS_FROM_GITHUB" == "true" ]; then
# Check if jq is installed.
if ! command -v jq &> /dev/null; then
echo "jq is not installed. Please install jq to continue."
exit 1
fi
# Get the latest release API response.
RELEASES_API_RESPONSE=$(curl -s -H "Accept: application/vnd.github.v3+json" "https://api.github.com/repos/$GREPTIMEDB_REPO/releases/latest")
# Extract download URLs for the artifacts.
# Exclude source code archives which are typically named as 'greptimedb-<version>.zip' or 'greptimedb-<version>.tar.gz'.
ASSET_URLS=$(echo "$RELEASES_API_RESPONSE" | jq -r '.assets[] | select(.name | test("greptimedb-.*\\.(zip|tar\\.gz)$") | not) | .browser_download_url')
# Download each asset.
while IFS= read -r url; do
if [ -n "$url" ]; then
curl -LJO "$url"
echo "Downloaded: $url"
fi
done <<< "$ASSET_URLS"
fi
}
function main() {
check_vars
download_artifacts_from_github
upload_artifacts
update_version_info
}
# Usage example:
# AWS_ACCESS_KEY_ID=<your_access_key_id> \
# AWS_SECRET_ACCESS_KEY=<your_secret_access_key> \
# AWS_DEFAULT_REGION=<your_region> \
# UPDATE_VERSION_INFO=true \
# DOWNLOAD_ARTIFACTS_FROM_GITHUB=false \
# ./upload-artifacts-to-s3.sh <artifacts-dir> <version> <aws-s3-bucket>
main
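
A small sketch of how `update_version_info` classifies a tag, mirroring the two patterns used above (the version strings are made up for illustration):

# Sketch only; reproduces the classification logic of update_version_info.
for v in v1.0.0 v0.5.0-nightly-20231101 v1.0.0-rc1; do
  if [[ "$v" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
    echo "$v -> official release, would update latest-version.txt"
  elif [[ "$v" == *"nightly"* ]]; then
    echo "$v -> nightly release, would update latest-nightly-version.txt"
  else
    echo "$v -> neither, version info left untouched"
  fi
done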

View File

@@ -17,7 +17,7 @@ env:
jobs:
apidoc:
runs-on: ubuntu-latest
runs-on: ubuntu-20.04
steps:
- uses: actions/checkout@v3
- uses: arduino/setup-protoc@v1

View File

@@ -16,11 +16,11 @@ on:
description: The runner uses to build linux-amd64 artifacts
default: ec2-c6i.4xlarge-amd64
options:
- ubuntu-latest
- ubuntu-latest-8-cores
- ubuntu-latest-16-cores
- ubuntu-latest-32-cores
- ubuntu-latest-64-cores
- ubuntu-20.04
- ubuntu-20.04-8-cores
- ubuntu-20.04-16-cores
- ubuntu-20.04-32-cores
- ubuntu-20.04-64-cores
- ec2-c6i.xlarge-amd64 # 4C8G
- ec2-c6i.2xlarge-amd64 # 8C16G
- ec2-c6i.4xlarge-amd64 # 16C32G
@@ -78,7 +78,7 @@ jobs:
allocate-runners:
name: Allocate runners
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
runs-on: ubuntu-latest
runs-on: ubuntu-20.04
outputs:
linux-amd64-runner: ${{ steps.start-linux-amd64-runner.outputs.label }}
linux-arm64-runner: ${{ steps.start-linux-arm64-runner.outputs.label }}
@@ -164,12 +164,7 @@ jobs:
cargo-profile: ${{ env.CARGO_PROFILE }}
version: ${{ needs.allocate-runners.outputs.version }}
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
release-to-s3-bucket: ${{ vars.AWS_RELEASE_BUCKET }}
aws-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
aws-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
dev-mode: true # Only build the standard greptime binary.
upload-to-s3: false # No need to upload to S3.
working-dir: ${{ env.CHECKOUT_GREPTIMEDB_PATH }}
build-linux-arm64-artifacts:
@@ -198,12 +193,7 @@ jobs:
cargo-profile: ${{ env.CARGO_PROFILE }}
version: ${{ needs.allocate-runners.outputs.version }}
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
release-to-s3-bucket: ${{ vars.AWS_RELEASE_BUCKET }}
aws-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
aws-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
dev-mode: true # Only build the standard greptime binary.
upload-to-s3: false # No need to upload to S3.
working-dir: ${{ env.CHECKOUT_GREPTIMEDB_PATH }}
release-images-to-dockerhub:
@@ -214,7 +204,7 @@ jobs:
build-linux-amd64-artifacts,
build-linux-arm64-artifacts,
]
runs-on: ubuntu-latest
runs-on: ubuntu-20.04
outputs:
build-result: ${{ steps.set-build-result.outputs.build-result }}
steps:
@@ -239,41 +229,44 @@ jobs:
run: |
echo "build-result=success" >> $GITHUB_OUTPUT
release-images-to-acr:
name: Build and push images to ACR
release-cn-artifacts:
name: Release artifacts to CN region
if: ${{ inputs.release_images || github.event_name == 'schedule' }}
needs: [
allocate-runners,
build-linux-amd64-artifacts,
build-linux-arm64-artifacts,
release-images-to-dockerhub,
]
runs-on: ubuntu-latest
# When we push to ACR, it's easy to fail due to some unknown network issues.
# However, we don't want to fail the whole workflow because of this.
# The ACR has a daily sync with DockerHub, so don't worry about the image not being updated.
runs-on: ubuntu-20.04
continue-on-error: true
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Build and push images to ACR
uses: ./.github/actions/build-images
- name: Release artifacts to CN region
uses: ./.github/actions/release-cn-artifacts
with:
image-registry: ${{ vars.ACR_IMAGE_REGISTRY }}
image-namespace: ${{ vars.IMAGE_NAMESPACE }}
image-name: ${{ env.IMAGE_NAME }}
image-registry-username: ${{ secrets.ALICLOUD_USERNAME }}
image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }}
src-image-registry: docker.io
src-image-namespace: ${{ vars.IMAGE_NAMESPACE }}
src-image-name: ${{ env.IMAGE_NAME }}
dst-image-registry-username: ${{ secrets.ALICLOUD_USERNAME }}
dst-image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }}
dst-image-registry: ${{ vars.ACR_IMAGE_REGISTRY }}
dst-image-namespace: ${{ vars.IMAGE_NAMESPACE }}
version: ${{ needs.allocate-runners.outputs.version }}
push-latest-tag: false # Don't push the latest tag to registry.
dev-mode: true # Only build the standard images.
aws-cn-s3-bucket: ${{ vars.AWS_RELEASE_BUCKET }}
aws-cn-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
aws-cn-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
aws-cn-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
dev-mode: true # Only build the standard images(exclude centos images).
push-latest-tag: false # Don't push the latest tag to registry.
update-version-info: false # Don't update the version info in S3.
stop-linux-amd64-runner: # It's always run as the last job in the workflow to make sure that the runner is released.
name: Stop linux-amd64 runner
# Only run this job when the runner is allocated.
if: ${{ always() }}
runs-on: ubuntu-latest
runs-on: ubuntu-20.04
needs: [
allocate-runners,
build-linux-amd64-artifacts,
@@ -298,7 +291,7 @@ jobs:
name: Stop linux-arm64 runner
# Only run this job when the runner is allocated.
if: ${{ always() }}
runs-on: ubuntu-latest
runs-on: ubuntu-20.04
needs: [
allocate-runners,
build-linux-arm64-artifacts,
@@ -325,7 +318,7 @@ jobs:
needs: [
release-images-to-dockerhub
]
runs-on: ubuntu-latest
runs-on: ubuntu-20.04
env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
steps:

View File

@@ -34,7 +34,7 @@ env:
jobs:
typos:
name: Spell Check with Typos
runs-on: ubuntu-latest
runs-on: ubuntu-20.04
steps:
- uses: actions/checkout@v3
- uses: crate-ci/typos@v1.13.10
@@ -42,7 +42,7 @@ jobs:
check:
name: Check
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
runs-on: ubuntu-20.04
timeout-minutes: 60
steps:
- uses: actions/checkout@v3
@@ -60,7 +60,7 @@ jobs:
toml:
name: Toml Check
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
runs-on: ubuntu-20.04
timeout-minutes: 60
steps:
- uses: actions/checkout@v3
@@ -80,7 +80,7 @@ jobs:
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ ubuntu-latest-8-cores, windows-latest-8-cores ]
os: [ ubuntu-20.04-8-cores ]
timeout-minutes: 60
steps:
- uses: actions/checkout@v3
@@ -105,7 +105,7 @@ jobs:
fmt:
name: Rustfmt
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
runs-on: ubuntu-20.04
timeout-minutes: 60
steps:
- uses: actions/checkout@v3
@@ -124,7 +124,7 @@ jobs:
clippy:
name: Clippy
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
runs-on: ubuntu-20.04
timeout-minutes: 60
steps:
- uses: actions/checkout@v3
@@ -142,7 +142,7 @@ jobs:
coverage:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest-8-cores
runs-on: ubuntu-20.04-8-cores
timeout-minutes: 60
steps:
- uses: actions/checkout@v3
@@ -188,43 +188,3 @@ jobs:
flags: rust
fail_ci_if_error: false
verbose: true
test-on-windows:
if: github.event.pull_request.draft == false
runs-on: windows-latest-8-cores
timeout-minutes: 60
steps:
- run: git config --global core.autocrlf false
- uses: actions/checkout@v3
- uses: arduino/setup-protoc@v1
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- name: Install Rust toolchain
uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
components: llvm-tools-preview
- name: Rust Cache
uses: Swatinem/rust-cache@v2
- name: Install Cargo Nextest
uses: taiki-e/install-action@nextest
- name: Install Python
uses: actions/setup-python@v4
with:
python-version: '3.10'
- name: Install PyArrow Package
run: pip install pyarrow
- name: Install WSL distribution
uses: Vampire/setup-wsl@v2
with:
distribution: Ubuntu-22.04
- name: Running tests
run: cargo nextest run -F pyo3_backend,dashboard
env:
RUST_BACKTRACE: 1
CARGO_INCREMENTAL: 0
GT_S3_BUCKET: ${{ secrets.S3_BUCKET }}
GT_S3_ACCESS_KEY_ID: ${{ secrets.S3_ACCESS_KEY_ID }}
GT_S3_ACCESS_KEY: ${{ secrets.S3_ACCESS_KEY }}
GT_S3_REGION: ${{ secrets.S3_REGION }}
UNITTEST_LOG_DIR: "__unittest_logs"

View File

@@ -11,7 +11,7 @@ on:
jobs:
doc_issue:
if: github.event.label.name == 'doc update required'
runs-on: ubuntu-latest
runs-on: ubuntu-20.04
steps:
- name: create an issue in doc repo
uses: dacbd/create-issue-action@main
@@ -25,7 +25,7 @@ jobs:
${{ github.event.issue.html_url || github.event.pull_request.html_url }}
cloud_issue:
if: github.event.label.name == 'cloud followup required'
runs-on: ubuntu-latest
runs-on: ubuntu-20.04
steps:
- name: create an issue in cloud repo
uses: dacbd/create-issue-action@main

View File

@@ -30,7 +30,7 @@ name: CI
jobs:
typos:
name: Spell Check with Typos
runs-on: ubuntu-latest
runs-on: ubuntu-20.04
steps:
- uses: actions/checkout@v3
- uses: crate-ci/typos@v1.13.10
@@ -38,33 +38,33 @@ jobs:
check:
name: Check
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
runs-on: ubuntu-20.04
steps:
- run: 'echo "No action required"'
fmt:
name: Rustfmt
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
runs-on: ubuntu-20.04
steps:
- run: 'echo "No action required"'
clippy:
name: Clippy
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
runs-on: ubuntu-20.04
steps:
- run: 'echo "No action required"'
coverage:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
runs-on: ubuntu-20.04
steps:
- run: 'echo "No action required"'
sqlness:
name: Sqlness Test
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
runs-on: ubuntu-20.04
steps:
- run: 'echo "No action required"'

View File

@@ -8,7 +8,7 @@ on:
types: [opened, synchronize, reopened, ready_for_review]
jobs:
license-header-check:
runs-on: ubuntu-latest
runs-on: ubuntu-20.04
name: license-header-check
steps:
- uses: actions/checkout@v2

View File

@@ -14,11 +14,11 @@ on:
description: The runner uses to build linux-amd64 artifacts
default: ec2-c6i.2xlarge-amd64
options:
- ubuntu-latest
- ubuntu-latest-8-cores
- ubuntu-latest-16-cores
- ubuntu-latest-32-cores
- ubuntu-latest-64-cores
- ubuntu-20.04
- ubuntu-20.04-8-cores
- ubuntu-20.04-16-cores
- ubuntu-20.04-32-cores
- ubuntu-20.04-64-cores
- ec2-c6i.xlarge-amd64 # 4C8G
- ec2-c6i.2xlarge-amd64 # 8C16G
- ec2-c6i.4xlarge-amd64 # 16C32G
@@ -70,7 +70,7 @@ jobs:
allocate-runners:
name: Allocate runners
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
runs-on: ubuntu-latest
runs-on: ubuntu-20.04
outputs:
linux-amd64-runner: ${{ steps.start-linux-amd64-runner.outputs.label }}
linux-arm64-runner: ${{ steps.start-linux-arm64-runner.outputs.label }}
@@ -147,11 +147,6 @@ jobs:
cargo-profile: ${{ env.CARGO_PROFILE }}
version: ${{ needs.allocate-runners.outputs.version }}
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
release-to-s3-bucket: ${{ vars.AWS_RELEASE_BUCKET }}
aws-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
aws-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
upload-latest-artifacts: false
build-linux-arm64-artifacts:
name: Build linux-arm64 artifacts
@@ -171,11 +166,6 @@ jobs:
cargo-profile: ${{ env.CARGO_PROFILE }}
version: ${{ needs.allocate-runners.outputs.version }}
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
release-to-s3-bucket: ${{ vars.AWS_RELEASE_BUCKET }}
aws-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
aws-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
upload-latest-artifacts: false
release-images-to-dockerhub:
name: Build and push images to DockerHub
@@ -185,7 +175,7 @@ jobs:
build-linux-amd64-artifacts,
build-linux-arm64-artifacts,
]
runs-on: ubuntu-latest
runs-on: ubuntu-20.04
outputs:
nightly-build-result: ${{ steps.set-nightly-build-result.outputs.nightly-build-result }}
steps:
@@ -208,15 +198,14 @@ jobs:
run: |
echo "nightly-build-result=success" >> $GITHUB_OUTPUT
release-images-to-acr:
name: Build and push images to ACR
release-cn-artifacts:
name: Release artifacts to CN region
if: ${{ inputs.release_images || github.event_name == 'schedule' }}
needs: [
allocate-runners,
build-linux-amd64-artifacts,
build-linux-arm64-artifacts,
release-images-to-dockerhub,
]
runs-on: ubuntu-latest
runs-on: ubuntu-20.04
# When we push to ACR, it's easy to fail due to some unknown network issues.
# However, we don't want to fail the whole workflow because of this.
# The ACR has a daily sync with DockerHub, so don't worry about the image not being updated.
@@ -226,21 +215,30 @@ jobs:
with:
fetch-depth: 0
- name: Build and push images to ACR
uses: ./.github/actions/build-images
- name: Release artifacts to CN region
uses: ./.github/actions/release-cn-artifacts
with:
image-registry: ${{ vars.ACR_IMAGE_REGISTRY }}
image-namespace: ${{ vars.IMAGE_NAMESPACE }}
image-registry-username: ${{ secrets.ALICLOUD_USERNAME }}
image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }}
src-image-registry: docker.io
src-image-namespace: ${{ vars.IMAGE_NAMESPACE }}
src-image-name: greptimedb
dst-image-registry-username: ${{ secrets.ALICLOUD_USERNAME }}
dst-image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }}
dst-image-registry: ${{ vars.ACR_IMAGE_REGISTRY }}
dst-image-namespace: ${{ vars.IMAGE_NAMESPACE }}
version: ${{ needs.allocate-runners.outputs.version }}
push-latest-tag: false # Don't push the latest tag to registry.
aws-cn-s3-bucket: ${{ vars.AWS_RELEASE_BUCKET }}
aws-cn-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
aws-cn-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
aws-cn-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
dev-mode: false
update-version-info: false # Don't update version info in S3.
push-latest-tag: false # Don't push the latest tag to registry.
stop-linux-amd64-runner: # It's always run as the last job in the workflow to make sure that the runner is released.
name: Stop linux-amd64 runner
# Only run this job when the runner is allocated.
if: ${{ always() }}
runs-on: ubuntu-latest
runs-on: ubuntu-20.04
needs: [
allocate-runners,
build-linux-amd64-artifacts,
@@ -265,7 +263,7 @@ jobs:
name: Stop linux-arm64 runner
# Only run this job when the runner is allocated.
if: ${{ always() }}
runs-on: ubuntu-latest
runs-on: ubuntu-20.04
needs: [
allocate-runners,
build-linux-arm64-artifacts,
@@ -292,7 +290,7 @@ jobs:
needs: [
release-images-to-dockerhub
]
runs-on: ubuntu-latest
runs-on: ubuntu-20.04
env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
steps:

.github/workflows/nightly-ci.yml (new file, 82 lines)
View File

@@ -0,0 +1,82 @@
# Nightly CI: runs tests every night for our second-tier platforms (Windows)
on:
schedule:
- cron: '0 23 * * 1-5'
workflow_dispatch:
name: Nightly CI
concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
cancel-in-progress: true
env:
RUST_TOOLCHAIN: nightly-2023-08-07
jobs:
sqlness:
name: Sqlness Test
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ windows-latest-8-cores ]
timeout-minutes: 60
steps:
- uses: actions/checkout@v4.1.0
- uses: arduino/setup-protoc@v1
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
- name: Rust Cache
uses: Swatinem/rust-cache@v2
- name: Run sqlness
run: cargo sqlness
- name: Upload sqlness logs
if: always()
uses: actions/upload-artifact@v3
with:
name: sqlness-logs
path: ${{ runner.temp }}/greptime-*.log
retention-days: 3
test-on-windows:
runs-on: windows-latest-8-cores
timeout-minutes: 60
steps:
- run: git config --global core.autocrlf false
- uses: actions/checkout@v4.1.0
- uses: arduino/setup-protoc@v1
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- name: Install Rust toolchain
uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
components: llvm-tools-preview
- name: Rust Cache
uses: Swatinem/rust-cache@v2
- name: Install Cargo Nextest
uses: taiki-e/install-action@nextest
- name: Install Python
uses: actions/setup-python@v4
with:
python-version: '3.10'
- name: Install PyArrow Package
run: pip install pyarrow
- name: Install WSL distribution
uses: Vampire/setup-wsl@v2
with:
distribution: Ubuntu-22.04
- name: Running tests
run: cargo nextest run -F pyo3_backend,dashboard
env:
RUST_BACKTRACE: 1
CARGO_INCREMENTAL: 0
GT_S3_BUCKET: ${{ secrets.S3_BUCKET }}
GT_S3_ACCESS_KEY_ID: ${{ secrets.S3_ACCESS_KEY_ID }}
GT_S3_ACCESS_KEY: ${{ secrets.S3_ACCESS_KEY }}
GT_S3_REGION: ${{ secrets.S3_REGION }}
UNITTEST_LOG_DIR: "__unittest_logs"

View File

@@ -10,7 +10,7 @@ on:
jobs:
check:
runs-on: ubuntu-latest
runs-on: ubuntu-20.04
timeout-minutes: 10
steps:
- uses: thehanimo/pr-title-checker@v1.3.4
@@ -19,7 +19,7 @@ jobs:
pass_on_octokit_error: false
configuration_path: ".github/pr-title-checker-config.json"
breaking:
runs-on: ubuntu-latest
runs-on: ubuntu-20.04
timeout-minutes: 10
steps:
- uses: thehanimo/pr-title-checker@v1.3.4

View File

@@ -0,0 +1,85 @@
name: Release dev-builder images
on:
workflow_dispatch: # Allows you to run this workflow manually.
inputs:
version:
description: Version of the dev-builder
required: false
default: latest
release_dev_builder_ubuntu_image:
type: boolean
description: Release dev-builder-ubuntu image
required: false
default: false
release_dev_builder_centos_image:
type: boolean
description: Release dev-builder-centos image
required: false
default: false
release_dev_builder_android_image:
type: boolean
description: Release dev-builder-android image
required: false
default: false
jobs:
release-dev-builder-images:
name: Release dev builder images
if: ${{ inputs.release_dev_builder_ubuntu_image || inputs.release_dev_builder_centos_image || inputs.release_dev_builder_android_image }} # Only manually trigger this job.
runs-on: ubuntu-20.04-16-cores
steps:
- name: Checkout
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Build and push dev builder images
uses: ./.github/actions/build-dev-builder-images
with:
version: ${{ inputs.version }}
dockerhub-image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
dockerhub-image-registry-token: ${{ secrets.DOCKERHUB_TOKEN }}
build-dev-builder-ubuntu: ${{ inputs.release_dev_builder_ubuntu_image }}
build-dev-builder-centos: ${{ inputs.release_dev_builder_centos_image }}
build-dev-builder-android: ${{ inputs.release_dev_builder_android_image }}
release-dev-builder-images-cn: # Note: be careful of issue https://github.com/containers/skopeo/issues/1874; we decided to use the latest stable skopeo container.
name: Release dev builder images to CN region
runs-on: ubuntu-20.04
needs: [
release-dev-builder-images
]
steps:
- name: Push dev-builder-ubuntu image
shell: bash
if: ${{ inputs.release_dev_builder_ubuntu_image }}
env:
DST_REGISTRY_USERNAME: ${{ secrets.ALICLOUD_USERNAME }}
DST_REGISTRY_PASSWORD: ${{ secrets.ALICLOUD_PASSWORD }}
run: |
docker run quay.io/skopeo/stable:latest copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-ubuntu:${{ inputs.version }} \
--dest-creds "$DST_REGISTRY_USERNAME":"$DST_REGISTRY_PASSWORD" \
docker://${{ vars.ACR_IMAGE_REGISTRY }}/${{ vars.IMAGE_NAMESPACE }}/dev-builder-ubuntu:${{ inputs.version }}
- name: Push dev-builder-centos image
shell: bash
if: ${{ inputs.release_dev_builder_centos_image }}
env:
DST_REGISTRY_USERNAME: ${{ secrets.ALICLOUD_USERNAME }}
DST_REGISTRY_PASSWORD: ${{ secrets.ALICLOUD_PASSWORD }}
run: |
docker run quay.io/skopeo/stable:latest copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-centos:${{ inputs.version }} \
--dest-creds "$DST_REGISTRY_USERNAME":"$DST_REGISTRY_PASSWORD" \
docker://${{ vars.ACR_IMAGE_REGISTRY }}/${{ vars.IMAGE_NAMESPACE }}/dev-builder-centos:${{ inputs.version }}
- name: Push dev-builder-android image
shell: bash
if: ${{ inputs.release_dev_builder_android_image }}
env:
DST_REGISTRY_USERNAME: ${{ secrets.ALICLOUD_USERNAME }}
DST_REGISTRY_PASSWORD: ${{ secrets.ALICLOUD_PASSWORD }}
run: |
docker run quay.io/skopeo/stable:latest copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-android:${{ inputs.version }} \
--dest-creds "$DST_REGISTRY_USERNAME":"$DST_REGISTRY_PASSWORD" \
docker://${{ vars.ACR_IMAGE_REGISTRY }}/${{ vars.IMAGE_NAMESPACE }}/dev-builder-android:${{ inputs.version }}

View File

@@ -18,11 +18,11 @@ on:
description: The runner used to build linux-amd64 artifacts
default: ec2-c6i.4xlarge-amd64
options:
- ubuntu-latest
- ubuntu-latest-8-cores
- ubuntu-latest-16-cores
- ubuntu-latest-32-cores
- ubuntu-latest-64-cores
- ubuntu-20.04
- ubuntu-20.04-8-cores
- ubuntu-20.04-16-cores
- ubuntu-20.04-32-cores
- ubuntu-20.04-64-cores
- ec2-c6i.xlarge-amd64 # 4C8G
- ec2-c6i.2xlarge-amd64 # 8C16G
- ec2-c6i.4xlarge-amd64 # 16C32G
@@ -63,7 +63,12 @@ on:
description: Build macos artifacts
required: false
default: false
release_artifacts:
build_windows_artifacts:
type: boolean
description: Build Windows artifacts
required: false
default: false
publish_github_release:
type: boolean
description: Create GitHub release and upload artifacts
required: false
@@ -73,11 +78,6 @@ on:
description: Build and push images to DockerHub and ACR
required: false
default: false
release_dev_builder_image:
type: boolean
description: Release dev-builder image
required: false
default: false
# Use env variables to control all the release process.
env:
@@ -91,17 +91,18 @@ env:
# The scheduled version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-YYYYMMDD', like v0.2.0-nightly-20230313;
NIGHTLY_RELEASE_PREFIX: nightly
# Note: The NEXT_RELEASE_VERSION should be modified manually by every formal release.
NEXT_RELEASE_VERSION: v0.4.0
NEXT_RELEASE_VERSION: v0.5.0
jobs:
allocate-runners:
name: Allocate runners
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
runs-on: ubuntu-latest
runs-on: ubuntu-20.04
outputs:
linux-amd64-runner: ${{ steps.start-linux-amd64-runner.outputs.label }}
linux-arm64-runner: ${{ steps.start-linux-arm64-runner.outputs.label }}
macos-runner: ${{ inputs.macos_runner || vars.DEFAULT_MACOS_RUNNER }}
windows-runner: windows-latest-8-cores
# The following EC2 resource id will be used for resource releasing.
linux-amd64-ec2-runner-label: ${{ steps.start-linux-amd64-runner.outputs.label }}
@@ -177,10 +178,6 @@ jobs:
cargo-profile: ${{ env.CARGO_PROFILE }}
version: ${{ needs.allocate-runners.outputs.version }}
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
release-to-s3-bucket: ${{ vars.AWS_RELEASE_BUCKET }}
aws-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
aws-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
build-linux-arm64-artifacts:
name: Build linux-arm64 artifacts
@@ -200,10 +197,6 @@ jobs:
cargo-profile: ${{ env.CARGO_PROFILE }}
version: ${{ needs.allocate-runners.outputs.version }}
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
release-to-s3-bucket: ${{ vars.AWS_RELEASE_BUCKET }}
aws-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
aws-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
build-macos-artifacts:
name: Build macOS artifacts
@@ -245,11 +238,43 @@ jobs:
features: ${{ matrix.features }}
version: ${{ needs.allocate-runners.outputs.version }}
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
release-to-s3-bucket: ${{ vars.AWS_RELEASE_BUCKET }}
artifacts-dir: ${{ matrix.artifacts-dir-prefix }}-${{ needs.allocate-runners.outputs.version }}
aws-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
aws-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
build-windows-artifacts:
name: Build Windows artifacts
strategy:
fail-fast: false
matrix:
include:
- os: ${{ needs.allocate-runners.outputs.windows-runner }}
arch: x86_64-pc-windows-msvc
features: servers/dashboard
artifacts-dir-prefix: greptime-windows-amd64
- os: ${{ needs.allocate-runners.outputs.windows-runner }}
arch: x86_64-pc-windows-msvc
features: pyo3_backend,servers/dashboard
artifacts-dir-prefix: greptime-windows-amd64-pyo3
runs-on: ${{ matrix.os }}
needs: [
allocate-runners,
]
if: ${{ inputs.build_windows_artifacts || github.event_name == 'push' || github.event_name == 'schedule' }}
steps:
- run: git config --global core.autocrlf false
- uses: actions/checkout@v3
with:
fetch-depth: 0
- uses: ./.github/actions/build-windows-artifacts
with:
arch: ${{ matrix.arch }}
rust-toolchain: ${{ env.RUST_TOOLCHAIN }}
cargo-profile: ${{ env.CARGO_PROFILE }}
features: ${{ matrix.features }}
version: ${{ needs.allocate-runners.outputs.version }}
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
artifacts-dir: ${{ matrix.artifacts-dir-prefix }}-${{ needs.allocate-runners.outputs.version }}
release-images-to-dockerhub:
name: Build and push images to DockerHub
@@ -274,15 +299,14 @@ jobs:
image-registry-password: ${{ secrets.DOCKERHUB_TOKEN }}
version: ${{ needs.allocate-runners.outputs.version }}
release-images-to-acr:
name: Build and push images to ACR
release-cn-artifacts:
name: Release artifacts to CN region
if: ${{ inputs.release_images || github.event_name == 'push' || github.event_name == 'schedule' }}
needs: [
allocate-runners,
build-linux-amd64-artifacts,
build-linux-arm64-artifacts,
release-images-to-dockerhub,
]
runs-on: ubuntu-2004-16-cores
runs-on: ubuntu-20.04
# When we push to ACR, it's easy to fail due to some unknown network issues.
# However, we don't want to fail the whole workflow because of this.
# The ACR has a daily sync with DockerHub, so don't worry about the image not being updated.
@@ -292,18 +316,28 @@ jobs:
with:
fetch-depth: 0
- name: Build and push images to ACR
uses: ./.github/actions/build-images
- name: Release artifacts to CN region
uses: ./.github/actions/release-cn-artifacts
with:
image-registry: ${{ vars.ACR_IMAGE_REGISTRY }}
image-namespace: ${{ vars.IMAGE_NAMESPACE }}
image-registry-username: ${{ secrets.ALICLOUD_USERNAME }}
image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }}
src-image-registry: docker.io
src-image-namespace: ${{ vars.IMAGE_NAMESPACE }}
src-image-name: greptimedb
dst-image-registry-username: ${{ secrets.ALICLOUD_USERNAME }}
dst-image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }}
dst-image-registry: ${{ vars.ACR_IMAGE_REGISTRY }}
dst-image-namespace: ${{ vars.IMAGE_NAMESPACE }}
version: ${{ needs.allocate-runners.outputs.version }}
aws-cn-s3-bucket: ${{ vars.AWS_RELEASE_BUCKET }}
aws-cn-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
aws-cn-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
aws-cn-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
dev-mode: false
update-version-info: true
push-latest-tag: true
release-artifacts:
publish-github-release:
name: Create GitHub release and upload artifacts
if: ${{ inputs.release_artifacts || github.event_name == 'push' || github.event_name == 'schedule' }}
if: ${{ inputs.publish_github_release || github.event_name == 'push' || github.event_name == 'schedule' }}
needs: [
allocate-runners,
build-linux-amd64-artifacts,
@@ -311,36 +345,17 @@ jobs:
build-macos-artifacts,
release-images-to-dockerhub,
]
runs-on: ubuntu-latest
runs-on: ubuntu-20.04
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Release artifacts
uses: ./.github/actions/release-artifacts
- name: Publish GitHub release
uses: ./.github/actions/publish-github-release
with:
version: ${{ needs.allocate-runners.outputs.version }}
release-dev-builder-image:
name: Release dev builder image
if: ${{ inputs.release_dev_builder_image }} # Only manually trigger this job.
runs-on: ubuntu-latest-16-cores
steps:
- name: Checkout
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Build and push dev builder image
uses: ./.github/actions/build-dev-builder-image
with:
dockerhub-image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
dockerhub-image-registry-token: ${{ secrets.DOCKERHUB_TOKEN }}
acr-image-registry: ${{ vars.ACR_IMAGE_REGISTRY }}
acr-image-registry-username: ${{ secrets.ALICLOUD_USERNAME }}
acr-image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }}
### Stop runners ###
# It's very necessary to split the job of releasing runners into 'stop-linux-amd64-runner' and 'stop-linux-arm64-runner'.
# Because we can terminate the specified EC2 instance immediately after the job is finished, without unnecessary waiting.
@@ -348,7 +363,7 @@ jobs:
name: Stop linux-amd64 runner
# Only run this job when the runner is allocated.
if: ${{ always() }}
runs-on: ubuntu-latest
runs-on: ubuntu-20.04
needs: [
allocate-runners,
build-linux-amd64-artifacts,
@@ -373,7 +388,7 @@ jobs:
name: Stop linux-arm64 runner
# Only run this job when the runner is allocated.
if: ${{ always() }}
runs-on: ubuntu-latest
runs-on: ubuntu-20.04
needs: [
allocate-runners,
build-linux-arm64-artifacts,

Cargo.lock (generated, 1326 lines changed)

File diff suppressed because it is too large.

View File

@@ -8,10 +8,11 @@ members = [
"src/cmd",
"src/common/base",
"src/common/catalog",
"src/common/config",
"src/common/datasource",
"src/common/error",
"src/common/function",
"src/common/function-macro",
"src/common/macro",
"src/common/greptimedb-telemetry",
"src/common/grpc",
"src/common/grpc-expr",
@@ -29,15 +30,16 @@ members = [
"src/common/version",
"src/datanode",
"src/datatypes",
"src/file-table-engine",
"src/file-engine",
"src/frontend",
"src/log-store",
"src/meta-client",
"src/meta-srv",
"src/mito",
"src/mito2",
"src/object-store",
"src/operator",
"src/partition",
"src/plugins",
"src/promql",
"src/query",
"src/script",
@@ -47,51 +49,61 @@ members = [
"src/storage",
"src/store-api",
"src/table",
"src/table-procedure",
"tests-integration",
"tests/runner",
]
resolver = "2"
[workspace.package]
version = "0.4.0-nightly"
version = "0.4.1"
edition = "2021"
license = "Apache-2.0"
[workspace.dependencies]
aquamarine = "0.3"
arrow = { version = "43.0" }
etcd-client = "0.11"
arrow-array = "43.0"
arrow-flight = "43.0"
arrow-schema = { version = "43.0", features = ["serde"] }
async-stream = "0.3"
async-trait = "0.1"
chrono = { version = "0.4", features = ["serde"] }
datafusion = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "c0b0fca548e99d020c76e1a1cd7132aab26000e1" }
datafusion-common = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "c0b0fca548e99d020c76e1a1cd7132aab26000e1" }
datafusion-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "c0b0fca548e99d020c76e1a1cd7132aab26000e1" }
datafusion-optimizer = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "c0b0fca548e99d020c76e1a1cd7132aab26000e1" }
datafusion-physical-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "c0b0fca548e99d020c76e1a1cd7132aab26000e1" }
datafusion-sql = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "c0b0fca548e99d020c76e1a1cd7132aab26000e1" }
datafusion-substrait = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "c0b0fca548e99d020c76e1a1cd7132aab26000e1" }
datafusion = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "b6f3b28b6fe91924cc8dd3d83726b766f2a706ec" }
datafusion-common = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "b6f3b28b6fe91924cc8dd3d83726b766f2a706ec" }
datafusion-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "b6f3b28b6fe91924cc8dd3d83726b766f2a706ec" }
datafusion-optimizer = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "b6f3b28b6fe91924cc8dd3d83726b766f2a706ec" }
datafusion-physical-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "b6f3b28b6fe91924cc8dd3d83726b766f2a706ec" }
datafusion-sql = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "b6f3b28b6fe91924cc8dd3d83726b766f2a706ec" }
datafusion-substrait = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "b6f3b28b6fe91924cc8dd3d83726b766f2a706ec" }
derive_builder = "0.12"
etcd-client = "0.11"
futures = "0.3"
futures-util = "0.3"
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "4a277f27caa035a801d5b9c020a0449777736614" }
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "1f1dd532a111e3834cc3019c5605e2993ffb9dc3" }
humantime-serde = "1.1"
itertools = "0.10"
lazy_static = "1.4"
meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "abbd357c1e193cd270ea65ee7652334a150b628f" }
metrics = "0.20"
moka = "0.12"
once_cell = "1.18"
opentelemetry-proto = { version = "0.2", features = ["gen-tonic", "metrics"] }
parquet = "43.0"
paste = "1.0"
prost = "0.11"
raft-engine = { git = "https://github.com/tikv/raft-engine.git", rev = "22dfb426cd994602b57725ef080287d3e53db479" }
rand = "0.8"
regex = "1.8"
reqwest = { version = "0.11", default-features = false, features = [
"json",
"rustls-tls-native-roots",
"stream",
] }
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
smallvec = "1"
snafu = { version = "0.7", features = ["backtraces"] }
sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "296a4f6c73b129d6f565a42a2e5e53c6bc2b9da4", features = [
sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "6cf9d23d5b8fbecd65efc1d9afb7e80ad7a424da", features = [
"visitor",
] }
strum = { version = "0.25", features = ["derive"] }
@@ -101,8 +113,6 @@ tokio-util = { version = "0.7", features = ["io-util", "compat"] }
toml = "0.7"
tonic = { version = "0.9", features = ["tls"] }
uuid = { version = "1", features = ["serde", "v4", "fast-rng"] }
metrics = "0.20"
meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "abbd357c1e193cd270ea65ee7652334a150b628f" }
## workspaces members
api = { path = "src/api" }
auth = { path = "src/auth" }
@@ -111,29 +121,29 @@ client = { path = "src/client" }
cmd = { path = "src/cmd" }
common-base = { path = "src/common/base" }
common-catalog = { path = "src/common/catalog" }
common-config = { path = "src/common/config" }
common-datasource = { path = "src/common/datasource" }
common-error = { path = "src/common/error" }
common-function = { path = "src/common/function" }
common-function-macro = { path = "src/common/function-macro" }
common-greptimedb-telemetry = { path = "src/common/greptimedb-telemetry" }
common-grpc = { path = "src/common/grpc" }
common-grpc-expr = { path = "src/common/grpc-expr" }
common-macro = { path = "src/common/macro" }
common-mem-prof = { path = "src/common/mem-prof" }
common-meta = { path = "src/common/meta" }
common-pprof = { path = "src/common/pprof" }
common-procedure = { path = "src/common/procedure" }
common-procedure-test = { path = "src/common/procedure-test" }
common-pprof = { path = "src/common/pprof" }
common-query = { path = "src/common/query" }
common-recordbatch = { path = "src/common/recordbatch" }
common-runtime = { path = "src/common/runtime" }
substrait = { path = "src/common/substrait" }
common-telemetry = { path = "src/common/telemetry" }
common-test-util = { path = "src/common/test-util" }
common-time = { path = "src/common/time" }
common-version = { path = "src/common/version" }
datanode = { path = "src/datanode" }
datatypes = { path = "src/datatypes" }
file-table-engine = { path = "src/file-table-engine" }
file-engine = { path = "src/file-engine" }
frontend = { path = "src/frontend" }
log-store = { path = "src/log-store" }
meta-client = { path = "src/meta-client" }
@@ -141,7 +151,9 @@ meta-srv = { path = "src/meta-srv" }
mito = { path = "src/mito" }
mito2 = { path = "src/mito2" }
object-store = { path = "src/object-store" }
operator = { path = "src/operator" }
partition = { path = "src/partition" }
plugins = { path = "src/plugins" }
promql = { path = "src/promql" }
query = { path = "src/query" }
script = { path = "src/script" }
@@ -150,8 +162,8 @@ session = { path = "src/session" }
sql = { path = "src/sql" }
storage = { path = "src/storage" }
store-api = { path = "src/store-api" }
substrait = { path = "src/common/substrait" }
table = { path = "src/table" }
table-procedure = { path = "src/table-procedure" }
[workspace.dependencies.meter-macros]
git = "https://github.com/GreptimeTeam/greptime-meter.git"

View File

@@ -55,11 +55,15 @@ else
BUILDX_MULTI_PLATFORM_BUILD_OPTS := -o type=docker
endif
ifneq ($(strip $(CARGO_BUILD_EXTRA_OPTS)),)
CARGO_BUILD_OPTS += ${CARGO_BUILD_EXTRA_OPTS}
endif
##@ Build
.PHONY: build
build: ## Build debug version greptime.
cargo build ${CARGO_BUILD_OPTS}
cargo ${CARGO_EXTENSION} build ${CARGO_BUILD_OPTS}
.PHONY: build-by-dev-builder
build-by-dev-builder: ## Build greptime by dev-builder.
@@ -67,11 +71,34 @@ build-by-dev-builder: ## Build greptime by dev-builder.
-v ${PWD}:/greptimedb -v ${CARGO_REGISTRY_CACHE}:/root/.cargo/registry \
-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:latest \
make build \
CARGO_EXTENSION="${CARGO_EXTENSION}" \
CARGO_PROFILE=${CARGO_PROFILE} \
FEATURES=${FEATURES} \
TARGET_DIR=${TARGET_DIR} \
TARGET=${TARGET} \
RELEASE=${RELEASE}
RELEASE=${RELEASE} \
CARGO_BUILD_EXTRA_OPTS="${CARGO_BUILD_EXTRA_OPTS}"
.PHONY: build-android-bin
build-android-bin: ## Build greptime binary for android.
docker run --network=host \
-v ${PWD}:/greptimedb -v ${CARGO_REGISTRY_CACHE}:/root/.cargo/registry \
-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-android:latest \
make build \
CARGO_EXTENSION="ndk --platform 23 -t aarch64-linux-android" \
CARGO_PROFILE=release \
FEATURES="${FEATURES}" \
TARGET_DIR="${TARGET_DIR}" \
TARGET="${TARGET}" \
RELEASE="${RELEASE}" \
CARGO_BUILD_EXTRA_OPTS="--bin greptime --no-default-features"
.PHONY: strip-android-bin
strip-android-bin: build-android-bin ## Strip greptime binary for android.
docker run --network=host \
-v ${PWD}:/greptimedb \
-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-android:latest \
bash -c '$${NDK_ROOT}/toolchains/llvm/prebuilt/linux-x86_64/bin/llvm-strip /greptimedb/target/aarch64-linux-android/release/greptime'
.PHONY: clean
clean: ## Clean the project.

View File

@@ -27,6 +27,14 @@
<a href="https://greptime.com/slack"><img src="https://img.shields.io/badge/slack-GreptimeDB-0abd59?logo=slack" alt="slack" /></a>
</p>
## Upcoming Event
Come and meet us in **KubeCon + CloudNativeCon North America 2023!**
<p align="center">
<picture>
<img alt="KubeCon + CloudNativeCon North Logo" src="./docs/banner/KCCNC_NA_2023_1000x200_Email Banner.png" width="800px">
</picture>
</p>
## What is GreptimeDB
GreptimeDB is an open-source time-series database with a special focus on
@@ -96,11 +104,11 @@ Or if you built from docker:
docker run -p 4002:4002 -v "$(pwd):/tmp/greptimedb" greptime/greptimedb standalone start
```
Please see [the online document site](https://docs.greptime.com/getting-started/overview#install-greptimedb) for more installation options and [operations info](https://docs.greptime.com/user-guide/operations/overview).
Please see the online document site for more installation options and [operations info](https://docs.greptime.com/user-guide/operations/overview).
### Get started
Read the [complete getting started guide](https://docs.greptime.com/getting-started/overview#connect) on our [official document site](https://docs.greptime.com/).
Read the [complete getting started guide](https://docs.greptime.com/getting-started/try-out-greptimedb) on our [official document site](https://docs.greptime.com/).
To write and query data, GreptimeDB is compatible with multiple [protocols and clients](https://docs.greptime.com/user-guide/clients/overview).

View File

@@ -6,8 +6,10 @@ license.workspace = true
[dependencies]
arrow.workspace = true
chrono.workspace = true
clap = { version = "4.0", features = ["derive"] }
client = { workspace = true }
futures-util.workspace = true
indicatif = "0.17.1"
itertools.workspace = true
parquet.workspace = true

View File

@@ -27,16 +27,16 @@ use arrow::record_batch::RecordBatch;
use clap::Parser;
use client::api::v1::column::Values;
use client::api::v1::{
Column, ColumnDataType, ColumnDef, CreateTableExpr, InsertRequest, InsertRequests,
Column, ColumnDataType, ColumnDef, CreateTableExpr, InsertRequest, InsertRequests, SemanticType,
};
use client::{Client, Database, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use client::{Client, Database, Output, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use futures_util::TryStreamExt;
use indicatif::{MultiProgress, ProgressBar, ProgressStyle};
use parquet::arrow::arrow_reader::ParquetRecordBatchReaderBuilder;
use tokio::task::JoinSet;
const CATALOG_NAME: &str = "greptime";
const SCHEMA_NAME: &str = "public";
const TABLE_NAME: &str = "nyc_taxi";
#[derive(Parser)]
#[command(name = "NYC benchmark runner")]
@@ -74,7 +74,12 @@ fn get_file_list<P: AsRef<Path>>(path: P) -> Vec<PathBuf> {
.collect()
}
fn new_table_name() -> String {
format!("nyc_taxi_{}", chrono::Utc::now().timestamp())
}
async fn write_data(
table_name: &str,
batch_size: usize,
db: &Database,
path: PathBuf,
@@ -104,8 +109,7 @@ async fn write_data(
}
let (columns, row_count) = convert_record_batch(record_batch);
let request = InsertRequest {
table_name: TABLE_NAME.to_string(),
region_number: 0,
table_name: table_name.to_string(),
columns,
row_count,
};
@@ -114,7 +118,7 @@ async fn write_data(
};
let now = Instant::now();
let _ = db.insert(requests).await.unwrap();
db.insert(requests).await.unwrap();
let elapsed = now.elapsed();
total_rpc_elapsed_ms += elapsed.as_millis();
progress_bar.inc(row_count as _);
@@ -132,6 +136,11 @@ fn convert_record_batch(record_batch: RecordBatch) -> (Vec<Column>, u32) {
for (array, field) in record_batch.columns().iter().zip(fields.iter()) {
let (values, datatype) = build_values(array);
let semantic_type = match field.name().as_str() {
"VendorID" => SemanticType::Tag,
"tpep_pickup_datetime" => SemanticType::Timestamp,
_ => SemanticType::Field,
};
let column = Column {
column_name: field.name().clone(),
@@ -142,8 +151,7 @@ fn convert_record_batch(record_batch: RecordBatch) -> (Vec<Column>, u32) {
.map(|bitmap| bitmap.buffer().as_slice().to_vec())
.unwrap_or_default(),
datatype: datatype.into(),
// datatype and semantic_type are set to default
..Default::default()
semantic_type: semantic_type as i32,
};
columns.push(column);
}
@@ -189,7 +197,7 @@ fn build_values(column: &ArrayRef) -> (Values, ColumnDataType) {
let values = array.values();
(
Values {
ts_microsecond_values: values.to_vec(),
timestamp_microsecond_values: values.to_vec(),
..Default::default()
},
ColumnDataType::TimestampMicrosecond,
@@ -244,156 +252,193 @@ fn is_record_batch_full(batch: &RecordBatch) -> bool {
batch.columns().iter().all(|col| col.null_count() == 0)
}
fn create_table_expr() -> CreateTableExpr {
fn create_table_expr(table_name: &str) -> CreateTableExpr {
CreateTableExpr {
catalog_name: CATALOG_NAME.to_string(),
schema_name: SCHEMA_NAME.to_string(),
table_name: TABLE_NAME.to_string(),
table_name: table_name.to_string(),
desc: "".to_string(),
column_defs: vec![
ColumnDef {
name: "VendorID".to_string(),
datatype: ColumnDataType::Int64 as i32,
data_type: ColumnDataType::Int64 as i32,
is_nullable: true,
default_constraint: vec![],
semantic_type: SemanticType::Tag as i32,
comment: String::new(),
},
ColumnDef {
name: "tpep_pickup_datetime".to_string(),
datatype: ColumnDataType::TimestampMicrosecond as i32,
is_nullable: true,
data_type: ColumnDataType::TimestampMicrosecond as i32,
is_nullable: false,
default_constraint: vec![],
semantic_type: SemanticType::Timestamp as i32,
comment: String::new(),
},
ColumnDef {
name: "tpep_dropoff_datetime".to_string(),
datatype: ColumnDataType::TimestampMicrosecond as i32,
data_type: ColumnDataType::TimestampMicrosecond as i32,
is_nullable: true,
default_constraint: vec![],
semantic_type: SemanticType::Field as i32,
comment: String::new(),
},
ColumnDef {
name: "passenger_count".to_string(),
datatype: ColumnDataType::Float64 as i32,
data_type: ColumnDataType::Float64 as i32,
is_nullable: true,
default_constraint: vec![],
semantic_type: SemanticType::Field as i32,
comment: String::new(),
},
ColumnDef {
name: "trip_distance".to_string(),
datatype: ColumnDataType::Float64 as i32,
data_type: ColumnDataType::Float64 as i32,
is_nullable: true,
default_constraint: vec![],
semantic_type: SemanticType::Field as i32,
comment: String::new(),
},
ColumnDef {
name: "RatecodeID".to_string(),
datatype: ColumnDataType::Float64 as i32,
data_type: ColumnDataType::Float64 as i32,
is_nullable: true,
default_constraint: vec![],
semantic_type: SemanticType::Field as i32,
comment: String::new(),
},
ColumnDef {
name: "store_and_fwd_flag".to_string(),
datatype: ColumnDataType::String as i32,
data_type: ColumnDataType::String as i32,
is_nullable: true,
default_constraint: vec![],
semantic_type: SemanticType::Field as i32,
comment: String::new(),
},
ColumnDef {
name: "PULocationID".to_string(),
datatype: ColumnDataType::Int64 as i32,
data_type: ColumnDataType::Int64 as i32,
is_nullable: true,
default_constraint: vec![],
semantic_type: SemanticType::Field as i32,
comment: String::new(),
},
ColumnDef {
name: "DOLocationID".to_string(),
datatype: ColumnDataType::Int64 as i32,
data_type: ColumnDataType::Int64 as i32,
is_nullable: true,
default_constraint: vec![],
semantic_type: SemanticType::Field as i32,
comment: String::new(),
},
ColumnDef {
name: "payment_type".to_string(),
datatype: ColumnDataType::Int64 as i32,
data_type: ColumnDataType::Int64 as i32,
is_nullable: true,
default_constraint: vec![],
semantic_type: SemanticType::Field as i32,
comment: String::new(),
},
ColumnDef {
name: "fare_amount".to_string(),
datatype: ColumnDataType::Float64 as i32,
data_type: ColumnDataType::Float64 as i32,
is_nullable: true,
default_constraint: vec![],
semantic_type: SemanticType::Field as i32,
comment: String::new(),
},
ColumnDef {
name: "extra".to_string(),
datatype: ColumnDataType::Float64 as i32,
data_type: ColumnDataType::Float64 as i32,
is_nullable: true,
default_constraint: vec![],
semantic_type: SemanticType::Field as i32,
comment: String::new(),
},
ColumnDef {
name: "mta_tax".to_string(),
datatype: ColumnDataType::Float64 as i32,
data_type: ColumnDataType::Float64 as i32,
is_nullable: true,
default_constraint: vec![],
semantic_type: SemanticType::Field as i32,
comment: String::new(),
},
ColumnDef {
name: "tip_amount".to_string(),
datatype: ColumnDataType::Float64 as i32,
data_type: ColumnDataType::Float64 as i32,
is_nullable: true,
default_constraint: vec![],
semantic_type: SemanticType::Field as i32,
comment: String::new(),
},
ColumnDef {
name: "tolls_amount".to_string(),
datatype: ColumnDataType::Float64 as i32,
data_type: ColumnDataType::Float64 as i32,
is_nullable: true,
default_constraint: vec![],
semantic_type: SemanticType::Field as i32,
comment: String::new(),
},
ColumnDef {
name: "improvement_surcharge".to_string(),
datatype: ColumnDataType::Float64 as i32,
data_type: ColumnDataType::Float64 as i32,
is_nullable: true,
default_constraint: vec![],
semantic_type: SemanticType::Field as i32,
comment: String::new(),
},
ColumnDef {
name: "total_amount".to_string(),
datatype: ColumnDataType::Float64 as i32,
data_type: ColumnDataType::Float64 as i32,
is_nullable: true,
default_constraint: vec![],
semantic_type: SemanticType::Field as i32,
comment: String::new(),
},
ColumnDef {
name: "congestion_surcharge".to_string(),
datatype: ColumnDataType::Float64 as i32,
data_type: ColumnDataType::Float64 as i32,
is_nullable: true,
default_constraint: vec![],
semantic_type: SemanticType::Field as i32,
comment: String::new(),
},
ColumnDef {
name: "airport_fee".to_string(),
datatype: ColumnDataType::Float64 as i32,
data_type: ColumnDataType::Float64 as i32,
is_nullable: true,
default_constraint: vec![],
semantic_type: SemanticType::Field as i32,
comment: String::new(),
},
],
time_index: "tpep_pickup_datetime".to_string(),
primary_keys: vec!["VendorID".to_string()],
create_if_not_exists: false,
create_if_not_exists: true,
table_options: Default::default(),
region_numbers: vec![0],
table_id: None,
engine: "mito".to_string(),
}
}
fn query_set() -> HashMap<String, String> {
fn query_set(table_name: &str) -> HashMap<String, String> {
HashMap::from([
(
"count_all".to_string(),
format!("SELECT COUNT(*) FROM {TABLE_NAME};"),
format!("SELECT COUNT(*) FROM {table_name};"),
),
(
"fare_amt_by_passenger".to_string(),
format!("SELECT passenger_count, MIN(fare_amount), MAX(fare_amount), SUM(fare_amount) FROM {TABLE_NAME} GROUP BY passenger_count"),
format!("SELECT passenger_count, MIN(fare_amount), MAX(fare_amount), SUM(fare_amount) FROM {table_name} GROUP BY passenger_count"),
)
])
}
async fn do_write(args: &Args, db: &Database) {
async fn do_write(args: &Args, db: &Database, table_name: &str) {
let mut file_list = get_file_list(args.path.clone().expect("Specify data path in argument"));
let mut write_jobs = JoinSet::new();
let create_table_result = db.create(create_table_expr()).await;
let create_table_result = db.create(create_table_expr(table_name)).await;
println!("Create table result: {create_table_result:?}");
let progress_bar_style = ProgressStyle::with_template(
@@ -411,8 +456,10 @@ async fn do_write(args: &Args, db: &Database) {
let db = db.clone();
let mpb = multi_progress_bar.clone();
let pb_style = progress_bar_style.clone();
let _ = write_jobs
.spawn(async move { write_data(batch_size, &db, path, mpb, pb_style).await });
let table_name = table_name.to_string();
let _ = write_jobs.spawn(async move {
write_data(&table_name, batch_size, &db, path, mpb, pb_style).await
});
}
}
while write_jobs.join_next().await.is_some() {
@@ -421,24 +468,32 @@ async fn do_write(args: &Args, db: &Database) {
let db = db.clone();
let mpb = multi_progress_bar.clone();
let pb_style = progress_bar_style.clone();
let _ = write_jobs
.spawn(async move { write_data(batch_size, &db, path, mpb, pb_style).await });
let table_name = table_name.to_string();
let _ = write_jobs.spawn(async move {
write_data(&table_name, batch_size, &db, path, mpb, pb_style).await
});
}
}
}
async fn do_query(num_iter: usize, db: &Database) {
for (query_name, query) in query_set() {
async fn do_query(num_iter: usize, db: &Database, table_name: &str) {
for (query_name, query) in query_set(table_name) {
println!("Running query: {query}");
for i in 0..num_iter {
let now = Instant::now();
let _res = db.sql(&query).await.unwrap();
let res = db.sql(&query).await.unwrap();
match res {
Output::AffectedRows(_) | Output::RecordBatches(_) => (),
Output::Stream(stream) => {
stream.try_collect::<Vec<_>>().await.unwrap();
}
}
let elapsed = now.elapsed();
println!(
"query {}, iteration {}: {}ms",
query_name,
i,
elapsed.as_millis()
elapsed.as_millis(),
);
}
}
@@ -455,13 +510,14 @@ fn main() {
.block_on(async {
let client = Client::with_urls(vec![&args.endpoint]);
let db = Database::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, client);
let table_name = new_table_name();
if !args.skip_write {
do_write(&args, &db).await;
do_write(&args, &db, &table_name).await;
}
if !args.skip_read {
do_query(args.iter_num, &db).await;
do_query(args.iter_num, &db, &table_name).await;
}
})
}
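The reworked `do_query` above only records elapsed time after a streaming result has been fully consumed. A minimal sketch of that pattern, reusing the `Output` enum and `try_collect` call shown in the diff (the helper name `drain_query` is illustrative):

```rust
use client::{Database, Output};
use futures_util::TryStreamExt;

// Illustrative helper: fully drain a query result so that timing covers the
// whole result set rather than just the request setup.
async fn drain_query(db: &Database, sql: &str) {
    match db.sql(sql).await.unwrap() {
        // Non-streaming outputs are already materialized by the client.
        Output::AffectedRows(_) | Output::RecordBatches(_) => (),
        // Streaming outputs have to be collected batch by batch.
        Output::Stream(stream) => {
            let _ = stream.try_collect::<Vec<_>>().await.unwrap();
        }
    }
}
```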

View File

@@ -1,7 +1,5 @@
# Node running mode, see `standalone.example.toml`.
mode = "distributed"
# Whether to use in-memory catalog, see `standalone.example.toml`.
enable_memory_catalog = false
# The datanode identifier, should be unique.
node_id = 42
# gRPC server address, "127.0.0.1:3001" by default.
@@ -10,19 +8,24 @@ rpc_addr = "127.0.0.1:3001"
rpc_hostname = "127.0.0.1"
# The number of gRPC server worker threads, 8 by default.
rpc_runtime_size = 8
# Start services after regions have obtained leases.
# It will block the datanode start if it can't receive leases in the heartbeat from metasrv.
require_lease_before_startup = false
[heartbeat]
# Interval for sending heartbeat messages to the Metasrv in milliseconds, 5000 by default.
interval_millis = 5000
# Interval for sending heartbeat messages to the Metasrv, 3 seconds by default.
interval = "3s"
# Metasrv client options.
[meta_client_options]
[meta_client]
# Metasrv address list.
metasrv_addrs = ["127.0.0.1:3002"]
# Operation timeout in milliseconds, 3000 by default.
timeout_millis = 3000
# Connect server timeout in milliseconds, 5000 by default.
connect_timeout_millis = 5000
# Heartbeat timeout, 500 milliseconds by default.
heartbeat_timeout = "500ms"
# Operation timeout, 3 seconds by default.
timeout = "3s"
# Connect server timeout, 1 second by default.
connect_timeout = "1s"
# `TCP_NODELAY` option for accepted connections, true by default.
tcp_nodelay = true
@@ -44,6 +47,12 @@ type = "File"
# TTL for all tables. Disabled by default.
# global_ttl = "7d"
# Cache configuration for object storage such as 'S3' etc.
# The local file cache directory
# cache_path = "/path/local_cache"
# The local file cache capacity in bytes.
# cache_capacity = "256MB"
# Compaction options, see `standalone.example.toml`.
[storage.compaction]
max_inflight_tasks = 4
@@ -71,10 +80,27 @@ auto_flush_interval = "1h"
# Global write buffer size for all regions.
global_write_buffer_size = "1GB"
# Procedure storage options, see `standalone.example.toml`.
[procedure]
max_retry_times = 3
retry_delay = "500ms"
# Mito engine options
[[region_engine]]
[region_engine.mito]
# Number of region workers
num_workers = 8
# Request channel size of each worker
worker_channel_size = 128
# Max batch size for a worker to handle requests
worker_request_batch_size = 64
# Number of meta action updated to trigger a new checkpoint for the manifest
manifest_checkpoint_distance = 10
# Manifest compression type
manifest_compress_type = "Uncompressed"
# Max number of running background jobs
max_background_jobs = 4
# Interval to auto flush a region if it has not flushed yet.
auto_flush_interval = "1h"
# Global write buffer size for all regions.
global_write_buffer_size = "1GB"
# Global write buffer size threshold to reject write requests (default 2G).
global_write_buffer_reject_size = "2GB"
# Log options
# [logging]
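The new `[[region_engine]]` array of tables above nests the engine name (`mito`) one level below each entry. A minimal sketch of how such a layout can be deserialized with serde's externally tagged enums and the workspace's `toml` dependency; the type and field names here are illustrative, not the actual datanode config structs:

```rust
use serde::Deserialize;

// Illustrative types only; the real datanode configuration structs may differ.
#[derive(Debug, Deserialize)]
struct DatanodeConfig {
    #[serde(default)]
    region_engine: Vec<RegionEngine>,
}

// `[[region_engine]]` followed by `[region_engine.mito]` deserializes into an
// externally tagged enum: each array element is a table with one key naming
// the engine.
#[derive(Debug, Deserialize)]
#[serde(rename_all = "lowercase")]
enum RegionEngine {
    Mito(MitoConfig),
}

#[derive(Debug, Deserialize)]
struct MitoConfig {
    num_workers: usize,
    max_background_jobs: usize,
}

fn main() {
    let cfg: DatanodeConfig = toml::from_str(
        r#"
        [[region_engine]]
        [region_engine.mito]
        num_workers = 8
        max_background_jobs = 4
        "#,
    )
    .unwrap();
    println!("{cfg:?}");
}
```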

View File

@@ -2,64 +2,67 @@
mode = "distributed"
[heartbeat]
# Interval for sending heartbeat task to the Metasrv in milliseconds, 5000 by default.
interval_millis = 5000
# Interval for retry sending heartbeat task in milliseconds, 5000 by default.
retry_interval_millis = 5000
# Interval for sending heartbeat task to the Metasrv, 5 seconds by default.
interval = "5s"
# Interval for retry sending heartbeat task, 5 seconds by default.
retry_interval = "5s"
# HTTP server options, see `standalone.example.toml`.
[http_options]
[http]
addr = "127.0.0.1:4000"
timeout = "30s"
body_limit = "64MB"
# gRPC server options, see `standalone.example.toml`.
[grpc_options]
[grpc]
addr = "127.0.0.1:4001"
runtime_size = 8
# MySQL server options, see `standalone.example.toml`.
[mysql_options]
[mysql]
enable = true
addr = "127.0.0.1:4002"
runtime_size = 2
# MySQL server TLS options, see `standalone.example.toml`.
[mysql_options.tls]
[mysql.tls]
mode = "disable"
cert_path = ""
key_path = ""
# PostgreSQL server options, see `standalone.example.toml`.
[postgres_options]
[postgres]
enable = true
addr = "127.0.0.1:4003"
runtime_size = 2
# PostgreSQL server TLS options, see `standalone.example.toml`.
[postgres_options.tls]
[postgres.tls]
mode = "disable"
cert_path = ""
key_path = ""
# OpenTSDB protocol options, see `standalone.example.toml`.
[opentsdb_options]
[opentsdb]
enable = true
addr = "127.0.0.1:4242"
runtime_size = 2
# InfluxDB protocol options, see `standalone.example.toml`.
[influxdb_options]
[influxdb]
enable = true
# Prometheus remote storage options, see `standalone.example.toml`.
[prom_store_options]
[prom_store]
enable = true
# Metasrv client options, see `datanode.example.toml`.
[meta_client_options]
[meta_client]
metasrv_addrs = ["127.0.0.1:3002"]
timeout_millis = 3000
timeout = "3s"
# DDL timeouts options.
ddl_timeout_millis = 10000
connect_timeout_millis = 5000
ddl_timeout = "10s"
connect_timeout = "1s"
tcp_nodelay = true
# Log options, see `standalone.example.toml`
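The `[meta_client]` options above move from `*_millis` integers to human-readable durations (`"3s"`, `"10s"`, `"1s"`). A minimal sketch of parsing such values with the `humantime-serde` crate already listed in the workspace dependencies; the struct is illustrative rather than the actual options type:

```rust
use std::time::Duration;

use serde::Deserialize;

// Illustrative struct; the real meta client options type may differ.
#[derive(Debug, Deserialize)]
struct MetaClientOptions {
    metasrv_addrs: Vec<String>,
    #[serde(with = "humantime_serde")]
    timeout: Duration,
    #[serde(with = "humantime_serde")]
    ddl_timeout: Duration,
    #[serde(with = "humantime_serde")]
    connect_timeout: Duration,
    tcp_nodelay: bool,
}

fn main() {
    let opts: MetaClientOptions = toml::from_str(
        r#"
        metasrv_addrs = ["127.0.0.1:3002"]
        timeout = "3s"
        ddl_timeout = "10s"
        connect_timeout = "1s"
        tcp_nodelay = true
        "#,
    )
    .unwrap();
    assert_eq!(opts.timeout, Duration::from_secs(3));
    println!("{opts:?}");
}
```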

View File

@@ -6,8 +6,6 @@ bind_addr = "127.0.0.1:3002"
server_addr = "127.0.0.1:3002"
# Etcd server address, "127.0.0.1:2379" by default.
store_addr = "127.0.0.1:2379"
# Datanode lease in seconds, 15 seconds by default.
datanode_lease_secs = 15
# Datanode selector type.
# - "LeaseBased" (default value).
# - "LoadBased"
@@ -34,6 +32,6 @@ retry_delay = "500ms"
# [datanode]
# # Datanode client options.
# [datanode.client_options]
# timeout_millis = 10000
# connect_timeout_millis = 10000
# timeout = "10s"
# connect_timeout = "10s"
# tcp_nodelay = true

View File

@@ -1,12 +1,10 @@
# Node running mode, "standalone" or "distributed".
mode = "standalone"
# Whether to use in-memory catalog, `false` by default.
enable_memory_catalog = false
# Whether to enable greptimedb telemetry, true by default.
enable_telemetry = true
# HTTP server options.
[http_options]
[http]
# Server address, "127.0.0.1:4000" by default.
addr = "127.0.0.1:4000"
# HTTP request timeout, 30s by default.
@@ -16,21 +14,23 @@ timeout = "30s"
body_limit = "64MB"
# gRPC server options.
[grpc_options]
[grpc]
# Server address, "127.0.0.1:4001" by default.
addr = "127.0.0.1:4001"
# The number of server worker threads, 8 by default.
runtime_size = 8
# MySQL server options.
[mysql_options]
[mysql]
# Whether to enable the MySQL server.
enable = true
# Server address, "127.0.0.1:4002" by default.
addr = "127.0.0.1:4002"
# The number of server worker threads, 2 by default.
runtime_size = 2
# MySQL server TLS options.
[mysql_options.tls]
[mysql.tls]
# TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html
# - "disable" (default value)
# - "prefer"
@@ -44,14 +44,16 @@ cert_path = ""
key_path = ""
# PostgreSQL server options.
[postgres_options]
[postgres]
# Whether to enable the PostgreSQL server.
enable = true
# Server address, "127.0.0.1:4003" by default.
addr = "127.0.0.1:4003"
# The number of server worker threads, 2 by default.
runtime_size = 2
# PostgreSQL server TLS options, see the `[mysql.tls]` section.
[postgres_options.tls]
[postgres.tls]
# TLS mode.
mode = "disable"
# certificate file path.
@@ -60,19 +62,21 @@ cert_path = ""
key_path = ""
# OpenTSDB protocol options.
[opentsdb_options]
[opentsdb]
# Whether to enable the OpenTSDB protocol.
enable = true
# OpenTSDB telnet API server address, "127.0.0.1:4242" by default.
addr = "127.0.0.1:4242"
# The number of server worker threads, 2 by default.
runtime_size = 2
# InfluxDB protocol options.
[influxdb_options]
[influxdb]
# Whether to enable InfluxDB protocol in HTTP API, true by default.
enable = true
# Prometheus remote storage options
[prom_store_options]
[prom_store]
# Whether to enable Prometheus remote write and read in HTTP API, true by default.
enable = true
@@ -91,6 +95,20 @@ read_batch_size = 128
# Whether to sync log file after every write.
sync_write = false
# Metadata storage options.
[metadata_store]
# Kv file size in bytes.
file_size = "256MB"
# Kv purge threshold.
purge_threshold = "4GB"
# Procedure storage options.
[procedure]
# Procedure max retry time.
max_retry_times = 3
# Initial retry delay of procedures, increases exponentially
retry_delay = "500ms"
# Storage options.
[storage]
# The working home directory.
@@ -99,6 +117,10 @@ data_home = "/tmp/greptimedb/"
type = "File"
# TTL for all tables. Disabled by default.
# global_ttl = "7d"
# Cache configuration for object storage such as 'S3' etc.
# cache_path = "/path/local_cache"
# The local file cache capacity in bytes.
# cache_capacity = "256MB"
# Compaction options.
[storage.compaction]
@@ -130,13 +152,6 @@ auto_flush_interval = "1h"
# Global write buffer size for all regions.
global_write_buffer_size = "1GB"
# Procedure storage options.
[procedure]
# Procedure max retry time.
max_retry_times = 3
# Initial retry delay of procedures, increases exponentially
retry_delay = "500ms"
# Log options
# [logging]
# Specify logs directory.
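The `[procedure]` section above caps retries at `max_retry_times = 3` with an initial `retry_delay` of 500ms that increases exponentially. A minimal sketch of the resulting schedule, assuming simple doubling (the actual growth factor and any jitter may differ):

```rust
use std::time::Duration;

// Assumed doubling backoff; the real procedure framework may use a different
// growth factor or add jitter.
fn retry_delays(initial: Duration, max_retry_times: u32) -> Vec<Duration> {
    (0..max_retry_times).map(|i| initial * 2u32.pow(i)).collect()
}

fn main() {
    // With retry_delay = "500ms" and max_retry_times = 3, the attempts would
    // wait 500ms, 1s, and 2s before giving up.
    let delays = retry_delays(Duration::from_millis(500), 3);
    println!("{delays:?}");
}
```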

View File

@@ -1,4 +1,4 @@
FROM ubuntu:22.04 as builder
FROM ubuntu:20.04 as builder
ARG CARGO_PROFILE
ARG FEATURES
@@ -7,6 +7,11 @@ ARG OUTPUT_DIR
ENV LANG en_US.utf8
WORKDIR /greptimedb
# Add PPA for Python 3.10.
RUN apt-get update && \
DEBIAN_FRONTEND=noninteractive apt-get install -y software-properties-common && \
add-apt-repository ppa:deadsnakes/ppa -y
# Install dependencies.
RUN --mount=type=cache,target=/var/cache/apt \
apt-get update && apt-get install -y \

View File

@@ -0,0 +1,41 @@
FROM --platform=linux/amd64 saschpe/android-ndk:34-jdk17.0.8_7-ndk25.2.9519653-cmake3.22.1
ENV LANG en_US.utf8
WORKDIR /greptimedb
# Rename libunwind to libgcc
RUN cp ${NDK_ROOT}/toolchains/llvm/prebuilt/linux-x86_64/lib64/clang/14.0.7/lib/linux/aarch64/libunwind.a ${NDK_ROOT}/toolchains/llvm/prebuilt/linux-x86_64/lib64/clang/14.0.7/lib/linux/aarch64/libgcc.a
# Install dependencies.
RUN apt-get update && apt-get install -y \
libssl-dev \
protobuf-compiler \
curl \
git \
build-essential \
pkg-config \
python3 \
python3-dev \
python3-pip \
&& pip3 install --upgrade pip \
&& pip3 install pyarrow
# Trust workdir
RUN git config --global --add safe.directory /greptimedb
# Install Rust.
SHELL ["/bin/bash", "-c"]
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
ENV PATH /root/.cargo/bin/:$PATH
# Add android toolchains
ARG RUST_TOOLCHAIN
RUN rustup toolchain install ${RUST_TOOLCHAIN}
RUN rustup target add aarch64-linux-android
# Install cargo-ndk
RUN cargo install cargo-ndk
ENV ANDROID_NDK_HOME $NDK_ROOT
# Builder entrypoint.
CMD ["cargo", "ndk", "--platform", "23", "-t", "aarch64-linux-android", "build", "--bin", "greptime", "--profile", "release", "--no-default-features"]

View File

@@ -1,8 +1,13 @@
FROM ubuntu:22.04
FROM ubuntu:20.04
ENV LANG en_US.utf8
WORKDIR /greptimedb
# Add PPA for Python 3.10.
RUN apt-get update && \
DEBIAN_FRONTEND=noninteractive apt-get install -y software-properties-common && \
add-apt-repository ppa:deadsnakes/ppa -y
# Install dependencies.
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
libssl-dev \

Binary file added (image, 51 KiB); content not shown.

View File

@@ -0,0 +1,61 @@
# TSBS benchmark - v0.4.0
## Environment
### Local
| | |
| ------ | ---------------------------------- |
| CPU | AMD Ryzen 7 7735HS (8 core 3.2GHz) |
| Memory | 32GB |
| Disk | SOLIDIGM SSDPFKNU010TZ |
| OS | Ubuntu 22.04.2 LTS |
### Aliyun amd64
| | |
| ------- | -------------- |
| Machine | ecs.g7.4xlarge |
| CPU | 16 core |
| Memory | 64GB |
| Disk | 100G |
| OS | Ubuntu 22.04 |
### Aliyun arm64
| | |
| ------- | ----------------- |
| Machine | ecs.g8y.4xlarge |
| CPU | 16 core |
| Memory | 64GB |
| Disk | 100G |
| OS | Ubuntu 22.04 ARM |
## Write performance
| Environment | Ingest rate (rows/s) |
| ------------------ | --------------------- |
| Local | 365280.60 |
| Aliyun g7.4xlarge | 341368.72 |
| Aliyun g8y.4xlarge | 320907.29 |
## Query performance
| Query type | Local (ms) | Aliyun g7.4xlarge (ms) | Aliyun g8y.4xlarge (ms) |
| --------------------- | ---------- | ---------------------- | ----------------------- |
| cpu-max-all-1 | 50.70 | 31.46 | 47.61 |
| cpu-max-all-8 | 262.16 | 129.26 | 152.43 |
| double-groupby-1 | 2512.71 | 1408.19 | 1586.10 |
| double-groupby-5 | 3896.15 | 2304.29 | 2585.29 |
| double-groupby-all | 5404.67 | 3337.61 | 3773.91 |
| groupby-orderby-limit | 3786.98 | 2065.72 | 2312.57 |
| high-cpu-1 | 71.96 | 37.29 | 54.01 |
| high-cpu-all | 9468.75 | 7595.69 | 8467.46 |
| lastpoint | 13379.43 | 11253.76 | 12949.40 |
| single-groupby-1-1-1 | 20.72 | 12.16 | 13.35 |
| single-groupby-1-1-12 | 28.53 | 15.67 | 21.62 |
| single-groupby-1-8-1 | 72.23 | 37.90 | 43.52 |
| single-groupby-5-1-1 | 26.75 | 15.59 | 17.48 |
| single-groupby-5-1-12 | 45.41 | 22.90 | 31.96 |
| single-groupby-5-8-1 | 107.96 | 59.76 | 69.58 |

View File

@@ -7,6 +7,7 @@ license.workspace = true
[dependencies]
common-base = { workspace = true }
common-error = { workspace = true }
common-macro = { workspace = true }
common-time = { workspace = true }
datatypes = { workspace = true }
greptime-proto.workspace = true

View File

@@ -16,14 +16,16 @@ use std::any::Any;
use common_error::ext::ErrorExt;
use common_error::status_code::StatusCode;
use common_macro::stack_trace_debug;
use datatypes::prelude::ConcreteDataType;
use snafu::prelude::*;
use snafu::Location;
pub type Result<T> = std::result::Result<T, Error>;
#[derive(Debug, Snafu)]
#[derive(Snafu)]
#[snafu(visibility(pub))]
#[stack_trace_debug]
pub enum Error {
#[snafu(display("Unknown proto column datatype: {}", datatype))]
UnknownColumnDataType { datatype: i32, location: Location },
@@ -34,22 +36,14 @@ pub enum Error {
location: Location,
},
#[snafu(display(
"Failed to convert column default constraint, column: {}, source: {}",
column,
source
))]
#[snafu(display("Failed to convert column default constraint, column: {}", column))]
ConvertColumnDefaultConstraint {
column: String,
location: Location,
source: datatypes::error::Error,
},
#[snafu(display(
"Invalid column default constraint, column: {}, source: {}",
column,
source
))]
#[snafu(display("Invalid column default constraint, column: {}", column))]
InvalidColumnDefaultConstraint {
column: String,
location: Location,

View File

@@ -18,32 +18,32 @@ use common_base::BitVec;
use common_time::interval::IntervalUnit;
use common_time::time::Time;
use common_time::timestamp::TimeUnit;
use common_time::{Date, DateTime, Interval, Timestamp};
use common_time::{Date, DateTime, Duration, Interval, Timestamp};
use datatypes::prelude::{ConcreteDataType, ValueRef};
use datatypes::scalars::ScalarVector;
use datatypes::types::{
Int16Type, Int8Type, IntervalType, TimeType, TimestampType, UInt16Type, UInt8Type,
DurationType, Int16Type, Int8Type, IntervalType, TimeType, TimestampType, UInt16Type, UInt8Type,
};
use datatypes::value::{OrderedF32, OrderedF64, Value};
use datatypes::vectors::{
BinaryVector, BooleanVector, DateTimeVector, DateVector, Float32Vector, Float64Vector,
Int32Vector, Int64Vector, IntervalDayTimeVector, IntervalMonthDayNanoVector,
BinaryVector, BooleanVector, DateTimeVector, DateVector, DurationMicrosecondVector,
DurationMillisecondVector, DurationNanosecondVector, DurationSecondVector, Float32Vector,
Float64Vector, Int32Vector, Int64Vector, IntervalDayTimeVector, IntervalMonthDayNanoVector,
IntervalYearMonthVector, PrimitiveVector, StringVector, TimeMicrosecondVector,
TimeMillisecondVector, TimeNanosecondVector, TimeSecondVector, TimestampMicrosecondVector,
TimestampMillisecondVector, TimestampNanosecondVector, TimestampSecondVector, UInt32Vector,
UInt64Vector, VectorRef,
};
use greptime_proto::v1;
use greptime_proto::v1::ddl_request::Expr;
use greptime_proto::v1::greptime_request::Request;
use greptime_proto::v1::query_request::Query;
use greptime_proto::v1::value::ValueData;
use greptime_proto::v1::{DdlRequest, IntervalMonthDayNano, QueryRequest, SemanticType};
use greptime_proto::v1::{self, DdlRequest, IntervalMonthDayNano, QueryRequest, Row, SemanticType};
use snafu::prelude::*;
use crate::error::{self, Result};
use crate::v1::column::Values;
use crate::v1::{Column, ColumnDataType};
use crate::v1::{Column, ColumnDataType, Value as GrpcValue};
#[derive(Debug, PartialEq, Eq)]
pub struct ColumnDataTypeWrapper(ColumnDataType);
@@ -101,6 +101,14 @@ impl From<ColumnDataTypeWrapper> for ConcreteDataType {
ColumnDataType::IntervalMonthDayNano => {
ConcreteDataType::interval_month_day_nano_datatype()
}
ColumnDataType::DurationSecond => ConcreteDataType::duration_second_datatype(),
ColumnDataType::DurationMillisecond => {
ConcreteDataType::duration_millisecond_datatype()
}
ColumnDataType::DurationMicrosecond => {
ConcreteDataType::duration_microsecond_datatype()
}
ColumnDataType::DurationNanosecond => ConcreteDataType::duration_nanosecond_datatype(),
}
}
}
@@ -142,6 +150,12 @@ impl TryFrom<ConcreteDataType> for ColumnDataTypeWrapper {
IntervalType::DayTime(_) => ColumnDataType::IntervalDayTime,
IntervalType::MonthDayNano(_) => ColumnDataType::IntervalMonthDayNano,
},
ConcreteDataType::Duration(d) => match d {
DurationType::Second(_) => ColumnDataType::DurationSecond,
DurationType::Millisecond(_) => ColumnDataType::DurationMillisecond,
DurationType::Microsecond(_) => ColumnDataType::DurationMicrosecond,
DurationType::Nanosecond(_) => ColumnDataType::DurationNanosecond,
},
ConcreteDataType::Null(_)
| ConcreteDataType::List(_)
| ConcreteDataType::Dictionary(_) => {
@@ -215,19 +229,19 @@ pub fn values_with_capacity(datatype: ColumnDataType, capacity: usize) -> Values
..Default::default()
},
ColumnDataType::TimestampSecond => Values {
ts_second_values: Vec::with_capacity(capacity),
timestamp_second_values: Vec::with_capacity(capacity),
..Default::default()
},
ColumnDataType::TimestampMillisecond => Values {
ts_millisecond_values: Vec::with_capacity(capacity),
timestamp_millisecond_values: Vec::with_capacity(capacity),
..Default::default()
},
ColumnDataType::TimestampMicrosecond => Values {
ts_microsecond_values: Vec::with_capacity(capacity),
timestamp_microsecond_values: Vec::with_capacity(capacity),
..Default::default()
},
ColumnDataType::TimestampNanosecond => Values {
ts_nanosecond_values: Vec::with_capacity(capacity),
timestamp_nanosecond_values: Vec::with_capacity(capacity),
..Default::default()
},
ColumnDataType::TimeSecond => Values {
@@ -258,6 +272,22 @@ pub fn values_with_capacity(datatype: ColumnDataType, capacity: usize) -> Values
interval_month_day_nano_values: Vec::with_capacity(capacity),
..Default::default()
},
ColumnDataType::DurationSecond => Values {
duration_second_values: Vec::with_capacity(capacity),
..Default::default()
},
ColumnDataType::DurationMillisecond => Values {
duration_millisecond_values: Vec::with_capacity(capacity),
..Default::default()
},
ColumnDataType::DurationMicrosecond => Values {
duration_microsecond_values: Vec::with_capacity(capacity),
..Default::default()
},
ColumnDataType::DurationNanosecond => Values {
duration_nanosecond_values: Vec::with_capacity(capacity),
..Default::default()
},
}
}
@@ -287,10 +317,10 @@ pub fn push_vals(column: &mut Column, origin_count: usize, vector: VectorRef) {
Value::Date(val) => values.date_values.push(val.val()),
Value::DateTime(val) => values.datetime_values.push(val.val()),
Value::Timestamp(val) => match val.unit() {
TimeUnit::Second => values.ts_second_values.push(val.value()),
TimeUnit::Millisecond => values.ts_millisecond_values.push(val.value()),
TimeUnit::Microsecond => values.ts_microsecond_values.push(val.value()),
TimeUnit::Nanosecond => values.ts_nanosecond_values.push(val.value()),
TimeUnit::Second => values.timestamp_second_values.push(val.value()),
TimeUnit::Millisecond => values.timestamp_millisecond_values.push(val.value()),
TimeUnit::Microsecond => values.timestamp_microsecond_values.push(val.value()),
TimeUnit::Nanosecond => values.timestamp_nanosecond_values.push(val.value()),
},
Value::Time(val) => match val.unit() {
TimeUnit::Second => values.time_second_values.push(val.value()),
@@ -305,6 +335,12 @@ pub fn push_vals(column: &mut Column, origin_count: usize, vector: VectorRef) {
.interval_month_day_nano_values
.push(convert_i128_to_interval(val.to_i128())),
},
Value::Duration(val) => match val.unit() {
TimeUnit::Second => values.duration_second_values.push(val.value()),
TimeUnit::Millisecond => values.duration_millisecond_values.push(val.value()),
TimeUnit::Microsecond => values.duration_microsecond_values.push(val.value()),
TimeUnit::Nanosecond => values.duration_nanosecond_values.push(val.value()),
},
Value::List(_) => unreachable!(),
});
column.null_mask = null_mask.into_vec();
@@ -339,8 +375,6 @@ fn ddl_request_type(request: &DdlRequest) -> &'static str {
Some(Expr::CreateTable(_)) => "ddl.create_table",
Some(Expr::Alter(_)) => "ddl.alter",
Some(Expr::DropTable(_)) => "ddl.drop_table",
Some(Expr::FlushTable(_)) => "ddl.flush_table",
Some(Expr::CompactTable(_)) => "ddl.compact_table",
Some(Expr::TruncateTable(_)) => "ddl.truncate_table",
None => "ddl.empty",
}
@@ -378,10 +412,16 @@ pub fn pb_value_to_value_ref(value: &v1::Value) -> ValueRef {
ValueData::StringValue(string) => ValueRef::String(string.as_str()),
ValueData::DateValue(d) => ValueRef::Date(Date::from(*d)),
ValueData::DatetimeValue(d) => ValueRef::DateTime(DateTime::new(*d)),
ValueData::TsSecondValue(t) => ValueRef::Timestamp(Timestamp::new_second(*t)),
ValueData::TsMillisecondValue(t) => ValueRef::Timestamp(Timestamp::new_millisecond(*t)),
ValueData::TsMicrosecondValue(t) => ValueRef::Timestamp(Timestamp::new_microsecond(*t)),
ValueData::TsNanosecondValue(t) => ValueRef::Timestamp(Timestamp::new_nanosecond(*t)),
ValueData::TimestampSecondValue(t) => ValueRef::Timestamp(Timestamp::new_second(*t)),
ValueData::TimestampMillisecondValue(t) => {
ValueRef::Timestamp(Timestamp::new_millisecond(*t))
}
ValueData::TimestampMicrosecondValue(t) => {
ValueRef::Timestamp(Timestamp::new_microsecond(*t))
}
ValueData::TimestampNanosecondValue(t) => {
ValueRef::Timestamp(Timestamp::new_nanosecond(*t))
}
ValueData::TimeSecondValue(t) => ValueRef::Time(Time::new_second(*t)),
ValueData::TimeMillisecondValue(t) => ValueRef::Time(Time::new_millisecond(*t)),
ValueData::TimeMicrosecondValue(t) => ValueRef::Time(Time::new_microsecond(*t)),
@@ -392,6 +432,10 @@ pub fn pb_value_to_value_ref(value: &v1::Value) -> ValueRef {
let interval = Interval::from_month_day_nano(v.months, v.days, v.nanoseconds);
ValueRef::Interval(interval)
}
ValueData::DurationSecondValue(v) => ValueRef::Duration(Duration::new_second(*v)),
ValueData::DurationMillisecondValue(v) => ValueRef::Duration(Duration::new_millisecond(*v)),
ValueData::DurationMicrosecondValue(v) => ValueRef::Duration(Duration::new_microsecond(*v)),
ValueData::DurationNanosecondValue(v) => ValueRef::Duration(Duration::new_nanosecond(*v)),
}
}
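// Illustrative sketch (not part of the change above; assumes the crate's `Duration`
// and `ValueRef` types used elsewhere in this diff): a protobuf duration value
// decodes into a `ValueRef::Duration` with the matching time unit.
fn _pb_duration_value_example() {
    let pb = v1::Value {
        value_data: Some(ValueData::DurationSecondValue(30)),
    };
    assert_eq!(
        pb_value_to_value_ref(&pb),
        ValueRef::Duration(Duration::new_second(30))
    );
}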
@@ -421,17 +465,17 @@ pub fn pb_values_to_vector_ref(data_type: &ConcreteDataType, values: Values) ->
ConcreteDataType::Date(_) => Arc::new(DateVector::from_vec(values.date_values)),
ConcreteDataType::DateTime(_) => Arc::new(DateTimeVector::from_vec(values.datetime_values)),
ConcreteDataType::Timestamp(unit) => match unit {
TimestampType::Second(_) => {
Arc::new(TimestampSecondVector::from_vec(values.ts_second_values))
}
TimestampType::Second(_) => Arc::new(TimestampSecondVector::from_vec(
values.timestamp_second_values,
)),
TimestampType::Millisecond(_) => Arc::new(TimestampMillisecondVector::from_vec(
values.ts_millisecond_values,
values.timestamp_millisecond_values,
)),
TimestampType::Microsecond(_) => Arc::new(TimestampMicrosecondVector::from_vec(
values.ts_microsecond_values,
values.timestamp_microsecond_values,
)),
TimestampType::Nanosecond(_) => Arc::new(TimestampNanosecondVector::from_vec(
values.ts_nanosecond_values,
values.timestamp_nanosecond_values,
)),
},
ConcreteDataType::Time(unit) => match unit {
@@ -464,6 +508,20 @@ pub fn pb_values_to_vector_ref(data_type: &ConcreteDataType, values: Values) ->
))
}
},
ConcreteDataType::Duration(unit) => match unit {
DurationType::Second(_) => Arc::new(DurationSecondVector::from_vec(
values.duration_second_values,
)),
DurationType::Millisecond(_) => Arc::new(DurationMillisecondVector::from_vec(
values.duration_millisecond_values,
)),
DurationType::Microsecond(_) => Arc::new(DurationMicrosecondVector::from_vec(
values.duration_microsecond_values,
)),
DurationType::Nanosecond(_) => Arc::new(DurationNanosecondVector::from_vec(
values.duration_nanosecond_values,
)),
},
ConcreteDataType::Null(_) | ConcreteDataType::List(_) | ConcreteDataType::Dictionary(_) => {
unreachable!()
}
@@ -553,22 +611,22 @@ pub fn pb_values_to_values(data_type: &ConcreteDataType, values: Values) -> Vec<
.map(|v| Value::Date(v.into()))
.collect(),
ConcreteDataType::Timestamp(TimestampType::Second(_)) => values
.ts_second_values
.timestamp_second_values
.into_iter()
.map(|v| Value::Timestamp(Timestamp::new_second(v)))
.collect(),
ConcreteDataType::Timestamp(TimestampType::Millisecond(_)) => values
.ts_millisecond_values
.timestamp_millisecond_values
.into_iter()
.map(|v| Value::Timestamp(Timestamp::new_millisecond(v)))
.collect(),
ConcreteDataType::Timestamp(TimestampType::Microsecond(_)) => values
.ts_microsecond_values
.timestamp_microsecond_values
.into_iter()
.map(|v| Value::Timestamp(Timestamp::new_microsecond(v)))
.collect(),
ConcreteDataType::Timestamp(TimestampType::Nanosecond(_)) => values
.ts_nanosecond_values
.timestamp_nanosecond_values
.into_iter()
.map(|v| Value::Timestamp(Timestamp::new_nanosecond(v)))
.collect(),
@@ -614,6 +672,26 @@ pub fn pb_values_to_values(data_type: &ConcreteDataType, values: Values) -> Vec<
))
})
.collect(),
ConcreteDataType::Duration(DurationType::Second(_)) => values
.duration_second_values
.into_iter()
.map(|v| Value::Duration(Duration::new_second(v)))
.collect(),
ConcreteDataType::Duration(DurationType::Millisecond(_)) => values
.duration_millisecond_values
.into_iter()
.map(|v| Value::Duration(Duration::new_millisecond(v)))
.collect(),
ConcreteDataType::Duration(DurationType::Microsecond(_)) => values
.duration_microsecond_values
.into_iter()
.map(|v| Value::Duration(Duration::new_microsecond(v)))
.collect(),
ConcreteDataType::Duration(DurationType::Nanosecond(_)) => values
.duration_nanosecond_values
.into_iter()
.map(|v| Value::Duration(Duration::new_nanosecond(v)))
.collect(),
ConcreteDataType::Null(_) | ConcreteDataType::List(_) | ConcreteDataType::Dictionary(_) => {
unreachable!()
}
@@ -685,16 +763,16 @@ pub fn to_proto_value(value: Value) -> Option<v1::Value> {
},
Value::Timestamp(v) => match v.unit() {
TimeUnit::Second => v1::Value {
value_data: Some(ValueData::TsSecondValue(v.value())),
value_data: Some(ValueData::TimestampSecondValue(v.value())),
},
TimeUnit::Millisecond => v1::Value {
value_data: Some(ValueData::TsMillisecondValue(v.value())),
value_data: Some(ValueData::TimestampMillisecondValue(v.value())),
},
TimeUnit::Microsecond => v1::Value {
value_data: Some(ValueData::TsMicrosecondValue(v.value())),
value_data: Some(ValueData::TimestampMicrosecondValue(v.value())),
},
TimeUnit::Nanosecond => v1::Value {
value_data: Some(ValueData::TsNanosecondValue(v.value())),
value_data: Some(ValueData::TimestampNanosecondValue(v.value())),
},
},
Value::Time(v) => match v.unit() {
@@ -724,6 +802,20 @@ pub fn to_proto_value(value: Value) -> Option<v1::Value> {
)),
},
},
Value::Duration(v) => match v.unit() {
TimeUnit::Second => v1::Value {
value_data: Some(ValueData::DurationSecondValue(v.value())),
},
TimeUnit::Millisecond => v1::Value {
value_data: Some(ValueData::DurationMillisecondValue(v.value())),
},
TimeUnit::Microsecond => v1::Value {
value_data: Some(ValueData::DurationMicrosecondValue(v.value())),
},
TimeUnit::Nanosecond => v1::Value {
value_data: Some(ValueData::DurationNanosecondValue(v.value())),
},
},
Value::List(_) => return None,
};
@@ -750,10 +842,10 @@ pub fn proto_value_type(value: &v1::Value) -> Option<ColumnDataType> {
ValueData::StringValue(_) => ColumnDataType::String,
ValueData::DateValue(_) => ColumnDataType::Date,
ValueData::DatetimeValue(_) => ColumnDataType::Datetime,
ValueData::TsSecondValue(_) => ColumnDataType::TimestampSecond,
ValueData::TsMillisecondValue(_) => ColumnDataType::TimestampMillisecond,
ValueData::TsMicrosecondValue(_) => ColumnDataType::TimestampMicrosecond,
ValueData::TsNanosecondValue(_) => ColumnDataType::TimestampNanosecond,
ValueData::TimestampSecondValue(_) => ColumnDataType::TimestampSecond,
ValueData::TimestampMillisecondValue(_) => ColumnDataType::TimestampMillisecond,
ValueData::TimestampMicrosecondValue(_) => ColumnDataType::TimestampMicrosecond,
ValueData::TimestampNanosecondValue(_) => ColumnDataType::TimestampNanosecond,
ValueData::TimeSecondValue(_) => ColumnDataType::TimeSecond,
ValueData::TimeMillisecondValue(_) => ColumnDataType::TimeMillisecond,
ValueData::TimeMicrosecondValue(_) => ColumnDataType::TimeMicrosecond,
@@ -761,6 +853,10 @@ pub fn proto_value_type(value: &v1::Value) -> Option<ColumnDataType> {
ValueData::IntervalYearMonthValues(_) => ColumnDataType::IntervalYearMonth,
ValueData::IntervalDayTimeValues(_) => ColumnDataType::IntervalDayTime,
ValueData::IntervalMonthDayNanoValues(_) => ColumnDataType::IntervalMonthDayNano,
ValueData::DurationSecondValue(_) => ColumnDataType::DurationSecond,
ValueData::DurationMillisecondValue(_) => ColumnDataType::DurationMillisecond,
ValueData::DurationMicrosecondValue(_) => ColumnDataType::DurationMicrosecond,
ValueData::DurationNanosecondValue(_) => ColumnDataType::DurationNanosecond,
};
Some(value_type)
}
@@ -797,15 +893,92 @@ pub fn to_column_data_type(data_type: &ConcreteDataType) -> Option<ColumnDataTyp
ConcreteDataType::Time(TimeType::Millisecond(_)) => ColumnDataType::TimeMillisecond,
ConcreteDataType::Time(TimeType::Microsecond(_)) => ColumnDataType::TimeMicrosecond,
ConcreteDataType::Time(TimeType::Nanosecond(_)) => ColumnDataType::TimeNanosecond,
ConcreteDataType::Null(_)
| ConcreteDataType::Interval(_)
| ConcreteDataType::List(_)
| ConcreteDataType::Dictionary(_) => return None,
ConcreteDataType::Duration(DurationType::Second(_)) => ColumnDataType::DurationSecond,
ConcreteDataType::Duration(DurationType::Millisecond(_)) => {
ColumnDataType::DurationMillisecond
}
ConcreteDataType::Duration(DurationType::Microsecond(_)) => {
ColumnDataType::DurationMicrosecond
}
ConcreteDataType::Duration(DurationType::Nanosecond(_)) => {
ColumnDataType::DurationNanosecond
}
ConcreteDataType::Interval(IntervalType::YearMonth(_)) => ColumnDataType::IntervalYearMonth,
ConcreteDataType::Interval(IntervalType::MonthDayNano(_)) => {
ColumnDataType::IntervalMonthDayNano
}
ConcreteDataType::Interval(IntervalType::DayTime(_)) => ColumnDataType::IntervalDayTime,
ConcreteDataType::Null(_) | ConcreteDataType::List(_) | ConcreteDataType::Dictionary(_) => {
return None
}
};
Some(column_data_type)
}
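// Illustrative sketch: with the duration arms added above, duration concrete types
// now map to the corresponding protobuf column types (and back, via the wrapper).
fn _duration_column_type_example() {
    assert_eq!(
        to_column_data_type(&ConcreteDataType::duration_second_datatype()),
        Some(ColumnDataType::DurationSecond)
    );
}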
pub fn vectors_to_rows<'a>(
columns: impl Iterator<Item = &'a VectorRef>,
row_count: usize,
) -> Vec<Row> {
let mut rows = vec![Row { values: vec![] }; row_count];
for column in columns {
for (row_index, row) in rows.iter_mut().enumerate() {
row.values.push(value_to_grpc_value(column.get(row_index)))
}
}
rows
}
pub fn value_to_grpc_value(value: Value) -> GrpcValue {
GrpcValue {
value_data: match value {
Value::Null => None,
Value::Boolean(v) => Some(ValueData::BoolValue(v)),
Value::UInt8(v) => Some(ValueData::U8Value(v as _)),
Value::UInt16(v) => Some(ValueData::U16Value(v as _)),
Value::UInt32(v) => Some(ValueData::U32Value(v)),
Value::UInt64(v) => Some(ValueData::U64Value(v)),
Value::Int8(v) => Some(ValueData::I8Value(v as _)),
Value::Int16(v) => Some(ValueData::I16Value(v as _)),
Value::Int32(v) => Some(ValueData::I32Value(v)),
Value::Int64(v) => Some(ValueData::I64Value(v)),
Value::Float32(v) => Some(ValueData::F32Value(*v)),
Value::Float64(v) => Some(ValueData::F64Value(*v)),
Value::String(v) => Some(ValueData::StringValue(v.as_utf8().to_string())),
Value::Binary(v) => Some(ValueData::BinaryValue(v.to_vec())),
Value::Date(v) => Some(ValueData::DateValue(v.val())),
Value::DateTime(v) => Some(ValueData::DatetimeValue(v.val())),
Value::Timestamp(v) => Some(match v.unit() {
TimeUnit::Second => ValueData::TimestampSecondValue(v.value()),
TimeUnit::Millisecond => ValueData::TimestampMillisecondValue(v.value()),
TimeUnit::Microsecond => ValueData::TimestampMicrosecondValue(v.value()),
TimeUnit::Nanosecond => ValueData::TimestampNanosecondValue(v.value()),
}),
Value::Time(v) => Some(match v.unit() {
TimeUnit::Second => ValueData::TimeSecondValue(v.value()),
TimeUnit::Millisecond => ValueData::TimeMillisecondValue(v.value()),
TimeUnit::Microsecond => ValueData::TimeMicrosecondValue(v.value()),
TimeUnit::Nanosecond => ValueData::TimeNanosecondValue(v.value()),
}),
Value::Interval(v) => Some(match v.unit() {
IntervalUnit::YearMonth => ValueData::IntervalYearMonthValues(v.to_i32()),
IntervalUnit::DayTime => ValueData::IntervalDayTimeValues(v.to_i64()),
IntervalUnit::MonthDayNano => {
ValueData::IntervalMonthDayNanoValues(convert_i128_to_interval(v.to_i128()))
}
}),
Value::Duration(v) => Some(match v.unit() {
TimeUnit::Second => ValueData::DurationSecondValue(v.value()),
TimeUnit::Millisecond => ValueData::DurationMillisecondValue(v.value()),
TimeUnit::Microsecond => ValueData::DurationMicrosecondValue(v.value()),
TimeUnit::Nanosecond => ValueData::DurationNanosecondValue(v.value()),
}),
Value::List(_) => unreachable!(),
},
}
}
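// Illustrative sketch (assumes the crate's `Value` and `Duration` types): the new
// duration arm above turns a duration value into the matching gRPC value data.
fn _duration_to_grpc_value_example() {
    let value = Value::Duration(Duration::new_millisecond(1_500));
    let grpc_value = value_to_grpc_value(value);
    assert_eq!(
        grpc_value.value_data,
        Some(ValueData::DurationMillisecondValue(1_500))
    );
}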
/// Returns true if the column type is equal to expected type.
fn is_column_type_eq(column_type: ColumnDataType, expect_type: &ConcreteDataType) -> bool {
if let Some(expect) = to_column_data_type(expect_type) {
@@ -820,14 +993,16 @@ mod tests {
use std::sync::Arc;
use datatypes::types::{
IntervalDayTimeType, IntervalMonthDayNanoType, IntervalYearMonthType, TimeMillisecondType,
TimeSecondType, TimestampMillisecondType, TimestampSecondType,
DurationMillisecondType, DurationSecondType, Int32Type, IntervalDayTimeType,
IntervalMonthDayNanoType, IntervalYearMonthType, TimeMillisecondType, TimeSecondType,
TimestampMillisecondType, TimestampSecondType, UInt32Type,
};
use datatypes::vectors::{
BooleanVector, IntervalDayTimeVector, IntervalMonthDayNanoVector, IntervalYearMonthVector,
TimeMicrosecondVector, TimeMillisecondVector, TimeNanosecondVector, TimeSecondVector,
TimestampMicrosecondVector, TimestampMillisecondVector, TimestampNanosecondVector,
TimestampSecondVector, Vector,
BooleanVector, DurationMicrosecondVector, DurationMillisecondVector,
DurationNanosecondVector, DurationSecondVector, IntervalDayTimeVector,
IntervalMonthDayNanoVector, IntervalYearMonthVector, TimeMicrosecondVector,
TimeMillisecondVector, TimeNanosecondVector, TimeSecondVector, TimestampMicrosecondVector,
TimestampMillisecondVector, TimestampNanosecondVector, TimestampSecondVector, Vector,
};
use paste::paste;
@@ -888,7 +1063,7 @@ mod tests {
assert_eq!(2, values.capacity());
let values = values_with_capacity(ColumnDataType::TimestampMillisecond, 2);
let values = values.ts_millisecond_values;
let values = values.timestamp_millisecond_values;
assert_eq!(2, values.capacity());
let values = values_with_capacity(ColumnDataType::TimeMillisecond, 2);
@@ -902,6 +1077,10 @@ mod tests {
let values = values_with_capacity(ColumnDataType::IntervalMonthDayNano, 2);
let values = values.interval_month_day_nano_values;
assert_eq!(2, values.capacity());
let values = values_with_capacity(ColumnDataType::DurationMillisecond, 2);
let values = values.duration_millisecond_values;
assert_eq!(2, values.capacity());
}
#[test]
@@ -986,6 +1165,10 @@ mod tests {
ConcreteDataType::interval_datatype(IntervalUnit::MonthDayNano),
ColumnDataTypeWrapper(ColumnDataType::IntervalMonthDayNano).into()
);
assert_eq!(
ConcreteDataType::duration_millisecond_datatype(),
ColumnDataTypeWrapper(ColumnDataType::DurationMillisecond).into()
)
}
#[test]
@@ -1074,6 +1257,12 @@ mod tests {
.try_into()
.unwrap()
);
assert_eq!(
ColumnDataTypeWrapper(ColumnDataType::DurationMillisecond),
ConcreteDataType::duration_millisecond_datatype()
.try_into()
.unwrap()
);
let result: Result<ColumnDataTypeWrapper> = ConcreteDataType::null_datatype().try_into();
assert!(result.is_err());
@@ -1107,28 +1296,28 @@ mod tests {
push_vals(&mut column, 3, vector);
assert_eq!(
vec![1, 2, 3],
column.values.as_ref().unwrap().ts_nanosecond_values
column.values.as_ref().unwrap().timestamp_nanosecond_values
);
let vector = Arc::new(TimestampMillisecondVector::from_vec(vec![4, 5, 6]));
push_vals(&mut column, 3, vector);
assert_eq!(
vec![4, 5, 6],
column.values.as_ref().unwrap().ts_millisecond_values
column.values.as_ref().unwrap().timestamp_millisecond_values
);
let vector = Arc::new(TimestampMicrosecondVector::from_vec(vec![7, 8, 9]));
push_vals(&mut column, 3, vector);
assert_eq!(
vec![7, 8, 9],
column.values.as_ref().unwrap().ts_microsecond_values
column.values.as_ref().unwrap().timestamp_microsecond_values
);
let vector = Arc::new(TimestampSecondVector::from_vec(vec![10, 11, 12]));
push_vals(&mut column, 3, vector);
assert_eq!(
vec![10, 11, 12],
column.values.as_ref().unwrap().ts_second_values
column.values.as_ref().unwrap().timestamp_second_values
);
}
@@ -1217,6 +1406,47 @@ mod tests {
});
}
#[test]
fn test_column_put_duration_values() {
let mut column = Column {
column_name: "test".to_string(),
semantic_type: 0,
values: Some(Values {
..Default::default()
}),
null_mask: vec![],
datatype: 0,
};
let vector = Arc::new(DurationNanosecondVector::from_vec(vec![1, 2, 3]));
push_vals(&mut column, 3, vector);
assert_eq!(
vec![1, 2, 3],
column.values.as_ref().unwrap().duration_nanosecond_values
);
let vector = Arc::new(DurationMicrosecondVector::from_vec(vec![7, 8, 9]));
push_vals(&mut column, 3, vector);
assert_eq!(
vec![7, 8, 9],
column.values.as_ref().unwrap().duration_microsecond_values
);
let vector = Arc::new(DurationMillisecondVector::from_vec(vec![4, 5, 6]));
push_vals(&mut column, 3, vector);
assert_eq!(
vec![4, 5, 6],
column.values.as_ref().unwrap().duration_millisecond_values
);
let vector = Arc::new(DurationSecondVector::from_vec(vec![10, 11, 12]));
push_vals(&mut column, 3, vector);
assert_eq!(
vec![10, 11, 12],
column.values.as_ref().unwrap().duration_second_values
);
}
#[test]
fn test_column_put_vector() {
use crate::v1::SemanticType;
@@ -1257,7 +1487,7 @@ mod tests {
let actual = pb_values_to_values(
&ConcreteDataType::Timestamp(TimestampType::Second(TimestampSecondType)),
Values {
ts_second_values: vec![1_i64, 2_i64, 3_i64],
timestamp_second_values: vec![1_i64, 2_i64, 3_i64],
..Default::default()
},
);
@@ -1272,7 +1502,7 @@ mod tests {
let actual = pb_values_to_values(
&ConcreteDataType::Timestamp(TimestampType::Millisecond(TimestampMillisecondType)),
Values {
ts_millisecond_values: vec![1_i64, 2_i64, 3_i64],
timestamp_millisecond_values: vec![1_i64, 2_i64, 3_i64],
..Default::default()
},
);
@@ -1317,6 +1547,39 @@ mod tests {
assert_eq!(expect, actual);
}
#[test]
fn test_convert_duration_values() {
// second
let actual = pb_values_to_values(
&ConcreteDataType::Duration(DurationType::Second(DurationSecondType)),
Values {
duration_second_values: vec![1_i64, 2_i64, 3_i64],
..Default::default()
},
);
let expect = vec![
Value::Duration(Duration::new_second(1_i64)),
Value::Duration(Duration::new_second(2_i64)),
Value::Duration(Duration::new_second(3_i64)),
];
assert_eq!(expect, actual);
// millisecond
let actual = pb_values_to_values(
&ConcreteDataType::Duration(DurationType::Millisecond(DurationMillisecondType)),
Values {
duration_millisecond_values: vec![1_i64, 2_i64, 3_i64],
..Default::default()
},
);
let expect = vec![
Value::Duration(Duration::new_millisecond(1_i64)),
Value::Duration(Duration::new_millisecond(2_i64)),
Value::Duration(Duration::new_millisecond(3_i64)),
];
assert_eq!(expect, actual);
}
#[test]
fn test_convert_interval_values() {
// year_month
@@ -1524,4 +1787,76 @@ mod tests {
Value::DateTime(3.into())
]
);
#[test]
fn test_vectors_to_rows_for_different_types() {
let boolean_vec = BooleanVector::from_vec(vec![true, false, true]);
let int8_vec = PrimitiveVector::<Int8Type>::from_iter_values(vec![1, 2, 3]);
let int32_vec = PrimitiveVector::<Int32Type>::from_iter_values(vec![100, 200, 300]);
let uint8_vec = PrimitiveVector::<UInt8Type>::from_iter_values(vec![10, 20, 30]);
let uint32_vec = PrimitiveVector::<UInt32Type>::from_iter_values(vec![1000, 2000, 3000]);
let float32_vec = Float32Vector::from_vec(vec![1.1, 2.2, 3.3]);
let date_vec = DateVector::from_vec(vec![10, 20, 30]);
let string_vec = StringVector::from_vec(vec!["a", "b", "c"]);
let vector_refs: Vec<VectorRef> = vec![
Arc::new(boolean_vec),
Arc::new(int8_vec),
Arc::new(int32_vec),
Arc::new(uint8_vec),
Arc::new(uint32_vec),
Arc::new(float32_vec),
Arc::new(date_vec),
Arc::new(string_vec),
];
let result = vectors_to_rows(vector_refs.iter(), 3);
assert_eq!(result.len(), 3);
assert_eq!(result[0].values.len(), 8);
let values = result[0]
.values
.iter()
.map(|v| v.value_data.clone().unwrap())
.collect::<Vec<_>>();
assert_eq!(values[0], ValueData::BoolValue(true));
assert_eq!(values[1], ValueData::I8Value(1));
assert_eq!(values[2], ValueData::I32Value(100));
assert_eq!(values[3], ValueData::U8Value(10));
assert_eq!(values[4], ValueData::U32Value(1000));
assert_eq!(values[5], ValueData::F32Value(1.1));
assert_eq!(values[6], ValueData::DateValue(10));
assert_eq!(values[7], ValueData::StringValue("a".to_string()));
assert_eq!(result[1].values.len(), 8);
let values = result[1]
.values
.iter()
.map(|v| v.value_data.clone().unwrap())
.collect::<Vec<_>>();
assert_eq!(values[0], ValueData::BoolValue(false));
assert_eq!(values[1], ValueData::I8Value(2));
assert_eq!(values[2], ValueData::I32Value(200));
assert_eq!(values[3], ValueData::U8Value(20));
assert_eq!(values[4], ValueData::U32Value(2000));
assert_eq!(values[5], ValueData::F32Value(2.2));
assert_eq!(values[6], ValueData::DateValue(20));
assert_eq!(values[7], ValueData::StringValue("b".to_string()));
assert_eq!(result[2].values.len(), 8);
let values = result[2]
.values
.iter()
.map(|v| v.value_data.clone().unwrap())
.collect::<Vec<_>>();
assert_eq!(values[0], ValueData::BoolValue(true));
assert_eq!(values[1], ValueData::I8Value(3));
assert_eq!(values[2], ValueData::I32Value(300));
assert_eq!(values[3], ValueData::U8Value(30));
assert_eq!(values[4], ValueData::U32Value(3000));
assert_eq!(values[5], ValueData::F32Value(3.3));
assert_eq!(values[6], ValueData::DateValue(30));
assert_eq!(values[7], ValueData::StringValue("c".to_string()));
}
}

View File

@@ -12,7 +12,9 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use datatypes::schema::{ColumnDefaultConstraint, ColumnSchema};
use std::collections::HashMap;
use datatypes::schema::{ColumnDefaultConstraint, ColumnSchema, COMMENT_KEY};
use snafu::ResultExt;
use crate::error::{self, Result};
@@ -20,7 +22,7 @@ use crate::helper::ColumnDataTypeWrapper;
use crate::v1::ColumnDef;
pub fn try_as_column_schema(column_def: &ColumnDef) -> Result<ColumnSchema> {
let data_type = ColumnDataTypeWrapper::try_new(column_def.datatype)?;
let data_type = ColumnDataTypeWrapper::try_new(column_def.data_type)?;
let constraint = if column_def.default_constraint.is_empty() {
None
@@ -34,9 +36,17 @@ pub fn try_as_column_schema(column_def: &ColumnDef) -> Result<ColumnSchema> {
)
};
ColumnSchema::new(&column_def.name, data_type.into(), column_def.is_nullable)
.with_default_constraint(constraint)
.context(error::InvalidColumnDefaultConstraintSnafu {
column: &column_def.name,
})
let mut metadata = HashMap::new();
if !column_def.comment.is_empty() {
metadata.insert(COMMENT_KEY.to_string(), column_def.comment.clone());
}
Ok(
ColumnSchema::new(&column_def.name, data_type.into(), column_def.is_nullable)
.with_default_constraint(constraint)
.context(error::InvalidColumnDefaultConstraintSnafu {
column: &column_def.name,
})?
.with_metadata(metadata),
)
}
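// Illustrative sketch (hypothetical field values; remaining ColumnDef fields are
// defaulted, and ColumnSchema::metadata() is assumed to expose the metadata map):
// a non-empty comment on a ColumnDef ends up in the schema metadata under COMMENT_KEY.
fn _comment_metadata_example() -> Result<()> {
    let column_def = ColumnDef {
        name: "host".to_string(),
        comment: "host that emitted this metric".to_string(),
        is_nullable: true,
        ..Default::default()
    };
    let schema = try_as_column_schema(&column_def)?;
    assert_eq!(
        schema.metadata().get(COMMENT_KEY).map(String::as_str),
        Some("host that emitted this metric")
    );
    Ok(())
}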

View File

@@ -4,8 +4,6 @@ version.workspace = true
edition.workspace = true
license.workspace = true
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[features]
default = []
testing = []
@@ -14,6 +12,7 @@ testing = []
api.workspace = true
async-trait.workspace = true
common-error.workspace = true
common-macro.workspace = true
digest = "0.10"
hex = { version = "0.4" }
secrecy = { version = "0.8", features = ["serde", "alloc"] }

View File

@@ -26,7 +26,7 @@ use crate::{UserInfoRef, UserProviderRef};
pub(crate) const DEFAULT_USERNAME: &str = "greptime";
/// construct a [`UserInfo`] impl with name
/// construct a [`UserInfo`](crate::user_info::UserInfo) impl with name
/// use default username `greptime` if None is provided
pub fn userinfo_by_name(username: Option<String>) -> UserInfoRef {
DefaultUserInfo::with_name(username.unwrap_or_else(|| DEFAULT_USERNAME.to_string()))

View File

@@ -14,10 +14,12 @@
use common_error::ext::{BoxedError, ErrorExt};
use common_error::status_code::StatusCode;
use common_macro::stack_trace_debug;
use snafu::{Location, Snafu};
#[derive(Debug, Snafu)]
#[derive(Snafu)]
#[snafu(visibility(pub))]
#[stack_trace_debug]
pub enum Error {
#[snafu(display("Invalid config value: {}, {}", value, msg))]
InvalidConfig { value: String, msg: String },
@@ -28,13 +30,14 @@ pub enum Error {
#[snafu(display("Internal state error: {}", msg))]
InternalState { msg: String },
#[snafu(display("IO error, source: {}", source))]
#[snafu(display("IO error"))]
Io {
source: std::io::Error,
#[snafu(source)]
error: std::io::Error,
location: Location,
},
#[snafu(display("Auth failed, source: {}", source))]
#[snafu(display("Auth failed"))]
AuthBackend {
location: Location,
source: BoxedError,

View File

@@ -22,15 +22,15 @@ use crate::UserInfoRef;
pub trait UserProvider: Send + Sync {
fn name(&self) -> &str;
/// [`authenticate`] checks whether a user is valid and allowed to access the database.
/// Checks whether a user is valid and allowed to access the database.
async fn authenticate(&self, id: Identity<'_>, password: Password<'_>) -> Result<UserInfoRef>;
/// [`authorize`] checks whether a connection request
/// Checks whether a connection request
/// from a certain user to a certain catalog/schema is legal.
/// This method should be called after [`authenticate`].
/// This method should be called after [authenticate()](UserProvider::authenticate()).
async fn authorize(&self, catalog: &str, schema: &str, user_info: &UserInfoRef) -> Result<()>;
/// [`auth`] is a combination of [`authenticate`] and [`authorize`].
/// Combination of [authenticate()](UserProvider::authenticate()) and [authorize()](UserProvider::authorize()).
/// In most cases it's preferred for both convenience and performance.
async fn auth(
&self,

View File

@@ -16,6 +16,7 @@ async-trait = "0.1"
common-catalog = { workspace = true }
common-error = { workspace = true }
common-grpc = { workspace = true }
common-macro = { workspace = true }
common-meta = { workspace = true }
common-query = { workspace = true }
common-recordbatch = { workspace = true }
@@ -30,8 +31,9 @@ futures-util.workspace = true
lazy_static.workspace = true
meta-client = { workspace = true }
metrics.workspace = true
moka = { version = "0.11", features = ["future"] }
moka = { workspace = true, features = ["future"] }
parking_lot = "0.12"
partition.workspace = true
regex.workspace = true
serde.workspace = true
serde_json = "1.0"
@@ -46,7 +48,6 @@ catalog = { workspace = true, features = ["testing"] }
chrono.workspace = true
common-test-util = { workspace = true }
log-store = { workspace = true }
mito = { workspace = true, features = ["test"] }
object-store = { workspace = true }
storage = { workspace = true }
tokio.workspace = true

View File

@@ -17,54 +17,48 @@ use std::fmt::Debug;
use common_error::ext::{BoxedError, ErrorExt};
use common_error::status_code::StatusCode;
use common_macro::stack_trace_debug;
use datafusion::error::DataFusionError;
use datatypes::prelude::ConcreteDataType;
use snafu::{Location, Snafu};
use table::metadata::TableId;
use tokio::task::JoinError;
use crate::DeregisterTableRequest;
#[derive(Debug, Snafu)]
#[derive(Snafu)]
#[snafu(visibility(pub))]
#[stack_trace_debug]
pub enum Error {
#[snafu(display("Failed to list catalogs, source: {}", source))]
#[snafu(display("Failed to list catalogs"))]
ListCatalogs {
location: Location,
source: BoxedError,
},
#[snafu(display("Failed to list {}'s schemas, source: {}", catalog, source))]
#[snafu(display("Failed to list {}'s schemas", catalog))]
ListSchemas {
location: Location,
catalog: String,
source: BoxedError,
},
#[snafu(display(
"Failed to re-compile script due to internal error, source: {}",
source
))]
#[snafu(display("Failed to re-compile script due to internal error"))]
CompileScriptInternal {
location: Location,
source: BoxedError,
},
#[snafu(display("Failed to open system catalog table, source: {}", source))]
#[snafu(display("Failed to open system catalog table"))]
OpenSystemCatalog {
location: Location,
source: table::error::Error,
},
#[snafu(display("Failed to create system catalog table, source: {}", source))]
#[snafu(display("Failed to create system catalog table"))]
CreateSystemCatalog {
location: Location,
source: table::error::Error,
},
#[snafu(display(
"Failed to create table, table info: {}, source: {}",
table_info,
source
))]
#[snafu(display("Failed to create table, table info: {}", table_info))]
CreateTable {
table_info: String,
location: Location,
@@ -98,13 +92,14 @@ pub enum Error {
#[snafu(display("Catalog value is not present"))]
EmptyValue { location: Location },
#[snafu(display("Failed to deserialize value, source: {}", source))]
#[snafu(display("Failed to deserialize value"))]
ValueDeserialize {
source: serde_json::error::Error,
#[snafu(source)]
error: serde_json::error::Error,
location: Location,
},
#[snafu(display("Table engine not found: {}, source: {}", engine_name, source))]
#[snafu(display("Table engine not found: {}", engine_name))]
TableEngineNotFound {
engine_name: String,
location: Location,
@@ -142,15 +137,18 @@ pub enum Error {
#[snafu(display("Operation {} not supported", op))]
NotSupported { op: String, location: Location },
#[snafu(display("Failed to open table, table info: {}, source: {}", table_info, source))]
#[snafu(display("Failed to open table {table_id}"))]
OpenTable {
table_info: String,
table_id: TableId,
location: Location,
source: table::error::Error,
},
#[snafu(display("Failed to open table in parallel, source: {}", source))]
ParallelOpenTable { source: JoinError },
#[snafu(display("Failed to open table in parallel"))]
ParallelOpenTable {
#[snafu(source)]
error: JoinError,
},
#[snafu(display("Table not found while opening table, table info: {}", table_info))]
TableNotFound {
@@ -164,72 +162,52 @@ pub enum Error {
source: common_recordbatch::error::Error,
},
#[snafu(display("Failed to create recordbatch, source: {}", source))]
#[snafu(display("Failed to create recordbatch"))]
CreateRecordBatch {
location: Location,
source: common_recordbatch::error::Error,
},
#[snafu(display(
"Failed to insert table creation record to system catalog, source: {}",
source
))]
#[snafu(display("Failed to insert table creation record to system catalog"))]
InsertCatalogRecord {
location: Location,
source: table::error::Error,
},
#[snafu(display(
"Failed to deregister table, request: {:?}, source: {}",
request,
source
))]
DeregisterTable {
request: DeregisterTableRequest,
location: Location,
source: table::error::Error,
},
#[snafu(display("Illegal catalog manager state: {}", msg))]
IllegalManagerState { location: Location, msg: String },
#[snafu(display("Failed to scan system catalog table, source: {}", source))]
#[snafu(display("Failed to scan system catalog table"))]
SystemCatalogTableScan {
location: Location,
source: table::error::Error,
},
#[snafu(display("{source}"))]
#[snafu(display(""))]
Internal {
location: Location,
source: BoxedError,
},
#[snafu(display(
"Failed to upgrade weak catalog manager reference. location: {}",
location
))]
#[snafu(display("Failed to upgrade weak catalog manager reference"))]
UpgradeWeakCatalogManagerRef { location: Location },
#[snafu(display("Failed to execute system catalog table scan, source: {}", source))]
#[snafu(display("Failed to execute system catalog table scan"))]
SystemCatalogTableScanExec {
location: Location,
source: common_query::error::Error,
},
#[snafu(display("Cannot parse catalog value, source: {}", source))]
#[snafu(display("Cannot parse catalog value"))]
InvalidCatalogValue {
location: Location,
source: common_catalog::error::Error,
},
#[snafu(display("Failed to perform metasrv operation, source: {}", source))]
#[snafu(display("Failed to perform metasrv operation"))]
MetaSrv {
location: Location,
source: meta_client::error::Error,
},
#[snafu(display("Invalid table info in catalog, source: {}", source))]
#[snafu(display("Invalid table info in catalog"))]
InvalidTableInfoInCatalog {
location: Location,
source: datatypes::error::Error,
@@ -238,17 +216,14 @@ pub enum Error {
#[snafu(display("Illegal access to catalog: {} and schema: {}", catalog, schema))]
QueryAccessDenied { catalog: String, schema: String },
#[snafu(display("Invalid system table definition: {err_msg}"))]
InvalidSystemTableDef { err_msg: String, location: Location },
#[snafu(display("{}: {}", msg, source))]
#[snafu(display(""))]
Datafusion {
msg: String,
source: DataFusionError,
#[snafu(source)]
error: DataFusionError,
location: Location,
},
#[snafu(display("Table schema mismatch, source: {}", source))]
#[snafu(display("Table schema mismatch"))]
TableSchemaMismatch {
location: Location,
source: table::error::Error,
@@ -257,7 +232,7 @@ pub enum Error {
#[snafu(display("A generic error has occurred, msg: {}", msg))]
Generic { msg: String, location: Location },
#[snafu(display("Table metadata manager error: {}", source))]
#[snafu(display("Table metadata manager error"))]
TableMetadataManager {
source: common_meta::error::Error,
location: Location,
@@ -272,10 +247,8 @@ impl ErrorExt for Error {
Error::InvalidKey { .. }
| Error::SchemaNotFound { .. }
| Error::TableNotFound { .. }
| Error::IllegalManagerState { .. }
| Error::CatalogNotFound { .. }
| Error::InvalidEntryType { .. }
| Error::InvalidSystemTableDef { .. }
| Error::ParallelOpenTable { .. } => StatusCode::Unexpected,
Error::SystemCatalog { .. }
@@ -306,7 +279,6 @@ impl ErrorExt for Error {
| Error::InsertCatalogRecord { source, .. }
| Error::OpenTable { source, .. }
| Error::CreateTable { source, .. }
| Error::DeregisterTable { source, .. }
| Error::TableSchemaMismatch { source, .. } => source.status_code(),
Error::MetaSrv { source, .. } => source.status_code(),

View File

@@ -158,7 +158,7 @@ impl InformationSchemaColumnsBuilder {
for schema_name in catalog_manager.schema_names(&catalog_name).await? {
if !catalog_manager
.schema_exist(&catalog_name, &schema_name)
.schema_exists(&catalog_name, &schema_name)
.await?
{
continue;

View File

@@ -154,7 +154,7 @@ impl InformationSchemaTablesBuilder {
for schema_name in catalog_manager.schema_names(&catalog_name).await? {
if !catalog_manager
.schema_exist(&catalog_name, &schema_name)
.schema_exists(&catalog_name, &schema_name)
.await?
{
continue;

View File

@@ -12,11 +12,11 @@
// See the License for the specific language governing permissions and
// limitations under the License.
pub mod immutable;
mod procedure;
#[cfg(test)]
mod tests;
pub use client::{CachedMetaKvBackend, MetaKvBackend};
use table::metadata::TableVersion;
mod client;
mod manager;
const INIT_TABLE_VERSION: TableVersion = 0;
#[cfg(feature = "testing")]
pub mod mock;
pub use manager::KvBackendCatalogManager;

View File

@@ -18,8 +18,9 @@ use std::sync::Arc;
use std::time::Duration;
use common_error::ext::BoxedError;
use common_meta::cache_invalidator::KvCacheInvalidator;
use common_meta::error::Error::{CacheNotGet, GetKvCache};
use common_meta::error::{CacheNotGetSnafu, Error, MetaSrvSnafu, Result};
use common_meta::error::{CacheNotGetSnafu, Error, ExternalSnafu, Result};
use common_meta::kv_backend::{KvBackend, KvBackendRef, TxnService};
use common_meta::rpc::store::{
BatchDeleteRequest, BatchDeleteResponse, BatchGetRequest, BatchGetResponse, BatchPutRequest,
@@ -28,12 +29,11 @@ use common_meta::rpc::store::{
RangeRequest, RangeResponse,
};
use common_meta::rpc::KeyValue;
use common_telemetry::timer;
use common_telemetry::{debug, timer};
use meta_client::client::MetaClient;
use moka::future::{Cache, CacheBuilder};
use snafu::{OptionExt, ResultExt};
use super::KvCacheInvalidator;
use crate::metrics::{METRIC_CATALOG_KV_GET, METRIC_CATALOG_KV_REMOTE_GET};
const CACHE_MAX_CAPACITY: u64 = 10000;
@@ -197,7 +197,8 @@ impl KvBackend for CachedMetaKvBackend {
#[async_trait::async_trait]
impl KvCacheInvalidator for CachedMetaKvBackend {
async fn invalidate_key(&self, key: &[u8]) {
self.cache.invalidate(key).await
self.cache.invalidate(key).await;
debug!("invalidated cache key: {}", String::from_utf8_lossy(key));
}
}
@@ -251,7 +252,7 @@ impl KvBackend for MetaKvBackend {
.range(req)
.await
.map_err(BoxedError::new)
.context(MetaSrvSnafu)
.context(ExternalSnafu)
}
async fn get(&self, key: &[u8]) -> Result<Option<KeyValue>> {
@@ -260,7 +261,7 @@ impl KvBackend for MetaKvBackend {
.range(RangeRequest::new().with_key(key))
.await
.map_err(BoxedError::new)
.context(MetaSrvSnafu)?;
.context(ExternalSnafu)?;
Ok(response.take_kvs().get_mut(0).map(|kv| KeyValue {
key: kv.take_key(),
value: kv.take_value(),
@@ -272,7 +273,7 @@ impl KvBackend for MetaKvBackend {
.batch_put(req)
.await
.map_err(BoxedError::new)
.context(MetaSrvSnafu)
.context(ExternalSnafu)
}
async fn put(&self, req: PutRequest) -> Result<PutResponse> {
@@ -280,7 +281,7 @@ impl KvBackend for MetaKvBackend {
.put(req)
.await
.map_err(BoxedError::new)
.context(MetaSrvSnafu)
.context(ExternalSnafu)
}
async fn delete_range(&self, req: DeleteRangeRequest) -> Result<DeleteRangeResponse> {
@@ -288,7 +289,7 @@ impl KvBackend for MetaKvBackend {
.delete_range(req)
.await
.map_err(BoxedError::new)
.context(MetaSrvSnafu)
.context(ExternalSnafu)
}
async fn batch_delete(&self, req: BatchDeleteRequest) -> Result<BatchDeleteResponse> {
@@ -296,7 +297,7 @@ impl KvBackend for MetaKvBackend {
.batch_delete(req)
.await
.map_err(BoxedError::new)
.context(MetaSrvSnafu)
.context(ExternalSnafu)
}
async fn batch_get(&self, req: BatchGetRequest) -> Result<BatchGetResponse> {
@@ -304,7 +305,7 @@ impl KvBackend for MetaKvBackend {
.batch_get(req)
.await
.map_err(BoxedError::new)
.context(MetaSrvSnafu)
.context(ExternalSnafu)
}
async fn compare_and_put(
@@ -315,7 +316,7 @@ impl KvBackend for MetaKvBackend {
.compare_and_put(request)
.await
.map_err(BoxedError::new)
.context(MetaSrvSnafu)
.context(ExternalSnafu)
}
async fn move_value(&self, req: MoveValueRequest) -> Result<MoveValueResponse> {
@@ -323,7 +324,7 @@ impl KvBackend for MetaKvBackend {
.move_value(req)
.await
.map_err(BoxedError::new)
.context(MetaSrvSnafu)
.context(ExternalSnafu)
}
fn as_any(&self) -> &dyn Any {

View File

@@ -0,0 +1,292 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::any::Any;
use std::collections::BTreeSet;
use std::sync::{Arc, Weak};
use common_catalog::consts::{DEFAULT_SCHEMA_NAME, INFORMATION_SCHEMA_NAME, NUMBERS_TABLE_ID};
use common_error::ext::BoxedError;
use common_meta::cache_invalidator::{CacheInvalidator, CacheInvalidatorRef, Context};
use common_meta::datanode_manager::DatanodeManagerRef;
use common_meta::error::Result as MetaResult;
use common_meta::key::catalog_name::CatalogNameKey;
use common_meta::key::schema_name::SchemaNameKey;
use common_meta::key::table_name::TableNameKey;
use common_meta::key::{TableMetadataManager, TableMetadataManagerRef};
use common_meta::kv_backend::KvBackendRef;
use common_meta::table_name::TableName;
use futures_util::TryStreamExt;
use partition::manager::{PartitionRuleManager, PartitionRuleManagerRef};
use snafu::prelude::*;
use table::dist_table::DistTable;
use table::metadata::TableId;
use table::table::numbers::{NumbersTable, NUMBERS_TABLE_NAME};
use table::TableRef;
use crate::error::{
self as catalog_err, ListCatalogsSnafu, ListSchemasSnafu, Result as CatalogResult,
TableMetadataManagerSnafu,
};
use crate::information_schema::{InformationSchemaProvider, COLUMNS, TABLES};
use crate::CatalogManager;
/// Access all existing catalogs, schemas and tables.
///
/// The result comes from two sources: all the user tables are stored in
/// a kv-backend, which persists the metadata of a table, while the system tables
/// come from `SystemCatalog`, which is static and read-only.
#[derive(Clone)]
pub struct KvBackendCatalogManager {
// TODO(LFC): Maybe use a real implementation for Standalone mode.
// Now we use `NoopKvCacheInvalidator` for Standalone mode. In Standalone mode, the KV backend
// is implemented by RaftEngine. Maybe we need a cache for it?
cache_invalidator: CacheInvalidatorRef,
partition_manager: PartitionRuleManagerRef,
table_metadata_manager: TableMetadataManagerRef,
datanode_manager: DatanodeManagerRef,
/// A sub-CatalogManager that handles system tables
system_catalog: SystemCatalog,
}
#[async_trait::async_trait]
impl CacheInvalidator for KvBackendCatalogManager {
async fn invalidate_table_name(&self, ctx: &Context, table_name: TableName) -> MetaResult<()> {
self.cache_invalidator
.invalidate_table_name(ctx, table_name)
.await
}
async fn invalidate_table_id(&self, ctx: &Context, table_id: TableId) -> MetaResult<()> {
self.cache_invalidator
.invalidate_table_id(ctx, table_id)
.await
}
}
impl KvBackendCatalogManager {
pub fn new(
backend: KvBackendRef,
cache_invalidator: CacheInvalidatorRef,
datanode_manager: DatanodeManagerRef,
) -> Arc<Self> {
Arc::new_cyclic(|me| Self {
partition_manager: Arc::new(PartitionRuleManager::new(backend.clone())),
table_metadata_manager: Arc::new(TableMetadataManager::new(backend)),
cache_invalidator,
datanode_manager,
system_catalog: SystemCatalog {
catalog_manager: me.clone(),
},
})
}
pub fn partition_manager(&self) -> PartitionRuleManagerRef {
self.partition_manager.clone()
}
pub fn table_metadata_manager_ref(&self) -> &TableMetadataManagerRef {
&self.table_metadata_manager
}
pub fn datanode_manager(&self) -> DatanodeManagerRef {
self.datanode_manager.clone()
}
}
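// Illustrative sketch (standalone, std-only) of the `Arc::new_cyclic` pattern used
// in `new` above: the sub-catalog keeps only a `Weak` back-reference to the manager,
// so no strong reference cycle is created between the two.
fn _new_cyclic_pattern_example() {
    use std::sync::{Arc, Weak};
    struct Parent {
        child: Child,
    }
    struct Child {
        parent: Weak<Parent>,
    }
    let parent = Arc::new_cyclic(|me: &Weak<Parent>| Parent {
        child: Child { parent: me.clone() },
    });
    assert!(parent.child.parent.upgrade().is_some());
}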
#[async_trait::async_trait]
impl CatalogManager for KvBackendCatalogManager {
async fn catalog_names(&self) -> CatalogResult<Vec<String>> {
let stream = self
.table_metadata_manager
.catalog_manager()
.catalog_names()
.await;
let keys = stream
.try_collect::<Vec<_>>()
.await
.map_err(BoxedError::new)
.context(ListCatalogsSnafu)?;
Ok(keys)
}
async fn schema_names(&self, catalog: &str) -> CatalogResult<Vec<String>> {
let stream = self
.table_metadata_manager
.schema_manager()
.schema_names(catalog)
.await;
let mut keys = stream
.try_collect::<BTreeSet<_>>()
.await
.map_err(BoxedError::new)
.context(ListSchemasSnafu { catalog })?
.into_iter()
.collect::<Vec<_>>();
keys.extend_from_slice(&self.system_catalog.schema_names());
Ok(keys)
}
async fn table_names(&self, catalog: &str, schema: &str) -> CatalogResult<Vec<String>> {
let mut tables = self
.table_metadata_manager
.table_name_manager()
.tables(catalog, schema)
.await
.context(TableMetadataManagerSnafu)?
.into_iter()
.map(|(k, _)| k)
.collect::<Vec<String>>();
tables.extend_from_slice(&self.system_catalog.table_names(schema));
Ok(tables)
}
async fn catalog_exists(&self, catalog: &str) -> CatalogResult<bool> {
self.table_metadata_manager
.catalog_manager()
.exists(CatalogNameKey::new(catalog))
.await
.context(TableMetadataManagerSnafu)
}
async fn schema_exists(&self, catalog: &str, schema: &str) -> CatalogResult<bool> {
if self.system_catalog.schema_exist(schema) {
return Ok(true);
}
self.table_metadata_manager
.schema_manager()
.exists(SchemaNameKey::new(catalog, schema))
.await
.context(TableMetadataManagerSnafu)
}
async fn table_exists(&self, catalog: &str, schema: &str, table: &str) -> CatalogResult<bool> {
if self.system_catalog.table_exist(schema, table) {
return Ok(true);
}
let key = TableNameKey::new(catalog, schema, table);
self.table_metadata_manager
.table_name_manager()
.get(key)
.await
.context(TableMetadataManagerSnafu)
.map(|x| x.is_some())
}
async fn table(
&self,
catalog: &str,
schema: &str,
table_name: &str,
) -> CatalogResult<Option<TableRef>> {
if let Some(table) = self.system_catalog.table(catalog, schema, table_name) {
return Ok(Some(table));
}
let key = TableNameKey::new(catalog, schema, table_name);
let Some(table_name_value) = self
.table_metadata_manager
.table_name_manager()
.get(key)
.await
.context(TableMetadataManagerSnafu)?
else {
return Ok(None);
};
let table_id = table_name_value.table_id();
let Some(table_info_value) = self
.table_metadata_manager
.table_info_manager()
.get(table_id)
.await
.context(TableMetadataManagerSnafu)?
.map(|v| v.into_inner())
else {
return Ok(None);
};
let table_info = Arc::new(
table_info_value
.table_info
.try_into()
.context(catalog_err::InvalidTableInfoInCatalogSnafu)?,
);
Ok(Some(DistTable::table(table_info)))
}
fn as_any(&self) -> &dyn Any {
self
}
}
// TODO: This struct can hold a static map of all system tables when
// the upper layer (e.g., procedure) can inform the catalog manager
// that a new catalog is created.
/// Existing system tables:
/// - public.numbers
/// - information_schema.tables
/// - information_schema.columns
#[derive(Clone)]
struct SystemCatalog {
catalog_manager: Weak<KvBackendCatalogManager>,
}
impl SystemCatalog {
fn schema_names(&self) -> Vec<String> {
vec![INFORMATION_SCHEMA_NAME.to_string()]
}
fn table_names(&self, schema: &str) -> Vec<String> {
if schema == INFORMATION_SCHEMA_NAME {
vec![TABLES.to_string(), COLUMNS.to_string()]
} else if schema == DEFAULT_SCHEMA_NAME {
vec![NUMBERS_TABLE_NAME.to_string()]
} else {
vec![]
}
}
fn schema_exist(&self, schema: &str) -> bool {
schema == INFORMATION_SCHEMA_NAME
}
fn table_exist(&self, schema: &str, table: &str) -> bool {
if schema == INFORMATION_SCHEMA_NAME {
table == TABLES || table == COLUMNS
} else if schema == DEFAULT_SCHEMA_NAME {
table == NUMBERS_TABLE_NAME
} else {
false
}
}
fn table(&self, catalog: &str, schema: &str, table_name: &str) -> Option<TableRef> {
if schema == INFORMATION_SCHEMA_NAME {
let information_schema_provider =
InformationSchemaProvider::new(catalog.to_string(), self.catalog_manager.clone());
information_schema_provider.table(table_name)
} else if schema == DEFAULT_SCHEMA_NAME && table_name == NUMBERS_TABLE_NAME {
Some(NumbersTable::table(NUMBERS_TABLE_ID))
} else {
None
}
}
}

View File

@@ -55,14 +55,14 @@ impl TableEngine for MockTableEngine {
let data = vec![Arc::new(StringVector::from(vec!["a", "b", "c"])) as _];
let record_batch = RecordBatch::new(schema, data).unwrap();
let table: TableRef = Arc::new(MemTable::new_with_catalog(
let table = MemTable::new_with_catalog(
&request.table_name,
record_batch,
table_id,
request.catalog_name,
request.schema_name,
vec![0],
)) as Arc<_>;
);
let mut tables = self.tables.write().unwrap();
let _ = tables.insert(table_id, table.clone() as TableRef);

View File

@@ -17,79 +17,38 @@
#![feature(try_blocks)]
use std::any::Any;
use std::collections::HashMap;
use std::fmt::{Debug, Formatter};
use std::sync::Arc;
use api::v1::meta::{RegionStat, TableIdent, TableName};
use common_telemetry::{info, warn};
use snafu::ResultExt;
use table::engine::{EngineContext, TableEngineRef};
use table::metadata::{TableId, TableType};
use futures::future::BoxFuture;
use table::metadata::TableId;
use table::requests::CreateTableRequest;
use table::TableRef;
use crate::error::{CreateTableSnafu, Result};
use crate::error::Result;
pub mod error;
pub mod information_schema;
pub mod local;
pub mod kvbackend;
pub mod memory;
mod metrics;
pub mod remote;
pub mod system;
pub mod table_source;
pub mod tables;
#[async_trait::async_trait]
pub trait CatalogManager: Send + Sync {
fn as_any(&self) -> &dyn Any;
/// Starts a catalog manager.
async fn start(&self) -> Result<()>;
/// Registers a catalog to catalog manager, returns whether the catalog exist before.
async fn register_catalog(self: Arc<Self>, name: String) -> Result<bool>;
/// Register a schema with catalog name and schema name. Retuens whether the
/// schema registered.
///
/// # Errors
///
/// This method will/should fail if catalog not exist
async fn register_schema(&self, request: RegisterSchemaRequest) -> Result<bool>;
/// Deregisters a database within given catalog/schema to catalog manager
async fn deregister_schema(&self, request: DeregisterSchemaRequest) -> Result<bool>;
/// Registers a table within given catalog/schema to catalog manager,
/// returns whether the table registered.
///
/// # Errors
///
/// This method will/should fail if catalog or schema not exist
async fn register_table(&self, request: RegisterTableRequest) -> Result<bool>;
/// Deregisters a table within given catalog/schema to catalog manager
async fn deregister_table(&self, request: DeregisterTableRequest) -> Result<()>;
/// Rename a table to [RenameTableRequest::new_table_name], returns whether the table is renamed.
async fn rename_table(&self, request: RenameTableRequest) -> Result<bool>;
/// Register a system table, should be called before starting the manager.
async fn register_system_table(&self, request: RegisterSystemTableRequest)
-> error::Result<()>;
async fn catalog_names(&self) -> Result<Vec<String>>;
async fn schema_names(&self, catalog: &str) -> Result<Vec<String>>;
async fn table_names(&self, catalog: &str, schema: &str) -> Result<Vec<String>>;
async fn catalog_exist(&self, catalog: &str) -> Result<bool>;
async fn catalog_exists(&self, catalog: &str) -> Result<bool>;
async fn schema_exist(&self, catalog: &str, schema: &str) -> Result<bool>;
async fn schema_exists(&self, catalog: &str, schema: &str) -> Result<bool>;
async fn table_exist(&self, catalog: &str, schema: &str, table: &str) -> Result<bool>;
async fn table_exists(&self, catalog: &str, schema: &str, table: &str) -> Result<bool>;
/// Returns the table by catalog, schema and table name.
async fn table(
@@ -103,7 +62,8 @@ pub trait CatalogManager: Send + Sync {
pub type CatalogManagerRef = Arc<dyn CatalogManager>;
/// Hook called after system table opening.
pub type OpenSystemTableHook = Arc<dyn Fn(TableRef) -> Result<()> + Send + Sync>;
pub type OpenSystemTableHook =
Box<dyn Fn(TableRef) -> BoxFuture<'static, Result<()>> + Send + Sync>;
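// Illustrative sketch (hypothetical hook body): with the boxed signature above, a
// hook is an ordinary closure that returns a `BoxFuture`, so callers can await
// asynchronous work after the system table has been opened.
fn _example_open_hook() -> OpenSystemTableHook {
    Box::new(|table: TableRef| -> BoxFuture<'static, Result<()>> {
        Box::pin(async move {
            let _ = table.table_info();
            Ok(())
        })
    })
}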
/// Register system table request:
/// - When system table is already created and registered, the hook will be called
@@ -162,120 +122,3 @@ pub struct RegisterSchemaRequest {
pub catalog: String,
pub schema: String,
}
pub(crate) async fn handle_system_table_request<'a, M: CatalogManager>(
manager: &'a M,
engine: TableEngineRef,
sys_table_requests: &'a mut Vec<RegisterSystemTableRequest>,
) -> Result<()> {
for req in sys_table_requests.drain(..) {
let catalog_name = &req.create_table_request.catalog_name;
let schema_name = &req.create_table_request.schema_name;
let table_name = &req.create_table_request.table_name;
let table_id = req.create_table_request.id;
let table = manager.table(catalog_name, schema_name, table_name).await?;
let table = if let Some(table) = table {
table
} else {
let table = engine
.create_table(&EngineContext::default(), req.create_table_request.clone())
.await
.with_context(|_| CreateTableSnafu {
table_info: common_catalog::format_full_table_name(
catalog_name,
schema_name,
table_name,
),
})?;
let _ = manager
.register_table(RegisterTableRequest {
catalog: catalog_name.clone(),
schema: schema_name.clone(),
table_name: table_name.clone(),
table_id,
table: table.clone(),
})
.await?;
info!("Created and registered system table: {table_name}");
table
};
if let Some(hook) = req.open_hook {
(hook)(table)?;
}
}
Ok(())
}
/// The stat of regions in the datanode node.
/// The number of regions can be got from len of vec.
///
/// Ignores any errors occurred during iterating regions. The intention of this method is to
/// collect region stats that will be carried in Datanode's heartbeat to Metasrv, so it's a
/// "try our best" job.
pub async fn datanode_stat(catalog_manager: &CatalogManagerRef) -> (u64, Vec<RegionStat>) {
let mut region_number: u64 = 0;
let mut region_stats = Vec::new();
let Ok(catalog_names) = catalog_manager.catalog_names().await else {
return (region_number, region_stats);
};
for catalog_name in catalog_names {
let Ok(schema_names) = catalog_manager.schema_names(&catalog_name).await else {
continue;
};
for schema_name in schema_names {
let Ok(table_names) = catalog_manager
.table_names(&catalog_name, &schema_name)
.await
else {
continue;
};
for table_name in table_names {
let Ok(Some(table)) = catalog_manager
.table(&catalog_name, &schema_name, &table_name)
.await
else {
continue;
};
if table.table_type() != TableType::Base {
continue;
}
let table_info = table.table_info();
let region_numbers = &table_info.meta.region_numbers;
region_number += region_numbers.len() as u64;
let engine = &table_info.meta.engine;
let table_id = table_info.ident.table_id;
match table.region_stats() {
Ok(stats) => {
let stats = stats.into_iter().map(|stat| RegionStat {
region_id: stat.region_id,
table_ident: Some(TableIdent {
table_id,
table_name: Some(TableName {
catalog_name: catalog_name.clone(),
schema_name: schema_name.clone(),
table_name: table_name.clone(),
}),
engine: engine.clone(),
}),
approximate_bytes: stat.disk_usage_bytes as i64,
attrs: HashMap::from([("engine_name".to_owned(), engine.clone())]),
..Default::default()
});
region_stats.extend(stats);
}
Err(e) => {
warn!("Failed to get region status, err: {:?}", e);
}
};
}
}
}
(region_number, region_stats)
}

View File

@@ -1,633 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::any::Any;
use std::sync::atomic::{AtomicU32, Ordering};
use std::sync::Arc;
use common_catalog::consts::{
DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, INFORMATION_SCHEMA_NAME, MIN_USER_TABLE_ID,
MITO_ENGINE, NUMBERS_TABLE_ID, SYSTEM_CATALOG_NAME, SYSTEM_CATALOG_TABLE_ID,
SYSTEM_CATALOG_TABLE_NAME,
};
use common_catalog::format_full_table_name;
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
use common_telemetry::{error, info};
use datatypes::prelude::ScalarVector;
use datatypes::vectors::{BinaryVector, UInt8Vector};
use futures_util::lock::Mutex;
use metrics::increment_gauge;
use snafu::{ensure, OptionExt, ResultExt};
use table::engine::manager::TableEngineManagerRef;
use table::engine::EngineContext;
use table::metadata::TableId;
use table::requests::OpenTableRequest;
use table::table::numbers::{NumbersTable, NUMBERS_TABLE_NAME};
use table::table::TableIdProvider;
use table::TableRef;
use crate::error::{
self, CatalogNotFoundSnafu, IllegalManagerStateSnafu, OpenTableSnafu, ReadSystemCatalogSnafu,
Result, SchemaExistsSnafu, SchemaNotFoundSnafu, SystemCatalogSnafu,
SystemCatalogTypeMismatchSnafu, TableEngineNotFoundSnafu, TableExistsSnafu, TableNotExistSnafu,
TableNotFoundSnafu, UnimplementedSnafu,
};
use crate::local::memory::MemoryCatalogManager;
use crate::system::{
decode_system_catalog, Entry, SystemCatalogTable, TableEntry, ENTRY_TYPE_INDEX, KEY_INDEX,
VALUE_INDEX,
};
use crate::tables::SystemCatalog;
use crate::{
handle_system_table_request, CatalogManager, DeregisterSchemaRequest, DeregisterTableRequest,
RegisterSchemaRequest, RegisterSystemTableRequest, RegisterTableRequest, RenameTableRequest,
};
/// A `CatalogManager` consists of a system catalog and a bunch of user catalogs.
pub struct LocalCatalogManager {
system: Arc<SystemCatalog>,
catalogs: Arc<MemoryCatalogManager>,
engine_manager: TableEngineManagerRef,
next_table_id: AtomicU32,
init_lock: Mutex<bool>,
register_lock: Mutex<()>,
system_table_requests: Mutex<Vec<RegisterSystemTableRequest>>,
}
impl LocalCatalogManager {
/// Create a new [CatalogManager] with given user catalogs and mito engine
pub async fn try_new(engine_manager: TableEngineManagerRef) -> Result<Self> {
let engine = engine_manager
.engine(MITO_ENGINE)
.context(TableEngineNotFoundSnafu {
engine_name: MITO_ENGINE,
})?;
let table = SystemCatalogTable::new(engine.clone()).await?;
let memory_catalog_manager = crate::local::memory::new_memory_catalog_manager()?;
let system_catalog = Arc::new(SystemCatalog::new(table));
Ok(Self {
system: system_catalog,
catalogs: memory_catalog_manager,
engine_manager,
next_table_id: AtomicU32::new(MIN_USER_TABLE_ID),
init_lock: Mutex::new(false),
register_lock: Mutex::new(()),
system_table_requests: Mutex::new(Vec::default()),
})
}
/// Scan all entries from system catalog table
pub async fn init(&self) -> Result<()> {
self.init_system_catalog().await?;
let system_records = self.system.information_schema.system.records().await?;
let entries = self.collect_system_catalog_entries(system_records).await?;
let max_table_id = self.handle_system_catalog_entries(entries).await?;
info!(
"All system catalog entries processed, max table id: {}",
max_table_id
);
self.next_table_id
.store((max_table_id + 1).max(MIN_USER_TABLE_ID), Ordering::Relaxed);
*self.init_lock.lock().await = true;
// Processing system table hooks
let mut sys_table_requests = self.system_table_requests.lock().await;
let engine = self
.engine_manager
.engine(MITO_ENGINE)
.context(TableEngineNotFoundSnafu {
engine_name: MITO_ENGINE,
})?;
handle_system_table_request(self, engine, &mut sys_table_requests).await?;
Ok(())
}
async fn init_system_catalog(&self) -> Result<()> {
// register default catalog and default schema
self.catalogs
.register_catalog_sync(DEFAULT_CATALOG_NAME.to_string())?;
self.catalogs.register_schema_sync(RegisterSchemaRequest {
catalog: DEFAULT_CATALOG_NAME.to_string(),
schema: DEFAULT_SCHEMA_NAME.to_string(),
})?;
// register SystemCatalogTable
self.catalogs
.register_catalog_sync(SYSTEM_CATALOG_NAME.to_string())?;
self.catalogs.register_schema_sync(RegisterSchemaRequest {
catalog: SYSTEM_CATALOG_NAME.to_string(),
schema: INFORMATION_SCHEMA_NAME.to_string(),
})?;
let register_table_req = RegisterTableRequest {
catalog: SYSTEM_CATALOG_NAME.to_string(),
schema: INFORMATION_SCHEMA_NAME.to_string(),
table_name: SYSTEM_CATALOG_TABLE_NAME.to_string(),
table_id: SYSTEM_CATALOG_TABLE_ID,
table: self.system.information_schema.system.as_table_ref(),
};
self.catalogs.register_table(register_table_req).await?;
// Add numbers table for test
let register_number_table_req = RegisterTableRequest {
catalog: DEFAULT_CATALOG_NAME.to_string(),
schema: DEFAULT_SCHEMA_NAME.to_string(),
table_name: NUMBERS_TABLE_NAME.to_string(),
table_id: NUMBERS_TABLE_ID,
table: NumbersTable::table(NUMBERS_TABLE_ID),
};
self.catalogs
.register_table(register_number_table_req)
.await?;
Ok(())
}
/// Collects a stream of system catalog entries into a `Vec<Entry>`
async fn collect_system_catalog_entries(
&self,
stream: SendableRecordBatchStream,
) -> Result<Vec<Entry>> {
let record_batch = common_recordbatch::util::collect(stream)
.await
.context(ReadSystemCatalogSnafu)?;
let rbs = record_batch
.into_iter()
.map(Self::record_batch_to_entry)
.collect::<Result<Vec<_>>>()?;
Ok(rbs.into_iter().flat_map(Vec::into_iter).collect::<_>())
}
/// Convert `RecordBatch` to a vector of `Entry`.
fn record_batch_to_entry(rb: RecordBatch) -> Result<Vec<Entry>> {
ensure!(
rb.num_columns() >= 6,
SystemCatalogSnafu {
msg: format!("Length mismatch: {}", rb.num_columns())
}
);
let entry_type = rb
.column(ENTRY_TYPE_INDEX)
.as_any()
.downcast_ref::<UInt8Vector>()
.with_context(|| SystemCatalogTypeMismatchSnafu {
data_type: rb.column(ENTRY_TYPE_INDEX).data_type(),
})?;
let key = rb
.column(KEY_INDEX)
.as_any()
.downcast_ref::<BinaryVector>()
.with_context(|| SystemCatalogTypeMismatchSnafu {
data_type: rb.column(KEY_INDEX).data_type(),
})?;
let value = rb
.column(VALUE_INDEX)
.as_any()
.downcast_ref::<BinaryVector>()
.with_context(|| SystemCatalogTypeMismatchSnafu {
data_type: rb.column(VALUE_INDEX).data_type(),
})?;
let mut res = Vec::with_capacity(rb.num_rows());
for ((t, k), v) in entry_type
.iter_data()
.zip(key.iter_data())
.zip(value.iter_data())
{
let entry = decode_system_catalog(t, k, v)?;
res.push(entry);
}
Ok(res)
}
/// Processes records from system catalog table and returns the max table id persisted
/// in system catalog table.
async fn handle_system_catalog_entries(&self, entries: Vec<Entry>) -> Result<TableId> {
let entries = Self::sort_entries(entries);
let mut max_table_id = 0;
for entry in entries {
match entry {
Entry::Catalog(c) => {
self.catalogs
.register_catalog_sync(c.catalog_name.clone())?;
info!("Register catalog: {}", c.catalog_name);
}
Entry::Schema(s) => {
let req = RegisterSchemaRequest {
catalog: s.catalog_name.clone(),
schema: s.schema_name.clone(),
};
let _ = self.catalogs.register_schema_sync(req)?;
info!("Registered schema: {:?}", s);
}
Entry::Table(t) => {
max_table_id = max_table_id.max(t.table_id);
if t.is_deleted {
continue;
}
self.open_and_register_table(&t).await?;
info!("Registered table: {:?}", t);
}
}
}
Ok(max_table_id)
}
/// Sorts catalog entries to ensure that catalog entries come first, then schema entries,
/// and table entries come last.
fn sort_entries(mut entries: Vec<Entry>) -> Vec<Entry> {
entries.sort();
entries
}
async fn open_and_register_table(&self, t: &TableEntry) -> Result<()> {
self.check_catalog_schema_exist(&t.catalog_name, &t.schema_name)
.await?;
let context = EngineContext {};
let open_request = OpenTableRequest {
catalog_name: t.catalog_name.clone(),
schema_name: t.schema_name.clone(),
table_name: t.table_name.clone(),
table_id: t.table_id,
region_numbers: vec![0],
};
let engine = self
.engine_manager
.engine(&t.engine)
.context(TableEngineNotFoundSnafu {
engine_name: &t.engine,
})?;
let table_ref = engine
.open_table(&context, open_request)
.await
.with_context(|_| OpenTableSnafu {
table_info: format!(
"{}.{}.{}, id: {}",
&t.catalog_name, &t.schema_name, &t.table_name, t.table_id
),
})?
.with_context(|| TableNotFoundSnafu {
table_info: format!(
"{}.{}.{}, id: {}",
&t.catalog_name, &t.schema_name, &t.table_name, t.table_id
),
})?;
let register_request = RegisterTableRequest {
catalog: t.catalog_name.clone(),
schema: t.schema_name.clone(),
table_name: t.table_name.clone(),
table_id: t.table_id,
table: table_ref,
};
let _ = self.catalogs.register_table(register_request).await?;
Ok(())
}
async fn check_state(&self) -> Result<()> {
let started = self.init_lock.lock().await;
ensure!(
*started,
IllegalManagerStateSnafu {
msg: "Catalog manager not started",
}
);
Ok(())
}
async fn check_catalog_schema_exist(
&self,
catalog_name: &str,
schema_name: &str,
) -> Result<()> {
if !self.catalogs.catalog_exist(catalog_name).await? {
return CatalogNotFoundSnafu { catalog_name }.fail()?;
}
if !self
.catalogs
.schema_exist(catalog_name, schema_name)
.await?
{
return SchemaNotFoundSnafu {
catalog: catalog_name,
schema: schema_name,
}
.fail()?;
}
Ok(())
}
}
#[async_trait::async_trait]
impl TableIdProvider for LocalCatalogManager {
async fn next_table_id(&self) -> table::Result<TableId> {
Ok(self.next_table_id.fetch_add(1, Ordering::Relaxed))
}
}
#[async_trait::async_trait]
impl CatalogManager for LocalCatalogManager {
/// Start [LocalCatalogManager] to load all information from system catalog table.
/// Make sure table engine is initialized before starting [MemoryCatalogManager].
async fn start(&self) -> Result<()> {
self.init().await
}
async fn register_table(&self, request: RegisterTableRequest) -> Result<bool> {
self.check_state().await?;
let catalog_name = request.catalog.clone();
let schema_name = request.schema.clone();
self.check_catalog_schema_exist(&catalog_name, &schema_name)
.await?;
{
let _lock = self.register_lock.lock().await;
if let Some(existing) = self
.catalogs
.table(&request.catalog, &request.schema, &request.table_name)
.await?
{
if existing.table_info().ident.table_id != request.table_id {
error!(
"Unexpected table register request: {:?}, existing: {:?}",
request,
existing.table_info()
);
return TableExistsSnafu {
table: format_full_table_name(
&catalog_name,
&schema_name,
&request.table_name,
),
}
.fail();
}
// Try to register table with same table id, just ignore.
Ok(false)
} else {
// table does not exist
let engine = request.table.table_info().meta.engine.to_string();
let table_name = request.table_name.clone();
let table_id = request.table_id;
let _ = self.catalogs.register_table(request).await?;
let _ = self
.system
.register_table(
catalog_name.clone(),
schema_name.clone(),
table_name,
table_id,
engine,
)
.await?;
increment_gauge!(
crate::metrics::METRIC_CATALOG_MANAGER_TABLE_COUNT,
1.0,
&[crate::metrics::db_label(&catalog_name, &schema_name)],
);
Ok(true)
}
}
}
async fn rename_table(&self, request: RenameTableRequest) -> Result<bool> {
self.check_state().await?;
let catalog_name = &request.catalog;
let schema_name = &request.schema;
self.check_catalog_schema_exist(catalog_name, schema_name)
.await?;
ensure!(
self.catalogs
.table(catalog_name, schema_name, &request.new_table_name)
.await?
.is_none(),
TableExistsSnafu {
table: &request.new_table_name
}
);
let _lock = self.register_lock.lock().await;
let old_table = self
.catalogs
.table(catalog_name, schema_name, &request.table_name)
.await?
.context(TableNotExistSnafu {
table: &request.table_name,
})?;
let engine = old_table.table_info().meta.engine.to_string();
// rename table in system catalog
let _ = self
.system
.register_table(
catalog_name.clone(),
schema_name.clone(),
request.new_table_name.clone(),
request.table_id,
engine,
)
.await?;
self.catalogs.rename_table(request).await
}
async fn deregister_table(&self, request: DeregisterTableRequest) -> Result<()> {
self.check_state().await?;
{
let _ = self.register_lock.lock().await;
let DeregisterTableRequest {
catalog,
schema,
table_name,
} = &request;
let table_id = self
.catalogs
.table(catalog, schema, table_name)
.await?
.with_context(|| error::TableNotExistSnafu {
table: format_full_table_name(catalog, schema, table_name),
})?
.table_info()
.ident
.table_id;
self.system.deregister_table(&request, table_id).await?;
self.catalogs.deregister_table(request).await
}
}
async fn register_schema(&self, request: RegisterSchemaRequest) -> Result<bool> {
self.check_state().await?;
let catalog_name = &request.catalog;
let schema_name = &request.schema;
if !self.catalogs.catalog_exist(catalog_name).await? {
return CatalogNotFoundSnafu { catalog_name }.fail()?;
}
{
let _lock = self.register_lock.lock().await;
ensure!(
!self
.catalogs
.schema_exist(catalog_name, schema_name)
.await?,
SchemaExistsSnafu {
schema: schema_name,
}
);
let _ = self
.system
.register_schema(request.catalog.clone(), schema_name.clone())
.await?;
self.catalogs.register_schema_sync(request)
}
}
async fn deregister_schema(&self, _request: DeregisterSchemaRequest) -> Result<bool> {
UnimplementedSnafu {
operation: "deregister schema",
}
.fail()
}
async fn register_system_table(&self, request: RegisterSystemTableRequest) -> Result<()> {
let catalog_name = request.create_table_request.catalog_name.clone();
let schema_name = request.create_table_request.schema_name.clone();
let mut sys_table_requests = self.system_table_requests.lock().await;
sys_table_requests.push(request);
increment_gauge!(
crate::metrics::METRIC_CATALOG_MANAGER_TABLE_COUNT,
1.0,
&[crate::metrics::db_label(&catalog_name, &schema_name)],
);
Ok(())
}
async fn schema_exist(&self, catalog: &str, schema: &str) -> Result<bool> {
self.catalogs.schema_exist(catalog, schema).await
}
async fn table(
&self,
catalog_name: &str,
schema_name: &str,
table_name: &str,
) -> Result<Option<TableRef>> {
self.catalogs
.table(catalog_name, schema_name, table_name)
.await
}
async fn catalog_exist(&self, catalog: &str) -> Result<bool> {
if catalog.eq_ignore_ascii_case(SYSTEM_CATALOG_NAME) {
Ok(true)
} else {
self.catalogs.catalog_exist(catalog).await
}
}
async fn table_exist(&self, catalog: &str, schema: &str, table: &str) -> Result<bool> {
self.catalogs.table_exist(catalog, schema, table).await
}
async fn catalog_names(&self) -> Result<Vec<String>> {
self.catalogs.catalog_names().await
}
async fn schema_names(&self, catalog_name: &str) -> Result<Vec<String>> {
self.catalogs.schema_names(catalog_name).await
}
async fn table_names(&self, catalog_name: &str, schema_name: &str) -> Result<Vec<String>> {
self.catalogs.table_names(catalog_name, schema_name).await
}
async fn register_catalog(self: Arc<Self>, name: String) -> Result<bool> {
self.catalogs.clone().register_catalog(name).await
}
fn as_any(&self) -> &dyn Any {
self
}
}
#[cfg(test)]
mod tests {
use std::assert_matches::assert_matches;
use mito::engine::MITO_ENGINE;
use super::*;
use crate::system::{CatalogEntry, SchemaEntry};
#[test]
fn test_sort_entry() {
let vec = vec![
Entry::Table(TableEntry {
catalog_name: "C1".to_string(),
schema_name: "S1".to_string(),
table_name: "T1".to_string(),
table_id: 1,
engine: MITO_ENGINE.to_string(),
is_deleted: false,
}),
Entry::Catalog(CatalogEntry {
catalog_name: "C2".to_string(),
}),
Entry::Schema(SchemaEntry {
catalog_name: "C1".to_string(),
schema_name: "S1".to_string(),
}),
Entry::Schema(SchemaEntry {
catalog_name: "C2".to_string(),
schema_name: "S2".to_string(),
}),
Entry::Catalog(CatalogEntry {
catalog_name: "".to_string(),
}),
Entry::Table(TableEntry {
catalog_name: "C1".to_string(),
schema_name: "S1".to_string(),
table_name: "T2".to_string(),
table_id: 2,
engine: MITO_ENGINE.to_string(),
is_deleted: false,
}),
];
let res = LocalCatalogManager::sort_entries(vec);
assert_matches!(res[0], Entry::Catalog(..));
assert_matches!(res[1], Entry::Catalog(..));
assert_matches!(res[2], Entry::Schema(..));
assert_matches!(res[3], Entry::Schema(..));
assert_matches!(res[4], Entry::Table(..));
assert_matches!(res[5], Entry::Table(..));
}
}
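The sort_entries helper and test_sort_entry above depend on Entry ordering catalogs before schemas before tables. Here is a minimal, self-contained sketch of that idea, assuming the ordering comes from a derived Ord and the enum's variant declaration order (the real Entry type may implement Ord differently).

// With a derived Ord on an enum, variants compare in declaration order,
// so Catalog < Schema < Table regardless of their payloads.
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord)]
enum Entry {
    Catalog(String),
    Schema(String),
    Table(String),
}

fn sort_entries(mut entries: Vec<Entry>) -> Vec<Entry> {
    entries.sort();
    entries
}

fn main() {
    let sorted = sort_entries(vec![
        Entry::Table("t1".to_string()),
        Entry::Schema("s1".to_string()),
        Entry::Catalog("c1".to_string()),
    ]);
    // Catalogs first, then schemas, then tables.
    assert!(matches!(sorted[0], Entry::Catalog(_)));
    assert!(matches!(sorted[1], Entry::Schema(_)));
    assert!(matches!(sorted[2], Entry::Table(_)));
    println!("{sorted:?}");
}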

View File

@@ -12,5 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
#[derive(Debug, Clone, Default)]
pub struct EngineConfig {}
pub mod manager;
pub use manager::{new_memory_catalog_manager, MemoryCatalogManager};

View File

@@ -15,129 +15,29 @@
use std::any::Any;
use std::collections::hash_map::Entry;
use std::collections::HashMap;
use std::sync::atomic::{AtomicU32, Ordering};
use std::sync::{Arc, RwLock, Weak};
use common_catalog::consts::{
DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, INFORMATION_SCHEMA_NAME, MIN_USER_TABLE_ID,
};
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, INFORMATION_SCHEMA_NAME};
use metrics::{decrement_gauge, increment_gauge};
use snafu::OptionExt;
use table::metadata::TableId;
use table::table::TableIdProvider;
use table::TableRef;
use crate::error::{
CatalogNotFoundSnafu, Result, SchemaNotFoundSnafu, TableExistsSnafu, TableNotFoundSnafu,
};
use crate::error::{CatalogNotFoundSnafu, Result, SchemaNotFoundSnafu, TableExistsSnafu};
use crate::information_schema::InformationSchemaProvider;
use crate::{
CatalogManager, DeregisterSchemaRequest, DeregisterTableRequest, RegisterSchemaRequest,
RegisterSystemTableRequest, RegisterTableRequest, RenameTableRequest,
};
use crate::{CatalogManager, DeregisterTableRequest, RegisterSchemaRequest, RegisterTableRequest};
type SchemaEntries = HashMap<String, HashMap<String, TableRef>>;
/// Simple in-memory list of catalogs
#[derive(Clone)]
pub struct MemoryCatalogManager {
/// Collection of catalogs containing schemas and ultimately Tables
pub catalogs: RwLock<HashMap<String, SchemaEntries>>,
pub table_id: AtomicU32,
}
#[async_trait::async_trait]
impl TableIdProvider for MemoryCatalogManager {
async fn next_table_id(&self) -> table::error::Result<TableId> {
Ok(self.table_id.fetch_add(1, Ordering::Relaxed))
}
catalogs: Arc<RwLock<HashMap<String, SchemaEntries>>>,
}
#[async_trait::async_trait]
impl CatalogManager for MemoryCatalogManager {
async fn start(&self) -> Result<()> {
self.table_id.store(MIN_USER_TABLE_ID, Ordering::Relaxed);
Ok(())
}
async fn register_table(&self, request: RegisterTableRequest) -> Result<bool> {
self.register_table_sync(request)
}
async fn rename_table(&self, request: RenameTableRequest) -> Result<bool> {
let mut catalogs = self.catalogs.write().unwrap();
let schema = catalogs
.get_mut(&request.catalog)
.with_context(|| CatalogNotFoundSnafu {
catalog_name: &request.catalog,
})?
.get_mut(&request.schema)
.with_context(|| SchemaNotFoundSnafu {
catalog: &request.catalog,
schema: &request.schema,
})?;
// check old and new table names
if !schema.contains_key(&request.table_name) {
return TableNotFoundSnafu {
table_info: request.table_name.to_string(),
}
.fail()?;
}
if schema.contains_key(&request.new_table_name) {
return TableExistsSnafu {
table: &request.new_table_name,
}
.fail();
}
let table = schema.remove(&request.table_name).unwrap();
let _ = schema.insert(request.new_table_name, table);
Ok(true)
}
async fn deregister_table(&self, request: DeregisterTableRequest) -> Result<()> {
self.deregister_table_sync(request)
}
async fn register_schema(&self, request: RegisterSchemaRequest) -> Result<bool> {
self.register_schema_sync(request)
}
async fn deregister_schema(&self, request: DeregisterSchemaRequest) -> Result<bool> {
let mut catalogs = self.catalogs.write().unwrap();
let schemas = catalogs
.get_mut(&request.catalog)
.with_context(|| CatalogNotFoundSnafu {
catalog_name: &request.catalog,
})?;
let table_count = schemas
.remove(&request.schema)
.with_context(|| SchemaNotFoundSnafu {
catalog: &request.catalog,
schema: &request.schema,
})?
.len();
decrement_gauge!(
crate::metrics::METRIC_CATALOG_MANAGER_TABLE_COUNT,
table_count as f64,
&[crate::metrics::db_label(&request.catalog, &request.schema)],
);
decrement_gauge!(
crate::metrics::METRIC_CATALOG_MANAGER_SCHEMA_COUNT,
1.0,
&[crate::metrics::db_label(&request.catalog, &request.schema)],
);
Ok(true)
}
async fn register_system_table(&self, _request: RegisterSystemTableRequest) -> Result<()> {
// TODO(ruihang): support register system table request
Ok(())
}
async fn schema_exist(&self, catalog: &str, schema: &str) -> Result<bool> {
async fn schema_exists(&self, catalog: &str, schema: &str) -> Result<bool> {
self.schema_exist_sync(catalog, schema)
}
@@ -159,11 +59,11 @@ impl CatalogManager for MemoryCatalogManager {
Ok(result)
}
async fn catalog_exist(&self, catalog: &str) -> Result<bool> {
async fn catalog_exists(&self, catalog: &str) -> Result<bool> {
self.catalog_exist_sync(catalog)
}
async fn table_exist(&self, catalog: &str, schema: &str, table: &str) -> Result<bool> {
async fn table_exists(&self, catalog: &str, schema: &str, table: &str) -> Result<bool> {
let catalogs = self.catalogs.read().unwrap();
Ok(catalogs
.get(catalog)
@@ -208,28 +108,27 @@ impl CatalogManager for MemoryCatalogManager {
.collect())
}
async fn register_catalog(self: Arc<Self>, name: String) -> Result<bool> {
self.register_catalog_sync(name)
}
fn as_any(&self) -> &dyn Any {
self
}
}
impl MemoryCatalogManager {
pub fn new() -> Arc<Self> {
Arc::new(Self {
catalogs: Default::default(),
})
}
/// Creates a manager with some default setups
/// (e.g. default catalog/schema and information schema)
pub fn with_default_setup() -> Arc<Self> {
let manager = Arc::new(Self {
table_id: AtomicU32::new(MIN_USER_TABLE_ID),
catalogs: Default::default(),
});
// Safety: default catalog/schema is registered in order so no CatalogNotFound error will occur
manager
.register_catalog_sync(DEFAULT_CATALOG_NAME.to_string())
.unwrap();
manager.register_catalog_sync(DEFAULT_CATALOG_NAME).unwrap();
manager
.register_schema_sync(RegisterSchemaRequest {
catalog: DEFAULT_CATALOG_NAME.to_string(),
@@ -257,12 +156,15 @@ impl MemoryCatalogManager {
}
/// Registers a catalog if it does not exist and returns false if the catalog already exists.
pub fn register_catalog_sync(self: &Arc<Self>, name: String) -> Result<bool> {
pub fn register_catalog_sync(&self, name: &str) -> Result<bool> {
let name = name.to_string();
let mut catalogs = self.catalogs.write().unwrap();
match catalogs.entry(name.clone()) {
Entry::Vacant(e) => {
let catalog = self.create_catalog_entry(name);
let arc_self = Arc::new(self.clone());
let catalog = arc_self.create_catalog_entry(name);
e.insert(catalog);
increment_gauge!(crate::metrics::METRIC_CATALOG_MANAGER_CATALOG_COUNT, 1.0);
Ok(true)
@@ -361,7 +263,7 @@ impl MemoryCatalogManager {
let schema = &table.table_info().schema_name;
if !manager.catalog_exist_sync(catalog).unwrap() {
manager.register_catalog_sync(catalog.to_string()).unwrap();
manager.register_catalog_sync(catalog).unwrap();
}
if !manager.schema_exist_sync(catalog, schema).unwrap() {
@@ -393,8 +295,6 @@ pub fn new_memory_catalog_manager() -> Result<Arc<MemoryCatalogManager>> {
#[cfg(test)]
mod tests {
use common_catalog::consts::*;
use common_error::ext::ErrorExt;
use common_error::status_code::StatusCode;
use table::table::numbers::{NumbersTable, NUMBERS_TABLE_NAME};
use super::*;
@@ -411,7 +311,7 @@ mod tests {
table: NumbersTable::table(NUMBERS_TABLE_ID),
};
let _ = catalog_list.register_table(register_request).await.unwrap();
catalog_list.register_table_sync(register_request).unwrap();
let table = catalog_list
.table(
DEFAULT_CATALOG_NAME,
@@ -428,130 +328,11 @@ mod tests {
.is_none());
}
#[tokio::test]
async fn test_mem_manager_rename_table() {
let catalog = MemoryCatalogManager::with_default_setup();
let table_name = "test_table";
assert!(!catalog
.table_exist(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, table_name)
.await
.unwrap());
// register test table
let table_id = 2333;
let register_request = RegisterTableRequest {
catalog: DEFAULT_CATALOG_NAME.to_string(),
schema: DEFAULT_SCHEMA_NAME.to_string(),
table_name: table_name.to_string(),
table_id,
table: NumbersTable::table(table_id),
};
assert!(catalog.register_table(register_request).await.unwrap());
assert!(catalog
.table_exist(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, table_name)
.await
.unwrap());
// rename test table
let new_table_name = "test_table_renamed";
let rename_request = RenameTableRequest {
catalog: DEFAULT_CATALOG_NAME.to_string(),
schema: DEFAULT_SCHEMA_NAME.to_string(),
table_name: table_name.to_string(),
new_table_name: new_table_name.to_string(),
table_id,
};
let _ = catalog.rename_table(rename_request).await.unwrap();
// test old table name not exist
assert!(!catalog
.table_exist(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, table_name)
.await
.unwrap());
// test new table name exists
assert!(catalog
.table_exist(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, new_table_name)
.await
.unwrap());
let registered_table = catalog
.table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, new_table_name)
.await
.unwrap()
.unwrap();
assert_eq!(registered_table.table_info().ident.table_id, table_id);
let dup_register_request = RegisterTableRequest {
catalog: DEFAULT_CATALOG_NAME.to_string(),
schema: DEFAULT_SCHEMA_NAME.to_string(),
table_name: new_table_name.to_string(),
table_id: table_id + 1,
table: NumbersTable::table(table_id + 1),
};
let result = catalog.register_table(dup_register_request).await;
let err = result.err().unwrap();
assert_eq!(StatusCode::TableAlreadyExists, err.status_code());
}
#[tokio::test]
async fn test_catalog_rename_table() {
let catalog = MemoryCatalogManager::with_default_setup();
let table_name = "num";
let table_id = 2333;
let table = NumbersTable::table(table_id);
// register table
let register_table_req = RegisterTableRequest {
catalog: DEFAULT_CATALOG_NAME.to_string(),
schema: DEFAULT_SCHEMA_NAME.to_string(),
table_name: table_name.to_string(),
table_id,
table,
};
assert!(catalog.register_table(register_table_req).await.unwrap());
assert!(catalog
.table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, table_name)
.await
.unwrap()
.is_some());
// rename table
let new_table_name = "numbers_new";
let rename_table_req = RenameTableRequest {
catalog: DEFAULT_CATALOG_NAME.to_string(),
schema: DEFAULT_SCHEMA_NAME.to_string(),
table_name: table_name.to_string(),
new_table_name: new_table_name.to_string(),
table_id,
};
assert!(catalog.rename_table(rename_table_req).await.unwrap());
assert!(catalog
.table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, table_name)
.await
.unwrap()
.is_none());
assert!(catalog
.table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, new_table_name)
.await
.unwrap()
.is_some());
let registered_table = catalog
.table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, new_table_name)
.await
.unwrap()
.unwrap();
assert_eq!(registered_table.table_info().ident.table_id, table_id);
}
#[test]
pub fn test_register_catalog_sync() {
let list = MemoryCatalogManager::with_default_setup();
assert!(list
.register_catalog_sync("test_catalog".to_string())
.unwrap());
assert!(!list
.register_catalog_sync("test_catalog".to_string())
.unwrap());
assert!(list.register_catalog_sync("test_catalog").unwrap());
assert!(!list.register_catalog_sync("test_catalog").unwrap());
}
#[tokio::test]
@@ -566,7 +347,7 @@ mod tests {
table_id: 2333,
table: NumbersTable::table(2333),
};
let _ = catalog.register_table(register_table_req).await.unwrap();
catalog.register_table_sync(register_table_req).unwrap();
assert!(catalog
.table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, table_name)
.await
@@ -578,53 +359,11 @@ mod tests {
schema: DEFAULT_SCHEMA_NAME.to_string(),
table_name: table_name.to_string(),
};
catalog
.deregister_table(deregister_table_req)
.await
.unwrap();
catalog.deregister_table_sync(deregister_table_req).unwrap();
assert!(catalog
.table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, table_name)
.await
.unwrap()
.is_none());
}
#[tokio::test]
async fn test_catalog_deregister_schema() {
let catalog = MemoryCatalogManager::with_default_setup();
// Registers a catalog, a schema, and a table.
let catalog_name = "foo_catalog".to_string();
let schema_name = "foo_schema".to_string();
let table_name = "foo_table".to_string();
let schema = RegisterSchemaRequest {
catalog: catalog_name.clone(),
schema: schema_name.clone(),
};
let table = RegisterTableRequest {
catalog: catalog_name.clone(),
schema: schema_name.clone(),
table_name,
table_id: 0,
table: NumbersTable::table(0),
};
catalog
.clone()
.register_catalog(catalog_name.clone())
.await
.unwrap();
catalog.register_schema(schema).await.unwrap();
catalog.register_table(table).await.unwrap();
let request = DeregisterSchemaRequest {
catalog: catalog_name.clone(),
schema: schema_name.clone(),
};
assert!(catalog.deregister_schema(request).await.unwrap());
assert!(!catalog
.schema_exist(&catalog_name, &schema_name)
.await
.unwrap());
}
}
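The register_catalog_sync hunks above show the idempotent-registration pattern this file relies on: take the write lock, use the map's entry API, insert only when vacant, and report whether anything new was registered, which is exactly what test_register_catalog_sync asserts. Below is a minimal, self-contained sketch of that pattern; Manager and its field are simplified stand-ins for the real catalog structures.

use std::collections::hash_map::Entry;
use std::collections::HashMap;
use std::sync::RwLock;

// Catalog name -> (schema name -> table names); a stand-in for the real SchemaEntries.
type Catalogs = HashMap<String, HashMap<String, Vec<String>>>;

struct Manager {
    catalogs: RwLock<Catalogs>,
}

impl Manager {
    // Returns true if the catalog was newly registered, false if it already existed.
    fn register_catalog_sync(&self, name: &str) -> bool {
        let mut catalogs = self.catalogs.write().unwrap();
        match catalogs.entry(name.to_string()) {
            Entry::Vacant(e) => {
                e.insert(HashMap::new());
                true
            }
            Entry::Occupied(_) => false,
        }
    }
}

fn main() {
    let manager = Manager { catalogs: RwLock::new(HashMap::new()) };
    assert!(manager.register_catalog_sync("test_catalog"));
    assert!(!manager.register_catalog_sync("test_catalog"));
}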

View File

@@ -1,436 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::any::Any;
use std::sync::Arc;
use async_trait::async_trait;
use common_catalog::consts::MITO_ENGINE;
use common_meta::ident::TableIdent;
use common_meta::key::catalog_name::CatalogNameKey;
use common_meta::key::datanode_table::DatanodeTableValue;
use common_meta::key::schema_name::SchemaNameKey;
use common_meta::key::TableMetadataManagerRef;
use common_telemetry::{error, info, warn};
use futures_util::TryStreamExt;
use metrics::increment_gauge;
use snafu::{ensure, OptionExt, ResultExt};
use table::engine::manager::TableEngineManagerRef;
use table::engine::EngineContext;
use table::requests::OpenTableRequest;
use table::TableRef;
use tokio::sync::Mutex;
use crate::error::{
OpenTableSnafu, ParallelOpenTableSnafu, Result, TableEngineNotFoundSnafu, TableExistsSnafu,
TableMetadataManagerSnafu, TableNotFoundSnafu, UnimplementedSnafu,
};
use crate::local::MemoryCatalogManager;
use crate::remote::region_alive_keeper::RegionAliveKeepers;
use crate::{
handle_system_table_request, CatalogManager, DeregisterSchemaRequest, DeregisterTableRequest,
RegisterSchemaRequest, RegisterSystemTableRequest, RegisterTableRequest, RenameTableRequest,
};
/// Catalog manager based on metasrv.
pub struct RemoteCatalogManager {
node_id: u64,
engine_manager: TableEngineManagerRef,
system_table_requests: Mutex<Vec<RegisterSystemTableRequest>>,
region_alive_keepers: Arc<RegionAliveKeepers>,
memory_catalog_manager: Arc<MemoryCatalogManager>,
table_metadata_manager: TableMetadataManagerRef,
}
impl RemoteCatalogManager {
pub fn new(
engine_manager: TableEngineManagerRef,
node_id: u64,
region_alive_keepers: Arc<RegionAliveKeepers>,
table_metadata_manager: TableMetadataManagerRef,
) -> Self {
Self {
engine_manager,
node_id,
system_table_requests: Default::default(),
region_alive_keepers,
memory_catalog_manager: MemoryCatalogManager::with_default_setup(),
table_metadata_manager,
}
}
async fn initiate_catalogs(&self) -> Result<()> {
let tables = self
.table_metadata_manager
.datanode_table_manager()
.tables(self.node_id)
.try_collect::<Vec<_>>()
.await
.context(TableMetadataManagerSnafu)?;
let joins = tables
.into_iter()
.map(|datanode_table_value| {
let engine_manager = self.engine_manager.clone();
let memory_catalog_manager = self.memory_catalog_manager.clone();
let table_metadata_manager = self.table_metadata_manager.clone();
let region_alive_keepers = self.region_alive_keepers.clone();
common_runtime::spawn_bg(async move {
let table_id = datanode_table_value.table_id;
if let Err(e) = open_and_register_table(
engine_manager,
datanode_table_value,
memory_catalog_manager,
table_metadata_manager,
region_alive_keepers,
)
.await
{
// Note that we don't return an error here if the table failed to open. This is because
// we don't want those broken tables to impede the startup of Datanode.
// However, this could be changed in the future.
error!(e; "Failed to open or register table, id = {table_id}")
}
})
})
.collect::<Vec<_>>();
let _ = futures::future::try_join_all(joins)
.await
.context(ParallelOpenTableSnafu)?;
Ok(())
}
}
async fn open_and_register_table(
engine_manager: TableEngineManagerRef,
datanode_table_value: DatanodeTableValue,
memory_catalog_manager: Arc<MemoryCatalogManager>,
table_metadata_manager: TableMetadataManagerRef,
region_alive_keepers: Arc<RegionAliveKeepers>,
) -> Result<()> {
let context = EngineContext {};
let table_id = datanode_table_value.table_id;
let region_numbers = datanode_table_value.regions;
let table_info_value = table_metadata_manager
.table_info_manager()
.get(table_id)
.await
.context(TableMetadataManagerSnafu)?
.context(TableNotFoundSnafu {
table_info: format!("table id: {table_id}"),
})?;
let table_info = &table_info_value.table_info;
let catalog_name = table_info.catalog_name.clone();
let schema_name = table_info.schema_name.clone();
let table_name = table_info.name.clone();
let request = OpenTableRequest {
catalog_name: catalog_name.clone(),
schema_name: schema_name.clone(),
table_name: table_name.clone(),
table_id,
region_numbers: region_numbers.clone(),
};
let engine =
engine_manager
.engine(&table_info.meta.engine)
.context(TableEngineNotFoundSnafu {
engine_name: &table_info.meta.engine,
})?;
let table_ident = TableIdent {
catalog: catalog_name,
schema: schema_name,
table: table_name,
table_id,
engine: table_info.meta.engine.clone(),
};
let table = engine
.open_table(&context, request)
.await
.with_context(|_| OpenTableSnafu {
table_info: table_ident.to_string(),
})?
.with_context(|| TableNotFoundSnafu {
table_info: table_ident.to_string(),
})?;
info!("Successfully opened table, {table_ident}");
if !memory_catalog_manager
.catalog_exist(&table_ident.catalog)
.await?
{
memory_catalog_manager.register_catalog_sync(table_ident.catalog.clone())?;
}
if !memory_catalog_manager
.schema_exist(&table_ident.catalog, &table_ident.schema)
.await?
{
memory_catalog_manager.register_schema_sync(RegisterSchemaRequest {
catalog: table_ident.catalog.clone(),
schema: table_ident.schema.clone(),
})?;
}
let request = RegisterTableRequest {
catalog: table_ident.catalog.clone(),
schema: table_ident.schema.clone(),
table_name: table_ident.table.clone(),
table_id,
table,
};
let registered =
register_table(&memory_catalog_manager, &region_alive_keepers, request).await?;
ensure!(
registered,
TableExistsSnafu {
table: table_ident.to_string(),
}
);
info!("Successfully registered table, {table_ident}");
Ok(())
}
async fn register_table(
memory_catalog_manager: &Arc<MemoryCatalogManager>,
region_alive_keepers: &Arc<RegionAliveKeepers>,
request: RegisterTableRequest,
) -> Result<bool> {
let table = request.table.clone();
let registered = memory_catalog_manager.register_table_sync(request)?;
if registered {
let table_info = table.table_info();
let table_ident = TableIdent {
catalog: table_info.catalog_name.clone(),
schema: table_info.schema_name.clone(),
table: table_info.name.clone(),
table_id: table_info.table_id(),
engine: table_info.meta.engine.clone(),
};
region_alive_keepers
.register_table(table_ident, table, memory_catalog_manager.clone())
.await?;
}
Ok(registered)
}
#[async_trait]
impl CatalogManager for RemoteCatalogManager {
async fn start(&self) -> Result<()> {
self.initiate_catalogs().await?;
let mut system_table_requests = self.system_table_requests.lock().await;
let engine = self
.engine_manager
.engine(MITO_ENGINE)
.context(TableEngineNotFoundSnafu {
engine_name: MITO_ENGINE,
})?;
handle_system_table_request(self, engine, &mut system_table_requests).await?;
info!("All system table opened");
Ok(())
}
async fn register_table(&self, request: RegisterTableRequest) -> Result<bool> {
register_table(
&self.memory_catalog_manager,
&self.region_alive_keepers,
request,
)
.await
}
async fn deregister_table(&self, request: DeregisterTableRequest) -> Result<()> {
let Some(table) = self
.memory_catalog_manager
.table(&request.catalog, &request.schema, &request.table_name)
.await?
else {
return Ok(());
};
let table_info = table.table_info();
let table_ident = TableIdent {
catalog: request.catalog.clone(),
schema: request.schema.clone(),
table: request.table_name.clone(),
table_id: table_info.ident.table_id,
engine: table_info.meta.engine.clone(),
};
if let Some(keeper) = self
.region_alive_keepers
.deregister_table(&table_ident)
.await
{
warn!(
"Table {} is deregistered from region alive keepers",
keeper.table_ident(),
);
}
self.memory_catalog_manager.deregister_table(request).await
}
async fn register_schema(&self, request: RegisterSchemaRequest) -> Result<bool> {
self.memory_catalog_manager.register_schema_sync(request)
}
async fn deregister_schema(&self, _request: DeregisterSchemaRequest) -> Result<bool> {
UnimplementedSnafu {
operation: "deregister schema",
}
.fail()
}
async fn rename_table(&self, request: RenameTableRequest) -> Result<bool> {
self.memory_catalog_manager.rename_table(request).await
}
async fn register_system_table(&self, request: RegisterSystemTableRequest) -> Result<()> {
let catalog_name = request.create_table_request.catalog_name.clone();
let schema_name = request.create_table_request.schema_name.clone();
let mut requests = self.system_table_requests.lock().await;
requests.push(request);
increment_gauge!(
crate::metrics::METRIC_CATALOG_MANAGER_TABLE_COUNT,
1.0,
&[crate::metrics::db_label(&catalog_name, &schema_name)],
);
Ok(())
}
async fn schema_exist(&self, catalog: &str, schema: &str) -> Result<bool> {
if !self.catalog_exist(catalog).await? {
return Ok(false);
}
if self
.memory_catalog_manager
.schema_exist(catalog, schema)
.await?
{
return Ok(true);
}
let remote_schema_exists = self
.table_metadata_manager
.schema_manager()
.exist(SchemaNameKey::new(catalog, schema))
.await
.context(TableMetadataManagerSnafu)?;
// Create schema locally if remote schema exists. Since local schema is managed by memory
// catalog manager, creating a local schema is relatively cheap (just a HashMap).
// Besides, if this method ("schema_exist") is called, it's very likely that someone wants to
// create a table in this schema. We should create the schema now.
if remote_schema_exists
&& self
.memory_catalog_manager
.register_schema(RegisterSchemaRequest {
catalog: catalog.to_string(),
schema: schema.to_string(),
})
.await?
{
info!("register schema '{catalog}/{schema}' on demand");
}
Ok(remote_schema_exists)
}
async fn table(
&self,
catalog_name: &str,
schema_name: &str,
table_name: &str,
) -> Result<Option<TableRef>> {
self.memory_catalog_manager
.table(catalog_name, schema_name, table_name)
.await
}
async fn catalog_exist(&self, catalog: &str) -> Result<bool> {
if self.memory_catalog_manager.catalog_exist(catalog).await? {
return Ok(true);
}
let key = CatalogNameKey::new(catalog);
let remote_catalog_exists = self
.table_metadata_manager
.catalog_manager()
.exist(key)
.await
.context(TableMetadataManagerSnafu)?;
// Create catalog locally if remote catalog exists. Since local catalog is managed by memory
// catalog manager, creating a local catalog is relatively cheap (just a HashMap).
// Besides, if this method ("catalog_exist") is called, it's very likely that someone wants to
// create a table in this catalog. We should create the catalog now.
if remote_catalog_exists
&& self
.memory_catalog_manager
.clone()
.register_catalog(catalog.to_string())
.await?
{
info!("register catalog '{catalog}' on demand");
}
Ok(remote_catalog_exists)
}
async fn table_exist(&self, catalog: &str, schema: &str, table: &str) -> Result<bool> {
if !self.catalog_exist(catalog).await? {
return Ok(false);
}
if !self.schema_exist(catalog, schema).await? {
return Ok(false);
}
self.memory_catalog_manager
.table_exist(catalog, schema, table)
.await
}
async fn catalog_names(&self) -> Result<Vec<String>> {
self.memory_catalog_manager.catalog_names().await
}
async fn schema_names(&self, catalog_name: &str) -> Result<Vec<String>> {
self.memory_catalog_manager.schema_names(catalog_name).await
}
async fn table_names(&self, catalog_name: &str, schema_name: &str) -> Result<Vec<String>> {
self.memory_catalog_manager
.table_names(catalog_name, schema_name)
.await
}
async fn register_catalog(self: Arc<Self>, name: String) -> Result<bool> {
self.memory_catalog_manager.register_catalog_sync(name)
}
fn as_any(&self) -> &dyn Any {
self
}
}
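schema_exist and catalog_exist above follow a cache-aside shape: answer from the in-memory manager when possible, otherwise consult the remote metadata and, if the object exists there, mirror it into the local manager so a follow-up table creation can proceed. A minimal, self-contained sketch of that flow follows; LocalCache and RemoteMeta are hypothetical stand-ins for the memory catalog manager and the table metadata manager.

use std::collections::HashSet;

struct LocalCache { schemas: HashSet<String> }
struct RemoteMeta { schemas: HashSet<String> }

// Returns whether the schema exists, registering it locally on demand
// when it is only known to the remote metadata store.
fn schema_exists(local: &mut LocalCache, remote: &RemoteMeta, schema: &str) -> bool {
    if local.schemas.contains(schema) {
        return true;
    }
    let remote_exists = remote.schemas.contains(schema);
    if remote_exists && local.schemas.insert(schema.to_string()) {
        println!("register schema '{schema}' on demand");
    }
    remote_exists
}

fn main() {
    let mut local = LocalCache { schemas: HashSet::new() };
    let remote = RemoteMeta { schemas: HashSet::from(["my_schema".to_string()]) };
    assert!(schema_exists(&mut local, &remote, "my_schema"));
    // The second call is answered purely from the local cache.
    assert!(schema_exists(&mut local, &remote, "my_schema"));
    assert!(!schema_exists(&mut local, &remote, "unknown"));
}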

View File

@@ -1,867 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::HashMap;
use std::future::Future;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use async_trait::async_trait;
use common_meta::error::InvalidProtoMsgSnafu;
use common_meta::heartbeat::handler::{
HandleControl, HeartbeatResponseHandler, HeartbeatResponseHandlerContext,
};
use common_meta::ident::TableIdent;
use common_meta::RegionIdent;
use common_telemetry::{debug, error, info, warn};
use snafu::{OptionExt, ResultExt};
use store_api::storage::RegionNumber;
use table::engine::manager::TableEngineManagerRef;
use table::engine::{CloseTableResult, EngineContext, TableEngineRef};
use table::metadata::TableId;
use table::requests::CloseTableRequest;
use table::TableRef;
use tokio::sync::{mpsc, oneshot, Mutex};
use tokio::task::JoinHandle;
use tokio::time::{Duration, Instant};
use crate::error::{Result, TableEngineNotFoundSnafu};
use crate::local::MemoryCatalogManager;
use crate::DeregisterTableRequest;
/// [RegionAliveKeepers] manages all [RegionAliveKeeper] in a scope of tables.
pub struct RegionAliveKeepers {
table_engine_manager: TableEngineManagerRef,
keepers: Arc<Mutex<HashMap<TableId, Arc<RegionAliveKeeper>>>>,
heartbeat_interval_millis: u64,
started: AtomicBool,
/// The epoch when [RegionAliveKeepers] is created. It's used to get a monotonically non-decreasing
/// elapsed time when submitting heartbeats to Metasrv (because [Instant] is monotonically
/// non-decreasing). The heartbeat request will carry the duration since this epoch, and the
/// duration acts like an "invariant point" for region's keep alive lease.
epoch: Instant,
}
impl RegionAliveKeepers {
pub fn new(
table_engine_manager: TableEngineManagerRef,
heartbeat_interval_millis: u64,
) -> Self {
Self {
table_engine_manager,
keepers: Arc::new(Mutex::new(HashMap::new())),
heartbeat_interval_millis,
started: AtomicBool::new(false),
epoch: Instant::now(),
}
}
pub async fn find_keeper(&self, table_id: TableId) -> Option<Arc<RegionAliveKeeper>> {
self.keepers.lock().await.get(&table_id).cloned()
}
pub async fn register_table(
&self,
table_ident: TableIdent,
table: TableRef,
catalog_manager: Arc<MemoryCatalogManager>,
) -> Result<()> {
let table_id = table_ident.table_id;
let keeper = self.find_keeper(table_id).await;
if keeper.is_some() {
return Ok(());
}
let table_engine = self
.table_engine_manager
.engine(&table_ident.engine)
.context(TableEngineNotFoundSnafu {
engine_name: &table_ident.engine,
})?;
let keeper = Arc::new(RegionAliveKeeper::new(
table_engine,
catalog_manager,
table_ident.clone(),
self.heartbeat_interval_millis,
));
for r in table.table_info().meta.region_numbers.iter() {
keeper.register_region(*r).await;
}
let mut keepers = self.keepers.lock().await;
let _ = keepers.insert(table_id, keeper.clone());
if self.started.load(Ordering::Relaxed) {
keeper.start().await;
info!("RegionAliveKeeper for table {table_ident} is started!");
} else {
info!("RegionAliveKeeper for table {table_ident} is registered but not started yet!");
}
Ok(())
}
pub async fn deregister_table(
&self,
table_ident: &TableIdent,
) -> Option<Arc<RegionAliveKeeper>> {
let table_id = table_ident.table_id;
self.keepers.lock().await.remove(&table_id).map(|x| {
info!("Deregister RegionAliveKeeper for table {table_ident}");
x
})
}
pub async fn register_region(&self, region_ident: &RegionIdent) {
let table_id = region_ident.table_ident.table_id;
let Some(keeper) = self.find_keeper(table_id).await else {
// Alive keeper could be affected by lagging msg, just warn and ignore.
warn!("Alive keeper for region {region_ident} is not found!");
return;
};
keeper.register_region(region_ident.region_number).await
}
pub async fn deregister_region(&self, region_ident: &RegionIdent) {
let table_id = region_ident.table_ident.table_id;
let Some(keeper) = self.find_keeper(table_id).await else {
// Alive keeper could be affected by lagging msg, just warn and ignore.
warn!("Alive keeper for region {region_ident} is not found!");
return;
};
let _ = keeper.deregister_region(region_ident.region_number).await;
}
pub async fn start(&self) {
let keepers = self.keepers.lock().await;
for keeper in keepers.values() {
keeper.start().await;
}
self.started.store(true, Ordering::Relaxed);
info!(
"RegionAliveKeepers for tables {:?} are started!",
keepers.keys().map(|x| x.to_string()).collect::<Vec<_>>(),
);
}
pub fn epoch(&self) -> Instant {
self.epoch
}
}
#[async_trait]
impl HeartbeatResponseHandler for RegionAliveKeepers {
fn is_acceptable(&self, ctx: &HeartbeatResponseHandlerContext) -> bool {
!ctx.response.region_leases.is_empty()
}
async fn handle(
&self,
ctx: &mut HeartbeatResponseHandlerContext,
) -> common_meta::error::Result<HandleControl> {
let leases = ctx.response.region_leases.drain(..).collect::<Vec<_>>();
for lease in leases {
let table_ident: TableIdent = match lease
.table_ident
.context(InvalidProtoMsgSnafu {
err_msg: "'table_ident' is missing in RegionLease",
})
.and_then(|x| x.try_into())
{
Ok(x) => x,
Err(e) => {
error!(e; "");
continue;
}
};
let table_id = table_ident.table_id;
let Some(keeper) = self.keepers.lock().await.get(&table_id).cloned() else {
// Alive keeper could be affected by lagging msg, just warn and ignore.
warn!("Alive keeper for table {table_ident} is not found!");
continue;
};
let start_instant = self.epoch + Duration::from_millis(lease.duration_since_epoch);
let deadline = start_instant + Duration::from_secs(lease.lease_seconds);
keeper.keep_lived(lease.regions, deadline).await;
}
Ok(HandleControl::Continue)
}
}
/// [RegionAliveKeeper] starts a countdown for each region in a table. When the deadline is reached,
/// the region will be closed.
/// The deadline is controlled by Metasrv. It works like a "lease" for regions: a Datanode submits its
/// opened regions to Metasrv in heartbeats. If Metasrv decides a region may reside on this
/// Datanode, it "extends" the region's "lease" with a deadline for [RegionAliveKeeper] to
/// count down.
pub struct RegionAliveKeeper {
catalog_manager: Arc<MemoryCatalogManager>,
table_engine: TableEngineRef,
table_ident: TableIdent,
countdown_task_handles: Arc<Mutex<HashMap<RegionNumber, Arc<CountdownTaskHandle>>>>,
heartbeat_interval_millis: u64,
started: AtomicBool,
}
impl RegionAliveKeeper {
fn new(
table_engine: TableEngineRef,
catalog_manager: Arc<MemoryCatalogManager>,
table_ident: TableIdent,
heartbeat_interval_millis: u64,
) -> Self {
Self {
catalog_manager,
table_engine,
table_ident,
countdown_task_handles: Arc::new(Mutex::new(HashMap::new())),
heartbeat_interval_millis,
started: AtomicBool::new(false),
}
}
async fn find_handle(&self, region: &RegionNumber) -> Option<Arc<CountdownTaskHandle>> {
self.countdown_task_handles
.lock()
.await
.get(region)
.cloned()
}
async fn register_region(&self, region: RegionNumber) {
if self.find_handle(&region).await.is_some() {
return;
}
let countdown_task_handles = Arc::downgrade(&self.countdown_task_handles);
let on_task_finished = async move {
if let Some(x) = countdown_task_handles.upgrade() {
let _ = x.lock().await.remove(&region);
} // Else the countdown task handles map could be dropped because the keeper is dropped.
};
let catalog_manager = self.catalog_manager.clone();
let ident = self.table_ident.clone();
let handle = Arc::new(CountdownTaskHandle::new(
self.table_engine.clone(),
self.table_ident.clone(),
region,
move |result: Option<CloseTableResult>| {
if matches!(result, Some(CloseTableResult::Released(_))) {
let result = catalog_manager.deregister_table_sync(DeregisterTableRequest {
catalog: ident.catalog.to_string(),
schema: ident.schema.to_string(),
table_name: ident.table.to_string(),
});
info!(
"Deregister table: {} after countdown task finished, result: {result:?}",
ident.table_id
);
} else {
debug!("Countdown task returns: {result:?}");
}
on_task_finished
},
));
let mut handles = self.countdown_task_handles.lock().await;
let _ = handles.insert(region, handle.clone());
if self.started.load(Ordering::Relaxed) {
handle.start(self.heartbeat_interval_millis).await;
info!(
"Region alive countdown for region {region} in table {} is started!",
self.table_ident
);
} else {
info!(
"Region alive countdown for region {region} in table {} is registered but not started yet!",
self.table_ident
);
}
}
async fn deregister_region(&self, region: RegionNumber) -> Option<Arc<CountdownTaskHandle>> {
self.countdown_task_handles
.lock()
.await
.remove(&region)
.map(|x| {
info!(
"Deregister alive countdown for region {region} in table {}",
self.table_ident
);
x
})
}
async fn start(&self) {
let handles = self.countdown_task_handles.lock().await;
for handle in handles.values() {
handle.start(self.heartbeat_interval_millis).await;
}
self.started.store(true, Ordering::Relaxed);
info!(
"Region alive countdowns for regions {:?} in table {} are started!",
handles.keys().copied().collect::<Vec<_>>(),
self.table_ident
);
}
async fn keep_lived(&self, designated_regions: Vec<RegionNumber>, deadline: Instant) {
for region in designated_regions {
if let Some(handle) = self.find_handle(&region).await {
handle.reset_deadline(deadline).await;
}
// Else the region alive keeper might be triggered by lagging messages, which we can safely ignore.
}
}
pub async fn deadline(&self, region: RegionNumber) -> Option<Instant> {
let mut deadline = None;
if let Some(handle) = self.find_handle(&region).await {
let (s, r) = oneshot::channel();
if handle.tx.send(CountdownCommand::Deadline(s)).await.is_ok() {
deadline = r.await.ok()
}
}
deadline
}
pub fn table_ident(&self) -> &TableIdent {
&self.table_ident
}
}
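Per the lease handling in handle above, a region's deadline is the keeper's epoch plus the heartbeat's duration_since_epoch plus lease_seconds, and keep_lived only pushes a countdown forward, never back. A small, self-contained sketch of that arithmetic using std::time:

use std::time::{Duration, Instant};

fn main() {
    // The epoch is the keeper's creation time; heartbeats carry the elapsed millis since it.
    let epoch = Instant::now();
    let duration_since_epoch_millis: u64 = 1_500;
    let lease_seconds: u64 = 100;

    // Same arithmetic as the lease handling above:
    // deadline = epoch + duration_since_epoch + lease_seconds.
    let start_instant = epoch + Duration::from_millis(duration_since_epoch_millis);
    let deadline = start_instant + Duration::from_secs(lease_seconds);

    // A countdown is only pushed forward, never pulled back, so a lagging
    // heartbeat carrying an older deadline is ignored.
    let mut current_deadline = epoch + Duration::from_secs(20);
    if current_deadline < deadline {
        current_deadline = deadline;
    }
    assert_eq!(current_deadline, deadline);
    println!("lease extends {:?} past the epoch", current_deadline - epoch);
}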
#[derive(Debug)]
enum CountdownCommand {
Start(u64),
Reset(Instant),
Deadline(oneshot::Sender<Instant>),
}
struct CountdownTaskHandle {
tx: mpsc::Sender<CountdownCommand>,
handler: JoinHandle<()>,
table_ident: TableIdent,
region: RegionNumber,
}
impl CountdownTaskHandle {
/// Creates a new [CountdownTaskHandle] and starts the countdown task.
/// # Params
/// - `on_task_finished`: a callback to be invoked when the task is finished. Note that it will not
/// be invoked if the task is cancelled (by dropping the handle). This is because we want something
/// meaningful to be done when the task is finished, e.g. deregistering the handle from the map,
/// whereas dropping the handle does not necessarily mean the task is finished.
fn new<Fut>(
table_engine: TableEngineRef,
table_ident: TableIdent,
region: RegionNumber,
on_task_finished: impl FnOnce(Option<CloseTableResult>) -> Fut + Send + 'static,
) -> Self
where
Fut: Future<Output = ()> + Send,
{
let (tx, rx) = mpsc::channel(1024);
let mut countdown_task = CountdownTask {
table_engine,
table_ident: table_ident.clone(),
region,
rx,
};
let handler = common_runtime::spawn_bg(async move {
let result = countdown_task.run().await;
on_task_finished(result).await;
});
Self {
tx,
handler,
table_ident,
region,
}
}
async fn start(&self, heartbeat_interval_millis: u64) {
if let Err(e) = self
.tx
.send(CountdownCommand::Start(heartbeat_interval_millis))
.await
{
warn!(
"Failed to start region alive keeper countdown: {e}. \
Maybe the task is stopped because the region has been closed."
);
}
}
async fn reset_deadline(&self, deadline: Instant) {
if let Err(e) = self.tx.send(CountdownCommand::Reset(deadline)).await {
warn!(
"Failed to reset region alive keeper deadline: {e}. \
Maybe the task is stopped because the region has been closed."
);
}
}
}
impl Drop for CountdownTaskHandle {
fn drop(&mut self) {
debug!(
"Aborting region alive countdown task for region {} in table {}",
self.region, self.table_ident,
);
self.handler.abort();
}
}
struct CountdownTask {
table_engine: TableEngineRef,
table_ident: TableIdent,
region: RegionNumber,
rx: mpsc::Receiver<CountdownCommand>,
}
impl CountdownTask {
// Runs the countdown loop. Returns `Some(CloseTableResult)` when the countdown fires and the
// region gets closed; returns `None` if the command channel is closed first.
async fn run(&mut self) -> Option<CloseTableResult> {
// 30 years. See `Instant::far_future`.
let far_future = Instant::now() + Duration::from_secs(86400 * 365 * 30);
// Make sure the alive countdown does not start before the heartbeat task is started (the
// "start countdown" command will be sent from the heartbeat task).
let countdown = tokio::time::sleep_until(far_future);
tokio::pin!(countdown);
let region = &self.region;
let table_ident = &self.table_ident;
loop {
tokio::select! {
command = self.rx.recv() => {
match command {
Some(CountdownCommand::Start(heartbeat_interval_millis)) => {
// Set the first deadline 4 heartbeats from now (roughly 20 seconds if the heartbeat
// interval is the default 5 seconds), to make Datanode and Metasrv more tolerant of
// network or other jitter during startup.
let first_deadline = Instant::now() + Duration::from_millis(heartbeat_interval_millis) * 4;
countdown.set(tokio::time::sleep_until(first_deadline));
},
Some(CountdownCommand::Reset(deadline)) => {
if countdown.deadline() < deadline {
debug!(
"Reset deadline of region {region} of table {table_ident} to approximately {} seconds later",
(deadline - Instant::now()).as_secs_f32(),
);
countdown.set(tokio::time::sleep_until(deadline));
}
// Else the countdown could be either:
// - not started yet;
// - during startup protection;
// - received a lagging heartbeat message.
// All can be safely ignored.
},
None => {
info!(
"The handle of countdown task for region {region} of table {table_ident} \
is dropped, RegionAliveKeeper out."
);
break;
},
Some(CountdownCommand::Deadline(tx)) => {
let _ = tx.send(countdown.deadline());
}
}
}
() = &mut countdown => {
let result = self.close_region().await;
warn!(
"Region {region} of table {table_ident} is closed, result: {result:?}. \
RegionAliveKeeper out.",
);
return Some(result);
}
}
}
None
}
async fn close_region(&self) -> CloseTableResult {
let ctx = EngineContext::default();
let region = self.region;
let table_ident = &self.table_ident;
loop {
let request = CloseTableRequest {
catalog_name: table_ident.catalog.clone(),
schema_name: table_ident.schema.clone(),
table_name: table_ident.table.clone(),
table_id: table_ident.table_id,
region_numbers: vec![region],
flush: true,
};
match self.table_engine.close_table(&ctx, request).await {
Ok(result) => return result,
// If the region failed to close, retry immediately. Maybe we should panic instead?
Err(e) => error!(e;
"Failed to close region {region} of table {table_ident}. \
For the integrity of data, retry closing and retry without wait.",
),
}
}
}
}
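The countdown loop above parks a sleep_until far in the future, then resets it on Start and Reset commands and closes the region when the timer finally fires. Below is a stripped-down, self-contained sketch of the same select-and-reset shape, assuming the tokio crate with its timer and macro features enabled; Command here is a simplified stand-in for CountdownCommand and no table is actually closed.

use tokio::sync::mpsc;
use tokio::time::{sleep_until, Duration, Instant};

#[derive(Debug)]
enum Command {
    Start(u64),
    Reset(Instant),
}

#[tokio::main]
async fn main() {
    let (tx, mut rx) = mpsc::channel::<Command>(8);

    let task = tokio::spawn(async move {
        // Park the countdown far in the future until a Start command arrives.
        let far_future = Instant::now() + Duration::from_secs(86400 * 365 * 30);
        let countdown = sleep_until(far_future);
        tokio::pin!(countdown);
        loop {
            tokio::select! {
                command = rx.recv() => {
                    match command {
                        Some(Command::Start(interval_millis)) => {
                            // First deadline: a few intervals from now, like the startup protection above.
                            countdown.as_mut().reset(Instant::now() + Duration::from_millis(interval_millis) * 4);
                        }
                        // Only push the deadline forward; ignore lagging resets.
                        Some(Command::Reset(deadline)) if countdown.deadline() < deadline => {
                            countdown.as_mut().reset(deadline);
                        }
                        Some(Command::Reset(_)) => {}
                        None => break "sender dropped",
                    }
                }
                () = &mut countdown => break "deadline reached",
            }
        }
    });

    tx.send(Command::Start(50)).await.unwrap();
    tx.send(Command::Reset(Instant::now() + Duration::from_millis(300))).await.unwrap();
    println!("countdown finished: {}", task.await.unwrap());
}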
#[cfg(test)]
mod test {
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use api::v1::meta::{HeartbeatResponse, RegionLease};
use common_meta::heartbeat::mailbox::HeartbeatMailbox;
use datatypes::schema::RawSchema;
use table::engine::manager::MemoryTableEngineManager;
use table::engine::TableEngine;
use table::requests::{CreateTableRequest, TableOptions};
use table::test_util::EmptyTable;
use super::*;
use crate::remote::mock::MockTableEngine;
async fn prepare_keepers() -> (TableIdent, RegionAliveKeepers) {
let table_engine = Arc::new(MockTableEngine::default());
let table_engine_manager = Arc::new(MemoryTableEngineManager::new(table_engine));
let keepers = RegionAliveKeepers::new(table_engine_manager, 5000);
let catalog = "my_catalog";
let schema = "my_schema";
let table = "my_table";
let table_ident = TableIdent {
catalog: catalog.to_string(),
schema: schema.to_string(),
table: table.to_string(),
table_id: 1,
engine: "MockTableEngine".to_string(),
};
let table = Arc::new(EmptyTable::new(CreateTableRequest {
id: 1,
catalog_name: catalog.to_string(),
schema_name: schema.to_string(),
table_name: table.to_string(),
desc: None,
schema: RawSchema {
column_schemas: vec![],
timestamp_index: None,
version: 0,
},
region_numbers: vec![1, 2, 3],
primary_key_indices: vec![],
create_if_not_exists: false,
table_options: TableOptions::default(),
engine: "MockTableEngine".to_string(),
}));
let catalog_manager = MemoryCatalogManager::new_with_table(table.clone());
keepers
.register_table(table_ident.clone(), table, catalog_manager)
.await
.unwrap();
assert!(keepers
.keepers
.lock()
.await
.contains_key(&table_ident.table_id));
(table_ident, keepers)
}
#[tokio::test(flavor = "multi_thread")]
async fn test_handle_heartbeat_response() {
let (table_ident, keepers) = prepare_keepers().await;
keepers.start().await;
let startup_protection_until = Instant::now() + Duration::from_secs(21);
let duration_since_epoch = (Instant::now() - keepers.epoch).as_millis() as _;
let lease_seconds = 100;
let response = HeartbeatResponse {
region_leases: vec![RegionLease {
table_ident: Some(table_ident.clone().into()),
regions: vec![1, 3], // Not extending region 2's lease time.
duration_since_epoch,
lease_seconds,
}],
..Default::default()
};
let keep_alive_until = keepers.epoch
+ Duration::from_millis(duration_since_epoch)
+ Duration::from_secs(lease_seconds);
let (tx, _) = mpsc::channel(8);
let mailbox = Arc::new(HeartbeatMailbox::new(tx));
let mut ctx = HeartbeatResponseHandlerContext::new(mailbox, response);
assert!(keepers.handle(&mut ctx).await.unwrap() == HandleControl::Continue);
// sleep to wait for background task spawned in `handle`
tokio::time::sleep(Duration::from_secs(1)).await;
async fn test(
keeper: &Arc<RegionAliveKeeper>,
region_number: RegionNumber,
startup_protection_until: Instant,
keep_alive_until: Instant,
is_kept_live: bool,
) {
let deadline = keeper.deadline(region_number).await.unwrap();
if is_kept_live {
assert!(deadline > startup_protection_until && deadline == keep_alive_until);
} else {
assert!(deadline <= startup_protection_until);
}
}
let keeper = &keepers
.keepers
.lock()
.await
.get(&table_ident.table_id)
.cloned()
.unwrap();
// Test regions 1 and 3 are kept alive. Their deadlines are updated to the desired instant.
test(keeper, 1, startup_protection_until, keep_alive_until, true).await;
test(keeper, 3, startup_protection_until, keep_alive_until, true).await;
// Test region 2 is not kept alive. Its deadline is not updated: still within the startup protection period.
test(keeper, 2, startup_protection_until, keep_alive_until, false).await;
}
#[tokio::test(flavor = "multi_thread")]
async fn test_region_alive_keepers() {
let (table_ident, keepers) = prepare_keepers().await;
keepers
.register_region(&RegionIdent {
cluster_id: 1,
datanode_id: 1,
table_ident: table_ident.clone(),
region_number: 4,
})
.await;
keepers.start().await;
for keeper in keepers.keepers.lock().await.values() {
let regions = {
let handles = keeper.countdown_task_handles.lock().await;
handles.keys().copied().collect::<Vec<_>>()
};
for region in regions {
// assert countdown tasks are started
let deadline = keeper.deadline(region).await.unwrap();
assert!(deadline <= Instant::now() + Duration::from_secs(20));
}
}
keepers
.deregister_region(&RegionIdent {
cluster_id: 1,
datanode_id: 1,
table_ident: table_ident.clone(),
region_number: 1,
})
.await;
let mut regions = keepers
.find_keeper(table_ident.table_id)
.await
.unwrap()
.countdown_task_handles
.lock()
.await
.keys()
.copied()
.collect::<Vec<_>>();
regions.sort();
assert_eq!(regions, vec![2, 3, 4]);
let keeper = keepers.deregister_table(&table_ident).await.unwrap();
assert!(Arc::try_unwrap(keeper).is_ok(), "keeper is not dropped");
assert!(keepers.keepers.lock().await.is_empty());
}
#[tokio::test(flavor = "multi_thread")]
async fn test_region_alive_keeper() {
let table_engine = Arc::new(MockTableEngine::default());
let table_ident = TableIdent {
catalog: "my_catalog".to_string(),
schema: "my_schema".to_string(),
table: "my_table".to_string(),
table_id: 1024,
engine: "mito".to_string(),
};
let catalog_manager = MemoryCatalogManager::with_default_setup();
let keeper = RegionAliveKeeper::new(table_engine, catalog_manager, table_ident, 1000);
let region = 1;
assert!(keeper.find_handle(&region).await.is_none());
keeper.register_region(region).await;
let _ = keeper.find_handle(&region).await.unwrap();
let ten_seconds_later = || Instant::now() + Duration::from_secs(10);
keeper.keep_lived(vec![1, 2, 3], ten_seconds_later()).await;
assert!(keeper.find_handle(&2).await.is_none());
assert!(keeper.find_handle(&3).await.is_none());
let far_future = Instant::now() + Duration::from_secs(86400 * 365 * 29);
// assert that `keep_lived` has no effect when the keeper has not been started
assert!(keeper.deadline(region).await.unwrap() > far_future);
keeper.start().await;
keeper.keep_lived(vec![1, 2, 3], ten_seconds_later()).await;
// assert that `keep_lived` takes effect once the keeper is started
assert!(keeper.deadline(region).await.unwrap() <= ten_seconds_later());
let handle = keeper.deregister_region(region).await.unwrap();
assert!(Arc::try_unwrap(handle).is_ok(), "handle is not dropped");
assert!(keeper.find_handle(&region).await.is_none());
}
#[tokio::test(flavor = "multi_thread")]
async fn test_countdown_task_handle() {
let table_engine = Arc::new(MockTableEngine::default());
let table_ident = TableIdent {
catalog: "my_catalog".to_string(),
schema: "my_schema".to_string(),
table: "my_table".to_string(),
table_id: 1024,
engine: "mito".to_string(),
};
let finished = Arc::new(AtomicBool::new(false));
let finished_clone = finished.clone();
let handle = CountdownTaskHandle::new(
table_engine.clone(),
table_ident.clone(),
1,
|_| async move { finished_clone.store(true, Ordering::Relaxed) },
);
let tx = handle.tx.clone();
// assert countdown task is running
tx.send(CountdownCommand::Start(5000)).await.unwrap();
assert!(!finished.load(Ordering::Relaxed));
drop(handle);
tokio::time::sleep(Duration::from_secs(1)).await;
// assert countdown task is stopped
assert!(tx
.try_send(CountdownCommand::Reset(
Instant::now() + Duration::from_secs(10)
))
.is_err());
// assert `on_task_finished` is not called (because the task is aborted by the handle's drop)
assert!(!finished.load(Ordering::Relaxed));
let finished = Arc::new(AtomicBool::new(false));
let finished_clone = finished.clone();
let handle = CountdownTaskHandle::new(table_engine, table_ident, 1, |_| async move {
finished_clone.store(true, Ordering::Relaxed)
});
handle.tx.send(CountdownCommand::Start(100)).await.unwrap();
tokio::time::sleep(Duration::from_secs(1)).await;
// assert `on_task_finished` is called when task is finished normally
assert!(finished.load(Ordering::Relaxed));
}
#[tokio::test(flavor = "multi_thread")]
async fn test_countdown_task_run() {
let ctx = &EngineContext::default();
let catalog = "my_catalog";
let schema = "my_schema";
let table = "my_table";
let table_id = 1;
let request = CreateTableRequest {
id: table_id,
catalog_name: catalog.to_string(),
schema_name: schema.to_string(),
table_name: table.to_string(),
desc: None,
schema: RawSchema {
column_schemas: vec![],
timestamp_index: None,
version: 0,
},
region_numbers: vec![],
primary_key_indices: vec![],
create_if_not_exists: false,
table_options: TableOptions::default(),
engine: "mito".to_string(),
};
let table_engine = Arc::new(MockTableEngine::default());
let _ = table_engine.create_table(ctx, request).await.unwrap();
let table_ident = TableIdent {
catalog: catalog.to_string(),
schema: schema.to_string(),
table: table.to_string(),
table_id,
engine: "mito".to_string(),
};
let (tx, rx) = mpsc::channel(10);
let mut task = CountdownTask {
table_engine: table_engine.clone(),
table_ident,
region: 1,
rx,
};
let _handle = common_runtime::spawn_bg(async move {
task.run().await;
});
async fn deadline(tx: &mpsc::Sender<CountdownCommand>) -> Instant {
let (s, r) = oneshot::channel();
tx.send(CountdownCommand::Deadline(s)).await.unwrap();
r.await.unwrap()
}
// if the countdown task is not started, its deadline is set to a far-future instant
assert!(deadline(&tx).await > Instant::now() + Duration::from_secs(86400 * 365 * 29));
// start the countdown: with Start(250), the initial deadline is 250ms * 4 = 1s from now
tx.send(CountdownCommand::Start(250)).await.unwrap();
// assert deadline is correctly set
assert!(deadline(&tx).await <= Instant::now() + Duration::from_secs(1));
// reset the countdown deadline to 1.5s from now
tx.send(CountdownCommand::Reset(
Instant::now() + Duration::from_millis(1500),
))
.await
.unwrap();
// the deadline is not reached yet, so the table still exists
assert!(table_engine.table_exists(ctx, table_id));
// wait past the 1.5s deadline, sparing an extra 500ms for the task to close the table
tokio::time::sleep(Duration::from_millis(2000)).await;
assert!(!table_engine.table_exists(ctx, table_id));
}
}


@@ -1,634 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::HashMap;
use std::sync::Arc;
use common_catalog::consts::{
DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, INFORMATION_SCHEMA_NAME, MITO_ENGINE,
SYSTEM_CATALOG_NAME, SYSTEM_CATALOG_TABLE_ID, SYSTEM_CATALOG_TABLE_NAME,
};
use common_recordbatch::SendableRecordBatchStream;
use common_telemetry::{debug, warn};
use common_time::util;
use datatypes::prelude::{ConcreteDataType, ScalarVector, VectorRef};
use datatypes::schema::{ColumnSchema, RawSchema};
use datatypes::vectors::{BinaryVector, TimestampMillisecondVector, UInt8Vector};
use serde::{Deserialize, Serialize};
use snafu::{ensure, OptionExt, ResultExt};
use store_api::storage::ScanRequest;
use table::engine::{EngineContext, TableEngineRef};
use table::metadata::TableId;
use table::requests::{CreateTableRequest, InsertRequest, OpenTableRequest, TableOptions};
use table::TableRef;
use crate::error::{
self, CreateSystemCatalogSnafu, DeregisterTableSnafu, EmptyValueSnafu, Error,
InsertCatalogRecordSnafu, InvalidEntryTypeSnafu, InvalidKeySnafu, OpenSystemCatalogSnafu,
Result, ValueDeserializeSnafu,
};
use crate::DeregisterTableRequest;
pub const ENTRY_TYPE_INDEX: usize = 0;
pub const KEY_INDEX: usize = 1;
pub const VALUE_INDEX: usize = 3;
pub struct SystemCatalogTable(TableRef);
impl SystemCatalogTable {
pub async fn new(engine: TableEngineRef) -> Result<Self> {
let request = OpenTableRequest {
catalog_name: SYSTEM_CATALOG_NAME.to_string(),
schema_name: INFORMATION_SCHEMA_NAME.to_string(),
table_name: SYSTEM_CATALOG_TABLE_NAME.to_string(),
table_id: SYSTEM_CATALOG_TABLE_ID,
region_numbers: vec![0],
};
let schema = build_system_catalog_schema();
let ctx = EngineContext::default();
if let Some(table) = engine
.open_table(&ctx, request)
.await
.context(OpenSystemCatalogSnafu)?
{
Ok(Self(table))
} else {
// the system catalog table is not created yet, try to create it
let request = CreateTableRequest {
id: SYSTEM_CATALOG_TABLE_ID,
catalog_name: SYSTEM_CATALOG_NAME.to_string(),
schema_name: INFORMATION_SCHEMA_NAME.to_string(),
table_name: SYSTEM_CATALOG_TABLE_NAME.to_string(),
desc: Some("System catalog table".to_string()),
schema,
region_numbers: vec![0],
primary_key_indices: vec![ENTRY_TYPE_INDEX, KEY_INDEX],
create_if_not_exists: true,
table_options: TableOptions::default(),
engine: engine.name().to_string(),
};
let table = engine
.create_table(&ctx, request)
.await
.context(CreateSystemCatalogSnafu)?;
Ok(Self(table))
}
}
pub async fn register_table(
&self,
catalog: String,
schema: String,
table_name: String,
table_id: TableId,
engine: String,
) -> Result<usize> {
let insert_request =
build_table_insert_request(catalog, schema, table_name, table_id, engine);
self.0
.insert(insert_request)
.await
.context(InsertCatalogRecordSnafu)
}
pub(crate) async fn deregister_table(
&self,
request: &DeregisterTableRequest,
table_id: TableId,
) -> Result<()> {
let deletion_request = build_table_deletion_request(request, table_id);
self.0
.insert(deletion_request)
.await
.map(|x| {
if x != 1 {
let table = common_catalog::format_full_table_name(
&request.catalog,
&request.schema,
&request.table_name
);
warn!("Failed to delete table record from information_schema, unexpected returned result: {x}, table: {table}");
}
})
.with_context(|_| DeregisterTableSnafu {
request: request.clone(),
})
}
pub async fn register_schema(&self, catalog: String, schema: String) -> Result<usize> {
let insert_request = build_schema_insert_request(catalog, schema);
self.0
.insert(insert_request)
.await
.context(InsertCatalogRecordSnafu)
}
/// Creates a stream of all entries inside the system catalog table.
pub async fn records(&self) -> Result<SendableRecordBatchStream> {
let full_projection = None;
let scan_req = ScanRequest {
sequence: None,
projection: full_projection,
filters: vec![],
output_ordering: None,
limit: None,
};
let stream = self
.0
.scan_to_stream(scan_req)
.await
.context(error::SystemCatalogTableScanSnafu)?;
Ok(stream)
}
pub fn as_table_ref(&self) -> TableRef {
self.0.clone()
}
}
/// Builds the system catalog table schema.
/// The system catalog table consists of 6 columns, namely:
/// - entry_type: the type of entry in the current row; can be any variant of [EntryType].
/// - key: a binary-encoded key of the entry; its format differs per entry type.
/// - timestamp: currently not used.
/// - value: the JSON-encoded metadata of the entry (a concrete sketch follows this function).
/// - gmt_created: creation time of this metadata.
/// - gmt_modified: last update time of this metadata.
fn build_system_catalog_schema() -> RawSchema {
let cols = vec![
ColumnSchema::new(
"entry_type".to_string(),
ConcreteDataType::uint8_datatype(),
false,
),
ColumnSchema::new(
"key".to_string(),
ConcreteDataType::binary_datatype(),
false,
),
ColumnSchema::new(
"timestamp".to_string(),
ConcreteDataType::timestamp_millisecond_datatype(),
false,
)
.with_time_index(true),
ColumnSchema::new(
"value".to_string(),
ConcreteDataType::binary_datatype(),
false,
),
ColumnSchema::new(
"gmt_created".to_string(),
ConcreteDataType::timestamp_millisecond_datatype(),
false,
),
ColumnSchema::new(
"gmt_modified".to_string(),
ConcreteDataType::timestamp_millisecond_datatype(),
false,
),
];
RawSchema::new(cols)
}
/// Formats the key string for a table entry in the system catalog.
#[inline]
pub fn format_table_entry_key(catalog: &str, schema: &str, table_id: TableId) -> String {
format!("{catalog}.{schema}.{table_id}")
}
pub fn build_table_insert_request(
catalog: String,
schema: String,
table_name: String,
table_id: TableId,
engine: String,
) -> InsertRequest {
let entry_key = format_table_entry_key(&catalog, &schema, table_id);
build_insert_request(
EntryType::Table,
entry_key.as_bytes(),
serde_json::to_string(&TableEntryValue {
table_name,
engine,
is_deleted: false,
})
.unwrap()
.as_bytes(),
)
}
pub(crate) fn build_table_deletion_request(
request: &DeregisterTableRequest,
table_id: TableId,
) -> InsertRequest {
let entry_key = format_table_entry_key(&request.catalog, &request.schema, table_id);
build_insert_request(
EntryType::Table,
entry_key.as_bytes(),
serde_json::to_string(&TableEntryValue {
table_name: "".to_string(),
engine: "".to_string(),
is_deleted: true,
})
.unwrap()
.as_bytes(),
)
}
fn build_primary_key_columns(entry_type: EntryType, key: &[u8]) -> HashMap<String, VectorRef> {
HashMap::from([
(
"entry_type".to_string(),
Arc::new(UInt8Vector::from_slice([entry_type as u8])) as VectorRef,
),
(
"key".to_string(),
Arc::new(BinaryVector::from_slice(&[key])) as VectorRef,
),
(
"timestamp".to_string(),
// The timestamp in the key part is intentionally set to 0
Arc::new(TimestampMillisecondVector::from_slice([0])) as VectorRef,
),
])
}
pub fn build_schema_insert_request(catalog_name: String, schema_name: String) -> InsertRequest {
let full_schema_name = format!("{catalog_name}.{schema_name}");
build_insert_request(
EntryType::Schema,
full_schema_name.as_bytes(),
serde_json::to_string(&SchemaEntryValue {})
.unwrap()
.as_bytes(),
)
}
pub fn build_insert_request(entry_type: EntryType, key: &[u8], value: &[u8]) -> InsertRequest {
let primary_key_columns = build_primary_key_columns(entry_type, key);
let mut columns_values = HashMap::with_capacity(6);
columns_values.extend(primary_key_columns);
let _ = columns_values.insert(
"value".to_string(),
Arc::new(BinaryVector::from_slice(&[value])) as _,
);
let now = util::current_time_millis();
let _ = columns_values.insert(
"gmt_created".to_string(),
Arc::new(TimestampMillisecondVector::from_slice([now])) as _,
);
let _ = columns_values.insert(
"gmt_modified".to_string(),
Arc::new(TimestampMillisecondVector::from_slice([now])) as _,
);
InsertRequest {
catalog_name: DEFAULT_CATALOG_NAME.to_string(),
schema_name: DEFAULT_SCHEMA_NAME.to_string(),
table_name: SYSTEM_CATALOG_TABLE_NAME.to_string(),
columns_values,
region_number: 0, // system catalog table has only one region
}
}
pub fn decode_system_catalog(
entry_type: Option<u8>,
key: Option<&[u8]>,
value: Option<&[u8]>,
) -> Result<Entry> {
debug!(
"Decode system catalog entry: {:?}, {:?}, {:?}",
entry_type, key, value
);
let entry_type = entry_type.context(InvalidKeySnafu { key: None })?;
let key = String::from_utf8_lossy(key.context(InvalidKeySnafu { key: None })?);
match EntryType::try_from(entry_type)? {
EntryType::Catalog => {
// As for catalog entry, the key is a string with format: `<catalog_name>`
// and the value is currently not used.
let catalog_name = key.to_string();
Ok(Entry::Catalog(CatalogEntry { catalog_name }))
}
EntryType::Schema => {
// As for schema entry, the key is a string with format: `<catalog_name>.<schema_name>`
// and the value is currently not used.
let schema_parts = key.split('.').collect::<Vec<_>>();
ensure!(
schema_parts.len() == 2,
InvalidKeySnafu {
key: Some(key.to_string())
}
);
Ok(Entry::Schema(SchemaEntry {
catalog_name: schema_parts[0].to_string(),
schema_name: schema_parts[1].to_string(),
}))
}
EntryType::Table => {
// As for table entry, the key is a string with format: `<catalog_name>.<schema_name>.<table_id>`
// and the value is a JSON string with format: `{"table_name": <table_name>}`
let table_parts = key.split('.').collect::<Vec<_>>();
ensure!(
table_parts.len() >= 3,
InvalidKeySnafu {
key: Some(key.to_string())
}
);
let value = value.context(EmptyValueSnafu)?;
debug!("Table meta value: {}", String::from_utf8_lossy(value));
let table_meta: TableEntryValue =
serde_json::from_slice(value).context(ValueDeserializeSnafu)?;
let table_id = table_parts[2].parse::<TableId>().unwrap();
Ok(Entry::Table(TableEntry {
catalog_name: table_parts[0].to_string(),
schema_name: table_parts[1].to_string(),
table_name: table_meta.table_name,
table_id,
engine: table_meta.engine,
is_deleted: table_meta.is_deleted,
}))
}
}
}
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum EntryType {
Catalog = 1,
Schema = 2,
Table = 3,
}
impl TryFrom<u8> for EntryType {
type Error = Error;
fn try_from(value: u8) -> std::result::Result<Self, Self::Error> {
match value {
b if b == Self::Catalog as u8 => Ok(Self::Catalog),
b if b == Self::Schema as u8 => Ok(Self::Schema),
b if b == Self::Table as u8 => Ok(Self::Table),
b => InvalidEntryTypeSnafu {
entry_type: Some(b),
}
.fail(),
}
}
}
#[derive(Debug, PartialEq, Eq, Ord, PartialOrd)]
pub enum Entry {
Catalog(CatalogEntry),
Schema(SchemaEntry),
Table(TableEntry),
}
#[derive(Debug, PartialEq, Eq, Ord, PartialOrd)]
pub struct CatalogEntry {
pub catalog_name: String,
}
#[derive(Debug, PartialEq, Eq, Ord, PartialOrd)]
pub struct SchemaEntry {
pub catalog_name: String,
pub schema_name: String,
}
#[derive(Debug, Serialize, Deserialize, PartialEq, Eq)]
pub struct SchemaEntryValue;
#[derive(Debug, PartialEq, Eq, Ord, PartialOrd)]
pub struct TableEntry {
pub catalog_name: String,
pub schema_name: String,
pub table_name: String,
pub table_id: TableId,
pub engine: String,
pub is_deleted: bool,
}
#[derive(Debug, Serialize, Deserialize, PartialEq, Eq)]
pub struct TableEntryValue {
pub table_name: String,
#[serde(default = "mito_engine")]
pub engine: String,
#[serde(default = "not_deleted")]
pub is_deleted: bool,
}
fn mito_engine() -> String {
MITO_ENGINE.to_string()
}
fn not_deleted() -> bool {
false
}
#[cfg(test)]
mod tests {
use common_recordbatch::RecordBatches;
use common_test_util::temp_dir::{create_temp_dir, TempDir};
use datatypes::value::Value;
use log_store::NoopLogStore;
use mito::config::EngineConfig;
use mito::engine::{MitoEngine, MITO_ENGINE};
use object_store::ObjectStore;
use storage::compaction::noop::NoopCompactionScheduler;
use storage::config::EngineConfig as StorageEngineConfig;
use storage::EngineImpl;
use table::metadata::TableType;
use table::metadata::TableType::Base;
use super::*;
#[test]
pub fn test_decode_catalog_entry() {
let entry = decode_system_catalog(
Some(EntryType::Catalog as u8),
Some("some_catalog".as_bytes()),
None,
)
.unwrap();
if let Entry::Catalog(e) = entry {
assert_eq!("some_catalog", e.catalog_name);
} else {
panic!("Unexpected type: {entry:?}");
}
}
#[test]
pub fn test_decode_schema_entry() {
let entry = decode_system_catalog(
Some(EntryType::Schema as u8),
Some("some_catalog.some_schema".as_bytes()),
None,
)
.unwrap();
if let Entry::Schema(e) = entry {
assert_eq!("some_catalog", e.catalog_name);
assert_eq!("some_schema", e.schema_name);
} else {
panic!("Unexpected type: {entry:?}");
}
}
#[test]
pub fn test_decode_table() {
let entry = decode_system_catalog(
Some(EntryType::Table as u8),
Some("some_catalog.some_schema.42".as_bytes()),
Some("{\"table_name\":\"some_table\"}".as_bytes()),
)
.unwrap();
if let Entry::Table(e) = entry {
assert_eq!("some_catalog", e.catalog_name);
assert_eq!("some_schema", e.schema_name);
assert_eq!("some_table", e.table_name);
assert_eq!(42, e.table_id);
} else {
panic!("Unexpected type: {entry:?}");
}
}
#[test]
pub fn test_decode_mismatch() {
assert!(decode_system_catalog(
Some(EntryType::Table as u8),
Some("some_catalog.some_schema.42".as_bytes()),
None,
)
.is_err());
}
#[test]
pub fn test_entry_type() {
assert_eq!(EntryType::Catalog, EntryType::try_from(1).unwrap());
assert_eq!(EntryType::Schema, EntryType::try_from(2).unwrap());
assert_eq!(EntryType::Table, EntryType::try_from(3).unwrap());
assert!(EntryType::try_from(4).is_err());
}
pub async fn prepare_table_engine() -> (TempDir, TableEngineRef) {
let dir = create_temp_dir("system-table-test");
let store_dir = dir.path().to_string_lossy();
let mut builder = object_store::services::Fs::default();
let _ = builder.root(&store_dir);
let object_store = ObjectStore::new(builder).unwrap().finish();
let noop_compaction_scheduler = Arc::new(NoopCompactionScheduler::default());
let table_engine = Arc::new(MitoEngine::new(
EngineConfig::default(),
EngineImpl::new(
StorageEngineConfig::default(),
Arc::new(NoopLogStore),
object_store.clone(),
noop_compaction_scheduler,
)
.unwrap(),
object_store,
));
(dir, table_engine)
}
#[tokio::test]
async fn test_system_table_type() {
let (_dir, table_engine) = prepare_table_engine().await;
let system_table = SystemCatalogTable::new(table_engine).await.unwrap();
assert_eq!(Base, system_table.as_table_ref().table_type());
}
#[tokio::test]
async fn test_system_table_info() {
let (_dir, table_engine) = prepare_table_engine().await;
let system_table = SystemCatalogTable::new(table_engine).await.unwrap();
let info = system_table.as_table_ref().table_info();
assert_eq!(TableType::Base, info.table_type);
assert_eq!(SYSTEM_CATALOG_TABLE_NAME, info.name);
assert_eq!(SYSTEM_CATALOG_TABLE_ID, info.ident.table_id);
assert_eq!(SYSTEM_CATALOG_NAME, info.catalog_name);
assert_eq!(INFORMATION_SCHEMA_NAME, info.schema_name);
}
#[tokio::test]
async fn test_system_catalog_table_records() {
let (_, table_engine) = prepare_table_engine().await;
let catalog_table = SystemCatalogTable::new(table_engine).await.unwrap();
let result = catalog_table
.register_table(
DEFAULT_CATALOG_NAME.to_string(),
DEFAULT_SCHEMA_NAME.to_string(),
"my_table".to_string(),
1,
MITO_ENGINE.to_string(),
)
.await
.unwrap();
assert_eq!(result, 1);
let records = catalog_table.records().await.unwrap();
let mut batches = RecordBatches::try_collect(records).await.unwrap().take();
assert_eq!(batches.len(), 1);
let batch = batches.remove(0);
assert_eq!(batch.num_rows(), 1);
let row = batch.rows().next().unwrap();
let Value::UInt8(entry_type) = row[0] else {
unreachable!()
};
let Value::Binary(key) = row[1].clone() else {
unreachable!()
};
let Value::Binary(value) = row[3].clone() else {
unreachable!()
};
let entry = decode_system_catalog(Some(entry_type), Some(&*key), Some(&*value)).unwrap();
let expected = Entry::Table(TableEntry {
catalog_name: DEFAULT_CATALOG_NAME.to_string(),
schema_name: DEFAULT_SCHEMA_NAME.to_string(),
table_name: "my_table".to_string(),
table_id: 1,
engine: MITO_ENGINE.to_string(),
is_deleted: false,
});
assert_eq!(entry, expected);
catalog_table
.deregister_table(
&DeregisterTableRequest {
catalog: DEFAULT_CATALOG_NAME.to_string(),
schema: DEFAULT_SCHEMA_NAME.to_string(),
table_name: "my_table".to_string(),
},
1,
)
.await
.unwrap();
let records = catalog_table.records().await.unwrap();
let batches = RecordBatches::try_collect(records).await.unwrap().take();
assert_eq!(batches.len(), 1);
}
}


@@ -123,7 +123,7 @@ mod tests {
use session::context::QueryContext;
use super::*;
use crate::local::MemoryCatalogManager;
use crate::memory::MemoryCatalogManager;
#[test]
fn test_validate_table_ref() {


@@ -1,77 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// The `tables` table in the system catalog keeps a record of all tables created by users.
use std::sync::Arc;
use table::metadata::TableId;
use crate::system::SystemCatalogTable;
use crate::DeregisterTableRequest;
pub struct InformationSchema {
pub system: Arc<SystemCatalogTable>,
}
pub struct SystemCatalog {
pub information_schema: Arc<InformationSchema>,
}
impl SystemCatalog {
pub(crate) fn new(system: SystemCatalogTable) -> Self {
let schema = InformationSchema {
system: Arc::new(system),
};
Self {
information_schema: Arc::new(schema),
}
}
pub async fn register_table(
&self,
catalog: String,
schema: String,
table_name: String,
table_id: TableId,
engine: String,
) -> crate::error::Result<usize> {
self.information_schema
.system
.register_table(catalog, schema, table_name, table_id, engine)
.await
}
pub(crate) async fn deregister_table(
&self,
request: &DeregisterTableRequest,
table_id: TableId,
) -> crate::error::Result<()> {
self.information_schema
.system
.deregister_table(request, table_id)
.await
}
pub async fn register_schema(
&self,
catalog: String,
schema: String,
) -> crate::error::Result<usize> {
self.information_schema
.system
.register_schema(catalog, schema)
.await
}
}


@@ -1,175 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#[cfg(test)]
mod tests {
use std::sync::Arc;
use catalog::local::LocalCatalogManager;
use catalog::{CatalogManager, RegisterTableRequest, RenameTableRequest};
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_telemetry::{error, info};
use common_test_util::temp_dir::TempDir;
use mito::config::EngineConfig;
use table::engine::manager::MemoryTableEngineManager;
use table::table::numbers::NumbersTable;
use tokio::sync::Mutex;
async fn create_local_catalog_manager(
) -> Result<(TempDir, LocalCatalogManager), catalog::error::Error> {
let (dir, object_store) =
mito::table::test_util::new_test_object_store("setup_mock_engine_and_table").await;
let mock_engine = Arc::new(mito::table::test_util::MockMitoEngine::new(
EngineConfig::default(),
mito::table::test_util::MockEngine::default(),
object_store,
));
let engine_manager = Arc::new(MemoryTableEngineManager::new(mock_engine.clone()));
let catalog_manager = LocalCatalogManager::try_new(engine_manager).await.unwrap();
catalog_manager.start().await?;
Ok((dir, catalog_manager))
}
#[tokio::test]
async fn test_rename_table() {
common_telemetry::init_default_ut_logging();
let (_dir, catalog_manager) = create_local_catalog_manager().await.unwrap();
// register table
let table_name = "test_table";
let table_id = 42;
let request = RegisterTableRequest {
catalog: DEFAULT_CATALOG_NAME.to_string(),
schema: DEFAULT_SCHEMA_NAME.to_string(),
table_name: table_name.to_string(),
table_id,
table: NumbersTable::table(table_id),
};
assert!(catalog_manager.register_table(request).await.unwrap());
// rename table
let new_table_name = "table_t";
let rename_table_req = RenameTableRequest {
catalog: DEFAULT_CATALOG_NAME.to_string(),
schema: DEFAULT_SCHEMA_NAME.to_string(),
table_name: table_name.to_string(),
new_table_name: new_table_name.to_string(),
table_id,
};
assert!(catalog_manager
.rename_table(rename_table_req)
.await
.unwrap());
let registered_table = catalog_manager
.table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, new_table_name)
.await
.unwrap()
.unwrap();
assert_eq!(registered_table.table_info().ident.table_id, table_id);
}
#[tokio::test]
async fn test_duplicate_register() {
let (_dir, catalog_manager) = create_local_catalog_manager().await.unwrap();
let request = RegisterTableRequest {
catalog: DEFAULT_CATALOG_NAME.to_string(),
schema: DEFAULT_SCHEMA_NAME.to_string(),
table_name: "test_table".to_string(),
table_id: 42,
table: NumbersTable::table(42),
};
assert!(catalog_manager
.register_table(request.clone())
.await
.unwrap());
// registering a table with the same table id again succeeds, but returns `false`.
assert!(!catalog_manager.register_table(request).await.unwrap());
let err = catalog_manager
.register_table(RegisterTableRequest {
catalog: DEFAULT_CATALOG_NAME.to_string(),
schema: DEFAULT_SCHEMA_NAME.to_string(),
table_name: "test_table".to_string(),
table_id: 43,
table: NumbersTable::table(43),
})
.await
.unwrap_err();
assert!(
err.to_string()
.contains("Table `greptime.public.test_table` already exists"),
"Actual error message: {err}",
);
}
#[test]
fn test_concurrent_register() {
common_telemetry::init_default_ut_logging();
let rt = Arc::new(tokio::runtime::Builder::new_multi_thread().build().unwrap());
let (_dir, catalog_manager) =
rt.block_on(async { create_local_catalog_manager().await.unwrap() });
let catalog_manager = Arc::new(catalog_manager);
let succeed = Arc::new(Mutex::new(None));
let mut handles = Vec::with_capacity(8);
for i in 0..8 {
let catalog = catalog_manager.clone();
let succeed = succeed.clone();
let handle = rt.spawn(async move {
let table_id = 42 + i;
let table = NumbersTable::table(table_id);
let table_info = table.table_info();
let req = RegisterTableRequest {
catalog: DEFAULT_CATALOG_NAME.to_string(),
schema: DEFAULT_SCHEMA_NAME.to_string(),
table_name: "test_table".to_string(),
table_id,
table,
};
match catalog.register_table(req).await {
Ok(res) => {
if res {
let mut succeed = succeed.lock().await;
info!("Successfully registered table: {}", table_id);
*succeed = Some(table_info);
}
}
Err(_) => {
error!("Failed to register table {}", table_id);
}
}
});
handles.push(handle);
}
rt.block_on(async move {
for handle in handles {
handle.await.unwrap();
}
let guard = succeed.lock().await;
let table_info = guard.as_ref().unwrap();
let table_registered = catalog_manager
.table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, "test_table")
.await
.unwrap()
.unwrap();
assert_eq!(
table_registered.table_info().ident.table_id,
table_info.ident.table_id
);
});
}
}


@@ -1,453 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![feature(assert_matches)]
#[cfg(test)]
mod tests {
use std::assert_matches::assert_matches;
use std::collections::HashSet;
use std::sync::Arc;
use std::time::Duration;
use catalog::remote::mock::MockTableEngine;
use catalog::remote::region_alive_keeper::RegionAliveKeepers;
use catalog::remote::{CachedMetaKvBackend, RemoteCatalogManager};
use catalog::{CatalogManager, RegisterSchemaRequest, RegisterTableRequest};
use common_catalog::consts::{
DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, INFORMATION_SCHEMA_NAME, MITO_ENGINE,
};
use common_meta::helper::CatalogValue;
use common_meta::ident::TableIdent;
use common_meta::key::catalog_name::CatalogNameKey;
use common_meta::key::TableMetadataManager;
use common_meta::kv_backend::memory::MemoryKvBackend;
use common_meta::kv_backend::KvBackend;
use common_meta::rpc::store::{CompareAndPutRequest, PutRequest};
use datatypes::schema::RawSchema;
use table::engine::manager::{MemoryTableEngineManager, TableEngineManagerRef};
use table::engine::{EngineContext, TableEngineRef};
use table::requests::CreateTableRequest;
use table::test_util::EmptyTable;
use tokio::time::Instant;
struct TestingComponents {
catalog_manager: Arc<RemoteCatalogManager>,
table_engine_manager: TableEngineManagerRef,
region_alive_keepers: Arc<RegionAliveKeepers>,
}
impl TestingComponents {
fn table_engine(&self) -> TableEngineRef {
self.table_engine_manager.engine(MITO_ENGINE).unwrap()
}
}
#[tokio::test]
async fn test_cached_backend() {
let backend = CachedMetaKvBackend::wrap(Arc::new(MemoryKvBackend::default()));
let default_catalog_key = CatalogNameKey::new(DEFAULT_CATALOG_NAME).to_string();
let req = PutRequest::new()
.with_key(default_catalog_key.as_bytes())
.with_value(CatalogValue.as_bytes().unwrap());
backend.put(req).await.unwrap();
let ret = backend.get(b"__catalog_name/greptime").await.unwrap();
let _ = ret.unwrap();
let req = CompareAndPutRequest::new()
.with_key(b"__catalog_name/greptime".to_vec())
.with_expect(CatalogValue.as_bytes().unwrap())
.with_value(b"123".to_vec());
let _ = backend.compare_and_put(req).await.unwrap();
let ret = backend.get(b"__catalog_name/greptime").await.unwrap();
assert_eq!(b"123", ret.as_ref().unwrap().value.as_slice());
let req = PutRequest::new()
.with_key(b"__catalog_name/greptime".to_vec())
.with_value(b"1234".to_vec());
let _ = backend.put(req).await;
let ret = backend.get(b"__catalog_name/greptime").await.unwrap();
assert_eq!(b"1234", ret.unwrap().value.as_slice());
backend
.delete(b"__catalog_name/greptime", false)
.await
.unwrap();
let ret = backend.get(b"__catalog_name/greptime").await.unwrap();
assert!(ret.is_none());
}
async fn prepare_components(node_id: u64) -> TestingComponents {
let backend = Arc::new(MemoryKvBackend::default());
let req = PutRequest::new()
.with_key(b"__catalog_name/greptime".to_vec())
.with_value(b"".to_vec());
backend.put(req).await.unwrap();
let req = PutRequest::new()
.with_key(b"__schema_name/greptime-public".to_vec())
.with_value(b"".to_vec());
backend.put(req).await.unwrap();
let cached_backend = Arc::new(CachedMetaKvBackend::wrap(backend));
let table_engine = Arc::new(MockTableEngine::default());
let engine_manager = Arc::new(MemoryTableEngineManager::alias(
MITO_ENGINE.to_string(),
table_engine,
));
let region_alive_keepers = Arc::new(RegionAliveKeepers::new(engine_manager.clone(), 5000));
let catalog_manager = RemoteCatalogManager::new(
engine_manager.clone(),
node_id,
region_alive_keepers.clone(),
Arc::new(TableMetadataManager::new(cached_backend)),
);
catalog_manager.start().await.unwrap();
TestingComponents {
catalog_manager: Arc::new(catalog_manager),
table_engine_manager: engine_manager,
region_alive_keepers,
}
}
#[tokio::test]
async fn test_remote_catalog_default() {
common_telemetry::init_default_ut_logging();
let node_id = 42;
let TestingComponents {
catalog_manager, ..
} = prepare_components(node_id).await;
assert_eq!(
vec![DEFAULT_CATALOG_NAME.to_string()],
catalog_manager.catalog_names().await.unwrap()
);
let mut schema_names = catalog_manager
.schema_names(DEFAULT_CATALOG_NAME)
.await
.unwrap();
schema_names.sort_unstable();
assert_eq!(
vec![
INFORMATION_SCHEMA_NAME.to_string(),
DEFAULT_SCHEMA_NAME.to_string()
],
schema_names
);
}
#[tokio::test]
async fn test_remote_catalog_register_nonexistent() {
common_telemetry::init_default_ut_logging();
let node_id = 42;
let components = prepare_components(node_id).await;
// register a new table with a nonexistent catalog
let catalog_name = "nonexistent_catalog".to_string();
let schema_name = "nonexistent_schema".to_string();
let table_name = "fail_table".to_string();
// this schema has no effect
let table_schema = RawSchema::new(vec![]);
let table = components
.table_engine()
.create_table(
&EngineContext {},
CreateTableRequest {
id: 1,
catalog_name: catalog_name.clone(),
schema_name: schema_name.clone(),
table_name: table_name.clone(),
desc: None,
schema: table_schema,
region_numbers: vec![0],
primary_key_indices: vec![],
create_if_not_exists: false,
table_options: Default::default(),
engine: MITO_ENGINE.to_string(),
},
)
.await
.unwrap();
let reg_req = RegisterTableRequest {
catalog: catalog_name,
schema: schema_name,
table_name,
table_id: 1,
table,
};
let res = components.catalog_manager.register_table(reg_req).await;
// the registration fails because nonexistent_catalog does not exist yet.
assert_matches!(
res.err().unwrap(),
catalog::error::Error::CatalogNotFound { .. }
);
}
#[tokio::test]
async fn test_register_table() {
let node_id = 42;
let components = prepare_components(node_id).await;
let mut schema_names = components
.catalog_manager
.schema_names(DEFAULT_CATALOG_NAME)
.await
.unwrap();
schema_names.sort_unstable();
assert_eq!(
vec![
INFORMATION_SCHEMA_NAME.to_string(),
DEFAULT_SCHEMA_NAME.to_string(),
],
schema_names
);
// register a new table in the default catalog and schema
let catalog_name = DEFAULT_CATALOG_NAME.to_string();
let schema_name = DEFAULT_SCHEMA_NAME.to_string();
let table_name = "test_table".to_string();
let table_id = 1;
// this schema has no effect
let table_schema = RawSchema::new(vec![]);
let table = components
.table_engine()
.create_table(
&EngineContext {},
CreateTableRequest {
id: table_id,
catalog_name: catalog_name.clone(),
schema_name: schema_name.clone(),
table_name: table_name.clone(),
desc: None,
schema: table_schema,
region_numbers: vec![0],
primary_key_indices: vec![],
create_if_not_exists: false,
table_options: Default::default(),
engine: MITO_ENGINE.to_string(),
},
)
.await
.unwrap();
let reg_req = RegisterTableRequest {
catalog: catalog_name,
schema: schema_name,
table_name: table_name.clone(),
table_id,
table,
};
assert!(components
.catalog_manager
.register_table(reg_req)
.await
.unwrap());
assert_eq!(
vec![table_name],
components
.catalog_manager
.table_names(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME)
.await
.unwrap()
);
}
#[tokio::test]
async fn test_register_catalog_schema_table() {
let node_id = 42;
let components = prepare_components(node_id).await;
let catalog_name = "test_catalog".to_string();
let schema_name = "nonexistent_schema".to_string();
// register catalog to catalog manager
assert!(components
.catalog_manager
.clone()
.register_catalog(catalog_name.clone())
.await
.is_ok());
assert_eq!(
HashSet::<String>::from_iter(vec![
DEFAULT_CATALOG_NAME.to_string(),
catalog_name.clone()
]),
HashSet::from_iter(components.catalog_manager.catalog_names().await.unwrap())
);
let table_to_register = components
.table_engine()
.create_table(
&EngineContext {},
CreateTableRequest {
id: 2,
catalog_name: catalog_name.clone(),
schema_name: schema_name.clone(),
table_name: "".to_string(),
desc: None,
schema: RawSchema::new(vec![]),
region_numbers: vec![0],
primary_key_indices: vec![],
create_if_not_exists: false,
table_options: Default::default(),
engine: MITO_ENGINE.to_string(),
},
)
.await
.unwrap();
let reg_req = RegisterTableRequest {
catalog: catalog_name.clone(),
schema: schema_name.clone(),
table_name: " fail_table".to_string(),
table_id: 2,
table: table_to_register,
};
// this registration will fail since the schema does not exist yet
assert_matches!(
components
.catalog_manager
.register_table(reg_req.clone())
.await
.unwrap_err(),
catalog::error::Error::SchemaNotFound { .. }
);
let register_schema_request = RegisterSchemaRequest {
catalog: catalog_name.to_string(),
schema: schema_name.to_string(),
};
assert!(components
.catalog_manager
.register_schema(register_schema_request)
.await
.expect("Register schema should not fail"));
assert!(components
.catalog_manager
.register_table(reg_req)
.await
.unwrap());
assert_eq!(
HashSet::from([schema_name.clone(), INFORMATION_SCHEMA_NAME.to_string()]),
components
.catalog_manager
.schema_names(&catalog_name)
.await
.unwrap()
.into_iter()
.collect()
)
}
#[tokio::test]
async fn test_register_table_before_and_after_region_alive_keeper_started() {
let components = prepare_components(42).await;
let catalog_manager = &components.catalog_manager;
let region_alive_keepers = &components.region_alive_keepers;
let table_before = TableIdent {
catalog: DEFAULT_CATALOG_NAME.to_string(),
schema: DEFAULT_SCHEMA_NAME.to_string(),
table: "table_before".to_string(),
table_id: 1,
engine: MITO_ENGINE.to_string(),
};
let request = RegisterTableRequest {
catalog: table_before.catalog.clone(),
schema: table_before.schema.clone(),
table_name: table_before.table.clone(),
table_id: table_before.table_id,
table: Arc::new(EmptyTable::new(CreateTableRequest {
id: table_before.table_id,
catalog_name: table_before.catalog.clone(),
schema_name: table_before.schema.clone(),
table_name: table_before.table.clone(),
desc: None,
schema: RawSchema::new(vec![]),
region_numbers: vec![0],
primary_key_indices: vec![],
create_if_not_exists: false,
table_options: Default::default(),
engine: MITO_ENGINE.to_string(),
})),
};
assert!(catalog_manager.register_table(request).await.unwrap());
let keeper = region_alive_keepers
.find_keeper(table_before.table_id)
.await
.unwrap();
let deadline = keeper.deadline(0).await.unwrap();
let far_future = Instant::now() + Duration::from_secs(86400 * 365 * 29);
// assert region alive countdown is not started
assert!(deadline > far_future);
region_alive_keepers.start().await;
let table_after = TableIdent {
catalog: DEFAULT_CATALOG_NAME.to_string(),
schema: DEFAULT_SCHEMA_NAME.to_string(),
table: "table_after".to_string(),
table_id: 2,
engine: MITO_ENGINE.to_string(),
};
let request = RegisterTableRequest {
catalog: table_after.catalog.clone(),
schema: table_after.schema.clone(),
table_name: table_after.table.clone(),
table_id: table_after.table_id,
table: Arc::new(EmptyTable::new(CreateTableRequest {
id: table_after.table_id,
catalog_name: table_after.catalog.clone(),
schema_name: table_after.schema.clone(),
table_name: table_after.table.clone(),
desc: None,
schema: RawSchema::new(vec![]),
region_numbers: vec![0],
primary_key_indices: vec![],
create_if_not_exists: false,
table_options: Default::default(),
engine: MITO_ENGINE.to_string(),
})),
};
assert!(catalog_manager.register_table(request).await.unwrap());
let keeper = region_alive_keepers
.find_keeper(table_after.table_id)
.await
.unwrap();
let deadline = keeper.deadline(0).await.unwrap();
// assert countdown is started for the table registered after [RegionAliveKeepers] started
assert!(deadline <= Instant::now() + Duration::from_secs(20));
let keeper = region_alive_keepers
.find_keeper(table_before.table_id)
.await
.unwrap();
let deadline = keeper.deadline(0).await.unwrap();
// assert countdown is started for the table registered before [RegionAliveKeepers] started, too
assert!(deadline <= Instant::now() + Duration::from_secs(20));
}
}


@@ -11,10 +11,12 @@ testing = []
api = { workspace = true }
arrow-flight.workspace = true
async-stream.workspace = true
async-trait.workspace = true
common-base = { workspace = true }
common-catalog = { workspace = true }
common-error = { workspace = true }
common-grpc = { workspace = true }
common-macro = { workspace = true }
common-meta = { workspace = true }
common-query = { workspace = true }
common-recordbatch = { workspace = true }
@@ -25,10 +27,11 @@ datatypes = { workspace = true }
derive_builder.workspace = true
enum_dispatch = "0.3"
futures-util.workspace = true
moka = { version = "0.9", features = ["future"] }
moka = { workspace = true, features = ["future"] }
parking_lot = "0.12"
prost.workspace = true
rand.workspace = true
session = { workspace = true }
snafu.workspace = true
tokio-stream = { version = "0.1", features = ["net"] }
tokio.workspace = true


@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use api::v1::{ColumnDataType, ColumnDef, CreateTableExpr, TableId};
use api::v1::{ColumnDataType, ColumnDef, CreateTableExpr, SemanticType, TableId};
use client::{Client, Database};
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, MITO_ENGINE};
use prost::Message;
@@ -41,21 +41,27 @@ async fn run() {
column_defs: vec![
ColumnDef {
name: "timestamp".to_string(),
datatype: ColumnDataType::TimestampMillisecond as i32,
data_type: ColumnDataType::TimestampMillisecond as i32,
is_nullable: false,
default_constraint: vec![],
semantic_type: SemanticType::Timestamp as i32,
comment: String::new(),
},
ColumnDef {
name: "key".to_string(),
datatype: ColumnDataType::Uint64 as i32,
data_type: ColumnDataType::Uint64 as i32,
is_nullable: false,
default_constraint: vec![],
semantic_type: SemanticType::Tag as i32,
comment: String::new(),
},
ColumnDef {
name: "value".to_string(),
datatype: ColumnDataType::Uint64 as i32,
data_type: ColumnDataType::Uint64 as i32,
is_nullable: false,
default_constraint: vec![],
semantic_type: SemanticType::Field as i32,
comment: String::new(),
},
],
time_index: "timestamp".to_string(),
@@ -63,7 +69,6 @@ async fn run() {
create_if_not_exists: false,
table_options: Default::default(),
table_id: Some(TableId { id: 1024 }),
region_numbers: vec![0],
engine: MITO_ENGINE.to_string(),
};
@@ -73,7 +78,7 @@ async fn run() {
let logical = mock_logical_plan();
event!(Level::INFO, "plan size: {:#?}", logical.len());
let result = db.logical_plan(logical, None).await.unwrap();
let result = db.logical_plan(logical, 0).await.unwrap();
event!(Level::INFO, "result: {:#?}", result);
}


@@ -42,14 +42,14 @@ async fn run() {
.insert(vec![to_insert_request(weather_records_1())])
.await
{
error!("Error: {e}");
error!("Error: {e:?}");
}
if let Err(e) = stream_inserter
.insert(vec![to_insert_request(weather_records_2())])
.await
{
error!("Error: {e}");
error!("Error: {e:?}");
}
let result = stream_inserter.finish().await;
@@ -59,7 +59,7 @@ async fn run() {
info!("Rows written: {rows}");
}
Err(e) => {
error!("Error: {e}");
error!("Error: {e:?}");
}
};
}
@@ -131,7 +131,7 @@ fn to_insert_request(records: Vec<WeatherRecord>) -> InsertRequest {
Column {
column_name: "ts".to_owned(),
values: Some(column::Values {
ts_millisecond_values: timestamp_millis,
timestamp_millisecond_values: timestamp_millis,
..Default::default()
}),
semantic_type: SemanticType::Timestamp as i32,
@@ -177,6 +177,5 @@ fn to_insert_request(records: Vec<WeatherRecord>) -> InsertRequest {
table_name: "weather_demo".to_owned(),
columns,
row_count: rows as u32,
..Default::default()
}
}


@@ -138,24 +138,46 @@ impl Client {
Ok((addr, channel))
}
fn max_grpc_recv_message_size(&self) -> usize {
self.inner
.channel_manager
.config()
.max_recv_message_size
.as_bytes() as usize
}
fn max_grpc_send_message_size(&self) -> usize {
self.inner
.channel_manager
.config()
.max_send_message_size
.as_bytes() as usize
}
pub(crate) fn make_flight_client(&self) -> Result<FlightClient> {
let (addr, channel) = self.find_channel()?;
Ok(FlightClient {
addr,
client: FlightServiceClient::new(channel),
client: FlightServiceClient::new(channel)
.max_decoding_message_size(self.max_grpc_recv_message_size())
.max_encoding_message_size(self.max_grpc_send_message_size()),
})
}
pub(crate) fn make_database_client(&self) -> Result<DatabaseClient> {
let (_, channel) = self.find_channel()?;
Ok(DatabaseClient {
inner: GreptimeDatabaseClient::new(channel),
inner: GreptimeDatabaseClient::new(channel)
.max_decoding_message_size(self.max_grpc_recv_message_size())
.max_encoding_message_size(self.max_grpc_send_message_size()),
})
}
pub(crate) fn raw_region_client(&self) -> Result<PbRegionClient<Channel>> {
let (_, channel) = self.find_channel()?;
Ok(PbRegionClient::new(channel))
Ok(PbRegionClient::new(channel)
.max_decoding_message_size(self.max_grpc_recv_message_size())
.max_encoding_message_size(self.max_grpc_send_message_size()))
}
pub fn make_prometheus_gateway_client(&self) -> Result<PrometheusGatewayClient<Channel>> {


@@ -13,12 +13,15 @@
// limitations under the License.
use std::fmt::{Debug, Formatter};
use std::sync::Arc;
use std::time::Duration;
use common_grpc::channel_manager::{ChannelConfig, ChannelManager};
use common_meta::datanode_manager::{Datanode, DatanodeManager};
use common_meta::peer::Peer;
use moka::future::{Cache, CacheBuilder};
use crate::region::RegionRequester;
use crate::Client;
pub struct DatanodeClients {
@@ -40,6 +43,15 @@ impl Debug for DatanodeClients {
}
}
#[async_trait::async_trait]
impl DatanodeManager for DatanodeClients {
async fn datanode(&self, datanode: &Peer) -> Arc<dyn Datanode> {
let client = self.get_client(datanode).await;
Arc::new(RegionRequester::new(client))
}
}
impl DatanodeClients {
pub fn new(config: ChannelConfig) -> Self {
Self {


@@ -17,9 +17,9 @@ use api::v1::ddl_request::Expr as DdlExpr;
use api::v1::greptime_request::Request;
use api::v1::query_request::Query;
use api::v1::{
AlterExpr, AuthHeader, CompactTableExpr, CreateTableExpr, DdlRequest, DeleteRequests,
DropTableExpr, FlushTableExpr, GreptimeRequest, InsertRequests, PromRangeQuery, QueryRequest,
RequestHeader, RowInsertRequests, TruncateTableExpr,
AlterExpr, AuthHeader, CreateTableExpr, DdlRequest, DeleteRequests, DropTableExpr,
GreptimeRequest, InsertRequests, PromRangeQuery, QueryRequest, RequestHeader,
RowInsertRequests, TruncateTableExpr,
};
use arrow_flight::Ticket;
use async_stream::stream;
@@ -147,13 +147,13 @@ impl Database {
async fn handle(&self, request: Request) -> Result<u32> {
let mut client = self.client.make_database_client()?.inner;
let request = self.to_rpc_request(request, None);
let request = self.to_rpc_request(request, 0);
let response = client.handle(request).await?.into_inner();
from_grpc_response(response)
}
#[inline]
fn to_rpc_request(&self, request: Request, trace_id: Option<u64>) -> GreptimeRequest {
fn to_rpc_request(&self, request: Request, trace_id: u64) -> GreptimeRequest {
GreptimeRequest {
header: Some(RequestHeader {
catalog: self.catalog.clone(),
@@ -161,28 +161,27 @@ impl Database {
authorization: self.ctx.auth_header.clone(),
dbname: self.dbname.clone(),
trace_id,
span_id: None,
span_id: 0,
}),
request: Some(request),
}
}
pub async fn sql(&self, sql: &str) -> Result<Output> {
pub async fn sql<S>(&self, sql: S) -> Result<Output>
where
S: AsRef<str>,
{
let _timer = timer!(metrics::METRIC_GRPC_SQL);
self.do_get(
Request::Query(QueryRequest {
query: Some(Query::Sql(sql.to_string())),
query: Some(Query::Sql(sql.as_ref().to_string())),
}),
None,
0,
)
.await
}
pub async fn logical_plan(
&self,
logical_plan: Vec<u8>,
trace_id: Option<u64>,
) -> Result<Output> {
pub async fn logical_plan(&self, logical_plan: Vec<u8>, trace_id: u64) -> Result<Output> {
let _timer = timer!(metrics::METRIC_GRPC_LOGICAL_PLAN);
self.do_get(
Request::Query(QueryRequest {
@@ -210,7 +209,7 @@ impl Database {
step: step.to_string(),
})),
}),
None,
0,
)
.await
}
@@ -221,7 +220,7 @@ impl Database {
Request::Ddl(DdlRequest {
expr: Some(DdlExpr::CreateTable(expr)),
}),
None,
0,
)
.await
}
@@ -232,7 +231,7 @@ impl Database {
Request::Ddl(DdlRequest {
expr: Some(DdlExpr::Alter(expr)),
}),
None,
0,
)
.await
}
@@ -243,29 +242,7 @@ impl Database {
Request::Ddl(DdlRequest {
expr: Some(DdlExpr::DropTable(expr)),
}),
None,
)
.await
}
pub async fn flush_table(&self, expr: FlushTableExpr) -> Result<Output> {
let _timer = timer!(metrics::METRIC_GRPC_FLUSH_TABLE);
self.do_get(
Request::Ddl(DdlRequest {
expr: Some(DdlExpr::FlushTable(expr)),
}),
None,
)
.await
}
pub async fn compact_table(&self, expr: CompactTableExpr) -> Result<Output> {
let _timer = timer!(metrics::METRIC_GRPC_COMPACT_TABLE);
self.do_get(
Request::Ddl(DdlRequest {
expr: Some(DdlExpr::CompactTable(expr)),
}),
None,
0,
)
.await
}
@@ -276,12 +253,12 @@ impl Database {
Request::Ddl(DdlRequest {
expr: Some(DdlExpr::TruncateTable(expr)),
}),
None,
0,
)
.await
}
async fn do_get(&self, request: Request, trace_id: Option<u64>) -> Result<Output> {
async fn do_get(&self, request: Request, trace_id: u64) -> Result<Output> {
// FIXME(paomian): some labels should be added for the metrics
let _timer = timer!(metrics::METRIC_GRPC_DO_GET);
let request = self.to_rpc_request(request, trace_id);
@@ -302,7 +279,7 @@ impl Database {
source: BoxedError::new(ServerSnafu { code, msg }.build()),
};
logging::error!(
"Failed to do Flight get, addr: {}, code: {}, source: {}",
"Failed to do Flight get, addr: {}, code: {}, source: {:?}",
client.addr(),
tonic_code,
error


@@ -17,29 +17,37 @@ use std::any::Any;
use common_error::ext::{BoxedError, ErrorExt};
use common_error::status_code::StatusCode;
use common_error::{GREPTIME_ERROR_CODE, GREPTIME_ERROR_MSG};
use common_macro::stack_trace_debug;
use snafu::{Location, Snafu};
use tonic::{Code, Status};
#[derive(Debug, Snafu)]
#[derive(Snafu)]
#[snafu(visibility(pub))]
#[stack_trace_debug]
pub enum Error {
#[snafu(display("Illegal Flight messages, reason: {}", reason))]
IllegalFlightMessages { reason: String, location: Location },
#[snafu(display("Failed to do Flight get, code: {}, source: {}", tonic_code, source))]
#[snafu(display("Failed to do Flight get, code: {}", tonic_code))]
FlightGet {
addr: String,
tonic_code: Code,
source: BoxedError,
},
#[snafu(display("Failed to convert FlightData, source: {}", source))]
#[snafu(display("Failure occurs during handling request"))]
HandleRequest {
location: Location,
source: BoxedError,
},
#[snafu(display("Failed to convert FlightData"))]
ConvertFlightData {
location: Location,
source: common_grpc::Error,
},
#[snafu(display("Column datatype error, source: {}", source))]
#[snafu(display("Column datatype error"))]
ColumnDataType {
location: Location,
source: api::error::Error,
@@ -51,17 +59,16 @@ pub enum Error {
#[snafu(display("Missing required field in protobuf, field: {}", field))]
MissingField { field: String, location: Location },
#[snafu(display(
"Failed to create gRPC channel, peer address: {}, source: {}",
addr,
source
))]
#[snafu(display("Failed to create gRPC channel, peer address: {}", addr))]
CreateChannel {
addr: String,
location: Location,
source: common_grpc::error::Error,
},
#[snafu(display("Failed to request RegionServer, code: {}", code))]
RegionServer { code: Code, source: BoxedError },
// Server error carried in Tonic Status's metadata.
#[snafu(display("{}", msg))]
Server { code: StatusCode, msg: String },
@@ -85,7 +92,9 @@ impl ErrorExt for Error {
| Error::ClientStreaming { .. } => StatusCode::Internal,
Error::Server { code, .. } => *code,
Error::FlightGet { source, .. } => source.status_code(),
Error::FlightGet { source, .. }
| Error::HandleRequest { source, .. }
| Error::RegionServer { source, .. } => source.status_code(),
Error::CreateChannel { source, .. } | Error::ConvertFlightData { source, .. } => {
source.status_code()
}


@@ -26,6 +26,8 @@ use api::v1::greptime_response::Response;
use api::v1::{AffectedRows, GreptimeResponse};
pub use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_error::status_code::StatusCode;
pub use common_query::Output;
pub use common_recordbatch::{RecordBatches, SendableRecordBatchStream};
use snafu::OptionExt;
pub use self::client::Client;


@@ -21,8 +21,6 @@ pub const METRIC_GRPC_SQL: &str = "grpc.sql";
pub const METRIC_GRPC_LOGICAL_PLAN: &str = "grpc.logical_plan";
pub const METRIC_GRPC_ALTER: &str = "grpc.alter";
pub const METRIC_GRPC_DROP_TABLE: &str = "grpc.drop_table";
pub const METRIC_GRPC_FLUSH_TABLE: &str = "grpc.flush_table";
pub const METRIC_GRPC_COMPACT_TABLE: &str = "grpc.compact_table";
pub const METRIC_GRPC_TRUNCATE_TABLE: &str = "grpc.truncate_table";
pub const METRIC_GRPC_DO_GET: &str = "grpc.do_get";
pub(crate) const METRIC_REGION_REQUEST_GRPC: &str = "grpc.region_request";


@@ -12,44 +12,146 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use api::v1::region::{region_request, RegionRequest, RegionRequestHeader, RegionResponse};
use api::v1::region::{QueryRequest, RegionRequest, RegionResponse};
use api::v1::ResponseHeader;
use arrow_flight::Ticket;
use async_stream::stream;
use async_trait::async_trait;
use common_error::ext::{BoxedError, ErrorExt};
use common_error::status_code::StatusCode;
use common_telemetry::timer;
use snafu::OptionExt;
use common_grpc::flight::{FlightDecoder, FlightMessage};
use common_meta::datanode_manager::{AffectedRows, Datanode};
use common_meta::error::{self as meta_error, Result as MetaResult};
use common_recordbatch::error::ExternalSnafu;
use common_recordbatch::{RecordBatchStreamAdaptor, SendableRecordBatchStream};
use common_telemetry::{error, timer};
use prost::Message;
use snafu::{location, Location, OptionExt, ResultExt};
use tokio_stream::StreamExt;
use crate::error::{IllegalDatabaseResponseSnafu, Result, ServerSnafu};
use crate::{metrics, Client};
type AffectedRows = u64;
use crate::error::Error::RegionServer;
use crate::error::{
self, ConvertFlightDataSnafu, IllegalDatabaseResponseSnafu, IllegalFlightMessagesSnafu,
MissingFieldSnafu, Result, ServerSnafu,
};
use crate::{metrics, Client, Error};
#[derive(Debug)]
pub struct RegionRequester {
trace_id: Option<u64>,
span_id: Option<u64>,
client: Client,
}
#[async_trait]
impl Datanode for RegionRequester {
async fn handle(&self, request: RegionRequest) -> MetaResult<AffectedRows> {
self.handle_inner(request).await.map_err(|err| {
if matches!(err, RegionServer { .. }) {
meta_error::Error::RetryLater {
source: BoxedError::new(err),
}
} else {
meta_error::Error::External {
source: BoxedError::new(err),
location: location!(),
}
}
})
}
async fn handle_query(&self, request: QueryRequest) -> MetaResult<SendableRecordBatchStream> {
let ticket = Ticket {
ticket: request.encode_to_vec().into(),
};
self.do_get_inner(ticket)
.await
.map_err(BoxedError::new)
.context(meta_error::ExternalSnafu)
}
}
impl RegionRequester {
pub fn new(client: Client) -> Self {
// TODO(LFC): Pass in trace_id and span_id from some context when we have it.
Self {
trace_id: None,
span_id: None,
client,
}
Self { client }
}
pub async fn handle(self, request: region_request::Body) -> Result<AffectedRows> {
let request_type = request.as_ref().to_string();
pub async fn do_get_inner(&self, ticket: Ticket) -> Result<SendableRecordBatchStream> {
let mut flight_client = self.client.make_flight_client()?;
let response = flight_client
.mut_inner()
.do_get(ticket)
.await
.map_err(|e| {
let tonic_code = e.code();
let e: error::Error = e.into();
let code = e.status_code();
let msg = e.to_string();
let error = Error::FlightGet {
tonic_code,
addr: flight_client.addr().to_string(),
source: BoxedError::new(ServerSnafu { code, msg }.build()),
};
error!(
e; "Failed to do Flight get, addr: {}, code: {}",
flight_client.addr(),
tonic_code
);
error
})?;
let request = RegionRequest {
header: Some(RegionRequestHeader {
trace_id: self.trace_id,
span_id: self.span_id,
}),
body: Some(request),
let flight_data_stream = response.into_inner();
let mut decoder = FlightDecoder::default();
let mut flight_message_stream = flight_data_stream.map(move |flight_data| {
flight_data
.map_err(Error::from)
.and_then(|data| decoder.try_decode(data).context(ConvertFlightDataSnafu))
});
let Some(first_flight_message) = flight_message_stream.next().await else {
return IllegalFlightMessagesSnafu {
reason: "Expect the response not to be empty",
}
.fail();
};
let FlightMessage::Schema(schema) = first_flight_message? else {
return IllegalFlightMessagesSnafu {
reason: "Expect schema to be the first flight message",
}
.fail();
};
let stream = Box::pin(stream!({
while let Some(flight_message) = flight_message_stream.next().await {
let flight_message = flight_message
.map_err(BoxedError::new)
.context(ExternalSnafu)?;
let FlightMessage::Recordbatch(record_batch) = flight_message else {
yield IllegalFlightMessagesSnafu {
reason: "A Schema message must be succeeded exclusively by a set of RecordBatch messages"
}
.fail()
.map_err(BoxedError::new)
.context(ExternalSnafu);
break;
};
yield Ok(record_batch);
}
}));
let record_batch_stream = RecordBatchStreamAdaptor {
schema,
stream,
output_ordering: None,
};
Ok(Box::pin(record_batch_stream))
}
async fn handle_inner(&self, request: RegionRequest) -> Result<AffectedRows> {
let request_type = request
.body
.as_ref()
.with_context(|| MissingFieldSnafu { field: "body" })?
.as_ref()
.to_string();
let _timer = timer!(
metrics::METRIC_REGION_REQUEST_GRPC,
@@ -61,15 +163,31 @@ impl RegionRequester {
let RegionResponse {
header,
affected_rows,
} = client.handle(request).await?.into_inner();
} = client
.handle(request)
.await
.map_err(|e| {
let code = e.code();
let err: error::Error = e.into();
// Uses `Error::RegionServer` instead of `Error::Server`
error::Error::RegionServer {
code,
source: BoxedError::new(err),
}
})?
.into_inner();
check_response_header(header)?;
Ok(affected_rows)
}
pub async fn handle(&self, request: RegionRequest) -> Result<AffectedRows> {
self.handle_inner(request).await
}
}
fn check_response_header(header: Option<ResponseHeader>) -> Result<()> {
pub fn check_response_header(header: Option<ResponseHeader>) -> Result<()> {
let status = header
.and_then(|header| header.status)
.context(IllegalDatabaseResponseSnafu {

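The do_get path above enforces a simple ordering rule on the decoded Flight stream: the first message must be a Schema, and everything after it must be a RecordBatch. A minimal, self-contained sketch of that check, using a simplified stand-in enum rather than the crate's FlightMessage and arrow types:

#[derive(Debug)]
enum Msg {
    Schema(String),
    RecordBatch(Vec<u64>),
}

// Returns the schema plus the batches, or an error if the stream is empty
// or the Schema-first rule is violated.
fn split_schema_and_batches(msgs: Vec<Msg>) -> Result<(String, Vec<Vec<u64>>), String> {
    let mut iter = msgs.into_iter();
    let Some(Msg::Schema(schema)) = iter.next() else {
        return Err("expected a non-empty stream starting with a Schema message".to_string());
    };
    let mut batches = Vec::new();
    for msg in iter {
        match msg {
            Msg::RecordBatch(batch) => batches.push(batch),
            other => return Err(format!("expected RecordBatch, got {other:?}")),
        }
    }
    Ok((schema, batches))
}

fn main() {
    let msgs = vec![Msg::Schema("ts, host, cpu".into()), Msg::RecordBatch(vec![1, 2, 3])];
    println!("{:?}", split_schema_and_batches(msgs));
}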

@@ -23,8 +23,12 @@ chrono.workspace = true
clap = { version = "3.1", features = ["derive"] }
client = { workspace = true }
common-base = { workspace = true }
common-catalog = { workspace = true }
common-config = { workspace = true }
common-error = { workspace = true }
common-macro = { workspace = true }
common-meta = { workspace = true }
common-procedure = { workspace = true }
common-query = { workspace = true }
common-recordbatch = { workspace = true }
common-telemetry = { workspace = true, features = [
@@ -35,18 +39,24 @@ datanode = { workspace = true }
datatypes = { workspace = true }
either = "1.8"
etcd-client.workspace = true
file-engine = { workspace = true }
frontend = { workspace = true }
futures.workspace = true
lazy_static.workspace = true
meta-client = { workspace = true }
meta-srv = { workspace = true }
metrics.workspace = true
mito2 = { workspace = true }
nu-ansi-term = "0.46"
partition = { workspace = true }
plugins.workspace = true
prost.workspace = true
query = { workspace = true }
rand.workspace = true
regex.workspace = true
rustyline = "10.1"
serde.workspace = true
serde_json.workspace = true
servers = { workspace = true }
session = { workspace = true }
snafu.workspace = true


@@ -116,7 +116,7 @@ impl SubCommand {
Ok(Application::Metasrv(app))
}
(SubCommand::Standalone(cmd), Options::Standalone(opts)) => {
let app = cmd.build(opts.fe_opts, opts.dn_opts).await?;
let app = cmd.build(*opts).await?;
Ok(Application::Standalone(app))
}
(SubCommand::Cli(cmd), Options::Cli(_)) => {


@@ -14,6 +14,7 @@
mod bench;
mod cmd;
mod export;
mod helper;
mod repl;
// TODO(weny): Remove it
@@ -27,6 +28,7 @@ use common_telemetry::logging::LoggingOptions;
pub use repl::Repl;
use upgrade::UpgradeCommand;
use self::export::ExportCommand;
use crate::error::Result;
use crate::options::{Options, TopLevelOptions};
@@ -78,17 +80,19 @@ impl Command {
#[derive(Parser)]
enum SubCommand {
Attach(AttachCommand),
// Attach(AttachCommand),
Upgrade(UpgradeCommand),
Bench(BenchTableMetadataCommand),
Export(ExportCommand),
}
impl SubCommand {
async fn build(self) -> Result<Instance> {
match self {
SubCommand::Attach(cmd) => cmd.build().await,
// SubCommand::Attach(cmd) => cmd.build().await,
SubCommand::Upgrade(cmd) => cmd.build().await,
SubCommand::Bench(cmd) => cmd.build().await,
SubCommand::Export(cmd) => cmd.build().await,
}
}
}
@@ -104,51 +108,9 @@ pub(crate) struct AttachCommand {
}
impl AttachCommand {
#[allow(dead_code)]
async fn build(self) -> Result<Instance> {
let repl = Repl::try_new(&self).await?;
Ok(Instance::Repl(repl))
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_load_options() {
let cmd = Command {
cmd: SubCommand::Attach(AttachCommand {
grpc_addr: String::from(""),
meta_addr: None,
disable_helper: false,
}),
};
let opts = cmd.load_options(TopLevelOptions::default()).unwrap();
let logging_opts = opts.logging_options();
assert_eq!("/tmp/greptimedb/logs", logging_opts.dir);
assert!(logging_opts.level.is_none());
assert!(!logging_opts.enable_jaeger_tracing);
}
#[test]
fn test_top_level_options() {
let cmd = Command {
cmd: SubCommand::Attach(AttachCommand {
grpc_addr: String::from(""),
meta_addr: None,
disable_helper: false,
}),
};
let opts = cmd
.load_options(TopLevelOptions {
log_dir: Some("/tmp/greptimedb/test/logs".to_string()),
log_level: Some("debug".to_string()),
})
.unwrap();
let logging_opts = opts.logging_options();
assert_eq!("/tmp/greptimedb/test/logs", logging_opts.dir);
assert_eq!("debug", logging_opts.level.as_ref().unwrap());
}
}


@@ -120,7 +120,6 @@ fn create_table_info(table_id: TableId, table_name: TableName) -> RawTableInfo {
created_on: chrono::DateTime::default(),
primary_key_indices: vec![],
next_column_id: columns as u32 + 1,
engine_options: Default::default(),
value_indices: vec![],
options: Default::default(),
region_numbers: (1..=100).collect(),

src/cmd/src/cli/export.rs (new file, 395 lines)

@@ -0,0 +1,395 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::path::Path;
use std::sync::Arc;
use async_trait::async_trait;
use clap::{Parser, ValueEnum};
use client::{Client, Database, DEFAULT_SCHEMA_NAME};
use common_query::Output;
use common_recordbatch::util::collect;
use common_telemetry::{debug, error, info, warn};
use datatypes::scalars::ScalarVector;
use datatypes::vectors::{StringVector, Vector};
use snafu::{OptionExt, ResultExt};
use tokio::fs::File;
use tokio::io::AsyncWriteExt;
use tokio::sync::Semaphore;
use crate::cli::{Instance, Tool};
use crate::error::{
CollectRecordBatchesSnafu, ConnectServerSnafu, EmptyResultSnafu, Error, FileIoSnafu,
InvalidDatabaseNameSnafu, NotDataFromOutputSnafu, RequestDatabaseSnafu, Result,
};
type TableReference = (String, String, String);
#[derive(Debug, Default, Clone, ValueEnum)]
enum ExportTarget {
/// Corresponding to `SHOW CREATE TABLE`
#[default]
CreateTable,
/// Corresponding to `EXPORT TABLE`
TableData,
}
#[derive(Debug, Default, Parser)]
pub struct ExportCommand {
/// Server address to connect
#[clap(long)]
addr: String,
/// Directory to put the exported data. E.g.: /tmp/greptimedb-export
#[clap(long)]
output_dir: String,
/// The name of the catalog to export. Defaults to "greptime-*".
#[clap(long, default_value = "")]
database: String,
/// Parallelism of the export.
#[clap(long, short = 'j', default_value = "1")]
export_jobs: usize,
/// Max retry times for each job.
#[clap(long, default_value = "3")]
max_retry: usize,
/// Things to export
#[clap(long, short = 't', value_enum)]
target: ExportTarget,
}
impl ExportCommand {
pub async fn build(&self) -> Result<Instance> {
let client = Client::with_urls([self.addr.clone()]);
client
.health_check()
.await
.with_context(|_| ConnectServerSnafu {
addr: self.addr.clone(),
})?;
let (catalog, schema) = split_database(&self.database)?;
let database_client = Database::new(
catalog.clone(),
schema.clone().unwrap_or(DEFAULT_SCHEMA_NAME.to_string()),
client,
);
Ok(Instance::Tool(Box::new(Export {
client: database_client,
catalog,
schema,
output_dir: self.output_dir.clone(),
parallelism: self.export_jobs,
target: self.target.clone(),
})))
}
}
pub struct Export {
client: Database,
catalog: String,
schema: Option<String>,
output_dir: String,
parallelism: usize,
target: ExportTarget,
}
impl Export {
/// Iterate over all db names.
///
/// Note: `db_name` is catalog + schema.
async fn iter_db_names(&self) -> Result<Vec<(String, String)>> {
if let Some(schema) = &self.schema {
Ok(vec![(self.catalog.clone(), schema.clone())])
} else {
let mut client = self.client.clone();
client.set_catalog(self.catalog.clone());
let result =
client
.sql("show databases")
.await
.with_context(|_| RequestDatabaseSnafu {
sql: "show databases".to_string(),
})?;
let Output::Stream(stream) = result else {
NotDataFromOutputSnafu.fail()?
};
let record_batch = collect(stream)
.await
.context(CollectRecordBatchesSnafu)?
.pop()
.context(EmptyResultSnafu)?;
let schemas = record_batch
.column(0)
.as_any()
.downcast_ref::<StringVector>()
.unwrap();
let mut result = Vec::with_capacity(schemas.len());
for i in 0..schemas.len() {
let schema = schemas.get_data(i).unwrap().to_owned();
result.push((self.catalog.clone(), schema));
}
Ok(result)
}
}
/// Return a list of [`TableReference`] to be exported.
/// Includes all tables under the given `catalog` and `schema`
async fn get_table_list(&self, catalog: &str, schema: &str) -> Result<Vec<TableReference>> {
// TODO: SQL injection hurts
let sql = format!(
"select table_catalog, table_schema, table_name from \
information_schema.tables where table_type = \'BASE TABLE\'\
and table_catalog = \'{catalog}\' and table_schema = \'{schema}\'",
);
let mut client = self.client.clone();
client.set_catalog(catalog);
client.set_schema(schema);
let result = client
.sql(&sql)
.await
.with_context(|_| RequestDatabaseSnafu { sql })?;
let Output::Stream(stream) = result else {
NotDataFromOutputSnafu.fail()?
};
let Some(record_batch) = collect(stream)
.await
.context(CollectRecordBatchesSnafu)?
.pop()
else {
return Ok(vec![]);
};
debug!("Fetched table list: {}", record_batch.pretty_print());
if record_batch.num_rows() == 0 {
return Ok(vec![]);
}
let mut result = Vec::with_capacity(record_batch.num_rows());
let catalog_column = record_batch
.column(0)
.as_any()
.downcast_ref::<StringVector>()
.unwrap();
let schema_column = record_batch
.column(1)
.as_any()
.downcast_ref::<StringVector>()
.unwrap();
let table_column = record_batch
.column(2)
.as_any()
.downcast_ref::<StringVector>()
.unwrap();
for i in 0..record_batch.num_rows() {
let catalog = catalog_column.get_data(i).unwrap().to_owned();
let schema = schema_column.get_data(i).unwrap().to_owned();
let table = table_column.get_data(i).unwrap().to_owned();
result.push((catalog, schema, table));
}
Ok(result)
}
async fn show_create_table(&self, catalog: &str, schema: &str, table: &str) -> Result<String> {
let sql = format!("show create table {}.{}.{}", catalog, schema, table);
let mut client = self.client.clone();
client.set_catalog(catalog);
client.set_schema(schema);
let result = client
.sql(&sql)
.await
.with_context(|_| RequestDatabaseSnafu { sql })?;
let Output::Stream(stream) = result else {
NotDataFromOutputSnafu.fail()?
};
let record_batch = collect(stream)
.await
.context(CollectRecordBatchesSnafu)?
.pop()
.context(EmptyResultSnafu)?;
let create_table = record_batch
.column(1)
.as_any()
.downcast_ref::<StringVector>()
.unwrap()
.get_data(0)
.unwrap();
Ok(format!("{create_table};\n"))
}
async fn export_create_table(&self) -> Result<()> {
let semaphore = Arc::new(Semaphore::new(self.parallelism));
let db_names = self.iter_db_names().await?;
let db_count = db_names.len();
let mut tasks = Vec::with_capacity(db_names.len());
for (catalog, schema) in db_names {
let semaphore_moved = semaphore.clone();
tasks.push(async move {
let _permit = semaphore_moved.acquire().await.unwrap();
let table_list = self.get_table_list(&catalog, &schema).await?;
let table_count = table_list.len();
tokio::fs::create_dir_all(&self.output_dir)
.await
.context(FileIoSnafu)?;
let output_file =
Path::new(&self.output_dir).join(format!("{catalog}-{schema}.sql"));
let mut file = File::create(output_file).await.context(FileIoSnafu)?;
for (c, s, t) in table_list {
match self.show_create_table(&c, &s, &t).await {
Err(e) => {
error!(e; "Failed to export table {}.{}.{}", c, s, t)
}
Ok(create_table) => {
file.write_all(create_table.as_bytes())
.await
.context(FileIoSnafu)?;
}
}
}
info!("finished exporting {catalog}.{schema} with {table_count} tables",);
Ok::<(), Error>(())
});
}
let success = futures::future::join_all(tasks)
.await
.into_iter()
.filter(|r| match r {
Ok(_) => true,
Err(e) => {
error!(e; "export job failed");
false
}
})
.count();
info!("success {success}/{db_count} jobs");
Ok(())
}
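export_create_table above (and export_table_data below) bound their parallelism with the same pattern: one Arc<Semaphore> sized by --export-jobs, one future per database, and futures::future::join_all to await them all while counting failures. A self-contained sketch of that pattern, assuming only the tokio and futures crates; the job body is a stand-in for the real per-database work:

use std::sync::Arc;

use tokio::sync::Semaphore;

#[tokio::main]
async fn main() {
    let parallelism = 2;
    let semaphore = Arc::new(Semaphore::new(parallelism));
    let jobs = vec!["greptime-public", "greptime-system"];
    let job_count = jobs.len();

    let mut tasks = Vec::with_capacity(job_count);
    for name in jobs {
        let semaphore = semaphore.clone();
        tasks.push(async move {
            // At most `parallelism` jobs make progress past this point at once.
            let _permit = semaphore.acquire().await.unwrap();
            // Stand-in for "export one database"; the real tool issues SQL here.
            if name.is_empty() {
                return Err("empty database name".to_string());
            }
            println!("exported {name}");
            Ok::<(), String>(())
        });
    }

    // Await everything and count successes, mirroring the "success {n}/{total} jobs" log above.
    let success = futures::future::join_all(tasks)
        .await
        .into_iter()
        .filter(|r| r.is_ok())
        .count();
    println!("success {success}/{job_count} jobs");
}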
async fn export_table_data(&self) -> Result<()> {
let semaphore = Arc::new(Semaphore::new(self.parallelism));
let db_names = self.iter_db_names().await?;
let db_count = db_names.len();
let mut tasks = Vec::with_capacity(db_names.len());
for (catalog, schema) in db_names {
let semaphore_moved = semaphore.clone();
tasks.push(async move {
let _permit = semaphore_moved.acquire().await.unwrap();
tokio::fs::create_dir_all(&self.output_dir)
.await
.context(FileIoSnafu)?;
let output_dir = Path::new(&self.output_dir).join(format!("{catalog}-{schema}/"));
let mut client = self.client.clone();
client.set_catalog(catalog.clone());
client.set_schema(schema.clone());
// Run `COPY DATABASE <schema> TO '<output_dir>'` to dump the data as parquet files.
let sql = format!(
"copy database {} to '{}' with (format='parquet');",
schema,
output_dir.to_str().unwrap()
);
client
.sql(sql.clone())
.await
.context(RequestDatabaseSnafu { sql })?;
info!("finished exporting {catalog}.{schema} data");
// Generate the matching `COPY <table> FROM '<file>'` statements so the dump can be re-imported.
let dir_filenames = match output_dir.read_dir() {
Ok(dir) => dir,
Err(_) => {
warn!("empty database {catalog}.{schema}");
return Ok(());
}
};
let copy_from_file =
Path::new(&self.output_dir).join(format!("{catalog}-{schema}_copy_from.sql"));
let mut file = File::create(copy_from_file).await.context(FileIoSnafu)?;
let copy_from_sql = dir_filenames
.into_iter()
.map(|file| {
let file = file.unwrap();
let filename = file.file_name().into_string().unwrap();
format!(
"copy {} from '{}' with (format='parquet');\n",
filename.replace(".parquet", ""),
file.path().to_str().unwrap()
)
})
.collect::<Vec<_>>()
.join("");
file.write_all(copy_from_sql.as_bytes())
.await
.context(FileIoSnafu)?;
info!("finished exporting {catalog}.{schema} copy_from.sql");
Ok::<(), Error>(())
});
}
let success = futures::future::join_all(tasks)
.await
.into_iter()
.filter(|r| match r {
Ok(_) => true,
Err(e) => {
error!(e; "export job failed");
false
}
})
.count();
info!("success {success}/{db_count} jobs");
Ok(())
}
}
#[async_trait]
impl Tool for Export {
async fn do_work(&self) -> Result<()> {
match self.target {
ExportTarget::CreateTable => self.export_create_table().await,
ExportTarget::TableData => self.export_table_data().await,
}
}
}
/// Split at `-`.
fn split_database(database: &str) -> Result<(String, Option<String>)> {
let (catalog, schema) = database
.split_once('-')
.with_context(|| InvalidDatabaseNameSnafu {
database: database.to_string(),
})?;
if schema == "*" {
Ok((catalog.to_string(), None))
} else {
Ok((catalog.to_string(), Some(schema.to_string())))
}
}
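For reference, the --database argument is expected in catalog-schema form, with * standing for "all schemas in the catalog". A self-contained sketch mirroring the split_database logic above, with a plain string error instead of the crate's snafu error:

// "greptime-public" -> ("greptime", Some("public"))
// "greptime-*"      -> ("greptime", None), i.e. export every schema in the catalog
fn split_database(database: &str) -> Result<(String, Option<String>), String> {
    let (catalog, schema) = database
        .split_once('-')
        .ok_or_else(|| format!("invalid database name: {database}"))?;
    if schema == "*" {
        Ok((catalog.to_string(), None))
    } else {
        Ok((catalog.to_string(), Some(schema.to_string())))
    }
}

fn main() {
    assert_eq!(
        split_database("greptime-public"),
        Ok(("greptime".to_string(), Some("public".to_string())))
    );
    assert_eq!(split_database("greptime-*"), Ok(("greptime".to_string(), None)));
    assert!(split_database("no_separator").is_err());
}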


@@ -16,20 +16,16 @@ use std::path::PathBuf;
use std::sync::Arc;
use std::time::Instant;
use catalog::remote::CachedMetaKvBackend;
use catalog::kvbackend::{CachedMetaKvBackend, KvBackendCatalogManager};
use client::client_manager::DatanodeClients;
use client::{Client, Database, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_base::Plugins;
use common_error::ext::ErrorExt;
use common_meta::key::TableMetadataManager;
use common_query::Output;
use common_recordbatch::RecordBatches;
use common_telemetry::logging;
use either::Either;
use frontend::catalog::FrontendCatalogManager;
use meta_client::client::MetaClientBuilder;
use partition::manager::PartitionRuleManager;
use partition::route::TableRoutes;
use query::datafusion::DatafusionQueryEngine;
use query::logical_optimizer::LogicalOptimizer;
use query::parser::QueryLanguageParser;
@@ -39,7 +35,7 @@ use query::QueryEngine;
use rustyline::error::ReadlineError;
use rustyline::Editor;
use session::context::QueryContext;
use snafu::{ErrorCompat, ResultExt};
use snafu::ResultExt;
use substrait::{DFLogicalSubstraitConvertor, SubstraitPlan};
use crate::cli::cmd::ReplCommand;
@@ -152,7 +148,7 @@ impl Repl {
.await
.map_err(|e| {
let status_code = e.status_code();
let root_cause = e.iter_chain().last().unwrap();
let root_cause = e.output_msg();
println!("Error: {}({status_code}), {root_cause}", status_code as u32)
})
.is_ok()
@@ -180,7 +176,7 @@ impl Repl {
.encode(&plan)
.context(SubstraitEncodeLogicalPlanSnafu)?;
self.database.logical_plan(plan.to_vec(), None).await
self.database.logical_plan(plan.to_vec(), 0).await
} else {
self.database.sql(&sql).await
}
@@ -254,24 +250,19 @@ async fn create_query_engine(meta_addr: &str) -> Result<DatafusionQueryEngine> {
let cached_meta_backend = Arc::new(CachedMetaKvBackend::new(meta_client.clone()));
let table_routes = Arc::new(TableRoutes::new(meta_client));
let partition_manager = Arc::new(PartitionRuleManager::new(table_routes));
let datanode_clients = Arc::new(DatanodeClients::default());
let catalog_list = Arc::new(FrontendCatalogManager::new(
let catalog_list = KvBackendCatalogManager::new(
cached_meta_backend.clone(),
cached_meta_backend.clone(),
partition_manager,
datanode_clients,
Arc::new(TableMetadataManager::new(cached_meta_backend)),
));
let plugins: Arc<Plugins> = Default::default();
);
let plugins: Plugins = Default::default();
let state = Arc::new(QueryEngineState::new(
catalog_list,
None,
None,
false,
None,
None,
plugins.clone(),
));


@@ -17,15 +17,15 @@ use std::sync::Arc;
use async_trait::async_trait;
use clap::Parser;
use client::api::v1::meta::TableRouteValue;
use common_meta::ddl::utils::region_storage_path;
use common_meta::error as MetaError;
use common_meta::helper::{CatalogKey as v1CatalogKey, SchemaKey as v1SchemaKey, TableGlobalValue};
use common_meta::key::catalog_name::{CatalogNameKey, CatalogNameValue};
use common_meta::key::datanode_table::{DatanodeTableKey, DatanodeTableValue};
use common_meta::key::datanode_table::{DatanodeTableKey, DatanodeTableValue, RegionInfo};
use common_meta::key::schema_name::{SchemaNameKey, SchemaNameValue};
use common_meta::key::table_info::{TableInfoKey, TableInfoValue};
use common_meta::key::table_name::{TableNameKey, TableNameValue};
use common_meta::key::table_region::{TableRegionKey, TableRegionValue};
use common_meta::key::table_route::{NextTableRouteKey, TableRouteValue as NextTableRouteValue};
use common_meta::key::table_route::{TableRouteKey, TableRouteValue as NextTableRouteValue};
use common_meta::key::{RegionDistribution, TableMetaKey};
use common_meta::range_stream::PaginationStream;
use common_meta::rpc::router::TableRoute;
@@ -39,6 +39,7 @@ use meta_srv::service::store::etcd::EtcdStore;
use meta_srv::service::store::kv::{KvBackendAdapter, KvStoreRef};
use prost::Message;
use snafu::ResultExt;
use v1_helper::{CatalogKey as v1CatalogKey, SchemaKey as v1SchemaKey, TableGlobalValue};
use crate::cli::{Instance, Tool};
use crate::error::{self, ConnectEtcdSnafu, Result};
@@ -154,7 +155,7 @@ impl MigrateTableMetadata {
let new_table_value = NextTableRouteValue::new(table_route.region_routes);
let table_id = table_route.table.id as u32;
let new_key = NextTableRouteKey::new(table_id);
let new_key = TableRouteKey::new(table_id);
info!("Creating '{new_key}'");
if self.dryrun {
@@ -386,6 +387,11 @@ impl MigrateTableMetadata {
async fn create_datanode_table_keys(&self, value: &TableGlobalValue) {
let table_id = value.table_id();
let engine = value.table_info.meta.engine.as_str();
let region_storage_path = region_storage_path(
&value.table_info.catalog_name,
&value.table_info.schema_name,
);
let region_distribution: RegionDistribution =
value.regions_id_map.clone().into_iter().collect();
@@ -394,7 +400,18 @@ impl MigrateTableMetadata {
.map(|(datanode_id, regions)| {
let k = DatanodeTableKey::new(datanode_id, table_id);
info!("Creating DatanodeTableKey '{k}' => {regions:?}");
(k, DatanodeTableValue::new(table_id, regions))
(
k,
DatanodeTableValue::new(
table_id,
regions,
RegionInfo {
engine: engine.to_string(),
region_storage_path: region_storage_path.clone(),
region_options: (&value.table_info.meta.options).into(),
},
),
)
})
.collect::<Vec<_>>();
@@ -409,3 +426,148 @@ impl MigrateTableMetadata {
}
}
}
#[deprecated(since = "0.4.0", note = "Used for migrating old version (v0.3) metadata")]
mod v1_helper {
use std::collections::HashMap;
use std::fmt::{Display, Formatter};
use err::{DeserializeCatalogEntryValueSnafu, Error, InvalidCatalogSnafu};
use lazy_static::lazy_static;
use regex::Regex;
use serde::{Deserialize, Serialize};
use snafu::{ensure, OptionExt, ResultExt};
use table::metadata::{RawTableInfo, TableId};
pub const CATALOG_KEY_PREFIX: &str = "__c";
pub const SCHEMA_KEY_PREFIX: &str = "__s";
/// The pattern of a valid catalog, schema or table name.
const NAME_PATTERN: &str = "[a-zA-Z_:][a-zA-Z0-9_:]*";
lazy_static! {
static ref CATALOG_KEY_PATTERN: Regex =
Regex::new(&format!("^{CATALOG_KEY_PREFIX}-({NAME_PATTERN})$")).unwrap();
}
lazy_static! {
static ref SCHEMA_KEY_PATTERN: Regex = Regex::new(&format!(
"^{SCHEMA_KEY_PREFIX}-({NAME_PATTERN})-({NAME_PATTERN})$"
))
.unwrap();
}
/// Table global info contains necessary info for a datanode to create table regions, including
/// table id, table meta(schema...), region id allocation across datanodes.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct TableGlobalValue {
/// Id of datanode that created the global table info kv. only for debugging.
pub node_id: u64,
/// Allocation of region ids across all datanodes.
pub regions_id_map: HashMap<u64, Vec<u32>>,
pub table_info: RawTableInfo,
}
impl TableGlobalValue {
pub fn table_id(&self) -> TableId {
self.table_info.ident.table_id
}
}
pub struct CatalogKey {
pub catalog_name: String,
}
impl Display for CatalogKey {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
f.write_str(CATALOG_KEY_PREFIX)?;
f.write_str("-")?;
f.write_str(&self.catalog_name)
}
}
impl CatalogKey {
pub fn parse(s: impl AsRef<str>) -> Result<Self, Error> {
let key = s.as_ref();
let captures = CATALOG_KEY_PATTERN
.captures(key)
.context(InvalidCatalogSnafu { key })?;
ensure!(captures.len() == 2, InvalidCatalogSnafu { key });
Ok(Self {
catalog_name: captures[1].to_string(),
})
}
}
#[derive(Debug, Serialize, Deserialize)]
pub struct CatalogValue;
pub struct SchemaKey {
pub catalog_name: String,
pub schema_name: String,
}
impl Display for SchemaKey {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
f.write_str(SCHEMA_KEY_PREFIX)?;
f.write_str("-")?;
f.write_str(&self.catalog_name)?;
f.write_str("-")?;
f.write_str(&self.schema_name)
}
}
impl SchemaKey {
pub fn parse(s: impl AsRef<str>) -> Result<Self, Error> {
let key = s.as_ref();
let captures = SCHEMA_KEY_PATTERN
.captures(key)
.context(InvalidCatalogSnafu { key })?;
ensure!(captures.len() == 3, InvalidCatalogSnafu { key });
Ok(Self {
catalog_name: captures[1].to_string(),
schema_name: captures[2].to_string(),
})
}
}
#[derive(Debug, Serialize, Deserialize)]
pub struct SchemaValue;
macro_rules! define_catalog_value {
( $($val_ty: ty), *) => {
$(
impl $val_ty {
pub fn parse(s: impl AsRef<str>) -> Result<Self, Error> {
serde_json::from_str(s.as_ref())
.context(DeserializeCatalogEntryValueSnafu { raw: s.as_ref() })
}
pub fn from_bytes(bytes: impl AsRef<[u8]>) -> Result<Self, Error> {
Self::parse(&String::from_utf8_lossy(bytes.as_ref()))
}
}
)*
}
}
define_catalog_value!(TableGlobalValue);
mod err {
use snafu::{Location, Snafu};
#[derive(Debug, Snafu)]
#[snafu(visibility(pub))]
pub enum Error {
#[snafu(display("Invalid catalog info: {}", key))]
InvalidCatalog { key: String, location: Location },
#[snafu(display("Failed to deserialize catalog entry value: {}", raw))]
DeserializeCatalogEntryValue {
raw: String,
location: Location,
source: serde_json::error::Error,
},
}
}
}
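The v0.3 keys being migrated are plain strings: catalogs are stored as __c-{catalog} and schemas as __s-{catalog}-{schema}, which is exactly what the two regexes above capture. A self-contained sketch of formatting and parsing a schema key (assumes the regex crate; error handling collapsed to Option):

use regex::Regex;

const SCHEMA_KEY_PREFIX: &str = "__s";
const NAME_PATTERN: &str = "[a-zA-Z_:][a-zA-Z0-9_:]*";

fn format_schema_key(catalog: &str, schema: &str) -> String {
    format!("{SCHEMA_KEY_PREFIX}-{catalog}-{schema}")
}

fn parse_schema_key(key: &str) -> Option<(String, String)> {
    let pattern = Regex::new(&format!(
        "^{SCHEMA_KEY_PREFIX}-({NAME_PATTERN})-({NAME_PATTERN})$"
    ))
    .unwrap();
    let captures = pattern.captures(key)?;
    Some((captures[1].to_string(), captures[2].to_string()))
}

fn main() {
    let key = format_schema_key("greptime", "public");
    assert_eq!(key, "__s-greptime-public");
    assert_eq!(
        parse_schema_key(&key),
        Some(("greptime".to_string(), "public".to_string()))
    );
    // Catalog keys follow the same scheme with the "__c" prefix, e.g. "__c-greptime".
}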


@@ -16,7 +16,8 @@ use std::time::Duration;
use clap::Parser;
use common_telemetry::logging;
use datanode::datanode::{Datanode, DatanodeOptions};
use datanode::config::DatanodeOptions;
use datanode::datanode::{Datanode, DatanodeBuilder};
use meta_client::MetaClientOptions;
use servers::Mode;
use snafu::ResultExt;
@@ -30,6 +31,10 @@ pub struct Instance {
impl Instance {
pub async fn start(&mut self) -> Result<()> {
plugins::start_datanode_plugins(self.datanode.plugins())
.await
.context(StartDatanodeSnafu)?;
self.datanode.start().await.context(StartDatanodeSnafu)
}
@@ -129,7 +134,7 @@ impl StartCommand {
}
if let Some(metasrv_addrs) = &self.metasrv_addr {
opts.meta_client_options
opts.meta_client
.get_or_insert_with(MetaClientOptions::default)
.metasrv_addrs = metasrv_addrs.clone();
opts.mode = Mode::Distributed;
@@ -151,24 +156,29 @@ impl StartCommand {
}
if let Some(http_addr) = &self.http_addr {
opts.http_opts.addr = http_addr.clone();
opts.http.addr = http_addr.clone();
}
if let Some(http_timeout) = self.http_timeout {
opts.http_opts.timeout = Duration::from_secs(http_timeout)
opts.http.timeout = Duration::from_secs(http_timeout)
}
// Disable dashboard in datanode.
opts.http_opts.disable_dashboard = true;
opts.http.disable_dashboard = true;
Ok(Options::Datanode(Box::new(opts)))
}
async fn build(self, opts: DatanodeOptions) -> Result<Instance> {
async fn build(self, mut opts: DatanodeOptions) -> Result<Instance> {
let plugins = plugins::setup_datanode_plugins(&mut opts)
.await
.context(StartDatanodeSnafu)?;
logging::info!("Datanode start command: {:#?}", self);
logging::info!("Datanode options: {:#?}", opts);
let datanode = Datanode::new(opts, Default::default())
let datanode = DatanodeBuilder::new(opts, None, plugins)
.build()
.await
.context(StartDatanodeSnafu)?;
@@ -183,9 +193,8 @@ mod tests {
use common_base::readable_size::ReadableSize;
use common_test_util::temp_dir::create_named_temp_file;
use datanode::datanode::{
CompactionConfig, FileConfig, ObjectStoreConfig, RegionManifestConfig,
};
use datanode::config::{CompactionConfig, FileConfig, ObjectStoreConfig, RegionManifestConfig};
use servers::heartbeat_options::HeartbeatOptions;
use servers::Mode;
use super::*;
@@ -202,11 +211,14 @@ mod tests {
rpc_hostname = "127.0.0.1"
rpc_runtime_size = 8
[meta_client_options]
[heartbeat]
interval = "300ms"
[meta_client]
metasrv_addrs = ["127.0.0.1:3002"]
timeout_millis = 3000
connect_timeout_millis = 5000
ddl_timeout_millis= 10000
timeout = "3s"
connect_timeout = "5s"
ddl_timeout = "10s"
tcp_nodelay = true
[wal]
@@ -249,25 +261,33 @@ mod tests {
assert_eq!("127.0.0.1:3001".to_string(), options.rpc_addr);
assert_eq!(Some(42), options.node_id);
assert_eq!("/other/wal", options.wal.dir.unwrap());
assert_eq!(Duration::from_secs(600), options.wal.purge_interval);
assert_eq!(1024 * 1024 * 1024, options.wal.file_size.0);
assert_eq!(1024 * 1024 * 1024 * 50, options.wal.purge_threshold.0);
assert!(!options.wal.sync_write);
let HeartbeatOptions {
interval: heart_beat_interval,
..
} = options.heartbeat;
assert_eq!(300, heart_beat_interval.as_millis());
let MetaClientOptions {
metasrv_addrs: metasrv_addr,
timeout_millis,
connect_timeout_millis,
timeout,
connect_timeout,
ddl_timeout,
tcp_nodelay,
ddl_timeout_millis,
} = options.meta_client_options.unwrap();
..
} = options.meta_client.unwrap();
assert_eq!(vec!["127.0.0.1:3002".to_string()], metasrv_addr);
assert_eq!(5000, connect_timeout_millis);
assert_eq!(10000, ddl_timeout_millis);
assert_eq!(3000, timeout_millis);
assert_eq!(5000, connect_timeout.as_millis());
assert_eq!(10000, ddl_timeout.as_millis());
assert_eq!(3000, timeout.as_millis());
assert!(tcp_nodelay);
assert_eq!("/tmp/greptimedb/", options.storage.data_home);
assert!(matches!(
@@ -360,9 +380,9 @@ mod tests {
rpc_hostname = "127.0.0.1"
rpc_runtime_size = 8
[meta_client_options]
timeout_millis = 3000
connect_timeout_millis = 5000
[meta_client]
timeout = "3s"
connect_timeout = "5s"
tcp_nodelay = true
[wal]
@@ -413,10 +433,10 @@ mod tests {
Some("99"),
),
(
// meta_client_options.metasrv_addrs = 127.0.0.1:3001,127.0.0.1:3002,127.0.0.1:3003
// meta_client.metasrv_addrs = 127.0.0.1:3001,127.0.0.1:3002,127.0.0.1:3003
[
env_prefix.to_string(),
"meta_client_options".to_uppercase(),
"meta_client".to_uppercase(),
"metasrv_addrs".to_uppercase(),
]
.join(ENV_VAR_SEP),
@@ -443,7 +463,7 @@ mod tests {
Some(Duration::from_secs(9))
);
assert_eq!(
opts.meta_client_options.unwrap().metasrv_addrs,
opts.meta_client.unwrap().metasrv_addrs,
vec![
"127.0.0.1:3001".to_string(),
"127.0.0.1:3002".to_string(),

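Note the TOML shape change in these tests: meta_client_options with millisecond integers (timeout_millis = 3000) becomes meta_client with human-readable duration strings (timeout = "3s"). The diff does not show how those strings are deserialized; one common way to get this behavior with serde, shown purely as an illustration, is the humantime-serde bridge:

use std::time::Duration;

use serde::Deserialize;

// Cut-down stand-in for MetaClientOptions; the humantime_serde attribute is an
// assumption for illustration, not necessarily what the codebase uses.
#[derive(Debug, Deserialize)]
struct MetaClientOptions {
    metasrv_addrs: Vec<String>,
    #[serde(with = "humantime_serde")]
    timeout: Duration,
    #[serde(with = "humantime_serde")]
    connect_timeout: Duration,
    tcp_nodelay: bool,
}

fn main() {
    let toml_str = r#"
        metasrv_addrs = ["127.0.0.1:3002"]
        timeout = "3s"
        connect_timeout = "5s"
        tcp_nodelay = true
    "#;
    let opts: MetaClientOptions = toml::from_str(toml_str).unwrap();
    assert_eq!(opts.timeout, Duration::from_secs(3));
    assert_eq!(opts.connect_timeout.as_millis(), 5000);
    println!("{opts:?}");
}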

@@ -16,56 +16,76 @@ use std::any::Any;
use common_error::ext::ErrorExt;
use common_error::status_code::StatusCode;
use common_macro::stack_trace_debug;
use config::ConfigError;
use rustyline::error::ReadlineError;
use snafu::{Location, Snafu};
#[derive(Debug, Snafu)]
#[derive(Snafu)]
#[snafu(visibility(pub))]
#[stack_trace_debug]
pub enum Error {
#[snafu(display("Failed to iter stream, source: {}", source))]
#[snafu(display("Failed to create default catalog and schema"))]
InitMetadata {
location: Location,
source: common_meta::error::Error,
},
#[snafu(display("Failed to iter stream"))]
IterStream {
location: Location,
source: common_meta::error::Error,
},
#[snafu(display("Failed to start datanode, source: {}", source))]
#[snafu(display("Failed to start procedure manager"))]
StartProcedureManager {
location: Location,
source: common_procedure::error::Error,
},
#[snafu(display("Failed to stop procedure manager"))]
StopProcedureManager {
location: Location,
source: common_procedure::error::Error,
},
#[snafu(display("Failed to start datanode"))]
StartDatanode {
location: Location,
source: datanode::error::Error,
},
#[snafu(display("Failed to shutdown datanode, source: {}", source))]
#[snafu(display("Failed to shutdown datanode"))]
ShutdownDatanode {
location: Location,
source: datanode::error::Error,
},
#[snafu(display("Failed to start frontend, source: {}", source))]
#[snafu(display("Failed to start frontend"))]
StartFrontend {
location: Location,
source: frontend::error::Error,
},
#[snafu(display("Failed to shutdown frontend, source: {}", source))]
#[snafu(display("Failed to shutdown frontend"))]
ShutdownFrontend {
location: Location,
source: frontend::error::Error,
},
#[snafu(display("Failed to build meta server, source: {}", source))]
#[snafu(display("Failed to build meta server"))]
BuildMetaServer {
location: Location,
source: meta_srv::error::Error,
},
#[snafu(display("Failed to start meta server, source: {}", source))]
#[snafu(display("Failed to start meta server"))]
StartMetaServer {
location: Location,
source: meta_srv::error::Error,
},
#[snafu(display("Failed to shutdown meta server, source: {}", source))]
#[snafu(display("Failed to shutdown meta server"))]
ShutdownMetaServer {
location: Location,
source: meta_srv::error::Error,
@@ -77,13 +97,7 @@ pub enum Error {
#[snafu(display("Illegal config: {}", msg))]
IllegalConfig { msg: String, location: Location },
#[snafu(display("Illegal auth config: {}", source))]
IllegalAuthConfig {
location: Location,
source: auth::error::Error,
},
#[snafu(display("Unsupported selector type, {} source: {}", selector_type, source))]
#[snafu(display("Unsupported selector type: {}", selector_type))]
UnsupportedSelectorType {
selector_type: String,
location: Location,
@@ -93,80 +107,124 @@ pub enum Error {
#[snafu(display("Invalid REPL command: {reason}"))]
InvalidReplCommand { reason: String },
#[snafu(display("Cannot create REPL: {}", source))]
#[snafu(display("Cannot create REPL"))]
ReplCreation {
source: ReadlineError,
#[snafu(source)]
error: ReadlineError,
location: Location,
},
#[snafu(display("Error reading command: {}", source))]
#[snafu(display("Error reading command"))]
Readline {
source: ReadlineError,
#[snafu(source)]
error: ReadlineError,
location: Location,
},
#[snafu(display("Failed to request database, sql: {sql}, source: {source}"))]
#[snafu(display("Failed to request database, sql: {sql}"))]
RequestDatabase {
sql: String,
location: Location,
source: client::Error,
},
#[snafu(display("Failed to collect RecordBatches, source: {source}"))]
#[snafu(display("Failed to collect RecordBatches"))]
CollectRecordBatches {
location: Location,
source: common_recordbatch::error::Error,
},
#[snafu(display("Failed to pretty print Recordbatches, source: {source}"))]
#[snafu(display("Failed to pretty print Recordbatches"))]
PrettyPrintRecordBatches {
location: Location,
source: common_recordbatch::error::Error,
},
#[snafu(display("Failed to start Meta client, source: {}", source))]
#[snafu(display("Failed to start Meta client"))]
StartMetaClient {
location: Location,
source: meta_client::error::Error,
},
#[snafu(display("Failed to parse SQL: {}, source: {}", sql, source))]
#[snafu(display("Failed to parse SQL: {}", sql))]
ParseSql {
sql: String,
location: Location,
source: query::error::Error,
},
#[snafu(display("Failed to plan statement, source: {}", source))]
#[snafu(display("Failed to plan statement"))]
PlanStatement {
location: Location,
source: query::error::Error,
},
#[snafu(display("Failed to encode logical plan in substrait, source: {}", source))]
#[snafu(display("Failed to encode logical plan in substrait"))]
SubstraitEncodeLogicalPlan {
location: Location,
source: substrait::error::Error,
},
#[snafu(display("Failed to load layered config, source: {}", source))]
#[snafu(display("Failed to load layered config"))]
LoadLayeredConfig {
source: ConfigError,
#[snafu(source)]
error: ConfigError,
location: Location,
},
#[snafu(display("Failed to start catalog manager, source: {}", source))]
#[snafu(display("Failed to start catalog manager"))]
StartCatalogManager {
location: Location,
source: catalog::error::Error,
},
#[snafu(display("Failed to connect to Etcd at {etcd_addr}, source: {}", source))]
#[snafu(display("Failed to connect to Etcd at {etcd_addr}"))]
ConnectEtcd {
etcd_addr: String,
source: etcd_client::Error,
#[snafu(source)]
error: etcd_client::Error,
location: Location,
},
#[snafu(display("Failed to connect server at {addr}"))]
ConnectServer {
addr: String,
source: client::error::Error,
location: Location,
},
#[snafu(display("Failed to serde json"))]
SerdeJson {
#[snafu(source)]
error: serde_json::error::Error,
location: Location,
},
#[snafu(display("Expect data from output, but got another thing"))]
NotDataFromOutput { location: Location },
#[snafu(display("Empty result from output"))]
EmptyResult { location: Location },
#[snafu(display("Failed to manipulate file"))]
FileIo {
location: Location,
#[snafu(source)]
error: std::io::Error,
},
#[snafu(display("Invalid database name: {}", database))]
InvalidDatabaseName {
location: Location,
database: String,
},
#[snafu(display("Failed to create directory {}", dir))]
CreateDir {
dir: String,
#[snafu(source)]
error: std::io::Error,
},
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -182,14 +240,21 @@ impl ErrorExt for Error {
Error::ShutdownMetaServer { source, .. } => source.status_code(),
Error::BuildMetaServer { source, .. } => source.status_code(),
Error::UnsupportedSelectorType { source, .. } => source.status_code(),
Error::IterStream { source, .. } => source.status_code(),
Error::IterStream { source, .. } | Error::InitMetadata { source, .. } => {
source.status_code()
}
Error::ConnectServer { source, .. } => source.status_code(),
Error::MissingConfig { .. }
| Error::LoadLayeredConfig { .. }
| Error::IllegalConfig { .. }
| Error::InvalidReplCommand { .. }
| Error::IllegalAuthConfig { .. }
| Error::ConnectEtcd { .. } => StatusCode::InvalidArguments,
| Error::ConnectEtcd { .. }
| Error::NotDataFromOutput { .. }
| Error::CreateDir { .. }
| Error::EmptyResult { .. }
| Error::InvalidDatabaseName { .. } => StatusCode::InvalidArguments,
Error::StartProcedureManager { source, .. }
| Error::StopProcedureManager { source, .. } => source.status_code(),
Error::ReplCreation { .. } | Error::Readline { .. } => StatusCode::Internal,
Error::RequestDatabase { source, .. } => source.status_code(),
Error::CollectRecordBatches { source, .. }
@@ -200,6 +265,8 @@ impl ErrorExt for Error {
}
Error::SubstraitEncodeLogicalPlan { source, .. } => source.status_code(),
Error::StartCatalogManager { source, .. } => source.status_code(),
Error::SerdeJson { .. } | Error::FileIo { .. } => StatusCode::Unexpected,
}
}

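A recurring change in this error module: external error types (ReadlineError, ConfigError, std::io::Error, ...) now live in a field named error marked with #[snafu(source)], and the display strings drop the trailing ", source: {}" since the source chain is printed by the new debug/output machinery. A self-contained sketch of the field-level attribute with a snafu 0.7-style derive (the Location field and #[stack_trace_debug] are omitted; plain derive(Debug) is used instead):

use snafu::{ResultExt, Snafu};

#[derive(Debug, Snafu)]
#[snafu(visibility(pub))]
pub enum Error {
    // The external error is stored as `error`; `#[snafu(source)]` still marks it
    // as the source, so the display message can stay short.
    #[snafu(display("Failed to manipulate file"))]
    FileIo {
        #[snafu(source)]
        error: std::io::Error,
    },
}

pub type Result<T> = std::result::Result<T, Error>;

fn read_config(path: &str) -> Result<String> {
    std::fs::read_to_string(path).context(FileIoSnafu)
}

fn main() {
    match read_config("/definitely/missing.toml") {
        Ok(content) => println!("{content}"),
        // The io::Error is still reachable through the standard source chain.
        Err(err) => println!("{err}: {:?}", std::error::Error::source(&err)),
    }
}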

@@ -12,21 +12,18 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use std::sync::Arc;
use std::time::Duration;
use auth::UserProviderRef;
use clap::Parser;
use common_base::Plugins;
use common_telemetry::logging;
use frontend::frontend::FrontendOptions;
use frontend::instance::{FrontendInstance, Instance as FeInstance};
use frontend::service_config::InfluxdbOptions;
use meta_client::MetaClientOptions;
use servers::tls::{TlsMode, TlsOption};
use servers::Mode;
use snafu::ResultExt;
use crate::error::{self, IllegalAuthConfigSnafu, Result, StartCatalogManagerSnafu};
use crate::error::{self, Result, StartFrontendSnafu};
use crate::options::{Options, TopLevelOptions};
pub struct Instance {
@@ -35,16 +32,11 @@ pub struct Instance {
impl Instance {
pub async fn start(&mut self) -> Result<()> {
self.frontend
.catalog_manager()
.start()
plugins::start_frontend_plugins(self.frontend.plugins().clone())
.await
.context(StartCatalogManagerSnafu)?;
.context(StartFrontendSnafu)?;
self.frontend
.start()
.await
.context(error::StartFrontendSnafu)
self.frontend.start().await.context(StartFrontendSnafu)
}
pub async fn stop(&self) -> Result<()> {
@@ -95,7 +87,9 @@ pub struct StartCommand {
#[clap(long)]
http_addr: Option<String>,
#[clap(long)]
grpc_addr: Option<String>,
http_timeout: Option<u64>,
#[clap(long)]
rpc_addr: Option<String>,
#[clap(long)]
mysql_addr: Option<String>,
#[clap(long)]
@@ -145,86 +139,75 @@ impl StartCommand {
);
if let Some(addr) = &self.http_addr {
if let Some(http_opts) = &mut opts.http_options {
http_opts.addr = addr.clone()
}
opts.http.addr = addr.clone()
}
if let Some(http_timeout) = self.http_timeout {
opts.http.timeout = Duration::from_secs(http_timeout)
}
if let Some(disable_dashboard) = self.disable_dashboard {
opts.http_options
.get_or_insert_with(Default::default)
.disable_dashboard = disable_dashboard;
opts.http.disable_dashboard = disable_dashboard;
}
if let Some(addr) = &self.grpc_addr {
if let Some(grpc_opts) = &mut opts.grpc_options {
grpc_opts.addr = addr.clone()
}
if let Some(addr) = &self.rpc_addr {
opts.grpc.addr = addr.clone()
}
if let Some(addr) = &self.mysql_addr {
if let Some(mysql_opts) = &mut opts.mysql_options {
mysql_opts.addr = addr.clone();
mysql_opts.tls = tls_opts.clone();
}
opts.mysql.enable = true;
opts.mysql.addr = addr.clone();
opts.mysql.tls = tls_opts.clone();
}
if let Some(addr) = &self.postgres_addr {
if let Some(postgres_opts) = &mut opts.postgres_options {
postgres_opts.addr = addr.clone();
postgres_opts.tls = tls_opts;
}
opts.postgres.enable = true;
opts.postgres.addr = addr.clone();
opts.postgres.tls = tls_opts;
}
if let Some(addr) = &self.opentsdb_addr {
if let Some(opentsdb_addr) = &mut opts.opentsdb_options {
opentsdb_addr.addr = addr.clone();
}
opts.opentsdb.enable = true;
opts.opentsdb.addr = addr.clone();
}
if let Some(enable) = self.influxdb_enable {
opts.influxdb_options = Some(InfluxdbOptions { enable });
opts.influxdb.enable = enable;
}
if let Some(metasrv_addrs) = &self.metasrv_addr {
opts.meta_client_options
opts.meta_client
.get_or_insert_with(MetaClientOptions::default)
.metasrv_addrs = metasrv_addrs.clone();
opts.mode = Mode::Distributed;
}
opts.user_provider = self.user_provider.clone();
Ok(Options::Frontend(Box::new(opts)))
}
async fn build(self, opts: FrontendOptions) -> Result<Instance> {
async fn build(self, mut opts: FrontendOptions) -> Result<Instance> {
let plugins = plugins::setup_frontend_plugins(&mut opts)
.await
.context(StartFrontendSnafu)?;
logging::info!("Frontend start command: {:#?}", self);
logging::info!("Frontend options: {:#?}", opts);
let plugins = Arc::new(load_frontend_plugins(&self.user_provider)?);
let mut instance = FeInstance::try_new_distributed(&opts, plugins.clone())
.await
.context(error::StartFrontendSnafu)?;
.context(StartFrontendSnafu)?;
instance
.build_servers(&opts)
.await
.context(error::StartFrontendSnafu)?;
.context(StartFrontendSnafu)?;
Ok(Instance { frontend: instance })
}
}
pub fn load_frontend_plugins(user_provider: &Option<String>) -> Result<Plugins> {
let plugins = Plugins::new();
if let Some(provider) = user_provider {
let provider = auth::user_provider_from_option(provider).context(IllegalAuthConfigSnafu)?;
plugins.insert::<UserProviderRef>(provider);
}
Ok(plugins)
}
#[cfg(test)]
mod tests {
use std::io::Write;
@@ -234,6 +217,7 @@ mod tests {
use common_base::readable_size::ReadableSize;
use common_test_util::temp_dir::create_named_temp_file;
use frontend::service_config::GrpcOptions;
use servers::http::HttpOptions;
use super::*;
use crate::options::ENV_VAR_SEP;
@@ -255,40 +239,29 @@ mod tests {
unreachable!()
};
assert_eq!(opts.http_options.as_ref().unwrap().addr, "127.0.0.1:1234");
assert_eq!(
ReadableSize::mb(64),
opts.http_options.as_ref().unwrap().body_limit
);
assert_eq!(opts.mysql_options.as_ref().unwrap().addr, "127.0.0.1:5678");
assert_eq!(
opts.postgres_options.as_ref().unwrap().addr,
"127.0.0.1:5432"
);
assert_eq!(
opts.opentsdb_options.as_ref().unwrap().addr,
"127.0.0.1:4321"
);
assert_eq!(opts.http.addr, "127.0.0.1:1234");
assert_eq!(ReadableSize::mb(64), opts.http.body_limit);
assert_eq!(opts.mysql.addr, "127.0.0.1:5678");
assert_eq!(opts.postgres.addr, "127.0.0.1:5432");
assert_eq!(opts.opentsdb.addr, "127.0.0.1:4321");
let default_opts = FrontendOptions::default();
assert_eq!(opts.grpc.addr, default_opts.grpc.addr);
assert!(opts.mysql.enable);
assert_eq!(opts.mysql.runtime_size, default_opts.mysql.runtime_size);
assert!(opts.postgres.enable);
assert_eq!(
opts.grpc_options.unwrap().addr,
default_opts.grpc_options.unwrap().addr
opts.postgres.runtime_size,
default_opts.postgres.runtime_size
);
assert!(opts.opentsdb.enable);
assert_eq!(
opts.mysql_options.as_ref().unwrap().runtime_size,
default_opts.mysql_options.as_ref().unwrap().runtime_size
);
assert_eq!(
opts.postgres_options.as_ref().unwrap().runtime_size,
default_opts.postgres_options.as_ref().unwrap().runtime_size
);
assert_eq!(
opts.opentsdb_options.as_ref().unwrap().runtime_size,
default_opts.opentsdb_options.as_ref().unwrap().runtime_size
opts.opentsdb.runtime_size,
default_opts.opentsdb.runtime_size
);
assert!(!opts.influxdb_options.unwrap().enable);
assert!(!opts.influxdb.enable);
}
#[test]
@@ -297,7 +270,7 @@ mod tests {
let toml_str = r#"
mode = "distributed"
[http_options]
[http]
addr = "127.0.0.1:4000"
timeout = "30s"
body_limit = "2GB"
@@ -319,19 +292,10 @@ mod tests {
unreachable!()
};
assert_eq!(Mode::Distributed, fe_opts.mode);
assert_eq!(
"127.0.0.1:4000".to_string(),
fe_opts.http_options.as_ref().unwrap().addr
);
assert_eq!(
Duration::from_secs(30),
fe_opts.http_options.as_ref().unwrap().timeout
);
assert_eq!("127.0.0.1:4000".to_string(), fe_opts.http.addr);
assert_eq!(Duration::from_secs(30), fe_opts.http.timeout);
assert_eq!(
ReadableSize::gb(2),
fe_opts.http_options.as_ref().unwrap().body_limit
);
assert_eq!(ReadableSize::gb(2), fe_opts.http.body_limit);
assert_eq!("debug", fe_opts.logging.level.as_ref().unwrap());
assert_eq!("/tmp/greptimedb/test/logs".to_string(), fe_opts.logging.dir);
@@ -339,14 +303,17 @@ mod tests {
#[tokio::test]
async fn test_try_from_start_command_to_anymap() {
let command = StartCommand {
let mut fe_opts = FrontendOptions {
http: HttpOptions {
disable_dashboard: false,
..Default::default()
},
user_provider: Some("static_user_provider:cmd:test=test".to_string()),
disable_dashboard: Some(false),
..Default::default()
};
let plugins = load_frontend_plugins(&command.user_provider);
let plugins = plugins.unwrap();
let plugins = plugins::setup_frontend_plugins(&mut fe_opts).await.unwrap();
let provider = plugins.get::<UserProviderRef>().unwrap();
let result = provider
.authenticate(
@@ -382,15 +349,15 @@ mod tests {
let toml_str = r#"
mode = "distributed"
[http_options]
[http]
addr = "127.0.0.1:4000"
[meta_client_options]
timeout_millis = 3000
connect_timeout_millis = 5000
[meta_client]
timeout = "3s"
connect_timeout = "5s"
tcp_nodelay = true
[mysql_options]
[mysql]
addr = "127.0.0.1:4002"
"#;
write!(file, "{}", toml_str).unwrap();
@@ -399,40 +366,40 @@ mod tests {
temp_env::with_vars(
[
(
// mysql_options.addr = 127.0.0.1:14002
// mysql.addr = 127.0.0.1:14002
[
env_prefix.to_string(),
"mysql_options".to_uppercase(),
"mysql".to_uppercase(),
"addr".to_uppercase(),
]
.join(ENV_VAR_SEP),
Some("127.0.0.1:14002"),
),
(
// mysql_options.runtime_size = 11
// mysql.runtime_size = 11
[
env_prefix.to_string(),
"mysql_options".to_uppercase(),
"mysql".to_uppercase(),
"runtime_size".to_uppercase(),
]
.join(ENV_VAR_SEP),
Some("11"),
),
(
// http_options.addr = 127.0.0.1:24000
// http.addr = 127.0.0.1:24000
[
env_prefix.to_string(),
"http_options".to_uppercase(),
"http".to_uppercase(),
"addr".to_uppercase(),
]
.join(ENV_VAR_SEP),
Some("127.0.0.1:24000"),
),
(
// meta_client_options.metasrv_addrs = 127.0.0.1:3001,127.0.0.1:3002,127.0.0.1:3003
// meta_client.metasrv_addrs = 127.0.0.1:3001,127.0.0.1:3002,127.0.0.1:3003
[
env_prefix.to_string(),
"meta_client_options".to_uppercase(),
"meta_client".to_uppercase(),
"metasrv_addrs".to_uppercase(),
]
.join(ENV_VAR_SEP),
@@ -457,9 +424,9 @@ mod tests {
};
// Should be read from env, env > default values.
assert_eq!(fe_opts.mysql_options.as_ref().unwrap().runtime_size, 11);
assert_eq!(fe_opts.mysql.runtime_size, 11);
assert_eq!(
fe_opts.meta_client_options.unwrap().metasrv_addrs,
fe_opts.meta_client.unwrap().metasrv_addrs,
vec![
"127.0.0.1:3001".to_string(),
"127.0.0.1:3002".to_string(),
@@ -468,22 +435,13 @@ mod tests {
);
// Should be read from config file, config file > env > default values.
assert_eq!(
fe_opts.mysql_options.as_ref().unwrap().addr,
"127.0.0.1:4002"
);
assert_eq!(fe_opts.mysql.addr, "127.0.0.1:4002");
// Should be read from cli, cli > config file > env > default values.
assert_eq!(
fe_opts.http_options.as_ref().unwrap().addr,
"127.0.0.1:14000"
);
assert_eq!(fe_opts.http.addr, "127.0.0.1:14000");
// Should be default value.
assert_eq!(
fe_opts.grpc_options.as_ref().unwrap().addr,
GrpcOptions::default().addr
);
assert_eq!(fe_opts.grpc.addr, GrpcOptions::default().addr);
},
);
}


@@ -20,7 +20,7 @@ use meta_srv::bootstrap::MetaSrvInstance;
use meta_srv::metasrv::MetaSrvOptions;
use snafu::ResultExt;
use crate::error::{self, Result};
use crate::error::{self, Result, StartMetaServerSnafu};
use crate::options::{Options, TopLevelOptions};
pub struct Instance {
@@ -29,10 +29,10 @@ pub struct Instance {
impl Instance {
pub async fn start(&mut self) -> Result<()> {
self.instance
.start()
plugins::start_meta_srv_plugins(self.instance.plugins())
.await
.context(error::StartMetaServerSnafu)
.context(StartMetaServerSnafu)?;
self.instance.start().await.context(StartMetaServerSnafu)
}
pub async fn stop(&self) -> Result<()> {
@@ -93,7 +93,7 @@ struct StartCommand {
#[clap(long)]
use_memory_store: Option<bool>,
#[clap(long)]
disable_region_failover: Option<bool>,
enable_region_failover: Option<bool>,
#[clap(long)]
http_addr: Option<String>,
#[clap(long)]
@@ -140,30 +140,33 @@ impl StartCommand {
opts.use_memory_store = use_memory_store;
}
if let Some(disable_region_failover) = self.disable_region_failover {
opts.disable_region_failover = disable_region_failover;
if let Some(enable_region_failover) = self.enable_region_failover {
opts.enable_region_failover = enable_region_failover;
}
if let Some(http_addr) = &self.http_addr {
opts.http_opts.addr = http_addr.clone();
opts.http.addr = http_addr.clone();
}
if let Some(http_timeout) = self.http_timeout {
opts.http_opts.timeout = Duration::from_secs(http_timeout);
opts.http.timeout = Duration::from_secs(http_timeout);
}
// Disable dashboard in metasrv.
opts.http_opts.disable_dashboard = true;
opts.http.disable_dashboard = true;
Ok(Options::Metasrv(Box::new(opts)))
}
async fn build(self, opts: MetaSrvOptions) -> Result<Instance> {
logging::info!("MetaSrv start command: {:#?}", self);
async fn build(self, mut opts: MetaSrvOptions) -> Result<Instance> {
let plugins = plugins::setup_meta_srv_plugins(&mut opts)
.await
.context(StartMetaServerSnafu)?;
logging::info!("MetaSrv start command: {:#?}", self);
logging::info!("MetaSrv options: {:#?}", opts);
let instance = MetaSrvInstance::new(opts)
let instance = MetaSrvInstance::new(opts, plugins)
.await
.context(error::BuildMetaServerSnafu)?;
@@ -207,7 +210,6 @@ mod tests {
bind_addr = "127.0.0.1:3002"
server_addr = "127.0.0.1:3002"
store_addr = "127.0.0.1:2379"
datanode_lease_secs = 15
selector = "LeaseBased"
use_memory_store = false
@@ -229,7 +231,6 @@ mod tests {
assert_eq!("127.0.0.1:3002".to_string(), options.bind_addr);
assert_eq!("127.0.0.1:3002".to_string(), options.server_addr);
assert_eq!("127.0.0.1:2379".to_string(), options.store_addr);
assert_eq!(15, options.datanode_lease_secs);
assert_eq!(SelectorType::LeaseBased, options.selector);
assert_eq!("debug", options.logging.level.as_ref().unwrap());
assert_eq!("/tmp/greptimedb/test/logs".to_string(), options.logging.dir);
@@ -266,7 +267,7 @@ mod tests {
selector = "LeaseBased"
use_memory_store = false
[http_options]
[http]
addr = "127.0.0.1:4000"
[logging]
@@ -289,10 +290,10 @@ mod tests {
Some("127.0.0.1:13002"),
),
(
// http_options.addr = 127.0.0.1:24000
// http.addr = 127.0.0.1:24000
[
env_prefix.to_string(),
"http_options".to_uppercase(),
"http".to_uppercase(),
"addr".to_uppercase(),
]
.join(ENV_VAR_SEP),
@@ -320,7 +321,7 @@ mod tests {
assert_eq!(opts.server_addr, "127.0.0.1:3002");
// Should be read from cli, cli > config file > env > default values.
assert_eq!(opts.http_opts.addr, "127.0.0.1:14000");
assert_eq!(opts.http.addr, "127.0.0.1:14000");
// Should be default value.
assert_eq!(opts.store_addr, "127.0.0.1:2379");


@@ -12,22 +12,27 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use common_config::KvStoreConfig;
use common_telemetry::logging::LoggingOptions;
use config::{Config, Environment, File, FileFormat};
use datanode::datanode::DatanodeOptions;
use datanode::config::{DatanodeOptions, ProcedureConfig};
use frontend::frontend::FrontendOptions;
use meta_srv::metasrv::MetaSrvOptions;
use serde::{Deserialize, Serialize};
use snafu::ResultExt;
use crate::error::{LoadLayeredConfigSnafu, Result};
use crate::error::{LoadLayeredConfigSnafu, Result, SerdeJsonSnafu};
pub const ENV_VAR_SEP: &str = "__";
pub const ENV_LIST_SEP: &str = ",";
/// Options mixed up from datanode, frontend and metasrv.
pub struct MixOptions {
pub fe_opts: FrontendOptions,
pub dn_opts: DatanodeOptions,
pub data_home: String,
pub procedure: ProcedureConfig,
pub metadata_store: KvStoreConfig,
pub frontend: FrontendOptions,
pub datanode: DatanodeOptions,
pub logging: LoggingOptions,
}
@@ -89,9 +94,16 @@ impl Options {
.ignore_empty(true)
};
// Workaround: replaces `Config::try_from(&default_opts)` because `ConfigSerializer`
// cannot handle the case of an empty struct contained within an iterative structure.
// See: https://github.com/mehcode/config-rs/issues/461
let json_str = serde_json::to_string(&default_opts).context(SerdeJsonSnafu)?;
let default_config = File::from_str(&json_str, FileFormat::Json);
// Add default values and environment variables as the sources of the configuration.
let mut layered_config = Config::builder()
.add_source(Config::try_from(&default_opts).context(LoadLayeredConfigSnafu)?)
.add_source(default_config)
.add_source(env_source);
// Add config file as the source of the configuration if it is specified.
@@ -115,7 +127,7 @@ mod tests {
use std::time::Duration;
use common_test_util::temp_dir::create_named_temp_file;
use datanode::datanode::{DatanodeOptions, ObjectStoreConfig};
use datanode::config::{DatanodeOptions, ObjectStoreConfig};
use super::*;
@@ -131,9 +143,9 @@ mod tests {
mysql_addr = "127.0.0.1:4406"
mysql_runtime_size = 2
[meta_client_options]
timeout_millis = 3000
connect_timeout_millis = 5000
[meta_client]
timeout = "3s"
connect_timeout = "5s"
tcp_nodelay = true
[wal]
@@ -212,10 +224,10 @@ mod tests {
Some("/other/wal/dir"),
),
(
// meta_client_options.metasrv_addrs = 127.0.0.1:3001,127.0.0.1:3002,127.0.0.1:3003
// meta_client.metasrv_addrs = 127.0.0.1:3001,127.0.0.1:3002,127.0.0.1:3003
[
env_prefix.to_string(),
"meta_client_options".to_uppercase(),
"meta_client".to_uppercase(),
"metasrv_addrs".to_uppercase(),
]
.join(ENV_VAR_SEP),
@@ -243,7 +255,7 @@ mod tests {
Some(Duration::from_secs(42))
);
assert_eq!(
opts.meta_client_options.unwrap().metasrv_addrs,
opts.meta_client.unwrap().metasrv_addrs,
vec![
"127.0.0.1:3001".to_string(),
"127.0.0.1:3002".to_string(),

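The loader above layers three configuration sources: the defaults (serialized to JSON first, to sidestep config-rs issue #461), then environment variables, then an optional config file, with later sources overriding earlier ones. A self-contained sketch of that layering with the config, serde and serde_json crates; the struct and env prefix are illustrative, not the ones used by the CLI, and the optional file source is left out:

use config::{Config, Environment, File, FileFormat};
use serde::{Deserialize, Serialize};

#[derive(Debug, Default, Serialize, Deserialize)]
#[serde(default)]
struct AppOptions {
    http_addr: String,
    timeout_secs: u64,
}

fn load_options() -> Result<AppOptions, Box<dyn std::error::Error>> {
    let defaults = AppOptions {
        http_addr: "127.0.0.1:4000".to_string(),
        timeout_secs: 30,
    };

    // Serialize the defaults to JSON and feed them back in as the lowest-priority
    // source, instead of calling Config::try_from(&defaults).
    let json_defaults = serde_json::to_string(&defaults)?;

    let config = Config::builder()
        .add_source(File::from_str(&json_defaults, FileFormat::Json))
        // Environment variables with the APP prefix override the defaults;
        // nested keys are separated by "__", matching ENV_VAR_SEP above.
        .add_source(Environment::with_prefix("APP").separator("__"))
        .build()?;

    Ok(config.try_deserialize::<AppOptions>()?)
}

fn main() {
    println!("{:?}", load_options().unwrap());
}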

@@ -13,18 +13,28 @@
// limitations under the License.
use std::sync::Arc;
use std::{fs, path};
use catalog::kvbackend::KvBackendCatalogManager;
use catalog::CatalogManagerRef;
use clap::Parser;
use common_base::Plugins;
use common_config::{metadata_store_dir, KvStoreConfig, WalConfig};
use common_meta::cache_invalidator::DummyKvCacheInvalidator;
use common_meta::kv_backend::KvBackendRef;
use common_procedure::ProcedureManagerRef;
use common_telemetry::info;
use common_telemetry::logging::LoggingOptions;
use datanode::datanode::{Datanode, DatanodeOptions, ProcedureConfig, StorageConfig, WalConfig};
use datanode::instance::InstanceRef;
use datanode::config::{DatanodeOptions, ProcedureConfig, RegionEngineConfig, StorageConfig};
use datanode::datanode::{Datanode, DatanodeBuilder};
use datanode::region_server::RegionServer;
use file_engine::config::EngineConfig as FileEngineConfig;
use frontend::frontend::FrontendOptions;
use frontend::instance::{FrontendInstance, Instance as FeInstance};
use frontend::instance::{FrontendInstance, Instance as FeInstance, StandaloneDatanodeManager};
use frontend::service_config::{
GrpcOptions, InfluxdbOptions, MysqlOptions, OpentsdbOptions, PostgresOptions, PromStoreOptions,
};
use mito2::config::MitoConfig;
use serde::{Deserialize, Serialize};
use servers::http::HttpOptions;
use servers::tls::{TlsMode, TlsOption};
@@ -32,10 +42,10 @@ use servers::Mode;
use snafu::ResultExt;
use crate::error::{
IllegalConfigSnafu, Result, ShutdownDatanodeSnafu, ShutdownFrontendSnafu, StartDatanodeSnafu,
StartFrontendSnafu,
CreateDirSnafu, IllegalConfigSnafu, InitMetadataSnafu, Result, ShutdownDatanodeSnafu,
ShutdownFrontendSnafu, StartDatanodeSnafu, StartFrontendSnafu, StartProcedureManagerSnafu,
StopProcedureManagerSnafu,
};
use crate::frontend::load_frontend_plugins;
use crate::options::{MixOptions, Options, TopLevelOptions};
#[derive(Parser)]
@@ -45,12 +55,8 @@ pub struct Command {
}
impl Command {
pub async fn build(
self,
fe_opts: FrontendOptions,
dn_opts: DatanodeOptions,
) -> Result<Instance> {
self.subcmd.build(fe_opts, dn_opts).await
pub async fn build(self, opts: MixOptions) -> Result<Instance> {
self.subcmd.build(opts).await
}
pub fn load_options(&self, top_level_options: TopLevelOptions) -> Result<Options> {
@@ -64,9 +70,9 @@ enum SubCommand {
}
impl SubCommand {
async fn build(self, fe_opts: FrontendOptions, dn_opts: DatanodeOptions) -> Result<Instance> {
async fn build(self, opts: MixOptions) -> Result<Instance> {
match self {
SubCommand::Start(cmd) => cmd.build(fe_opts, dn_opts).await,
SubCommand::Start(cmd) => cmd.build(opts).await,
}
}
@@ -81,38 +87,46 @@ impl SubCommand {
#[serde(default)]
pub struct StandaloneOptions {
pub mode: Mode,
pub enable_memory_catalog: bool,
pub enable_telemetry: bool,
pub http_options: Option<HttpOptions>,
pub grpc_options: Option<GrpcOptions>,
pub mysql_options: Option<MysqlOptions>,
pub postgres_options: Option<PostgresOptions>,
pub opentsdb_options: Option<OpentsdbOptions>,
pub influxdb_options: Option<InfluxdbOptions>,
pub prom_store_options: Option<PromStoreOptions>,
pub http: HttpOptions,
pub grpc: GrpcOptions,
pub mysql: MysqlOptions,
pub postgres: PostgresOptions,
pub opentsdb: OpentsdbOptions,
pub influxdb: InfluxdbOptions,
pub prom_store: PromStoreOptions,
pub wal: WalConfig,
pub storage: StorageConfig,
pub metadata_store: KvStoreConfig,
pub procedure: ProcedureConfig,
pub logging: LoggingOptions,
pub user_provider: Option<String>,
/// Options for different store engines.
pub region_engine: Vec<RegionEngineConfig>,
}
impl Default for StandaloneOptions {
fn default() -> Self {
Self {
mode: Mode::Standalone,
enable_memory_catalog: false,
enable_telemetry: true,
http_options: Some(HttpOptions::default()),
grpc_options: Some(GrpcOptions::default()),
mysql_options: Some(MysqlOptions::default()),
postgres_options: Some(PostgresOptions::default()),
opentsdb_options: Some(OpentsdbOptions::default()),
influxdb_options: Some(InfluxdbOptions::default()),
prom_store_options: Some(PromStoreOptions::default()),
http: HttpOptions::default(),
grpc: GrpcOptions::default(),
mysql: MysqlOptions::default(),
postgres: PostgresOptions::default(),
opentsdb: OpentsdbOptions::default(),
influxdb: InfluxdbOptions::default(),
prom_store: PromStoreOptions::default(),
wal: WalConfig::default(),
storage: StorageConfig::default(),
metadata_store: KvStoreConfig::default(),
procedure: ProcedureConfig::default(),
logging: LoggingOptions::default(),
user_provider: None,
region_engine: vec![
RegionEngineConfig::Mito(MitoConfig::default()),
RegionEngineConfig::File(FileEngineConfig::default()),
],
}
}
}
@@ -121,26 +135,27 @@ impl StandaloneOptions {
fn frontend_options(self) -> FrontendOptions {
FrontendOptions {
mode: self.mode,
http_options: self.http_options,
grpc_options: self.grpc_options,
mysql_options: self.mysql_options,
postgres_options: self.postgres_options,
opentsdb_options: self.opentsdb_options,
influxdb_options: self.influxdb_options,
prom_store_options: self.prom_store_options,
meta_client_options: None,
http: self.http,
grpc: self.grpc,
mysql: self.mysql,
postgres: self.postgres,
opentsdb: self.opentsdb,
influxdb: self.influxdb,
prom_store: self.prom_store,
meta_client: None,
logging: self.logging,
user_provider: self.user_provider,
..Default::default()
}
}
fn datanode_options(self) -> DatanodeOptions {
DatanodeOptions {
enable_memory_catalog: self.enable_memory_catalog,
node_id: Some(0),
enable_telemetry: self.enable_telemetry,
wal: self.wal,
storage: self.storage,
procedure: self.procedure,
region_engine: self.region_engine,
..Default::default()
}
}
@@ -149,17 +164,20 @@ impl StandaloneOptions {
pub struct Instance {
datanode: Datanode,
frontend: FeInstance,
procedure_manager: ProcedureManagerRef,
}
impl Instance {
pub async fn start(&mut self) -> Result<()> {
// Start datanode instance before starting services, to avoid requests come in before internal components are started.
self.datanode
.start_instance()
.await
.context(StartDatanodeSnafu)?;
self.datanode.start().await.context(StartDatanodeSnafu)?;
info!("Datanode instance started");
self.procedure_manager
.start()
.await
.context(StartProcedureManagerSnafu)?;
self.frontend.start().await.context(StartFrontendSnafu)?;
Ok(())
}
@@ -170,8 +188,13 @@ impl Instance {
.await
.context(ShutdownFrontendSnafu)?;
self.procedure_manager
.stop()
.await
.context(StopProcedureManagerSnafu)?;
self.datanode
.shutdown_instance()
.shutdown()
.await
.context(ShutdownDatanodeSnafu)?;
info!("Datanode instance stopped.");
@@ -196,8 +219,6 @@ struct StartCommand {
influxdb_enable: bool,
#[clap(short, long)]
config_file: Option<String>,
#[clap(short = 'm', long = "memory-catalog")]
enable_memory_catalog: bool,
#[clap(long)]
tls_mode: Option<TlsMode>,
#[clap(long)]
@@ -218,8 +239,6 @@ impl StartCommand {
None,
)?;
opts.enable_memory_catalog = self.enable_memory_catalog;
opts.mode = Mode::Standalone;
if let Some(dir) = top_level_options.log_dir {
@@ -237,9 +256,7 @@ impl StartCommand {
);
if let Some(addr) = &self.http_addr {
if let Some(http_opts) = &mut opts.http_options {
http_opts.addr = addr.clone()
}
opts.http.addr = addr.clone()
}
if let Some(addr) = &self.rpc_addr {
@@ -253,48 +270,58 @@ impl StartCommand {
}
.fail();
}
if let Some(grpc_opts) = &mut opts.grpc_options {
grpc_opts.addr = addr.clone()
}
opts.grpc.addr = addr.clone()
}
if let Some(addr) = &self.mysql_addr {
if let Some(mysql_opts) = &mut opts.mysql_options {
mysql_opts.addr = addr.clone();
mysql_opts.tls = tls_opts.clone();
}
opts.mysql.enable = true;
opts.mysql.addr = addr.clone();
opts.mysql.tls = tls_opts.clone();
}
if let Some(addr) = &self.postgres_addr {
if let Some(postgres_opts) = &mut opts.postgres_options {
postgres_opts.addr = addr.clone();
postgres_opts.tls = tls_opts;
}
opts.postgres.enable = true;
opts.postgres.addr = addr.clone();
opts.postgres.tls = tls_opts;
}
if let Some(addr) = &self.opentsdb_addr {
if let Some(opentsdb_addr) = &mut opts.opentsdb_options {
opentsdb_addr.addr = addr.clone();
}
opts.opentsdb.enable = true;
opts.opentsdb.addr = addr.clone();
}
if self.influxdb_enable {
opts.influxdb_options = Some(InfluxdbOptions { enable: true });
opts.influxdb.enable = self.influxdb_enable;
}
let fe_opts = opts.clone().frontend_options();
opts.user_provider = self.user_provider.clone();
let metadata_store = opts.metadata_store.clone();
let procedure = opts.procedure.clone();
let frontend = opts.clone().frontend_options();
let logging = opts.logging.clone();
let dn_opts = opts.datanode_options();
let datanode = opts.datanode_options();
Ok(Options::Standalone(Box::new(MixOptions {
fe_opts,
dn_opts,
procedure,
metadata_store,
data_home: datanode.storage.data_home.to_string(),
frontend,
datanode,
logging,
})))
}
async fn build(self, fe_opts: FrontendOptions, dn_opts: DatanodeOptions) -> Result<Instance> {
let plugins = Arc::new(load_frontend_plugins(&self.user_provider)?);
#[allow(unreachable_code)]
#[allow(unused_variables)]
#[allow(clippy::diverging_sub_expression)]
async fn build(self, opts: MixOptions) -> Result<Instance> {
let mut fe_opts = opts.frontend;
let fe_plugins = plugins::setup_frontend_plugins(&mut fe_opts)
.await
.context(StartFrontendSnafu)?;
let dn_opts = opts.datanode;
info!("Standalone start command: {:#?}", self);
info!(
@@ -302,30 +329,79 @@ impl StartCommand {
fe_opts, dn_opts
);
let datanode = Datanode::new(dn_opts.clone(), Default::default())
.await
.context(StartDatanodeSnafu)?;
// Ensure the data_home directory exists.
fs::create_dir_all(path::Path::new(&opts.data_home)).context(CreateDirSnafu {
dir: &opts.data_home,
})?;
let mut frontend = build_frontend(plugins.clone(), datanode.get_instance()).await?;
let metadata_dir = metadata_store_dir(&opts.data_home);
let (kv_store, procedure_manager) = FeInstance::try_build_standalone_components(
metadata_dir,
opts.metadata_store,
opts.procedure,
)
.await
.context(StartFrontendSnafu)?;
let datanode =
DatanodeBuilder::new(dn_opts.clone(), Some(kv_store.clone()), Default::default())
.build()
.await
.context(StartDatanodeSnafu)?;
let region_server = datanode.region_server();
let catalog_manager = KvBackendCatalogManager::new(
kv_store.clone(),
Arc::new(DummyKvCacheInvalidator),
Arc::new(StandaloneDatanodeManager(region_server.clone())),
);
catalog_manager
.table_metadata_manager_ref()
.init()
.await
.context(InitMetadataSnafu)?;
// TODO: build frontend instance like in distributed mode
let mut frontend = build_frontend(
fe_plugins,
kv_store,
procedure_manager.clone(),
catalog_manager,
region_server,
)
.await?;
frontend
.build_servers(&fe_opts)
.await
.context(StartFrontendSnafu)?;
Ok(Instance { datanode, frontend })
Ok(Instance {
datanode,
frontend,
procedure_manager,
})
}
}
/// Build frontend instance in standalone mode
async fn build_frontend(
plugins: Arc<Plugins>,
datanode_instance: InstanceRef,
plugins: Plugins,
kv_store: KvBackendRef,
procedure_manager: ProcedureManagerRef,
catalog_manager: CatalogManagerRef,
region_server: RegionServer,
) -> Result<FeInstance> {
let mut frontend_instance = FeInstance::try_new_standalone(datanode_instance.clone())
.await
.context(StartFrontendSnafu)?;
frontend_instance.set_plugins(plugins.clone());
let frontend_instance = FeInstance::try_new_standalone(
kv_store,
procedure_manager,
catalog_manager,
plugins,
region_server,
)
.await
.context(StartFrontendSnafu)?;
Ok(frontend_instance)
}
@@ -345,13 +421,13 @@ mod tests {
#[tokio::test]
async fn test_try_from_start_command_to_anymap() {
let command = StartCommand {
let mut fe_opts = FrontendOptions {
user_provider: Some("static_user_provider:cmd:test=test".to_string()),
..Default::default()
};
let plugins = load_frontend_plugins(&command.user_provider);
let plugins = plugins.unwrap();
let plugins = plugins::setup_frontend_plugins(&mut fe_opts).await.unwrap();
let provider = plugins.get::<UserProviderRef>().unwrap();
let result = provider
.authenticate(
@@ -399,9 +475,9 @@ mod tests {
checkpoint_margin = 9
gc_duration = '7s'
[http_options]
[http]
addr = "127.0.0.1:4000"
timeout = "30s"
timeout = "33s"
body_limit = "128MB"
[logging]
@@ -419,40 +495,24 @@ mod tests {
else {
unreachable!()
};
let fe_opts = options.fe_opts;
let dn_opts = options.dn_opts;
let fe_opts = options.frontend;
let dn_opts = options.datanode;
let logging_opts = options.logging;
assert_eq!(Mode::Standalone, fe_opts.mode);
assert_eq!(
"127.0.0.1:4000".to_string(),
fe_opts.http_options.as_ref().unwrap().addr
);
assert_eq!(
Duration::from_secs(30),
fe_opts.http_options.as_ref().unwrap().timeout
);
assert_eq!(
ReadableSize::mb(128),
fe_opts.http_options.as_ref().unwrap().body_limit
);
assert_eq!(
"127.0.0.1:4001".to_string(),
fe_opts.grpc_options.unwrap().addr
);
assert_eq!(
"127.0.0.1:4002",
fe_opts.mysql_options.as_ref().unwrap().addr
);
assert_eq!(2, fe_opts.mysql_options.as_ref().unwrap().runtime_size);
assert_eq!(
None,
fe_opts.mysql_options.as_ref().unwrap().reject_no_database
);
assert!(fe_opts.influxdb_options.as_ref().unwrap().enable);
assert_eq!("127.0.0.1:4000".to_string(), fe_opts.http.addr);
assert_eq!(Duration::from_secs(33), fe_opts.http.timeout);
assert_eq!(ReadableSize::mb(128), fe_opts.http.body_limit);
assert_eq!("127.0.0.1:4001".to_string(), fe_opts.grpc.addr);
assert!(fe_opts.mysql.enable);
assert_eq!("127.0.0.1:4002", fe_opts.mysql.addr);
assert_eq!(2, fe_opts.mysql.runtime_size);
assert_eq!(None, fe_opts.mysql.reject_no_database);
assert!(fe_opts.influxdb.enable);
assert_eq!("/tmp/greptimedb/test/wal", dn_opts.wal.dir.unwrap());
match &dn_opts.storage.store {
datanode::datanode::ObjectStoreConfig::S3(s3_config) => {
datanode::config::ObjectStoreConfig::S3(s3_config) => {
assert_eq!(
"Secret([REDACTED alloc::string::String])".to_string(),
format!("{:?}", s3_config.access_key_id)
@@ -494,7 +554,7 @@ mod tests {
let toml_str = r#"
mode = "standalone"
[http_options]
[http]
addr = "127.0.0.1:4000"
[logging]
@@ -526,10 +586,10 @@ mod tests {
Some("info"),
),
(
// http_options.addr = 127.0.0.1:24000
// http.addr = 127.0.0.1:24000
[
env_prefix.to_string(),
"http_options".to_uppercase(),
"http".to_uppercase(),
"addr".to_uppercase(),
]
.join(ENV_VAR_SEP),
@@ -560,21 +620,33 @@ mod tests {
assert_eq!(opts.logging.level.as_ref().unwrap(), "debug");
// Should be read from cli, cli > config file > env > default values.
assert_eq!(
opts.fe_opts.http_options.as_ref().unwrap().addr,
"127.0.0.1:14000"
);
assert_eq!(
ReadableSize::mb(64),
opts.fe_opts.http_options.as_ref().unwrap().body_limit
);
assert_eq!(opts.frontend.http.addr, "127.0.0.1:14000");
assert_eq!(ReadableSize::mb(64), opts.frontend.http.body_limit);
// Should be default value.
assert_eq!(
opts.fe_opts.grpc_options.unwrap().addr,
GrpcOptions::default().addr
);
assert_eq!(opts.frontend.grpc.addr, GrpcOptions::default().addr);
},
);
}
#[test]
fn test_load_default_standalone_options() {
let options: StandaloneOptions =
Options::load_layered_options(None, "GREPTIMEDB_FRONTEND", None).unwrap();
let default_options = StandaloneOptions::default();
assert_eq!(options.mode, default_options.mode);
assert_eq!(options.enable_telemetry, default_options.enable_telemetry);
assert_eq!(options.http, default_options.http);
assert_eq!(options.grpc, default_options.grpc);
assert_eq!(options.mysql, default_options.mysql);
assert_eq!(options.postgres, default_options.postgres);
assert_eq!(options.opentsdb, default_options.opentsdb);
assert_eq!(options.influxdb, default_options.influxdb);
assert_eq!(options.prom_store, default_options.prom_store);
assert_eq!(options.wal, default_options.wal);
assert_eq!(options.metadata_store, default_options.metadata_store);
assert_eq!(options.procedure, default_options.procedure);
assert_eq!(options.logging, default_options.logging);
assert_eq!(options.region_engine, default_options.region_engine);
}
}

View File

@@ -9,6 +9,7 @@ anymap = "1.0.0-beta.2"
bitvec = "1.0"
bytes = { version = "1.1", features = ["serde"] }
common-error = { workspace = true }
common-macro = { workspace = true }
paste = "1.0"
serde = { version = "1.0", features = ["derive"] }
snafu.workspace = true

View File

@@ -17,11 +17,13 @@ use std::io::{Read, Write};
use bytes::{Buf, BufMut, BytesMut};
use common_error::ext::ErrorExt;
use common_macro::stack_trace_debug;
use paste::paste;
use snafu::{ensure, Location, ResultExt, Snafu};
#[derive(Debug, Snafu)]
#[derive(Snafu)]
#[snafu(visibility(pub))]
#[stack_trace_debug]
pub enum Error {
#[snafu(display(
"Destination buffer overflow, src_len: {}, dst_len: {}",
@@ -37,9 +39,10 @@ pub enum Error {
#[snafu(display("Buffer underflow"))]
Underflow { location: Location },
#[snafu(display("IO operation reach EOF, source: {}", source))]
#[snafu(display("IO operation reach EOF"))]
Eof {
source: std::io::Error,
#[snafu(source)]
error: std::io::Error,
location: Location,
},
}
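The `Eof` variant above shows the error-refactoring pattern repeated throughout these commits: the source error is no longer interpolated into the display message, and it now lives in a field named `error` tagged with `#[snafu(source)]`, while `#[stack_trace_debug]` replaces the derived `Debug`. A minimal stand-alone sketch of the same snafu pattern, assuming snafu 0.7 and using a derived `Debug` in place of the project's `#[stack_trace_debug]` macro:

```rust
use snafu::{Location, ResultExt, Snafu};

#[derive(Debug, Snafu)]
#[snafu(visibility(pub))]
pub enum Error {
    // The message no longer embeds the source; snafu keeps the error chain via
    // the field tagged `#[snafu(source)]`, even though it is named `error`.
    #[snafu(display("Reached EOF while reading"))]
    Eof {
        #[snafu(source)]
        error: std::io::Error,
        location: Location,
    },
}

fn read_exact(buf: &mut [u8], mut reader: impl std::io::Read) -> Result<(), Error> {
    reader.read_exact(buf).context(EofSnafu)
}
```

With `.context(EofSnafu)` the `io::Error` is still captured as the variant's source even though the field is called `error`, so `source()` chains keep working while the display messages stay short.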

View File

@@ -15,7 +15,6 @@
pub mod bit_vec;
pub mod buffer;
pub mod bytes;
pub mod paths;
#[allow(clippy::all)]
pub mod readable_size;
@@ -24,6 +23,8 @@ use std::sync::{Arc, Mutex, MutexGuard};
pub use bit_vec::BitVec;
/// [`Plugins`] is a wrapper of Arc contents.
/// Make it Cloneable and we can treat it like an Arc struct.
#[derive(Default, Clone)]
pub struct Plugins {
inner: Arc<Mutex<anymap::Map<dyn Any + Send + Sync>>>,

View File

@@ -14,10 +14,9 @@
// See the License for the specific language governing permissions and
// limitations under the License.
// This file is copied from https://github.com/tikv/raft-engine/blob/8dd2a39f359ff16f5295f35343f626e0c10132fa/src/util.rs without any modification.
// This file is copied from https://github.com/tikv/raft-engine/blob/8dd2a39f359ff16f5295f35343f626e0c10132fa/src/util.rs
use std::fmt;
use std::fmt::{Display, Write};
use std::fmt::{self, Debug, Display, Write};
use std::ops::{Div, Mul};
use std::str::FromStr;
@@ -34,7 +33,7 @@ pub const GIB: u64 = MIB * BINARY_DATA_MAGNITUDE;
pub const TIB: u64 = GIB * BINARY_DATA_MAGNITUDE;
pub const PIB: u64 = TIB * BINARY_DATA_MAGNITUDE;
#[derive(Clone, Debug, Copy, PartialEq, Eq, PartialOrd)]
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd)]
pub struct ReadableSize(pub u64);
impl ReadableSize {
@@ -155,6 +154,12 @@ impl FromStr for ReadableSize {
}
}
impl Debug for ReadableSize {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", self)
}
}
impl Display for ReadableSize {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
if self.0 >= PIB {
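The new `Debug` impl simply forwards to `Display`, so `{:?}` output (for example, in the derived `Debug` of config structs that embed `ReadableSize`) shows human-readable size strings instead of the raw byte count. A minimal sketch of the same delegation pattern on a stand-in newtype (not the project's `ReadableSize`):

```rust
use std::fmt::{self, Debug, Display};

/// Stand-in for a size newtype whose Debug output should match Display.
#[derive(Clone, Copy, PartialEq, Eq)]
struct Size(u64);

impl Display for Size {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        if self.0 >= 1024 * 1024 {
            write!(f, "{}MiB", self.0 / (1024 * 1024))
        } else if self.0 >= 1024 {
            write!(f, "{}KiB", self.0 / 1024)
        } else {
            write!(f, "{}B", self.0)
        }
    }
}

// Delegating Debug to Display keeps `{:?}` output readable when Size is
// embedded in structs that derive Debug (e.g. option structs).
impl Debug for Size {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{self}")
    }
}

fn main() {
    assert_eq!(format!("{:?}", Size(256 * 1024 * 1024)), "256MiB");
}
```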

View File

@@ -6,6 +6,7 @@ license.workspace = true
[dependencies]
common-error = { workspace = true }
common-macro = { workspace = true }
serde.workspace = true
serde_json = "1.0"
snafu = { version = "0.7", features = ["backtraces"] }

View File

@@ -41,7 +41,7 @@ pub fn default_engine() -> &'static str {
MITO_ENGINE
}
pub const IMMUTABLE_FILE_ENGINE: &str = "file";
pub const FILE_ENGINE: &str = "file";
pub const SEMANTIC_TYPE_PRIMARY_KEY: &str = "TAG";
pub const SEMANTIC_TYPE_FIELD: &str = "FIELD";

View File

@@ -16,34 +16,24 @@ use std::any::Any;
use common_error::ext::ErrorExt;
use common_error::status_code::StatusCode;
use common_macro::stack_trace_debug;
use snafu::{Location, Snafu};
#[derive(Debug, Snafu)]
#[derive(Snafu)]
#[snafu(visibility(pub))]
#[stack_trace_debug]
pub enum Error {
#[snafu(display("Invalid catalog info: {}", key))]
InvalidCatalog { key: String, location: Location },
#[snafu(display("Failed to deserialize catalog entry value: {}", raw))]
DeserializeCatalogEntryValue {
raw: String,
#[snafu(display("Invalid full table name: {}", table_name))]
InvalidFullTableName {
table_name: String,
location: Location,
source: serde_json::error::Error,
},
#[snafu(display("Failed to serialize catalog entry value"))]
SerializeCatalogEntryValue {
location: Location,
source: serde_json::error::Error,
},
}
impl ErrorExt for Error {
fn status_code(&self) -> StatusCode {
match self {
Error::InvalidCatalog { .. }
| Error::DeserializeCatalogEntryValue { .. }
| Error::SerializeCatalogEntryValue { .. } => StatusCode::Unexpected,
Error::InvalidFullTableName { .. } => StatusCode::Unexpected,
}
}

View File

@@ -0,0 +1,10 @@
[package]
name = "common-config"
version.workspace = true
edition.workspace = true
license.workspace = true
[dependencies]
common-base.workspace = true
humantime-serde.workspace = true
serde.workspace = true

View File

@@ -0,0 +1,73 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::time::Duration;
use common_base::readable_size::ReadableSize;
use serde::{Deserialize, Serialize};
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
#[serde(default)]
pub struct WalConfig {
// wal directory
pub dir: Option<String>,
// wal file size in bytes
pub file_size: ReadableSize,
// wal purge threshold in bytes
pub purge_threshold: ReadableSize,
// purge interval in seconds
#[serde(with = "humantime_serde")]
pub purge_interval: Duration,
// read batch size
pub read_batch_size: usize,
// whether to sync log file after every write
pub sync_write: bool,
}
impl Default for WalConfig {
fn default() -> Self {
Self {
dir: None,
file_size: ReadableSize::mb(256), // log file size 256MB
purge_threshold: ReadableSize::gb(4), // purge threshold 4GB
purge_interval: Duration::from_secs(600),
read_batch_size: 128,
sync_write: false,
}
}
}
pub fn metadata_store_dir(store_dir: &str) -> String {
format!("{store_dir}/metadata")
}
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
#[serde(default)]
pub struct KvStoreConfig {
// Kv file size in bytes
pub file_size: ReadableSize,
// Kv purge threshold in bytes
pub purge_threshold: ReadableSize,
}
impl Default for KvStoreConfig {
fn default() -> Self {
Self {
// log file size 256MB
file_size: ReadableSize::mb(256),
// purge threshold 4GB
purge_threshold: ReadableSize::gb(4),
}
}
}
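Because both structs use `#[serde(default)]`, humantime strings for durations, and `ReadableSize` strings for sizes, a partial TOML snippet is enough to override individual fields. A hedged usage sketch, assuming the `toml` crate is available, that the crate exposes the struct as `common_config::WalConfig`, and that `ReadableSize` deserializes from strings such as "1GB" (as the CLI tests elsewhere in this diff suggest):

```rust
use common_config::WalConfig;

fn main() {
    // Only the overridden keys need to be present; the rest fall back to Default.
    let toml_str = r#"
        dir = "/tmp/greptimedb/wal"
        purge_threshold = "1GB"
        purge_interval = "10m"
    "#;

    let wal: WalConfig = toml::from_str(toml_str).expect("valid wal config");
    assert_eq!(wal.dir.as_deref(), Some("/tmp/greptimedb/wal"));
    // Fields not mentioned in the TOML keep their defaults, e.g. read_batch_size = 128.
    assert_eq!(wal.read_batch_size, 128);
}
```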

View File

@@ -18,14 +18,17 @@ async-compression = { version = "0.3", features = [
async-trait.workspace = true
bytes = "1.1"
common-error = { workspace = true }
common-macro = { workspace = true }
common-runtime = { workspace = true }
datafusion.workspace = true
derive_builder.workspace = true
futures.workspace = true
lazy_static.workspace = true
object-store = { workspace = true }
orc-rust = "0.2"
paste = "1.0"
regex = "1.7"
serde.workspace = true
snafu.workspace = true
strum.workspace = true
tokio-util.workspace = true

View File

@@ -20,12 +20,13 @@ use async_compression::tokio::bufread::{BzDecoder, GzipDecoder, XzDecoder, ZstdD
use async_compression::tokio::write;
use bytes::Bytes;
use futures::Stream;
use serde::{Deserialize, Serialize};
use strum::EnumIter;
use tokio::io::{AsyncRead, AsyncWriteExt, BufReader};
use tokio_util::io::{ReaderStream, StreamReader};
use crate::error::{self, Error, Result};
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EnumIter)]
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EnumIter, Serialize, Deserialize)]
pub enum CompressionType {
/// Gzip-ed file
Gzip,
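Adding `Serialize`/`Deserialize` to this unit-only enum lets it appear directly inside config structs; serde represents such variants by their names. A toy stand-alone sketch (not the actual `CompressionType`), assuming `serde_json` is available for the round trip:

```rust
use serde::{Deserialize, Serialize};

// Toy stand-in for a unit-only enum embedded in a config struct.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
enum Codec {
    Gzip,
    Zstd,
}

#[derive(Debug, Serialize, Deserialize, PartialEq)]
struct ExportConfig {
    codec: Codec,
}

fn main() {
    // Unit variants round-trip as their names, e.g. {"codec":"Gzip"}.
    let cfg = ExportConfig { codec: Codec::Gzip };
    let json = serde_json::to_string(&cfg).unwrap();
    assert_eq!(json, r#"{"codec":"Gzip"}"#);
    assert_eq!(serde_json::from_str::<ExportConfig>(&json).unwrap(), cfg);
}
```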

View File

@@ -17,12 +17,14 @@ use std::any::Any;
use arrow_schema::ArrowError;
use common_error::ext::ErrorExt;
use common_error::status_code::StatusCode;
use common_macro::stack_trace_debug;
use datafusion::parquet::errors::ParquetError;
use snafu::{Location, Snafu};
use url::ParseError;
#[derive(Debug, Snafu)]
#[derive(Snafu)]
#[snafu(visibility(pub))]
#[stack_trace_debug]
pub enum Error {
#[snafu(display("Unsupported compression type: {}", compression_type))]
UnsupportedCompressionType {
@@ -30,10 +32,11 @@ pub enum Error {
location: Location,
},
#[snafu(display("Unsupported backend protocol: {}", protocol))]
#[snafu(display("Unsupported backend protocol: {}, url: {}", protocol, url))]
UnsupportedBackendProtocol {
protocol: String,
location: Location,
url: String,
},
#[snafu(display("Unsupported format protocol: {}", format))]
@@ -42,95 +45,109 @@ pub enum Error {
#[snafu(display("empty host: {}", url))]
EmptyHostPath { url: String, location: Location },
#[snafu(display("Invalid url: {}, error :{}", url, source))]
#[snafu(display("Invalid url: {}", url))]
InvalidUrl {
url: String,
source: ParseError,
#[snafu(source)]
error: ParseError,
location: Location,
},
#[snafu(display("Failed to build backend, source: {}", source))]
#[snafu(display("Failed to build backend"))]
BuildBackend {
source: object_store::Error,
#[snafu(source)]
error: object_store::Error,
location: Location,
},
#[snafu(display("Failed to build orc reader, source: {}", source))]
#[snafu(display("Failed to build orc reader"))]
OrcReader {
location: Location,
source: orc_rust::error::Error,
#[snafu(source)]
error: orc_rust::error::Error,
},
#[snafu(display("Failed to read object from path: {}, source: {}", path, source))]
#[snafu(display("Failed to read object from path: {}", path))]
ReadObject {
path: String,
location: Location,
source: object_store::Error,
#[snafu(source)]
error: object_store::Error,
},
#[snafu(display("Failed to write object to path: {}, source: {}", path, source))]
#[snafu(display("Failed to write object to path: {}", path))]
WriteObject {
path: String,
location: Location,
source: object_store::Error,
#[snafu(source)]
error: object_store::Error,
},
#[snafu(display("Failed to write: {}", source))]
#[snafu(display("Failed to write"))]
AsyncWrite {
source: std::io::Error,
#[snafu(source)]
error: std::io::Error,
location: Location,
},
#[snafu(display("Failed to write record batch: {}", source))]
#[snafu(display("Failed to write record batch"))]
WriteRecordBatch {
location: Location,
source: ArrowError,
#[snafu(source)]
error: ArrowError,
},
#[snafu(display("Failed to encode record batch: {}", source))]
#[snafu(display("Failed to encode record batch"))]
EncodeRecordBatch {
location: Location,
source: ParquetError,
#[snafu(source)]
error: ParquetError,
},
#[snafu(display("Failed to read record batch: {}", source))]
#[snafu(display("Failed to read record batch"))]
ReadRecordBatch {
location: Location,
source: datafusion::error::DataFusionError,
#[snafu(source)]
error: datafusion::error::DataFusionError,
},
#[snafu(display("Failed to read parquet source: {}", source))]
#[snafu(display("Failed to read parquet"))]
ReadParquetSnafu {
location: Location,
source: datafusion::parquet::errors::ParquetError,
#[snafu(source)]
error: datafusion::parquet::errors::ParquetError,
},
#[snafu(display("Failed to convert parquet to schema: {}", source))]
#[snafu(display("Failed to convert parquet to schema"))]
ParquetToSchema {
location: Location,
source: datafusion::parquet::errors::ParquetError,
#[snafu(source)]
error: datafusion::parquet::errors::ParquetError,
},
#[snafu(display("Failed to infer schema from file, source: {}", source))]
#[snafu(display("Failed to infer schema from file"))]
InferSchema {
location: Location,
source: arrow_schema::ArrowError,
#[snafu(source)]
error: arrow_schema::ArrowError,
},
#[snafu(display("Failed to list object in path: {}, source: {}", path, source))]
#[snafu(display("Failed to list object in path: {}", path))]
ListObjects {
path: String,
location: Location,
source: object_store::Error,
#[snafu(source)]
error: object_store::Error,
},
#[snafu(display("Invalid connection: {}", msg))]
InvalidConnection { msg: String, location: Location },
#[snafu(display("Failed to join handle: {}", source))]
#[snafu(display("Failed to join handle"))]
JoinHandle {
location: Location,
source: tokio::task::JoinError,
#[snafu(source)]
error: tokio::task::JoinError,
},
#[snafu(display("Failed to parse format {} with value: {}", key, value))]
@@ -140,9 +157,10 @@ pub enum Error {
location: Location,
},
#[snafu(display("Failed to merge schema: {}", source))]
#[snafu(display("Failed to merge schema"))]
MergeSchema {
source: arrow_schema::ArrowError,
#[snafu(source)]
error: arrow_schema::ArrowError,
location: Location,
},

Some files were not shown because too many files have changed in this diff Show More