Compare commits

..

78 Commits

Author SHA1 Message Date
Lei, HUANG
c9c2b3c91f fix: revert memtable pk rb cache to rwlock (#2565)
* fix: revert memtable pk rb cache to rwlock

* feat: refine
2023-10-10 20:51:05 +08:00
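
The revert above moves the memtable's primary-key record-batch cache back from ArcSwap to a lock-guarded map. A minimal sketch of the RwLock pattern it restores, with simplified key and value types standing in for the real mito structures:

```rust
use std::collections::HashMap;
use std::sync::{Arc, RwLock};

/// Read-mostly cache guarded by an RwLock (illustrative types only).
#[derive(Default)]
struct PkCache {
    inner: RwLock<HashMap<Vec<u8>, Arc<Vec<u8>>>>,
}

impl PkCache {
    /// Many readers can hold the read lock concurrently.
    fn get(&self, pk: &[u8]) -> Option<Arc<Vec<u8>>> {
        self.inner.read().unwrap().get(pk).cloned()
    }

    /// Writers take the exclusive lock briefly to insert.
    fn insert(&self, pk: Vec<u8>, decoded: Arc<Vec<u8>>) {
        self.inner.write().unwrap().insert(pk, decoded);
    }
}

fn main() {
    let cache = PkCache::default();
    cache.insert(b"host=a".to_vec(), Arc::new(vec![1, 2, 3]));
    assert!(cache.get(b"host=a").is_some());
}
```
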
Yingwen
7f75190fce chore: update Cargo.lock (#2564) 2023-10-10 16:28:50 +08:00
Yingwen
0a394c73a2 chore: bump version to 0.4.0 (#2563) 2023-10-10 16:16:15 +08:00
JeremyHi
ae95f23e05 feat: add metrics for region server (#2552)
* feat: add metrics for region server

* fix: add comment and remove unused code
2023-10-10 07:40:16 +00:00
Lei, HUANG
6b39f5923d feat: add compaction metrics (#2560)
* feat: add compaction metrics

* feat: add compaction request total count

* fix: CR comments
2023-10-10 07:38:39 +00:00
JeremyHi
ed725d030f fix: support multi addrs while using etcd (#2562)
fix: support multi addrs while using etcd
2023-10-10 07:30:48 +00:00
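
For the multi-address fix above, a hedged sketch of connecting with the etcd-client crate the workspace pins; the endpoint addresses are placeholders:

```rust
use etcd_client::Client;

#[tokio::main]
async fn main() -> Result<(), etcd_client::Error> {
    // Pass every etcd member address; the client can then fail over
    // instead of being pinned to a single node.
    let endpoints = ["10.0.0.1:2379", "10.0.0.2:2379", "10.0.0.3:2379"];
    let mut client = Client::connect(endpoints, None).await?;
    client.put("greptime/sample-key", "value", None).await?;
    Ok(())
}
```
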
Wei
4fe7e162af fix: human_time mismatch (#2558)
* fix: human_time mismatch.

* fix: add comment
2023-10-10 07:22:12 +00:00
Yingwen
8a5ef826b9 fix(mito): Do not write to memtables if writing wal fails (#2561)
* feat: add writes total metrics

* fix: don't write memtable if write ctx has failed

* feat: write rows metrics
2023-10-10 06:55:57 +00:00
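
The heart of the fix above is ordering: rows may reach the memtable only after the WAL append succeeds, so a failed WAL write leaves no trace in memory. A schematic sketch with made-up types:

```rust
struct Wal;

impl Wal {
    /// Stands in for appending to the real log store; failures propagate.
    fn append(&mut self, _rows: &[String]) -> Result<(), String> {
        Ok(())
    }
}

fn handle_write(wal: &mut Wal, memtable: &mut Vec<String>, rows: Vec<String>) -> Result<(), String> {
    // 1. Durably log the mutation first; bail out on failure.
    wal.append(&rows)?;
    // 2. Apply to the in-memory state only after the WAL accepted it.
    memtable.extend(rows);
    Ok(())
}

fn main() {
    let (mut wal, mut memtable) = (Wal, Vec::new());
    handle_write(&mut wal, &mut memtable, vec!["row-1".into()]).unwrap();
    assert_eq!(memtable.len(), 1);
}
```
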
Ruihang Xia
07be50403e feat: add basic metrics to query (#2559)
* add metrics to merge scan

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* count series in promql

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* tweak label name

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* tweak label name

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* document metric label

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-10-10 06:55:25 +00:00
Lei, HUANG
8bdef9a348 feat: memtable filter push down (#2539)
* feat: memtable support filter pushdown to prune primary keys

* fix: switch to next time series when pk not selected

* fix: allow predicate evaluation failure

* fix: some clippy warnings

* fix: panic when no primary key in schema

* feat: cache decoded record batch for primary key

* refactor: use arcswap instead of rwlock

* fix: format toml
2023-10-10 04:03:10 +00:00
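
A simplified sketch of the pruning behavior listed above: the pushed-down predicate is evaluated per primary key, a series whose key is rejected is skipped, and a predicate that errors is treated as "keep", so a bad filter can only cost performance, never correctness. The types here are placeholders:

```rust
fn prune_series<'a>(
    series: &'a [(String, Vec<i64>)],
    predicate: impl Fn(&str) -> Result<bool, String>,
) -> Vec<&'a (String, Vec<i64>)> {
    series
        .iter()
        // unwrap_or(true): on evaluation failure, keep the series.
        .filter(|(pk, _)| predicate(pk).unwrap_or(true))
        .collect()
}

fn main() {
    let data = vec![
        ("host=a".to_string(), vec![1, 2]),
        ("host=b".to_string(), vec![3]),
    ];
    let kept = prune_series(&data, |pk| Ok(pk.ends_with("=a")));
    assert_eq!(kept.len(), 1);
}
```
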
Yingwen
d4577e7372 feat(mito): add metrics to mito engine (#2556)
* feat: allow discarding a timer

* feat: flush metrics

* feat: flush bytes and region count metrics

* refactor: add as_str to get static string

* feat: add handle request elapsed metrics

* feat: add some write related metrics

* style: fix clippy
2023-10-10 03:53:17 +00:00
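
One building block above is a timer that can be discarded. A self-contained sketch of the guard pattern, assuming the real code records into a metrics histogram rather than printing:

```rust
use std::time::Instant;

struct Timer {
    start: Instant,
    discarded: bool,
}

impl Timer {
    fn new() -> Self {
        Self { start: Instant::now(), discarded: false }
    }

    /// Forget this sample instead of recording it on drop, e.g. when the
    /// request is rejected before doing real work.
    fn discard(mut self) {
        self.discarded = true;
    }
}

impl Drop for Timer {
    fn drop(&mut self) {
        if !self.discarded {
            // Stand-in for histogram.observe(elapsed).
            println!("elapsed: {:?}", self.start.elapsed());
        }
    }
}

fn main() {
    let timer = Timer::new();
    drop(timer); // records the sample
    Timer::new().discard(); // records nothing
}
```
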
dennis zhuang
88f26673f0 fix: adds back http_timeout for frontend subcommand (#2555) 2023-10-10 03:05:16 +00:00
Baasit
19f300fc5a feat: renaming kv directory to metadata (#2549)
* fix: renamed kv directory to metadata directory

* fix: changed function name

* fix: changed function name
2023-10-09 11:43:17 +00:00
Weny Xu
cc83764331 fix: check table exists before allocating table id (#2546)
* fix: check table exists before allocating table_id

* chore: apply suggestions from CR
2023-10-09 11:40:10 +00:00
Yingwen
81aa7a4caf chore(mito): change default batch size/row group size (#2550) 2023-10-09 11:10:12 +00:00
Yingwen
d68dd1f3eb fix: schema validation is skipped once we need to fill a column (#2548)
* test: test different order

* test: add tests for missing and invalid columns

* fix: do not skip schema validation while missing columns

* chore: use field_columns()

* test: add tests for different column order
2023-10-09 09:20:51 +00:00
Lei, HUANG
9b3470b049 feat: android image builder dockerfile (#2541)
* feat: android image builder dockerfile

* feat: add building android dev-builder to ci config file

* fix: add build arg

* feat: use makefile to build image and add strip command
2023-10-09 09:10:14 +00:00
Weny Xu
8cc862ff8a refactor: refactor cache invalidator (#2540) 2023-10-09 08:19:18 +00:00
Weny Xu
81ccb58fb4 refactor!: compare with origin bytes during the transactions (#2538)
* refactor: compare with origin bytes during the transaction

* refactor: use serialize_str instead

* Update src/common/meta/src/key.rs

Co-authored-by: JeremyHi <jiachun_feng@proton.me>

* chore: apply suggestions from CR

---------

Co-authored-by: JeremyHi <jiachun_feng@proton.me>
2023-10-09 08:17:19 +00:00
Weny Xu
ce3c10a86e refactor: de/encode protobuf-encoded byte array with base64 (#2545) 2023-10-09 05:31:44 +00:00
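
Base64-wrapping a protobuf-encoded byte array keeps binary values safe wherever a string is expected. A sketch with the base64 crate's Engine API; picking the STANDARD engine here is an assumption, not necessarily what the key codec uses:

```rust
use base64::engine::general_purpose::STANDARD;
use base64::Engine as _;

fn main() {
    // Protobuf output is arbitrary bytes, not valid UTF-8.
    let raw: Vec<u8> = vec![0x08, 0x96, 0x01];
    let encoded = STANDARD.encode(&raw);
    let decoded = STANDARD.decode(&encoded).expect("valid base64");
    assert_eq!(decoded, raw);
}
```
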
shuiyisong
007f7ba03c refactor: extract plugins crate (#2487)
* chore: move frontend plugins fn

* chore: move datanode plugins to fn

* chore: add opt plugins

* chore: add plugins to meta-srv

* chore: setup meta plugins, wait for router extension

* chore: try use configurator for grpc too

* chore: minor fix fmt

* chore: minor fix fmt

* chore: add start meta_srv for hook

* chore: merge develop

* chore: minor fix

* chore: replace Arc<Plugins> with PluginsRef

* chore: fix header

* chore: remove empty file

* chore: modify comments

* chore: remove PluginsRef type alias

* chore: remove `OptPlugins`
2023-10-09 04:54:27 +00:00
Weny Xu
dfe68a7e0b refactor: check push result out of loop (#2511)
* refactor: check push result out of loop

* chore: apply suggestions from CR
2023-10-09 02:49:48 +00:00
Ruihang Xia
d5e4fcaaff feat: dist plan optimize part 2 (#2543)
* allow udf and scalar fn

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* put CountWildcardRule before dist planner

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* bump datafusion to fix first_value/last_value

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* update sqlness result

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* use retain instead

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-10-09 02:18:36 +00:00
Yingwen
17b385a985 fix: compiler errors under pprof and mem-prof features (#2537)
* fix: compiler errors under pprof feature

* fix: compiler errors under mem-prof feature
2023-10-08 08:28:45 +00:00
shuiyisong
067917845f fix: carry dbname from frontend to datanode (#2520)
* chore: add dbname in region request header for tracking purpose

* chore: fix handle read

* chore: add write meter

* chore: add meter-core to dep

* chore: add converter between RegionRequestHeader and QueryContext & update proto version
2023-10-08 06:30:23 +00:00
Weny Xu
a680133acc feat: enable no delay for mysql, opentsdb, http (#2530)
* refactor: enable no delay for mysql, opentsdb, http

* Apply suggestions from code review

Co-authored-by: Yingwen <realevenyag@gmail.com>

---------

Co-authored-by: Yingwen <realevenyag@gmail.com>
2023-10-08 06:19:52 +00:00
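
TCP_NODELAY disables Nagle's algorithm so small protocol frames are flushed immediately instead of being batched, trading a little bandwidth for latency. A minimal tokio sketch of the server-side setting; the address is a placeholder:

```rust
use tokio::net::TcpListener;

#[tokio::main]
async fn main() -> std::io::Result<()> {
    let listener = TcpListener::bind("127.0.0.1:4002").await?;
    loop {
        let (stream, _peer) = listener.accept().await?;
        // Apply TCP_NODELAY to every accepted connection.
        stream.set_nodelay(true)?;
        // ... hand the stream to the protocol handler ...
    }
}
```
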
Yingwen
0593c3bde3 fix(mito): pruning for mito2 (#2525)
* fix: pruning for mito2

* chore: refactor projection parameters; add some tests; customize row group size for each flush task.

* chore: pass whole RegionFlushRequest

---------

Co-authored-by: Lei, HUANG <mrsatangel@gmail.com>
2023-10-08 03:45:15 +00:00
Lei, HUANG
0292445476 fix: timestamp range filter (#2533)
* fix: timestamp range filter

* fix: rebase develop

* fix: some style issues
2023-10-08 03:29:02 +00:00
dennis zhuang
ff15bc41d6 feat: improve object storage cache (#2522)
* feat: refactor object storage cache with moka

* chore: minor fixes

* fix: concurrent issues and invalidate cache after write/delete

* chore: minor changes

* fix: cargo lock

* refactor: rename

* chore: change DEFAULT_OBJECT_STORE_CACHE_SIZE to 256Mib

* fix: typo

* chore: style

* fix: toml format

* chore: toml

* fix: toml format

* Update src/object-store/src/layers/lru_cache/read_cache.rs

Co-authored-by: Ruihang Xia <waynestxia@gmail.com>

* chore: update Cargo.toml

Co-authored-by: Yingwen <realevenyag@gmail.com>

* chore: update src/object-store/Cargo.toml

Co-authored-by: Yingwen <realevenyag@gmail.com>

* chore: refactor and apply suggestions

* fix: typo

* feat: adds back allow list for caching

* chore: cr suggestion

Co-authored-by: Yingwen <realevenyag@gmail.com>

* chore: cr suggestion

Co-authored-by: Yingwen <realevenyag@gmail.com>

* refactor: wrap inner Accessor with Arc

* chore: remove run_pending_task in read and write path

* chore: the arc is unnecessary

---------

Co-authored-by: Ruihang Xia <waynestxia@gmail.com>
Co-authored-by: Yingwen <realevenyag@gmail.com>
2023-10-08 03:27:49 +00:00
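
A sketch of the moka-backed read-cache shape introduced above, with a placeholder loader standing in for the real object-store backend; the weigher and capacity wiring are illustrative, not the actual layer code:

```rust
use std::sync::Arc;

use moka::future::Cache;

async fn read_from_backend() -> Vec<u8> {
    vec![0; 1024] // placeholder for the real object-store read
}

#[tokio::main]
async fn main() {
    // Weighted by value size, capped at 256 MiB like the default above.
    let cache: Cache<String, Arc<Vec<u8>>> = Cache::builder()
        .weigher(|_path, bytes: &Arc<Vec<u8>>| bytes.len() as u32)
        .max_capacity(256 * 1024 * 1024)
        .build();

    let path = "data/file.parquet".to_string();
    // get_with loads once on a miss, even under concurrent callers.
    let bytes = cache
        .get_with(path.clone(), async { Arc::new(read_from_backend().await) })
        .await;
    assert_eq!(bytes.len(), 1024);

    // Writes and deletes must invalidate the stale entry.
    cache.invalidate(&path).await;
}
```
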
Yingwen
657542c0b8 feat(mito): Cache repeated vector for tags (#2523)
* feat: add vector_cache to CacheManager

* feat: cache repeated vectors

* feat: skip decoding pk if output doesn't contain tags

* test: add TestRegionMetadataBuilder

* test: test ProjectionMapper

* test: test vector cache

* test: test projection mapper convert

* style: fix clippy

* feat: do not cache vector if it is too large

* docs: update comment
2023-10-07 11:36:00 +00:00
Ning Sun
0ad3fb6040 fix: mysql timezone settings (#2534)
* fix: restore time zone settings for mysql

* test: add integration test for time zone

* test: fix unit test for check
2023-10-07 10:21:32 +00:00
Bamboo1
b44e39f897 feat: the schema of RegionMetadata is not output during debug (#2498)
* feat: the schema of RegionMetadata is not output during debug because column_metadatas contains duplicate information

Signed-off-by: ZhuZiyi <zyzhu2001@gmail.com>

* feat: the id_to_index of RegionMetadata is not output during debug

Signed-off-by: ZhuZiyi <zyzhu2001@gmail.com>

* feat: add debug trait

Signed-off-by: ZhuZiyi <zyzhu2001@gmail.com>

* feat: use default debug in ConcreteDataType

Signed-off-by: ZhuZiyi <zyzhu2001@gmail.com>

* chore: add std::fmt

Signed-off-by: ZhuZiyi <zyzhu2001@gmail.com>

* test: add debug trait test

Signed-off-by: ZhuZiyi <zyzhu2001@gmail.com>

* chore: typo

Signed-off-by: ZhuZiyi <zyzhu2001@gmail.com>

* chore: resolve conversation

Signed-off-by: ZhuZiyi <zyzhu2001@gmail.com>

* chore: format

Signed-off-by: ZhuZiyi <zyzhu2001@gmail.com>

* fix: test bug

Signed-off-by: ZhuZiyi <zyzhu2001@gmail.com>

---------

Signed-off-by: ZhuZiyi <zyzhu2001@gmail.com>
2023-10-07 08:01:54 +00:00
Weny Xu
f50f2a84a9 fix: open region missing options (#2473)
* fix: open region missing options

* refactor: remove redundant clone

* chore: apply suggestions from CR

* chore: apply suggestions

* chore: apply suggestions

* test: add test for initialize_region_server

* feat: introduce RegionInfo
2023-10-07 07:17:16 +00:00
Yingwen
fe783c7c1f perf(mito): Use a heap to merge batches for the same key (#2521)
* feat: merge by heap

* fix: fix heap order

* feat: avoid pop/push next and refactor some functions

* feat: replace merge_batches and fix tests

* test: add test that a key is deleted

* fix: skip empty batch

* style: clippy

* chore: fix typos
2023-10-07 02:56:08 +00:00
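
The merge above is the classic k-way merge: keep each run's head in a min-heap, pop the smallest, and push that run's next element. A compact sketch with plain integers in place of batches:

```rust
use std::cmp::Reverse;
use std::collections::BinaryHeap;

fn merge_sorted(runs: Vec<Vec<i64>>) -> Vec<i64> {
    // Heap entries: (value, run index, offset in run), min-first via Reverse.
    let mut heap = BinaryHeap::new();
    for (run_idx, run) in runs.iter().enumerate() {
        if let Some(&first) = run.first() {
            heap.push(Reverse((first, run_idx, 0usize)));
        }
    }
    let mut out = Vec::new();
    while let Some(Reverse((value, run_idx, offset))) = heap.pop() {
        out.push(value);
        if let Some(&next) = runs[run_idx].get(offset + 1) {
            heap.push(Reverse((next, run_idx, offset + 1)));
        }
    }
    out
}

fn main() {
    assert_eq!(
        merge_sorted(vec![vec![1, 4, 9], vec![2, 3], vec![5]]),
        vec![1, 2, 3, 4, 5, 9]
    );
}
```
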
Weny Xu
00fe7d104e feat: enable tcp no_delay by default for internal services (#2527) 2023-10-07 02:35:28 +00:00
Zhenchi
201acd152d fix: missing file engine with default options (#2519)
Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>
2023-09-28 10:25:12 +00:00
Niwaka
04dbd835a1 feat: support greatest function (#2490)
* feat: support greatest function

* feat: make greatest take date_type as input

* fix: move sqlness test into common/function/time.sql

* fix: avoid using unwrap

* fix: use downcast

* refactor: simplify arrow cast
2023-09-28 10:25:09 +00:00
Wenjie0329
e3d333258b docs: add event banner (#2518) 2023-09-28 08:08:43 +00:00
Ruihang Xia
10ecc30817 feat: pushdown aggr, limit and sort plan (#2495)
* check partition for aggr plan

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* handle empty partition rule

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* remove CheckPartition option

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add test

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* update some valid sqlness result

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* opt-out promql plan and update sqlness

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix limit

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix insert select subquery

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* update unit test result

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix typo

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* Update src/query/src/dist_plan/analyzer.rs

Co-authored-by: Yingwen <realevenyag@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
Co-authored-by: Yingwen <realevenyag@gmail.com>
2023-09-28 06:35:45 +00:00
JeremyHi
52ac093110 fix: drop table 0 rows affected (#2515) 2023-09-28 06:21:18 +00:00
Zhenchi
1f1d72bdb8 feat: defensively specify limit parameter for file stream (#2517)
Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>
2023-09-28 06:14:27 +00:00
Zhenchi
7edafc3407 feat: push down filters to region engine (#2513)
feat: pushdown filters to region engine

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>
2023-09-27 13:50:44 +00:00
LFC
ccd6de8d6b fix: allow .(dot) literal in table name (#2483)
* fix: allow `.`(dot) literal in table name

* fix: resolve PR comments
2023-09-27 11:50:07 +00:00
shuiyisong
ee8d472aae chore: tune return msg (#2506)
* chore: test return msg

* fix: test_child_error

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* chore: fix test

* chore: minor fix grpc return value

* chore: format return msg

* chore: use root error as return value

* chore: fix empty err display

* chore: iter through external error

* chore: remove err msg

* chore: remove unused field

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
Co-authored-by: Ruihang Xia <waynestxia@gmail.com>
2023-09-27 10:40:25 +00:00
Weny Xu
9282e59a3b fix: re-create heartbeat stream ASAP (#2499)
* chore: set default connect_timeout_millis to 1000

* fix: re-create heartbeat stream ASAP

* chore: apply suggestions
2023-09-27 04:00:16 +00:00
Ruihang Xia
fbe2f2df46 refactor: simplify warn! and error! macros (#2503)
* refactor: simplify the error! and warn! macros

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* support display format

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* err.msg to err

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-09-27 03:07:03 +00:00
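
A toy version of the simplification above: one macro arm takes plain format arguments, another additionally takes an error to debug-print, so call sites no longer format errors by hand. The real macros forward to the logging backend; this sketch just prints:

```rust
macro_rules! my_error {
    // `my_error!(err; "context {}", id)` — message plus an error value.
    ($err:expr; $($arg:tt)*) => {
        eprintln!("[ERROR] {}: {:?}", format_args!($($arg)*), $err)
    };
    // `my_error!("plain {}", msg)` — message only.
    ($($arg:tt)*) => {
        eprintln!("[ERROR] {}", format_args!($($arg)*))
    };
}

fn main() {
    my_error!("plain message, id: {}", 42);
    let err = std::io::Error::new(std::io::ErrorKind::Other, "boom");
    my_error!(err; "failed to handle request {}", 7);
}
```
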
Yingwen
db6ceda5f0 fix(mito): fix region drop task runs multiple times but never cleans the dir (#2504)
fix: fix region drop task runs multiple times but never cleans the directory
2023-09-27 02:58:17 +00:00
Ruihang Xia
e352fb4495 fix: check for table scan before expanding (#2491)
* fix: check for table scan before expanding

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* change assert_ok to unwrap

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix clippy warning

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* update sqlness result

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* don't skip dml

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* uncomment ignored tests

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-09-26 12:12:08 +00:00
Yingwen
a6116bb866 feat(mito): Add cache manager (#2488)
* feat: add cache manager

* feat: add cache to reader builder

* feat: add AsyncFileReaderCache

* feat: Impl AsyncFileReaderCache

* chore: move moka dep to workspace

* feat: add moka cache to the manager

* feat: implement parquet meta cache

* test: test cache manager

* feat: consider vec size

* style: fix clippy

* test: fix config api test

* feat: divide cache

* test: test disabling meta cache

* test: fix config api test

* feat: remove meta cache if file is purged
2023-09-26 11:46:19 +00:00
Ruihang Xia
515ce825bd feat: stack trace style debug print for error (#2489)
* impl macro stack_trace_debug

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* manually mark external error

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* ignore warnings

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix clippy warnings

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* use debug print

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* simplify the error and warn macro

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix ut

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* add docs

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* replace snafu backtrace with location

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-09-26 11:23:21 +00:00
Vanish
7fc9604735 feat: distribute truncate table in region server (#2414)
* feat: distribute truncate table

* chore: add metrics for truncate table

* test: add sqlness test

* chore: cr

* test: add multi truncate

* chore: add trace id to the header
2023-09-26 11:14:14 +00:00
Zhenchi
a4282415f7 fix: convert datetime to chrono datetime (#2497)
* fix: convert datetime to chrono datetime

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* chore: typo

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* fix the bad fix

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

---------

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>
2023-09-26 09:04:12 +00:00
Zhenchi
0bf26642a4 feat: re-support query engine execute dml (#2484)
* feat: re-support query engine execute dml

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* chore: remove region_number in InsertRequest

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* chore: add doc comments

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

---------

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>
2023-09-26 08:37:04 +00:00
Weny Xu
230a3026ad fix: dn doesn't have a chance to send a heartbeat to the new leader (#2471)
* refactor: set meta leader lease secs to 3s

* fix: correct default heartbeat interval

* refactor: ask meta leader in parallel

* feat: configure heartbeat client timeout to 500ms

* fix: trigger to send heartbeat immediately after fail

* fix: fix clippy
2023-09-26 05:05:38 +00:00
Wei
54e506a494 refactor: datetime time unit (#2469)
* refactor: datetime time unit

* Update src/common/time/src/datetime.rs

Co-authored-by: Yingwen <realevenyag@gmail.com>

* chore: cr.

---------

Co-authored-by: Yingwen <realevenyag@gmail.com>
2023-09-25 10:00:56 +00:00
Yingwen
7ecfaa240f refactor(mito): remove #[allow(dead_code)] (#2479) 2023-09-25 09:20:00 +00:00
LFC
c0f080df26 fix: print root cause error message to user facing interface (#2486) 2023-09-25 08:44:49 +00:00
Niwaka
f9351e4fb5 chore: add integration test for issue2437 (#2481) 2023-09-25 06:23:16 +00:00
zyy17
00272d53cc chore: fix typo (#2477) 2023-09-24 06:47:14 +00:00
JeremyHi
7310ec0bb3 chore: refactor options (#2476) 2023-09-24 02:12:33 +00:00
Yingwen
73842f10e7 fix(mito): normalize region dir in RegionOpener (#2475)
fix: normalize region dir in RegionOpener
2023-09-23 10:06:00 +00:00
Yingwen
32d1d68441 fix(mito): reset is_sorted to true after the merger finishes one series (#2474)
fix: reset is_sorted flag to true after the merger finishes one series
2023-09-23 10:05:34 +00:00
Ning Sun
ffa729cdf5 feat: implement storage for OTLP histogram (#2282)
* feat: implement new histogram data model

* feat:  use prometheus table format for histogram

* refactor: remove duplicated code

* fix: histogram tag column

* fix: use accumulated count in buckets

* refactor: using row based protocol for otlp WIP

* refactor: use row based writer for otlp.

Also updated row writer for owned keys

* refactor: use row writers for otlp

* test: add integration tests for histogram

* refactor: change le column name
2023-09-23 07:59:14 +00:00
JeremyHi
9d0de25bff chore: typo (#2470) 2023-09-22 09:47:34 +00:00
Wei
aef9e7bfc3 refactor: disallow int64 type as time index (#2460)
* refactor: remove is_timestamp_compatible.

* chore: fmt

* refactor: remove int64 to timestamp match

* chore

* chore: apply suggestions from code review

Co-authored-by: dennis zhuang <killme2008@gmail.com>

* chore: fmt

---------

Co-authored-by: dennis zhuang <killme2008@gmail.com>
2023-09-22 06:28:02 +00:00
Yingwen
c6e95ffe63 fix(mito): compaction scheduler schedules more tasks than expected (#2466)
* test: test on_compaction_finished

* fix: avoid submit same region to compact

* feat: persist and recover compaction time window

* test: fix test

* test: sort like result
2023-09-22 06:13:12 +00:00
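
The scheduling fix above boils down to remembering which regions already have a queued or running compaction. A sketch with a bare HashSet in place of the real scheduler state:

```rust
use std::collections::HashSet;

#[derive(Default)]
struct CompactionScheduler {
    /// Region ids with an in-flight or queued task.
    pending: HashSet<u64>,
}

impl CompactionScheduler {
    /// Returns true only when a new task was actually scheduled.
    fn schedule(&mut self, region_id: u64) -> bool {
        self.pending.insert(region_id)
    }

    fn on_finished(&mut self, region_id: u64) {
        self.pending.remove(&region_id);
        // The real code re-checks here whether the region needs compaction again.
    }
}

fn main() {
    let mut scheduler = CompactionScheduler::default();
    assert!(scheduler.schedule(1));
    assert!(!scheduler.schedule(1)); // duplicate request is ignored
    scheduler.on_finished(1);
    assert!(scheduler.schedule(1)); // can be scheduled again afterwards
}
```
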
Yingwen
c9f8b9c7c3 feat: update proto and remove create_if_not_exists (#2467) 2023-09-22 03:24:49 +00:00
Baasit
688e64632d feat: support for show full tables (#2410)
* feat: added show tables command

* fix(tests): fixed parser and statement unit tests

* chore: implemented display trait for table type

* fix: handled no table type and error for unsupported command in show database

* chore: removed full as a show kind, instead as a show option

* chore(tests): fixed failing test and added more tests for show full

* chore: refactored table types to use filters

* fix: changed table_type to tables
2023-09-22 02:34:57 +00:00
JeremyHi
8e5eaf5472 chore: remove unused region_stats method from table (#2458)
chore: remove unused region_stats method from table
2023-09-22 02:27:29 +00:00
LinFeng
621c6f371b feat: limit grpc message size (#2459)
* feat: add two grpc config options

Those options are for:
* Limit receiving (decoding) message size
* Limit sending (encoding) message size

* test: add integration tests for message size limit
2023-09-22 02:07:46 +00:00
Ruihang Xia
4c7ad44605 refactor: remove SqlStatementExecutor (#2464)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-09-22 01:57:48 +00:00
Weny Xu
6306aeabf0 chore: bump opendal to 0.40 (#2465) 2023-09-21 14:25:23 +00:00
JeremyHi
40781ec754 fix: test on windows (#2462)
* fix: test on windows

* fix: fix windows root

* fix: use relative path instead of root

* fix: remove incorrect replace

* fix: fix tests

---------

Co-authored-by: WenyXu <wenymedia@gmail.com>
2023-09-21 10:57:56 +00:00
zyy17
c7b490e1a0 ci: expand upload retry timeout (#2461) 2023-09-21 10:02:15 +00:00
Ruihang Xia
e3f53a8060 fix: add slash after generated region_dir (#2463)
* fix: add slash after generated region_dir

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* update ut

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-09-21 07:46:05 +00:00
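
A tiny sketch of why the trailing slash above matters: when a region directory is used as a listing prefix, "table/1" without the slash would also match "table/10". The directory layout here is made up:

```rust
fn region_dir(table_dir: &str, region_id: u64) -> String {
    // Always terminate the prefix so it matches exactly one directory.
    format!("{}/{}/", table_dir.trim_end_matches('/'), region_id)
}

fn main() {
    assert_eq!(region_dir("data/table", 1), "data/table/1/");
    assert_eq!(region_dir("data/table/", 1), "data/table/1/");
}
```
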
Weny Xu
580d11b1e1 refactor: unify table metadata cache invalidator (#2449)
* refactor: unify table metadata cache invalidator

* chore: apply from suggestions
2023-09-21 03:45:49 +00:00
shuiyisong
20f4f7971a refactor: remove source and location in snafu display (#2428)
* refactor: remove source pt 1

* refactor: remove source pt 2

* refactor: remove source pt 3

* refactor: remove location pt 1

* refactor: remove location pt 2

* chore: remove rustc files

* chore: fix error case

* chore: fix test

* chore: fix test

* chore: fix cr issue

Co-authored-by: fys <40801205+fengys1996@users.noreply.github.com>

---------

Co-authored-by: fys <40801205+fengys1996@users.noreply.github.com>
2023-09-21 02:55:24 +00:00
dennis zhuang
9863e501f1 test: revert ignored tests (#2455) 2023-09-21 02:33:18 +00:00
466 changed files with 9917 additions and 4912 deletions


@@ -62,6 +62,16 @@ runs:
IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
IMAGE_TAG=${{ inputs.version }}
- name: Build and push android dev builder image to dockerhub
shell: bash
run:
make dev-builder \
BASE_IMAGE=android \
BUILDX_MULTI_PLATFORM_BUILD=true \
IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \
IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
IMAGE_TAG=${{ inputs.version }}
- name: Login to ACR
uses: docker/login-action@v2
continue-on-error: true


@@ -37,7 +37,7 @@ inputs:
upload-retry-timeout:
description: Timeout for uploading artifacts to S3
required: false
default: "10" # minutes
default: "30" # minutes
working-dir:
description: Working directory to upload the artifacts
required: false


@@ -194,38 +194,37 @@ jobs:
runs-on: windows-latest-8-cores
timeout-minutes: 60
steps:
- run: 'echo "temporary disabled"'
# - run: git config --global core.autocrlf false
# - uses: actions/checkout@v3
# - uses: arduino/setup-protoc@v1
# with:
# repo-token: ${{ secrets.GITHUB_TOKEN }}
# - name: Install Rust toolchain
# uses: dtolnay/rust-toolchain@master
# with:
# toolchain: ${{ env.RUST_TOOLCHAIN }}
# components: llvm-tools-preview
# - name: Rust Cache
# uses: Swatinem/rust-cache@v2
# - name: Install Cargo Nextest
# uses: taiki-e/install-action@nextest
# - name: Install Python
# uses: actions/setup-python@v4
# with:
# python-version: '3.10'
# - name: Install PyArrow Package
# run: pip install pyarrow
# - name: Install WSL distribution
# uses: Vampire/setup-wsl@v2
# with:
# distribution: Ubuntu-22.04
# - name: Running tests
# run: cargo nextest run -F pyo3_backend,dashboard
# env:
# RUST_BACKTRACE: 1
# CARGO_INCREMENTAL: 0
# GT_S3_BUCKET: ${{ secrets.S3_BUCKET }}
# GT_S3_ACCESS_KEY_ID: ${{ secrets.S3_ACCESS_KEY_ID }}
# GT_S3_ACCESS_KEY: ${{ secrets.S3_ACCESS_KEY }}
# GT_S3_REGION: ${{ secrets.S3_REGION }}
# UNITTEST_LOG_DIR: "__unittest_logs"
- run: git config --global core.autocrlf false
- uses: actions/checkout@v3
- uses: arduino/setup-protoc@v1
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- name: Install Rust toolchain
uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
components: llvm-tools-preview
- name: Rust Cache
uses: Swatinem/rust-cache@v2
- name: Install Cargo Nextest
uses: taiki-e/install-action@nextest
- name: Install Python
uses: actions/setup-python@v4
with:
python-version: '3.10'
- name: Install PyArrow Package
run: pip install pyarrow
- name: Install WSL distribution
uses: Vampire/setup-wsl@v2
with:
distribution: Ubuntu-22.04
- name: Running tests
run: cargo nextest run -F pyo3_backend,dashboard
env:
RUST_BACKTRACE: 1
CARGO_INCREMENTAL: 0
GT_S3_BUCKET: ${{ secrets.S3_BUCKET }}
GT_S3_ACCESS_KEY_ID: ${{ secrets.S3_ACCESS_KEY_ID }}
GT_S3_ACCESS_KEY: ${{ secrets.S3_ACCESS_KEY }}
GT_S3_REGION: ${{ secrets.S3_REGION }}
UNITTEST_LOG_DIR: "__unittest_logs"


@@ -91,7 +91,7 @@ env:
# The scheduled version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-YYYYMMDD', like v0.2.0-nigthly-20230313;
NIGHTLY_RELEASE_PREFIX: nightly
# Note: The NEXT_RELEASE_VERSION should be modified manually by every formal release.
NEXT_RELEASE_VERSION: v0.4.0
NEXT_RELEASE_VERSION: v0.5.0
jobs:
allocate-runners:

Cargo.lock (generated) — 345 changes; file diff suppressed because it is too large.


@@ -39,6 +39,7 @@ members = [
"src/object-store",
"src/operator",
"src/partition",
"src/plugins",
"src/promql",
"src/query",
"src/script",
@@ -54,39 +55,43 @@ members = [
resolver = "2"
[workspace.package]
version = "0.4.0-nightly"
version = "0.4.0"
edition = "2021"
license = "Apache-2.0"
[workspace.dependencies]
aquamarine = "0.3"
arrow = { version = "43.0" }
etcd-client = "0.11"
arrow-array = "43.0"
arrow-flight = "43.0"
arrow-schema = { version = "43.0", features = ["serde"] }
async-stream = "0.3"
async-trait = "0.1"
chrono = { version = "0.4", features = ["serde"] }
datafusion = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "c0b0fca548e99d020c76e1a1cd7132aab26000e1" }
datafusion-common = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "c0b0fca548e99d020c76e1a1cd7132aab26000e1" }
datafusion-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "c0b0fca548e99d020c76e1a1cd7132aab26000e1" }
datafusion-optimizer = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "c0b0fca548e99d020c76e1a1cd7132aab26000e1" }
datafusion-physical-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "c0b0fca548e99d020c76e1a1cd7132aab26000e1" }
datafusion-sql = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "c0b0fca548e99d020c76e1a1cd7132aab26000e1" }
datafusion-substrait = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "c0b0fca548e99d020c76e1a1cd7132aab26000e1" }
datafusion = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "b6f3b28b6fe91924cc8dd3d83726b766f2a706ec" }
datafusion-common = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "b6f3b28b6fe91924cc8dd3d83726b766f2a706ec" }
datafusion-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "b6f3b28b6fe91924cc8dd3d83726b766f2a706ec" }
datafusion-optimizer = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "b6f3b28b6fe91924cc8dd3d83726b766f2a706ec" }
datafusion-physical-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "b6f3b28b6fe91924cc8dd3d83726b766f2a706ec" }
datafusion-sql = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "b6f3b28b6fe91924cc8dd3d83726b766f2a706ec" }
datafusion-substrait = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "b6f3b28b6fe91924cc8dd3d83726b766f2a706ec" }
derive_builder = "0.12"
etcd-client = "0.11"
futures = "0.3"
futures-util = "0.3"
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "9d3f28d07d29607d0e3c1823f4a4d2bc229d05b9" }
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "1f1dd532a111e3834cc3019c5605e2993ffb9dc3" }
humantime-serde = "1.1"
itertools = "0.10"
lazy_static = "1.4"
meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "abbd357c1e193cd270ea65ee7652334a150b628f" }
metrics = "0.20"
moka = "0.12"
once_cell = "1.18"
opentelemetry-proto = { version = "0.2", features = ["gen-tonic", "metrics"] }
parquet = "43.0"
paste = "1.0"
prost = "0.11"
raft-engine = { git = "https://github.com/tikv/raft-engine.git", rev = "22dfb426cd994602b57725ef080287d3e53db479" }
rand = "0.8"
regex = "1.8"
reqwest = { version = "0.11", default-features = false, features = [
@@ -108,8 +113,6 @@ tokio-util = { version = "0.7", features = ["io-util", "compat"] }
toml = "0.7"
tonic = { version = "0.9", features = ["tls"] }
uuid = { version = "1", features = ["serde", "v4", "fast-rng"] }
metrics = "0.20"
meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "abbd357c1e193cd270ea65ee7652334a150b628f" }
## workspaces members
api = { path = "src/api" }
auth = { path = "src/auth" }
@@ -122,19 +125,18 @@ common-config = { path = "src/common/config" }
common-datasource = { path = "src/common/datasource" }
common-error = { path = "src/common/error" }
common-function = { path = "src/common/function" }
common-macro = { path = "src/common/macro" }
common-greptimedb-telemetry = { path = "src/common/greptimedb-telemetry" }
common-grpc = { path = "src/common/grpc" }
common-grpc-expr = { path = "src/common/grpc-expr" }
common-macro = { path = "src/common/macro" }
common-mem-prof = { path = "src/common/mem-prof" }
common-meta = { path = "src/common/meta" }
common-pprof = { path = "src/common/pprof" }
common-procedure = { path = "src/common/procedure" }
common-procedure-test = { path = "src/common/procedure-test" }
common-pprof = { path = "src/common/pprof" }
common-query = { path = "src/common/query" }
common-recordbatch = { path = "src/common/recordbatch" }
common-runtime = { path = "src/common/runtime" }
substrait = { path = "src/common/substrait" }
common-telemetry = { path = "src/common/telemetry" }
common-test-util = { path = "src/common/test-util" }
common-time = { path = "src/common/time" }
@@ -148,20 +150,20 @@ meta-client = { path = "src/meta-client" }
meta-srv = { path = "src/meta-srv" }
mito = { path = "src/mito" }
mito2 = { path = "src/mito2" }
operator = { path = "src/operator" }
object-store = { path = "src/object-store" }
operator = { path = "src/operator" }
partition = { path = "src/partition" }
plugins = { path = "src/plugins" }
promql = { path = "src/promql" }
query = { path = "src/query" }
raft-engine = { git = "https://github.com/tikv/raft-engine.git", rev = "22dfb426cd994602b57725ef080287d3e53db479" }
script = { path = "src/script" }
servers = { path = "src/servers" }
session = { path = "src/session" }
sql = { path = "src/sql" }
storage = { path = "src/storage" }
store-api = { path = "src/store-api" }
substrait = { path = "src/common/substrait" }
table = { path = "src/table" }
table-procedure = { path = "src/table-procedure" }
[workspace.dependencies.meter-macros]
git = "https://github.com/GreptimeTeam/greptime-meter.git"


@@ -55,11 +55,15 @@ else
BUILDX_MULTI_PLATFORM_BUILD_OPTS := -o type=docker
endif
ifneq ($(strip $(CARGO_BUILD_EXTRA_OPTS)),)
CARGO_BUILD_OPTS += ${CARGO_BUILD_EXTRA_OPTS}
endif
##@ Build
.PHONY: build
build: ## Build debug version greptime.
cargo build ${CARGO_BUILD_OPTS}
cargo ${CARGO_EXTENSION} build ${CARGO_BUILD_OPTS}
.POHNY: build-by-dev-builder
build-by-dev-builder: ## Build greptime by dev-builder.
@@ -67,11 +71,34 @@ build-by-dev-builder: ## Build greptime by dev-builder.
-v ${PWD}:/greptimedb -v ${CARGO_REGISTRY_CACHE}:/root/.cargo/registry \
-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:latest \
make build \
CARGO_EXTENSION="${CARGO_EXTENSION}" \
CARGO_PROFILE=${CARGO_PROFILE} \
FEATURES=${FEATURES} \
TARGET_DIR=${TARGET_DIR} \
TARGET=${TARGET} \
RELEASE=${RELEASE}
RELEASE=${RELEASE} \
CARGO_BUILD_EXTRA_OPTS="${CARGO_BUILD_EXTRA_OPTS}"
.PHONY: build-android-bin
build-android-bin: ## Build greptime binary for android.
docker run --network=host \
-v ${PWD}:/greptimedb -v ${CARGO_REGISTRY_CACHE}:/root/.cargo/registry \
-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-android:latest \
make build \
CARGO_EXTENSION="ndk --platform 23 -t aarch64-linux-android" \
CARGO_PROFILE=release \
FEATURES="${FEATURES}" \
TARGET_DIR="${TARGET_DIR}" \
TARGET="${TARGET}" \
RELEASE="${RELEASE}" \
CARGO_BUILD_EXTRA_OPTS="--bin greptime --no-default-features"
.PHONY: strip-android-bin
strip-android-bin: ## Strip greptime binary for android.
docker run --network=host \
-v ${PWD}:/greptimedb \
-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-android:latest \
bash -c '$${NDK_ROOT}/toolchains/llvm/prebuilt/linux-x86_64/bin/llvm-strip /greptimedb/target/aarch64-linux-android/release/greptime'
.PHONY: clean
clean: ## Clean the project.


@@ -27,6 +27,14 @@
<a href="https://greptime.com/slack"><img src="https://img.shields.io/badge/slack-GreptimeDB-0abd59?logo=slack" alt="slack" /></a>
</p>
## Upcoming Event
Come and meet us in **KubeCon + CloudNativeCon North America 2023!**
<p align="center">
<picture>
<img alt="KubeCon + CloudNativeCon North Logo" src="./docs/banner/KCCNC_NA_2023_1000x200_Email Banner.png" width="800px">
</picture>
</p>
## What is GreptimeDB
GreptimeDB is an open-source time-series database with a special focus on


@@ -13,17 +13,19 @@ rpc_runtime_size = 8
require_lease_before_startup = false
[heartbeat]
# Interval for sending heartbeat messages to the Metasrv in milliseconds, 5000 by default.
interval_millis = 5000
# Interval for sending heartbeat messages to the Metasrv in milliseconds, 3000 by default.
interval_millis = 3000
# Metasrv client options.
[meta_client]
# Metasrv address list.
metasrv_addrs = ["127.0.0.1:3002"]
# Heartbeat timeout in milliseconds, 500 by default.
heartbeat_timeout_millis = 500
# Operation timeout in milliseconds, 3000 by default.
timeout_millis = 3000
# Connect server timeout in milliseconds, 5000 by default.
connect_timeout_millis = 5000
connect_timeout_millis = 1000
# `TCP_NODELAY` option for accepted connections, true by default.
tcp_nodelay = true
@@ -45,6 +47,12 @@ type = "File"
# TTL for all tables. Disabled by default.
# global_ttl = "7d"
# Cache configuration for object storage such as 'S3' etc.
# The local file cache directory
# cache_path = "/path/local_cache"
# The local file cache capacity in bytes.
# cache_capacity = "256Mib"
# Compaction options, see `standalone.example.toml`.
[storage.compaction]
max_inflight_tasks = 4


@@ -8,7 +8,7 @@ interval_millis = 5000
retry_interval_millis = 5000
# HTTP server options, see `standalone.example.toml`.
[http_options]
[http]
addr = "127.0.0.1:4000"
timeout = "30s"
body_limit = "64MB"
@@ -62,7 +62,7 @@ metasrv_addrs = ["127.0.0.1:3002"]
timeout_millis = 3000
# DDL timeouts options.
ddl_timeout_millis = 10000
connect_timeout_millis = 5000
connect_timeout_millis = 1000
tcp_nodelay = true
# Log options, see `standalone.example.toml`


@@ -115,6 +115,10 @@ data_home = "/tmp/greptimedb/"
type = "File"
# TTL for all tables. Disabled by default.
# global_ttl = "7d"
# Cache configuration for object storage such as 'S3' etc.
# cache_path = "/path/local_cache"
# The local file cache capacity in bytes.
# cache_capacity = "256Mib"
# Compaction options.
[storage.compaction]


@@ -0,0 +1,41 @@
FROM --platform=linux/amd64 saschpe/android-ndk:34-jdk17.0.8_7-ndk25.2.9519653-cmake3.22.1
ENV LANG en_US.utf8
WORKDIR /greptimedb
# Rename libunwind to libgcc
RUN cp ${NDK_ROOT}/toolchains/llvm/prebuilt/linux-x86_64/lib64/clang/14.0.7/lib/linux/aarch64/libunwind.a ${NDK_ROOT}/toolchains/llvm/prebuilt/linux-x86_64/lib64/clang/14.0.7/lib/linux/aarch64/libgcc.a
# Install dependencies.
RUN apt-get update && apt-get install -y \
libssl-dev \
protobuf-compiler \
curl \
git \
build-essential \
pkg-config \
python3 \
python3-dev \
python3-pip \
&& pip3 install --upgrade pip \
&& pip3 install pyarrow
# Trust workdir
RUN git config --global --add safe.directory /greptimedb
# Install Rust.
SHELL ["/bin/bash", "-c"]
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
ENV PATH /root/.cargo/bin/:$PATH
# Add android toolchains
ARG RUST_TOOLCHAIN
RUN rustup toolchain install ${RUST_TOOLCHAIN}
RUN rustup target add aarch64-linux-android
# Install cargo-ndk
RUN cargo install cargo-ndk
ENV ANDROID_NDK_HOME $NDK_ROOT
# Builder entrypoint.
CMD ["cargo", "ndk", "--platform", "23", "-t", "aarch64-linux-android", "build", "--bin", "greptime", "--profile", "release", "--no-default-features"]

Binary file not shown (new image, 51 KiB).


@@ -7,6 +7,7 @@ license.workspace = true
[dependencies]
common-base = { workspace = true }
common-error = { workspace = true }
common-macro = { workspace = true }
common-time = { workspace = true }
datatypes = { workspace = true }
greptime-proto.workspace = true


@@ -16,14 +16,16 @@ use std::any::Any;
use common_error::ext::ErrorExt;
use common_error::status_code::StatusCode;
use common_macro::stack_trace_debug;
use datatypes::prelude::ConcreteDataType;
use snafu::prelude::*;
use snafu::Location;
pub type Result<T> = std::result::Result<T, Error>;
#[derive(Debug, Snafu)]
#[derive(Snafu)]
#[snafu(visibility(pub))]
#[stack_trace_debug]
pub enum Error {
#[snafu(display("Unknown proto column datatype: {}", datatype))]
UnknownColumnDataType { datatype: i32, location: Location },
@@ -34,22 +36,14 @@ pub enum Error {
location: Location,
},
#[snafu(display(
"Failed to convert column default constraint, column: {}, source: {}",
column,
source
))]
#[snafu(display("Failed to convert column default constraint, column: {}", column))]
ConvertColumnDefaultConstraint {
column: String,
location: Location,
source: datatypes::error::Error,
},
#[snafu(display(
"Invalid column default constraint, column: {}, source: {}",
column,
source
))]
#[snafu(display("Invalid column default constraint, column: {}", column))]
InvalidColumnDefaultConstraint {
column: String,
location: Location,


@@ -4,8 +4,6 @@ version.workspace = true
edition.workspace = true
license.workspace = true
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[features]
default = []
testing = []
@@ -14,6 +12,7 @@ testing = []
api.workspace = true
async-trait.workspace = true
common-error.workspace = true
common-macro.workspace = true
digest = "0.10"
hex = { version = "0.4" }
secrecy = { version = "0.8", features = ["serde", "alloc"] }


@@ -14,10 +14,12 @@
use common_error::ext::{BoxedError, ErrorExt};
use common_error::status_code::StatusCode;
use common_macro::stack_trace_debug;
use snafu::{Location, Snafu};
#[derive(Debug, Snafu)]
#[derive(Snafu)]
#[snafu(visibility(pub))]
#[stack_trace_debug]
pub enum Error {
#[snafu(display("Invalid config value: {}, {}", value, msg))]
InvalidConfig { value: String, msg: String },
@@ -28,13 +30,14 @@ pub enum Error {
#[snafu(display("Internal state error: {}", msg))]
InternalState { msg: String },
#[snafu(display("IO error, source: {}", source))]
#[snafu(display("IO error"))]
Io {
source: std::io::Error,
#[snafu(source)]
error: std::io::Error,
location: Location,
},
#[snafu(display("Auth failed, source: {}", source))]
#[snafu(display("Auth failed"))]
AuthBackend {
location: Location,
source: BoxedError,


@@ -16,6 +16,7 @@ async-trait = "0.1"
common-catalog = { workspace = true }
common-error = { workspace = true }
common-grpc = { workspace = true }
common-macro = { workspace = true }
common-meta = { workspace = true }
common-query = { workspace = true }
common-recordbatch = { workspace = true }
@@ -30,7 +31,7 @@ futures-util.workspace = true
lazy_static.workspace = true
meta-client = { workspace = true }
metrics.workspace = true
moka = { version = "0.11", features = ["future"] }
moka = { workspace = true, features = ["future"] }
parking_lot = "0.12"
partition.workspace = true
regex.workspace = true


@@ -17,53 +17,48 @@ use std::fmt::Debug;
use common_error::ext::{BoxedError, ErrorExt};
use common_error::status_code::StatusCode;
use common_macro::stack_trace_debug;
use datafusion::error::DataFusionError;
use datatypes::prelude::ConcreteDataType;
use snafu::{Location, Snafu};
use table::metadata::TableId;
use tokio::task::JoinError;
#[derive(Debug, Snafu)]
#[derive(Snafu)]
#[snafu(visibility(pub))]
#[stack_trace_debug]
pub enum Error {
#[snafu(display("Failed to list catalogs, source: {}", source))]
#[snafu(display("Failed to list catalogs"))]
ListCatalogs {
location: Location,
source: BoxedError,
},
#[snafu(display("Failed to list {}'s schemas, source: {}", catalog, source))]
#[snafu(display("Failed to list {}'s schemas", catalog))]
ListSchemas {
location: Location,
catalog: String,
source: BoxedError,
},
#[snafu(display(
"Failed to re-compile script due to internal error, source: {}",
source
))]
#[snafu(display("Failed to re-compile script due to internal error"))]
CompileScriptInternal {
location: Location,
source: BoxedError,
},
#[snafu(display("Failed to open system catalog table, source: {}", source))]
#[snafu(display("Failed to open system catalog table"))]
OpenSystemCatalog {
location: Location,
source: table::error::Error,
},
#[snafu(display("Failed to create system catalog table, source: {}", source))]
#[snafu(display("Failed to create system catalog table"))]
CreateSystemCatalog {
location: Location,
source: table::error::Error,
},
#[snafu(display(
"Failed to create table, table info: {}, source: {}",
table_info,
source
))]
#[snafu(display("Failed to create table, table info: {}", table_info))]
CreateTable {
table_info: String,
location: Location,
@@ -97,13 +92,14 @@ pub enum Error {
#[snafu(display("Catalog value is not present"))]
EmptyValue { location: Location },
#[snafu(display("Failed to deserialize value, source: {}", source))]
#[snafu(display("Failed to deserialize value"))]
ValueDeserialize {
source: serde_json::error::Error,
#[snafu(source)]
error: serde_json::error::Error,
location: Location,
},
#[snafu(display("Table engine not found: {}, source: {}", engine_name, source))]
#[snafu(display("Table engine not found: {}", engine_name))]
TableEngineNotFound {
engine_name: String,
location: Location,
@@ -141,15 +137,18 @@ pub enum Error {
#[snafu(display("Operation {} not supported", op))]
NotSupported { op: String, location: Location },
#[snafu(display("Failed to open table {table_id}, source: {source}, at {location}"))]
#[snafu(display("Failed to open table {table_id}"))]
OpenTable {
table_id: TableId,
location: Location,
source: table::error::Error,
},
#[snafu(display("Failed to open table in parallel, source: {}", source))]
ParallelOpenTable { source: JoinError },
#[snafu(display("Failed to open table in parallel"))]
ParallelOpenTable {
#[snafu(source)]
error: JoinError,
},
#[snafu(display("Table not found while opening table, table info: {}", table_info))]
TableNotFound {
@@ -163,58 +162,52 @@ pub enum Error {
source: common_recordbatch::error::Error,
},
#[snafu(display("Failed to create recordbatch, source: {}", source))]
#[snafu(display("Failed to create recordbatch"))]
CreateRecordBatch {
location: Location,
source: common_recordbatch::error::Error,
},
#[snafu(display(
"Failed to insert table creation record to system catalog, source: {}",
source
))]
#[snafu(display("Failed to insert table creation record to system catalog"))]
InsertCatalogRecord {
location: Location,
source: table::error::Error,
},
#[snafu(display("Failed to scan system catalog table, source: {}", source))]
#[snafu(display("Failed to scan system catalog table"))]
SystemCatalogTableScan {
location: Location,
source: table::error::Error,
},
#[snafu(display("{source}"))]
#[snafu(display(""))]
Internal {
location: Location,
source: BoxedError,
},
#[snafu(display(
"Failed to upgrade weak catalog manager reference. location: {}",
location
))]
#[snafu(display("Failed to upgrade weak catalog manager reference"))]
UpgradeWeakCatalogManagerRef { location: Location },
#[snafu(display("Failed to execute system catalog table scan, source: {}", source))]
#[snafu(display("Failed to execute system catalog table scan"))]
SystemCatalogTableScanExec {
location: Location,
source: common_query::error::Error,
},
#[snafu(display("Cannot parse catalog value, source: {}", source))]
#[snafu(display("Cannot parse catalog value"))]
InvalidCatalogValue {
location: Location,
source: common_catalog::error::Error,
},
#[snafu(display("Failed to perform metasrv operation, source: {}", source))]
#[snafu(display("Failed to perform metasrv operation"))]
MetaSrv {
location: Location,
source: meta_client::error::Error,
},
#[snafu(display("Invalid table info in catalog, source: {}", source))]
#[snafu(display("Invalid table info in catalog"))]
InvalidTableInfoInCatalog {
location: Location,
source: datatypes::error::Error,
@@ -223,14 +216,14 @@ pub enum Error {
#[snafu(display("Illegal access to catalog: {} and schema: {}", catalog, schema))]
QueryAccessDenied { catalog: String, schema: String },
#[snafu(display("{}: {}", msg, source))]
#[snafu(display(""))]
Datafusion {
msg: String,
source: DataFusionError,
#[snafu(source)]
error: DataFusionError,
location: Location,
},
#[snafu(display("Table schema mismatch, source: {}", source))]
#[snafu(display("Table schema mismatch"))]
TableSchemaMismatch {
location: Location,
source: table::error::Error,
@@ -239,7 +232,7 @@ pub enum Error {
#[snafu(display("A generic error has occurred, msg: {}", msg))]
Generic { msg: String, location: Location },
#[snafu(display("Table metadata manager error: {}", source))]
#[snafu(display("Table metadata manager error"))]
TableMetadataManager {
source: common_meta::error::Error,
location: Location,


@@ -12,8 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use std::sync::Arc;
pub use client::{CachedMetaKvBackend, MetaKvBackend};
mod client;
@@ -22,18 +20,3 @@ mod manager;
#[cfg(feature = "testing")]
pub mod mock;
pub use manager::KvBackendCatalogManager;
/// KvBackend cache invalidator
#[async_trait::async_trait]
pub trait KvCacheInvalidator: Send + Sync {
async fn invalidate_key(&self, key: &[u8]);
}
pub type KvCacheInvalidatorRef = Arc<dyn KvCacheInvalidator>;
pub struct DummyKvCacheInvalidator;
#[async_trait::async_trait]
impl KvCacheInvalidator for DummyKvCacheInvalidator {
async fn invalidate_key(&self, _key: &[u8]) {}
}


@@ -18,6 +18,7 @@ use std::sync::Arc;
use std::time::Duration;
use common_error::ext::BoxedError;
use common_meta::cache_invalidator::KvCacheInvalidator;
use common_meta::error::Error::{CacheNotGet, GetKvCache};
use common_meta::error::{CacheNotGetSnafu, Error, ExternalSnafu, Result};
use common_meta::kv_backend::{KvBackend, KvBackendRef, TxnService};
@@ -28,12 +29,11 @@ use common_meta::rpc::store::{
RangeRequest, RangeResponse,
};
use common_meta::rpc::KeyValue;
use common_telemetry::timer;
use common_telemetry::{debug, timer};
use meta_client::client::MetaClient;
use moka::future::{Cache, CacheBuilder};
use snafu::{OptionExt, ResultExt};
use super::KvCacheInvalidator;
use crate::metrics::{METRIC_CATALOG_KV_GET, METRIC_CATALOG_KV_REMOTE_GET};
const CACHE_MAX_CAPACITY: u64 = 10000;
@@ -197,7 +197,8 @@ impl KvBackend for CachedMetaKvBackend {
#[async_trait::async_trait]
impl KvCacheInvalidator for CachedMetaKvBackend {
async fn invalidate_key(&self, key: &[u8]) {
self.cache.invalidate(key).await
self.cache.invalidate(key).await;
debug!("invalidated cache key: {}", String::from_utf8_lossy(key));
}
}


@@ -18,18 +18,15 @@ use std::sync::{Arc, Weak};
use common_catalog::consts::{DEFAULT_SCHEMA_NAME, INFORMATION_SCHEMA_NAME, NUMBERS_TABLE_ID};
use common_error::ext::BoxedError;
use common_meta::cache_invalidator::{CacheInvalidator, Context};
use common_meta::cache_invalidator::{CacheInvalidator, CacheInvalidatorRef, Context};
use common_meta::datanode_manager::DatanodeManagerRef;
use common_meta::error::Result as MetaResult;
use common_meta::key::catalog_name::CatalogNameKey;
use common_meta::key::schema_name::SchemaNameKey;
use common_meta::key::table_info::TableInfoKey;
use common_meta::key::table_name::TableNameKey;
use common_meta::key::table_route::TableRouteKey;
use common_meta::key::{TableMetaKey, TableMetadataManager, TableMetadataManagerRef};
use common_meta::key::{TableMetadataManager, TableMetadataManagerRef};
use common_meta::kv_backend::KvBackendRef;
use common_meta::table_name::TableName;
use common_telemetry::debug;
use futures_util::TryStreamExt;
use partition::manager::{PartitionRuleManager, PartitionRuleManagerRef};
use snafu::prelude::*;
@@ -43,7 +40,6 @@ use crate::error::{
TableMetadataManagerSnafu,
};
use crate::information_schema::{InformationSchemaProvider, COLUMNS, TABLES};
use crate::kvbackend::KvCacheInvalidatorRef;
use crate::CatalogManager;
/// Access all existing catalog, schema and tables.
@@ -56,7 +52,7 @@ pub struct KvBackendCatalogManager {
// TODO(LFC): Maybe use a real implementation for Standalone mode.
// Now we use `NoopKvCacheInvalidator` for Standalone mode. In Standalone mode, the KV backend
// is implemented by RaftEngine. Maybe we need a cache for it?
backend_cache_invalidator: KvCacheInvalidatorRef,
cache_invalidator: CacheInvalidatorRef,
partition_manager: PartitionRuleManagerRef,
table_metadata_manager: TableMetadataManagerRef,
datanode_manager: DatanodeManagerRef,
@@ -66,53 +62,29 @@ pub struct KvBackendCatalogManager {
#[async_trait::async_trait]
impl CacheInvalidator for KvBackendCatalogManager {
async fn invalidate_table_name(&self, _ctx: &Context, table_name: TableName) -> MetaResult<()> {
let key: TableNameKey = (&table_name).into();
self.backend_cache_invalidator
.invalidate_key(&key.as_raw_key())
.await;
debug!(
"invalidated cache key: {}",
String::from_utf8_lossy(&key.as_raw_key())
);
Ok(())
async fn invalidate_table_name(&self, ctx: &Context, table_name: TableName) -> MetaResult<()> {
self.cache_invalidator
.invalidate_table_name(ctx, table_name)
.await
}
async fn invalidate_table_id(&self, _ctx: &Context, table_id: TableId) -> MetaResult<()> {
let key = TableInfoKey::new(table_id);
self.backend_cache_invalidator
.invalidate_key(&key.as_raw_key())
.await;
debug!(
"invalidated cache key: {}",
String::from_utf8_lossy(&key.as_raw_key())
);
let key = &TableRouteKey { table_id };
self.backend_cache_invalidator
.invalidate_key(&key.as_raw_key())
.await;
debug!(
"invalidated cache key: {}",
String::from_utf8_lossy(&key.as_raw_key())
);
Ok(())
async fn invalidate_table_id(&self, ctx: &Context, table_id: TableId) -> MetaResult<()> {
self.cache_invalidator
.invalidate_table_id(ctx, table_id)
.await
}
}
impl KvBackendCatalogManager {
pub fn new(
backend: KvBackendRef,
backend_cache_invalidator: KvCacheInvalidatorRef,
cache_invalidator: CacheInvalidatorRef,
datanode_manager: DatanodeManagerRef,
) -> Arc<Self> {
Arc::new_cyclic(|me| Self {
backend_cache_invalidator,
partition_manager: Arc::new(PartitionRuleManager::new(backend.clone())),
table_metadata_manager: Arc::new(TableMetadataManager::new(backend)),
cache_invalidator,
datanode_manager,
system_catalog: SystemCatalog {
catalog_manager: me.clone(),
@@ -131,12 +103,6 @@ impl KvBackendCatalogManager {
pub fn datanode_manager(&self) -> DatanodeManagerRef {
self.datanode_manager.clone()
}
pub async fn invalidate_schema(&self, catalog: &str, schema: &str) {
let key = SchemaNameKey::new(catalog, schema).as_raw_key();
self.backend_cache_invalidator.invalidate_key(&key).await;
}
}
#[async_trait::async_trait]
@@ -253,6 +219,7 @@ impl CatalogManager for KvBackendCatalogManager {
.get(table_id)
.await
.context(TableMetadataManagerSnafu)?
.map(|v| v.into_inner())
else {
return Ok(None);
};


@@ -20,10 +20,8 @@ use std::any::Any;
use std::fmt::{Debug, Formatter};
use std::sync::Arc;
use api::v1::meta::RegionStat;
use common_telemetry::warn;
use futures::future::BoxFuture;
use table::metadata::{TableId, TableType};
use table::metadata::TableId;
use table::requests::CreateTableRequest;
use table::TableRef;
@@ -124,66 +122,3 @@ pub struct RegisterSchemaRequest {
pub catalog: String,
pub schema: String,
}
/// The stat of regions in the datanode node.
/// The number of regions can be got from len of vec.
///
/// Ignores any errors occurred during iterating regions. The intention of this method is to
/// collect region stats that will be carried in Datanode's heartbeat to Metasrv, so it's a
/// "try our best" job.
pub async fn datanode_stat(catalog_manager: &CatalogManagerRef) -> (u64, Vec<RegionStat>) {
let mut region_number: u64 = 0;
let mut region_stats = Vec::new();
let Ok(catalog_names) = catalog_manager.catalog_names().await else {
return (region_number, region_stats);
};
for catalog_name in catalog_names {
let Ok(schema_names) = catalog_manager.schema_names(&catalog_name).await else {
continue;
};
for schema_name in schema_names {
let Ok(table_names) = catalog_manager
.table_names(&catalog_name, &schema_name)
.await
else {
continue;
};
for table_name in table_names {
let Ok(Some(table)) = catalog_manager
.table(&catalog_name, &schema_name, &table_name)
.await
else {
continue;
};
if table.table_type() != TableType::Base {
continue;
}
let table_info = table.table_info();
let region_numbers = &table_info.meta.region_numbers;
region_number += region_numbers.len() as u64;
let engine = &table_info.meta.engine;
match table.region_stats() {
Ok(stats) => {
let stats = stats.into_iter().map(|stat| RegionStat {
region_id: stat.region_id,
approximate_bytes: stat.disk_usage_bytes as i64,
engine: engine.clone(),
..Default::default()
});
region_stats.extend(stats);
}
Err(e) => {
warn!("Failed to get region status, err: {:?}", e);
}
};
}
}
}
(region_number, region_stats)
}


@@ -16,6 +16,7 @@ common-base = { workspace = true }
common-catalog = { workspace = true }
common-error = { workspace = true }
common-grpc = { workspace = true }
common-macro = { workspace = true }
common-meta = { workspace = true }
common-query = { workspace = true }
common-recordbatch = { workspace = true }
@@ -26,7 +27,7 @@ datatypes = { workspace = true }
derive_builder.workspace = true
enum_dispatch = "0.3"
futures-util.workspace = true
moka = { version = "0.9", features = ["future"] }
moka = { workspace = true, features = ["future"] }
parking_lot = "0.12"
prost.workspace = true
rand.workspace = true


@@ -42,14 +42,14 @@ async fn run() {
.insert(vec![to_insert_request(weather_records_1())])
.await
{
error!("Error: {e}");
error!("Error: {e:?}");
}
if let Err(e) = stream_inserter
.insert(vec![to_insert_request(weather_records_2())])
.await
{
error!("Error: {e}");
error!("Error: {e:?}");
}
let result = stream_inserter.finish().await;
@@ -59,7 +59,7 @@ async fn run() {
info!("Rows written: {rows}");
}
Err(e) => {
error!("Error: {e}");
error!("Error: {e:?}");
}
};
}


@@ -138,8 +138,12 @@ impl Client {
Ok((addr, channel))
}
fn max_grpc_message_size(&self) -> usize {
self.inner.channel_manager.config().max_message_size
fn max_grpc_recv_message_size(&self) -> usize {
self.inner.channel_manager.config().max_recv_message_size
}
fn max_grpc_send_message_size(&self) -> usize {
self.inner.channel_manager.config().max_send_message_size
}
pub(crate) fn make_flight_client(&self) -> Result<FlightClient> {
@@ -147,7 +151,8 @@ impl Client {
Ok(FlightClient {
addr,
client: FlightServiceClient::new(channel)
.max_decoding_message_size(self.max_grpc_message_size()),
.max_decoding_message_size(self.max_grpc_recv_message_size())
.max_encoding_message_size(self.max_grpc_send_message_size()),
})
}
@@ -155,13 +160,16 @@ impl Client {
let (_, channel) = self.find_channel()?;
Ok(DatabaseClient {
inner: GreptimeDatabaseClient::new(channel)
.max_decoding_message_size(self.max_grpc_message_size()),
.max_decoding_message_size(self.max_grpc_recv_message_size())
.max_encoding_message_size(self.max_grpc_send_message_size()),
})
}
pub(crate) fn raw_region_client(&self) -> Result<PbRegionClient<Channel>> {
let (_, channel) = self.find_channel()?;
Ok(PbRegionClient::new(channel).max_decoding_message_size(self.max_grpc_message_size()))
Ok(PbRegionClient::new(channel)
.max_decoding_message_size(self.max_grpc_recv_message_size())
.max_encoding_message_size(self.max_grpc_send_message_size()))
}
pub fn make_prometheus_gateway_client(&self) -> Result<PrometheusGatewayClient<Channel>> {

View File

@@ -276,7 +276,7 @@ impl Database {
source: BoxedError::new(ServerSnafu { code, msg }.build()),
};
logging::error!(
"Failed to do Flight get, addr: {}, code: {}, source: {}",
"Failed to do Flight get, addr: {}, code: {}, source: {:?}",
client.addr(),
tonic_code,
error

View File

@@ -17,39 +17,37 @@ use std::any::Any;
use common_error::ext::{BoxedError, ErrorExt};
use common_error::status_code::StatusCode;
use common_error::{GREPTIME_ERROR_CODE, GREPTIME_ERROR_MSG};
use common_macro::stack_trace_debug;
use snafu::{Location, Snafu};
use tonic::{Code, Status};
#[derive(Debug, Snafu)]
#[derive(Snafu)]
#[snafu(visibility(pub))]
#[stack_trace_debug]
pub enum Error {
#[snafu(display("Illegal Flight messages, reason: {}", reason))]
IllegalFlightMessages { reason: String, location: Location },
#[snafu(display("Failed to do Flight get, code: {}, source: {}", tonic_code, source))]
#[snafu(display("Failed to do Flight get, code: {}", tonic_code))]
FlightGet {
addr: String,
tonic_code: Code,
source: BoxedError,
},
#[snafu(display(
"Failure occurs during handling request, location: {}, source: {}",
location,
source
))]
#[snafu(display("Failure occurs during handling request"))]
HandleRequest {
location: Location,
source: BoxedError,
},
#[snafu(display("Failed to convert FlightData, source: {}", source))]
#[snafu(display("Failed to convert FlightData"))]
ConvertFlightData {
location: Location,
source: common_grpc::Error,
},
#[snafu(display("Column datatype error, source: {}", source))]
#[snafu(display("Column datatype error"))]
ColumnDataType {
location: Location,
source: api::error::Error,
@@ -61,18 +59,14 @@ pub enum Error {
#[snafu(display("Missing required field in protobuf, field: {}", field))]
MissingField { field: String, location: Location },
#[snafu(display(
"Failed to create gRPC channel, peer address: {}, source: {}",
addr,
source
))]
#[snafu(display("Failed to create gRPC channel, peer address: {}", addr))]
CreateChannel {
addr: String,
location: Location,
source: common_grpc::error::Error,
},
#[snafu(display("Failed to request RegionServer, code: {}, source: {}", code, source))]
#[snafu(display("Failed to request RegionServer, code: {}", code))]
RegionServer { code: Code, source: BoxedError },
// Server error carried in Tonic Status's metadata.

View File

@@ -26,6 +26,7 @@ common-base = { workspace = true }
common-catalog = { workspace = true }
common-config = { workspace = true }
common-error = { workspace = true }
common-macro = { workspace = true }
common-meta = { workspace = true }
common-procedure = { workspace = true }
common-query = { workspace = true }
@@ -38,14 +39,17 @@ datanode = { workspace = true }
datatypes = { workspace = true }
either = "1.8"
etcd-client.workspace = true
file-engine = { workspace = true }
frontend = { workspace = true }
futures.workspace = true
lazy_static.workspace = true
meta-client = { workspace = true }
meta-srv = { workspace = true }
metrics.workspace = true
mito2 = { workspace = true }
nu-ansi-term = "0.46"
partition = { workspace = true }
plugins.workspace = true
prost.workspace = true
query = { workspace = true }
rand.workspace = true

View File

@@ -35,7 +35,7 @@ use query::QueryEngine;
use rustyline::error::ReadlineError;
use rustyline::Editor;
use session::context::QueryContext;
use snafu::{ErrorCompat, ResultExt};
use snafu::ResultExt;
use substrait::{DFLogicalSubstraitConvertor, SubstraitPlan};
use crate::cli::cmd::ReplCommand;
@@ -148,7 +148,7 @@ impl Repl {
.await
.map_err(|e| {
let status_code = e.status_code();
let root_cause = e.iter_chain().last().unwrap();
let root_cause = e.output_msg();
println!("Error: {}({status_code}), {root_cause}", status_code as u32)
})
.is_ok()
@@ -257,10 +257,11 @@ async fn create_query_engine(meta_addr: &str) -> Result<DatafusionQueryEngine> {
cached_meta_backend.clone(),
datanode_clients,
);
let plugins: Arc<Plugins> = Default::default();
let plugins: Plugins = Default::default();
let state = Arc::new(QueryEngineState::new(
catalog_list,
None,
None,
false,
plugins.clone(),
));

View File

@@ -20,7 +20,7 @@ use client::api::v1::meta::TableRouteValue;
use common_meta::ddl::utils::region_storage_path;
use common_meta::error as MetaError;
use common_meta::key::catalog_name::{CatalogNameKey, CatalogNameValue};
use common_meta::key::datanode_table::{DatanodeTableKey, DatanodeTableValue};
use common_meta::key::datanode_table::{DatanodeTableKey, DatanodeTableValue, RegionInfo};
use common_meta::key::schema_name::{SchemaNameKey, SchemaNameValue};
use common_meta::key::table_info::{TableInfoKey, TableInfoValue};
use common_meta::key::table_name::{TableNameKey, TableNameValue};
@@ -405,8 +405,11 @@ impl MigrateTableMetadata {
DatanodeTableValue::new(
table_id,
regions,
engine.to_string(),
region_storage_path.clone(),
RegionInfo {
engine: engine.to_string(),
region_storage_path: region_storage_path.clone(),
region_options: (&value.table_info.meta.options).into(),
},
),
)
})

View File

@@ -31,6 +31,10 @@ pub struct Instance {
impl Instance {
pub async fn start(&mut self) -> Result<()> {
plugins::start_datanode_plugins(self.datanode.plugins())
.await
.context(StartDatanodeSnafu)?;
self.datanode.start().await.context(StartDatanodeSnafu)
}
@@ -159,11 +163,15 @@ impl StartCommand {
Ok(Options::Datanode(Box::new(opts)))
}
async fn build(self, opts: DatanodeOptions) -> Result<Instance> {
async fn build(self, mut opts: DatanodeOptions) -> Result<Instance> {
let plugins = plugins::setup_datanode_plugins(&mut opts)
.await
.context(StartDatanodeSnafu)?;
logging::info!("Datanode start command: {:#?}", self);
logging::info!("Datanode options: {:#?}", opts);
let datanode = DatanodeBuilder::new(opts, None, Default::default())
let datanode = DatanodeBuilder::new(opts, None, plugins)
.build()
.await
.context(StartDatanodeSnafu)?;
@@ -255,6 +263,7 @@ mod tests {
connect_timeout_millis,
tcp_nodelay,
ddl_timeout_millis,
..
} = options.meta_client.unwrap();
assert_eq!(vec!["127.0.0.1:3002".to_string()], metasrv_addr);

View File

@@ -16,62 +16,64 @@ use std::any::Any;
use common_error::ext::ErrorExt;
use common_error::status_code::StatusCode;
use common_macro::stack_trace_debug;
use config::ConfigError;
use rustyline::error::ReadlineError;
use snafu::{Location, Snafu};
#[derive(Debug, Snafu)]
#[derive(Snafu)]
#[snafu(visibility(pub))]
#[stack_trace_debug]
pub enum Error {
#[snafu(display("Failed to create default catalog and schema, source: {}", source))]
#[snafu(display("Failed to create default catalog and schema"))]
InitMetadata {
location: Location,
source: common_meta::error::Error,
},
#[snafu(display("Failed to iter stream, source: {}", source))]
#[snafu(display("Failed to iter stream"))]
IterStream {
location: Location,
source: common_meta::error::Error,
},
#[snafu(display("Failed to start datanode, source: {}", source))]
#[snafu(display("Failed to start datanode"))]
StartDatanode {
location: Location,
source: datanode::error::Error,
},
#[snafu(display("Failed to shutdown datanode, source: {}", source))]
#[snafu(display("Failed to shutdown datanode"))]
ShutdownDatanode {
location: Location,
source: datanode::error::Error,
},
#[snafu(display("Failed to start frontend, source: {}", source))]
#[snafu(display("Failed to start frontend"))]
StartFrontend {
location: Location,
source: frontend::error::Error,
},
#[snafu(display("Failed to shutdown frontend, source: {}", source))]
#[snafu(display("Failed to shutdown frontend"))]
ShutdownFrontend {
location: Location,
source: frontend::error::Error,
},
#[snafu(display("Failed to build meta server, source: {}", source))]
#[snafu(display("Failed to build meta server"))]
BuildMetaServer {
location: Location,
source: meta_srv::error::Error,
},
#[snafu(display("Failed to start meta server, source: {}", source))]
#[snafu(display("Failed to start meta server"))]
StartMetaServer {
location: Location,
source: meta_srv::error::Error,
},
#[snafu(display("Failed to shutdown meta server, source: {}", source))]
#[snafu(display("Failed to shutdown meta server"))]
ShutdownMetaServer {
location: Location,
source: meta_srv::error::Error,
@@ -83,13 +85,7 @@ pub enum Error {
#[snafu(display("Illegal config: {}", msg))]
IllegalConfig { msg: String, location: Location },
#[snafu(display("Illegal auth config: {}", source))]
IllegalAuthConfig {
location: Location,
source: auth::error::Error,
},
#[snafu(display("Unsupported selector type, {} source: {}", selector_type, source))]
#[snafu(display("Unsupported selector type: {}", selector_type))]
UnsupportedSelectorType {
selector_type: String,
location: Location,
@@ -99,78 +95,89 @@ pub enum Error {
#[snafu(display("Invalid REPL command: {reason}"))]
InvalidReplCommand { reason: String },
#[snafu(display("Cannot create REPL: {}", source))]
#[snafu(display("Cannot create REPL"))]
ReplCreation {
source: ReadlineError,
#[snafu(source)]
error: ReadlineError,
location: Location,
},
#[snafu(display("Error reading command: {}", source))]
#[snafu(display("Error reading command"))]
Readline {
source: ReadlineError,
#[snafu(source)]
error: ReadlineError,
location: Location,
},
#[snafu(display("Failed to request database, sql: {sql}, source: {source}"))]
#[snafu(display("Failed to request database, sql: {sql}"))]
RequestDatabase {
sql: String,
location: Location,
source: client::Error,
},
#[snafu(display("Failed to collect RecordBatches, source: {source}"))]
#[snafu(display("Failed to collect RecordBatches"))]
CollectRecordBatches {
location: Location,
source: common_recordbatch::error::Error,
},
#[snafu(display("Failed to pretty print Recordbatches, source: {source}"))]
#[snafu(display("Failed to pretty print Recordbatches"))]
PrettyPrintRecordBatches {
location: Location,
source: common_recordbatch::error::Error,
},
#[snafu(display("Failed to start Meta client, source: {}", source))]
#[snafu(display("Failed to start Meta client"))]
StartMetaClient {
location: Location,
source: meta_client::error::Error,
},
#[snafu(display("Failed to parse SQL: {}, source: {}", sql, source))]
#[snafu(display("Failed to parse SQL: {}", sql))]
ParseSql {
sql: String,
location: Location,
source: query::error::Error,
},
#[snafu(display("Failed to plan statement, source: {}", source))]
#[snafu(display("Failed to plan statement"))]
PlanStatement {
location: Location,
source: query::error::Error,
},
#[snafu(display("Failed to encode logical plan in substrait, source: {}", source))]
#[snafu(display("Failed to encode logical plan in substrait"))]
SubstraitEncodeLogicalPlan {
location: Location,
source: substrait::error::Error,
},
#[snafu(display("Failed to load layered config, source: {}", source))]
#[snafu(display("Failed to load layered config"))]
LoadLayeredConfig {
source: ConfigError,
#[snafu(source)]
error: ConfigError,
location: Location,
},
#[snafu(display("Failed to start catalog manager, source: {}", source))]
#[snafu(display("Failed to start catalog manager"))]
StartCatalogManager {
location: Location,
source: catalog::error::Error,
},
#[snafu(display("Failed to connect to Etcd at {etcd_addr}, source: {}", source))]
#[snafu(display("Failed to connect to Etcd at {etcd_addr}"))]
ConnectEtcd {
etcd_addr: String,
source: etcd_client::Error,
#[snafu(source)]
error: etcd_client::Error,
location: Location,
},
#[snafu(display("Failed to serde json"))]
SerdeJson {
#[snafu(source)]
error: serde_json::error::Error,
location: Location,
},
}
@@ -195,7 +202,6 @@ impl ErrorExt for Error {
| Error::LoadLayeredConfig { .. }
| Error::IllegalConfig { .. }
| Error::InvalidReplCommand { .. }
| Error::IllegalAuthConfig { .. }
| Error::ConnectEtcd { .. } => StatusCode::InvalidArguments,
Error::ReplCreation { .. } | Error::Readline { .. } => StatusCode::Internal,
@@ -208,6 +214,8 @@ impl ErrorExt for Error {
}
Error::SubstraitEncodeLogicalPlan { source, .. } => source.status_code(),
Error::StartCatalogManager { source, .. } => source.status_code(),
Error::SerdeJson { .. } => StatusCode::Unexpected,
}
}

View File

@@ -12,11 +12,9 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use std::sync::Arc;
use std::time::Duration;
use auth::UserProviderRef;
use clap::Parser;
use common_base::Plugins;
use common_telemetry::logging;
use frontend::frontend::FrontendOptions;
use frontend::instance::{FrontendInstance, Instance as FeInstance};
@@ -25,7 +23,7 @@ use servers::tls::{TlsMode, TlsOption};
use servers::Mode;
use snafu::ResultExt;
use crate::error::{self, IllegalAuthConfigSnafu, Result};
use crate::error::{self, Result, StartFrontendSnafu};
use crate::options::{Options, TopLevelOptions};
pub struct Instance {
@@ -34,10 +32,11 @@ pub struct Instance {
impl Instance {
pub async fn start(&mut self) -> Result<()> {
self.frontend
.start()
plugins::start_frontend_plugins(self.frontend.plugins().clone())
.await
.context(error::StartFrontendSnafu)
.context(StartFrontendSnafu)?;
self.frontend.start().await.context(StartFrontendSnafu)
}
pub async fn stop(&self) -> Result<()> {
@@ -88,6 +87,8 @@ pub struct StartCommand {
#[clap(long)]
http_addr: Option<String>,
#[clap(long)]
http_timeout: Option<u64>,
#[clap(long)]
grpc_addr: Option<String>,
#[clap(long)]
mysql_addr: Option<String>,
@@ -141,6 +142,10 @@ impl StartCommand {
opts.http.addr = addr.clone()
}
if let Some(http_timeout) = self.http_timeout {
opts.http.timeout = Duration::from_secs(http_timeout)
}
if let Some(disable_dashboard) = self.disable_dashboard {
opts.http.disable_dashboard = disable_dashboard;
}
@@ -177,38 +182,32 @@ impl StartCommand {
opts.mode = Mode::Distributed;
}
opts.user_provider = self.user_provider.clone();
Ok(Options::Frontend(Box::new(opts)))
}
async fn build(self, opts: FrontendOptions) -> Result<Instance> {
async fn build(self, mut opts: FrontendOptions) -> Result<Instance> {
let plugins = plugins::setup_frontend_plugins(&mut opts)
.await
.context(StartFrontendSnafu)?;
logging::info!("Frontend start command: {:#?}", self);
logging::info!("Frontend options: {:#?}", opts);
let plugins = Arc::new(load_frontend_plugins(&self.user_provider)?);
let mut instance = FeInstance::try_new_distributed(&opts, plugins.clone())
.await
.context(error::StartFrontendSnafu)?;
.context(StartFrontendSnafu)?;
instance
.build_servers(&opts)
.await
.context(error::StartFrontendSnafu)?;
.context(StartFrontendSnafu)?;
Ok(Instance { frontend: instance })
}
}
pub fn load_frontend_plugins(user_provider: &Option<String>) -> Result<Plugins> {
let plugins = Plugins::new();
if let Some(provider) = user_provider {
let provider = auth::user_provider_from_option(provider).context(IllegalAuthConfigSnafu)?;
plugins.insert::<UserProviderRef>(provider);
}
Ok(plugins)
}
#[cfg(test)]
mod tests {
use std::io::Write;
@@ -218,6 +217,7 @@ mod tests {
use common_base::readable_size::ReadableSize;
use common_test_util::temp_dir::create_named_temp_file;
use frontend::service_config::GrpcOptions;
use servers::http::HttpOptions;
use super::*;
use crate::options::ENV_VAR_SEP;
@@ -303,14 +303,17 @@ mod tests {
#[tokio::test]
async fn test_try_from_start_command_to_anymap() {
let command = StartCommand {
let mut fe_opts = FrontendOptions {
http: HttpOptions {
disable_dashboard: false,
..Default::default()
},
user_provider: Some("static_user_provider:cmd:test=test".to_string()),
disable_dashboard: Some(false),
..Default::default()
};
let plugins = load_frontend_plugins(&command.user_provider);
let plugins = plugins.unwrap();
let plugins = plugins::setup_frontend_plugins(&mut fe_opts).await.unwrap();
let provider = plugins.get::<UserProviderRef>().unwrap();
let result = provider
.authenticate(
@@ -383,7 +386,7 @@ mod tests {
Some("11"),
),
(
// http_options.addr = 127.0.0.1:24000
// http.addr = 127.0.0.1:24000
[
env_prefix.to_string(),
"http".to_uppercase(),

View File

@@ -20,7 +20,7 @@ use meta_srv::bootstrap::MetaSrvInstance;
use meta_srv::metasrv::MetaSrvOptions;
use snafu::ResultExt;
use crate::error::{self, Result};
use crate::error::{self, Result, StartMetaServerSnafu};
use crate::options::{Options, TopLevelOptions};
pub struct Instance {
@@ -29,10 +29,10 @@ pub struct Instance {
impl Instance {
pub async fn start(&mut self) -> Result<()> {
self.instance
.start()
plugins::start_meta_srv_plugins(self.instance.plugins())
.await
.context(error::StartMetaServerSnafu)
.context(StartMetaServerSnafu)?;
self.instance.start().await.context(StartMetaServerSnafu)
}
pub async fn stop(&self) -> Result<()> {
@@ -158,12 +158,15 @@ impl StartCommand {
Ok(Options::Metasrv(Box::new(opts)))
}
async fn build(self, opts: MetaSrvOptions) -> Result<Instance> {
logging::info!("MetaSrv start command: {:#?}", self);
async fn build(self, mut opts: MetaSrvOptions) -> Result<Instance> {
let plugins = plugins::setup_meta_srv_plugins(&mut opts)
.await
.context(StartMetaServerSnafu)?;
logging::info!("MetaSrv start command: {:#?}", self);
logging::info!("MetaSrv options: {:#?}", opts);
let instance = MetaSrvInstance::new(opts)
let instance = MetaSrvInstance::new(opts, plugins)
.await
.context(error::BuildMetaServerSnafu)?;

View File

@@ -21,7 +21,7 @@ use meta_srv::metasrv::MetaSrvOptions;
use serde::{Deserialize, Serialize};
use snafu::ResultExt;
use crate::error::{LoadLayeredConfigSnafu, Result};
use crate::error::{LoadLayeredConfigSnafu, Result, SerdeJsonSnafu};
pub const ENV_VAR_SEP: &str = "__";
pub const ENV_LIST_SEP: &str = ",";
@@ -29,11 +29,11 @@ pub const ENV_LIST_SEP: &str = ",";
/// Options mixed up from datanode, frontend and metasrv.
pub struct MixOptions {
pub data_home: String,
pub procedure_cfg: ProcedureConfig,
pub kv_store_cfg: KvStoreConfig,
pub fe_opts: FrontendOptions,
pub dn_opts: DatanodeOptions,
pub logging_opts: LoggingOptions,
pub procedure: ProcedureConfig,
pub kv_store: KvStoreConfig,
pub frontend: FrontendOptions,
pub datanode: DatanodeOptions,
pub logging: LoggingOptions,
}
pub enum Options {
@@ -56,7 +56,7 @@ impl Options {
Options::Datanode(opts) => &opts.logging,
Options::Frontend(opts) => &opts.logging,
Options::Metasrv(opts) => &opts.logging,
Options::Standalone(opts) => &opts.logging_opts,
Options::Standalone(opts) => &opts.logging,
Options::Cli(opts) => opts,
}
}
@@ -94,9 +94,16 @@ impl Options {
.ignore_empty(true)
};
// Workaround: use a serialized JSON string as a replacement for
// `Config::try_from(&default_opts)`, because `ConfigSerializer` cannot handle
// an empty struct contained within an iterable structure.
// See: https://github.com/mehcode/config-rs/issues/461
let json_str = serde_json::to_string(&default_opts).context(SerdeJsonSnafu)?;
let default_config = File::from_str(&json_str, FileFormat::Json);
// Add default values and environment variables as the sources of the configuration.
let mut layered_config = Config::builder()
.add_source(Config::try_from(&default_opts).context(LoadLayeredConfigSnafu)?)
.add_source(default_config)
.add_source(env_source);
// Add config file as the source of the configuration if it is specified.

View File

@@ -14,23 +14,26 @@
use std::sync::Arc;
use catalog::kvbackend::{DummyKvCacheInvalidator, KvBackendCatalogManager};
use catalog::kvbackend::KvBackendCatalogManager;
use catalog::CatalogManagerRef;
use clap::Parser;
use common_base::Plugins;
use common_config::{kv_store_dir, KvStoreConfig, WalConfig};
use common_config::{metadata_store_dir, KvStoreConfig, WalConfig};
use common_meta::cache_invalidator::DummyKvCacheInvalidator;
use common_meta::kv_backend::KvBackendRef;
use common_procedure::ProcedureManagerRef;
use common_telemetry::info;
use common_telemetry::logging::LoggingOptions;
use datanode::config::{DatanodeOptions, ProcedureConfig, StorageConfig};
use datanode::config::{DatanodeOptions, ProcedureConfig, RegionEngineConfig, StorageConfig};
use datanode::datanode::{Datanode, DatanodeBuilder};
use datanode::region_server::RegionServer;
use file_engine::config::EngineConfig as FileEngineConfig;
use frontend::frontend::FrontendOptions;
use frontend::instance::{FrontendInstance, Instance as FeInstance, StandaloneDatanodeManager};
use frontend::service_config::{
GrpcOptions, InfluxdbOptions, MysqlOptions, OpentsdbOptions, PostgresOptions, PromStoreOptions,
};
use mito2::config::MitoConfig;
use serde::{Deserialize, Serialize};
use servers::http::HttpOptions;
use servers::tls::{TlsMode, TlsOption};
@@ -41,7 +44,6 @@ use crate::error::{
IllegalConfigSnafu, InitMetadataSnafu, Result, ShutdownDatanodeSnafu, ShutdownFrontendSnafu,
StartDatanodeSnafu, StartFrontendSnafu,
};
use crate::frontend::load_frontend_plugins;
use crate::options::{MixOptions, Options, TopLevelOptions};
#[derive(Parser)]
@@ -84,18 +86,20 @@ impl SubCommand {
pub struct StandaloneOptions {
pub mode: Mode,
pub enable_telemetry: bool,
pub http_options: HttpOptions,
pub grpc_options: GrpcOptions,
pub mysql_options: MysqlOptions,
pub postgres_options: PostgresOptions,
pub opentsdb_options: OpentsdbOptions,
pub influxdb_options: InfluxdbOptions,
pub prom_store_options: PromStoreOptions,
pub http: HttpOptions,
pub grpc: GrpcOptions,
pub mysql: MysqlOptions,
pub postgres: PostgresOptions,
pub opentsdb: OpentsdbOptions,
pub influxdb: InfluxdbOptions,
pub prom_store: PromStoreOptions,
pub wal: WalConfig,
pub storage: StorageConfig,
pub kv_store: KvStoreConfig,
pub procedure: ProcedureConfig,
pub logging: LoggingOptions,
/// Options for different store engines.
pub region_engine: Vec<RegionEngineConfig>,
}
impl Default for StandaloneOptions {
@@ -103,18 +107,22 @@ impl Default for StandaloneOptions {
Self {
mode: Mode::Standalone,
enable_telemetry: true,
http_options: HttpOptions::default(),
grpc_options: GrpcOptions::default(),
mysql_options: MysqlOptions::default(),
postgres_options: PostgresOptions::default(),
opentsdb_options: OpentsdbOptions::default(),
influxdb_options: InfluxdbOptions::default(),
prom_store_options: PromStoreOptions::default(),
http: HttpOptions::default(),
grpc: GrpcOptions::default(),
mysql: MysqlOptions::default(),
postgres: PostgresOptions::default(),
opentsdb: OpentsdbOptions::default(),
influxdb: InfluxdbOptions::default(),
prom_store: PromStoreOptions::default(),
wal: WalConfig::default(),
storage: StorageConfig::default(),
kv_store: KvStoreConfig::default(),
procedure: ProcedureConfig::default(),
logging: LoggingOptions::default(),
region_engine: vec![
RegionEngineConfig::Mito(MitoConfig::default()),
RegionEngineConfig::File(FileEngineConfig::default()),
],
}
}
}
@@ -123,13 +131,13 @@ impl StandaloneOptions {
fn frontend_options(self) -> FrontendOptions {
FrontendOptions {
mode: self.mode,
http: self.http_options,
grpc: self.grpc_options,
mysql: self.mysql_options,
postgres: self.postgres_options,
opentsdb: self.opentsdb_options,
influxdb: self.influxdb_options,
prom_store: self.prom_store_options,
http: self.http,
grpc: self.grpc,
mysql: self.mysql,
postgres: self.postgres,
opentsdb: self.opentsdb,
influxdb: self.influxdb,
prom_store: self.prom_store,
meta_client: None,
logging: self.logging,
..Default::default()
@@ -142,6 +150,7 @@ impl StandaloneOptions {
enable_telemetry: self.enable_telemetry,
wal: self.wal,
storage: self.storage,
region_engine: self.region_engine,
..Default::default()
}
}
@@ -231,7 +240,7 @@ impl StartCommand {
);
if let Some(addr) = &self.http_addr {
opts.http_options.addr = addr.clone()
opts.http.addr = addr.clone()
}
if let Some(addr) = &self.rpc_addr {
@@ -245,42 +254,42 @@ impl StartCommand {
}
.fail();
}
opts.grpc_options.addr = addr.clone()
opts.grpc.addr = addr.clone()
}
if let Some(addr) = &self.mysql_addr {
opts.mysql_options.enable = true;
opts.mysql_options.addr = addr.clone();
opts.mysql_options.tls = tls_opts.clone();
opts.mysql.enable = true;
opts.mysql.addr = addr.clone();
opts.mysql.tls = tls_opts.clone();
}
if let Some(addr) = &self.postgres_addr {
opts.postgres_options.enable = true;
opts.postgres_options.addr = addr.clone();
opts.postgres_options.tls = tls_opts;
opts.postgres.enable = true;
opts.postgres.addr = addr.clone();
opts.postgres.tls = tls_opts;
}
if let Some(addr) = &self.opentsdb_addr {
opts.opentsdb_options.enable = true;
opts.opentsdb_options.addr = addr.clone();
opts.opentsdb.enable = true;
opts.opentsdb.addr = addr.clone();
}
if self.influxdb_enable {
opts.influxdb_options.enable = self.influxdb_enable;
opts.influxdb.enable = self.influxdb_enable;
}
let kv_store_cfg = opts.kv_store.clone();
let procedure_cfg = opts.procedure.clone();
let fe_opts = opts.clone().frontend_options();
let logging_opts = opts.logging.clone();
let dn_opts = opts.datanode_options();
let kv_store = opts.kv_store.clone();
let procedure = opts.procedure.clone();
let frontend = opts.clone().frontend_options();
let logging = opts.logging.clone();
let datanode = opts.datanode_options();
Ok(Options::Standalone(Box::new(MixOptions {
procedure_cfg,
kv_store_cfg,
data_home: dn_opts.storage.data_home.to_string(),
fe_opts,
dn_opts,
logging_opts,
procedure,
kv_store,
data_home: datanode.storage.data_home.to_string(),
frontend,
datanode,
logging,
})))
}
@@ -288,9 +297,12 @@ impl StartCommand {
#[allow(unused_variables)]
#[allow(clippy::diverging_sub_expression)]
async fn build(self, opts: MixOptions) -> Result<Instance> {
let plugins = Arc::new(load_frontend_plugins(&self.user_provider)?);
let fe_opts = opts.fe_opts;
let dn_opts = opts.dn_opts;
let mut fe_opts = opts.frontend;
let fe_plugins = plugins::setup_frontend_plugins(&mut fe_opts)
.await
.context(StartFrontendSnafu)?;
let dn_opts = opts.datanode;
info!("Standalone start command: {:#?}", self);
info!(
@@ -298,17 +310,17 @@ impl StartCommand {
fe_opts, dn_opts
);
let kv_dir = kv_store_dir(&opts.data_home);
let metadata_dir = metadata_store_dir(&opts.data_home);
let (kv_store, procedure_manager) = FeInstance::try_build_standalone_components(
kv_dir,
opts.kv_store_cfg,
opts.procedure_cfg,
metadata_dir,
opts.kv_store,
opts.procedure,
)
.await
.context(StartFrontendSnafu)?;
let datanode =
DatanodeBuilder::new(dn_opts.clone(), Some(kv_store.clone()), plugins.clone())
DatanodeBuilder::new(dn_opts.clone(), Some(kv_store.clone()), Default::default())
.build()
.await
.context(StartDatanodeSnafu)?;
@@ -328,7 +340,7 @@ impl StartCommand {
// TODO: build frontend instance like in distributed mode
let mut frontend = build_frontend(
plugins,
fe_plugins,
kv_store,
procedure_manager,
catalog_manager,
@@ -347,7 +359,7 @@ impl StartCommand {
/// Build frontend instance in standalone mode
async fn build_frontend(
plugins: Arc<Plugins>,
plugins: Plugins,
kv_store: KvBackendRef,
procedure_manager: ProcedureManagerRef,
catalog_manager: CatalogManagerRef,
@@ -381,13 +393,13 @@ mod tests {
#[tokio::test]
async fn test_try_from_start_command_to_anymap() {
let command = StartCommand {
let mut fe_opts = FrontendOptions {
user_provider: Some("static_user_provider:cmd:test=test".to_string()),
..Default::default()
};
let plugins = load_frontend_plugins(&command.user_provider);
let plugins = plugins.unwrap();
let plugins = plugins::setup_frontend_plugins(&mut fe_opts).await.unwrap();
let provider = plugins.get::<UserProviderRef>().unwrap();
let result = provider
.authenticate(
@@ -435,9 +447,9 @@ mod tests {
checkpoint_margin = 9
gc_duration = '7s'
[http_options]
[http]
addr = "127.0.0.1:4000"
timeout = "30s"
timeout = "33s"
body_limit = "128MB"
[logging]
@@ -455,12 +467,12 @@ mod tests {
else {
unreachable!()
};
let fe_opts = options.fe_opts;
let dn_opts = options.dn_opts;
let logging_opts = options.logging_opts;
let fe_opts = options.frontend;
let dn_opts = options.datanode;
let logging_opts = options.logging;
assert_eq!(Mode::Standalone, fe_opts.mode);
assert_eq!("127.0.0.1:4000".to_string(), fe_opts.http.addr);
assert_eq!(Duration::from_secs(30), fe_opts.http.timeout);
assert_eq!(Duration::from_secs(33), fe_opts.http.timeout);
assert_eq!(ReadableSize::mb(128), fe_opts.http.body_limit);
assert_eq!("127.0.0.1:4001".to_string(), fe_opts.grpc.addr);
assert!(fe_opts.mysql.enable);
@@ -502,8 +514,8 @@ mod tests {
unreachable!()
};
assert_eq!("/tmp/greptimedb/test/logs", opts.logging_opts.dir);
assert_eq!("debug", opts.logging_opts.level.unwrap());
assert_eq!("/tmp/greptimedb/test/logs", opts.logging.dir);
assert_eq!("debug", opts.logging.level.unwrap());
}
#[test]
@@ -547,7 +559,7 @@ mod tests {
// http.addr = 127.0.0.1:24000
[
env_prefix.to_string(),
"http_options".to_uppercase(),
"http".to_uppercase(),
"addr".to_uppercase(),
]
.join(ENV_VAR_SEP),
@@ -572,18 +584,39 @@ mod tests {
};
// Should be read from env, env > default values.
assert_eq!(opts.logging_opts.dir, "/other/log/dir");
assert_eq!(opts.logging.dir, "/other/log/dir");
// Should be read from config file, config file > env > default values.
assert_eq!(opts.logging_opts.level.as_ref().unwrap(), "debug");
assert_eq!(opts.logging.level.as_ref().unwrap(), "debug");
// Should be read from cli, cli > config file > env > default values.
assert_eq!(opts.fe_opts.http.addr, "127.0.0.1:14000");
assert_eq!(ReadableSize::mb(64), opts.fe_opts.http.body_limit);
assert_eq!(opts.frontend.http.addr, "127.0.0.1:14000");
assert_eq!(ReadableSize::mb(64), opts.frontend.http.body_limit);
// Should be default value.
assert_eq!(opts.fe_opts.grpc.addr, GrpcOptions::default().addr);
assert_eq!(opts.frontend.grpc.addr, GrpcOptions::default().addr);
},
);
}
#[test]
fn test_load_default_standalone_options() {
let options: StandaloneOptions =
Options::load_layered_options(None, "GREPTIMEDB_FRONTEND", None).unwrap();
let default_options = StandaloneOptions::default();
assert_eq!(options.mode, default_options.mode);
assert_eq!(options.enable_telemetry, default_options.enable_telemetry);
assert_eq!(options.http, default_options.http);
assert_eq!(options.grpc, default_options.grpc);
assert_eq!(options.mysql, default_options.mysql);
assert_eq!(options.postgres, default_options.postgres);
assert_eq!(options.opentsdb, default_options.opentsdb);
assert_eq!(options.influxdb, default_options.influxdb);
assert_eq!(options.prom_store, default_options.prom_store);
assert_eq!(options.wal, default_options.wal);
assert_eq!(options.kv_store, default_options.kv_store);
assert_eq!(options.procedure, default_options.procedure);
assert_eq!(options.logging, default_options.logging);
assert_eq!(options.region_engine, default_options.region_engine);
}
}

View File

@@ -9,6 +9,7 @@ anymap = "1.0.0-beta.2"
bitvec = "1.0"
bytes = { version = "1.1", features = ["serde"] }
common-error = { workspace = true }
common-macro = { workspace = true }
paste = "1.0"
serde = { version = "1.0", features = ["derive"] }
snafu.workspace = true

View File

@@ -17,11 +17,13 @@ use std::io::{Read, Write};
use bytes::{Buf, BufMut, BytesMut};
use common_error::ext::ErrorExt;
use common_macro::stack_trace_debug;
use paste::paste;
use snafu::{ensure, Location, ResultExt, Snafu};
#[derive(Debug, Snafu)]
#[derive(Snafu)]
#[snafu(visibility(pub))]
#[stack_trace_debug]
pub enum Error {
#[snafu(display(
"Destination buffer overflow, src_len: {}, dst_len: {}",
@@ -37,9 +39,10 @@ pub enum Error {
#[snafu(display("Buffer underflow"))]
Underflow { location: Location },
#[snafu(display("IO operation reach EOF, source: {}", source))]
#[snafu(display("IO operation reach EOF"))]
Eof {
source: std::io::Error,
#[snafu(source)]
error: std::io::Error,
location: Location,
},
}

View File

@@ -23,6 +23,8 @@ use std::sync::{Arc, Mutex, MutexGuard};
pub use bit_vec::BitVec;
/// [`Plugins`] is a wrapper around `Arc`-shared contents.
/// It is cloneable and can be treated like an `Arc`-wrapped struct.
#[derive(Default, Clone)]
pub struct Plugins {
inner: Arc<Mutex<anymap::Map<dyn Any + Send + Sync>>>,
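
A minimal usage sketch, based on how `Plugins` is used elsewhere in this changeset (values are keyed by their type):
let plugins = Plugins::default();
plugins.insert::<UserProviderRef>(provider); // &self: the inner map is behind a Mutex
let user_provider = plugins.get::<UserProviderRef>(); // Option<UserProviderRef>
let cloned = plugins.clone(); // cheap: clones the Arc, not the map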

View File

@@ -6,6 +6,7 @@ license.workspace = true
[dependencies]
common-error = { workspace = true }
common-macro = { workspace = true }
serde.workspace = true
serde_json = "1.0"
snafu = { version = "0.7", features = ["backtraces"] }

View File

@@ -16,10 +16,12 @@ use std::any::Any;
use common_error::ext::ErrorExt;
use common_error::status_code::StatusCode;
use common_macro::stack_trace_debug;
use snafu::{Location, Snafu};
#[derive(Debug, Snafu)]
#[derive(Snafu)]
#[snafu(visibility(pub))]
#[stack_trace_debug]
pub enum Error {
#[snafu(display("Invalid full table name: {}", table_name))]
InvalidFullTableName {

View File

@@ -13,9 +13,6 @@
// limitations under the License.
use consts::DEFAULT_CATALOG_NAME;
use snafu::ensure;
use crate::error::Result;
pub mod consts;
pub mod error;
@@ -26,17 +23,6 @@ pub fn format_full_table_name(catalog: &str, schema: &str, table: &str) -> Strin
format!("{catalog}.{schema}.{table}")
}
pub fn parse_full_table_name(table_name: &str) -> Result<(&str, &str, &str)> {
let result = table_name.split('.').collect::<Vec<_>>();
ensure!(
result.len() == 3,
error::InvalidFullTableNameSnafu { table_name }
);
Ok((result[0], result[1], result[2]))
}
/// Build db name from catalog and schema string
pub fn build_db_string(catalog: &str, schema: &str) -> String {
if catalog == DEFAULT_CATALOG_NAME {

View File

@@ -17,7 +17,7 @@ use std::time::Duration;
use common_base::readable_size::ReadableSize;
use serde::{Deserialize, Serialize};
#[derive(Debug, Clone, Serialize, Deserialize)]
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
#[serde(default)]
pub struct WalConfig {
// wal file size in bytes
@@ -45,11 +45,11 @@ impl Default for WalConfig {
}
}
pub fn kv_store_dir(store_dir: &str) -> String {
format!("{store_dir}/kv")
pub fn metadata_store_dir(store_dir: &str) -> String {
format!("{store_dir}/metadata")
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
#[serde(default)]
pub struct KvStoreConfig {
// Kv file size in bytes

View File

@@ -18,10 +18,12 @@ async-compression = { version = "0.3", features = [
async-trait.workspace = true
bytes = "1.1"
common-error = { workspace = true }
common-macro = { workspace = true }
common-runtime = { workspace = true }
datafusion.workspace = true
derive_builder.workspace = true
futures.workspace = true
lazy_static.workspace = true
object-store = { workspace = true }
orc-rust = "0.2"
paste = "1.0"

View File

@@ -17,12 +17,14 @@ use std::any::Any;
use arrow_schema::ArrowError;
use common_error::ext::ErrorExt;
use common_error::status_code::StatusCode;
use common_macro::stack_trace_debug;
use datafusion::parquet::errors::ParquetError;
use snafu::{Location, Snafu};
use url::ParseError;
#[derive(Debug, Snafu)]
#[derive(Snafu)]
#[snafu(visibility(pub))]
#[stack_trace_debug]
pub enum Error {
#[snafu(display("Unsupported compression type: {}", compression_type))]
UnsupportedCompressionType {
@@ -30,10 +32,11 @@ pub enum Error {
location: Location,
},
#[snafu(display("Unsupported backend protocol: {}", protocol))]
#[snafu(display("Unsupported backend protocol: {}, url: {}", protocol, url))]
UnsupportedBackendProtocol {
protocol: String,
location: Location,
url: String,
},
#[snafu(display("Unsupported format protocol: {}", format))]
@@ -42,95 +45,109 @@ pub enum Error {
#[snafu(display("empty host: {}", url))]
EmptyHostPath { url: String, location: Location },
#[snafu(display("Invalid url: {}, error :{}", url, source))]
#[snafu(display("Invalid url: {}", url))]
InvalidUrl {
url: String,
source: ParseError,
#[snafu(source)]
error: ParseError,
location: Location,
},
#[snafu(display("Failed to build backend, source: {}", source))]
#[snafu(display("Failed to build backend"))]
BuildBackend {
source: object_store::Error,
#[snafu(source)]
error: object_store::Error,
location: Location,
},
#[snafu(display("Failed to build orc reader, source: {}", source))]
#[snafu(display("Failed to build orc reader"))]
OrcReader {
location: Location,
source: orc_rust::error::Error,
#[snafu(source)]
error: orc_rust::error::Error,
},
#[snafu(display("Failed to read object from path: {}, source: {}", path, source))]
#[snafu(display("Failed to read object from path: {}", path))]
ReadObject {
path: String,
location: Location,
source: object_store::Error,
#[snafu(source)]
error: object_store::Error,
},
#[snafu(display("Failed to write object to path: {}, source: {}", path, source))]
#[snafu(display("Failed to write object to path: {}", path))]
WriteObject {
path: String,
location: Location,
source: object_store::Error,
#[snafu(source)]
error: object_store::Error,
},
#[snafu(display("Failed to write: {}", source))]
#[snafu(display("Failed to write"))]
AsyncWrite {
source: std::io::Error,
#[snafu(source)]
error: std::io::Error,
location: Location,
},
#[snafu(display("Failed to write record batch: {}", source))]
#[snafu(display("Failed to write record batch"))]
WriteRecordBatch {
location: Location,
source: ArrowError,
#[snafu(source)]
error: ArrowError,
},
#[snafu(display("Failed to encode record batch: {}", source))]
#[snafu(display("Failed to encode record batch"))]
EncodeRecordBatch {
location: Location,
source: ParquetError,
#[snafu(source)]
error: ParquetError,
},
#[snafu(display("Failed to read record batch: {}", source))]
#[snafu(display("Failed to read record batch"))]
ReadRecordBatch {
location: Location,
source: datafusion::error::DataFusionError,
#[snafu(source)]
error: datafusion::error::DataFusionError,
},
#[snafu(display("Failed to read parquet source: {}", source))]
#[snafu(display("Failed to read parquet"))]
ReadParquetSnafu {
location: Location,
source: datafusion::parquet::errors::ParquetError,
#[snafu(source)]
error: datafusion::parquet::errors::ParquetError,
},
#[snafu(display("Failed to convert parquet to schema: {}", source))]
#[snafu(display("Failed to convert parquet to schema"))]
ParquetToSchema {
location: Location,
source: datafusion::parquet::errors::ParquetError,
#[snafu(source)]
error: datafusion::parquet::errors::ParquetError,
},
#[snafu(display("Failed to infer schema from file, source: {}", source))]
#[snafu(display("Failed to infer schema from file"))]
InferSchema {
location: Location,
source: arrow_schema::ArrowError,
#[snafu(source)]
error: arrow_schema::ArrowError,
},
#[snafu(display("Failed to list object in path: {}, source: {}", path, source))]
#[snafu(display("Failed to list object in path: {}", path))]
ListObjects {
path: String,
location: Location,
source: object_store::Error,
#[snafu(source)]
error: object_store::Error,
},
#[snafu(display("Invalid connection: {}", msg))]
InvalidConnection { msg: String, location: Location },
#[snafu(display("Failed to join handle: {}", source))]
#[snafu(display("Failed to join handle"))]
JoinHandle {
location: Location,
source: tokio::task::JoinError,
#[snafu(source)]
error: tokio::task::JoinError,
},
#[snafu(display("Failed to parse format {} with value: {}", key, value))]
@@ -140,9 +157,10 @@ pub enum Error {
location: Location,
},
#[snafu(display("Failed to merge schema: {}", source))]
#[snafu(display("Failed to merge schema"))]
MergeSchema {
source: arrow_schema::ArrowError,
#[snafu(source)]
error: arrow_schema::ArrowError,
location: Location,
},

View File

@@ -27,7 +27,7 @@ pub enum Source {
pub struct Lister {
object_store: ObjectStore,
source: Source,
path: String,
root: String,
regex: Option<Regex>,
}
@@ -35,13 +35,13 @@ impl Lister {
pub fn new(
object_store: ObjectStore,
source: Source,
path: String,
root: String,
regex: Option<Regex>,
) -> Self {
Lister {
object_store,
source,
path,
root,
regex,
}
}
@@ -51,9 +51,9 @@ impl Lister {
Source::Dir => {
let streamer = self
.object_store
.lister_with(&self.path)
.lister_with("/")
.await
.context(error::ListObjectsSnafu { path: &self.path })?;
.context(error::ListObjectsSnafu { path: &self.root })?;
streamer
.try_filter(|f| {
@@ -66,22 +66,21 @@ impl Lister {
})
.try_collect::<Vec<_>>()
.await
.context(error::ListObjectsSnafu { path: &self.path })
.context(error::ListObjectsSnafu { path: &self.root })
}
Source::Filename(filename) => {
// make sure this file exists
let file_full_path = format!("{}{}", self.path, filename);
let _ = self.object_store.stat(&file_full_path).await.context(
let _ = self.object_store.stat(filename).await.with_context(|_| {
error::ListObjectsSnafu {
path: &file_full_path,
},
)?;
path: format!("{}{}", &self.root, filename),
}
})?;
Ok(self
.object_store
.list_with(&self.path)
.list_with("/")
.await
.context(error::ListObjectsSnafu { path: &self.path })?
.context(error::ListObjectsSnafu { path: &self.root })?
.into_iter()
.find(|f| f.name() == filename)
.map(|f| vec![f])

View File

@@ -16,19 +16,29 @@ pub mod fs;
pub mod s3;
use std::collections::HashMap;
use lazy_static::lazy_static;
use object_store::ObjectStore;
use regex::Regex;
use snafu::{OptionExt, ResultExt};
use url::{ParseError, Url};
use self::fs::build_fs_backend;
use self::s3::build_s3_backend;
use crate::error::{self, Result};
use crate::util::find_dir_and_filename;
pub const FS_SCHEMA: &str = "FS";
pub const S3_SCHEMA: &str = "S3";
/// Returns `(schema, Option<host>, path)`
pub fn parse_url(url: &str) -> Result<(String, Option<String>, String)> {
#[cfg(windows)]
{
// On Windows, the url may start with `C:/`.
if handle_windows_path(url).is_some() {
return Ok((FS_SCHEMA.to_string(), None, url.to_string()));
}
}
let parsed_url = Url::parse(url);
match parsed_url {
Ok(url) => Ok((
@@ -44,17 +54,47 @@ pub fn parse_url(url: &str) -> Result<(String, Option<String>, String)> {
}
pub fn build_backend(url: &str, connection: &HashMap<String, String>) -> Result<ObjectStore> {
let (schema, host, _path) = parse_url(url)?;
let (schema, host, path) = parse_url(url)?;
let (root, _) = find_dir_and_filename(&path);
match schema.to_uppercase().as_str() {
S3_SCHEMA => {
let host = host.context(error::EmptyHostPathSnafu {
url: url.to_string(),
})?;
Ok(build_s3_backend(&host, "/", connection)?)
Ok(build_s3_backend(&host, &root, connection)?)
}
FS_SCHEMA => Ok(build_fs_backend("/")?),
FS_SCHEMA => Ok(build_fs_backend(&root)?),
_ => error::UnsupportedBackendProtocolSnafu { protocol: schema }.fail(),
_ => error::UnsupportedBackendProtocolSnafu {
protocol: schema,
url,
}
.fail(),
}
}
lazy_static! {
static ref DISK_SYMBOL_PATTERN: Regex = Regex::new("^([A-Za-z]:/)").unwrap();
}
pub fn handle_windows_path(url: &str) -> Option<String> {
DISK_SYMBOL_PATTERN
.captures(url)
.map(|captures| captures[0].to_string())
}
#[cfg(test)]
mod tests {
use super::handle_windows_path;
#[test]
fn test_handle_windows_path() {
assert_eq!(
handle_windows_path("C:/to/path/file"),
Some("C:/".to_string())
);
assert_eq!(handle_windows_path("https://google.com"), None);
assert_eq!(handle_windows_path("s3://bucket/path/to"), None);
}
}
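Rough illustration of the new rooting behavior (the exact split performed by `find_dir_and_filename` is assumed):
// parse_url("s3://bucket/dir/data.parquet") -> ("s3", Some("bucket"), "/dir/data.parquet")
// find_dir_and_filename("/dir/data.parquet") -> ("/dir/", Some("data.parquet"))   (assumed)
// build_s3_backend("bucket", "/dir/", ...)   -> backend rooted at the directory,
// which is why the Lister above now lists with "/" relative to that root.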

View File

@@ -13,11 +13,12 @@
// limitations under the License.
use std::any::Any;
use std::sync::Arc;
use crate::status_code::StatusCode;
/// Extension to [`Error`](std::error::Error) in std.
pub trait ErrorExt: std::error::Error {
pub trait ErrorExt: StackError {
/// Map this error to [StatusCode].
fn status_code(&self) -> StatusCode {
StatusCode::Unknown
@@ -33,6 +34,63 @@ pub trait ErrorExt: std::error::Error {
/// Returns the error as [Any](std::any::Any) so that it can be
/// downcast to a specific implementation.
fn as_any(&self) -> &dyn Any;
fn output_msg(&self) -> String
where
Self: Sized,
{
let error = self.last();
if let Some(external_error) = error.source() {
let external_root = external_error.sources().last().unwrap();
if error.to_string().is_empty() {
format!("{external_root}")
} else {
format!("{error}: {external_root}")
}
} else {
format!("{error}")
}
}
}
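To sketch what `output_msg` produces, using the `Internal`/`Leaf` test errors defined later in this changeset:
// Internal { source: Leaf }: last() walks to Leaf; Leaf has no external
// source, so output_msg() == "This is a leaf error".
// If the last stack error wraps an external error (e.g. an io::Error named
// `error` via #[snafu(source)]), the external root cause is appended:
// "Failed to write: <io error message>".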
pub trait StackError: std::error::Error {
fn debug_fmt(&self, layer: usize, buf: &mut Vec<String>);
fn next(&self) -> Option<&dyn StackError>;
fn last(&self) -> &dyn StackError
where
Self: Sized,
{
let Some(mut result) = self.next() else {
return self;
};
while let Some(err) = result.next() {
result = err;
}
result
}
}
impl<T: ?Sized + StackError> StackError for Arc<T> {
fn debug_fmt(&self, layer: usize, buf: &mut Vec<String>) {
self.as_ref().debug_fmt(layer, buf)
}
fn next(&self) -> Option<&dyn StackError> {
self.as_ref().next()
}
}
impl<T: StackError> StackError for Box<T> {
fn debug_fmt(&self, layer: usize, buf: &mut Vec<String>) {
self.as_ref().debug_fmt(layer, buf)
}
fn next(&self) -> Option<&dyn StackError> {
self.as_ref().next()
}
}
/// An opaque boxed error based on errors that implement [ErrorExt] trait.
@@ -90,6 +148,16 @@ impl crate::snafu::ErrorCompat for BoxedError {
}
}
impl StackError for BoxedError {
fn debug_fmt(&self, layer: usize, buf: &mut Vec<String>) {
self.inner.debug_fmt(layer, buf)
}
fn next(&self) -> Option<&dyn StackError> {
self.inner.next()
}
}
/// Error type with plain error message
#[derive(Debug)]
pub struct PlainError {
@@ -128,3 +196,13 @@ impl crate::ext::ErrorExt for PlainError {
self as _
}
}
impl StackError for PlainError {
fn debug_fmt(&self, layer: usize, buf: &mut Vec<String>) {
buf.push(format!("{}: {}", layer, self.msg))
}
fn next(&self) -> Option<&dyn StackError> {
None
}
}

View File

@@ -50,6 +50,7 @@ mod tests {
use snafu::{GenerateImplicitData, Location};
use super::*;
use crate::ext::StackError;
#[derive(Debug, Snafu)]
#[snafu(display("This is a leaf error"))]
@@ -65,6 +66,14 @@ mod tests {
}
}
impl StackError for Leaf {
fn debug_fmt(&self, _: usize, _: &mut Vec<String>) {}
fn next(&self) -> Option<&dyn StackError> {
None
}
}
#[derive(Debug, Snafu)]
#[snafu(display("This is a leaf with location"))]
struct LeafWithLocation {
@@ -81,6 +90,14 @@ mod tests {
}
}
impl StackError for LeafWithLocation {
fn debug_fmt(&self, _: usize, _: &mut Vec<String>) {}
fn next(&self) -> Option<&dyn StackError> {
None
}
}
#[derive(Debug, Snafu)]
#[snafu(display("Internal error"))]
struct Internal {
@@ -99,6 +116,17 @@ mod tests {
}
}
impl StackError for Internal {
fn debug_fmt(&self, layer: usize, buf: &mut Vec<String>) {
buf.push(format!("{}: Internal error, at {}", layer, self.location));
self.source.debug_fmt(layer + 1, buf);
}
fn next(&self) -> Option<&dyn StackError> {
Some(&self.source)
}
}
#[test]
fn test_debug_format() {
let err = Leaf;

View File

@@ -11,6 +11,7 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![feature(error_iter)]
pub mod ext;
pub mod format;

View File

@@ -19,7 +19,7 @@ use std::fmt;
use snafu::Location;
use crate::ext::ErrorExt;
use crate::ext::{ErrorExt, StackError};
use crate::status_code::StatusCode;
/// A mock error mainly for test.
@@ -69,3 +69,11 @@ impl ErrorExt for MockError {
self
}
}
impl StackError for MockError {
fn debug_fmt(&self, _: usize, _: &mut Vec<String>) {}
fn next(&self) -> Option<&dyn StackError> {
None
}
}

View File

@@ -12,8 +12,10 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use std::sync::Arc;
mod greatest;
mod to_unixtime;
use greatest::GreatestFunction;
use to_unixtime::ToUnixtimeFunction;
use crate::scalars::function_registry::FunctionRegistry;
@@ -23,5 +25,6 @@ pub(crate) struct TimestampFunction;
impl TimestampFunction {
pub fn register(registry: &FunctionRegistry) {
registry.register(Arc::new(ToUnixtimeFunction));
registry.register(Arc::new(GreatestFunction));
}
}

View File

@@ -0,0 +1,175 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::fmt;
use common_query::error::{
self, ArrowComputeSnafu, InvalidFuncArgsSnafu, Result, UnsupportedInputDataTypeSnafu,
};
use common_query::prelude::{Signature, Volatility};
use datatypes::arrow::array::AsArray;
use datatypes::arrow::compute::cast;
use datatypes::arrow::compute::kernels::comparison::gt_dyn;
use datatypes::arrow::compute::kernels::zip;
use datatypes::arrow::datatypes::{DataType as ArrowDataType, Date32Type};
use datatypes::prelude::ConcreteDataType;
use datatypes::vectors::{Helper, VectorRef};
use snafu::{ensure, ResultExt};
use crate::scalars::function::{Function, FunctionContext};
#[derive(Clone, Debug, Default)]
pub struct GreatestFunction;
const NAME: &str = "greatest";
impl Function for GreatestFunction {
fn name(&self) -> &str {
NAME
}
fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
Ok(ConcreteDataType::date_datatype())
}
fn signature(&self) -> Signature {
Signature::uniform(
2,
vec![
ConcreteDataType::string_datatype(),
ConcreteDataType::date_datatype(),
],
Volatility::Immutable,
)
}
fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
ensure!(
columns.len() == 2,
InvalidFuncArgsSnafu {
err_msg: format!(
"The length of the args is not correct, expect exactly two, have: {}",
columns.len()
),
}
);
match columns[0].data_type() {
ConcreteDataType::String(_) => {
let column1 = cast(&columns[0].to_arrow_array(), &ArrowDataType::Date32)
.context(ArrowComputeSnafu)?;
let column1 = column1.as_primitive::<Date32Type>();
let column2 = cast(&columns[1].to_arrow_array(), &ArrowDataType::Date32)
.context(ArrowComputeSnafu)?;
let column2 = column2.as_primitive::<Date32Type>();
let boolean_array = gt_dyn(&column1, &column2).context(ArrowComputeSnafu)?;
let result =
zip::zip(&boolean_array, &column1, &column2).context(ArrowComputeSnafu)?;
Ok(Helper::try_into_vector(&result).context(error::FromArrowArraySnafu)?)
}
ConcreteDataType::Date(_) => {
let column1 = columns[0].to_arrow_array();
let column1 = column1.as_primitive::<Date32Type>();
let column2 = columns[1].to_arrow_array();
let column2 = column2.as_primitive::<Date32Type>();
let boolean_array = gt_dyn(&column1, &column2).context(ArrowComputeSnafu)?;
let result =
zip::zip(&boolean_array, &column1, &column2).context(ArrowComputeSnafu)?;
Ok(Helper::try_into_vector(&result).context(error::FromArrowArraySnafu)?)
}
_ => UnsupportedInputDataTypeSnafu {
function: NAME,
datatypes: columns.iter().map(|c| c.data_type()).collect::<Vec<_>>(),
}
.fail(),
}
}
}
impl fmt::Display for GreatestFunction {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "GREATEST")
}
}
#[cfg(test)]
mod tests {
use std::str::FromStr;
use std::sync::Arc;
use common_time::Date;
use datatypes::prelude::ConcreteDataType;
use datatypes::types::DateType;
use datatypes::value::Value;
use datatypes::vectors::{DateVector, StringVector, Vector};
use super::GreatestFunction;
use crate::scalars::function::FunctionContext;
use crate::scalars::Function;
#[test]
fn test_greatest_takes_string_vector() {
let function = GreatestFunction;
assert_eq!(
function.return_type(&[]).unwrap(),
ConcreteDataType::Date(DateType)
);
let columns = vec![
Arc::new(StringVector::from(vec![
"1970-01-01".to_string(),
"2012-12-23".to_string(),
])) as _,
Arc::new(StringVector::from(vec![
"2001-02-01".to_string(),
"1999-01-01".to_string(),
])) as _,
];
let result = function.eval(FunctionContext::default(), &columns).unwrap();
let result = result.as_any().downcast_ref::<DateVector>().unwrap();
assert_eq!(result.len(), 2);
assert_eq!(
result.get(0),
Value::Date(Date::from_str("2001-02-01").unwrap())
);
assert_eq!(
result.get(1),
Value::Date(Date::from_str("2012-12-23").unwrap())
);
}
#[test]
fn test_greatest_takes_date_vector() {
let function = GreatestFunction;
assert_eq!(
function.return_type(&[]).unwrap(),
ConcreteDataType::Date(DateType)
);
let columns = vec![
Arc::new(DateVector::from_slice(vec![-1, 2])) as _,
Arc::new(DateVector::from_slice(vec![0, 1])) as _,
];
let result = function.eval(FunctionContext::default(), &columns).unwrap();
let result = result.as_any().downcast_ref::<DateVector>().unwrap();
assert_eq!(result.len(), 2);
assert_eq!(
result.get(0),
Value::Date(Date::from_str("1970-01-01").unwrap())
);
assert_eq!(
result.get(1),
Value::Date(Date::from_str("1970-01-03").unwrap())
);
}
}

View File

@@ -10,6 +10,7 @@ async-trait.workspace = true
common-base = { workspace = true }
common-catalog = { workspace = true }
common-error = { workspace = true }
common-macro = { workspace = true }
common-query = { workspace = true }
common-telemetry = { workspace = true }
common-time = { workspace = true }

View File

@@ -16,15 +16,17 @@ use std::any::Any;
use common_error::ext::ErrorExt;
use common_error::status_code::StatusCode;
use common_macro::stack_trace_debug;
use snafu::{Location, Snafu};
#[derive(Debug, Snafu)]
#[derive(Snafu)]
#[snafu(visibility(pub))]
#[stack_trace_debug]
pub enum Error {
#[snafu(display("Illegal delete request, reason: {reason}"))]
IllegalDeleteRequest { reason: String, location: Location },
#[snafu(display("Column datatype error, source: {}", source))]
#[snafu(display("Column datatype error"))]
ColumnDataType {
location: Location,
source: api::error::Error,
@@ -49,7 +51,7 @@ pub enum Error {
#[snafu(display("Invalid column proto: {}", err_msg))]
InvalidColumnProto { err_msg: String, location: Location },
#[snafu(display("Failed to create vector, source: {}", source))]
#[snafu(display("Failed to create vector"))]
CreateVector {
location: Location,
source: datatypes::error::Error,
@@ -58,11 +60,7 @@ pub enum Error {
#[snafu(display("Missing required field in protobuf, field: {}", field))]
MissingField { field: String, location: Location },
#[snafu(display(
"Invalid column proto definition, column: {}, source: {}",
column,
source
))]
#[snafu(display("Invalid column proto definition, column: {}", column))]
InvalidColumnDef {
column: String,
location: Location,

View File

@@ -11,6 +11,7 @@ async-trait = "0.1"
backtrace = "0.3"
common-base = { workspace = true }
common-error = { workspace = true }
common-macro = { workspace = true }
common-recordbatch = { workspace = true }
common-runtime = { workspace = true }
common-telemetry = { workspace = true }

View File

@@ -30,8 +30,9 @@ use crate::error::{CreateChannelSnafu, InvalidConfigFilePathSnafu, InvalidTlsCon
const RECYCLE_CHANNEL_INTERVAL_SECS: u64 = 60;
pub const DEFAULT_GRPC_REQUEST_TIMEOUT_SECS: u64 = 10;
pub const DEFAULT_GRPC_CONNECT_TIMEOUT_SECS: u64 = 10;
pub const DEFAULT_MAX_GRPC_MESSAGE_SIZE: usize = 512 * 1024 * 1024;
pub const DEFAULT_GRPC_CONNECT_TIMEOUT_SECS: u64 = 1;
pub const DEFAULT_MAX_GRPC_RECV_MESSAGE_SIZE: usize = 512 * 1024 * 1024;
pub const DEFAULT_MAX_GRPC_SEND_MESSAGE_SIZE: usize = 512 * 1024 * 1024;
lazy_static! {
static ref ID: AtomicU64 = AtomicU64::new(0);
@@ -248,9 +249,10 @@ pub struct ChannelConfig {
pub tcp_keepalive: Option<Duration>,
pub tcp_nodelay: bool,
pub client_tls: Option<ClientTlsOption>,
// Max gRPC message size
// TODO(dennis): make it configurable
pub max_message_size: usize,
// Max gRPC receive (decoding) message size
pub max_recv_message_size: usize,
// Max gRPC send (encoding) message size
pub max_send_message_size: usize,
}
impl Default for ChannelConfig {
@@ -269,7 +271,8 @@ impl Default for ChannelConfig {
tcp_keepalive: None,
tcp_nodelay: true,
client_tls: None,
max_message_size: DEFAULT_MAX_GRPC_MESSAGE_SIZE,
max_recv_message_size: DEFAULT_MAX_GRPC_RECV_MESSAGE_SIZE,
max_send_message_size: DEFAULT_MAX_GRPC_SEND_MESSAGE_SIZE,
}
}
}
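A sketch of overriding the new split limits via struct-update syntax (the values here are illustrative):
let cfg = ChannelConfig {
    max_recv_message_size: 256 * 1024 * 1024, // decoding limit
    max_send_message_size: 64 * 1024 * 1024,  // encoding limit
    ..Default::default()
};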
@@ -534,7 +537,8 @@ mod tests {
tcp_keepalive: None,
tcp_nodelay: true,
client_tls: None,
max_message_size: DEFAULT_MAX_GRPC_MESSAGE_SIZE,
max_recv_message_size: DEFAULT_MAX_GRPC_RECV_MESSAGE_SIZE,
max_send_message_size: DEFAULT_MAX_GRPC_SEND_MESSAGE_SIZE,
},
default_cfg
);
@@ -577,7 +581,8 @@ mod tests {
client_cert_path: "some_cert_path".to_string(),
client_key_path: "some_key_path".to_string(),
}),
max_message_size: DEFAULT_MAX_GRPC_MESSAGE_SIZE,
max_recv_message_size: DEFAULT_MAX_GRPC_RECV_MESSAGE_SIZE,
max_send_message_size: DEFAULT_MAX_GRPC_SEND_MESSAGE_SIZE,
},
cfg
);

View File

@@ -17,19 +17,22 @@ use std::io;
use common_error::ext::ErrorExt;
use common_error::status_code::StatusCode;
use common_macro::stack_trace_debug;
use snafu::{Location, Snafu};
pub type Result<T> = std::result::Result<T, Error>;
#[derive(Debug, Snafu)]
#[derive(Snafu)]
#[snafu(visibility(pub))]
#[stack_trace_debug]
pub enum Error {
#[snafu(display("Invalid client tls config, {}", msg))]
InvalidTlsConfig { msg: String },
#[snafu(display("Invalid config file path, {}", source))]
#[snafu(display("Invalid config file path"))]
InvalidConfigFilePath {
source: io::Error,
#[snafu(source)]
error: io::Error,
location: Location,
},
@@ -46,13 +49,14 @@ pub enum Error {
location: Location,
},
#[snafu(display("Failed to create gRPC channel, source: {}", source))]
#[snafu(display("Failed to create gRPC channel"))]
CreateChannel {
source: tonic::transport::Error,
#[snafu(source)]
error: tonic::transport::Error,
location: Location,
},
#[snafu(display("Failed to create RecordBatch, source: {}", source))]
#[snafu(display("Failed to create RecordBatch"))]
CreateRecordBatch {
location: Location,
source: common_recordbatch::error::Error,
@@ -61,16 +65,17 @@ pub enum Error {
#[snafu(display("Failed to convert Arrow type: {}", from))]
Conversion { from: String, location: Location },
#[snafu(display("Failed to decode FlightData, source: {}", source))]
#[snafu(display("Failed to decode FlightData"))]
DecodeFlightData {
source: api::DecodeError,
#[snafu(source)]
error: api::DecodeError,
location: Location,
},
#[snafu(display("Invalid FlightData, reason: {}", reason))]
InvalidFlightData { reason: String, location: Location },
#[snafu(display("Failed to convert Arrow Schema, source: {}", source))]
#[snafu(display("Failed to convert Arrow Schema"))]
ConvertArrowSchema {
location: Location,
source: datatypes::error::Error,

View File

@@ -13,6 +13,15 @@ common-telemetry = { workspace = true }
proc-macro2 = "1.0.66"
quote = "1.0"
syn = "1.0"
syn2 = { version = "2.0", package = "syn", features = [
"derive",
"parsing",
"printing",
"clone-impls",
"proc-macro",
"extra-traits",
"full",
] }
[dev-dependencies]
arc-swap = "1.0"

View File

@@ -15,6 +15,7 @@
mod aggr_func;
mod print_caller;
mod range_fn;
mod stack_trace_debug;
use aggr_func::{impl_aggr_func_type_store, impl_as_aggr_func_creator};
use print_caller::process_print_caller;
@@ -87,3 +88,23 @@ pub fn range_fn(args: TokenStream, input: TokenStream) -> TokenStream {
pub fn print_caller(args: TokenStream, input: TokenStream) -> TokenStream {
process_print_caller(args, input)
}
/// Attribute macro to derive [std::fmt::Debug] for the annotated `Error` type.
///
/// The generated `Debug` implementation will print the error in a stack trace style. E.g.:
/// ```plaintext
/// 0: Foo error, at src/common/catalog/src/error.rs:80:10
/// 1: Bar error, at src/common/function/src/error.rs:90:10
/// 2: Root cause, invalid table name, at src/common/catalog/src/error.rs:100:10
/// ```
///
/// Notes on using this macro:
/// - `#[snafu(display)]` must be present on each enum variant,
/// and should not include `location` and `source`.
/// - Only our internal errors can be named `source`.
/// All external errors should be named `error` with an `#[snafu(source)]` annotation.
/// - `common_error` crate must be accessible.
#[proc_macro_attribute]
pub fn stack_trace_debug(args: TokenStream, input: TokenStream) -> TokenStream {
stack_trace_debug::stack_trace_style_impl(args.into(), input.into()).into()
}
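To make the rules above concrete, here is a hedged sketch of an error enum that satisfies them (illustrative names, not from the codebase): the internal chain is a field named `source`, while the external `std::io::Error` is named `error` and marked with `#[snafu(source)]`.

    use common_error::ext::BoxedError;
    use common_macro::stack_trace_debug;
    use snafu::{Location, Snafu};

    #[derive(Snafu)]
    #[snafu(visibility(pub))]
    #[stack_trace_debug]
    pub enum Error {
        // Internal error chain: the field is named `source`.
        #[snafu(display("Failed to read metadata"))]
        ReadMetadata { source: BoxedError, location: Location },

        // External root cause: the field is named `error` with `#[snafu(source)]`.
        #[snafu(display("Failed to open file: {}", path))]
        OpenFile {
            path: String,
            #[snafu(source)]
            error: std::io::Error,
            location: Location,
        },
    }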

View File

@@ -0,0 +1,278 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Implements `::common_error::ext::StackError`.
use proc_macro2::{Span, TokenStream as TokenStream2};
use quote::{quote, quote_spanned};
use syn2::spanned::Spanned;
use syn2::{parenthesized, Attribute, Ident, ItemEnum, Variant};
pub fn stack_trace_style_impl(args: TokenStream2, input: TokenStream2) -> TokenStream2 {
let input_cloned: TokenStream2 = input.clone();
let error_enum_definition: ItemEnum = syn2::parse2(input_cloned).unwrap();
let enum_name = error_enum_definition.ident;
let mut variants = vec![];
for error_variant in error_enum_definition.variants {
let variant = ErrorVariant::from_enum_variant(error_variant);
variants.push(variant);
}
let debug_fmt_fn = build_debug_fmt_impl(enum_name.clone(), variants.clone());
let next_fn = build_next_impl(enum_name.clone(), variants);
let debug_impl = build_debug_impl(enum_name.clone());
quote! {
#args
#input
impl ::common_error::ext::StackError for #enum_name {
#debug_fmt_fn
#next_fn
}
#debug_impl
}
}
/// Generate `debug_fmt` fn.
///
/// The generated fn will be like:
/// ```rust, ignore
/// fn debug_fmt(&self, layer: usize, buf: &mut Vec<String>);
/// ```
fn build_debug_fmt_impl(enum_name: Ident, variants: Vec<ErrorVariant>) -> TokenStream2 {
let match_arms = variants
.iter()
.map(|v| v.to_debug_match_arm())
.collect::<Vec<_>>();
quote! {
fn debug_fmt(&self, layer: usize, buf: &mut Vec<String>) {
use #enum_name::*;
match self {
#(#match_arms)*
}
}
}
}
/// Generate `next` fn.
///
/// The generated fn will be like:
/// ```rust, ignore
/// fn next(&self) -> Option<&dyn ::common_error::ext::StackError>;
/// ```
fn build_next_impl(enum_name: Ident, variants: Vec<ErrorVariant>) -> TokenStream2 {
let match_arms = variants
.iter()
.map(|v| v.to_next_match_arm())
.collect::<Vec<_>>();
quote! {
fn next(&self) -> Option<&dyn ::common_error::ext::StackError> {
use #enum_name::*;
match self {
#(#match_arms)*
}
}
}
}
/// Implement [std::fmt::Debug] via `debug_fmt`
fn build_debug_impl(enum_name: Ident) -> TokenStream2 {
quote! {
impl std::fmt::Debug for #enum_name {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
use ::common_error::ext::StackError;
let mut buf = vec![];
self.debug_fmt(0, &mut buf);
write!(f, "{}", buf.join("\n"))
}
}
}
}
#[derive(Clone, Debug)]
struct ErrorVariant {
name: Ident,
fields: Vec<Ident>,
has_location: bool,
has_source: bool,
has_external_cause: bool,
display: TokenStream2,
span: Span,
cfg_attr: Option<Attribute>,
}
impl ErrorVariant {
/// Construct self from [Variant]
fn from_enum_variant(variant: Variant) -> Self {
let span = variant.span();
let mut has_location = false;
let mut has_source = false;
let mut has_external_cause = false;
for field in &variant.fields {
if let Some(ident) = &field.ident {
if ident == "location" {
has_location = true;
} else if ident == "source" {
has_source = true;
} else if ident == "error" {
has_external_cause = true;
}
}
}
let mut display = None;
let mut cfg_attr = None;
for attr in variant.attrs {
if attr.path().is_ident("snafu") {
attr.parse_nested_meta(|meta| {
if meta.path.is_ident("display") {
let content;
parenthesized!(content in meta.input);
let display_ts: TokenStream2 = content.parse()?;
display = Some(display_ts);
Ok(())
} else {
Err(meta.error("unrecognized repr"))
}
})
.expect("Each error should contains a display attribute");
}
if attr.path().is_ident("cfg") {
cfg_attr = Some(attr);
}
}
let field_ident = variant
.fields
.iter()
.map(|f| f.ident.clone().unwrap_or_else(|| Ident::new("_", f.span())))
.collect();
Self {
name: variant.ident,
fields: field_ident,
has_location,
has_source,
has_external_cause,
display: display.unwrap(),
span,
cfg_attr,
}
}
/// Convert self into a match arm that will be used in [build_debug_fmt_impl].
///
/// The generated match arm will be like:
/// ```rust, ignore
/// ErrorKindWithSource { source, .. } => {
/// debug_fmt(source, layer + 1, buf);
/// },
/// ErrorKindWithoutSource { .. } => {
/// buf.push(format!("{layer}: {}, at {}", format!(#display), location));
/// }
/// ```
///
/// The generated code assumes that fn `debug_fmt` and the variables `layer` and `buf` are in scope.
fn to_debug_match_arm(&self) -> TokenStream2 {
let name = &self.name;
let fields = &self.fields;
let display = &self.display;
let cfg = if let Some(cfg) = &self.cfg_attr {
quote_spanned!(cfg.span() => #cfg)
} else {
quote! {}
};
match (self.has_location, self.has_source, self.has_external_cause) {
(true, true, _) => quote_spanned! {
self.span => #cfg #[allow(unused_variables)] #name { #(#fields),*, } => {
buf.push(format!("{layer}: {}, at {}", format!(#display), location));
source.debug_fmt(layer + 1, buf);
},
},
(true, false, true) => quote_spanned! {
self.span => #cfg #[allow(unused_variables)] #name { #(#fields),* } => {
buf.push(format!("{layer}: {}, at {}", format!(#display), location));
buf.push(format!("{}: {:?}", layer + 1, error));
},
},
(true, false, false) => quote_spanned! {
self.span => #cfg #[allow(unused_variables)] #name { #(#fields),* } => {
buf.push(format!("{layer}: {}, at {}", format!(#display), location));
},
},
(false, true, _) => quote_spanned! {
self.span => #cfg #[allow(unused_variables)] #name { #(#fields),* } => {
buf.push(format!("{layer}: {}", format!(#display)));
source.debug_fmt(layer + 1, buf);
},
},
(false, false, true) => quote_spanned! {
self.span => #cfg #[allow(unused_variables)] #name { #(#fields),* } => {
buf.push(format!("{layer}: {}", format!(#display)));
buf.push(format!("{}: {:?}", layer + 1, error));
},
},
(false, false, false) => quote_spanned! {
self.span => #cfg #[allow(unused_variables)] #name { #(#fields),* } => {
buf.push(format!("{layer}: {}", format!(#display)));
},
},
}
}
/// Convert self into a match arm that will be used in [build_next_impl].
///
/// The generated match arm will be like:
/// ```rust, ignore
/// ErrorKindWithSource { source, .. } => {
/// Some(source)
/// },
/// ErrorKindWithoutSource { .. } => {
/// None
/// }
/// ```
fn to_next_match_arm(&self) -> TokenStream2 {
let name = &self.name;
let fields = &self.fields;
let cfg = if let Some(cfg) = &self.cfg_attr {
quote_spanned!(cfg.span() => #cfg)
} else {
quote! {}
};
if self.has_source {
quote_spanned! {
self.span => #cfg #[allow(unused_variables)] #name { #(#fields),* } => {
Some(source)
},
}
} else {
quote_spanned! {
self.span => #cfg #[allow(unused_variables)] #name { #(#fields),* } => {
None
}
}
}
}
}
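Tying the helpers together: for the illustrative enum sketched after the macro's doc comment, the expansion comes out roughly as below. This is a hand-written approximation of the generated code, not literal `cargo expand` output.

    impl std::fmt::Debug for Error {
        fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
            use ::common_error::ext::StackError;
            let mut buf = vec![];
            self.debug_fmt(0, &mut buf);
            write!(f, "{}", buf.join("\n"))
        }
    }

    impl ::common_error::ext::StackError for Error {
        fn debug_fmt(&self, layer: usize, buf: &mut Vec<String>) {
            use Error::*;
            match self {
                // (location, source): print this layer, then recurse into `source`.
                ReadMetadata { source, location } => {
                    buf.push(format!("{layer}: {}, at {}", format!("Failed to read metadata"), location));
                    source.debug_fmt(layer + 1, buf);
                }
                // (location, external error): print this layer, then the root cause.
                OpenFile { path, error, location } => {
                    buf.push(format!("{layer}: {}, at {}", format!("Failed to open file: {}", path), location));
                    buf.push(format!("{}: {:?}", layer + 1, error));
                }
            }
        }

        fn next(&self) -> Option<&dyn ::common_error::ext::StackError> {
            use Error::*;
            match self {
                ReadMetadata { source, .. } => Some(source),
                OpenFile { .. } => None,
            }
        }
    }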

View File

@@ -6,6 +6,7 @@ license.workspace = true
[dependencies]
common-error = { workspace = true }
common-macro = { workspace = true }
snafu.workspace = true
tempfile = "3.4"
tokio.workspace = true

View File

@@ -16,14 +16,16 @@ use std::any::Any;
use common_error::ext::{BoxedError, ErrorExt};
use common_error::status_code::StatusCode;
use common_macro::stack_trace_debug;
use snafu::Snafu;
pub type Result<T> = std::result::Result<T, Error>;
#[derive(Debug, Snafu)]
#[derive(Snafu)]
#[snafu(visibility(pub))]
#[stack_trace_debug]
pub enum Error {
#[snafu(display("{source}"))]
#[snafu(display("Internal error"))]
Internal { source: BoxedError },
#[snafu(display("Memory profiling is not supported"))]

View File

@@ -17,13 +17,18 @@ use std::path::PathBuf;
use common_error::ext::{BoxedError, ErrorExt};
use common_error::status_code::StatusCode;
use common_macro::stack_trace_debug;
use snafu::{Location, Snafu};
#[derive(Debug, Snafu)]
#[derive(Snafu)]
#[snafu(visibility(pub))]
#[stack_trace_debug]
pub enum Error {
#[snafu(display("Failed to read OPT_PROF, source: {}", source))]
ReadOptProf { source: tikv_jemalloc_ctl::Error },
#[snafu(display("Failed to read OPT_PROF"))]
ReadOptProf {
#[snafu(source)]
error: tikv_jemalloc_ctl::Error,
},
#[snafu(display("Memory profiling is not enabled"))]
ProfilingNotEnabled,
@@ -31,20 +36,18 @@ pub enum Error {
#[snafu(display("Failed to build temp file from given path: {:?}", path))]
BuildTempPath { path: PathBuf, location: Location },
#[snafu(display("Failed to open temp file: {}, source: {}", path, source))]
#[snafu(display("Failed to open temp file: {}", path))]
OpenTempFile {
path: String,
source: std::io::Error,
#[snafu(source)]
error: std::io::Error,
},
#[snafu(display(
"Failed to dump profiling data to temp file: {:?}, source: {}",
path,
source
))]
#[snafu(display("Failed to dump profiling data to temp file: {:?}", path))]
DumpProfileData {
path: PathBuf,
source: tikv_jemalloc_ctl::Error,
#[snafu(source)]
error: tikv_jemalloc_ctl::Error,
},
}

View File

@@ -12,9 +12,12 @@ api = { workspace = true }
arrow-flight.workspace = true
async-stream.workspace = true
async-trait.workspace = true
base64 = "0.21"
bytes = "1.4"
common-catalog = { workspace = true }
common-error = { workspace = true }
common-grpc-expr.workspace = true
common-macro = { workspace = true }
common-procedure = { workspace = true }
common-recordbatch = { workspace = true }
common-runtime = { workspace = true }

View File

@@ -17,8 +17,27 @@ use std::sync::Arc;
use table::metadata::TableId;
use crate::error::Result;
use crate::key::table_info::TableInfoKey;
use crate::key::table_name::TableNameKey;
use crate::key::table_route::TableRouteKey;
use crate::key::TableMetaKey;
use crate::table_name::TableName;
/// KvBackend cache invalidator
#[async_trait::async_trait]
pub trait KvCacheInvalidator: Send + Sync {
async fn invalidate_key(&self, key: &[u8]);
}
pub type KvCacheInvalidatorRef = Arc<dyn KvCacheInvalidator>;
pub struct DummyKvCacheInvalidator;
#[async_trait::async_trait]
impl KvCacheInvalidator for DummyKvCacheInvalidator {
async fn invalidate_key(&self, _key: &[u8]) {}
}
/// Carries the context for cache invalidation, e.g., span id, trace id, etc.
#[derive(Default)]
pub struct Context {
@@ -47,3 +66,27 @@ impl CacheInvalidator for DummyCacheInvalidator {
Ok(())
}
}
#[async_trait::async_trait]
impl<T> CacheInvalidator for T
where
T: KvCacheInvalidator,
{
async fn invalidate_table_name(&self, _ctx: &Context, table_name: TableName) -> Result<()> {
let key: TableNameKey = (&table_name).into();
self.invalidate_key(&key.as_raw_key()).await;
Ok(())
}
async fn invalidate_table_id(&self, _ctx: &Context, table_id: TableId) -> Result<()> {
let key = TableInfoKey::new(table_id);
self.invalidate_key(&key.as_raw_key()).await;
let key = &TableRouteKey { table_id };
self.invalidate_key(&key.as_raw_key()).await;
Ok(())
}
}
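Note the blanket impl: any type that can drop a raw key automatically gains table-name and table-id cache invalidation. A minimal sketch with a hypothetical in-memory cache (illustrative, not from the codebase):

    use std::collections::HashMap;
    use std::sync::Mutex;

    // A hypothetical in-process cache keyed by raw metadata keys. Implementing
    // `KvCacheInvalidator` is enough: the blanket impl above derives
    // `CacheInvalidator` from it.
    struct LocalKvCache {
        entries: Mutex<HashMap<Vec<u8>, Vec<u8>>>,
    }

    #[async_trait::async_trait]
    impl KvCacheInvalidator for LocalKvCache {
        async fn invalidate_key(&self, key: &[u8]) {
            self.entries.lock().unwrap().remove(key);
        }
    }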

View File

@@ -28,6 +28,7 @@ use crate::rpc::router::RegionRoute;
pub mod alter_table;
pub mod create_table;
pub mod drop_table;
pub mod truncate_table;
pub mod utils;
#[derive(Debug, Default)]

View File

@@ -45,6 +45,7 @@ use crate::error::{
use crate::key::table_info::TableInfoValue;
use crate::key::table_name::TableNameKey;
use crate::key::table_route::TableRouteValue;
use crate::key::DeserializedValueWithBytes;
use crate::metrics;
use crate::rpc::ddl::AlterTableTask;
use crate::rpc::router::{find_leader_regions, find_leaders};
@@ -63,7 +64,7 @@ impl AlterTableProcedure {
pub fn new(
cluster_id: u64,
task: AlterTableTask,
table_info_value: TableInfoValue,
table_info_value: DeserializedValueWithBytes<TableInfoValue>,
context: DdlContext,
) -> Result<Self> {
let alter_kind = task
@@ -191,7 +192,8 @@ impl AlterTableProcedure {
.await?
.with_context(|| TableRouteNotFoundSnafu {
table_name: table_ref.to_string(),
})?;
})?
.into_inner();
let leaders = find_leaders(&region_routes);
let mut alter_region_tasks = Vec::with_capacity(leaders.len());
@@ -413,7 +415,7 @@ pub struct AlterTableData {
state: AlterTableState,
task: AlterTableTask,
/// Table info value before alteration.
table_info_value: TableInfoValue,
table_info_value: DeserializedValueWithBytes<TableInfoValue>,
cluster_id: u64,
/// Next column id of the table if the task adds columns to the table.
next_column_id: Option<ColumnId>,
@@ -422,7 +424,7 @@ pub struct AlterTableData {
impl AlterTableData {
pub fn new(
task: AlterTableTask,
table_info_value: TableInfoValue,
table_info_value: DeserializedValueWithBytes<TableInfoValue>,
cluster_id: u64,
next_column_id: Option<ColumnId>,
) -> Self {

View File

@@ -161,7 +161,6 @@ impl CreateTableProcedure {
engine: create_table_expr.engine.to_string(),
column_defs,
primary_key,
create_if_not_exists: true,
path: String::new(),
options: create_table_expr.table_options.clone(),
})
@@ -200,8 +199,8 @@ impl CreateTableProcedure {
for request in requests {
let request = RegionRequest {
header: Some(RegionRequestHeader {
trace_id: 0,
span_id: 0,
trace_id: common_telemetry::trace_id().unwrap_or_default(),
..Default::default()
}),
body: Some(request),
};

View File

@@ -39,6 +39,7 @@ use crate::error::{self, Result};
use crate::key::table_info::TableInfoValue;
use crate::key::table_name::TableNameKey;
use crate::key::table_route::TableRouteValue;
use crate::key::DeserializedValueWithBytes;
use crate::metrics;
use crate::rpc::ddl::DropTableTask;
use crate::rpc::router::{find_leader_regions, find_leaders, RegionRoute};
@@ -55,8 +56,8 @@ impl DropTableProcedure {
pub fn new(
cluster_id: u64,
task: DropTableTask,
table_route_value: TableRouteValue,
table_info_value: TableInfoValue,
table_route_value: DeserializedValueWithBytes<TableRouteValue>,
table_info_value: DeserializedValueWithBytes<TableInfoValue>,
context: DdlContext,
) -> Self {
Self {
@@ -156,8 +157,8 @@ impl DropTableProcedure {
let request = RegionRequest {
header: Some(RegionRequestHeader {
trace_id: 0,
span_id: 0,
trace_id: common_telemetry::trace_id().unwrap_or_default(),
..Default::default()
}),
body: Some(region_request::Body::Drop(PbDropRegionRequest {
region_id: region_id.as_u64(),
@@ -231,16 +232,16 @@ pub struct DropTableData {
pub state: DropTableState,
pub cluster_id: u64,
pub task: DropTableTask,
pub table_route_value: TableRouteValue,
pub table_info_value: TableInfoValue,
pub table_route_value: DeserializedValueWithBytes<TableRouteValue>,
pub table_info_value: DeserializedValueWithBytes<TableInfoValue>,
}
impl DropTableData {
pub fn new(
cluster_id: u64,
task: DropTableTask,
table_route_value: TableRouteValue,
table_info_value: TableInfoValue,
table_route_value: DeserializedValueWithBytes<TableRouteValue>,
table_info_value: DeserializedValueWithBytes<TableInfoValue>,
) -> Self {
Self {
state: DropTableState::Prepare,

View File

@@ -0,0 +1,235 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use api::v1::region::{
region_request, RegionRequest, RegionRequestHeader, TruncateRequest as PbTruncateRegionRequest,
};
use async_trait::async_trait;
use common_procedure::error::{FromJsonSnafu, ToJsonSnafu};
use common_procedure::{
Context as ProcedureContext, LockKey, Procedure, Result as ProcedureResult, Status,
};
use common_telemetry::debug;
use futures::future::join_all;
use serde::{Deserialize, Serialize};
use snafu::{ensure, ResultExt};
use store_api::storage::RegionId;
use strum::AsRefStr;
use table::engine::TableReference;
use table::metadata::{RawTableInfo, TableId};
use super::utils::handle_retry_error;
use crate::ddl::utils::handle_operate_region_error;
use crate::ddl::DdlContext;
use crate::error::{Result, TableNotFoundSnafu};
use crate::key::table_info::TableInfoValue;
use crate::key::table_name::TableNameKey;
use crate::key::DeserializedValueWithBytes;
use crate::metrics;
use crate::rpc::ddl::TruncateTableTask;
use crate::rpc::router::{find_leader_regions, find_leaders, RegionRoute};
use crate::table_name::TableName;
pub struct TruncateTableProcedure {
context: DdlContext,
data: TruncateTableData,
}
#[async_trait]
impl Procedure for TruncateTableProcedure {
fn type_name(&self) -> &str {
Self::TYPE_NAME
}
async fn execute(&mut self, _ctx: &ProcedureContext) -> ProcedureResult<Status> {
let state = &self.data.state;
let _timer = common_telemetry::timer!(
metrics::METRIC_META_PROCEDURE_TRUNCATE_TABLE,
&[("step", state.as_ref().to_string())]
);
match self.data.state {
TruncateTableState::Prepare => self.on_prepare().await,
TruncateTableState::DatanodeTruncateRegions => {
self.on_datanode_truncate_regions().await
}
}
.map_err(handle_retry_error)
}
fn dump(&self) -> ProcedureResult<String> {
serde_json::to_string(&self.data).context(ToJsonSnafu)
}
fn lock_key(&self) -> LockKey {
let table_ref = &self.data.table_ref();
let key = common_catalog::format_full_table_name(
table_ref.catalog,
table_ref.schema,
table_ref.table,
);
LockKey::single(key)
}
}
impl TruncateTableProcedure {
pub(crate) const TYPE_NAME: &'static str = "metasrv-procedure::TruncateTable";
pub(crate) fn new(
cluster_id: u64,
task: TruncateTableTask,
table_info_value: DeserializedValueWithBytes<TableInfoValue>,
region_routes: Vec<RegionRoute>,
context: DdlContext,
) -> Self {
Self {
context,
data: TruncateTableData::new(cluster_id, task, table_info_value, region_routes),
}
}
pub(crate) fn from_json(json: &str, context: DdlContext) -> ProcedureResult<Self> {
let data = serde_json::from_str(json).context(FromJsonSnafu)?;
Ok(Self { context, data })
}
// Checks whether the table exists.
async fn on_prepare(&mut self) -> Result<Status> {
let table_ref = &self.data.table_ref();
let manager = &self.context.table_metadata_manager;
let exist = manager
.table_name_manager()
.exists(TableNameKey::new(
table_ref.catalog,
table_ref.schema,
table_ref.table,
))
.await?;
ensure!(
exist,
TableNotFoundSnafu {
table_name: table_ref.to_string()
}
);
self.data.state = TruncateTableState::DatanodeTruncateRegions;
Ok(Status::executing(true))
}
async fn on_datanode_truncate_regions(&mut self) -> Result<Status> {
let table_id = self.data.table_id();
let region_routes = &self.data.region_routes;
let leaders = find_leaders(region_routes);
let mut truncate_region_tasks = Vec::with_capacity(leaders.len());
for datanode in leaders {
let requester = self.context.datanode_manager.datanode(&datanode).await;
let regions = find_leader_regions(region_routes, &datanode);
for region in regions {
let region_id = RegionId::new(table_id, region);
debug!(
"Truncating table {} region {} on Datanode {:?}",
self.data.table_ref(),
region_id,
datanode
);
let request = RegionRequest {
header: Some(RegionRequestHeader {
trace_id: common_telemetry::trace_id().unwrap_or_default(),
..Default::default()
}),
body: Some(region_request::Body::Truncate(PbTruncateRegionRequest {
region_id: region_id.as_u64(),
})),
};
let datanode = datanode.clone();
let requester = requester.clone();
truncate_region_tasks.push(async move {
if let Err(err) = requester.handle(request).await {
return Err(handle_operate_region_error(datanode)(err));
}
Ok(())
});
}
}
join_all(truncate_region_tasks)
.await
.into_iter()
.collect::<Result<Vec<_>>>()?;
Ok(Status::Done)
}
}
#[derive(Debug, Serialize, Deserialize)]
pub struct TruncateTableData {
state: TruncateTableState,
cluster_id: u64,
task: TruncateTableTask,
table_info_value: DeserializedValueWithBytes<TableInfoValue>,
region_routes: Vec<RegionRoute>,
}
impl TruncateTableData {
pub fn new(
cluster_id: u64,
task: TruncateTableTask,
table_info_value: DeserializedValueWithBytes<TableInfoValue>,
region_routes: Vec<RegionRoute>,
) -> Self {
Self {
state: TruncateTableState::Prepare,
cluster_id,
task,
table_info_value,
region_routes,
}
}
pub fn table_ref(&self) -> TableReference {
self.task.table_ref()
}
pub fn table_name(&self) -> TableName {
self.task.table_name()
}
fn table_info(&self) -> &RawTableInfo {
&self.table_info_value.table_info
}
fn table_id(&self) -> TableId {
self.table_info().ident.table_id
}
}
#[derive(Debug, Serialize, Deserialize, AsRefStr)]
enum TruncateTableState {
/// Prepares to truncate the table
Prepare,
/// Truncates regions on Datanode
DatanodeTruncateRegions,
}

View File

@@ -15,7 +15,7 @@
use std::sync::Arc;
use common_procedure::{watcher, ProcedureId, ProcedureManagerRef, ProcedureWithId};
use common_telemetry::{error, info};
use common_telemetry::info;
use snafu::{OptionExt, ResultExt};
use crate::cache_invalidator::CacheInvalidatorRef;
@@ -23,18 +23,19 @@ use crate::datanode_manager::DatanodeManagerRef;
use crate::ddl::alter_table::AlterTableProcedure;
use crate::ddl::create_table::CreateTableProcedure;
use crate::ddl::drop_table::DropTableProcedure;
use crate::ddl::truncate_table::TruncateTableProcedure;
use crate::ddl::{
DdlContext, DdlTaskExecutor, ExecutorContext, TableMetadataAllocatorContext,
TableMetadataAllocatorRef,
};
use crate::error::{
self, RegisterProcedureLoaderSnafu, Result, SubmitProcedureSnafu, TableNotFoundSnafu,
UnsupportedSnafu, WaitProcedureSnafu,
WaitProcedureSnafu,
};
use crate::key::table_info::TableInfoValue;
use crate::key::table_name::TableNameKey;
use crate::key::table_route::TableRouteValue;
use crate::key::TableMetadataManagerRef;
use crate::key::{DeserializedValueWithBytes, TableMetadataManagerRef};
use crate::rpc::ddl::DdlTask::{AlterTable, CreateTable, DropTable, TruncateTable};
use crate::rpc::ddl::{
AlterTableTask, CreateTableTask, DropTableTask, SubmitDdlTaskRequest, SubmitDdlTaskResponse,
@@ -122,6 +123,20 @@ impl DdlManager {
)
.context(RegisterProcedureLoaderSnafu {
type_name: AlterTableProcedure::TYPE_NAME,
})?;
let context = self.create_context();
self.procedure_manager
.register_loader(
TruncateTableProcedure::TYPE_NAME,
Box::new(move |json| {
let context = context.clone();
TruncateTableProcedure::from_json(json, context).map(|p| Box::new(p) as _)
}),
)
.context(RegisterProcedureLoaderSnafu {
type_name: TruncateTableProcedure::TYPE_NAME,
})
}
@@ -129,7 +144,7 @@ impl DdlManager {
&self,
cluster_id: u64,
alter_table_task: AlterTableTask,
table_info_value: TableInfoValue,
table_info_value: DeserializedValueWithBytes<TableInfoValue>,
) -> Result<ProcedureId> {
let context = self.create_context();
@@ -161,8 +176,8 @@ impl DdlManager {
&self,
cluster_id: u64,
drop_table_task: DropTableTask,
table_info_value: TableInfoValue,
table_route_value: TableRouteValue,
table_info_value: DeserializedValueWithBytes<TableInfoValue>,
table_route_value: DeserializedValueWithBytes<TableRouteValue>,
) -> Result<ProcedureId> {
let context = self.create_context();
@@ -183,15 +198,21 @@ impl DdlManager {
&self,
cluster_id: u64,
truncate_table_task: TruncateTableTask,
table_info_value: DeserializedValueWithBytes<TableInfoValue>,
region_routes: Vec<RegionRoute>,
) -> Result<ProcedureId> {
error!("Truncate table procedure is not supported, cluster_id = {}, truncate_table_task = {:?}, region_routes = {:?}",
cluster_id, truncate_table_task, region_routes);
let context = self.create_context();
let procedure = TruncateTableProcedure::new(
cluster_id,
truncate_table_task,
table_info_value,
region_routes,
context,
);
UnsupportedSnafu {
operation: "TRUNCATE TABLE",
}
.fail()
let procedure_with_id = ProcedureWithId::with_random_id(Box::new(procedure));
self.submit_procedure(procedure_with_id).await
}
async fn submit_procedure(&self, procedure_with_id: ProcedureWithId) -> Result<ProcedureId> {
@@ -216,32 +237,34 @@ async fn handle_truncate_table_task(
cluster_id: u64,
truncate_table_task: TruncateTableTask,
) -> Result<SubmitDdlTaskResponse> {
let truncate_table = &truncate_table_task.truncate_table;
let table_id = truncate_table
.table_id
.as_ref()
.context(error::UnexpectedSnafu {
err_msg: "expected table id ",
})?
.id;
let table_id = truncate_table_task.table_id;
let table_metadata_manager = &ddl_manager.table_metadata_manager();
let table_ref = truncate_table_task.table_ref();
let table_route_value = ddl_manager
.table_metadata_manager()
.table_route_manager()
.get(table_id)
.await?
.with_context(|| error::TableRouteNotFoundSnafu {
table_name: table_ref.to_string(),
})?;
let (table_info_value, table_route_value) =
table_metadata_manager.get_full_table_info(table_id).await?;
let table_route = table_route_value.region_routes;
let table_info_value = table_info_value.with_context(|| error::TableInfoNotFoundSnafu {
table_name: table_ref.to_string(),
})?;
let table_route_value = table_route_value.with_context(|| error::TableRouteNotFoundSnafu {
table_name: table_ref.to_string(),
})?;
let table_route = table_route_value.into_inner().region_routes;
let id = ddl_manager
.submit_truncate_table_task(cluster_id, truncate_table_task, table_route)
.submit_truncate_table_task(
cluster_id,
truncate_table_task,
table_info_value,
table_route,
)
.await?;
info!("Table: {table_id} is truncated via procedure_id {id:?}");
Ok(SubmitDdlTaskResponse {
key: id.to_string().into(),
..Default::default()

View File

@@ -29,3 +29,9 @@ pub const REGION_LEASE_SECS: u64 =
/// When creating table or region failover, a target node needs to be selected.
/// If the node's lease has expired, the `Selector` will not select it.
pub const DATANODE_LEASE_SECS: u64 = REGION_LEASE_SECS;
/// The lease seconds of metasrv leader.
pub const META_LEASE_SECS: u64 = 3;
// In a lease, there are two opportunities for renewal.
pub const META_KEEP_ALIVE_INTERVAL_SECS: u64 = META_LEASE_SECS / 2;
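Note the integer division: with a 3-second lease the keep-alive interval comes out to 1 second, so the leader heartbeats at t=1s and t=2s, i.e. exactly the two renewal opportunities mentioned above, before the lease would lapse at t=3s. A quick check:

    assert_eq!(META_LEASE_SECS / 2, 1); // 3 / 2 == 1 in integer arithmetic
    assert_eq!(META_KEEP_ALIVE_INTERVAL_SECS, 1);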

View File

@@ -16,6 +16,7 @@ use std::str::Utf8Error;
use common_error::ext::{BoxedError, ErrorExt};
use common_error::status_code::StatusCode;
use common_macro::stack_trace_debug;
use serde_json::error::Error as JsonError;
use snafu::{Location, Snafu};
use store_api::storage::RegionNumber;
@@ -23,8 +24,9 @@ use table::metadata::TableId;
use crate::peer::Peer;
#[derive(Debug, Snafu)]
#[derive(Snafu)]
#[snafu(visibility(pub))]
#[stack_trace_debug]
pub enum Error {
#[snafu(display("Failed to get sequence: {}", err_msg))]
NextSequence { err_msg: String, location: Location },
@@ -46,56 +48,49 @@ pub enum Error {
location: Location,
},
#[snafu(display(
"Failed to register procedure loader, type name: {}, source: {}",
type_name,
source
))]
#[snafu(display("Failed to register procedure loader, type name: {}", type_name))]
RegisterProcedureLoader {
type_name: String,
location: Location,
source: common_procedure::error::Error,
},
#[snafu(display("Failed to submit procedure, source: {source}"))]
#[snafu(display("Failed to submit procedure"))]
SubmitProcedure {
location: Location,
source: common_procedure::Error,
},
#[snafu(display("Unsupported operation {}, location: {}", operation, location))]
#[snafu(display("Unsupported operation {}", operation))]
Unsupported {
operation: String,
location: Location,
},
#[snafu(display("Failed to wait procedure done, source: {source}"))]
#[snafu(display("Failed to wait procedure done"))]
WaitProcedure {
location: Location,
source: common_procedure::Error,
},
#[snafu(display("Failed to convert RawTableInfo into TableInfo: {}", source))]
#[snafu(display("Failed to convert RawTableInfo into TableInfo"))]
ConvertRawTableInfo {
location: Location,
source: datatypes::Error,
},
#[snafu(display("Primary key '{key}' not found when creating region request, at {location}"))]
#[snafu(display("Primary key '{key}' not found when creating region request"))]
PrimaryKeyNotFound { key: String, location: Location },
#[snafu(display(
"Failed to build table meta for table: {}, source: {}",
table_name,
source
))]
#[snafu(display("Failed to build table meta for table: {}", table_name))]
BuildTableMeta {
table_name: String,
source: table::metadata::TableMetaBuilderError,
#[snafu(source)]
error: table::metadata::TableMetaBuilderError,
location: Location,
},
#[snafu(display("Table occurs error, source: {}", source))]
#[snafu(display("Table occurs error"))]
Table {
location: Location,
source: table::error::Error,
@@ -107,22 +102,25 @@ pub enum Error {
location: Location,
},
#[snafu(display("Failed to decode protobuf, source: {}", source))]
#[snafu(display("Failed to decode protobuf"))]
DecodeProto {
location: Location,
source: prost::DecodeError,
#[snafu(source)]
error: prost::DecodeError,
},
#[snafu(display("Failed to encode object into json, source: {}", source))]
#[snafu(display("Failed to encode object into json"))]
EncodeJson {
location: Location,
source: JsonError,
#[snafu(source)]
error: JsonError,
},
#[snafu(display("Failed to decode object from json, source: {}", source))]
#[snafu(display("Failed to decode object from json"))]
DecodeJson {
location: Location,
source: JsonError,
#[snafu(source)]
error: JsonError,
},
#[snafu(display("Payload not exist"))]
@@ -131,9 +129,10 @@ pub enum Error {
#[snafu(display("Failed to send message: {err_msg}"))]
SendMessage { err_msg: String, location: Location },
#[snafu(display("Failed to serde json, source: {}", source))]
#[snafu(display("Failed to serde json"))]
SerdeJson {
source: serde_json::error::Error,
#[snafu(source)]
error: serde_json::error::Error,
location: Location,
},
@@ -154,13 +153,13 @@ pub enum Error {
location: Location,
},
#[snafu(display("Failed to convert alter table request, source: {source}, at {location}"))]
#[snafu(display("Failed to convert alter table request"))]
ConvertAlterTableRequest {
source: common_grpc_expr::error::Error,
location: Location,
},
#[snafu(display("Invalid protobuf message: {err_msg}, at {location}"))]
#[snafu(display("Invalid protobuf message: {err_msg}"))]
InvalidProtoMsg { err_msg: String, location: Location },
#[snafu(display("Unexpected: {err_msg}"))]
@@ -182,10 +181,11 @@ pub enum Error {
location: Location,
},
#[snafu(display("Failed to convert raw key to str, source: {}", source))]
#[snafu(display("Failed to convert raw key to str"))]
ConvertRawKey {
location: Location,
source: Utf8Error,
#[snafu(source)]
error: Utf8Error,
},
#[snafu(display("Table nod found, table: {}", table_name))]
@@ -222,29 +222,29 @@ pub enum Error {
location: Location,
},
#[snafu(display("Invalid catalog value, source: {}", source))]
#[snafu(display("Invalid catalog value"))]
InvalidCatalogValue {
source: common_catalog::error::Error,
location: Location,
},
#[snafu(display("{}", source))]
#[snafu(display("External error"))]
External {
location: Location,
source: BoxedError,
},
#[snafu(display("Invalid heartbeat response, location: {}", location))]
#[snafu(display("Invalid heartbeat response"))]
InvalidHeartbeatResponse { location: Location },
#[snafu(display("Failed to operate on datanode: {}, source: {}", peer, source))]
#[snafu(display("Failed to operate on datanode: {}", peer))]
OperateDatanode {
location: Location,
peer: Peer,
source: BoxedError,
},
#[snafu(display("Retry later, source: {}", source))]
#[snafu(display("Retry later"))]
RetryLater { source: BoxedError },
}

View File

@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::HashMap;
use std::fmt::{Display, Formatter};
use serde::{Deserialize, Serialize};
@@ -73,13 +74,15 @@ impl Display for OpenRegion {
pub struct OpenRegion {
pub region_ident: RegionIdent,
pub region_storage_path: String,
pub options: HashMap<String, String>,
}
impl OpenRegion {
pub fn new(region_ident: RegionIdent, path: &str) -> Self {
pub fn new(region_ident: RegionIdent, path: &str, options: HashMap<String, String>) -> Self {
Self {
region_ident,
region_storage_path: path.to_string(),
options,
}
}
}
@@ -127,12 +130,13 @@ mod tests {
engine: "mito2".to_string(),
},
"test/foo",
HashMap::new(),
));
let serialized = serde_json::to_string(&open_region).unwrap();
assert_eq!(
r#"{"OpenRegion":{"region_ident":{"cluster_id":1,"datanode_id":2,"table_id":1024,"region_number":1,"engine":"mito2"},"region_storage_path":"test/foo"}}"#,
r#"{"OpenRegion":{"region_ident":{"cluster_id":1,"datanode_id":2,"table_id":1024,"region_number":1,"engine":"mito2"},"region_storage_path":"test/foo","options":{}}}"#,
serialized
);

View File

@@ -55,13 +55,18 @@ pub mod table_region;
#[allow(deprecated)]
pub mod table_route;
use std::collections::BTreeMap;
use std::collections::{BTreeMap, HashMap};
use std::fmt::Debug;
use std::ops::Deref;
use std::sync::Arc;
use bytes::Bytes;
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use datanode_table::{DatanodeTableKey, DatanodeTableManager, DatanodeTableValue};
use lazy_static::lazy_static;
use regex::Regex;
use serde::de::DeserializeOwned;
use serde::{Deserialize, Serialize};
use snafu::{ensure, OptionExt, ResultExt};
use store_api::storage::RegionNumber;
use table::metadata::{RawTableInfo, TableId};
@@ -69,6 +74,7 @@ use table_info::{TableInfoKey, TableInfoManager, TableInfoValue};
use table_name::{TableNameKey, TableNameManager, TableNameValue};
use self::catalog_name::{CatalogManager, CatalogNameKey, CatalogNameValue};
use self::datanode_table::RegionInfo;
use self::schema_name::{SchemaManager, SchemaNameKey, SchemaNameValue};
use self::table_route::{TableRouteManager, TableRouteValue};
use crate::ddl::utils::region_storage_path;
@@ -80,7 +86,7 @@ use crate::DatanodeId;
pub const REMOVED_PREFIX: &str = "__removed";
const NAME_PATTERN: &str = "[a-zA-Z_:-][a-zA-Z0-9_:-]*";
const NAME_PATTERN: &str = r"[a-zA-Z_:-][a-zA-Z0-9_:\-\.]*";
const DATANODE_TABLE_KEY_PREFIX: &str = "__dn_table";
const TABLE_INFO_KEY_PREFIX: &str = "__table_info";
@@ -154,6 +160,116 @@ macro_rules! ensure_values {
};
}
/// A struct containing a deserialized value (`inner`) and the original bytes it was decoded from.
///
/// - Serialize behavior:
///
/// The `inner` field is ignored; only the original `bytes` are written.
///
/// - Deserialize behavior:
///
/// The `inner` field is deserialized from the `bytes` field.
pub struct DeserializedValueWithBytes<T: DeserializeOwned + Serialize> {
// The original bytes of the inner value.
bytes: Bytes,
// The value deserialized from the original bytes.
inner: T,
}
impl<T: DeserializeOwned + Serialize> Deref for DeserializedValueWithBytes<T> {
type Target = T;
fn deref(&self) -> &Self::Target {
&self.inner
}
}
impl<T: DeserializeOwned + Serialize + Debug> Debug for DeserializedValueWithBytes<T> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(
f,
"DeserializedValueWithBytes(inner: {:?}, bytes: {:?})",
self.inner, self.bytes
)
}
}
impl<T: DeserializeOwned + Serialize> Serialize for DeserializedValueWithBytes<T> {
/// - Serialize behavior:
///
/// The `inner` field is ignored; only the original `bytes` are written.
fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
// Safety: the original bytes are always JSON encoded.
// It's more efficient than `serialize_bytes`.
serializer.serialize_str(&String::from_utf8_lossy(&self.bytes))
}
}
impl<'de, T: DeserializeOwned + Serialize> Deserialize<'de> for DeserializedValueWithBytes<T> {
/// - Deserialize behavior:
///
/// The `inner` field is deserialized from the `bytes` field.
fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error>
where
D: serde::Deserializer<'de>,
{
let buf = String::deserialize(deserializer)?;
let bytes = Bytes::from(buf);
let value = DeserializedValueWithBytes::from_inner_bytes(bytes)
.map_err(|err| serde::de::Error::custom(err.to_string()))?;
Ok(value)
}
}
impl<T: Serialize + DeserializeOwned + Clone> Clone for DeserializedValueWithBytes<T> {
fn clone(&self) -> Self {
Self {
bytes: self.bytes.clone(),
inner: self.inner.clone(),
}
}
}
impl<T: Serialize + DeserializeOwned> DeserializedValueWithBytes<T> {
/// Returns a struct containing a deserialized value and the original `bytes`.
/// It accepts the original bytes of the inner value.
pub fn from_inner_bytes(bytes: Bytes) -> Result<Self> {
let inner = serde_json::from_slice(&bytes).context(error::SerdeJsonSnafu)?;
Ok(Self { bytes, inner })
}
/// Returns a struct containing a deserialized value and the original `bytes`.
/// It accepts the original bytes of the inner value.
pub fn from_inner_slice(bytes: &[u8]) -> Result<Self> {
Self::from_inner_bytes(Bytes::copy_from_slice(bytes))
}
pub fn into_inner(self) -> T {
self.inner
}
/// Returns the original `bytes`.
pub fn into_bytes(&self) -> Vec<u8> {
self.bytes.to_vec()
}
#[cfg(feature = "testing")]
/// Note: used for test purposes only.
pub fn from_inner(inner: T) -> Self {
let bytes = serde_json::to_vec(&inner).unwrap();
Self {
bytes: Bytes::from(bytes),
inner,
}
}
}
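The point of carrying the bytes shows up in the txn builders: compare-and-swap conditions match the stored value byte-for-byte (see `into_bytes()` used by `build_update_txn` in the table info manager below), and re-serializing `inner` is not guaranteed to reproduce the same bytes. A minimal round-trip sketch with a hypothetical `Demo` payload:

    #[derive(serde::Serialize, serde::Deserialize)]
    struct Demo {
        n: u32,
    }

    fn demo_roundtrip() {
        let raw = br#"{"n":1}"#;
        let v: DeserializedValueWithBytes<Demo> =
            DeserializedValueWithBytes::from_inner_slice(raw).unwrap();
        assert_eq!(v.n, 1); // `Deref` exposes the inner value
        assert_eq!(v.into_bytes(), raw.to_vec()); // the original bytes survive untouched
    }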
impl TableMetadataManager {
pub fn new(kv_backend: KvBackendRef) -> Self {
TableMetadataManager {
@@ -211,7 +327,10 @@ impl TableMetadataManager {
pub async fn get_full_table_info(
&self,
table_id: TableId,
) -> Result<(Option<TableInfoValue>, Option<TableRouteValue>)> {
) -> Result<(
Option<DeserializedValueWithBytes<TableInfoValue>>,
Option<DeserializedValueWithBytes<TableRouteValue>>,
)> {
let (get_table_route_txn, table_route_decoder) =
self.table_route_manager.build_get_txn(table_id);
@@ -256,6 +375,7 @@ impl TableMetadataManager {
.table_name_manager()
.build_create_txn(&table_name, table_id)?;
let region_options = (&table_info.meta.options).into();
// Creates table info.
let table_info_value = TableInfoValue::new(table_info);
let (create_table_info_txn, on_create_table_info_failure) = self
@@ -268,6 +388,7 @@ impl TableMetadataManager {
table_id,
&engine,
&region_storage_path,
region_options,
distribution,
)?;
@@ -288,15 +409,17 @@ impl TableMetadataManager {
// Checks whether metadata was already created.
if !r.succeeded {
let remote_table_info =
on_create_table_info_failure(&r.responses)?.context(error::UnexpectedSnafu {
let remote_table_info = on_create_table_info_failure(&r.responses)?
.context(error::UnexpectedSnafu {
err_msg: "Reads the empty table info during the create table metadata",
})?;
})?
.into_inner();
let remote_table_route =
on_create_table_route_failure(&r.responses)?.context(error::UnexpectedSnafu {
let remote_table_route = on_create_table_route_failure(&r.responses)?
.context(error::UnexpectedSnafu {
err_msg: "Reads the empty table route during the create table metadata",
})?;
})?
.into_inner();
let op_name = "the creating table metadata";
ensure_values!(remote_table_info, table_info_value, op_name);
@@ -310,8 +433,8 @@ impl TableMetadataManager {
/// The caller MUST ensure it has the exclusive access to `TableNameKey`.
pub async fn delete_table_metadata(
&self,
table_info_value: &TableInfoValue,
table_route_value: &TableRouteValue,
table_info_value: &DeserializedValueWithBytes<TableInfoValue>,
table_route_value: &DeserializedValueWithBytes<TableRouteValue>,
) -> Result<()> {
let table_info = &table_info_value.table_info;
let table_id = table_info.ident.table_id;
@@ -361,7 +484,7 @@ impl TableMetadataManager {
/// and the new `TableNameKey` MUST be empty.
pub async fn rename_table(
&self,
current_table_info_value: TableInfoValue,
current_table_info_value: DeserializedValueWithBytes<TableInfoValue>,
new_table_name: String,
) -> Result<()> {
let current_table_info = &current_table_info_value.table_info;
@@ -386,9 +509,11 @@ impl TableMetadataManager {
table_id,
)?;
let new_table_info_value = current_table_info_value.with_update(move |table_info| {
table_info.name = new_table_name;
});
let new_table_info_value = current_table_info_value
.inner
.with_update(move |table_info| {
table_info.name = new_table_name;
});
// Updates table info.
let (update_table_info_txn, on_update_table_info_failure) = self
@@ -401,10 +526,11 @@ impl TableMetadataManager {
// Checks whether metadata was already updated.
if !r.succeeded {
let remote_table_info =
on_update_table_info_failure(&r.responses)?.context(error::UnexpectedSnafu {
let remote_table_info = on_update_table_info_failure(&r.responses)?
.context(error::UnexpectedSnafu {
err_msg: "Reads the empty table info during the rename table metadata",
})?;
})?
.into_inner();
let op_name = "the renaming table metadata";
ensure_values!(remote_table_info, new_table_info_value, op_name);
@@ -416,7 +542,7 @@ impl TableMetadataManager {
/// Updates table info and returns an error if different metadata exists.
pub async fn update_table_info(
&self,
current_table_info_value: TableInfoValue,
current_table_info_value: DeserializedValueWithBytes<TableInfoValue>,
new_table_info: RawTableInfo,
) -> Result<()> {
let table_id = current_table_info_value.table_info.ident.table_id;
@@ -432,10 +558,11 @@ impl TableMetadataManager {
// Checks whether metadata was already updated.
if !r.succeeded {
let remote_table_info =
on_update_table_info_failure(&r.responses)?.context(error::UnexpectedSnafu {
let remote_table_info = on_update_table_info_failure(&r.responses)?
.context(error::UnexpectedSnafu {
err_msg: "Reads the empty table info during the updating table info",
})?;
})?
.into_inner();
let op_name = "the updating table info";
ensure_values!(remote_table_info, new_table_info_value, op_name);
@@ -446,10 +573,10 @@ impl TableMetadataManager {
pub async fn update_table_route(
&self,
table_id: TableId,
engine: &str,
region_storage_path: &str,
current_table_route_value: TableRouteValue,
region_info: RegionInfo,
current_table_route_value: DeserializedValueWithBytes<TableRouteValue>,
new_region_routes: Vec<RegionRoute>,
new_region_options: &HashMap<String, String>,
) -> Result<()> {
// Updates the datanode table key value pairs.
let current_region_distribution =
@@ -458,10 +585,10 @@ impl TableMetadataManager {
let update_datanode_table_txn = self.datanode_table_manager().build_update_txn(
table_id,
engine,
region_storage_path,
region_info,
current_region_distribution,
new_region_distribution,
new_region_options,
)?;
// Updates the table_route.
@@ -477,10 +604,11 @@ impl TableMetadataManager {
// Checks whether metadata was already updated.
if !r.succeeded {
let remote_table_route =
on_update_table_route_failure(&r.responses)?.context(error::UnexpectedSnafu {
let remote_table_route = on_update_table_route_failure(&r.responses)?
.context(error::UnexpectedSnafu {
err_msg: "Reads the empty table route during the updating table route",
})?;
})?
.into_inner();
let op_name = "the updating table route";
ensure_values!(remote_table_route, new_table_route_value, op_name);
@@ -553,9 +681,10 @@ impl_optional_meta_value! {
#[cfg(test)]
mod tests {
use std::collections::BTreeMap;
use std::collections::{BTreeMap, HashMap};
use std::sync::Arc;
use bytes::Bytes;
use datatypes::prelude::ConcreteDataType;
use datatypes::schema::{ColumnSchema, SchemaBuilder};
use futures::TryStreamExt;
@@ -563,14 +692,43 @@ mod tests {
use super::datanode_table::DatanodeTableKey;
use crate::ddl::utils::region_storage_path;
use crate::key::datanode_table::RegionInfo;
use crate::key::table_info::TableInfoValue;
use crate::key::table_name::TableNameKey;
use crate::key::table_route::TableRouteValue;
use crate::key::{to_removed_key, TableMetadataManager};
use crate::key::{to_removed_key, DeserializedValueWithBytes, TableMetadataManager};
use crate::kv_backend::memory::MemoryKvBackend;
use crate::peer::Peer;
use crate::rpc::router::{region_distribution, Region, RegionRoute};
#[test]
fn test_deserialized_value_with_bytes() {
let region_route = new_test_region_route();
let region_routes = vec![region_route.clone()];
let expected_region_routes =
TableRouteValue::new(vec![region_route.clone(), region_route.clone()]);
let expected = serde_json::to_vec(&expected_region_routes).unwrap();
// Serialize behaviors:
// The inner field will be ignored.
let value = DeserializedValueWithBytes {
// ignored
inner: TableRouteValue::new(region_routes.clone()),
bytes: Bytes::from(expected.clone()),
};
let encoded = serde_json::to_vec(&value).unwrap();
// Deserialize behaviors:
// The inner field will be deserialized from the bytes field.
let decoded: DeserializedValueWithBytes<TableRouteValue> =
serde_json::from_slice(&encoded).unwrap();
assert_eq!(decoded.inner, expected_region_routes);
assert_eq!(decoded.bytes, expected);
}
#[test]
fn test_to_removed_key() {
let key = "test_key";
@@ -660,8 +818,14 @@ mod tests {
.await
.unwrap();
assert_eq!(remote_table_info.unwrap().table_info, table_info);
assert_eq!(remote_table_route.unwrap().region_routes, region_routes);
assert_eq!(
remote_table_info.unwrap().into_inner().table_info,
table_info
);
assert_eq!(
remote_table_route.unwrap().into_inner().region_routes,
region_routes
);
}
#[tokio::test]
@@ -674,7 +838,8 @@ mod tests {
new_test_table_info(region_routes.iter().map(|r| r.region.id.region_number())).into();
let table_id = table_info.ident.table_id;
let datanode_id = 2;
let table_route_value = TableRouteValue::new(region_routes.clone());
let table_route_value =
DeserializedValueWithBytes::from_inner(TableRouteValue::new(region_routes.clone()));
// creates metadata.
table_metadata_manager
@@ -682,7 +847,8 @@ mod tests {
.await
.unwrap();
let table_info_value = TableInfoValue::new(table_info.clone());
let table_info_value =
DeserializedValueWithBytes::from_inner(TableInfoValue::new(table_info.clone()));
// deletes metadata.
table_metadata_manager
@@ -723,7 +889,8 @@ mod tests {
.get_removed(table_id)
.await
.unwrap()
.unwrap();
.unwrap()
.into_inner();
assert_eq!(removed_table_info.table_info, table_info);
let removed_table_route = table_metadata_manager
@@ -731,7 +898,8 @@ mod tests {
.get_removed(table_id)
.await
.unwrap()
.unwrap();
.unwrap()
.into_inner();
assert_eq!(removed_table_route.region_routes, region_routes);
}
@@ -750,7 +918,9 @@ mod tests {
.await
.unwrap();
let new_table_name = "another_name".to_string();
let table_info_value = TableInfoValue::new(table_info.clone());
let table_info_value =
DeserializedValueWithBytes::from_inner(TableInfoValue::new(table_info.clone()));
table_metadata_manager
.rename_table(table_info_value.clone(), new_table_name.clone())
.await
@@ -762,7 +932,8 @@ mod tests {
.unwrap();
let mut modified_table_info = table_info.clone();
modified_table_info.name = "hi".to_string();
let modified_table_info_value = table_info_value.update(modified_table_info);
let modified_table_info_value =
DeserializedValueWithBytes::from_inner(table_info_value.update(modified_table_info));
// if the table_info_value is wrong, it should return an error.
// The ABA problem.
assert!(table_metadata_manager
@@ -816,7 +987,8 @@ mod tests {
.unwrap();
let mut new_table_info = table_info.clone();
new_table_info.name = "hi".to_string();
let current_table_info_value = TableInfoValue::new(table_info.clone());
let current_table_info_value =
DeserializedValueWithBytes::from_inner(TableInfoValue::new(table_info.clone()));
// should be ok.
table_metadata_manager
.update_table_info(current_table_info_value.clone(), new_table_info.clone())
@@ -834,12 +1006,15 @@ mod tests {
.get(table_id)
.await
.unwrap()
.unwrap();
.unwrap()
.into_inner();
assert_eq!(updated_table_info.table_info, new_table_info);
let mut wrong_table_info = table_info.clone();
wrong_table_info.name = "wrong".to_string();
let wrong_table_info_value = current_table_info_value.update(wrong_table_info);
let wrong_table_info_value = DeserializedValueWithBytes::from_inner(
current_table_info_value.update(wrong_table_info),
);
// if the current_table_info_value is wrong, it should return an error.
// The ABA problem.
assert!(table_metadata_manager
@@ -878,7 +1053,8 @@ mod tests {
let engine = table_info.meta.engine.as_str();
let region_storage_path =
region_storage_path(&table_info.catalog_name, &table_info.schema_name);
let current_table_route_value = TableRouteValue::new(region_routes.clone());
let current_table_route_value =
DeserializedValueWithBytes::from_inner(TableRouteValue::new(region_routes.clone()));
// creates metadata.
table_metadata_manager
.create_table_metadata(table_info.clone(), region_routes.clone())
@@ -894,10 +1070,14 @@ mod tests {
table_metadata_manager
.update_table_route(
table_id,
engine,
&region_storage_path,
RegionInfo {
engine: engine.to_string(),
region_storage_path: region_storage_path.to_string(),
region_options: HashMap::new(),
},
current_table_route_value.clone(),
new_region_routes.clone(),
&HashMap::new(),
)
.await
.unwrap();
@@ -907,24 +1087,36 @@ mod tests {
table_metadata_manager
.update_table_route(
table_id,
engine,
&region_storage_path,
RegionInfo {
engine: engine.to_string(),
region_storage_path: region_storage_path.to_string(),
region_options: HashMap::new(),
},
current_table_route_value.clone(),
new_region_routes.clone(),
&HashMap::new(),
)
.await
.unwrap();
let current_table_route_value = current_table_route_value.update(new_region_routes.clone());
let current_table_route_value = DeserializedValueWithBytes::from_inner(
current_table_route_value
.inner
.update(new_region_routes.clone()),
);
let new_region_routes = vec![new_region_route(2, 4), new_region_route(5, 5)];
// it should be ok.
table_metadata_manager
.update_table_route(
table_id,
engine,
&region_storage_path,
RegionInfo {
engine: engine.to_string(),
region_storage_path: region_storage_path.to_string(),
region_options: HashMap::new(),
},
current_table_route_value.clone(),
new_region_routes.clone(),
&HashMap::new(),
)
.await
.unwrap();
@@ -932,19 +1124,24 @@ mod tests {
// if the current_table_route_value is wrong, it should return an error.
// The ABA problem.
let wrong_table_route_value = current_table_route_value.update(vec![
new_region_route(1, 1),
new_region_route(2, 2),
new_region_route(3, 3),
new_region_route(4, 4),
]);
let wrong_table_route_value =
DeserializedValueWithBytes::from_inner(current_table_route_value.update(vec![
new_region_route(1, 1),
new_region_route(2, 2),
new_region_route(3, 3),
new_region_route(4, 4),
]));
assert!(table_metadata_manager
.update_table_route(
table_id,
engine,
&region_storage_path,
RegionInfo {
engine: engine.to_string(),
region_storage_path: region_storage_path.to_string(),
region_options: HashMap::new(),
},
wrong_table_route_value,
new_region_routes
new_region_routes,
&HashMap::new(),
)
.await
.is_err());

View File

@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::HashMap;
use std::sync::Arc;
use futures::stream::BoxStream;
@@ -32,6 +33,21 @@ use crate::rpc::store::RangeRequest;
use crate::rpc::KeyValue;
use crate::DatanodeId;
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)]
/// RegionInfo
/// For compatibility reasons, DON'T modify the field names.
pub struct RegionInfo {
#[serde(default)]
// The table engine; it SHOULD be immutable after creation.
pub engine: String,
// The region storage path; it SHOULD be immutable after creation.
#[serde(default)]
pub region_storage_path: String,
// The region options.
#[serde(default)]
pub region_options: HashMap<String, String>,
}
pub struct DatanodeTableKey {
datanode_id: DatanodeId,
table_id: TableId,
@@ -85,25 +101,17 @@ impl TableMetaKey for DatanodeTableKey {
pub struct DatanodeTableValue {
pub table_id: TableId,
pub regions: Vec<RegionNumber>,
#[serde(default)]
pub engine: String,
#[serde(default)]
pub region_storage_path: String,
#[serde(flatten)]
pub region_info: RegionInfo,
version: u64,
}
impl DatanodeTableValue {
pub fn new(
table_id: TableId,
regions: Vec<RegionNumber>,
engine: String,
region_storage_path: String,
) -> Self {
pub fn new(table_id: TableId, regions: Vec<RegionNumber>, region_info: RegionInfo) -> Self {
Self {
table_id,
regions,
engine,
region_storage_path,
region_info,
version: 0,
}
}
@@ -156,6 +164,7 @@ impl DatanodeTableManager {
table_id: TableId,
engine: &str,
region_storage_path: &str,
region_options: HashMap<String, String>,
distribution: RegionDistribution,
) -> Result<Txn> {
let txns = distribution
@@ -165,8 +174,11 @@ impl DatanodeTableManager {
let val = DatanodeTableValue::new(
table_id,
regions,
engine.to_string(),
region_storage_path.to_string(),
RegionInfo {
engine: engine.to_string(),
region_storage_path: region_storage_path.to_string(),
region_options: region_options.clone(),
},
);
Ok(TxnOp::Put(key.as_raw_key(), val.try_as_raw_value()?))
@@ -182,10 +194,10 @@ impl DatanodeTableManager {
pub(crate) fn build_update_txn(
&self,
table_id: TableId,
engine: &str,
region_storage_path: &str,
region_info: RegionInfo,
current_region_distribution: RegionDistribution,
new_region_distribution: RegionDistribution,
new_region_options: &HashMap<String, String>,
) -> Result<Txn> {
let mut opts = Vec::new();
@@ -197,33 +209,20 @@ impl DatanodeTableManager {
opts.push(TxnOp::Delete(raw_key))
}
}
let need_update_options = region_info.region_options != *new_region_options;
for (datanode, regions) in new_region_distribution.into_iter() {
if let Some(current_region) = current_region_distribution.get(&datanode) {
// Updates if need.
if *current_region != regions {
let key = DatanodeTableKey::new(datanode, table_id);
let raw_key = key.as_raw_key();
let val = DatanodeTableValue::new(
table_id,
regions,
engine.to_string(),
region_storage_path.to_string(),
)
.try_as_raw_value()?;
opts.push(TxnOp::Put(raw_key, val));
}
} else {
// New datanodes
let need_update =
if let Some(current_region) = current_region_distribution.get(&datanode) {
// Updates if need.
*current_region != regions || need_update_options
} else {
true
};
if need_update {
let key = DatanodeTableKey::new(datanode, table_id);
let raw_key = key.as_raw_key();
let val = DatanodeTableValue::new(
table_id,
regions,
engine.to_string(),
region_storage_path.to_string(),
)
.try_as_raw_value()?;
let val = DatanodeTableValue::new(table_id, regions, region_info.clone())
.try_as_raw_value()?;
opts.push(TxnOp::Put(raw_key, val));
}
}
@@ -270,11 +269,10 @@ mod tests {
let value = DatanodeTableValue {
table_id: 42,
regions: vec![1, 2, 3],
engine: Default::default(),
region_storage_path: Default::default(),
region_info: RegionInfo::default(),
version: 1,
};
let literal = br#"{"table_id":42,"regions":[1,2,3],"engine":"","region_storage_path":"","version":1}"#;
let literal = br#"{"table_id":42,"regions":[1,2,3],"engine":"","region_storage_path":"","region_options":{},"version":1}"#;
let raw_value = value.try_as_raw_value().unwrap();
assert_eq!(raw_value, literal);
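A note on the new literal: #[serde(flatten)] inlines RegionInfo's fields into the enclosing JSON object, so engine and region_storage_path stay at the top level exactly where the old format put them, and region_options (with #[serde(default)]) still deserializes from older values that lack it. A minimal, runnable sketch of that behavior, with u32 standing in for the real id types:

use std::collections::HashMap;

use serde::{Deserialize, Serialize};

#[derive(Default, Serialize, Deserialize)]
struct RegionInfo {
    #[serde(default)]
    engine: String,
    #[serde(default)]
    region_storage_path: String,
    #[serde(default)]
    region_options: HashMap<String, String>,
}

#[derive(Serialize, Deserialize)]
struct DatanodeTableValue {
    table_id: u32,
    regions: Vec<u32>,
    // Flattened: RegionInfo's fields serialize at the top level.
    #[serde(flatten)]
    region_info: RegionInfo,
    version: u64,
}

fn main() {
    let value = DatanodeTableValue {
        table_id: 42,
        regions: vec![1, 2, 3],
        region_info: RegionInfo::default(),
        version: 1,
    };
    let json = serde_json::to_string(&value).unwrap();
    assert_eq!(
        json,
        r#"{"table_id":42,"regions":[1,2,3],"engine":"","region_storage_path":"","region_options":{},"version":1}"#
    );
}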


@@ -16,7 +16,7 @@ use serde::{Deserialize, Serialize};
use table::engine::TableReference;
use table::metadata::{RawTableInfo, TableId};
use super::TABLE_INFO_KEY_PREFIX;
use super::{DeserializedValueWithBytes, TABLE_INFO_KEY_PREFIX};
use crate::error::Result;
use crate::key::{to_removed_key, TableMetaKey};
use crate::kv_backend::txn::{Compare, CompareOp, Txn, TxnOp, TxnOpResponse};
@@ -103,7 +103,7 @@ impl TableInfoManager {
table_id: TableId,
) -> (
Txn,
impl FnOnce(&Vec<TxnOpResponse>) -> Result<Option<TableInfoValue>>,
impl FnOnce(&Vec<TxnOpResponse>) -> Result<Option<DeserializedValueWithBytes<TableInfoValue>>>,
) {
let key = TableInfoKey::new(table_id);
let raw_key = key.as_raw_key();
@@ -119,7 +119,7 @@ impl TableInfoManager {
table_info_value: &TableInfoValue,
) -> Result<(
Txn,
impl FnOnce(&Vec<TxnOpResponse>) -> Result<Option<TableInfoValue>>,
impl FnOnce(&Vec<TxnOpResponse>) -> Result<Option<DeserializedValueWithBytes<TableInfoValue>>>,
)> {
let key = TableInfoKey::new(table_id);
let raw_key = key.as_raw_key();
@@ -143,15 +143,15 @@ impl TableInfoManager {
pub(crate) fn build_update_txn(
&self,
table_id: TableId,
current_table_info_value: &TableInfoValue,
current_table_info_value: &DeserializedValueWithBytes<TableInfoValue>,
new_table_info_value: &TableInfoValue,
) -> Result<(
Txn,
impl FnOnce(&Vec<TxnOpResponse>) -> Result<Option<TableInfoValue>>,
impl FnOnce(&Vec<TxnOpResponse>) -> Result<Option<DeserializedValueWithBytes<TableInfoValue>>>,
)> {
let key = TableInfoKey::new(table_id);
let raw_key = key.as_raw_key();
let raw_value = current_table_info_value.try_as_raw_value()?;
let raw_value = current_table_info_value.into_bytes();
let txn = Txn::new()
.when(vec![Compare::with_value(
@@ -172,11 +172,11 @@ impl TableInfoManager {
pub(crate) fn build_delete_txn(
&self,
table_id: TableId,
table_info_value: &TableInfoValue,
table_info_value: &DeserializedValueWithBytes<TableInfoValue>,
) -> Result<Txn> {
let key = TableInfoKey::new(table_id);
let raw_key = key.as_raw_key();
let raw_value = table_info_value.try_as_raw_value()?;
let raw_value = table_info_value.into_bytes();
let removed_key = to_removed_key(&String::from_utf8_lossy(&raw_key));
let txn = Txn::new().and_then(vec![
@@ -189,7 +189,8 @@ impl TableInfoManager {
fn build_decode_fn(
raw_key: Vec<u8>,
) -> impl FnOnce(&Vec<TxnOpResponse>) -> Result<Option<TableInfoValue>> {
) -> impl FnOnce(&Vec<TxnOpResponse>) -> Result<Option<DeserializedValueWithBytes<TableInfoValue>>>
{
move |kvs: &Vec<TxnOpResponse>| {
kvs.iter()
.filter_map(|resp| {
@@ -201,29 +202,35 @@ impl TableInfoManager {
})
.flat_map(|r| &r.kvs)
.find(|kv| kv.key == raw_key)
.map(|kv| TableInfoValue::try_from_raw_value(&kv.value))
.map(|kv| DeserializedValueWithBytes::from_inner_slice(&kv.value))
.transpose()
}
}
#[cfg(test)]
pub async fn get_removed(&self, table_id: TableId) -> Result<Option<TableInfoValue>> {
pub async fn get_removed(
&self,
table_id: TableId,
) -> Result<Option<DeserializedValueWithBytes<TableInfoValue>>> {
let key = TableInfoKey::new(table_id).to_string();
let removed_key = to_removed_key(&key).into_bytes();
self.kv_backend
.get(&removed_key)
.await?
.map(|x| TableInfoValue::try_from_raw_value(&x.value))
.map(|x| DeserializedValueWithBytes::from_inner_slice(&x.value))
.transpose()
}
pub async fn get(&self, table_id: TableId) -> Result<Option<TableInfoValue>> {
pub async fn get(
&self,
table_id: TableId,
) -> Result<Option<DeserializedValueWithBytes<TableInfoValue>>> {
let key = TableInfoKey::new(table_id);
let raw_key = key.as_raw_key();
self.kv_backend
.get(&raw_key)
.await?
.map(|x| TableInfoValue::try_from_raw_value(&x.value))
.map(|x| DeserializedValueWithBytes::from_inner_slice(&x.value))
.transpose()
}
}
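DeserializedValueWithBytes<T>, which these getters and decode closures now return, pairs the decoded value with the exact bytes it was decoded from, so the compare-and-swap guards above (Compare::with_value on the raw value) match the bytes actually stored rather than a fresh serialization that might not agree byte-for-byte. A sketch of the idea only, assuming JSON encoding and simplified error types; the signatures are inferred from the call sites above, not copied from the real definition:

// Sketch: keep the raw bytes alongside the decoded value.
pub struct DeserializedValueWithBytes<T> {
    inner: T,
    bytes: Vec<u8>,
}

impl<T: serde::de::DeserializeOwned> DeserializedValueWithBytes<T> {
    // Decode from the stored bytes, remembering them verbatim.
    pub fn from_inner_slice(bytes: &[u8]) -> serde_json::Result<Self> {
        Ok(Self {
            inner: serde_json::from_slice(bytes)?,
            bytes: bytes.to_vec(),
        })
    }

    // The bytes exactly as stored, suitable for a txn compare guard.
    pub fn into_bytes(&self) -> Vec<u8> {
        self.bytes.clone()
    }

    pub fn into_inner(self) -> T {
        self.inner
    }
}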


@@ -268,6 +268,8 @@ mod tests {
test_ok("my_table");
test_ok("cpu:metrics");
test_ok(":cpu:metrics");
test_ok("sys.cpu.system");
test_ok("foo-bar");
}
#[test]


@@ -17,6 +17,7 @@ use std::fmt::Display;
use serde::{Deserialize, Serialize};
use table::metadata::TableId;
use super::DeserializedValueWithBytes;
use crate::error::Result;
use crate::key::{to_removed_key, RegionDistribution, TableMetaKey, TABLE_ROUTE_PREFIX};
use crate::kv_backend::txn::{Compare, CompareOp, Txn, TxnOp, TxnOpResponse};
@@ -81,7 +82,7 @@ impl TableRouteManager {
table_id: TableId,
) -> (
Txn,
impl FnOnce(&Vec<TxnOpResponse>) -> Result<Option<TableRouteValue>>,
impl FnOnce(&Vec<TxnOpResponse>) -> Result<Option<DeserializedValueWithBytes<TableRouteValue>>>,
) {
let key = TableRouteKey::new(table_id);
let raw_key = key.as_raw_key();
@@ -97,7 +98,7 @@ impl TableRouteManager {
table_route_value: &TableRouteValue,
) -> Result<(
Txn,
impl FnOnce(&Vec<TxnOpResponse>) -> Result<Option<TableRouteValue>>,
impl FnOnce(&Vec<TxnOpResponse>) -> Result<Option<DeserializedValueWithBytes<TableRouteValue>>>,
)> {
let key = TableRouteKey::new(table_id);
let raw_key = key.as_raw_key();
@@ -121,15 +122,15 @@ impl TableRouteManager {
pub(crate) fn build_update_txn(
&self,
table_id: TableId,
current_table_route_value: &TableRouteValue,
current_table_route_value: &DeserializedValueWithBytes<TableRouteValue>,
new_table_route_value: &TableRouteValue,
) -> Result<(
Txn,
impl FnOnce(&Vec<TxnOpResponse>) -> Result<Option<TableRouteValue>>,
impl FnOnce(&Vec<TxnOpResponse>) -> Result<Option<DeserializedValueWithBytes<TableRouteValue>>>,
)> {
let key = TableRouteKey::new(table_id);
let raw_key = key.as_raw_key();
let raw_value = current_table_route_value.try_as_raw_value()?;
let raw_value = current_table_route_value.into_bytes();
let new_raw_value: Vec<u8> = new_table_route_value.try_as_raw_value()?;
let txn = Txn::new()
@@ -148,11 +149,11 @@ impl TableRouteManager {
pub(crate) fn build_delete_txn(
&self,
table_id: TableId,
table_route_value: &TableRouteValue,
table_route_value: &DeserializedValueWithBytes<TableRouteValue>,
) -> Result<Txn> {
let key = TableRouteKey::new(table_id);
let raw_key = key.as_raw_key();
let raw_value = table_route_value.try_as_raw_value()?;
let raw_value = table_route_value.into_bytes();
let removed_key = to_removed_key(&String::from_utf8_lossy(&raw_key));
let txn = Txn::new().and_then(vec![
@@ -165,7 +166,8 @@ impl TableRouteManager {
fn build_decode_fn(
raw_key: Vec<u8>,
) -> impl FnOnce(&Vec<TxnOpResponse>) -> Result<Option<TableRouteValue>> {
) -> impl FnOnce(&Vec<TxnOpResponse>) -> Result<Option<DeserializedValueWithBytes<TableRouteValue>>>
{
move |response: &Vec<TxnOpResponse>| {
response
.iter()
@@ -178,28 +180,34 @@ impl TableRouteManager {
})
.flat_map(|r| &r.kvs)
.find(|kv| kv.key == raw_key)
.map(|kv| TableRouteValue::try_from_raw_value(&kv.value))
.map(|kv| DeserializedValueWithBytes::from_inner_slice(&kv.value))
.transpose()
}
}
pub async fn get(&self, table_id: TableId) -> Result<Option<TableRouteValue>> {
pub async fn get(
&self,
table_id: TableId,
) -> Result<Option<DeserializedValueWithBytes<TableRouteValue>>> {
let key = TableRouteKey::new(table_id);
self.kv_backend
.get(&key.as_raw_key())
.await?
.map(|kv| TableRouteValue::try_from_raw_value(&kv.value))
.map(|kv| DeserializedValueWithBytes::from_inner_slice(&kv.value))
.transpose()
}
#[cfg(test)]
pub async fn get_removed(&self, table_id: TableId) -> Result<Option<TableRouteValue>> {
pub async fn get_removed(
&self,
table_id: TableId,
) -> Result<Option<DeserializedValueWithBytes<TableRouteValue>>> {
let key = TableRouteKey::new(table_id).to_string();
let removed_key = to_removed_key(&key).into_bytes();
self.kv_backend
.get(&removed_key)
.await?
.map(|x| TableRouteValue::try_from_raw_value(&x.value))
.map(|x| DeserializedValueWithBytes::from_inner_slice(&x.value))
.transpose()
}
@@ -209,7 +217,7 @@ impl TableRouteManager {
) -> Result<Option<RegionDistribution>> {
self.get(table_id)
.await?
.map(|table_route| region_distribution(&table_route.region_routes))
.map(|table_route| region_distribution(&table_route.into_inner().region_routes))
.transpose()
}
}


@@ -19,3 +19,4 @@ pub(crate) const METRIC_META_CREATE_SCHEMA: &str = "meta.create_schema";
pub(crate) const METRIC_META_PROCEDURE_CREATE_TABLE: &str = "meta.procedure.create_table";
pub(crate) const METRIC_META_PROCEDURE_DROP_TABLE: &str = "meta.procedure.drop_table";
pub(crate) const METRIC_META_PROCEDURE_ALTER_TABLE: &str = "meta.procedure.alter_table";
pub(crate) const METRIC_META_PROCEDURE_TRUNCATE_TABLE: &str = "meta.procedure.truncate_table";


@@ -21,6 +21,8 @@ use api::v1::meta::{
SubmitDdlTaskResponse as PbSubmitDdlTaskResponse, TruncateTableTask as PbTruncateTableTask,
};
use api::v1::{AlterExpr, CreateTableExpr, DropTableExpr, TruncateTableExpr};
use base64::engine::general_purpose;
use base64::Engine as _;
use prost::Message;
use serde::{Deserialize, Serialize};
use snafu::{OptionExt, ResultExt};
@@ -65,8 +67,18 @@ impl DdlTask {
DdlTask::AlterTable(AlterTableTask { alter_table })
}
pub fn new_truncate_table(truncate_table: TruncateTableExpr) -> Self {
DdlTask::TruncateTable(TruncateTableTask { truncate_table })
pub fn new_truncate_table(
catalog: String,
schema: String,
table: String,
table_id: TableId,
) -> Self {
DdlTask::TruncateTable(TruncateTableTask {
catalog,
schema,
table,
table_id,
})
}
}
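A hypothetical call site for the reshaped constructor; callers now pass the table coordinates directly instead of building a TruncateTableExpr first (all values illustrative):

let task = DdlTask::new_truncate_table(
    "greptime".to_string(), // catalog
    "public".to_string(),   // schema
    "cpu".to_string(),      // table
    1024,                   // table_id
);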
@@ -112,7 +124,12 @@ impl TryFrom<SubmitDdlTaskRequest> for PbSubmitDdlTaskRequest {
alter_table: Some(task.alter_table),
}),
DdlTask::TruncateTable(task) => Task::TruncateTableTask(PbTruncateTableTask {
truncate_table: Some(task.truncate_table),
truncate_table: Some(TruncateTableExpr {
catalog_name: task.catalog,
schema_name: task.schema,
table_name: task.table,
table_id: Some(api::v1::TableId { id: task.table_id }),
}),
}),
};
@@ -272,7 +289,8 @@ impl Serialize for CreateTableTask {
table_info,
};
let buf = pb.encode_to_vec();
serializer.serialize_bytes(&buf)
let encoded = general_purpose::STANDARD_NO_PAD.encode(buf);
serializer.serialize_str(&encoded)
}
}
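The base64 switch matters for JSON in particular: serde_json has no native bytes type, so serialize_bytes degrades into an array of integers, one JSON number per byte. A small runnable sketch of the difference (the three sample bytes are arbitrary):

use base64::engine::general_purpose;
use base64::Engine as _;

fn main() {
    let buf: Vec<u8> = vec![0x08, 0x96, 0x01];
    // Old form: bytes render as a JSON array of numbers.
    assert_eq!(serde_json::to_string(&buf).unwrap(), "[8,150,1]");
    // New form: one compact string.
    assert_eq!(general_purpose::STANDARD_NO_PAD.encode(&buf), "CJYB");
}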
@@ -281,7 +299,10 @@ impl<'de> Deserialize<'de> for CreateTableTask {
where
D: serde::Deserializer<'de>,
{
let buf = Vec::<u8>::deserialize(deserializer)?;
let encoded = String::deserialize(deserializer)?;
let buf = general_purpose::STANDARD_NO_PAD
.decode(encoded)
.map_err(|err| serde::de::Error::custom(err.to_string()))?;
let expr: PbCreateTableTask = PbCreateTableTask::decode(&*buf)
.map_err(|err| serde::de::Error::custom(err.to_string()))?;
@@ -338,7 +359,8 @@ impl Serialize for AlterTableTask {
alter_table: Some(self.alter_table.clone()),
};
let buf = pb.encode_to_vec();
serializer.serialize_bytes(&buf)
let encoded = general_purpose::STANDARD_NO_PAD.encode(buf);
serializer.serialize_str(&encoded)
}
}
@@ -347,7 +369,10 @@ impl<'de> Deserialize<'de> for AlterTableTask {
where
D: serde::Deserializer<'de>,
{
let buf = Vec::<u8>::deserialize(deserializer)?;
let encoded = String::deserialize(deserializer)?;
let buf = general_purpose::STANDARD_NO_PAD
.decode(encoded)
.map_err(|err| serde::de::Error::custom(err.to_string()))?;
let expr: PbAlterTableTask = PbAlterTableTask::decode(&*buf)
.map_err(|err| serde::de::Error::custom(err.to_string()))?;
@@ -358,27 +383,28 @@ impl<'de> Deserialize<'de> for AlterTableTask {
}
}
#[derive(Debug, PartialEq)]
#[derive(Debug, PartialEq, Serialize, Deserialize)]
pub struct TruncateTableTask {
pub truncate_table: TruncateTableExpr,
pub catalog: String,
pub schema: String,
pub table: String,
pub table_id: TableId,
}
impl TruncateTableTask {
pub fn table_ref(&self) -> TableReference {
TableReference {
catalog: &self.truncate_table.catalog_name,
schema: &self.truncate_table.schema_name,
table: &self.truncate_table.table_name,
catalog: &self.catalog,
schema: &self.schema,
table: &self.table,
}
}
pub fn table_name(&self) -> TableName {
let table = &self.truncate_table;
TableName {
catalog_name: table.catalog_name.to_string(),
schema_name: table.schema_name.to_string(),
table_name: table.table_name.to_string(),
catalog_name: self.catalog.to_string(),
schema_name: self.schema.to_string(),
table_name: self.table.to_string(),
}
}
}
@@ -388,39 +414,20 @@ impl TryFrom<PbTruncateTableTask> for TruncateTableTask {
fn try_from(pb: PbTruncateTableTask) -> Result<Self> {
let truncate_table = pb.truncate_table.context(error::InvalidProtoMsgSnafu {
err_msg: "expected truncate_table",
})?;
Ok(TruncateTableTask { truncate_table })
}
}
impl Serialize for TruncateTableTask {
fn serialize<S>(&self, serializer: S) -> result::Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
let pb = PbTruncateTableTask {
truncate_table: Some(self.truncate_table.clone()),
};
let buf = pb.encode_to_vec();
serializer.serialize_bytes(&buf)
}
}
impl<'de> Deserialize<'de> for TruncateTableTask {
fn deserialize<D>(deserializer: D) -> result::Result<Self, D::Error>
where
D: serde::Deserializer<'de>,
{
let buf = Vec::<u8>::deserialize(deserializer)?;
let task: PbTruncateTableTask = PbTruncateTableTask::decode(&*buf)
.map_err(|err| serde::de::Error::custom(err.to_string()))?;
let task = TruncateTableTask::try_from(task)
.map_err(|err| serde::de::Error::custom(err.to_string()))?;
Ok(task)
Ok(Self {
catalog: truncate_table.catalog_name,
schema: truncate_table.schema_name,
table: truncate_table.table_name,
table_id: truncate_table
.table_id
.context(error::InvalidProtoMsgSnafu {
err_msg: "expected table_id",
})?
.id,
})
}
}
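With plain owned fields, the derived serde impls replace the manual protobuf-based ones removed above, and the task serializes as ordinary JSON. A sketch of the resulting shape in the style of the tests below (hypothetical test name, illustrative values):

#[test]
fn test_truncate_task_json_shape() {
    let task = TruncateTableTask {
        catalog: "greptime".to_string(),
        schema: "public".to_string(),
        table: "cpu".to_string(),
        table_id: 1024,
    };
    let json = serde_json::to_string(&task).unwrap();
    assert_eq!(
        json,
        r#"{"catalog":"greptime","schema":"public","table":"cpu","table_id":1024}"#
    );
    let back: TruncateTableTask = serde_json::from_str(&json).unwrap();
    assert_eq!(task, back);
}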
@@ -428,12 +435,12 @@ impl<'de> Deserialize<'de> for TruncateTableTask {
mod tests {
use std::sync::Arc;
use api::v1::CreateTableExpr;
use api::v1::{AlterExpr, CreateTableExpr};
use datatypes::schema::SchemaBuilder;
use table::metadata::RawTableInfo;
use table::test_util::table_info::test_table_info;
use super::CreateTableTask;
use super::{AlterTableTask, CreateTableTask};
#[test]
fn test_basic_ser_de_create_table_task() {
@@ -450,4 +457,16 @@ mod tests {
let de = serde_json::from_slice(&output).unwrap();
assert_eq!(task, de);
}
#[test]
fn test_basic_ser_de_alter_table_task() {
let task = AlterTableTask {
alter_table: AlterExpr::default(),
};
let output = serde_json::to_vec(&task).unwrap();
let de = serde_json::from_slice(&output).unwrap();
assert_eq!(task, de);
}
}


@@ -9,6 +9,7 @@ async-stream.workspace = true
async-trait.workspace = true
backon = "0.4"
common-error = { workspace = true }
common-macro = { workspace = true }
common-runtime = { workspace = true }
common-telemetry = { workspace = true }
futures.workspace = true


@@ -18,26 +18,26 @@ use std::sync::Arc;
use common_error::ext::{BoxedError, ErrorExt};
use common_error::status_code::StatusCode;
use common_macro::stack_trace_debug;
use snafu::{Location, Snafu};
use crate::procedure::ProcedureId;
/// Procedure error.
#[derive(Debug, Snafu)]
#[derive(Snafu)]
#[snafu(visibility(pub))]
#[stack_trace_debug]
pub enum Error {
#[snafu(display(
"Failed to execute procedure due to external error, source: {}",
source
))]
#[snafu(display("Failed to execute procedure due to external error"))]
External { source: BoxedError },
#[snafu(display("Loader {} is already registered", name))]
LoaderConflict { name: String, location: Location },
#[snafu(display("Failed to serialize to json, source: {}", source))]
#[snafu(display("Failed to serialize to json"))]
ToJson {
source: serde_json::Error,
#[snafu(source)]
error: serde_json::Error,
location: Location,
},
@@ -47,83 +47,85 @@ pub enum Error {
location: Location,
},
#[snafu(display("Failed to put state, key: '{key}', source: {source}"))]
#[snafu(display("Failed to put state, key: '{key}'"))]
PutState {
key: String,
location: Location,
source: BoxedError,
},
#[snafu(display("Failed to delete {}, source: {}", key, source))]
#[snafu(display("Failed to delete {}", key))]
DeleteState {
key: String,
source: object_store::Error,
#[snafu(source)]
error: object_store::Error,
},
#[snafu(display("Failed to delete keys: '{keys}', source: {source}"))]
#[snafu(display("Failed to delete keys: '{keys}'"))]
DeleteStates {
keys: String,
location: Location,
source: BoxedError,
},
#[snafu(display("Failed to list state, path: '{path}', source: {source}"))]
#[snafu(display("Failed to list state, path: '{path}'"))]
ListState {
path: String,
location: Location,
source: BoxedError,
},
#[snafu(display("Failed to deserialize from json, source: {}", source))]
#[snafu(display("Failed to deserialize from json"))]
FromJson {
source: serde_json::Error,
#[snafu(source)]
error: serde_json::Error,
location: Location,
},
#[snafu(display("Procedure exec failed, source: {}", source))]
#[snafu(display("Procedure exec failed"))]
RetryLater { source: BoxedError },
#[snafu(display("Procedure panics, procedure_id: {}", procedure_id))]
ProcedurePanic { procedure_id: ProcedureId },
#[snafu(display("Failed to wait watcher, source: {}", source))]
#[snafu(display("Failed to wait watcher"))]
WaitWatcher {
source: tokio::sync::watch::error::RecvError,
#[snafu(source)]
error: tokio::sync::watch::error::RecvError,
location: Location,
},
#[snafu(display("Failed to execute procedure, source: {}", source))]
#[snafu(display("Failed to execute procedure"))]
ProcedureExec {
source: Arc<Error>,
location: Location,
},
#[snafu(display(
"Procedure retry exceeded max times, procedure_id: {}, source:{}",
procedure_id,
source
))]
#[snafu(display("Procedure retry exceeded max times, procedure_id: {}", procedure_id))]
RetryTimesExceeded {
source: Arc<Error>,
procedure_id: ProcedureId,
},
#[snafu(display("Corrupted data, error: {source}"))]
CorruptedData { source: FromUtf8Error },
#[snafu(display("Corrupted data"))]
CorruptedData {
#[snafu(source)]
error: FromUtf8Error,
},
#[snafu(display("Failed to start the remove_outdated_meta method, error: {}", source))]
#[snafu(display("Failed to start the remove_outdated_meta method"))]
StartRemoveOutdatedMetaTask {
source: common_runtime::error::Error,
location: Location,
},
#[snafu(display("Failed to stop the remove_outdated_meta method, error: {}", source))]
#[snafu(display("Failed to stop the remove_outdated_meta method"))]
StopRemoveOutdatedMetaTask {
source: common_runtime::error::Error,
location: Location,
},
#[snafu(display("Subprocedure {} failed, source: {}", subprocedure_id, source))]
#[snafu(display("Subprocedure {} failed", subprocedure_id))]
SubprocedureFailed {
subprocedure_id: ProcedureId,
source: Arc<Error>,
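The pattern repeated across this enum (and the error enums in the files below): a field holding a foreign error type is renamed from source to error and tagged #[snafu(source)], so snafu still threads it through as the error source while the display string drops its "source: {}" suffix; the stack_trace_debug derive supplies a Debug impl that prints the whole chain instead. Sources that are themselves ErrorExt (BoxedError, Arc<Error>) keep the source name. A minimal sketch with plain snafu standing in for the project's derive:

use snafu::{Location, Snafu};

#[derive(Debug, Snafu)]
#[snafu(visibility(pub))]
pub enum Error {
    // #[snafu(source)] marks `error` as the source even though the field
    // is not named `source`; the message omits it because the Debug output
    // (stack_trace_debug in the real code) already renders the chain.
    #[snafu(display("Failed to serialize to json"))]
    ToJson {
        #[snafu(source)]
        error: serde_json::Error,
        location: Location,
    },
}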


@@ -229,7 +229,7 @@ impl ManagerContext {
let procedure = loader(&message.data)
.map_err(|e| {
logging::error!(
"Failed to load procedure data, key: {}, source: {}",
"Failed to load procedure data, key: {}, source: {:?}",
procedure_id,
e
);


@@ -453,7 +453,7 @@ mod tests {
use std::sync::Arc;
use async_trait::async_trait;
use common_error::ext::PlainError;
use common_error::ext::{ErrorExt, PlainError};
use common_error::mock::MockError;
use common_error::status_code::StatusCode;
use common_test_util::temp_dir::create_temp_dir;
@@ -942,7 +942,7 @@ mod tests {
// Run the runner and execute the procedure.
runner.run().await;
let err = meta.state().error().unwrap().to_string();
let err = meta.state().error().unwrap().output_msg();
assert!(err.contains("subprocedure failed"), "{err}");
}
}


@@ -224,7 +224,7 @@ impl ProcedureStore {
serde_json::from_slice(value)
.map_err(|e| {
// `e` doesn't impl ErrorExt so we print it as a normal error.
logging::error!("Failed to parse value, key: {:?}, source: {}", key, e);
logging::error!("Failed to parse value, key: {:?}, source: {:?}", key, e);
e
})
.ok()


@@ -8,6 +8,7 @@ license.workspace = true
api = { workspace = true }
async-trait.workspace = true
common-error = { workspace = true }
common-macro = { workspace = true }
common-recordbatch = { workspace = true }
common-time = { workspace = true }
datafusion-common.workspace = true


@@ -17,6 +17,7 @@ use std::any::Any;
use arrow::error::ArrowError;
use common_error::ext::{BoxedError, ErrorExt};
use common_error::status_code::StatusCode;
use common_macro::stack_trace_debug;
use common_recordbatch::error::Error as RecordbatchError;
use datafusion_common::DataFusionError;
use datatypes::arrow;
@@ -26,28 +27,27 @@ use datatypes::prelude::ConcreteDataType;
use snafu::{Location, Snafu};
use statrs::StatsError;
#[derive(Debug, Snafu)]
#[derive(Snafu)]
#[snafu(visibility(pub))]
#[stack_trace_debug]
pub enum Error {
#[snafu(display("Fail to execute Python UDF, source: {}", msg))]
#[snafu(display("Failed to execute Python UDF: {}", msg))]
PyUdf {
// TODO(discord9): find a way to prevent the circular dependency (query <- script <- query) so we can use script's error type
msg: String,
location: Location,
},
#[snafu(display(
"Fail to create temporary recordbatch when eval Python UDF, source: {}",
source
))]
#[snafu(display("Failed to create temporary recordbatch when evaluating Python UDF"))]
UdfTempRecordBatch {
location: Location,
source: RecordbatchError,
},
#[snafu(display("Fail to execute function, source: {}", source))]
#[snafu(display("Failed to execute function"))]
ExecuteFunction {
source: DataFusionError,
#[snafu(source)]
error: DataFusionError,
location: Location,
},
@@ -58,25 +58,26 @@ pub enum Error {
location: Location,
},
#[snafu(display("Fail to generate function, source: {}", source))]
#[snafu(display("Failed to generate function"))]
GenerateFunction {
source: StatsError,
#[snafu(source)]
error: StatsError,
location: Location,
},
#[snafu(display("Fail to cast scalar value into vector: {}", source))]
#[snafu(display("Failed to cast scalar value into vector"))]
FromScalarValue {
location: Location,
source: DataTypeError,
},
#[snafu(display("Fail to cast arrow array into vector: {}", source))]
#[snafu(display("Failed to cast arrow array into vector"))]
FromArrowArray {
location: Location,
source: DataTypeError,
},
#[snafu(display("Fail to cast arrow array into vector: {:?}, {}", data_type, source))]
#[snafu(display("Failed to cast arrow array into vector: {:?}", data_type))]
IntoVector {
location: Location,
source: DataTypeError,
@@ -110,56 +111,53 @@ pub enum Error {
#[snafu(display("Not expected to run ExecutionPlan more than once"))]
ExecuteRepeatedly { location: Location },
#[snafu(display("General DataFusion error, source: {}", source))]
#[snafu(display("General DataFusion error"))]
GeneralDataFusion {
source: DataFusionError,
#[snafu(source)]
error: DataFusionError,
location: Location,
},
#[snafu(display(
"Failed to convert DataFusion's recordbatch stream, source: {}",
source
))]
#[snafu(display("Failed to convert DataFusion's recordbatch stream"))]
ConvertDfRecordBatchStream {
location: Location,
source: common_recordbatch::error::Error,
},
#[snafu(display("Failed to convert arrow schema, source: {}", source))]
#[snafu(display("Failed to convert arrow schema"))]
ConvertArrowSchema {
location: Location,
source: DataTypeError,
},
#[snafu(display("Failed to execute physical plan, source: {}", source))]
#[snafu(display("Failed to execute physical plan"))]
ExecutePhysicalPlan {
location: Location,
source: BoxedError,
},
#[snafu(display("Failed to cast array to {:?}, source: {}", typ, source))]
#[snafu(display("Failed to cast array to {:?}", typ))]
TypeCast {
source: ArrowError,
#[snafu(source)]
error: ArrowError,
typ: arrow::datatypes::DataType,
location: Location,
},
#[snafu(display(
"Failed to perform compute operation on arrow arrays, source: {}",
source
))]
#[snafu(display("Failed to perform compute operation on arrow arrays"))]
ArrowCompute {
source: ArrowError,
#[snafu(source)]
error: ArrowError,
location: Location,
},
#[snafu(display("Query engine fail to cast value: {}", source))]
#[snafu(display("Query engine failed to cast value"))]
ToScalarValue {
location: Location,
source: DataTypeError,
},
#[snafu(display("Failed to get scalar vector, {}", source))]
#[snafu(display("Failed to get scalar vector"))]
GetScalarVector {
location: Location,
source: DataTypeError,


@@ -6,6 +6,7 @@ license.workspace = true
[dependencies]
common-error = { workspace = true }
common-macro = { workspace = true }
datafusion-common.workspace = true
datafusion.workspace = true
datatypes = { workspace = true }


@@ -17,27 +17,30 @@ use std::any::Any;
use common_error::ext::{BoxedError, ErrorExt};
use common_error::status_code::StatusCode;
use common_macro::stack_trace_debug;
use datatypes::prelude::ConcreteDataType;
use snafu::{Location, Snafu};
pub type Result<T> = std::result::Result<T, Error>;
#[derive(Debug, Snafu)]
#[derive(Snafu)]
#[snafu(visibility(pub))]
#[stack_trace_debug]
pub enum Error {
#[snafu(display("Fail to create datafusion record batch, source: {}", source))]
#[snafu(display("Failed to create datafusion record batch"))]
NewDfRecordBatch {
source: datatypes::arrow::error::ArrowError,
#[snafu(source)]
error: datatypes::arrow::error::ArrowError,
location: Location,
},
#[snafu(display("Data types error, source: {}", source))]
#[snafu(display("Data types error"))]
DataTypes {
location: Location,
source: datatypes::error::Error,
},
#[snafu(display("External error, location: {}, source: {}", location, source))]
#[snafu(display("External error"))]
External {
location: Location,
source: BoxedError,
@@ -46,38 +49,41 @@ pub enum Error {
#[snafu(display("Failed to create RecordBatches, reason: {}", reason))]
CreateRecordBatches { reason: String, location: Location },
#[snafu(display("Failed to convert Arrow schema, source: {}", source))]
#[snafu(display("Failed to convert Arrow schema"))]
SchemaConversion {
source: datatypes::error::Error,
location: Location,
},
#[snafu(display("Failed to poll stream, source: {}", source))]
#[snafu(display("Failed to poll stream"))]
PollStream {
source: datafusion::error::DataFusionError,
#[snafu(source)]
error: datafusion::error::DataFusionError,
location: Location,
},
#[snafu(display("Fail to format record batch, source: {}", source))]
#[snafu(display("Failed to format record batch"))]
Format {
source: datatypes::arrow::error::ArrowError,
#[snafu(source)]
error: datatypes::arrow::error::ArrowError,
location: Location,
},
#[snafu(display("Failed to init Recordbatch stream, source: {}", source))]
#[snafu(display("Failed to init Recordbatch stream"))]
InitRecordbatchStream {
source: datafusion_common::DataFusionError,
#[snafu(source)]
error: datafusion_common::DataFusionError,
location: Location,
},
#[snafu(display(
"Failed to project Arrow RecordBatch with schema {:?} and projection {:?}, source: {}",
"Failed to project Arrow RecordBatch with schema {:?} and projection {:?}",
schema,
projection,
source
))]
ProjectArrowRecordBatch {
source: datatypes::arrow::error::ArrowError,
#[snafu(source)]
error: datatypes::arrow::error::ArrowError,
location: Location,
schema: datatypes::schema::SchemaRef,
projection: Vec<usize>,
@@ -91,10 +97,9 @@ pub enum Error {
},
#[snafu(display(
"Failed to cast vector of type '{:?}' to type '{:?}', source: {}",
"Failed to cast vector of type '{:?}' to type '{:?}'",
from_type,
to_type,
source
))]
CastVector {
from_type: ConcreteDataType,


@@ -7,6 +7,7 @@ license.workspace = true
[dependencies]
async-trait.workspace = true
common-error = { workspace = true }
common-macro = { workspace = true }
common-telemetry = { workspace = true }
metrics.workspace = true
once_cell.workspace = true


@@ -15,31 +15,31 @@
use std::any::Any;
use common_error::ext::ErrorExt;
use common_macro::stack_trace_debug;
use snafu::{Location, Snafu};
use tokio::task::JoinError;
pub type Result<T> = std::result::Result<T, Error>;
#[derive(Debug, Snafu)]
#[derive(Snafu)]
#[snafu(visibility(pub(crate)))]
#[stack_trace_debug]
pub enum Error {
#[snafu(display("Failed to build runtime, source: {}", source))]
#[snafu(display("Failed to build runtime"))]
BuildRuntime {
source: std::io::Error,
#[snafu(source)]
error: std::io::Error,
location: Location,
},
#[snafu(display("Repeated task {} is already started", name))]
IllegalState { name: String, location: Location },
#[snafu(display(
"Failed to wait for repeated task {} to stop, source: {}",
name,
source
))]
#[snafu(display("Failed to wait for repeated task {} to stop", name))]
WaitGcTaskStop {
name: String,
source: JoinError,
#[snafu(source)]
error: JoinError,
location: Location,
},
}


@@ -11,6 +11,7 @@ bytes = "1.1"
catalog = { workspace = true }
common-catalog = { workspace = true }
common-error = { workspace = true }
common-macro = { workspace = true }
common-telemetry = { workspace = true }
datafusion-common.workspace = true
datafusion-expr.workspace = true


@@ -16,13 +16,15 @@ use std::any::Any;
use common_error::ext::{BoxedError, ErrorExt};
use common_error::status_code::StatusCode;
use common_macro::stack_trace_debug;
use datafusion::error::DataFusionError;
use datatypes::prelude::ConcreteDataType;
use prost::{DecodeError, EncodeError};
use snafu::{Location, Snafu};
#[derive(Debug, Snafu)]
#[derive(Snafu)]
#[snafu(visibility(pub))]
#[stack_trace_debug]
pub enum Error {
#[snafu(display("Unsupported physical plan: {}", name))]
UnsupportedPlan { name: String, location: Location },
@@ -39,15 +41,17 @@ pub enum Error {
#[snafu(display("Unsupported substrait type: {}", ty))]
UnsupportedSubstraitType { ty: String, location: Location },
#[snafu(display("Failed to decode substrait relation, source: {}", source))]
#[snafu(display("Failed to decode substrait relation"))]
DecodeRel {
source: DecodeError,
#[snafu(source)]
error: DecodeError,
location: Location,
},
#[snafu(display("Failed to encode substrait relation, source: {}", source))]
#[snafu(display("Failed to encode substrait relation"))]
EncodeRel {
source: EncodeError,
#[snafu(source)]
error: EncodeError,
location: Location,
},
@@ -67,13 +71,14 @@ pub enum Error {
#[snafu(display("Invalid parameters: {}", reason))]
InvalidParameters { reason: String, location: Location },
#[snafu(display("Internal error from DataFusion: {}", source))]
#[snafu(display("Internal error from DataFusion"))]
DFInternal {
source: DataFusionError,
#[snafu(source)]
error: DataFusionError,
location: Location,
},
#[snafu(display("Internal error: {}", source))]
#[snafu(display("Internal error"))]
Internal {
location: Location,
source: BoxedError,
@@ -95,28 +100,30 @@ pub enum Error {
location: Location,
},
#[snafu(display("Failed to convert DataFusion schema, source: {}", source))]
#[snafu(display("Failed to convert DataFusion schema"))]
ConvertDfSchema {
location: Location,
source: datatypes::error::Error,
},
#[snafu(display("Unable to resolve table: {table_name}, error: {source}"))]
#[snafu(display("Unable to resolve table: {table_name}"))]
ResolveTable {
table_name: String,
location: Location,
source: catalog::error::Error,
},
#[snafu(display("Failed to encode DataFusion plan, source: {}", source))]
#[snafu(display("Failed to encode DataFusion plan"))]
EncodeDfPlan {
source: datafusion::error::DataFusionError,
#[snafu(source)]
error: datafusion::error::DataFusionError,
location: Location,
},
#[snafu(display("Failed to decode DataFusion plan, source: {}", source))]
#[snafu(display("Failed to decode DataFusion plan"))]
DecodeDfPlan {
source: datafusion::error::DataFusionError,
#[snafu(source)]
error: datafusion::error::DataFusionError,
location: Location,
},
}


@@ -38,94 +38,40 @@ macro_rules! error {
// error!(e; target: "my_target", "a {} event", "log")
($e:expr; target: $target:expr, $($arg:tt)+) => ({
use $crate::common_error::ext::ErrorExt;
use std::error::Error;
match ($e.source(), $e.location_opt()) {
(Some(source), Some(location)) => {
$crate::log!(
target: $target,
$crate::logging::Level::ERROR,
err.msg = %$e,
err.code = %$e.status_code(),
err.source = source,
err.location = %location,
$($arg)+
)
},
(Some(source), None) => {
$crate::log!(
target: $target,
$crate::logging::Level::ERROR,
err.msg = %$e,
err.code = %$e.status_code(),
err.source = source,
$($arg)+
)
},
(None, Some(location)) => {
$crate::log!(
target: $target,
$crate::logging::Level::ERROR,
err.msg = %$e,
err.code = %$e.status_code(),
err.location = %location,
$($arg)+
)
},
(None, None) => {
$crate::log!(
target: $target,
$crate::logging::Level::ERROR,
err.msg = %$e,
err.code = %$e.status_code(),
$($arg)+
)
}
}
$crate::log!(
target: $target,
$crate::logging::Level::ERROR,
err = ?$e,
$($arg)+
)
});
// error!(%e; target: "my_target", "a {} event", "log")
(%$e:expr; target: $target:expr, $($arg:tt)+) => ({
$crate::log!(
target: $target,
$crate::logging::Level::ERROR,
err = %$e,
$($arg)+
)
});
// error!(e; "a {} event", "log")
($e:expr; $($arg:tt)+) => ({
use std::error::Error;
use $crate::common_error::ext::ErrorExt;
match ($e.source(), $e.location_opt()) {
(Some(source), Some(location)) => {
$crate::log!(
$crate::logging::Level::ERROR,
err.msg = %$e,
err.code = %$e.status_code(),
err.source = source,
err.location = %location,
$($arg)+
)
},
(Some(source), None) => {
$crate::log!(
$crate::logging::Level::ERROR,
err.msg = %$e,
err.code = %$e.status_code(),
err.source = source,
$($arg)+
)
},
(None, Some(location)) => {
$crate::log!(
$crate::logging::Level::ERROR,
err.msg = %$e,
err.code = %$e.status_code(),
err.location = %location,
$($arg)+
)
},
(None, None) => {
$crate::log!(
$crate::logging::Level::ERROR,
err.msg = %$e,
err.code = %$e.status_code(),
$($arg)+
)
}
}
$crate::log!(
$crate::logging::Level::ERROR,
err = ?$e,
$($arg)+
)
});
// error!(%e; "a {} event", "log")
(%$e:expr; $($arg:tt)+) => ({
$crate::log!(
$crate::logging::Level::ERROR,
err = %$e,
$($arg)+
)
});
// error!("a {} event", "log")
@@ -144,46 +90,20 @@ macro_rules! warn {
// warn!(e; "a {} event", "log")
($e:expr; $($arg:tt)+) => ({
use std::error::Error;
use $crate::common_error::ext::ErrorExt;
match ($e.source(), $e.location_opt()) {
(Some(source), Some(location)) => {
$crate::log!(
$crate::logging::Level::WARN,
err.msg = %$e,
err.code = %$e.status_code(),
err.source = source,
err.location = %location,
$($arg)+
)
},
(Some(source), None) => {
$crate::log!(
$crate::logging::Level::WARN,
err.msg = %$e,
err.code = %$e.status_code(),
err.source = source,
$($arg)+
)
},
(None, Some(location)) => {
$crate::log!(
$crate::logging::Level::WARN,
err.msg = %$e,
err.code = %$e.status_code(),
err.location = %location,
$($arg)+
)
},
(None, None) => {
$crate::log!(
$crate::logging::Level::WARN,
err.msg = %$e,
err.code = %$e.status_code(),
$($arg)+
)
}
}
$crate::log!(
$crate::logging::Level::WARN,
err = ?$e,
$($arg)+
)
});
// warn!(%e; "a {} event", "log")
(%$e:expr; $($arg:tt)+) => ({
$crate::log!(
$crate::logging::Level::WARN,
err = %$e,
$($arg)+
)
});
// warn!("a {} event", "log")
@@ -305,8 +225,10 @@ mod tests {
error!(target: "my_target", "hello {}", "world");
// Supports both owned and reference type.
error!(err; target: "my_target", "hello {}", "world");
error!(%err; target: "my_target", "hello {}", "world");
error!(err_ref; target: "my_target", "hello {}", "world");
error!(err_ref2; "hello {}", "world");
error!(%err_ref2; "hello {}", "world");
error!("hello {}", "world");
let root_err = MockError::with_source(err);
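The net effect of the macro change, in sketch form: the hand-rolled match over ($e.source(), $e.location_opt()) is gone, and the error is captured through a single tracing field whose Debug form (from stack_trace_debug) already carries the source chain and locations.

// error!(e; "flush failed")  => log!(Level::ERROR, err = ?e, "flush failed");
// error!(%e; "flush failed") => log!(Level::ERROR, err = %e, "flush failed"); // Display only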


@@ -14,6 +14,7 @@
// Metric utilities, inspired by Databend.
use std::fmt;
use std::sync::{Arc, Once, RwLock};
use std::time::{Duration, Instant};
@@ -63,6 +64,7 @@ pub fn try_handle() -> Option<PrometheusHandle> {
pub struct Timer {
start: Instant,
histogram: Histogram,
observed: bool,
}
impl From<Histogram> for Timer {
@@ -71,12 +73,22 @@ impl From<Histogram> for Timer {
}
}
impl fmt::Debug for Timer {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Timer")
.field("start", &self.start)
.field("observed", &self.observed)
.finish()
}
}
impl Timer {
/// Creates a timer from given histogram.
pub fn from_histogram(histogram: Histogram) -> Self {
Self {
start: Instant::now(),
histogram,
observed: false,
}
}
@@ -85,6 +97,7 @@ impl Timer {
Self {
start: Instant::now(),
histogram: register_histogram!(name),
observed: false,
}
}
@@ -93,6 +106,7 @@ impl Timer {
Self {
start: Instant::now(),
histogram: register_histogram!(name, labels),
observed: false,
}
}
@@ -100,11 +114,18 @@ impl Timer {
pub fn elapsed(&self) -> Duration {
self.start.elapsed()
}
/// Discards the timer result.
pub fn discard(mut self) {
self.observed = true;
}
}
impl Drop for Timer {
fn drop(&mut self) {
self.histogram.record(self.elapsed())
if !self.observed {
self.histogram.record(self.elapsed())
}
}
}
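The observed flag lets a call site drop a sample instead of recording it, e.g. so failed requests do not skew a latency histogram. A hypothetical sketch (do_write is a stand-in, not a real function):

fn do_write() -> Result<(), String> {
    Ok(()) // stand-in for the real work being timed
}

// Record latency only for successful writes.
fn handle_write(timer: Timer) -> Result<(), String> {
    if let Err(e) = do_write() {
        // Failure path: discard the sample.
        timer.discard();
        return Err(e);
    }
    // `timer` drops here and records the elapsed time.
    Ok(())
}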


@@ -9,6 +9,7 @@ arrow.workspace = true
chrono-tz = "0.8"
chrono.workspace = true
common-error = { workspace = true }
common-macro = { workspace = true }
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
snafu = { version = "0.7", features = ["backtraces"] }

Some files were not shown because too many files have changed in this diff.