Compare commits


79 Commits

Author SHA1 Message Date
shuiyisong
59ddfa84ec fix: check and clippy
Signed-off-by: shuiyisong <xixing.sys@gmail.com>
2025-11-26 18:35:21 +08:00
evenyag
dd043eadc4 feat: add file_scan_cost
Signed-off-by: evenyag <realevenyag@gmail.com>
2025-11-26 18:20:06 +08:00
evenyag
7e6af2c7ee feat: collect the whole fetch time
Signed-off-by: evenyag <realevenyag@gmail.com>
2025-11-26 18:20:06 +08:00
evenyag
87d3b17f4d feat: update parquet fetch metrics
Signed-off-by: evenyag <realevenyag@gmail.com>
2025-11-26 18:20:06 +08:00
evenyag
5acac3d403 chore: fix compiler errors
Signed-off-by: evenyag <realevenyag@gmail.com>
2025-11-26 18:20:06 +08:00
evenyag
f9c66ba0de feat: implement debug for new metrics
Signed-off-by: evenyag <realevenyag@gmail.com>
2025-11-26 18:20:06 +08:00
evenyag
37847a8df6 feat: debug print metrics in ScanMetricsSet
Signed-off-by: evenyag <realevenyag@gmail.com>
2025-11-26 18:20:06 +08:00
evenyag
6e06ac9e5c feat: init verbose metrics
Signed-off-by: evenyag <realevenyag@gmail.com>
2025-11-26 18:20:06 +08:00
evenyag
09effc8128 feat: add fetch metrics to ReaderMetrics
Signed-off-by: evenyag <realevenyag@gmail.com>
2025-11-26 18:20:06 +08:00
evenyag
c14728e3ae feat: collect more metrics for memory row group
Signed-off-by: evenyag <realevenyag@gmail.com>
2025-11-26 18:20:06 +08:00
evenyag
cce4d56e00 feat: add apply metrics
Signed-off-by: evenyag <realevenyag@gmail.com>
2025-11-26 18:20:06 +08:00
evenyag
69cf13b33a feat: add parquet metadata metrics
Signed-off-by: evenyag <realevenyag@gmail.com>
2025-11-26 18:20:06 +08:00
evenyag
c83a282b39 feat: collect parquet row group metrics
Signed-off-by: evenyag <realevenyag@gmail.com>
2025-11-26 18:20:06 +08:00
evenyag
5329efcdba feat: collect fulltext dir metrics for applier
Signed-off-by: evenyag <realevenyag@gmail.com>
2025-11-26 18:20:06 +08:00
evenyag
50b5c90d53 feat: collect read metrics in appliers
Signed-off-by: evenyag <realevenyag@gmail.com>
2025-11-26 18:20:06 +08:00
evenyag
fea2966dec feat: collect cache metrics for inverted and bloom index
Signed-off-by: evenyag <realevenyag@gmail.com>
2025-11-26 18:20:06 +08:00
evenyag
e00452c4db feat: collect metadata fetch metrics for inverted index
Signed-off-by: evenyag <realevenyag@gmail.com>
2025-11-26 18:20:06 +08:00
evenyag
7a31b2a8ea refactor: rename elapsed to fetch_elapsed
Signed-off-by: evenyag <realevenyag@gmail.com>
2025-11-26 18:20:06 +08:00
evenyag
f363d73f72 feat: add metrics for range_read and metadata
Signed-off-by: evenyag <realevenyag@gmail.com>
2025-11-26 18:20:06 +08:00
evenyag
7a6befcad3 feat: collect read metrics for inverted index
Signed-off-by: evenyag <realevenyag@gmail.com>
2025-11-26 18:20:06 +08:00
evenyag
d6c75ec55f feat: implement BloomFilterReadMetrics for BloomFilterReader
Signed-off-by: evenyag <realevenyag@gmail.com>
2025-11-26 18:20:06 +08:00
evenyag
5b8f1d819f feat: add metrics to fulltext index applier
Signed-off-by: evenyag <realevenyag@gmail.com>
2025-11-26 18:20:06 +08:00
evenyag
b68286e8af feat: add metrics to bloom applier
Signed-off-by: evenyag <realevenyag@gmail.com>
2025-11-26 18:20:06 +08:00
evenyag
4519607bc6 feat: add inverted applier metrics
Signed-off-by: evenyag <realevenyag@gmail.com>
2025-11-26 18:20:06 +08:00
shuiyisong
5472bdfc0f chore: return 404 if trace not found (#7304)
* chore: return 404 if trace not found

Signed-off-by: shuiyisong <xixing.sys@gmail.com>

* chore: add test and fix

Signed-off-by: shuiyisong <xixing.sys@gmail.com>

---------

Signed-off-by: shuiyisong <xixing.sys@gmail.com>
2025-11-26 09:39:28 +00:00
discord9
6485a26fa3 refactor: load metadata using official impl (#7302)
* refactor: load metadata using official impl

Signed-off-by: discord9 <discord9@163.com>

* pcr

Signed-off-by: discord9 <discord9@163.com>

---------

Signed-off-by: discord9 <discord9@163.com>
2025-11-26 08:52:04 +00:00
Weny Xu
69865c831d feat: batch region migration for failover (#7245)
* refactor: support multiple rows per event in event recorder

Signed-off-by: WenyXu <wenymedia@gmail.com>

* feat: batch region migration for failover

Signed-off-by: WenyXu <wenymedia@gmail.com>

* chore: apply suggestions from CR

Signed-off-by: WenyXu <wenymedia@gmail.com>

* test: add tests

Signed-off-by: WenyXu <wenymedia@gmail.com>

* chore: apply suggestions from CR

* chore: apply suggestions from CR

Signed-off-by: WenyXu <wenymedia@gmail.com>

* fix: fix unit tests

Signed-off-by: WenyXu <wenymedia@gmail.com>

* chore: apply suggestions from CR

---------

Signed-off-by: WenyXu <wenymedia@gmail.com>
2025-11-26 08:31:56 +00:00
shuiyisong
713525797a chore: optimize search traces from Grafana (#7298)
* chore: minor update

Signed-off-by: shuiyisong <xixing.sys@gmail.com>

* chore: add test

Signed-off-by: shuiyisong <xixing.sys@gmail.com>

* chore: update ua setup

Signed-off-by: shuiyisong <xixing.sys@gmail.com>

---------

Signed-off-by: shuiyisong <xixing.sys@gmail.com>
2025-11-26 08:06:15 +00:00
WaterWhisperer
09d1074e23 feat: add building option to build images based on distroless image (#7240)
Signed-off-by: WaterWhisperer <waterwhisperer24@qq.com>
2025-11-26 05:13:05 +00:00
dennis zhuang
1ebd25adbb ci: add multi lang tests workflow into release and nightly workflows (#7300)
* ci: add multi lang tests workflow into release and nightly workflows

Signed-off-by: Dennis Zhuang <killme2008@gmail.com>

* chore: emoji

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Signed-off-by: Dennis Zhuang <killme2008@gmail.com>

* refactor: apply suggestions

Signed-off-by: Dennis Zhuang <killme2008@gmail.com>

* ci: add notification when multi lang tests fail

Signed-off-by: Dennis Zhuang <killme2008@gmail.com>

* chore: revert ci and add nodejs

Signed-off-by: Dennis Zhuang <killme2008@gmail.com>

---------

Signed-off-by: Dennis Zhuang <killme2008@gmail.com>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-11-26 04:35:04 +00:00
dennis zhuang
c66f661494 chore: return meaningful message when content type mismatch in otel (#7301)
* chore: return meaningful message when content type mismatch in otel

Signed-off-by: Dennis Zhuang <killme2008@gmail.com>

* refactor: extract duplicated code

Signed-off-by: Dennis Zhuang <killme2008@gmail.com>

* chore: use a new error for failing to decode loki request

Signed-off-by: Dennis Zhuang <killme2008@gmail.com>

---------

Signed-off-by: Dennis Zhuang <killme2008@gmail.com>
2025-11-26 03:20:52 +00:00
Sicong Hu
2783a5218e feat: implement manual type for async index build (#7104)
* feat: prepare for index_build command

Signed-off-by: SNC123 <sinhco@outlook.com>

* feat: impl manual index build

Signed-off-by: SNC123 <sinhco@outlook.com>

* chore: clippy and fmt

Signed-off-by: SNC123 <sinhco@outlook.com>

* test: add idempotency check for manual build

Signed-off-by: SNC123 <sinhco@outlook.com>

* chore: apply suggestions

Signed-off-by: SNC123 <sinhco@outlook.com>

* chore: update proto

Signed-off-by: SNC123 <sinhco@outlook.com>

* chore: apply suggestions

Signed-off-by: SNC123 <sinhco@outlook.com>

* chore: fmt

Signed-off-by: SNC123 <sinhco@outlook.com>

* chore: update proto source to greptimedb

Signed-off-by: SNC123 <sinhco@outlook.com>

* fix: cargo.lock

Signed-off-by: SNC123 <sinhco@outlook.com>

---------

Signed-off-by: SNC123 <sinhco@outlook.com>
2025-11-25 15:21:30 +00:00
Weny Xu
6b6d1ce7c4 feat: introduce remap_manifests for RegionEngine (#7265)
* refactor: consolidate RegionManifestOptions creation logic

Signed-off-by: WenyXu <wenymedia@gmail.com>

* feat: introduce `remap_manifests` for `RegionEngine`

Signed-off-by: WenyXu <wenymedia@gmail.com>

* Apply suggestions from code review

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* chore: apply suggestions from CR

Signed-off-by: WenyXu <wenymedia@gmail.com>

---------

Signed-off-by: WenyXu <wenymedia@gmail.com>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-11-25 12:09:20 +00:00
dennis zhuang
7e4f0af065 fix: mysql binary date type and multi-lang ci tests (#7291)
* fix: mysql binary date type

Signed-off-by: Dennis Zhuang <killme2008@gmail.com>

* test: add unit test

Signed-off-by: Dennis Zhuang <killme2008@gmail.com>

* fix: typo

Signed-off-by: Dennis Zhuang <killme2008@gmail.com>

* ci: add multi lang integration tests ci

Signed-off-by: Dennis Zhuang <killme2008@gmail.com>

* fix: path and branch

Signed-off-by: Dennis Zhuang <killme2008@gmail.com>

* ci: prevent runner reuse

Signed-off-by: Dennis Zhuang <killme2008@gmail.com>

* fix: ci

Signed-off-by: Dennis Zhuang <killme2008@gmail.com>

* ci: Multi-language Integration Tests triggered only when pushing to main

Signed-off-by: Dennis Zhuang <killme2008@gmail.com>

---------

Signed-off-by: Dennis Zhuang <killme2008@gmail.com>
2025-11-25 08:26:50 +00:00
yihong
d811c4f060 fix: pre-commit all files failed (#7290)
Signed-off-by: yihong0618 <zouzou0208@gmail.com>
2025-11-25 07:27:46 +00:00
dennis zhuang
be3c26f2b8 fix: postgres timezone setting by default (#7289)
Signed-off-by: Dennis Zhuang <killme2008@gmail.com>
2025-11-25 03:00:43 +00:00
Ning Sun
9eb44071b1 fix: postgres show statement describe and timestamp text parsing (#7286) 2025-11-24 19:01:50 +00:00
ZonaHe
77e507cbe8 feat: update dashboard to v0.11.8 (#7281)
Co-authored-by: sunchanglong <sunchanglong@users.noreply.github.com>
2025-11-24 14:02:33 +00:00
Ruihang Xia
5bf72ab327 feat: decode_primary_key method for debugging (#7284)
* initial impl

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* third param

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* feat: support convert Dictionary type to ConcreteDataType

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* change to list array

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* simplify file_stream::create_stream

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* simplify FileRegion

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* type alias

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix format

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* remove stale test

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2025-11-24 12:41:54 +00:00
shuiyisong
9f4902b10a feat: reloadable tls client config (#7230)
* feat: add ReloadableClientTlsConfig

Signed-off-by: shuiyisong <xixing.sys@gmail.com>

* refactor: merge tls option with the reloadable

Signed-off-by: shuiyisong <xixing.sys@gmail.com>

* chore: rename function

Signed-off-by: shuiyisong <xixing.sys@gmail.com>

* chore: update comment

Signed-off-by: shuiyisong <xixing.sys@gmail.com>

* chore: extract tls loader

Signed-off-by: shuiyisong <xixing.sys@gmail.com>

* chore: minor comment update

Signed-off-by: shuiyisong <xixing.sys@gmail.com>

* chore: add serde default to watch field

Signed-off-by: shuiyisong <xixing.sys@gmail.com>

* chore: minor update

Signed-off-by: shuiyisong <xixing.sys@gmail.com>

* chore: add log

Signed-off-by: shuiyisong <xixing.sys@gmail.com>

* fix: add error log

Signed-off-by: shuiyisong <xixing.sys@gmail.com>

---------

Signed-off-by: shuiyisong <xixing.sys@gmail.com>
2025-11-24 11:52:11 +00:00
Ruihang Xia
b32ca3ad86 perf: parallelize file source region (#7285)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2025-11-24 11:37:48 +00:00
fys
d180cc8f4b chore: add INFORMATION_SCHEMA_ALERTS_TABLE_ID const value (#7288) 2025-11-24 11:32:14 +00:00
LFC
b099abc3a3 refactor: pub HttpOutputWriter for external use (#7287)
Signed-off-by: luofucong <luofc@foxmail.com>
2025-11-24 11:29:08 +00:00
discord9
52a576cf6d feat: basic gc scheduler (#7263)
* feat: basic gc scheduler

Signed-off-by: discord9 <discord9@163.com>

* refactor: rm dup code

Signed-off-by: discord9 <discord9@163.com>

* docs: todo for cleaner code

Signed-off-by: discord9 <discord9@163.com>

* chore

Signed-off-by: discord9 <discord9@163.com>

* feat: rm retry path

Signed-off-by: discord9 <discord9@163.com>

* per review

Signed-off-by: discord9 <discord9@163.com>

* feat: skip first full listing after metasrv start

Signed-off-by: discord9 <discord9@163.com>

---------

Signed-off-by: discord9 <discord9@163.com>
2025-11-24 07:57:18 +00:00
jeremyhi
c0d0b99a32 feat: track query memory pool (#7219)
Signed-off-by: jeremyhi <fengjiachun@gmail.com>
2025-11-24 06:18:23 +00:00
shuiyisong
7d575d18ee fix: unlimit trace_id query in jaeger API (#7283)
fix: unlimit trace_id query in jaeger API

Signed-off-by: shuiyisong <xixing.sys@gmail.com>
2025-11-24 03:41:48 +00:00
LFC
ff99bce37c refactor: make json value use json type (#7248)
Signed-off-by: luofucong <luofc@foxmail.com>
2025-11-24 02:40:48 +00:00
Ning Sun
2f447e6f91 fix: postgres extended query parameter parsing and type check (#7276)
* fix: postgres extended query parameter parsing and type check

* test: update sqlness output

* feat: implement FromSqlText for pg_interval

* chore: toml format
2025-11-24 02:40:35 +00:00
fys
c9a7b1fd68 docs(config): clarify store_addrs format (#7279) 2025-11-21 22:26:52 +00:00
Ruihang Xia
8c3da5e81f feat: simplify file_stream::create_stream (#7275)
* simplify file_stream::create_stream

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* simplify FileRegion

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix merge error

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* remove unused errors

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2025-11-21 12:17:59 +00:00
Ruihang Xia
c152a45d44 feat: support Dictionary type (#7277)
* feat: support Dictionary type

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix format

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* update proto

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2025-11-21 11:21:32 +00:00
Ruihang Xia
c054c13e48 perf: avoid unnecessary merge sort (#7274)
* perf: avoid unnecessary merge sort

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fantastic if chain

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* more comments

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix typo

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2025-11-21 09:02:25 +00:00
LFC
4a7c16586b refactor: remove Vectors from RecordBatch completely (#7184)
* refactor: remove `Vector`s from `RecordBatch` completely

Signed-off-by: luofucong <luofc@foxmail.com>

* resolve PR comments

Signed-off-by: luofucong <luofc@foxmail.com>

* resolve PR comments

Signed-off-by: luofucong <luofc@foxmail.com>

---------

Signed-off-by: luofucong <luofc@foxmail.com>
2025-11-21 08:53:35 +00:00
fys
c5173fccfc chore: add default value to sparse_primary_key_encoding config item (#7273) 2025-11-21 08:22:55 +00:00
LFC
c02754b44c feat: udf json_get_object (#7241)
Signed-off-by: luofucong <luofc@foxmail.com>
2025-11-21 04:50:38 +00:00
dennis zhuang
0b4f00feef fix!: align numeric type aliases with PostgreSQL and MySQL (#7270)
* fix: align numeric type aliases with those used in PostgreSQL and MySQL

Signed-off-by: Dennis Zhuang <killme2008@gmail.com>

* chore: update create_type_alias test

Signed-off-by: Dennis Zhuang <killme2008@gmail.com>

* chore: fix colon

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Signed-off-by: Dennis Zhuang <killme2008@gmail.com>

* fix: clone

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Signed-off-by: Dennis Zhuang <killme2008@gmail.com>

* fix: style

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Signed-off-by: Dennis Zhuang <killme2008@gmail.com>

---------

Signed-off-by: Dennis Zhuang <killme2008@gmail.com>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-11-21 04:49:17 +00:00
Ruihang Xia
c13febe35d feat: simplify merge scan stream (#7269)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2025-11-21 03:50:21 +00:00
Ning Sun
29d23e0ba1 fix: return sqlalchemy compatible version string in version() (#7271) 2025-11-21 03:30:11 +00:00
Ruihang Xia
25fab2ba7d feat: don't validate external table's region schema (#7268)
* feat: don't validate external table's region schema

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix format

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2025-11-21 03:28:14 +00:00
dennis zhuang
ec8263b464 fix: log not printed (#7272)
fix: log missing

Signed-off-by: Dennis Zhuang <killme2008@gmail.com>
2025-11-21 03:14:45 +00:00
Weny Xu
01ea7e1468 chore: add tests for election reset and region lease failure handling (#7266)
Signed-off-by: WenyXu <wenymedia@gmail.com>
2025-11-20 12:06:51 +00:00
WaterWhisperer
7f1da17150 feat: support alter database compaction options (#7251)
Signed-off-by: WaterWhisperer <waterwhisperer24@qq.com>
2025-11-20 01:39:35 +00:00
discord9
0cee4fa115 feat: gc get ref from manifest (#7260)
feat: get file ref from other manifest

Signed-off-by: discord9 <discord9@163.com>
2025-11-19 12:13:28 +00:00
discord9
e59612043d feat: gc scheduler ctx&procedure (#7252)
* feat: gc ctx&procedure

Signed-off-by: discord9 <discord9@163.com>

* fix: handle region not found case

Signed-off-by: discord9 <discord9@163.com>

* docs: more explain&todo

Signed-off-by: discord9 <discord9@163.com>

* per review

Signed-off-by: discord9 <discord9@163.com>

* chore: add time for region gc

Signed-off-by: discord9 <discord9@163.com>

* fix: explain why loader for gc region should fail

Signed-off-by: discord9 <discord9@163.com>

---------

Signed-off-by: discord9 <discord9@163.com>
2025-11-19 08:35:17 +00:00
Ruihang Xia
5d8819e7af fix: dynamic reload tracing layer loses trace id (#7257)
* not working

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* works

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix clippy

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* Apply suggestions from code review

Co-authored-by: Yingwen <realevenyag@gmail.com>

* clean up

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
Co-authored-by: Yingwen <realevenyag@gmail.com>
2025-11-19 06:16:56 +00:00
Yingwen
8b7b5c17c7 ci: update review code owners (#7250)
* ci: update review code owners

Signed-off-by: evenyag <realevenyag@gmail.com>

* ci: at least two owners

Signed-off-by: evenyag <realevenyag@gmail.com>

* chore: Update index owners

Co-authored-by: jeremyhi <jiachun_feng@proton.me>
Signed-off-by: evenyag <realevenyag@gmail.com>

---------

Signed-off-by: evenyag <realevenyag@gmail.com>
Co-authored-by: jeremyhi <jiachun_feng@proton.me>
2025-11-18 11:50:14 +00:00
Yingwen
ee35ec0a39 feat: split batches before merge (#7225)
* feat: split batches by rule in build_flat_sources()

It checks the num_series and splits batches when the series cardinality
is low

Signed-off-by: evenyag <realevenyag@gmail.com>

* fix: panic when no num_series available

Signed-off-by: evenyag <realevenyag@gmail.com>

* fix: don't subtract file index if checking mem range

Signed-off-by: evenyag <realevenyag@gmail.com>

* chore: update comments and control flow

Signed-off-by: evenyag <realevenyag@gmail.com>

* style: fix clippy

Signed-off-by: evenyag <realevenyag@gmail.com>

---------

Signed-off-by: evenyag <realevenyag@gmail.com>
2025-11-18 08:19:39 +00:00
McKnight22
605f3270e5 feat: implement compressed CSV/JSON export functionality (#7162)
* feat: implement compressed CSV/JSON export functionality

- Add CompressedWriter for real-time compression during CSV/JSON export
- Support GZIP, BZIP2, XZ, ZSTD compression formats
- Remove LazyBufferedWriter dependency for simplified architecture
- Implement Encoder -> Compressor -> FileWriter data flow
- Add tests for compressed CSV/JSON export

Signed-off-by: McKnight22 <tao.wang.22@outlook.com>

* feat: implement compressed CSV/JSON export functionality

- refactor and extend compressed_writer tests
- add coverage for Bzip2 and Xz compression

Signed-off-by: McKnight22 <tao.wang.22@outlook.com>

* feat: implement compressed CSV/JSON export functionality

- Switch to threshold-based chunked flushing
- Avoid unnecessary writes on empty buffers
- Replace direct write_all() calls with the new helper for consistency

Signed-off-by: McKnight22 <tao.wang.22@outlook.com>

* feat: implement compressed CSV/JSON import (COPY FROM) functionality

- Add support for reading compressed CSV and JSON in COPY FROM
- Support GZIP, BZIP2, XZ, ZSTD compression formats
- Add tests for compressed CSV/JSON import

Signed-off-by: McKnight22 <tao.wang.22@outlook.com>

* feat: implement compressed CSV/JSON export/import functionality

- Fix review comments

Signed-off-by: McKnight22 <tao.wang.22@outlook.com>

* feat: implement compressed CSV/JSON export/import functionality

- Move temp_dir out of the loop

Signed-off-by: McKnight22 <tao.wang.22@outlook.com>

* feat: implement compressed CSV/JSON export/import functionality

- Fix unreasonable locking logic

Co-authored-by: jeremyhi <jiachun_feng@proton.me>
Signed-off-by: McKnight22 <tao.wang.22@outlook.com>

---------

Signed-off-by: McKnight22 <tao.wang.22@outlook.com>
Co-authored-by: jeremyhi <jiachun_feng@proton.me>
2025-11-18 02:55:58 +00:00
LFC
4e9f419de7 refactor: make show tables fast under large tables (#7231)
fix: `show tables` is too slow under large tables

Signed-off-by: luofucong <luofc@foxmail.com>
2025-11-18 02:51:59 +00:00
discord9
29bbff3c90 feat: gc worker only local regions&test (#7203)
* feat: gc worker only on local region

Signed-off-by: discord9 <discord9@163.com>

* more check

Signed-off-by: discord9 <discord9@163.com>

* chore: stuff

Signed-off-by: discord9 <discord9@163.com>

* fix: ignore async index file for now

Signed-off-by: discord9 <discord9@163.com>

* fix: file removal rate calc

Signed-off-by: discord9 <discord9@163.com>

* chore: per review

Signed-off-by: discord9 <discord9@163.com>

* chore: per review

Signed-off-by: discord9 <discord9@163.com>

* clippy

Signed-off-by: discord9 <discord9@163.com>

---------

Signed-off-by: discord9 <discord9@163.com>
2025-11-18 02:45:09 +00:00
dennis zhuang
ff2a12a49d build: update opensrv-mysql to 0.10 (#7246)
* build: update opensrv-mysql to 0.10

Signed-off-by: Dennis Zhuang <killme2008@gmail.com>

* fix: format toml

Signed-off-by: Dennis Zhuang <killme2008@gmail.com>

* fix: format toml

Signed-off-by: Dennis Zhuang <killme2008@gmail.com>

---------

Signed-off-by: Dennis Zhuang <killme2008@gmail.com>
2025-11-18 02:38:16 +00:00
Yingwen
77483ad7d4 fix: allow compacting L1 files under append mode (#7239)
* fix: allow compacting L1 files under append mode

Signed-off-by: evenyag <realevenyag@gmail.com>

* feat: limit the number of compaction input files

Signed-off-by: evenyag <realevenyag@gmail.com>

---------

Signed-off-by: evenyag <realevenyag@gmail.com>
2025-11-17 12:46:30 +00:00
Weny Xu
6adc348fcd feat: support parallel table operations in COPY DATABASE (#7213)
* feat: support parallel table operations in COPY DATABASE

Signed-off-by: WenyXu <wenymedia@gmail.com>

* feat(cli): add a new `parallelism` parameter to control the parallelism during export

Signed-off-by: WenyXu <wenymedia@gmail.com>

* chore: add sqlness tests

Signed-off-by: WenyXu <wenymedia@gmail.com>

* chore: clippy

Signed-off-by: WenyXu <wenymedia@gmail.com>

* chore: apply suggestions from CR

Signed-off-by: WenyXu <wenymedia@gmail.com>

* refactor(cli): improve parallelism configuration for data export and import

Signed-off-by: WenyXu <wenymedia@gmail.com>

---------

Signed-off-by: WenyXu <wenymedia@gmail.com>
2025-11-17 12:22:51 +00:00
Ruihang Xia
cc61af7c65 feat: dynamic enable or disable trace (#6609)
* wip

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* set `TRACE_RELOAD_HANDLE`

Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>

* wrap http api

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* update dependencies

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* type alias and unwrap_or_else

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* better error handling

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* simplify

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* lazy initialize tracer

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* integration test

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>
Co-authored-by: Zhenchi <zhongzc_arch@outlook.com>
2025-11-17 12:16:46 +00:00
Ruihang Xia
1eb8d6b76b feat: build partition sources in parallel (#7243)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2025-11-17 11:44:48 +00:00
discord9
6c93c7d299 chore: bump version to beta.2 (#7238)
* chore: bump version to beta.2

Signed-off-by: discord9 <discord9@163.com>

* test: fix sqlness

Signed-off-by: discord9 <discord9@163.com>

---------

Signed-off-by: discord9 <discord9@163.com>
2025-11-17 08:57:59 +00:00
LFC
cdf9d18c36 refactor: create JsonValue for json value (#7214)
* refactor: create `JsonValue` for json value

Signed-off-by: luofucong <luofc@foxmail.com>

* resolve PR comments

Signed-off-by: luofucong <luofc@foxmail.com>

* update proto

Signed-off-by: luofucong <luofc@foxmail.com>

---------

Signed-off-by: luofucong <luofc@foxmail.com>
2025-11-17 08:21:17 +00:00
LFC
32168e8ca8 ci: dev-build with large page size (#7228)
* ci: able to build greptimedb with large page size in dev-build

Signed-off-by: luofucong <luofc@foxmail.com>

* Apply suggestions from code review

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

---------

Signed-off-by: luofucong <luofc@foxmail.com>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-11-17 02:38:16 +00:00
WaterWhisperer
de9ae6066f refactor: remove export_metrics and related configuration (#7236)
Signed-off-by: WaterWhisperer <waterwhisperer24@qq.com>
2025-11-17 02:32:22 +00:00
333 changed files with 16156 additions and 4978 deletions

.github/CODEOWNERS (vendored, 22 changed lines)

@@ -5,23 +5,23 @@
* @GreptimeTeam/db-approver
## [Module] Database Engine
/src/index @zhongzc
/src/index @evenyag @discord9 @WenyXu
/src/mito2 @evenyag @v0y4g3r @waynexia
/src/query @evenyag
/src/query @evenyag @waynexia @discord9
## [Module] Distributed
/src/common/meta @MichaelScofield
/src/common/procedure @MichaelScofield
/src/meta-client @MichaelScofield
/src/meta-srv @MichaelScofield
/src/common/meta @MichaelScofield @WenyXu
/src/common/procedure @MichaelScofield @WenyXu
/src/meta-client @MichaelScofield @WenyXu
/src/meta-srv @MichaelScofield @WenyXu
## [Module] Write Ahead Log
/src/log-store @v0y4g3r
/src/store-api @v0y4g3r
/src/log-store @v0y4g3r @WenyXu
/src/store-api @v0y4g3r @evenyag
## [Module] Metrics Engine
/src/metric-engine @waynexia
/src/promql @waynexia
/src/metric-engine @waynexia @WenyXu
/src/promql @waynexia @evenyag @discord9
## [Module] Flow
/src/flow @zhongzc @waynexia
/src/flow @discord9 @waynexia


@@ -32,9 +32,23 @@ inputs:
description: Image Registry
required: false
default: 'docker.io'
large-page-size:
description: Build GreptimeDB with large page size (65536).
required: false
default: 'false'
runs:
using: composite
steps:
- name: Set extra build environment variables
shell: bash
run: |
if [[ '${{ inputs.large-page-size }}' == 'true' ]]; then
echo 'EXTRA_BUILD_ENVS="JEMALLOC_SYS_WITH_LG_PAGE=16"' >> $GITHUB_ENV
else
echo 'EXTRA_BUILD_ENVS=' >> $GITHUB_ENV
fi
- name: Build greptime binary
shell: bash
if: ${{ inputs.build-android-artifacts == 'false' }}
@@ -45,7 +59,8 @@ runs:
FEATURES=${{ inputs.features }} \
BASE_IMAGE=${{ inputs.base-image }} \
IMAGE_NAMESPACE=${{ inputs.image-namespace }} \
IMAGE_REGISTRY=${{ inputs.image-registry }}
IMAGE_REGISTRY=${{ inputs.image-registry }} \
EXTRA_BUILD_ENVS=$EXTRA_BUILD_ENVS
- name: Upload artifacts
uses: ./.github/actions/upload-artifacts


@@ -27,6 +27,10 @@ inputs:
description: Working directory to build the artifacts
required: false
default: .
large-page-size:
description: Build GreptimeDB with large page size (65536).
required: false
default: 'false'
runs:
using: composite
steps:
@@ -59,6 +63,7 @@ runs:
working-dir: ${{ inputs.working-dir }}
image-registry: ${{ inputs.image-registry }}
image-namespace: ${{ inputs.image-namespace }}
large-page-size: ${{ inputs.large-page-size }}
- name: Clean up the target directory # Clean up the target directory for the centos7 base image, or it will still use the objects of last build.
shell: bash
@@ -77,6 +82,7 @@ runs:
working-dir: ${{ inputs.working-dir }}
image-registry: ${{ inputs.image-registry }}
image-namespace: ${{ inputs.image-namespace }}
large-page-size: ${{ inputs.large-page-size }}
- name: Build greptime on android base image
uses: ./.github/actions/build-greptime-binary
@@ -89,3 +95,4 @@ runs:
build-android-artifacts: true
image-registry: ${{ inputs.image-registry }}
image-namespace: ${{ inputs.image-namespace }}
large-page-size: ${{ inputs.large-page-size }}


@@ -4,10 +4,11 @@ name: GreptimeDB Development Build
on:
workflow_dispatch: # Allows you to run this workflow manually.
inputs:
repository:
description: The public repository to build
large-page-size:
description: Build GreptimeDB with large page size (65536).
type: boolean
required: false
default: GreptimeTeam/greptimedb
default: false
commit: # Note: We only pull the source code and use the current workflow to build the artifacts.
description: The commit to build
required: true
@@ -181,6 +182,7 @@ jobs:
working-dir: ${{ env.CHECKOUT_GREPTIMEDB_PATH }}
image-registry: ${{ vars.ECR_IMAGE_REGISTRY }}
image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }}
large-page-size: ${{ inputs.large-page-size }}
build-linux-arm64-artifacts:
name: Build linux-arm64 artifacts
@@ -214,6 +216,7 @@ jobs:
working-dir: ${{ env.CHECKOUT_GREPTIMEDB_PATH }}
image-registry: ${{ vars.ECR_IMAGE_REGISTRY }}
image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }}
large-page-size: ${{ inputs.large-page-size }}
release-images-to-dockerhub:
name: Build and push images to DockerHub
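
For reference, a minimal sketch of how the new `large-page-size` input could be supplied when dispatching the development build manually. The workflow file name `dev-build.yml` and the placeholder commit are assumptions for illustration, not taken from this diff.

# Hypothetical manual dispatch via the GitHub CLI. Setting large-page-size=true
# makes the composite action export JEMALLOC_SYS_WITH_LG_PAGE=16, as shown in the
# build-greptime-binary change above; other inputs keep their defaults.
gh workflow run dev-build.yml \
  -R GreptimeTeam/greptimedb \
  -f commit=<commit-sha> \
  -f large-page-size=true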

.github/workflows/multi-lang-tests.yml (new file, vendored, 57 changed lines)

@@ -0,0 +1,57 @@
name: Multi-language Integration Tests
on:
push:
branches:
- main
workflow_dispatch:
concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
build-greptimedb:
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
name: Build GreptimeDB binary
runs-on: ubuntu-latest
timeout-minutes: 60
steps:
- uses: actions/checkout@v4
with:
persist-credentials: false
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1
- uses: Swatinem/rust-cache@v2
with:
shared-key: "multi-lang-build"
cache-all-crates: "true"
save-if: ${{ github.ref == 'refs/heads/main' }}
- name: Install cargo-gc-bin
shell: bash
run: cargo install cargo-gc-bin --force
- name: Build greptime binary
shell: bash
run: cargo gc -- --bin greptime --features "pg_kvbackend,mysql_kvbackend"
- name: Pack greptime binary
shell: bash
run: |
mkdir bin && \
mv ./target/debug/greptime bin
- name: Print greptime binary info
run: ls -lh bin
- name: Upload greptime binary
uses: actions/upload-artifact@v4
with:
name: greptime-bin
path: bin/
retention-days: 1
run-multi-lang-tests:
name: Run Multi-language SDK Tests
needs: build-greptimedb
uses: ./.github/workflows/run-multi-lang-tests.yml
with:
artifact-name: greptime-bin


@@ -174,6 +174,18 @@ jobs:
image-registry: ${{ vars.ECR_IMAGE_REGISTRY }}
image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }}
run-multi-lang-tests:
name: Run Multi-language SDK Tests
if: ${{ inputs.build_linux_amd64_artifacts || github.event_name == 'schedule' }}
needs: [
allocate-runners,
build-linux-amd64-artifacts,
]
uses: ./.github/workflows/run-multi-lang-tests.yml
with:
artifact-name: greptime-linux-amd64-${{ needs.allocate-runners.outputs.version }}
artifact-is-tarball: true
release-images-to-dockerhub:
name: Build and push images to DockerHub
if: ${{ inputs.release_images || github.event_name == 'schedule' }}
@@ -301,7 +313,8 @@ jobs:
if: ${{ github.repository == 'GreptimeTeam/greptimedb' && always() }} # Not requiring successful dependent jobs, always run.
name: Send notification to Greptime team
needs: [
release-images-to-dockerhub
release-images-to-dockerhub,
run-multi-lang-tests,
]
runs-on: ubuntu-latest
permissions:
@@ -319,17 +332,17 @@ jobs:
run: pnpm tsx bin/report-ci-failure.ts
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
CI_REPORT_STATUS: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result == 'success' }}
CI_REPORT_STATUS: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result == 'success' && (needs.run-multi-lang-tests.result == 'success' || needs.run-multi-lang-tests.result == 'skipped') }}
- name: Notify nightly build successful result
uses: slackapi/slack-github-action@v1.23.0
if: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result == 'success' }}
if: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result == 'success' && (needs.run-multi-lang-tests.result == 'success' || needs.run-multi-lang-tests.result == 'skipped') }}
with:
payload: |
{"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has completed successfully."}
- name: Notify nightly build failed result
uses: slackapi/slack-github-action@v1.23.0
if: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result != 'success' }}
if: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result != 'success' || needs.run-multi-lang-tests.result == 'failure' }}
with:
payload: |
{"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has failed, please check ${{ steps.report-ci-status.outputs.html_url }}."}


@@ -215,6 +215,18 @@ jobs:
image-registry: ${{ vars.ECR_IMAGE_REGISTRY }}
image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }}
run-multi-lang-tests:
name: Run Multi-language SDK Tests
if: ${{ inputs.build_linux_amd64_artifacts || github.event_name == 'push' || github.event_name == 'schedule' }}
needs: [
allocate-runners,
build-linux-amd64-artifacts,
]
uses: ./.github/workflows/run-multi-lang-tests.yml
with:
artifact-name: greptime-linux-amd64-${{ needs.allocate-runners.outputs.version }}
artifact-is-tarball: true
build-macos-artifacts:
name: Build macOS artifacts
strategy:
@@ -303,6 +315,7 @@ jobs:
allocate-runners,
build-linux-amd64-artifacts,
build-linux-arm64-artifacts,
run-multi-lang-tests,
]
runs-on: ubuntu-latest
outputs:
@@ -381,6 +394,7 @@ jobs:
build-macos-artifacts,
build-windows-artifacts,
release-images-to-dockerhub,
run-multi-lang-tests,
]
runs-on: ubuntu-latest
steps:


@@ -0,0 +1,194 @@
# Reusable workflow for running multi-language SDK tests against GreptimeDB
# Used by: multi-lang-tests.yml, release.yml, nightly-build.yml
# Supports both direct binary artifacts and tarball artifacts
name: Run Multi-language SDK Tests
on:
workflow_call:
inputs:
artifact-name:
required: true
type: string
description: 'Name of the artifact containing greptime binary'
http-port:
required: false
type: string
default: '4000'
description: 'HTTP server port'
mysql-port:
required: false
type: string
default: '4002'
description: 'MySQL server port'
postgres-port:
required: false
type: string
default: '4003'
description: 'PostgreSQL server port'
db-name:
required: false
type: string
default: 'test_db'
description: 'Test database name'
username:
required: false
type: string
default: 'greptime_user'
description: 'Authentication username'
password:
required: false
type: string
default: 'greptime_pwd'
description: 'Authentication password'
timeout-minutes:
required: false
type: number
default: 30
description: 'Job timeout in minutes'
artifact-is-tarball:
required: false
type: boolean
default: false
description: 'Whether the artifact is a tarball (tar.gz) that needs to be extracted'
jobs:
run-tests:
name: Run Multi-language SDK Tests
runs-on: ubuntu-latest
timeout-minutes: ${{ inputs.timeout-minutes }}
steps:
- name: Checkout greptimedb-tests repository
uses: actions/checkout@v4
with:
repository: GreptimeTeam/greptimedb-tests
persist-credentials: false
- name: Download pre-built greptime binary
uses: actions/download-artifact@v4
with:
name: ${{ inputs.artifact-name }}
path: artifact
- name: Setup greptime binary
run: |
mkdir -p bin
if [ "${{ inputs.artifact-is-tarball }}" = "true" ]; then
# Extract tarball and find greptime binary
tar -xzf artifact/*.tar.gz -C artifact
find artifact -name "greptime" -type f -exec cp {} bin/greptime \;
else
# Direct binary format
if [ -f artifact/greptime ]; then
cp artifact/greptime bin/greptime
else
cp artifact/* bin/greptime
fi
fi
chmod +x ./bin/greptime
ls -lh ./bin/greptime
./bin/greptime --version
- name: Setup Java 17
uses: actions/setup-java@v4
with:
distribution: 'temurin'
java-version: '17'
cache: 'maven'
- name: Setup Python 3.8
uses: actions/setup-python@v5
with:
python-version: '3.8'
- name: Setup Go 1.24
uses: actions/setup-go@v5
with:
go-version: '1.24'
cache: true
cache-dependency-path: go-tests/go.sum
- name: Set up Node.js
uses: actions/setup-node@v4
with:
node-version: '18'
- name: Install Python dependencies
run: |
pip install mysql-connector-python psycopg2-binary
python3 -c "import mysql.connector; print(f'mysql-connector-python {mysql.connector.__version__}')"
python3 -c "import psycopg2; print(f'psycopg2 {psycopg2.__version__}')"
- name: Install Go dependencies
working-directory: go-tests
run: |
go mod download
go mod verify
go version
- name: Kill existing GreptimeDB processes
run: |
pkill -f greptime || true
sleep 2
- name: Start GreptimeDB standalone
run: |
./bin/greptime standalone start \
--http-addr 0.0.0.0:${{ inputs.http-port }} \
--rpc-addr 0.0.0.0:4001 \
--mysql-addr 0.0.0.0:${{ inputs.mysql-port }} \
--postgres-addr 0.0.0.0:${{ inputs.postgres-port }} \
--user-provider=static_user_provider:cmd:${{ inputs.username }}=${{ inputs.password }} > /tmp/greptimedb.log 2>&1 &
- name: Wait for GreptimeDB to be ready
run: |
echo "Waiting for GreptimeDB..."
for i in {1..60}; do
if curl -sf http://localhost:${{ inputs.http-port }}/health > /dev/null; then
echo "✅ GreptimeDB is ready"
exit 0
fi
sleep 2
done
echo "❌ GreptimeDB failed to start"
cat /tmp/greptimedb.log
exit 1
- name: Run multi-language tests
env:
DB_NAME: ${{ inputs.db-name }}
MYSQL_HOST: 127.0.0.1
MYSQL_PORT: ${{ inputs.mysql-port }}
POSTGRES_HOST: 127.0.0.1
POSTGRES_PORT: ${{ inputs.postgres-port }}
HTTP_HOST: 127.0.0.1
HTTP_PORT: ${{ inputs.http-port }}
GREPTIME_USERNAME: ${{ inputs.username }}
GREPTIME_PASSWORD: ${{ inputs.password }}
run: |
chmod +x ./run_tests.sh
./run_tests.sh
- name: Collect logs on failure
if: failure()
run: |
echo "=== GreptimeDB Logs ==="
cat /tmp/greptimedb.log || true
- name: Upload test logs on failure
if: failure()
uses: actions/upload-artifact@v4
with:
name: test-logs
path: |
/tmp/greptimedb.log
java-tests/target/surefire-reports/
python-tests/.pytest_cache/
go-tests/*.log
**/test-output/
retention-days: 7
- name: Cleanup
if: always()
run: |
pkill -f greptime || true

Cargo.lock (generated, 182 changed lines)

@@ -212,7 +212,7 @@ checksum = "d301b3b94cb4b2f23d7917810addbbaff90738e0ca2be692bd027e70d7e0330c"
[[package]]
name = "api"
version = "1.0.0-beta.1"
version = "1.0.0-beta.2"
dependencies = [
"arrow-schema",
"common-base",
@@ -733,7 +733,7 @@ dependencies = [
[[package]]
name = "auth"
version = "1.0.0-beta.1"
version = "1.0.0-beta.2"
dependencies = [
"api",
"async-trait",
@@ -1383,7 +1383,7 @@ dependencies = [
[[package]]
name = "cache"
version = "1.0.0-beta.1"
version = "1.0.0-beta.2"
dependencies = [
"catalog",
"common-error",
@@ -1418,7 +1418,7 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5"
[[package]]
name = "catalog"
version = "1.0.0-beta.1"
version = "1.0.0-beta.2"
dependencies = [
"api",
"arrow",
@@ -1763,7 +1763,7 @@ checksum = "b94f61472cee1439c0b966b47e3aca9ae07e45d070759512cd390ea2bebc6675"
[[package]]
name = "cli"
version = "1.0.0-beta.1"
version = "1.0.0-beta.2"
dependencies = [
"async-stream",
"async-trait",
@@ -1816,7 +1816,7 @@ dependencies = [
[[package]]
name = "client"
version = "1.0.0-beta.1"
version = "1.0.0-beta.2"
dependencies = [
"api",
"arc-swap",
@@ -1849,7 +1849,7 @@ dependencies = [
"snafu 0.8.6",
"store-api",
"substrait 0.37.3",
"substrait 1.0.0-beta.1",
"substrait 1.0.0-beta.2",
"tokio",
"tokio-stream",
"tonic 0.13.1",
@@ -1889,7 +1889,7 @@ dependencies = [
[[package]]
name = "cmd"
version = "1.0.0-beta.1"
version = "1.0.0-beta.2"
dependencies = [
"async-trait",
"auth",
@@ -2012,7 +2012,7 @@ checksum = "55b672471b4e9f9e95499ea597ff64941a309b2cdbffcc46f2cc5e2d971fd335"
[[package]]
name = "common-base"
version = "1.0.0-beta.1"
version = "1.0.0-beta.2"
dependencies = [
"anymap2",
"async-trait",
@@ -2036,14 +2036,14 @@ dependencies = [
[[package]]
name = "common-catalog"
version = "1.0.0-beta.1"
version = "1.0.0-beta.2"
dependencies = [
"const_format",
]
[[package]]
name = "common-config"
version = "1.0.0-beta.1"
version = "1.0.0-beta.2"
dependencies = [
"common-base",
"common-error",
@@ -2067,7 +2067,7 @@ dependencies = [
[[package]]
name = "common-datasource"
version = "1.0.0-beta.1"
version = "1.0.0-beta.2"
dependencies = [
"arrow",
"arrow-schema",
@@ -2102,7 +2102,7 @@ dependencies = [
[[package]]
name = "common-decimal"
version = "1.0.0-beta.1"
version = "1.0.0-beta.2"
dependencies = [
"bigdecimal 0.4.8",
"common-error",
@@ -2115,7 +2115,7 @@ dependencies = [
[[package]]
name = "common-error"
version = "1.0.0-beta.1"
version = "1.0.0-beta.2"
dependencies = [
"common-macro",
"http 1.3.1",
@@ -2126,7 +2126,7 @@ dependencies = [
[[package]]
name = "common-event-recorder"
version = "1.0.0-beta.1"
version = "1.0.0-beta.2"
dependencies = [
"api",
"async-trait",
@@ -2148,7 +2148,7 @@ dependencies = [
[[package]]
name = "common-frontend"
version = "1.0.0-beta.1"
version = "1.0.0-beta.2"
dependencies = [
"api",
"async-trait",
@@ -2170,7 +2170,7 @@ dependencies = [
[[package]]
name = "common-function"
version = "1.0.0-beta.1"
version = "1.0.0-beta.2"
dependencies = [
"ahash 0.8.12",
"api",
@@ -2208,6 +2208,7 @@ dependencies = [
"hyperloglogplus",
"jsonb",
"memchr",
"mito-codec",
"nalgebra",
"num",
"num-traits",
@@ -2229,7 +2230,7 @@ dependencies = [
[[package]]
name = "common-greptimedb-telemetry"
version = "1.0.0-beta.1"
version = "1.0.0-beta.2"
dependencies = [
"async-trait",
"common-runtime",
@@ -2246,7 +2247,7 @@ dependencies = [
[[package]]
name = "common-grpc"
version = "1.0.0-beta.1"
version = "1.0.0-beta.2"
dependencies = [
"api",
"arrow-flight",
@@ -2265,11 +2266,13 @@ dependencies = [
"hyper 1.6.0",
"hyper-util",
"lazy_static",
"notify",
"prost 0.13.5",
"rand 0.9.1",
"serde",
"serde_json",
"snafu 0.8.6",
"tempfile",
"tokio",
"tokio-util",
"tonic 0.13.1",
@@ -2279,7 +2282,7 @@ dependencies = [
[[package]]
name = "common-grpc-expr"
version = "1.0.0-beta.1"
version = "1.0.0-beta.2"
dependencies = [
"api",
"common-base",
@@ -2299,7 +2302,7 @@ dependencies = [
[[package]]
name = "common-macro"
version = "1.0.0-beta.1"
version = "1.0.0-beta.2"
dependencies = [
"greptime-proto",
"once_cell",
@@ -2310,7 +2313,7 @@ dependencies = [
[[package]]
name = "common-mem-prof"
version = "1.0.0-beta.1"
version = "1.0.0-beta.2"
dependencies = [
"anyhow",
"common-error",
@@ -2326,7 +2329,7 @@ dependencies = [
[[package]]
name = "common-meta"
version = "1.0.0-beta.1"
version = "1.0.0-beta.2"
dependencies = [
"anymap2",
"api",
@@ -2398,7 +2401,7 @@ dependencies = [
[[package]]
name = "common-options"
version = "1.0.0-beta.1"
version = "1.0.0-beta.2"
dependencies = [
"common-grpc",
"humantime-serde",
@@ -2407,11 +2410,11 @@ dependencies = [
[[package]]
name = "common-plugins"
version = "1.0.0-beta.1"
version = "1.0.0-beta.2"
[[package]]
name = "common-pprof"
version = "1.0.0-beta.1"
version = "1.0.0-beta.2"
dependencies = [
"common-error",
"common-macro",
@@ -2423,7 +2426,7 @@ dependencies = [
[[package]]
name = "common-procedure"
version = "1.0.0-beta.1"
version = "1.0.0-beta.2"
dependencies = [
"api",
"async-stream",
@@ -2452,7 +2455,7 @@ dependencies = [
[[package]]
name = "common-procedure-test"
version = "1.0.0-beta.1"
version = "1.0.0-beta.2"
dependencies = [
"async-trait",
"common-procedure",
@@ -2462,7 +2465,7 @@ dependencies = [
[[package]]
name = "common-query"
version = "1.0.0-beta.1"
version = "1.0.0-beta.2"
dependencies = [
"api",
"async-trait",
@@ -2488,7 +2491,7 @@ dependencies = [
[[package]]
name = "common-recordbatch"
version = "1.0.0-beta.1"
version = "1.0.0-beta.2"
dependencies = [
"arc-swap",
"common-base",
@@ -2512,7 +2515,7 @@ dependencies = [
[[package]]
name = "common-runtime"
version = "1.0.0-beta.1"
version = "1.0.0-beta.2"
dependencies = [
"async-trait",
"clap 4.5.40",
@@ -2541,7 +2544,7 @@ dependencies = [
[[package]]
name = "common-session"
version = "1.0.0-beta.1"
version = "1.0.0-beta.2"
dependencies = [
"serde",
"strum 0.27.1",
@@ -2549,7 +2552,7 @@ dependencies = [
[[package]]
name = "common-sql"
version = "1.0.0-beta.1"
version = "1.0.0-beta.2"
dependencies = [
"common-base",
"common-decimal",
@@ -2567,7 +2570,7 @@ dependencies = [
[[package]]
name = "common-stat"
version = "1.0.0-beta.1"
version = "1.0.0-beta.2"
dependencies = [
"common-base",
"common-runtime",
@@ -2582,7 +2585,7 @@ dependencies = [
[[package]]
name = "common-telemetry"
version = "1.0.0-beta.1"
version = "1.0.0-beta.2"
dependencies = [
"backtrace",
"common-base",
@@ -2611,7 +2614,7 @@ dependencies = [
[[package]]
name = "common-test-util"
version = "1.0.0-beta.1"
version = "1.0.0-beta.2"
dependencies = [
"client",
"common-grpc",
@@ -2624,7 +2627,7 @@ dependencies = [
[[package]]
name = "common-time"
version = "1.0.0-beta.1"
version = "1.0.0-beta.2"
dependencies = [
"arrow",
"chrono",
@@ -2642,7 +2645,7 @@ dependencies = [
[[package]]
name = "common-version"
version = "1.0.0-beta.1"
version = "1.0.0-beta.2"
dependencies = [
"build-data",
"cargo-manifest",
@@ -2653,7 +2656,7 @@ dependencies = [
[[package]]
name = "common-wal"
version = "1.0.0-beta.1"
version = "1.0.0-beta.2"
dependencies = [
"common-base",
"common-error",
@@ -2676,7 +2679,7 @@ dependencies = [
[[package]]
name = "common-workload"
version = "1.0.0-beta.1"
version = "1.0.0-beta.2"
dependencies = [
"common-telemetry",
"serde",
@@ -3913,7 +3916,7 @@ dependencies = [
[[package]]
name = "datanode"
version = "1.0.0-beta.1"
version = "1.0.0-beta.2"
dependencies = [
"api",
"arrow-flight",
@@ -3977,7 +3980,7 @@ dependencies = [
[[package]]
name = "datatypes"
version = "1.0.0-beta.1"
version = "1.0.0-beta.2"
dependencies = [
"arrow",
"arrow-array",
@@ -4649,7 +4652,7 @@ checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be"
[[package]]
name = "file-engine"
version = "1.0.0-beta.1"
version = "1.0.0-beta.2"
dependencies = [
"api",
"async-trait",
@@ -4781,7 +4784,7 @@ checksum = "8bf7cc16383c4b8d58b9905a8509f02926ce3058053c056376248d958c9df1e8"
[[package]]
name = "flow"
version = "1.0.0-beta.1"
version = "1.0.0-beta.2"
dependencies = [
"api",
"arrow",
@@ -4850,7 +4853,7 @@ dependencies = [
"sql",
"store-api",
"strum 0.27.1",
"substrait 1.0.0-beta.1",
"substrait 1.0.0-beta.2",
"table",
"tokio",
"tonic 0.13.1",
@@ -4905,7 +4908,7 @@ checksum = "28dd6caf6059519a65843af8fe2a3ae298b14b80179855aeb4adc2c1934ee619"
[[package]]
name = "frontend"
version = "1.0.0-beta.1"
version = "1.0.0-beta.2"
dependencies = [
"api",
"arc-swap",
@@ -5348,7 +5351,7 @@ dependencies = [
[[package]]
name = "greptime-proto"
version = "0.1.0"
source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=14b9dc40bdc8288742b0cefc7bb024303b7429ef#14b9dc40bdc8288742b0cefc7bb024303b7429ef"
source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=0df99f09f1d6785055b2d9da96fc4ecc2bdf6803#0df99f09f1d6785055b2d9da96fc4ecc2bdf6803"
dependencies = [
"prost 0.13.5",
"prost-types 0.13.5",
@@ -6116,7 +6119,7 @@ dependencies = [
[[package]]
name = "index"
version = "1.0.0-beta.1"
version = "1.0.0-beta.2"
dependencies = [
"async-trait",
"asynchronous-codec",
@@ -7045,7 +7048,7 @@ checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94"
[[package]]
name = "log-query"
version = "1.0.0-beta.1"
version = "1.0.0-beta.2"
dependencies = [
"chrono",
"common-error",
@@ -7057,7 +7060,7 @@ dependencies = [
[[package]]
name = "log-store"
version = "1.0.0-beta.1"
version = "1.0.0-beta.2"
dependencies = [
"async-stream",
"async-trait",
@@ -7364,7 +7367,7 @@ dependencies = [
[[package]]
name = "meta-client"
version = "1.0.0-beta.1"
version = "1.0.0-beta.2"
dependencies = [
"api",
"async-trait",
@@ -7392,7 +7395,7 @@ dependencies = [
[[package]]
name = "meta-srv"
version = "1.0.0-beta.1"
version = "1.0.0-beta.2"
dependencies = [
"api",
"async-trait",
@@ -7440,6 +7443,7 @@ dependencies = [
"lazy_static",
"local-ip-address",
"once_cell",
"ordered-float 4.6.0",
"parking_lot 0.12.4",
"prometheus",
"prost 0.13.5",
@@ -7490,7 +7494,7 @@ dependencies = [
[[package]]
name = "metric-engine"
version = "1.0.0-beta.1"
version = "1.0.0-beta.2"
dependencies = [
"api",
"aquamarine",
@@ -7585,7 +7589,7 @@ dependencies = [
[[package]]
name = "mito-codec"
version = "1.0.0-beta.1"
version = "1.0.0-beta.2"
dependencies = [
"api",
"bytes",
@@ -7610,7 +7614,7 @@ dependencies = [
[[package]]
name = "mito2"
version = "1.0.0-beta.1"
version = "1.0.0-beta.2"
dependencies = [
"api",
"aquamarine",
@@ -8348,7 +8352,7 @@ dependencies = [
[[package]]
name = "object-store"
version = "1.0.0-beta.1"
version = "1.0.0-beta.2"
dependencies = [
"anyhow",
"bytes",
@@ -8527,7 +8531,7 @@ dependencies = [
[[package]]
name = "opensrv-mysql"
version = "0.8.0"
source = "git+https://github.com/datafuselabs/opensrv?rev=a1fb4da215c8693c7e4f62be249a01b7fec52997#a1fb4da215c8693c7e4f62be249a01b7fec52997"
source = "git+https://github.com/datafuselabs/opensrv?tag=v0.10.0#074bd8fb81da3c9e6d6a098a482f3380478b9c0b"
dependencies = [
"async-trait",
"byteorder",
@@ -8633,7 +8637,7 @@ dependencies = [
[[package]]
name = "operator"
version = "1.0.0-beta.1"
version = "1.0.0-beta.2"
dependencies = [
"ahash 0.8.12",
"api",
@@ -8659,6 +8663,7 @@ dependencies = [
"common-recordbatch",
"common-runtime",
"common-sql",
"common-stat",
"common-telemetry",
"common-test-util",
"common-time",
@@ -8670,6 +8675,7 @@ dependencies = [
"futures",
"futures-util",
"humantime",
"itertools 0.14.0",
"jsonb",
"lazy_static",
"meta-client",
@@ -8691,7 +8697,7 @@ dependencies = [
"sql",
"sqlparser",
"store-api",
"substrait 1.0.0-beta.1",
"substrait 1.0.0-beta.2",
"table",
"tokio",
"tokio-util",
@@ -8977,7 +8983,7 @@ dependencies = [
[[package]]
name = "partition"
version = "1.0.0-beta.1"
version = "1.0.0-beta.2"
dependencies = [
"api",
"async-trait",
@@ -9181,10 +9187,21 @@ dependencies = [
]
[[package]]
name = "pgwire"
version = "0.34.2"
name = "pg_interval"
version = "0.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4f56a81b4fcc69016028f657a68f9b8e8a2a4b7d07684ca3298f2d3e7ff199ce"
checksum = "fe46640b465e284b048ef065cbed8ef17a622878d310c724578396b4cfd00df2"
dependencies = [
"bytes",
"chrono",
"postgres-types",
]
[[package]]
name = "pgwire"
version = "0.36.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d331bb0eef5bc83a221c0a85b1f205bccf094d4f72a26ae1d68a1b1c535123b7"
dependencies = [
"async-trait",
"base64 0.22.1",
@@ -9200,6 +9217,7 @@ dependencies = [
"ring",
"rust_decimal",
"rustls-pki-types",
"ryu",
"serde",
"serde_json",
"stringprep",
@@ -9322,7 +9340,7 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184"
[[package]]
name = "pipeline"
version = "1.0.0-beta.1"
version = "1.0.0-beta.2"
dependencies = [
"ahash 0.8.12",
"api",
@@ -9478,7 +9496,7 @@ dependencies = [
[[package]]
name = "plugins"
version = "1.0.0-beta.1"
version = "1.0.0-beta.2"
dependencies = [
"auth",
"clap 4.5.40",
@@ -9778,7 +9796,7 @@ dependencies = [
[[package]]
name = "promql"
version = "1.0.0-beta.1"
version = "1.0.0-beta.2"
dependencies = [
"ahash 0.8.12",
"async-trait",
@@ -10061,7 +10079,7 @@ dependencies = [
[[package]]
name = "puffin"
version = "1.0.0-beta.1"
version = "1.0.0-beta.2"
dependencies = [
"async-compression 0.4.19",
"async-trait",
@@ -10103,7 +10121,7 @@ dependencies = [
[[package]]
name = "query"
version = "1.0.0-beta.1"
version = "1.0.0-beta.2"
dependencies = [
"ahash 0.8.12",
"api",
@@ -10170,7 +10188,7 @@ dependencies = [
"sql",
"sqlparser",
"store-api",
"substrait 1.0.0-beta.1",
"substrait 1.0.0-beta.2",
"table",
"tokio",
"tokio-stream",
@@ -11506,7 +11524,7 @@ dependencies = [
[[package]]
name = "servers"
version = "1.0.0-beta.1"
version = "1.0.0-beta.2"
dependencies = [
"ahash 0.8.12",
"api",
@@ -11583,6 +11601,7 @@ dependencies = [
"otel-arrow-rust",
"parking_lot 0.12.4",
"permutation",
"pg_interval",
"pgwire",
"pin-project",
"pipeline",
@@ -11624,6 +11643,7 @@ dependencies = [
"tower 0.5.2",
"tower-http 0.6.6",
"tracing",
"tracing-opentelemetry",
"urlencoding",
"uuid",
"vrl",
@@ -11632,7 +11652,7 @@ dependencies = [
[[package]]
name = "session"
version = "1.0.0-beta.1"
version = "1.0.0-beta.2"
dependencies = [
"ahash 0.8.12",
"api",
@@ -11966,7 +11986,7 @@ dependencies = [
[[package]]
name = "sql"
version = "1.0.0-beta.1"
version = "1.0.0-beta.2"
dependencies = [
"api",
"arrow-buffer",
@@ -12026,7 +12046,7 @@ dependencies = [
[[package]]
name = "sqlness-runner"
version = "1.0.0-beta.1"
version = "1.0.0-beta.2"
dependencies = [
"async-trait",
"clap 4.5.40",
@@ -12303,7 +12323,7 @@ dependencies = [
[[package]]
name = "standalone"
version = "1.0.0-beta.1"
version = "1.0.0-beta.2"
dependencies = [
"async-trait",
"catalog",
@@ -12344,7 +12364,7 @@ checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f"
[[package]]
name = "store-api"
version = "1.0.0-beta.1"
version = "1.0.0-beta.2"
dependencies = [
"api",
"aquamarine",
@@ -12557,7 +12577,7 @@ dependencies = [
[[package]]
name = "substrait"
version = "1.0.0-beta.1"
version = "1.0.0-beta.2"
dependencies = [
"async-trait",
"bytes",
@@ -12680,7 +12700,7 @@ dependencies = [
[[package]]
name = "table"
version = "1.0.0-beta.1"
version = "1.0.0-beta.2"
dependencies = [
"api",
"async-trait",
@@ -12919,7 +12939,7 @@ dependencies = [
"getrandom 0.3.3",
"once_cell",
"rustix 1.0.7",
"windows-sys 0.59.0",
"windows-sys 0.61.2",
]
[[package]]
@@ -12949,7 +12969,7 @@ checksum = "8f50febec83f5ee1df3015341d8bd429f2d1cc62bcba7ea2076759d315084683"
[[package]]
name = "tests-fuzz"
version = "1.0.0-beta.1"
version = "1.0.0-beta.2"
dependencies = [
"arbitrary",
"async-trait",
@@ -12993,7 +13013,7 @@ dependencies = [
[[package]]
name = "tests-integration"
version = "1.0.0-beta.1"
version = "1.0.0-beta.2"
dependencies = [
"api",
"arrow-flight",
@@ -13067,7 +13087,7 @@ dependencies = [
"sqlx",
"standalone",
"store-api",
"substrait 1.0.0-beta.1",
"substrait 1.0.0-beta.2",
"table",
"tempfile",
"time",


@@ -74,7 +74,7 @@ members = [
resolver = "2"
[workspace.package]
version = "1.0.0-beta.1"
version = "1.0.0-beta.2"
edition = "2024"
license = "Apache-2.0"
@@ -148,7 +148,7 @@ etcd-client = { git = "https://github.com/GreptimeTeam/etcd-client", rev = "f62d
fst = "0.4.7"
futures = "0.3"
futures-util = "0.3"
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "14b9dc40bdc8288742b0cefc7bb024303b7429ef" }
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "0df99f09f1d6785055b2d9da96fc4ecc2bdf6803" }
hex = "0.4"
http = "1"
humantime = "2.1"
@@ -234,6 +234,7 @@ tower = "0.5"
tower-http = "0.6"
tracing = "0.1"
tracing-appender = "0.2"
tracing-opentelemetry = "0.31.0"
tracing-subscriber = { version = "0.3", features = ["env-filter", "json", "fmt"] }
typetag = "0.2"
uuid = { version = "1.17", features = ["serde", "v4", "fast-rng"] }


@@ -17,6 +17,8 @@ CARGO_REGISTRY_CACHE ?= ${HOME}/.cargo/registry
ARCH := $(shell uname -m | sed 's/x86_64/amd64/' | sed 's/aarch64/arm64/')
OUTPUT_DIR := $(shell if [ "$(RELEASE)" = "true" ]; then echo "release"; elif [ ! -z "$(CARGO_PROFILE)" ]; then echo "$(CARGO_PROFILE)" ; else echo "debug"; fi)
SQLNESS_OPTS ?=
EXTRA_BUILD_ENVS ?=
ASSEMBLED_EXTRA_BUILD_ENV := $(foreach var,$(EXTRA_BUILD_ENVS),-e $(var))
# The arguments for running integration tests.
ETCD_VERSION ?= v3.5.9
@@ -83,6 +85,7 @@ build: ## Build debug version greptime.
.PHONY: build-by-dev-builder
build-by-dev-builder: ## Build greptime by dev-builder.
docker run --network=host \
${ASSEMBLED_EXTRA_BUILD_ENV} \
-v ${PWD}:/greptimedb -v ${CARGO_REGISTRY_CACHE}:/root/.cargo/registry \
-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:${DEV_BUILDER_IMAGE_TAG} \
make build \
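
As an illustration, a hedged sketch of how the new `EXTRA_BUILD_ENVS` knob might be used locally with the Makefile change above: each space-separated `VAR=value` pair is expanded into a `-e VAR=value` flag on the `docker run` command that wraps the dev-builder build.

# Hypothetical local build with a 64 KiB jemalloc page size, mirroring the value the
# CI action exports when its large-page-size input is 'true'.
make build-by-dev-builder EXTRA_BUILD_ENVS="JEMALLOC_SYS_WITH_LG_PAGE=16"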


@@ -210,14 +210,6 @@
| `slow_query.record_type` | String | Unset | The record type of slow queries. It can be `system_table` or `log`. |
| `slow_query.threshold` | String | Unset | The threshold of slow query. |
| `slow_query.sample_ratio` | Float | Unset | The sampling ratio of slow query log. The value should be in the range of (0, 1]. |
| `export_metrics` | -- | -- | The standalone can export its metrics and send to Prometheus compatible service (e.g. `greptimedb`) from remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape. |
| `export_metrics.enable` | Bool | `false` | whether enable export metrics. |
| `export_metrics.write_interval` | String | `30s` | The interval of export metrics. |
| `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommended to collect metrics generated by itself<br/>You must create the database before enabling it. |
| `export_metrics.self_import.db` | String | Unset | -- |
| `export_metrics.remote_write` | -- | -- | -- |
| `export_metrics.remote_write.url` | String | `""` | The prometheus remote write endpoint that the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. |
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
| `memory` | -- | -- | The memory options. |
@@ -335,12 +327,6 @@
| `slow_query.threshold` | String | `30s` | The threshold of slow query. It can be human readable time string, for example: `10s`, `100ms`, `1s`. |
| `slow_query.sample_ratio` | Float | `1.0` | The sampling ratio of slow query log. The value should be in the range of (0, 1]. For example, `0.1` means 10% of the slow queries will be logged and `1.0` means all slow queries will be logged. |
| `slow_query.ttl` | String | `90d` | The TTL of the `slow_queries` system table. Default is `90d` when `record_type` is `system_table`. |
| `export_metrics` | -- | -- | The frontend can export its metrics and send to Prometheus compatible service (e.g. `greptimedb` itself) from remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape. |
| `export_metrics.enable` | Bool | `false` | whether enable export metrics. |
| `export_metrics.write_interval` | String | `30s` | The interval of export metrics. |
| `export_metrics.remote_write` | -- | -- | -- |
| `export_metrics.remote_write.url` | String | `""` | The prometheus remote write endpoint that the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. |
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
| `memory` | -- | -- | The memory options. |
@@ -354,7 +340,7 @@
| Key | Type | Default | Descriptions |
| --- | -----| ------- | ----------- |
| `data_home` | String | `./greptimedb_data` | The working home directory. |
| `store_addrs` | Array | -- | Store server address default to etcd store.<br/>For postgres store, the format is:<br/>"password=password dbname=postgres user=postgres host=localhost port=5432"<br/>For etcd store, the format is:<br/>"127.0.0.1:2379" |
| `store_addrs` | Array | -- | Store server address(es). The format depends on the selected backend.<br/><br/>For etcd: a list of "host:port" endpoints.<br/>e.g. ["192.168.1.1:2379", "192.168.1.2:2379"]<br/><br/>For PostgreSQL: a connection string in libpq format or URI.<br/>e.g.<br/>- "host=localhost port=5432 user=postgres password=<PASSWORD> dbname=postgres"<br/>- "postgresql://user:password@localhost:5432/mydb?connect_timeout=10"<br/>For details, see: https://docs.rs/tokio-postgres/latest/tokio_postgres/config/struct.Config.html<br/><br/>For MySQL: a connection URL.<br/>e.g. "mysql://user:password@localhost:3306/greptime_meta?ssl-mode=VERIFY_CA&ssl-ca=/path/to/ca.pem" |
| `store_key_prefix` | String | `""` | If it's not empty, the metasrv will store all data with this key prefix. |
| `backend` | String | `etcd_store` | The datastore for meta server.<br/>Available values:<br/>- `etcd_store` (default value)<br/>- `memory_store`<br/>- `postgres_store`<br/>- `mysql_store` |
| `meta_table_name` | String | `greptime_metakv` | Table name in RDS to store metadata. Effect when using a RDS kvbackend.<br/>**Only used when backend is `postgres_store`.** |
@@ -370,12 +356,11 @@
| `runtime` | -- | -- | The runtime options. |
| `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
| `runtime.compact_rt_size` | Integer | `4` | The number of threads to execute the runtime for global write operations. |
| `backend_tls` | -- | -- | TLS configuration for kv store backend (applicable for etcd, PostgreSQL, and MySQL backends)<br/>When using etcd, PostgreSQL, or MySQL as metadata store, you can configure TLS here |
| `backend_tls` | -- | -- | TLS configuration for kv store backend (applicable for etcd, PostgreSQL, and MySQL backends)<br/>When using etcd, PostgreSQL, or MySQL as metadata store, you can configure TLS here<br/><br/>Note: if TLS is configured in both this section and the `store_addrs` connection string, the<br/>settings here will override the TLS settings in `store_addrs`. |
| `backend_tls.mode` | String | `prefer` | TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html<br/>- "disable" - No TLS<br/>- "prefer" (default) - Try TLS, fallback to plain<br/>- "require" - Require TLS<br/>- "verify_ca" - Require TLS and verify CA<br/>- "verify_full" - Require TLS and verify hostname |
| `backend_tls.cert_path` | String | `""` | Path to client certificate file (for client authentication)<br/>Like "/path/to/client.crt" |
| `backend_tls.key_path` | String | `""` | Path to client private key file (for client authentication)<br/>Like "/path/to/client.key" |
| `backend_tls.ca_cert_path` | String | `""` | Path to CA certificate file (for server certificate verification)<br/>Required when using custom CAs or self-signed certificates<br/>Leave empty to use system root certificates only<br/>Like "/path/to/ca.crt" |
| `backend_tls.watch` | Bool | `false` | Watch for certificate file changes and auto reload |
| `grpc` | -- | -- | The gRPC server options. |
| `grpc.bind_addr` | String | `127.0.0.1:3002` | The address to bind the gRPC server. |
| `grpc.server_addr` | String | `127.0.0.1:3002` | The communication server address for the frontend and datanode to connect to metasrv.<br/>If left empty or unset, the server will automatically use the IP address of the first network interface<br/>on the host, with the same port number as the one specified in `bind_addr`. |
@@ -430,12 +415,6 @@
| `logging.otlp_headers` | -- | -- | Additional OTLP headers, only valid when using OTLP http |
| `logging.tracing_sample_ratio` | -- | Unset | The percentage of tracing will be sampled and exported.<br/>Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.<br/>ratio > 1 are treated as 1. Fractions < 0 are treated as 0 |
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
| `export_metrics` | -- | -- | The metasrv can export its metrics and send to Prometheus compatible service (e.g. `greptimedb` itself) from remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape. |
| `export_metrics.enable` | Bool | `false` | whether enable export metrics. |
| `export_metrics.write_interval` | String | `30s` | The interval of export metrics. |
| `export_metrics.remote_write` | -- | -- | -- |
| `export_metrics.remote_write.url` | String | `""` | The prometheus remote write endpoint that the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. |
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
| `memory` | -- | -- | The memory options. |
@@ -608,12 +587,6 @@
| `logging.otlp_headers` | -- | -- | Additional OTLP headers, only valid when using OTLP http |
| `logging.tracing_sample_ratio` | -- | Unset | The percentage of tracing will be sampled and exported.<br/>Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.<br/>ratio > 1 are treated as 1. Fractions < 0 are treated as 0 |
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
| `export_metrics` | -- | -- | The datanode can export its metrics and send to Prometheus compatible service (e.g. `greptimedb` itself) from remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape. |
| `export_metrics.enable` | Bool | `false` | whether enable export metrics. |
| `export_metrics.write_interval` | String | `30s` | The interval of export metrics. |
| `export_metrics.remote_write` | -- | -- | -- |
| `export_metrics.remote_write.url` | String | `""` | The prometheus remote write endpoint that the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. |
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
| `memory` | -- | -- | The memory options. |

View File

@@ -712,21 +712,6 @@ otlp_export_protocol = "http"
[logging.tracing_sample_ratio]
default_ratio = 1.0
## The datanode can export its metrics and send to Prometheus compatible service (e.g. `greptimedb` itself) from remote-write API.
## This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
[export_metrics]
## whether enable export metrics.
enable = false
## The interval of export metrics.
write_interval = "30s"
[export_metrics.remote_write]
## The prometheus remote write endpoint that the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
url = ""
## HTTP headers of Prometheus remote-write carry.
headers = { }
## The tracing options. Only effect when compiled with `tokio-console` feature.
#+ [tracing]
## The tokio console address.

View File

@@ -329,21 +329,6 @@ sample_ratio = 1.0
## The TTL of the `slow_queries` system table. Default is `90d` when `record_type` is `system_table`.
ttl = "90d"
## The frontend can export its metrics and send to Prometheus compatible service (e.g. `greptimedb` itself) from remote-write API.
## This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
[export_metrics]
## whether enable export metrics.
enable = false
## The interval of export metrics.
write_interval = "30s"
[export_metrics.remote_write]
## The prometheus remote write endpoint that the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
url = ""
## HTTP headers of Prometheus remote-write carry.
headers = { }
## The tracing options. Only effect when compiled with `tokio-console` feature.
#+ [tracing]
## The tokio console address.

View File

@@ -1,11 +1,19 @@
## The working home directory.
data_home = "./greptimedb_data"
## Store server address default to etcd store.
## For postgres store, the format is:
## "password=password dbname=postgres user=postgres host=localhost port=5432"
## For etcd store, the format is:
## "127.0.0.1:2379"
## Store server address(es). The format depends on the selected backend.
##
## For etcd: a list of "host:port" endpoints.
## e.g. ["192.168.1.1:2379", "192.168.1.2:2379"]
##
## For PostgreSQL: a connection string in libpq format or URI.
## e.g.
## - "host=localhost port=5432 user=postgres password=<PASSWORD> dbname=postgres"
## - "postgresql://user:password@localhost:5432/mydb?connect_timeout=10"
## For details, see: https://docs.rs/tokio-postgres/latest/tokio_postgres/config/struct.Config.html
##
## For MySQL: a connection URL.
## e.g. "mysql://user:password@localhost:3306/greptime_meta?ssl-mode=VERIFY_CA&ssl-ca=/path/to/ca.pem"
store_addrs = ["127.0.0.1:2379"]
## If it's not empty, the metasrv will store all data with this key prefix.
@@ -75,6 +83,9 @@ node_max_idle_time = "24hours"
## TLS configuration for kv store backend (applicable for etcd, PostgreSQL, and MySQL backends)
## When using etcd, PostgreSQL, or MySQL as metadata store, you can configure TLS here
##
## Note: if TLS is configured in both this section and the `store_addrs` connection string, the
## settings here will override the TLS settings in `store_addrs`.
[backend_tls]
## TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html
## - "disable" - No TLS
@@ -98,9 +109,6 @@ key_path = ""
## Like "/path/to/ca.crt"
ca_cert_path = ""
## Watch for certificate file changes and auto reload
watch = false
## The gRPC server options.
[grpc]
## The address to bind the gRPC server.
@@ -323,21 +331,6 @@ otlp_export_protocol = "http"
[logging.tracing_sample_ratio]
default_ratio = 1.0
## The metasrv can export its metrics and send to Prometheus compatible service (e.g. `greptimedb` itself) from remote-write API.
## This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
[export_metrics]
## whether enable export metrics.
enable = false
## The interval of export metrics.
write_interval = "30s"
[export_metrics.remote_write]
## The prometheus remote write endpoint that the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
url = ""
## HTTP headers of Prometheus remote-write carry.
headers = { }
## The tracing options. Only effect when compiled with `tokio-console` feature.
#+ [tracing]
## The tokio console address.

View File

@@ -820,27 +820,6 @@ default_ratio = 1.0
## @toml2docs:none-default
#+ sample_ratio = 1.0
## The standalone can export its metrics and send to Prometheus compatible service (e.g. `greptimedb`) from remote-write API.
## This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
[export_metrics]
## whether enable export metrics.
enable = false
## The interval of export metrics.
write_interval = "30s"
## For `standalone` mode, `self_import` is recommended to collect metrics generated by itself
## You must create the database before enabling it.
[export_metrics.self_import]
## @toml2docs:none-default
db = "greptime_metrics"
[export_metrics.remote_write]
## The prometheus remote write endpoint that the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
url = ""
## HTTP headers of Prometheus remote-write carry.
headers = { }
## The tracing options. Only effect when compiled with `tokio-console` feature.
#+ [tracing]
## The tokio console address.

View File

@@ -1,10 +1,10 @@
FROM centos:7 as builder
FROM centos:7 AS builder
ARG CARGO_PROFILE
ARG FEATURES
ARG OUTPUT_DIR
ENV LANG en_US.utf8
ENV LANG=en_US.utf8
WORKDIR /greptimedb
# Install dependencies
@@ -22,7 +22,7 @@ RUN unzip protoc-3.15.8-linux-x86_64.zip -d /usr/local/
# Install Rust
SHELL ["/bin/bash", "-c"]
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
ENV PATH /usr/local/bin:/root/.cargo/bin/:$PATH
ENV PATH=/usr/local/bin:/root/.cargo/bin/:$PATH
# Build the project in release mode.
RUN --mount=target=.,rw \
@@ -33,7 +33,7 @@ RUN --mount=target=.,rw \
TARGET_DIR=/out/target
# Export the binary to the clean image.
FROM centos:7 as base
FROM centos:7 AS base
ARG OUTPUT_DIR
@@ -45,7 +45,7 @@ RUN yum install -y epel-release \
WORKDIR /greptime
COPY --from=builder /out/target/${OUTPUT_DIR}/greptime /greptime/bin/
ENV PATH /greptime/bin/:$PATH
ENV PATH=/greptime/bin/:$PATH
ENV MALLOC_CONF="prof:true,prof_active:false"

View File

@@ -0,0 +1,65 @@
FROM ubuntu:22.04 AS builder
ARG CARGO_PROFILE
ARG FEATURES
ARG OUTPUT_DIR
ENV LANG=en_US.utf8
WORKDIR /greptimedb
RUN apt-get update && \
DEBIAN_FRONTEND=noninteractive apt-get install -y software-properties-common
# Install dependencies.
RUN --mount=type=cache,target=/var/cache/apt \
apt-get update && apt-get install -y \
libssl-dev \
protobuf-compiler \
curl \
git \
build-essential \
pkg-config
# Install Rust.
SHELL ["/bin/bash", "-c"]
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
ENV PATH=/root/.cargo/bin/:$PATH
# Build the project in release mode.
RUN --mount=target=. \
--mount=type=cache,target=/root/.cargo/registry \
make build \
CARGO_PROFILE=${CARGO_PROFILE} \
FEATURES=${FEATURES} \
TARGET_DIR=/out/target
FROM ubuntu:22.04 AS libs
ARG TARGETARCH
# Copy required library dependencies based on architecture
RUN if [ "$TARGETARCH" = "amd64" ]; then \
cp /lib/x86_64-linux-gnu/libz.so.1.2.11 /lib/x86_64-linux-gnu/libz.so.1; \
elif [ "$TARGETARCH" = "arm64" ]; then \
cp /lib/aarch64-linux-gnu/libz.so.1.2.11 /lib/aarch64-linux-gnu/libz.so.1; \
else \
echo "Unsupported architecture: $TARGETARCH" && exit 1; \
fi
# Export the binary to the clean distroless image.
FROM gcr.io/distroless/cc-debian12:latest AS base
ARG OUTPUT_DIR
ARG TARGETARCH
# Copy required library dependencies
COPY --from=libs /lib /lib
COPY --from=busybox:stable /bin/busybox /bin/busybox
WORKDIR /greptime
COPY --from=builder /out/target/${OUTPUT_DIR}/greptime /greptime/bin/greptime
ENV PATH=/greptime/bin/:$PATH
ENV MALLOC_CONF="prof:true,prof_active:false"
ENTRYPOINT ["greptime"]

View File

@@ -1,10 +1,10 @@
FROM ubuntu:22.04 as builder
FROM ubuntu:22.04 AS builder
ARG CARGO_PROFILE
ARG FEATURES
ARG OUTPUT_DIR
ENV LANG en_US.utf8
ENV LANG=en_US.utf8
WORKDIR /greptimedb
RUN apt-get update && \
@@ -23,7 +23,7 @@ RUN --mount=type=cache,target=/var/cache/apt \
# Install Rust.
SHELL ["/bin/bash", "-c"]
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
ENV PATH /root/.cargo/bin/:$PATH
ENV PATH=/root/.cargo/bin/:$PATH
# Build the project in release mode.
RUN --mount=target=. \
@@ -35,7 +35,7 @@ RUN --mount=target=. \
# Export the binary to the clean image.
# TODO(zyy17): Maybe should use the more secure container image.
FROM ubuntu:22.04 as base
FROM ubuntu:22.04 AS base
ARG OUTPUT_DIR
@@ -45,7 +45,7 @@ RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get \
WORKDIR /greptime
COPY --from=builder /out/target/${OUTPUT_DIR}/greptime /greptime/bin/
ENV PATH /greptime/bin/:$PATH
ENV PATH=/greptime/bin/:$PATH
ENV MALLOC_CONF="prof:true,prof_active:false"

View File

@@ -13,7 +13,7 @@ ARG TARGETARCH
ADD $TARGETARCH/greptime /greptime/bin/
ENV PATH /greptime/bin/:$PATH
ENV PATH=/greptime/bin/:$PATH
ENV MALLOC_CONF="prof:true,prof_active:false"

View File

@@ -0,0 +1,40 @@
FROM ubuntu:22.04 AS libs
ARG TARGETARCH
# Copy required library dependencies based on architecture
# TARGETARCH values: amd64, arm64
# Ubuntu library paths: x86_64-linux-gnu, aarch64-linux-gnu
RUN if [ "$TARGETARCH" = "amd64" ]; then \
mkdir -p /output/x86_64-linux-gnu && \
cp /lib/x86_64-linux-gnu/libz.so.1.2.11 /output/x86_64-linux-gnu/libz.so.1; \
elif [ "$TARGETARCH" = "arm64" ]; then \
mkdir -p /output/aarch64-linux-gnu && \
cp /lib/aarch64-linux-gnu/libz.so.1.2.11 /output/aarch64-linux-gnu/libz.so.1; \
else \
echo "Unsupported architecture: $TARGETARCH" && exit 1; \
fi
FROM gcr.io/distroless/cc-debian12:latest
# The root path under which contains all the dependencies to build this Dockerfile.
ARG DOCKER_BUILD_ROOT=.
# The binary name of GreptimeDB executable.
# Defaults to "greptime", but sometimes in other projects it might be different.
ARG TARGET_BIN=greptime
ARG TARGETARCH
# Copy required library dependencies
COPY --from=libs /output /lib
COPY --from=busybox:stable /bin/busybox /bin/busybox
ADD $TARGETARCH/$TARGET_BIN /greptime/bin/
ENV PATH=/greptime/bin/:$PATH
ENV TARGET_BIN=$TARGET_BIN
ENV MALLOC_CONF="prof:true,prof_active:false"
ENTRYPOINT ["greptime"]

View File

@@ -14,7 +14,7 @@ ARG TARGETARCH
ADD $TARGETARCH/$TARGET_BIN /greptime/bin/
ENV PATH /greptime/bin/:$PATH
ENV PATH=/greptime/bin/:$PATH
ENV TARGET_BIN=$TARGET_BIN

View File

@@ -13,4 +13,19 @@ Log Level changed from Some("info") to "trace,flow=debug"%
The data is a string in the format of `global_level,module1=level1,module2=level2,...` that follows the same rules as `RUST_LOG`.
The module is the module name of the log, and the level is the log level. The log level can be one of the following: `trace`, `debug`, `info`, `warn`, `error`, `off` (case insensitive).
# Enable/Disable Trace on the Fly
## HTTP API
Example:
```bash
curl --data "true" 127.0.0.1:4000/debug/enable_trace
```
And the database will reply with something like:
```
trace enabled%
```
Possible values are "true" or "false".
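For completeness, tracing can presumably be disabled through the same endpoint with the other accepted value; a minimal sketch:

```bash
# Turn tracing back off via the same debug endpoint.
curl --data "false" 127.0.0.1:4000/debug/enable_trace
```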

View File

@@ -106,6 +106,37 @@ This mechanism may be too complex to implement at once. We can consider a two-ph
Also, the read replica shouldn't be behind in manifest version by more than the lingering time of obsolete files; otherwise it might refer to files that have already been deleted by the GC worker.
- need to upload a tmp manifest to object storage, which may introduce additional complexity and potential performance overhead. But since long-running queries are typically not frequent, the performance impact is expected to be minimal.
One potential race condition with region migration is illustrated below:
```mermaid
sequenceDiagram
participant gc_worker as GC Worker(same dn as region 1)
participant region1 as Region 1 (Leader → Follower)
participant region2 as Region 2 (Follower → Leader)
participant region_dir as Region Directory
gc_worker->>region1: Start GC, get region manifest
activate region1
region1-->>gc_worker: Region 1 manifest
deactivate region1
gc_worker->>region_dir: Scan region directory
Note over region1,region2: Region Migration Occurs
region1-->>region2: Downgrade to Follower
region2-->>region1: Becomes Leader
region2->>region_dir: Add new file
gc_worker->>region_dir: Continue scanning
gc_worker-->>region_dir: Discovers new file
Note over gc_worker: New file not in Region 1's manifest
gc_worker->>gc_worker: Mark file as orphan(incorrectly)
```
This could cause the GC worker to incorrectly mark the new file as an orphan and delete it, if the configured lingering time for orphan files (files not referenced anywhere, in use or unused) is not long enough.
A good enough solution could be to use a lock to prevent the GC worker from running on a region while region migration is happening on that region, and vice versa.
The race condition between the GC worker and repartition also needs to be considered carefully. For now, acquiring a lock for both region migration and repartition during the GC worker process could be a simple solution.
## Conclusion and Rationale

View File

@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::HashSet;
use std::collections::{BTreeMap, HashSet};
use std::sync::Arc;
use common_decimal::Decimal128;
@@ -20,6 +20,7 @@ use common_decimal::decimal128::{DECIMAL128_DEFAULT_SCALE, DECIMAL128_MAX_PRECIS
use common_time::time::Time;
use common_time::timestamp::TimeUnit;
use common_time::{Date, IntervalDayTime, IntervalMonthDayNano, IntervalYearMonth, Timestamp};
use datatypes::json::value::{JsonNumber, JsonValue, JsonValueRef, JsonVariant};
use datatypes::prelude::{ConcreteDataType, ValueRef};
use datatypes::types::{
IntervalType, JsonFormat, StructField, StructType, TimeType, TimestampType,
@@ -34,9 +35,9 @@ use greptime_proto::v1::greptime_request::Request;
use greptime_proto::v1::query_request::Query;
use greptime_proto::v1::value::ValueData;
use greptime_proto::v1::{
self, ColumnDataTypeExtension, DdlRequest, DecimalTypeExtension, JsonNativeTypeExtension,
JsonTypeExtension, ListTypeExtension, QueryRequest, Row, SemanticType, StructTypeExtension,
VectorTypeExtension,
self, ColumnDataTypeExtension, DdlRequest, DecimalTypeExtension, DictionaryTypeExtension,
JsonList, JsonNativeTypeExtension, JsonObject, JsonTypeExtension, ListTypeExtension,
QueryRequest, Row, SemanticType, StructTypeExtension, VectorTypeExtension, json_value,
};
use paste::paste;
use snafu::prelude::*;
@@ -215,6 +216,26 @@ impl From<ColumnDataTypeWrapper> for ConcreteDataType {
ConcreteDataType::null_datatype()
}
}
ColumnDataType::Dictionary => {
if let Some(TypeExt::DictionaryType(d)) = datatype_wrapper
.datatype_ext
.as_ref()
.and_then(|datatype_ext| datatype_ext.type_ext.as_ref())
{
let key_type = ColumnDataTypeWrapper {
datatype: d.key_datatype(),
datatype_ext: d.key_datatype_extension.clone().map(|ext| *ext),
};
let value_type = ColumnDataTypeWrapper {
datatype: d.value_datatype(),
datatype_ext: d.value_datatype_extension.clone().map(|ext| *ext),
};
ConcreteDataType::dictionary_datatype(key_type.into(), value_type.into())
} else {
// invalid state: type extension not found
ConcreteDataType::null_datatype()
}
}
}
}
}
@@ -338,13 +359,30 @@ impl ColumnDataTypeWrapper {
}),
}
}
pub fn dictionary_datatype(
key_type: ColumnDataTypeWrapper,
value_type: ColumnDataTypeWrapper,
) -> Self {
ColumnDataTypeWrapper {
datatype: ColumnDataType::Dictionary,
datatype_ext: Some(ColumnDataTypeExtension {
type_ext: Some(TypeExt::DictionaryType(Box::new(DictionaryTypeExtension {
key_datatype: key_type.datatype().into(),
key_datatype_extension: key_type.datatype_ext.map(Box::new),
value_datatype: value_type.datatype().into(),
value_datatype_extension: value_type.datatype_ext.map(Box::new),
}))),
}),
}
}
}
impl TryFrom<ConcreteDataType> for ColumnDataTypeWrapper {
type Error = error::Error;
fn try_from(datatype: ConcreteDataType) -> Result<Self> {
let column_datatype = match datatype {
let column_datatype = match &datatype {
ConcreteDataType::Boolean(_) => ColumnDataType::Boolean,
ConcreteDataType::Int8(_) => ColumnDataType::Int8,
ConcreteDataType::Int16(_) => ColumnDataType::Int16,
@@ -381,9 +419,8 @@ impl TryFrom<ConcreteDataType> for ColumnDataTypeWrapper {
ConcreteDataType::Vector(_) => ColumnDataType::Vector,
ConcreteDataType::List(_) => ColumnDataType::List,
ConcreteDataType::Struct(_) => ColumnDataType::Struct,
ConcreteDataType::Null(_)
| ConcreteDataType::Dictionary(_)
| ConcreteDataType::Duration(_) => {
ConcreteDataType::Dictionary(_) => ColumnDataType::Dictionary,
ConcreteDataType::Null(_) | ConcreteDataType::Duration(_) => {
return error::IntoColumnDataTypeSnafu { from: datatype }.fail();
}
};
@@ -405,7 +442,9 @@ impl TryFrom<ConcreteDataType> for ColumnDataTypeWrapper {
type_ext: Some(TypeExt::JsonType(JsonTypeExtension::JsonBinary.into())),
}),
JsonFormat::Native(inner) => {
let inner_type = ColumnDataTypeWrapper::try_from(*inner.clone())?;
let inner_type = ColumnDataTypeWrapper::try_from(
ConcreteDataType::from(inner.as_ref()),
)?;
Some(ColumnDataTypeExtension {
type_ext: Some(TypeExt::JsonNativeType(Box::new(
JsonNativeTypeExtension {
@@ -463,6 +502,25 @@ impl TryFrom<ConcreteDataType> for ColumnDataTypeWrapper {
None
}
}
ColumnDataType::Dictionary => {
if let ConcreteDataType::Dictionary(dict_type) = &datatype {
let key_type = ColumnDataTypeWrapper::try_from(dict_type.key_type().clone())?;
let value_type =
ColumnDataTypeWrapper::try_from(dict_type.value_type().clone())?;
Some(ColumnDataTypeExtension {
type_ext: Some(TypeExt::DictionaryType(Box::new(
DictionaryTypeExtension {
key_datatype: key_type.datatype.into(),
key_datatype_extension: key_type.datatype_ext.map(Box::new),
value_datatype: value_type.datatype.into(),
value_datatype_extension: value_type.datatype_ext.map(Box::new),
},
))),
})
} else {
None
}
}
_ => None,
};
Ok(Self {
@@ -601,6 +659,9 @@ pub fn values_with_capacity(datatype: ColumnDataType, capacity: usize) -> Values
struct_values: Vec::with_capacity(capacity),
..Default::default()
},
ColumnDataType::Dictionary => Values {
..Default::default()
},
}
}
@@ -801,21 +862,8 @@ pub fn pb_value_to_value_ref<'a>(
}
ValueData::JsonValue(inner_value) => {
let json_datatype_ext = datatype_ext
.as_ref()
.and_then(|ext| {
if let Some(TypeExt::JsonNativeType(l)) = &ext.type_ext {
Some(l)
} else {
None
}
})
.expect("json value must contain datatype ext");
ValueRef::Json(Box::new(pb_value_to_value_ref(
inner_value,
json_datatype_ext.datatype_extension.as_deref(),
)))
let value = decode_json_value(inner_value);
ValueRef::Json(Box::new(value))
}
}
}
@@ -938,12 +986,72 @@ pub fn to_proto_value(value: Value) -> v1::Value {
})),
},
Value::Json(v) => v1::Value {
value_data: Some(ValueData::JsonValue(Box::new(to_proto_value(*v)))),
value_data: Some(ValueData::JsonValue(encode_json_value(*v))),
},
Value::Duration(_) => v1::Value { value_data: None },
}
}
fn encode_json_value(value: JsonValue) -> v1::JsonValue {
fn helper(json: JsonVariant) -> v1::JsonValue {
let value = match json {
JsonVariant::Null => None,
JsonVariant::Bool(x) => Some(json_value::Value::Boolean(x)),
JsonVariant::Number(x) => Some(match x {
JsonNumber::PosInt(i) => json_value::Value::Uint(i),
JsonNumber::NegInt(i) => json_value::Value::Int(i),
JsonNumber::Float(f) => json_value::Value::Float(f.0),
}),
JsonVariant::String(x) => Some(json_value::Value::Str(x)),
JsonVariant::Array(x) => Some(json_value::Value::Array(JsonList {
items: x.into_iter().map(helper).collect::<Vec<_>>(),
})),
JsonVariant::Object(x) => {
let entries = x
.into_iter()
.map(|(key, v)| v1::json_object::Entry {
key,
value: Some(helper(v)),
})
.collect::<Vec<_>>();
Some(json_value::Value::Object(JsonObject { entries }))
}
};
v1::JsonValue { value }
}
helper(value.into_variant())
}
fn decode_json_value(value: &v1::JsonValue) -> JsonValueRef<'_> {
let Some(value) = &value.value else {
return JsonValueRef::null();
};
match value {
json_value::Value::Boolean(x) => (*x).into(),
json_value::Value::Int(x) => (*x).into(),
json_value::Value::Uint(x) => (*x).into(),
json_value::Value::Float(x) => (*x).into(),
json_value::Value::Str(x) => (x.as_str()).into(),
json_value::Value::Array(array) => array
.items
.iter()
.map(|x| decode_json_value(x).into_variant())
.collect::<Vec<_>>()
.into(),
json_value::Value::Object(x) => x
.entries
.iter()
.filter_map(|entry| {
entry
.value
.as_ref()
.map(|v| (entry.key.as_str(), decode_json_value(v).into_variant()))
})
.collect::<BTreeMap<_, _>>()
.into(),
}
}
fn convert_list_to_pb_values(list_value: ListValue) -> Vec<v1::Value> {
list_value
.take_items()
@@ -1065,9 +1173,7 @@ pub fn value_to_grpc_value(value: Value) -> GrpcValue {
.collect();
Some(ValueData::StructValue(v1::StructValue { items }))
}
Value::Json(inner_value) => Some(ValueData::JsonValue(Box::new(value_to_grpc_value(
*inner_value,
)))),
Value::Json(v) => Some(ValueData::JsonValue(encode_json_value(*v))),
Value::Duration(_) => unreachable!(),
},
}
@@ -1259,6 +1365,9 @@ mod tests {
let values = values_with_capacity(ColumnDataType::Json, 2);
assert_eq!(2, values.json_values.capacity());
assert_eq!(2, values.string_values.capacity());
let values = values_with_capacity(ColumnDataType::Dictionary, 2);
assert!(values.bool_values.is_empty());
}
#[test]
@@ -1355,6 +1464,17 @@ mod tests {
ConcreteDataType::list_datatype(Arc::new(ConcreteDataType::string_datatype())),
ColumnDataTypeWrapper::list_datatype(ColumnDataTypeWrapper::string_datatype()).into()
);
assert_eq!(
ConcreteDataType::dictionary_datatype(
ConcreteDataType::int32_datatype(),
ConcreteDataType::string_datatype()
),
ColumnDataTypeWrapper::dictionary_datatype(
ColumnDataTypeWrapper::int32_datatype(),
ColumnDataTypeWrapper::string_datatype()
)
.into()
);
let struct_type = StructType::new(Arc::new(vec![
StructField::new("id".to_string(), ConcreteDataType::int64_datatype(), true),
StructField::new(
@@ -1525,6 +1645,18 @@ mod tests {
ColumnDataTypeWrapper::vector_datatype(3),
ConcreteDataType::vector_datatype(3).try_into().unwrap()
);
assert_eq!(
ColumnDataTypeWrapper::dictionary_datatype(
ColumnDataTypeWrapper::int32_datatype(),
ColumnDataTypeWrapper::string_datatype()
),
ConcreteDataType::dictionary_datatype(
ConcreteDataType::int32_datatype(),
ConcreteDataType::string_datatype()
)
.try_into()
.unwrap()
);
let result: Result<ColumnDataTypeWrapper> = ConcreteDataType::null_datatype().try_into();
assert!(result.is_err());
@@ -1580,6 +1712,20 @@ mod tests {
datatype_extension: Some(Box::new(ColumnDataTypeExtension {
type_ext: Some(TypeExt::StructType(StructTypeExtension {
fields: vec![
v1::StructField {
name: "address".to_string(),
datatype: ColumnDataTypeWrapper::string_datatype()
.datatype()
.into(),
datatype_extension: None
},
v1::StructField {
name: "age".to_string(),
datatype: ColumnDataTypeWrapper::int64_datatype()
.datatype()
.into(),
datatype_extension: None
},
v1::StructField {
name: "id".to_string(),
datatype: ColumnDataTypeWrapper::int64_datatype()
@@ -1594,20 +1740,6 @@ mod tests {
.into(),
datatype_extension: None
},
v1::StructField {
name: "age".to_string(),
datatype: ColumnDataTypeWrapper::int32_datatype()
.datatype()
.into(),
datatype_extension: None
},
v1::StructField {
name: "address".to_string(),
datatype: ColumnDataTypeWrapper::string_datatype()
.datatype()
.into(),
datatype_extension: None
}
]
}))
}))
@@ -1778,4 +1910,199 @@ mod tests {
_ => panic!("Unexpected value type"),
}
}
#[test]
fn test_encode_decode_json_value() {
let json = JsonValue::null();
let proto = encode_json_value(json.clone());
assert!(proto.value.is_none());
let value = decode_json_value(&proto);
assert_eq!(json.as_ref(), value);
let json: JsonValue = true.into();
let proto = encode_json_value(json.clone());
assert_eq!(proto.value, Some(json_value::Value::Boolean(true)));
let value = decode_json_value(&proto);
assert_eq!(json.as_ref(), value);
let json: JsonValue = (-1i64).into();
let proto = encode_json_value(json.clone());
assert_eq!(proto.value, Some(json_value::Value::Int(-1)));
let value = decode_json_value(&proto);
assert_eq!(json.as_ref(), value);
let json: JsonValue = 1u64.into();
let proto = encode_json_value(json.clone());
assert_eq!(proto.value, Some(json_value::Value::Uint(1)));
let value = decode_json_value(&proto);
assert_eq!(json.as_ref(), value);
let json: JsonValue = 1.0f64.into();
let proto = encode_json_value(json.clone());
assert_eq!(proto.value, Some(json_value::Value::Float(1.0)));
let value = decode_json_value(&proto);
assert_eq!(json.as_ref(), value);
let json: JsonValue = "s".into();
let proto = encode_json_value(json.clone());
assert_eq!(proto.value, Some(json_value::Value::Str("s".to_string())));
let value = decode_json_value(&proto);
assert_eq!(json.as_ref(), value);
let json: JsonValue = [1i64, 2, 3].into();
let proto = encode_json_value(json.clone());
assert_eq!(
proto.value,
Some(json_value::Value::Array(JsonList {
items: vec![
v1::JsonValue {
value: Some(json_value::Value::Int(1))
},
v1::JsonValue {
value: Some(json_value::Value::Int(2))
},
v1::JsonValue {
value: Some(json_value::Value::Int(3))
}
]
}))
);
let value = decode_json_value(&proto);
assert_eq!(json.as_ref(), value);
let json: JsonValue = [(); 0].into();
let proto = encode_json_value(json.clone());
assert_eq!(
proto.value,
Some(json_value::Value::Array(JsonList { items: vec![] }))
);
let value = decode_json_value(&proto);
assert_eq!(json.as_ref(), value);
let json: JsonValue = [("k3", 3i64), ("k2", 2i64), ("k1", 1i64)].into();
let proto = encode_json_value(json.clone());
assert_eq!(
proto.value,
Some(json_value::Value::Object(JsonObject {
entries: vec![
v1::json_object::Entry {
key: "k1".to_string(),
value: Some(v1::JsonValue {
value: Some(json_value::Value::Int(1))
}),
},
v1::json_object::Entry {
key: "k2".to_string(),
value: Some(v1::JsonValue {
value: Some(json_value::Value::Int(2))
}),
},
v1::json_object::Entry {
key: "k3".to_string(),
value: Some(v1::JsonValue {
value: Some(json_value::Value::Int(3))
}),
},
]
}))
);
let value = decode_json_value(&proto);
assert_eq!(json.as_ref(), value);
let json: JsonValue = [("null", ()); 0].into();
let proto = encode_json_value(json.clone());
assert_eq!(
proto.value,
Some(json_value::Value::Object(JsonObject { entries: vec![] }))
);
let value = decode_json_value(&proto);
assert_eq!(json.as_ref(), value);
let json: JsonValue = [
("null", JsonVariant::from(())),
("bool", false.into()),
("list", ["hello", "world"].into()),
(
"object",
[
("positive_i", JsonVariant::from(42u64)),
("negative_i", (-42i64).into()),
("nested", [("what", "blah")].into()),
]
.into(),
),
]
.into();
let proto = encode_json_value(json.clone());
assert_eq!(
proto.value,
Some(json_value::Value::Object(JsonObject {
entries: vec![
v1::json_object::Entry {
key: "bool".to_string(),
value: Some(v1::JsonValue {
value: Some(json_value::Value::Boolean(false))
}),
},
v1::json_object::Entry {
key: "list".to_string(),
value: Some(v1::JsonValue {
value: Some(json_value::Value::Array(JsonList {
items: vec![
v1::JsonValue {
value: Some(json_value::Value::Str("hello".to_string()))
},
v1::JsonValue {
value: Some(json_value::Value::Str("world".to_string()))
},
]
}))
}),
},
v1::json_object::Entry {
key: "null".to_string(),
value: Some(v1::JsonValue { value: None }),
},
v1::json_object::Entry {
key: "object".to_string(),
value: Some(v1::JsonValue {
value: Some(json_value::Value::Object(JsonObject {
entries: vec![
v1::json_object::Entry {
key: "negative_i".to_string(),
value: Some(v1::JsonValue {
value: Some(json_value::Value::Int(-42))
}),
},
v1::json_object::Entry {
key: "nested".to_string(),
value: Some(v1::JsonValue {
value: Some(json_value::Value::Object(JsonObject {
entries: vec![v1::json_object::Entry {
key: "what".to_string(),
value: Some(v1::JsonValue {
value: Some(json_value::Value::Str(
"blah".to_string()
))
}),
},]
}))
}),
},
v1::json_object::Entry {
key: "positive_i".to_string(),
value: Some(v1::JsonValue {
value: Some(json_value::Value::Uint(42))
}),
},
]
}))
}),
},
]
}))
);
let value = decode_json_value(&proto);
assert_eq!(json.as_ref(), value);
}
}

View File

@@ -22,7 +22,6 @@ mod procedure_info;
pub mod process_list;
pub mod region_peers;
mod region_statistics;
mod runtime_metrics;
pub mod schemata;
mod ssts;
mod table_constraints;
@@ -65,7 +64,6 @@ use crate::system_schema::information_schema::information_memory_table::get_sche
use crate::system_schema::information_schema::key_column_usage::InformationSchemaKeyColumnUsage;
use crate::system_schema::information_schema::partitions::InformationSchemaPartitions;
use crate::system_schema::information_schema::region_peers::InformationSchemaRegionPeers;
use crate::system_schema::information_schema::runtime_metrics::InformationSchemaMetrics;
use crate::system_schema::information_schema::schemata::InformationSchemaSchemata;
use crate::system_schema::information_schema::ssts::{
InformationSchemaSstsIndexMeta, InformationSchemaSstsManifest, InformationSchemaSstsStorage,
@@ -216,7 +214,6 @@ impl SystemSchemaProviderInner for InformationSchemaProvider {
self.catalog_name.clone(),
self.catalog_manager.clone(),
)) as _),
RUNTIME_METRICS => Some(Arc::new(InformationSchemaMetrics::new())),
PARTITIONS => Some(Arc::new(InformationSchemaPartitions::new(
self.catalog_name.clone(),
self.catalog_manager.clone(),
@@ -311,10 +308,6 @@ impl InformationSchemaProvider {
// authentication details, and other critical information.
// Only put these tables under `greptime` catalog to prevent info leak.
if self.catalog_name == DEFAULT_CATALOG_NAME {
tables.insert(
RUNTIME_METRICS.to_string(),
self.build_table(RUNTIME_METRICS).unwrap(),
);
tables.insert(
BUILD_INFO.to_string(),
self.build_table(BUILD_INFO).unwrap(),

View File

@@ -1,265 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::sync::Arc;
use arrow_schema::SchemaRef as ArrowSchemaRef;
use common_catalog::consts::INFORMATION_SCHEMA_RUNTIME_METRICS_TABLE_ID;
use common_error::ext::BoxedError;
use common_recordbatch::adapter::RecordBatchStreamAdapter;
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
use common_time::util::current_time_millis;
use datafusion::execution::TaskContext;
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
use datatypes::prelude::{ConcreteDataType, MutableVector};
use datatypes::scalars::ScalarVectorBuilder;
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
use datatypes::vectors::{
ConstantVector, Float64VectorBuilder, StringVectorBuilder, TimestampMillisecondVector,
VectorRef,
};
use itertools::Itertools;
use snafu::ResultExt;
use store_api::storage::{ScanRequest, TableId};
use crate::error::{CreateRecordBatchSnafu, InternalSnafu, Result};
use crate::system_schema::information_schema::{InformationTable, RUNTIME_METRICS};
#[derive(Debug)]
pub(super) struct InformationSchemaMetrics {
schema: SchemaRef,
}
const METRIC_NAME: &str = "metric_name";
const METRIC_VALUE: &str = "value";
const METRIC_LABELS: &str = "labels";
const PEER_ADDR: &str = "peer_addr";
const PEER_TYPE: &str = "peer_type";
const TIMESTAMP: &str = "timestamp";
/// The `information_schema.runtime_metrics` virtual table.
/// It provides the GreptimeDB runtime metrics for the users by SQL.
impl InformationSchemaMetrics {
pub(super) fn new() -> Self {
Self {
schema: Self::schema(),
}
}
fn schema() -> SchemaRef {
Arc::new(Schema::new(vec![
ColumnSchema::new(METRIC_NAME, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(METRIC_VALUE, ConcreteDataType::float64_datatype(), false),
ColumnSchema::new(METRIC_LABELS, ConcreteDataType::string_datatype(), true),
ColumnSchema::new(PEER_ADDR, ConcreteDataType::string_datatype(), true),
ColumnSchema::new(PEER_TYPE, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(
TIMESTAMP,
ConcreteDataType::timestamp_millisecond_datatype(),
false,
),
]))
}
fn builder(&self) -> InformationSchemaMetricsBuilder {
InformationSchemaMetricsBuilder::new(self.schema.clone())
}
}
impl InformationTable for InformationSchemaMetrics {
fn table_id(&self) -> TableId {
INFORMATION_SCHEMA_RUNTIME_METRICS_TABLE_ID
}
fn table_name(&self) -> &'static str {
RUNTIME_METRICS
}
fn schema(&self) -> SchemaRef {
self.schema.clone()
}
fn to_stream(&self, request: ScanRequest) -> Result<SendableRecordBatchStream> {
let schema = self.schema.arrow_schema().clone();
let mut builder = self.builder();
let stream = Box::pin(DfRecordBatchStreamAdapter::new(
schema,
futures::stream::once(async move {
builder
.make_metrics(Some(request))
.await
.map(|x| x.into_df_record_batch())
.map_err(Into::into)
}),
));
Ok(Box::pin(
RecordBatchStreamAdapter::try_new(stream)
.map_err(BoxedError::new)
.context(InternalSnafu)?,
))
}
}
struct InformationSchemaMetricsBuilder {
schema: SchemaRef,
metric_names: StringVectorBuilder,
metric_values: Float64VectorBuilder,
metric_labels: StringVectorBuilder,
peer_addrs: StringVectorBuilder,
peer_types: StringVectorBuilder,
}
impl InformationSchemaMetricsBuilder {
fn new(schema: SchemaRef) -> Self {
Self {
schema,
metric_names: StringVectorBuilder::with_capacity(42),
metric_values: Float64VectorBuilder::with_capacity(42),
metric_labels: StringVectorBuilder::with_capacity(42),
peer_addrs: StringVectorBuilder::with_capacity(42),
peer_types: StringVectorBuilder::with_capacity(42),
}
}
fn add_metric(
&mut self,
metric_name: &str,
labels: String,
metric_value: f64,
peer: Option<&str>,
peer_type: &str,
) {
self.metric_names.push(Some(metric_name));
self.metric_values.push(Some(metric_value));
self.metric_labels.push(Some(&labels));
self.peer_addrs.push(peer);
self.peer_types.push(Some(peer_type));
}
async fn make_metrics(&mut self, _request: Option<ScanRequest>) -> Result<RecordBatch> {
let metric_families = prometheus::gather();
let write_request =
common_telemetry::metric::convert_metric_to_write_request(metric_families, None, 0);
for ts in write_request.timeseries {
//Safety: always has `__name__` label
let metric_name = ts
.labels
.iter()
.find_map(|label| {
if label.name == "__name__" {
Some(label.value.clone())
} else {
None
}
})
.unwrap();
self.add_metric(
&metric_name,
ts.labels
.into_iter()
.filter_map(|label| {
if label.name == "__name__" {
None
} else {
Some(format!("{}={}", label.name, label.value))
}
})
.join(", "),
// Safety: always has a sample
ts.samples[0].value,
// The peer column is always `None` for standalone
None,
"STANDALONE",
);
}
// FIXME(dennis): fetching other peers metrics
self.finish()
}
fn finish(&mut self) -> Result<RecordBatch> {
let rows_num = self.metric_names.len();
let timestamps = Arc::new(ConstantVector::new(
Arc::new(TimestampMillisecondVector::from_slice([
current_time_millis(),
])),
rows_num,
));
let columns: Vec<VectorRef> = vec![
Arc::new(self.metric_names.finish()),
Arc::new(self.metric_values.finish()),
Arc::new(self.metric_labels.finish()),
Arc::new(self.peer_addrs.finish()),
Arc::new(self.peer_types.finish()),
timestamps,
];
RecordBatch::new(self.schema.clone(), columns).context(CreateRecordBatchSnafu)
}
}
impl DfPartitionStream for InformationSchemaMetrics {
fn schema(&self) -> &ArrowSchemaRef {
self.schema.arrow_schema()
}
fn execute(&self, _: Arc<TaskContext>) -> DfSendableRecordBatchStream {
let schema = self.schema.arrow_schema().clone();
let mut builder = self.builder();
Box::pin(DfRecordBatchStreamAdapter::new(
schema,
futures::stream::once(async move {
builder
.make_metrics(None)
.await
.map(|x| x.into_df_record_batch())
.map_err(Into::into)
}),
))
}
}
#[cfg(test)]
mod tests {
use common_recordbatch::RecordBatches;
use super::*;
#[tokio::test]
async fn test_make_metrics() {
let metrics = InformationSchemaMetrics::new();
let stream = metrics.to_stream(ScanRequest::default()).unwrap();
let batches = RecordBatches::try_collect(stream).await.unwrap();
let result_literal = batches.pretty_print().unwrap();
assert!(result_literal.contains(METRIC_NAME));
assert!(result_literal.contains(METRIC_VALUE));
assert!(result_literal.contains(METRIC_LABELS));
assert!(result_literal.contains(PEER_ADDR));
assert!(result_literal.contains(PEER_TYPE));
assert!(result_literal.contains(TIMESTAMP));
}
}

View File

@@ -38,7 +38,6 @@ pub const TABLE_PRIVILEGES: &str = "table_privileges";
pub const TRIGGERS: &str = "triggers";
pub const GLOBAL_STATUS: &str = "global_status";
pub const SESSION_STATUS: &str = "session_status";
pub const RUNTIME_METRICS: &str = "runtime_metrics";
pub const PARTITIONS: &str = "partitions";
pub const REGION_PEERS: &str = "region_peers";
pub const TABLE_CONSTRAINTS: &str = "table_constraints";

View File

@@ -12,7 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::HashSet;
use std::sync::{Arc, Weak};
use arrow_schema::SchemaRef as ArrowSchemaRef;
@@ -255,14 +254,17 @@ impl InformationSchemaTablesBuilder {
// TODO(dennis): `region_stats` API is not stable in distributed cluster because of network issue etc.
// But we don't want the statements such as `show tables` fail,
// so using `unwrap_or_else` here instead of `?` operator.
let region_stats = information_extension
.region_stats()
.await
.map_err(|e| {
error!(e; "Failed to call region_stats");
e
})
.unwrap_or_else(|_| vec![]);
let region_stats = {
let mut x = information_extension
.region_stats()
.await
.unwrap_or_else(|e| {
error!(e; "Failed to find region stats in information_schema, fallback to all empty");
vec![]
});
x.sort_unstable_by_key(|x| x.id);
x
};
for schema_name in catalog_manager.schema_names(&catalog_name, None).await? {
let mut stream = catalog_manager.tables(&catalog_name, &schema_name, None);
@@ -273,16 +275,16 @@ impl InformationSchemaTablesBuilder {
// TODO(dennis): make it working for metric engine
let table_region_stats =
if table_info.meta.engine == MITO_ENGINE || table_info.is_physical_table() {
let region_ids = table_info
table_info
.meta
.region_numbers
.iter()
.map(|n| RegionId::new(table_info.ident.table_id, *n))
.collect::<HashSet<_>>();
region_stats
.iter()
.filter(|stat| region_ids.contains(&stat.id))
.flat_map(|region_id| {
region_stats
.binary_search_by_key(&region_id, |x| x.id)
.map(|i| &region_stats[i])
})
.collect::<Vec<_>>()
} else {
vec![]

View File

@@ -67,9 +67,17 @@ pub struct ExportCommand {
#[clap(long, default_value_t = default_database())]
database: String,
/// Parallelism of the export.
#[clap(long, short = 'j', default_value = "1")]
export_jobs: usize,
/// The number of databases exported in parallel.
/// For example, if there are 20 databases and `db_parallelism` is 4,
/// 4 databases will be exported concurrently.
#[clap(long, short = 'j', default_value = "1", alias = "export-jobs")]
db_parallelism: usize,
/// The number of tables exported in parallel within a single database.
/// For example, if a database has 30 tables and `parallelism` is 8,
/// 8 tables will be exported concurrently.
#[clap(long, default_value = "4")]
table_parallelism: usize,
/// Max retry times for each job.
#[clap(long, default_value = "3")]
@@ -210,10 +218,11 @@ impl ExportCommand {
schema,
database_client,
output_dir: self.output_dir.clone(),
parallelism: self.export_jobs,
export_jobs: self.db_parallelism,
target: self.target.clone(),
start_time: self.start_time.clone(),
end_time: self.end_time.clone(),
parallelism: self.table_parallelism,
s3: self.s3,
ddl_local_dir: self.ddl_local_dir.clone(),
s3_bucket: self.s3_bucket.clone(),
@@ -251,10 +260,11 @@ pub struct Export {
schema: Option<String>,
database_client: DatabaseClient,
output_dir: Option<String>,
parallelism: usize,
export_jobs: usize,
target: ExportTarget,
start_time: Option<String>,
end_time: Option<String>,
parallelism: usize,
s3: bool,
ddl_local_dir: Option<String>,
s3_bucket: Option<String>,
@@ -464,7 +474,7 @@ impl Export {
async fn export_create_table(&self) -> Result<()> {
let timer = Instant::now();
let semaphore = Arc::new(Semaphore::new(self.parallelism));
let semaphore = Arc::new(Semaphore::new(self.export_jobs));
let db_names = self.get_db_names().await?;
let db_count = db_names.len();
let operator = Arc::new(self.build_prefer_fs_operator().await?);
@@ -625,13 +635,13 @@ impl Export {
async fn export_database_data(&self) -> Result<()> {
let timer = Instant::now();
let semaphore = Arc::new(Semaphore::new(self.parallelism));
let semaphore = Arc::new(Semaphore::new(self.export_jobs));
let db_names = self.get_db_names().await?;
let db_count = db_names.len();
let mut tasks = Vec::with_capacity(db_count);
let operator = Arc::new(self.build_operator().await?);
let fs_first_operator = Arc::new(self.build_prefer_fs_operator().await?);
let with_options = build_with_options(&self.start_time, &self.end_time);
let with_options = build_with_options(&self.start_time, &self.end_time, self.parallelism);
for schema in db_names {
let semaphore_moved = semaphore.clone();
@@ -888,7 +898,11 @@ impl Tool for Export {
}
/// Builds the WITH options string for SQL commands, assuming consistent syntax across S3 and local exports.
fn build_with_options(start_time: &Option<String>, end_time: &Option<String>) -> String {
fn build_with_options(
start_time: &Option<String>,
end_time: &Option<String>,
parallelism: usize,
) -> String {
let mut options = vec!["format = 'parquet'".to_string()];
if let Some(start) = start_time {
options.push(format!("start_time = '{}'", start));
@@ -896,5 +910,6 @@ fn build_with_options(start_time: &Option<String>, end_time: &Option<String>) ->
if let Some(end) = end_time {
options.push(format!("end_time = '{}'", end));
}
options.push(format!("parallelism = {}", parallelism));
options.join(", ")
}
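
With the renamed fields, clap derives `--db-parallelism` (alias `--export-jobs`, short `-j`) from `db_parallelism` and `--table-parallelism` from `table_parallelism`. A hedged invocation sketch; the subcommand path and the other flags are assumptions for illustration, not taken from this diff:

```bash
# Hypothetical: export 4 databases concurrently, with 8 tables per database in parallel.
# The `greptime cli export` path, --addr and --output-dir are illustrative only.
greptime cli export \
    --addr 127.0.0.1:4000 \
    --output-dir /tmp/backup \
    --db-parallelism 4 \
    --table-parallelism 8
```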

View File

@@ -56,9 +56,11 @@ pub struct ImportCommand {
#[clap(long, default_value_t = default_database())]
database: String,
/// Parallelism of the import.
#[clap(long, short = 'j', default_value = "1")]
import_jobs: usize,
/// The number of databases imported in parallel.
/// For example, if there are 20 databases and `db_parallelism` is 4,
/// 4 databases will be imported concurrently.
#[clap(long, short = 'j', default_value = "1", alias = "import-jobs")]
db_parallelism: usize,
/// Max retry times for each job.
#[clap(long, default_value = "3")]
@@ -109,7 +111,7 @@ impl ImportCommand {
schema,
database_client,
input_dir: self.input_dir.clone(),
parallelism: self.import_jobs,
parallelism: self.db_parallelism,
target: self.target.clone(),
}))
}
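
The import side mirrors this rename: `import_jobs` becomes `db_parallelism`, with the old spelling kept as the `--import-jobs` alias. A similarly hedged sketch; the subcommand path and `--input-dir` are assumed for illustration:

```bash
# Hypothetical: import 4 databases concurrently; `--import-jobs 4` still works via the alias.
greptime cli import \
    --input-dir /tmp/backup \
    --db-parallelism 4
```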

View File

@@ -21,7 +21,7 @@ use api::v1::prometheus_gateway_client::PrometheusGatewayClient;
use api::v1::region::region_client::RegionClient as PbRegionClient;
use arrow_flight::flight_service_client::FlightServiceClient;
use common_grpc::channel_manager::{
ChannelConfig, ChannelManager, ClientTlsOption, load_tls_config,
ChannelConfig, ChannelManager, ClientTlsOption, load_client_tls_config,
};
use parking_lot::RwLock;
use snafu::{OptionExt, ResultExt};
@@ -95,9 +95,9 @@ impl Client {
U: AsRef<str>,
A: AsRef<[U]>,
{
let channel_config = ChannelConfig::default().client_tls_config(client_tls);
let tls_config = load_tls_config(channel_config.client_tls.as_ref())
.context(error::CreateTlsChannelSnafu)?;
let channel_config = ChannelConfig::default().client_tls_config(client_tls.clone());
let tls_config =
load_client_tls_config(Some(client_tls)).context(error::CreateTlsChannelSnafu)?;
let channel_manager = ChannelManager::with_config(channel_config, tls_config);
Ok(Self::with_manager_and_urls(channel_manager, urls))
}

View File

@@ -435,10 +435,10 @@ impl Database {
.context(ExternalSnafu)?;
match flight_message {
FlightMessage::RecordBatch(arrow_batch) => {
yield RecordBatch::try_from_df_record_batch(
yield Ok(RecordBatch::from_df_record_batch(
schema_cloned.clone(),
arrow_batch,
)
))
}
FlightMessage::Metrics(_) => {}
FlightMessage::AffectedRows(_) | FlightMessage::Schema(_) => {

View File

@@ -182,10 +182,8 @@ impl RegionRequester {
match flight_message {
FlightMessage::RecordBatch(record_batch) => {
let result_to_yield = RecordBatch::try_from_df_record_batch(
schema_cloned.clone(),
record_batch,
);
let result_to_yield =
RecordBatch::from_df_record_batch(schema_cloned.clone(), record_batch);
// get the next message from the stream. normally it should be a metrics message.
if let Some(next_flight_message_result) = flight_message_stream.next().await
@@ -219,7 +217,7 @@ impl RegionRequester {
stream_ended = true;
}
yield result_to_yield;
yield Ok(result_to_yield);
}
FlightMessage::Metrics(s) => {
// just a branch in case of some metrics message comes after other things.

View File

@@ -161,6 +161,7 @@ impl ObjbenchCommand {
level: 0,
file_size,
available_indexes: Default::default(),
indexes: Default::default(),
index_file_size: 0,
index_file_id: None,
num_rows,

View File

@@ -99,13 +99,6 @@ pub enum Error {
source: flow::Error,
},
#[snafu(display("Servers error"))]
Servers {
#[snafu(implicit)]
location: Location,
source: servers::error::Error,
},
#[snafu(display("Failed to start frontend"))]
StartFrontend {
#[snafu(implicit)]
@@ -336,7 +329,6 @@ impl ErrorExt for Error {
Error::ShutdownFrontend { source, .. } => source.status_code(),
Error::StartMetaServer { source, .. } => source.status_code(),
Error::ShutdownMetaServer { source, .. } => source.status_code(),
Error::Servers { source, .. } => source.status_code(),
Error::BuildMetaServer { source, .. } => source.status_code(),
Error::UnsupportedSelectorType { source, .. } => source.status_code(),
Error::BuildCli { source, .. } => source.status_code(),

View File

@@ -43,7 +43,6 @@ use frontend::instance::builder::FrontendBuilder;
use frontend::server::Services;
use meta_client::{MetaClientOptions, MetaClientType};
use servers::addrs;
use servers::export_metrics::ExportMetricsTask;
use servers::grpc::GrpcOptions;
use servers::tls::{TlsMode, TlsOption};
use snafu::{OptionExt, ResultExt};
@@ -458,9 +457,6 @@ impl StartCommand {
.context(error::StartFrontendSnafu)?;
let instance = Arc::new(instance);
let export_metrics_task = ExportMetricsTask::try_new(&opts.export_metrics, Some(&plugins))
.context(error::ServersSnafu)?;
let servers = Services::new(opts, instance.clone(), plugins)
.build()
.context(error::StartFrontendSnafu)?;
@@ -469,7 +465,6 @@ impl StartCommand {
instance,
servers,
heartbeat_task,
export_metrics_task,
};
Ok(Instance::new(frontend, guard))

View File

@@ -57,7 +57,6 @@ use frontend::instance::StandaloneDatanodeManager;
use frontend::instance::builder::FrontendBuilder;
use frontend::server::Services;
use meta_srv::metasrv::{FLOW_ID_SEQ, TABLE_ID_SEQ};
use servers::export_metrics::ExportMetricsTask;
use servers::tls::{TlsMode, TlsOption};
use snafu::ResultExt;
use standalone::StandaloneInformationExtension;
@@ -565,9 +564,6 @@ impl StartCommand {
.context(StartFlownodeSnafu)?;
flow_streaming_engine.set_frontend_invoker(invoker).await;
let export_metrics_task = ExportMetricsTask::try_new(&opts.export_metrics, Some(&plugins))
.context(error::ServersSnafu)?;
let servers = Services::new(opts, fe_instance.clone(), plugins.clone())
.build()
.context(error::StartFrontendSnafu)?;
@@ -576,7 +572,6 @@ impl StartCommand {
instance: fe_instance,
servers,
heartbeat_task: None,
export_metrics_task,
};
#[cfg(feature = "enterprise")]

View File

@@ -31,7 +31,6 @@ use meta_srv::selector::SelectorType;
use metric_engine::config::EngineConfig as MetricEngineConfig;
use mito2::config::MitoConfig;
use query::options::QueryOptions;
use servers::export_metrics::ExportMetricsOption;
use servers::grpc::GrpcOptions;
use servers::http::HttpOptions;
use servers::tls::{TlsMode, TlsOption};
@@ -95,11 +94,6 @@ fn test_load_datanode_example_config() {
tracing_sample_ratio: Some(Default::default()),
..Default::default()
},
export_metrics: ExportMetricsOption {
self_import: None,
remote_write: Some(Default::default()),
..Default::default()
},
grpc: GrpcOptions::default()
.with_bind_addr("127.0.0.1:3001")
.with_server_addr("127.0.0.1:3001"),
@@ -146,11 +140,6 @@ fn test_load_frontend_example_config() {
..Default::default()
},
},
export_metrics: ExportMetricsOption {
self_import: None,
remote_write: Some(Default::default()),
..Default::default()
},
grpc: GrpcOptions {
bind_addr: "127.0.0.1:4001".to_string(),
server_addr: "127.0.0.1:4001".to_string(),
@@ -201,11 +190,6 @@ fn test_load_metasrv_example_config() {
tcp_nodelay: true,
},
},
export_metrics: ExportMetricsOption {
self_import: None,
remote_write: Some(Default::default()),
..Default::default()
},
backend_tls: Some(TlsOption {
mode: TlsMode::Prefer,
cert_path: String::new(),
@@ -317,11 +301,6 @@ fn test_load_standalone_example_config() {
tracing_sample_ratio: Some(Default::default()),
..Default::default()
},
export_metrics: ExportMetricsOption {
self_import: Some(Default::default()),
remote_write: Some(Default::default()),
..Default::default()
},
http: HttpOptions {
cors_allowed_origins: vec!["https://example.com".to_string()],
..Default::default()

View File

@@ -86,8 +86,6 @@ pub const INFORMATION_SCHEMA_TRIGGERS_TABLE_ID: u32 = 24;
pub const INFORMATION_SCHEMA_GLOBAL_STATUS_TABLE_ID: u32 = 25;
/// id for information_schema.SESSION_STATUS
pub const INFORMATION_SCHEMA_SESSION_STATUS_TABLE_ID: u32 = 26;
/// id for information_schema.RUNTIME_METRICS
pub const INFORMATION_SCHEMA_RUNTIME_METRICS_TABLE_ID: u32 = 27;
/// id for information_schema.PARTITIONS
pub const INFORMATION_SCHEMA_PARTITIONS_TABLE_ID: u32 = 28;
/// id for information_schema.REGION_PEERS
@@ -112,6 +110,8 @@ pub const INFORMATION_SCHEMA_SSTS_MANIFEST_TABLE_ID: u32 = 37;
pub const INFORMATION_SCHEMA_SSTS_STORAGE_TABLE_ID: u32 = 38;
/// id for information_schema.ssts_index_meta
pub const INFORMATION_SCHEMA_SSTS_INDEX_META_TABLE_ID: u32 = 39;
/// id for information_schema.alerts
pub const INFORMATION_SCHEMA_ALERTS_TABLE_ID: u32 = 40;
// ----- End of information_schema tables -----

View File

@@ -12,28 +12,11 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use std::future::Future;
use arrow::record_batch::RecordBatch;
use async_trait::async_trait;
use datafusion::parquet::format::FileMetaData;
use snafu::{OptionExt, ResultExt};
use tokio::io::{AsyncWrite, AsyncWriteExt};
use crate::error::{self, Result};
use crate::share_buffer::SharedBuffer;
pub struct LazyBufferedWriter<T, U, F> {
path: String,
writer_factory: F,
writer: Option<T>,
/// None stands for [`LazyBufferedWriter`] closed.
encoder: Option<U>,
buffer: SharedBuffer,
rows_written: usize,
bytes_written: u64,
threshold: usize,
}
use crate::error::Result;
pub trait DfRecordBatchEncoder {
fn write(&mut self, batch: &RecordBatch) -> Result<()>;
@@ -43,126 +26,3 @@ pub trait DfRecordBatchEncoder {
pub trait ArrowWriterCloser {
async fn close(mut self) -> Result<FileMetaData>;
}
impl<
T: AsyncWrite + Send + Unpin,
U: DfRecordBatchEncoder + ArrowWriterCloser,
F: Fn(String) -> Fut,
Fut: Future<Output = Result<T>>,
> LazyBufferedWriter<T, U, F>
{
/// Closes `LazyBufferedWriter` and optionally flushes all data to underlying storage
/// if any row's been written.
pub async fn close_with_arrow_writer(mut self) -> Result<(FileMetaData, u64)> {
let encoder = self
.encoder
.take()
.context(error::BufferedWriterClosedSnafu)?;
let metadata = encoder.close().await?;
// It's important to shut down! flushes all pending writes
self.close_inner_writer().await?;
Ok((metadata, self.bytes_written))
}
}
impl<
T: AsyncWrite + Send + Unpin,
U: DfRecordBatchEncoder,
F: Fn(String) -> Fut,
Fut: Future<Output = Result<T>>,
> LazyBufferedWriter<T, U, F>
{
/// Closes the writer and flushes the buffer data.
pub async fn close_inner_writer(&mut self) -> Result<()> {
// Use `rows_written` to keep a track of if any rows have been written.
// If no row's been written, then we can simply close the underlying
// writer without flush so that no file will be actually created.
if self.rows_written != 0 {
self.bytes_written += self.try_flush(true).await?;
}
if let Some(writer) = &mut self.writer {
writer.shutdown().await.context(error::AsyncWriteSnafu)?;
}
Ok(())
}
pub fn new(
threshold: usize,
buffer: SharedBuffer,
encoder: U,
path: impl AsRef<str>,
writer_factory: F,
) -> Self {
Self {
path: path.as_ref().to_string(),
threshold,
encoder: Some(encoder),
buffer,
rows_written: 0,
bytes_written: 0,
writer_factory,
writer: None,
}
}
pub async fn write(&mut self, batch: &RecordBatch) -> Result<()> {
let encoder = self
.encoder
.as_mut()
.context(error::BufferedWriterClosedSnafu)?;
encoder.write(batch)?;
self.rows_written += batch.num_rows();
self.bytes_written += self.try_flush(false).await?;
Ok(())
}
async fn try_flush(&mut self, all: bool) -> Result<u64> {
let mut bytes_written: u64 = 0;
// Once buffered data size reaches threshold, split the data in chunks (typically 4MB)
// and write to underlying storage.
while self.buffer.buffer.lock().unwrap().len() >= self.threshold {
let chunk = {
let mut buffer = self.buffer.buffer.lock().unwrap();
buffer.split_to(self.threshold)
};
let size = chunk.len();
self.maybe_init_writer()
.await?
.write_all(&chunk)
.await
.context(error::AsyncWriteSnafu)?;
bytes_written += size as u64;
}
if all {
bytes_written += self.try_flush_all().await?;
}
Ok(bytes_written)
}
/// Only initiates underlying file writer when rows have been written.
async fn maybe_init_writer(&mut self) -> Result<&mut T> {
if let Some(ref mut writer) = self.writer {
Ok(writer)
} else {
let writer = (self.writer_factory)(self.path.clone()).await?;
Ok(self.writer.insert(writer))
}
}
async fn try_flush_all(&mut self) -> Result<u64> {
let remain = self.buffer.buffer.lock().unwrap().split();
let size = remain.len();
self.maybe_init_writer()
.await?
.write_all(&remain)
.await
.context(error::AsyncWriteSnafu)?;
Ok(size as u64)
}
}
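A minimal sketch (not part of this diff) of what format implementors still provide now that LazyBufferedWriter is gone: only a DfRecordBatchEncoder, while buffering, chunked flushing, and compression move into stream_to_file in the file_format changes further down. The RowCountingEncoder name is hypothetical; the trait and Result paths are the crate-local ones shown above.

use arrow::record_batch::RecordBatch;

use crate::buffered_writer::DfRecordBatchEncoder;
use crate::error::Result;

/// Hypothetical encoder that only counts rows instead of serializing them.
struct RowCountingEncoder {
    rows: usize,
}

impl DfRecordBatchEncoder for RowCountingEncoder {
    fn write(&mut self, batch: &RecordBatch) -> Result<()> {
        // A real encoder (e.g. the csv/json writers elsewhere in this diff) would
        // serialize `batch` into a SharedBuffer here.
        self.rows += batch.num_rows();
        Ok(())
    }
}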

View File

@@ -0,0 +1,202 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::io;
use std::pin::Pin;
use std::task::{Context, Poll};
use async_compression::tokio::write::{BzEncoder, GzipEncoder, XzEncoder, ZstdEncoder};
use snafu::ResultExt;
use tokio::io::{AsyncWrite, AsyncWriteExt};
use crate::compression::CompressionType;
use crate::error::{self, Result};
/// A compressed writer that wraps an underlying async writer with compression.
///
/// This writer supports multiple compression formats including GZIP, BZIP2, XZ, and ZSTD.
/// It provides transparent compression for any async writer implementation.
pub struct CompressedWriter {
inner: Box<dyn AsyncWrite + Unpin + Send>,
compression_type: CompressionType,
}
impl CompressedWriter {
/// Creates a new compressed writer with the specified compression type.
///
/// # Arguments
///
/// * `writer` - The underlying writer to wrap with compression
/// * `compression_type` - The type of compression to apply
pub fn new(
writer: impl AsyncWrite + Unpin + Send + 'static,
compression_type: CompressionType,
) -> Self {
let inner: Box<dyn AsyncWrite + Unpin + Send> = match compression_type {
CompressionType::Gzip => Box::new(GzipEncoder::new(writer)),
CompressionType::Bzip2 => Box::new(BzEncoder::new(writer)),
CompressionType::Xz => Box::new(XzEncoder::new(writer)),
CompressionType::Zstd => Box::new(ZstdEncoder::new(writer)),
CompressionType::Uncompressed => Box::new(writer),
};
Self {
inner,
compression_type,
}
}
/// Returns the compression type used by this writer.
pub fn compression_type(&self) -> CompressionType {
self.compression_type
}
/// Flushes the writer and shuts down the compression encoder.
pub async fn shutdown(mut self) -> Result<()> {
self.inner
.shutdown()
.await
.context(error::AsyncWriteSnafu)?;
Ok(())
}
}
impl AsyncWrite for CompressedWriter {
fn poll_write(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &[u8],
) -> Poll<io::Result<usize>> {
Pin::new(&mut self.inner).poll_write(cx, buf)
}
fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
Pin::new(&mut self.inner).poll_flush(cx)
}
fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
Pin::new(&mut self.inner).poll_shutdown(cx)
}
}
/// A trait for converting async writers into compressed writers.
///
/// This trait is automatically implemented for all types that implement [`AsyncWrite`].
pub trait IntoCompressedWriter {
/// Converts this writer into a [`CompressedWriter`] with the specified compression type.
///
/// # Arguments
///
/// * `self` - The underlying writer to wrap with compression
/// * `compression_type` - The type of compression to apply
fn into_compressed_writer(self, compression_type: CompressionType) -> CompressedWriter
where
Self: AsyncWrite + Unpin + Send + 'static + Sized,
{
CompressedWriter::new(self, compression_type)
}
}
impl<W: AsyncWrite + Unpin + Send + 'static> IntoCompressedWriter for W {}
#[cfg(test)]
mod tests {
use tokio::io::{AsyncReadExt, AsyncWriteExt, duplex};
use super::*;
#[tokio::test]
async fn test_compressed_writer_gzip() {
let (duplex_writer, mut duplex_reader) = duplex(1024);
let mut writer = duplex_writer.into_compressed_writer(CompressionType::Gzip);
let original = b"test data for gzip compression";
writer.write_all(original).await.unwrap();
writer.shutdown().await.unwrap();
let mut buffer = Vec::new();
duplex_reader.read_to_end(&mut buffer).await.unwrap();
// The compressed data should be different from the original
assert_ne!(buffer, original);
assert!(!buffer.is_empty());
}
#[tokio::test]
async fn test_compressed_writer_bzip2() {
let (duplex_writer, mut duplex_reader) = duplex(1024);
let mut writer = duplex_writer.into_compressed_writer(CompressionType::Bzip2);
let original = b"test data for bzip2 compression";
writer.write_all(original).await.unwrap();
writer.shutdown().await.unwrap();
let mut buffer = Vec::new();
duplex_reader.read_to_end(&mut buffer).await.unwrap();
// The compressed data should be different from the original
assert_ne!(buffer, original);
assert!(!buffer.is_empty());
}
#[tokio::test]
async fn test_compressed_writer_xz() {
let (duplex_writer, mut duplex_reader) = duplex(1024);
let mut writer = duplex_writer.into_compressed_writer(CompressionType::Xz);
let original = b"test data for xz compression";
writer.write_all(original).await.unwrap();
writer.shutdown().await.unwrap();
let mut buffer = Vec::new();
duplex_reader.read_to_end(&mut buffer).await.unwrap();
// The compressed data should be different from the original
assert_ne!(buffer, original);
assert!(!buffer.is_empty());
}
#[tokio::test]
async fn test_compressed_writer_zstd() {
let (duplex_writer, mut duplex_reader) = duplex(1024);
let mut writer = duplex_writer.into_compressed_writer(CompressionType::Zstd);
let original = b"test data for zstd compression";
writer.write_all(original).await.unwrap();
writer.shutdown().await.unwrap();
let mut buffer = Vec::new();
duplex_reader.read_to_end(&mut buffer).await.unwrap();
// The compressed data should be different from the original
assert_ne!(buffer, original);
assert!(!buffer.is_empty());
}
#[tokio::test]
async fn test_compressed_writer_uncompressed() {
let (duplex_writer, mut duplex_reader) = duplex(1024);
let mut writer = duplex_writer.into_compressed_writer(CompressionType::Uncompressed);
let original = b"test data for uncompressed";
writer.write_all(original).await.unwrap();
writer.shutdown().await.unwrap();
let mut buffer = Vec::new();
duplex_reader.read_to_end(&mut buffer).await.unwrap();
// Uncompressed data should be the same as the original
assert_eq!(buffer, original);
}
}
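A minimal usage sketch (not part of this diff) of the new CompressedWriter outside the test suite: any writer that is AsyncWrite + Unpin + Send + 'static can be wrapped via IntoCompressedWriter. The helper name and the tokio file handle are illustrative assumptions; the import paths mirror the module layout above.

use tokio::io::AsyncWriteExt;

use crate::compressed_writer::IntoCompressedWriter;
use crate::compression::CompressionType;
use crate::error::Result;

/// Hypothetical helper that writes a payload as a zstd-compressed file.
async fn write_zstd_file(path: &str, payload: &[u8]) -> Result<()> {
    let file = tokio::fs::File::create(path).await.expect("create output file");
    // CompressionType::Uncompressed would pass the bytes through unchanged.
    let mut writer = file.into_compressed_writer(CompressionType::Zstd);
    writer.write_all(payload).await.expect("write payload");
    // `shutdown` flushes the encoder's trailing frame before closing the writer.
    writer.shutdown().await
}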

View File

@@ -194,12 +194,6 @@ pub enum Error {
location: Location,
},
#[snafu(display("Buffered writer closed"))]
BufferedWriterClosed {
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Failed to write parquet file, path: {}", path))]
WriteParquet {
path: String,
@@ -208,6 +202,14 @@ pub enum Error {
#[snafu(source)]
error: parquet::errors::ParquetError,
},
#[snafu(display("Failed to build file stream"))]
BuildFileStream {
#[snafu(implicit)]
location: Location,
#[snafu(source)]
error: datafusion::error::DataFusionError,
},
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -239,7 +241,7 @@ impl ErrorExt for Error {
| ReadRecordBatch { .. }
| WriteRecordBatch { .. }
| EncodeRecordBatch { .. }
| BufferedWriterClosed { .. }
| BuildFileStream { .. }
| OrcReader { .. } => StatusCode::Unexpected,
}
}

View File

@@ -30,12 +30,22 @@ use arrow::record_batch::RecordBatch;
use arrow_schema::{ArrowError, Schema as ArrowSchema};
use async_trait::async_trait;
use bytes::{Buf, Bytes};
use datafusion::datasource::physical_plan::FileOpenFuture;
use common_recordbatch::DfSendableRecordBatchStream;
use datafusion::datasource::file_format::file_compression_type::FileCompressionType as DfCompressionType;
use datafusion::datasource::listing::PartitionedFile;
use datafusion::datasource::object_store::ObjectStoreUrl;
use datafusion::datasource::physical_plan::{
FileGroup, FileOpenFuture, FileScanConfigBuilder, FileSource, FileStream,
};
use datafusion::error::{DataFusionError, Result as DataFusionResult};
use datafusion::physical_plan::SendableRecordBatchStream;
use datafusion::physical_plan::metrics::ExecutionPlanMetricsSet;
use datatypes::arrow::datatypes::SchemaRef;
use futures::{StreamExt, TryStreamExt};
use object_store::ObjectStore;
use object_store_opendal::OpendalStore;
use snafu::ResultExt;
use tokio::io::AsyncWriteExt;
use tokio_util::compat::FuturesAsyncWriteCompatExt;
use self::csv::CsvFormat;
@@ -43,7 +53,8 @@ use self::json::JsonFormat;
use self::orc::OrcFormat;
use self::parquet::ParquetFormat;
use crate::DEFAULT_WRITE_BUFFER_SIZE;
use crate::buffered_writer::{DfRecordBatchEncoder, LazyBufferedWriter};
use crate::buffered_writer::DfRecordBatchEncoder;
use crate::compressed_writer::{CompressedWriter, IntoCompressedWriter};
use crate::compression::CompressionType;
use crate::error::{self, Result};
use crate::share_buffer::SharedBuffer;
@@ -195,33 +206,128 @@ pub async fn infer_schemas(
ArrowSchema::try_merge(schemas).context(error::MergeSchemaSnafu)
}
pub async fn stream_to_file<T: DfRecordBatchEncoder, U: Fn(SharedBuffer) -> T>(
/// Writes data to a compressed writer if the data is not empty.
///
/// Does nothing if `data` is empty; otherwise writes all data and returns any error.
async fn write_to_compressed_writer(
compressed_writer: &mut CompressedWriter,
data: &[u8],
) -> Result<()> {
if !data.is_empty() {
compressed_writer
.write_all(data)
.await
.context(error::AsyncWriteSnafu)?;
}
Ok(())
}
/// Streams [SendableRecordBatchStream] to a file with optional compression support.
/// Data is buffered and flushed according to the given `threshold`.
/// Ensures that writer resources are cleanly released and that an empty file is not
/// created if no rows are written.
///
/// Returns the total number of rows successfully written.
pub async fn stream_to_file<E>(
mut stream: SendableRecordBatchStream,
store: ObjectStore,
path: &str,
threshold: usize,
concurrency: usize,
encoder_factory: U,
) -> Result<usize> {
compression_type: CompressionType,
encoder_factory: impl Fn(SharedBuffer) -> E,
) -> Result<usize>
where
E: DfRecordBatchEncoder,
{
// Create the file writer with OpenDAL's built-in buffering
let writer = store
.writer_with(path)
.concurrent(concurrency)
.chunk(DEFAULT_WRITE_BUFFER_SIZE.as_bytes() as usize)
.await
.with_context(|_| error::WriteObjectSnafu { path })?
.into_futures_async_write()
.compat_write();
// Apply compression if needed
let mut compressed_writer = writer.into_compressed_writer(compression_type);
// Create a buffer for the encoder
let buffer = SharedBuffer::with_capacity(threshold);
let encoder = encoder_factory(buffer.clone());
let mut writer = LazyBufferedWriter::new(threshold, buffer, encoder, path, |path| async {
store
.writer_with(&path)
.concurrent(concurrency)
.chunk(DEFAULT_WRITE_BUFFER_SIZE.as_bytes() as usize)
.await
.map(|v| v.into_futures_async_write().compat_write())
.context(error::WriteObjectSnafu { path })
});
let mut encoder = encoder_factory(buffer.clone());
let mut rows = 0;
// Process each record batch
while let Some(batch) = stream.next().await {
let batch = batch.context(error::ReadRecordBatchSnafu)?;
writer.write(&batch).await?;
// Write batch using the encoder
encoder.write(&batch)?;
rows += batch.num_rows();
loop {
let chunk = {
let mut buffer_guard = buffer.buffer.lock().unwrap();
if buffer_guard.len() < threshold {
break;
}
buffer_guard.split_to(threshold)
};
write_to_compressed_writer(&mut compressed_writer, &chunk).await?;
}
}
writer.close_inner_writer().await?;
// If no rows have been written, simply close the underlying writer
// without flushing so that no file is actually created.
if rows != 0 {
// Final flush of any remaining data
let final_data = {
let mut buffer_guard = buffer.buffer.lock().unwrap();
buffer_guard.split()
};
write_to_compressed_writer(&mut compressed_writer, &final_data).await?;
}
// Shut down compression and close the writer
compressed_writer.shutdown().await?;
Ok(rows)
}
/// Creates a [FileStream] for reading data from a file with optional column projection
/// and compression support.
///
/// Returns [SendableRecordBatchStream].
pub async fn file_to_stream(
store: &ObjectStore,
filename: &str,
file_schema: SchemaRef,
file_source: Arc<dyn FileSource>,
projection: Option<Vec<usize>>,
compression_type: CompressionType,
) -> Result<DfSendableRecordBatchStream> {
let df_compression: DfCompressionType = compression_type.into();
let config = FileScanConfigBuilder::new(
ObjectStoreUrl::local_filesystem(),
file_schema,
file_source.clone(),
)
.with_file_group(FileGroup::new(vec![PartitionedFile::new(
filename.to_string(),
0,
)]))
.with_projection(projection)
.with_file_compression_type(df_compression)
.build();
let store = Arc::new(OpendalStore::new(store.clone()));
let file_opener = file_source
.with_projection(&config)
.create_file_opener(store, &config, 0);
let stream = FileStream::new(&config, 0, file_opener, &ExecutionPlanMetricsSet::new())
.context(error::BuildFileStreamSnafu)?;
Ok(Box::pin(stream))
}
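A minimal sketch (not part of this diff) of the chunking rule that stream_to_file above applies to the shared buffer: whenever the buffered bytes reach `threshold`, exactly `threshold` bytes are split off and handed to the compressed writer, and whatever remains is flushed only once at the end (and only if any rows were written). The helper below mirrors those `split_to` calls with a plain bytes::BytesMut, which appears to expose the same split_to/split API as SharedBuffer's inner buffer.

use bytes::BytesMut;

/// Hypothetical helper: drains full `threshold`-sized chunks from the buffer,
/// leaving any partial tail in place for the final flush.
fn split_full_chunks(buffer: &mut BytesMut, threshold: usize) -> Vec<BytesMut> {
    let mut chunks = Vec::new();
    while buffer.len() >= threshold {
        // Same as the `buffer_guard.split_to(threshold)` call in stream_to_file.
        chunks.push(buffer.split_to(threshold));
    }
    chunks
}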

View File

@@ -157,19 +157,27 @@ pub async fn stream_to_csv(
concurrency: usize,
format: &CsvFormat,
) -> Result<usize> {
stream_to_file(stream, store, path, threshold, concurrency, |buffer| {
let mut builder = WriterBuilder::new();
if let Some(timestamp_format) = &format.timestamp_format {
builder = builder.with_timestamp_format(timestamp_format.to_owned())
}
if let Some(date_format) = &format.date_format {
builder = builder.with_date_format(date_format.to_owned())
}
if let Some(time_format) = &format.time_format {
builder = builder.with_time_format(time_format.to_owned())
}
builder.build(buffer)
})
stream_to_file(
stream,
store,
path,
threshold,
concurrency,
format.compression_type,
|buffer| {
let mut builder = WriterBuilder::new();
if let Some(timestamp_format) = &format.timestamp_format {
builder = builder.with_timestamp_format(timestamp_format.to_owned())
}
if let Some(date_format) = &format.date_format {
builder = builder.with_date_format(date_format.to_owned())
}
if let Some(time_format) = &format.time_format {
builder = builder.with_time_format(time_format.to_owned())
}
builder.build(buffer)
},
)
.await
}
@@ -181,13 +189,21 @@ impl DfRecordBatchEncoder for csv::Writer<SharedBuffer> {
#[cfg(test)]
mod tests {
use std::sync::Arc;
use common_recordbatch::adapter::DfRecordBatchStreamAdapter;
use common_recordbatch::{RecordBatch, RecordBatches};
use common_test_util::find_workspace_path;
use datafusion::datasource::physical_plan::{CsvSource, FileSource};
use datatypes::prelude::ConcreteDataType;
use datatypes::schema::{ColumnSchema, Schema};
use datatypes::vectors::{Float64Vector, StringVector, UInt32Vector, VectorRef};
use futures::TryStreamExt;
use super::*;
use crate::file_format::{
FORMAT_COMPRESSION_TYPE, FORMAT_DELIMITER, FORMAT_HAS_HEADER,
FORMAT_SCHEMA_INFER_MAX_RECORD, FileFormat,
FORMAT_SCHEMA_INFER_MAX_RECORD, FileFormat, file_to_stream,
};
use crate::test_util::{format_schema, test_store};
@@ -297,4 +313,166 @@ mod tests {
}
);
}
#[tokio::test]
async fn test_compressed_csv() {
// Create test data
let column_schemas = vec![
ColumnSchema::new("id", ConcreteDataType::uint32_datatype(), false),
ColumnSchema::new("name", ConcreteDataType::string_datatype(), false),
ColumnSchema::new("value", ConcreteDataType::float64_datatype(), false),
];
let schema = Arc::new(Schema::new(column_schemas));
// Create multiple record batches with different data
let batch1_columns: Vec<VectorRef> = vec![
Arc::new(UInt32Vector::from_slice(vec![1, 2, 3])),
Arc::new(StringVector::from(vec!["Alice", "Bob", "Charlie"])),
Arc::new(Float64Vector::from_slice(vec![10.5, 20.3, 30.7])),
];
let batch1 = RecordBatch::new(schema.clone(), batch1_columns).unwrap();
let batch2_columns: Vec<VectorRef> = vec![
Arc::new(UInt32Vector::from_slice(vec![4, 5, 6])),
Arc::new(StringVector::from(vec!["David", "Eva", "Frank"])),
Arc::new(Float64Vector::from_slice(vec![40.1, 50.2, 60.3])),
];
let batch2 = RecordBatch::new(schema.clone(), batch2_columns).unwrap();
let batch3_columns: Vec<VectorRef> = vec![
Arc::new(UInt32Vector::from_slice(vec![7, 8, 9])),
Arc::new(StringVector::from(vec!["Grace", "Henry", "Ivy"])),
Arc::new(Float64Vector::from_slice(vec![70.4, 80.5, 90.6])),
];
let batch3 = RecordBatch::new(schema.clone(), batch3_columns).unwrap();
// Combine all batches into a RecordBatches collection
let recordbatches = RecordBatches::try_new(schema, vec![batch1, batch2, batch3]).unwrap();
// Test with different compression types
let compression_types = vec![
CompressionType::Gzip,
CompressionType::Bzip2,
CompressionType::Xz,
CompressionType::Zstd,
];
// Create a temporary file path
let temp_dir = common_test_util::temp_dir::create_temp_dir("test_compressed_csv");
for compression_type in compression_types {
let format = CsvFormat {
compression_type,
..CsvFormat::default()
};
// Build the file name from the compression type's file extension rather than its Debug formatting
let compressed_file_name =
format!("test_compressed_csv.{}", compression_type.file_extension());
let compressed_file_path = temp_dir.path().join(&compressed_file_name);
let compressed_file_path_str = compressed_file_path.to_str().unwrap();
// Create a simple file store for testing
let store = test_store("/");
// Export CSV with compression
let rows = stream_to_csv(
Box::pin(DfRecordBatchStreamAdapter::new(recordbatches.as_stream())),
store,
compressed_file_path_str,
1024,
1,
&format,
)
.await
.unwrap();
assert_eq!(rows, 9);
// Verify compressed file was created and has content
assert!(compressed_file_path.exists());
let file_size = std::fs::metadata(&compressed_file_path).unwrap().len();
assert!(file_size > 0);
// Verify the file is actually compressed
let file_content = std::fs::read(&compressed_file_path).unwrap();
// Compressed files should not start with the CSV header;
// they should start with the compression magic bytes.
match compression_type {
CompressionType::Gzip => {
// Gzip magic bytes: 0x1f 0x8b
assert_eq!(file_content[0], 0x1f, "Gzip file should start with 0x1f");
assert_eq!(
file_content[1], 0x8b,
"Gzip file should have 0x8b as second byte"
);
}
CompressionType::Bzip2 => {
// Bzip2 magic bytes: 'BZ'
assert_eq!(file_content[0], b'B', "Bzip2 file should start with 'B'");
assert_eq!(
file_content[1], b'Z',
"Bzip2 file should have 'Z' as second byte"
);
}
CompressionType::Xz => {
// XZ magic bytes: 0xFD '7zXZ'
assert_eq!(file_content[0], 0xFD, "XZ file should start with 0xFD");
}
CompressionType::Zstd => {
// Zstd magic bytes: 0x28 0xB5 0x2F 0xFD
assert_eq!(file_content[0], 0x28, "Zstd file should start with 0x28");
assert_eq!(
file_content[1], 0xB5,
"Zstd file should have 0xB5 as second byte"
);
}
_ => {}
}
// Verify the compressed file can be decompressed and content matches original data
let store = test_store("/");
let schema = Arc::new(
CsvFormat {
compression_type,
..Default::default()
}
.infer_schema(&store, compressed_file_path_str)
.await
.unwrap(),
);
let csv_source = CsvSource::new(true, b',', b'"')
.with_schema(schema.clone())
.with_batch_size(8192);
let stream = file_to_stream(
&store,
compressed_file_path_str,
schema.clone(),
csv_source.clone(),
None,
compression_type,
)
.await
.unwrap();
let batches = stream.try_collect::<Vec<_>>().await.unwrap();
let pretty_print = arrow::util::pretty::pretty_format_batches(&batches)
.unwrap()
.to_string();
let expected = r#"+----+---------+-------+
| id | name | value |
+----+---------+-------+
| 1 | Alice | 10.5 |
| 2 | Bob | 20.3 |
| 3 | Charlie | 30.7 |
| 4 | David | 40.1 |
| 5 | Eva | 50.2 |
| 6 | Frank | 60.3 |
| 7 | Grace | 70.4 |
| 8 | Henry | 80.5 |
| 9 | Ivy | 90.6 |
+----+---------+-------+"#;
assert_eq!(expected, pretty_print);
}
}
}

View File

@@ -115,10 +115,17 @@ pub async fn stream_to_json(
path: &str,
threshold: usize,
concurrency: usize,
format: &JsonFormat,
) -> Result<usize> {
stream_to_file(stream, store, path, threshold, concurrency, |buffer| {
json::LineDelimitedWriter::new(buffer)
})
stream_to_file(
stream,
store,
path,
threshold,
concurrency,
format.compression_type,
json::LineDelimitedWriter::new,
)
.await
}
@@ -130,10 +137,21 @@ impl DfRecordBatchEncoder for json::Writer<SharedBuffer, LineDelimited> {
#[cfg(test)]
mod tests {
use std::sync::Arc;
use common_recordbatch::adapter::DfRecordBatchStreamAdapter;
use common_recordbatch::{RecordBatch, RecordBatches};
use common_test_util::find_workspace_path;
use datafusion::datasource::physical_plan::{FileSource, JsonSource};
use datatypes::prelude::ConcreteDataType;
use datatypes::schema::{ColumnSchema, Schema};
use datatypes::vectors::{Float64Vector, StringVector, UInt32Vector, VectorRef};
use futures::TryStreamExt;
use super::*;
use crate::file_format::{FORMAT_COMPRESSION_TYPE, FORMAT_SCHEMA_INFER_MAX_RECORD, FileFormat};
use crate::file_format::{
FORMAT_COMPRESSION_TYPE, FORMAT_SCHEMA_INFER_MAX_RECORD, FileFormat, file_to_stream,
};
use crate::test_util::{format_schema, test_store};
fn test_data_root() -> String {
@@ -203,4 +221,165 @@ mod tests {
}
);
}
#[tokio::test]
async fn test_compressed_json() {
// Create test data
let column_schemas = vec![
ColumnSchema::new("id", ConcreteDataType::uint32_datatype(), false),
ColumnSchema::new("name", ConcreteDataType::string_datatype(), false),
ColumnSchema::new("value", ConcreteDataType::float64_datatype(), false),
];
let schema = Arc::new(Schema::new(column_schemas));
// Create multiple record batches with different data
let batch1_columns: Vec<VectorRef> = vec![
Arc::new(UInt32Vector::from_slice(vec![1, 2, 3])),
Arc::new(StringVector::from(vec!["Alice", "Bob", "Charlie"])),
Arc::new(Float64Vector::from_slice(vec![10.5, 20.3, 30.7])),
];
let batch1 = RecordBatch::new(schema.clone(), batch1_columns).unwrap();
let batch2_columns: Vec<VectorRef> = vec![
Arc::new(UInt32Vector::from_slice(vec![4, 5, 6])),
Arc::new(StringVector::from(vec!["David", "Eva", "Frank"])),
Arc::new(Float64Vector::from_slice(vec![40.1, 50.2, 60.3])),
];
let batch2 = RecordBatch::new(schema.clone(), batch2_columns).unwrap();
let batch3_columns: Vec<VectorRef> = vec![
Arc::new(UInt32Vector::from_slice(vec![7, 8, 9])),
Arc::new(StringVector::from(vec!["Grace", "Henry", "Ivy"])),
Arc::new(Float64Vector::from_slice(vec![70.4, 80.5, 90.6])),
];
let batch3 = RecordBatch::new(schema.clone(), batch3_columns).unwrap();
// Combine all batches into a RecordBatches collection
let recordbatches = RecordBatches::try_new(schema, vec![batch1, batch2, batch3]).unwrap();
// Test with different compression types
let compression_types = vec![
CompressionType::Gzip,
CompressionType::Bzip2,
CompressionType::Xz,
CompressionType::Zstd,
];
// Create a temporary file path
let temp_dir = common_test_util::temp_dir::create_temp_dir("test_compressed_json");
for compression_type in compression_types {
let format = JsonFormat {
compression_type,
..JsonFormat::default()
};
let compressed_file_name =
format!("test_compressed_json.{}", compression_type.file_extension());
let compressed_file_path = temp_dir.path().join(&compressed_file_name);
let compressed_file_path_str = compressed_file_path.to_str().unwrap();
// Create a simple file store for testing
let store = test_store("/");
// Export JSON with compression
let rows = stream_to_json(
Box::pin(DfRecordBatchStreamAdapter::new(recordbatches.as_stream())),
store,
compressed_file_path_str,
1024,
1,
&format,
)
.await
.unwrap();
assert_eq!(rows, 9);
// Verify compressed file was created and has content
assert!(compressed_file_path.exists());
let file_size = std::fs::metadata(&compressed_file_path).unwrap().len();
assert!(file_size > 0);
// Verify the file is actually compressed
let file_content = std::fs::read(&compressed_file_path).unwrap();
// Compressed files should not start with '{' (a JSON character);
// they should start with the compression magic bytes.
match compression_type {
CompressionType::Gzip => {
// Gzip magic bytes: 0x1f 0x8b
assert_eq!(file_content[0], 0x1f, "Gzip file should start with 0x1f");
assert_eq!(
file_content[1], 0x8b,
"Gzip file should have 0x8b as second byte"
);
}
CompressionType::Bzip2 => {
// Bzip2 magic bytes: 'BZ'
assert_eq!(file_content[0], b'B', "Bzip2 file should start with 'B'");
assert_eq!(
file_content[1], b'Z',
"Bzip2 file should have 'Z' as second byte"
);
}
CompressionType::Xz => {
// XZ magic bytes: 0xFD '7zXZ'
assert_eq!(file_content[0], 0xFD, "XZ file should start with 0xFD");
}
CompressionType::Zstd => {
// Zstd magic bytes: 0x28 0xB5 0x2F 0xFD
assert_eq!(file_content[0], 0x28, "Zstd file should start with 0x28");
assert_eq!(
file_content[1], 0xB5,
"Zstd file should have 0xB5 as second byte"
);
}
_ => {}
}
// Verify the compressed file can be decompressed and content matches original data
let store = test_store("/");
let schema = Arc::new(
JsonFormat {
compression_type,
..Default::default()
}
.infer_schema(&store, compressed_file_path_str)
.await
.unwrap(),
);
let json_source = JsonSource::new()
.with_schema(schema.clone())
.with_batch_size(8192);
let stream = file_to_stream(
&store,
compressed_file_path_str,
schema.clone(),
json_source.clone(),
None,
compression_type,
)
.await
.unwrap();
let batches = stream.try_collect::<Vec<_>>().await.unwrap();
let pretty_print = arrow::util::pretty::pretty_format_batches(&batches)
.unwrap()
.to_string();
let expected = r#"+----+---------+-------+
| id | name | value |
+----+---------+-------+
| 1 | Alice | 10.5 |
| 2 | Bob | 20.3 |
| 3 | Charlie | 30.7 |
| 4 | David | 40.1 |
| 5 | Eva | 50.2 |
| 6 | Frank | 60.3 |
| 7 | Grace | 70.4 |
| 8 | Henry | 80.5 |
| 9 | Ivy | 90.6 |
+----+---------+-------+"#;
assert_eq!(expected, pretty_print);
}
}
}

View File

@@ -16,6 +16,7 @@
#![feature(type_alias_impl_trait)]
pub mod buffered_writer;
pub mod compressed_writer;
pub mod compression;
pub mod error;
pub mod file_format;

View File

@@ -28,7 +28,7 @@ use object_store::ObjectStore;
use object_store::services::Fs;
use crate::file_format::csv::{CsvFormat, stream_to_csv};
use crate::file_format::json::stream_to_json;
use crate::file_format::json::{JsonFormat, stream_to_json};
use crate::test_util;
pub const TEST_BATCH_SIZE: usize = 100;
@@ -122,13 +122,16 @@ pub async fn setup_stream_to_json_test(origin_path: &str, threshold: impl Fn(usi
let output_path = format!("{}/{}", dir.path().display(), "output");
let json_format = JsonFormat::default();
assert!(
stream_to_json(
Box::pin(stream),
tmp_store.clone(),
&output_path,
threshold(size),
8
8,
&json_format,
)
.await
.is_ok()

View File

@@ -97,9 +97,9 @@ pub trait Event: Send + Sync + Debug {
vec![]
}
/// Add the extra row to the event with the default row.
fn extra_row(&self) -> Result<Row> {
Ok(Row { values: vec![] })
/// Returns the extra rows for the event; the default is a single empty row.
fn extra_rows(&self) -> Result<Vec<Row>> {
Ok(vec![Row { values: vec![] }])
}
/// Returns the event as any type.
@@ -159,15 +159,17 @@ pub fn build_row_inserts_request(events: &[&Box<dyn Event>]) -> Result<RowInsert
let mut rows: Vec<Row> = Vec::with_capacity(events.len());
for event in events {
let extra_row = event.extra_row()?;
let mut values = Vec::with_capacity(3 + extra_row.values.len());
values.extend([
ValueData::StringValue(event.event_type().to_string()).into(),
ValueData::BinaryValue(event.json_payload()?.into_bytes()).into(),
ValueData::TimestampNanosecondValue(event.timestamp().value()).into(),
]);
values.extend(extra_row.values);
rows.push(Row { values });
let extra_rows = event.extra_rows()?;
for extra_row in extra_rows {
let mut values = Vec::with_capacity(3 + extra_row.values.len());
values.extend([
ValueData::StringValue(event.event_type().to_string()).into(),
ValueData::BinaryValue(event.json_payload()?.into_bytes()).into(),
ValueData::TimestampNanosecondValue(event.timestamp().value()).into(),
]);
values.extend(extra_row.values);
rows.push(Row { values });
}
}
Ok(RowInsertRequests {

View File

@@ -107,8 +107,8 @@ impl Event for SlowQueryEvent {
]
}
fn extra_row(&self) -> Result<Row> {
Ok(Row {
fn extra_rows(&self) -> Result<Vec<Row>> {
Ok(vec![Row {
values: vec![
ValueData::U64Value(self.cost).into(),
ValueData::U64Value(self.threshold).into(),
@@ -119,7 +119,7 @@ impl Event for SlowQueryEvent {
ValueData::TimestampMillisecondValue(self.promql_start.unwrap_or(0)).into(),
ValueData::TimestampMillisecondValue(self.promql_end.unwrap_or(0)).into(),
],
})
}])
}
fn json_payload(&self) -> Result<String> {

View File

@@ -47,6 +47,7 @@ h3o = { version = "0.6", optional = true }
hyperloglogplus = "0.4"
jsonb.workspace = true
memchr = "2.7"
mito-codec.workspace = true
nalgebra.workspace = true
num = "0.4"
num-traits = "0.2"

View File

@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
mod build_index_table;
mod flush_compact_region;
mod flush_compact_table;
mod migrate_region;
@@ -26,6 +27,7 @@ use reconcile_catalog::ReconcileCatalogFunction;
use reconcile_database::ReconcileDatabaseFunction;
use reconcile_table::ReconcileTableFunction;
use crate::admin::build_index_table::BuildIndexFunction;
use crate::flush_flow::FlushFlowFunction;
use crate::function_registry::FunctionRegistry;
@@ -40,6 +42,7 @@ impl AdminFunction {
registry.register(CompactRegionFunction::factory());
registry.register(FlushTableFunction::factory());
registry.register(CompactTableFunction::factory());
registry.register(BuildIndexFunction::factory());
registry.register(FlushFlowFunction::factory());
registry.register(ReconcileCatalogFunction::factory());
registry.register(ReconcileDatabaseFunction::factory());

View File

@@ -0,0 +1,80 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use arrow::datatypes::DataType as ArrowDataType;
use common_error::ext::BoxedError;
use common_macro::admin_fn;
use common_query::error::{
InvalidFuncArgsSnafu, MissingTableMutationHandlerSnafu, Result, TableMutationSnafu,
UnsupportedInputDataTypeSnafu,
};
use datafusion_expr::{Signature, Volatility};
use datatypes::prelude::*;
use session::context::QueryContextRef;
use session::table_name::table_name_to_full_name;
use snafu::{ResultExt, ensure};
use table::requests::BuildIndexTableRequest;
use crate::handlers::TableMutationHandlerRef;
#[admin_fn(
name = BuildIndexFunction,
display_name = build_index,
sig_fn = build_index_signature,
ret = uint64
)]
pub(crate) async fn build_index(
table_mutation_handler: &TableMutationHandlerRef,
query_ctx: &QueryContextRef,
params: &[ValueRef<'_>],
) -> Result<Value> {
ensure!(
params.len() == 1,
InvalidFuncArgsSnafu {
err_msg: format!(
"The length of the args is not correct, expect 1, have: {}",
params.len()
),
}
);
let ValueRef::String(table_name) = params[0] else {
return UnsupportedInputDataTypeSnafu {
function: "build_index",
datatypes: params.iter().map(|v| v.data_type()).collect::<Vec<_>>(),
}
.fail();
};
let (catalog_name, schema_name, table_name) = table_name_to_full_name(table_name, query_ctx)
.map_err(BoxedError::new)
.context(TableMutationSnafu)?;
let affected_rows = table_mutation_handler
.build_index(
BuildIndexTableRequest {
catalog_name,
schema_name,
table_name,
},
query_ctx.clone(),
)
.await?;
Ok(Value::from(affected_rows as u64))
}
fn build_index_signature() -> Signature {
Signature::uniform(1, vec![ArrowDataType::Utf8], Volatility::Immutable)
}

View File

@@ -34,6 +34,7 @@ use crate::scalars::json::JsonFunction;
use crate::scalars::matches::MatchesFunction;
use crate::scalars::matches_term::MatchesTermFunction;
use crate::scalars::math::MathFunction;
use crate::scalars::primary_key::DecodePrimaryKeyFunction;
use crate::scalars::string::register_string_functions;
use crate::scalars::timestamp::TimestampFunction;
use crate::scalars::uddsketch_calc::UddSketchCalcFunction;
@@ -143,6 +144,7 @@ pub static FUNCTION_REGISTRY: LazyLock<Arc<FunctionRegistry>> = LazyLock::new(||
ExpressionFunction::register(&function_registry);
UddSketchCalcFunction::register(&function_registry);
HllCalcFunction::register(&function_registry);
DecodePrimaryKeyFunction::register(&function_registry);
// Full text search function
MatchesFunction::register(&function_registry);

View File

@@ -25,7 +25,9 @@ use common_query::Output;
use common_query::error::Result;
use session::context::QueryContextRef;
use store_api::storage::RegionId;
use table::requests::{CompactTableRequest, DeleteRequest, FlushTableRequest, InsertRequest};
use table::requests::{
BuildIndexTableRequest, CompactTableRequest, DeleteRequest, FlushTableRequest, InsertRequest,
};
/// A trait for handling table mutations in `QueryEngine`.
#[async_trait]
@@ -47,6 +49,13 @@ pub trait TableMutationHandler: Send + Sync {
ctx: QueryContextRef,
) -> Result<AffectedRows>;
/// Trigger an index build task for the table.
async fn build_index(
&self,
request: BuildIndexTableRequest,
ctx: QueryContextRef,
) -> Result<AffectedRows>;
/// Trigger a flush task for a table region.
async fn flush_region(&self, region_id: RegionId, ctx: QueryContextRef)
-> Result<AffectedRows>;

View File

@@ -20,6 +20,7 @@ pub mod json;
pub mod matches;
pub mod matches_term;
pub mod math;
pub mod primary_key;
pub(crate) mod string;
pub mod vector;

View File

@@ -19,7 +19,7 @@ mod json_path_match;
mod json_to_string;
mod parse_json;
use json_get::{JsonGetBool, JsonGetFloat, JsonGetInt, JsonGetString};
use json_get::{JsonGetBool, JsonGetFloat, JsonGetInt, JsonGetObject, JsonGetString};
use json_is::{
JsonIsArray, JsonIsBool, JsonIsFloat, JsonIsInt, JsonIsNull, JsonIsObject, JsonIsString,
};
@@ -39,6 +39,7 @@ impl JsonFunction {
registry.register_scalar(JsonGetFloat::default());
registry.register_scalar(JsonGetString::default());
registry.register_scalar(JsonGetBool::default());
registry.register_scalar(JsonGetObject::default());
registry.register_scalar(JsonIsNull::default());
registry.register_scalar(JsonIsInt::default());

View File

@@ -16,10 +16,13 @@ use std::fmt::{self, Display};
use std::sync::Arc;
use arrow::compute;
use datafusion_common::DataFusionError;
use datafusion_common::arrow::array::{
Array, AsArray, BooleanBuilder, Float64Builder, Int64Builder, StringViewBuilder,
Array, AsArray, BinaryViewBuilder, BooleanBuilder, Float64Builder, Int64Builder,
StringViewBuilder,
};
use datafusion_common::arrow::datatypes::DataType;
use datafusion_expr::type_coercion::aggregates::STRINGS;
use datafusion_expr::{ColumnarValue, ScalarFunctionArgs, Signature};
use crate::function::{Function, extract_args};
@@ -212,13 +215,92 @@ impl Display for JsonGetString {
}
}
/// Get the object from JSON value by path.
pub(super) struct JsonGetObject {
signature: Signature,
}
impl JsonGetObject {
const NAME: &'static str = "json_get_object";
}
impl Default for JsonGetObject {
fn default() -> Self {
Self {
signature: helper::one_of_sigs2(
vec![
DataType::Binary,
DataType::LargeBinary,
DataType::BinaryView,
],
STRINGS.to_vec(),
),
}
}
}
impl Function for JsonGetObject {
fn name(&self) -> &str {
Self::NAME
}
fn return_type(&self, _: &[DataType]) -> datafusion_common::Result<DataType> {
Ok(DataType::BinaryView)
}
fn signature(&self) -> &Signature {
&self.signature
}
fn invoke_with_args(
&self,
args: ScalarFunctionArgs,
) -> datafusion_common::Result<ColumnarValue> {
let [arg0, arg1] = extract_args(self.name(), &args)?;
let arg0 = compute::cast(&arg0, &DataType::BinaryView)?;
let jsons = arg0.as_binary_view();
let arg1 = compute::cast(&arg1, &DataType::Utf8View)?;
let paths = arg1.as_string_view();
let len = jsons.len();
let mut builder = BinaryViewBuilder::with_capacity(len);
for i in 0..len {
let json = jsons.is_valid(i).then(|| jsons.value(i));
let path = paths.is_valid(i).then(|| paths.value(i));
let result = if let (Some(json), Some(path)) = (json, path) {
let result = jsonb::jsonpath::parse_json_path(path.as_bytes()).and_then(|path| {
let mut data = Vec::new();
let mut offset = Vec::new();
jsonb::get_by_path(json, path, &mut data, &mut offset)
.map(|()| jsonb::is_object(&data).then_some(data))
});
result.map_err(|e| DataFusionError::Execution(e.to_string()))?
} else {
None
};
builder.append_option(result);
}
Ok(ColumnarValue::Array(Arc::new(builder.finish())))
}
}
impl Display for JsonGetObject {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", Self::NAME.to_ascii_uppercase())
}
}
#[cfg(test)]
mod tests {
use std::sync::Arc;
use arrow_schema::Field;
use datafusion_common::arrow::array::{BinaryArray, StringArray};
use datafusion_common::ScalarValue;
use datafusion_common::arrow::array::{BinaryArray, BinaryViewArray, StringArray};
use datafusion_common::arrow::datatypes::{Float64Type, Int64Type};
use datatypes::types::parse_string_to_jsonb;
use super::*;
@@ -425,4 +507,49 @@ mod tests {
assert_eq!(*gt, result);
}
}
#[test]
fn test_json_get_object() -> datafusion_common::Result<()> {
let udf = JsonGetObject::default();
assert_eq!("json_get_object", udf.name());
assert_eq!(
DataType::BinaryView,
udf.return_type(&[DataType::BinaryView, DataType::Utf8View])?
);
let json_value = parse_string_to_jsonb(r#"{"a": {"b": {"c": {"d": 1}}}}"#).unwrap();
let paths = vec!["$", "$.a", "$.a.b", "$.a.b.c", "$.a.b.c.d", "$.e", "$.a.e"];
let number_rows = paths.len();
let args = ScalarFunctionArgs {
args: vec![
ColumnarValue::Scalar(ScalarValue::Binary(Some(json_value))),
ColumnarValue::Array(Arc::new(StringArray::from_iter_values(paths))),
],
arg_fields: vec![],
number_rows,
return_field: Arc::new(Field::new("x", DataType::Binary, false)),
config_options: Arc::new(Default::default()),
};
let result = udf
.invoke_with_args(args)
.and_then(|x| x.to_array(number_rows))?;
let result = result.as_binary_view();
let expected = &BinaryViewArray::from_iter(
vec![
Some(r#"{"a": {"b": {"c": {"d": 1}}}}"#),
Some(r#"{"b": {"c": {"d": 1}}}"#),
Some(r#"{"c": {"d": 1}}"#),
Some(r#"{"d": 1}"#),
None,
None,
None,
]
.into_iter()
.map(|x| x.and_then(|s| parse_string_to_jsonb(s).ok())),
);
assert_eq!(result, expected);
Ok(())
}
}
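A minimal sketch (not part of this diff) of the jsonb calls the new json_get_object UDF is built on, with the arrow array plumbing stripped away. The helper name is hypothetical and error handling is collapsed to Option; parse_string_to_jsonb is the same helper the tests above use.

use datatypes::types::parse_string_to_jsonb;

/// Hypothetical helper: returns the jsonb-encoded value at `path` when it exists
/// and is an object, mirroring the per-row logic in JsonGetObject::invoke_with_args.
fn get_object_at(json_text: &str, path: &str) -> Option<Vec<u8>> {
    let json = parse_string_to_jsonb(json_text).ok()?;
    let path = jsonb::jsonpath::parse_json_path(path.as_bytes()).ok()?;
    let (mut data, mut offset) = (Vec::new(), Vec::new());
    jsonb::get_by_path(&json, path, &mut data, &mut offset).ok()?;
    jsonb::is_object(&data).then_some(data)
}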

View File

@@ -32,7 +32,15 @@ impl Default for JsonToStringFunction {
fn default() -> Self {
Self {
// TODO(LFC): Use a more clear type here instead of "Binary" for Json input, once we have a "Json" type.
signature: Signature::exact(vec![DataType::Binary], Volatility::Immutable),
signature: Signature::uniform(
1,
vec![
DataType::Binary,
DataType::LargeBinary,
DataType::BinaryView,
],
Volatility::Immutable,
),
}
}
}
@@ -57,7 +65,8 @@ impl Function for JsonToStringFunction {
args: ScalarFunctionArgs,
) -> datafusion_common::Result<ColumnarValue> {
let [arg0] = extract_args(self.name(), &args)?;
let jsons = arg0.as_binary::<i32>();
let arg0 = arrow::compute::cast(&arg0, &DataType::BinaryView)?;
let jsons = arg0.as_binary_view();
let size = jsons.len();
let mut builder = StringViewBuilder::with_capacity(size);

View File

@@ -0,0 +1,521 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::HashMap;
use std::fmt::{self, Display};
use std::sync::Arc;
use datafusion_common::arrow::array::{
Array, ArrayRef, BinaryArray, BinaryViewArray, DictionaryArray, ListBuilder, StringBuilder,
};
use datafusion_common::arrow::datatypes::{DataType, Field};
use datafusion_common::{DataFusionError, ScalarValue};
use datafusion_expr::{ColumnarValue, ScalarFunctionArgs, Signature, Volatility};
use datatypes::arrow::datatypes::UInt32Type;
use datatypes::value::Value;
use mito_codec::row_converter::{
CompositeValues, PrimaryKeyCodec, SortField, build_primary_key_codec_with_fields,
};
use store_api::codec::PrimaryKeyEncoding;
use store_api::metadata::RegionMetadata;
use store_api::storage::ColumnId;
use store_api::storage::consts::{PRIMARY_KEY_COLUMN_NAME, ReservedColumnId};
use crate::function::{Function, extract_args};
use crate::function_registry::FunctionRegistry;
type NameValuePair = (String, Option<String>);
#[derive(Clone, Debug)]
pub(crate) struct DecodePrimaryKeyFunction {
signature: Signature,
}
const NAME: &str = "decode_primary_key";
const NULL_VALUE_LITERAL: &str = "null";
impl Default for DecodePrimaryKeyFunction {
fn default() -> Self {
Self {
signature: Signature::any(3, Volatility::Immutable),
}
}
}
impl DecodePrimaryKeyFunction {
pub fn register(registry: &FunctionRegistry) {
registry.register_scalar(Self::default());
}
fn return_data_type() -> DataType {
DataType::List(Arc::new(Field::new("item", DataType::Utf8, true)))
}
}
impl Function for DecodePrimaryKeyFunction {
fn name(&self) -> &str {
NAME
}
fn return_type(&self, _: &[DataType]) -> datafusion_common::Result<DataType> {
Ok(Self::return_data_type())
}
fn signature(&self) -> &Signature {
&self.signature
}
fn invoke_with_args(
&self,
args: ScalarFunctionArgs,
) -> datafusion_common::Result<ColumnarValue> {
let [encoded, _, _] = extract_args(self.name(), &args)?;
let number_rows = args.number_rows;
let encoding = parse_encoding(&args.args[1])?;
let metadata = parse_region_metadata(&args.args[2])?;
let codec = build_codec(&metadata, encoding);
let name_lookup: HashMap<_, _> = metadata
.column_metadatas
.iter()
.map(|c| (c.column_id, c.column_schema.name.clone()))
.collect();
let decoded_rows = decode_primary_keys(encoded, number_rows, codec.as_ref(), &name_lookup)?;
let array = build_list_array(&decoded_rows)?;
Ok(ColumnarValue::Array(array))
}
}
impl Display for DecodePrimaryKeyFunction {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "DECODE_PRIMARY_KEY")
}
}
fn parse_encoding(arg: &ColumnarValue) -> datafusion_common::Result<PrimaryKeyEncoding> {
let encoding = match arg {
ColumnarValue::Scalar(ScalarValue::Utf8(Some(v)))
| ColumnarValue::Scalar(ScalarValue::LargeUtf8(Some(v))) => v.as_str(),
ColumnarValue::Scalar(value) => {
return Err(DataFusionError::Execution(format!(
"encoding must be a string literal, got {value:?}"
)));
}
ColumnarValue::Array(_) => {
return Err(DataFusionError::Execution(
"encoding must be a scalar string".to_string(),
));
}
};
match encoding.to_ascii_lowercase().as_str() {
"dense" => Ok(PrimaryKeyEncoding::Dense),
"sparse" => Ok(PrimaryKeyEncoding::Sparse),
_ => Err(DataFusionError::Execution(format!(
"unsupported primary key encoding: {encoding}"
))),
}
}
fn build_codec(
metadata: &RegionMetadata,
encoding: PrimaryKeyEncoding,
) -> Arc<dyn PrimaryKeyCodec> {
let fields = metadata.primary_key_columns().map(|c| {
(
c.column_id,
SortField::new(c.column_schema.data_type.clone()),
)
});
build_primary_key_codec_with_fields(encoding, fields)
}
fn parse_region_metadata(arg: &ColumnarValue) -> datafusion_common::Result<RegionMetadata> {
let json = match arg {
ColumnarValue::Scalar(ScalarValue::Utf8(Some(v)))
| ColumnarValue::Scalar(ScalarValue::LargeUtf8(Some(v))) => v.as_str(),
ColumnarValue::Scalar(value) => {
return Err(DataFusionError::Execution(format!(
"region metadata must be a string literal, got {value:?}"
)));
}
ColumnarValue::Array(_) => {
return Err(DataFusionError::Execution(
"region metadata must be a scalar string".to_string(),
));
}
};
RegionMetadata::from_json(json)
.map_err(|e| DataFusionError::Execution(format!("failed to parse region metadata: {e:?}")))
}
fn decode_primary_keys(
encoded: ArrayRef,
number_rows: usize,
codec: &dyn PrimaryKeyCodec,
name_lookup: &HashMap<ColumnId, String>,
) -> datafusion_common::Result<Vec<Vec<NameValuePair>>> {
if let Some(dict) = encoded
.as_any()
.downcast_ref::<DictionaryArray<UInt32Type>>()
{
decode_dictionary(dict, number_rows, codec, name_lookup)
} else if let Some(array) = encoded.as_any().downcast_ref::<BinaryArray>() {
decode_binary_array(array, codec, name_lookup)
} else if let Some(array) = encoded.as_any().downcast_ref::<BinaryViewArray>() {
decode_binary_view_array(array, codec, name_lookup)
} else {
Err(DataFusionError::Execution(format!(
"column {PRIMARY_KEY_COLUMN_NAME} must be binary or dictionary(binary) array"
)))
}
}
fn decode_dictionary(
dict: &DictionaryArray<UInt32Type>,
number_rows: usize,
codec: &dyn PrimaryKeyCodec,
name_lookup: &HashMap<ColumnId, String>,
) -> datafusion_common::Result<Vec<Vec<NameValuePair>>> {
let values = dict
.values()
.as_any()
.downcast_ref::<BinaryArray>()
.ok_or_else(|| {
DataFusionError::Execution("primary key dictionary values are not binary".to_string())
})?;
let mut decoded_values = Vec::with_capacity(values.len());
for i in 0..values.len() {
let pk = values.value(i);
let pairs = decode_one(pk, codec, name_lookup)?;
decoded_values.push(pairs);
}
let mut rows = Vec::with_capacity(number_rows);
let keys = dict.keys();
for i in 0..number_rows {
let dict_index = keys.value(i) as usize;
rows.push(decoded_values[dict_index].clone());
}
Ok(rows)
}
fn decode_binary_array(
array: &BinaryArray,
codec: &dyn PrimaryKeyCodec,
name_lookup: &HashMap<ColumnId, String>,
) -> datafusion_common::Result<Vec<Vec<NameValuePair>>> {
(0..array.len())
.map(|i| decode_one(array.value(i), codec, name_lookup))
.collect()
}
fn decode_binary_view_array(
array: &BinaryViewArray,
codec: &dyn PrimaryKeyCodec,
name_lookup: &HashMap<ColumnId, String>,
) -> datafusion_common::Result<Vec<Vec<NameValuePair>>> {
(0..array.len())
.map(|i| decode_one(array.value(i), codec, name_lookup))
.collect()
}
fn decode_one(
pk: &[u8],
codec: &dyn PrimaryKeyCodec,
name_lookup: &HashMap<ColumnId, String>,
) -> datafusion_common::Result<Vec<NameValuePair>> {
let decoded = codec
.decode(pk)
.map_err(|e| DataFusionError::Execution(format!("failed to decode primary key: {e}")))?;
Ok(match decoded {
CompositeValues::Dense(values) => values
.into_iter()
.map(|(column_id, value)| (column_name(column_id, name_lookup), value_to_string(value)))
.collect(),
CompositeValues::Sparse(values) => {
let mut values: Vec<_> = values
.iter()
.map(|(column_id, value)| {
(
*column_id,
column_name(*column_id, name_lookup),
value_to_string(value.clone()),
)
})
.collect();
values.sort_by_key(|(column_id, _, _)| {
(ReservedColumnId::is_reserved(*column_id), *column_id)
});
values
.into_iter()
.map(|(_, name, value)| (name, value))
.collect()
}
})
}
fn column_name(column_id: ColumnId, name_lookup: &HashMap<ColumnId, String>) -> String {
if let Some(name) = name_lookup.get(&column_id) {
return name.clone();
}
if column_id == ReservedColumnId::table_id() {
return "__table_id".to_string();
}
if column_id == ReservedColumnId::tsid() {
return "__tsid".to_string();
}
column_id.to_string()
}
fn value_to_string(value: Value) -> Option<String> {
match value {
Value::Null => None,
_ => Some(value.to_string()),
}
}
fn build_list_array(rows: &[Vec<NameValuePair>]) -> datafusion_common::Result<ArrayRef> {
let mut builder = ListBuilder::new(StringBuilder::new());
for row in rows {
for (key, value) in row {
let value = value.as_deref().unwrap_or(NULL_VALUE_LITERAL);
builder.values().append_value(format!("{key} : {value}"));
}
builder.append(true);
}
Ok(Arc::new(builder.finish()))
}
#[cfg(test)]
mod tests {
use api::v1::SemanticType;
use datafusion_common::ScalarValue;
use datatypes::arrow::array::builder::BinaryDictionaryBuilder;
use datatypes::arrow::array::{BinaryArray, ListArray, StringArray};
use datatypes::arrow::datatypes::UInt32Type;
use datatypes::prelude::ConcreteDataType;
use datatypes::schema::ColumnSchema;
use datatypes::value::Value;
use mito_codec::row_converter::{
DensePrimaryKeyCodec, PrimaryKeyCodecExt, SortField, SparsePrimaryKeyCodec,
};
use store_api::codec::PrimaryKeyEncoding;
use store_api::metadata::{ColumnMetadata, RegionMetadataBuilder};
use store_api::storage::consts::ReservedColumnId;
use store_api::storage::{ColumnId, RegionId};
use super::*;
fn pk_field() -> Arc<Field> {
Arc::new(Field::new_dictionary(
PRIMARY_KEY_COLUMN_NAME,
DataType::UInt32,
DataType::Binary,
false,
))
}
fn region_metadata_json(
columns: &[(ColumnId, &str, ConcreteDataType)],
encoding: PrimaryKeyEncoding,
) -> String {
let mut builder = RegionMetadataBuilder::new(RegionId::new(1, 1));
builder.push_column_metadata(ColumnMetadata {
column_schema: ColumnSchema::new(
"ts",
ConcreteDataType::timestamp_millisecond_datatype(),
false,
),
semantic_type: SemanticType::Timestamp,
column_id: 100,
});
builder.primary_key_encoding(encoding);
for (id, name, ty) in columns {
builder.push_column_metadata(ColumnMetadata {
column_schema: ColumnSchema::new((*name).to_string(), ty.clone(), true),
semantic_type: SemanticType::Tag,
column_id: *id,
});
}
builder.primary_key(columns.iter().map(|(id, _, _)| *id).collect());
builder.build().unwrap().to_json().unwrap()
}
fn list_row(list: &ListArray, row_idx: usize) -> Vec<String> {
let values = list.value(row_idx);
let values = values.as_any().downcast_ref::<StringArray>().unwrap();
(0..values.len())
.map(|i| values.value(i).to_string())
.collect()
}
#[test]
fn test_decode_dense_primary_key() {
let columns = vec![
(0, "host", ConcreteDataType::string_datatype()),
(1, "core", ConcreteDataType::int64_datatype()),
];
let metadata_json = region_metadata_json(&columns, PrimaryKeyEncoding::Dense);
let codec = DensePrimaryKeyCodec::with_fields(
columns
.iter()
.map(|(id, _, ty)| (*id, SortField::new(ty.clone())))
.collect(),
);
let rows = vec![
vec![Value::from("a"), Value::from(1_i64)],
vec![Value::from("b"), Value::from(2_i64)],
vec![Value::from("a"), Value::from(1_i64)],
];
let mut builder = BinaryDictionaryBuilder::<UInt32Type>::new();
for row in &rows {
let encoded = codec.encode(row.iter().map(|v| v.as_value_ref())).unwrap();
builder.append(encoded.as_slice()).unwrap();
}
let dict_array: ArrayRef = Arc::new(builder.finish());
let args = ScalarFunctionArgs {
args: vec![
ColumnarValue::Array(dict_array),
ColumnarValue::Scalar(ScalarValue::Utf8(Some("dense".to_string()))),
ColumnarValue::Scalar(ScalarValue::Utf8(Some(metadata_json))),
],
arg_fields: vec![
pk_field(),
Arc::new(Field::new("encoding", DataType::Utf8, false)),
Arc::new(Field::new("region_metadata", DataType::Utf8, false)),
],
number_rows: 3,
return_field: Arc::new(Field::new(
"decoded",
DecodePrimaryKeyFunction::return_data_type(),
false,
)),
config_options: Default::default(),
};
let func = DecodePrimaryKeyFunction::default();
let result = func
.invoke_with_args(args)
.and_then(|v| v.to_array(3))
.unwrap();
let list = result.as_any().downcast_ref::<ListArray>().unwrap();
let expected = [
vec!["host : a".to_string(), "core : 1".to_string()],
vec!["host : b".to_string(), "core : 2".to_string()],
vec!["host : a".to_string(), "core : 1".to_string()],
];
for (row_idx, expected_row) in expected.iter().enumerate() {
assert_eq!(*expected_row, list_row(list, row_idx));
}
}
#[test]
fn test_decode_sparse_primary_key() {
let columns = vec![
(10, "k0", ConcreteDataType::string_datatype()),
(11, "k1", ConcreteDataType::string_datatype()),
];
let metadata_json = region_metadata_json(&columns, PrimaryKeyEncoding::Sparse);
let codec = SparsePrimaryKeyCodec::schemaless();
let rows = vec![
vec![
(ReservedColumnId::table_id(), Value::UInt32(1)),
(ReservedColumnId::tsid(), Value::UInt64(100)),
(10, Value::from("a")),
(11, Value::from("b")),
],
vec![
(ReservedColumnId::table_id(), Value::UInt32(1)),
(ReservedColumnId::tsid(), Value::UInt64(200)),
(10, Value::from("c")),
(11, Value::from("d")),
],
];
let mut encoded_values = Vec::with_capacity(rows.len());
for row in &rows {
let mut buf = Vec::new();
codec.encode_values(row, &mut buf).unwrap();
encoded_values.push(buf);
}
let pk_array: ArrayRef = Arc::new(BinaryArray::from_iter_values(
encoded_values.iter().cloned(),
));
let args = ScalarFunctionArgs {
args: vec![
ColumnarValue::Array(pk_array),
ColumnarValue::Scalar(ScalarValue::Utf8(Some("sparse".to_string()))),
ColumnarValue::Scalar(ScalarValue::Utf8(Some(metadata_json))),
],
arg_fields: vec![
pk_field(),
Arc::new(Field::new("encoding", DataType::Utf8, false)),
Arc::new(Field::new("region_metadata", DataType::Utf8, false)),
],
number_rows: rows.len(),
return_field: Arc::new(Field::new(
"decoded",
DecodePrimaryKeyFunction::return_data_type(),
false,
)),
config_options: Default::default(),
};
let func = DecodePrimaryKeyFunction::default();
let result = func
.invoke_with_args(args)
.and_then(|v| v.to_array(rows.len()))
.unwrap();
let list = result.as_any().downcast_ref::<ListArray>().unwrap();
let expected = [
vec![
"k0 : a".to_string(),
"k1 : b".to_string(),
"__tsid : 100".to_string(),
"__table_id : 1".to_string(),
],
vec![
"k0 : c".to_string(),
"k1 : d".to_string(),
"__tsid : 200".to_string(),
"__table_id : 1".to_string(),
],
];
for (row_idx, expected_row) in expected.iter().enumerate() {
assert_eq!(*expected_row, list_row(list, row_idx));
}
}
}

View File

@@ -44,7 +44,8 @@ impl FunctionState {
use session::context::QueryContextRef;
use store_api::storage::RegionId;
use table::requests::{
CompactTableRequest, DeleteRequest, FlushTableRequest, InsertRequest,
BuildIndexTableRequest, CompactTableRequest, DeleteRequest, FlushTableRequest,
InsertRequest,
};
use crate::handlers::{FlowServiceHandler, ProcedureServiceHandler, TableMutationHandler};
@@ -120,6 +121,14 @@ impl FunctionState {
Ok(ROWS)
}
async fn build_index(
&self,
_request: BuildIndexTableRequest,
_ctx: QueryContextRef,
) -> Result<AffectedRows> {
Ok(ROWS)
}
async fn flush_region(
&self,
_region_id: RegionId,

View File

@@ -12,8 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
mod version;
use std::sync::Arc;
use common_catalog::consts::{
@@ -27,7 +25,6 @@ use datafusion_expr::{ColumnarValue, ScalarFunctionArgs, Signature, TypeSignatur
use datafusion_pg_catalog::pg_catalog::{self, PgCatalogStaticTables};
use datatypes::arrow::datatypes::{DataType, Field};
use derive_more::derive::Display;
use version::PGVersionFunction;
use crate::function::{Function, find_function_context};
use crate::function_registry::FunctionRegistry;
@@ -183,7 +180,6 @@ impl PGCatalogFunction {
let static_tables =
Arc::new(PgCatalogStaticTables::try_new().expect("load postgres static tables"));
registry.register_scalar(PGVersionFunction::default());
registry.register_scalar(CurrentSchemaFunction::default());
registry.register_scalar(CurrentSchemasFunction::new());
registry.register_scalar(SessionUserFunction::default());

View File

@@ -1,61 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::fmt;
use datafusion::arrow::datatypes::DataType;
use datafusion_common::ScalarValue;
use datafusion_expr::{ColumnarValue, ScalarFunctionArgs, Signature, Volatility};
use crate::function::Function;
#[derive(Clone, Debug)]
pub(crate) struct PGVersionFunction {
signature: Signature,
}
impl Default for PGVersionFunction {
fn default() -> Self {
Self {
signature: Signature::exact(vec![], Volatility::Immutable),
}
}
}
impl fmt::Display for PGVersionFunction {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "pg_catalog.VERSION")
}
}
impl Function for PGVersionFunction {
fn name(&self) -> &str {
"pg_catalog.version"
}
fn return_type(&self, _: &[DataType]) -> datafusion_common::Result<DataType> {
Ok(DataType::Utf8View)
}
fn signature(&self) -> &Signature {
&self.signature
}
fn invoke_with_args(&self, _: ScalarFunctionArgs) -> datafusion_common::Result<ColumnarValue> {
Ok(ColumnarValue::Scalar(ScalarValue::Utf8View(Some(format!(
"PostgreSQL 16.3 GreptimeDB {}",
common_version::version()
)))))
}
}

View File

@@ -50,7 +50,7 @@ impl Function for VersionFunction {
)
}
Channel::Postgres => {
format!("16.3-greptimedb-{}", common_version::version())
format!("PostgreSQL 16.3 GreptimeDB {}", common_version::version())
}
_ => common_version::version().to_string(),
};

View File

@@ -23,6 +23,7 @@ datatypes.workspace = true
flatbuffers = "25.2"
hyper.workspace = true
lazy_static.workspace = true
notify.workspace = true
prost.workspace = true
serde.workspace = true
serde_json.workspace = true
@@ -37,6 +38,7 @@ vec1 = "1.12"
criterion = "0.4"
hyper-util = { workspace = true, features = ["tokio"] }
rand.workspace = true
tempfile.workspace = true
[[bench]]
name = "bench_main"

View File

@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use std::path::Path;
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, AtomicU64, AtomicUsize, Ordering};
use std::time::Duration;
@@ -30,6 +31,7 @@ use tonic::transport::{
use tower::Service;
use crate::error::{CreateChannelSnafu, InvalidConfigFilePathSnafu, Result};
use crate::reloadable_tls::{ReloadableTlsConfig, TlsConfigLoader, maybe_watch_tls_config};
const RECYCLE_CHANNEL_INTERVAL_SECS: u64 = 60;
pub const DEFAULT_GRPC_REQUEST_TIMEOUT_SECS: u64 = 10;
@@ -50,7 +52,7 @@ pub struct ChannelManager {
struct Inner {
id: u64,
config: ChannelConfig,
client_tls_config: Option<ClientTlsConfig>,
reloadable_client_tls_config: Option<Arc<ReloadableClientTlsConfig>>,
pool: Arc<Pool>,
channel_recycle_started: AtomicBool,
cancel: CancellationToken,
@@ -78,7 +80,7 @@ impl Inner {
Self {
id,
config,
client_tls_config: None,
reloadable_client_tls_config: None,
pool,
channel_recycle_started: AtomicBool::new(false),
cancel,
@@ -91,13 +93,17 @@ impl ChannelManager {
Default::default()
}
/// unified with config function that support tls config
/// use [`load_tls_config`] to load tls config from file system
pub fn with_config(config: ChannelConfig, tls_config: Option<ClientTlsConfig>) -> Self {
/// Create a `ChannelManager` with the given configuration and an optional TLS config.
///
/// Use [`load_client_tls_config`] to create the TLS configuration from a `ClientTlsOption`.
/// The TLS config supports both static mode (watch disabled) and dynamic reloading (watch enabled).
/// To enable dynamic reloading, **manually** invoke [`maybe_watch_client_tls_config`] after calling this method.
pub fn with_config(
config: ChannelConfig,
reloadable_tls_config: Option<Arc<ReloadableClientTlsConfig>>,
) -> Self {
let mut inner = Inner::with_config(config.clone());
if let Some(tls_config) = tls_config {
inner.client_tls_config = Some(tls_config);
}
inner.reloadable_client_tls_config = reloadable_tls_config;
Self {
inner: Arc::new(inner),
}
@@ -172,8 +178,21 @@ impl ChannelManager {
self.pool().retain_channel(f);
}
/// Clear all channels to force reconnection.
/// This should be called when TLS configuration changes to ensure new connections use updated certificates.
pub fn clear_all_channels(&self) {
self.pool().retain_channel(|_, _| false);
}
fn build_endpoint(&self, addr: &str) -> Result<Endpoint> {
let http_prefix = if self.inner.client_tls_config.is_some() {
// Get the latest TLS config from reloadable config (which handles both static and dynamic cases)
let tls_config = self
.inner
.reloadable_client_tls_config
.as_ref()
.and_then(|c| c.get_config());
let http_prefix = if tls_config.is_some() {
"https"
} else {
"http"
@@ -212,9 +231,9 @@ impl ChannelManager {
if let Some(enabled) = self.config().http2_adaptive_window {
endpoint = endpoint.http2_adaptive_window(enabled);
}
if let Some(tls_config) = &self.inner.client_tls_config {
if let Some(tls_config) = tls_config {
endpoint = endpoint
.tls_config(tls_config.clone())
.tls_config(tls_config)
.context(CreateChannelSnafu { addr })?;
}
@@ -248,7 +267,7 @@ impl ChannelManager {
}
}
pub fn load_tls_config(tls_option: Option<&ClientTlsOption>) -> Result<Option<ClientTlsConfig>> {
fn load_tls_config(tls_option: Option<&ClientTlsOption>) -> Result<Option<ClientTlsConfig>> {
let path_config = match tls_option {
Some(path_config) if path_config.enabled => path_config,
_ => return Ok(None),
@@ -276,13 +295,69 @@ pub fn load_tls_config(tls_option: Option<&ClientTlsOption>) -> Result<Option<Cl
Ok(Some(tls_config))
}
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
impl TlsConfigLoader<ClientTlsConfig> for ClientTlsOption {
type Error = crate::error::Error;
fn load(&self) -> Result<Option<ClientTlsConfig>> {
load_tls_config(Some(self))
}
fn watch_paths(&self) -> Vec<&Path> {
let mut paths = Vec::new();
if let Some(cert_path) = &self.client_cert_path {
paths.push(Path::new(cert_path.as_str()));
}
if let Some(key_path) = &self.client_key_path {
paths.push(Path::new(key_path.as_str()));
}
if let Some(ca_path) = &self.server_ca_cert_path {
paths.push(Path::new(ca_path.as_str()));
}
paths
}
fn watch_enabled(&self) -> bool {
self.enabled && self.watch
}
}
/// Type alias for client-side reloadable TLS config
pub type ReloadableClientTlsConfig = ReloadableTlsConfig<ClientTlsConfig, ClientTlsOption>;
/// Load client TLS configuration from `ClientTlsOption` and return a `ReloadableClientTlsConfig`.
/// This is the primary way to create TLS configuration for the ChannelManager.
pub fn load_client_tls_config(
tls_option: Option<ClientTlsOption>,
) -> Result<Option<Arc<ReloadableClientTlsConfig>>> {
match tls_option {
Some(option) if option.enabled => {
let reloadable = ReloadableClientTlsConfig::try_new(option)?;
Ok(Some(Arc::new(reloadable)))
}
_ => Ok(None),
}
}
pub fn maybe_watch_client_tls_config(
client_tls_config: Arc<ReloadableClientTlsConfig>,
channel_manager: ChannelManager,
) -> Result<()> {
maybe_watch_tls_config(client_tls_config, move || {
// Clear all existing channels to force reconnection with new certificates
channel_manager.clear_all_channels();
info!("Cleared all existing channels to use new TLS certificates.");
})
}
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize, Default)]
pub struct ClientTlsOption {
/// Whether to enable TLS for client.
pub enabled: bool,
pub server_ca_cert_path: Option<String>,
pub client_cert_path: Option<String>,
pub client_key_path: Option<String>,
#[serde(default)]
pub watch: bool,
}
#[derive(Clone, Debug, PartialEq, Eq)]
@@ -602,6 +677,7 @@ mod tests {
server_ca_cert_path: Some("some_server_path".to_string()),
client_cert_path: Some("some_cert_path".to_string()),
client_key_path: Some("some_key_path".to_string()),
watch: false,
});
assert_eq!(
@@ -623,6 +699,7 @@ mod tests {
server_ca_cert_path: Some("some_server_path".to_string()),
client_cert_path: Some("some_cert_path".to_string()),
client_key_path: Some("some_key_path".to_string()),
watch: false,
}),
max_recv_message_size: DEFAULT_MAX_GRPC_RECV_MESSAGE_SIZE,
max_send_message_size: DEFAULT_MAX_GRPC_SEND_MESSAGE_SIZE,
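Pulling the pieces above together: a minimal sketch of the intended wiring, assuming certificate paths that exist on disk (placeholders below); the flow mirrors the reloadable-TLS integration test further down in this comparison.
use common_grpc::channel_manager::{
    ChannelConfig, ChannelManager, ClientTlsOption, load_client_tls_config,
    maybe_watch_client_tls_config,
};

fn build_manager() -> common_grpc::error::Result<ChannelManager> {
    // Placeholder paths; `watch: true` opts into dynamic certificate reloading.
    let tls_option = ClientTlsOption {
        enabled: true,
        server_ca_cert_path: Some("/etc/greptime/tls/ca.pem".to_string()),
        client_cert_path: Some("/etc/greptime/tls/client.pem".to_string()),
        client_key_path: Some("/etc/greptime/tls/client.key".to_string()),
        watch: true,
    };
    // Load the certificates once; `None` means TLS stays disabled.
    let reloadable = load_client_tls_config(Some(tls_option))?;
    let manager = ChannelManager::with_config(ChannelConfig::new(), reloadable.clone());
    // Watching must be started manually after construction; on file changes it
    // reloads the config and clears pooled channels so new connections pick up
    // the updated certificates.
    if let Some(reloadable) = reloadable {
        maybe_watch_client_tls_config(reloadable, manager.clone())?;
    }
    Ok(manager)
}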

View File

@@ -38,6 +38,15 @@ pub enum Error {
location: Location,
},
#[snafu(display("Failed to watch config file path: {}", path))]
FileWatch {
path: String,
#[snafu(source)]
error: notify::Error,
#[snafu(implicit)]
location: Location,
},
#[snafu(display(
"Write type mismatch, column name: {}, expected: {}, actual: {}",
column_name,
@@ -108,6 +117,7 @@ impl ErrorExt for Error {
match self {
Error::InvalidTlsConfig { .. }
| Error::InvalidConfigFilePath { .. }
| Error::FileWatch { .. }
| Error::TypeMismatch { .. }
| Error::InvalidFlightData { .. }
| Error::NotSupported { .. } => StatusCode::InvalidArguments,

View File

@@ -16,6 +16,7 @@ pub mod channel_manager;
pub mod error;
pub mod flight;
pub mod precision;
pub mod reloadable_tls;
pub mod select;
pub use arrow_flight::FlightData;

View File

@@ -0,0 +1,163 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::path::Path;
use std::result::Result as StdResult;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::mpsc::channel;
use std::sync::{Arc, RwLock};
use common_telemetry::{error, info};
use notify::{EventKind, RecursiveMode, Watcher};
use snafu::ResultExt;
use crate::error::{FileWatchSnafu, Result};
/// A trait for loading TLS configuration from an option type
pub trait TlsConfigLoader<T> {
type Error;
/// Load the TLS configuration
fn load(&self) -> StdResult<Option<T>, Self::Error>;
/// Get paths to certificate files for watching
fn watch_paths(&self) -> Vec<&Path>;
/// Check if watching is enabled
fn watch_enabled(&self) -> bool;
}
/// A mutable container for TLS config
///
/// This struct allows dynamic reloading of certificates and keys.
/// It's generic over the config type (e.g., ServerConfig, ClientTlsConfig)
/// and the option type (e.g., TlsOption, ClientTlsOption).
#[derive(Debug)]
pub struct ReloadableTlsConfig<T, O>
where
O: TlsConfigLoader<T>,
{
tls_option: O,
config: RwLock<Option<T>>,
version: AtomicUsize,
}
impl<T, O> ReloadableTlsConfig<T, O>
where
O: TlsConfigLoader<T>,
{
/// Create config by loading configuration from the option type
pub fn try_new(tls_option: O) -> StdResult<Self, O::Error> {
let config = tls_option.load()?;
Ok(Self {
tls_option,
config: RwLock::new(config),
version: AtomicUsize::new(0),
})
}
/// Reread certificates and keys from file system.
pub fn reload(&self) -> StdResult<(), O::Error> {
let config = self.tls_option.load()?;
*self.config.write().unwrap() = config;
self.version.fetch_add(1, Ordering::Relaxed);
Ok(())
}
/// Get the config held by this container
pub fn get_config(&self) -> Option<T>
where
T: Clone,
{
self.config.read().unwrap().clone()
}
/// Get associated option
pub fn get_tls_option(&self) -> &O {
&self.tls_option
}
/// Get the version of the current config.
///
/// The version is automatically incremented each time the config is reloaded.
pub fn get_version(&self) -> usize {
self.version.load(Ordering::Relaxed)
}
}
/// Watch TLS configuration files for changes and reload automatically
///
/// This is a generic function that works with any ReloadableTlsConfig.
/// When changes are detected, it calls the provided callback after reloading.
///
/// T: the loaded TLS config type
/// O: the option type implementing [`TlsConfigLoader`]
/// F: the hook function to be called after reloading
/// E: the error type returned by the loading operation
pub fn maybe_watch_tls_config<T, O, F, E>(
tls_config: Arc<ReloadableTlsConfig<T, O>>,
on_reload: F,
) -> Result<()>
where
T: Send + Sync + 'static,
O: TlsConfigLoader<T, Error = E> + Send + Sync + 'static,
E: std::error::Error + Send + Sync + 'static,
F: Fn() + Send + 'static,
{
if !tls_config.get_tls_option().watch_enabled() {
return Ok(());
}
let tls_config_for_watcher = tls_config.clone();
let (tx, rx) = channel::<notify::Result<notify::Event>>();
let mut watcher = notify::recommended_watcher(tx).context(FileWatchSnafu { path: "<none>" })?;
// Watch all paths returned by the TlsConfigLoader
for path in tls_config.get_tls_option().watch_paths() {
watcher
.watch(path, RecursiveMode::NonRecursive)
.with_context(|_| FileWatchSnafu {
path: path.display().to_string(),
})?;
}
info!("Spawning background task for watching TLS cert/key file changes");
std::thread::spawn(move || {
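// Move the watcher into the thread and keep it alive; dropping it would stop file notifications.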
let _watcher = watcher;
loop {
match rx.recv() {
Ok(Ok(event)) => {
if let EventKind::Modify(_) | EventKind::Create(_) = event.kind {
info!("Detected TLS cert/key file change: {:?}", event);
if let Err(err) = tls_config_for_watcher.reload() {
error!("Failed to reload TLS config: {}", err);
} else {
info!("Reloaded TLS cert/key file successfully.");
on_reload();
}
}
}
Ok(Err(err)) => {
error!("Failed to watch TLS cert/key file: {}", err);
}
Err(err) => {
error!("TLS cert/key file watcher channel closed: {}", err);
}
}
}
});
Ok(())
}
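The loader trait keeps the reload machinery decoupled from any concrete TLS type. A sketch of plugging a hypothetical option type into it; `MyTlsOption` and `MyTlsConfig` are illustrative only, not part of this change.
use std::path::Path;
use std::sync::Arc;

use common_grpc::reloadable_tls::{ReloadableTlsConfig, TlsConfigLoader, maybe_watch_tls_config};

// Illustrative option type: where to find the files and whether to watch them.
struct MyTlsOption {
    cert_path: String,
    key_path: String,
    watch: bool,
}

// Illustrative config type produced by loading those files.
#[derive(Clone)]
struct MyTlsConfig {
    cert_pem: Vec<u8>,
    key_pem: Vec<u8>,
}

impl TlsConfigLoader<MyTlsConfig> for MyTlsOption {
    type Error = std::io::Error;

    fn load(&self) -> Result<Option<MyTlsConfig>, Self::Error> {
        Ok(Some(MyTlsConfig {
            cert_pem: std::fs::read(&self.cert_path)?,
            key_pem: std::fs::read(&self.key_path)?,
        }))
    }

    fn watch_paths(&self) -> Vec<&Path> {
        vec![Path::new(&self.cert_path), Path::new(&self.key_path)]
    }

    fn watch_enabled(&self) -> bool {
        self.watch
    }
}

fn watch_my_tls(option: MyTlsOption) -> common_grpc::error::Result<()> {
    let reloadable: Arc<ReloadableTlsConfig<MyTlsConfig, MyTlsOption>> =
        Arc::new(ReloadableTlsConfig::try_new(option).expect("load tls files"));
    // The hook runs after every successful reload, e.g. to drop cached connections.
    maybe_watch_tls_config(reloadable, || println!("TLS config reloaded"))
}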

View File

@@ -13,14 +13,15 @@
// limitations under the License.
use common_grpc::channel_manager::{
ChannelConfig, ChannelManager, ClientTlsOption, load_tls_config,
ChannelConfig, ChannelManager, ClientTlsOption, load_client_tls_config,
maybe_watch_client_tls_config,
};
#[tokio::test]
async fn test_mtls_config() {
// test no config
let config = ChannelConfig::new();
let re = load_tls_config(config.client_tls.as_ref());
let re = load_client_tls_config(config.client_tls.clone());
assert!(re.is_ok());
assert!(re.unwrap().is_none());
@@ -30,9 +31,10 @@ async fn test_mtls_config() {
server_ca_cert_path: Some("tests/tls/wrong_ca.pem".to_string()),
client_cert_path: Some("tests/tls/wrong_client.pem".to_string()),
client_key_path: Some("tests/tls/wrong_client.key".to_string()),
watch: false,
});
let re = load_tls_config(config.client_tls.as_ref());
let re = load_client_tls_config(config.client_tls.clone());
assert!(re.is_err());
// test corrupted file content
@@ -41,9 +43,10 @@ async fn test_mtls_config() {
server_ca_cert_path: Some("tests/tls/ca.pem".to_string()),
client_cert_path: Some("tests/tls/client.pem".to_string()),
client_key_path: Some("tests/tls/corrupted".to_string()),
watch: false,
});
let tls_config = load_tls_config(config.client_tls.as_ref()).unwrap();
let tls_config = load_client_tls_config(config.client_tls.clone()).unwrap();
let re = ChannelManager::with_config(config, tls_config);
let re = re.get("127.0.0.1:0");
@@ -55,10 +58,112 @@ async fn test_mtls_config() {
server_ca_cert_path: Some("tests/tls/ca.pem".to_string()),
client_cert_path: Some("tests/tls/client.pem".to_string()),
client_key_path: Some("tests/tls/client.key".to_string()),
watch: false,
});
let tls_config = load_tls_config(config.client_tls.as_ref()).unwrap();
let tls_config = load_client_tls_config(config.client_tls.clone()).unwrap();
let re = ChannelManager::with_config(config, tls_config);
let re = re.get("127.0.0.1:0");
let _ = re.unwrap();
}
#[tokio::test]
async fn test_reloadable_client_tls_config() {
common_telemetry::init_default_ut_logging();
let dir = tempfile::tempdir().unwrap();
let cert_path = dir.path().join("client.pem");
let key_path = dir.path().join("client.key");
std::fs::copy("tests/tls/client.pem", &cert_path).expect("failed to copy cert to tmpdir");
std::fs::copy("tests/tls/client.key", &key_path).expect("failed to copy key to tmpdir");
assert!(std::fs::exists(&cert_path).unwrap());
assert!(std::fs::exists(&key_path).unwrap());
let client_tls_option = ClientTlsOption {
enabled: true,
server_ca_cert_path: Some("tests/tls/ca.pem".to_string()),
client_cert_path: Some(
cert_path
.clone()
.into_os_string()
.into_string()
.expect("failed to convert path to string"),
),
client_key_path: Some(
key_path
.clone()
.into_os_string()
.into_string()
.expect("failed to convert path to string"),
),
watch: true,
};
let reloadable_config = load_client_tls_config(Some(client_tls_option))
.expect("failed to load tls config")
.expect("tls config should be present");
let config = ChannelConfig::new();
let manager = ChannelManager::with_config(config, Some(reloadable_config.clone()));
maybe_watch_client_tls_config(reloadable_config.clone(), manager.clone())
.expect("failed to watch client config");
assert_eq!(0, reloadable_config.get_version());
assert!(reloadable_config.get_config().is_some());
// Create a channel to verify it gets cleared on reload
let _ = manager.get("127.0.0.1:0").expect("failed to get channel");
// Simulate file change by copying a different key file
let tmp_file = key_path.with_extension("tmp");
std::fs::copy("tests/tls/server.key", &tmp_file).expect("Failed to copy temp key file");
std::fs::rename(&tmp_file, &key_path).expect("Failed to rename temp key file");
const MAX_RETRIES: usize = 30;
let mut retries = 0;
let mut version_updated = false;
while retries < MAX_RETRIES {
if reloadable_config.get_version() > 0 {
version_updated = true;
break;
}
std::thread::sleep(std::time::Duration::from_millis(100));
retries += 1;
}
assert!(version_updated, "TLS config did not reload in time");
assert!(reloadable_config.get_version() > 0);
assert!(reloadable_config.get_config().is_some());
}
#[tokio::test]
async fn test_channel_manager_with_reloadable_tls() {
common_telemetry::init_default_ut_logging();
let client_tls_option = ClientTlsOption {
enabled: true,
server_ca_cert_path: Some("tests/tls/ca.pem".to_string()),
client_cert_path: Some("tests/tls/client.pem".to_string()),
client_key_path: Some("tests/tls/client.key".to_string()),
watch: false,
};
let reloadable_config = load_client_tls_config(Some(client_tls_option))
.expect("failed to load tls config")
.expect("tls config should be present");
let config = ChannelConfig::new();
let manager = ChannelManager::with_config(config, Some(reloadable_config.clone()));
// Test that we can get a channel
let channel = manager.get("127.0.0.1:0");
assert!(channel.is_ok());
// Test that config is properly set
assert_eq!(0, reloadable_config.get_version());
assert!(reloadable_config.get_config().is_some());
}

View File

@@ -12,8 +12,9 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use greptime_proto::v1::ColumnDataTypeExtension;
use greptime_proto::v1::column_data_type_extension::TypeExt;
use proc_macro2::TokenStream as TokenStream2;
use proc_macro2::{Span, TokenStream as TokenStream2};
use quote::quote;
use syn::spanned::Spanned;
use syn::{DeriveInput, Result};
@@ -69,57 +70,7 @@ fn impl_schema_method(fields: &[ParsedField<'_>]) -> Result<TokenStream2> {
let semantic_type_val = convert_semantic_type_to_proto_semantic_type(column_attribute.semantic_type) as i32;
let semantic_type = syn::LitInt::new(&semantic_type_val.to_string(), ident.span());
let extension = match extension {
Some(ext) => {
match ext.type_ext {
Some(TypeExt::DecimalType(ext)) => {
let precision = syn::LitInt::new(&ext.precision.to_string(), ident.span());
let scale = syn::LitInt::new(&ext.scale.to_string(), ident.span());
quote! {
Some(ColumnDataTypeExtension { type_ext: Some(TypeExt::DecimalType(DecimalTypeExtension { precision: #precision, scale: #scale })) })
}
}
Some(TypeExt::JsonType(ext)) => {
let json_type = syn::LitInt::new(&ext.to_string(), ident.span());
quote! {
Some(ColumnDataTypeExtension { type_ext: Some(TypeExt::JsonType(#json_type)) })
}
}
Some(TypeExt::VectorType(ext)) => {
let dim = syn::LitInt::new(&ext.dim.to_string(), ident.span());
quote! {
Some(ColumnDataTypeExtension { type_ext: Some(TypeExt::VectorType(VectorTypeExtension { dim: #dim })) })
}
}
// TODO(sunng87): revisit all these implementations
Some(TypeExt::ListType(ext)) => {
let item_type = syn::Ident::new(&ext.datatype.to_string(), ident.span());
quote! {
Some(ColumnDataTypeExtension { type_ext: Some(TypeExt::ListType(ListTypeExtension { item_type: #item_type })) })
}
}
Some(TypeExt::StructType(ext)) => {
let fields = ext.fields.iter().map(|field| {
let field_name = syn::Ident::new(&field.name.clone(), ident.span());
let field_type = syn::Ident::new(&field.datatype.to_string(), ident.span());
quote! {
StructField { name: #field_name, type_: #field_type }
}
}).collect::<Vec<_>>();
quote! {
Some(ColumnDataTypeExtension { type_ext: Some(TypeExt::StructType(StructTypeExtension { fields: [#(#fields),*] })) })
}
}
Some(TypeExt::JsonNativeType(ext)) => {
let inner = syn::Ident::new(&ext.datatype.to_string(), ident.span());
quote! {
Some(ColumnDataTypeExtension { type_ext: Some(TypeExt::JsonNativeType(JsonNativeTypeExtension { datatype: #inner })) })
}
}
None => {
quote! { None }
}
}
}
Some(ext) => column_data_type_extension_to_tokens(&ext, ident.span()),
None => quote! { None },
};
@@ -141,3 +92,125 @@ fn impl_schema_method(fields: &[ParsedField<'_>]) -> Result<TokenStream2> {
}
})
}
fn column_data_type_extension_to_tokens(
extension: &ColumnDataTypeExtension,
span: Span,
) -> TokenStream2 {
match extension.type_ext.as_ref() {
Some(TypeExt::DecimalType(ext)) => {
let precision = syn::LitInt::new(&ext.precision.to_string(), span);
let scale = syn::LitInt::new(&ext.scale.to_string(), span);
quote! {
Some(ColumnDataTypeExtension {
type_ext: Some(TypeExt::DecimalType(DecimalTypeExtension {
precision: #precision,
scale: #scale,
})),
})
}
}
Some(TypeExt::JsonType(ext)) => {
let json_type = syn::LitInt::new(&ext.to_string(), span);
quote! {
Some(ColumnDataTypeExtension {
type_ext: Some(TypeExt::JsonType(#json_type)),
})
}
}
Some(TypeExt::VectorType(ext)) => {
let dim = syn::LitInt::new(&ext.dim.to_string(), span);
quote! {
Some(ColumnDataTypeExtension {
type_ext: Some(TypeExt::VectorType(VectorTypeExtension { dim: #dim })),
})
}
}
Some(TypeExt::ListType(ext)) => {
let datatype = syn::LitInt::new(&ext.datatype.to_string(), span);
let datatype_extension = ext
.datatype_extension
.as_deref()
.map(|ext| column_data_type_extension_to_tokens(ext, span))
.unwrap_or_else(|| quote! { None });
quote! {
Some(ColumnDataTypeExtension {
type_ext: Some(TypeExt::ListType(Box::new(ListTypeExtension {
datatype: #datatype,
datatype_extension: #datatype_extension,
}))),
})
}
}
Some(TypeExt::StructType(ext)) => {
let fields = ext.fields.iter().map(|field| {
let field_name = &field.name;
let datatype = syn::LitInt::new(&field.datatype.to_string(), span);
let datatype_extension = field
.datatype_extension
.as_ref()
.map(|ext| column_data_type_extension_to_tokens(ext, span))
.unwrap_or_else(|| quote! { None });
quote! {
greptime_proto::v1::StructField {
name: #field_name.to_string(),
datatype: #datatype,
datatype_extension: #datatype_extension,
}
}
});
quote! {
Some(ColumnDataTypeExtension {
type_ext: Some(TypeExt::StructType(StructTypeExtension {
fields: vec![#(#fields),*],
})),
})
}
}
Some(TypeExt::JsonNativeType(ext)) => {
let inner = syn::LitInt::new(&ext.datatype.to_string(), span);
let datatype_extension = ext
.datatype_extension
.as_deref()
.map(|ext| column_data_type_extension_to_tokens(ext, span))
.unwrap_or_else(|| quote! { None });
quote! {
Some(ColumnDataTypeExtension {
type_ext: Some(TypeExt::JsonNativeType(Box::new(
JsonNativeTypeExtension {
datatype: #inner,
datatype_extension: #datatype_extension,
},
))),
})
}
}
Some(TypeExt::DictionaryType(ext)) => {
let key_datatype = syn::LitInt::new(&ext.key_datatype.to_string(), span);
let value_datatype = syn::LitInt::new(&ext.value_datatype.to_string(), span);
let key_datatype_extension = ext
.key_datatype_extension
.as_deref()
.map(|ext| column_data_type_extension_to_tokens(ext, span))
.unwrap_or_else(|| quote! { None });
let value_datatype_extension = ext
.value_datatype_extension
.as_deref()
.map(|ext| column_data_type_extension_to_tokens(ext, span))
.unwrap_or_else(|| quote! { None });
quote! {
Some(ColumnDataTypeExtension {
type_ext: Some(TypeExt::DictionaryType(Box::new(
DictionaryTypeExtension {
key_datatype: #key_datatype,
key_datatype_extension: #key_datatype_extension,
value_datatype: #value_datatype,
value_datatype_extension: #value_datatype_extension,
},
))),
})
}
}
None => quote! { None },
}
}
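For orientation, a rough sketch of the shape the helper emits for a nested extension; the exact datatype values and boxing follow whatever greptime_proto defines, so treat this as shape only.
// column_data_type_extension_to_tokens for a list-of-decimal extension produces, roughly, tokens for:
//
//     Some(ColumnDataTypeExtension {
//         type_ext: Some(TypeExt::ListType(Box::new(ListTypeExtension {
//             datatype: /* list item datatype as an integer literal */,
//             datatype_extension: /* recursive call: the DecimalTypeExtension tokens, or None */,
//         }))),
//     })
//
// Each nested `datatype_extension` recurses through the same helper, which is what lets
// list, struct, json-native and dictionary extensions nest arbitrarily.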

View File

@@ -309,5 +309,8 @@ pub(crate) fn convert_column_data_type_to_value_data_ident(
ColumnDataType::Vector => format_ident!("VectorValue"),
ColumnDataType::List => format_ident!("ListValue"),
ColumnDataType::Struct => format_ident!("StructValue"),
ColumnDataType::Dictionary => {
panic!("Dictionary data type is not supported in row macros yet")
}
}
}

View File

@@ -132,6 +132,8 @@ pub enum RegionManifestInfo {
Mito {
manifest_version: u64,
flushed_entry_id: u64,
/// Number of files removed in the manifest's `removed_files` field.
file_removed_cnt: u64,
},
Metric {
data_manifest_version: u64,
@@ -271,9 +273,11 @@ impl From<store_api::region_engine::RegionManifestInfo> for RegionManifestInfo {
store_api::region_engine::RegionManifestInfo::Mito {
manifest_version,
flushed_entry_id,
file_removed_cnt,
} => RegionManifestInfo::Mito {
manifest_version,
flushed_entry_id,
file_removed_cnt,
},
store_api::region_engine::RegionManifestInfo::Metric {
data_manifest_version,

View File

@@ -47,6 +47,9 @@ fn build_new_schema_value(
SetDatabaseOption::Ttl(ttl) => {
value.ttl = Some(*ttl);
}
SetDatabaseOption::Other(key, val) => {
value.extra_options.insert(key.clone(), val.clone());
}
}
}
}
@@ -54,6 +57,9 @@ fn build_new_schema_value(
for key in keys.0.iter() {
match key {
UnsetDatabaseOption::Ttl => value.ttl = None,
UnsetDatabaseOption::Other(key) => {
value.extra_options.remove(key);
}
}
}
}
@@ -234,4 +240,41 @@ mod tests {
build_new_schema_value(current_schema_value, &unset_ttl_alter_kind).unwrap();
assert_eq!(new_schema_value.ttl, None);
}
#[test]
fn test_build_new_schema_value_with_compaction_options() {
let set_compaction = AlterDatabaseKind::SetDatabaseOptions(SetDatabaseOptions(vec![
SetDatabaseOption::Other("compaction.type".to_string(), "twcs".to_string()),
SetDatabaseOption::Other("compaction.twcs.time_window".to_string(), "1d".to_string()),
]));
let current_schema_value = SchemaNameValue::default();
let new_schema_value =
build_new_schema_value(current_schema_value.clone(), &set_compaction).unwrap();
assert_eq!(
new_schema_value.extra_options.get("compaction.type"),
Some(&"twcs".to_string())
);
assert_eq!(
new_schema_value
.extra_options
.get("compaction.twcs.time_window"),
Some(&"1d".to_string())
);
let unset_compaction = AlterDatabaseKind::UnsetDatabaseOptions(UnsetDatabaseOptions(vec![
UnsetDatabaseOption::Other("compaction.type".to_string()),
]));
let new_schema_value = build_new_schema_value(new_schema_value, &unset_compaction).unwrap();
assert_eq!(new_schema_value.extra_options.get("compaction.type"), None);
assert_eq!(
new_schema_value
.extra_options
.get("compaction.twcs.time_window"),
Some(&"1d".to_string())
);
}
}

View File

@@ -182,7 +182,7 @@ fn alter_request_handler(_peer: Peer, request: RegionRequest) -> Result<RegionRe
let region_id = RegionId::from(req.region_id);
response.extensions.insert(
MANIFEST_INFO_EXTENSION_KEY.to_string(),
RegionManifestInfo::encode_list(&[(region_id, RegionManifestInfo::mito(1, 1))])
RegionManifestInfo::encode_list(&[(region_id, RegionManifestInfo::mito(1, 1, 0))])
.unwrap(),
);
response.extensions.insert(

View File

@@ -420,20 +420,25 @@ where
/// Instruction to get file references for specified regions.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct GetFileRefs {
/// List of region IDs to get file references for.
pub region_ids: Vec<RegionId>,
/// Region IDs whose file references should be collected from active (in-memory) `FileHandle`s.
pub query_regions: Vec<RegionId>,
/// Mapping from the source region ID (where to read the manifest) to
/// the target region IDs (whose file references to look for).
/// Key: The region ID of the manifest.
/// Value: The list of region IDs to find references for in that manifest.
pub related_regions: HashMap<RegionId, Vec<RegionId>>,
}
impl Display for GetFileRefs {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
write!(f, "GetFileRefs(region_ids={:?})", self.region_ids)
write!(f, "GetFileRefs(region_ids={:?})", self.query_regions)
}
}
/// Instruction to trigger garbage collection for a region.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct GcRegions {
/// The region ID to perform GC on.
/// The region IDs to perform GC on. Only regions currently hosted on the target datanode can be garbage collected; regions not on that datanode will report errors.
pub regions: Vec<RegionId>,
/// The file references manifest containing temporary file references.
pub file_refs_manifest: FileRefsManifest,

View File

@@ -67,6 +67,7 @@ impl LeaderRegionManifestInfo {
RegionManifestInfo::Mito {
manifest_version,
flushed_entry_id,
file_removed_cnt: _,
} => LeaderRegionManifestInfo::Mito {
manifest_version,
flushed_entry_id,

View File

@@ -47,6 +47,7 @@ use serde_with::{DefaultOnNull, serde_as};
use session::context::{QueryContextBuilder, QueryContextRef};
use snafu::{OptionExt, ResultExt};
use table::metadata::{RawTableInfo, TableId};
use table::requests::validate_database_option;
use table::table_name::TableName;
use table::table_reference::TableReference;
@@ -1059,14 +1060,21 @@ impl TryFrom<PbOption> for SetDatabaseOption {
type Error = error::Error;
fn try_from(PbOption { key, value }: PbOption) -> Result<Self> {
match key.to_ascii_lowercase().as_str() {
let key_lower = key.to_ascii_lowercase();
match key_lower.as_str() {
TTL_KEY => {
let ttl = DatabaseTimeToLive::from_humantime_or_str(&value)
.map_err(|_| InvalidSetDatabaseOptionSnafu { key, value }.build())?;
Ok(SetDatabaseOption::Ttl(ttl))
}
_ => InvalidSetDatabaseOptionSnafu { key, value }.fail(),
_ => {
if validate_database_option(&key_lower) {
Ok(SetDatabaseOption::Other(key_lower, value))
} else {
InvalidSetDatabaseOptionSnafu { key, value }.fail()
}
}
}
}
}
@@ -1074,20 +1082,29 @@ impl TryFrom<PbOption> for SetDatabaseOption {
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)]
pub enum SetDatabaseOption {
Ttl(DatabaseTimeToLive),
Other(String, String),
}
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)]
pub enum UnsetDatabaseOption {
Ttl,
Other(String),
}
impl TryFrom<&str> for UnsetDatabaseOption {
type Error = error::Error;
fn try_from(key: &str) -> Result<Self> {
match key.to_ascii_lowercase().as_str() {
let key_lower = key.to_ascii_lowercase();
match key_lower.as_str() {
TTL_KEY => Ok(UnsetDatabaseOption::Ttl),
_ => InvalidUnsetDatabaseOptionSnafu { key }.fail(),
_ => {
if validate_database_option(&key_lower) {
Ok(UnsetDatabaseOption::Other(key_lower))
} else {
InvalidUnsetDatabaseOptionSnafu { key }.fail()
}
}
}
}
}
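A unit-test-style fragment for the relaxed Set conversion shown earlier in this file's diff, written as if inside the same module so `PbOption` and `SetDatabaseOption` are already in scope; it assumes `validate_database_option` accepts `compaction.*` keys (as exercised by the schema-value test above) and rejects unknown ones.
#[test]
fn set_database_option_accepts_validated_keys() {
    // Keys are lowercased before validation, so mixed-case input still maps to `Other`.
    let opt = SetDatabaseOption::try_from(PbOption {
        key: "Compaction.Type".to_string(),
        value: "twcs".to_string(),
    })
    .unwrap();
    assert_eq!(
        SetDatabaseOption::Other("compaction.type".to_string(), "twcs".to_string()),
        opt
    );

    // Assumption: a key outside the allowed set is still rejected.
    assert!(
        SetDatabaseOption::try_from(PbOption {
            key: "definitely_not_an_option".to_string(),
            value: "x".to_string(),
        })
        .is_err()
    );
}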

View File

@@ -246,6 +246,14 @@ pub enum Error {
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Loader for {type_name} is not implemented: {reason}"))]
ProcedureLoaderNotImplemented {
#[snafu(implicit)]
location: Location,
type_name: String,
reason: String,
},
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -266,7 +274,8 @@ impl ErrorExt for Error {
Error::ToJson { .. }
| Error::DeleteState { .. }
| Error::FromJson { .. }
| Error::WaitWatcher { .. } => StatusCode::Internal,
| Error::WaitWatcher { .. }
| Error::ProcedureLoaderNotImplemented { .. } => StatusCode::Internal,
Error::RetryTimesExceeded { .. }
| Error::RollbackTimesExceeded { .. }

View File

@@ -92,25 +92,96 @@ impl Event for ProcedureEvent {
schema
}
fn extra_row(&self) -> Result<Row> {
let error_str = match &self.state {
ProcedureState::Failed { error } => format!("{:?}", error),
ProcedureState::PrepareRollback { error } => format!("{:?}", error),
ProcedureState::RollingBack { error } => format!("{:?}", error),
ProcedureState::Retrying { error } => format!("{:?}", error),
ProcedureState::Poisoned { error, .. } => format!("{:?}", error),
_ => "".to_string(),
};
let mut row = vec![
ValueData::StringValue(self.procedure_id.to_string()).into(),
ValueData::StringValue(self.state.as_str_name().to_string()).into(),
ValueData::StringValue(error_str).into(),
];
row.append(&mut self.internal_event.extra_row()?.values);
Ok(Row { values: row })
fn extra_rows(&self) -> Result<Vec<Row>> {
let mut internal_event_extra_rows = self.internal_event.extra_rows()?;
let mut rows = Vec::with_capacity(internal_event_extra_rows.len());
for internal_event_extra_row in internal_event_extra_rows.iter_mut() {
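// Prefix every row produced by the inner event with the shared procedure columns (id, state, error).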
let error_str = match &self.state {
ProcedureState::Failed { error } => format!("{:?}", error),
ProcedureState::PrepareRollback { error } => format!("{:?}", error),
ProcedureState::RollingBack { error } => format!("{:?}", error),
ProcedureState::Retrying { error } => format!("{:?}", error),
ProcedureState::Poisoned { error, .. } => format!("{:?}", error),
_ => "".to_string(),
};
let mut values = Vec::with_capacity(3 + internal_event_extra_row.values.len());
values.extend([
ValueData::StringValue(self.procedure_id.to_string()).into(),
ValueData::StringValue(self.state.as_str_name().to_string()).into(),
ValueData::StringValue(error_str).into(),
]);
values.append(&mut internal_event_extra_row.values);
rows.push(Row { values });
}
Ok(rows)
}
fn as_any(&self) -> &dyn Any {
self
}
}
#[cfg(test)]
mod tests {
use api::v1::value::ValueData;
use api::v1::{ColumnDataType, ColumnSchema, Row, SemanticType};
use common_event_recorder::Event;
use crate::{ProcedureEvent, ProcedureId, ProcedureState};
#[derive(Debug)]
struct TestEvent;
impl Event for TestEvent {
fn event_type(&self) -> &str {
"test_event"
}
fn extra_schema(&self) -> Vec<ColumnSchema> {
vec![ColumnSchema {
column_name: "test_event_column".to_string(),
datatype: ColumnDataType::String.into(),
semantic_type: SemanticType::Field.into(),
..Default::default()
}]
}
fn extra_rows(&self) -> common_event_recorder::error::Result<Vec<Row>> {
Ok(vec![
Row {
values: vec![ValueData::StringValue("test_event1".to_string()).into()],
},
Row {
values: vec![ValueData::StringValue("test_event2".to_string()).into()],
},
])
}
fn as_any(&self) -> &dyn std::any::Any {
self
}
}
#[test]
fn test_procedure_event_extra_rows() {
let procedure_event = ProcedureEvent::new(
ProcedureId::random(),
Box::new(TestEvent {}),
ProcedureState::Running,
);
let procedure_event_extra_rows = procedure_event.extra_rows().unwrap();
assert_eq!(procedure_event_extra_rows.len(), 2);
assert_eq!(procedure_event_extra_rows[0].values.len(), 4);
assert_eq!(
procedure_event_extra_rows[0].values[3],
ValueData::StringValue("test_event1".to_string()).into()
);
assert_eq!(procedure_event_extra_rows[1].values.len(), 4);
assert_eq!(
procedure_event_extra_rows[1].values[3],
ValueData::StringValue("test_event2".to_string()).into()
);
}
}

View File

@@ -52,9 +52,6 @@ pub enum Error {
data_type: ArrowDatatype,
},
#[snafu(display("Failed to downcast vector: {}", err_msg))]
DowncastVector { err_msg: String },
#[snafu(display("Invalid input type: {}", err_msg))]
InvalidInputType {
#[snafu(implicit)]
@@ -209,8 +206,7 @@ pub type Result<T> = std::result::Result<T, Error>;
impl ErrorExt for Error {
fn status_code(&self) -> StatusCode {
match self {
Error::DowncastVector { .. }
| Error::InvalidInputState { .. }
Error::InvalidInputState { .. }
| Error::ToScalarValue { .. }
| Error::GetScalarVector { .. }
| Error::ArrowCompute { .. }

View File

@@ -314,10 +314,10 @@ impl Stream for RecordBatchStreamAdapter {
metric_collector.record_batch_metrics,
);
}
Poll::Ready(Some(RecordBatch::try_from_df_record_batch(
Poll::Ready(Some(Ok(RecordBatch::from_df_record_batch(
self.schema(),
df_record_batch,
)))
))))
}
Poll::Ready(None) => {
if let Metrics::Unresolved(df_plan) | Metrics::PartialResolved(df_plan, _) =

View File

@@ -133,18 +133,6 @@ pub enum Error {
source: datatypes::error::Error,
},
#[snafu(display(
"Failed to downcast vector of type '{:?}' to type '{:?}'",
from_type,
to_type
))]
DowncastVector {
from_type: ConcreteDataType,
to_type: ConcreteDataType,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Error occurs when performing arrow computation"))]
ArrowCompute {
#[snafu(source)]
@@ -217,8 +205,6 @@ impl ErrorExt for Error {
| Error::PhysicalExpr { .. }
| Error::RecordBatchSliceIndexOverflow { .. } => StatusCode::Internal,
Error::DowncastVector { .. } => StatusCode::Unexpected,
Error::PollStream { .. } => StatusCode::EngineExecuteQuery,
Error::ArrowCompute { .. } => StatusCode::IllegalState,

View File

@@ -30,19 +30,20 @@ use adapter::RecordBatchMetrics;
use arc_swap::ArcSwapOption;
use common_base::readable_size::ReadableSize;
pub use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
use datatypes::arrow::array::{ArrayRef, AsArray, StringBuilder};
use datatypes::arrow::compute::SortOptions;
pub use datatypes::arrow::record_batch::RecordBatch as DfRecordBatch;
use datatypes::arrow::util::pretty;
use datatypes::prelude::{ConcreteDataType, VectorRef};
use datatypes::scalars::{ScalarVector, ScalarVectorBuilder};
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
use datatypes::types::{JsonFormat, jsonb_to_string};
use datatypes::vectors::{BinaryVector, StringVectorBuilder};
use error::Result;
use futures::task::{Context, Poll};
use futures::{Stream, TryStreamExt};
pub use recordbatch::RecordBatch;
use snafu::{OptionExt, ResultExt, ensure};
use snafu::{ResultExt, ensure};
use crate::error::NewDfRecordBatchSnafu;
pub trait RecordBatchStream: Stream<Item = Result<RecordBatch>> {
fn name(&self) -> &str {
@@ -92,20 +93,14 @@ pub fn map_json_type_to_string(
mapped_schema: &SchemaRef,
) -> Result<RecordBatch> {
let mut vectors = Vec::with_capacity(original_schema.column_schemas().len());
for (vector, schema) in batch.columns.iter().zip(original_schema.column_schemas()) {
for (vector, schema) in batch.columns().iter().zip(original_schema.column_schemas()) {
if let ConcreteDataType::Json(j) = &schema.data_type {
if matches!(&j.format, JsonFormat::Jsonb) {
let mut string_vector_builder = StringVectorBuilder::with_capacity(vector.len());
let binary_vector = vector
.as_any()
.downcast_ref::<BinaryVector>()
.with_context(|| error::DowncastVectorSnafu {
from_type: schema.data_type.clone(),
to_type: ConcreteDataType::binary_datatype(),
})?;
for value in binary_vector.iter_data() {
let mut string_vector_builder = StringBuilder::new();
let binary_vector = vector.as_binary::<i32>();
for value in binary_vector.iter() {
let Some(value) = value else {
string_vector_builder.push(None);
string_vector_builder.append_null();
continue;
};
let string_value =
@@ -113,11 +108,11 @@ pub fn map_json_type_to_string(
from_type: schema.data_type.clone(),
to_type: ConcreteDataType::string_datatype(),
})?;
string_vector_builder.push(Some(string_value.as_str()));
string_vector_builder.append_value(string_value);
}
let string_vector = string_vector_builder.finish();
vectors.push(Arc::new(string_vector) as VectorRef);
vectors.push(Arc::new(string_vector) as ArrayRef);
} else {
vectors.push(vector.clone());
}
@@ -126,7 +121,15 @@ pub fn map_json_type_to_string(
}
}
RecordBatch::new(mapped_schema.clone(), vectors)
let record_batch = datatypes::arrow::record_batch::RecordBatch::try_new(
mapped_schema.arrow_schema().clone(),
vectors,
)
.context(NewDfRecordBatchSnafu)?;
Ok(RecordBatch::from_df_record_batch(
mapped_schema.clone(),
record_batch,
))
}
/// Maps the json type to string in the schema.
@@ -755,11 +758,7 @@ impl Stream for MemoryTrackedStream {
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
match Pin::new(&mut self.inner).poll_next(cx) {
Poll::Ready(Some(Ok(batch))) => {
let additional = batch
.columns()
.iter()
.map(|c| c.memory_size())
.sum::<usize>();
let additional = batch.buffer_memory_size();
if let Err(e) = self.permit.track(additional, self.total_tracked) {
return Poll::Ready(Some(Err(e)));

View File

@@ -20,7 +20,7 @@ use datafusion::arrow::util::pretty::pretty_format_batches;
use datafusion_common::arrow::array::ArrayRef;
use datafusion_common::arrow::compute;
use datafusion_common::arrow::datatypes::{DataType as ArrowDataType, SchemaRef as ArrowSchemaRef};
use datatypes::arrow::array::RecordBatchOptions;
use datatypes::arrow::array::{Array, AsArray, RecordBatchOptions};
use datatypes::prelude::DataType;
use datatypes::schema::SchemaRef;
use datatypes::vectors::{Helper, VectorRef};
@@ -30,15 +30,14 @@ use snafu::{OptionExt, ResultExt, ensure};
use crate::DfRecordBatch;
use crate::error::{
self, ArrowComputeSnafu, CastVectorSnafu, ColumnNotExistsSnafu, DataTypesSnafu,
ProjectArrowRecordBatchSnafu, Result,
self, ArrowComputeSnafu, ColumnNotExistsSnafu, DataTypesSnafu, ProjectArrowRecordBatchSnafu,
Result,
};
/// A two-dimensional batch of column-oriented data with a defined schema.
#[derive(Clone, Debug, PartialEq)]
pub struct RecordBatch {
pub schema: SchemaRef,
pub columns: Vec<VectorRef>,
df_record_batch: DfRecordBatch,
}
@@ -65,7 +64,6 @@ impl RecordBatch {
Ok(RecordBatch {
schema,
columns,
df_record_batch,
})
}
@@ -91,14 +89,8 @@ impl RecordBatch {
/// Create an empty [`RecordBatch`] from `schema`.
pub fn new_empty(schema: SchemaRef) -> RecordBatch {
let df_record_batch = DfRecordBatch::new_empty(schema.arrow_schema().clone());
let columns = schema
.column_schemas()
.iter()
.map(|col| col.data_type.create_mutable_vector(0).to_vector())
.collect();
RecordBatch {
schema,
columns,
df_record_batch,
}
}
@@ -113,17 +105,12 @@ impl RecordBatch {
.context(error::NewDfRecordBatchSnafu)?;
Ok(RecordBatch {
schema,
columns: vec![],
df_record_batch,
})
}
pub fn try_project(&self, indices: &[usize]) -> Result<Self> {
let schema = Arc::new(self.schema.try_project(indices).context(DataTypesSnafu)?);
let mut columns = Vec::with_capacity(indices.len());
for index in indices {
columns.push(self.columns[*index].clone());
}
let df_record_batch = self.df_record_batch.project(indices).with_context(|_| {
ProjectArrowRecordBatchSnafu {
schema: self.schema.clone(),
@@ -133,7 +120,6 @@ impl RecordBatch {
Ok(Self {
schema,
columns,
df_record_batch,
})
}
@@ -141,21 +127,11 @@ impl RecordBatch {
/// Create a new [`RecordBatch`] from `schema` and `df_record_batch`.
///
/// This method doesn't check the schema.
pub fn try_from_df_record_batch(
schema: SchemaRef,
df_record_batch: DfRecordBatch,
) -> Result<RecordBatch> {
let columns = df_record_batch
.columns()
.iter()
.map(|c| Helper::try_into_vector(c.clone()).context(error::DataTypesSnafu))
.collect::<Result<Vec<_>>>()?;
Ok(RecordBatch {
pub fn from_df_record_batch(schema: SchemaRef, df_record_batch: DfRecordBatch) -> RecordBatch {
RecordBatch {
schema,
columns,
df_record_batch,
})
}
}
#[inline]
@@ -169,23 +145,22 @@ impl RecordBatch {
}
#[inline]
pub fn columns(&self) -> &[VectorRef] {
&self.columns
pub fn columns(&self) -> &[ArrayRef] {
self.df_record_batch.columns()
}
#[inline]
pub fn column(&self, idx: usize) -> &VectorRef {
&self.columns[idx]
pub fn column(&self, idx: usize) -> &ArrayRef {
self.df_record_batch.column(idx)
}
pub fn column_by_name(&self, name: &str) -> Option<&VectorRef> {
let idx = self.schema.column_index_by_name(name)?;
Some(&self.columns[idx])
pub fn column_by_name(&self, name: &str) -> Option<&ArrayRef> {
self.df_record_batch.column_by_name(name)
}
#[inline]
pub fn num_columns(&self) -> usize {
self.columns.len()
self.df_record_batch.num_columns()
}
#[inline]
@@ -201,9 +176,14 @@ impl RecordBatch {
let mut vectors = HashMap::with_capacity(self.num_columns());
// column schemas in recordbatch must match its vectors, otherwise it's corrupted
for (vector_schema, vector) in self.schema.column_schemas().iter().zip(self.columns.iter())
for (field, array) in self
.df_record_batch
.schema()
.fields()
.iter()
.zip(self.df_record_batch.columns().iter())
{
let column_name = &vector_schema.name;
let column_name = field.name();
let column_schema =
table_schema
.column_schema_by_name(column_name)
@@ -211,15 +191,12 @@ impl RecordBatch {
table_name,
column_name,
})?;
let vector = if vector_schema.data_type != column_schema.data_type {
vector
.cast(&column_schema.data_type)
.with_context(|_| CastVectorSnafu {
from_type: vector.data_type(),
to_type: column_schema.data_type.clone(),
})?
let vector = if field.data_type() != &column_schema.data_type.as_arrow_type() {
let array = compute::cast(array, &column_schema.data_type.as_arrow_type())
.context(ArrowComputeSnafu)?;
Helper::try_into_vector(array).context(DataTypesSnafu)?
} else {
vector.clone()
Helper::try_into_vector(array).context(DataTypesSnafu)?
};
let _ = vectors.insert(column_name.clone(), vector);
@@ -244,8 +221,69 @@ impl RecordBatch {
visit_index: offset + len
}
);
let columns = self.columns.iter().map(|vector| vector.slice(offset, len));
RecordBatch::new(self.schema.clone(), columns)
let sliced = self.df_record_batch.slice(offset, len);
Ok(RecordBatch::from_df_record_batch(
self.schema.clone(),
sliced,
))
}
/// Returns the total number of bytes of memory pointed to by the arrays in this `RecordBatch`.
///
/// The buffers store bytes in the Arrow memory format, and include the data as well as the validity map.
/// Note that this does not always correspond to the exact memory usage of an array,
/// since multiple arrays can share the same buffers or slices thereof.
pub fn buffer_memory_size(&self) -> usize {
self.df_record_batch
.columns()
.iter()
.map(|array| array.get_buffer_memory_size())
.sum()
}
/// Iterate the values as strings in the column at index `i`.
///
/// Note that if the underlying array is not a valid GreptimeDB vector, an empty iterator is
/// returned.
///
/// # Panics
/// Panics if index `i` is out of bounds.
pub fn iter_column_as_string(&self, i: usize) -> Box<dyn Iterator<Item = Option<String>> + '_> {
macro_rules! iter {
($column: ident) => {
Box::new(
(0..$column.len())
.map(|i| $column.is_valid(i).then(|| $column.value(i).to_string())),
)
};
}
let column = self.df_record_batch.column(i);
match column.data_type() {
ArrowDataType::Utf8 => {
let column = column.as_string::<i32>();
let iter = iter!(column);
iter as _
}
ArrowDataType::LargeUtf8 => {
let column = column.as_string::<i64>();
iter!(column)
}
ArrowDataType::Utf8View => {
let column = column.as_string_view();
iter!(column)
}
_ => {
if let Ok(column) = Helper::try_into_vector(column) {
Box::new(
(0..column.len())
.map(move |i| (!column.is_null(i)).then(|| column.get(i).to_string())),
)
} else {
Box::new(std::iter::empty())
}
}
}
}
}
@@ -259,8 +297,9 @@ impl Serialize for RecordBatch {
let mut s = serializer.serialize_struct("record", 2)?;
s.serialize_field("schema", &**self.schema.arrow_schema())?;
let vec = self
.columns
let columns = self.df_record_batch.columns();
let columns = Helper::try_into_vectors(columns).map_err(Error::custom)?;
let vec = columns
.iter()
.map(|c| c.serialize_to_json())
.collect::<std::result::Result<Vec<_>, _>>()
@@ -278,27 +317,14 @@ pub fn merge_record_batches(schema: SchemaRef, batches: &[RecordBatch]) -> Resul
return Ok(RecordBatch::new_empty(schema));
}
let n_rows = batches.iter().map(|b| b.num_rows()).sum();
let n_columns = schema.num_columns();
// Collect arrays from each batch
let mut merged_columns = Vec::with_capacity(n_columns);
for col_idx in 0..n_columns {
let mut acc = schema.column_schemas()[col_idx]
.data_type
.create_mutable_vector(n_rows);
for batch in batches {
let column = batch.column(col_idx);
acc.extend_slice_of(column.as_ref(), 0, column.len())
.context(error::DataTypesSnafu)?;
}
merged_columns.push(acc.to_vector());
}
let record_batch = compute::concat_batches(
schema.arrow_schema(),
batches.iter().map(|x| x.df_record_batch()),
)
.context(ArrowComputeSnafu)?;
// Create a new RecordBatch with merged columns
RecordBatch::new(schema, merged_columns)
Ok(RecordBatch::from_df_record_batch(schema, record_batch))
}
#[cfg(test)]
@@ -326,21 +352,21 @@ mod tests {
let c2 = Arc::new(UInt32Vector::from_slice([4, 5, 6]));
let columns: Vec<VectorRef> = vec![c1, c2];
let expected = vec![
Arc::new(UInt32Array::from_iter_values([1, 2, 3])) as ArrayRef,
Arc::new(UInt32Array::from_iter_values([4, 5, 6])),
];
let batch = RecordBatch::new(schema.clone(), columns.clone()).unwrap();
assert_eq!(3, batch.num_rows());
assert_eq!(&columns, batch.columns());
for (i, expect) in columns.iter().enumerate().take(batch.num_columns()) {
let column = batch.column(i);
assert_eq!(expect, column);
}
assert_eq!(expected, batch.df_record_batch().columns());
assert_eq!(schema, batch.schema);
assert_eq!(columns[0], *batch.column_by_name("c1").unwrap());
assert_eq!(columns[1], *batch.column_by_name("c2").unwrap());
assert_eq!(&expected[0], batch.column_by_name("c1").unwrap());
assert_eq!(&expected[1], batch.column_by_name("c2").unwrap());
assert!(batch.column_by_name("c3").is_none());
let converted =
RecordBatch::try_from_df_record_batch(schema, batch.df_record_batch().clone()).unwrap();
let converted = RecordBatch::from_df_record_batch(schema, batch.df_record_batch().clone());
assert_eq!(batch, converted);
assert_eq!(*batch.df_record_batch(), converted.into_df_record_batch());
}
@@ -385,12 +411,12 @@ mod tests {
let recordbatch = recordbatch.slice(1, 2).expect("recordbatch slice");
let expected = &UInt32Array::from_iter_values([2u32, 3]);
let array = recordbatch.column(0).to_arrow_array();
let array = recordbatch.column(0);
let actual = array.as_primitive::<UInt32Type>();
assert_eq!(expected, actual);
let expected = &StringArray::from(vec!["hello", "greptime"]);
let array = recordbatch.column(1).to_arrow_array();
let array = recordbatch.column(1);
let actual = array.as_string::<i32>();
assert_eq!(expected, actual);
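A small sketch of the reworked API: `RecordBatch` is now a thin wrapper over the Arrow batch, columns are Arrow `ArrayRef`s, and string iteration goes through `iter_column_as_string`. The schema is taken as a parameter here and assumed to match the arrays.
use std::sync::Arc;

use common_recordbatch::RecordBatch;
use datatypes::arrow::array::{ArrayRef, StringArray, UInt32Array};

fn inspect(schema: datatypes::schema::SchemaRef) {
    let arrays: Vec<ArrayRef> = vec![
        Arc::new(UInt32Array::from_iter_values([1, 2, 3])),
        Arc::new(StringArray::from(vec!["a", "b", "c"])),
    ];
    let df_batch = datatypes::arrow::record_batch::RecordBatch::try_new(
        schema.arrow_schema().clone(),
        arrays,
    )
    .unwrap();
    // No conversion to vectors happens here any more; the wrapper just stores the Arrow batch.
    let batch = RecordBatch::from_df_record_batch(schema, df_batch);
    assert_eq!(2, batch.num_columns());
    // Columns are Arrow ArrayRefs now.
    let _first: &ArrayRef = batch.column(0);
    // String-typed columns can be iterated as owned strings; nulls come back as None.
    for value in batch.iter_column_as_string(1) {
        println!("{value:?}");
    }
}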

View File

@@ -211,8 +211,7 @@ pub fn sql_value_to_value(
| Value::Duration(_)
| Value::IntervalYearMonth(_)
| Value::IntervalDayTime(_)
| Value::IntervalMonthDayNano(_)
| Value::Json(_) => match unary_op {
| Value::IntervalMonthDayNano(_) => match unary_op {
UnaryOperator::Plus => {}
UnaryOperator::Minus => {
value = value
@@ -222,7 +221,11 @@ pub fn sql_value_to_value(
_ => return InvalidUnaryOpSnafu { unary_op, value }.fail(),
},
Value::String(_) | Value::Binary(_) | Value::List(_) | Value::Struct(_) => {
Value::String(_)
| Value::Binary(_)
| Value::List(_)
| Value::Struct(_)
| Value::Json(_) => {
return InvalidUnaryOpSnafu { unary_op, value }.fail();
}
}

View File

@@ -35,5 +35,5 @@ tokio.workspace = true
tracing = "0.1"
tracing-appender.workspace = true
tracing-log = "0.2"
tracing-opentelemetry = "0.31.0"
tracing-opentelemetry.workspace = true
tracing-subscriber.workspace = true

View File

@@ -21,7 +21,10 @@ mod panic_hook;
pub mod tracing_context;
mod tracing_sampler;
pub use logging::{RELOAD_HANDLE, init_default_ut_logging, init_global_logging};
pub use logging::{
LOG_RELOAD_HANDLE, TRACE_RELOAD_HANDLE, get_or_init_tracer, init_default_ut_logging,
init_global_logging,
};
pub use metric::dump_metrics;
pub use panic_hook::set_panic_hook;
pub use {common_error, tracing, tracing_subscriber};

View File

@@ -16,7 +16,7 @@
use std::collections::HashMap;
use std::env;
use std::io::IsTerminal;
use std::sync::{Arc, Mutex, Once};
use std::sync::{Arc, Mutex, Once, RwLock};
use std::time::Duration;
use common_base::serde::empty_string_as_default;
@@ -25,15 +25,17 @@ use opentelemetry::trace::TracerProvider;
use opentelemetry::{KeyValue, global};
use opentelemetry_otlp::{Protocol, SpanExporter, WithExportConfig, WithHttpConfig};
use opentelemetry_sdk::propagation::TraceContextPropagator;
use opentelemetry_sdk::trace::Sampler;
use opentelemetry_sdk::trace::{Sampler, Tracer};
use opentelemetry_semantic_conventions::resource;
use serde::{Deserialize, Serialize};
use tracing::callsite;
use tracing::metadata::LevelFilter;
use tracing_appender::non_blocking::WorkerGuard;
use tracing_appender::rolling::{RollingFileAppender, Rotation};
use tracing_log::LogTracer;
use tracing_subscriber::filter::{FilterFn, Targets};
use tracing_subscriber::fmt::Layer;
use tracing_subscriber::layer::SubscriberExt;
use tracing_subscriber::layer::{Layered, SubscriberExt};
use tracing_subscriber::prelude::*;
use tracing_subscriber::{EnvFilter, Registry, filter};
@@ -48,10 +50,192 @@ pub const DEFAULT_OTLP_HTTP_ENDPOINT: &str = "http://localhost:4318/v1/traces";
/// The default logs directory.
pub const DEFAULT_LOGGING_DIR: &str = "logs";
// Handle for reloading log level
pub static RELOAD_HANDLE: OnceCell<tracing_subscriber::reload::Handle<Targets, Registry>> =
/// Handle for reloading log level
pub static LOG_RELOAD_HANDLE: OnceCell<tracing_subscriber::reload::Handle<Targets, Registry>> =
OnceCell::new();
type DynSubscriber = Layered<tracing_subscriber::reload::Layer<Targets, Registry>, Registry>;
type OtelTraceLayer = tracing_opentelemetry::OpenTelemetryLayer<DynSubscriber, Tracer>;
#[derive(Clone)]
pub struct TraceReloadHandle {
inner: Arc<RwLock<Option<OtelTraceLayer>>>,
}
impl TraceReloadHandle {
fn new(inner: Arc<RwLock<Option<OtelTraceLayer>>>) -> Self {
Self { inner }
}
pub fn reload(&self, new_layer: Option<OtelTraceLayer>) {
let mut guard = self.inner.write().unwrap();
*guard = new_layer;
drop(guard);
callsite::rebuild_interest_cache();
}
}
/// A tracing layer that can be dynamically reloaded.
///
/// Mostly copied from [`tracing_subscriber::reload::Layer`].
struct TraceLayer {
inner: Arc<RwLock<Option<OtelTraceLayer>>>,
}
impl TraceLayer {
fn new(initial: Option<OtelTraceLayer>) -> (Self, TraceReloadHandle) {
let inner = Arc::new(RwLock::new(initial));
(
Self {
inner: inner.clone(),
},
TraceReloadHandle::new(inner),
)
}
fn with_layer<R>(&self, f: impl FnOnce(&OtelTraceLayer) -> R) -> Option<R> {
self.inner
.read()
.ok()
.and_then(|guard| guard.as_ref().map(f))
}
fn with_layer_mut<R>(&self, f: impl FnOnce(&mut OtelTraceLayer) -> R) -> Option<R> {
self.inner
.write()
.ok()
.and_then(|mut guard| guard.as_mut().map(f))
}
}
impl tracing_subscriber::Layer<DynSubscriber> for TraceLayer {
fn on_register_dispatch(&self, subscriber: &tracing::Dispatch) {
let _ = self.with_layer(|layer| layer.on_register_dispatch(subscriber));
}
fn on_layer(&mut self, subscriber: &mut DynSubscriber) {
let _ = self.with_layer_mut(|layer| layer.on_layer(subscriber));
}
fn register_callsite(
&self,
metadata: &'static tracing::Metadata<'static>,
) -> tracing::subscriber::Interest {
self.with_layer(|layer| layer.register_callsite(metadata))
.unwrap_or_else(tracing::subscriber::Interest::always)
}
fn enabled(
&self,
metadata: &tracing::Metadata<'_>,
ctx: tracing_subscriber::layer::Context<'_, DynSubscriber>,
) -> bool {
self.with_layer(|layer| layer.enabled(metadata, ctx))
.unwrap_or(true)
}
fn on_new_span(
&self,
attrs: &tracing::span::Attributes<'_>,
id: &tracing::span::Id,
ctx: tracing_subscriber::layer::Context<'_, DynSubscriber>,
) {
let _ = self.with_layer(|layer| layer.on_new_span(attrs, id, ctx));
}
fn max_level_hint(&self) -> Option<LevelFilter> {
self.with_layer(|layer| layer.max_level_hint()).flatten()
}
fn on_record(
&self,
span: &tracing::span::Id,
values: &tracing::span::Record<'_>,
ctx: tracing_subscriber::layer::Context<'_, DynSubscriber>,
) {
let _ = self.with_layer(|layer| layer.on_record(span, values, ctx));
}
fn on_follows_from(
&self,
span: &tracing::span::Id,
follows: &tracing::span::Id,
ctx: tracing_subscriber::layer::Context<'_, DynSubscriber>,
) {
let _ = self.with_layer(|layer| layer.on_follows_from(span, follows, ctx));
}
fn event_enabled(
&self,
event: &tracing::Event<'_>,
ctx: tracing_subscriber::layer::Context<'_, DynSubscriber>,
) -> bool {
self.with_layer(|layer| layer.event_enabled(event, ctx))
.unwrap_or(true)
}
fn on_event(
&self,
event: &tracing::Event<'_>,
ctx: tracing_subscriber::layer::Context<'_, DynSubscriber>,
) {
let _ = self.with_layer(|layer| layer.on_event(event, ctx));
}
fn on_enter(
&self,
id: &tracing::span::Id,
ctx: tracing_subscriber::layer::Context<'_, DynSubscriber>,
) {
let _ = self.with_layer(|layer| layer.on_enter(id, ctx));
}
fn on_exit(
&self,
id: &tracing::span::Id,
ctx: tracing_subscriber::layer::Context<'_, DynSubscriber>,
) {
let _ = self.with_layer(|layer| layer.on_exit(id, ctx));
}
fn on_close(
&self,
id: tracing::span::Id,
ctx: tracing_subscriber::layer::Context<'_, DynSubscriber>,
) {
let _ = self.with_layer(|layer| layer.on_close(id, ctx));
}
fn on_id_change(
&self,
old: &tracing::span::Id,
new: &tracing::span::Id,
ctx: tracing_subscriber::layer::Context<'_, DynSubscriber>,
) {
let _ = self.with_layer(|layer| layer.on_id_change(old, new, ctx));
}
unsafe fn downcast_raw(&self, id: std::any::TypeId) -> Option<*const ()> {
self.inner.read().ok().and_then(|guard| {
guard
.as_ref()
.and_then(|layer| unsafe { layer.downcast_raw(id) })
})
}
}
/// Handle for reloading trace level
pub static TRACE_RELOAD_HANDLE: OnceCell<TraceReloadHandle> = OnceCell::new();
static TRACER: OnceCell<Mutex<TraceState>> = OnceCell::new();
#[derive(Debug)]
enum TraceState {
Ready(Tracer),
Deferred(TraceContext),
}
/// The logging options that used to initialize the logger.
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(default)]
@@ -167,6 +351,13 @@ impl PartialEq for LoggingOptions {
impl Eq for LoggingOptions {}
#[derive(Clone, Debug)]
struct TraceContext {
app_name: String,
node_id: String,
logging_opts: LoggingOptions,
}
impl Default for LoggingOptions {
fn default() -> Self {
Self {
@@ -242,6 +433,7 @@ pub fn init_global_logging(
) -> Vec<WorkerGuard> {
static START: Once = Once::new();
let mut guards = vec![];
let node_id = node_id.unwrap_or_else(|| "none".to_string());
START.call_once(|| {
// Enable log compatible layer to convert log record to tracing span.
@@ -357,10 +549,37 @@ pub fn init_global_logging(
let (dyn_filter, reload_handle) = tracing_subscriber::reload::Layer::new(filter.clone());
RELOAD_HANDLE
LOG_RELOAD_HANDLE
.set(reload_handle)
.expect("reload handle already set, maybe init_global_logging get called twice?");
let mut initial_tracer = None;
let trace_state = if opts.enable_otlp_tracing {
let tracer = create_tracer(app_name, &node_id, opts);
initial_tracer = Some(tracer.clone());
TraceState::Ready(tracer)
} else {
TraceState::Deferred(TraceContext {
app_name: app_name.to_string(),
node_id: node_id.clone(),
logging_opts: opts.clone(),
})
};
TRACER
.set(Mutex::new(trace_state))
.expect("trace state already initialized");
let initial_trace_layer = initial_tracer
.as_ref()
.map(|tracer| tracing_opentelemetry::layer().with_tracer(tracer.clone()));
let (dyn_trace_layer, trace_reload_handle) = TraceLayer::new(initial_trace_layer);
TRACE_RELOAD_HANDLE
.set(trace_reload_handle)
.unwrap_or_else(|_| panic!("failed to set trace reload handle"));
// Must enable 'tokio_unstable' cfg to use this feature.
// For example: `RUSTFLAGS="--cfg tokio_unstable" cargo run -F common-telemetry/console -- standalone start`
#[cfg(feature = "tokio-console")]
@@ -383,6 +602,7 @@ pub fn init_global_logging(
Registry::default()
.with(dyn_filter)
.with(dyn_trace_layer)
.with(tokio_console_layer)
.with(stdout_logging_layer)
.with(file_logging_layer)
@@ -396,53 +616,61 @@ pub fn init_global_logging(
#[cfg(not(feature = "tokio-console"))]
let subscriber = Registry::default()
.with(dyn_filter)
.with(dyn_trace_layer)
.with(stdout_logging_layer)
.with(file_logging_layer)
.with(err_file_logging_layer)
.with(slow_query_logging_layer);
if opts.enable_otlp_tracing {
global::set_text_map_propagator(TraceContextPropagator::new());
global::set_text_map_propagator(TraceContextPropagator::new());
let sampler = opts
.tracing_sample_ratio
.as_ref()
.map(create_sampler)
.map(Sampler::ParentBased)
.unwrap_or(Sampler::ParentBased(Box::new(Sampler::AlwaysOn)));
let provider = opentelemetry_sdk::trace::SdkTracerProvider::builder()
.with_batch_exporter(build_otlp_exporter(opts))
.with_sampler(sampler)
.with_resource(
opentelemetry_sdk::Resource::builder_empty()
.with_attributes([
KeyValue::new(resource::SERVICE_NAME, app_name.to_string()),
KeyValue::new(
resource::SERVICE_INSTANCE_ID,
node_id.unwrap_or("none".to_string()),
),
KeyValue::new(resource::SERVICE_VERSION, common_version::version()),
KeyValue::new(resource::PROCESS_PID, std::process::id().to_string()),
])
.build(),
)
.build();
let tracer = provider.tracer("greptimedb");
tracing::subscriber::set_global_default(
subscriber.with(tracing_opentelemetry::layer().with_tracer(tracer)),
)
tracing::subscriber::set_global_default(subscriber)
.expect("error setting global tracing subscriber");
} else {
tracing::subscriber::set_global_default(subscriber)
.expect("error setting global tracing subscriber");
}
});
guards
}
fn create_tracer(app_name: &str, node_id: &str, opts: &LoggingOptions) -> Tracer {
let sampler = opts
.tracing_sample_ratio
.as_ref()
.map(create_sampler)
.map(Sampler::ParentBased)
.unwrap_or(Sampler::ParentBased(Box::new(Sampler::AlwaysOn)));
let resource = opentelemetry_sdk::Resource::builder_empty()
.with_attributes([
KeyValue::new(resource::SERVICE_NAME, app_name.to_string()),
KeyValue::new(resource::SERVICE_INSTANCE_ID, node_id.to_string()),
KeyValue::new(resource::SERVICE_VERSION, common_version::version()),
KeyValue::new(resource::PROCESS_PID, std::process::id().to_string()),
])
.build();
opentelemetry_sdk::trace::SdkTracerProvider::builder()
.with_batch_exporter(build_otlp_exporter(opts))
.with_sampler(sampler)
.with_resource(resource)
.build()
.tracer("greptimedb")
}
/// Ensure that the OTLP tracer has been constructed, building it lazily if needed.
pub fn get_or_init_tracer() -> Result<Tracer, &'static str> {
let state = TRACER.get().ok_or("trace state is not initialized")?;
let mut guard = state.lock().expect("trace state lock poisoned");
match &mut *guard {
TraceState::Ready(tracer) => Ok(tracer.clone()),
TraceState::Deferred(context) => {
let tracer = create_tracer(&context.app_name, &context.node_id, &context.logging_opts);
*guard = TraceState::Ready(tracer.clone());
Ok(tracer)
}
}
}
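For context, a minimal caller-side sketch of how the pieces above compose: build (or reuse) the tracer lazily via get_or_init_tracer, then swap it into the reloadable layer through TRACE_RELOAD_HANDLE. The crate path `common_telemetry` and the error handling are assumptions for illustration, not part of this diff.
// Illustrative sketch only: enable OTLP tracing after startup without
// re-initializing the logging stack.
fn enable_tracing_at_runtime() -> Result<(), &'static str> {
    // Builds the tracer from the deferred TraceContext on the first call,
    // returns the cached one afterwards.
    let tracer = common_telemetry::get_or_init_tracer()?;
    // Wrap it in an OpenTelemetry layer and hand it to the reloadable slot.
    let layer = tracing_opentelemetry::layer().with_tracer(tracer);
    common_telemetry::TRACE_RELOAD_HANDLE
        .get()
        .ok_or("trace reload handle is not initialized")?
        .reload(Some(layer));
    Ok(())
}
Passing None to reload drops the layer again, which is what makes tracing effectively switchable at runtime.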
fn build_otlp_exporter(opts: &LoggingOptions) -> SpanExporter {
let protocol = opts
.otlp_export_protocol


@@ -28,7 +28,6 @@ use mito2::config::MitoConfig;
pub(crate) use object_store::config::ObjectStoreConfig;
use query::options::QueryOptions;
use serde::{Deserialize, Serialize};
use servers::export_metrics::ExportMetricsOption;
use servers::grpc::GrpcOptions;
use servers::heartbeat_options::HeartbeatOptions;
use servers::http::HttpOptions;
@@ -82,7 +81,6 @@ pub struct DatanodeOptions {
pub region_engine: Vec<RegionEngineConfig>,
pub logging: LoggingOptions,
pub enable_telemetry: bool,
pub export_metrics: ExportMetricsOption,
pub tracing: TracingOptions,
pub query: QueryOptions,
pub memory: MemoryOptions,
@@ -138,7 +136,6 @@ impl Default for DatanodeOptions {
logging: LoggingOptions::default(),
heartbeat: HeartbeatOptions::datanode_default(),
enable_telemetry: true,
export_metrics: ExportMetricsOption::default(),
tracing: TracingOptions::default(),
query: QueryOptions::default(),
memory: MemoryOptions::default(),


@@ -48,7 +48,6 @@ use object_store::manager::{ObjectStoreManager, ObjectStoreManagerRef};
use object_store::util::normalize_dir;
use query::QueryEngineFactory;
use query::dummy_catalog::{DummyCatalogManager, TableProviderFactoryRef};
use servers::export_metrics::ExportMetricsTask;
use servers::server::ServerHandlers;
use snafu::{OptionExt, ResultExt, ensure};
use store_api::path_utils::WAL_DIR;
@@ -84,7 +83,6 @@ pub struct Datanode {
greptimedb_telemetry_task: Arc<GreptimeDBTelemetryTask>,
leases_notifier: Option<Arc<Notify>>,
plugins: Plugins,
export_metrics_task: Option<ExportMetricsTask>,
}
impl Datanode {
@@ -96,10 +94,6 @@ impl Datanode {
self.start_telemetry();
if let Some(t) = self.export_metrics_task.as_ref() {
t.start(None).context(StartServerSnafu)?
}
self.services.start_all().await.context(StartServerSnafu)
}
@@ -319,10 +313,6 @@ impl DatanodeBuilder {
None
};
let export_metrics_task =
ExportMetricsTask::try_new(&self.opts.export_metrics, Some(&self.plugins))
.context(StartServerSnafu)?;
Ok(Datanode {
services: ServerHandlers::default(),
heartbeat_task,
@@ -331,7 +321,6 @@ impl DatanodeBuilder {
region_event_receiver,
leases_notifier,
plugins: self.plugins.clone(),
export_metrics_task,
})
}


@@ -39,9 +39,8 @@ impl InstructionHandler for GetFileRefsHandler {
error: Some("MitoEngine not found".to_string()),
}));
};
match mito_engine
.get_snapshot_of_unmanifested_refs(get_file_refs.region_ids)
.get_snapshot_of_file_refs(get_file_refs.query_regions, get_file_refs.related_regions)
.await
{
Ok(all_file_refs) => {


@@ -15,7 +15,7 @@
use common_meta::instruction::{GcRegions, GcRegionsReply, InstructionReply};
use common_telemetry::{debug, warn};
use mito2::gc::LocalGcWorker;
use snafu::{OptionExt, ResultExt};
use snafu::{OptionExt, ResultExt, ensure};
use store_api::storage::{FileRefsManifest, RegionId};
use crate::error::{GcMitoEngineSnafu, InvalidGcArgsSnafu, Result, UnexpectedSnafu};
@@ -35,20 +35,6 @@ impl InstructionHandler for GcRegionsHandler {
let region_ids = gc_regions.regions.clone();
debug!("Received gc regions instruction: {:?}", region_ids);
let is_same_table = region_ids.windows(2).all(|w| {
let t1 = w[0].table_id();
let t2 = w[1].table_id();
t1 == t2
});
if !is_same_table {
return Some(InstructionReply::GcRegions(GcRegionsReply {
result: Err(format!(
"Regions to GC should belong to the same table, found: {:?}",
region_ids
)),
}));
}
let (region_id, gc_worker) = match self
.create_gc_worker(
ctx,
@@ -103,6 +89,8 @@ impl InstructionHandler for GcRegionsHandler {
}
impl GcRegionsHandler {
/// Create a GC worker for the given region IDs.
/// Return the first region ID (after sorting the given region IDs) and the GC worker.
async fn create_gc_worker(
&self,
ctx: &HandlerContext,
@@ -112,22 +100,37 @@ impl GcRegionsHandler {
) -> Result<(RegionId, LocalGcWorker)> {
// always use the smallest region id on datanode as the target region id
region_ids.sort_by_key(|r| r.region_number());
let mito_engine = ctx
.region_server
.mito_engine()
.with_context(|| UnexpectedSnafu {
violated: "MitoEngine not found".to_string(),
})?;
let region_id = *region_ids.first().with_context(|| UnexpectedSnafu {
violated: "No region ids provided".to_string(),
let region_id = *region_ids.first().with_context(|| InvalidGcArgsSnafu {
msg: "No region ids provided".to_string(),
})?;
let mito_config = mito_engine.mito_config();
// also need to ensure all regions are on this datanode
ensure!(
region_ids
.iter()
.all(|rid| mito_engine.find_region(*rid).is_some()),
InvalidGcArgsSnafu {
msg: format!(
"Some regions are not on current datanode:{:?}",
region_ids
.iter()
.filter(|rid| mito_engine.find_region(**rid).is_none())
.collect::<Vec<_>>()
),
}
);
// Find the access layer from one of the regions that exists on this datanode
let access_layer = region_ids
.iter()
.find_map(|rid| mito_engine.find_region(*rid))
let access_layer = mito_engine
.find_region(region_id)
.with_context(|| InvalidGcArgsSnafu {
msg: format!(
"None of the regions is on current datanode:{:?}",
@@ -136,14 +139,22 @@ impl GcRegionsHandler {
})?
.access_layer();
// If a region happens to be dropped before this point but after the gc scheduler
// sent the gc instruction, we need to handle it properly (it is ok for a region to
// be dropped after the GC worker has started).
// A region not found here can only be the drop table/database case, since region
// migration is prevented by the lock in the gc procedure.
// TODO(discord9): add integration test for this drop case
let mito_regions = region_ids
.iter()
.filter_map(|rid| mito_engine.find_region(*rid).map(|r| (*rid, r)))
.collect();
let cache_manager = mito_engine.cache_manager();
let gc_worker = LocalGcWorker::try_new(
access_layer.clone(),
Some(cache_manager),
region_ids.into_iter().collect(),
Default::default(),
mito_config.clone().into(),
mito_regions,
mito_engine.mito_config().gc.clone(),
file_ref_manifest.clone(),
&mito_engine.gc_limiter(),
full_file_listing,


@@ -518,7 +518,7 @@ impl RegionServer {
let manifest_info = match manifest_info {
ManifestInfo::MitoManifestInfo(info) => {
RegionManifestInfo::mito(info.data_manifest_version, 0)
RegionManifestInfo::mito(info.data_manifest_version, 0, 0)
}
ManifestInfo::MetricManifestInfo(info) => RegionManifestInfo::metric(
info.data_manifest_version,


@@ -34,7 +34,8 @@ use session::context::QueryContextRef;
use store_api::metadata::RegionMetadataRef;
use store_api::region_engine::{
RegionEngine, RegionManifestInfo, RegionRole, RegionScannerRef, RegionStatistic,
SetRegionRoleStateResponse, SettableRegionRoleState, SyncManifestResponse,
RemapManifestsRequest, RemapManifestsResponse, SetRegionRoleStateResponse,
SettableRegionRoleState, SyncManifestResponse,
};
use store_api::region_request::{AffectedRows, RegionRequest};
use store_api::storage::{RegionId, ScanRequest, SequenceNumber};
@@ -291,6 +292,13 @@ impl RegionEngine for MockRegionEngine {
unimplemented!()
}
async fn remap_manifests(
&self,
_request: RemapManifestsRequest,
) -> Result<RemapManifestsResponse, BoxedError> {
unimplemented!()
}
fn as_any(&self) -> &dyn Any {
self
}


@@ -12,9 +12,117 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use arrow::array::{ArrayRef, AsArray};
use arrow::datatypes::{
DataType, DurationMicrosecondType, DurationMillisecondType, DurationNanosecondType,
DurationSecondType, Time32MillisecondType, Time32SecondType, Time64MicrosecondType,
Time64NanosecondType, TimeUnit, TimestampMicrosecondType, TimestampMillisecondType,
TimestampNanosecondType, TimestampSecondType,
};
use common_time::time::Time;
use common_time::{Duration, Timestamp};
pub type BinaryArray = arrow::array::BinaryArray;
pub type MutableBinaryArray = arrow::array::BinaryBuilder;
pub type StringArray = arrow::array::StringArray;
pub type MutableStringArray = arrow::array::StringBuilder;
pub type LargeStringArray = arrow::array::LargeStringArray;
pub type MutableLargeStringArray = arrow::array::LargeStringBuilder;
/// Get the [Timestamp] value at index `i` of the timestamp array.
///
/// Note: This method does not check for nulls and the value is arbitrary
/// if [`is_null`](arrow::array::Array::is_null) returns true for the index.
///
/// # Panics
/// 1. if index `i` is out of bounds;
/// 2. or the array is not timestamp type.
pub fn timestamp_array_value(array: &ArrayRef, i: usize) -> Timestamp {
let DataType::Timestamp(time_unit, _) = &array.data_type() else {
unreachable!()
};
let v = match time_unit {
TimeUnit::Second => {
let array = array.as_primitive::<TimestampSecondType>();
array.value(i)
}
TimeUnit::Millisecond => {
let array = array.as_primitive::<TimestampMillisecondType>();
array.value(i)
}
TimeUnit::Microsecond => {
let array = array.as_primitive::<TimestampMicrosecondType>();
array.value(i)
}
TimeUnit::Nanosecond => {
let array = array.as_primitive::<TimestampNanosecondType>();
array.value(i)
}
};
Timestamp::new(v, time_unit.into())
}
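As a quick illustration (not part of this diff), a caller holding a millisecond timestamp array might use the helper as in the sketch below; Timestamp::new_millisecond from common_time is assumed to be available.
use std::sync::Arc;
use arrow::array::{ArrayRef, TimestampMillisecondArray};
use common_time::Timestamp;
// Hypothetical usage sketch for timestamp_array_value.
fn timestamp_array_value_example() {
    let array: ArrayRef =
        Arc::new(TimestampMillisecondArray::from(vec![1_000_i64, 2_000, 3_000]));
    // Reads the value at index 1 and wraps it with the array's time unit.
    assert_eq!(
        timestamp_array_value(&array, 1),
        Timestamp::new_millisecond(2_000)
    );
}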
/// Get the [Time] value at index `i` of the time array.
///
/// Note: This method does not check for nulls and the value is arbitrary
/// if [`is_null`](arrow::array::Array::is_null) returns true for the index.
///
/// # Panics
/// 1. if index `i` is out of bounds;
/// 2. or the array is not `Time32` or `Time64` type.
pub fn time_array_value(array: &ArrayRef, i: usize) -> Time {
match array.data_type() {
DataType::Time32(time_unit) | DataType::Time64(time_unit) => match time_unit {
TimeUnit::Second => {
let array = array.as_primitive::<Time32SecondType>();
Time::new_second(array.value(i) as i64)
}
TimeUnit::Millisecond => {
let array = array.as_primitive::<Time32MillisecondType>();
Time::new_millisecond(array.value(i) as i64)
}
TimeUnit::Microsecond => {
let array = array.as_primitive::<Time64MicrosecondType>();
Time::new_microsecond(array.value(i))
}
TimeUnit::Nanosecond => {
let array = array.as_primitive::<Time64NanosecondType>();
Time::new_nanosecond(array.value(i))
}
},
_ => unreachable!(),
}
}
/// Get the [Duration] value at index `i` of the duration array.
///
/// Note: This method does not check for nulls and the value is arbitrary
/// if [`is_null`](arrow::array::Array::is_null) returns true for the index.
///
/// # Panics
/// 1. if index `i` is out of bounds;
/// 2. or the array is not duration type.
pub fn duration_array_value(array: &ArrayRef, i: usize) -> Duration {
let DataType::Duration(time_unit) = array.data_type() else {
unreachable!();
};
let v = match time_unit {
TimeUnit::Second => {
let array = array.as_primitive::<DurationSecondType>();
array.value(i)
}
TimeUnit::Millisecond => {
let array = array.as_primitive::<DurationMillisecondType>();
array.value(i)
}
TimeUnit::Microsecond => {
let array = array.as_primitive::<DurationMicrosecondType>();
array.value(i)
}
TimeUnit::Nanosecond => {
let array = array.as_primitive::<DurationNanosecondType>();
array.value(i)
}
};
Duration::new(v, time_unit.into())
}
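The Time and Duration helpers follow the same pattern; a hypothetical sketch, assuming the usual new_second constructors on common_time::Time and common_time::Duration:
use std::sync::Arc;
use arrow::array::{ArrayRef, DurationSecondArray, Time32SecondArray};
use common_time::{Duration, Time};
// Hypothetical usage sketch for time_array_value and duration_array_value.
fn time_and_duration_value_example() {
    let times: ArrayRef = Arc::new(Time32SecondArray::from(vec![30_i32, 60, 90]));
    assert_eq!(time_array_value(&times, 0), Time::new_second(30));
    let durations: ArrayRef = Arc::new(DurationSecondArray::from(vec![5_i64, 10]));
    assert_eq!(duration_array_value(&durations, 1), Duration::new_second(10));
}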


@@ -33,8 +33,8 @@ use crate::types::{
BinaryType, BooleanType, DateType, Decimal128Type, DictionaryType, DurationMicrosecondType,
DurationMillisecondType, DurationNanosecondType, DurationSecondType, DurationType, Float32Type,
Float64Type, Int8Type, Int16Type, Int32Type, Int64Type, IntervalDayTimeType,
IntervalMonthDayNanoType, IntervalType, IntervalYearMonthType, JsonFormat, JsonType, ListType,
NullType, StringType, StructType, TimeMillisecondType, TimeType, TimestampMicrosecondType,
IntervalMonthDayNanoType, IntervalType, IntervalYearMonthType, JsonType, ListType, NullType,
StringType, StructType, TimeMillisecondType, TimeType, TimestampMicrosecondType,
TimestampMillisecondType, TimestampNanosecondType, TimestampSecondType, TimestampType,
UInt8Type, UInt16Type, UInt32Type, UInt64Type, VectorType,
};
@@ -677,7 +677,7 @@ impl ConcreteDataType {
}
pub fn json_native_datatype(inner_type: ConcreteDataType) -> ConcreteDataType {
ConcreteDataType::Json(JsonType::new(JsonFormat::Native(Box::new(inner_type))))
ConcreteDataType::Json(JsonType::new_native((&inner_type).into()))
}
}


@@ -189,7 +189,7 @@ pub enum Error {
location: Location,
},
#[snafu(display("Invalid JSON text: {}", value))]
#[snafu(display("Invalid JSON: {}", value))]
InvalidJson {
value: String,
#[snafu(implicit)]


@@ -19,18 +19,19 @@
//! The struct will carry all the fields of the Json object. We will not flatten any json object in this implementation.
//!
use std::collections::HashSet;
pub mod value;
use std::collections::{BTreeMap, HashSet};
use std::sync::Arc;
use common_base::bytes::StringBytes;
use ordered_float::OrderedFloat;
use serde::{Deserialize, Serialize};
use serde_json::{Map, Value as Json};
use snafu::{ResultExt, ensure};
use crate::data_type::{ConcreteDataType, DataType};
use crate::error::{self, Error};
use crate::types::{ListType, StructField, StructType};
use crate::json::value::{JsonValue, JsonVariant};
use crate::types::json_type::{JsonNativeType, JsonNumberType, JsonObjectType};
use crate::types::{StructField, StructType};
use crate::value::{ListValue, StructValue, Value};
/// The configuration of JSON encoding
@@ -102,7 +103,7 @@ impl JsonStructureSettings {
pub fn encode_with_type(
&self,
json: Json,
data_type: Option<&ConcreteDataType>,
data_type: Option<&JsonNativeType>,
) -> Result<Value, Error> {
let context = JsonContext {
key_path: String::new(),
@@ -146,70 +147,65 @@ impl<'a> JsonContext<'a> {
/// Main encoding function with key path tracking
pub fn encode_json_with_context<'a>(
json: Json,
data_type: Option<&ConcreteDataType>,
data_type: Option<&JsonNativeType>,
context: &JsonContext<'a>,
) -> Result<Value, Error> {
) -> Result<JsonValue, Error> {
// Check if the entire encoding should be unstructured
if matches!(context.settings, JsonStructureSettings::UnstructuredRaw) {
let json_string = json.to_string();
let struct_value = StructValue::try_new(
vec![Value::String(json_string.into())],
StructType::new(Arc::new(vec![StructField::new(
JsonStructureSettings::RAW_FIELD.to_string(),
ConcreteDataType::string_datatype(),
true,
)])),
)?;
return Ok(Value::Struct(struct_value));
return Ok([(JsonStructureSettings::RAW_FIELD, json_string)].into());
}
// Check if current key should be treated as unstructured
if context.is_unstructured_key() {
return Ok(Value::String(json.to_string().into()));
return Ok(json.to_string().into());
}
match json {
Json::Object(json_object) => {
ensure!(
matches!(data_type, Some(ConcreteDataType::Struct(_)) | None),
error::InvalidJsonSnafu {
value: "JSON object can only be encoded to Struct type".to_string(),
let object_type = match data_type.as_ref() {
Some(JsonNativeType::Object(x)) => Some(x),
None => None,
_ => {
return error::InvalidJsonSnafu {
value: "JSON object value must be encoded with object type",
}
.fail();
}
);
let data_type = data_type.and_then(|x| x.as_struct());
let struct_value = encode_json_object_with_context(json_object, data_type, context)?;
Ok(Value::Struct(struct_value))
};
encode_json_object_with_context(json_object, object_type, context)
}
Json::Array(json_array) => {
let item_type = if let Some(ConcreteDataType::List(list_type)) = data_type {
Some(list_type.item_type())
} else {
None
let item_type = match data_type.as_ref() {
Some(JsonNativeType::Array(x)) => Some(x.as_ref()),
None => None,
_ => {
return error::InvalidJsonSnafu {
value: "JSON array value must be encoded with array type",
}
.fail();
}
};
let list_value = encode_json_array_with_context(json_array, item_type, context)?;
Ok(Value::List(list_value))
encode_json_array_with_context(json_array, item_type, context)
}
_ => {
// For non-collection types, verify type compatibility
if let Some(expected_type) = data_type {
let (value, actual_type) =
encode_json_value_with_context(json, Some(expected_type), context)?;
if &actual_type == expected_type {
let value = encode_json_value_with_context(json, Some(expected_type), context)?;
let actual_type = value.json_type().native_type();
if actual_type == expected_type {
Ok(value)
} else {
Err(error::InvalidJsonSnafu {
value: format!(
"JSON value type {} does not match expected type {}",
actual_type.name(),
expected_type.name()
actual_type, expected_type
),
}
.build())
}
} else {
let (value, _) = encode_json_value_with_context(json, None, context)?;
Ok(value)
encode_json_value_with_context(json, None, context)
}
}
}
@@ -217,31 +213,21 @@ pub fn encode_json_with_context<'a>(
fn encode_json_object_with_context<'a>(
mut json_object: Map<String, Json>,
fields: Option<&StructType>,
fields: Option<&JsonObjectType>,
context: &JsonContext<'a>,
) -> Result<StructValue, Error> {
let total_json_keys = json_object.len();
let mut items = Vec::with_capacity(total_json_keys);
let mut struct_fields = Vec::with_capacity(total_json_keys);
) -> Result<JsonValue, Error> {
let mut object = BTreeMap::new();
// First, process fields from the provided schema in their original order
if let Some(fields) = fields {
for field in fields.fields().iter() {
let field_name = field.name();
for (field_name, field_type) in fields {
if let Some(value) = json_object.remove(field_name) {
let field_context = context.with_key(field_name);
let (value, data_type) =
encode_json_value_with_context(value, Some(field.data_type()), &field_context)?;
items.push(value);
struct_fields.push(StructField::new(
field_name.to_string(),
data_type,
true, // JSON fields are always nullable
));
let value =
encode_json_value_with_context(value, Some(field_type), &field_context)?;
object.insert(field_name.clone(), value.into_variant());
} else {
// Field exists in schema but not in JSON - add null value
items.push(Value::Null);
struct_fields.push(field.clone());
object.insert(field_name.clone(), ().into());
}
}
}
@@ -250,139 +236,111 @@ fn encode_json_object_with_context<'a>(
for (key, value) in json_object {
let field_context = context.with_key(&key);
let (value, data_type) = encode_json_value_with_context(value, None, &field_context)?;
items.push(value);
let value = encode_json_value_with_context(value, None, &field_context)?;
struct_fields.push(StructField::new(
key.clone(),
data_type,
true, // JSON fields are always nullable
));
object.insert(key, value.into_variant());
}
let struct_type = StructType::new(Arc::new(struct_fields));
StructValue::try_new(items, struct_type)
Ok(JsonValue::new(JsonVariant::Object(object)))
}
fn encode_json_array_with_context<'a>(
json_array: Vec<Json>,
item_type: Option<&ConcreteDataType>,
item_type: Option<&JsonNativeType>,
context: &JsonContext<'a>,
) -> Result<ListValue, Error> {
) -> Result<JsonValue, Error> {
let json_array_len = json_array.len();
let mut items = Vec::with_capacity(json_array_len);
let mut element_type = None;
let mut element_type = item_type.cloned();
for (index, value) in json_array.into_iter().enumerate() {
let array_context = context.with_key(&index.to_string());
let (item_value, item_type) =
encode_json_value_with_context(value, item_type, &array_context)?;
items.push(item_value);
let item_value =
encode_json_value_with_context(value, element_type.as_ref(), &array_context)?;
let item_type = item_value.json_type().native_type().clone();
items.push(item_value.into_variant());
// Determine the common type for the list
if let Some(current_type) = &element_type {
// For now, we'll use the first non-null type we encounter
// In a more sophisticated implementation, we might want to find a common supertype
if *current_type == ConcreteDataType::null_datatype()
&& item_type != ConcreteDataType::null_datatype()
{
element_type = Some(item_type);
}
// It's valid for a json array to have items of different types, for example,
// ["a string", 1]. However, the `JsonValue` will be converted to an Arrow list
// array, which requires all items to have exactly the same type, so we forbid
// the mixed-type case here. Besides, items in a json array rarely differ in
// type, so this restriction is acceptable.
ensure!(
item_type == *current_type,
error::InvalidJsonSnafu {
value: "all items in json array must have the same type"
}
);
} else {
element_type = Some(item_type);
}
}
// Use provided item_type if available, otherwise determine from elements
let element_type = if let Some(item_type) = item_type {
item_type.clone()
} else {
element_type.unwrap_or_else(ConcreteDataType::string_datatype)
};
Ok(ListValue::new(items, Arc::new(element_type)))
Ok(JsonValue::new(JsonVariant::Array(items)))
}
/// Helper function to encode a JSON value into a JsonValue with context
fn encode_json_value_with_context<'a>(
json: Json,
expected_type: Option<&ConcreteDataType>,
expected_type: Option<&JsonNativeType>,
context: &JsonContext<'a>,
) -> Result<(Value, ConcreteDataType), Error> {
) -> Result<JsonValue, Error> {
// Check if current key should be treated as unstructured
if context.is_unstructured_key() {
return Ok((
Value::String(json.to_string().into()),
ConcreteDataType::string_datatype(),
));
return Ok(json.to_string().into());
}
match json {
Json::Null => Ok((Value::Null, ConcreteDataType::null_datatype())),
Json::Bool(b) => Ok((Value::Boolean(b), ConcreteDataType::boolean_datatype())),
Json::Null => Ok(JsonValue::null()),
Json::Bool(b) => Ok(b.into()),
Json::Number(n) => {
if let Some(i) = n.as_i64() {
// Use int64 for all integer numbers when possible
if let Some(expected) = expected_type
&& let Ok(value) = try_convert_to_expected_type(i, expected)
{
return Ok((value, expected.clone()));
return Ok(value);
}
Ok((Value::Int64(i), ConcreteDataType::int64_datatype()))
Ok(i.into())
} else if let Some(u) = n.as_u64() {
// Use int64 for unsigned integers that fit, otherwise use u64
if let Some(expected) = expected_type
&& let Ok(value) = try_convert_to_expected_type(u, expected)
{
return Ok((value, expected.clone()));
return Ok(value);
}
if u <= i64::MAX as u64 {
Ok((Value::Int64(u as i64), ConcreteDataType::int64_datatype()))
Ok((u as i64).into())
} else {
Ok((Value::UInt64(u), ConcreteDataType::uint64_datatype()))
Ok(u.into())
}
} else if let Some(f) = n.as_f64() {
// Try to use the expected type if provided
if let Some(expected) = expected_type
&& let Ok(value) = try_convert_to_expected_type(f, expected)
{
return Ok((value, expected.clone()));
return Ok(value);
}
// Default to f64 for floating point numbers
Ok((
Value::Float64(OrderedFloat(f)),
ConcreteDataType::float64_datatype(),
))
Ok(f.into())
} else {
// Fallback to string representation
Ok((
Value::String(StringBytes::from(n.to_string())),
ConcreteDataType::string_datatype(),
))
Ok(n.to_string().into())
}
}
Json::String(s) => {
if let Some(expected) = expected_type
&& let Ok(value) = try_convert_to_expected_type(s.as_str(), expected)
{
return Ok((value, expected.clone()));
return Ok(value);
}
Ok((
Value::String(StringBytes::from(s.clone())),
ConcreteDataType::string_datatype(),
))
}
Json::Array(arr) => {
let list_value = encode_json_array_with_context(arr, expected_type, context)?;
let datatype = ConcreteDataType::List(ListType::new(list_value.datatype()));
Ok((Value::List(list_value), datatype))
}
Json::Object(obj) => {
let struct_value = encode_json_object_with_context(obj, None, context)?;
let data_type = ConcreteDataType::Struct(struct_value.struct_type().clone());
Ok((Value::Struct(struct_value), data_type))
Ok(s.into())
}
Json::Array(arr) => encode_json_array_with_context(arr, expected_type, context),
Json::Object(obj) => encode_json_object_with_context(obj, None, context),
}
}
@@ -402,7 +360,6 @@ pub fn decode_value_with_context<'a>(
}
match value {
Value::Json(inner) => decode_value_with_context(*inner, context),
Value::Struct(struct_value) => decode_struct_with_context(struct_value, context),
Value::List(list_value) => decode_list_with_context(list_value, context),
_ => decode_primitive_value(value),
@@ -569,11 +526,13 @@ fn decode_struct_with_settings<'a>(
key_path: field_context.key_path.clone(),
settings: &JsonStructureSettings::Structured(None),
};
let (decoded_value, data_type) = encode_json_value_with_context(
let decoded_value = encode_json_value_with_context(
json_value,
None, // Don't force a specific type, let it be inferred from JSON
&structured_context,
)?;
)?
.into_value();
let data_type = decoded_value.data_type();
items.push(decoded_value);
struct_fields.push(StructField::new(
@@ -651,8 +610,9 @@ fn decode_unstructured_raw_struct(struct_value: StructValue) -> Result<StructVal
key_path: String::new(),
settings: &JsonStructureSettings::Structured(None),
};
let (decoded_value, data_type) =
encode_json_value_with_context(json_value, None, &context)?;
let decoded_value =
encode_json_value_with_context(json_value, None, &context)?.into_value();
let data_type = decoded_value.data_type();
if let Value::Struct(decoded_struct) = decoded_value {
return Ok(decoded_struct);
@@ -678,22 +638,48 @@ fn decode_unstructured_raw_struct(struct_value: StructValue) -> Result<StructVal
/// Helper function to try converting a value to an expected type
fn try_convert_to_expected_type<T>(
value: T,
expected_type: &ConcreteDataType,
) -> Result<Value, Error>
expected_type: &JsonNativeType,
) -> Result<JsonValue, Error>
where
T: Into<Value>,
T: Into<JsonValue>,
{
let value = value.into();
expected_type.try_cast(value.clone()).ok_or_else(|| {
let cast_error = || {
error::CastTypeSnafu {
msg: format!(
"Cannot cast from {} to {}",
value.data_type().name(),
expected_type.name()
),
msg: format!("Cannot cast value {value} to {expected_type}"),
}
.build()
})
.fail()
};
let actual_type = value.json_type().native_type();
match (actual_type, expected_type) {
(x, y) if x == y => Ok(value),
(JsonNativeType::Number(x), JsonNativeType::Number(y)) => match (x, y) {
(JsonNumberType::U64, JsonNumberType::I64) => {
if let Some(i) = value.as_i64() {
Ok(i.into())
} else {
cast_error()
}
}
(JsonNumberType::I64, JsonNumberType::U64) => {
if let Some(i) = value.as_u64() {
Ok(i.into())
} else {
cast_error()
}
}
(_, JsonNumberType::F64) => {
if let Some(f) = value.as_f64() {
Ok(f.into())
} else {
cast_error()
}
}
_ => cast_error(),
},
(_, JsonNativeType::String) => Ok(value.to_string().into()),
_ => cast_error(),
}
}
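To make the widening rules above concrete, an in-module test-style sketch (it would sit in the tests module below; the assertions restate the match arms and are illustrative only):
// Hypothetical sketch: integers convert between u64/i64 when they fit, any
// number converts to f64, and any value falls back to its string form.
#[test]
fn try_convert_sketch() {
    let v = try_convert_to_expected_type(42_u64, &JsonNativeType::i64()).unwrap();
    assert_eq!(v.as_i64(), Some(42));
    let v = try_convert_to_expected_type(1_i64, &JsonNativeType::f64()).unwrap();
    assert_eq!(v.as_f64(), Some(1.0));
    let v = try_convert_to_expected_type(true, &JsonNativeType::String).unwrap();
    assert_eq!(v.to_string(), "true");
    assert!(try_convert_to_expected_type(-1_i64, &JsonNativeType::u64()).is_err());
}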
#[cfg(test)]
@@ -702,6 +688,7 @@ mod tests {
use serde_json::json;
use super::*;
use crate::data_type::ConcreteDataType;
use crate::types::ListType;
#[test]
@@ -898,15 +885,15 @@ mod tests {
let json = Json::from(42);
let settings = JsonStructureSettings::Structured(None);
let result = settings
.encode_with_type(json.clone(), Some(&ConcreteDataType::int8_datatype()))
.encode_with_type(json.clone(), Some(&JsonNativeType::u64()))
.unwrap()
.into_json_inner()
.unwrap();
assert_eq!(result, Value::Int8(42));
assert_eq!(result, Value::UInt64(42));
// Test with expected string type
let result = settings
.encode_with_type(json, Some(&ConcreteDataType::string_datatype()))
.encode_with_type(json, Some(&JsonNativeType::String))
.unwrap()
.into_json_inner()
.unwrap();
@@ -917,23 +904,11 @@ mod tests {
fn test_encode_json_array_mixed_types() {
let json = json!([1, "hello", true, 3.15]);
let settings = JsonStructureSettings::Structured(None);
let result = settings
.encode_with_type(json, None)
.unwrap()
.into_json_inner()
.unwrap();
if let Value::List(list_value) = result {
assert_eq!(list_value.items().len(), 4);
// The first non-null type should determine the list type
// In this case, it should be string since we can't find a common numeric type
assert_eq!(
list_value.datatype(),
Arc::new(ConcreteDataType::int64_datatype())
);
} else {
panic!("Expected List value");
}
let result = settings.encode_with_type(json, None);
assert_eq!(
result.unwrap_err().to_string(),
"Invalid JSON: all items in json array must have the same type"
);
}
#[test]
@@ -951,7 +926,7 @@ mod tests {
// Empty arrays default to string type
assert_eq!(
list_value.datatype(),
Arc::new(ConcreteDataType::string_datatype())
Arc::new(ConcreteDataType::null_datatype())
);
} else {
panic!("Expected List value");
@@ -987,16 +962,10 @@ mod tests {
});
// Define expected struct type
let fields = vec![
StructField::new(
"name".to_string(),
ConcreteDataType::string_datatype(),
true,
),
StructField::new("age".to_string(), ConcreteDataType::int64_datatype(), true),
];
let struct_type = StructType::new(Arc::new(fields));
let concrete_type = ConcreteDataType::Struct(struct_type);
let concrete_type = JsonNativeType::Object(JsonObjectType::from([
("name".to_string(), JsonNativeType::String),
("age".to_string(), JsonNativeType::i64()),
]));
let settings = JsonStructureSettings::Structured(None);
let result = settings
@@ -1008,15 +977,15 @@ mod tests {
if let Value::Struct(struct_value) = result {
assert_eq!(struct_value.items().len(), 2);
let struct_fields = struct_value.struct_type().fields();
assert_eq!(struct_fields[0].name(), "name");
assert_eq!(struct_fields[0].name(), "age");
assert_eq!(
struct_fields[0].data_type(),
&ConcreteDataType::string_datatype()
&ConcreteDataType::int64_datatype()
);
assert_eq!(struct_fields[1].name(), "age");
assert_eq!(struct_fields[1].name(), "name");
assert_eq!(
struct_fields[1].data_type(),
&ConcreteDataType::int64_datatype()
&ConcreteDataType::string_datatype()
);
} else {
panic!("Expected Struct value");
@@ -1032,34 +1001,24 @@ mod tests {
});
// Define schema with specific field order
let fields = vec![
StructField::new(
"a_field".to_string(),
ConcreteDataType::string_datatype(),
true,
),
StructField::new(
"m_field".to_string(),
ConcreteDataType::string_datatype(),
true,
),
StructField::new(
"z_field".to_string(),
ConcreteDataType::string_datatype(),
true,
),
];
let struct_type = StructType::new(Arc::new(fields));
let json_type = JsonObjectType::from([
("a_field".to_string(), JsonNativeType::String),
("m_field".to_string(), JsonNativeType::String),
("z_field".to_string(), JsonNativeType::String),
]);
let result = encode_json_object_with_context(
let Value::Struct(result) = encode_json_object_with_context(
json.as_object().unwrap().clone(),
Some(&struct_type),
Some(&json_type),
&JsonContext {
key_path: String::new(),
settings: &JsonStructureSettings::Structured(None),
},
)
.unwrap();
.map(|x| x.into_value())
.unwrap() else {
unreachable!()
};
// Verify field order is preserved from schema
let struct_fields = result.struct_type().fields();
@@ -1083,37 +1042,35 @@ mod tests {
});
// Define schema with only name and age
let fields = vec![
StructField::new(
"name".to_string(),
ConcreteDataType::string_datatype(),
true,
),
StructField::new("age".to_string(), ConcreteDataType::int64_datatype(), true),
];
let struct_type = StructType::new(Arc::new(fields));
let json_type = JsonObjectType::from([
("name".to_string(), JsonNativeType::String),
("age".to_string(), JsonNativeType::i64()),
]);
let result = encode_json_object_with_context(
let Value::Struct(result) = encode_json_object_with_context(
json.as_object().unwrap().clone(),
Some(&struct_type),
Some(&json_type),
&JsonContext {
key_path: String::new(),
settings: &JsonStructureSettings::Structured(None),
},
)
.unwrap();
.map(|x| x.into_value())
.unwrap() else {
unreachable!()
};
// Verify schema fields come first in order
// verify fields are sorted in json value
let struct_fields = result.struct_type().fields();
assert_eq!(struct_fields[0].name(), "name");
assert_eq!(struct_fields[0].name(), "active");
assert_eq!(struct_fields[1].name(), "age");
assert_eq!(struct_fields[2].name(), "active");
assert_eq!(struct_fields[2].name(), "name");
// Verify values are correct
let items = result.items();
assert_eq!(items[0], Value::String("Alice".into()));
assert_eq!(items[0], Value::Boolean(true));
assert_eq!(items[1], Value::Int64(25));
assert_eq!(items[2], Value::Boolean(true));
assert_eq!(items[2], Value::String("Alice".into()));
}
#[test]
@@ -1124,35 +1081,33 @@ mod tests {
});
// Define schema with name and age
let fields = vec![
StructField::new(
"name".to_string(),
ConcreteDataType::string_datatype(),
true,
),
StructField::new("age".to_string(), ConcreteDataType::int64_datatype(), true),
];
let struct_type = StructType::new(Arc::new(fields));
let json_type = JsonObjectType::from([
("name".to_string(), JsonNativeType::String),
("age".to_string(), JsonNativeType::i64()),
]);
let result = encode_json_object_with_context(
let Value::Struct(result) = encode_json_object_with_context(
json.as_object().unwrap().clone(),
Some(&struct_type),
Some(&json_type),
&JsonContext {
key_path: String::new(),
settings: &JsonStructureSettings::Structured(None),
},
)
.unwrap();
.map(|x| x.into_value())
.unwrap() else {
unreachable!()
};
// Verify both schema fields are present
let struct_fields = result.struct_type().fields();
assert_eq!(struct_fields[0].name(), "name");
assert_eq!(struct_fields[1].name(), "age");
assert_eq!(struct_fields[0].name(), "age");
assert_eq!(struct_fields[1].name(), "name");
// Verify values - name has value, age is null
let items = result.items();
assert_eq!(items[0], Value::String("Bob".into()));
assert_eq!(items[1], Value::Null);
assert_eq!(items[0], Value::Null);
assert_eq!(items[1], Value::String("Bob".into()));
}
#[test]
@@ -1175,21 +1130,22 @@ mod tests {
#[test]
fn test_encode_json_array_with_item_type() {
let json = json!([1, 2, 3]);
let item_type = Arc::new(ConcreteDataType::int8_datatype());
let list_type = ListType::new(item_type.clone());
let concrete_type = ConcreteDataType::List(list_type);
let item_type = Arc::new(ConcreteDataType::uint64_datatype());
let settings = JsonStructureSettings::Structured(None);
let result = settings
.encode_with_type(json, Some(&concrete_type))
.encode_with_type(
json,
Some(&JsonNativeType::Array(Box::new(JsonNativeType::u64()))),
)
.unwrap()
.into_json_inner()
.unwrap();
if let Value::List(list_value) = result {
assert_eq!(list_value.items().len(), 3);
assert_eq!(list_value.items()[0], Value::Int8(1));
assert_eq!(list_value.items()[1], Value::Int8(2));
assert_eq!(list_value.items()[2], Value::Int8(3));
assert_eq!(list_value.items()[0], Value::UInt64(1));
assert_eq!(list_value.items()[1], Value::UInt64(2));
assert_eq!(list_value.items()[2], Value::UInt64(3));
assert_eq!(list_value.datatype(), item_type);
} else {
panic!("Expected List value");
@@ -1199,12 +1155,13 @@ mod tests {
#[test]
fn test_encode_json_array_empty_with_item_type() {
let json = json!([]);
let item_type = Arc::new(ConcreteDataType::string_datatype());
let list_type = ListType::new(item_type.clone());
let concrete_type = ConcreteDataType::List(list_type);
let item_type = Arc::new(ConcreteDataType::null_datatype());
let settings = JsonStructureSettings::Structured(None);
let result = settings
.encode_with_type(json, Some(&concrete_type))
.encode_with_type(
json,
Some(&JsonNativeType::Array(Box::new(JsonNativeType::Null))),
)
.unwrap()
.into_json_inner()
.unwrap();
@@ -1219,6 +1176,7 @@ mod tests {
#[cfg(test)]
mod decode_tests {
use ordered_float::OrderedFloat;
use serde_json::json;
use super::*;
@@ -1473,7 +1431,7 @@ mod tests {
// Test encoding JSON number with expected int64 type
let json = Json::from(42);
let result = settings
.encode_with_type(json, Some(&ConcreteDataType::int64_datatype()))
.encode_with_type(json, Some(&JsonNativeType::i64()))
.unwrap()
.into_json_inner()
.unwrap();
@@ -1482,7 +1440,7 @@ mod tests {
// Test encoding JSON string with expected string type
let json = Json::String("hello".to_string());
let result = settings
.encode_with_type(json, Some(&ConcreteDataType::string_datatype()))
.encode_with_type(json, Some(&JsonNativeType::String))
.unwrap()
.into_json_inner()
.unwrap();
@@ -1491,7 +1449,7 @@ mod tests {
// Test encoding JSON boolean with expected boolean type
let json = Json::Bool(true);
let result = settings
.encode_with_type(json, Some(&ConcreteDataType::boolean_datatype()))
.encode_with_type(json, Some(&JsonNativeType::Bool))
.unwrap()
.into_json_inner()
.unwrap();
@@ -1503,12 +1461,12 @@ mod tests {
// Test encoding JSON number with mismatched string type
let json = Json::from(42);
let settings = JsonStructureSettings::Structured(None);
let result = settings.encode_with_type(json, Some(&ConcreteDataType::string_datatype()));
let result = settings.encode_with_type(json, Some(&JsonNativeType::String));
assert!(result.is_ok()); // Should succeed due to type conversion
// Test encoding JSON object with mismatched non-struct type
let json = json!({"name": "test"});
let result = settings.encode_with_type(json, Some(&ConcreteDataType::int64_datatype()));
let result = settings.encode_with_type(json, Some(&JsonNativeType::i64()));
assert!(result.is_err()); // Should fail - object can't be converted to int64
}
@@ -1516,12 +1474,13 @@ mod tests {
fn test_encode_json_array_with_list_type() {
let json = json!([1, 2, 3]);
let item_type = Arc::new(ConcreteDataType::int64_datatype());
let list_type = ListType::new(item_type.clone());
let concrete_type = ConcreteDataType::List(list_type);
let settings = JsonStructureSettings::Structured(None);
let result = settings
.encode_with_type(json, Some(&concrete_type))
.encode_with_type(
json,
Some(&JsonNativeType::Array(Box::new(JsonNativeType::i64()))),
)
.unwrap()
.into_json_inner()
.unwrap();
@@ -1543,7 +1502,7 @@ mod tests {
let json = Json::Null;
let settings = JsonStructureSettings::Structured(None);
let result = settings
.encode_with_type(json.clone(), Some(&ConcreteDataType::null_datatype()))
.encode_with_type(json.clone(), Some(&JsonNativeType::Null))
.unwrap()
.into_json_inner()
.unwrap();
@@ -1552,7 +1511,7 @@ mod tests {
// Test float with float64 type
let json = Json::from(3.15);
let result = settings
.encode_with_type(json, Some(&ConcreteDataType::float64_datatype()))
.encode_with_type(json, Some(&JsonNativeType::f64()))
.unwrap()
.into_json_inner()
.unwrap();
@@ -1644,20 +1603,11 @@ mod tests {
}
// Test with encode_with_type (with type)
let struct_type = StructType::new(Arc::new(vec![
StructField::new(
"name".to_string(),
ConcreteDataType::string_datatype(),
true,
),
StructField::new("age".to_string(), ConcreteDataType::int64_datatype(), true),
StructField::new(
"active".to_string(),
ConcreteDataType::boolean_datatype(),
true,
),
let concrete_type = JsonNativeType::Object(JsonObjectType::from([
("name".to_string(), JsonNativeType::String),
("age".to_string(), JsonNativeType::i64()),
("active".to_string(), JsonNativeType::Bool),
]));
let concrete_type = ConcreteDataType::Struct(struct_type);
let result2 = settings
.encode_with_type(json, Some(&concrete_type))
@@ -2153,20 +2103,11 @@ mod tests {
)])),
);
let decoded_struct = settings.decode_struct(array_struct).unwrap();
let fields = decoded_struct.struct_type().fields();
let decoded_fields: Vec<&str> = fields.iter().map(|f| f.name()).collect();
assert!(decoded_fields.contains(&"value"));
if let Value::List(list_value) = &decoded_struct.items()[0] {
assert_eq!(list_value.items().len(), 4);
assert_eq!(list_value.items()[0], Value::Int64(1));
assert_eq!(list_value.items()[1], Value::String("hello".into()));
assert_eq!(list_value.items()[2], Value::Boolean(true));
assert_eq!(list_value.items()[3], Value::Float64(OrderedFloat(3.15)));
} else {
panic!("Expected array to be decoded as ListValue");
}
let decoded_struct = settings.decode_struct(array_struct);
assert_eq!(
decoded_struct.unwrap_err().to_string(),
"Invalid JSON: all items in json array must have the same type"
);
}
#[test]


@@ -0,0 +1,683 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::BTreeMap;
use std::fmt::{Display, Formatter};
use std::hash::{Hash, Hasher};
use std::sync::{Arc, OnceLock};
use num_traits::ToPrimitive;
use ordered_float::OrderedFloat;
use serde::{Deserialize, Serialize};
use serde_json::Number;
use crate::data_type::ConcreteDataType;
use crate::types::json_type::JsonNativeType;
use crate::types::{JsonType, StructField, StructType};
use crate::value::{ListValue, ListValueRef, StructValue, StructValueRef, Value, ValueRef};
/// A number in json can be a positive integer, a negative integer, or a floating
/// point number, represented as `u64`, `i64`, and `f64` respectively.
///
/// This follows how `serde_json` models numbers.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub enum JsonNumber {
PosInt(u64),
NegInt(i64),
Float(OrderedFloat<f64>),
}
impl JsonNumber {
fn as_u64(&self) -> Option<u64> {
match self {
JsonNumber::PosInt(n) => Some(*n),
JsonNumber::NegInt(n) => (*n >= 0).then_some(*n as u64),
_ => None,
}
}
fn as_i64(&self) -> Option<i64> {
match self {
JsonNumber::PosInt(n) => (*n <= i64::MAX as u64).then_some(*n as i64),
JsonNumber::NegInt(n) => Some(*n),
_ => None,
}
}
fn as_f64(&self) -> f64 {
match self {
JsonNumber::PosInt(n) => *n as f64,
JsonNumber::NegInt(n) => *n as f64,
JsonNumber::Float(n) => n.0,
}
}
}
impl From<u64> for JsonNumber {
fn from(i: u64) -> Self {
Self::PosInt(i)
}
}
impl From<i64> for JsonNumber {
fn from(n: i64) -> Self {
Self::NegInt(n)
}
}
impl From<f64> for JsonNumber {
fn from(i: f64) -> Self {
Self::Float(i.into())
}
}
impl Display for JsonNumber {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
match self {
Self::PosInt(x) => write!(f, "{x}"),
Self::NegInt(x) => write!(f, "{x}"),
Self::Float(x) => write!(f, "{x}"),
}
}
}
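A small in-module sketch of how these accessors behave (illustrative only; the accessors are private, so this would sit next to the type):
#[test]
fn json_number_accessor_sketch() {
    // PosInt widens to i64 only when it fits; NegInt widens to u64 only when non-negative.
    assert_eq!(JsonNumber::PosInt(7).as_i64(), Some(7));
    assert_eq!(JsonNumber::from(u64::MAX).as_i64(), None);
    assert_eq!(JsonNumber::NegInt(-1).as_u64(), None);
    // as_f64 is always available for numbers, possibly losing precision for large integers.
    assert_eq!(JsonNumber::Float(2.5.into()).as_f64(), 2.5);
}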
/// Variants of json.
///
/// This follows the design of [serde_json::Value], except that we only use [BTreeMap],
/// which orders the fields of a json object by their names. (By default `serde_json`
/// uses [BTreeMap] too, but it additionally supports `IndexMap`, which preserves the
/// insertion order of fields.)
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub enum JsonVariant {
Null,
Bool(bool),
Number(JsonNumber),
String(String),
Array(Vec<JsonVariant>),
Object(BTreeMap<String, JsonVariant>),
}
impl JsonVariant {
fn native_type(&self) -> JsonNativeType {
match self {
JsonVariant::Null => JsonNativeType::Null,
JsonVariant::Bool(_) => JsonNativeType::Bool,
JsonVariant::Number(n) => match n {
JsonNumber::PosInt(_) => JsonNativeType::u64(),
JsonNumber::NegInt(_) => JsonNativeType::i64(),
JsonNumber::Float(_) => JsonNativeType::f64(),
},
JsonVariant::String(_) => JsonNativeType::String,
JsonVariant::Array(array) => {
let item_type = if let Some(first) = array.first() {
first.native_type()
} else {
JsonNativeType::Null
};
JsonNativeType::Array(Box::new(item_type))
}
JsonVariant::Object(object) => JsonNativeType::Object(
object
.iter()
.map(|(k, v)| (k.clone(), v.native_type()))
.collect(),
),
}
}
fn json_type(&self) -> JsonType {
JsonType::new_native(self.native_type())
}
fn as_ref(&self) -> JsonVariantRef<'_> {
match self {
JsonVariant::Null => JsonVariantRef::Null,
JsonVariant::Bool(x) => (*x).into(),
JsonVariant::Number(x) => match x {
JsonNumber::PosInt(i) => (*i).into(),
JsonNumber::NegInt(i) => (*i).into(),
JsonNumber::Float(f) => (f.0).into(),
},
JsonVariant::String(x) => x.as_str().into(),
JsonVariant::Array(array) => {
JsonVariantRef::Array(array.iter().map(|x| x.as_ref()).collect())
}
JsonVariant::Object(object) => JsonVariantRef::Object(
object
.iter()
.map(|(k, v)| (k.as_str(), v.as_ref()))
.collect(),
),
}
}
}
impl From<()> for JsonVariant {
fn from(_: ()) -> Self {
Self::Null
}
}
impl From<bool> for JsonVariant {
fn from(v: bool) -> Self {
Self::Bool(v)
}
}
impl<T: Into<JsonNumber>> From<T> for JsonVariant {
fn from(v: T) -> Self {
Self::Number(v.into())
}
}
impl From<&str> for JsonVariant {
fn from(v: &str) -> Self {
Self::String(v.to_string())
}
}
impl From<String> for JsonVariant {
fn from(v: String) -> Self {
Self::String(v)
}
}
impl<const N: usize, T: Into<JsonVariant>> From<[T; N]> for JsonVariant {
fn from(vs: [T; N]) -> Self {
Self::Array(vs.into_iter().map(|x| x.into()).collect())
}
}
impl<K: Into<String>, V: Into<JsonVariant>, const N: usize> From<[(K, V); N]> for JsonVariant {
fn from(vs: [(K, V); N]) -> Self {
Self::Object(vs.into_iter().map(|(k, v)| (k.into(), v.into())).collect())
}
}
impl Display for JsonVariant {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
match self {
Self::Null => write!(f, "null"),
Self::Bool(x) => write!(f, "{x}"),
Self::Number(x) => write!(f, "{x}"),
Self::String(x) => write!(f, "{x}"),
Self::Array(array) => write!(
f,
"[{}]",
array
.iter()
.map(|x| x.to_string())
.collect::<Vec<_>>()
.join(", ")
),
Self::Object(object) => {
write!(
f,
"{{ {} }}",
object
.iter()
.map(|(k, v)| format!("{k}: {v}"))
.collect::<Vec<_>>()
.join(", ")
)
}
}
}
}
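Because the object variant is backed by a BTreeMap, fields are always ordered by name no matter how they were supplied; a small sketch (illustrative only) using the From impls above:
#[test]
fn json_variant_ordering_sketch() {
    // Built in the order b, a; displayed sorted by field name.
    let v = JsonVariant::from([("b", 1_i64), ("a", 2_i64)]);
    assert_eq!(v.to_string(), "{ a: 2, b: 1 }");
}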
/// Represents any valid JSON value.
#[derive(Debug, Eq, Serialize, Deserialize)]
pub struct JsonValue {
#[serde(skip)]
json_type: OnceLock<JsonType>,
json_variant: JsonVariant,
}
impl JsonValue {
pub fn null() -> Self {
().into()
}
pub(crate) fn new(json_variant: JsonVariant) -> Self {
Self {
json_type: OnceLock::new(),
json_variant,
}
}
pub(crate) fn data_type(&self) -> ConcreteDataType {
ConcreteDataType::Json(self.json_type().clone())
}
pub(crate) fn json_type(&self) -> &JsonType {
self.json_type.get_or_init(|| self.json_variant.json_type())
}
pub(crate) fn is_null(&self) -> bool {
matches!(self.json_variant, JsonVariant::Null)
}
pub(crate) fn as_i64(&self) -> Option<i64> {
match self.json_variant {
JsonVariant::Number(n) => n.as_i64(),
_ => None,
}
}
pub(crate) fn as_u64(&self) -> Option<u64> {
match self.json_variant {
JsonVariant::Number(n) => n.as_u64(),
_ => None,
}
}
pub(crate) fn as_f64(&self) -> Option<f64> {
match self.json_variant {
JsonVariant::Number(n) => Some(n.as_f64()),
_ => None,
}
}
pub(crate) fn as_f64_lossy(&self) -> Option<f64> {
match self.json_variant {
JsonVariant::Number(n) => Some(match n {
JsonNumber::PosInt(i) => i as f64,
JsonNumber::NegInt(i) => i as f64,
JsonNumber::Float(f) => f.0,
}),
_ => None,
}
}
pub(crate) fn as_bool(&self) -> Option<bool> {
match self.json_variant {
JsonVariant::Bool(b) => Some(b),
_ => None,
}
}
pub fn as_ref(&self) -> JsonValueRef<'_> {
JsonValueRef {
json_type: OnceLock::new(),
json_variant: self.json_variant.as_ref(),
}
}
pub fn into_variant(self) -> JsonVariant {
self.json_variant
}
pub(crate) fn into_value(self) -> Value {
fn helper(v: JsonVariant) -> Value {
match v {
JsonVariant::Null => Value::Null,
JsonVariant::Bool(x) => Value::Boolean(x),
JsonVariant::Number(x) => match x {
JsonNumber::PosInt(i) => Value::UInt64(i),
JsonNumber::NegInt(i) => Value::Int64(i),
JsonNumber::Float(f) => Value::Float64(f),
},
JsonVariant::String(x) => Value::String(x.into()),
JsonVariant::Array(array) => {
let item_type = if let Some(first) = array.first() {
first.native_type()
} else {
JsonNativeType::Null
};
Value::List(ListValue::new(
array.into_iter().map(helper).collect(),
Arc::new((&item_type).into()),
))
}
JsonVariant::Object(object) => {
let mut fields = Vec::with_capacity(object.len());
let mut items = Vec::with_capacity(object.len());
for (k, v) in object {
fields.push(StructField::new(k, (&v.native_type()).into(), true));
items.push(helper(v));
}
Value::Struct(StructValue::new(items, StructType::new(Arc::new(fields))))
}
}
}
helper(self.json_variant)
}
}
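// Note on `into_value`: it lowers the JSON tree into the generic `Value`
// representation; numbers map to UInt64/Int64/Float64, arrays to `Value::List`
// (item type taken from the first element, `Null` when empty), and objects to
// `Value::Struct` with one field per key.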
impl<T: Into<JsonVariant>> From<T> for JsonValue {
fn from(v: T) -> Self {
Self {
json_type: OnceLock::new(),
json_variant: v.into(),
}
}
}
impl From<JsonValue> for serde_json::Value {
fn from(v: JsonValue) -> Self {
fn helper(v: JsonVariant) -> serde_json::Value {
match v {
JsonVariant::Null => serde_json::Value::Null,
JsonVariant::Bool(x) => serde_json::Value::Bool(x),
JsonVariant::Number(x) => match x {
JsonNumber::PosInt(i) => serde_json::Value::Number(i.into()),
JsonNumber::NegInt(i) => serde_json::Value::Number(i.into()),
JsonNumber::Float(f) => {
if let Some(x) = Number::from_f64(f.0) {
serde_json::Value::Number(x)
} else {
serde_json::Value::String("NaN".into())
}
}
},
JsonVariant::String(x) => serde_json::Value::String(x),
JsonVariant::Array(array) => {
serde_json::Value::Array(array.into_iter().map(helper).collect())
}
JsonVariant::Object(object) => serde_json::Value::Object(
object.into_iter().map(|(k, v)| (k, helper(v))).collect(),
),
}
}
helper(v.json_variant)
}
}
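// Conversion into `serde_json::Value` is structure-preserving except for floats
// that `serde_json::Number::from_f64` rejects (NaN and infinities); those fall
// back to the string "NaN".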
impl Clone for JsonValue {
fn clone(&self) -> Self {
let Self {
json_type: _,
json_variant,
} = self;
Self {
json_type: OnceLock::new(),
json_variant: json_variant.clone(),
}
}
}
impl PartialEq<JsonValue> for JsonValue {
fn eq(&self, other: &JsonValue) -> bool {
let Self {
json_type: _,
json_variant,
} = self;
json_variant.eq(&other.json_variant)
}
}
impl Hash for JsonValue {
fn hash<H: Hasher>(&self, state: &mut H) {
let Self {
json_type: _,
json_variant,
} = self;
json_variant.hash(state);
}
}
impl Display for JsonValue {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
write!(f, "{}", self.json_variant)
}
}
/// A borrowed variant of a JSON value.
#[derive(Debug, Clone, PartialEq, Eq, Serialize)]
pub enum JsonVariantRef<'a> {
Null,
Bool(bool),
Number(JsonNumber),
String(&'a str),
Array(Vec<JsonVariantRef<'a>>),
Object(BTreeMap<&'a str, JsonVariantRef<'a>>),
}
impl JsonVariantRef<'_> {
fn json_type(&self) -> JsonType {
fn native_type(v: &JsonVariantRef<'_>) -> JsonNativeType {
match v {
JsonVariantRef::Null => JsonNativeType::Null,
JsonVariantRef::Bool(_) => JsonNativeType::Bool,
JsonVariantRef::Number(n) => match n {
JsonNumber::PosInt(_) => JsonNativeType::u64(),
JsonNumber::NegInt(_) => JsonNativeType::i64(),
JsonNumber::Float(_) => JsonNativeType::f64(),
},
JsonVariantRef::String(_) => JsonNativeType::String,
JsonVariantRef::Array(array) => {
let item_type = if let Some(first) = array.first() {
native_type(first)
} else {
JsonNativeType::Null
};
JsonNativeType::Array(Box::new(item_type))
}
JsonVariantRef::Object(object) => JsonNativeType::Object(
object
.iter()
.map(|(k, v)| (k.to_string(), native_type(v)))
.collect(),
),
}
}
JsonType::new_native(native_type(self))
}
}
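// Type inference for borrowed values mirrors the owned path: an array's element
// type is taken from its first element, defaulting to `Null` when empty.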
impl From<()> for JsonVariantRef<'_> {
fn from(_: ()) -> Self {
Self::Null
}
}
impl From<bool> for JsonVariantRef<'_> {
fn from(v: bool) -> Self {
Self::Bool(v)
}
}
impl<T: Into<JsonNumber>> From<T> for JsonVariantRef<'_> {
fn from(v: T) -> Self {
Self::Number(v.into())
}
}
impl<'a> From<&'a str> for JsonVariantRef<'a> {
fn from(v: &'a str) -> Self {
Self::String(v)
}
}
impl<'a, const N: usize, T: Into<JsonVariantRef<'a>>> From<[T; N]> for JsonVariantRef<'a> {
fn from(vs: [T; N]) -> Self {
Self::Array(vs.into_iter().map(|x| x.into()).collect())
}
}
impl<'a, V: Into<JsonVariantRef<'a>>, const N: usize> From<[(&'a str, V); N]>
for JsonVariantRef<'a>
{
fn from(vs: [(&'a str, V); N]) -> Self {
Self::Object(vs.into_iter().map(|(k, v)| (k, v.into())).collect())
}
}
impl<'a> From<Vec<JsonVariantRef<'a>>> for JsonVariantRef<'a> {
fn from(v: Vec<JsonVariantRef<'a>>) -> Self {
Self::Array(v)
}
}
impl<'a> From<BTreeMap<&'a str, JsonVariantRef<'a>>> for JsonVariantRef<'a> {
fn from(v: BTreeMap<&'a str, JsonVariantRef<'a>>) -> Self {
Self::Object(v)
}
}
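// As with the owned `JsonVariant`, fixed-size arrays and arrays of pairs convert
// directly into the borrowed `Array`/`Object` variants; `Vec` and `BTreeMap`
// values are accepted as-is.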
impl From<JsonVariantRef<'_>> for JsonVariant {
fn from(v: JsonVariantRef) -> Self {
match v {
JsonVariantRef::Null => Self::Null,
JsonVariantRef::Bool(x) => Self::Bool(x),
JsonVariantRef::Number(x) => Self::Number(x),
JsonVariantRef::String(x) => Self::String(x.to_string()),
JsonVariantRef::Array(array) => {
Self::Array(array.into_iter().map(Into::into).collect())
}
JsonVariantRef::Object(object) => Self::Object(
object
.into_iter()
.map(|(k, v)| (k.to_string(), v.into()))
.collect(),
),
}
}
}
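// Promoting a borrowed `JsonVariantRef` back to an owned `JsonVariant` clones
// the borrowed string slices and keys into owned `String`s and rebuilds nested
// containers recursively.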
/// A borrowed representation of any valid JSON value.
#[derive(Debug, Serialize)]
pub struct JsonValueRef<'a> {
#[serde(skip)]
json_type: OnceLock<JsonType>,
json_variant: JsonVariantRef<'a>,
}
impl<'a> JsonValueRef<'a> {
pub fn null() -> Self {
().into()
}
pub(crate) fn data_type(&self) -> ConcreteDataType {
ConcreteDataType::Json(self.json_type().clone())
}
pub(crate) fn json_type(&self) -> &JsonType {
self.json_type.get_or_init(|| self.json_variant.json_type())
}
pub fn into_variant(self) -> JsonVariantRef<'a> {
self.json_variant
}
pub(crate) fn is_null(&self) -> bool {
matches!(self.json_variant, JsonVariantRef::Null)
}
pub fn is_object(&self) -> bool {
matches!(self.json_variant, JsonVariantRef::Object(_))
}
pub(crate) fn as_f32(&self) -> Option<f32> {
match self.json_variant {
JsonVariantRef::Number(JsonNumber::Float(f)) => f.to_f32(),
_ => None,
}
}
pub(crate) fn as_f64(&self) -> Option<f64> {
match self.json_variant {
JsonVariantRef::Number(JsonNumber::Float(f)) => Some(f.0),
_ => None,
}
}
pub fn as_value_ref(&self) -> ValueRef<'_> {
fn helper<'a>(v: &'a JsonVariantRef) -> ValueRef<'a> {
match v {
JsonVariantRef::Null => ValueRef::Null,
JsonVariantRef::Bool(x) => ValueRef::Boolean(*x),
JsonVariantRef::Number(x) => match x {
JsonNumber::PosInt(i) => ValueRef::UInt64(*i),
JsonNumber::NegInt(i) => ValueRef::Int64(*i),
JsonNumber::Float(f) => ValueRef::Float64(*f),
},
JsonVariantRef::String(x) => ValueRef::String(x),
JsonVariantRef::Array(array) => {
let val = array.iter().map(helper).collect::<Vec<_>>();
let item_datatype = if let Some(first) = val.first() {
first.data_type()
} else {
ConcreteDataType::null_datatype()
};
ValueRef::List(ListValueRef::RefList {
val,
item_datatype: Arc::new(item_datatype),
})
}
JsonVariantRef::Object(object) => {
let mut fields = Vec::with_capacity(object.len());
let mut val = Vec::with_capacity(object.len());
for (k, v) in object.iter() {
let v = helper(v);
fields.push(StructField::new(k.to_string(), v.data_type(), true));
val.push(v);
}
ValueRef::Struct(StructValueRef::RefList {
val,
fields: StructType::new(Arc::new(fields)),
})
}
}
}
helper(&self.json_variant)
}
pub(crate) fn data_size(&self) -> usize {
size_of_val(self)
}
}
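// `as_value_ref` mirrors `JsonValue::into_value` for borrowed data: arrays
// become `ValueRef::List` (item type inferred from the first element) and
// objects become `ValueRef::Struct` with one field per key. Note that
// `data_size` only measures the reference value itself (`size_of_val`), not the
// borrowed data behind it.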
impl<'a, T: Into<JsonVariantRef<'a>>> From<T> for JsonValueRef<'a> {
fn from(v: T) -> Self {
Self {
json_type: OnceLock::new(),
json_variant: v.into(),
}
}
}
impl From<JsonValueRef<'_>> for JsonValue {
fn from(v: JsonValueRef<'_>) -> Self {
Self {
json_type: OnceLock::new(),
json_variant: v.json_variant.into(),
}
}
}
impl PartialEq for JsonValueRef<'_> {
fn eq(&self, other: &Self) -> bool {
let Self {
json_type: _,
json_variant,
} = self;
json_variant == &other.json_variant
}
}
impl Eq for JsonValueRef<'_> {}
impl Clone for JsonValueRef<'_> {
fn clone(&self) -> Self {
let Self {
json_type: _,
json_variant,
} = self;
Self {
json_type: OnceLock::new(),
json_variant: json_variant.clone(),
}
}
}
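// Illustrative usage sketch (not part of this change): exercises the conversion
// and formatting paths defined above, assuming this module re-exports
// `JsonValue` via `super::*` and depends on `serde_json` as the code already does.
#[cfg(test)]
mod json_value_usage_sketch {
    use super::*;

    #[test]
    fn object_display_and_serde_round_trip() {
        // Build an owned JSON object from a fixed-size array of pairs.
        let value = JsonValue::from([("k", "v")]);
        assert!(value.as_ref().is_object());
        // Display prints objects as `{ key: value }` with unquoted strings.
        assert_eq!(value.to_string(), "{ k: v }");
        // Conversion into serde_json::Value preserves the structure.
        let json: serde_json::Value = value.into();
        assert_eq!(json, serde_json::json!({"k": "v"}));
    }
}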
