Compare commits


23 Commits

Author SHA1 Message Date
Weny Xu
c02b5dae93 chore: bump version to 0.9.5 (#4853) 2024-10-18 08:07:13 +00:00
Weny Xu
081c6d9e74 fix: flush metric metadata region (#4852)
* fix: flush metric metadata region

* chore: apply suggestions from CR
2024-10-18 07:21:35 +00:00
Weny Xu
ca6e02980e fix: overwrite entry_id if entry id is less than start_offset (#4842)
* fix: overwrite entry_id if entry id is less than start_offset

* feat: add `overwrite_entry_start_id` to options

* chore: update config.md
2024-10-18 06:31:02 +00:00
Weny Xu
74bdba4613 fix: fix metadata forward compatibility issue (#4846) 2024-10-18 06:26:41 +00:00
Weny Xu
2e0e82ddc8 chore: update greptime-proto to b4d3011 (#4850) 2024-10-18 04:10:22 +00:00
Yingwen
e0c4157ad8 feat: Seq scanner scans data by time range (#4809)
* feat: seq scan by partition

* feat: part metrics

* chore: remove unused codes

* chore: fmt stream

* feat: build ranges returns smallvec

* feat: move scan mem/file ranges to util and reuse

* feat: log metrics

* chore: correct some metrics

* feat: get explain info from ranges

* test: group test and remove unused codes

* chore: fix clippy

* feat: change PartitionRange end to exclusive

* test: add tests
2024-10-17 11:05:12 +00:00
discord9
613e07afb4 feat: window sort physical plan (#4814)
* WIP

* feat: range split& tests

* WIP: split range

* add sort exprs

* chore: typo

* WIP

* feat: find successive runs

* WIP

* READY FOR REVIEW PART ONE: more tests

* refactor: break into smaller functions

* feat: precompute working range(need testing)

* tests: on working range

* tests: on working range

* feat: support rev working range

* feat(to be tested): core logic of merge sort

* fix: poll results

* fix: find_slice_from_range&test

* chore: remove some unused util func&fields

* chore: typos

* chore: impl exec plan for WindowedSortExec

* test(WIP): window sort stream

* test: window sort stream

* chore: remove unused

* fix: fetch

* fix: WIP intersection remaining

* test: fix and test!

* chore: remove outdated comments

* chore: rename test

* chore: remove dbg line

* chore: sorted runs

* feat: handling unexpected data

* chore: unused

* chore: remove a print in test

* chore: per review

* docs: wrong comment

* chore: more test cases
2024-10-16 11:50:25 +00:00
Weny Xu
0ce93f0b88 chore: add more metrics for region migration (#4838) 2024-10-16 09:36:57 +00:00
Ning Sun
c231eee7c1 fix: respect feature flags for geo function (#4836) 2024-10-16 07:46:31 +00:00
Yiran
176f2df5b3 fix: dead links (#4837) 2024-10-16 07:43:14 +00:00
localhost
4622412dfe feat: add API to write OpenTelemetry logs to GreptimeDB (#4755)
* chore: otlp logs api

* feat: add API to write OpenTelemetry logs to GreptimeDB

* chore: fix test data schema error

* chore: modify the underlying data structure of the pipeline value map type from hashmap to btreemap to keep key order

* chore: fix by pr comment

* chore: resolve conflicts and add some test

* chore: remove useless error

* chore: change otlp header name

* chore: fmt code

* chore: fix integration test for otlp log write api

* chore: fix by pr comment

* chore: set otlp body with fulltext default
2024-10-16 04:36:08 +00:00
jeremyhi
59ec90299b refactor: metasrv cannot be cloned (#4834)
* refactor: metasrv cannot be cloned

* chore: remove MetasrvInstance's clone
2024-10-15 11:36:48 +00:00
discord9
16b8cdc3d5 chore: bump version v0.9.4 (#4833) 2024-10-15 10:48:03 +00:00
Weny Xu
3197b8b535 feat: introduce default customizers (#4831)
* feat: introduce `DefaultHeartbeatHandlerGroupBuilderCustomizer` and `DefaultLeadershipChangeNotifierCustomizer`

* chore: code styling
2024-10-15 09:48:13 +00:00
zyy17
972c2441af chore: bump promql-parser to v0.4.1 and use to_string() for EvalStmt (#4832)
chore: bump promql-parser to v0.4.1 and use to_string() for EvalStmt
2024-10-15 08:50:37 +00:00
Ning Sun
bb8b54b5d3 feat: add some s2 geo functions (#4823)
* feat: add first batch of s2 functions

* refactor: update reusable code from main

* test: add sqlness tests for s2

* feat: add tostring function for s2

* Update src/common/function/src/scalars/geo/s2.rs

Co-authored-by: Lei, HUANG <6406592+v0y4g3r@users.noreply.github.com>

* Apply suggestions from code review

* one more change

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
Co-authored-by: Ruihang Xia <waynestxia@gmail.com>
Co-authored-by: Lei, HUANG <6406592+v0y4g3r@users.noreply.github.com>
2024-10-15 06:47:29 +00:00
Weny Xu
b5233e500b feat: defer HeartbeatHandlerGroup construction and enhance LeadershipChangeNotifier (#4826)
* feat: enhance `HeartbeatHandlerGroup`

* chore: apply suggestions from CR

* chore: minor refactoring

* chore: code styling

* chore: apply suggestions from CR
2024-10-15 03:35:31 +00:00
Ruihang Xia
b61a388d04 refactor: replace info logs with debug logs in region server (#4829)
* refactor: replace info logs with debug logs in region server

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix: update error handling for closing and opening nonexistent regions

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-10-14 12:46:07 +00:00
Ruihang Xia
06e565d25a feat: cache logical region's metadata (#4827)
* feat: cache logical region's metadata

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* feat: implement logical region locking for metadata operations

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix: correct typo in comment for MetadataRegion struct

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-10-14 08:44:13 +00:00
Yingwen
3b2ce31a19 feat: enable prof features by default (#4815)
* feat: enable prof by default

* docs: don't need to build with features

* feat: add common-pprof as optional dep for pprof feature

* build: remove optional

* feat: use dump_text
2024-10-14 03:32:47 +00:00
Ruihang Xia
a889ea88ca fix: case sensitive for __field__ matcher (#4822)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-10-14 03:18:59 +00:00
Yingwen
2f2b4b306c feat!: implement interval type by multiple structs (#4772)
* define structs and methods

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* feat: re-implement interval types in time crate

* feat: use new

* feat: interval value

* feat: query crate interval

* feat: pg and mysql interval

* chore: remove unused imports

* chore: remove commented codes

* feat: make flow compile but may not work

* feat: flow datetime

* test: fix some tests

* test: fix some flow tests(WIP)

* chore: some fix test&docs

* fix: change interval order

* chore: remove unused codes

* chore: fix clippy

* chore: now signature change

* chore: remove todo

* feat: update error message

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
Co-authored-by: Ruihang Xia <waynestxia@gmail.com>
Co-authored-by: discord9 <discord9@163.com>
2024-10-14 03:09:03 +00:00
jeremyhi
856c0280f5 feat: remove the distributed lock (#4825)
* feat: remove the distributed lock as we do not need it any more

* chore: delete todo comment

* chore: remove unused error
2024-10-12 09:04:22 +00:00
130 changed files with 7393 additions and 3374 deletions

Cargo.lock (generated)
View File

@@ -214,7 +214,7 @@ checksum = "d301b3b94cb4b2f23d7917810addbbaff90738e0ca2be692bd027e70d7e0330c"
[[package]]
name = "api"
version = "0.9.3"
version = "0.9.5"
dependencies = [
"common-base",
"common-decimal",
@@ -230,6 +230,15 @@ dependencies = [
"tonic-build",
]
[[package]]
name = "approx"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3f2a05fd1bd10b2527e20a2cd32d8873d115b8b39fe219ee25f42a8aca6ba278"
dependencies = [
"num-traits",
]
[[package]]
name = "approx"
version = "0.5.1"
@@ -766,7 +775,7 @@ dependencies = [
[[package]]
name = "auth"
version = "0.9.3"
version = "0.9.5"
dependencies = [
"api",
"async-trait",
@@ -985,6 +994,7 @@ dependencies = [
"num-bigint",
"num-integer",
"num-traits",
"serde",
]
[[package]]
@@ -1375,7 +1385,7 @@ dependencies = [
[[package]]
name = "cache"
version = "0.9.3"
version = "0.9.5"
dependencies = [
"catalog",
"common-error",
@@ -1383,7 +1393,7 @@ dependencies = [
"common-meta",
"moka",
"snafu 0.8.5",
"substrait 0.9.3",
"substrait 0.9.5",
]
[[package]]
@@ -1410,7 +1420,7 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5"
[[package]]
name = "catalog"
version = "0.9.3"
version = "0.9.5"
dependencies = [
"api",
"arrow",
@@ -1548,6 +1558,16 @@ dependencies = [
"vob",
]
[[package]]
name = "cgmath"
version = "0.18.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1a98d30140e3296250832bbaaff83b27dcd6fa3cc70fb6f1f3e5c9c0023b5317"
dependencies = [
"approx 0.4.0",
"num-traits",
]
[[package]]
name = "chrono"
version = "0.4.38"
@@ -1739,7 +1759,7 @@ checksum = "1462739cb27611015575c0c11df5df7601141071f07518d56fcc1be504cbec97"
[[package]]
name = "client"
version = "0.9.3"
version = "0.9.5"
dependencies = [
"api",
"arc-swap",
@@ -1769,7 +1789,7 @@ dependencies = [
"serde_json",
"snafu 0.8.5",
"substrait 0.37.3",
"substrait 0.9.3",
"substrait 0.9.5",
"tokio",
"tokio-stream",
"tonic 0.11.0",
@@ -1799,7 +1819,7 @@ dependencies = [
[[package]]
name = "cmd"
version = "0.9.3"
version = "0.9.5"
dependencies = [
"async-trait",
"auth",
@@ -1856,7 +1876,7 @@ dependencies = [
"similar-asserts",
"snafu 0.8.5",
"store-api",
"substrait 0.9.3",
"substrait 0.9.5",
"table",
"temp-env",
"tempfile",
@@ -1902,7 +1922,7 @@ checksum = "55b672471b4e9f9e95499ea597ff64941a309b2cdbffcc46f2cc5e2d971fd335"
[[package]]
name = "common-base"
version = "0.9.3"
version = "0.9.5"
dependencies = [
"anymap2",
"async-trait",
@@ -1920,7 +1940,7 @@ dependencies = [
[[package]]
name = "common-catalog"
version = "0.9.3"
version = "0.9.5"
dependencies = [
"chrono",
"common-error",
@@ -1931,7 +1951,7 @@ dependencies = [
[[package]]
name = "common-config"
version = "0.9.3"
version = "0.9.5"
dependencies = [
"common-base",
"common-error",
@@ -1954,7 +1974,7 @@ dependencies = [
[[package]]
name = "common-datasource"
version = "0.9.3"
version = "0.9.5"
dependencies = [
"arrow",
"arrow-schema",
@@ -1991,7 +2011,7 @@ dependencies = [
[[package]]
name = "common-decimal"
version = "0.9.3"
version = "0.9.5"
dependencies = [
"bigdecimal 0.4.5",
"common-error",
@@ -2004,7 +2024,7 @@ dependencies = [
[[package]]
name = "common-error"
version = "0.9.3"
version = "0.9.5"
dependencies = [
"snafu 0.8.5",
"strum 0.25.0",
@@ -2013,7 +2033,7 @@ dependencies = [
[[package]]
name = "common-frontend"
version = "0.9.3"
version = "0.9.5"
dependencies = [
"api",
"async-trait",
@@ -2028,7 +2048,7 @@ dependencies = [
[[package]]
name = "common-function"
version = "0.9.3"
version = "0.9.5"
dependencies = [
"api",
"arc-swap",
@@ -2054,6 +2074,7 @@ dependencies = [
"once_cell",
"paste",
"ron",
"s2",
"serde",
"serde_json",
"session",
@@ -2067,7 +2088,7 @@ dependencies = [
[[package]]
name = "common-greptimedb-telemetry"
version = "0.9.3"
version = "0.9.5"
dependencies = [
"async-trait",
"common-runtime",
@@ -2084,7 +2105,7 @@ dependencies = [
[[package]]
name = "common-grpc"
version = "0.9.3"
version = "0.9.5"
dependencies = [
"api",
"arrow-flight",
@@ -2110,7 +2131,7 @@ dependencies = [
[[package]]
name = "common-grpc-expr"
version = "0.9.3"
version = "0.9.5"
dependencies = [
"api",
"common-base",
@@ -2128,7 +2149,7 @@ dependencies = [
[[package]]
name = "common-macro"
version = "0.9.3"
version = "0.9.5"
dependencies = [
"arc-swap",
"common-query",
@@ -2142,7 +2163,7 @@ dependencies = [
[[package]]
name = "common-mem-prof"
version = "0.9.3"
version = "0.9.5"
dependencies = [
"common-error",
"common-macro",
@@ -2155,7 +2176,7 @@ dependencies = [
[[package]]
name = "common-meta"
version = "0.9.3"
version = "0.9.5"
dependencies = [
"anymap2",
"api",
@@ -2212,11 +2233,23 @@ dependencies = [
[[package]]
name = "common-plugins"
version = "0.9.3"
version = "0.9.5"
[[package]]
name = "common-pprof"
version = "0.9.5"
dependencies = [
"common-error",
"common-macro",
"pprof",
"prost 0.12.6",
"snafu 0.8.5",
"tokio",
]
[[package]]
name = "common-procedure"
version = "0.9.3"
version = "0.9.5"
dependencies = [
"async-stream",
"async-trait",
@@ -2243,7 +2276,7 @@ dependencies = [
[[package]]
name = "common-procedure-test"
version = "0.9.3"
version = "0.9.5"
dependencies = [
"async-trait",
"common-procedure",
@@ -2251,7 +2284,7 @@ dependencies = [
[[package]]
name = "common-query"
version = "0.9.3"
version = "0.9.5"
dependencies = [
"api",
"async-trait",
@@ -2277,7 +2310,7 @@ dependencies = [
[[package]]
name = "common-recordbatch"
version = "0.9.3"
version = "0.9.5"
dependencies = [
"arc-swap",
"common-error",
@@ -2296,7 +2329,7 @@ dependencies = [
[[package]]
name = "common-runtime"
version = "0.9.3"
version = "0.9.5"
dependencies = [
"async-trait",
"common-error",
@@ -2318,7 +2351,7 @@ dependencies = [
[[package]]
name = "common-telemetry"
version = "0.9.3"
version = "0.9.5"
dependencies = [
"atty",
"backtrace",
@@ -2346,7 +2379,7 @@ dependencies = [
[[package]]
name = "common-test-util"
version = "0.9.3"
version = "0.9.5"
dependencies = [
"client",
"common-query",
@@ -2358,7 +2391,7 @@ dependencies = [
[[package]]
name = "common-time"
version = "0.9.3"
version = "0.9.5"
dependencies = [
"arrow",
"chrono",
@@ -2374,7 +2407,7 @@ dependencies = [
[[package]]
name = "common-version"
version = "0.9.3"
version = "0.9.5"
dependencies = [
"build-data",
"const_format",
@@ -2385,7 +2418,7 @@ dependencies = [
[[package]]
name = "common-wal"
version = "0.9.3"
version = "0.9.5"
dependencies = [
"common-base",
"common-error",
@@ -3194,7 +3227,7 @@ dependencies = [
[[package]]
name = "datanode"
version = "0.9.3"
version = "0.9.5"
dependencies = [
"api",
"arrow-flight",
@@ -3244,7 +3277,7 @@ dependencies = [
"session",
"snafu 0.8.5",
"store-api",
"substrait 0.9.3",
"substrait 0.9.5",
"table",
"tokio",
"toml 0.8.19",
@@ -3253,7 +3286,7 @@ dependencies = [
[[package]]
name = "datatypes"
version = "0.9.3"
version = "0.9.5"
dependencies = [
"arrow",
"arrow-array",
@@ -3859,7 +3892,7 @@ dependencies = [
[[package]]
name = "file-engine"
version = "0.9.3"
version = "0.9.5"
dependencies = [
"api",
"async-trait",
@@ -3959,9 +3992,18 @@ version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "28a80e3145d8ad11ba0995949bbcf48b9df2be62772b3d351ef017dff6ecb853"
[[package]]
name = "float_extras"
version = "0.1.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b22b70f8649ea2315955f1a36d964b0e4da482dfaa5f0d04df0d1fb7c338ab7a"
dependencies = [
"libc",
]
[[package]]
name = "flow"
version = "0.9.3"
version = "0.9.5"
dependencies = [
"api",
"arrow",
@@ -4018,7 +4060,7 @@ dependencies = [
"snafu 0.8.5",
"store-api",
"strum 0.25.0",
"substrait 0.9.3",
"substrait 0.9.5",
"table",
"tokio",
"tonic 0.11.0",
@@ -4080,7 +4122,7 @@ checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa"
[[package]]
name = "frontend"
version = "0.9.3"
version = "0.9.5"
dependencies = [
"api",
"arc-swap",
@@ -4389,7 +4431,7 @@ version = "0.7.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9ff16065e5720f376fbced200a5ae0f47ace85fd70b7e54269790281353b6d61"
dependencies = [
"approx",
"approx 0.5.1",
"num-traits",
"serde",
]
@@ -4476,7 +4518,7 @@ dependencies = [
[[package]]
name = "greptime-proto"
version = "0.1.0"
source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=0b4f7c8ab06399f6b90e1626e8d5b9697cb33bb9#0b4f7c8ab06399f6b90e1626e8d5b9697cb33bb9"
source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=b4d301184eb0d01fd4d1042fcc7c5dfb54f3c1e3#b4d301184eb0d01fd4d1042fcc7c5dfb54f3c1e3"
dependencies = [
"prost 0.12.6",
"serde",
@@ -5128,7 +5170,7 @@ dependencies = [
[[package]]
name = "index"
version = "0.9.3"
version = "0.9.5"
dependencies = [
"async-trait",
"asynchronous-codec",
@@ -5959,7 +6001,7 @@ checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24"
[[package]]
name = "log-store"
version = "0.9.3"
version = "0.9.5"
dependencies = [
"async-stream",
"async-trait",
@@ -6279,7 +6321,7 @@ dependencies = [
[[package]]
name = "meta-client"
version = "0.9.3"
version = "0.9.5"
dependencies = [
"api",
"async-trait",
@@ -6305,7 +6347,7 @@ dependencies = [
[[package]]
name = "meta-srv"
version = "0.9.3"
version = "0.9.5"
dependencies = [
"api",
"async-trait",
@@ -6383,7 +6425,7 @@ dependencies = [
[[package]]
name = "metric-engine"
version = "0.9.3"
version = "0.9.5"
dependencies = [
"api",
"aquamarine",
@@ -6486,7 +6528,7 @@ dependencies = [
[[package]]
name = "mito2"
version = "0.9.3"
version = "0.9.5"
dependencies = [
"api",
"aquamarine",
@@ -6871,7 +6913,7 @@ version = "0.29.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d506eb7e08d6329505faa8a3a00a5dcc6de9f76e0c77e4b75763ae3c770831ff"
dependencies = [
"approx",
"approx 0.5.1",
"matrixmultiply",
"nalgebra-macros",
"num-complex",
@@ -7222,7 +7264,7 @@ dependencies = [
[[package]]
name = "object-store"
version = "0.9.3"
version = "0.9.5"
dependencies = [
"anyhow",
"bytes",
@@ -7507,12 +7549,13 @@ dependencies = [
"ordered-float 4.3.0",
"percent-encoding",
"rand",
"serde_json",
"thiserror",
]
[[package]]
name = "operator"
version = "0.9.3"
version = "0.9.5"
dependencies = [
"api",
"async-trait",
@@ -7557,7 +7600,7 @@ dependencies = [
"sql",
"sqlparser 0.45.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=54a267ac89c09b11c0c88934690530807185d3e7)",
"store-api",
"substrait 0.9.3",
"substrait 0.9.5",
"table",
"tokio",
"tokio-util",
@@ -7807,7 +7850,7 @@ dependencies = [
[[package]]
name = "partition"
version = "0.9.3"
version = "0.9.5"
dependencies = [
"api",
"async-trait",
@@ -8108,7 +8151,7 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184"
[[package]]
name = "pipeline"
version = "0.9.3"
version = "0.9.5"
dependencies = [
"ahash 0.8.11",
"api",
@@ -8270,7 +8313,7 @@ dependencies = [
[[package]]
name = "plugins"
version = "0.9.3"
version = "0.9.5"
dependencies = [
"auth",
"common-base",
@@ -8544,7 +8587,7 @@ dependencies = [
[[package]]
name = "promql"
version = "0.9.3"
version = "0.9.5"
dependencies = [
"ahash 0.8.11",
"async-trait",
@@ -8570,11 +8613,12 @@ dependencies = [
[[package]]
name = "promql-parser"
version = "0.4.0"
version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "007a331efb31f6ddb644590ef22359c9469784931162aad92599e34bcfa66583"
checksum = "0c1ad4a4cfa84ec4aa5831c82e57af0a3faf3f0af83bee13fa1390b2d0a32dc9"
dependencies = [
"cfgrammar",
"chrono",
"lazy_static",
"lrlex",
"lrpar",
@@ -8779,7 +8823,7 @@ dependencies = [
[[package]]
name = "puffin"
version = "0.9.3"
version = "0.9.5"
dependencies = [
"async-compression 0.4.13",
"async-trait",
@@ -8901,7 +8945,7 @@ dependencies = [
[[package]]
name = "query"
version = "0.9.3"
version = "0.9.5"
dependencies = [
"ahash 0.8.11",
"api",
@@ -8936,6 +8980,7 @@ dependencies = [
"datafusion-physical-expr",
"datafusion-sql",
"datatypes",
"fastrand",
"format_num",
"futures",
"futures-util",
@@ -8950,12 +8995,15 @@ dependencies = [
"object-store",
"once_cell",
"paste",
"pretty_assertions",
"prometheus",
"promql",
"promql-parser",
"prost 0.12.6",
"rand",
"regex",
"serde",
"serde_json",
"session",
"snafu 0.8.5",
"sql",
@@ -8964,7 +9012,7 @@ dependencies = [
"stats-cli",
"store-api",
"streaming-stats",
"substrait 0.9.3",
"substrait 0.9.5",
"table",
"tokio",
"tokio-stream",
@@ -10262,6 +10310,20 @@ version = "1.0.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f"
[[package]]
name = "s2"
version = "0.0.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cc7fbc04bb52c40b5f48c9bb2d2961375301916e0c25d9f373750654d588cd5c"
dependencies = [
"bigdecimal 0.3.1",
"cgmath",
"float_extras",
"lazy_static",
"libm",
"serde",
]
[[package]]
name = "safe-proc-macro2"
version = "1.0.67"
@@ -10384,7 +10446,7 @@ checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49"
[[package]]
name = "script"
version = "0.9.3"
version = "0.9.5"
dependencies = [
"api",
"arc-swap",
@@ -10678,8 +10740,9 @@ dependencies = [
[[package]]
name = "servers"
version = "0.9.3"
version = "0.9.5"
dependencies = [
"ahash 0.8.11",
"aide",
"api",
"arrow",
@@ -10705,6 +10768,7 @@ dependencies = [
"common-mem-prof",
"common-meta",
"common-plugins",
"common-pprof",
"common-query",
"common-recordbatch",
"common-runtime",
@@ -10787,7 +10851,7 @@ dependencies = [
[[package]]
name = "session"
version = "0.9.3"
version = "0.9.5"
dependencies = [
"api",
"arc-swap",
@@ -10899,7 +10963,7 @@ version = "0.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f0b7840f121a46d63066ee7a99fc81dcabbc6105e437cae43528cea199b5a05f"
dependencies = [
"approx",
"approx 0.5.1",
"num-complex",
"num-traits",
"paste",
@@ -11108,7 +11172,7 @@ dependencies = [
[[package]]
name = "sql"
version = "0.9.3"
version = "0.9.5"
dependencies = [
"api",
"chrono",
@@ -11169,7 +11233,7 @@ dependencies = [
[[package]]
name = "sqlness-runner"
version = "0.9.3"
version = "0.9.5"
dependencies = [
"async-trait",
"clap 4.5.19",
@@ -11370,7 +11434,7 @@ version = "0.16.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b35a062dbadac17a42e0fc64c27f419b25d6fae98572eb43c8814c9e873d7721"
dependencies = [
"approx",
"approx 0.5.1",
"lazy_static",
"nalgebra",
"num-traits",
@@ -11389,7 +11453,7 @@ dependencies = [
[[package]]
name = "store-api"
version = "0.9.3"
version = "0.9.5"
dependencies = [
"api",
"aquamarine",
@@ -11558,7 +11622,7 @@ dependencies = [
[[package]]
name = "substrait"
version = "0.9.3"
version = "0.9.5"
dependencies = [
"async-trait",
"bytes",
@@ -11757,7 +11821,7 @@ dependencies = [
[[package]]
name = "table"
version = "0.9.3"
version = "0.9.5"
dependencies = [
"api",
"async-trait",
@@ -12023,7 +12087,7 @@ checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76"
[[package]]
name = "tests-fuzz"
version = "0.9.3"
version = "0.9.5"
dependencies = [
"arbitrary",
"async-trait",
@@ -12065,7 +12129,7 @@ dependencies = [
[[package]]
name = "tests-integration"
version = "0.9.3"
version = "0.9.5"
dependencies = [
"api",
"arrow-flight",
@@ -12127,7 +12191,7 @@ dependencies = [
"sql",
"sqlx",
"store-api",
"substrait 0.9.3",
"substrait 0.9.5",
"table",
"tempfile",
"time",

View File

@@ -20,6 +20,7 @@ members = [
"src/common/mem-prof",
"src/common/meta",
"src/common/plugins",
"src/common/pprof",
"src/common/procedure",
"src/common/procedure-test",
"src/common/query",
@@ -64,7 +65,7 @@ members = [
resolver = "2"
[workspace.package]
version = "0.9.3"
version = "0.9.5"
edition = "2021"
license = "Apache-2.0"
@@ -120,7 +121,7 @@ etcd-client = { version = "0.13" }
fst = "0.4.7"
futures = "0.3"
futures-util = "0.3"
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "0b4f7c8ab06399f6b90e1626e8d5b9697cb33bb9" }
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "b4d301184eb0d01fd4d1042fcc7c5dfb54f3c1e3" }
humantime = "2.1"
humantime-serde = "1.1"
itertools = "0.10"
@@ -137,12 +138,13 @@ opentelemetry-proto = { version = "0.5", features = [
"metrics",
"trace",
"with-serde",
"logs",
] }
parquet = { version = "51.0.0", default-features = false, features = ["arrow", "async", "object_store"] }
paste = "1.0"
pin-project = "1.0"
prometheus = { version = "0.13.3", features = ["process"] }
promql-parser = { version = "0.4" }
promql-parser = { version = "0.4.1" }
prost = "0.12"
raft-engine = { version = "0.4.1", default-features = false }
rand = "0.8"
@@ -208,6 +210,7 @@ common-macro = { path = "src/common/macro" }
common-mem-prof = { path = "src/common/mem-prof" }
common-meta = { path = "src/common/meta" }
common-plugins = { path = "src/common/plugins" }
common-pprof = { path = "src/common/pprof" }
common-procedure = { path = "src/common/procedure" }
common-procedure-test = { path = "src/common/procedure-test" }
common-query = { path = "src/common/query" }

View File

@@ -83,6 +83,7 @@
| `wal.backoff_max` | String | `10s` | The maximum backoff delay.<br/>**It's only used when the provider is `kafka`**. |
| `wal.backoff_base` | Integer | `2` | The exponential backoff rate, i.e. next backoff = base * current backoff.<br/>**It's only used when the provider is `kafka`**. |
| `wal.backoff_deadline` | String | `5mins` | The deadline of retries.<br/>**It's only used when the provider is `kafka`**. |
| `wal.overwrite_entry_start_id` | Bool | `false` | Ignore missing entries during read WAL.<br/>**It's only used when the provider is `kafka`**.<br/><br/>This option ensures that when Kafka messages are deleted, the system <br/>can still successfully replay memtable data without throwing an <br/>out-of-range error. <br/>However, enabling this option might lead to unexpected data loss, <br/>as the system will skip over missing entries instead of treating <br/>them as critical errors. |
| `metadata_store` | -- | -- | Metadata storage options. |
| `metadata_store.file_size` | String | `256MB` | Kv file size in bytes. |
| `metadata_store.purge_threshold` | String | `4GB` | Kv purge threshold. |
@@ -409,6 +410,7 @@
| `wal.backoff_deadline` | String | `5mins` | The deadline of retries.<br/>**It's only used when the provider is `kafka`**. |
| `wal.create_index` | Bool | `true` | Whether to enable WAL index creation.<br/>**It's only used when the provider is `kafka`**. |
| `wal.dump_index_interval` | String | `60s` | The interval for dumping WAL indexes.<br/>**It's only used when the provider is `kafka`**. |
| `wal.overwrite_entry_start_id` | Bool | `false` | Ignore missing entries during read WAL.<br/>**It's only used when the provider is `kafka`**.<br/><br/>This option ensures that when Kafka messages are deleted, the system <br/>can still successfully replay memtable data without throwing an <br/>out-of-range error. <br/>However, enabling this option might lead to unexpected data loss, <br/>as the system will skip over missing entries instead of treating <br/>them as critical errors. |
| `storage` | -- | -- | The data storage options. |
| `storage.data_home` | String | `/tmp/greptimedb/` | The working home directory. |
| `storage.type` | String | `File` | The storage type used to store the data.<br/>- `File`: the data is stored in the local file system.<br/>- `S3`: the data is stored in the S3 object storage.<br/>- `Gcs`: the data is stored in the Google Cloud Storage.<br/>- `Azblob`: the data is stored in the Azure Blob Storage.<br/>- `Oss`: the data is stored in the Aliyun OSS. |
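
The `wal.overwrite_entry_start_id` option documented above changes what happens when a region asks the Kafka WAL to replay from an entry that retention has already deleted: instead of failing with an out-of-range error, replay restarts from the earliest entry Kafka still has, accepting that the pruned entries are lost. A minimal sketch of that decision, with hypothetical names (the real logic lives in the Kafka log store and is not part of this diff):

```rust
/// Hypothetical sketch of the behaviour described by `wal.overwrite_entry_start_id`;
/// the function and parameter names are illustrative, not GreptimeDB's API.
fn resolve_replay_start(
    requested_entry_id: u64,
    kafka_start_offset: u64,
    overwrite_entry_start_id: bool,
) -> Result<u64, String> {
    if requested_entry_id >= kafka_start_offset {
        // The requested entry is still retained; replay from it as usual.
        Ok(requested_entry_id)
    } else if overwrite_entry_start_id {
        // Entries below the start offset were deleted by Kafka retention.
        // Skip them and replay from the earliest available entry; the gap is
        // accepted instead of being treated as a critical error.
        Ok(kafka_start_offset)
    } else {
        // Default: surface the missing entries as an out-of-range error.
        Err(format!(
            "entry {requested_entry_id} is below Kafka start offset {kafka_start_offset}"
        ))
    }
}
```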

View File

@@ -213,6 +213,17 @@ create_index = true
## **It's only used when the provider is `kafka`**.
dump_index_interval = "60s"
## Ignore missing entries during read WAL.
## **It's only used when the provider is `kafka`**.
##
## This option ensures that when Kafka messages are deleted, the system
## can still successfully replay memtable data without throwing an
## out-of-range error.
## However, enabling this option might lead to unexpected data loss,
## as the system will skip over missing entries instead of treating
## them as critical errors.
overwrite_entry_start_id = false
# The Kafka SASL configuration.
# **It's only used when the provider is `kafka`**.
# Available SASL mechanisms:

View File

@@ -237,6 +237,17 @@ backoff_base = 2
## **It's only used when the provider is `kafka`**.
backoff_deadline = "5mins"
## Ignore missing entries during read WAL.
## **It's only used when the provider is `kafka`**.
##
## This option ensures that when Kafka messages are deleted, the system
## can still successfully replay memtable data without throwing an
## out-of-range error.
## However, enabling this option might lead to unexpected data loss,
## as the system will skip over missing entries instead of treating
## them as critical errors.
overwrite_entry_start_id = false
# The Kafka SASL configuration.
# **It's only used when the provider is `kafka`**.
# Available SASL mechanisms:

View File

@@ -48,4 +48,4 @@ Please refer to [SQL query](./query.sql) for GreptimeDB and Clickhouse, and [que
## Addition
- You can tune GreptimeDB's configuration to get better performance.
- You can setup GreptimeDB to use S3 as storage, see [here](https://docs.greptime.com/user-guide/operations/configuration/#storage-options).
- You can setup GreptimeDB to use S3 as storage, see [here](https://docs.greptime.com/user-guide/deployments/configuration#storage-options).

View File

@@ -1,11 +1,5 @@
# Profiling CPU
## Build GreptimeDB with `pprof` feature
```bash
cargo build --features=pprof
```
## HTTP API
Sample at 99 Hertz, for 5 seconds, output report in [protobuf format](https://github.com/google/pprof/blob/master/proto/profile.proto).
```bash

View File

@@ -18,12 +18,6 @@ sudo apt install libjemalloc-dev
curl https://raw.githubusercontent.com/brendangregg/FlameGraph/master/flamegraph.pl > ./flamegraph.pl
```
### Build GreptimeDB with `mem-prof` feature.
```bash
cargo build --features=mem-prof
```
## Profiling
Start GreptimeDB instance with environment variables:

View File

@@ -17,10 +17,11 @@ use std::sync::Arc;
use common_base::BitVec;
use common_decimal::decimal128::{DECIMAL128_DEFAULT_SCALE, DECIMAL128_MAX_PRECISION};
use common_decimal::Decimal128;
use common_time::interval::IntervalUnit;
use common_time::time::Time;
use common_time::timestamp::TimeUnit;
use common_time::{Date, DateTime, Interval, Timestamp};
use common_time::{
Date, DateTime, IntervalDayTime, IntervalMonthDayNano, IntervalYearMonth, Timestamp,
};
use datatypes::prelude::{ConcreteDataType, ValueRef};
use datatypes::scalars::ScalarVector;
use datatypes::types::{
@@ -456,13 +457,11 @@ pub fn push_vals(column: &mut Column, origin_count: usize, vector: VectorRef) {
TimeUnit::Microsecond => values.time_microsecond_values.push(val.value()),
TimeUnit::Nanosecond => values.time_nanosecond_values.push(val.value()),
},
Value::Interval(val) => match val.unit() {
IntervalUnit::YearMonth => values.interval_year_month_values.push(val.to_i32()),
IntervalUnit::DayTime => values.interval_day_time_values.push(val.to_i64()),
IntervalUnit::MonthDayNano => values
.interval_month_day_nano_values
.push(convert_i128_to_interval(val.to_i128())),
},
Value::IntervalYearMonth(val) => values.interval_year_month_values.push(val.to_i32()),
Value::IntervalDayTime(val) => values.interval_day_time_values.push(val.to_i64()),
Value::IntervalMonthDayNano(val) => values
.interval_month_day_nano_values
.push(convert_month_day_nano_to_pb(val)),
Value::Decimal128(val) => values.decimal128_values.push(convert_to_pb_decimal128(val)),
Value::List(_) | Value::Duration(_) => unreachable!(),
});
@@ -507,14 +506,12 @@ fn ddl_request_type(request: &DdlRequest) -> &'static str {
}
}
/// Converts an i128 value to google protobuf type [IntervalMonthDayNano].
pub fn convert_i128_to_interval(v: i128) -> v1::IntervalMonthDayNano {
let interval = Interval::from_i128(v);
let (months, days, nanoseconds) = interval.to_month_day_nano();
/// Converts an interval to google protobuf type [IntervalMonthDayNano].
pub fn convert_month_day_nano_to_pb(v: IntervalMonthDayNano) -> v1::IntervalMonthDayNano {
v1::IntervalMonthDayNano {
months,
days,
nanoseconds,
months: v.months,
days: v.days,
nanoseconds: v.nanoseconds,
}
}
@@ -562,11 +559,15 @@ pub fn pb_value_to_value_ref<'a>(
ValueData::TimeMillisecondValue(t) => ValueRef::Time(Time::new_millisecond(*t)),
ValueData::TimeMicrosecondValue(t) => ValueRef::Time(Time::new_microsecond(*t)),
ValueData::TimeNanosecondValue(t) => ValueRef::Time(Time::new_nanosecond(*t)),
ValueData::IntervalYearMonthValue(v) => ValueRef::Interval(Interval::from_i32(*v)),
ValueData::IntervalDayTimeValue(v) => ValueRef::Interval(Interval::from_i64(*v)),
ValueData::IntervalYearMonthValue(v) => {
ValueRef::IntervalYearMonth(IntervalYearMonth::from_i32(*v))
}
ValueData::IntervalDayTimeValue(v) => {
ValueRef::IntervalDayTime(IntervalDayTime::from_i64(*v))
}
ValueData::IntervalMonthDayNanoValue(v) => {
let interval = Interval::from_month_day_nano(v.months, v.days, v.nanoseconds);
ValueRef::Interval(interval)
let interval = IntervalMonthDayNano::new(v.months, v.days, v.nanoseconds);
ValueRef::IntervalMonthDayNano(interval)
}
ValueData::Decimal128Value(v) => {
// get precision and scale from datatype_extension
@@ -657,7 +658,7 @@ pub fn pb_values_to_vector_ref(data_type: &ConcreteDataType, values: Values) ->
IntervalType::MonthDayNano(_) => {
Arc::new(IntervalMonthDayNanoVector::from_iter_values(
values.interval_month_day_nano_values.iter().map(|x| {
Interval::from_month_day_nano(x.months, x.days, x.nanoseconds).to_i128()
IntervalMonthDayNano::new(x.months, x.days, x.nanoseconds).to_i128()
}),
))
}
@@ -802,18 +803,18 @@ pub fn pb_values_to_values(data_type: &ConcreteDataType, values: Values) -> Vec<
ConcreteDataType::Interval(IntervalType::YearMonth(_)) => values
.interval_year_month_values
.into_iter()
.map(|v| Value::Interval(Interval::from_i32(v)))
.map(|v| Value::IntervalYearMonth(IntervalYearMonth::from_i32(v)))
.collect(),
ConcreteDataType::Interval(IntervalType::DayTime(_)) => values
.interval_day_time_values
.into_iter()
.map(|v| Value::Interval(Interval::from_i64(v)))
.map(|v| Value::IntervalDayTime(IntervalDayTime::from_i64(v)))
.collect(),
ConcreteDataType::Interval(IntervalType::MonthDayNano(_)) => values
.interval_month_day_nano_values
.into_iter()
.map(|v| {
Value::Interval(Interval::from_month_day_nano(
Value::IntervalMonthDayNano(IntervalMonthDayNano::new(
v.months,
v.days,
v.nanoseconds,
@@ -941,18 +942,16 @@ pub fn to_proto_value(value: Value) -> Option<v1::Value> {
value_data: Some(ValueData::TimeNanosecondValue(v.value())),
},
},
Value::Interval(v) => match v.unit() {
IntervalUnit::YearMonth => v1::Value {
value_data: Some(ValueData::IntervalYearMonthValue(v.to_i32())),
},
IntervalUnit::DayTime => v1::Value {
value_data: Some(ValueData::IntervalDayTimeValue(v.to_i64())),
},
IntervalUnit::MonthDayNano => v1::Value {
value_data: Some(ValueData::IntervalMonthDayNanoValue(
convert_i128_to_interval(v.to_i128()),
)),
},
Value::IntervalYearMonth(v) => v1::Value {
value_data: Some(ValueData::IntervalYearMonthValue(v.to_i32())),
},
Value::IntervalDayTime(v) => v1::Value {
value_data: Some(ValueData::IntervalDayTimeValue(v.to_i64())),
},
Value::IntervalMonthDayNano(v) => v1::Value {
value_data: Some(ValueData::IntervalMonthDayNanoValue(
convert_month_day_nano_to_pb(v),
)),
},
Value::Decimal128(v) => v1::Value {
value_data: Some(ValueData::Decimal128Value(convert_to_pb_decimal128(v))),
@@ -1044,13 +1043,11 @@ pub fn value_to_grpc_value(value: Value) -> GrpcValue {
TimeUnit::Microsecond => ValueData::TimeMicrosecondValue(v.value()),
TimeUnit::Nanosecond => ValueData::TimeNanosecondValue(v.value()),
}),
Value::Interval(v) => Some(match v.unit() {
IntervalUnit::YearMonth => ValueData::IntervalYearMonthValue(v.to_i32()),
IntervalUnit::DayTime => ValueData::IntervalDayTimeValue(v.to_i64()),
IntervalUnit::MonthDayNano => {
ValueData::IntervalMonthDayNanoValue(convert_i128_to_interval(v.to_i128()))
}
}),
Value::IntervalYearMonth(v) => Some(ValueData::IntervalYearMonthValue(v.to_i32())),
Value::IntervalDayTime(v) => Some(ValueData::IntervalDayTimeValue(v.to_i64())),
Value::IntervalMonthDayNano(v) => Some(ValueData::IntervalMonthDayNanoValue(
convert_month_day_nano_to_pb(v),
)),
Value::Decimal128(v) => Some(ValueData::Decimal128Value(convert_to_pb_decimal128(v))),
Value::List(_) | Value::Duration(_) => unreachable!(),
},
@@ -1061,6 +1058,7 @@ pub fn value_to_grpc_value(value: Value) -> GrpcValue {
mod tests {
use std::sync::Arc;
use common_time::interval::IntervalUnit;
use datatypes::types::{
Int32Type, IntervalDayTimeType, IntervalMonthDayNanoType, IntervalYearMonthType,
TimeMillisecondType, TimeSecondType, TimestampMillisecondType, TimestampSecondType,
@@ -1506,11 +1504,11 @@ mod tests {
#[test]
fn test_convert_i128_to_interval() {
let i128_val = 3000;
let interval = convert_i128_to_interval(i128_val);
let i128_val = 3;
let interval = convert_month_day_nano_to_pb(IntervalMonthDayNano::from_i128(i128_val));
assert_eq!(interval.months, 0);
assert_eq!(interval.days, 0);
assert_eq!(interval.nanoseconds, 3000);
assert_eq!(interval.nanoseconds, 3);
}
#[test]
@@ -1590,9 +1588,9 @@ mod tests {
},
);
let expect = vec![
Value::Interval(Interval::from_year_month(1_i32)),
Value::Interval(Interval::from_year_month(2_i32)),
Value::Interval(Interval::from_year_month(3_i32)),
Value::IntervalYearMonth(IntervalYearMonth::new(1_i32)),
Value::IntervalYearMonth(IntervalYearMonth::new(2_i32)),
Value::IntervalYearMonth(IntervalYearMonth::new(3_i32)),
];
assert_eq!(expect, actual);
@@ -1605,9 +1603,9 @@ mod tests {
},
);
let expect = vec![
Value::Interval(Interval::from_i64(1_i64)),
Value::Interval(Interval::from_i64(2_i64)),
Value::Interval(Interval::from_i64(3_i64)),
Value::IntervalDayTime(IntervalDayTime::from_i64(1_i64)),
Value::IntervalDayTime(IntervalDayTime::from_i64(2_i64)),
Value::IntervalDayTime(IntervalDayTime::from_i64(3_i64)),
];
assert_eq!(expect, actual);
@@ -1636,9 +1634,9 @@ mod tests {
},
);
let expect = vec![
Value::Interval(Interval::from_month_day_nano(1, 2, 3)),
Value::Interval(Interval::from_month_day_nano(5, 6, 7)),
Value::Interval(Interval::from_month_day_nano(9, 10, 11)),
Value::IntervalMonthDayNano(IntervalMonthDayNano::new(1, 2, 3)),
Value::IntervalMonthDayNano(IntervalMonthDayNano::new(5, 6, 7)),
Value::IntervalMonthDayNano(IntervalMonthDayNano::new(9, 10, 11)),
];
assert_eq!(expect, actual);
}
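
These helper changes follow #4772 above: the single `Interval` value type is replaced by three concrete interval types from `common-time`, one per storage layout. The shapes this diff relies on, sketched from the calls it makes (field names for the month-day-nano case come from the code above; the rest is assumed):

```rust
// Illustrative shapes only, not the actual common-time definitions.
struct IntervalYearMonth {
    months: i32, // built via new(i32) / from_i32, read back with to_i32
}

struct IntervalDayTime {
    days: i32,
    milliseconds: i32, // packed to and from an i64 via from_i64 / to_i64
}

struct IntervalMonthDayNano {
    months: i32,
    days: i32,
    nanoseconds: i64, // new(months, days, nanoseconds), convertible to i128
}
```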

View File

@@ -10,7 +10,7 @@ name = "greptime"
path = "src/bin/greptime.rs"
[features]
default = ["python"]
default = ["python", "servers/pprof", "servers/mem-prof"]
tokio-console = ["common-telemetry/tokio-console"]
python = ["frontend/python"]

View File

@@ -48,6 +48,10 @@ impl Instance {
_guard: guard,
}
}
pub fn get_inner(&self) -> &MetasrvInstance {
&self.instance
}
}
#[async_trait]
@@ -86,6 +90,14 @@ impl Command {
pub fn load_options(&self, global_options: &GlobalOptions) -> Result<MetasrvOptions> {
self.subcmd.load_options(global_options)
}
pub fn config_file(&self) -> &Option<String> {
self.subcmd.config_file()
}
pub fn env_prefix(&self) -> &String {
self.subcmd.env_prefix()
}
}
#[derive(Parser)]
@@ -105,6 +117,18 @@ impl SubCommand {
SubCommand::Start(cmd) => cmd.load_options(global_options),
}
}
fn config_file(&self) -> &Option<String> {
match self {
SubCommand::Start(cmd) => &cmd.config_file,
}
}
fn env_prefix(&self) -> &String {
match self {
SubCommand::Start(cmd) => &cmd.env_prefix,
}
}
}
#[derive(Debug, Default, Parser)]

View File

@@ -653,7 +653,7 @@ impl StartCommand {
}
}
struct StandaloneInformationExtension {
pub struct StandaloneInformationExtension {
region_server: RegionServer,
procedure_manager: ProcedureManagerRef,
start_time_ms: u64,

View File

@@ -38,6 +38,18 @@ impl Plugins {
self.read().get::<T>().cloned()
}
pub fn get_or_insert<T, F>(&self, f: F) -> T
where
T: 'static + Send + Sync + Clone,
F: FnOnce() -> T,
{
let mut binding = self.write();
if !binding.contains::<T>() {
binding.insert(f());
}
binding.get::<T>().cloned().unwrap()
}
pub fn map_mut<T: 'static + Send + Sync, F, R>(&self, mapper: F) -> R
where
F: FnOnce(Option<&mut T>) -> R,

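The `get_or_insert` method added above gives `Plugins` a lazy, insert-once accessor: it takes the write lock, inserts the value produced by the closure if the type is not present yet, and returns a clone. A small usage sketch (the `QueryLimiter` type and the import path are assumptions for illustration):

```rust
use common_base::Plugins; // assumed path; Plugins is the type shown above

// Any `'static + Send + Sync + Clone` type can be stored in Plugins.
#[derive(Clone)]
struct QueryLimiter {
    max_concurrency: usize,
}

fn limiter(plugins: &Plugins) -> QueryLimiter {
    // The closure runs only if no QueryLimiter is stored yet; later callers
    // receive a clone of the already-stored value.
    plugins.get_or_insert(|| QueryLimiter { max_concurrency: 8 })
}
```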
View File

@@ -9,7 +9,7 @@ workspace = true
[features]
default = ["geo"]
geo = ["geohash", "h3o"]
geo = ["geohash", "h3o", "s2"]
[dependencies]
api.workspace = true
@@ -35,6 +35,7 @@ num = "0.4"
num-traits = "0.2"
once_cell.workspace = true
paste = "1.0"
s2 = { version = "0.0.12", optional = true }
serde.workspace = true
serde_json.workspace = true
session.workspace = true

View File

@@ -31,7 +31,6 @@ pub use polyval::PolyvalAccumulatorCreator;
pub use scipy_stats_norm_cdf::ScipyStatsNormCdfAccumulatorCreator;
pub use scipy_stats_norm_pdf::ScipyStatsNormPdfAccumulatorCreator;
use super::geo::encoding::JsonPathEncodeFunctionCreator;
use crate::function_registry::FunctionRegistry;
/// A function creates `AggregateFunctionCreator`.
@@ -93,6 +92,11 @@ impl AggregateFunctions {
register_aggr_func!("scipystatsnormcdf", 2, ScipyStatsNormCdfAccumulatorCreator);
register_aggr_func!("scipystatsnormpdf", 2, ScipyStatsNormPdfAccumulatorCreator);
register_aggr_func!("json_encode_path", 3, JsonPathEncodeFunctionCreator);
#[cfg(feature = "geo")]
register_aggr_func!(
"json_encode_path",
3,
super::geo::encoding::JsonPathEncodeFunctionCreator
);
}
}

View File

@@ -14,18 +14,19 @@
use std::fmt;
use common_query::error::{InvalidFuncArgsSnafu, Result, UnsupportedInputDataTypeSnafu};
use common_query::error::{ArrowComputeSnafu, IntoVectorSnafu, InvalidFuncArgsSnafu, Result};
use common_query::prelude::Signature;
use datatypes::data_type::DataType;
use datatypes::arrow::compute::kernels::numeric;
use datatypes::prelude::ConcreteDataType;
use datatypes::value::ValueRef;
use datatypes::vectors::VectorRef;
use snafu::ensure;
use datatypes::vectors::{Helper, VectorRef};
use snafu::{ensure, ResultExt};
use crate::function::{Function, FunctionContext};
use crate::helper;
/// A function adds an interval value to Timestamp, Date or DateTime, and return the result.
/// A function adds an interval value to Timestamp, Date, and return the result.
/// The implementation of datetime type is based on Date64 which is incorrect so this function
/// doesn't support the datetime type.
#[derive(Clone, Debug, Default)]
pub struct DateAddFunction;
@@ -44,7 +45,6 @@ impl Function for DateAddFunction {
helper::one_of_sigs2(
vec![
ConcreteDataType::date_datatype(),
ConcreteDataType::datetime_datatype(),
ConcreteDataType::timestamp_second_datatype(),
ConcreteDataType::timestamp_millisecond_datatype(),
ConcreteDataType::timestamp_microsecond_datatype(),
@@ -69,64 +69,14 @@ impl Function for DateAddFunction {
}
);
let left = &columns[0];
let right = &columns[1];
let left = columns[0].to_arrow_array();
let right = columns[1].to_arrow_array();
let size = left.len();
let left_datatype = columns[0].data_type();
match left_datatype {
ConcreteDataType::Timestamp(_) => {
let mut result = left_datatype.create_mutable_vector(size);
for i in 0..size {
let ts = left.get(i).as_timestamp();
let interval = right.get(i).as_interval();
let new_ts = match (ts, interval) {
(Some(ts), Some(interval)) => ts.add_interval(interval),
_ => ts,
};
result.push_value_ref(ValueRef::from(new_ts));
}
Ok(result.to_vector())
}
ConcreteDataType::Date(_) => {
let mut result = left_datatype.create_mutable_vector(size);
for i in 0..size {
let date = left.get(i).as_date();
let interval = right.get(i).as_interval();
let new_date = match (date, interval) {
(Some(date), Some(interval)) => date.add_interval(interval),
_ => date,
};
result.push_value_ref(ValueRef::from(new_date));
}
Ok(result.to_vector())
}
ConcreteDataType::DateTime(_) => {
let mut result = left_datatype.create_mutable_vector(size);
for i in 0..size {
let datetime = left.get(i).as_datetime();
let interval = right.get(i).as_interval();
let new_datetime = match (datetime, interval) {
(Some(datetime), Some(interval)) => datetime.add_interval(interval),
_ => datetime,
};
result.push_value_ref(ValueRef::from(new_datetime));
}
Ok(result.to_vector())
}
_ => UnsupportedInputDataTypeSnafu {
function: NAME,
datatypes: columns.iter().map(|c| c.data_type()).collect::<Vec<_>>(),
}
.fail(),
}
let result = numeric::add(&left, &right).context(ArrowComputeSnafu)?;
let arrow_type = result.data_type().clone();
Helper::try_into_vector(result).context(IntoVectorSnafu {
data_type: arrow_type,
})
}
}
@@ -144,8 +94,7 @@ mod tests {
use datatypes::prelude::ConcreteDataType;
use datatypes::value::Value;
use datatypes::vectors::{
DateTimeVector, DateVector, IntervalDayTimeVector, IntervalYearMonthVector,
TimestampSecondVector,
DateVector, IntervalDayTimeVector, IntervalYearMonthVector, TimestampSecondVector,
};
use super::{DateAddFunction, *};
@@ -168,16 +117,15 @@ mod tests {
ConcreteDataType::date_datatype(),
f.return_type(&[ConcreteDataType::date_datatype()]).unwrap()
);
assert_eq!(
ConcreteDataType::datetime_datatype(),
f.return_type(&[ConcreteDataType::datetime_datatype()])
.unwrap()
);
assert!(matches!(f.signature(),
assert!(
matches!(f.signature(),
Signature {
type_signature: TypeSignature::OneOf(sigs),
volatility: Volatility::Immutable
} if sigs.len() == 18));
} if sigs.len() == 15),
"{:?}",
f.signature()
);
}
#[test]
@@ -243,36 +191,4 @@ mod tests {
}
}
}
#[test]
fn test_datetime_date_add() {
let f = DateAddFunction;
let dates = vec![Some(123), None, Some(42), None];
// Intervals in months
let intervals = vec![1, 2, 3, 1];
let results = [Some(2678400123), None, Some(7776000042), None];
let date_vector = DateTimeVector::from(dates.clone());
let interval_vector = IntervalYearMonthVector::from_vec(intervals);
let args: Vec<VectorRef> = vec![Arc::new(date_vector), Arc::new(interval_vector)];
let vector = f.eval(FunctionContext::default(), &args).unwrap();
assert_eq!(4, vector.len());
for (i, _t) in dates.iter().enumerate() {
let v = vector.get(i);
let result = results.get(i).unwrap();
if result.is_none() {
assert_eq!(Value::Null, v);
continue;
}
match v {
Value::DateTime(date) => {
assert_eq!(date.val(), result.unwrap());
}
_ => unreachable!(),
}
}
}
}
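
Both `date_add` above and the matching `date_sub` change below drop the per-row `match` on the value type and hand the two columns straight to arrow's arithmetic kernel, relying on arrow to implement timestamp/date ± interval (including null handling). A standalone sketch of that kernel call against the `arrow` crate, with made-up array contents:

```rust
use std::sync::Arc;

use arrow::array::{ArrayRef, IntervalYearMonthArray, TimestampSecondArray};
use arrow::compute::kernels::numeric;
use arrow::error::ArrowError;

fn add_months_to_timestamps() -> Result<ArrayRef, ArrowError> {
    // Timestamps in seconds since the epoch, and year-month intervals.
    let ts: ArrayRef = Arc::new(TimestampSecondArray::from(vec![Some(0_i64), None]));
    let months: ArrayRef = Arc::new(IntervalYearMonthArray::from(vec![1, 2]));
    // The kernel performs the timestamp + interval arithmetic and keeps nulls.
    numeric::add(&ts, &months)
}
```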

View File

@@ -14,18 +14,19 @@
use std::fmt;
use common_query::error::{InvalidFuncArgsSnafu, Result, UnsupportedInputDataTypeSnafu};
use common_query::error::{ArrowComputeSnafu, IntoVectorSnafu, InvalidFuncArgsSnafu, Result};
use common_query::prelude::Signature;
use datatypes::data_type::DataType;
use datatypes::arrow::compute::kernels::numeric;
use datatypes::prelude::ConcreteDataType;
use datatypes::value::ValueRef;
use datatypes::vectors::VectorRef;
use snafu::ensure;
use datatypes::vectors::{Helper, VectorRef};
use snafu::{ensure, ResultExt};
use crate::function::{Function, FunctionContext};
use crate::helper;
/// A function subtracts an interval value to Timestamp, Date or DateTime, and return the result.
/// A function subtracts an interval value to Timestamp, Date, and return the result.
/// The implementation of datetime type is based on Date64 which is incorrect so this function
/// doesn't support the datetime type.
#[derive(Clone, Debug, Default)]
pub struct DateSubFunction;
@@ -44,7 +45,6 @@ impl Function for DateSubFunction {
helper::one_of_sigs2(
vec![
ConcreteDataType::date_datatype(),
ConcreteDataType::datetime_datatype(),
ConcreteDataType::timestamp_second_datatype(),
ConcreteDataType::timestamp_millisecond_datatype(),
ConcreteDataType::timestamp_microsecond_datatype(),
@@ -69,65 +69,14 @@ impl Function for DateSubFunction {
}
);
let left = &columns[0];
let right = &columns[1];
let left = columns[0].to_arrow_array();
let right = columns[1].to_arrow_array();
let size = left.len();
let left_datatype = columns[0].data_type();
match left_datatype {
ConcreteDataType::Timestamp(_) => {
let mut result = left_datatype.create_mutable_vector(size);
for i in 0..size {
let ts = left.get(i).as_timestamp();
let interval = right.get(i).as_interval();
let new_ts = match (ts, interval) {
(Some(ts), Some(interval)) => ts.sub_interval(interval),
_ => ts,
};
result.push_value_ref(ValueRef::from(new_ts));
}
Ok(result.to_vector())
}
ConcreteDataType::Date(_) => {
let mut result = left_datatype.create_mutable_vector(size);
for i in 0..size {
let date = left.get(i).as_date();
let interval = right.get(i).as_interval();
let new_date = match (date, interval) {
(Some(date), Some(interval)) => date.sub_interval(interval),
_ => date,
};
result.push_value_ref(ValueRef::from(new_date));
}
Ok(result.to_vector())
}
ConcreteDataType::DateTime(_) => {
let mut result = left_datatype.create_mutable_vector(size);
for i in 0..size {
let datetime = left.get(i).as_datetime();
let interval = right.get(i).as_interval();
let new_datetime = match (datetime, interval) {
(Some(datetime), Some(interval)) => datetime.sub_interval(interval),
_ => datetime,
};
result.push_value_ref(ValueRef::from(new_datetime));
}
Ok(result.to_vector())
}
_ => UnsupportedInputDataTypeSnafu {
function: NAME,
datatypes: columns.iter().map(|c| c.data_type()).collect::<Vec<_>>(),
}
.fail(),
}
let result = numeric::sub(&left, &right).context(ArrowComputeSnafu)?;
let arrow_type = result.data_type().clone();
Helper::try_into_vector(result).context(IntoVectorSnafu {
data_type: arrow_type,
})
}
}
@@ -145,8 +94,7 @@ mod tests {
use datatypes::prelude::ConcreteDataType;
use datatypes::value::Value;
use datatypes::vectors::{
DateTimeVector, DateVector, IntervalDayTimeVector, IntervalYearMonthVector,
TimestampSecondVector,
DateVector, IntervalDayTimeVector, IntervalYearMonthVector, TimestampSecondVector,
};
use super::{DateSubFunction, *};
@@ -174,11 +122,15 @@ mod tests {
f.return_type(&[ConcreteDataType::datetime_datatype()])
.unwrap()
);
assert!(matches!(f.signature(),
assert!(
matches!(f.signature(),
Signature {
type_signature: TypeSignature::OneOf(sigs),
volatility: Volatility::Immutable
} if sigs.len() == 18));
} if sigs.len() == 15),
"{:?}",
f.signature()
);
}
#[test]
@@ -250,42 +202,4 @@ mod tests {
}
}
}
#[test]
fn test_datetime_date_sub() {
let f = DateSubFunction;
let millis_per_month = 3600 * 24 * 30 * 1000;
let dates = vec![
Some(123 * millis_per_month),
None,
Some(42 * millis_per_month),
None,
];
// Intervals in months
let intervals = vec![1, 2, 3, 1];
let results = [Some(316137600000), None, Some(100915200000), None];
let date_vector = DateTimeVector::from(dates.clone());
let interval_vector = IntervalYearMonthVector::from_vec(intervals);
let args: Vec<VectorRef> = vec![Arc::new(date_vector), Arc::new(interval_vector)];
let vector = f.eval(FunctionContext::default(), &args).unwrap();
assert_eq!(4, vector.len());
for (i, _t) in dates.iter().enumerate() {
let v = vector.get(i);
let result = results.get(i).unwrap();
if result.is_none() {
assert_eq!(Value::Null, v);
continue;
}
match v {
Value::DateTime(date) => {
assert_eq!(date.val(), result.unwrap());
}
_ => unreachable!(),
}
}
}
}

View File

@@ -17,8 +17,7 @@ pub(crate) mod encoding;
mod geohash;
mod h3;
mod helpers;
use geohash::{GeohashFunction, GeohashNeighboursFunction};
mod s2;
use crate::function_registry::FunctionRegistry;
@@ -27,8 +26,8 @@ pub(crate) struct GeoFunctions;
impl GeoFunctions {
pub fn register(registry: &FunctionRegistry) {
// geohash
registry.register(Arc::new(GeohashFunction));
registry.register(Arc::new(GeohashNeighboursFunction));
registry.register(Arc::new(geohash::GeohashFunction));
registry.register(Arc::new(geohash::GeohashNeighboursFunction));
// h3 index
registry.register(Arc::new(h3::H3LatLngToCell));
@@ -55,5 +54,11 @@ impl GeoFunctions {
registry.register(Arc::new(h3::H3GridDiskDistances));
registry.register(Arc::new(h3::H3GridDistance));
registry.register(Arc::new(h3::H3GridPathCells));
// s2
registry.register(Arc::new(s2::S2LatLngToCell));
registry.register(Arc::new(s2::S2CellLevel));
registry.register(Arc::new(s2::S2CellToToken));
registry.register(Arc::new(s2::S2CellParent));
}
}

View File

@@ -17,7 +17,7 @@ use std::sync::Arc;
use common_error::ext::{BoxedError, PlainError};
use common_error::status_code::StatusCode;
use common_macro::{as_aggr_func_creator, AggrFuncTypeStore};
use common_query::error::{self, InvalidFuncArgsSnafu, InvalidInputStateSnafu, Result};
use common_query::error::{self, InvalidInputStateSnafu, Result};
use common_query::logical_plan::accumulator::AggrFuncTypeStore;
use common_query::logical_plan::{Accumulator, AggregateFunctionCreator};
use common_query::prelude::AccumulatorCreatorFunction;

View File

@@ -16,7 +16,7 @@ use std::str::FromStr;
use common_error::ext::{BoxedError, PlainError};
use common_error::status_code::StatusCode;
use common_query::error::{self, InvalidFuncArgsSnafu, Result};
use common_query::error::{self, Result};
use common_query::prelude::{Signature, TypeSignature};
use datafusion::logical_expr::Volatility;
use datatypes::prelude::ConcreteDataType;
@@ -29,9 +29,9 @@ use datatypes::vectors::{
use derive_more::Display;
use h3o::{CellIndex, LatLng, Resolution};
use once_cell::sync::Lazy;
use snafu::{ensure, ResultExt};
use snafu::ResultExt;
use super::helpers::{ensure_columns_len, ensure_columns_n};
use super::helpers::{ensure_and_coerce, ensure_columns_len, ensure_columns_n};
use crate::function::{Function, FunctionContext};
static CELL_TYPES: Lazy<Vec<ConcreteDataType>> = Lazy::new(|| {
@@ -382,15 +382,7 @@ impl Function for H3CellResolution {
}
fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
ensure!(
columns.len() == 1,
InvalidFuncArgsSnafu {
err_msg: format!(
"The length of the args is not correct, expect 1, provided : {}",
columns.len()
),
}
);
ensure_columns_n!(columns, 1);
let cell_vec = &columns[0];
let size = cell_vec.len();
@@ -982,18 +974,6 @@ fn value_to_resolution(v: Value) -> Result<Resolution> {
.context(error::ExecuteSnafu)
}
macro_rules! ensure_and_coerce {
($compare:expr, $coerce:expr) => {{
ensure!(
$compare,
InvalidFuncArgsSnafu {
err_msg: "Argument was outside of acceptable range "
}
);
Ok($coerce)
}};
}
fn value_to_position(v: Value) -> Result<u64> {
match v {
Value::Int8(v) => ensure_and_coerce!(v >= 0, v as u64),

View File

@@ -14,15 +14,15 @@
macro_rules! ensure_columns_len {
($columns:ident) => {
ensure!(
snafu::ensure!(
$columns.windows(2).all(|c| c[0].len() == c[1].len()),
InvalidFuncArgsSnafu {
common_query::error::InvalidFuncArgsSnafu {
err_msg: "The length of input columns are in different size"
}
)
};
($column_a:ident, $column_b:ident, $($column_n:ident),*) => {
ensure!(
snafu::ensure!(
{
let mut result = $column_a.len() == $column_b.len();
$(
@@ -30,7 +30,7 @@ macro_rules! ensure_columns_len {
)*
result
}
InvalidFuncArgsSnafu {
common_query::error::InvalidFuncArgsSnafu {
err_msg: "The length of input columns are in different size"
}
)
@@ -41,9 +41,9 @@ pub(super) use ensure_columns_len;
macro_rules! ensure_columns_n {
($columns:ident, $n:literal) => {
ensure!(
snafu::ensure!(
$columns.len() == $n,
InvalidFuncArgsSnafu {
common_query::error::InvalidFuncArgsSnafu {
err_msg: format!(
"The length of arguments is not correct, expect {}, provided : {}",
stringify!($n),
@@ -59,3 +59,17 @@ macro_rules! ensure_columns_n {
}
pub(super) use ensure_columns_n;
macro_rules! ensure_and_coerce {
($compare:expr, $coerce:expr) => {{
snafu::ensure!(
$compare,
common_query::error::InvalidFuncArgsSnafu {
err_msg: "Argument was outside of acceptable range "
}
);
Ok($coerce)
}};
}
pub(super) use ensure_and_coerce;

View File

@@ -0,0 +1,275 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use common_query::error::{InvalidFuncArgsSnafu, Result};
use common_query::prelude::{Signature, TypeSignature};
use datafusion::logical_expr::Volatility;
use datatypes::prelude::ConcreteDataType;
use datatypes::scalars::ScalarVectorBuilder;
use datatypes::value::Value;
use datatypes::vectors::{MutableVector, StringVectorBuilder, UInt64VectorBuilder, VectorRef};
use derive_more::Display;
use once_cell::sync::Lazy;
use s2::cellid::{CellID, MAX_LEVEL};
use s2::latlng::LatLng;
use snafu::ensure;
use crate::function::{Function, FunctionContext};
use crate::scalars::geo::helpers::{ensure_and_coerce, ensure_columns_len, ensure_columns_n};
static CELL_TYPES: Lazy<Vec<ConcreteDataType>> = Lazy::new(|| {
vec![
ConcreteDataType::int64_datatype(),
ConcreteDataType::uint64_datatype(),
]
});
static COORDINATE_TYPES: Lazy<Vec<ConcreteDataType>> = Lazy::new(|| {
vec![
ConcreteDataType::float32_datatype(),
ConcreteDataType::float64_datatype(),
]
});
static LEVEL_TYPES: Lazy<Vec<ConcreteDataType>> = Lazy::new(|| {
vec![
ConcreteDataType::int8_datatype(),
ConcreteDataType::int16_datatype(),
ConcreteDataType::int32_datatype(),
ConcreteDataType::int64_datatype(),
ConcreteDataType::uint8_datatype(),
ConcreteDataType::uint16_datatype(),
ConcreteDataType::uint32_datatype(),
ConcreteDataType::uint64_datatype(),
]
});
/// Function that returns [s2] encoding cellid for a given geospatial coordinate.
///
/// [s2]: http://s2geometry.io
#[derive(Clone, Debug, Default, Display)]
#[display("{}", self.name())]
pub struct S2LatLngToCell;
impl Function for S2LatLngToCell {
fn name(&self) -> &str {
"s2_latlng_to_cell"
}
fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
Ok(ConcreteDataType::uint64_datatype())
}
fn signature(&self) -> Signature {
let mut signatures = Vec::with_capacity(COORDINATE_TYPES.len());
for coord_type in COORDINATE_TYPES.as_slice() {
signatures.push(TypeSignature::Exact(vec![
// latitude
coord_type.clone(),
// longitude
coord_type.clone(),
]));
}
Signature::one_of(signatures, Volatility::Stable)
}
fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
ensure_columns_n!(columns, 2);
let lat_vec = &columns[0];
let lon_vec = &columns[1];
let size = lat_vec.len();
let mut results = UInt64VectorBuilder::with_capacity(size);
for i in 0..size {
let lat = lat_vec.get(i).as_f64_lossy();
let lon = lon_vec.get(i).as_f64_lossy();
let result = match (lat, lon) {
(Some(lat), Some(lon)) => {
let coord = LatLng::from_degrees(lat, lon);
ensure!(
coord.is_valid(),
InvalidFuncArgsSnafu {
err_msg: "The input coordinates are invalid",
}
);
let cellid = CellID::from(coord);
let encoded: u64 = cellid.0;
Some(encoded)
}
_ => None,
};
results.push(result);
}
Ok(results.to_vector())
}
}
/// Return the level of the current s2 cell
#[derive(Clone, Debug, Default, Display)]
#[display("{}", self.name())]
pub struct S2CellLevel;
impl Function for S2CellLevel {
fn name(&self) -> &str {
"s2_cell_level"
}
fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
Ok(ConcreteDataType::uint64_datatype())
}
fn signature(&self) -> Signature {
signature_of_cell()
}
fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
ensure_columns_n!(columns, 1);
let cell_vec = &columns[0];
let size = cell_vec.len();
let mut results = UInt64VectorBuilder::with_capacity(size);
for i in 0..size {
let cell = cell_from_value(cell_vec.get(i));
let res = cell.map(|cell| cell.level());
results.push(res);
}
Ok(results.to_vector())
}
}
/// Return the string representation of the cell
#[derive(Clone, Debug, Default, Display)]
#[display("{}", self.name())]
pub struct S2CellToToken;
impl Function for S2CellToToken {
fn name(&self) -> &str {
"s2_cell_to_token"
}
fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
Ok(ConcreteDataType::string_datatype())
}
fn signature(&self) -> Signature {
signature_of_cell()
}
fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
ensure_columns_n!(columns, 1);
let cell_vec = &columns[0];
let size = cell_vec.len();
let mut results = StringVectorBuilder::with_capacity(size);
for i in 0..size {
let cell = cell_from_value(cell_vec.get(i));
let res = cell.map(|cell| cell.to_token());
results.push(res.as_deref());
}
Ok(results.to_vector())
}
}
/// Return the parent of the current s2 cell at the given level
#[derive(Clone, Debug, Default, Display)]
#[display("{}", self.name())]
pub struct S2CellParent;
impl Function for S2CellParent {
fn name(&self) -> &str {
"s2_cell_parent"
}
fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
Ok(ConcreteDataType::uint64_datatype())
}
fn signature(&self) -> Signature {
signature_of_cell_and_level()
}
fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
ensure_columns_n!(columns, 2);
let cell_vec = &columns[0];
let level_vec = &columns[1];
let size = cell_vec.len();
let mut results = UInt64VectorBuilder::with_capacity(size);
for i in 0..size {
let cell = cell_from_value(cell_vec.get(i));
let level = value_to_level(level_vec.get(i))?;
let result = cell.map(|cell| cell.parent(level).0);
results.push(result);
}
Ok(results.to_vector())
}
}
fn signature_of_cell() -> Signature {
let mut signatures = Vec::with_capacity(CELL_TYPES.len());
for cell_type in CELL_TYPES.as_slice() {
signatures.push(TypeSignature::Exact(vec![cell_type.clone()]));
}
Signature::one_of(signatures, Volatility::Stable)
}
fn signature_of_cell_and_level() -> Signature {
let mut signatures = Vec::with_capacity(CELL_TYPES.len() * LEVEL_TYPES.len());
for cell_type in CELL_TYPES.as_slice() {
for level_type in LEVEL_TYPES.as_slice() {
signatures.push(TypeSignature::Exact(vec![
cell_type.clone(),
level_type.clone(),
]));
}
}
Signature::one_of(signatures, Volatility::Stable)
}
fn cell_from_value(v: Value) -> Option<CellID> {
match v {
Value::Int64(v) => Some(CellID(v as u64)),
Value::UInt64(v) => Some(CellID(v)),
_ => None,
}
}
fn value_to_level(v: Value) -> Result<u64> {
match v {
Value::Int8(v) => ensure_and_coerce!(v >= 0 && v <= MAX_LEVEL as i8, v as u64),
Value::Int16(v) => ensure_and_coerce!(v >= 0 && v <= MAX_LEVEL as i16, v as u64),
Value::Int32(v) => ensure_and_coerce!(v >= 0 && v <= MAX_LEVEL as i32, v as u64),
Value::Int64(v) => ensure_and_coerce!(v >= 0 && v <= MAX_LEVEL as i64, v as u64),
Value::UInt8(v) => ensure_and_coerce!(v <= MAX_LEVEL as u8, v as u64),
Value::UInt16(v) => ensure_and_coerce!(v <= MAX_LEVEL as u16, v as u64),
Value::UInt32(v) => ensure_and_coerce!(v <= MAX_LEVEL as u32, v as u64),
Value::UInt64(v) => ensure_and_coerce!(v <= MAX_LEVEL, v),
_ => unreachable!(),
}
}
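These four functions are thin wrappers over the s2 crate. The sketch below is not part of the change; it only walks through the underlying crate calls the functions above already use (LatLng::from_degrees, CellID::from, level, parent, to_token).
use s2::cellid::CellID;
use s2::latlng::LatLng;

fn main() {
    // s2_latlng_to_cell: (lat, lon) in degrees -> 64-bit cell id.
    let coord = LatLng::from_degrees(37.7749, -122.4194);
    assert!(coord.is_valid());
    let cell = CellID::from(coord);
    let encoded: u64 = cell.0;

    // s2_cell_level, s2_cell_parent and s2_cell_to_token reuse the same CellID.
    let level = cell.level();
    let parent = cell.parent(10); // ancestor cell at level 10
    let token = cell.to_token();
    println!("cell={encoded} level={level} parent={} token={token}", parent.0);
}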

View File

@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use api::helper::{convert_i128_to_interval, convert_to_pb_decimal128};
use api::helper::{convert_month_day_nano_to_pb, convert_to_pb_decimal128};
use api::v1::column::Values;
use common_base::BitVec;
use datatypes::types::{IntervalType, TimeType, TimestampType, WrapperType};
@@ -211,7 +211,7 @@ pub fn values(arrays: &[VectorRef]) -> Result<Values> {
ConcreteDataType::Interval(IntervalType::MonthDayNano(_)),
IntervalMonthDayNanoVector,
interval_month_day_nano_values,
|x| { convert_i128_to_interval(x.into_native()) }
|x| { convert_month_day_nano_to_pb(x) }
),
(
ConcreteDataType::Decimal128(_),

View File

@@ -12,10 +12,10 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use std::sync::Arc;
use std::sync::{Arc, Mutex};
use async_trait::async_trait;
use common_telemetry::error;
use common_telemetry::{error, info};
use crate::error::Result;
@@ -24,6 +24,8 @@ pub type LeadershipChangeNotifierCustomizerRef = Arc<dyn LeadershipChangeNotifie
/// A trait for customizing the leadership change notifier.
pub trait LeadershipChangeNotifierCustomizer: Send + Sync {
fn customize(&self, notifier: &mut LeadershipChangeNotifier);
fn add_listener(&self, listener: Arc<dyn LeadershipChangeListener>);
}
/// A trait for handling leadership change events in a distributed system.
@@ -45,6 +47,31 @@ pub struct LeadershipChangeNotifier {
listeners: Vec<Arc<dyn LeadershipChangeListener>>,
}
#[derive(Default)]
pub struct DefaultLeadershipChangeNotifierCustomizer {
listeners: Mutex<Vec<Arc<dyn LeadershipChangeListener>>>,
}
impl DefaultLeadershipChangeNotifierCustomizer {
pub fn new() -> Self {
Self {
listeners: Mutex::new(Vec::new()),
}
}
}
impl LeadershipChangeNotifierCustomizer for DefaultLeadershipChangeNotifierCustomizer {
fn customize(&self, notifier: &mut LeadershipChangeNotifier) {
info!("Customizing leadership change notifier");
let listeners = self.listeners.lock().unwrap().clone();
notifier.listeners.extend(listeners);
}
fn add_listener(&self, listener: Arc<dyn LeadershipChangeListener>) {
self.listeners.lock().unwrap().push(listener);
}
}
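A hedged usage sketch, not taken from the diff: a listener registered on the default customizer before startup is copied into the notifier when customize() runs. Here my_listener stands for any Arc<dyn LeadershipChangeListener>, and the notifier value is assumed to come from the surrounding metasrv setup.
use std::sync::Arc;

fn wire_listener(
    my_listener: Arc<dyn LeadershipChangeListener>,
    notifier: &mut LeadershipChangeNotifier,
) {
    let customizer = DefaultLeadershipChangeNotifierCustomizer::new();
    // Registration goes through &self, so the customizer can be shared around.
    customizer.add_listener(my_listener);
    // customize() clones the registered listeners into the notifier's list.
    customizer.customize(notifier);
}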
impl LeadershipChangeNotifier {
/// Adds a listener to the notifier.
pub fn add_listener(&mut self, listener: Arc<dyn LeadershipChangeListener>) {

View File

@@ -289,6 +289,7 @@ pub enum LeaderState {
///
/// - The [`Region`] may be unavailable (e.g., Crashed, Network disconnected).
/// - The [`Region`] was planned to migrate to another [`Peer`].
#[serde(alias = "Downgraded")]
Downgrading,
}
@@ -516,6 +517,73 @@ mod tests {
assert_eq!(decoded, region_route);
}
#[test]
fn test_region_route_compatibility() {
let region_route = RegionRoute {
region: Region {
id: 2.into(),
name: "r2".to_string(),
partition: None,
attrs: BTreeMap::new(),
},
leader_peer: Some(Peer::new(1, "a1")),
follower_peers: vec![Peer::new(2, "a2"), Peer::new(3, "a3")],
leader_state: Some(LeaderState::Downgrading),
leader_down_since: None,
};
let input = r#"{"region":{"id":2,"name":"r2","partition":null,"attrs":{}},"leader_peer":{"id":1,"addr":"a1"},"follower_peers":[{"id":2,"addr":"a2"},{"id":3,"addr":"a3"}],"leader_state":"Downgraded","leader_down_since":null}"#;
let decoded: RegionRoute = serde_json::from_str(input).unwrap();
assert_eq!(decoded, region_route);
let region_route = RegionRoute {
region: Region {
id: 2.into(),
name: "r2".to_string(),
partition: None,
attrs: BTreeMap::new(),
},
leader_peer: Some(Peer::new(1, "a1")),
follower_peers: vec![Peer::new(2, "a2"), Peer::new(3, "a3")],
leader_state: Some(LeaderState::Downgrading),
leader_down_since: None,
};
let input = r#"{"region":{"id":2,"name":"r2","partition":null,"attrs":{}},"leader_peer":{"id":1,"addr":"a1"},"follower_peers":[{"id":2,"addr":"a2"},{"id":3,"addr":"a3"}],"leader_status":"Downgraded","leader_down_since":null}"#;
let decoded: RegionRoute = serde_json::from_str(input).unwrap();
assert_eq!(decoded, region_route);
let region_route = RegionRoute {
region: Region {
id: 2.into(),
name: "r2".to_string(),
partition: None,
attrs: BTreeMap::new(),
},
leader_peer: Some(Peer::new(1, "a1")),
follower_peers: vec![Peer::new(2, "a2"), Peer::new(3, "a3")],
leader_state: Some(LeaderState::Downgrading),
leader_down_since: None,
};
let input = r#"{"region":{"id":2,"name":"r2","partition":null,"attrs":{}},"leader_peer":{"id":1,"addr":"a1"},"follower_peers":[{"id":2,"addr":"a2"},{"id":3,"addr":"a3"}],"leader_state":"Downgrading","leader_down_since":null}"#;
let decoded: RegionRoute = serde_json::from_str(input).unwrap();
assert_eq!(decoded, region_route);
let region_route = RegionRoute {
region: Region {
id: 2.into(),
name: "r2".to_string(),
partition: None,
attrs: BTreeMap::new(),
},
leader_peer: Some(Peer::new(1, "a1")),
follower_peers: vec![Peer::new(2, "a2"), Peer::new(3, "a3")],
leader_state: Some(LeaderState::Downgrading),
leader_down_since: None,
};
let input = r#"{"region":{"id":2,"name":"r2","partition":null,"attrs":{}},"leader_peer":{"id":1,"addr":"a1"},"follower_peers":[{"id":2,"addr":"a2"},{"id":3,"addr":"a3"}],"leader_status":"Downgrading","leader_down_since":null}"#;
let decoded: RegionRoute = serde_json::from_str(input).unwrap();
assert_eq!(decoded, region_route);
}
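The round-trips above work because the renamed variant keeps a #[serde(alias = "Downgraded")] attribute, so payloads written by older versions still deserialize. A self-contained illustration of the same mechanism, using a throwaway enum rather than the crate's own types:
use serde::Deserialize;

#[derive(Debug, PartialEq, Deserialize)]
enum State {
    #[serde(alias = "Downgraded")]
    Downgrading,
}

fn main() {
    // The legacy spelling still parses...
    let old: State = serde_json::from_str("\"Downgraded\"").unwrap();
    // ...alongside the new one.
    let new: State = serde_json::from_str("\"Downgrading\"").unwrap();
    assert_eq!(old, State::Downgrading);
    assert_eq!(new, State::Downgrading);
}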
#[test]
fn test_de_serialize_partition() {
let p = Partition {

View File

@@ -0,0 +1,22 @@
[package]
name = "common-pprof"
version.workspace = true
edition.workspace = true
license.workspace = true
[dependencies]
common-error.workspace = true
common-macro.workspace = true
prost.workspace = true
snafu.workspace = true
tokio.workspace = true
[target.'cfg(unix)'.dependencies]
pprof = { version = "0.13", features = [
"flamegraph",
"prost-codec",
"protobuf",
] }
[lints]
workspace = true

View File

@@ -0,0 +1,99 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#[cfg(unix)]
pub mod nix;
pub mod error {
use std::any::Any;
use common_error::ext::ErrorExt;
use common_error::status_code::StatusCode;
use common_macro::stack_trace_debug;
use snafu::{Location, Snafu};
#[derive(Snafu)]
#[stack_trace_debug]
#[snafu(visibility(pub(crate)))]
pub enum Error {
#[cfg(unix)]
#[snafu(display("Pprof error"))]
Pprof {
#[snafu(source)]
error: pprof::Error,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Pprof is unsupported on this platform"))]
Unsupported {
#[snafu(implicit)]
location: Location,
},
}
pub type Result<T> = std::result::Result<T, Error>;
impl ErrorExt for Error {
fn status_code(&self) -> StatusCode {
match self {
#[cfg(unix)]
Error::Pprof { .. } => StatusCode::Unexpected,
Error::Unsupported { .. } => StatusCode::Unsupported,
}
}
fn as_any(&self) -> &dyn Any {
self
}
}
}
#[cfg(not(unix))]
pub mod dummy {
use std::time::Duration;
use crate::error::{Result, UnsupportedSnafu};
/// Dummy CPU profiler utility.
#[derive(Debug)]
pub struct Profiling {}
impl Profiling {
/// Creates a new profiler.
pub fn new(_duration: Duration, _frequency: i32) -> Profiling {
Profiling {}
}
/// Profiles and returns a generated text.
pub async fn dump_text(&self) -> Result<String> {
UnsupportedSnafu {}.fail()
}
/// Profiles and returns a generated flamegraph.
pub async fn dump_flamegraph(&self) -> Result<Vec<u8>> {
UnsupportedSnafu {}.fail()
}
/// Profiles and returns a generated proto.
pub async fn dump_proto(&self) -> Result<Vec<u8>> {
UnsupportedSnafu {}.fail()
}
}
}
#[cfg(not(unix))]
pub use dummy::Profiling;
#[cfg(unix)]
pub use nix::Profiling;

View File

@@ -0,0 +1,78 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::time::Duration;
use pprof::protos::Message;
use snafu::ResultExt;
use crate::error::{PprofSnafu, Result};
/// CPU profiler utility.
// Inspired by https://github.com/datafuselabs/databend/blob/67f445e83cd4eceda98f6c1c114858929d564029/src/common/base/src/base/profiling.rs
#[derive(Debug)]
pub struct Profiling {
/// Sample duration.
duration: Duration,
/// Sample frequency.
frequency: i32,
}
impl Profiling {
/// Creates a new profiler.
pub fn new(duration: Duration, frequency: i32) -> Profiling {
Profiling {
duration,
frequency,
}
}
/// Profiles and returns a generated pprof report.
pub async fn report(&self) -> Result<pprof::Report> {
let guard = pprof::ProfilerGuardBuilder::default()
.frequency(self.frequency)
.blocklist(&["libc", "libgcc", "pthread", "vdso"])
.build()
.context(PprofSnafu)?;
tokio::time::sleep(self.duration).await;
guard.report().build().context(PprofSnafu)
}
/// Profiles and returns a generated text.
pub async fn dump_text(&self) -> Result<String> {
let report = self.report().await?;
let text = format!("{report:?}");
Ok(text)
}
/// Profiles and returns a generated flamegraph.
pub async fn dump_flamegraph(&self) -> Result<Vec<u8>> {
let mut body: Vec<u8> = Vec::new();
let report = self.report().await?;
report.flamegraph(&mut body).context(PprofSnafu)?;
Ok(body)
}
/// Profiles and returns a generated proto.
pub async fn dump_proto(&self) -> Result<Vec<u8>> {
let report = self.report().await?;
// Generate Google's pprof-format report.
let profile = report.pprof().context(PprofSnafu)?;
let body = profile.encode_to_vec();
Ok(body)
}
}
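A hedged usage sketch (not in the diff): sample the current process for five seconds at 99 Hz and return the flamegraph bytes. It assumes an async runtime such as tokio and that the crate is referenced as common_pprof.
use std::time::Duration;

use common_pprof::error::Result;
use common_pprof::Profiling;

async fn cpu_flamegraph() -> Result<Vec<u8>> {
    // Sampling window and frequency in Hz.
    let profiling = Profiling::new(Duration::from_secs(5), 99);
    // On non-unix targets the dummy implementation returns Unsupported instead.
    profiling.dump_flamegraph().await
}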

View File

@@ -14,13 +14,13 @@
use std::fmt::{Display, Formatter, Write};
use chrono::{Datelike, Days, LocalResult, Months, NaiveDate, NaiveTime, TimeZone};
use chrono::{Datelike, Days, LocalResult, Months, NaiveDate, NaiveTime, TimeDelta, TimeZone};
use serde::{Deserialize, Serialize};
use serde_json::Value;
use snafu::ResultExt;
use crate::error::{InvalidDateStrSnafu, ParseDateStrSnafu, Result};
use crate::interval::Interval;
use crate::interval::{IntervalDayTime, IntervalMonthDayNano, IntervalYearMonth};
use crate::timezone::get_timezone;
use crate::util::datetime_to_utc;
use crate::Timezone;
@@ -134,29 +134,64 @@ impl Date {
(self.0 as i64) * 24 * 3600
}
/// Adds given Interval to the current date.
/// Returns None if the resulting date would be out of range.
pub fn add_interval(&self, interval: Interval) -> Option<Date> {
// FIXME(yingwen): remove add/sub intervals later
/// Adds given [IntervalYearMonth] to the current date.
pub fn add_year_month(&self, interval: IntervalYearMonth) -> Option<Date> {
let naive_date = self.to_chrono_date()?;
let (months, days, _) = interval.to_month_day_nano();
naive_date
.checked_add_months(Months::new(months as u32))?
.checked_add_days(Days::new(days as u64))
.checked_add_months(Months::new(interval.months as u32))
.map(Into::into)
}
/// Subtracts given Interval to the current date.
/// Returns None if the resulting date would be out of range.
pub fn sub_interval(&self, interval: Interval) -> Option<Date> {
/// Adds given [IntervalDayTime] to the current date.
pub fn add_day_time(&self, interval: IntervalDayTime) -> Option<Date> {
let naive_date = self.to_chrono_date()?;
let (months, days, _) = interval.to_month_day_nano();
naive_date
.checked_add_days(Days::new(interval.days as u64))?
.checked_add_signed(TimeDelta::milliseconds(interval.milliseconds as i64))
.map(Into::into)
}
/// Adds given [IntervalMonthDayNano] to the current date.
pub fn add_month_day_nano(&self, interval: IntervalMonthDayNano) -> Option<Date> {
let naive_date = self.to_chrono_date()?;
naive_date
.checked_sub_months(Months::new(months as u32))?
.checked_sub_days(Days::new(days as u64))
.checked_add_months(Months::new(interval.months as u32))?
.checked_add_days(Days::new(interval.days as u64))?
.checked_add_signed(TimeDelta::nanoseconds(interval.nanoseconds))
.map(Into::into)
}
/// Subtracts given [IntervalYearMonth] from the current date.
pub fn sub_year_month(&self, interval: IntervalYearMonth) -> Option<Date> {
let naive_date = self.to_chrono_date()?;
naive_date
.checked_sub_months(Months::new(interval.months as u32))
.map(Into::into)
}
/// Subtracts given [IntervalDayTime] from the current date.
pub fn sub_day_time(&self, interval: IntervalDayTime) -> Option<Date> {
let naive_date = self.to_chrono_date()?;
naive_date
.checked_sub_days(Days::new(interval.days as u64))?
.checked_sub_signed(TimeDelta::milliseconds(interval.milliseconds as i64))
.map(Into::into)
}
/// Subtracts given [IntervalMonthDayNano] from the current date.
pub fn sub_month_day_nano(&self, interval: IntervalMonthDayNano) -> Option<Date> {
let naive_date = self.to_chrono_date()?;
naive_date
.checked_sub_months(Months::new(interval.months as u32))?
.checked_sub_days(Days::new(interval.days as u64))?
.checked_sub_signed(TimeDelta::nanoseconds(interval.nanoseconds))
.map(Into::into)
}
@@ -246,12 +281,12 @@ mod tests {
fn test_add_sub_interval() {
let date = Date::new(1000);
let interval = Interval::from_year_month(3);
let interval = IntervalYearMonth::new(3);
let new_date = date.add_interval(interval).unwrap();
let new_date = date.add_year_month(interval).unwrap();
assert_eq!(new_date.val(), 1091);
assert_eq!(date, new_date.sub_interval(interval).unwrap());
assert_eq!(date, new_date.sub_year_month(interval).unwrap());
}
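A short sketch of the per-flavor API that replaces add_interval/sub_interval; the numbers mirror the updated test above rather than introducing new behavior, and Date stores days since the Unix epoch.
use common_time::{Date, IntervalDayTime, IntervalMonthDayNano, IntervalYearMonth};

fn date_interval_example() {
    let date = Date::new(1000); // day 1000 after 1970-01-01
    let ym = IntervalYearMonth::new(3);

    // Three calendar months later is day 1091, as the test asserts.
    let moved = date.add_year_month(ym).unwrap();
    assert_eq!(moved.val(), 1091);
    assert_eq!(date, moved.sub_year_month(ym).unwrap());

    // The other flavors have their own methods; each returns None on overflow.
    let _ = date.add_day_time(IntervalDayTime::new(1, 0));
    let _ = date.add_month_day_nano(IntervalMonthDayNano::new(1, 2, 0));
}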
#[test]

View File

@@ -13,16 +13,18 @@
// limitations under the License.
use std::fmt::{Display, Formatter, Write};
use std::time::Duration;
use chrono::{Days, LocalResult, Months, NaiveDateTime, TimeZone as ChronoTimeZone, Utc};
use chrono::{
Days, LocalResult, Months, NaiveDateTime, TimeDelta, TimeZone as ChronoTimeZone, Utc,
};
use serde::{Deserialize, Serialize};
use snafu::ResultExt;
use crate::error::{InvalidDateStrSnafu, Result};
use crate::interval::{IntervalDayTime, IntervalMonthDayNano, IntervalYearMonth};
use crate::timezone::{get_timezone, Timezone};
use crate::util::{datetime_to_utc, format_utc_datetime};
use crate::{Date, Interval};
use crate::Date;
const DATETIME_FORMAT: &str = "%F %H:%M:%S%.f";
const DATETIME_FORMAT_WITH_TZ: &str = "%F %H:%M:%S%.f%z";
@@ -160,32 +162,66 @@ impl DateTime {
None => Utc.from_utc_datetime(&v).naive_local(),
})
}
/// Adds given Interval to the current datetime.
/// Returns None if the resulting datetime would be out of range.
pub fn add_interval(&self, interval: Interval) -> Option<Self> {
// FIXME(yingwen): remove add/sub intervals later
/// Adds given [IntervalYearMonth] to the current datetime.
pub fn add_year_month(&self, interval: IntervalYearMonth) -> Option<Self> {
let naive_datetime = self.to_chrono_datetime()?;
let (months, days, nsecs) = interval.to_month_day_nano();
let naive_datetime = naive_datetime
.checked_add_months(Months::new(months as u32))?
.checked_add_days(Days::new(days as u64))?
+ Duration::from_nanos(nsecs as u64);
Some(naive_datetime.into())
naive_datetime
.checked_add_months(Months::new(interval.months as u32))
.map(Into::into)
}
/// Subtracts given Interval to the current datetime.
/// Returns None if the resulting datetime would be out of range.
pub fn sub_interval(&self, interval: Interval) -> Option<Self> {
/// Adds given [IntervalDayTime] to the current datetime.
pub fn add_day_time(&self, interval: IntervalDayTime) -> Option<Self> {
let naive_datetime = self.to_chrono_datetime()?;
let (months, days, nsecs) = interval.to_month_day_nano();
let naive_datetime = naive_datetime
.checked_sub_months(Months::new(months as u32))?
.checked_sub_days(Days::new(days as u64))?
- Duration::from_nanos(nsecs as u64);
naive_datetime
.checked_add_days(Days::new(interval.days as u64))?
.checked_add_signed(TimeDelta::milliseconds(interval.milliseconds as i64))
.map(Into::into)
}
Some(naive_datetime.into())
/// Adds given [IntervalMonthDayNano] to the current datetime.
pub fn add_month_day_nano(&self, interval: IntervalMonthDayNano) -> Option<Self> {
let naive_datetime = self.to_chrono_datetime()?;
naive_datetime
.checked_add_months(Months::new(interval.months as u32))?
.checked_add_days(Days::new(interval.days as u64))?
.checked_add_signed(TimeDelta::nanoseconds(interval.nanoseconds))
.map(Into::into)
}
/// Subtracts given [IntervalYearMonth] from the current datetime.
pub fn sub_year_month(&self, interval: IntervalYearMonth) -> Option<Self> {
let naive_datetime = self.to_chrono_datetime()?;
naive_datetime
.checked_sub_months(Months::new(interval.months as u32))
.map(Into::into)
}
/// Subtracts given [IntervalDayTime] from the current datetime.
pub fn sub_day_time(&self, interval: IntervalDayTime) -> Option<Self> {
let naive_datetime = self.to_chrono_datetime()?;
naive_datetime
.checked_sub_days(Days::new(interval.days as u64))?
.checked_sub_signed(TimeDelta::milliseconds(interval.milliseconds as i64))
.map(Into::into)
}
/// Subtracts given [IntervalMonthDayNano] from the current datetime.
pub fn sub_month_day_nano(&self, interval: IntervalMonthDayNano) -> Option<Self> {
let naive_datetime = self.to_chrono_datetime()?;
naive_datetime
.checked_sub_months(Months::new(interval.months as u32))?
.checked_sub_days(Days::new(interval.days as u64))?
.checked_sub_signed(TimeDelta::nanoseconds(interval.nanoseconds))
.map(Into::into)
}
/// Convert to [common_time::date].
@@ -231,12 +267,12 @@ mod tests {
fn test_add_sub_interval() {
let datetime = DateTime::new(1000);
let interval = Interval::from_day_time(1, 200);
let interval = IntervalDayTime::new(1, 200);
let new_datetime = datetime.add_interval(interval).unwrap();
let new_datetime = datetime.add_day_time(interval).unwrap();
assert_eq!(new_datetime.val(), 1000 + 3600 * 24 * 1000 + 200);
assert_eq!(datetime, new_datetime.sub_interval(interval).unwrap());
assert_eq!(datetime, new_datetime.sub_day_time(interval).unwrap());
}
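The same pattern for DateTime, again with the test's values. The hedged sketch below mainly spells out the units: DateTime stores milliseconds since the Unix epoch, so one day plus 200 ms shifts the value by 86_400_000 + 200.
use common_time::{DateTime, IntervalDayTime};

fn datetime_interval_example() {
    let datetime = DateTime::new(1000);
    let interval = IntervalDayTime::new(1, 200); // 1 day + 200 ms

    let moved = datetime.add_day_time(interval).unwrap();
    assert_eq!(moved.val(), 1000 + 24 * 3600 * 1000 + 200);
    assert_eq!(datetime, moved.sub_day_time(interval).unwrap());
}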
#[test]

View File

@@ -12,18 +12,10 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use std::cmp::Ordering;
use std::default::Default;
use std::fmt::{self, Display, Formatter, Write};
use std::hash::{Hash, Hasher};
use std::hash::Hash;
use arrow::datatypes::IntervalUnit as ArrowIntervalUnit;
use serde::{Deserialize, Serialize};
use serde_json::Value;
use snafu::ResultExt;
use crate::duration::Duration;
use crate::error::{Result, TimestampOverflowSnafu};
#[derive(
Debug, Default, Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, Serialize, Deserialize,
@@ -61,268 +53,269 @@ impl From<ArrowIntervalUnit> for IntervalUnit {
}
}
/// Interval Type represents a period of time.
/// It is composed of months, days and nanoseconds.
/// 3 kinds of interval are supported: year-month, day-time and
/// month-day-nano, which will be stored in the following format.
/// Interval data format:
/// | months | days | nsecs |
/// | 4bytes | 4bytes | 8bytes |
#[derive(Debug, Clone, Default, Copy, Serialize, Deserialize)]
pub struct Interval {
months: i32,
days: i32,
nsecs: i64,
unit: IntervalUnit,
// The `Value` type requires Serialize, Deserialize.
#[derive(
Debug, Default, Copy, Clone, Eq, PartialEq, Hash, Ord, PartialOrd, Serialize, Deserialize,
)]
#[repr(C)]
pub struct IntervalYearMonth {
/// Number of months
pub months: i32,
}
// Nanosecond convert to other time unit
pub const NANOS_PER_SEC: i64 = 1_000_000_000;
pub const NANOS_PER_MILLI: i64 = 1_000_000;
pub const NANOS_PER_MICRO: i64 = 1_000;
pub const NANOS_PER_HOUR: i64 = 60 * 60 * NANOS_PER_SEC;
pub const NANOS_PER_DAY: i64 = 24 * NANOS_PER_HOUR;
pub const NANOS_PER_MONTH: i64 = 30 * NANOS_PER_DAY;
pub const DAYS_PER_MONTH: i64 = 30;
impl Interval {
/// Creates a new interval from months, days and nanoseconds.
/// Precision is nanosecond.
pub fn from_month_day_nano(months: i32, days: i32, nsecs: i64) -> Self {
Interval {
months,
days,
nsecs,
unit: IntervalUnit::MonthDayNano,
}
}
/// Creates a new interval from months.
pub fn from_year_month(months: i32) -> Self {
Interval {
months,
days: 0,
nsecs: 0,
unit: IntervalUnit::YearMonth,
}
}
/// Creates a new interval from days and milliseconds.
pub fn from_day_time(days: i32, millis: i32) -> Self {
Interval {
months: 0,
days,
nsecs: (millis as i64) * NANOS_PER_MILLI,
unit: IntervalUnit::DayTime,
}
}
pub fn to_duration(&self) -> Result<Duration> {
Ok(Duration::new_nanosecond(
self.to_nanosecond()
.try_into()
.context(TimestampOverflowSnafu)?,
))
}
/// Return a tuple(months, days, nanoseconds) from the interval.
pub fn to_month_day_nano(&self) -> (i32, i32, i64) {
(self.months, self.days, self.nsecs)
}
/// Converts the interval to nanoseconds.
pub fn to_nanosecond(&self) -> i128 {
let days = (self.days as i64) + DAYS_PER_MONTH * (self.months as i64);
(self.nsecs as i128) + (NANOS_PER_DAY as i128) * (days as i128)
}
/// Smallest interval value.
pub const MIN: Self = Self {
months: i32::MIN,
days: i32::MIN,
nsecs: i64::MIN,
unit: IntervalUnit::MonthDayNano,
};
/// Largest interval value.
pub const MAX: Self = Self {
months: i32::MAX,
days: i32::MAX,
nsecs: i64::MAX,
unit: IntervalUnit::MonthDayNano,
};
/// Returns the justified interval.
/// allows you to adjust the interval of 30-day as one month and the interval of 24-hour as one day
pub fn justified_interval(&self) -> Self {
let mut result = *self;
let extra_months_d = self.days as i64 / DAYS_PER_MONTH;
let extra_months_nsecs = self.nsecs / NANOS_PER_MONTH;
result.days -= (extra_months_d * DAYS_PER_MONTH) as i32;
result.nsecs -= extra_months_nsecs * NANOS_PER_MONTH;
let extra_days = self.nsecs / NANOS_PER_DAY;
result.nsecs -= extra_days * NANOS_PER_DAY;
result.months += extra_months_d as i32 + extra_months_nsecs as i32;
result.days += extra_days as i32;
result
}
/// Convert Interval to nanoseconds,
/// to check whether Interval is positive
pub fn is_positive(&self) -> bool {
self.to_nanosecond() > 0
}
/// is_zero
pub fn is_zero(&self) -> bool {
self.months == 0 && self.days == 0 && self.nsecs == 0
}
/// get unit
pub fn unit(&self) -> IntervalUnit {
self.unit
}
/// Multiple Interval by an integer with overflow check.
/// Returns justified Interval, or `None` if overflow occurred.
pub fn checked_mul_int<I>(&self, rhs: I) -> Option<Self>
where
I: TryInto<i32>,
{
let rhs = rhs.try_into().ok()?;
let months = self.months.checked_mul(rhs)?;
let days = self.days.checked_mul(rhs)?;
let nsecs = self.nsecs.checked_mul(rhs as i64)?;
Some(
Self {
months,
days,
nsecs,
unit: self.unit,
}
.justified_interval(),
)
}
/// Convert Interval to ISO 8601 string
pub fn to_iso8601_string(self) -> String {
IntervalFormat::from(self).to_iso8601_string()
}
/// Convert Interval to postgres verbose string
pub fn to_postgres_string(self) -> String {
IntervalFormat::from(self).to_postgres_string()
}
/// Convert Interval to sql_standard string
pub fn to_sql_standard_string(self) -> String {
IntervalFormat::from(self).to_sql_standard_string()
}
/// Interval Type and i128 [IntervalUnit::MonthDayNano] Convert
/// v consists of months(i32) | days(i32) | nsecs(i64)
pub fn from_i128(v: i128) -> Self {
Interval {
nsecs: v as i64,
days: (v >> 64) as i32,
months: (v >> 96) as i32,
unit: IntervalUnit::MonthDayNano,
}
}
/// `Interval` Type and i64 [IntervalUnit::DayTime] Convert
/// v consists of days(i32) | milliseconds(i32)
pub fn from_i64(v: i64) -> Self {
Interval {
nsecs: ((v as i32) as i64) * NANOS_PER_MILLI,
days: (v >> 32) as i32,
months: 0,
unit: IntervalUnit::DayTime,
}
}
/// `Interval` Type and i32 [IntervalUnit::YearMonth] Convert
/// v consists of months(i32)
pub fn from_i32(v: i32) -> Self {
Interval {
nsecs: 0,
days: 0,
months: v,
unit: IntervalUnit::YearMonth,
}
}
pub fn to_i128(&self) -> i128 {
// 128 96 64 0
// +-------+-------+-------+-------+-------+-------+-------+-------+
// | months | days | nanoseconds |
// +-------+-------+-------+-------+-------+-------+-------+-------+
let months = (self.months as u128 & u32::MAX as u128) << 96;
let days = (self.days as u128 & u32::MAX as u128) << 64;
let nsecs = self.nsecs as u128 & u64::MAX as u128;
(months | days | nsecs) as i128
}
pub fn to_i64(&self) -> i64 {
// 64 32 0
// +-------+-------+-------+-------+-------+-------+-------+-------+
// | days | milliseconds |
// +-------+-------+-------+-------+-------+-------+-------+-------+
let days = (self.days as u64 & u32::MAX as u64) << 32;
let milliseconds = (self.nsecs / NANOS_PER_MILLI) as u64 & u32::MAX as u64;
(days | milliseconds) as i64
impl IntervalYearMonth {
pub fn new(months: i32) -> Self {
Self { months }
}
pub fn to_i32(&self) -> i32 {
self.months
}
pub fn from_i32(months: i32) -> Self {
Self { months }
}
pub fn negative(&self) -> Self {
Self {
months: -self.months,
days: -self.days,
nsecs: -self.nsecs,
unit: self.unit,
Self::new(-self.months)
}
pub fn to_iso8601_string(&self) -> String {
IntervalFormat::from(*self).to_iso8601_string()
}
}
impl From<IntervalYearMonth> for IntervalFormat {
fn from(interval: IntervalYearMonth) -> Self {
IntervalFormat {
years: interval.months / 12,
months: interval.months % 12,
..Default::default()
}
}
}
impl From<i128> for Interval {
impl From<i32> for IntervalYearMonth {
fn from(v: i32) -> Self {
Self::from_i32(v)
}
}
impl From<IntervalYearMonth> for i32 {
fn from(v: IntervalYearMonth) -> Self {
v.to_i32()
}
}
impl From<IntervalYearMonth> for serde_json::Value {
fn from(v: IntervalYearMonth) -> Self {
serde_json::Value::from(v.to_i32())
}
}
#[derive(
Debug, Default, Copy, Clone, Eq, PartialEq, Hash, Ord, PartialOrd, Serialize, Deserialize,
)]
#[repr(C)]
pub struct IntervalDayTime {
/// Number of days
pub days: i32,
/// Number of milliseconds
pub milliseconds: i32,
}
impl IntervalDayTime {
/// The additive identity i.e. `0`.
pub const ZERO: Self = Self::new(0, 0);
/// The multiplicative inverse, i.e. `-1`.
pub const MINUS_ONE: Self = Self::new(-1, -1);
/// The maximum value that can be represented
pub const MAX: Self = Self::new(i32::MAX, i32::MAX);
/// The minimum value that can be represented
pub const MIN: Self = Self::new(i32::MIN, i32::MIN);
pub const fn new(days: i32, milliseconds: i32) -> Self {
Self { days, milliseconds }
}
pub fn to_i64(&self) -> i64 {
let d = (self.days as u64 & u32::MAX as u64) << 32;
let m = self.milliseconds as u64 & u32::MAX as u64;
(d | m) as i64
}
pub fn from_i64(value: i64) -> Self {
let days = (value >> 32) as i32;
let milliseconds = value as i32;
Self { days, milliseconds }
}
pub fn negative(&self) -> Self {
Self::new(-self.days, -self.milliseconds)
}
pub fn to_iso8601_string(&self) -> String {
IntervalFormat::from(*self).to_iso8601_string()
}
pub fn as_millis(&self) -> i64 {
self.days as i64 * MS_PER_DAY + self.milliseconds as i64
}
}
impl From<i64> for IntervalDayTime {
fn from(v: i64) -> Self {
Self::from_i64(v)
}
}
impl From<IntervalDayTime> for i64 {
fn from(v: IntervalDayTime) -> Self {
v.to_i64()
}
}
impl From<IntervalDayTime> for serde_json::Value {
fn from(v: IntervalDayTime) -> Self {
serde_json::Value::from(v.to_i64())
}
}
// Millisecond convert to other time unit
pub const MS_PER_SEC: i64 = 1_000;
pub const MS_PER_MINUTE: i64 = 60 * MS_PER_SEC;
pub const MS_PER_HOUR: i64 = 60 * MS_PER_MINUTE;
pub const MS_PER_DAY: i64 = 24 * MS_PER_HOUR;
pub const NANOS_PER_MILLI: i64 = 1_000_000;
impl From<IntervalDayTime> for IntervalFormat {
fn from(interval: IntervalDayTime) -> Self {
IntervalFormat {
days: interval.days,
hours: interval.milliseconds as i64 / MS_PER_HOUR,
minutes: (interval.milliseconds as i64 % MS_PER_HOUR) / MS_PER_MINUTE,
seconds: (interval.milliseconds as i64 % MS_PER_MINUTE) / MS_PER_SEC,
microseconds: (interval.milliseconds as i64 % MS_PER_SEC) * MS_PER_SEC,
..Default::default()
}
}
}
#[derive(
Debug, Default, Copy, Clone, Eq, PartialEq, Hash, Ord, PartialOrd, Serialize, Deserialize,
)]
#[repr(C)]
pub struct IntervalMonthDayNano {
/// Number of months
pub months: i32,
/// Number of days
pub days: i32,
/// Number of nanoseconds
pub nanoseconds: i64,
}
impl IntervalMonthDayNano {
/// The additive identity i.e. `0`.
pub const ZERO: Self = Self::new(0, 0, 0);
/// The multiplicative inverse, i.e. `-1`.
pub const MINUS_ONE: Self = Self::new(-1, -1, -1);
/// The maximum value that can be represented
pub const MAX: Self = Self::new(i32::MAX, i32::MAX, i64::MAX);
/// The minimum value that can be represented
pub const MIN: Self = Self::new(i32::MIN, i32::MIN, i64::MIN);
pub const fn new(months: i32, days: i32, nanoseconds: i64) -> Self {
Self {
months,
days,
nanoseconds,
}
}
pub fn to_i128(&self) -> i128 {
let m = (self.months as u128 & u32::MAX as u128) << 96;
let d = (self.days as u128 & u32::MAX as u128) << 64;
let n = self.nanoseconds as u128 & u64::MAX as u128;
(m | d | n) as i128
}
pub fn from_i128(value: i128) -> Self {
let months = (value >> 96) as i32;
let days = (value >> 64) as i32;
let nanoseconds = value as i64;
Self {
months,
days,
nanoseconds,
}
}
pub fn negative(&self) -> Self {
Self::new(-self.months, -self.days, -self.nanoseconds)
}
pub fn to_iso8601_string(&self) -> String {
IntervalFormat::from(*self).to_iso8601_string()
}
}
impl From<i128> for IntervalMonthDayNano {
fn from(v: i128) -> Self {
Self::from_i128(v)
}
}
impl From<Interval> for i128 {
fn from(v: Interval) -> Self {
impl From<IntervalMonthDayNano> for i128 {
fn from(v: IntervalMonthDayNano) -> Self {
v.to_i128()
}
}
impl From<Interval> for serde_json::Value {
fn from(v: Interval) -> Self {
Value::String(v.to_string())
impl From<IntervalMonthDayNano> for serde_json::Value {
fn from(v: IntervalMonthDayNano) -> Self {
serde_json::Value::from(v.to_i128().to_string())
}
}
impl Display for Interval {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
let mut s = String::new();
if self.months != 0 {
write!(s, "{} months ", self.months)?;
// Nanosecond convert to other time unit
pub const NS_PER_SEC: i64 = 1_000_000_000;
pub const NS_PER_MINUTE: i64 = 60 * NS_PER_SEC;
pub const NS_PER_HOUR: i64 = 60 * NS_PER_MINUTE;
pub const NS_PER_DAY: i64 = 24 * NS_PER_HOUR;
impl From<IntervalMonthDayNano> for IntervalFormat {
fn from(interval: IntervalMonthDayNano) -> Self {
IntervalFormat {
years: interval.months / 12,
months: interval.months % 12,
days: interval.days,
hours: interval.nanoseconds / NS_PER_HOUR,
minutes: (interval.nanoseconds % NS_PER_HOUR) / NS_PER_MINUTE,
seconds: (interval.nanoseconds % NS_PER_MINUTE) / NS_PER_SEC,
microseconds: (interval.nanoseconds % NS_PER_SEC) / 1_000,
}
if self.days != 0 {
write!(s, "{} days ", self.days)?;
}
if self.nsecs != 0 {
write!(s, "{} nsecs", self.nsecs)?;
}
write!(f, "{}", s.trim())
}
}
pub fn interval_year_month_to_month_day_nano(interval: IntervalYearMonth) -> IntervalMonthDayNano {
IntervalMonthDayNano {
months: interval.months,
days: 0,
nanoseconds: 0,
}
}
pub fn interval_day_time_to_month_day_nano(interval: IntervalDayTime) -> IntervalMonthDayNano {
IntervalMonthDayNano {
months: 0,
days: interval.days,
nanoseconds: interval.milliseconds as i64 * NANOS_PER_MILLI,
}
}
@@ -339,31 +332,6 @@ pub struct IntervalFormat {
pub microseconds: i64,
}
impl From<Interval> for IntervalFormat {
fn from(val: Interval) -> IntervalFormat {
let months = val.months;
let days = val.days;
let microseconds = val.nsecs / NANOS_PER_MICRO;
let years = (months - (months % 12)) / 12;
let months = months - years * 12;
let hours = (microseconds - (microseconds % 3_600_000_000)) / 3_600_000_000;
let microseconds = microseconds - hours * 3_600_000_000;
let minutes = (microseconds - (microseconds % 60_000_000)) / 60_000_000;
let microseconds = microseconds - minutes * 60_000_000;
let seconds = (microseconds - (microseconds % 1_000_000)) / 1_000_000;
let microseconds = microseconds - seconds * 1_000_000;
IntervalFormat {
years,
months,
days,
hours,
minutes,
seconds,
microseconds,
}
}
}
impl IntervalFormat {
/// All the field in the interval is 0
pub fn is_zero(&self) -> bool {
@@ -540,117 +508,37 @@ fn get_time_part(
interval
}
/// IntervalCompare is used to compare two intervals
/// It makes interval into nanoseconds style.
#[derive(PartialEq, Eq, Hash, PartialOrd, Ord)]
struct IntervalCompare(i128);
impl From<Interval> for IntervalCompare {
fn from(interval: Interval) -> Self {
Self(interval.to_nanosecond())
}
}
impl Ord for Interval {
fn cmp(&self, other: &Self) -> Ordering {
IntervalCompare::from(*self).cmp(&IntervalCompare::from(*other))
}
}
impl PartialOrd for Interval {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl Eq for Interval {}
impl PartialEq for Interval {
fn eq(&self, other: &Self) -> bool {
self.cmp(other).is_eq()
}
}
impl Hash for Interval {
fn hash<H: Hasher>(&self, state: &mut H) {
IntervalCompare::from(*self).hash(state)
}
}
#[cfg(test)]
mod tests {
use std::collections::HashMap;
use super::*;
use crate::timestamp::TimeUnit;
#[test]
fn test_from_year_month() {
let interval = Interval::from_year_month(1);
let interval = IntervalYearMonth::new(1);
assert_eq!(interval.months, 1);
}
#[test]
fn test_from_date_time() {
let interval = Interval::from_day_time(1, 2);
let interval = IntervalDayTime::new(1, 2);
assert_eq!(interval.days, 1);
assert_eq!(interval.nsecs, 2_000_000);
assert_eq!(interval.milliseconds, 2);
}
#[test]
fn test_to_duration() {
let interval = Interval::from_day_time(1, 2);
let duration = interval.to_duration().unwrap();
assert_eq!(86400002000000, duration.value());
assert_eq!(TimeUnit::Nanosecond, duration.unit());
let interval = Interval::from_year_month(12);
let duration = interval.to_duration().unwrap();
assert_eq!(31104000000000000, duration.value());
assert_eq!(TimeUnit::Nanosecond, duration.unit());
}
#[test]
fn test_interval_is_positive() {
let interval = Interval::from_year_month(1);
assert!(interval.is_positive());
let interval = Interval::from_year_month(-1);
assert!(!interval.is_positive());
let interval = Interval::from_day_time(1, i32::MIN);
assert!(!interval.is_positive());
}
#[test]
fn test_to_nanosecond() {
let interval = Interval::from_year_month(1);
assert_eq!(interval.to_nanosecond(), 2592000000000000);
let interval = Interval::from_day_time(1, 2);
assert_eq!(interval.to_nanosecond(), 86400002000000);
let max_interval = Interval::from_month_day_nano(i32::MAX, i32::MAX, i64::MAX);
assert_eq!(max_interval.to_nanosecond(), 5751829423496836854775807);
let min_interval = Interval::from_month_day_nano(i32::MIN, i32::MIN, i64::MIN);
assert_eq!(min_interval.to_nanosecond(), -5751829426175236854775808);
}
#[test]
fn test_interval_is_zero() {
let interval = Interval::from_month_day_nano(1, 1, 1);
assert!(!interval.is_zero());
let interval = Interval::from_month_day_nano(0, 0, 0);
assert!(interval.is_zero());
fn test_from_month_day_nano() {
let interval = IntervalMonthDayNano::new(1, 2, 3);
assert_eq!(interval.months, 1);
assert_eq!(interval.days, 2);
assert_eq!(interval.nanoseconds, 3);
}
#[test]
fn test_interval_i128_convert() {
let test_interval_eq = |month, day, nano| {
let interval = Interval::from_month_day_nano(month, day, nano);
let interval = IntervalMonthDayNano::new(month, day, nano);
let interval_i128 = interval.to_i128();
let interval2 = Interval::from_i128(interval_i128);
let interval2 = IntervalMonthDayNano::from_i128(interval_i128);
assert_eq!(interval, interval2);
};
@@ -666,11 +554,26 @@ mod tests {
test_interval_eq(i32::MAX, i32::MIN, i64::MIN);
test_interval_eq(i32::MIN, i32::MAX, i64::MIN);
test_interval_eq(i32::MIN, i32::MIN, i64::MIN);
let interval = IntervalMonthDayNano::from_i128(1);
assert_eq!(interval, IntervalMonthDayNano::new(0, 0, 1));
assert_eq!(1, IntervalMonthDayNano::new(0, 0, 1).to_i128());
}
#[test]
fn test_interval_i64_convert() {
let interval = IntervalDayTime::from_i64(1);
assert_eq!(interval, IntervalDayTime::new(0, 1));
assert_eq!(1, IntervalDayTime::new(0, 1).to_i64());
}
#[test]
fn test_convert_interval_format() {
let interval = Interval::from_month_day_nano(14, 160, 1000000);
let interval = IntervalMonthDayNano {
months: 14,
days: 160,
nanoseconds: 1000000,
};
let interval_format = IntervalFormat::from(interval);
assert_eq!(interval_format.years, 1);
assert_eq!(interval_format.months, 2);
@@ -681,94 +584,34 @@ mod tests {
assert_eq!(interval_format.microseconds, 1000);
}
#[test]
fn test_interval_hash() {
let interval = Interval::from_month_day_nano(1, 31, 1);
let interval2 = Interval::from_month_day_nano(2, 1, 1);
let mut map = HashMap::new();
map.insert(interval, 1);
assert_eq!(map.get(&interval2), Some(&1));
}
#[test]
fn test_interval_mul_int() {
let interval = Interval::from_month_day_nano(1, 1, 1);
let interval2 = interval.checked_mul_int(2).unwrap();
assert_eq!(interval2.months, 2);
assert_eq!(interval2.days, 2);
assert_eq!(interval2.nsecs, 2);
// test justified interval
let interval = Interval::from_month_day_nano(1, 31, 1);
let interval2 = interval.checked_mul_int(2).unwrap();
assert_eq!(interval2.months, 4);
assert_eq!(interval2.days, 2);
assert_eq!(interval2.nsecs, 2);
// test overflow situation
let interval = Interval::from_month_day_nano(i32::MAX, 1, 1);
let interval2 = interval.checked_mul_int(2);
assert!(interval2.is_none());
}
#[test]
fn test_display() {
let interval = Interval::from_month_day_nano(1, 1, 1);
assert_eq!(interval.to_string(), "1 months 1 days 1 nsecs");
let interval = Interval::from_month_day_nano(14, 31, 10000000000);
assert_eq!(interval.to_string(), "14 months 31 days 10000000000 nsecs");
}
#[test]
fn test_interval_justified() {
let interval = Interval::from_month_day_nano(1, 131, 1).justified_interval();
let interval2 = Interval::from_month_day_nano(5, 11, 1);
assert_eq!(interval, interval2);
let interval = Interval::from_month_day_nano(1, 1, NANOS_PER_MONTH + 2 * NANOS_PER_DAY)
.justified_interval();
let interval2 = Interval::from_month_day_nano(2, 3, 0);
assert_eq!(interval, interval2);
}
#[test]
fn test_serde_json() {
let interval = Interval::from_month_day_nano(1, 1, 1);
let json = serde_json::to_string(&interval).unwrap();
assert_eq!(
json,
"{\"months\":1,\"days\":1,\"nsecs\":1,\"unit\":\"MonthDayNano\"}"
);
let interval2: Interval = serde_json::from_str(&json).unwrap();
assert_eq!(interval, interval2);
}
#[test]
fn test_to_iso8601_string() {
// Test interval zero
let interval = Interval::from_month_day_nano(0, 0, 0);
let interval = IntervalMonthDayNano::new(0, 0, 0);
assert_eq!(interval.to_iso8601_string(), "PT0S");
let interval = Interval::from_month_day_nano(1, 1, 1);
let interval = IntervalMonthDayNano::new(1, 1, 1);
assert_eq!(interval.to_iso8601_string(), "P0Y1M1DT0H0M0S");
let interval = Interval::from_month_day_nano(14, 31, 10000000000);
let interval = IntervalMonthDayNano::new(14, 31, 10000000000);
assert_eq!(interval.to_iso8601_string(), "P1Y2M31DT0H0M10S");
let interval = Interval::from_month_day_nano(14, 31, 23210200000000);
let interval = IntervalMonthDayNano::new(14, 31, 23210200000000);
assert_eq!(interval.to_iso8601_string(), "P1Y2M31DT6H26M50.2S");
}
#[test]
fn test_to_postgres_string() {
// Test interval zero
let interval = Interval::from_month_day_nano(0, 0, 0);
assert_eq!(interval.to_postgres_string(), "00:00:00");
let interval = Interval::from_month_day_nano(23, 100, 23210200000000);
let interval = IntervalMonthDayNano::new(0, 0, 0);
assert_eq!(
interval.to_postgres_string(),
IntervalFormat::from(interval).to_postgres_string(),
"00:00:00"
);
let interval = IntervalMonthDayNano::new(23, 100, 23210200000000);
assert_eq!(
IntervalFormat::from(interval).to_postgres_string(),
"1 year 11 mons 100 days 06:26:50.200000"
);
}
@@ -776,18 +619,21 @@ mod tests {
#[test]
fn test_to_sql_standard_string() {
// Test zero interval
let interval = Interval::from_month_day_nano(0, 0, 0);
assert_eq!(interval.to_sql_standard_string(), "0");
let interval = IntervalMonthDayNano::new(0, 0, 0);
assert_eq!(IntervalFormat::from(interval).to_sql_standard_string(), "0");
let interval = Interval::from_month_day_nano(23, 100, 23210200000000);
let interval = IntervalMonthDayNano::new(23, 100, 23210200000000);
assert_eq!(
interval.to_sql_standard_string(),
IntervalFormat::from(interval).to_sql_standard_string(),
"+1-11 +100 +6:26:50.200000"
);
// Test interval without year, month, day
let interval = Interval::from_month_day_nano(0, 0, 23210200000000);
assert_eq!(interval.to_sql_standard_string(), "6:26:50.200000");
let interval = IntervalMonthDayNano::new(0, 0, 23210200000000);
assert_eq!(
IntervalFormat::from(interval).to_sql_standard_string(),
"6:26:50.200000"
);
}
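A worked example (not part of the change) of the bit layouts the new structs keep from the old Interval: IntervalMonthDayNano packs months into bits 96..128, days into bits 64..96 and nanoseconds into the low 64 bits of an i128, while IntervalDayTime packs days into the upper and milliseconds into the lower 32 bits of an i64.
use common_time::{IntervalDayTime, IntervalMonthDayNano};

fn packing_example() {
    let mdn = IntervalMonthDayNano::new(1, 2, 3);
    let packed = mdn.to_i128();
    assert_eq!(packed, (1i128 << 96) | (2i128 << 64) | 3);
    assert_eq!(IntervalMonthDayNano::from_i128(packed), mdn);

    let dt = IntervalDayTime::new(1, 500);
    assert_eq!(dt.to_i64(), (1i64 << 32) | 500);
    assert_eq!(IntervalDayTime::from_i64(dt.to_i64()), dt);
}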
#[test]

View File

@@ -27,7 +27,7 @@ pub mod util;
pub use date::Date;
pub use datetime::DateTime;
pub use duration::Duration;
pub use interval::Interval;
pub use interval::{IntervalDayTime, IntervalMonthDayNano, IntervalYearMonth};
pub use range::RangeMillis;
pub use timestamp::Timestamp;
pub use timestamp_millis::TimestampMillis;

View File

@@ -20,16 +20,17 @@ use std::time::Duration;
use arrow::datatypes::TimeUnit as ArrowTimeUnit;
use chrono::{
DateTime, Days, LocalResult, Months, NaiveDate, NaiveDateTime, NaiveTime,
DateTime, Days, LocalResult, Months, NaiveDate, NaiveDateTime, NaiveTime, TimeDelta,
TimeZone as ChronoTimeZone, Utc,
};
use serde::{Deserialize, Serialize};
use snafu::{OptionExt, ResultExt};
use crate::error;
use crate::error::{ArithmeticOverflowSnafu, ParseTimestampSnafu, Result, TimestampOverflowSnafu};
use crate::interval::{IntervalDayTime, IntervalMonthDayNano, IntervalYearMonth};
use crate::timezone::{get_timezone, Timezone};
use crate::util::{datetime_to_utc, div_ceil};
use crate::{error, Interval};
/// Timestamp represents the number of units (seconds/milliseconds/microseconds/nanoseconds) elapsed
/// since the UNIX epoch. The valid value range of [Timestamp] depends on its unit (all in UTC timezone):
@@ -140,40 +141,77 @@ impl Timestamp {
})
}
/// Adds given Interval to the current timestamp.
/// Returns None if the resulting timestamp would be out of range.
pub fn add_interval(&self, interval: Interval) -> Option<Timestamp> {
// FIXME(yingwen): remove add/sub intervals later
/// Adds given [IntervalYearMonth] to the current timestamp.
pub fn add_year_month(&self, interval: IntervalYearMonth) -> Option<Timestamp> {
let naive_datetime = self.to_chrono_datetime()?;
let (months, days, nsecs) = interval.to_month_day_nano();
let naive_datetime = naive_datetime
.checked_add_months(Months::new(months as u32))?
.checked_add_days(Days::new(days as u64))?
+ Duration::from_nanos(nsecs as u64);
let naive_datetime =
naive_datetime.checked_add_months(Months::new(interval.months as u32))?;
match Timestamp::from_chrono_datetime(naive_datetime) {
// Have to convert the new timestamp by the current unit.
Some(ts) => ts.convert_to(self.unit),
None => None,
}
// Have to convert the new timestamp by the current unit.
Timestamp::from_chrono_datetime(naive_datetime).and_then(|ts| ts.convert_to(self.unit))
}
/// Subtracts given Interval to the current timestamp.
/// Returns None if the resulting timestamp would be out of range.
pub fn sub_interval(&self, interval: Interval) -> Option<Timestamp> {
/// Adds given [IntervalDayTime] to the current timestamp.
pub fn add_day_time(&self, interval: IntervalDayTime) -> Option<Timestamp> {
let naive_datetime = self.to_chrono_datetime()?;
let (months, days, nsecs) = interval.to_month_day_nano();
let naive_datetime = naive_datetime
.checked_sub_months(Months::new(months as u32))?
.checked_sub_days(Days::new(days as u64))?
- Duration::from_nanos(nsecs as u64);
.checked_add_days(Days::new(interval.days as u64))?
.checked_add_signed(TimeDelta::milliseconds(interval.milliseconds as i64))?;
match Timestamp::from_chrono_datetime(naive_datetime) {
// Have to convert the new timestamp by the current unit.
Some(ts) => ts.convert_to(self.unit),
None => None,
}
// Have to convert the new timestamp by the current unit.
Timestamp::from_chrono_datetime(naive_datetime).and_then(|ts| ts.convert_to(self.unit))
}
/// Adds given [IntervalMonthDayNano] to the current timestamp.
pub fn add_month_day_nano(&self, interval: IntervalMonthDayNano) -> Option<Timestamp> {
let naive_datetime = self.to_chrono_datetime()?;
let naive_datetime = naive_datetime
.checked_add_months(Months::new(interval.months as u32))?
.checked_add_days(Days::new(interval.days as u64))?
.checked_add_signed(TimeDelta::nanoseconds(interval.nanoseconds))?;
// Have to convert the new timestamp by the current unit.
Timestamp::from_chrono_datetime(naive_datetime).and_then(|ts| ts.convert_to(self.unit))
}
/// Subtracts given [IntervalYearMonth] from the current timestamp.
pub fn sub_year_month(&self, interval: IntervalYearMonth) -> Option<Timestamp> {
let naive_datetime = self.to_chrono_datetime()?;
let naive_datetime =
naive_datetime.checked_sub_months(Months::new(interval.months as u32))?;
// Have to convert the new timestamp by the current unit.
Timestamp::from_chrono_datetime(naive_datetime).and_then(|ts| ts.convert_to(self.unit))
}
/// Subtracts given [IntervalDayTime] from the current timestamp.
pub fn sub_day_time(&self, interval: IntervalDayTime) -> Option<Timestamp> {
let naive_datetime = self.to_chrono_datetime()?;
let naive_datetime = naive_datetime
.checked_sub_days(Days::new(interval.days as u64))?
.checked_sub_signed(TimeDelta::milliseconds(interval.milliseconds as i64))?;
// Have to convert the new timestamp by the current unit.
Timestamp::from_chrono_datetime(naive_datetime).and_then(|ts| ts.convert_to(self.unit))
}
/// Subtracts given [IntervalMonthDayNano] from the current timestamp.
pub fn sub_month_day_nano(&self, interval: IntervalMonthDayNano) -> Option<Timestamp> {
let naive_datetime = self.to_chrono_datetime()?;
let naive_datetime = naive_datetime
.checked_sub_months(Months::new(interval.months as u32))?
.checked_sub_days(Days::new(interval.days as u64))?
.checked_sub_signed(TimeDelta::nanoseconds(interval.nanoseconds))?;
// Have to convert the new timestamp by the current unit.
Timestamp::from_chrono_datetime(naive_datetime).and_then(|ts| ts.convert_to(self.unit))
}
/// Subtracts current timestamp with another timestamp, yielding a duration.
@@ -688,13 +726,13 @@ mod tests {
fn test_add_sub_interval() {
let ts = Timestamp::new(1000, TimeUnit::Millisecond);
let interval = Interval::from_day_time(1, 200);
let interval = IntervalDayTime::new(1, 200);
let new_ts = ts.add_interval(interval).unwrap();
let new_ts = ts.add_day_time(interval).unwrap();
assert_eq!(new_ts.unit(), TimeUnit::Millisecond);
assert_eq!(new_ts.value(), 1000 + 3600 * 24 * 1000 + 200);
assert_eq!(ts, new_ts.sub_interval(interval).unwrap());
assert_eq!(ts, new_ts.sub_day_time(interval).unwrap());
}
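A hedged sketch with the test's numbers, highlighting the detail called out in the comments above: after the chrono arithmetic the result is converted back to the timestamp's own unit, so a millisecond timestamp stays in milliseconds. TimeUnit is assumed to be reachable at common_time::timestamp::TimeUnit.
use common_time::timestamp::TimeUnit;
use common_time::{IntervalDayTime, Timestamp};

fn timestamp_interval_example() {
    let ts = Timestamp::new(1000, TimeUnit::Millisecond);
    let interval = IntervalDayTime::new(1, 200); // 1 day + 200 ms

    let moved = ts.add_day_time(interval).unwrap();
    assert_eq!(moved.unit(), TimeUnit::Millisecond);
    assert_eq!(moved.value(), 1000 + 24 * 3600 * 1000 + 200);
    assert_eq!(ts, moved.sub_day_time(interval).unwrap());
}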
#[test]

View File

@@ -46,6 +46,8 @@ pub struct DatanodeKafkaConfig {
pub create_index: bool,
#[serde(with = "humantime_serde")]
pub dump_index_interval: Duration,
/// Ignore missing entries when reading the WAL.
pub overwrite_entry_start_id: bool,
}
impl Default for DatanodeKafkaConfig {
@@ -60,6 +62,7 @@ impl Default for DatanodeKafkaConfig {
auto_create_topics: true,
create_index: true,
dump_index_interval: Duration::from_secs(60),
overwrite_entry_start_id: false,
}
}
}
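A hedged sketch of enabling the new flag from Rust; only the field added in this hunk is set and everything else stays at its default. The import path for DatanodeKafkaConfig is not visible in this hunk and is omitted. Per the option's doc comment, the flag makes WAL reads ignore missing entries.
fn kafka_wal_config() -> DatanodeKafkaConfig {
    DatanodeKafkaConfig {
        // Overwrite too-small entry ids instead of erroring during WAL read.
        overwrite_entry_start_id: true,
        ..Default::default()
    }
}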

View File

@@ -32,7 +32,7 @@ use common_recordbatch::SendableRecordBatchStream;
use common_runtime::Runtime;
use common_telemetry::tracing::{self, info_span};
use common_telemetry::tracing_context::{FutureExt, TracingContext};
use common_telemetry::{error, info, warn};
use common_telemetry::{debug, error, info, warn};
use dashmap::DashMap;
use datafusion::datasource::{provider_as_source, TableProvider};
use datafusion::error::Result as DfResult;
@@ -893,7 +893,7 @@ impl RegionServerInner {
for region in logical_regions {
self.region_map
.insert(region, RegionEngineWithStatus::Ready(engine.clone()));
info!("Logical region {} is registered!", region);
debug!("Logical region {} is registered!", region);
}
Ok(())
}
@@ -935,17 +935,19 @@ impl RegionServerInner {
.iter()
.map(|x| (*x.key(), x.value().clone()))
.collect::<Vec<_>>();
let num_regions = regions.len();
for (region_id, engine) in regions {
let closed = engine
.handle_request(region_id, RegionRequest::Close(RegionCloseRequest {}))
.await;
match closed {
Ok(_) => info!("Region {region_id} is closed"),
Ok(_) => debug!("Region {region_id} is closed"),
Err(e) => warn!("Failed to close region {region_id}, err: {e}"),
}
}
self.region_map.clear();
info!("closed {num_regions} regions");
let engines = self.engines.write().unwrap().drain().collect::<Vec<_>>();
for (engine_name, engine) in engines {

View File

@@ -12,11 +12,10 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use common_time::interval::Interval;
use common_time::{IntervalDayTime, IntervalMonthDayNano, IntervalYearMonth};
use paste::paste;
use serde::{Deserialize, Serialize};
use crate::prelude::{Scalar, Value, ValueRef};
use crate::prelude::Scalar;
use crate::scalars::ScalarRef;
use crate::types::{
IntervalDayTimeType, IntervalMonthDayNanoType, IntervalYearMonthType, WrapperType,
@@ -26,39 +25,6 @@ use crate::vectors::{IntervalDayTimeVector, IntervalMonthDayNanoVector, Interval
macro_rules! define_interval_with_unit {
($unit: ident, $native_ty: ty) => {
paste! {
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub struct [<Interval $unit>](pub Interval);
impl [<Interval $unit>] {
pub fn new(val: $native_ty) -> Self {
Self(Interval:: [<from_ $native_ty>](val))
}
}
impl Default for [<Interval $unit>] {
fn default() -> Self {
Self::new(0)
}
}
impl From<[<Interval $unit>]> for Value {
fn from(t: [<Interval $unit>]) -> Value {
Value::Interval(t.0)
}
}
impl From<[<Interval $unit>]> for serde_json::Value {
fn from(t: [<Interval $unit>]) -> Self {
t.0.into()
}
}
impl From<[<Interval $unit>]> for ValueRef<'static> {
fn from(t: [<Interval $unit>]) -> Self {
ValueRef::Interval(t.0)
}
}
impl Scalar for [<Interval $unit>] {
type VectorType = [<Interval $unit Vector>];
type RefType<'a> = [<Interval $unit>];
@@ -87,41 +53,11 @@ macro_rules! define_interval_with_unit {
type Native = $native_ty;
fn from_native(value: Self::Native) -> Self {
Self::new(value)
Self::[<from_ $native_ty>](value)
}
fn into_native(self) -> Self::Native {
self.0.[<to_ $native_ty>]()
}
}
impl From<$native_ty> for [<Interval $unit>] {
fn from(val: $native_ty) -> Self {
[<Interval $unit>]::from_native(val as $native_ty)
}
}
impl From<[<Interval $unit>]> for $native_ty {
fn from(val: [<Interval $unit>]) -> Self {
val.0.[<to_ $native_ty>]()
}
}
impl TryFrom<Value> for Option<[<Interval $unit>]> {
type Error = $crate::error::Error;
#[inline]
fn try_from(from: Value) -> std::result::Result<Self, Self::Error> {
match from {
Value::Interval(v) if v.unit() == common_time::interval::IntervalUnit::$unit => {
Ok(Some([<Interval $unit>](v)))
},
Value::Null => Ok(None),
_ => $crate::error::TryFromValueSnafu {
reason: format!("{:?} is not a {}", from, stringify!([<Interval $unit>])),
}
.fail(),
}
self.[<to_ $native_ty>]()
}
}
}
@@ -138,17 +74,17 @@ mod tests {
#[test]
fn test_interval_scalar() {
let interval = IntervalYearMonth::new(1000);
let interval = IntervalYearMonth::from(1000);
assert_eq!(interval, interval.as_scalar_ref());
assert_eq!(interval, interval.to_owned_scalar());
assert_eq!(1000, interval.into_native());
let interval = IntervalDayTime::new(1000);
let interval = IntervalDayTime::from(1000);
assert_eq!(interval, interval.as_scalar_ref());
assert_eq!(interval, interval.to_owned_scalar());
assert_eq!(1000, interval.into_native());
let interval = IntervalMonthDayNano::new(1000);
let interval = IntervalMonthDayNano::from(1000);
assert_eq!(interval, interval.as_scalar_ref());
assert_eq!(interval, interval.to_owned_scalar());
assert_eq!(1000, interval.into_native());
@@ -156,15 +92,15 @@ mod tests {
#[test]
fn test_interval_convert_to_native_type() {
let interval = IntervalMonthDayNano::new(1000);
let interval = IntervalMonthDayNano::from(1000);
let native_value: i128 = interval.into();
assert_eq!(native_value, 1000);
let interval = IntervalDayTime::new(1000);
let interval = IntervalDayTime::from(1000);
let native_interval: i64 = interval.into();
assert_eq!(native_interval, 1000);
let interval = IntervalYearMonth::new(1000);
let interval = IntervalYearMonth::from(1000);
let native_interval: i32 = interval.into();
assert_eq!(native_interval, 1000);
}

View File

@@ -17,8 +17,9 @@ use arrow::datatypes::{
IntervalMonthDayNanoType as ArrowIntervalMonthDayNanoType, IntervalUnit as ArrowIntervalUnit,
IntervalYearMonthType as ArrowIntervalYearMonthType,
};
use common_time::interval::IntervalUnit;
use common_time::Interval;
use common_time::interval::{
IntervalDayTime, IntervalMonthDayNano, IntervalUnit, IntervalYearMonth,
};
use enum_dispatch::enum_dispatch;
use paste::paste;
use serde::{Deserialize, Serialize};
@@ -26,7 +27,6 @@ use snafu::OptionExt;
use crate::data_type::ConcreteDataType;
use crate::error;
use crate::interval::{IntervalDayTime, IntervalMonthDayNano, IntervalYearMonth};
use crate::prelude::{
DataType, LogicalTypeId, MutableVector, ScalarVectorBuilder, Value, ValueRef, Vector,
};
@@ -75,7 +75,7 @@ macro_rules! impl_data_type_for_interval {
}
fn default_value(&self) -> Value {
Value::Interval(Interval::from_i128(0))
Value::[<Interval $unit>]([<Interval $unit>]::default())
}
fn as_arrow_type(&self) -> ArrowDataType {
@@ -124,7 +124,7 @@ macro_rules! impl_data_type_for_interval {
fn cast_value_ref(value: ValueRef) -> crate::Result<Option<Self::Wrapper>> {
match value {
ValueRef::Null => Ok(None),
ValueRef::Interval(t) => Ok(Some([<Interval $unit>](t))),
ValueRef::[<Interval $unit>](t) => Ok(Some(t)),
other => error::CastTypeSnafu {
msg: format!("Failed to cast value {:?} to {}", other, stringify!([<Interval $unit>])),
}

View File

@@ -16,7 +16,6 @@ use std::cmp::Ordering;
use std::fmt;
use arrow::datatypes::{ArrowNativeType, ArrowPrimitiveType, DataType as ArrowDataType};
use common_time::interval::IntervalUnit;
use common_time::{Date, DateTime};
use serde::{Deserialize, Serialize};
use snafu::OptionExt;
@@ -30,6 +29,7 @@ use crate::types::{DateTimeType, DateType};
use crate::value::{Value, ValueRef};
use crate::vectors::{MutableVector, PrimitiveVector, PrimitiveVectorBuilder, Vector};
// TODO(yingwen): Can we remove `Into<serde_json::Value>`?
/// Represents a wrapper type that wraps a native type using the `newtype pattern`;
/// for example, [Date](`common_time::Date`) wraps the underlying native type `i32`.
@@ -364,11 +364,7 @@ impl DataType for Int64Type {
Value::DateTime(v) => Some(Value::Int64(v.val())),
Value::Timestamp(v) => Some(Value::Int64(v.value())),
Value::Time(v) => Some(Value::Int64(v.value())),
Value::Interval(v) => match v.unit() {
IntervalUnit::DayTime => Some(Value::Int64(v.to_i64())),
IntervalUnit::YearMonth => None,
IntervalUnit::MonthDayNano => None,
},
// We don't allow casting interval type to int.
_ => None,
}
}
@@ -410,11 +406,7 @@ impl DataType for Int32Type {
Value::Float64(v) => num::cast::cast(v).map(Value::Int32),
Value::String(v) => v.as_utf8().parse::<i32>().map(Value::Int32).ok(),
Value::Date(v) => Some(Value::Int32(v.val())),
Value::Interval(v) => match v.unit() {
IntervalUnit::YearMonth => Some(Value::Int32(v.to_i32())),
IntervalUnit::DayTime => None,
IntervalUnit::MonthDayNano => None,
},
// We don't allow casting interval type to int.
_ => None,
}
}

View File

@@ -78,7 +78,15 @@ impl DataType for StringType {
Value::DateTime(v) => Some(Value::String(StringBytes::from(v.to_string()))),
Value::Timestamp(v) => Some(Value::String(StringBytes::from(v.to_iso8601_string()))),
Value::Time(v) => Some(Value::String(StringBytes::from(v.to_iso8601_string()))),
Value::Interval(v) => Some(Value::String(StringBytes::from(v.to_iso8601_string()))),
Value::IntervalYearMonth(v) => {
Some(Value::String(StringBytes::from(v.to_iso8601_string())))
}
Value::IntervalDayTime(v) => {
Some(Value::String(StringBytes::from(v.to_iso8601_string())))
}
Value::IntervalMonthDayNano(v) => {
Some(Value::String(StringBytes::from(v.to_iso8601_string())))
}
Value::Duration(v) => Some(Value::String(StringBytes::from(v.to_string()))),
Value::Decimal128(v) => Some(Value::String(StringBytes::from(v.to_string()))),

View File

@@ -28,7 +28,7 @@ use common_time::datetime::DateTime;
use common_time::interval::IntervalUnit;
use common_time::time::Time;
use common_time::timestamp::{TimeUnit, Timestamp};
use common_time::{Duration, Interval, Timezone};
use common_time::{Duration, IntervalDayTime, IntervalMonthDayNano, IntervalYearMonth, Timezone};
use datafusion_common::ScalarValue;
use greptime_proto::v1::value::ValueData;
pub use ordered_float::OrderedFloat;
@@ -38,6 +38,7 @@ use snafu::{ensure, ResultExt};
use crate::error::{self, ConvertArrowArrayToScalarsSnafu, Error, Result, TryFromValueSnafu};
use crate::prelude::*;
use crate::schema::ColumnSchema;
use crate::type_id::LogicalTypeId;
use crate::types::{IntervalType, ListType};
use crate::vectors::ListVector;
@@ -78,7 +79,10 @@ pub enum Value {
Timestamp(Timestamp),
Time(Time),
Duration(Duration),
Interval(Interval),
// Interval types:
IntervalYearMonth(IntervalYearMonth),
IntervalDayTime(IntervalDayTime),
IntervalMonthDayNano(IntervalMonthDayNano),
List(ListValue),
}
@@ -111,7 +115,15 @@ impl Display for Value {
Value::DateTime(v) => write!(f, "{v}"),
Value::Timestamp(v) => write!(f, "{}", v.to_iso8601_string()),
Value::Time(t) => write!(f, "{}", t.to_iso8601_string()),
Value::Interval(v) => write!(f, "{}", v.to_iso8601_string()),
Value::IntervalYearMonth(v) => {
write!(f, "{}", v.to_iso8601_string())
}
Value::IntervalDayTime(v) => {
write!(f, "{}", v.to_iso8601_string())
}
Value::IntervalMonthDayNano(v) => {
write!(f, "{}", v.to_iso8601_string())
}
Value::Duration(d) => write!(f, "{d}"),
Value::List(v) => {
let items = v
@@ -153,7 +165,15 @@ macro_rules! define_data_type_func {
$struct::DateTime(_) => ConcreteDataType::datetime_datatype(),
$struct::Time(t) => ConcreteDataType::time_datatype(*t.unit()),
$struct::Timestamp(v) => ConcreteDataType::timestamp_datatype(v.unit()),
$struct::Interval(v) => ConcreteDataType::interval_datatype(v.unit()),
$struct::IntervalYearMonth(_) => {
ConcreteDataType::interval_datatype(IntervalUnit::YearMonth)
}
$struct::IntervalDayTime(_) => {
ConcreteDataType::interval_datatype(IntervalUnit::DayTime)
}
$struct::IntervalMonthDayNano(_) => {
ConcreteDataType::interval_datatype(IntervalUnit::MonthDayNano)
}
$struct::List(list) => ConcreteDataType::list_datatype(list.datatype().clone()),
$struct::Duration(d) => ConcreteDataType::duration_datatype(d.unit()),
$struct::Decimal128(d) => {
@@ -206,7 +226,9 @@ impl Value {
Value::List(v) => ValueRef::List(ListValueRef::Ref { val: v }),
Value::Timestamp(v) => ValueRef::Timestamp(*v),
Value::Time(v) => ValueRef::Time(*v),
Value::Interval(v) => ValueRef::Interval(*v),
Value::IntervalYearMonth(v) => ValueRef::IntervalYearMonth(*v),
Value::IntervalDayTime(v) => ValueRef::IntervalDayTime(*v),
Value::IntervalMonthDayNano(v) => ValueRef::IntervalMonthDayNano(*v),
Value::Duration(v) => ValueRef::Duration(*v),
Value::Decimal128(v) => ValueRef::Decimal128(*v),
}
@@ -220,14 +242,6 @@ impl Value {
}
}
/// Cast Value to Interval. Return None if value is not a valid interval data type.
pub fn as_interval(&self) -> Option<Interval> {
match self {
Value::Interval(i) => Some(*i),
_ => None,
}
}
/// Cast Value to utf8 String. Return None if value is not a valid string data type.
pub fn as_string(&self) -> Option<String> {
match self {
@@ -255,12 +269,35 @@ impl Value {
/// Cast Value to [Time]. Return None if value is not a valid time data type.
pub fn as_time(&self) -> Option<Time> {
match self {
Value::Int64(v) => Some(Time::new_millisecond(*v)),
Value::Time(t) => Some(*t),
_ => None,
}
}
/// Cast Value to [IntervalYearMonth]. Return None if value is not a valid interval year month data type.
pub fn as_interval_year_month(&self) -> Option<IntervalYearMonth> {
match self {
Value::IntervalYearMonth(v) => Some(*v),
_ => None,
}
}
/// Cast Value to [IntervalDayTime]. Return None if value is not a valid interval day time data type.
pub fn as_interval_day_time(&self) -> Option<IntervalDayTime> {
match self {
Value::IntervalDayTime(v) => Some(*v),
_ => None,
}
}
/// Cast Value to [IntervalMonthDayNano]. Return None if value is not a valid interval month day nano data type.
pub fn as_interval_month_day_nano(&self) -> Option<IntervalMonthDayNano> {
match self {
Value::IntervalMonthDayNano(v) => Some(*v),
_ => None,
}
}
/// Cast Value to u64. Return None if value is not a valid uint64 data type.
pub fn as_u64(&self) -> Option<u64> {
match self {
@@ -321,11 +358,9 @@ impl Value {
TimeUnit::Microsecond => LogicalTypeId::TimeMicrosecond,
TimeUnit::Nanosecond => LogicalTypeId::TimeNanosecond,
},
Value::Interval(v) => match v.unit() {
IntervalUnit::YearMonth => LogicalTypeId::IntervalYearMonth,
IntervalUnit::DayTime => LogicalTypeId::IntervalDayTime,
IntervalUnit::MonthDayNano => LogicalTypeId::IntervalMonthDayNano,
},
Value::IntervalYearMonth(_) => LogicalTypeId::IntervalYearMonth,
Value::IntervalDayTime(_) => LogicalTypeId::IntervalDayTime,
Value::IntervalMonthDayNano(_) => LogicalTypeId::IntervalMonthDayNano,
Value::Duration(d) => match d.unit() {
TimeUnit::Second => LogicalTypeId::DurationSecond,
TimeUnit::Millisecond => LogicalTypeId::DurationMillisecond,
@@ -375,11 +410,9 @@ impl Value {
}
Value::Timestamp(t) => timestamp_to_scalar_value(t.unit(), Some(t.value())),
Value::Time(t) => time_to_scalar_value(*t.unit(), Some(t.value()))?,
Value::Interval(v) => match v.unit() {
IntervalUnit::YearMonth => ScalarValue::IntervalYearMonth(Some(v.to_i32())),
IntervalUnit::DayTime => ScalarValue::IntervalDayTime(Some(v.to_i64())),
IntervalUnit::MonthDayNano => ScalarValue::IntervalMonthDayNano(Some(v.to_i128())),
},
Value::IntervalYearMonth(v) => ScalarValue::IntervalYearMonth(Some(v.to_i32())),
Value::IntervalDayTime(v) => ScalarValue::IntervalDayTime(Some(v.to_i64())),
Value::IntervalMonthDayNano(v) => ScalarValue::IntervalMonthDayNano(Some(v.to_i128())),
Value::Duration(d) => duration_to_scalar_value(d.unit(), Some(d.value())),
Value::Decimal128(d) => {
let (v, p, s) = d.to_scalar_value();
@@ -434,7 +467,9 @@ impl Value {
Value::Timestamp(x) => Some(Value::Timestamp(x.negative())),
Value::Time(x) => Some(Value::Time(x.negative())),
Value::Duration(x) => Some(Value::Duration(x.negative())),
Value::Interval(x) => Some(Value::Interval(x.negative())),
Value::IntervalYearMonth(x) => Some(Value::IntervalYearMonth(x.negative())),
Value::IntervalDayTime(x) => Some(Value::IntervalDayTime(x.negative())),
Value::IntervalMonthDayNano(x) => Some(Value::IntervalMonthDayNano(x.negative())),
Value::Binary(_) | Value::String(_) | Value::Boolean(_) | Value::List(_) => None,
}
@@ -571,16 +606,6 @@ pub fn scalar_value_to_timestamp(
}
}
/// Convert [ScalarValue] to [Interval].
pub fn scalar_value_to_interval(scalar: &ScalarValue) -> Option<Interval> {
match scalar {
ScalarValue::IntervalYearMonth(v) => v.map(Interval::from_i32),
ScalarValue::IntervalDayTime(v) => v.map(Interval::from_i64),
ScalarValue::IntervalMonthDayNano(v) => v.map(Interval::from_i128),
_ => None,
}
}
macro_rules! impl_ord_for_value_like {
($Type: ident, $left: ident, $right: ident) => {
if $left.is_null() && !$right.is_null() {
@@ -607,7 +632,9 @@ macro_rules! impl_ord_for_value_like {
($Type::DateTime(v1), $Type::DateTime(v2)) => v1.cmp(v2),
($Type::Timestamp(v1), $Type::Timestamp(v2)) => v1.cmp(v2),
($Type::Time(v1), $Type::Time(v2)) => v1.cmp(v2),
($Type::Interval(v1), $Type::Interval(v2)) => v1.cmp(v2),
($Type::IntervalYearMonth(v1), $Type::IntervalYearMonth(v2)) => v1.cmp(v2),
($Type::IntervalDayTime(v1), $Type::IntervalDayTime(v2)) => v1.cmp(v2),
($Type::IntervalMonthDayNano(v1), $Type::IntervalMonthDayNano(v2)) => v1.cmp(v2),
($Type::Duration(v1), $Type::Duration(v2)) => v1.cmp(v2),
($Type::List(v1), $Type::List(v2)) => v1.cmp(v2),
_ => panic!(
@@ -685,7 +712,9 @@ impl_try_from_value!(Date, Date);
impl_try_from_value!(Time, Time);
impl_try_from_value!(DateTime, DateTime);
impl_try_from_value!(Timestamp, Timestamp);
impl_try_from_value!(Interval, Interval);
impl_try_from_value!(IntervalYearMonth, IntervalYearMonth);
impl_try_from_value!(IntervalDayTime, IntervalDayTime);
impl_try_from_value!(IntervalMonthDayNano, IntervalMonthDayNano);
impl_try_from_value!(Duration, Duration);
impl_try_from_value!(Decimal128, Decimal128);
@@ -727,7 +756,9 @@ impl_value_from!(Date, Date);
impl_value_from!(Time, Time);
impl_value_from!(DateTime, DateTime);
impl_value_from!(Timestamp, Timestamp);
impl_value_from!(Interval, Interval);
impl_value_from!(IntervalYearMonth, IntervalYearMonth);
impl_value_from!(IntervalDayTime, IntervalDayTime);
impl_value_from!(IntervalMonthDayNano, IntervalMonthDayNano);
impl_value_from!(Duration, Duration);
impl_value_from!(String, String);
impl_value_from!(Decimal128, Decimal128);
@@ -774,7 +805,9 @@ impl TryFrom<Value> for serde_json::Value {
Value::List(v) => serde_json::to_value(v)?,
Value::Timestamp(v) => serde_json::to_value(v.value())?,
Value::Time(v) => serde_json::to_value(v.value())?,
Value::Interval(v) => serde_json::to_value(v.to_i128())?,
Value::IntervalYearMonth(v) => serde_json::to_value(v.to_i32())?,
Value::IntervalDayTime(v) => serde_json::to_value(v.to_i64())?,
Value::IntervalMonthDayNano(v) => serde_json::to_value(v.to_i128())?,
Value::Duration(v) => serde_json::to_value(v.value())?,
Value::Decimal128(v) => serde_json::to_value(v.to_string())?,
};
@@ -926,13 +959,13 @@ impl TryFrom<ScalarValue> for Value {
.unwrap_or(Value::Null),
ScalarValue::IntervalYearMonth(t) => t
.map(|x| Value::Interval(Interval::from_i32(x)))
.map(|x| Value::IntervalYearMonth(IntervalYearMonth::from_i32(x)))
.unwrap_or(Value::Null),
ScalarValue::IntervalDayTime(t) => t
.map(|x| Value::Interval(Interval::from_i64(x)))
.map(|x| Value::IntervalDayTime(IntervalDayTime::from_i64(x)))
.unwrap_or(Value::Null),
ScalarValue::IntervalMonthDayNano(t) => t
.map(|x| Value::Interval(Interval::from_i128(x)))
.map(|x| Value::IntervalMonthDayNano(IntervalMonthDayNano::from_i128(x)))
.unwrap_or(Value::Null),
ScalarValue::DurationSecond(d) => d
.map(|x| Value::Duration(Duration::new(x, TimeUnit::Second)))
@@ -987,7 +1020,9 @@ impl From<ValueRef<'_>> for Value {
ValueRef::DateTime(v) => Value::DateTime(v),
ValueRef::Timestamp(v) => Value::Timestamp(v),
ValueRef::Time(v) => Value::Time(v),
ValueRef::Interval(v) => Value::Interval(v),
ValueRef::IntervalYearMonth(v) => Value::IntervalYearMonth(v),
ValueRef::IntervalDayTime(v) => Value::IntervalDayTime(v),
ValueRef::IntervalMonthDayNano(v) => Value::IntervalMonthDayNano(v),
ValueRef::Duration(v) => Value::Duration(v),
ValueRef::List(v) => v.to_value(),
ValueRef::Decimal128(v) => Value::Decimal128(v),
@@ -1026,7 +1061,10 @@ pub enum ValueRef<'a> {
Timestamp(Timestamp),
Time(Time),
Duration(Duration),
Interval(Interval),
// Interval types:
IntervalYearMonth(IntervalYearMonth),
IntervalDayTime(IntervalDayTime),
IntervalMonthDayNano(IntervalMonthDayNano),
// Compound types:
List(ListValueRef<'a>),
@@ -1150,9 +1188,19 @@ impl<'a> ValueRef<'a> {
impl_as_for_value_ref!(self, Duration)
}
/// Cast itself to [Interval].
pub fn as_interval(&self) -> Result<Option<Interval>> {
impl_as_for_value_ref!(self, Interval)
/// Cast itself to [IntervalYearMonth].
pub fn as_interval_year_month(&self) -> Result<Option<IntervalYearMonth>> {
impl_as_for_value_ref!(self, IntervalYearMonth)
}
/// Cast itself to [IntervalDayTime].
pub fn as_interval_day_time(&self) -> Result<Option<IntervalDayTime>> {
impl_as_for_value_ref!(self, IntervalDayTime)
}
/// Cast itself to [IntervalMonthDayNano].
pub fn as_interval_month_day_nano(&self) -> Result<Option<IntervalMonthDayNano>> {
impl_as_for_value_ref!(self, IntervalMonthDayNano)
}
/// Cast itself to [ListValueRef].
@@ -1212,7 +1260,9 @@ impl_value_ref_from!(Date, Date);
impl_value_ref_from!(DateTime, DateTime);
impl_value_ref_from!(Timestamp, Timestamp);
impl_value_ref_from!(Time, Time);
impl_value_ref_from!(Interval, Interval);
impl_value_ref_from!(IntervalYearMonth, IntervalYearMonth);
impl_value_ref_from!(IntervalDayTime, IntervalDayTime);
impl_value_ref_from!(IntervalMonthDayNano, IntervalMonthDayNano);
impl_value_ref_from!(Duration, Duration);
impl_value_ref_from!(Decimal128, Decimal128);
@@ -1237,37 +1287,52 @@ impl<'a> From<Option<ListValueRef<'a>>> for ValueRef<'a> {
}
}
impl<'a> TryFrom<ValueRef<'a>> for serde_json::Value {
type Error = serde_json::Error;
/// Transforms a [ValueRef] into a [serde_json::Value].
/// The json type is handled specially.
pub fn transform_value_ref_to_json_value<'a>(
value: ValueRef<'a>,
schema: &'a ColumnSchema,
) -> serde_json::Result<serde_json::Value> {
let json_value = match value {
ValueRef::Null => serde_json::Value::Null,
ValueRef::Boolean(v) => serde_json::Value::Bool(v),
ValueRef::UInt8(v) => serde_json::Value::from(v),
ValueRef::UInt16(v) => serde_json::Value::from(v),
ValueRef::UInt32(v) => serde_json::Value::from(v),
ValueRef::UInt64(v) => serde_json::Value::from(v),
ValueRef::Int8(v) => serde_json::Value::from(v),
ValueRef::Int16(v) => serde_json::Value::from(v),
ValueRef::Int32(v) => serde_json::Value::from(v),
ValueRef::Int64(v) => serde_json::Value::from(v),
ValueRef::Float32(v) => serde_json::Value::from(v.0),
ValueRef::Float64(v) => serde_json::Value::from(v.0),
ValueRef::String(bytes) => serde_json::Value::String(bytes.to_string()),
ValueRef::Binary(bytes) => {
if let ConcreteDataType::Json(_) = schema.data_type {
match jsonb::from_slice(bytes) {
Ok(json) => json.into(),
Err(e) => {
error!(e; "Failed to parse jsonb");
serde_json::Value::Null
}
}
} else {
serde_json::to_value(bytes)?
}
}
ValueRef::Date(v) => serde_json::Value::Number(v.val().into()),
ValueRef::DateTime(v) => serde_json::Value::Number(v.val().into()),
ValueRef::List(v) => serde_json::to_value(v)?,
ValueRef::Timestamp(v) => serde_json::to_value(v.value())?,
ValueRef::Time(v) => serde_json::to_value(v.value())?,
ValueRef::IntervalYearMonth(v) => serde_json::Value::from(v),
ValueRef::IntervalDayTime(v) => serde_json::Value::from(v),
ValueRef::IntervalMonthDayNano(v) => serde_json::Value::from(v),
ValueRef::Duration(v) => serde_json::to_value(v.value())?,
ValueRef::Decimal128(v) => serde_json::to_value(v.to_string())?,
};
fn try_from(value: ValueRef<'a>) -> serde_json::Result<serde_json::Value> {
let json_value = match value {
ValueRef::Null => serde_json::Value::Null,
ValueRef::Boolean(v) => serde_json::Value::Bool(v),
ValueRef::UInt8(v) => serde_json::Value::from(v),
ValueRef::UInt16(v) => serde_json::Value::from(v),
ValueRef::UInt32(v) => serde_json::Value::from(v),
ValueRef::UInt64(v) => serde_json::Value::from(v),
ValueRef::Int8(v) => serde_json::Value::from(v),
ValueRef::Int16(v) => serde_json::Value::from(v),
ValueRef::Int32(v) => serde_json::Value::from(v),
ValueRef::Int64(v) => serde_json::Value::from(v),
ValueRef::Float32(v) => serde_json::Value::from(v.0),
ValueRef::Float64(v) => serde_json::Value::from(v.0),
ValueRef::String(bytes) => serde_json::Value::String(bytes.to_string()),
ValueRef::Binary(bytes) => serde_json::to_value(bytes)?,
ValueRef::Date(v) => serde_json::Value::Number(v.val().into()),
ValueRef::DateTime(v) => serde_json::Value::Number(v.val().into()),
ValueRef::List(v) => serde_json::to_value(v)?,
ValueRef::Timestamp(v) => serde_json::to_value(v.value())?,
ValueRef::Time(v) => serde_json::to_value(v.value())?,
ValueRef::Interval(v) => serde_json::to_value(v.to_i128())?,
ValueRef::Duration(v) => serde_json::to_value(v.value())?,
ValueRef::Decimal128(v) => serde_json::to_value(v.to_string())?,
};
Ok(json_value)
}
Ok(json_value)
}
/// Reference to a [ListValue].
@@ -1359,7 +1424,9 @@ impl<'a> ValueRef<'a> {
ValueRef::Timestamp(_) => 16,
ValueRef::Time(_) => 16,
ValueRef::Duration(_) => 16,
ValueRef::Interval(_) => 24,
ValueRef::IntervalYearMonth(_) => 4,
ValueRef::IntervalDayTime(_) => 8,
ValueRef::IntervalMonthDayNano(_) => 16,
ValueRef::Decimal128(_) => 32,
ValueRef::List(v) => match v {
ListValueRef::Indexed { vector, .. } => vector.memory_size() / vector.len(),
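For reference, a minimal self-contained sketch (stand-in structs, not the real `common_time` definitions) showing why the new interval variants are counted as 4, 8 and 16 bytes in the hunk above; the assumed layouts are an i32 month count, days plus milliseconds, and months plus days plus an i64 nanosecond component:

use std::mem::size_of;

// Assumed stand-in layouts, named here only for illustration.
#[allow(dead_code)]
struct YearMonthStandIn { months: i32 }
#[allow(dead_code)]
struct DayTimeStandIn { days: i32, milliseconds: i32 }
#[allow(dead_code)]
struct MonthDayNanoStandIn { months: i32, days: i32, nanoseconds: i64 }

fn main() {
    // These sizes line up with the 4/8/16-byte estimates above.
    assert_eq!(size_of::<YearMonthStandIn>(), 4);
    assert_eq!(size_of::<DayTimeStandIn>(), 8);
    assert_eq!(size_of::<MonthDayNanoStandIn>(), 16);
}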
@@ -1428,7 +1495,9 @@ pub fn column_data_to_json(data: ValueData) -> JsonValue {
mod tests {
use arrow::datatypes::DataType as ArrowDataType;
use common_time::timezone::set_default_timezone;
use greptime_proto::v1::{Decimal128 as ProtoDecimal128, IntervalMonthDayNano};
use greptime_proto::v1::{
Decimal128 as ProtoDecimal128, IntervalMonthDayNano as ProtoIntervalMonthDayNano,
};
use num_traits::Float;
use super::*;
@@ -1525,11 +1594,13 @@ mod tests {
JsonValue::String("interval year [12]".to_string())
);
assert_eq!(
column_data_to_json(ValueData::IntervalMonthDayNanoValue(IntervalMonthDayNano {
months: 1,
days: 2,
nanoseconds: 3,
})),
column_data_to_json(ValueData::IntervalMonthDayNanoValue(
ProtoIntervalMonthDayNano {
months: 1,
days: 2,
nanoseconds: 3,
}
)),
JsonValue::String("interval month [1][2][3]".to_string())
);
assert_eq!(
@@ -1740,12 +1811,10 @@ mod tests {
ScalarValue::IntervalMonthDayNano(None).try_into().unwrap()
);
assert_eq!(
Value::Interval(Interval::from_month_day_nano(1, 1, 1)),
ScalarValue::IntervalMonthDayNano(Some(
Interval::from_month_day_nano(1, 1, 1).to_i128()
))
.try_into()
.unwrap()
Value::IntervalMonthDayNano(IntervalMonthDayNano::new(1, 1, 1)),
ScalarValue::IntervalMonthDayNano(Some(IntervalMonthDayNano::new(1, 1, 1).to_i128()))
.try_into()
.unwrap()
);
assert_eq!(
@@ -1975,9 +2044,17 @@ mod tests {
&ConcreteDataType::time_nanosecond_datatype(),
&Value::Time(Time::new_nanosecond(1)),
);
check_type_and_value(
&ConcreteDataType::interval_year_month_datatype(),
&Value::IntervalYearMonth(IntervalYearMonth::new(1)),
);
check_type_and_value(
&ConcreteDataType::interval_day_time_datatype(),
&Value::IntervalDayTime(IntervalDayTime::new(1, 2)),
);
check_type_and_value(
&ConcreteDataType::interval_month_day_nano_datatype(),
&Value::Interval(Interval::from_month_day_nano(1, 2, 3)),
&Value::IntervalMonthDayNano(IntervalMonthDayNano::new(1, 2, 3)),
);
check_type_and_value(
&ConcreteDataType::duration_second_datatype(),
@@ -2160,7 +2237,9 @@ mod tests {
check_as_value_ref!(Float64, OrderedF64::from(16.0));
check_as_value_ref!(Timestamp, Timestamp::new_millisecond(1));
check_as_value_ref!(Time, Time::new_millisecond(1));
check_as_value_ref!(Interval, Interval::from_month_day_nano(1, 2, 3));
check_as_value_ref!(IntervalYearMonth, IntervalYearMonth::new(1));
check_as_value_ref!(IntervalDayTime, IntervalDayTime::new(1, 2));
check_as_value_ref!(IntervalMonthDayNano, IntervalMonthDayNano::new(1, 2, 3));
check_as_value_ref!(Duration, Duration::new_millisecond(1));
assert_eq!(
@@ -2672,9 +2751,11 @@ mod tests {
check_value_ref_size_eq(&ValueRef::DateTime(DateTime::new(1)), 8);
check_value_ref_size_eq(&ValueRef::Timestamp(Timestamp::new_millisecond(1)), 16);
check_value_ref_size_eq(&ValueRef::Time(Time::new_millisecond(1)), 16);
check_value_ref_size_eq(&ValueRef::IntervalYearMonth(IntervalYearMonth::new(1)), 4);
check_value_ref_size_eq(&ValueRef::IntervalDayTime(IntervalDayTime::new(1, 2)), 8);
check_value_ref_size_eq(
&ValueRef::Interval(Interval::from_month_day_nano(1, 2, 3)),
24,
&ValueRef::IntervalMonthDayNano(IntervalMonthDayNano::new(1, 2, 3)),
16,
);
check_value_ref_size_eq(&ValueRef::Duration(Duration::new_millisecond(1)), 16);
check_value_ref_size_eq(

View File

@@ -421,7 +421,7 @@ mod tests {
use common_decimal::Decimal128;
use common_time::time::Time;
use common_time::timestamp::TimeUnit;
use common_time::{Date, DateTime, Duration, Interval};
use common_time::{Date, DateTime, Duration, IntervalMonthDayNano};
use super::*;
use crate::value::Value;
@@ -689,7 +689,10 @@ mod tests {
);
assert_eq!(3, vector.len());
for i in 0..vector.len() {
assert_eq!(Value::Interval(Interval::from_i128(2000)), vector.get(i));
assert_eq!(
Value::IntervalMonthDayNano(IntervalMonthDayNano::from_i128(2000)),
vector.get(i)
);
}
}

View File

@@ -560,7 +560,7 @@ fn reduce_batch_subgraph(
.get_mut(i)
.context(InternalSnafu{
reason: format!(
"Output builder should have the same length as the row, expected at most {} but got {}",
"Output builder should have the same length as the row, expected at most {} but got {}",
column_cnt - 1,
i
)
@@ -1162,7 +1162,9 @@ fn from_val_to_slice_idx(
#[cfg(test)]
mod test {
use common_time::{DateTime, Interval, Timestamp};
use std::time::Duration;
use common_time::Timestamp;
use datatypes::data_type::{ConcreteDataType, ConcreteDataType as CDT};
use hydroflow::scheduled::graph::Hydroflow;
@@ -1214,8 +1216,8 @@ mod test {
let expected = TypedPlan {
schema: RelationType::new(vec![
ColumnType::new(CDT::uint64_datatype(), true), // sum(number)
ColumnType::new(CDT::datetime_datatype(), false), // window start
ColumnType::new(CDT::datetime_datatype(), false), // window end
ColumnType::new(CDT::timestamp_millisecond_datatype(), false), // window start
ColumnType::new(CDT::timestamp_millisecond_datatype(), false), // window end
])
.into_unnamed(),
// TODO(discord9): mfp indirectly ref to key columns
@@ -1232,7 +1234,10 @@ mod test {
.with_types(
RelationType::new(vec![
ColumnType::new(ConcreteDataType::uint32_datatype(), false),
ColumnType::new(ConcreteDataType::datetime_datatype(), false),
ColumnType::new(
ConcreteDataType::timestamp_millisecond_datatype(),
false,
),
])
.into_unnamed(),
),
@@ -1242,22 +1247,18 @@ mod test {
.map(vec![
ScalarExpr::Column(1).call_unary(
UnaryFunc::TumbleWindowFloor {
window_size: Interval::from_month_day_nano(
0,
0,
1_000_000_000,
),
start_time: Some(DateTime::new(1625097600000)),
window_size: Duration::from_nanos(1_000_000_000),
start_time: Some(Timestamp::new_millisecond(
1625097600000,
)),
},
),
ScalarExpr::Column(1).call_unary(
UnaryFunc::TumbleWindowCeiling {
window_size: Interval::from_month_day_nano(
0,
0,
1_000_000_000,
),
start_time: Some(DateTime::new(1625097600000)),
window_size: Duration::from_nanos(1_000_000_000),
start_time: Some(Timestamp::new_millisecond(
1625097600000,
)),
},
),
])
@@ -1278,9 +1279,9 @@ mod test {
}
.with_types(
RelationType::new(vec![
ColumnType::new(CDT::datetime_datatype(), false), // window start
ColumnType::new(CDT::datetime_datatype(), false), // window end
ColumnType::new(CDT::uint64_datatype(), true), //sum(number)
ColumnType::new(CDT::timestamp_millisecond_datatype(), false), // window start
ColumnType::new(CDT::timestamp_millisecond_datatype(), false), // window end
ColumnType::new(CDT::uint64_datatype(), true), //sum(number)
])
.with_key(vec![1])
.with_time_index(Some(0))

View File

@@ -171,9 +171,13 @@ impl DfScalarFunction {
}
}
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct RawDfScalarFn {
/// The raw bytes encoded datafusion scalar function
/// The raw bytes encoding the datafusion scalar function.
/// Substrait has too many layers of nested structs and `ScalarFunction`'s derive differs,
/// so for simplicity's sake we store raw bytes here instead of a `ScalarFunction`;
/// unit tests still compare the decoded struct (via the `f_decoded` field in the Debug impl).
pub(crate) f: bytes::BytesMut,
/// The input schema of the function
pub(crate) input_schema: RelationDesc,
@@ -181,6 +185,17 @@ pub struct RawDfScalarFn {
pub(crate) extensions: FunctionExtensions,
}
impl std::fmt::Debug for RawDfScalarFn {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("RawDfScalarFn")
.field("f", &self.f)
.field("f_decoded", &ScalarFunction::decode(&mut self.f.as_ref()))
.field("df_schema", &self.input_schema)
.field("extensions", &self.extensions)
.finish()
}
}
impl RawDfScalarFn {
pub fn from_proto(
f: &substrait::substrait_proto_df::proto::expression::ScalarFunction,

View File

@@ -16,19 +16,18 @@
use std::collections::HashMap;
use std::sync::{Arc, OnceLock};
use std::time::Duration;
use arrow::array::{ArrayRef, BooleanArray};
use common_error::ext::BoxedError;
use common_time::timestamp::TimeUnit;
use common_time::{DateTime, Timestamp};
use common_time::Timestamp;
use datafusion_expr::Operator;
use datatypes::data_type::ConcreteDataType;
use datatypes::prelude::DataType;
use datatypes::types::cast;
use datatypes::value::Value;
use datatypes::vectors::{
BooleanVector, DateTimeVector, Helper, TimestampMillisecondVector, VectorRef,
};
use datatypes::vectors::{BooleanVector, Helper, TimestampMillisecondVector, VectorRef};
use serde::{Deserialize, Serialize};
use smallvec::smallvec;
use snafu::{ensure, OptionExt, ResultExt};
@@ -52,8 +51,8 @@ pub enum UnmaterializableFunc {
CurrentSchema,
TumbleWindow {
ts: Box<TypedExpr>,
window_size: common_time::Interval,
start_time: Option<DateTime>,
window_size: Duration,
start_time: Option<Timestamp>,
},
}
@@ -63,7 +62,8 @@ impl UnmaterializableFunc {
match self {
Self::Now => Signature {
input: smallvec![],
output: ConcreteDataType::datetime_datatype(),
// TODO(yingwen): Maybe return timestamp.
output: ConcreteDataType::timestamp_millisecond_datatype(),
generic_fn: GenericFn::Now,
},
Self::CurrentSchema => Signature {
@@ -110,12 +110,12 @@ pub enum UnaryFunc {
StepTimestamp,
Cast(ConcreteDataType),
TumbleWindowFloor {
window_size: common_time::Interval,
start_time: Option<DateTime>,
window_size: Duration,
start_time: Option<Timestamp>,
},
TumbleWindowCeiling {
window_size: common_time::Interval,
start_time: Option<DateTime>,
window_size: Duration,
start_time: Option<Timestamp>,
},
}
@@ -139,8 +139,8 @@ impl UnaryFunc {
},
},
Self::StepTimestamp => Signature {
input: smallvec![ConcreteDataType::datetime_datatype()],
output: ConcreteDataType::datetime_datatype(),
input: smallvec![ConcreteDataType::timestamp_millisecond_datatype()],
output: ConcreteDataType::timestamp_millisecond_datatype(),
generic_fn: GenericFn::StepTimestamp,
},
Self::Cast(to) => Signature {
@@ -238,19 +238,19 @@ impl UnaryFunc {
}
}
Self::StepTimestamp => {
let datetime_array = get_datetime_array(&arg_col)?;
let date_array_ref = datetime_array
let timestamp_array = get_timestamp_array(&arg_col)?;
let timestamp_array_ref = timestamp_array
.as_any()
.downcast_ref::<arrow::array::Date64Array>()
.downcast_ref::<arrow::array::TimestampMillisecondArray>()
.context({
TypeMismatchSnafu {
expected: ConcreteDataType::boolean_datatype(),
actual: ConcreteDataType::from_arrow_type(datetime_array.data_type()),
actual: ConcreteDataType::from_arrow_type(timestamp_array.data_type()),
}
})?;
let ret = arrow::compute::unary(date_array_ref, |arr| arr + 1);
let ret = DateTimeVector::from(ret);
let ret = arrow::compute::unary(timestamp_array_ref, |arr| arr + 1);
let ret = TimestampMillisecondVector::from(ret);
Ok(Arc::new(ret))
}
Self::Cast(to) => {
@@ -266,19 +266,19 @@ impl UnaryFunc {
window_size,
start_time,
} => {
let datetime_array = get_datetime_array(&arg_col)?;
let date_array_ref = datetime_array
let timestamp_array = get_timestamp_array(&arg_col)?;
let date_array_ref = timestamp_array
.as_any()
.downcast_ref::<arrow::array::Date64Array>()
.downcast_ref::<arrow::array::TimestampMillisecondArray>()
.context({
TypeMismatchSnafu {
expected: ConcreteDataType::boolean_datatype(),
actual: ConcreteDataType::from_arrow_type(datetime_array.data_type()),
actual: ConcreteDataType::from_arrow_type(timestamp_array.data_type()),
}
})?;
let start_time = start_time.map(|t| t.val());
let window_size = (window_size.to_nanosecond() / 1_000_000) as repr::Duration; // nanosecond to millisecond
let start_time = start_time.map(|t| t.value());
let window_size = window_size.as_millis() as repr::Duration;
let ret = arrow::compute::unary(date_array_ref, |ts| {
get_window_start(ts, window_size, start_time)
@@ -291,19 +291,19 @@ impl UnaryFunc {
window_size,
start_time,
} => {
let datetime_array = get_datetime_array(&arg_col)?;
let date_array_ref = datetime_array
let timestamp_array = get_timestamp_array(&arg_col)?;
let date_array_ref = timestamp_array
.as_any()
.downcast_ref::<arrow::array::Date64Array>()
.downcast_ref::<arrow::array::TimestampMillisecondArray>()
.context({
TypeMismatchSnafu {
expected: ConcreteDataType::boolean_datatype(),
actual: ConcreteDataType::from_arrow_type(datetime_array.data_type()),
actual: ConcreteDataType::from_arrow_type(timestamp_array.data_type()),
}
})?;
let start_time = start_time.map(|t| t.val());
let window_size = (window_size.to_nanosecond() / 1_000_000) as repr::Duration; // nanosecond to millisecond
let start_time = start_time.map(|t| t.value());
let window_size = window_size.as_millis() as repr::Duration;
let ret = arrow::compute::unary(date_array_ref, |ts| {
get_window_start(ts, window_size, start_time) + window_size
@@ -330,19 +330,20 @@ impl UnaryFunc {
})?;
if let Some(window_size) = window_size_untyped.as_string() {
// cast as interval
cast(
let interval = cast(
Value::from(window_size),
&ConcreteDataType::interval_month_day_nano_datatype(),
&ConcreteDataType::interval_day_time_datatype(),
)
.map_err(BoxedError::new)
.context(ExternalSnafu)?
.as_interval()
.as_interval_day_time()
.context(UnexpectedSnafu {
reason: "Expect window size arg to be interval after successful cast"
.to_string(),
})?
} else if let Some(interval) = window_size_untyped.as_interval() {
interval
})?;
Duration::from_millis(interval.as_millis() as u64)
} else if let Some(interval) = window_size_untyped.as_interval_day_time() {
Duration::from_millis(interval.as_millis() as u64)
} else {
InvalidQuerySnafu {
reason: format!(
@@ -357,16 +358,19 @@ impl UnaryFunc {
let start_time = match args.get(2) {
Some(start_time) => {
if let Some(value) = start_time.expr.as_literal() {
// cast as DateTime
let ret = cast(value, &ConcreteDataType::datetime_datatype())
.map_err(BoxedError::new)
.context(ExternalSnafu)?
.as_datetime()
.context(UnexpectedSnafu {
reason:
"Expect start time arg to be datetime after successful cast"
.to_string(),
})?;
// cast as timestamp
let ret = cast(
value,
&ConcreteDataType::timestamp_millisecond_datatype(),
)
.map_err(BoxedError::new)
.context(ExternalSnafu)?
.as_timestamp()
.context(UnexpectedSnafu {
reason:
"Expect start time arg to be timestamp after successful cast"
.to_string(),
})?;
Some(ret)
} else {
UnexpectedSnafu {
@@ -446,15 +450,15 @@ impl UnaryFunc {
}
Self::StepTimestamp => {
let ty = arg.data_type();
if let Value::DateTime(datetime) = arg {
let datetime = DateTime::from(datetime.val() + 1);
Ok(Value::from(datetime))
if let Value::Timestamp(timestamp) = arg {
let timestamp = Timestamp::new_millisecond(timestamp.value() + 1);
Ok(Value::from(timestamp))
} else if let Ok(v) = value_to_internal_ts(arg) {
let datetime = DateTime::from(v + 1);
Ok(Value::from(datetime))
let timestamp = Timestamp::new_millisecond(v + 1);
Ok(Value::from(timestamp))
} else {
TypeMismatchSnafu {
expected: ConcreteDataType::datetime_datatype(),
expected: ConcreteDataType::timestamp_millisecond_datatype(),
actual: ty,
}
.fail()?
@@ -474,8 +478,8 @@ impl UnaryFunc {
start_time,
} => {
let ts = get_ts_as_millisecond(arg)?;
let start_time = start_time.map(|t| t.val());
let window_size = (window_size.to_nanosecond() / 1_000_000) as repr::Duration; // nanosecond to millisecond
let start_time = start_time.map(|t| t.value());
let window_size = window_size.as_millis() as repr::Duration;
let window_start = get_window_start(ts, window_size, start_time);
let ret = Timestamp::new_millisecond(window_start);
@@ -486,8 +490,8 @@ impl UnaryFunc {
start_time,
} => {
let ts = get_ts_as_millisecond(arg)?;
let start_time = start_time.map(|t| t.val());
let window_size = (window_size.to_nanosecond() / 1_000_000) as repr::Duration; // nanosecond to millisecond
let start_time = start_time.map(|t| t.value());
let window_size = window_size.as_millis() as repr::Duration;
let window_start = get_window_start(ts, window_size, start_time);
let window_end = window_start + window_size;
@@ -498,21 +502,22 @@ impl UnaryFunc {
}
}
fn get_datetime_array(vector: &VectorRef) -> Result<arrow::array::ArrayRef, EvalError> {
fn get_timestamp_array(vector: &VectorRef) -> Result<arrow::array::ArrayRef, EvalError> {
let arrow_array = vector.to_arrow_array();
let datetime_array =
if *arrow_array.data_type() == ConcreteDataType::datetime_datatype().as_arrow_type() {
arrow_array
} else {
arrow::compute::cast(
&arrow_array,
&ConcreteDataType::datetime_datatype().as_arrow_type(),
)
.context(ArrowSnafu {
context: "Trying to cast to datetime in StepTimestamp",
})?
};
Ok(datetime_array)
let timestamp_array = if *arrow_array.data_type()
== ConcreteDataType::timestamp_millisecond_datatype().as_arrow_type()
{
arrow_array
} else {
arrow::compute::cast(
&arrow_array,
&ConcreteDataType::timestamp_millisecond_datatype().as_arrow_type(),
)
.context(ArrowSnafu {
context: "Trying to cast to timestamp in StepTimestamp",
})?
};
Ok(timestamp_array)
}
fn get_window_start(
@@ -1284,7 +1289,6 @@ where
mod test {
use std::sync::Arc;
use common_time::Interval;
use datatypes::vectors::Vector;
use pretty_assertions::assert_eq;
@@ -1292,18 +1296,18 @@ mod test {
#[test]
fn test_tumble_batch() {
let datetime_vector = DateTimeVector::from_vec(vec![1, 2, 10, 13, 14, 20, 25]);
let timestamp_vector = TimestampMillisecondVector::from_vec(vec![1, 2, 10, 13, 14, 20, 25]);
let tumble_start = UnaryFunc::TumbleWindowFloor {
window_size: Interval::from_day_time(0, 10),
window_size: Duration::from_millis(10),
start_time: None,
};
let tumble_end = UnaryFunc::TumbleWindowCeiling {
window_size: Interval::from_day_time(0, 10),
window_size: Duration::from_millis(10),
start_time: None,
};
let len = datetime_vector.len();
let batch = Batch::try_new(vec![Arc::new(datetime_vector)], len).unwrap();
let len = timestamp_vector.len();
let batch = Batch::try_new(vec![Arc::new(timestamp_vector)], len).unwrap();
let arg = ScalarExpr::Column(0);
let start = tumble_start.eval_batch(&batch, &arg).unwrap();
@@ -1459,4 +1463,17 @@ mod test {
Err(Error::InvalidQuery { .. })
);
}
#[test]
fn test_cast_int() {
let interval = cast(
Value::from("1 second"),
&ConcreteDataType::interval_day_time_datatype(),
)
.unwrap();
assert_eq!(
interval,
Value::from(common_time::IntervalDayTime::new(0, 1000))
);
}
}
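As a rough illustration of the tumble-window arithmetic exercised by the test above, here is a hypothetical stand-in for the `get_window_start` helper (an assumed implementation, not the actual one): it aligns a millisecond timestamp to the start of its window, and the ceiling is just the start plus the window size.

// Hypothetical stand-in; `window_start` and its signature are illustrative.
fn window_start(ts: i64, window_size_ms: i64, start_time_ms: Option<i64>) -> i64 {
    let origin = start_time_ms.unwrap_or(0);
    // rem_euclid keeps the offset non-negative for timestamps before `origin`.
    ts - (ts - origin).rem_euclid(window_size_ms)
}

fn main() {
    // With a 10 ms window and no start_time, ts = 13 falls into [10, 20),
    // matching the TumbleWindowFloor/TumbleWindowCeiling pair tested above.
    let start = window_start(13, 10, None);
    let end = start + 10;
    assert_eq!((start, end), (10, 20));
}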

View File

@@ -61,7 +61,7 @@ pub const BATCH_SIZE: usize = 32 * 16384;
/// Convert a value that is, or can be converted to, a datetime into an internal timestamp.
///
/// Supported types are: `Date`, `DateTime`, `Timestamp`, `i64`
pub fn value_to_internal_ts(value: Value) -> Result<Timestamp, EvalError> {
pub fn value_to_internal_ts(value: Value) -> Result<i64, EvalError> {
let is_supported_time_type = |arg: &Value| {
let ty = arg.data_type();
matches!(
@@ -76,14 +76,14 @@ pub fn value_to_internal_ts(value: Value) -> Result<Timestamp, EvalError> {
Value::Int64(ts) => Ok(ts),
arg if is_supported_time_type(&arg) => {
let arg_ty = arg.data_type();
let res = cast(arg, &ConcreteDataType::datetime_datatype()).context({
let res = cast(arg, &ConcreteDataType::timestamp_millisecond_datatype()).context({
CastValueSnafu {
from: arg_ty,
to: ConcreteDataType::datetime_datatype(),
to: ConcreteDataType::timestamp_millisecond_datatype(),
}
})?;
if let Value::DateTime(ts) = res {
Ok(ts.val())
if let Value::Timestamp(ts) = res {
Ok(ts.value())
} else {
unreachable!()
}

View File

@@ -156,10 +156,10 @@ mod test {
use catalog::RegisterTableRequest;
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, NUMBERS_TABLE_ID};
use common_time::DateTime;
use datatypes::prelude::*;
use datatypes::schema::Schema;
use datatypes::vectors::VectorRef;
use datatypes::timestamp::TimestampMillisecond;
use datatypes::vectors::{TimestampMillisecondVectorBuilder, VectorRef};
use itertools::Itertools;
use prost::Message;
use query::parser::QueryLanguageParser;
@@ -202,7 +202,7 @@ mod test {
];
let schema = RelationType::new(vec![
ColumnType::new(CDT::uint32_datatype(), false),
ColumnType::new(CDT::datetime_datatype(), false),
ColumnType::new(CDT::timestamp_millisecond_datatype(), false),
]);
schemas.insert(
gid,
@@ -232,7 +232,11 @@ mod test {
let schema = vec![
datatypes::schema::ColumnSchema::new("number", CDT::uint32_datatype(), false),
datatypes::schema::ColumnSchema::new("ts", CDT::datetime_datatype(), false),
datatypes::schema::ColumnSchema::new(
"ts",
CDT::timestamp_millisecond_datatype(),
false,
),
];
let mut columns = vec![];
let numbers = (1..=10).collect_vec();
@@ -240,7 +244,11 @@ mod test {
columns.push(column);
let ts = (1..=10).collect_vec();
let column: VectorRef = Arc::new(<DateTime as Scalar>::VectorType::from_vec(ts));
let mut builder = TimestampMillisecondVectorBuilder::with_capacity(10);
ts.into_iter()
.map(|v| builder.push(Some(TimestampMillisecond::new(v))))
.count();
let column: VectorRef = builder.to_vector_cloned();
columns.push(column);
let schema = Arc::new(Schema::new(schema));

View File

@@ -345,9 +345,10 @@ impl TypedPlan {
#[cfg(test)]
mod test {
use std::collections::BTreeMap;
use std::time::Duration;
use bytes::BytesMut;
use common_time::{DateTime, Interval};
use common_time::{IntervalMonthDayNano, Timestamp};
use datatypes::prelude::ConcreteDataType;
use datatypes::value::Value;
use pretty_assertions::assert_eq;
@@ -398,7 +399,10 @@ mod test {
.with_types(
RelationType::new(vec![
ColumnType::new(ConcreteDataType::uint32_datatype(), false),
ColumnType::new(ConcreteDataType::datetime_datatype(), false),
ColumnType::new(
ConcreteDataType::timestamp_millisecond_datatype(),
false,
),
])
.into_named(vec![
Some("number".to_string()),
@@ -413,22 +417,18 @@ mod test {
.map(vec![
ScalarExpr::Column(1).call_unary(
UnaryFunc::TumbleWindowFloor {
window_size: Interval::from_month_day_nano(
0,
0,
1_000_000_000,
),
start_time: Some(DateTime::new(1625097600000)),
window_size: Duration::from_nanos(1_000_000_000),
start_time: Some(Timestamp::new_millisecond(
1625097600000,
)),
},
),
ScalarExpr::Column(1).call_unary(
UnaryFunc::TumbleWindowCeiling {
window_size: Interval::from_month_day_nano(
0,
0,
1_000_000_000,
),
start_time: Some(DateTime::new(1625097600000)),
window_size: Duration::from_nanos(1_000_000_000),
start_time: Some(Timestamp::new_millisecond(
1625097600000,
)),
},
),
])
@@ -539,7 +539,10 @@ mod test {
.with_types(
RelationType::new(vec![
ColumnType::new(ConcreteDataType::uint32_datatype(), false),
ColumnType::new(ConcreteDataType::datetime_datatype(), false),
ColumnType::new(
ConcreteDataType::timestamp_millisecond_datatype(),
false,
),
])
.into_named(vec![
Some("number".to_string()),
@@ -554,22 +557,18 @@ mod test {
.map(vec![
ScalarExpr::Column(1).call_unary(
UnaryFunc::TumbleWindowFloor {
window_size: Interval::from_month_day_nano(
0,
0,
1_000_000_000,
),
start_time: Some(DateTime::new(1625097600000)),
window_size: Duration::from_nanos(1_000_000_000),
start_time: Some(Timestamp::new_millisecond(
1625097600000,
)),
},
),
ScalarExpr::Column(1).call_unary(
UnaryFunc::TumbleWindowCeiling {
window_size: Interval::from_month_day_nano(
0,
0,
1_000_000_000,
),
start_time: Some(DateTime::new(1625097600000)),
window_size: Duration::from_nanos(1_000_000_000),
start_time: Some(Timestamp::new_millisecond(
1625097600000,
)),
},
),
])
@@ -686,7 +685,10 @@ mod test {
.with_types(
RelationType::new(vec![
ColumnType::new(ConcreteDataType::uint32_datatype(), false),
ColumnType::new(ConcreteDataType::datetime_datatype(), false),
ColumnType::new(
ConcreteDataType::timestamp_millisecond_datatype(),
false,
),
])
.into_named(vec![
Some("number".to_string()),
@@ -701,21 +703,13 @@ mod test {
.map(vec![
ScalarExpr::Column(1).call_unary(
UnaryFunc::TumbleWindowFloor {
window_size: Interval::from_month_day_nano(
0,
0,
3_600_000_000_000,
),
window_size: Duration::from_nanos(3_600_000_000_000),
start_time: None,
},
),
ScalarExpr::Column(1).call_unary(
UnaryFunc::TumbleWindowCeiling {
window_size: Interval::from_month_day_nano(
0,
0,
3_600_000_000_000,
),
window_size: Duration::from_nanos(3_600_000_000_000),
start_time: None,
},
),
@@ -833,7 +827,10 @@ mod test {
.with_types(
RelationType::new(vec![
ColumnType::new(ConcreteDataType::uint32_datatype(), false),
ColumnType::new(ConcreteDataType::datetime_datatype(), false),
ColumnType::new(
ConcreteDataType::timestamp_millisecond_datatype(),
false,
),
])
.into_named(vec![
Some("number".to_string()),
@@ -848,21 +845,13 @@ mod test {
.map(vec![
ScalarExpr::Column(1).call_unary(
UnaryFunc::TumbleWindowFloor {
window_size: Interval::from_month_day_nano(
0,
0,
3_600_000_000_000,
),
window_size: Duration::from_nanos(3_600_000_000_000),
start_time: None,
},
),
ScalarExpr::Column(1).call_unary(
UnaryFunc::TumbleWindowCeiling {
window_size: Interval::from_month_day_nano(
0,
0,
3_600_000_000_000,
),
window_size: Duration::from_nanos(3_600_000_000_000),
start_time: None,
},
),
@@ -948,7 +937,10 @@ mod test {
.with_types(
RelationType::new(vec![
ColumnType::new(ConcreteDataType::uint32_datatype(), false),
ColumnType::new(ConcreteDataType::datetime_datatype(), false),
ColumnType::new(
ConcreteDataType::timestamp_millisecond_datatype(),
false,
),
])
.into_named(vec![
Some("number".to_string()),
@@ -963,22 +955,18 @@ mod test {
.map(vec![
ScalarExpr::Column(1).call_unary(
UnaryFunc::TumbleWindowFloor {
window_size: Interval::from_month_day_nano(
0,
0,
3_600_000_000_000,
),
start_time: Some(DateTime::new(1625097600000)),
window_size: Duration::from_nanos(3_600_000_000_000),
start_time: Some(Timestamp::new_millisecond(
1625097600000,
)),
},
),
ScalarExpr::Column(1).call_unary(
UnaryFunc::TumbleWindowCeiling {
window_size: Interval::from_month_day_nano(
0,
0,
3_600_000_000_000,
),
start_time: Some(DateTime::new(1625097600000)),
window_size: Duration::from_nanos(3_600_000_000_000),
start_time: Some(Timestamp::new_millisecond(
1625097600000,
)),
},
),
])
@@ -1512,7 +1500,7 @@ mod test {
.with_types(
RelationType::new(vec![
ColumnType::new(ConcreteDataType::uint32_datatype(), false),
ColumnType::new(ConcreteDataType::datetime_datatype(), false),
ColumnType::new(ConcreteDataType::timestamp_millisecond_datatype(), false),
])
.into_named(vec![
Some("number".to_string()),
@@ -1536,7 +1524,7 @@ mod test {
true,
),ColumnType::new(
ConcreteDataType::timestamp_millisecond_datatype(),
true,
false,
)])
.into_unnamed(),
extensions: FunctionExtensions {
@@ -1554,10 +1542,10 @@ mod test {
.unwrap(),
exprs: vec![
ScalarExpr::Literal(
Value::Interval(Interval::from_month_day_nano(0, 0, 30000000000)),
Value::IntervalMonthDayNano(IntervalMonthDayNano::new(0, 0, 30000000000)),
CDT::interval_month_day_nano_datatype()
),
ScalarExpr::Column(1).cast(CDT::timestamp_millisecond_datatype())
ScalarExpr::Column(1)
],
}])
.unwrap()

View File

@@ -178,14 +178,14 @@ pub(crate) fn from_substrait_literal(lit: &Literal) -> Result<(Value, CDT), Erro
let (days, seconds, microseconds) =
(interval.days, interval.seconds, interval.microseconds);
let millis = microseconds / 1000 + seconds * 1000;
let value_interval = common_time::Interval::from_day_time(days, millis);
let value_interval = common_time::IntervalDayTime::new(days, millis);
(
Value::Interval(value_interval),
Value::IntervalDayTime(value_interval),
CDT::interval_day_time_datatype(),
)
}
Some(LiteralType::IntervalYearToMonth(interval)) => (
Value::Interval(common_time::Interval::from_year_month(
Value::IntervalYearMonth(common_time::IntervalYearMonth::new(
interval.years * 12 + interval.months,
)),
CDT::interval_year_month_datatype(),
@@ -239,9 +239,9 @@ fn from_substrait_user_defined_type(user_defined: &UserDefined) -> Result<(Value
}
);
let i: i32 = from_bytes(&val.value)?;
let value_interval = common_time::Interval::from_year_month(i);
let value_interval = common_time::IntervalYearMonth::new(i);
(
Value::Interval(value_interval),
Value::IntervalYearMonth(value_interval),
CDT::interval_year_month_datatype(),
)
}
@@ -255,12 +255,12 @@ fn from_substrait_user_defined_type(user_defined: &UserDefined) -> Result<(Value
)
}
);
// TODO(yingwen): Datafusion may change the representation of the interval type.
let i: i128 = from_bytes(&val.value)?;
let (months, days, nsecs) = ((i >> 96) as i32, (i >> 64) as i32, i as i64);
let value_interval =
common_time::Interval::from_month_day_nano(months, days, nsecs);
let value_interval = common_time::IntervalMonthDayNano::new(months, days, nsecs);
(
Value::Interval(value_interval),
Value::IntervalMonthDayNano(value_interval),
CDT::interval_month_day_nano_datatype(),
)
}
@@ -274,11 +274,12 @@ fn from_substrait_user_defined_type(user_defined: &UserDefined) -> Result<(Value
)
}
);
// TODO(yingwen): Datafusion may change the representation of the interval type.
let i: i64 = from_bytes(&val.value)?;
let (days, millis) = ((i >> 32) as i32, i as i32);
let value_interval = common_time::Interval::from_day_time(days, millis);
let value_interval = common_time::IntervalDayTime::new(days, millis);
(
Value::Interval(value_interval),
Value::IntervalDayTime(value_interval),
CDT::interval_day_time_datatype(),
)
}
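A minimal sketch of the bit unpacking performed above when decoding DataFusion's user-defined interval literals; the layout is inferred from the shifts in this diff, and the helper names are illustrative only:

// Hypothetical helpers mirroring the shifts above: an i128 packs
// months | days | nanoseconds, and an i64 packs days | milliseconds.
fn unpack_month_day_nano(i: i128) -> (i32, i32, i64) {
    ((i >> 96) as i32, (i >> 64) as i32, i as i64)
}

fn unpack_day_time(i: i64) -> (i32, i32) {
    ((i >> 32) as i32, i as i32)
}

fn main() {
    // Pack months = 1, days = 2, nanoseconds = 3 and round-trip it.
    let packed: i128 = (1i128 << 96) | (2i128 << 64) | 3;
    assert_eq!(unpack_month_day_nano(packed), (1, 2, 3));

    // Pack days = 4, milliseconds = 5 and round-trip it.
    let packed: i64 = (4i64 << 32) | 5;
    assert_eq!(unpack_day_time(packed), (4, 5));
}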

View File

@@ -17,8 +17,10 @@ use auth::{PermissionChecker, PermissionCheckerRef, PermissionReq};
use client::Output;
use common_error::ext::BoxedError;
use common_telemetry::tracing;
use opentelemetry_proto::tonic::collector::logs::v1::ExportLogsServiceRequest;
use opentelemetry_proto::tonic::collector::metrics::v1::ExportMetricsServiceRequest;
use opentelemetry_proto::tonic::collector::trace::v1::ExportTraceServiceRequest;
use pipeline::PipelineWay;
use servers::error::{self, AuthSnafu, Result as ServerResult};
use servers::interceptor::{OpenTelemetryProtocolInterceptor, OpenTelemetryProtocolInterceptorRef};
use servers::otlp;
@@ -28,7 +30,7 @@ use session::context::QueryContextRef;
use snafu::ResultExt;
use crate::instance::Instance;
use crate::metrics::{OTLP_METRICS_ROWS, OTLP_TRACES_ROWS};
use crate::metrics::{OTLP_LOGS_ROWS, OTLP_METRICS_ROWS, OTLP_TRACES_ROWS};
#[async_trait]
impl OpenTelemetryProtocolHandler for Instance {
@@ -92,4 +94,31 @@ impl OpenTelemetryProtocolHandler for Instance {
.map_err(BoxedError::new)
.context(error::ExecuteGrpcQuerySnafu)
}
#[tracing::instrument(skip_all)]
async fn logs(
&self,
request: ExportLogsServiceRequest,
pipeline: PipelineWay,
table_name: String,
ctx: QueryContextRef,
) -> ServerResult<Output> {
self.plugins
.get::<PermissionCheckerRef>()
.as_ref()
.check_permission(ctx.current_user(), PermissionReq::Otlp)
.context(AuthSnafu)?;
let interceptor_ref = self
.plugins
.get::<OpenTelemetryProtocolInterceptorRef<servers::error::Error>>();
interceptor_ref.pre_execute(ctx.clone())?;
let (requests, rows) = otlp::logs::to_grpc_insert_requests(request, pipeline, table_name)?;
self.handle_log_inserts(requests, ctx)
.await
.inspect(|_| OTLP_LOGS_ROWS.inc_by(rows as u64))
.map_err(BoxedError::new)
.context(error::ExecuteGrpcQuerySnafu)
}
}

View File

@@ -41,16 +41,28 @@ lazy_static! {
.with_label_values(&["insert"]);
pub static ref EXECUTE_SCRIPT_ELAPSED: Histogram = HANDLE_SCRIPT_ELAPSED
.with_label_values(&["execute"]);
/// The number of OpenTelemetry metrics sent by the frontend node.
pub static ref OTLP_METRICS_ROWS: IntCounter = register_int_counter!(
"greptime_frontend_otlp_metrics_rows",
"frontend otlp metrics rows"
)
.unwrap();
/// The number of OpenTelemetry traces sent by the frontend node.
pub static ref OTLP_TRACES_ROWS: IntCounter = register_int_counter!(
"greptime_frontend_otlp_traces_rows",
"frontend otlp traces rows"
)
.unwrap();
/// The number of OpenTelemetry logs sent by the frontend node.
pub static ref OTLP_LOGS_ROWS: IntCounter = register_int_counter!(
"greptime_frontend_otlp_logs_rows",
"frontend otlp logs rows"
)
.unwrap();
/// The number of heartbeats sent by the frontend node.
pub static ref HEARTBEAT_SENT_COUNT: IntCounter = register_int_counter!(
"greptime_frontend_heartbeat_send_count",

View File

@@ -49,6 +49,8 @@ pub struct KafkaLogStore {
max_batch_bytes: usize,
/// The consumer wait timeout.
consumer_wait_timeout: Duration,
/// Ignore missing entries when reading the WAL.
overwrite_entry_start_id: bool,
}
impl KafkaLogStore {
@@ -64,6 +66,7 @@ impl KafkaLogStore {
client_manager,
max_batch_bytes: config.max_batch_bytes.as_bytes() as usize,
consumer_wait_timeout: config.consumer_wait_timeout,
overwrite_entry_start_id: config.overwrite_entry_start_id,
})
}
}
@@ -205,7 +208,7 @@ impl LogStore for KafkaLogStore {
async fn read(
&self,
provider: &Provider,
entry_id: EntryId,
mut entry_id: EntryId,
index: Option<WalIndex>,
) -> Result<SendableEntryStream<'static, Entry, Self::Error>> {
let provider = provider
@@ -225,6 +228,25 @@ impl LogStore for KafkaLogStore {
.client()
.clone();
if self.overwrite_entry_start_id {
let start_offset =
client
.get_offset(OffsetAt::Earliest)
.await
.context(GetOffsetSnafu {
topic: &provider.topic,
})?;
if entry_id as i64 <= start_offset {
warn!(
"The entry_id: {} is less than start_offset: {}, topic: {}. Overwriting entry_id with start_offset",
entry_id, start_offset, &provider.topic
);
entry_id = start_offset as u64;
}
}
// Gets the offset of the latest record in the topic. Actually, it's the latest record of the single partition in the topic.
// The read operation terminates when this record is consumed.
// Warning: the `get_offset` returns the end offset of the latest record. For our usage, it should be decremented.

View File

@@ -1,129 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::sync::Arc;
use std::time::Duration;
use api::v1::meta::Role;
use common_grpc::channel_manager::{ChannelConfig, ChannelManager};
use common_meta::rpc::lock::{LockRequest, UnlockRequest};
use meta_client::client::MetaClientBuilder;
use meta_client::MetaClientRef;
use tracing::{info, subscriber};
use tracing_subscriber::FmtSubscriber;
fn main() {
subscriber::set_global_default(FmtSubscriber::builder().finish()).unwrap();
run();
}
#[tokio::main]
async fn run() {
let id = (1000u64, 2000u64);
let config = ChannelConfig::new()
.timeout(Duration::from_secs(30))
.connect_timeout(Duration::from_secs(5))
.tcp_nodelay(true);
let channel_manager = ChannelManager::with_config(config);
let mut meta_client = MetaClientBuilder::new(id.0, id.1, Role::Datanode)
.enable_lock()
.channel_manager(channel_manager)
.build();
meta_client.start(&["127.0.0.1:3002"]).await.unwrap();
let meta_client = Arc::new(meta_client);
run_normal(meta_client.clone()).await;
run_multi_thread(meta_client.clone()).await;
run_multi_thread_with_one_timeout(meta_client).await;
}
async fn run_normal(meta_client: MetaClientRef) {
let name = "lock_name".as_bytes().to_vec();
let expire_secs = 60;
let lock_req = LockRequest { name, expire_secs };
let lock_result = meta_client.lock(lock_req).await.unwrap();
let key = lock_result.key;
info!(
"lock success! Returned key: {}",
String::from_utf8(key.clone()).unwrap()
);
// It is recommended that time of holding lock is less than the timeout of the grpc channel
info!("do some work, take 3 seconds");
tokio::time::sleep(Duration::from_secs(3)).await;
let unlock_req = UnlockRequest { key };
meta_client.unlock(unlock_req).await.unwrap();
info!("unlock success!");
}
async fn run_multi_thread(meta_client: MetaClientRef) {
let meta_client_clone = meta_client.clone();
let join1 = tokio::spawn(async move {
run_normal(meta_client_clone.clone()).await;
});
tokio::time::sleep(Duration::from_secs(1)).await;
let join2 = tokio::spawn(async move {
run_normal(meta_client).await;
});
join1.await.unwrap();
join2.await.unwrap();
}
async fn run_multi_thread_with_one_timeout(meta_client: MetaClientRef) {
let meta_client_clone = meta_client.clone();
let join1 = tokio::spawn(async move {
run_with_timeout(meta_client_clone.clone()).await;
});
tokio::time::sleep(Duration::from_secs(1)).await;
let join2 = tokio::spawn(async move {
run_normal(meta_client).await;
});
join1.await.unwrap();
join2.await.unwrap();
}
async fn run_with_timeout(meta_client: MetaClientRef) {
let name = "lock_name".as_bytes().to_vec();
let expire_secs = 5;
let lock_req = LockRequest { name, expire_secs };
let lock_result = meta_client.lock(lock_req).await.unwrap();
let key = lock_result.key;
info!(
"lock success! Returned key: {}",
String::from_utf8(key.clone()).unwrap()
);
// It is recommended that time of holding lock is less than the timeout of the grpc channel
info!("do some work, take 20 seconds");
tokio::time::sleep(Duration::from_secs(20)).await;
let unlock_req = UnlockRequest { key };
meta_client.unlock(unlock_req).await.unwrap();
info!("unlock success!");
}

View File

@@ -15,7 +15,6 @@
mod ask_leader;
mod heartbeat;
mod load_balance;
mod lock;
mod procedure;
mod cluster;
@@ -33,7 +32,6 @@ use common_meta::datanode::{DatanodeStatKey, DatanodeStatValue, RegionStat};
use common_meta::ddl::{ExecutorContext, ProcedureExecutor};
use common_meta::error::{self as meta_error, Result as MetaResult};
use common_meta::rpc::ddl::{SubmitDdlTaskRequest, SubmitDdlTaskResponse};
use common_meta::rpc::lock::{LockRequest, LockResponse, UnlockRequest};
use common_meta::rpc::procedure::{
MigrateRegionRequest, MigrateRegionResponse, ProcedureStateResponse,
};
@@ -45,7 +43,6 @@ use common_meta::rpc::store::{
use common_meta::ClusterId;
use common_telemetry::info;
use heartbeat::Client as HeartbeatClient;
use lock::Client as LockClient;
use procedure::Client as ProcedureClient;
use snafu::{OptionExt, ResultExt};
use store::Client as StoreClient;
@@ -67,7 +64,6 @@ pub struct MetaClientBuilder {
role: Role,
enable_heartbeat: bool,
enable_store: bool,
enable_lock: bool,
enable_procedure: bool,
enable_access_cluster_info: bool,
channel_manager: Option<ChannelManager>,
@@ -123,13 +119,6 @@ impl MetaClientBuilder {
}
}
pub fn enable_lock(self) -> Self {
Self {
enable_lock: true,
..self
}
}
pub fn enable_procedure(self) -> Self {
Self {
enable_procedure: true,
@@ -188,10 +177,6 @@ impl MetaClientBuilder {
client.store = Some(StoreClient::new(self.id, self.role, mgr.clone()));
}
if self.enable_lock {
client.lock = Some(LockClient::new(self.id, self.role, mgr.clone()));
}
if self.enable_procedure {
let mgr = self.ddl_channel_manager.unwrap_or(mgr.clone());
client.procedure = Some(ProcedureClient::new(
@@ -221,7 +206,6 @@ pub struct MetaClient {
channel_manager: ChannelManager,
heartbeat: Option<HeartbeatClient>,
store: Option<StoreClient>,
lock: Option<LockClient>,
procedure: Option<ProcedureClient>,
cluster: Option<ClusterClient>,
}
@@ -383,10 +367,6 @@ impl MetaClient {
client.start(urls.clone()).await?;
info!("Store client started");
}
if let Some(client) = &mut self.lock {
client.start(urls.clone()).await?;
info!("Lock client started");
}
if let Some(client) = &mut self.procedure {
client.start(urls.clone()).await?;
info!("DDL client started");
@@ -482,15 +462,6 @@ impl MetaClient {
.context(ConvertMetaResponseSnafu)
}
pub async fn lock(&self, req: LockRequest) -> Result<LockResponse> {
self.lock_client()?.lock(req.into()).await.map(Into::into)
}
pub async fn unlock(&self, req: UnlockRequest) -> Result<()> {
let _ = self.lock_client()?.unlock(req.into()).await?;
Ok(())
}
/// Query the procedure state by its id.
pub async fn query_procedure_state(&self, pid: &str) -> Result<ProcedureStateResponse> {
self.procedure_client()?.query_procedure_state(pid).await
@@ -538,12 +509,6 @@ impl MetaClient {
})
}
pub fn lock_client(&self) -> Result<LockClient> {
self.lock.clone().context(NotStartedSnafu {
name: "lock_client",
})
}
pub fn procedure_client(&self) -> Result<ProcedureClient> {
self.procedure.clone().context(NotStartedSnafu {
name: "procedure_client",

View File

@@ -1,178 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::HashSet;
use std::sync::Arc;
use api::v1::meta::lock_client::LockClient;
use api::v1::meta::{LockRequest, LockResponse, Role, UnlockRequest, UnlockResponse};
use common_grpc::channel_manager::ChannelManager;
use common_telemetry::tracing_context::TracingContext;
use snafu::{ensure, OptionExt, ResultExt};
use tokio::sync::RwLock;
use tonic::transport::Channel;
use crate::client::{load_balance, Id};
use crate::error;
use crate::error::Result;
#[derive(Clone, Debug)]
pub struct Client {
inner: Arc<RwLock<Inner>>,
}
impl Client {
pub fn new(id: Id, role: Role, channel_manager: ChannelManager) -> Self {
let inner = Arc::new(RwLock::new(Inner {
id,
role,
channel_manager,
peers: vec![],
}));
Self { inner }
}
pub async fn start<U, A>(&mut self, urls: A) -> Result<()>
where
U: AsRef<str>,
A: AsRef<[U]>,
{
let mut inner = self.inner.write().await;
inner.start(urls).await
}
pub async fn lock(&self, req: LockRequest) -> Result<LockResponse> {
let inner = self.inner.read().await;
inner.lock(req).await
}
pub async fn unlock(&self, req: UnlockRequest) -> Result<UnlockResponse> {
let inner = self.inner.read().await;
inner.unlock(req).await
}
}
#[derive(Debug)]
struct Inner {
id: Id,
role: Role,
channel_manager: ChannelManager,
peers: Vec<String>,
}
impl Inner {
async fn start<U, A>(&mut self, urls: A) -> Result<()>
where
U: AsRef<str>,
A: AsRef<[U]>,
{
ensure!(
!self.is_started(),
error::IllegalGrpcClientStateSnafu {
err_msg: "Lock client already started",
}
);
self.peers = urls
.as_ref()
.iter()
.map(|url| url.as_ref().to_string())
.collect::<HashSet<_>>()
.drain()
.collect::<Vec<_>>();
Ok(())
}
fn random_client(&self) -> Result<LockClient<Channel>> {
let len = self.peers.len();
let peer = load_balance::random_get(len, |i| Some(&self.peers[i])).context(
error::IllegalGrpcClientStateSnafu {
err_msg: "Empty peers, lock client may not start yet",
},
)?;
self.make_client(peer)
}
fn make_client(&self, addr: impl AsRef<str>) -> Result<LockClient<Channel>> {
let channel = self
.channel_manager
.get(addr)
.context(error::CreateChannelSnafu)?;
Ok(LockClient::new(channel))
}
#[inline]
fn is_started(&self) -> bool {
!self.peers.is_empty()
}
async fn lock(&self, mut req: LockRequest) -> Result<LockResponse> {
let mut client = self.random_client()?;
req.set_header(
self.id,
self.role,
TracingContext::from_current_span().to_w3c(),
);
let res = client.lock(req).await.map_err(error::Error::from)?;
Ok(res.into_inner())
}
async fn unlock(&self, mut req: UnlockRequest) -> Result<UnlockResponse> {
let mut client = self.random_client()?;
req.set_header(
self.id,
self.role,
TracingContext::from_current_span().to_w3c(),
);
let res = client.unlock(req).await.map_err(error::Error::from)?;
Ok(res.into_inner())
}
}
#[cfg(test)]
mod tests {
use super::*;
#[tokio::test]
async fn test_already_start() {
let mut client = Client::new((0, 0), Role::Datanode, ChannelManager::default());
client
.start(&["127.0.0.1:1000", "127.0.0.1:1001"])
.await
.unwrap();
let res = client.start(&["127.0.0.1:1002"]).await;
assert!(res.is_err());
assert!(matches!(
res.err(),
Some(error::Error::IllegalGrpcClientState { .. })
));
}
#[tokio::test]
async fn test_start_with_duplicate_peers() {
let mut client = Client::new((0, 0), Role::Datanode, ChannelManager::default());
client
.start(&["127.0.0.1:1000", "127.0.0.1:1000", "127.0.0.1:1000"])
.await
.unwrap();
assert_eq!(1, client.inner.write().await.peers.len());
}
}

View File

@@ -16,7 +16,6 @@ use std::sync::Arc;
use api::v1::meta::cluster_server::ClusterServer;
use api::v1::meta::heartbeat_server::HeartbeatServer;
use api::v1::meta::lock_server::LockServer;
use api::v1::meta::procedure_service_server::ProcedureServiceServer;
use api::v1::meta::store_server::StoreServer;
use common_base::Plugins;
@@ -48,8 +47,6 @@ use crate::election::etcd::EtcdElection;
#[cfg(feature = "pg_kvbackend")]
use crate::error::InvalidArgumentsSnafu;
use crate::error::{InitExportMetricsTaskSnafu, TomlFormatSnafu};
use crate::lock::etcd::EtcdLock;
use crate::lock::memory::MemLock;
use crate::metasrv::builder::MetasrvBuilder;
use crate::metasrv::{BackendImpl, Metasrv, MetasrvOptions, SelectorRef};
use crate::selector::lease_based::LeaseBasedSelector;
@@ -59,9 +56,8 @@ use crate::selector::SelectorType;
use crate::service::admin;
use crate::{error, Result};
#[derive(Clone)]
pub struct MetasrvInstance {
metasrv: Metasrv,
metasrv: Arc<Metasrv>,
httpsrv: Arc<HttpServer>,
@@ -86,8 +82,9 @@ impl MetasrvInstance {
.with_greptime_config_options(opts.to_toml().context(TomlFormatSnafu)?)
.build(),
);
let metasrv = Arc::new(metasrv);
// put metasrv into plugins for later use
plugins.insert::<Arc<Metasrv>>(Arc::new(metasrv.clone()));
plugins.insert::<Arc<Metasrv>>(metasrv.clone());
let export_metrics_task = ExportMetricsTask::try_new(&opts.export_metrics, Some(&plugins))
.context(InitExportMetricsTaskSnafu)?;
Ok(MetasrvInstance {
@@ -151,6 +148,10 @@ impl MetasrvInstance {
pub fn plugins(&self) -> Plugins {
self.plugins.clone()
}
pub fn get_inner(&self) -> &Metasrv {
&self.metasrv
}
}
pub async fn bootstrap_metasrv_with_router(
@@ -177,14 +178,13 @@ pub async fn bootstrap_metasrv_with_router(
Ok(())
}
pub fn router(metasrv: Metasrv) -> Router {
pub fn router(metasrv: Arc<Metasrv>) -> Router {
tonic::transport::Server::builder()
.accept_http1(true) // for admin services
.add_service(HeartbeatServer::new(metasrv.clone()))
.add_service(StoreServer::new(metasrv.clone()))
.add_service(ClusterServer::new(metasrv.clone()))
.add_service(LockServer::new(metasrv.clone()))
.add_service(ProcedureServiceServer::new(metasrv.clone()))
.add_service(HeartbeatServer::from_arc(metasrv.clone()))
.add_service(StoreServer::from_arc(metasrv.clone()))
.add_service(ClusterServer::from_arc(metasrv.clone()))
.add_service(ProcedureServiceServer::from_arc(metasrv.clone()))
.add_service(admin::make_admin_service(metasrv))
}
@@ -193,13 +193,9 @@ pub async fn metasrv_builder(
plugins: Plugins,
kv_backend: Option<KvBackendRef>,
) -> Result<MetasrvBuilder> {
let (kv_backend, election, lock) = match (kv_backend, &opts.backend) {
(Some(kv_backend), _) => (kv_backend, None, Some(Arc::new(MemLock::default()) as _)),
(None, BackendImpl::MemoryStore) => (
Arc::new(MemoryKvBackend::new()) as _,
None,
Some(Arc::new(MemLock::default()) as _),
),
let (kv_backend, election) = match (kv_backend, &opts.backend) {
(Some(kv_backend), _) => (kv_backend, None),
(None, BackendImpl::MemoryStore) => (Arc::new(MemoryKvBackend::new()) as _, None),
(None, BackendImpl::EtcdStore) => {
let etcd_client = create_etcd_client(opts).await?;
let kv_backend = {
@@ -224,18 +220,13 @@ pub async fn metasrv_builder(
)
.await?,
),
Some(EtcdLock::with_etcd_client(
etcd_client,
opts.store_key_prefix.clone(),
)?),
)
}
#[cfg(feature = "pg_kvbackend")]
(None, BackendImpl::PostgresStore) => {
let pg_client = create_postgres_client(opts).await?;
let kv_backend = PgStore::with_pg_client(pg_client).await.unwrap();
// TODO: implement locking and leader election for pg backend.
(kv_backend, None, None)
(kv_backend, None)
}
};
@@ -253,7 +244,6 @@ pub async fn metasrv_builder(
.in_memory(in_memory)
.selector(selector)
.election(election)
.lock(lock)
.plugins(plugins))
}

View File

@@ -448,30 +448,6 @@ pub enum Error {
location: Location,
},
#[snafu(display("Failed to lock based on etcd"))]
Lock {
#[snafu(source)]
error: etcd_client::Error,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Failed to unlock based on etcd"))]
Unlock {
#[snafu(source)]
error: etcd_client::Error,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Failed to grant lease"))]
LeaseGrant {
#[snafu(source)]
error: etcd_client::Error,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Invalid utf-8 value"))]
InvalidUtf8Value {
#[snafu(source)]
@@ -770,9 +746,6 @@ impl ErrorExt for Error {
| Error::ResponseHeaderNotFound { .. }
| Error::IsNotLeader { .. }
| Error::InvalidHttpBody { .. }
| Error::Lock { .. }
| Error::Unlock { .. }
| Error::LeaseGrant { .. }
| Error::ExceededRetryLimit { .. }
| Error::SendShutdownSignal { .. }
| Error::PusherNotFound { .. }

View File

@@ -14,7 +14,7 @@
use std::collections::{BTreeMap, HashSet};
use std::ops::Range;
use std::sync::Arc;
use std::sync::{Arc, Mutex};
use std::time::{Duration, Instant};
use api::v1::meta::mailbox_message::Payload;
@@ -209,15 +209,16 @@ impl Pushers {
}
}
struct NameCachedHandler {
#[derive(Clone)]
pub struct NameCachedHandler {
name: &'static str,
handler: Box<dyn HeartbeatHandler>,
handler: Arc<dyn HeartbeatHandler>,
}
impl NameCachedHandler {
fn new(handler: impl HeartbeatHandler + 'static) -> Self {
let name = handler.name();
let handler = Box::new(handler);
let handler = Arc::new(handler);
Self { name, handler }
}
}
@@ -225,7 +226,7 @@ impl NameCachedHandler {
pub type HeartbeatHandlerGroupRef = Arc<HeartbeatHandlerGroup>;
/// The group of heartbeat handlers.
#[derive(Default)]
#[derive(Default, Clone)]
pub struct HeartbeatHandlerGroup {
handlers: Vec<NameCachedHandler>,
pushers: Pushers,
@@ -540,20 +541,32 @@ impl HeartbeatHandlerGroupBuilder {
}
Ok(HeartbeatHandlerGroup {
handlers: self.handlers.into_iter().collect(),
handlers: self.handlers,
pushers: self.pushers,
})
}
fn add_handler_after_inner(&mut self, target: &str, handler: NameCachedHandler) -> Result<()> {
if let Some(pos) = self.handlers.iter().position(|x| x.name == target) {
self.handlers.insert(pos + 1, handler);
return Ok(());
}
error::HandlerNotFoundSnafu { name: target }.fail()
}
/// Adds the handler after the specified handler.
pub fn add_handler_after(
&mut self,
target: &'static str,
handler: impl HeartbeatHandler + 'static,
) -> Result<()> {
self.add_handler_after_inner(target, NameCachedHandler::new(handler))
}
fn add_handler_before_inner(&mut self, target: &str, handler: NameCachedHandler) -> Result<()> {
if let Some(pos) = self.handlers.iter().position(|x| x.name == target) {
self.handlers
.insert(pos + 1, NameCachedHandler::new(handler));
self.handlers.insert(pos, handler);
return Ok(());
}
@@ -566,8 +579,12 @@ impl HeartbeatHandlerGroupBuilder {
target: &'static str,
handler: impl HeartbeatHandler + 'static,
) -> Result<()> {
self.add_handler_before_inner(target, NameCachedHandler::new(handler))
}
fn replace_handler_inner(&mut self, target: &str, handler: NameCachedHandler) -> Result<()> {
if let Some(pos) = self.handlers.iter().position(|x| x.name == target) {
self.handlers.insert(pos, NameCachedHandler::new(handler));
self.handlers[pos] = handler;
return Ok(());
}
@@ -580,25 +597,115 @@ impl HeartbeatHandlerGroupBuilder {
target: &'static str,
handler: impl HeartbeatHandler + 'static,
) -> Result<()> {
if let Some(pos) = self.handlers.iter().position(|x| x.name == target) {
self.handlers[pos] = NameCachedHandler::new(handler);
return Ok(());
}
self.replace_handler_inner(target, NameCachedHandler::new(handler))
}
error::HandlerNotFoundSnafu { name: target }.fail()
fn add_handler_last_inner(&mut self, handler: NameCachedHandler) {
self.handlers.push(handler);
}
fn add_handler_last(&mut self, handler: impl HeartbeatHandler + 'static) {
self.handlers.push(NameCachedHandler::new(handler));
self.add_handler_last_inner(NameCachedHandler::new(handler));
}
}
pub type HeartbeatHandlerGroupBuilderCustomizerRef =
Arc<dyn HeartbeatHandlerGroupBuilderCustomizer>;
pub enum CustomizeHeartbeatGroupAction {
AddHandlerAfter {
target: String,
handler: NameCachedHandler,
},
AddHandlerBefore {
target: String,
handler: NameCachedHandler,
},
ReplaceHandler {
target: String,
handler: NameCachedHandler,
},
AddHandlerLast {
handler: NameCachedHandler,
},
}
impl CustomizeHeartbeatGroupAction {
pub fn new_add_handler_after(
target: &'static str,
handler: impl HeartbeatHandler + 'static,
) -> Self {
Self::AddHandlerAfter {
target: target.to_string(),
handler: NameCachedHandler::new(handler),
}
}
pub fn new_add_handler_before(
target: &'static str,
handler: impl HeartbeatHandler + 'static,
) -> Self {
Self::AddHandlerBefore {
target: target.to_string(),
handler: NameCachedHandler::new(handler),
}
}
pub fn new_replace_handler(
target: &'static str,
handler: impl HeartbeatHandler + 'static,
) -> Self {
Self::ReplaceHandler {
target: target.to_string(),
handler: NameCachedHandler::new(handler),
}
}
pub fn new_add_handler_last(handler: impl HeartbeatHandler + 'static) -> Self {
Self::AddHandlerLast {
handler: NameCachedHandler::new(handler),
}
}
}
/// The customizer of the [`HeartbeatHandlerGroupBuilder`].
pub trait HeartbeatHandlerGroupBuilderCustomizer: Send + Sync {
fn customize(&self, builder: &mut HeartbeatHandlerGroupBuilder) -> Result<()>;
fn add_action(&self, action: CustomizeHeartbeatGroupAction);
}
#[derive(Default)]
pub struct DefaultHeartbeatHandlerGroupBuilderCustomizer {
actions: Mutex<Vec<CustomizeHeartbeatGroupAction>>,
}
impl HeartbeatHandlerGroupBuilderCustomizer for DefaultHeartbeatHandlerGroupBuilderCustomizer {
fn customize(&self, builder: &mut HeartbeatHandlerGroupBuilder) -> Result<()> {
info!("Customizing the heartbeat handler group builder");
let mut actions = self.actions.lock().unwrap();
for action in actions.drain(..) {
match action {
CustomizeHeartbeatGroupAction::AddHandlerAfter { target, handler } => {
builder.add_handler_after_inner(&target, handler)?;
}
CustomizeHeartbeatGroupAction::AddHandlerBefore { target, handler } => {
builder.add_handler_before_inner(&target, handler)?;
}
CustomizeHeartbeatGroupAction::ReplaceHandler { target, handler } => {
builder.replace_handler_inner(&target, handler)?;
}
CustomizeHeartbeatGroupAction::AddHandlerLast { handler } => {
builder.add_handler_last_inner(handler);
}
}
}
Ok(())
}
fn add_action(&self, action: CustomizeHeartbeatGroupAction) {
self.actions.lock().unwrap().push(action);
}
}
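
As a hedged sketch (not part of this changeset), a plugin could queue actions on the default customizer and later apply them to a builder; the extra handler here is only a placeholder for any type implementing `HeartbeatHandler`.

// Hedged sketch: queue actions, then drain them into the builder.
fn customize_example(
    builder: &mut HeartbeatHandlerGroupBuilder,
    extra_handler: impl HeartbeatHandler + 'static,
) -> Result<()> {
    let customizer = DefaultHeartbeatHandlerGroupBuilderCustomizer::default();
    // Actions are recorded in order and applied when `customize` runs.
    customizer.add_action(CustomizeHeartbeatGroupAction::new_add_handler_last(
        extra_handler,
    ));
    customizer.customize(builder)
}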
#[cfg(test)]

View File

@@ -20,6 +20,8 @@ use crate::error::Result;
use crate::handler::{HandleControl, HeartbeatAccumulator, HeartbeatHandler};
use crate::metasrv::Context;
pub const NAME: &str = "FilterInactiveRegionStatsHandler";
pub struct FilterInactiveRegionStatsHandler;
#[async_trait]

View File

@@ -28,7 +28,6 @@ pub mod flow_meta_alloc;
pub mod handler;
pub mod key;
pub mod lease;
pub mod lock;
pub mod metasrv;
mod metrics;
#[cfg(feature = "mock")]

View File

@@ -1,99 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
pub mod etcd;
pub(crate) mod memory;
use std::sync::Arc;
use common_telemetry::error;
use crate::error::Result;
pub type Key = Vec<u8>;
pub const DEFAULT_EXPIRE_TIME_SECS: u64 = 10;
pub struct Opts {
// If the expiration time elapses while the lock is still held, the lock is
// automatically released.
pub expire_secs: Option<u64>,
}
impl Default for Opts {
fn default() -> Self {
Opts {
expire_secs: Some(DEFAULT_EXPIRE_TIME_SECS),
}
}
}
#[async_trait::async_trait]
pub trait DistLock: Send + Sync {
// Lock acquires a distributed shared lock on a given named lock. On success, it
// will return a unique key that exists so long as the lock is held by the caller.
async fn lock(&self, name: Vec<u8>, opts: Opts) -> Result<Key>;
// Unlock takes a key returned by Lock and releases the hold on the lock.
async fn unlock(&self, key: Vec<u8>) -> Result<()>;
}
pub type DistLockRef = Arc<dyn DistLock>;
pub struct DistLockGuard<'a> {
lock: &'a DistLockRef,
name: Vec<u8>,
key: Option<Key>,
}
impl<'a> DistLockGuard<'a> {
pub fn new(lock: &'a DistLockRef, name: Vec<u8>) -> Self {
Self {
lock,
name,
key: None,
}
}
pub async fn lock(&mut self) -> Result<()> {
if self.key.is_some() {
return Ok(());
}
let key = self
.lock
.lock(
self.name.clone(),
Opts {
expire_secs: Some(2),
},
)
.await?;
self.key = Some(key);
Ok(())
}
}
impl Drop for DistLockGuard<'_> {
fn drop(&mut self) {
if let Some(key) = self.key.take() {
let lock = self.lock.clone();
let name = self.name.clone();
let _handle = common_runtime::spawn_global(async move {
if let Err(e) = lock.unlock(key).await {
error!(e; "Failed to unlock '{}'", String::from_utf8_lossy(&name));
}
});
}
}
}
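
Before its removal, the guard above paired a lock with an automatic unlock on drop. A hedged usage sketch (the lock name is illustrative):

// Hedged sketch: serialize a critical section on a named distributed lock.
async fn guarded_work(dist_lock: &DistLockRef) -> Result<()> {
    let mut guard = DistLockGuard::new(dist_lock, b"example_lock_name".to_vec());
    // Acquires the lock; calling `lock` again on the same guard is a no-op.
    guard.lock().await?;
    // ... critical section ...
    // Dropping the guard spawns a task that releases the lock.
    Ok(())
}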

View File

@@ -1,93 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::sync::Arc;
use etcd_client::{Client, LockOptions};
use snafu::ResultExt;
use super::{DistLock, DistLockRef, Opts, DEFAULT_EXPIRE_TIME_SECS};
use crate::error;
use crate::error::Result;
/// An implementation of a distributed lock based on etcd. Cloning `EtcdLock` is cheap.
#[derive(Clone)]
pub struct EtcdLock {
client: Client,
store_key_prefix: String,
}
impl EtcdLock {
pub async fn with_endpoints<E, S>(endpoints: S, store_key_prefix: String) -> Result<DistLockRef>
where
E: AsRef<str>,
S: AsRef<[E]>,
{
let client = Client::connect(endpoints, None)
.await
.context(error::ConnectEtcdSnafu)?;
Self::with_etcd_client(client, store_key_prefix)
}
pub fn with_etcd_client(client: Client, store_key_prefix: String) -> Result<DistLockRef> {
Ok(Arc::new(EtcdLock {
client,
store_key_prefix,
}))
}
fn lock_key(&self, key: Vec<u8>) -> Vec<u8> {
if self.store_key_prefix.is_empty() {
key
} else {
let mut prefix = self.store_key_prefix.as_bytes().to_vec();
prefix.extend_from_slice(&key);
prefix
}
}
}
#[async_trait::async_trait]
impl DistLock for EtcdLock {
async fn lock(&self, key: Vec<u8>, opts: Opts) -> Result<Vec<u8>> {
let expire = opts.expire_secs.unwrap_or(DEFAULT_EXPIRE_TIME_SECS) as i64;
let mut client = self.client.clone();
let resp = client
.lease_grant(expire, None)
.await
.context(error::LeaseGrantSnafu)?;
let lease_id = resp.id();
let lock_opts = LockOptions::new().with_lease(lease_id);
let resp = client
.lock(self.lock_key(key), Some(lock_opts))
.await
.context(error::LockSnafu)?;
Ok(resp.key().to_vec())
}
async fn unlock(&self, key: Vec<u8>) -> Result<()> {
let mut client = self.client.clone();
let _ = client
.unlock(self.lock_key(key))
.await
.context(error::UnlockSnafu)?;
Ok(())
}
}

View File

@@ -1,112 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::sync::Arc;
use async_trait::async_trait;
use dashmap::DashMap;
use tokio::sync::{Mutex, OwnedMutexGuard};
use crate::error::Result;
use crate::lock::{DistLock, Key, Opts};
#[derive(Default)]
pub(crate) struct MemLock {
mutexes: DashMap<Key, Arc<Mutex<()>>>,
guards: DashMap<Key, OwnedMutexGuard<()>>,
}
#[async_trait]
impl DistLock for MemLock {
async fn lock(&self, key: Vec<u8>, _opts: Opts) -> Result<Key> {
let mutex = self
.mutexes
.entry(key.clone())
.or_insert_with(|| Arc::new(Mutex::new(())))
.clone();
let guard = mutex.lock_owned().await;
let _ = self.guards.insert(key.clone(), guard);
Ok(key)
}
async fn unlock(&self, key: Vec<u8>) -> Result<()> {
// Drop the guard so that the mutex is unlocked, which lets a pending
// `mutex.lock_owned()` call in the `lock` method proceed.
let _ = self.guards.remove(&key);
Ok(())
}
}
#[cfg(test)]
mod tests {
use std::collections::HashMap;
use std::sync::atomic::{AtomicU32, Ordering};
use rand::seq::SliceRandom;
use super::*;
#[tokio::test(flavor = "multi_thread")]
async fn test_mem_lock_concurrently() {
let lock = Arc::new(MemLock::default());
let keys = (0..10)
.map(|i| format!("my-lock-{i}").into_bytes())
.collect::<Vec<Key>>();
let counters: [(Key, AtomicU32); 10] = keys
.iter()
.map(|x| (x.clone(), AtomicU32::new(0)))
.collect::<Vec<_>>()
.try_into()
.unwrap();
let counters = Arc::new(HashMap::from(counters));
let tasks = (0..100)
.map(|_| {
let mut keys = keys.clone();
keys.shuffle(&mut rand::thread_rng());
let lock_clone = lock.clone();
let counters_clone = counters.clone();
tokio::spawn(async move {
// each key's counter is incremented by 1 ten times per task
for i in 0..100 {
let key = &keys[i % keys.len()];
assert!(lock_clone
.lock(key.clone(), Opts { expire_secs: None })
.await
.is_ok());
// Intentionally create a critical section:
// if our MemLock is flawed, the resulting counter will be wrong.
//
// Note that AtomicU32 is only used to allow updates from multiple tasks;
// it does not by itself guarantee the correctness of the result.
let counter = counters_clone.get(key).unwrap();
let v = counter.load(Ordering::Relaxed);
counter.store(v + 1, Ordering::Relaxed);
lock_clone.unlock(key.clone()).await.unwrap();
}
})
})
.collect::<Vec<_>>();
let _ = futures::future::join_all(tasks).await;
assert!(counters.values().all(|x| x.load(Ordering::Relaxed) == 1000));
}
}

View File

@@ -16,7 +16,7 @@ pub mod builder;
use std::fmt::Display;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::sync::{Arc, Mutex, RwLock};
use std::time::Duration;
use clap::ValueEnum;
@@ -44,20 +44,19 @@ use common_wal::config::MetasrvWalConfig;
use serde::{Deserialize, Serialize};
use servers::export_metrics::ExportMetricsOption;
use servers::http::HttpOptions;
use snafu::ResultExt;
use snafu::{OptionExt, ResultExt};
use table::metadata::TableId;
use tokio::sync::broadcast::error::RecvError;
use crate::cluster::MetaPeerClientRef;
use crate::election::{Election, LeaderChangeMessage};
use crate::error::{
InitMetadataSnafu, KvBackendSnafu, Result, StartProcedureManagerSnafu, StartTelemetryTaskSnafu,
StopProcedureManagerSnafu,
self, InitMetadataSnafu, KvBackendSnafu, Result, StartProcedureManagerSnafu,
StartTelemetryTaskSnafu, StopProcedureManagerSnafu,
};
use crate::failure_detector::PhiAccrualFailureDetectorOptions;
use crate::handler::HeartbeatHandlerGroupRef;
use crate::handler::{HeartbeatHandlerGroupBuilder, HeartbeatHandlerGroupRef};
use crate::lease::lookup_datanode_peer;
use crate::lock::DistLockRef;
use crate::procedure::region_migration::manager::RegionMigrationManagerRef;
use crate::procedure::ProcedureManagerListenerAdapter;
use crate::pubsub::{PublisherRef, SubscriptionManagerRef};
@@ -338,7 +337,6 @@ impl MetaStateHandler {
}
}
#[derive(Clone)]
pub struct Metasrv {
state: StateRef,
started: Arc<AtomicBool>,
@@ -354,9 +352,9 @@ pub struct Metasrv {
selector: SelectorRef,
// The flow selector is used to select a target flownode.
flow_selector: SelectorRef,
handler_group: HeartbeatHandlerGroupRef,
handler_group: RwLock<Option<HeartbeatHandlerGroupRef>>,
handler_group_builder: Mutex<Option<HeartbeatHandlerGroupBuilder>>,
election: Option<ElectionRef>,
lock: DistLockRef,
procedure_manager: ProcedureManagerRef,
mailbox: MailboxRef,
procedure_executor: ProcedureExecutorRef,
@@ -382,6 +380,16 @@ impl Metasrv {
return Ok(());
}
let handler_group_builder =
self.handler_group_builder
.lock()
.unwrap()
.take()
.context(error::UnexpectedSnafu {
violated: "expected heartbeat handler group builder",
})?;
*self.handler_group.write().unwrap() = Some(Arc::new(handler_group_builder.build()?));
// Creates default schema if not exists
self.table_metadata_manager
.init()
@@ -560,18 +568,14 @@ impl Metasrv {
&self.flow_selector
}
pub fn handler_group(&self) -> &HeartbeatHandlerGroupRef {
&self.handler_group
pub fn handler_group(&self) -> Option<HeartbeatHandlerGroupRef> {
self.handler_group.read().unwrap().clone()
}
pub fn election(&self) -> Option<&ElectionRef> {
self.election.as_ref()
}
pub fn lock(&self) -> &DistLockRef {
&self.lock
}
pub fn mailbox(&self) -> &MailboxRef {
&self.mailbox
}

View File

@@ -13,7 +13,7 @@
// limitations under the License.
use std::sync::atomic::AtomicBool;
use std::sync::{Arc, RwLock};
use std::sync::{Arc, Mutex, RwLock};
use std::time::Duration;
use client::client_manager::NodeClients;
@@ -48,12 +48,8 @@ use crate::flow_meta_alloc::FlowPeerAllocator;
use crate::greptimedb_telemetry::get_greptimedb_telemetry_task;
use crate::handler::failure_handler::RegionFailureHandler;
use crate::handler::region_lease_handler::RegionLeaseHandler;
use crate::handler::{
HeartbeatHandlerGroup, HeartbeatHandlerGroupBuilder, HeartbeatMailbox, Pushers,
};
use crate::handler::{HeartbeatHandlerGroupBuilder, HeartbeatMailbox, Pushers};
use crate::lease::MetaPeerLookupService;
use crate::lock::memory::MemLock;
use crate::lock::DistLockRef;
use crate::metasrv::{
ElectionRef, Metasrv, MetasrvInfo, MetasrvOptions, SelectorContext, SelectorRef, TABLE_ID_SEQ,
};
@@ -76,10 +72,9 @@ pub struct MetasrvBuilder {
kv_backend: Option<KvBackendRef>,
in_memory: Option<ResettableKvBackendRef>,
selector: Option<SelectorRef>,
handler_group: Option<HeartbeatHandlerGroup>,
handler_group_builder: Option<HeartbeatHandlerGroupBuilder>,
election: Option<ElectionRef>,
meta_peer_client: Option<MetaPeerClientRef>,
lock: Option<DistLockRef>,
node_manager: Option<NodeManagerRef>,
plugins: Option<Plugins>,
table_metadata_allocator: Option<TableMetadataAllocatorRef>,
@@ -91,11 +86,10 @@ impl MetasrvBuilder {
kv_backend: None,
in_memory: None,
selector: None,
handler_group: None,
handler_group_builder: None,
meta_peer_client: None,
election: None,
options: None,
lock: None,
node_manager: None,
plugins: None,
table_metadata_allocator: None,
@@ -122,8 +116,11 @@ impl MetasrvBuilder {
self
}
pub fn heartbeat_handler(mut self, handler_group: HeartbeatHandlerGroup) -> Self {
self.handler_group = Some(handler_group);
pub fn heartbeat_handler(
mut self,
handler_group_builder: HeartbeatHandlerGroupBuilder,
) -> Self {
self.handler_group_builder = Some(handler_group_builder);
self
}
@@ -137,11 +134,6 @@ impl MetasrvBuilder {
self
}
pub fn lock(mut self, lock: Option<DistLockRef>) -> Self {
self.lock = lock;
self
}
pub fn node_manager(mut self, node_manager: NodeManagerRef) -> Self {
self.node_manager = Some(node_manager);
self
@@ -170,8 +162,7 @@ impl MetasrvBuilder {
kv_backend,
in_memory,
selector,
handler_group,
lock,
handler_group_builder,
node_manager,
plugins,
table_metadata_allocator,
@@ -205,7 +196,6 @@ impl MetasrvBuilder {
let flow_metadata_manager = Arc::new(FlowMetadataManager::new(
leader_cached_kv_backend.clone() as _,
));
let lock = lock.unwrap_or_else(|| Arc::new(MemLock::default()));
let selector_ctx = SelectorContext {
server_addr: options.server_addr.clone(),
datanode_lease_secs: distributed_time_constants::DATANODE_LEASE_SECS,
@@ -349,8 +339,8 @@ impl MetasrvBuilder {
.context(error::InitDdlManagerSnafu)?,
);
let handler_group = match handler_group {
Some(handler_group) => handler_group,
let handler_group_builder = match handler_group_builder {
Some(handler_group_builder) => handler_group_builder,
None => {
let region_lease_handler = RegionLeaseHandler::new(
distributed_time_constants::REGION_LEASE_SECS,
@@ -363,7 +353,6 @@ impl MetasrvBuilder {
.with_region_failure_handler(region_failover_handler)
.with_region_lease_handler(Some(region_lease_handler))
.add_default_handlers()
.build()?
}
};
@@ -382,9 +371,9 @@ impl MetasrvBuilder {
selector,
// TODO(jeremy): We do not allow configuring the flow selector.
flow_selector: Arc::new(RoundRobinSelector::new(SelectTarget::Flownode)),
handler_group: Arc::new(handler_group),
handler_group: RwLock::new(None),
handler_group_builder: Mutex::new(Some(handler_group_builder)),
election,
lock,
procedure_manager,
mailbox,
procedure_executor: ddl_manager,

View File

@@ -45,7 +45,20 @@ lazy_static! {
/// Meta kv cache miss counter.
pub static ref METRIC_META_KV_CACHE_MISS: IntCounterVec =
register_int_counter_vec!("greptime_meta_kv_cache_miss", "meta kv cache miss", &["op"]).unwrap();
// Heartbeat received by metasrv.
/// Heartbeat received by metasrv.
pub static ref METRIC_META_HEARTBEAT_RECV: IntCounterVec =
register_int_counter_vec!("greptime_meta_heartbeat_recv", "heartbeats received by metasrv", &["pusher_key"]).unwrap();
/// The migration execute histogram.
pub static ref METRIC_META_REGION_MIGRATION_EXECUTE: HistogramVec =
register_histogram_vec!("greptime_meta_region_migration_execute", "meta region migration execute", &["state"]).unwrap();
/// The migration error counter.
pub static ref METRIC_META_REGION_MIGRATION_ERROR: IntCounterVec =
register_int_counter_vec!("greptime_meta_region_migration_error", "meta region migration abort", &["state", "error_type"]).unwrap();
/// The migration datanode counter.
pub static ref METRIC_META_REGION_MIGRATION_DATANODES: IntCounterVec =
register_int_counter_vec!("greptime_meta_region_migration_stat", "meta region migration stat", &["datanode_type", "datanode_id"]).unwrap();
/// The migration fail counter.
pub static ref METRIC_META_REGION_MIGRATION_FAIL: IntCounter =
register_int_counter!("greptime_meta_region_migration_fail", "meta region migration fail").unwrap();
}

View File

@@ -33,7 +33,7 @@ use crate::metasrv::{Metasrv, MetasrvOptions, SelectorRef};
pub struct MockInfo {
pub server_addr: String,
pub channel_manager: ChannelManager,
pub metasrv: Metasrv,
pub metasrv: Arc<Metasrv>,
}
pub async fn mock_with_memstore() -> MockInfo {
@@ -78,12 +78,13 @@ pub async fn mock(
metasrv.try_start().await.unwrap();
let (client, server) = tokio::io::duplex(1024);
let metasrv = Arc::new(metasrv);
let service = metasrv.clone();
let _handle = tokio::spawn(async move {
tonic::transport::Server::builder()
.add_service(HeartbeatServer::new(service.clone()))
.add_service(StoreServer::new(service.clone()))
.add_service(ProcedureServiceServer::new(service.clone()))
.add_service(HeartbeatServer::from_arc(service.clone()))
.add_service(StoreServer::from_arc(service.clone()))
.add_service(ProcedureServiceServer::from_arc(service.clone()))
.serve_with_incoming(futures::stream::iter(vec![Ok::<_, std::io::Error>(server)]))
.await
});

View File

@@ -54,6 +54,7 @@ use tokio::time::Instant;
use self::migration_start::RegionMigrationStart;
use crate::error::{self, Result};
use crate::metrics::{METRIC_META_REGION_MIGRATION_ERROR, METRIC_META_REGION_MIGRATION_EXECUTE};
use crate::service::mailbox::MailboxRef;
/// It's shared in each step and available even after recovering.
@@ -390,6 +391,12 @@ impl Context {
#[async_trait::async_trait]
#[typetag::serde(tag = "region_migration_state")]
pub(crate) trait State: Sync + Send + Debug {
fn name(&self) -> &'static str {
let type_name = std::any::type_name::<Self>();
// short name
type_name.split("::").last().unwrap_or(type_name)
}
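    // Hedged illustration (not part of this diff): for a concrete state such as
    // `RegionMigrationStart`, `std::any::type_name` yields its full module path, and the
    // `split("::").last()` above reduces it to the short name "RegionMigrationStart",
    // which is then used as the `state` label on the migration metrics.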
/// Yields the next [State] and [Status].
async fn next(&mut self, ctx: &mut Context) -> Result<(Box<dyn State>, Status)>;
@@ -478,10 +485,20 @@ impl Procedure for RegionMigrationProcedure {
async fn execute(&mut self, _ctx: &ProcedureContext) -> ProcedureResult<Status> {
let state = &mut self.state;
let name = state.name();
let _timer = METRIC_META_REGION_MIGRATION_EXECUTE
.with_label_values(&[name])
.start_timer();
let (next, status) = state.next(&mut self.context).await.map_err(|e| {
if e.is_retryable() {
METRIC_META_REGION_MIGRATION_ERROR
.with_label_values(&[name, "retryable"])
.inc();
ProcedureError::retry_later(e)
} else {
METRIC_META_REGION_MIGRATION_ERROR
.with_label_values(&[name, "external"])
.inc();
ProcedureError::external(e)
}
})?;

View File

@@ -30,6 +30,7 @@ use store_api::storage::RegionId;
use table::table_name::TableName;
use crate::error::{self, Result};
use crate::metrics::{METRIC_META_REGION_MIGRATION_DATANODES, METRIC_META_REGION_MIGRATION_FAIL};
use crate::procedure::region_migration::{
DefaultContextFactory, PersistentContext, RegionMigrationProcedure,
};
@@ -323,6 +324,12 @@ impl RegionMigrationManager {
schema_name,
..
} = table_info.table_name();
METRIC_META_REGION_MIGRATION_DATANODES
.with_label_values(&["src", &task.from_peer.id.to_string()])
.inc();
METRIC_META_REGION_MIGRATION_DATANODES
.with_label_values(&["desc", &task.to_peer.id.to_string()])
.inc();
let RegionMigrationProcedureTask {
cluster_id,
region_id,
@@ -358,6 +365,7 @@ impl RegionMigrationManager {
if let Err(e) = watcher::wait(watcher).await {
error!(e; "Failed to wait region migration procedure {procedure_id} for {task}");
METRIC_META_REGION_MIGRATION_FAIL.inc();
return;
}

View File

@@ -258,6 +258,7 @@ impl RegionFailureDetectorController for RegionFailureDetectorControl {
}
/// [`HeartbeatAcceptor`] forwards heartbeats to [`RegionSupervisor`].
#[derive(Clone)]
pub(crate) struct HeartbeatAcceptor {
sender: Sender<Event>,
}

View File

@@ -20,7 +20,6 @@ use tonic::{Response, Status};
pub mod admin;
pub mod cluster;
mod heartbeat;
pub mod lock;
pub mod mailbox;
pub mod procedure;
pub mod store;

View File

@@ -30,7 +30,7 @@ use tonic::server::NamedService;
use crate::metasrv::Metasrv;
pub fn make_admin_service(metasrv: Metasrv) -> Admin {
pub fn make_admin_service(metasrv: Arc<Metasrv>) -> Admin {
let router = Router::new().route("/health", health::HealthHandler);
let router = router.route(

View File

@@ -23,6 +23,7 @@ use api::v1::meta::{
use common_telemetry::{debug, error, info, warn};
use futures::StreamExt;
use once_cell::sync::OnceCell;
use snafu::OptionExt;
use tokio::sync::mpsc;
use tokio::sync::mpsc::Sender;
use tokio_stream::wrappers::ReceiverStream;
@@ -45,7 +46,10 @@ impl heartbeat_server::Heartbeat for Metasrv {
) -> GrpcResult<Self::HeartbeatStream> {
let mut in_stream = req.into_inner();
let (tx, rx) = mpsc::channel(128);
let handler_group = self.handler_group().clone();
let handler_group = self.handler_group().context(error::UnexpectedSnafu {
violated: "expected heartbeat handlers",
})?;
let ctx = self.new_ctx();
let _handle = common_runtime::spawn_global(async move {
let mut pusher_key = None;

View File

@@ -1,51 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use api::v1::meta::{lock_server, LockRequest, LockResponse, UnlockRequest, UnlockResponse};
use tonic::{Request, Response};
use super::GrpcResult;
use crate::lock::Opts;
use crate::metasrv::Metasrv;
#[async_trait::async_trait]
impl lock_server::Lock for Metasrv {
async fn lock(&self, request: Request<LockRequest>) -> GrpcResult<LockResponse> {
let LockRequest {
name, expire_secs, ..
} = request.into_inner();
let expire_secs = Some(expire_secs as u64);
let key = self.lock().lock(name, Opts { expire_secs }).await?;
let resp = LockResponse {
key,
..Default::default()
};
Ok(Response::new(resp))
}
async fn unlock(&self, request: Request<UnlockRequest>) -> GrpcResult<UnlockResponse> {
let UnlockRequest { key, .. } = request.into_inner();
let _ = self.lock().unlock(key).await?;
let resp = UnlockResponse {
..Default::default()
};
Ok(Response::new(resp))
}
}

View File

@@ -17,6 +17,7 @@ mod catchup;
mod close;
mod create;
mod drop;
mod flush;
mod open;
mod options;
mod put;
@@ -145,7 +146,7 @@ impl RegionEngine for MetricEngine {
.alter_region(region_id, alter, &mut extension_return_value)
.await
}
RegionRequest::Flush(_) | RegionRequest::Compact(_) => {
RegionRequest::Compact(_) => {
if self.inner.is_physical_region(region_id) {
self.inner
.mito
@@ -157,10 +158,11 @@ impl RegionEngine for MetricEngine {
UnsupportedRegionRequestSnafu { request }.fail()
}
}
RegionRequest::Flush(req) => self.inner.flush_region(region_id, req).await,
RegionRequest::Delete(_) | RegionRequest::Truncate(_) => {
UnsupportedRegionRequestSnafu { request }.fail()
}
RegionRequest::Catchup(ref req) => self.inner.catchup_region(region_id, *req).await,
RegionRequest::Catchup(req) => self.inner.catchup_region(region_id, req).await,
};
result.map_err(BoxedError::new).map(|rows| RegionResponse {
@@ -338,7 +340,7 @@ mod test {
.await
.unwrap();
// close nonexistent region
// close nonexistent region won't report error
let nonexistent_region_id = RegionId::new(12313, 12);
engine
.handle_request(
@@ -346,7 +348,7 @@ mod test {
RegionRequest::Close(RegionCloseRequest {}),
)
.await
.unwrap_err();
.unwrap();
// open nonexistent region won't report error
let invalid_open_request = RegionOpenRequest {

View File

@@ -84,6 +84,12 @@ impl MetricEngineInner {
return Ok(physical_region_id);
};
// lock metadata region for this logical region id
let _write_guard = self
.metadata_region
.write_lock_logical_region(logical_region_id)
.await;
let metadata_region_id = to_metadata_region_id(physical_region_id);
let mut columns_to_add = vec![];
for col in &columns {

View File

@@ -47,9 +47,10 @@ impl MetricEngineInner {
.await
.context(MitoCatchupOperationSnafu)?;
let data_region_id = utils::to_data_region_id(region_id);
self.mito
.handle_request(
region_id,
data_region_id,
RegionRequest::Catchup(RegionCatchupRequest {
set_writable: req.set_writable,
entry_id: req.entry_id,

View File

@@ -14,13 +14,14 @@
//! Close a metric region
use common_telemetry::debug;
use snafu::ResultExt;
use store_api::region_engine::RegionEngine;
use store_api::region_request::{AffectedRows, RegionCloseRequest, RegionRequest};
use store_api::storage::RegionId;
use super::MetricEngineInner;
use crate::error::{CloseMitoRegionSnafu, LogicalRegionNotFoundSnafu, Result};
use crate::error::{CloseMitoRegionSnafu, Result};
use crate::metrics::PHYSICAL_REGION_COUNT;
use crate::utils;
@@ -54,7 +55,8 @@ impl MetricEngineInner {
{
Ok(0)
} else {
Err(LogicalRegionNotFoundSnafu { region_id }.build())
debug!("Closing a non-existent logical region {}", region_id);
Ok(0)
}
}

View File

@@ -0,0 +1,52 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use snafu::ResultExt;
use store_api::region_engine::RegionEngine;
use store_api::region_request::{AffectedRows, RegionFlushRequest, RegionRequest};
use store_api::storage::RegionId;
use crate::engine::MetricEngineInner;
use crate::error::{MitoFlushOperationSnafu, Result, UnsupportedRegionRequestSnafu};
use crate::utils;
impl MetricEngineInner {
pub async fn flush_region(
&self,
region_id: RegionId,
req: RegionFlushRequest,
) -> Result<AffectedRows> {
if !self.is_physical_region(region_id) {
return UnsupportedRegionRequestSnafu {
request: RegionRequest::Flush(req),
}
.fail();
}
let metadata_region_id = utils::to_metadata_region_id(region_id);
// Flushes the metadata region as well
self.mito
.handle_request(metadata_region_id, RegionRequest::Flush(req.clone()))
.await
.context(MitoFlushOperationSnafu)
.map(|response| response.affected_rows)?;
let data_region_id = utils::to_data_region_id(region_id);
self.mito
.handle_request(data_region_id, RegionRequest::Flush(req.clone()))
.await
.context(MitoFlushOperationSnafu)
.map(|response| response.affected_rows)
}
}
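
With this addition, one flush request against a physical metric region flushes the metadata region first and then the data region. A hedged sketch of calling it through the engine's request handler (the function and its parameters are illustrative):

// Hedged sketch: a single Flush request now covers both underlying regions.
async fn flush_physical_region(
    engine: &MetricEngine,
    physical_region_id: RegionId,
    req: RegionFlushRequest,
) -> std::result::Result<AffectedRows, BoxedError> {
    engine
        .handle_request(physical_region_id, RegionRequest::Flush(req))
        .await
        .map(|response| response.affected_rows)
}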

View File

@@ -134,17 +134,26 @@ impl MetricEngineInner {
.await?;
let logical_region_num = logical_regions.len();
let mut state = self.state.write().unwrap();
// recover physical column names
let physical_column_names = physical_columns
.into_iter()
.map(|col| col.column_schema.name)
.collect();
state.add_physical_region(physical_region_id, physical_column_names);
// recover logical regions
for logical_region_id in logical_regions {
state.add_logical_region(physical_region_id, logical_region_id);
{
let mut state = self.state.write().unwrap();
// recover physical column names
let physical_column_names = physical_columns
.into_iter()
.map(|col| col.column_schema.name)
.collect();
state.add_physical_region(physical_region_id, physical_column_names);
// recover logical regions
for logical_region_id in &logical_regions {
state.add_logical_region(physical_region_id, *logical_region_id);
}
}
for logical_region_id in logical_regions {
self.metadata_region
.open_logical_region(logical_region_id)
.await;
}
LOGICAL_REGION_COUNT.add(logical_region_num as i64);
Ok(())

View File

@@ -41,6 +41,10 @@ impl MetricEngineInner {
}
// Else load from metadata region and update the cache.
let _read_guard = self
.metadata_region
.read_lock_logical_region(logical_region_id)
.await;
// Load logical and physical columns, and intersect them to get logical column metadata.
let mut logical_column_metadata = self
.metadata_region

View File

@@ -13,6 +13,7 @@
// limitations under the License.
use std::collections::HashMap;
use std::sync::Arc;
use api::v1::value::ValueData;
use api::v1::{ColumnDataType, ColumnSchema, Row, Rows, SemanticType, Value};
@@ -21,7 +22,7 @@ use base64::Engine;
use common_recordbatch::util::collect;
use datafusion::prelude::{col, lit};
use mito2::engine::MitoEngine;
use snafu::ResultExt;
use snafu::{OptionExt, ResultExt};
use store_api::metadata::ColumnMetadata;
use store_api::metric_engine_consts::{
METADATA_SCHEMA_KEY_COLUMN_INDEX, METADATA_SCHEMA_KEY_COLUMN_NAME,
@@ -31,11 +32,12 @@ use store_api::metric_engine_consts::{
use store_api::region_engine::RegionEngine;
use store_api::region_request::{RegionDeleteRequest, RegionPutRequest};
use store_api::storage::{RegionId, ScanRequest};
use tokio::sync::{OwnedRwLockReadGuard, OwnedRwLockWriteGuard, RwLock};
use crate::error::{
CollectRecordBatchStreamSnafu, DecodeColumnValueSnafu, DeserializeColumnMetadataSnafu,
MitoReadOperationSnafu, MitoWriteOperationSnafu, ParseRegionIdSnafu, RegionAlreadyExistsSnafu,
Result,
LogicalRegionNotFoundSnafu, MitoReadOperationSnafu, MitoWriteOperationSnafu,
ParseRegionIdSnafu, RegionAlreadyExistsSnafu, Result,
};
use crate::utils;
@@ -56,11 +58,19 @@ const COLUMN_PREFIX: &str = "__column_";
/// itself.
pub struct MetadataRegion {
mito: MitoEngine,
/// Logical lock for operations that need to be serialized, such as updating and reading region columns.
///
/// A region entry is registered when the logical region is created or opened, and deregistered
/// when the logical region is removed.
logical_region_lock: RwLock<HashMap<RegionId, Arc<RwLock<()>>>>,
}
impl MetadataRegion {
pub fn new(mito: MitoEngine) -> Self {
Self { mito }
Self {
mito,
logical_region_lock: RwLock::new(HashMap::new()),
}
}
/// Add a new table key to metadata.
@@ -85,10 +95,21 @@ impl MetadataRegion {
}
.fail()
} else {
self.logical_region_lock
.write()
.await
.insert(logical_region_id, Arc::new(RwLock::new(())));
Ok(())
}
}
pub async fn open_logical_region(&self, logical_region_id: RegionId) {
self.logical_region_lock
.write()
.await
.insert(logical_region_id, Arc::new(RwLock::new(())));
}
/// Add a new column key to metadata.
///
/// This method won't check if the column already exists. But
@@ -111,6 +132,40 @@ impl MetadataRegion {
.await
}
/// Retrieve a read lock guard of given logical region id.
pub async fn read_lock_logical_region(
&self,
logical_region_id: RegionId,
) -> Result<OwnedRwLockReadGuard<()>> {
let lock = self
.logical_region_lock
.read()
.await
.get(&logical_region_id)
.context(LogicalRegionNotFoundSnafu {
region_id: logical_region_id,
})?
.clone();
Ok(RwLock::read_owned(lock).await)
}
/// Retrieve a write lock guard of given logical region id.
pub async fn write_lock_logical_region(
&self,
logical_region_id: RegionId,
) -> Result<OwnedRwLockWriteGuard<()>> {
let lock = self
.logical_region_lock
.read()
.await
.get(&logical_region_id)
.context(LogicalRegionNotFoundSnafu {
region_id: logical_region_id,
})?
.clone();
Ok(RwLock::write_owned(lock).await)
}
/// Remove a registered logical region from metadata.
///
/// This method doesn't check if the previous key exists.
@@ -136,6 +191,11 @@ impl MetadataRegion {
column_keys.push(region_key);
self.delete(region_id, &column_keys).await?;
self.logical_region_lock
.write()
.await
.remove(&logical_region_id);
Ok(())
}

View File

@@ -22,6 +22,7 @@ pub mod projection;
pub(crate) mod prune;
pub(crate) mod range;
pub(crate) mod scan_region;
pub(crate) mod scan_util;
pub(crate) mod seq_scan;
pub(crate) mod unordered_scan;
@@ -57,7 +58,6 @@ use crate::error::{
use crate::memtable::BoxedBatchIterator;
use crate::metrics::{READ_BATCHES_RETURN, READ_ROWS_RETURN, READ_STAGE_ELAPSED};
use crate::read::prune::PruneReader;
use crate::sst::parquet::reader::{ReaderFilterMetrics, ReaderMetrics};
/// Storage internal representation of a batch of rows for a primary key (time series).
///
@@ -738,7 +738,7 @@ impl<T: BatchReader + ?Sized> BatchReader for Box<T> {
pub(crate) struct ScannerMetrics {
/// Duration to prepare the scan task.
prepare_scan_cost: Duration,
/// Duration to build parts.
/// Duration to build file ranges.
build_parts_cost: Duration,
/// Duration to build the (merge) reader.
build_reader_cost: Duration,
@@ -758,31 +758,17 @@ pub(crate) struct ScannerMetrics {
num_mem_ranges: usize,
/// Number of file ranges scanned.
num_file_ranges: usize,
/// Filter related metrics for readers.
filter_metrics: ReaderFilterMetrics,
}
impl ScannerMetrics {
/// Sets and observes metrics on initializing parts.
fn observe_init_part(&mut self, build_parts_cost: Duration, reader_metrics: &ReaderMetrics) {
self.build_parts_cost = build_parts_cost;
// Observes metrics.
/// Observes metrics.
fn observe_metrics(&self) {
READ_STAGE_ELAPSED
.with_label_values(&["prepare_scan"])
.observe(self.prepare_scan_cost.as_secs_f64());
READ_STAGE_ELAPSED
.with_label_values(&["build_parts"])
.observe(self.build_parts_cost.as_secs_f64());
// We only call this once so we overwrite it directly.
self.filter_metrics = reader_metrics.filter_metrics;
// Observes filter metrics.
self.filter_metrics.observe();
}
/// Observes metrics on scanner finish.
fn observe_metrics_on_finish(&self) {
READ_STAGE_ELAPSED
.with_label_values(&["build_reader"])
.observe(self.build_reader_cost.as_secs_f64());
@@ -801,6 +787,21 @@ impl ScannerMetrics {
READ_ROWS_RETURN.observe(self.num_rows as f64);
READ_BATCHES_RETURN.observe(self.num_batches as f64);
}
/// Merges metrics from another [ScannerMetrics].
fn merge_from(&mut self, other: &ScannerMetrics) {
self.prepare_scan_cost += other.prepare_scan_cost;
self.build_parts_cost += other.build_parts_cost;
self.build_reader_cost += other.build_reader_cost;
self.scan_cost += other.scan_cost;
self.convert_cost += other.convert_cost;
self.yield_cost += other.yield_cost;
self.total_cost += other.total_cost;
self.num_batches += other.num_batches;
self.num_rows += other.num_rows;
self.num_mem_ranges += other.num_mem_ranges;
self.num_file_ranges += other.num_file_ranges;
}
}
#[cfg(test)]

View File

@@ -14,7 +14,9 @@
//! Structs for partition ranges.
use common_time::Timestamp;
use smallvec::{smallvec, SmallVec};
use store_api::region_engine::PartitionRange;
use crate::memtable::MemtableRef;
use crate::read::scan_region::ScanInput;
@@ -48,6 +50,26 @@ pub(crate) struct RangeMeta {
}
impl RangeMeta {
/// Creates a [PartitionRange] with specific identifier.
/// It converts the inclusive max timestamp to exclusive end timestamp.
pub(crate) fn new_partition_range(&self, identifier: usize) -> PartitionRange {
PartitionRange {
start: self.time_range.0,
end: Timestamp::new(
// If the inclusive max is i64::MAX, adding 1 would overflow, so we keep i64::MAX as the
// exclusive end; rows at exactly i64::MAX may then be invisible, which we don't guarantee
// to support for now.
self.time_range
.1
.value()
.checked_add(1)
.unwrap_or(self.time_range.1.value()),
self.time_range.1.unit(),
),
num_rows: self.num_rows,
identifier,
}
}
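
As a hedged illustration (not part of the change, values are made up), an inclusive time range of [1000, 2000] seconds becomes a partition range with an exclusive end of 2001 seconds:

// Hedged illustration of the inclusive-to-exclusive conversion above.
let meta = RangeMeta {
    time_range: (
        Timestamp::new(1000, TimeUnit::Second),
        Timestamp::new(2000, TimeUnit::Second),
    ),
    indices: smallvec![0],
    row_group_indices: smallvec![RowGroupIndex {
        index: 0,
        row_group_index: 0,
    }],
    num_rows: 1,
};
let range = meta.new_partition_range(0);
assert_eq!(1000, range.start.value());
assert_eq!(2001, range.end.value());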
/// Creates a list of ranges from the `input` for seq scan.
pub(crate) fn seq_scan_ranges(input: &ScanInput) -> Vec<RangeMeta> {
let mut ranges = Vec::with_capacity(input.memtables.len() + input.files.len());
@@ -177,7 +199,7 @@ impl RangeMeta {
}
fn push_seq_mem_ranges(memtables: &[MemtableRef], ranges: &mut Vec<RangeMeta>) {
// For non append-only mode, each range only contains one memtable.
// For non append-only mode, each range only contains one memtable by default.
for (i, memtable) in memtables.iter().enumerate() {
let stats = memtable.stats();
let Some(time_range) = stats.time_range() else {
@@ -195,6 +217,7 @@ impl RangeMeta {
}
}
// TODO(yingwen): Support multiple row groups in a range so we can split them later.
fn push_seq_file_ranges(
num_memtables: usize,
files: &[FileHandle],
@@ -264,3 +287,83 @@ fn maybe_split_ranges_for_seq_scan(ranges: Vec<RangeMeta>) -> Vec<RangeMeta> {
new_ranges
}
#[cfg(test)]
mod tests {
use common_time::timestamp::TimeUnit;
use common_time::Timestamp;
use super::*;
type Output = (Vec<usize>, i64, i64);
fn run_group_ranges_test(input: &[(usize, i64, i64)], expect: &[Output]) {
let ranges = input
.iter()
.map(|(idx, start, end)| {
let time_range = (
Timestamp::new(*start, TimeUnit::Second),
Timestamp::new(*end, TimeUnit::Second),
);
RangeMeta {
time_range,
indices: smallvec![*idx],
row_group_indices: smallvec![RowGroupIndex {
index: *idx,
row_group_index: 0
}],
num_rows: 1,
}
})
.collect();
let output = group_ranges_for_seq_scan(ranges);
let actual: Vec<_> = output
.iter()
.map(|range| {
let indices = range.indices.to_vec();
let group_indices: Vec<_> = range
.row_group_indices
.iter()
.map(|idx| idx.index)
.collect();
assert_eq!(indices, group_indices);
let range = range.time_range;
(indices, range.0.value(), range.1.value())
})
.collect();
assert_eq!(expect, actual);
}
#[test]
fn test_group_ranges() {
// Group 1 part.
run_group_ranges_test(&[(1, 0, 2000)], &[(vec![1], 0, 2000)]);
// 1, 2, 3, 4 => [3, 1, 4], [2]
run_group_ranges_test(
&[
(1, 1000, 2000),
(2, 6000, 7000),
(3, 0, 1500),
(4, 1500, 3000),
],
&[(vec![3, 1, 4], 0, 3000), (vec![2], 6000, 7000)],
);
// 1, 2, 3 => [3], [1], [2],
run_group_ranges_test(
&[(1, 3000, 4000), (2, 4001, 6000), (3, 0, 1000)],
&[
(vec![3], 0, 1000),
(vec![1], 3000, 4000),
(vec![2], 4001, 6000),
],
);
// 1, 2, 3 => [3], [1, 2]
run_group_ranges_test(
&[(1, 3000, 4000), (2, 4000, 6000), (3, 0, 1000)],
&[(vec![3], 0, 1000), (vec![1, 2], 3000, 6000)],
);
}
}

View File

@@ -17,14 +17,12 @@
use std::collections::{BTreeMap, HashSet};
use std::fmt;
use std::sync::{Arc, Mutex as StdMutex};
use std::time::{Duration, Instant};
use std::time::Instant;
use common_error::ext::BoxedError;
use common_recordbatch::SendableRecordBatchStream;
use common_telemetry::{debug, error, tracing, warn};
use common_time::range::TimestampRange;
use common_time::Timestamp;
use datafusion::physical_plan::DisplayFormatType;
use datafusion_expr::utils::expr_to_columns;
use parquet::arrow::arrow_reader::RowSelection;
use smallvec::SmallVec;
@@ -48,7 +46,7 @@ use crate::read::unordered_scan::UnorderedScan;
use crate::read::{Batch, Source};
use crate::region::options::MergeMode;
use crate::region::version::VersionRef;
use crate::sst::file::{overlaps, FileHandle, FileMeta};
use crate::sst::file::FileHandle;
use crate::sst::index::fulltext_index::applier::builder::FulltextIndexApplierBuilder;
use crate::sst::index::fulltext_index::applier::FulltextIndexApplierRef;
use crate::sst::index::inverted_index::applier::builder::InvertedIndexApplierBuilder;
@@ -700,73 +698,6 @@ impl ScanInput {
})
}
/// Prunes file ranges to scan and adds them to the `collector`.
pub(crate) async fn prune_file_ranges(
&self,
collector: &mut impl FileRangeCollector,
) -> Result<ReaderMetrics> {
let mut file_prune_cost = Duration::ZERO;
let mut reader_metrics = ReaderMetrics::default();
for file in &self.files {
let prune_start = Instant::now();
let res = self
.access_layer
.read_sst(file.clone())
.predicate(self.predicate.clone())
.time_range(self.time_range)
.projection(Some(self.mapper.column_ids().to_vec()))
.cache(self.cache_manager.clone())
.inverted_index_applier(self.inverted_index_applier.clone())
.fulltext_index_applier(self.fulltext_index_applier.clone())
.expected_metadata(Some(self.mapper.metadata().clone()))
.build_reader_input(&mut reader_metrics)
.await;
file_prune_cost += prune_start.elapsed();
let (mut file_range_ctx, row_groups) = match res {
Ok(x) => x,
Err(e) => {
if e.is_object_not_found() && self.ignore_file_not_found {
error!(e; "File to scan does not exist, region_id: {}, file: {}", file.region_id(), file.file_id());
continue;
} else {
return Err(e);
}
}
};
if !compat::has_same_columns(
self.mapper.metadata(),
file_range_ctx.read_format().metadata(),
) {
// They have different schemas. We need to adapt the batch first so the
// mapper can convert it.
let compat = CompatBatch::new(
&self.mapper,
file_range_ctx.read_format().metadata().clone(),
)?;
file_range_ctx.set_compat_batch(Some(compat));
}
// Build ranges from row groups.
let file_range_ctx = Arc::new(file_range_ctx);
let file_ranges = row_groups
.into_iter()
.map(|(row_group_idx, row_selection)| {
FileRange::new(file_range_ctx.clone(), row_group_idx, row_selection)
});
collector.append_file_ranges(file.meta_ref(), file_ranges);
}
READ_SST_COUNT.observe(self.files.len() as f64);
common_telemetry::debug!(
"Region {} prune {} files, cost is {:?}",
self.mapper.metadata().region_id,
self.files.len(),
file_prune_cost
);
Ok(reader_metrics)
}
/// Scans the input source in another task and sends batches to the sender.
pub(crate) fn spawn_scan_task(
&self,
@@ -806,10 +737,7 @@ impl ScanInput {
pub(crate) fn predicate(&self) -> Option<Predicate> {
self.predicate.clone()
}
}
#[cfg(test)]
impl ScanInput {
/// Returns number of memtables to scan.
pub(crate) fn num_memtables(&self) -> usize {
self.memtables.len()
@@ -819,166 +747,21 @@ impl ScanInput {
pub(crate) fn num_files(&self) -> usize {
self.files.len()
}
}
#[cfg(test)]
impl ScanInput {
/// Returns SST file ids to scan.
pub(crate) fn file_ids(&self) -> Vec<crate::sst::file::FileId> {
self.files.iter().map(|file| file.file_id()).collect()
}
}
/// Groups of file ranges. Each group in the list contains multiple file
/// ranges to scan. File ranges in the same group may come from different files.
pub(crate) type FileRangesGroup = SmallVec<[Vec<FileRange>; 4]>;
/// A partition of a scanner to read.
/// It contains memtables and file ranges to scan.
#[derive(Clone, Default)]
pub(crate) struct ScanPart {
/// Memtable ranges to scan.
pub(crate) memtable_ranges: Vec<MemtableRange>,
/// File ranges to scan.
pub(crate) file_ranges: FileRangesGroup,
/// Optional time range of the part (inclusive).
pub(crate) time_range: Option<(Timestamp, Timestamp)>,
}
impl fmt::Debug for ScanPart {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"ScanPart({} memtable ranges, {} file ranges",
self.memtable_ranges.len(),
self.file_ranges
.iter()
.map(|ranges| ranges.len())
.sum::<usize>(),
)?;
if let Some(time_range) = &self.time_range {
write!(f, ", time range: {:?})", time_range)
} else {
write!(f, ")")
}
}
}
impl ScanPart {
/// Returns true if the time range of the given `part` overlaps with this part.
pub(crate) fn overlaps(&self, part: &ScanPart) -> bool {
let (Some(current_range), Some(part_range)) = (self.time_range, part.time_range) else {
return true;
};
overlaps(&current_range, &part_range)
}
/// Merges given `part` to this part.
pub(crate) fn merge(&mut self, mut part: ScanPart) {
self.memtable_ranges.append(&mut part.memtable_ranges);
self.file_ranges.append(&mut part.file_ranges);
let Some(part_range) = part.time_range else {
return;
};
let Some(current_range) = self.time_range else {
self.time_range = part.time_range;
return;
};
let start = current_range.0.min(part_range.0);
let end = current_range.1.max(part_range.1);
self.time_range = Some((start, end));
}
/// Returns true if we can split the part into multiple parts
/// while preserving order.
pub(crate) fn can_split_preserve_order(&self) -> bool {
self.memtable_ranges.is_empty()
&& self.file_ranges.len() == 1
&& self.file_ranges[0].len() > 1
}
}
/// A trait to collect file ranges to scan.
pub(crate) trait FileRangeCollector {
/// Appends file ranges from the **same file** to the collector.
fn append_file_ranges(
&mut self,
file_meta: &FileMeta,
file_ranges: impl Iterator<Item = FileRange>,
);
}
/// Optional list of [ScanPart]s.
#[derive(Default)]
pub(crate) struct ScanPartList(pub(crate) Option<Vec<ScanPart>>);
impl fmt::Debug for ScanPartList {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match &self.0 {
Some(parts) => write!(f, "{:?}", parts),
None => write!(f, "[]"),
}
}
}
impl ScanPartList {
/// Returns true if the list is None.
pub(crate) fn is_none(&self) -> bool {
self.0.is_none()
}
/// Sets parts to the list.
pub(crate) fn set_parts(&mut self, parts: Vec<ScanPart>) {
self.0 = Some(parts);
}
/// Gets the part by index, returns None if the index is out of bound.
/// # Panics
/// Panics if parts are not initialized.
pub(crate) fn get_part(&mut self, index: usize) -> Option<&ScanPart> {
let parts = self.0.as_ref().unwrap();
parts.get(index)
}
/// Returns the number of parts.
pub(crate) fn len(&self) -> usize {
self.0.as_ref().map_or(0, |parts| parts.len())
}
/// Returns the number of memtable ranges.
pub(crate) fn num_mem_ranges(&self) -> usize {
self.0.as_ref().map_or(0, |parts| {
parts.iter().map(|part| part.memtable_ranges.len()).sum()
})
}
/// Returns the number of files.
pub(crate) fn num_files(&self) -> usize {
self.0.as_ref().map_or(0, |parts| {
parts.iter().map(|part| part.file_ranges.len()).sum()
})
}
/// Returns the number of file ranges.
pub(crate) fn num_file_ranges(&self) -> usize {
self.0.as_ref().map_or(0, |parts| {
parts
.iter()
.flat_map(|part| part.file_ranges.iter())
.map(|ranges| ranges.len())
.sum()
})
}
}
/// Context shared by different streams from a scanner.
/// It contains the input and distributes input to multiple parts
/// to scan.
/// It contains the input and ranges to scan.
pub(crate) struct StreamContext {
/// Input memtables and files.
pub(crate) input: ScanInput,
/// Parts to scan and the cost to build parts.
/// The scanner builds parts to scan from the input lazily.
/// The mutex is used to ensure the parts are only built once.
pub(crate) parts: Mutex<(ScanPartList, Duration)>,
/// Metadata for partition ranges.
pub(crate) ranges: Vec<RangeMeta>,
/// Lists of range builders.
@@ -994,12 +777,11 @@ impl StreamContext {
pub(crate) fn seq_scan_ctx(input: ScanInput) -> Self {
let query_start = input.query_start.unwrap_or_else(Instant::now);
let ranges = RangeMeta::seq_scan_ranges(&input);
READ_SST_COUNT.observe(input.files.len() as f64);
let range_builders = RangeBuilderList::new(input.memtables.len(), input.files.len());
READ_SST_COUNT.observe(input.num_files() as f64);
let range_builders = RangeBuilderList::new(input.num_memtables(), input.num_files());
Self {
input,
parts: Mutex::new((ScanPartList::default(), Duration::default())),
ranges,
range_builders,
query_start,
@@ -1010,12 +792,11 @@ impl StreamContext {
pub(crate) fn unordered_scan_ctx(input: ScanInput) -> Self {
let query_start = input.query_start.unwrap_or_else(Instant::now);
let ranges = RangeMeta::unordered_scan_ranges(&input);
READ_SST_COUNT.observe(input.files.len() as f64);
let range_builders = RangeBuilderList::new(input.memtables.len(), input.files.len());
READ_SST_COUNT.observe(input.num_files() as f64);
let range_builders = RangeBuilderList::new(input.num_memtables(), input.num_files());
Self {
input,
parts: Mutex::new((ScanPartList::default(), Duration::default())),
ranges,
range_builders,
query_start,
@@ -1024,27 +805,28 @@ impl StreamContext {
/// Returns true if the index refers to a memtable.
pub(crate) fn is_mem_range_index(&self, index: RowGroupIndex) -> bool {
self.input.memtables.len() > index.index
self.input.num_memtables() > index.index
}
/// Creates file ranges to scan.
pub(crate) async fn build_file_ranges(
&self,
index: RowGroupIndex,
ranges: &mut Vec<FileRange>,
reader_metrics: &mut ReaderMetrics,
) -> Result<()> {
ranges.clear();
) -> Result<SmallVec<[FileRange; 2]>> {
let mut ranges = SmallVec::new();
self.range_builders
.build_file_ranges(&self.input, index, ranges, reader_metrics)
.await
.build_file_ranges(&self.input, index, &mut ranges, reader_metrics)
.await?;
Ok(ranges)
}
/// Creates memtable ranges to scan.
pub(crate) fn build_mem_ranges(&self, index: RowGroupIndex, ranges: &mut Vec<MemtableRange>) {
ranges.clear();
pub(crate) fn build_mem_ranges(&self, index: RowGroupIndex) -> SmallVec<[MemtableRange; 2]> {
let mut ranges = SmallVec::new();
self.range_builders
.build_mem_ranges(&self.input, index, ranges)
.build_mem_ranges(&self.input, index, &mut ranges);
ranges
}
/// Retrieves the partition ranges.
@@ -1052,35 +834,30 @@ impl StreamContext {
self.ranges
.iter()
.enumerate()
.map(|(idx, range_meta)| PartitionRange {
start: range_meta.time_range.0,
end: range_meta.time_range.1,
num_rows: range_meta.num_rows,
identifier: idx,
})
.map(|(idx, range_meta)| range_meta.new_partition_range(idx))
.collect()
}
/// Format the context for explain.
pub(crate) fn format_for_explain(
&self,
t: DisplayFormatType,
f: &mut fmt::Formatter,
) -> fmt::Result {
match self.parts.try_lock() {
Ok(inner) => match t {
DisplayFormatType::Default => write!(
f,
"partition_count={} ({} memtable ranges, {} file {} ranges)",
inner.0.len(),
inner.0.num_mem_ranges(),
inner.0.num_files(),
inner.0.num_file_ranges()
)?,
DisplayFormatType::Verbose => write!(f, "{:?}", inner.0)?,
},
Err(_) => write!(f, "<locked>")?,
pub(crate) fn format_for_explain(&self, f: &mut fmt::Formatter) -> fmt::Result {
let (mut num_mem_ranges, mut num_file_ranges) = (0, 0);
for range_meta in &self.ranges {
for idx in &range_meta.row_group_indices {
if self.is_mem_range_index(*idx) {
num_mem_ranges += 1;
} else {
num_file_ranges += 1;
}
}
}
write!(
f,
"partition_count={} ({} memtable ranges, {} file {} ranges)",
self.ranges.len(),
num_mem_ranges,
self.input.num_files(),
num_file_ranges,
)?;
if let Some(selector) = &self.input.series_row_selector {
write!(f, ", selector={}", selector)?;
}
@@ -1110,7 +887,7 @@ impl RangeBuilderList {
&self,
input: &ScanInput,
index: RowGroupIndex,
ranges: &mut Vec<FileRange>,
ranges: &mut SmallVec<[FileRange; 2]>,
reader_metrics: &mut ReaderMetrics,
) -> Result<()> {
let file_index = index.index - self.mem_builders.len();
@@ -1131,7 +908,7 @@ impl RangeBuilderList {
&self,
input: &ScanInput,
index: RowGroupIndex,
ranges: &mut Vec<MemtableRange>,
ranges: &mut SmallVec<[MemtableRange; 2]>,
) {
let mut builder_opt = self.mem_builders[index.index].lock().unwrap();
match &mut *builder_opt {
@@ -1159,7 +936,7 @@ struct FileRangeBuilder {
impl FileRangeBuilder {
/// Builds file ranges to read.
/// Negative `row_group_index` indicates all row groups.
fn build_ranges(&self, row_group_index: i64, ranges: &mut Vec<FileRange>) {
fn build_ranges(&self, row_group_index: i64, ranges: &mut SmallVec<[FileRange; 2]>) {
let Some(context) = self.context.clone() else {
return;
};
@@ -1196,7 +973,7 @@ struct MemRangeBuilder {
impl MemRangeBuilder {
/// Builds mem ranges to read in the memtable.
/// Negative `row_group_index` indicates all row groups.
fn build_ranges(&self, row_group_index: i64, ranges: &mut Vec<MemtableRange>) {
fn build_ranges(&self, row_group_index: i64, ranges: &mut SmallVec<[MemtableRange; 2]>) {
if row_group_index >= 0 {
let row_group_index = row_group_index as usize;
// Scans one row group.

View File

@@ -0,0 +1,182 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Utilities for scanners.
use std::sync::{Arc, Mutex};
use std::time::{Duration, Instant};
use async_stream::try_stream;
use common_telemetry::debug;
use futures::Stream;
use store_api::storage::RegionId;
use crate::error::Result;
use crate::read::range::RowGroupIndex;
use crate::read::scan_region::StreamContext;
use crate::read::{Batch, ScannerMetrics, Source};
use crate::sst::parquet::reader::ReaderMetrics;
struct PartitionMetricsInner {
region_id: RegionId,
/// Index of the partition to scan.
partition: usize,
/// Label to distinguish different scan operations.
scanner_type: &'static str,
/// Query start time.
query_start: Instant,
/// Elapsed time before the first poll operation.
first_poll: Duration,
metrics: ScannerMetrics,
reader_metrics: ReaderMetrics,
}
impl PartitionMetricsInner {
fn on_finish(&mut self) {
if self.metrics.total_cost.is_zero() {
self.metrics.total_cost = self.query_start.elapsed();
}
self.metrics.build_parts_cost = self.reader_metrics.build_cost;
}
}
impl Drop for PartitionMetricsInner {
fn drop(&mut self) {
self.on_finish();
self.metrics.observe_metrics();
debug!(
"{} finished, region_id: {}, partition: {}, first_poll: {:?}, metrics: {:?}, reader_metrics: {:?}",
self.scanner_type, self.region_id, self.partition, self.first_poll, self.metrics, self.reader_metrics
);
}
}
/// Metrics while reading a partition.
#[derive(Clone)]
pub(crate) struct PartitionMetrics(Arc<Mutex<PartitionMetricsInner>>);
impl PartitionMetrics {
pub(crate) fn new(
region_id: RegionId,
partition: usize,
scanner_type: &'static str,
query_start: Instant,
metrics: ScannerMetrics,
) -> Self {
let inner = PartitionMetricsInner {
region_id,
partition,
scanner_type,
query_start,
first_poll: Duration::default(),
metrics,
reader_metrics: ReaderMetrics::default(),
};
Self(Arc::new(Mutex::new(inner)))
}
pub(crate) fn on_first_poll(&self) {
let mut inner = self.0.lock().unwrap();
inner.first_poll = inner.query_start.elapsed();
}
pub(crate) fn inc_num_mem_ranges(&self, num: usize) {
let mut inner = self.0.lock().unwrap();
inner.metrics.num_mem_ranges += num;
}
pub(crate) fn inc_num_file_ranges(&self, num: usize) {
let mut inner = self.0.lock().unwrap();
inner.metrics.num_file_ranges += num;
}
pub(crate) fn inc_build_reader_cost(&self, cost: Duration) {
let mut inner = self.0.lock().unwrap();
inner.metrics.build_reader_cost += cost;
}
pub(crate) fn merge_metrics(&self, metrics: &ScannerMetrics) {
let mut inner = self.0.lock().unwrap();
inner.metrics.merge_from(metrics);
}
pub(crate) fn merge_reader_metrics(&self, metrics: &ReaderMetrics) {
let mut inner = self.0.lock().unwrap();
inner.reader_metrics.merge_from(metrics);
}
pub(crate) fn on_finish(&self) {
let mut inner = self.0.lock().unwrap();
inner.on_finish();
}
}
/// Scans memtable ranges at `index`.
pub(crate) fn scan_mem_ranges(
stream_ctx: Arc<StreamContext>,
part_metrics: PartitionMetrics,
index: RowGroupIndex,
) -> impl Stream<Item = Result<Batch>> {
try_stream! {
let ranges = stream_ctx.build_mem_ranges(index);
part_metrics.inc_num_mem_ranges(ranges.len());
for range in ranges {
let build_reader_start = Instant::now();
let iter = range.build_iter()?;
part_metrics.inc_build_reader_cost(build_reader_start.elapsed());
let mut source = Source::Iter(iter);
while let Some(batch) = source.next_batch().await? {
yield batch;
}
}
}
}
/// Scans file ranges at `index`.
pub(crate) fn scan_file_ranges(
stream_ctx: Arc<StreamContext>,
part_metrics: PartitionMetrics,
index: RowGroupIndex,
read_type: &'static str,
) -> impl Stream<Item = Result<Batch>> {
try_stream! {
let mut reader_metrics = ReaderMetrics::default();
let ranges = stream_ctx
.build_file_ranges(index, &mut reader_metrics)
.await?;
part_metrics.inc_num_file_ranges(ranges.len());
for range in ranges {
let build_reader_start = Instant::now();
let reader = range.reader(None).await?;
part_metrics.inc_build_reader_cost(build_reader_start.elapsed());
let compat_batch = range.compat_batch();
let mut source = Source::PruneReader(reader);
while let Some(mut batch) = source.next_batch().await? {
if let Some(compact_batch) = compat_batch {
batch = compact_batch.compat_batch(batch)?;
}
yield batch;
}
if let Source::PruneReader(mut reader) = source {
reader_metrics.merge_from(reader.metrics());
}
}
// Reports metrics.
reader_metrics.observe_rows(read_type);
part_metrics.merge_reader_metrics(&reader_metrics);
}
}
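The `PartitionMetrics` handle above wraps its counters in `Arc<Mutex<..>>`: every per-range stream clones the handle, all clones update the same counters, and the inner `Drop` impl reports the merged metrics exactly once when the last clone goes away. A minimal, self-contained sketch of that pattern (generic names only, not the crate's types):

use std::sync::{Arc, Mutex};

// Sketch of the report-on-last-drop pattern used by PartitionMetrics above.
struct Inner {
    num_file_ranges: usize,
}

impl Drop for Inner {
    fn drop(&mut self) {
        // Runs once, when the last Handle clone is released.
        println!("report metrics once: num_file_ranges={}", self.num_file_ranges);
    }
}

#[derive(Clone)]
struct Handle(Arc<Mutex<Inner>>);

impl Handle {
    fn inc_num_file_ranges(&self, num: usize) {
        self.0.lock().unwrap().num_file_ranges += num;
    }
}

fn main() {
    let metrics = Handle(Arc::new(Mutex::new(Inner { num_file_ranges: 0 })));
    let for_stream = metrics.clone(); // cheap clone, shares the same counters
    for_stream.inc_num_file_ranges(2);
    drop(for_stream);
    drop(metrics); // last handle dropped => Inner::drop reports the totals
}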

View File

@@ -16,36 +16,29 @@
use std::fmt;
use std::sync::Arc;
use std::time::{Duration, Instant};
use std::time::Instant;
use async_stream::try_stream;
use common_error::ext::BoxedError;
use common_recordbatch::error::ExternalSnafu;
use common_recordbatch::util::ChainedRecordBatchStream;
use common_recordbatch::{RecordBatchStreamWrapper, SendableRecordBatchStream};
use common_telemetry::{debug, tracing};
use common_telemetry::tracing;
use datafusion::physical_plan::{DisplayAs, DisplayFormatType};
use datatypes::schema::SchemaRef;
use smallvec::smallvec;
use snafu::ResultExt;
use store_api::region_engine::{PartitionRange, RegionScanner, ScannerProperties};
use store_api::storage::{ColumnId, TimeSeriesRowSelector};
use table::predicate::Predicate;
use store_api::storage::TimeSeriesRowSelector;
use tokio::sync::Semaphore;
use crate::error::{PartitionOutOfRangeSnafu, Result};
use crate::memtable::MemtableRef;
use crate::read::dedup::{DedupReader, LastNonNull, LastRow};
use crate::read::last_row::LastRowReader;
use crate::read::merge::MergeReaderBuilder;
use crate::read::scan_region::{
FileRangeCollector, ScanInput, ScanPart, ScanPartList, StreamContext,
};
use crate::read::scan_region::{ScanInput, StreamContext};
use crate::read::scan_util::{scan_file_ranges, scan_mem_ranges, PartitionMetrics};
use crate::read::{BatchReader, BoxedBatchReader, ScannerMetrics, Source};
use crate::region::options::MergeMode;
use crate::sst::file::FileMeta;
use crate::sst::parquet::file_range::FileRange;
use crate::sst::parquet::reader::ReaderMetrics;
/// Scans a region and returns rows in a sorted sequence.
///
@@ -66,6 +59,8 @@ pub struct SeqScan {
impl SeqScan {
/// Creates a new [SeqScan].
pub(crate) fn new(input: ScanInput) -> Self {
// TODO(yingwen): Set permits according to partition num. But we need to support file
// level parallelism.
let parallelism = input.parallelism.parallelism.max(1);
let mut properties = ScannerProperties::default()
.with_append_mode(input.append_mode)
@@ -102,150 +97,49 @@ impl SeqScan {
/// Builds a [BoxedBatchReader] from sequential scan for compaction.
pub async fn build_reader(&self) -> Result<BoxedBatchReader> {
let mut metrics = ScannerMetrics {
prepare_scan_cost: self.stream_ctx.query_start.elapsed(),
..Default::default()
};
let maybe_reader = Self::build_all_merge_reader(
let part_metrics = PartitionMetrics::new(
self.stream_ctx.input.mapper.metadata().region_id,
0,
get_scanner_type(self.compaction),
self.stream_ctx.query_start,
ScannerMetrics {
prepare_scan_cost: self.stream_ctx.query_start.elapsed(),
..Default::default()
},
);
debug_assert_eq!(1, self.properties.partitions.len());
let partition_ranges = &self.properties.partitions[0];
let reader = Self::build_all_merge_reader(
&self.stream_ctx,
partition_ranges,
self.semaphore.clone(),
&mut metrics,
self.compaction,
self.properties.num_partitions(),
&part_metrics,
)
.await?;
// Safety: `build_merge_reader()` always returns a reader if partition is None.
let reader = maybe_reader.unwrap();
Ok(Box::new(reader))
}
/// Builds sources from a [ScanPart].
fn build_part_sources(
part: &ScanPart,
sources: &mut Vec<Source>,
row_selector: Option<TimeSeriesRowSelector>,
compaction: bool,
) -> Result<()> {
sources.reserve(part.memtable_ranges.len() + part.file_ranges.len());
// Read memtables.
for mem in &part.memtable_ranges {
let iter = mem.build_iter()?;
sources.push(Source::Iter(iter));
}
let read_type = if compaction {
"compaction"
} else {
"seq_scan_files"
};
// Read files.
for file in &part.file_ranges {
if file.is_empty() {
continue;
}
// Creates a stream to read the file.
let ranges = file.clone();
let stream = try_stream! {
let mut reader_metrics = ReaderMetrics::default();
// Safety: We checked whether it is empty before.
let file_id = ranges[0].file_handle().file_id();
let region_id = ranges[0].file_handle().region_id();
let range_num = ranges.len();
for range in ranges {
let mut reader = range.reader(row_selector).await?;
let compat_batch = range.compat_batch();
while let Some(mut batch) = reader.next_batch().await? {
if let Some(compat) = compat_batch {
batch = compat
.compat_batch(batch)?;
}
yield batch;
}
reader_metrics.merge_from(reader.metrics());
}
debug!(
"Seq scan region {}, file {}, {} ranges finished, metrics: {:?}, compaction: {}",
region_id, file_id, range_num, reader_metrics, compaction
);
// Reports metrics.
reader_metrics.observe_rows(read_type);
};
let stream = Box::pin(stream);
sources.push(Source::Stream(stream));
}
Ok(())
}
/// Builds a merge reader that reads all data.
async fn build_all_merge_reader(
stream_ctx: &StreamContext,
stream_ctx: &Arc<StreamContext>,
partition_ranges: &[PartitionRange],
semaphore: Arc<Semaphore>,
metrics: &mut ScannerMetrics,
compaction: bool,
parallelism: usize,
) -> Result<Option<BoxedBatchReader>> {
// initialize parts list
let mut parts = stream_ctx.parts.lock().await;
Self::maybe_init_parts(&stream_ctx.input, &mut parts, metrics, parallelism).await?;
let parts_len = parts.0.len();
let mut sources = Vec::with_capacity(parts_len);
for id in 0..parts_len {
let Some(part) = parts.0.get_part(id) else {
return Ok(None);
};
Self::build_part_sources(part, &mut sources, None, compaction)?;
}
Self::build_reader_from_sources(stream_ctx, sources, semaphore).await
}
/// Builds a merge reader that reads data from one [`PartitionRange`].
///
/// If the `range_id` is out of bound, returns None.
async fn build_merge_reader(
stream_ctx: &StreamContext,
range_id: usize,
semaphore: Arc<Semaphore>,
metrics: &mut ScannerMetrics,
compaction: bool,
parallelism: usize,
) -> Result<Option<BoxedBatchReader>> {
part_metrics: &PartitionMetrics,
) -> Result<BoxedBatchReader> {
let mut sources = Vec::new();
let build_start = {
let mut parts = stream_ctx.parts.lock().await;
Self::maybe_init_parts(&stream_ctx.input, &mut parts, metrics, parallelism).await?;
let Some(part) = parts.0.get_part(range_id) else {
return Ok(None);
};
let build_start = Instant::now();
Self::build_part_sources(
part,
&mut sources,
stream_ctx.input.series_row_selector,
for part_range in partition_ranges {
build_sources(
stream_ctx,
part_range,
compaction,
)?;
build_start
};
let maybe_reader = Self::build_reader_from_sources(stream_ctx, sources, semaphore).await;
let build_reader_cost = build_start.elapsed();
metrics.build_reader_cost += build_reader_cost;
debug!(
"Build reader region: {}, range_id: {}, from sources, build_reader_cost: {:?}, compaction: {}",
stream_ctx.input.mapper.metadata().region_id,
range_id,
build_reader_cost,
compaction,
);
maybe_reader
part_metrics,
&mut sources,
);
}
Self::build_reader_from_sources(stream_ctx, sources, semaphore).await
}
#[tracing::instrument(level = tracing::Level::DEBUG, skip_all)]
@@ -253,7 +147,7 @@ impl SeqScan {
stream_ctx: &StreamContext,
mut sources: Vec<Source>,
semaphore: Arc<Semaphore>,
) -> Result<Option<BoxedBatchReader>> {
) -> Result<BoxedBatchReader> {
if stream_ctx.input.parallelism.parallelism > 1 {
// Read sources in parallel. We always spawn a task so we can control the parallelism
// by the semaphore.
@@ -286,13 +180,11 @@ impl SeqScan {
None => reader,
};
Ok(Some(reader))
Ok(reader)
}
/// Scans the given partition when the part list is set properly.
/// Otherwise the returned stream might not contain any data.
// TODO: refactor out `uncached_scan_part_impl`.
#[allow(dead_code)]
fn scan_partition_impl(
&self,
partition: usize,
@@ -307,28 +199,36 @@ impl SeqScan {
));
}
let mut metrics = ScannerMetrics {
prepare_scan_cost: self.stream_ctx.query_start.elapsed(),
..Default::default()
};
let stream_ctx = self.stream_ctx.clone();
let semaphore = self.semaphore.clone();
let partition_ranges = self.properties.partitions[partition].clone();
let compaction = self.compaction;
let parallelism = self.properties.num_partitions();
let stream = try_stream! {
let first_poll = stream_ctx.query_start.elapsed();
let part_metrics = PartitionMetrics::new(
self.stream_ctx.input.mapper.metadata().region_id,
partition,
get_scanner_type(self.compaction),
stream_ctx.query_start,
ScannerMetrics {
prepare_scan_cost: self.stream_ctx.query_start.elapsed(),
..Default::default()
},
);
for partition_range in partition_ranges {
let maybe_reader =
Self::build_merge_reader(&stream_ctx, partition_range.identifier, semaphore.clone(), &mut metrics, compaction, parallelism)
let stream = try_stream! {
part_metrics.on_first_poll();
// Scans each part.
for part_range in partition_ranges {
let mut sources = Vec::new();
build_sources(&stream_ctx, &part_range, compaction, &part_metrics, &mut sources);
let mut reader =
Self::build_reader_from_sources(&stream_ctx, sources, semaphore.clone())
.await
.map_err(BoxedError::new)
.context(ExternalSnafu)?;
let Some(mut reader) = maybe_reader else {
return;
};
let cache = stream_ctx.input.cache_manager.as_deref();
let mut metrics = ScannerMetrics::default();
let mut fetch_start = Instant::now();
while let Some(batch) = reader
.next_batch()
@@ -350,18 +250,10 @@ impl SeqScan {
fetch_start = Instant::now();
}
metrics.scan_cost += fetch_start.elapsed();
metrics.total_cost = stream_ctx.query_start.elapsed();
metrics.observe_metrics_on_finish();
debug!(
"Seq scan finished, region_id: {:?}, partition: {}, metrics: {:?}, first_poll: {:?}, compaction: {}",
stream_ctx.input.mapper.metadata().region_id,
partition,
metrics,
first_poll,
compaction,
);
part_metrics.merge_metrics(&metrics);
}
part_metrics.on_finish();
};
let stream = Box::pin(RecordBatchStreamWrapper::new(
@@ -371,133 +263,6 @@ impl SeqScan {
Ok(stream)
}
/// Scans the given partition when the part list is not set.
/// This method lazily initializes the part list and
/// ignores the partition settings in `properties`.
fn uncached_scan_part_impl(
&self,
partition: usize,
) -> Result<SendableRecordBatchStream, BoxedError> {
let num_partitions = self.properties.partitions.len();
if partition >= num_partitions {
return Err(BoxedError::new(
PartitionOutOfRangeSnafu {
given: partition,
all: self.properties.partitions.len(),
}
.build(),
));
}
let mut metrics = ScannerMetrics {
prepare_scan_cost: self.stream_ctx.query_start.elapsed(),
..Default::default()
};
let stream_ctx = self.stream_ctx.clone();
let semaphore = self.semaphore.clone();
let compaction = self.compaction;
let parallelism = self.properties.num_partitions();
// build stream
let stream = try_stream! {
let first_poll = stream_ctx.query_start.elapsed();
// init parts
let parts_len = {
let mut parts = stream_ctx.parts.lock().await;
Self::maybe_init_parts(&stream_ctx.input, &mut parts, &mut metrics, parallelism).await
.map_err(BoxedError::new)
.context(ExternalSnafu)?;
parts.0.len()
};
for id in (0..parts_len).skip(partition).step_by(num_partitions) {
let maybe_reader = Self::build_merge_reader(
&stream_ctx,
id,
semaphore.clone(),
&mut metrics,
compaction,
parallelism
)
.await
.map_err(BoxedError::new)
.context(ExternalSnafu)?;
let Some(mut reader) = maybe_reader else {
return;
};
let cache = stream_ctx.input.cache_manager.as_deref();
let mut fetch_start = Instant::now();
while let Some(batch) = reader
.next_batch()
.await
.map_err(BoxedError::new)
.context(ExternalSnafu)?
{
metrics.scan_cost += fetch_start.elapsed();
metrics.num_batches += 1;
metrics.num_rows += batch.num_rows();
let convert_start = Instant::now();
let record_batch = stream_ctx.input.mapper.convert(&batch, cache)?;
metrics.convert_cost += convert_start.elapsed();
let yield_start = Instant::now();
yield record_batch;
metrics.yield_cost += yield_start.elapsed();
fetch_start = Instant::now();
}
metrics.scan_cost += fetch_start.elapsed();
metrics.total_cost = stream_ctx.query_start.elapsed();
metrics.observe_metrics_on_finish();
debug!(
"Seq scan finished, region_id: {}, partition: {}, id: {}, metrics: {:?}, first_poll: {:?}, compaction: {}",
stream_ctx.input.mapper.metadata().region_id,
partition,
id,
metrics,
first_poll,
compaction,
);
}
};
let stream = Box::pin(RecordBatchStreamWrapper::new(
self.stream_ctx.input.mapper.output_schema(),
Box::pin(stream),
));
Ok(stream)
}
/// Initializes parts if they are not built yet.
async fn maybe_init_parts(
input: &ScanInput,
part_list: &mut (ScanPartList, Duration),
metrics: &mut ScannerMetrics,
parallelism: usize,
) -> Result<()> {
if part_list.0.is_none() {
let now = Instant::now();
let mut distributor = SeqDistributor::default();
let reader_metrics = input.prune_file_ranges(&mut distributor).await?;
distributor.append_mem_ranges(
&input.memtables,
Some(input.mapper.column_ids()),
input.predicate.clone(),
);
part_list.0.set_parts(distributor.build_parts(parallelism));
let build_part_cost = now.elapsed();
part_list.1 = build_part_cost;
metrics.observe_init_part(build_part_cost, &reader_metrics);
} else {
// Updates the cost of building parts.
metrics.build_parts_cost = part_list.1;
}
Ok(())
}
}
impl RegionScanner for SeqScan {
@@ -510,7 +275,7 @@ impl RegionScanner for SeqScan {
}
fn scan_partition(&self, partition: usize) -> Result<SendableRecordBatchStream, BoxedError> {
self.uncached_scan_part_impl(partition)
self.scan_partition_impl(partition)
}
fn prepare(&mut self, ranges: Vec<Vec<PartitionRange>>) -> Result<(), BoxedError> {
@@ -525,24 +290,53 @@ impl RegionScanner for SeqScan {
}
impl DisplayAs for SeqScan {
fn fmt_as(&self, t: DisplayFormatType, f: &mut fmt::Formatter) -> fmt::Result {
fn fmt_as(&self, _t: DisplayFormatType, f: &mut fmt::Formatter) -> fmt::Result {
write!(
f,
"SeqScan: region={}, ",
self.stream_ctx.input.mapper.metadata().region_id
)?;
self.stream_ctx.format_for_explain(t, f)
self.stream_ctx.format_for_explain(f)
}
}
impl fmt::Debug for SeqScan {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("SeqScan")
.field("parts", &self.stream_ctx.parts)
.field("num_ranges", &self.stream_ctx.ranges.len())
.finish()
}
}
/// Builds sources for the partition range.
fn build_sources(
stream_ctx: &Arc<StreamContext>,
part_range: &PartitionRange,
compaction: bool,
part_metrics: &PartitionMetrics,
sources: &mut Vec<Source>,
) {
// Gets range meta.
let range_meta = &stream_ctx.ranges[part_range.identifier];
sources.reserve(range_meta.row_group_indices.len());
for index in &range_meta.row_group_indices {
let stream = if stream_ctx.is_mem_range_index(*index) {
let stream = scan_mem_ranges(stream_ctx.clone(), part_metrics.clone(), *index);
Box::pin(stream) as _
} else {
let read_type = if compaction {
"compaction"
} else {
"seq_scan_files"
};
let stream =
scan_file_ranges(stream_ctx.clone(), part_metrics.clone(), *index, read_type);
Box::pin(stream) as _
};
sources.push(Source::Stream(stream));
}
}
#[cfg(test)]
impl SeqScan {
/// Returns the input.
@@ -551,266 +345,11 @@ impl SeqScan {
}
}
/// Builds [ScanPart]s that preserve order.
#[derive(Default)]
pub(crate) struct SeqDistributor {
parts: Vec<ScanPart>,
}
impl FileRangeCollector for SeqDistributor {
fn append_file_ranges(
&mut self,
file_meta: &FileMeta,
file_ranges: impl Iterator<Item = FileRange>,
) {
// Creates a [ScanPart] for each file.
let ranges: Vec<_> = file_ranges.collect();
if ranges.is_empty() {
// No ranges to read.
return;
}
let part = ScanPart {
memtable_ranges: Vec::new(),
file_ranges: smallvec![ranges],
time_range: Some(file_meta.time_range),
};
self.parts.push(part);
}
}
impl SeqDistributor {
/// Appends memtable ranges to the distributor.
fn append_mem_ranges(
&mut self,
memtables: &[MemtableRef],
projection: Option<&[ColumnId]>,
predicate: Option<Predicate>,
) {
for mem in memtables {
let stats = mem.stats();
let mem_ranges = mem.ranges(projection, predicate.clone());
if mem_ranges.is_empty() {
continue;
}
let part = ScanPart {
memtable_ranges: mem_ranges.into_values().collect(),
file_ranges: smallvec![],
time_range: stats.time_range(),
};
self.parts.push(part);
}
}
/// Groups file ranges and memtable ranges by time ranges.
/// The output number of parts may be `<= parallelism`. If `parallelism` is 0, it will be set to 1.
///
/// Output parts have non-overlapping time ranges.
fn build_parts(self, parallelism: usize) -> Vec<ScanPart> {
let parallelism = parallelism.max(1);
let parts = group_parts_by_range(self.parts);
let parts = maybe_split_parts(parts, parallelism);
// Ensures it doesn't return more parts than `parallelism`.
maybe_merge_parts(parts, parallelism)
}
}
/// Groups parts by time range. It may generate more parts than `parallelism`.
/// All time ranges are guaranteed to be `Some`.
fn group_parts_by_range(mut parts: Vec<ScanPart>) -> Vec<ScanPart> {
if parts.is_empty() {
return Vec::new();
}
// Sorts parts by time range.
parts.sort_unstable_by(|a, b| {
// Safety: time ranges of parts from [SeqPartBuilder] are not None.
let a = a.time_range.unwrap();
let b = b.time_range.unwrap();
a.0.cmp(&b.0).then_with(|| b.1.cmp(&a.1))
});
let mut part_in_range = None;
// Parts with exclusive time ranges.
let mut part_groups = Vec::new();
for part in parts {
let Some(mut prev_part) = part_in_range.take() else {
part_in_range = Some(part);
continue;
};
if prev_part.overlaps(&part) {
prev_part.merge(part);
part_in_range = Some(prev_part);
} else {
// A new group.
part_groups.push(prev_part);
part_in_range = Some(part);
}
}
if let Some(part) = part_in_range {
part_groups.push(part);
}
part_groups
}
/// Merges parts by parallelism.
/// It merges parts if the number of parts is greater than `parallelism`.
fn maybe_merge_parts(mut parts: Vec<ScanPart>, parallelism: usize) -> Vec<ScanPart> {
assert!(parallelism > 0);
if parts.len() <= parallelism {
// No need to merge parts.
return parts;
}
// Sort parts by number of memtables and ranges in reverse order.
parts.sort_unstable_by(|a, b| {
a.memtable_ranges
.len()
.cmp(&b.memtable_ranges.len())
.then_with(|| {
let a_ranges_len = a
.file_ranges
.iter()
.map(|ranges| ranges.len())
.sum::<usize>();
let b_ranges_len = b
.file_ranges
.iter()
.map(|ranges| ranges.len())
.sum::<usize>();
a_ranges_len.cmp(&b_ranges_len)
})
.reverse()
});
let parts_to_reduce = parts.len() - parallelism;
for _ in 0..parts_to_reduce {
// Safety: We ensure `parts.len() > parallelism`.
let part = parts.pop().unwrap();
parts.last_mut().unwrap().merge(part);
}
parts
}
/// Splits parts by parallelism.
/// It splits a part if it only scans one file and doesn't scan any memtable.
fn maybe_split_parts(mut parts: Vec<ScanPart>, parallelism: usize) -> Vec<ScanPart> {
assert!(parallelism > 0);
if parts.len() >= parallelism {
// No need to split parts.
return parts;
}
let has_part_to_split = parts.iter().any(|part| part.can_split_preserve_order());
if !has_part_to_split {
// No proper parts to split.
return parts;
}
// Sorts parts by the number of ranges in the first file.
parts.sort_unstable_by(|a, b| {
let a_len = a.file_ranges.first().map(|file| file.len()).unwrap_or(0);
let b_len = b.file_ranges.first().map(|file| file.len()).unwrap_or(0);
a_len.cmp(&b_len).reverse()
});
let num_parts_to_split = parallelism - parts.len();
let mut output_parts = Vec::with_capacity(parallelism);
// Split parts up to num_parts_to_split.
for part in parts.iter_mut() {
if !part.can_split_preserve_order() {
continue;
}
// Safety: `can_split_preserve_order()` ensures file_ranges.len() == 1.
// Splits part into `num_parts_to_split + 1` new parts if possible.
let target_part_num = num_parts_to_split + 1;
let ranges_per_part = (part.file_ranges[0].len() + target_part_num - 1) / target_part_num;
// `can_split_preserve_order()` ensures part.file_ranges[0].len() > 1.
assert!(ranges_per_part > 0);
for ranges in part.file_ranges[0].chunks(ranges_per_part) {
let new_part = ScanPart {
memtable_ranges: Vec::new(),
file_ranges: smallvec![ranges.to_vec()],
time_range: part.time_range,
};
output_parts.push(new_part);
}
// Replace the current part with the last output part as we will put the current part
// into the output parts later.
*part = output_parts.pop().unwrap();
if output_parts.len() >= num_parts_to_split {
// We already split enough parts.
break;
}
}
// Put the remaining parts into the output parts.
output_parts.append(&mut parts);
output_parts
}
#[cfg(test)]
mod tests {
use common_time::timestamp::TimeUnit;
use common_time::Timestamp;
use super::*;
use crate::memtable::MemtableId;
use crate::test_util::memtable_util::mem_range_for_test;
type Output = (Vec<MemtableId>, i64, i64);
fn run_group_parts_test(input: &[(MemtableId, i64, i64)], expect: &[Output]) {
let parts = input
.iter()
.map(|(id, start, end)| {
let range = (
Timestamp::new(*start, TimeUnit::Second),
Timestamp::new(*end, TimeUnit::Second),
);
ScanPart {
memtable_ranges: vec![mem_range_for_test(*id)],
file_ranges: smallvec![],
time_range: Some(range),
}
})
.collect();
let output = group_parts_by_range(parts);
let actual: Vec<_> = output
.iter()
.map(|part| {
let ids: Vec<_> = part.memtable_ranges.iter().map(|mem| mem.id()).collect();
let range = part.time_range.unwrap();
(ids, range.0.value(), range.1.value())
})
.collect();
assert_eq!(expect, actual);
}
#[test]
fn test_group_parts() {
// Group 1 part.
run_group_parts_test(&[(1, 0, 2000)], &[(vec![1], 0, 2000)]);
// 1, 2, 3, 4 => [3, 1, 4], [2]
run_group_parts_test(
&[
(1, 1000, 2000),
(2, 6000, 7000),
(3, 0, 1500),
(4, 1500, 3000),
],
&[(vec![3, 1, 4], 0, 3000), (vec![2], 6000, 7000)],
);
// 1, 2, 3 => [3], [1], [2],
run_group_parts_test(
&[(1, 3000, 4000), (2, 4001, 6000), (3, 0, 1000)],
&[
(vec![3], 0, 1000),
(vec![1], 3000, 4000),
(vec![2], 4001, 6000),
],
);
/// Returns the scanner type.
fn get_scanner_type(compaction: bool) -> &'static str {
if compaction {
"SeqScan(compaction)"
} else {
"SeqScan"
}
}

View File

@@ -21,24 +21,17 @@ use std::time::Instant;
use async_stream::{stream, try_stream};
use common_error::ext::BoxedError;
use common_recordbatch::error::ExternalSnafu;
use common_recordbatch::{RecordBatch, RecordBatchStreamWrapper, SendableRecordBatchStream};
use common_telemetry::debug;
use common_recordbatch::{RecordBatchStreamWrapper, SendableRecordBatchStream};
use datafusion::physical_plan::{DisplayAs, DisplayFormatType};
use datatypes::schema::SchemaRef;
use futures::{Stream, StreamExt};
use snafu::ResultExt;
use store_api::region_engine::{PartitionRange, RegionScanner, ScannerProperties};
use crate::cache::CacheManager;
use crate::error::Result;
use crate::memtable::MemtableRange;
use crate::read::compat::CompatBatch;
use crate::read::projection::ProjectionMapper;
use crate::read::range::RowGroupIndex;
use crate::error::{PartitionOutOfRangeSnafu, Result};
use crate::read::scan_region::{ScanInput, StreamContext};
use crate::read::{ScannerMetrics, Source};
use crate::sst::parquet::file_range::FileRange;
use crate::sst::parquet::reader::ReaderMetrics;
use crate::read::scan_util::{scan_file_ranges, scan_mem_ranges, PartitionMetrics};
use crate::read::{Batch, ScannerMetrics};
/// Scans a region without providing any output ordering guarantee.
///
@@ -85,62 +78,23 @@ impl UnorderedScan {
Ok(stream)
}
/// Fetch a batch from the source and convert it into a record batch.
async fn fetch_from_source(
source: &mut Source,
mapper: &ProjectionMapper,
cache: Option<&CacheManager>,
compat_batch: Option<&CompatBatch>,
metrics: &mut ScannerMetrics,
) -> common_recordbatch::error::Result<Option<RecordBatch>> {
let start = Instant::now();
let Some(mut batch) = source
.next_batch()
.await
.map_err(BoxedError::new)
.context(ExternalSnafu)?
else {
metrics.scan_cost += start.elapsed();
return Ok(None);
};
if let Some(compat) = compat_batch {
batch = compat
.compat_batch(batch)
.map_err(BoxedError::new)
.context(ExternalSnafu)?;
}
metrics.scan_cost += start.elapsed();
let convert_start = Instant::now();
let record_batch = mapper.convert(&batch, cache)?;
metrics.convert_cost += convert_start.elapsed();
Ok(Some(record_batch))
}
/// Scans a [PartitionRange] and returns a stream.
fn scan_partition_range<'a>(
stream_ctx: &'a StreamContext,
part_range: &'a PartitionRange,
mem_ranges: &'a mut Vec<MemtableRange>,
file_ranges: &'a mut Vec<FileRange>,
reader_metrics: &'a mut ReaderMetrics,
metrics: &'a mut ScannerMetrics,
) -> impl Stream<Item = common_recordbatch::error::Result<RecordBatch>> + 'a {
/// Scans a [PartitionRange] by its `identifier` and returns a stream.
fn scan_partition_range(
stream_ctx: Arc<StreamContext>,
part_range_id: usize,
part_metrics: PartitionMetrics,
) -> impl Stream<Item = Result<Batch>> {
stream! {
// Gets range meta.
let range_meta = &stream_ctx.ranges[part_range.identifier];
let range_meta = &stream_ctx.ranges[part_range_id];
for index in &range_meta.row_group_indices {
if stream_ctx.is_mem_range_index(*index) {
let stream = Self::scan_mem_ranges(stream_ctx, *index, mem_ranges, metrics);
let stream = scan_mem_ranges(stream_ctx.clone(), part_metrics.clone(), *index);
for await batch in stream {
yield batch;
}
} else {
let stream = Self::scan_file_ranges(stream_ctx, *index, file_ranges, reader_metrics, metrics);
let stream = scan_file_ranges(stream_ctx.clone(), part_metrics.clone(), *index, "unordered_scan_files");
for await batch in stream {
yield batch;
}
@@ -149,124 +103,68 @@ impl UnorderedScan {
}
}
/// Scans memtable ranges at `index`.
fn scan_mem_ranges<'a>(
stream_ctx: &'a StreamContext,
index: RowGroupIndex,
ranges: &'a mut Vec<MemtableRange>,
metrics: &'a mut ScannerMetrics,
) -> impl Stream<Item = common_recordbatch::error::Result<RecordBatch>> + 'a {
try_stream! {
let mapper = &stream_ctx.input.mapper;
let cache = stream_ctx.input.cache_manager.as_deref();
stream_ctx.build_mem_ranges(index, ranges);
metrics.num_mem_ranges += ranges.len();
for range in ranges {
let build_reader_start = Instant::now();
let iter = range.build_iter().map_err(BoxedError::new).context(ExternalSnafu)?;
metrics.build_reader_cost = build_reader_start.elapsed();
let mut source = Source::Iter(iter);
while let Some(batch) =
Self::fetch_from_source(&mut source, mapper, cache, None, metrics).await?
{
metrics.num_batches += 1;
metrics.num_rows += batch.num_rows();
let yield_start = Instant::now();
yield batch;
metrics.yield_cost += yield_start.elapsed();
}
}
}
}
/// Scans file ranges at `index`.
fn scan_file_ranges<'a>(
stream_ctx: &'a StreamContext,
index: RowGroupIndex,
ranges: &'a mut Vec<FileRange>,
reader_metrics: &'a mut ReaderMetrics,
metrics: &'a mut ScannerMetrics,
) -> impl Stream<Item = common_recordbatch::error::Result<RecordBatch>> + 'a {
try_stream! {
let mapper = &stream_ctx.input.mapper;
let cache = stream_ctx.input.cache_manager.as_deref();
stream_ctx
.build_file_ranges(index, ranges, reader_metrics)
.await
.map_err(BoxedError::new)
.context(ExternalSnafu)?;
metrics.num_file_ranges += ranges.len();
for range in ranges {
let build_reader_start = Instant::now();
let reader = range
.reader(None)
.await
.map_err(BoxedError::new)
.context(ExternalSnafu)?;
metrics.build_reader_cost += build_reader_start.elapsed();
let compat_batch = range.compat_batch();
let mut source = Source::PruneReader(reader);
while let Some(batch) =
Self::fetch_from_source(&mut source, mapper, cache, compat_batch, metrics)
.await?
{
metrics.num_batches += 1;
metrics.num_rows += batch.num_rows();
let yield_start = Instant::now();
yield batch;
metrics.yield_cost += yield_start.elapsed();
}
if let Source::PruneReader(mut reader) = source {
reader_metrics.merge_from(reader.metrics());
}
}
}
}
fn scan_partition_impl(
&self,
partition: usize,
) -> Result<SendableRecordBatchStream, BoxedError> {
let mut metrics = ScannerMetrics {
prepare_scan_cost: self.stream_ctx.query_start.elapsed(),
..Default::default()
};
if partition >= self.properties.partitions.len() {
return Err(BoxedError::new(
PartitionOutOfRangeSnafu {
given: partition,
all: self.properties.partitions.len(),
}
.build(),
));
}
let part_metrics = PartitionMetrics::new(
self.stream_ctx.input.mapper.metadata().region_id,
partition,
"UnorderedScan",
self.stream_ctx.query_start,
ScannerMetrics {
prepare_scan_cost: self.stream_ctx.query_start.elapsed(),
..Default::default()
},
);
let stream_ctx = self.stream_ctx.clone();
let ranges_opt = self.properties.partitions.get(partition).cloned();
let part_ranges = self.properties.partitions[partition].clone();
let stream = stream! {
let first_poll = stream_ctx.query_start.elapsed();
let Some(part_ranges) = ranges_opt else {
return;
};
let stream = try_stream! {
part_metrics.on_first_poll();
let mut mem_ranges = Vec::new();
let mut file_ranges = Vec::new();
let mut reader_metrics = ReaderMetrics::default();
let cache = stream_ctx.input.cache_manager.as_deref();
// Scans each part.
for part_range in part_ranges {
let mut metrics = ScannerMetrics::default();
let mut fetch_start = Instant::now();
let stream = Self::scan_partition_range(
&stream_ctx,
&part_range,
&mut mem_ranges,
&mut file_ranges,
&mut reader_metrics,
&mut metrics,
stream_ctx.clone(),
part_range.identifier,
part_metrics.clone(),
);
for await batch in stream {
yield batch;
let batch = batch.map_err(BoxedError::new).context(ExternalSnafu)?;
metrics.scan_cost += fetch_start.elapsed();
metrics.num_batches += 1;
metrics.num_rows += batch.num_rows();
let convert_start = Instant::now();
let record_batch = stream_ctx.input.mapper.convert(&batch, cache)?;
metrics.convert_cost += convert_start.elapsed();
let yield_start = Instant::now();
yield record_batch;
metrics.yield_cost += yield_start.elapsed();
fetch_start = Instant::now();
}
metrics.scan_cost += fetch_start.elapsed();
part_metrics.merge_metrics(&metrics);
}
reader_metrics.observe_rows("unordered_scan_files");
metrics.total_cost = stream_ctx.query_start.elapsed();
metrics.observe_metrics_on_finish();
let mapper = &stream_ctx.input.mapper;
debug!(
"Unordered scan partition {} finished, region_id: {}, metrics: {:?}, reader_metrics: {:?}, first_poll: {:?}",
partition, mapper.metadata().region_id, metrics, reader_metrics, first_poll,
);
part_metrics.on_finish();
};
let stream = Box::pin(RecordBatchStreamWrapper::new(
self.stream_ctx.input.mapper.output_schema(),
@@ -302,20 +200,20 @@ impl RegionScanner for UnorderedScan {
}
impl DisplayAs for UnorderedScan {
fn fmt_as(&self, t: DisplayFormatType, f: &mut fmt::Formatter) -> fmt::Result {
fn fmt_as(&self, _t: DisplayFormatType, f: &mut fmt::Formatter) -> fmt::Result {
write!(
f,
"UnorderedScan: region={}, ",
self.stream_ctx.input.mapper.metadata().region_id
)?;
self.stream_ctx.format_for_explain(t, f)
self.stream_ctx.format_for_explain(f)
}
}
impl fmt::Debug for UnorderedScan {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("UnorderedScan")
.field("parts", &self.stream_ctx.parts)
.field("num_ranges", &self.stream_ctx.ranges.len())
.finish()
}
}

View File

@@ -16,9 +16,10 @@ use bytes::Buf;
use common_base::bytes::Bytes;
use common_decimal::Decimal128;
use common_time::time::Time;
use common_time::{Date, Duration, Interval};
use common_time::{Date, Duration, IntervalDayTime, IntervalMonthDayNano, IntervalYearMonth};
use datatypes::data_type::ConcreteDataType;
use datatypes::prelude::Value;
use datatypes::types::IntervalType;
use datatypes::value::ValueRef;
use memcomparable::{Deserializer, Serializer};
use paste::paste;
@@ -117,6 +118,24 @@ impl SortField {
.serialize($serializer)
.context(SerializeFieldSnafu)?;
}
ConcreteDataType::Interval(IntervalType::YearMonth(_)) => {
let interval = value.as_interval_year_month().context(FieldTypeMismatchSnafu)?;
interval.map(|i| i.to_i32())
.serialize($serializer)
.context(SerializeFieldSnafu)?;
}
ConcreteDataType::Interval(IntervalType::DayTime(_)) => {
let interval = value.as_interval_day_time().context(FieldTypeMismatchSnafu)?;
interval.map(|i| i.to_i64())
.serialize($serializer)
.context(SerializeFieldSnafu)?;
}
ConcreteDataType::Interval(IntervalType::MonthDayNano(_)) => {
let interval = value.as_interval_month_day_nano().context(FieldTypeMismatchSnafu)?;
interval.map(|i| i.to_i128())
.serialize($serializer)
.context(SerializeFieldSnafu)?;
}
ConcreteDataType::List(_) |
ConcreteDataType::Dictionary(_) |
ConcreteDataType::Null(_) => {
@@ -144,7 +163,6 @@ impl SortField {
Date, date,
DateTime, datetime,
Time, time,
Interval, interval,
Duration, duration,
Decimal128, decimal128,
Json, binary
@@ -181,6 +199,24 @@ impl SortField {
.map(|t|ty.create_timestamp(t));
Ok(Value::from(timestamp))
}
ConcreteDataType::Interval(IntervalType::YearMonth(_)) => {
let interval = Option::<i32>::deserialize(deserializer)
.context(error::DeserializeFieldSnafu)?
.map(IntervalYearMonth::from_i32);
Ok(Value::from(interval))
}
ConcreteDataType::Interval(IntervalType::DayTime(_)) => {
let interval = Option::<i64>::deserialize(deserializer)
.context(error::DeserializeFieldSnafu)?
.map(IntervalDayTime::from_i64);
Ok(Value::from(interval))
}
ConcreteDataType::Interval(IntervalType::MonthDayNano(_)) => {
let interval = Option::<i128>::deserialize(deserializer)
.context(error::DeserializeFieldSnafu)?
.map(IntervalMonthDayNano::from_i128);
Ok(Value::from(interval))
}
ConcreteDataType::List(l) => NotSupportedFieldSnafu {
data_type: ConcreteDataType::List(l.clone()),
}
@@ -212,7 +248,6 @@ impl SortField {
Date, Date,
Time, Time,
DateTime, DateTime,
Interval, Interval,
Duration, Duration,
Decimal128, Decimal128
)
@@ -263,7 +298,9 @@ impl SortField {
ConcreteDataType::Timestamp(_) => 9, // We treat timestamp as Option<i64>
ConcreteDataType::Time(_) => 10, // i64 and 1 byte time unit
ConcreteDataType::Duration(_) => 10,
ConcreteDataType::Interval(_) => 18,
ConcreteDataType::Interval(IntervalType::YearMonth(_)) => 5,
ConcreteDataType::Interval(IntervalType::DayTime(_)) => 9,
ConcreteDataType::Interval(IntervalType::MonthDayNano(_)) => 17,
ConcreteDataType::Decimal128(_) => 19,
ConcreteDataType::Null(_)
| ConcreteDataType::List(_)
@@ -387,7 +424,9 @@ impl RowCodec for McmpRowCodec {
#[cfg(test)]
mod tests {
use common_base::bytes::StringBytes;
use common_time::{DateTime, Timestamp};
use common_time::{
DateTime, IntervalDayTime, IntervalMonthDayNano, IntervalYearMonth, Timestamp,
};
use datatypes::value::Value;
use super::*;
@@ -563,6 +602,8 @@ mod tests {
ConcreteDataType::timestamp_millisecond_datatype(),
ConcreteDataType::time_millisecond_datatype(),
ConcreteDataType::duration_millisecond_datatype(),
ConcreteDataType::interval_year_month_datatype(),
ConcreteDataType::interval_day_time_datatype(),
ConcreteDataType::interval_month_day_nano_datatype(),
ConcreteDataType::decimal128_default_datatype(),
],
@@ -585,7 +626,9 @@ mod tests {
Value::Timestamp(Timestamp::new_millisecond(12)),
Value::Time(Time::new_millisecond(13)),
Value::Duration(Duration::new_millisecond(14)),
Value::Interval(Interval::from_month_day_nano(1, 1, 15)),
Value::IntervalYearMonth(IntervalYearMonth::new(1)),
Value::IntervalDayTime(IntervalDayTime::new(1, 15)),
Value::IntervalMonthDayNano(IntervalMonthDayNano::new(1, 1, 15)),
Value::Decimal128(Decimal128::from(16)),
],
);
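The estimated sizes introduced above for interval fields (5, 9, and 17 bytes) line up with memcomparable writing one `Option` tag byte plus the fixed-width integer each interval kind is serialized as; a quick sanity check, assuming that accounting:

fn main() {
    // Assumed accounting: 1 Option tag byte + the fixed-width payload.
    assert_eq!(1 + std::mem::size_of::<i32>(), 5);   // Interval(YearMonth)    -> i32
    assert_eq!(1 + std::mem::size_of::<i64>(), 9);   // Interval(DayTime)      -> i64
    assert_eq!(1 + std::mem::size_of::<i128>(), 17); // Interval(MonthDayNano) -> i128
}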

View File

@@ -90,7 +90,8 @@ impl FromStr for FileId {
}
}
/// Time range of a SST file.
/// Time range (min and max timestamps) of a SST file.
/// Both min and max are inclusive.
pub type FileTimeRange = (Timestamp, Timestamp);
/// Checks if two inclusive timestamp ranges overlap with each other.
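The `overlaps` helper this comment documents is not shown in the hunk; for inclusive ranges the predicate reduces to each range starting no later than the other one ends. A small sketch under that assumption (plain integers stand in for `Timestamp`):

// Assumed shape of the inclusive-overlap check; not the crate's actual body.
fn overlaps_inclusive<T: Ord>(l: &(T, T), r: &(T, T)) -> bool {
    l.0 <= r.1 && r.0 <= l.1
}

fn main() {
    assert!(overlaps_inclusive(&(0, 1500), &(1500, 3000)));  // ranges that touch overlap
    assert!(!overlaps_inclusive(&(0, 1000), &(3000, 4000))); // disjoint ranges do not
}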

View File

@@ -238,9 +238,6 @@ impl ParquetReaderBuilder {
cache_manager: self.cache_manager.clone(),
};
// TODO(yingwen): count the cost of the method.
metrics.build_cost = start.elapsed();
let mut filters = if let Some(predicate) = &self.predicate {
predicate
.exprs()
@@ -270,6 +267,9 @@ impl ParquetReaderBuilder {
);
let context = FileRangeContext::new(reader_builder, filters, read_format, codec);
metrics.build_cost += start.elapsed();
Ok((context, row_groups))
}

View File

@@ -35,7 +35,7 @@ use crate::memtable::key_values::KeyValue;
use crate::memtable::partition_tree::data::{timestamp_array_to_i64_slice, DataBatch, DataBuffer};
use crate::memtable::{
BoxedBatchIterator, BulkPart, IterBuilder, KeyValues, Memtable, MemtableBuilder, MemtableId,
MemtableRange, MemtableRangeContext, MemtableRef, MemtableStats,
MemtableRange, MemtableRef, MemtableStats,
};
use crate::row_converter::{McmpRowCodec, RowCodec, SortField};
@@ -361,11 +361,3 @@ pub(crate) fn collect_iter_timestamps(iter: BoxedBatchIterator) -> Vec<i64> {
.map(|v| v.unwrap().0.value())
.collect()
}
/// Builds a memtable range for test.
pub(crate) fn mem_range_for_test(id: MemtableId) -> MemtableRange {
let builder = Box::new(EmptyIterBuilder::default());
let context = Arc::new(MemtableRangeContext::new(id, builder));
MemtableRange::new(context)
}

View File

@@ -210,6 +210,37 @@ where
self.transformer.transform_mut(val)
}
pub fn prepare_pipeline_value(&self, val: Value, result: &mut [Value]) -> Result<()> {
match val {
Value::Map(map) => {
let mut search_from = 0;
// because the keys in the json map are ordered
for (payload_key, payload_value) in map.values.into_iter() {
if search_from >= self.required_keys.len() {
break;
}
// because the map keys are ordered, required_keys is ordered too
if let Some(pos) = self.required_keys[search_from..]
.iter()
.position(|k| k == &payload_key)
{
result[search_from + pos] = payload_value;
// next search from is always after the current key
search_from += pos;
}
}
}
Value::String(_) => {
result[0] = val;
}
_ => {
return PrepareValueMustBeObjectSnafu.fail();
}
}
Ok(())
}
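// Illustrative walk-through (hypothetical keys, not part of this change): with
// required_keys = ["host", "msg", "ts"] and an ordered payload map
// {"host": h, "level": l, "msg": m}, the loop above sets result[0] = h and
// result[1] = m; "level" is skipped because it is not a required key, and the
// search window required_keys[search_from..] only moves forward, which is
// sound because both the map keys and required_keys are kept sorted.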
pub fn prepare(&self, val: serde_json::Value, result: &mut [Value]) -> Result<()> {
match val {
serde_json::Value::Object(map) => {
@@ -286,6 +317,11 @@ pub(crate) fn find_key_index(intermediate_keys: &[String], key: &str, kind: &str
.context(IntermediateKeyIndexSnafu { kind, key })
}
pub enum PipelineWay {
Identity,
Custom(std::sync::Arc<Pipeline<crate::GreptimeTransformer>>),
}
#[cfg(test)]
mod tests {

View File

@@ -438,18 +438,26 @@ pub enum Error {
#[snafu(implicit)]
location: Location,
},
#[snafu(display("failed to coerce complex value, not supported"))]
CoerceComplexType {
#[snafu(display("Can not coerce json type to {ty}"))]
CoerceJsonTypeTo {
ty: String,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("failed to coerce value: {msg}"))]
#[snafu(display(
"Can not coerce {ty} to json type. We only consider object and array to be json types."
))]
CoerceTypeToJson {
ty: String,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Failed to coerce value: {msg}"))]
CoerceIncompatibleTypes {
msg: String,
#[snafu(implicit)]
location: Location,
},
#[snafu(display(
"Invalid resolution: '{resolution}'. Available resolutions: {valid_resolution}"
))]

View File

@@ -402,7 +402,8 @@ impl Processor for CmcdProcessor {
#[cfg(test)]
mod tests {
use ahash::HashMap;
use std::collections::BTreeMap;
use urlencoding::decode;
use super::{CmcdProcessorBuilder, CMCD_KEYS};
@@ -563,14 +564,14 @@ mod tests {
let values = vec
.into_iter()
.map(|(k, v)| (k.to_string(), v))
.collect::<HashMap<String, Value>>();
.collect::<BTreeMap<String, Value>>();
let expected = Map { values };
let actual = processor.parse(0, &decoded).unwrap();
let actual = actual
.into_iter()
.map(|(index, value)| (intermediate_keys[index].clone(), value))
.collect::<HashMap<String, Value>>();
.collect::<BTreeMap<String, Value>>();
let actual = Map { values: actual };
assert_eq!(actual, expected);
}

View File

@@ -383,6 +383,8 @@ impl Processor for RegexProcessor {
}
#[cfg(test)]
mod tests {
use std::collections::BTreeMap;
use ahash::{HashMap, HashMapExt};
use itertools::Itertools;
@@ -475,14 +477,14 @@ ignore_missing: false"#;
.map(|k| k.to_string())
.collect_vec();
let processor = builder.build(&intermediate_keys).unwrap();
let mut result = HashMap::new();
let mut result = BTreeMap::new();
for (index, pattern) in processor.patterns.iter().enumerate() {
let r = processor
.process(&breadcrumbs_str, pattern, (0, index))
.unwrap()
.into_iter()
.map(|(k, v)| (intermediate_keys[k].clone(), v))
.collect::<HashMap<_, _>>();
.collect::<BTreeMap<_, _>>();
result.extend(r);
}
let map = Map { values: result };

View File

@@ -12,16 +12,17 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use api::v1::column_data_type_extension::TypeExt;
use api::v1::column_def::options_from_fulltext;
use api::v1::ColumnOptions;
use api::v1::{ColumnDataTypeExtension, ColumnOptions, JsonTypeExtension};
use datatypes::schema::FulltextOptions;
use greptime_proto::v1::value::ValueData;
use greptime_proto::v1::{ColumnDataType, ColumnSchema, SemanticType};
use snafu::ResultExt;
use crate::etl::error::{
CoerceComplexTypeSnafu, CoerceIncompatibleTypesSnafu, CoerceStringToTypeSnafu,
CoerceUnsupportedEpochTypeSnafu, CoerceUnsupportedNullTypeSnafu,
CoerceIncompatibleTypesSnafu, CoerceJsonTypeToSnafu, CoerceStringToTypeSnafu,
CoerceTypeToJsonSnafu, CoerceUnsupportedEpochTypeSnafu, CoerceUnsupportedNullTypeSnafu,
CoerceUnsupportedNullTypeToSnafu, ColumnOptionsSnafu, Error, Result,
};
use crate::etl::transform::index::Index;
@@ -62,7 +63,10 @@ impl TryFrom<Value> for ValueData {
}
Value::Timestamp(Timestamp::Second(s)) => Ok(ValueData::TimestampSecondValue(s)),
Value::Array(_) | Value::Map(_) => CoerceComplexTypeSnafu.fail(),
Value::Array(_) | Value::Map(_) => {
let data: jsonb::Value = value.into();
Ok(ValueData::BinaryValue(data.to_vec()))
}
}
}
}
@@ -74,15 +78,15 @@ pub(crate) fn coerce_columns(transform: &Transform) -> Result<Vec<ColumnSchema>>
for field in transform.real_fields.iter() {
let column_name = field.output_name().to_string();
let datatype = coerce_type(transform)? as i32;
let (datatype, datatype_extension) = coerce_type(transform)?;
let semantic_type = coerce_semantic_type(transform) as i32;
let column = ColumnSchema {
column_name,
datatype,
datatype: datatype as i32,
semantic_type,
datatype_extension: None,
datatype_extension,
options: coerce_options(transform)?,
};
columns.push(column);
@@ -111,30 +115,41 @@ fn coerce_options(transform: &Transform) -> Result<Option<ColumnOptions>> {
}
}
fn coerce_type(transform: &Transform) -> Result<ColumnDataType> {
fn coerce_type(transform: &Transform) -> Result<(ColumnDataType, Option<ColumnDataTypeExtension>)> {
match transform.type_ {
Value::Int8(_) => Ok(ColumnDataType::Int8),
Value::Int16(_) => Ok(ColumnDataType::Int16),
Value::Int32(_) => Ok(ColumnDataType::Int32),
Value::Int64(_) => Ok(ColumnDataType::Int64),
Value::Int8(_) => Ok((ColumnDataType::Int8, None)),
Value::Int16(_) => Ok((ColumnDataType::Int16, None)),
Value::Int32(_) => Ok((ColumnDataType::Int32, None)),
Value::Int64(_) => Ok((ColumnDataType::Int64, None)),
Value::Uint8(_) => Ok(ColumnDataType::Uint8),
Value::Uint16(_) => Ok(ColumnDataType::Uint16),
Value::Uint32(_) => Ok(ColumnDataType::Uint32),
Value::Uint64(_) => Ok(ColumnDataType::Uint64),
Value::Uint8(_) => Ok((ColumnDataType::Uint8, None)),
Value::Uint16(_) => Ok((ColumnDataType::Uint16, None)),
Value::Uint32(_) => Ok((ColumnDataType::Uint32, None)),
Value::Uint64(_) => Ok((ColumnDataType::Uint64, None)),
Value::Float32(_) => Ok(ColumnDataType::Float32),
Value::Float64(_) => Ok(ColumnDataType::Float64),
Value::Float32(_) => Ok((ColumnDataType::Float32, None)),
Value::Float64(_) => Ok((ColumnDataType::Float64, None)),
Value::Boolean(_) => Ok(ColumnDataType::Boolean),
Value::String(_) => Ok(ColumnDataType::String),
Value::Boolean(_) => Ok((ColumnDataType::Boolean, None)),
Value::String(_) => Ok((ColumnDataType::String, None)),
Value::Timestamp(Timestamp::Nanosecond(_)) => Ok(ColumnDataType::TimestampNanosecond),
Value::Timestamp(Timestamp::Microsecond(_)) => Ok(ColumnDataType::TimestampMicrosecond),
Value::Timestamp(Timestamp::Millisecond(_)) => Ok(ColumnDataType::TimestampMillisecond),
Value::Timestamp(Timestamp::Second(_)) => Ok(ColumnDataType::TimestampSecond),
Value::Timestamp(Timestamp::Nanosecond(_)) => {
Ok((ColumnDataType::TimestampNanosecond, None))
}
Value::Timestamp(Timestamp::Microsecond(_)) => {
Ok((ColumnDataType::TimestampMicrosecond, None))
}
Value::Timestamp(Timestamp::Millisecond(_)) => {
Ok((ColumnDataType::TimestampMillisecond, None))
}
Value::Timestamp(Timestamp::Second(_)) => Ok((ColumnDataType::TimestampSecond, None)),
Value::Array(_) | Value::Map(_) => CoerceComplexTypeSnafu.fail(),
Value::Array(_) | Value::Map(_) => Ok((
ColumnDataType::Binary,
Some(ColumnDataTypeExtension {
type_ext: Some(TypeExt::JsonType(JsonTypeExtension::JsonBinary.into())),
}),
)),
Value::Null => CoerceUnsupportedNullTypeToSnafu {
ty: transform.type_.to_str_type(),
@@ -191,12 +206,12 @@ pub(crate) fn coerce_value(val: &Value, transform: &Transform) -> Result<Option<
))),
},
_ => CoerceIncompatibleTypesSnafu {
msg: "Timestamp can only be coerced to another timestamp",
msg: "Timestamp can only be coerced to another type",
}
.fail(),
},
Value::Array(_) | Value::Map(_) => CoerceComplexTypeSnafu.fail(),
Value::Array(_) | Value::Map(_) => coerce_json_value(val, transform),
}
}
@@ -228,7 +243,12 @@ fn coerce_bool_value(b: bool, transform: &Transform) -> Result<Option<ValueData>
}
},
Value::Array(_) | Value::Map(_) => return CoerceComplexTypeSnafu.fail(),
Value::Array(_) | Value::Map(_) => {
return CoerceJsonTypeToSnafu {
ty: transform.type_.to_str_type(),
}
.fail()
}
Value::Null => return Ok(None),
};
@@ -264,7 +284,12 @@ fn coerce_i64_value(n: i64, transform: &Transform) -> Result<Option<ValueData>>
}
},
Value::Array(_) | Value::Map(_) => return CoerceComplexTypeSnafu.fail(),
Value::Array(_) | Value::Map(_) => {
return CoerceJsonTypeToSnafu {
ty: transform.type_.to_str_type(),
}
.fail()
}
Value::Null => return Ok(None),
};
@@ -300,7 +325,12 @@ fn coerce_u64_value(n: u64, transform: &Transform) -> Result<Option<ValueData>>
}
},
Value::Array(_) | Value::Map(_) => return CoerceComplexTypeSnafu.fail(),
Value::Array(_) | Value::Map(_) => {
return CoerceJsonTypeToSnafu {
ty: transform.type_.to_str_type(),
}
.fail()
}
Value::Null => return Ok(None),
};
@@ -336,7 +366,12 @@ fn coerce_f64_value(n: f64, transform: &Transform) -> Result<Option<ValueData>>
}
},
Value::Array(_) | Value::Map(_) => return CoerceComplexTypeSnafu.fail(),
Value::Array(_) | Value::Map(_) => {
return CoerceJsonTypeToSnafu {
ty: transform.type_.to_str_type(),
}
.fail()
}
Value::Null => return Ok(None),
};
@@ -411,12 +446,43 @@ fn coerce_string_value(s: &String, transform: &Transform) -> Result<Option<Value
None => CoerceUnsupportedEpochTypeSnafu { ty: "String" }.fail(),
},
Value::Array(_) | Value::Map(_) => CoerceComplexTypeSnafu.fail(),
Value::Array(_) | Value::Map(_) => {
return CoerceJsonTypeToSnafu {
ty: transform.type_.to_str_type(),
}
.fail()
}
Value::Null => Ok(None),
}
}
fn coerce_json_value(v: &Value, transform: &Transform) -> Result<Option<ValueData>> {
match &transform.type_ {
Value::Array(_) | Value::Map(_) => (),
t => {
return CoerceTypeToJsonSnafu {
ty: t.to_str_type(),
}
.fail();
}
}
match v {
Value::Map(_) => {
let data: jsonb::Value = v.into();
Ok(Some(ValueData::BinaryValue(data.to_vec())))
}
Value::Array(_) => {
let data: jsonb::Value = v.into();
Ok(Some(ValueData::BinaryValue(data.to_vec())))
}
_ => CoerceTypeToJsonSnafu {
ty: v.to_str_type(),
}
.fail(),
}
}
#[cfg(test)]
mod tests {
use super::*;

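The coerce_json_value function above stores maps and arrays as jsonb binary inside ValueData::BinaryValue. A small standalone sketch of the encode/decode round trip, using only jsonb APIs that already appear in this change (JsonbObject::new, to_vec, from_slice):

use jsonb::{Object as JsonbObject, Value as JsonbValue};

fn main() {
    // Build a jsonb object, serialize it to the binary form stored in the
    // column, then decode it back to JSON text.
    let mut obj = JsonbObject::new();
    obj.insert("hello".to_string(), JsonbValue::String("world".into()));
    let encoded: Vec<u8> = JsonbValue::Object(obj).to_vec();

    let decoded = jsonb::from_slice(&encoded).unwrap().to_string();
    assert_eq!(decoded, r#"{"hello":"world"}"#);
}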
View File

@@ -16,8 +16,10 @@ pub mod array;
pub mod map;
pub mod time;
use ahash::{HashMap, HashMapExt};
use std::collections::BTreeMap;
pub use array::Array;
use jsonb::{Number as JsonbNumber, Object as JsonbObject, Value as JsonbValue};
pub use map::Map;
use snafu::{OptionExt, ResultExt};
pub use time::Timestamp;
@@ -57,6 +59,7 @@ pub enum Value {
Timestamp(Timestamp),
/// We only consider object and array to be json types.
Array(Array),
Map(Map),
}
@@ -110,8 +113,9 @@ impl Value {
_ => Ok(Value::Timestamp(Timestamp::Nanosecond(0))),
},
"array" => Ok(Value::Array(Array::default())),
"map" => Ok(Value::Map(Map::default())),
// We only consider object and array to be json types, and use Map to represent json.
// TODO(qtang): Needs to be defined with better semantics
"json" => Ok(Value::Map(Map::default())),
_ => ValueParseTypeSnafu { t }.fail(),
}
@@ -221,8 +225,7 @@ impl Value {
Value::Timestamp(_) => "epoch",
Value::Array(_) => "array",
Value::Map(_) => "map",
Value::Array(_) | Value::Map(_) => "json",
Value::Null => "null",
}
@@ -287,7 +290,7 @@ impl TryFrom<serde_json::Value> for Value {
Ok(Value::Array(Array { values }))
}
serde_json::Value::Object(v) => {
let mut values = HashMap::with_capacity(v.len());
let mut values = BTreeMap::new();
for (k, v) in v {
values.insert(k, Value::try_from(v)?);
}
@@ -318,7 +321,7 @@ impl TryFrom<&yaml_rust::Yaml> for Value {
Ok(Value::Array(Array { values }))
}
yaml_rust::Yaml::Hash(v) => {
let mut values = HashMap::new();
let mut values = BTreeMap::new();
for (k, v) in v {
let key = k
.as_str()
@@ -331,3 +334,79 @@ impl TryFrom<&yaml_rust::Yaml> for Value {
}
}
}
impl<'a> From<&Value> for JsonbValue<'a> {
fn from(value: &Value) -> Self {
match value {
Value::Null => JsonbValue::Null,
Value::Boolean(v) => JsonbValue::Bool(*v),
Value::Int8(v) => JsonbValue::Number(JsonbNumber::Int64(*v as i64)),
Value::Int16(v) => JsonbValue::Number(JsonbNumber::Int64(*v as i64)),
Value::Int32(v) => JsonbValue::Number(JsonbNumber::Int64(*v as i64)),
Value::Int64(v) => JsonbValue::Number(JsonbNumber::Int64(*v)),
Value::Uint8(v) => JsonbValue::Number(JsonbNumber::UInt64(*v as u64)),
Value::Uint16(v) => JsonbValue::Number(JsonbNumber::UInt64(*v as u64)),
Value::Uint32(v) => JsonbValue::Number(JsonbNumber::UInt64(*v as u64)),
Value::Uint64(v) => JsonbValue::Number(JsonbNumber::UInt64(*v)),
Value::Float32(v) => JsonbValue::Number(JsonbNumber::Float64(*v as f64)),
Value::Float64(v) => JsonbValue::Number(JsonbNumber::Float64(*v)),
Value::String(v) => JsonbValue::String(v.clone().into()),
Value::Timestamp(v) => JsonbValue::String(v.to_string().into()),
Value::Array(arr) => {
let mut vals: Vec<JsonbValue> = Vec::with_capacity(arr.len());
for val in arr.iter() {
vals.push(val.into());
}
JsonbValue::Array(vals)
}
Value::Map(obj) => {
let mut map = JsonbObject::new();
for (k, v) in obj.iter() {
let val: JsonbValue = v.into();
map.insert(k.to_string(), val);
}
JsonbValue::Object(map)
}
}
}
}
impl<'a> From<Value> for JsonbValue<'a> {
fn from(value: Value) -> Self {
match value {
Value::Null => JsonbValue::Null,
Value::Boolean(v) => JsonbValue::Bool(v),
Value::Int8(v) => JsonbValue::Number(JsonbNumber::Int64(v as i64)),
Value::Int16(v) => JsonbValue::Number(JsonbNumber::Int64(v as i64)),
Value::Int32(v) => JsonbValue::Number(JsonbNumber::Int64(v as i64)),
Value::Int64(v) => JsonbValue::Number(JsonbNumber::Int64(v)),
Value::Uint8(v) => JsonbValue::Number(JsonbNumber::UInt64(v as u64)),
Value::Uint16(v) => JsonbValue::Number(JsonbNumber::UInt64(v as u64)),
Value::Uint32(v) => JsonbValue::Number(JsonbNumber::UInt64(v as u64)),
Value::Uint64(v) => JsonbValue::Number(JsonbNumber::UInt64(v)),
Value::Float32(v) => JsonbValue::Number(JsonbNumber::Float64(v as f64)),
Value::Float64(v) => JsonbValue::Number(JsonbNumber::Float64(v)),
Value::String(v) => JsonbValue::String(v.into()),
Value::Timestamp(v) => JsonbValue::String(v.to_string().into()),
Value::Array(arr) => {
let mut vals: Vec<JsonbValue> = Vec::with_capacity(arr.len());
for val in arr.into_iter() {
vals.push(val.into());
}
JsonbValue::Array(vals)
}
Value::Map(obj) => {
let mut map = JsonbObject::new();
for (k, v) in obj.into_iter() {
let val: JsonbValue = v.into();
map.insert(k, val);
}
JsonbValue::Object(map)
}
}
}
}

View File

@@ -12,21 +12,15 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use ahash::{HashMap, HashMapExt};
use std::collections::BTreeMap;
use ahash::HashMap;
use crate::etl::value::Value;
#[derive(Debug, Clone, PartialEq)]
#[derive(Debug, Clone, PartialEq, Default)]
pub struct Map {
pub values: HashMap<String, Value>,
}
impl Default for Map {
fn default() -> Self {
Self {
values: HashMap::with_capacity(30),
}
}
pub values: BTreeMap<String, Value>,
}
impl Map {
@@ -47,12 +41,16 @@ impl Map {
impl From<HashMap<String, Value>> for Map {
fn from(values: HashMap<String, Value>) -> Self {
Map { values }
let mut map = Map::default();
for (k, v) in values.into_iter() {
map.insert(k, v);
}
map
}
}
impl std::ops::Deref for Map {
type Target = HashMap<String, Value>;
type Target = BTreeMap<String, Value>;
fn deref(&self) -> &Self::Target {
&self.values
@@ -65,6 +63,16 @@ impl std::ops::DerefMut for Map {
}
}
impl std::iter::IntoIterator for Map {
type Item = (String, Value);
type IntoIter = std::collections::btree_map::IntoIter<String, Value>;
fn into_iter(self) -> Self::IntoIter {
self.values.into_iter()
}
}
impl std::fmt::Display for Map {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
let values = self

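Map now wraps a BTreeMap<String, Value>, derives Default instead of pre-allocating a HashMap, and its Deref target changes to match, so call sites that reach map methods through deref should keep compiling. A simplified stand-in for the pattern (i64 values instead of the crate's Value type):

use std::collections::BTreeMap;

#[derive(Debug, Default, PartialEq)]
struct Map {
    values: BTreeMap<String, i64>,
}

impl std::ops::Deref for Map {
    type Target = BTreeMap<String, i64>;
    fn deref(&self) -> &Self::Target {
        &self.values
    }
}

fn main() {
    let mut m = Map::default();
    m.values.insert("b".to_string(), 2);
    m.values.insert("a".to_string(), 1);
    // Deref lets BTreeMap methods be called on Map directly.
    assert_eq!(m.get("a"), Some(&1));
    assert_eq!(m.len(), 2);
}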
View File

@@ -21,7 +21,7 @@ pub use etl::processor::Processor;
pub use etl::transform::transformer::identity_pipeline;
pub use etl::transform::{GreptimeTransformer, Transformer};
pub use etl::value::{Array, Map, Value};
pub use etl::{parse, Content, Pipeline};
pub use etl::{error as etl_error, parse, Content, Pipeline, PipelineWay};
pub use manager::{
error, pipeline_operator, table, util, PipelineInfo, PipelineRef, PipelineTableRef,
PipelineVersion,

View File

@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use api::v1::value::ValueData;
use api::v1::Rows;
use common_telemetry::tracing::info;
use greptime_proto::v1::value::ValueData::{
@@ -466,6 +467,57 @@ transform:
}
}
#[test]
fn test_json_type() {
let input_value_str = r#"
{
"product_object": {"hello":"world"},
"product_array": ["hello", "world"]
}
"#;
let input_value = serde_json::from_str::<serde_json::Value>(input_value_str).unwrap();
let pipeline_yaml = r#"
processors:
transform:
- fields:
- product_object
- product_array
type: json
"#;
let yaml_content = Content::Yaml(pipeline_yaml.into());
let pipeline: Pipeline<GreptimeTransformer> = parse(&yaml_content).unwrap();
let mut status = pipeline.init_intermediate_state();
pipeline.prepare(input_value, &mut status).unwrap();
let row = pipeline.exec_mut(&mut status).unwrap();
let r = row
.values
.into_iter()
.map(|v| v.value_data.unwrap())
.collect::<Vec<_>>();
let product_object = r[0].clone();
let product_array = r[1].clone();
match product_object {
ValueData::BinaryValue(data) => {
let jsonb = jsonb::from_slice(&data).unwrap().to_string();
assert_eq!(r#"{"hello":"world"}"#, jsonb);
}
_ => panic!("unexpected value"),
}
match product_array {
ValueData::BinaryValue(data) => {
let jsonb = jsonb::from_slice(&data).unwrap().to_string();
assert_eq!(r#"["hello","world"]"#, jsonb);
}
_ => panic!("unexpected value"),
}
}
#[test]
fn test_simple_data() {
let input_value_str = r#"

Some files were not shown because too many files have changed in this diff.