Mirror of https://github.com/GreptimeTeam/greptimedb.git (synced 2026-01-06 05:12:54 +00:00)

Compare commits: v0.5.0-nig… → v0.4.3 (67 commits)
| Author | SHA1 | Date |
|---|---|---|
| | e083b8011c | |
| | 06327fba1e | |
| | 06da33b1ed | |
| | 2aa6ac5731 | |
| | b28af9443b | |
| | 142035340d | |
| | d2cf72e0f1 | |
| | ae27fbc7f2 | |
| | 9bd10134dd | |
| | 3329da5b72 | |
| | a24f8c96b3 | |
| | a691cff0c4 | |
| | f92b55c745 | |
| | a9e5b902fd | |
| | 5b978269cc | |
| | 3dffc7b62c | |
| | 968c872d15 | |
| | e2a770f8de | |
| | dc46e96879 | |
| | 8f3b299a45 | |
| | 506e6887f3 | |
| | 1757061272 | |
| | 6599bb5a46 | |
| | 3f981ef2b3 | |
| | 5cff735e02 | |
| | f5eede4ce1 | |
| | 22ee45f3df | |
| | 8fd0766754 | |
| | af7107565a | |
| | f02dc0e274 | |
| | b53537e69b | |
| | 0cd6dacb45 | |
| | a3611516a2 | |
| | 93f21b188d | |
| | b9a7c2db7e | |
| | c62ba79759 | |
| | 9d029f7337 | |
| | f1e8afcda9 | |
| | 9697632888 | |
| | 69ee2c336c | |
| | 1f57c6b1f0 | |
| | 53a5864944 | |
| | 5b70881098 | |
| | 06d273b75a | |
| | b382900c5c | |
| | c79bb5a936 | |
| | 7e0dcfc797 | |
| | 51ddebdc73 | |
| | e9f7579091 | |
| | f387a09535 | |
| | cf94d3295f | |
| | 0a91335e24 | |
| | 6fd04e38a3 | |
| | bbaae9223a | |
| | 060864d0c1 | |
| | 395632c874 | |
| | 0dca63bc7b | |
| | 7323d727c9 | |
| | 68f92ecf08 | |
| | 39d52f25bf | |
| | fb8d0c6ce5 | |
| | ce867fb583 | |
| | 04a8fc5138 | |
| | 479ffe5a0f | |
| | 4b48c716b2 | |
| | a9137b77f0 | |
| | 5f3bbdca4f | |
.github/ISSUE_TEMPLATE/bug_report.yml (vendored, 35 changes)
@@ -41,13 +41,27 @@ body:
      required: true

  - type: textarea
    id: what-happened
    id: reproduce
    attributes:
      label: What happened?
      label: Minimal reproduce step
      description: |
        Tell us what happened and also what you would have expected to
        happen instead.
      placeholder: "Describe the bug"
        Please walk us through and provide steps and details on how
        to reproduce the issue. If possible, provide scripts that we
        can run to trigger the bug.
    validations:
      required: true

  - type: textarea
    id: expected-manner
    attributes:
      label: What did you expect to see?
    validations:
      required: true

  - type: textarea
    id: actual-manner
    attributes:
      label: What did you see instead?
    validations:
      required: true

@@ -72,14 +86,3 @@ body:
        trace. This will be automatically formatted into code, so no
        need for backticks.
      render: bash

  - type: textarea
    id: reproduce
    attributes:
      label: How can we reproduce the bug?
      description: |
        Please walk us through and provide steps and details on how
        to reproduce the issue. If possible, provide scripts that we
        can run to trigger the bug.
    validations:
      required: true
.github/workflows/license.yaml (vendored, 2 changes)
@@ -13,4 +13,4 @@ jobs:
    steps:
      - uses: actions/checkout@v2
      - name: Check License Header
        uses: apache/skywalking-eyes/header@df70871af1a8109c9a5b1dc824faaf65246c5236
        uses: korandoru/hawkeye@v3
@@ -1,14 +0,0 @@
header:
  license:
    spdx-id: Apache-2.0
    copyright-owner: Greptime Team

  paths:
    - "**/*.rs"
    - "**/*.py"

  comment: on-failure

dependency:
  files:
    - Cargo.toml
@@ -49,6 +49,7 @@ GreptimeDB uses the [Apache 2.0 license](https://github.com/GreptimeTeam/greptim
### Before PR

- To ensure that community is free and confident in its ability to use your contributions, please sign the Contributor License Agreement (CLA) which will be incorporated in the pull request process.
- Make sure all files have proper license header (running `docker run --rm -v $(pwd):/github/workspace ghcr.io/korandoru/hawkeye-native:v3 format` from the project root).
- Make sure all your codes are formatted and follow the [coding style](https://pingcap.github.io/style-guide/rust/).
- Make sure all unit tests are passed (using `cargo test --workspace` or [nextest](https://nexte.st/index.html) `cargo nextest run`).
- Make sure all clippy warnings are fixed (you can check it locally by running `cargo clippy --workspace --all-targets -- -D warnings`).
Cargo.lock (generated, 373 changes)
@@ -196,7 +196,7 @@ checksum = "8f1f8f5a6f3d50d89e3797d7593a50f96bb2aaa20ca0cc7be1fb673232c91d72"

[[package]]
name = "api"
version = "0.4.2"
version = "0.4.3"
dependencies = [
 "common-base",
 "common-error",

@@ -660,7 +660,7 @@ dependencies = [

[[package]]
name = "auth"
version = "0.4.2"
version = "0.4.3"
dependencies = [
 "api",
 "async-trait",

@@ -833,7 +833,7 @@ dependencies = [

[[package]]
name = "benchmarks"
version = "0.4.2"
version = "0.4.3"
dependencies = [
 "arrow",
 "chrono",

@@ -857,6 +857,19 @@ dependencies = [
 "num-traits",
]

[[package]]
name = "bigdecimal"
version = "0.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c06619be423ea5bb86c95f087d5707942791a08a85530df0db2209a3ecfb8bc9"
dependencies = [
 "autocfg",
 "libm",
 "num-bigint",
 "num-integer",
 "num-traits",
]

[[package]]
name = "bincode"
version = "1.3.3"

@@ -958,47 +971,26 @@ dependencies = [

[[package]]
name = "borsh"
version = "0.10.3"
version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4114279215a005bc675e386011e594e1d9b800918cea18fcadadcce864a2046b"
checksum = "bf617fabf5cdbdc92f774bfe5062d870f228b80056d41180797abf48bed4056e"
dependencies = [
 "borsh-derive",
 "hashbrown 0.13.2",
 "cfg_aliases",
]

[[package]]
name = "borsh-derive"
version = "0.10.3"
version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0754613691538d51f329cce9af41d7b7ca150bc973056f1156611489475f54f7"
dependencies = [
 "borsh-derive-internal",
 "borsh-schema-derive-internal",
 "proc-macro-crate 0.1.5",
 "proc-macro2",
 "syn 1.0.109",
]

[[package]]
name = "borsh-derive-internal"
version = "0.10.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "afb438156919598d2c7bad7e1c0adf3d26ed3840dbc010db1a882a65583ca2fb"
checksum = "f404657a7ea7b5249e36808dff544bc88a28f26e0ac40009f674b7a009d14be3"
dependencies = [
 "once_cell",
 "proc-macro-crate 2.0.0",
 "proc-macro2",
 "quote",
 "syn 1.0.109",
]

[[package]]
name = "borsh-schema-derive-internal"
version = "0.10.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "634205cc43f74a1b9046ef87c4540ebda95696ec0f315024860cad7c5b0f5ccd"
dependencies = [
 "proc-macro2",
 "quote",
 "syn 1.0.109",
 "syn 2.0.38",
 "syn_derive",
]

@@ -1175,7 +1167,7 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5"

[[package]]
name = "catalog"
version = "0.4.2"
version = "0.4.3"
dependencies = [
 "api",
 "arc-swap",

@@ -1250,6 +1242,12 @@ version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"

[[package]]
name = "cfg_aliases"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fd16c4719339c4530435d38e511904438d07cce7950afa3718a84ac36c10e89e"

[[package]]
name = "cfgrammar"
version = "0.12.0"

@@ -1457,7 +1455,7 @@ checksum = "702fc72eb24e5a1e48ce58027a675bc24edd52096d5397d4aea7c6dd9eca0bd1"

[[package]]
name = "client"
version = "0.4.2"
version = "0.4.3"
dependencies = [
 "api",
 "arrow-flight",

@@ -1490,7 +1488,7 @@ dependencies = [
 "session",
 "snafu",
 "substrait 0.17.1",
 "substrait 0.4.2",
 "substrait 0.4.3",
 "tokio",
 "tokio-stream",
 "tonic 0.10.2",

@@ -1520,7 +1518,7 @@ dependencies = [

[[package]]
name = "cmd"
version = "0.4.2"
version = "0.4.3"
dependencies = [
 "anymap",
 "async-trait",

@@ -1568,7 +1566,7 @@ dependencies = [
 "servers",
 "session",
 "snafu",
 "substrait 0.4.2",
 "substrait 0.4.3",
 "table",
 "temp-env",
 "tikv-jemallocator",

@@ -1601,7 +1599,7 @@ checksum = "55b672471b4e9f9e95499ea597ff64941a309b2cdbffcc46f2cc5e2d971fd335"

[[package]]
name = "common-base"
version = "0.4.2"
version = "0.4.3"
dependencies = [
 "anymap",
 "bitvec",

@@ -1616,20 +1614,18 @@ dependencies = [

[[package]]
name = "common-catalog"
version = "0.4.2"
version = "0.4.3"
dependencies = [
 "chrono",
 "common-error",
 "common-macro",
 "serde",
 "serde_json",
 "snafu",
 "tokio",
]

[[package]]
name = "common-config"
version = "0.4.2"
version = "0.4.3"
dependencies = [
 "common-base",
 "humantime-serde",

@@ -1638,7 +1634,7 @@ dependencies = [

[[package]]
name = "common-datasource"
version = "0.4.2"
version = "0.4.3"
dependencies = [
 "arrow",
 "arrow-schema",

@@ -1665,9 +1661,23 @@ dependencies = [
 "url",
]

[[package]]
name = "common-decimal"
version = "0.4.3"
dependencies = [
 "arrow",
 "bigdecimal 0.4.2",
 "common-error",
 "common-macro",
 "rust_decimal",
 "serde",
 "serde_json",
 "snafu",
]

[[package]]
name = "common-error"
version = "0.4.2"
version = "0.4.3"
dependencies = [
 "snafu",
 "strum 0.25.0",

@@ -1675,7 +1685,7 @@ dependencies = [

[[package]]
name = "common-function"
version = "0.4.2"
version = "0.4.3"
dependencies = [
 "arc-swap",
 "chrono-tz 0.6.3",

@@ -1698,7 +1708,7 @@ dependencies = [

[[package]]
name = "common-greptimedb-telemetry"
version = "0.4.2"
version = "0.4.3"
dependencies = [
 "async-trait",
 "common-error",

@@ -1717,7 +1727,7 @@ dependencies = [

[[package]]
name = "common-grpc"
version = "0.4.2"
version = "0.4.3"
dependencies = [
 "api",
 "arrow-flight",

@@ -1747,7 +1757,7 @@ dependencies = [

[[package]]
name = "common-grpc-expr"
version = "0.4.2"
version = "0.4.3"
dependencies = [
 "api",
 "async-trait",

@@ -1766,12 +1776,10 @@ dependencies = [

[[package]]
name = "common-macro"
version = "0.4.2"
version = "0.4.3"
dependencies = [
 "arc-swap",
 "backtrace",
 "common-query",
 "common-telemetry",
 "datatypes",
 "proc-macro2",
 "quote",

@@ -1783,7 +1791,7 @@ dependencies = [

[[package]]
name = "common-mem-prof"
version = "0.4.2"
version = "0.4.3"
dependencies = [
 "common-error",
 "common-macro",

@@ -1796,10 +1804,10 @@ dependencies = [

[[package]]
name = "common-meta"
version = "0.4.2"
version = "0.4.3"
dependencies = [
 "api",
 "arrow-flight",
 "async-recursion",
 "async-stream",
 "async-trait",
 "base64 0.21.5",

@@ -1830,11 +1838,12 @@ dependencies = [
 "strum 0.25.0",
 "table",
 "tokio",
 "tonic 0.10.2",
]

[[package]]
name = "common-procedure"
version = "0.4.2"
version = "0.4.3"
dependencies = [
 "async-stream",
 "async-trait",

@@ -1858,7 +1867,7 @@ dependencies = [

[[package]]
name = "common-procedure-test"
version = "0.4.2"
version = "0.4.3"
dependencies = [
 "async-trait",
 "common-procedure",

@@ -1866,7 +1875,7 @@ dependencies = [

[[package]]
name = "common-query"
version = "0.4.2"
version = "0.4.3"
dependencies = [
 "api",
 "async-trait",

@@ -1881,7 +1890,7 @@ dependencies = [
 "datatypes",
 "serde",
 "snafu",
 "sqlparser 0.38.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=602d7878c9949e48512251c7f18695a50936e51c)",
 "sqlparser 0.38.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=0fbae07d0c46dc18e3381c406d8b9b8abef6b1fd)",
 "sqlparser_derive 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
 "statrs",
 "tokio",

@@ -1889,7 +1898,7 @@ dependencies = [

[[package]]
name = "common-recordbatch"
version = "0.4.2"
version = "0.4.3"
dependencies = [
 "common-error",
 "common-macro",

@@ -1906,7 +1915,7 @@ dependencies = [

[[package]]
name = "common-runtime"
version = "0.4.2"
version = "0.4.3"
dependencies = [
 "async-trait",
 "common-error",

@@ -1924,7 +1933,7 @@ dependencies = [

[[package]]
name = "common-telemetry"
version = "0.4.2"
version = "0.4.3"
dependencies = [
 "backtrace",
 "common-error",

@@ -1941,7 +1950,6 @@ dependencies = [
 "tokio",
 "tracing",
 "tracing-appender",
 "tracing-bunyan-formatter",
 "tracing-futures",
 "tracing-log",
 "tracing-opentelemetry",

@@ -1950,7 +1958,7 @@ dependencies = [

[[package]]
name = "common-test-util"
version = "0.4.2"
version = "0.4.3"
dependencies = [
 "once_cell",
 "rand",

@@ -1959,7 +1967,7 @@ dependencies = [

[[package]]
name = "common-time"
version = "0.4.2"
version = "0.4.3"
dependencies = [
 "arrow",
 "chrono",

@@ -1974,7 +1982,7 @@ dependencies = [

[[package]]
name = "common-version"
version = "0.4.2"
version = "0.4.3"
dependencies = [
 "build-data",
]

@@ -2633,7 +2641,7 @@ dependencies = [

[[package]]
name = "datanode"
version = "0.4.2"
version = "0.4.3"
dependencies = [
 "api",
 "arrow-flight",

@@ -2693,7 +2701,7 @@ dependencies = [
 "sql",
 "storage",
 "store-api",
 "substrait 0.4.2",
 "substrait 0.4.3",
 "table",
 "tokio",
 "tokio-stream",

@@ -2707,12 +2715,13 @@ dependencies = [

[[package]]
name = "datatypes"
version = "0.4.2"
version = "0.4.3"
dependencies = [
 "arrow",
 "arrow-array",
 "arrow-schema",
 "common-base",
 "common-decimal",
 "common-error",
 "common-macro",
 "common-telemetry",

@@ -2723,6 +2732,7 @@ dependencies = [
 "num-traits",
 "ordered-float 3.9.2",
 "paste",
 "rust_decimal",
 "serde",
 "serde_json",
 "snafu",

@@ -3066,16 +3076,16 @@ dependencies = [

[[package]]
name = "etcd-client"
version = "0.11.1"
version = "0.12.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f4b0ea5ef6dc2388a4b1669fa32097249bc03a15417b97cb75e38afb309e4a89"
checksum = "3d982a3b3088a5f95d19882d298b352a2e0be20703e3080c1e6767731d5dec79"
dependencies = [
 "http",
 "prost 0.11.9",
 "prost 0.12.1",
 "tokio",
 "tokio-stream",
 "tonic 0.9.2",
 "tonic-build 0.9.2",
 "tonic 0.10.2",
 "tonic-build 0.10.2",
 "tower",
 "tower-service",
]

@@ -3143,7 +3153,7 @@ dependencies = [

[[package]]
name = "file-engine"
version = "0.4.2"
version = "0.4.3"
dependencies = [
 "api",
 "async-trait",

@@ -3259,7 +3269,7 @@ dependencies = [

[[package]]
name = "frontend"
version = "0.4.2"
version = "0.4.3"
dependencies = [
 "api",
 "arc-swap",

@@ -3320,11 +3330,11 @@ dependencies = [
 "session",
 "snafu",
 "sql",
 "sqlparser 0.38.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=602d7878c9949e48512251c7f18695a50936e51c)",
 "sqlparser 0.38.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=0fbae07d0c46dc18e3381c406d8b9b8abef6b1fd)",
 "storage",
 "store-api",
 "strfmt",
 "substrait 0.4.2",
 "substrait 0.4.3",
 "table",
 "tokio",
 "toml 0.7.8",

@@ -3589,7 +3599,7 @@ checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b"
[[package]]
name = "greptime-proto"
version = "0.1.0"
source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=5da72f1cae6b24315e5afc87520aaf7b4d6bb872#5da72f1cae6b24315e5afc87520aaf7b4d6bb872"
source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=7eb2e78be7a104d2582fbea0bcb1e019407da702#7eb2e78be7a104d2582fbea0bcb1e019407da702"
dependencies = [
 "prost 0.12.1",
 "serde",

@@ -4383,11 +4393,10 @@ checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f"

[[package]]
name = "log-store"
version = "0.4.2"
version = "0.4.3"
dependencies = [
 "async-stream",
 "async-trait",
 "base64 0.13.1",
 "byteorder",
 "bytes",
 "common-base",

@@ -4616,6 +4625,15 @@ dependencies = [
 "libc",
]

[[package]]
name = "memmap2"
version = "0.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "43a5a03cefb0d953ec0be133036f14e109412fa594edc2f77227249db66cc3ed"
dependencies = [
 "libc",
]

[[package]]
name = "memoffset"
version = "0.6.5"

@@ -4645,7 +4663,7 @@ dependencies = [

[[package]]
name = "meta-client"
version = "0.4.2"
version = "0.4.3"
dependencies = [
 "api",
 "async-trait",

@@ -4675,7 +4693,7 @@ dependencies = [

[[package]]
name = "meta-srv"
version = "0.4.2"
version = "0.4.3"
dependencies = [
 "anymap",
 "api",

@@ -4704,6 +4722,7 @@ dependencies = [
 "futures",
 "h2",
 "http-body",
 "humantime-serde",
 "lazy_static",
 "once_cell",
 "parking_lot 0.12.1",

@@ -4749,6 +4768,32 @@ dependencies = [
 "meter-core",
]

[[package]]
name = "metric-engine"
version = "0.4.3"
dependencies = [
 "api",
 "async-trait",
 "base64 0.21.5",
 "common-error",
 "common-macro",
 "common-query",
 "common-recordbatch",
 "common-telemetry",
 "common-test-util",
 "common-time",
 "datafusion",
 "datatypes",
 "lazy_static",
 "mito2",
 "object-store",
 "prometheus",
 "serde_json",
 "snafu",
 "store-api",
 "tokio",
]

[[package]]
name = "mime"
version = "0.3.17"

@@ -4794,7 +4839,7 @@ dependencies = [

[[package]]
name = "mito2"
version = "0.4.2"
version = "0.4.3"
dependencies = [
 "anymap",
 "api",

@@ -4809,6 +4854,7 @@ dependencies = [
 "common-base",
 "common-catalog",
 "common-datasource",
 "common-decimal",
 "common-error",
 "common-macro",
 "common-procedure",

@@ -4830,6 +4876,7 @@ dependencies = [
 "log-store",
 "memcomparable",
 "moka",
 "num_cpus",
 "object-store",
 "parquet",
 "paste",

@@ -4948,7 +4995,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "57349d5a326b437989b6ee4dc8f2f34b0cc131202748414712a8e7d98952fc8c"
dependencies = [
 "base64 0.21.5",
 "bigdecimal",
 "bigdecimal 0.3.1",
 "bindgen",
 "bitflags 2.4.1",
 "bitvec",

@@ -5247,7 +5294,7 @@ dependencies = [

[[package]]
name = "object-store"
version = "0.4.2"
version = "0.4.3"
dependencies = [
 "anyhow",
 "async-trait",

@@ -5461,7 +5508,7 @@ dependencies = [

[[package]]
name = "operator"
version = "0.4.2"
version = "0.4.3"
dependencies = [
 "api",
 "async-compat",

@@ -5504,10 +5551,10 @@ dependencies = [
 "session",
 "snafu",
 "sql",
 "sqlparser 0.38.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=602d7878c9949e48512251c7f18695a50936e51c)",
 "sqlparser 0.38.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=0fbae07d0c46dc18e3381c406d8b9b8abef6b1fd)",
 "storage",
 "store-api",
 "substrait 0.4.2",
 "substrait 0.4.3",
 "table",
 "tokio",
 "tonic 0.10.2",

@@ -5736,7 +5783,7 @@ dependencies = [

[[package]]
name = "partition"
version = "0.4.2"
version = "0.4.3"
dependencies = [
 "api",
 "async-trait",

@@ -6065,7 +6112,7 @@ dependencies = [

[[package]]
name = "plugins"
version = "0.4.2"
version = "0.4.3"
dependencies = [
 "auth",
 "common-base",

@@ -6130,9 +6177,9 @@ checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391"

[[package]]
name = "pprof"
version = "0.11.1"
version = "0.13.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "196ded5d4be535690899a4631cc9f18cdc41b7ebf24a79400f46f48e49a11059"
checksum = "ef5c97c51bd34c7e742402e216abdeb44d415fbe6ae41d56b114723e953711cb"
dependencies = [
 "backtrace",
 "cfg-if 1.0.0",

@@ -6143,9 +6190,9 @@ dependencies = [
 "nix 0.26.4",
 "once_cell",
 "parking_lot 0.12.1",
 "prost 0.11.9",
 "prost-build 0.11.9",
 "prost-derive 0.11.9",
 "prost 0.12.1",
 "prost-build 0.12.1",
 "prost-derive 0.12.1",
 "protobuf",
 "sha2",
 "smallvec",

@@ -6206,15 +6253,6 @@ dependencies = [
 "indexmap 1.9.3",
]

[[package]]
name = "proc-macro-crate"
version = "0.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1d6ea3c4595b96363c13943497db34af4460fb474a95c43f4446ad341b8c9785"
dependencies = [
 "toml 0.5.11",
]

[[package]]
name = "proc-macro-crate"
version = "1.3.1"

@@ -6222,7 +6260,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7f4c021e1093a56626774e81216a4ce732a735e5bad4868a03f3ed65ca0c3919"
dependencies = [
 "once_cell",
 "toml_edit",
 "toml_edit 0.19.15",
]

[[package]]
name = "proc-macro-crate"
version = "2.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7e8366a6159044a37876a2b9817124296703c586a5c92e2c53751fa06d8d43e8"
dependencies = [
 "toml_edit 0.20.7",
]

[[package]]

@@ -6302,7 +6349,7 @@ dependencies = [

[[package]]
name = "promql"
version = "0.4.2"
version = "0.4.3"
dependencies = [
 "async-recursion",
 "async-trait",

@@ -6509,6 +6556,18 @@ dependencies = [
 "syn 1.0.109",
]

[[package]]
name = "puffin"
version = "0.4.3"
dependencies = [
 "derive_builder 0.12.0",
 "futures",
 "pin-project",
 "serde",
 "serde_json",
 "tokio",
]

[[package]]
name = "pulldown-cmark"
version = "0.9.3"

@@ -6604,7 +6663,7 @@ dependencies = [

[[package]]
name = "query"
version = "0.4.2"
version = "0.4.3"
dependencies = [
 "ahash 0.8.6",
 "api",

@@ -6662,7 +6721,7 @@ dependencies = [
 "stats-cli",
 "store-api",
 "streaming-stats",
 "substrait 0.4.2",
 "substrait 0.4.3",
 "table",
 "tokio",
 "tokio-stream",

@@ -7294,9 +7353,9 @@ dependencies = [

[[package]]
name = "rust_decimal"
version = "1.32.0"
version = "1.33.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a4c4216490d5a413bc6d10fa4742bd7d4955941d062c0ef873141d6b0e7b30fd"
checksum = "076ba1058b036d3ca8bcafb1d54d0b0572e99d7ecd3e4222723e18ca8e9ca9a8"
dependencies = [
 "arrayvec",
 "borsh",

@@ -7592,7 +7651,7 @@ dependencies = [
 "mac_address",
 "md-5",
 "memchr",
 "memmap2",
 "memmap2 0.5.10",
 "mt19937",
 "nix 0.26.4",
 "num-bigint",

@@ -7881,7 +7940,7 @@ checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49"

[[package]]
name = "script"
version = "0.4.2"
version = "0.4.3"
dependencies = [
 "api",
 "arc-swap",

@@ -8152,7 +8211,7 @@ dependencies = [

[[package]]
name = "servers"
version = "0.4.2"
version = "0.4.3"
dependencies = [
 "aide",
 "api",

@@ -8162,7 +8221,7 @@ dependencies = [
 "axum",
 "axum-macros",
 "axum-test-helper",
 "base64 0.13.1",
 "base64 0.21.5",
 "build-data",
 "bytes",
 "catalog",

@@ -8200,7 +8259,6 @@ dependencies = [
 "lazy_static",
 "mime_guess",
 "mysql_async",
 "num_cpus",
 "once_cell",
 "openmetrics-parser",
 "opensrv-mysql",

@@ -8246,7 +8304,7 @@ dependencies = [

[[package]]
name = "session"
version = "0.4.2"
version = "0.4.3"
dependencies = [
 "api",
 "arc-swap",

@@ -8419,7 +8477,6 @@ version = "0.7.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e4de37ad025c587a29e8f3f5605c00f70b98715ef90b9061a815b9e59e9042d6"
dependencies = [
 "backtrace",
 "doc-comment",
 "snafu-derive",
]

@@ -8508,12 +8565,13 @@ dependencies = [

[[package]]
name = "sql"
version = "0.4.2"
version = "0.4.3"
dependencies = [
 "api",
 "common-base",
 "common-catalog",
 "common-datasource",
 "common-decimal",
 "common-error",
 "common-macro",
 "common-query",

@@ -8526,7 +8584,7 @@ dependencies = [
 "once_cell",
 "regex",
 "snafu",
 "sqlparser 0.38.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=602d7878c9949e48512251c7f18695a50936e51c)",
 "sqlparser 0.38.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=0fbae07d0c46dc18e3381c406d8b9b8abef6b1fd)",
 "sqlparser_derive 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
 "table",
]

@@ -8559,7 +8617,7 @@ dependencies = [

[[package]]
name = "sqlness-runner"
version = "0.4.2"
version = "0.4.3"
dependencies = [
 "async-trait",
 "clap 4.4.7",

@@ -8589,13 +8647,13 @@ dependencies = [
[[package]]
name = "sqlparser"
version = "0.38.0"
source = "git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=602d7878c9949e48512251c7f18695a50936e51c#602d7878c9949e48512251c7f18695a50936e51c"
source = "git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=0fbae07d0c46dc18e3381c406d8b9b8abef6b1fd#0fbae07d0c46dc18e3381c406d8b9b8abef6b1fd"
dependencies = [
 "lazy_static",
 "log",
 "regex",
 "sqlparser 0.38.0 (registry+https://github.com/rust-lang/crates.io-index)",
 "sqlparser_derive 0.1.1 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=602d7878c9949e48512251c7f18695a50936e51c)",
 "sqlparser_derive 0.1.1 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=0fbae07d0c46dc18e3381c406d8b9b8abef6b1fd)",
]

@@ -8612,7 +8670,7 @@ dependencies = [
[[package]]
name = "sqlparser_derive"
version = "0.1.1"
source = "git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=602d7878c9949e48512251c7f18695a50936e51c#602d7878c9949e48512251c7f18695a50936e51c"
source = "git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=0fbae07d0c46dc18e3381c406d8b9b8abef6b1fd#0fbae07d0c46dc18e3381c406d8b9b8abef6b1fd"
dependencies = [
 "proc-macro2",
 "quote",

@@ -8765,7 +8823,7 @@ dependencies = [

[[package]]
name = "storage"
version = "0.4.2"
version = "0.4.3"
dependencies = [
 "api",
 "arc-swap",

@@ -8819,7 +8877,7 @@ dependencies = [

[[package]]
name = "store-api"
version = "0.4.2"
version = "0.4.3"
dependencies = [
 "api",
 "aquamarine",

@@ -8958,7 +9016,7 @@ dependencies = [

[[package]]
name = "substrait"
version = "0.4.2"
version = "0.4.3"
dependencies = [
 "async-recursion",
 "async-trait",

@@ -9013,21 +9071,21 @@ checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc"

[[package]]
name = "symbolic-common"
version = "10.2.1"
version = "12.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1b55cdc318ede251d0957f07afe5fed912119b8c1bc5a7804151826db999e737"
checksum = "405af7bd5edd866cef462e22ef73f11cf9bf506c9d62824fef8364eb69d4d4ad"
dependencies = [
 "debugid",
 "memmap2",
 "memmap2 0.8.0",
 "stable_deref_trait",
 "uuid",
]

[[package]]
name = "symbolic-demangle"
version = "10.2.1"
version = "12.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "79be897be8a483a81fff6a3a4e195b4ac838ef73ca42d348b3f722da9902e489"
checksum = "2bcd041ccfb77d9c70639efcd5b804b508ac7a273e9224d227379e225625daf9"
dependencies = [
 "cpp_demangle",
 "rustc-demangle",

@@ -9065,6 +9123,18 @@ dependencies = [
 "syn 1.0.109",
]

[[package]]
name = "syn_derive"
version = "0.1.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1329189c02ff984e9736652b1631330da25eaa6bc639089ed4915d25446cbe7b"
dependencies = [
 "proc-macro-error",
 "proc-macro2",
 "quote",
 "syn 2.0.38",
]

[[package]]
name = "sync_wrapper"
version = "0.1.2"

@@ -9094,7 +9164,7 @@ dependencies = [

[[package]]
name = "table"
version = "0.4.2"
version = "0.4.3"
dependencies = [
 "anymap",
 "async-trait",

@@ -9200,7 +9270,7 @@ dependencies = [

[[package]]
name = "tests-integration"
version = "0.4.2"
version = "0.4.3"
dependencies = [
 "api",
 "async-trait",

@@ -9210,6 +9280,7 @@ dependencies = [
 "catalog",
 "chrono",
 "client",
 "cmd",
 "common-base",
 "common-catalog",
 "common-config",

@@ -9232,6 +9303,7 @@ dependencies = [
 "itertools 0.10.5",
 "meta-client",
 "meta-srv",
 "num_cpus",
 "object-store",
 "once_cell",
 "opentelemetry-proto",

@@ -9253,7 +9325,7 @@ dependencies = [
 "sql",
 "sqlx",
 "store-api",
 "substrait 0.4.2",
 "substrait 0.4.3",
 "table",
 "tempfile",
 "tokio",

@@ -9616,7 +9688,7 @@ dependencies = [
 "serde",
 "serde_spanned",
 "toml_datetime",
 "toml_edit",
 "toml_edit 0.19.15",
]

@@ -9641,6 +9713,17 @@ dependencies = [
 "winnow",
]

[[package]]
name = "toml_edit"
version = "0.20.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "70f427fce4d84c72b5b732388bf4a9f4531b53f74e2887e3ecb2481f68f66d81"
dependencies = [
 "indexmap 2.0.2",
 "toml_datetime",
 "winnow",
]

[[package]]
name = "tonic"
version = "0.9.2"

@@ -9835,24 +9918,6 @@ dependencies = [
 "syn 2.0.38",
]

[[package]]
name = "tracing-bunyan-formatter"
version = "0.3.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b5c266b9ac83dedf0e0385ad78514949e6d89491269e7065bee51d2bb8ec7373"
dependencies = [
 "ahash 0.8.6",
 "gethostname",
 "log",
 "serde",
 "serde_json",
 "time",
 "tracing",
 "tracing-core",
 "tracing-log",
 "tracing-subscriber",
]

[[package]]
name = "tracing-core"
version = "0.1.32"
Cargo.toml (19 changes)
@@ -27,6 +27,7 @@ members = [
    "src/common/telemetry",
    "src/common/test-util",
    "src/common/time",
    "src/common/decimal",
    "src/common/version",
    "src/datanode",
    "src/datatypes",

@@ -35,12 +36,14 @@ members = [
    "src/log-store",
    "src/meta-client",
    "src/meta-srv",
    "src/metric-engine",
    "src/mito2",
    "src/object-store",
    "src/operator",
    "src/partition",
    "src/plugins",
    "src/promql",
    "src/puffin",
    "src/query",
    "src/script",
    "src/servers",

@@ -55,7 +58,7 @@ members = [
resolver = "2"

[workspace.package]
version = "0.4.2"
version = "0.4.3"
edition = "2021"
license = "Apache-2.0"

@@ -67,6 +70,8 @@ arrow-flight = "47.0"
arrow-schema = { version = "47.0", features = ["serde"] }
async-stream = "0.3"
async-trait = "0.1"
base64 = "0.21"
bigdecimal = "0.4.2"
chrono = { version = "0.4", features = ["serde"] }
datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
datafusion-common = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }

@@ -76,10 +81,10 @@ datafusion-physical-expr = { git = "https://github.com/apache/arrow-datafusion.g
datafusion-sql = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
datafusion-substrait = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
derive_builder = "0.12"
etcd-client = "0.11"
etcd-client = "0.12"
futures = "0.3"
futures-util = "0.3"
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "5da72f1cae6b24315e5afc87520aaf7b4d6bb872" }
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "7eb2e78be7a104d2582fbea0bcb1e019407da702" }
humantime-serde = "1.1"
itertools = "0.10"
lazy_static = "1.4"

@@ -93,6 +98,7 @@ opentelemetry-proto = { git = "https://github.com/waynexia/opentelemetry-rust.gi
] }
parquet = "47.0"
paste = "1.0"
pin-project = "1.0"
prometheus = { version = "0.13.3", features = ["process"] }
prost = "0.12"
raft-engine = { git = "https://github.com/tikv/raft-engine.git", rev = "22dfb426cd994602b57725ef080287d3e53db479" }

@@ -103,12 +109,13 @@ reqwest = { version = "0.11", default-features = false, features = [
    "rustls-tls-native-roots",
    "stream",
] }
rust_decimal = "1.33"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
smallvec = "1"
snafu = { version = "0.7", features = ["backtraces"] }
snafu = "0.7"
# on branch v0.38.x
sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "602d7878c9949e48512251c7f18695a50936e51c", features = [
sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "0fbae07d0c46dc18e3381c406d8b9b8abef6b1fd", features = [
    "visitor",
] }
strum = { version = "0.25", features = ["derive"] }

@@ -118,6 +125,7 @@ tokio-util = { version = "0.7", features = ["io-util", "compat"] }
toml = "0.7"
tonic = { version = "0.10", features = ["tls"] }
uuid = { version = "1", features = ["serde", "v4", "fast-rng"] }

## workspaces members
api = { path = "src/api" }
auth = { path = "src/auth" }

@@ -128,6 +136,7 @@ common-base = { path = "src/common/base" }
common-catalog = { path = "src/common/catalog" }
common-config = { path = "src/common/config" }
common-datasource = { path = "src/common/datasource" }
common-decimal = { path = "src/common/decimal" }
common-error = { path = "src/common/error" }
common-function = { path = "src/common/function" }
common-greptimedb-telemetry = { path = "src/common/greptimedb-telemetry" }
LICENSE (2 changes)
@@ -186,7 +186,7 @@
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright 2022 Greptime Team
   Copyright [yyyy] [name of copyright owner]

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
Makefile (4 changes)
@@ -157,11 +157,11 @@ sqlness-test: ## Run sqlness test.

.PHONY: check
check: ## Cargo check all the targets.
	cargo check --workspace --all-targets
	cargo check --workspace --all-targets --all-features

.PHONY: clippy
clippy: ## Check clippy rules.
	cargo clippy --workspace --all-targets -F pyo3_backend -- -D warnings
	cargo clippy --workspace --all-targets --all-features -- -D warnings

.PHONY: fmt-check
fmt-check: ## Check code format.
README.md (10 changes)
@@ -27,14 +27,6 @@
<a href="https://greptime.com/slack"><img src="https://img.shields.io/badge/slack-GreptimeDB-0abd59?logo=slack" alt="slack" /></a>
</p>

## Upcoming Event
Come and meet us in **KubeCon + CloudNativeCon North America 2023!**
<p align="center">
<picture>
<img alt="KubeCon + CloudNativeCon North Logo" src="./docs/banner/KCCNC_NA_2023_1000x200_Email Banner.png" width="800px">
</picture>
</p>

## What is GreptimeDB

GreptimeDB is an open-source time-series database with a special focus on

@@ -117,7 +109,7 @@ To write and query data, GreptimeDB is compatible with multiple [protocols and c
### Installation

- [Pre-built Binaries](https://greptime.com/download):
  For Linux and macOS, you can easily download pre-built binaries including official releases and nightly builds that are ready to use.
  In most cases, downloading the version without PyO3 is sufficient. However, if you plan to run scripts in CPython (and use Python packages like NumPy and Pandas), you will need to download the version with PyO3 and install a Python with the same version as the Python in the PyO3 version.
  We recommend using virtualenv for the installation process to manage multiple Python versions.
- [Docker Images](https://hub.docker.com/r/greptime/greptimedb)(**recommended**): pre-built
@@ -8,7 +8,7 @@ license.workspace = true
arrow.workspace = true
chrono.workspace = true
clap = { version = "4.0", features = ["derive"] }
client = { workspace = true }
client.workspace = true
futures-util.workspace = true
indicatif = "0.17.1"
itertools.workspace = true
@@ -105,6 +105,9 @@ global_write_buffer_reject_size = "2GB"
sst_meta_cache_size = "128MB"
# Cache size for vectors and arrow arrays (default 512MB). Setting it to 0 disables the cache.
vector_cache_size = "512MB"
# Buffer size for SST writing.
sst_write_buffer_size = "8MB"

# Log options
# [logging]
@@ -28,6 +28,13 @@ max_retry_times = 12
# Initial retry delay of procedures, increases exponentially
retry_delay = "500ms"

# Failure detectors options.
[failure_detector]
threshold = 8.0
min_std_deviation = "100ms"
acceptable_heartbeat_pause = "3000ms"
first_heartbeat_estimate = "1000ms"

# # Datanode options.
# [datanode]
# # Datanode client options.
@@ -50,10 +50,10 @@ The concept "Table" in GreptimeDB is a bit "heavy" compared to other time-series
```

The following parts will describe these implementation details:

- How to route these metric region tables and how those tables are distributed
- How to maintain the schema and other metadata of the underlying mito engine table
- How to maintain the schema of the metric engine table
- How the query goes

## Routing
docs/rfcs/2023-11-03-inverted-index.md (new file, 113 lines)
@@ -0,0 +1,113 @@
---
Feature Name: Inverted Index for SST File
Tracking Issue: TBD
Date: 2023-11-03
Author: "Zhong Zhenchi <zhongzc_arch@outlook.com>"
---

# Summary
This RFC proposes a storage-engine optimization: an inverted index over tag columns, aimed at speeding up label-selection queries on metrics.

# Introduction
In the current system, the Mito engine keeps a min-max index on the first column of the primary keys, which significantly speeds up queries on that column. The other columns, primarily tags, have no comparable index. This RFC proposes an inverted index to provide the missing filtering capability and improve overall query performance.

# Design Detail

## Inverted Index

The proposed inverted index targets the tag columns of the SST Parquet files in the Mito engine. Mapping tag values to row groups yields a compact logical structure that supports faster and more flexible queries.

When scanning SST files, pushed-down filters are applied to the corresponding tag's inverted index to determine the final set of row groups to read, which further reduces the amount of data scanned.

## Index Format

The inverted index for each SST file follows the format shown below:

```
inverted_index₀ inverted_index₁ ... inverted_indexₙ footer
```

The structure inside each inverted index is as follows:

```
bitmap₀ bitmap₁ bitmap₂ ... bitmapₙ null_bitmap fst
```

The format is capped by a footer:

```
footer_payload footer_payload_size
```

The `footer_payload` is the protobuf encoding of `InvertedIndexFooter`.

The complete format is containerized in [Puffin](https://iceberg.apache.org/puffin-spec/) with the type defined as `greptime-inverted-index-v1`.
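A minimal sketch of how a reader might slice the footer out of this layout. The fixed-width little-endian `u32` trailer for `footer_payload_size` is an assumption for illustration; the RFC does not pin down that encoding:

```rust
/// Hypothetical reader for the layout above: the blob ends with
/// `footer_payload` followed by `footer_payload_size`. We assume the size
/// is a little-endian u32 trailer, which the RFC does not specify.
fn footer_payload(blob: &[u8]) -> Option<&[u8]> {
    // Split off the 4-byte size trailer at the very end of the blob.
    let (rest, size_bytes) = blob.split_at(blob.len().checked_sub(4)?);
    let size = u32::from_le_bytes(size_bytes.try_into().ok()?) as usize;
    // The payload is the `size` bytes immediately before the trailer.
    rest.get(rest.len().checked_sub(size)?..)
}
```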
## Protobuf Details

The `InvertedIndexFooter` is defined by the following protobuf structure:

```protobuf
message InvertedIndexFooter {
  repeated InvertedIndexMeta metas = 1;
}

message InvertedIndexMeta {
  string name = 1;
  uint64 row_count_in_group = 2;
  uint64 fst_offset = 3;
  uint64 fst_size = 4;
  uint64 null_bitmap_offset = 5;
  uint64 null_bitmap_size = 6;
  InvertedIndexStats stats = 7;
}

message InvertedIndexStats {
  uint64 null_count = 1;
  uint64 distinct_count = 2;
  bytes min_value = 3;
  bytes max_value = 4;
}
```
## Bitmap

Bitmaps represent indices of fixed-size groups. Rows are divided into groups of a fixed size, recorded in the `InvertedIndexMeta` as `row_count_in_group`.

For example, when `row_count_in_group` is `4096`, each group has `4096` rows. With a total of `10000` rows there are `3` groups: the first two hold `4096` rows each and the last holds `1808`. If the indexed value occurs in rows `200` and `9000`, those rows fall into groups `0` and `2`, so the bitmap sets bits `0` and `2`.

Bitmap is implemented with [BitVec](https://docs.rs/bitvec/latest/bitvec/), chosen for its efficient representation of the dense arrays that group indices typically form. A sketch of the grouping arithmetic follows.
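The following sketch reproduces the example above with the `bitvec` crate; the function name and signature are illustrative, not taken from the codebase:

```rust
use bitvec::prelude::*;

/// Illustrative only: mark the groups that contain the given rows,
/// mirroring the example above (row_count_in_group = 4096).
fn group_bitmap(rows: &[u64], total_rows: u64, row_count_in_group: u64) -> BitVec<u8, Lsb0> {
    // Ceiling division: number of fixed-size groups covering all rows.
    let groups = ((total_rows + row_count_in_group - 1) / row_count_in_group) as usize;
    let mut bitmap = bitvec![u8, Lsb0; 0; groups];
    for &row in rows {
        bitmap.set((row / row_count_in_group) as usize, true);
    }
    bitmap
}

fn main() {
    // 10000 rows -> groups of 4096/4096/1808; rows 200 and 9000 hit groups 0 and 2.
    let bm = group_bitmap(&[200, 9000], 10_000, 4096);
    assert!(bm[0] && !bm[1] && bm[2]);
}
```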
## Finite State Transducer (FST)

[FST](https://docs.rs/fst/latest/fst/) is a highly efficient data structure for in-memory indexing. It represents ordered sets or maps whose keys are byte strings. The FST balances performance, space efficiency, and the ability to run complex lookups such as regular-expression matching.

The conventional pairing of FST keys with `u64` values is adapted here to point at the row-group bitmaps: each `u64` value is split into the bitmap's offset (higher 32 bits) and size (lower 32 bits), as sketched below.
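A small sketch of this offset/size packing, using the `fst` crate linked above; the keys and values are made up for illustration:

```rust
use fst::Map;

/// Pack a bitmap's byte offset (high 32 bits) and size (low 32 bits)
/// into the u64 stored as the FST value, as described above.
fn pack(offset: u32, size: u32) -> u64 {
    ((offset as u64) << 32) | size as u64
}

fn unpack(value: u64) -> (u32, u32) {
    ((value >> 32) as u32, value as u32)
}

fn main() -> Result<(), fst::Error> {
    // fst::Map requires keys in lexicographic order; values are packed u64s.
    let map = Map::from_iter([("host-1", pack(0, 8)), ("host-2", pack(8, 8))])?;
    assert_eq!(map.get("host-2"), Some(pack(8, 8)));
    assert_eq!(unpack(pack(8, 8)), (8, 8));
    Ok(())
}
```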
## API Design

Two APIs are designed: `InvertedIndexBuilder` for building indexes and `InvertedIndexSearcher` for querying them:

```rust
type Bytes = Vec<u8>;
type GroupId = u64;

trait InvertedIndexBuilder {
    fn add(&mut self, name: &str, value: Option<&Bytes>, group_id: GroupId) -> Result<()>;
    fn finish(&mut self) -> Result<()>;
}

enum Predicate {
    Gt(Bytes),
    GtEq(Bytes),
    Lt(Bytes),
    LtEq(Bytes),
    InList(Vec<Bytes>),
    RegexMatch(String),
}

trait InvertedIndexSearcher {
    fn search(&mut self, name: &str, predicates: &[Predicate]) -> Result<impl IntoIterator<Item = GroupId>>;
}
```
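A hypothetical caller of these traits (generic, since `search` returns an `impl` type); the tag name `host` and the values are made up for illustration:

```rust
// Hypothetical usage of the traits above; "host" and its values are invented.
fn build_and_search<B, S>(builder: &mut B, searcher: &mut S) -> Result<Vec<GroupId>>
where
    B: InvertedIndexBuilder,
    S: InvertedIndexSearcher,
{
    builder.add("host", Some(&b"host-1".to_vec()), 0)?;
    builder.add("host", Some(&b"host-2".to_vec()), 2)?;
    builder.finish()?;

    // Only groups whose bitmap matches the predicates come back; the caller
    // can then skip every other row group while scanning the SST file.
    let groups = searcher.search("host", &[Predicate::InList(vec![b"host-2".to_vec()])])?;
    Ok(groups.into_iter().collect())
}
```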
docs/rfcs/2023-11-07-region-migration.md (new file, 169 lines)
@@ -0,0 +1,169 @@
---
Feature Name: Region Migration Procedure
Tracking Issue: https://github.com/GreptimeTeam/greptimedb/issues/2700
Date: 2023-11-03
Author: "Xu Wenkang <wenymedia@gmail.com>"
---

# Summary
This RFC proposes a mechanism that gives the Meta Server the ability to move regions between Datanodes.

# Motivation
Typically, we need this ability in the following scenarios:
- Migrate hot-spot regions to an idle Datanode
- Move the failed regions to an available Datanode

# Details

```mermaid
flowchart TD
    style Start fill:#85CB90,color:#fff
    style End fill:#85CB90,color:#fff
    style SelectCandidate fill:#F38488,color:#fff
    style OpenCandidate fill:#F38488,color:#fff
    style UpdateMetadataDown fill:#F38488,color:#fff
    style UpdateMetadataUp fill:#F38488,color:#fff
    style UpdateMetadataRollback fill:#F38488,color:#fff
    style DowngradeLeader fill:#F38488,color:#fff
    style UpgradeCandidate fill:#F38488,color:#fff

    Start[Start]
    SelectCandidate[Select Candidate]
    UpdateMetadataDown["`Update Metadata(Down)
    1. Downgrade Leader
    `"]
    DowngradeLeader["`Downgrade Leader
    1. Become Follower
    2. Return **last_entry_id**
    `"]
    UpgradeCandidate["`Upgrade Candidate
    1. Replay to **last_entry_id**
    2. Become Leader
    `"]
    UpdateMetadataUp["`Update Metadata(Up)
    1. Switch Leader
    2.1. Remove Old Leader(Opt.)
    2.2. Move Old Leader to Follower(Opt.)
    `"]
    UpdateMetadataRollback["`Update Metadata(Rollback)
    1. Upgrade old Leader
    `"]
    End
    AnyCandidate{Available?}
    OpenCandidate["Open Candidate"]
    CloseOldLeader["Close Old Leader"]

    Start
    --> SelectCandidate
    --> AnyCandidate
    --> |Yes| UpdateMetadataDown
    --> I1["Invalid Frontend Cache"]
    --> DowngradeLeader
    --> UpgradeCandidate
    --> UpdateMetadataUp
    --> I2["Invalid Frontend Cache"]
    --> End

    UpgradeCandidate
    --> UpdateMetadataRollback
    --> I3["Invalid Frontend Cache"]
    --> End

    I2
    --> CloseOldLeader
    --> End

    AnyCandidate
    --> |No| OpenCandidate
    --> UpdateMetadataDown
```

**Only the red nodes persist state after they succeed**; the other nodes do not persist state (excluding the Start and End nodes).

## Steps

**The persistent context:** shared by every step and available after recovery. It is only updated and stored after a red node succeeds.

Values:
- `region_id`: The target leader region.
- `peer`: The target datanode.
- `close_old_leader`: Indicates whether to close the old leader region.
- `leader_may_unreachable`: Used to support the failover procedure.

**The volatile context:** shared by every step and available while executing (including retries). It is dropped if the procedure runner crashes. A sketch of the persistent part as a struct follows.
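A minimal sketch of the persistent context as a Rust struct. The field names follow the list above; the concrete types and the serde derive are assumptions, and the real types in GreptimeDB are richer:

```rust
use serde::{Deserialize, Serialize};

/// Sketch of the persistent context; stored only after a red node succeeds.
/// Types are illustrative: the region id and peer are richer in practice.
#[derive(Debug, Clone, Serialize, Deserialize)]
struct PersistentContext {
    /// The target leader region.
    region_id: u64,
    /// The target datanode.
    peer: String,
    /// Whether the old leader region should be closed at the end.
    close_old_leader: bool,
    /// Set when the old leader looks unreachable; feeds the failover procedure.
    leader_may_unreachable: bool,
}
```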
### Select Candidate

The persistent state: the selected candidate region.

### Update Metadata(Down)

**The persistent context:**
- The (latest/updated) `version` of `TableRouteValue`; it will be used in the `Update Metadata(Up)` step.

### Downgrade Leader
This step sends an instruction via heartbeat and performs:
1. Downgrades the leader region.
2. Retrieves the `last_entry_id` (if available).

If the target leader region is not found:
- Sets `close_old_leader` to true.
- Sets `leader_may_unreachable` to true.

If the target Datanode is unreachable:
- Waits for the region lease to expire.
- Sets `close_old_leader` to true.
- Sets `leader_may_unreachable` to true.

**The persistent context:**
None

**The persistent state:**
- `last_entry_id`

*Passed to the next step.

### Upgrade Candidate
This step sends an instruction via heartbeat and performs:
1. Replays the WAL up to `last_entry_id`.
2. Upgrades the candidate region.

If the target region is not found:
- Rolls back.
- Notifies the failover detector if `leader_may_unreachable` == true.
- Exits the procedure.

If the target Datanode is unreachable:
- Rolls back.
- Notifies the failover detector if `leader_may_unreachable` == true.
- Exits the procedure.

**The persistent context:**
None

### Update Metadata(Up)
This step performs:
1. Switches the leader.
2. Removes the old leader (Opt.).
3. Moves the old leader to follower (Opt.).

The `TableRouteValue` version should equal the `TableRouteValue` `version` in the persistent context. Otherwise, it verifies whether the `TableRouteValue` has already been updated.

**The persistent context:**
None

### Close Old Leader(Opt.)
This step sends a close-region instruction via heartbeat.

If the target leader region is not found:
- Ignore.

If the target Datanode is unreachable:
- Ignore.

### Open Candidate(Opt.)
This step sends an open-region instruction via heartbeat and waits for conditions to be met (typically, that the `last_entry_id` of the candidate region is very close to, or caught up with, that of the leader region).

If the target Datanode is unreachable:
- Exits the procedure.
licenserc.toml (new file, 24 lines)
@@ -0,0 +1,24 @@
# Copyright 2023 Greptime Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

headerPath = "Apache-2.0.txt"

includes = [
    "*.rs",
    "*.py",
]

[properties]
inceptionYear = 2023
copyrightOwner = "Greptime Team"
@@ -5,14 +5,14 @@ edition.workspace = true
license.workspace = true

[dependencies]
common-base = { workspace = true }
common-error = { workspace = true }
common-macro = { workspace = true }
common-time = { workspace = true }
datatypes = { workspace = true }
common-base.workspace = true
common-error.workspace = true
common-macro.workspace = true
common-time.workspace = true
datatypes.workspace = true
greptime-proto.workspace = true
prost.workspace = true
snafu = { version = "0.7", features = ["backtraces"] }
snafu.workspace = true
tonic.workspace = true

[build-dependencies]
@@ -158,7 +158,8 @@ impl TryFrom<ConcreteDataType> for ColumnDataTypeWrapper {
        },
        ConcreteDataType::Null(_)
        | ConcreteDataType::List(_)
        | ConcreteDataType::Dictionary(_) => {
        | ConcreteDataType::Dictionary(_)
        | ConcreteDataType::Decimal128(_) => {
            return error::IntoColumnDataTypeSnafu { from: datatype }.fail()
        }
    });

@@ -341,7 +342,7 @@ pub fn push_vals(column: &mut Column, origin_count: usize, vector: VectorRef) {
        TimeUnit::Microsecond => values.duration_microsecond_values.push(val.value()),
        TimeUnit::Nanosecond => values.duration_nanosecond_values.push(val.value()),
        },
        Value::List(_) => unreachable!(),
        Value::List(_) | Value::Decimal128(_) => unreachable!(),
    });
    column.null_mask = null_mask.into_vec();
}

@@ -522,7 +523,10 @@ pub fn pb_values_to_vector_ref(data_type: &ConcreteDataType, values: Values) ->
            values.duration_nanosecond_values,
        )),
        },
        ConcreteDataType::Null(_) | ConcreteDataType::List(_) | ConcreteDataType::Dictionary(_) => {
        ConcreteDataType::Null(_)
        | ConcreteDataType::List(_)
        | ConcreteDataType::Dictionary(_)
        | ConcreteDataType::Decimal128(_) => {
            unreachable!()
        }
    }

@@ -692,7 +696,10 @@ pub fn pb_values_to_values(data_type: &ConcreteDataType, values: Values) -> Vec<
            .into_iter()
            .map(|v| Value::Duration(Duration::new_nanosecond(v)))
            .collect(),
        ConcreteDataType::Null(_) | ConcreteDataType::List(_) | ConcreteDataType::Dictionary(_) => {
        ConcreteDataType::Null(_)
        | ConcreteDataType::List(_)
        | ConcreteDataType::Dictionary(_)
        | ConcreteDataType::Decimal128(_) => {
            unreachable!()
        }
    }

@@ -816,7 +823,7 @@ pub fn to_proto_value(value: Value) -> Option<v1::Value> {
            value_data: Some(ValueData::DurationNanosecondValue(v.value())),
        },
        },
        Value::List(_) => return None,
        Value::List(_) | Value::Decimal128(_) => return None,
    };

    Some(proto_value)

@@ -908,9 +915,10 @@ pub fn to_column_data_type(data_type: &ConcreteDataType) -> Option<ColumnDataTyp
            ColumnDataType::IntervalMonthDayNano
        }
        ConcreteDataType::Interval(IntervalType::DayTime(_)) => ColumnDataType::IntervalDayTime,
        ConcreteDataType::Null(_) | ConcreteDataType::List(_) | ConcreteDataType::Dictionary(_) => {
            return None
        }
        ConcreteDataType::Null(_)
        | ConcreteDataType::List(_)
        | ConcreteDataType::Dictionary(_)
        | ConcreteDataType::Decimal128(_) => return None,
    };

    Some(column_data_type)

@@ -974,7 +982,7 @@ pub fn value_to_grpc_value(value: Value) -> GrpcValue {
            TimeUnit::Microsecond => ValueData::DurationMicrosecondValue(v.value()),
            TimeUnit::Nanosecond => ValueData::DurationNanosecondValue(v.value()),
        }),
        Value::List(_) => unreachable!(),
        Value::List(_) | Value::Decimal128(_) => unreachable!(),
    },
}
}
@@ -4,7 +4,7 @@
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||

@@ -4,13 +4,14 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use secrecy::ExposeSecret;

use crate::error::{

@@ -8,28 +8,28 @@ license.workspace = true
testing = []

[dependencies]
api = { workspace = true }
api.workspace = true
arc-swap = "1.0"
arrow-schema.workspace = true
async-stream.workspace = true
async-trait = "0.1"
common-catalog = { workspace = true }
common-error = { workspace = true }
common-grpc = { workspace = true }
common-macro = { workspace = true }
common-meta = { workspace = true }
common-query = { workspace = true }
common-recordbatch = { workspace = true }
common-runtime = { workspace = true }
common-telemetry = { workspace = true }
common-time = { workspace = true }
common-catalog.workspace = true
common-error.workspace = true
common-grpc.workspace = true
common-macro.workspace = true
common-meta.workspace = true
common-query.workspace = true
common-recordbatch.workspace = true
common-runtime.workspace = true
common-telemetry.workspace = true
common-time.workspace = true
dashmap = "5.4"
datafusion.workspace = true
datatypes = { workspace = true }
datatypes.workspace = true
futures = "0.3"
futures-util.workspace = true
lazy_static.workspace = true
meta-client = { workspace = true }
meta-client.workspace = true
moka = { workspace = true, features = ["future"] }
parking_lot = "0.12"
partition.workspace = true
@@ -37,17 +37,17 @@ prometheus.workspace = true
regex.workspace = true
serde.workspace = true
serde_json = "1.0"
session = { workspace = true }
snafu = { version = "0.7", features = ["backtraces"] }
store-api = { workspace = true }
table = { workspace = true }
session.workspace = true
snafu.workspace = true
store-api.workspace = true
table.workspace = true
tokio.workspace = true

[dev-dependencies]
catalog = { workspace = true, features = ["testing"] }
chrono.workspace = true
common-test-util = { workspace = true }
log-store = { workspace = true }
object-store = { workspace = true }
storage = { workspace = true }
common-test-util.workspace = true
log-store.workspace = true
object-store.workspace = true
storage.workspace = true
tokio.workspace = true

@@ -8,22 +8,22 @@ license.workspace = true
testing = []

[dependencies]
api = { workspace = true }
api.workspace = true
arrow-flight.workspace = true
async-stream.workspace = true
async-trait.workspace = true
common-base = { workspace = true }
common-catalog = { workspace = true }
common-error = { workspace = true }
common-grpc = { workspace = true }
common-macro = { workspace = true }
common-meta = { workspace = true }
common-query = { workspace = true }
common-recordbatch = { workspace = true }
common-telemetry = { workspace = true }
common-time = { workspace = true }
common-base.workspace = true
common-catalog.workspace = true
common-error.workspace = true
common-grpc.workspace = true
common-macro.workspace = true
common-meta.workspace = true
common-query.workspace = true
common-recordbatch.workspace = true
common-telemetry.workspace = true
common-time.workspace = true
datafusion.workspace = true
datatypes = { workspace = true }
datatypes.workspace = true
derive_builder.workspace = true
enum_dispatch = "0.3"
futures-util.workspace = true
@@ -33,17 +33,17 @@ parking_lot = "0.12"
prometheus.workspace = true
prost.workspace = true
rand.workspace = true
session = { workspace = true }
session.workspace = true
snafu.workspace = true
tokio-stream = { version = "0.1", features = ["net"] }
tokio.workspace = true
tonic.workspace = true

[dev-dependencies]
common-grpc-expr = { workspace = true }
datanode = { workspace = true }
common-grpc-expr.workspace = true
datanode.workspace = true
derive-new = "0.5"
substrait = { workspace = true }
substrait.workspace = true
tracing = "0.1"
tracing-subscriber = { version = "0.3", features = ["env-filter"] }

@@ -16,63 +16,63 @@ tokio-console = ["common-telemetry/tokio-console"]
anymap = "1.0.0-beta.2"
async-trait.workspace = true
auth.workspace = true
catalog = { workspace = true }
catalog.workspace = true
chrono.workspace = true
clap = { version = "3.1", features = ["derive"] }
client = { workspace = true }
common-base = { workspace = true }
common-catalog = { workspace = true }
common-config = { workspace = true }
common-error = { workspace = true }
common-macro = { workspace = true }
common-meta = { workspace = true }
common-procedure = { workspace = true }
common-query = { workspace = true }
common-recordbatch = { workspace = true }
client.workspace = true
common-base.workspace = true
common-catalog.workspace = true
common-config.workspace = true
common-error.workspace = true
common-macro.workspace = true
common-meta.workspace = true
common-procedure.workspace = true
common-query.workspace = true
common-recordbatch.workspace = true
common-telemetry = { workspace = true, features = [
    "deadlock_detection",
] }
config = "0.13"
datanode = { workspace = true }
datatypes = { workspace = true }
datanode.workspace = true
datatypes.workspace = true
either = "1.8"
etcd-client.workspace = true
file-engine = { workspace = true }
frontend = { workspace = true }
file-engine.workspace = true
frontend.workspace = true
futures.workspace = true
lazy_static.workspace = true
meta-client = { workspace = true }
meta-srv = { workspace = true }
mito2 = { workspace = true }
meta-client.workspace = true
meta-srv.workspace = true
mito2.workspace = true
nu-ansi-term = "0.46"
partition = { workspace = true }
partition.workspace = true
plugins.workspace = true
prometheus.workspace = true
prost.workspace = true
query = { workspace = true }
query.workspace = true
rand.workspace = true
regex.workspace = true
rustyline = "10.1"
serde.workspace = true
serde_json.workspace = true
servers = { workspace = true }
session = { workspace = true }
servers.workspace = true
session.workspace = true
snafu.workspace = true
substrait = { workspace = true }
table = { workspace = true }
substrait.workspace = true
table.workspace = true
tokio.workspace = true
toml.workspace = true

[target.'cfg(not(windows))'.dependencies]
tikv-jemallocator = "0.5"

[dev-dependencies]
common-test-util = { workspace = true }
common-test-util.workspace = true
serde.workspace = true
temp-env = "0.3"
toml.workspace = true

[target.'cfg(not(windows))'.dev-dependencies]
rexpect = "0.5"

[build-dependencies]
common-version = { workspace = true }
common-version.workspace = true

@@ -20,14 +20,13 @@ use std::time::Duration;
use async_trait::async_trait;
use clap::Parser;
use common_meta::key::{TableMetadataManager, TableMetadataManagerRef};
use common_meta::kv_backend::etcd::EtcdStore;
use common_meta::peer::Peer;
use common_meta::rpc::router::{Region, RegionRoute};
use common_meta::table_name::TableName;
use common_telemetry::info;
use datatypes::data_type::ConcreteDataType;
use datatypes::schema::{ColumnSchema, RawSchema};
use meta_srv::service::store::etcd::EtcdStore;
use meta_srv::service::store::kv::KvBackendAdapter;
use rand::Rng;
use table::metadata::{RawTableInfo, RawTableMeta, TableId, TableIdent, TableType};

@@ -64,9 +63,7 @@ impl BenchTableMetadataCommand {
    pub async fn build(&self) -> Result<Instance> {
        let etcd_store = EtcdStore::with_endpoints([&self.etcd_addr]).await.unwrap();

        let table_metadata_manager = Arc::new(TableMetadataManager::new(KvBackendAdapter::wrap(
            etcd_store,
        )));
        let table_metadata_manager = Arc::new(TableMetadataManager::new(etcd_store));

        let tool = BenchTableMetadata {
            table_metadata_manager,

@@ -17,6 +17,8 @@ use std::sync::Arc;

use async_trait::async_trait;
use clap::{Parser, ValueEnum};
use client::api::v1::auth_header::AuthScheme;
use client::api::v1::Basic;
use client::{Client, Database, DEFAULT_SCHEMA_NAME};
use common_query::Output;
use common_recordbatch::util::collect;
@@ -25,13 +27,14 @@ use datatypes::scalars::ScalarVector;
use datatypes::vectors::{StringVector, Vector};
use snafu::{OptionExt, ResultExt};
use tokio::fs::File;
use tokio::io::AsyncWriteExt;
use tokio::io::{AsyncWriteExt, BufWriter};
use tokio::sync::Semaphore;

use crate::cli::{Instance, Tool};
use crate::error::{
    CollectRecordBatchesSnafu, ConnectServerSnafu, EmptyResultSnafu, Error, FileIoSnafu,
    InvalidDatabaseNameSnafu, NotDataFromOutputSnafu, RequestDatabaseSnafu, Result,
    IllegalConfigSnafu, InvalidDatabaseNameSnafu, NotDataFromOutputSnafu, RequestDatabaseSnafu,
    Result,
};

type TableReference = (String, String, String);
@@ -70,6 +73,10 @@ pub struct ExportCommand {
    /// Things to export
    #[clap(long, short = 't', value_enum)]
    target: ExportTarget,

    /// basic authentication for connecting to the server
    #[clap(long)]
    auth_basic: Option<String>,
}

impl ExportCommand {
@@ -82,12 +89,22 @@ impl ExportCommand {
            addr: self.addr.clone(),
        })?;
        let (catalog, schema) = split_database(&self.database)?;
        let database_client = Database::new(
        let mut database_client = Database::new(
            catalog.clone(),
            schema.clone().unwrap_or(DEFAULT_SCHEMA_NAME.to_string()),
            client,
        );

        if let Some(auth_basic) = &self.auth_basic {
            let (username, password) = auth_basic.split_once(':').context(IllegalConfigSnafu {
                msg: "auth_basic cannot be split by ':'".to_string(),
            })?;
            database_client.set_auth(AuthScheme::Basic(Basic {
                username: username.to_string(),
                password: password.to_string(),
            }));
        }

        Ok(Instance::Tool(Box::new(Export {
            client: database_client,
            catalog,
@@ -141,6 +158,9 @@ impl Export {
        let mut result = Vec::with_capacity(schemas.len());
        for i in 0..schemas.len() {
            let schema = schemas.get_data(i).unwrap().to_owned();
            if schema == common_catalog::consts::INFORMATION_SCHEMA_NAME {
                continue;
            }
            result.push((self.catalog.clone(), schema));
        }
        Ok(result)
@@ -326,25 +346,30 @@ impl Export {

        let copy_from_file =
            Path::new(&self.output_dir).join(format!("{catalog}-{schema}_copy_from.sql"));
        let mut file = File::create(copy_from_file).await.context(FileIoSnafu)?;
        let mut writer =
            BufWriter::new(File::create(copy_from_file).await.context(FileIoSnafu)?);

        let copy_from_sql = dir_filenames
            .into_iter()
            .map(|file| {
                let file = file.unwrap();
                let filename = file.file_name().into_string().unwrap();
        for table_file in dir_filenames {
            let table_file = table_file.unwrap();
            let table_name = table_file
                .file_name()
                .into_string()
                .unwrap()
                .replace(".parquet", "");

                format!(
                    "copy {} from '{}' with (format='parquet');\n",
                    filename.replace(".parquet", ""),
                    file.path().to_str().unwrap()
            writer
                .write(
                    format!(
                        "copy {} from '{}' with (format='parquet');\n",
                        table_name,
                        table_file.path().to_str().unwrap()
                    )
                    .as_bytes(),
                )
            })
            .collect::<Vec<_>>()
            .join("");
        file.write_all(copy_from_sql.as_bytes())
            .await
            .context(FileIoSnafu)?;
                .await
                .context(FileIoSnafu)?;
        }
        writer.flush().await.context(FileIoSnafu)?;

        info!("finished exporting {catalog}.{schema} copy_from.sql");

@@ -27,6 +27,8 @@ use common_meta::key::table_name::{TableNameKey, TableNameValue};
use common_meta::key::table_region::{TableRegionKey, TableRegionValue};
use common_meta::key::table_route::{TableRouteKey, TableRouteValue as NextTableRouteValue};
use common_meta::key::{RegionDistribution, TableMetaKey};
use common_meta::kv_backend::etcd::EtcdStore;
use common_meta::kv_backend::KvBackendRef;
use common_meta::range_stream::PaginationStream;
use common_meta::rpc::router::TableRoute;
use common_meta::rpc::store::{BatchDeleteRequest, BatchPutRequest, PutRequest, RangeRequest};
@@ -35,8 +37,6 @@ use common_meta::util::get_prefix_end_key;
use common_telemetry::info;
use etcd_client::Client;
use futures::TryStreamExt;
use meta_srv::service::store::etcd::EtcdStore;
use meta_srv::service::store::kv::{KvBackendAdapter, KvStoreRef};
use prost::Message;
use snafu::ResultExt;
use v1_helper::{CatalogKey as v1CatalogKey, SchemaKey as v1SchemaKey, TableGlobalValue};
@@ -81,7 +81,7 @@ impl UpgradeCommand {
}

struct MigrateTableMetadata {
    etcd_store: KvStoreRef,
    etcd_store: KvBackendRef,
    dryrun: bool,

    skip_table_global_keys: bool,
@@ -123,7 +123,7 @@ impl MigrateTableMetadata {
        info!("Start scanning key from: {}", String::from_utf8_lossy(&key));

        let mut stream = PaginationStream::new(
            KvBackendAdapter::wrap(self.etcd_store.clone()),
            self.etcd_store.clone(),
            RangeRequest::new().with_range(key, range_end),
            PAGE_SIZE,
            Arc::new(|kv: KeyValue| {
@@ -182,7 +182,7 @@ impl MigrateTableMetadata {
        let mut keys = Vec::new();
        info!("Start scanning key from: {}", String::from_utf8_lossy(&key));
        let mut stream = PaginationStream::new(
            KvBackendAdapter::wrap(self.etcd_store.clone()),
            self.etcd_store.clone(),
            RangeRequest::new().with_range(key, range_end),
            PAGE_SIZE,
            Arc::new(|kv: KeyValue| {
@@ -234,7 +234,7 @@ impl MigrateTableMetadata {
        let mut keys = Vec::new();
        info!("Start scanning key from: {}", String::from_utf8_lossy(&key));
        let mut stream = PaginationStream::new(
            KvBackendAdapter::wrap(self.etcd_store.clone()),
            self.etcd_store.clone(),
            RangeRequest::new().with_range(key, range_end),
            PAGE_SIZE,
            Arc::new(|kv: KeyValue| {
@@ -284,7 +284,7 @@ impl MigrateTableMetadata {

        info!("Start scanning key from: {}", String::from_utf8_lossy(&key));
        let mut stream = PaginationStream::new(
            KvBackendAdapter::wrap(self.etcd_store.clone()),
            self.etcd_store.clone(),
            RangeRequest::new().with_range(key, range_end.clone()),
            PAGE_SIZE,
            Arc::new(|kv: KeyValue| {

@@ -191,7 +191,6 @@ mod tests {
    use std::io::Write;
    use std::time::Duration;

    use common_base::readable_size::ReadableSize;
    use common_test_util::temp_dir::create_named_temp_file;
    use datanode::config::{CompactionConfig, FileConfig, ObjectStoreConfig, RegionManifestConfig};
    use servers::heartbeat_options::HeartbeatOptions;
@@ -300,7 +299,6 @@ mod tests {
            max_inflight_tasks: 3,
            max_files_in_level0: 7,
            max_purge_tasks: 32,
            sst_write_buffer_size: ReadableSize::mb(8),
        },
        options.storage.compaction,
    );

@@ -201,7 +201,7 @@ impl StartCommand {
            .context(StartFrontendSnafu)?;

        instance
            .build_servers(&opts)
            .build_servers(opts)
            .await
            .context(StartFrontendSnafu)?;

@@ -216,6 +216,12 @@ mod tests {
[logging]
level = "debug"
dir = "/tmp/greptimedb/test/logs"

[failure_detector]
threshold = 8.0
min_std_deviation = "100ms"
acceptable_heartbeat_pause = "3000ms"
first_heartbeat_estimate = "1000ms"
"#;
        write!(file, "{}", toml_str).unwrap();

@@ -234,6 +240,25 @@ mod tests {
        assert_eq!(SelectorType::LeaseBased, options.selector);
        assert_eq!("debug", options.logging.level.as_ref().unwrap());
        assert_eq!("/tmp/greptimedb/test/logs".to_string(), options.logging.dir);
        assert_eq!(8.0, options.failure_detector.threshold);
        assert_eq!(
            100.0,
            options.failure_detector.min_std_deviation.as_millis() as f32
        );
        assert_eq!(
            3000,
            options
                .failure_detector
                .acceptable_heartbeat_pause
                .as_millis()
        );
        assert_eq!(
            1000,
            options
                .failure_detector
                .first_heartbeat_estimate
                .as_millis()
        );
    }

    #[test]

@@ -12,11 +12,12 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use common_config::KvStoreConfig;
use common_config::KvBackendConfig;
use common_telemetry::logging::LoggingOptions;
use config::{Config, Environment, File, FileFormat};
use datanode::config::{DatanodeOptions, ProcedureConfig};
use frontend::frontend::FrontendOptions;
use frontend::error::{Result as FeResult, TomlFormatSnafu};
use frontend::frontend::{FrontendOptions, TomlSerializable};
use meta_srv::metasrv::MetaSrvOptions;
use serde::{Deserialize, Serialize};
use snafu::ResultExt;
@@ -27,15 +28,28 @@ pub const ENV_VAR_SEP: &str = "__";
pub const ENV_LIST_SEP: &str = ",";

/// Options mixed up from datanode, frontend and metasrv.
#[derive(Serialize)]
pub struct MixOptions {
    pub data_home: String,
    pub procedure: ProcedureConfig,
    pub metadata_store: KvStoreConfig,
    pub metadata_store: KvBackendConfig,
    pub frontend: FrontendOptions,
    pub datanode: DatanodeOptions,
    pub logging: LoggingOptions,
}

impl From<MixOptions> for FrontendOptions {
    fn from(value: MixOptions) -> Self {
        value.frontend
    }
}

impl TomlSerializable for MixOptions {
    fn to_toml(&self) -> FeResult<String> {
        toml::to_string(self).context(TomlFormatSnafu)
    }
}

pub enum Options {
    Datanode(Box<DatanodeOptions>),
    Frontend(Box<FrontendOptions>),
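The `TomlSerializable` hook above is small but load-bearing: `MixOptions` derives `Serialize`, so the whole mixed configuration can be rendered back to TOML in one pass. A minimal sketch of how it might be exercised; the caller `dump_options` is hypothetical, not from the source:

```rust
// Hypothetical helper: serialize the mixed standalone options
// (procedure, metadata_store, frontend, datanode, logging) to TOML.
fn dump_options(opts: &MixOptions) -> FeResult<String> {
    opts.to_toml()
}
```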
@@ -19,7 +19,7 @@ use catalog::kvbackend::KvBackendCatalogManager;
use catalog::CatalogManagerRef;
use clap::Parser;
use common_base::Plugins;
use common_config::{metadata_store_dir, KvStoreConfig, WalConfig};
use common_config::{metadata_store_dir, KvBackendConfig, WalConfig};
use common_meta::cache_invalidator::DummyKvCacheInvalidator;
use common_meta::kv_backend::KvBackendRef;
use common_procedure::ProcedureManagerRef;
@@ -97,7 +97,7 @@ pub struct StandaloneOptions {
    pub prom_store: PromStoreOptions,
    pub wal: WalConfig,
    pub storage: StorageConfig,
    pub metadata_store: KvStoreConfig,
    pub metadata_store: KvBackendConfig,
    pub procedure: ProcedureConfig,
    pub logging: LoggingOptions,
    pub user_provider: Option<String>,
@@ -119,7 +119,7 @@ impl Default for StandaloneOptions {
            prom_store: PromStoreOptions::default(),
            wal: WalConfig::default(),
            storage: StorageConfig::default(),
            metadata_store: KvStoreConfig::default(),
            metadata_store: KvBackendConfig::default(),
            procedure: ProcedureConfig::default(),
            logging: LoggingOptions::default(),
            user_provider: None,
@@ -316,13 +316,13 @@ impl StartCommand {
    #[allow(unused_variables)]
    #[allow(clippy::diverging_sub_expression)]
    async fn build(self, opts: MixOptions) -> Result<Instance> {
        let mut fe_opts = opts.frontend;
        let mut fe_opts = opts.frontend.clone();
        #[allow(clippy::unnecessary_mut_passed)]
        let fe_plugins = plugins::setup_frontend_plugins(&mut fe_opts)
        let fe_plugins = plugins::setup_frontend_plugins(&mut fe_opts) // mut ref is MUST, DO NOT change it
            .await
            .context(StartFrontendSnafu)?;

        let dn_opts = opts.datanode;
        let dn_opts = opts.datanode.clone();

        info!("Standalone start command: {:#?}", self);
        info!(
@@ -336,23 +336,26 @@ impl StartCommand {
        })?;

        let metadata_dir = metadata_store_dir(&opts.data_home);
        let (kv_store, procedure_manager) = FeInstance::try_build_standalone_components(
        let (kv_backend, procedure_manager) = FeInstance::try_build_standalone_components(
            metadata_dir,
            opts.metadata_store,
            opts.procedure,
            opts.metadata_store.clone(),
            opts.procedure.clone(),
        )
        .await
        .context(StartFrontendSnafu)?;

        let datanode =
            DatanodeBuilder::new(dn_opts.clone(), Some(kv_store.clone()), Default::default())
                .build()
                .await
                .context(StartDatanodeSnafu)?;
        let datanode = DatanodeBuilder::new(
            dn_opts.clone(),
            Some(kv_backend.clone()),
            Default::default(),
        )
        .build()
        .await
        .context(StartDatanodeSnafu)?;
        let region_server = datanode.region_server();

        let catalog_manager = KvBackendCatalogManager::new(
            kv_store.clone(),
            kv_backend.clone(),
            Arc::new(DummyKvCacheInvalidator),
            Arc::new(StandaloneDatanodeManager(region_server.clone())),
        );
@@ -366,7 +369,7 @@ impl StartCommand {
        // TODO: build frontend instance like in distributed mode
        let mut frontend = build_frontend(
            fe_plugins,
            kv_store,
            kv_backend,
            procedure_manager.clone(),
            catalog_manager,
            region_server,
@@ -374,7 +377,7 @@ impl StartCommand {
        .await?;

        frontend
            .build_servers(&fe_opts)
            .build_servers(opts)
            .await
            .context(StartFrontendSnafu)?;

@@ -389,13 +392,13 @@ impl StartCommand {
/// Build frontend instance in standalone mode
async fn build_frontend(
    plugins: Plugins,
    kv_store: KvBackendRef,
    kv_backend: KvBackendRef,
    procedure_manager: ProcedureManagerRef,
    catalog_manager: CatalogManagerRef,
    region_server: RegionServer,
) -> Result<FeInstance> {
    let frontend_instance = FeInstance::try_new_standalone(
        kv_store,
        kv_backend,
        procedure_manager,
        catalog_manager,
        plugins,

@@ -8,8 +8,8 @@ license.workspace = true
anymap = "1.0.0-beta.2"
bitvec = "1.0"
bytes = { version = "1.1", features = ["serde"] }
common-error = { workspace = true }
common-macro = { workspace = true }
common-error.workspace = true
common-macro.workspace = true
paste = "1.0"
serde = { version = "1.0", features = ["derive"] }
snafu.workspace = true

@@ -5,11 +5,9 @@ edition.workspace = true
license.workspace = true

[dependencies]
common-error = { workspace = true }
common-macro = { workspace = true }
serde.workspace = true
serde_json = "1.0"
snafu = { version = "0.7", features = ["backtraces"] }
common-error.workspace = true
common-macro.workspace = true
snafu.workspace = true

[dev-dependencies]
chrono.workspace = true

@@ -54,14 +54,14 @@ pub fn metadata_store_dir(store_dir: &str) -> String {

#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
#[serde(default)]
pub struct KvStoreConfig {
pub struct KvBackendConfig {
    // Kv file size in bytes
    pub file_size: ReadableSize,
    // Kv purge threshold in bytes
    pub purge_threshold: ReadableSize,
}

impl Default for KvStoreConfig {
impl Default for KvBackendConfig {
    fn default() -> Self {
        Self {
            // log file size 256MB

@@ -17,14 +17,14 @@ async-compression = { version = "0.3", features = [
] }
async-trait.workspace = true
bytes = "1.1"
common-error = { workspace = true }
common-macro = { workspace = true }
common-runtime = { workspace = true }
common-error.workspace = true
common-macro.workspace = true
common-runtime.workspace = true
datafusion.workspace = true
derive_builder.workspace = true
futures.workspace = true
lazy_static.workspace = true
object-store = { workspace = true }
object-store.workspace = true
orc-rust = "0.2"
paste = "1.0"
regex = "1.7"
@@ -36,4 +36,4 @@ tokio.workspace = true
url = "2.3"

[dev-dependencies]
common-test-util = { workspace = true }
common-test-util.workspace = true

@@ -4,7 +4,7 @@
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
15
src/common/decimal/Cargo.toml
Normal file
@@ -0,0 +1,15 @@
[package]
name = "common-decimal"
version.workspace = true
edition.workspace = true
license.workspace = true

[dependencies]
arrow.workspace = true
bigdecimal.workspace = true
common-error.workspace = true
common-macro.workspace = true
rust_decimal.workspace = true
serde.workspace = true
serde_json = "1.0"
snafu.workspace = true
417
src/common/decimal/src/decimal128.rs
Normal file
@@ -0,0 +1,417 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::fmt::Display;
use std::hash::Hash;
use std::str::FromStr;

use bigdecimal::{BigDecimal, ToPrimitive};
use rust_decimal::Decimal as RustDecimal;
use serde::{Deserialize, Serialize};
use snafu::ResultExt;

use crate::error::{
    self, BigDecimalOutOfRangeSnafu, Error, InvalidPrecisionOrScaleSnafu, ParseBigDecimalStrSnafu,
    ParseRustDecimalStrSnafu,
};

/// The maximum precision for [Decimal128] values
pub const DECIMAL128_MAX_PRECISION: u8 = 38;

/// The maximum scale for [Decimal128] values
pub const DECIMAL128_MAX_SCALE: i8 = 38;

/// The default scale for [Decimal128] values
pub const DECIMAL128_DEFAULT_SCALE: i8 = 10;

/// The maximum bytes length that an accurate RustDecimal can represent
const BYTES_TO_OVERFLOW_RUST_DECIMAL: usize = 28;

/// 128bit decimal, using an i128 to represent the decimal.
///
/// **precision**: the total number of digits in the number; its range is \[1, 38\].
///
/// **scale**: the number of digits to the right of the decimal point; its range is \[0, precision\].
#[derive(Debug, Eq, Copy, Clone, Serialize, Deserialize)]
pub struct Decimal128 {
    value: i128,
    precision: u8,
    scale: i8,
}

impl Decimal128 {
    /// Create a new Decimal128 from i128, precision and scale without any validation.
    pub fn new(value: i128, precision: u8, scale: i8) -> Self {
        // debug assert precision and scale is valid
        debug_assert!(
            precision > 0 && precision <= DECIMAL128_MAX_PRECISION,
            "precision should be in [1, {}]",
            DECIMAL128_MAX_PRECISION
        );
        debug_assert!(
            scale >= 0 && scale <= precision as i8,
            "scale should be in [0, precision]"
        );
        Self {
            value,
            precision,
            scale,
        }
    }

    /// Try new Decimal128 from i128, precision and scale with validation.
    pub fn try_new(value: i128, precision: u8, scale: i8) -> error::Result<Self> {
        // make sure the precision and scale is valid.
        valid_precision_and_scale(precision, scale)?;
        Ok(Self {
            value,
            precision,
            scale,
        })
    }

    /// Return underlying value without precision and scale
    pub fn val(&self) -> i128 {
        self.value
    }

    /// Returns the precision of this decimal.
    pub fn precision(&self) -> u8 {
        self.precision
    }

    /// Returns the scale of this decimal.
    pub fn scale(&self) -> i8 {
        self.scale
    }

    /// Convert to ScalarValue
    pub fn to_scalar_value(&self) -> (Option<i128>, u8, i8) {
        (Some(self.value), self.precision, self.scale)
    }
}

/// The default value of Decimal128 is 0, and its precision is 1 and scale is 0.
impl Default for Decimal128 {
    fn default() -> Self {
        Self {
            value: 0,
            precision: 1,
            scale: 0,
        }
    }
}

impl PartialEq for Decimal128 {
    fn eq(&self, other: &Self) -> bool {
        self.precision.eq(&other.precision)
            && self.scale.eq(&other.scale)
            && self.value.eq(&other.value)
    }
}

// Two decimal values can be compared if they have the same precision and scale.
impl PartialOrd for Decimal128 {
    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
        if self.precision == other.precision && self.scale == other.scale {
            return self.value.partial_cmp(&other.value);
        }
        None
    }
}

/// Convert from string to Decimal128.
/// If the string is no longer than 28 bytes, rust_decimal can parse it exactly;
/// beyond that length rust_decimal would overflow, so BigDecimal is used instead
/// to get an accurate result.
impl FromStr for Decimal128 {
    type Err = Error;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let len = s.as_bytes().len();
        if len <= BYTES_TO_OVERFLOW_RUST_DECIMAL {
            let rd = RustDecimal::from_str_exact(s).context(ParseRustDecimalStrSnafu { raw: s })?;
            Ok(Self::from(rd))
        } else {
            let bd = BigDecimal::from_str(s).context(ParseBigDecimalStrSnafu { raw: s })?;
            Self::try_from(bd)
        }
    }
}

impl Display for Decimal128 {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(
            f,
            "{}",
            format_decimal_str(&self.value.to_string(), self.precision as usize, self.scale)
        )
    }
}

impl Hash for Decimal128 {
    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
        state.write_i128(self.value);
        state.write_u8(self.precision);
        state.write_i8(self.scale);
    }
}

impl From<Decimal128> for serde_json::Value {
    fn from(decimal: Decimal128) -> Self {
        serde_json::Value::String(decimal.to_string())
    }
}

impl From<Decimal128> for i128 {
    fn from(decimal: Decimal128) -> Self {
        decimal.val()
    }
}

impl From<i128> for Decimal128 {
    fn from(value: i128) -> Self {
        Self {
            value,
            precision: DECIMAL128_MAX_PRECISION,
            scale: DECIMAL128_DEFAULT_SCALE,
        }
    }
}

/// Convert from RustDecimal to Decimal128.
/// The range RustDecimal can represent is smaller than Decimal128's,
/// so it is safe to convert a RustDecimal to a Decimal128.
impl From<RustDecimal> for Decimal128 {
    fn from(rd: RustDecimal) -> Self {
        let s = rd.to_string();
        let precision = (s.len() - s.matches(&['.', '-'][..]).count()) as u8;
        Self {
            value: rd.mantissa(),
            precision,
            scale: rd.scale() as i8,
        }
    }
}

/// Try from BigDecimal to Decimal128.
/// The range that BigDecimal can represent is larger than Decimal128's,
/// so the conversion is fallible: if the BigDecimal is out of range,
/// an error is returned.
impl TryFrom<BigDecimal> for Decimal128 {
    type Error = Error;

    fn try_from(value: BigDecimal) -> Result<Self, Self::Error> {
        let precision = value.digits();
        let (big_int, scale) = value.as_bigint_and_exponent();
        // convert big_int to i128, if convert failed, return error
        big_int
            .to_i128()
            .map(|val| Self::try_new(val, precision as u8, scale as i8))
            .unwrap_or_else(|| BigDecimalOutOfRangeSnafu { value }.fail())
    }
}

/// Port from arrow-rs,
/// see https://github.com/Apache/arrow-rs/blob/master/arrow-array/src/types.rs#L1323-L1344
fn format_decimal_str(value_str: &str, precision: usize, scale: i8) -> String {
    let (sign, rest) = match value_str.strip_prefix('-') {
        Some(stripped) => ("-", stripped),
        None => ("", value_str),
    };

    let bound = precision.min(rest.len()) + sign.len();
    let value_str = &value_str[0..bound];

    if scale == 0 {
        value_str.to_string()
    } else if scale < 0 {
        let padding = value_str.len() + scale.unsigned_abs() as usize;
        format!("{value_str:0<padding$}")
    } else if rest.len() > scale as usize {
        // Decimal separator is in the middle of the string
        let (whole, decimal) = value_str.split_at(value_str.len() - scale as usize);
        format!("{whole}.{decimal}")
    } else {
        // String has to be padded
        format!("{}0.{:0>width$}", sign, rest, width = scale as usize)
    }
}

/// check whether precision and scale is valid
fn valid_precision_and_scale(precision: u8, scale: i8) -> error::Result<()> {
    if precision == 0 {
        return InvalidPrecisionOrScaleSnafu {
            reason: format!(
                "precision cannot be 0, has to be between [1, {}]",
                DECIMAL128_MAX_PRECISION
            ),
        }
        .fail();
    }
    if precision > DECIMAL128_MAX_PRECISION {
        return InvalidPrecisionOrScaleSnafu {
            reason: format!(
                "precision {} is greater than max {}",
                precision, DECIMAL128_MAX_PRECISION
            ),
        }
        .fail();
    }
    if scale > DECIMAL128_MAX_SCALE {
        return InvalidPrecisionOrScaleSnafu {
            reason: format!(
                "scale {} is greater than max {}",
                scale, DECIMAL128_MAX_SCALE
            ),
        }
        .fail();
    }
    if scale > 0 && scale > precision as i8 {
        return InvalidPrecisionOrScaleSnafu {
            reason: format!("scale {} is greater than precision {}", scale, precision),
        }
        .fail();
    }

    Ok(())
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_common_decimal128() {
        let decimal = Decimal128::new(123456789, 9, 3);
        assert_eq!(decimal.to_string(), "123456.789");

        let decimal = Decimal128::try_new(123456789, 9, 0);
        assert_eq!(decimal.unwrap().to_string(), "123456789");

        let decimal = Decimal128::try_new(123456789, 9, 2);
        assert_eq!(decimal.unwrap().to_string(), "1234567.89");

        let decimal = Decimal128::try_new(123, 3, -2);
        assert_eq!(decimal.unwrap().to_string(), "12300");

        // invalid precision or scale

        // precision is 0
        let decimal = Decimal128::try_new(123, 0, 0);
        assert!(decimal.is_err());

        // precision is greater than 38
        let decimal = Decimal128::try_new(123, 39, 0);
        assert!(decimal.is_err());

        // scale is greater than 38
        let decimal = Decimal128::try_new(123, 38, 39);
        assert!(decimal.is_err());

        // scale is greater than precision
        let decimal = Decimal128::try_new(123, 3, 4);
        assert!(decimal.is_err());
    }

    #[test]
    fn test_decimal128_from_str() {
        // 0 < precision <= 28
        let decimal = Decimal128::from_str("1234567890.123456789").unwrap();
        assert_eq!(decimal.to_string(), "1234567890.123456789");
        assert_eq!(decimal.precision(), 19);
        assert_eq!(decimal.scale(), 9);

        let decimal = Decimal128::from_str("1234567890.123456789012345678").unwrap();
        assert_eq!(decimal.to_string(), "1234567890.123456789012345678");
        assert_eq!(decimal.precision(), 28);
        assert_eq!(decimal.scale(), 18);

        // 28 < precision <= 38
        let decimal = Decimal128::from_str("1234567890.1234567890123456789012").unwrap();
        assert_eq!(decimal.to_string(), "1234567890.1234567890123456789012");
        assert_eq!(decimal.precision(), 32);
        assert_eq!(decimal.scale(), 22);

        let decimal = Decimal128::from_str("1234567890.1234567890123456789012345678").unwrap();
        assert_eq!(
            decimal.to_string(),
            "1234567890.1234567890123456789012345678"
        );
        assert_eq!(decimal.precision(), 38);
        assert_eq!(decimal.scale(), 28);

        // precision > 38
        let decimal = Decimal128::from_str("1234567890.12345678901234567890123456789");
        assert!(decimal.is_err());
    }

    #[test]
    #[ignore]
    fn test_parse_decimal128_speed() {
        // RustDecimal::from_str: 1.124855167s
        for _ in 0..1500000 {
            let _ = RustDecimal::from_str("1234567890.123456789012345678999").unwrap();
        }

        // BigDecimal::try_from: 6.799290042s
        for _ in 0..1500000 {
            let _ = BigDecimal::from_str("1234567890.123456789012345678999").unwrap();
        }
    }

    #[test]
    fn test_decimal128_precision_and_scale() {
        // precision and scale from Decimal(1,1) to Decimal(38,38)
        for precision in 1..=38 {
            for scale in 1..=precision {
                let decimal_str = format!("0.{}", "1".repeat(scale as usize));
                let decimal = Decimal128::from_str(&decimal_str).unwrap();
                assert_eq!(decimal_str, decimal.to_string());
            }
        }
    }

    #[test]
    fn test_decimal128_compare() {
        // the same precision and scale
        let decimal1 = Decimal128::from_str("1234567890.123456789012345678999").unwrap();
        let decimal2 = Decimal128::from_str("1234567890.123456789012345678999").unwrap();
        assert!(decimal1 == decimal2);

        let decimal1 = Decimal128::from_str("1234567890.123456789012345678999").unwrap();
        let decimal2 = Decimal128::from_str("1234567890.123456789012345678998").unwrap();
        assert!(decimal1 > decimal2);

        let decimal1 = Decimal128::from_str("1234567890.123456789012345678999").unwrap();
        let decimal2 = Decimal128::from_str("1234567890.123456789012345678998").unwrap();
        assert!(decimal2 < decimal1);

        let decimal1 = Decimal128::from_str("1234567890.123456789012345678999").unwrap();
        let decimal2 = Decimal128::from_str("1234567890.123456789012345678998").unwrap();
        assert!(decimal1 >= decimal2);

        let decimal1 = Decimal128::from_str("1234567890.123456789012345678999").unwrap();
        let decimal2 = Decimal128::from_str("1234567890.123456789012345678998").unwrap();
        assert!(decimal2 <= decimal1);

        let decimal1 = Decimal128::from_str("1234567890.123456789012345678999").unwrap();
        let decimal2 = Decimal128::from_str("1234567890.123456789012345678998").unwrap();
        assert!(decimal1 != decimal2);

        // different precision and scale cmp is None
        let decimal1 = Decimal128::from_str("1234567890.123456789012345678999").unwrap();
        let decimal2 = Decimal128::from_str("1234567890.123").unwrap();
        assert_eq!(decimal1.partial_cmp(&decimal2), None);
    }
}
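For orientation, here is a brief usage sketch built only from the API shown above (`from_str`, `precision`, `scale`, `Display`); the snippet is illustrative and not part of the commit:

```rust
use std::str::FromStr;

use common_decimal::Decimal128;

fn main() {
    // "123456.789" has 9 significant digits and 3 fractional digits.
    let d = Decimal128::from_str("123456.789").unwrap();
    assert_eq!(d.precision(), 9);
    assert_eq!(d.scale(), 3);
    // Display re-applies the scale to the underlying i128 (123456789).
    assert_eq!(d.to_string(), "123456.789");
}
```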
72
src/common/decimal/src/error.rs
Normal file
@@ -0,0 +1,72 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use bigdecimal::BigDecimal;
use common_error::ext::ErrorExt;
use common_error::status_code::StatusCode;
use common_macro::stack_trace_debug;
use snafu::{Location, Snafu};

#[derive(Snafu)]
#[snafu(visibility(pub))]
#[stack_trace_debug]
pub enum Error {
    #[snafu(display("Decimal out of range, decimal value: {}", value))]
    BigDecimalOutOfRange {
        value: BigDecimal,
        location: Location,
    },

    #[snafu(display("Failed to parse string to rust decimal, raw: {}", raw))]
    ParseRustDecimalStr {
        raw: String,
        #[snafu(source)]
        error: rust_decimal::Error,
    },

    #[snafu(display("Failed to parse string to big decimal, raw: {}", raw))]
    ParseBigDecimalStr {
        raw: String,
        #[snafu(source)]
        error: bigdecimal::ParseBigDecimalError,
    },

#[snafu(display("Invalid precision or scale, resion: {}", reason))]
|
||||
InvalidPrecisionOrScale { reason: String, location: Location },
|
||||
}
|
||||
|
||||
impl ErrorExt for Error {
|
||||
fn status_code(&self) -> StatusCode {
|
||||
match self {
|
||||
Error::BigDecimalOutOfRange { .. } => StatusCode::Internal,
|
||||
Error::ParseRustDecimalStr { .. }
|
||||
| Error::InvalidPrecisionOrScale { .. }
|
||||
| Error::ParseBigDecimalStr { .. } => StatusCode::InvalidArguments,
|
||||
}
|
||||
}
|
||||
|
||||
fn location_opt(&self) -> Option<common_error::snafu::Location> {
|
||||
match self {
|
||||
Error::BigDecimalOutOfRange { location, .. } => Some(*location),
|
||||
Error::InvalidPrecisionOrScale { location, .. } => Some(*location),
|
||||
Error::ParseRustDecimalStr { .. } | Error::ParseBigDecimalStr { .. } => None,
|
||||
}
|
||||
}
|
||||
|
||||
fn as_any(&self) -> &dyn std::any::Any {
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
pub type Result<T> = std::result::Result<T, Error>;
|
||||
@@ -12,15 +12,7 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use common_meta::kv_backend::memory::MemoryKvBackend;
|
||||
pub mod decimal128;
|
||||
pub mod error;
|
||||
|
||||
use crate::error::Error;
|
||||
use crate::service::store::kv::ResettableKvStore;
|
||||
|
||||
pub type MemStore = MemoryKvBackend<Error>;
|
||||
|
||||
impl ResettableKvStore for MemStore {
|
||||
fn reset(&self) {
|
||||
self.clear();
|
||||
}
|
||||
}
|
||||
pub use decimal128::Decimal128;
|
||||
@@ -5,5 +5,5 @@ edition.workspace = true
|
||||
license.workspace = true
|
||||
|
||||
[dependencies]
|
||||
snafu = { version = "0.7", features = ["backtraces"] }
|
||||
snafu.workspace = true
|
||||
strum.workspace = true
|
||||
|
||||
@@ -11,6 +11,7 @@
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#![feature(error_iter)]
|
||||
|
||||
pub mod ext;
|
||||
|
||||
@@ -7,12 +7,12 @@ license.workspace = true
|
||||
[dependencies]
|
||||
arc-swap = "1.0"
|
||||
chrono-tz = "0.6"
|
||||
common-error = { workspace = true }
|
||||
common-macro = { workspace = true }
|
||||
common-query = { workspace = true }
|
||||
common-time = { workspace = true }
|
||||
common-error.workspace = true
|
||||
common-macro.workspace = true
|
||||
common-query.workspace = true
|
||||
common-time.workspace = true
|
||||
datafusion.workspace = true
|
||||
datatypes = { workspace = true }
|
||||
datatypes.workspace = true
|
||||
libc = "0.2"
|
||||
num = "0.4"
|
||||
num-traits = "0.2"
|
||||
|
||||
@@ -11,6 +11,7 @@
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::Arc;
|
||||
mod greatest;
|
||||
mod to_unixtime;
|
||||
|
||||
@@ -18,36 +18,50 @@ use std::sync::Arc;
|
||||
|
||||
use common_query::error::{InvalidFuncArgsSnafu, Result, UnsupportedInputDataTypeSnafu};
|
||||
use common_query::prelude::{Signature, Volatility};
|
||||
use common_time::timestamp::TimeUnit;
|
||||
use common_time::Timestamp;
|
||||
use common_time::{Date, DateTime, Timestamp};
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use datatypes::types::TimestampType;
|
||||
use datatypes::vectors::{
|
||||
Int64Vector, StringVector, TimestampMicrosecondVector, TimestampMillisecondVector,
|
||||
TimestampNanosecondVector, TimestampSecondVector, Vector, VectorRef,
|
||||
};
|
||||
use datatypes::vectors::{Int64Vector, VectorRef};
|
||||
use snafu::ensure;
|
||||
|
||||
use crate::scalars::function::{Function, FunctionContext};
|
||||
|
||||
/// A function to convert the column into the unix timestamp in seconds.
|
||||
#[derive(Clone, Debug, Default)]
|
||||
pub struct ToUnixtimeFunction;
|
||||
|
||||
const NAME: &str = "to_unixtime";
|
||||
|
||||
fn convert_to_seconds(arg: &str) -> Option<i64> {
|
||||
match Timestamp::from_str(arg) {
|
||||
Ok(ts) => {
|
||||
let sec_mul = (TimeUnit::Second.factor() / ts.unit().factor()) as i64;
|
||||
Some(ts.value().div_euclid(sec_mul))
|
||||
}
|
||||
Err(_err) => None,
|
||||
if let Ok(dt) = DateTime::from_str(arg) {
|
||||
return Some(dt.val() / 1000);
|
||||
}
|
||||
|
||||
if let Ok(ts) = Timestamp::from_str(arg) {
|
||||
return Some(ts.split().0);
|
||||
}
|
||||
|
||||
if let Ok(date) = Date::from_str(arg) {
|
||||
return Some(date.to_secs());
|
||||
}
|
||||
|
||||
None
|
||||
}
|
||||
|
||||
fn process_vector(vector: &dyn Vector) -> Vec<Option<i64>> {
fn convert_timestamps_to_seconds(vector: &VectorRef) -> Vec<Option<i64>> {
    (0..vector.len())
        .map(|i| paste::expr!((vector.get(i)).as_timestamp().map(|ts| ts.value())))
        .map(|i| vector.get(i).as_timestamp().map(|ts| ts.split().0))
        .collect::<Vec<Option<i64>>>()
}

fn convert_dates_to_seconds(vector: &VectorRef) -> Vec<Option<i64>> {
    (0..vector.len())
        .map(|i| vector.get(i).as_date().map(|dt| dt.to_secs()))
        .collect::<Vec<Option<i64>>>()
}

fn convert_datetimes_to_seconds(vector: &VectorRef) -> Vec<Option<i64>> {
    (0..vector.len())
        .map(|i| vector.get(i).as_datetime().map(|dt| dt.val() / 1000))
        .collect::<Vec<Option<i64>>>()
}

@@ -67,6 +81,8 @@ impl Function for ToUnixtimeFunction {
            ConcreteDataType::string_datatype(),
            ConcreteDataType::int32_datatype(),
            ConcreteDataType::int64_datatype(),
            ConcreteDataType::date_datatype(),
            ConcreteDataType::datetime_datatype(),
            ConcreteDataType::timestamp_second_datatype(),
            ConcreteDataType::timestamp_millisecond_datatype(),
            ConcreteDataType::timestamp_microsecond_datatype(),
@@ -87,51 +103,29 @@ impl Function for ToUnixtimeFunction {
            }
        );

        let vector = &columns[0];

        match columns[0].data_type() {
            ConcreteDataType::String(_) => {
                let array = columns[0].to_arrow_array();
                let vector = StringVector::try_from_arrow_array(&array).unwrap();
                Ok(Arc::new(Int64Vector::from(
                    (0..vector.len())
                        .map(|i| convert_to_seconds(&vector.get(i).to_string()))
                        .collect::<Vec<_>>(),
                )))
            }
            ConcreteDataType::String(_) => Ok(Arc::new(Int64Vector::from(
                (0..vector.len())
                    .map(|i| convert_to_seconds(&vector.get(i).to_string()))
                    .collect::<Vec<_>>(),
            ))),
            ConcreteDataType::Int64(_) | ConcreteDataType::Int32(_) => {
                let array = columns[0].to_arrow_array();
                Ok(Arc::new(Int64Vector::try_from_arrow_array(&array).unwrap()))
                // Safety: the cast always succeeds here.
                Ok(vector.cast(&ConcreteDataType::int64_datatype()).unwrap())
            }
            ConcreteDataType::Timestamp(ts) => {
                let array = columns[0].to_arrow_array();
                let value = match ts {
                    TimestampType::Second(_) => {
                        let vector = paste::expr!(TimestampSecondVector::try_from_arrow_array(
                            array
                        )
                        .unwrap());
                        process_vector(&vector)
                    }
                    TimestampType::Millisecond(_) => {
                        let vector = paste::expr!(
                            TimestampMillisecondVector::try_from_arrow_array(array).unwrap()
                        );
                        process_vector(&vector)
                    }
                    TimestampType::Microsecond(_) => {
                        let vector = paste::expr!(
                            TimestampMicrosecondVector::try_from_arrow_array(array).unwrap()
                        );
                        process_vector(&vector)
                    }
                    TimestampType::Nanosecond(_) => {
                        let vector = paste::expr!(TimestampNanosecondVector::try_from_arrow_array(
                            array
                        )
                        .unwrap());
                        process_vector(&vector)
                    }
                };
                Ok(Arc::new(Int64Vector::from(value)))
            ConcreteDataType::Date(_) => {
                let seconds = convert_dates_to_seconds(vector);
                Ok(Arc::new(Int64Vector::from(seconds)))
            }
            ConcreteDataType::DateTime(_) => {
                let seconds = convert_datetimes_to_seconds(vector);
                Ok(Arc::new(Int64Vector::from(seconds)))
            }
            ConcreteDataType::Timestamp(_) => {
                let seconds = convert_timestamps_to_seconds(vector);
                Ok(Arc::new(Int64Vector::from(seconds)))
            }
            _ => UnsupportedInputDataTypeSnafu {
                function: NAME,
@@ -151,11 +145,11 @@ impl fmt::Display for ToUnixtimeFunction {
#[cfg(test)]
mod tests {
    use common_query::prelude::TypeSignature;
    use datatypes::prelude::{ConcreteDataType, ScalarVectorBuilder};
    use datatypes::scalars::ScalarVector;
    use datatypes::timestamp::TimestampSecond;
    use datatypes::prelude::ConcreteDataType;
    use datatypes::value::Value;
    use datatypes::vectors::{StringVector, TimestampSecondVector};
    use datatypes::vectors::{
        DateTimeVector, DateVector, StringVector, TimestampMillisecondVector, TimestampSecondVector,
    };

    use super::{ToUnixtimeFunction, *};
    use crate::scalars::Function;
@@ -170,18 +164,20 @@ mod tests {
        );

        assert!(matches!(f.signature(),
                         Signature {
                             type_signature: TypeSignature::Uniform(1, valid_types),
                             volatility: Volatility::Immutable
                         } if valid_types == vec![
                            ConcreteDataType::string_datatype(),
                            ConcreteDataType::int32_datatype(),
                            ConcreteDataType::int64_datatype(),
                            ConcreteDataType::timestamp_second_datatype(),
                            ConcreteDataType::timestamp_millisecond_datatype(),
                            ConcreteDataType::timestamp_microsecond_datatype(),
                            ConcreteDataType::timestamp_nanosecond_datatype(),
                         ]
            Signature {
                type_signature: TypeSignature::Uniform(1, valid_types),
                volatility: Volatility::Immutable
            } if valid_types == vec![
                ConcreteDataType::string_datatype(),
                ConcreteDataType::int32_datatype(),
                ConcreteDataType::int64_datatype(),
                ConcreteDataType::date_datatype(),
                ConcreteDataType::datetime_datatype(),
                ConcreteDataType::timestamp_second_datatype(),
                ConcreteDataType::timestamp_millisecond_datatype(),
                ConcreteDataType::timestamp_microsecond_datatype(),
                ConcreteDataType::timestamp_nanosecond_datatype(),
            ]
        ));

        let times = vec![
@@ -212,26 +208,6 @@ mod tests {
    #[test]
    fn test_int_to_unixtime() {
        let f = ToUnixtimeFunction;
        assert_eq!("to_unixtime", f.name());
        assert_eq!(
            ConcreteDataType::int64_datatype(),
            f.return_type(&[]).unwrap()
        );

        assert!(matches!(f.signature(),
                         Signature {
                             type_signature: TypeSignature::Uniform(1, valid_types),
                             volatility: Volatility::Immutable
                         } if valid_types == vec![
                            ConcreteDataType::string_datatype(),
                            ConcreteDataType::int32_datatype(),
                            ConcreteDataType::int64_datatype(),
                            ConcreteDataType::timestamp_second_datatype(),
                            ConcreteDataType::timestamp_millisecond_datatype(),
                            ConcreteDataType::timestamp_microsecond_datatype(),
                            ConcreteDataType::timestamp_nanosecond_datatype(),
                         ]
        ));

        let times = vec![Some(3_i64), None, Some(5_i64), None];
        let results = [Some(3), None, Some(5), None];
@@ -254,38 +230,13 @@ mod tests {
    }

    #[test]
    fn test_timestamp_to_unixtime() {
    fn test_date_to_unixtime() {
        let f = ToUnixtimeFunction;
        assert_eq!("to_unixtime", f.name());
        assert_eq!(
            ConcreteDataType::int64_datatype(),
            f.return_type(&[]).unwrap()
        );

        assert!(matches!(f.signature(),
                         Signature {
                             type_signature: TypeSignature::Uniform(1, valid_types),
                             volatility: Volatility::Immutable
                         } if valid_types == vec![
                            ConcreteDataType::string_datatype(),
                            ConcreteDataType::int32_datatype(),
                            ConcreteDataType::int64_datatype(),
                            ConcreteDataType::timestamp_second_datatype(),
                            ConcreteDataType::timestamp_millisecond_datatype(),
                            ConcreteDataType::timestamp_microsecond_datatype(),
                            ConcreteDataType::timestamp_nanosecond_datatype(),
                         ]
        ));

        let times: Vec<Option<TimestampSecond>> = vec![
            Some(TimestampSecond::new(123)),
            None,
            Some(TimestampSecond::new(42)),
            None,
        ];
        let results = [Some(123), None, Some(42), None];
        let ts_vector: TimestampSecondVector = build_vector_from_slice(&times);
        let args: Vec<VectorRef> = vec![Arc::new(ts_vector)];
        let times = vec![Some(123), None, Some(42), None];
        let results = [Some(10627200), None, Some(3628800), None];
        let date_vector = DateVector::from(times.clone());
        let args: Vec<VectorRef> = vec![Arc::new(date_vector)];
        let vector = f.eval(FunctionContext::default(), &args).unwrap();
        assert_eq!(4, vector.len());
        for (i, _t) in times.iter().enumerate() {
@@ -303,11 +254,73 @@ mod tests {
        }
    }

    fn build_vector_from_slice<T: ScalarVector>(items: &[Option<T::RefItem<'_>>]) -> T {
        let mut builder = T::Builder::with_capacity(items.len());
        for item in items {
            builder.push(*item);
    #[test]
    fn test_datetime_to_unixtime() {
        let f = ToUnixtimeFunction;

        let times = vec![Some(123000), None, Some(42000), None];
        let results = [Some(123), None, Some(42), None];
        let date_vector = DateTimeVector::from(times.clone());
        let args: Vec<VectorRef> = vec![Arc::new(date_vector)];
        let vector = f.eval(FunctionContext::default(), &args).unwrap();
        assert_eq!(4, vector.len());
        for (i, _t) in times.iter().enumerate() {
            let v = vector.get(i);
            if i == 1 || i == 3 {
                assert_eq!(Value::Null, v);
                continue;
            }
            match v {
                Value::Int64(ts) => {
                    assert_eq!(ts, (*results.get(i).unwrap()).unwrap());
                }
                _ => unreachable!(),
            }
        }
    }

    #[test]
    fn test_timestamp_to_unixtime() {
        let f = ToUnixtimeFunction;

        let times = vec![Some(123), None, Some(42), None];
        let results = [Some(123), None, Some(42), None];
        let ts_vector = TimestampSecondVector::from(times.clone());
        let args: Vec<VectorRef> = vec![Arc::new(ts_vector)];
        let vector = f.eval(FunctionContext::default(), &args).unwrap();
        assert_eq!(4, vector.len());
        for (i, _t) in times.iter().enumerate() {
            let v = vector.get(i);
            if i == 1 || i == 3 {
                assert_eq!(Value::Null, v);
                continue;
            }
            match v {
                Value::Int64(ts) => {
                    assert_eq!(ts, (*results.get(i).unwrap()).unwrap());
                }
                _ => unreachable!(),
            }
        }

        let times = vec![Some(123000), None, Some(42000), None];
        let results = [Some(123), None, Some(42), None];
        let ts_vector = TimestampMillisecondVector::from(times.clone());
        let args: Vec<VectorRef> = vec![Arc::new(ts_vector)];
        let vector = f.eval(FunctionContext::default(), &args).unwrap();
        assert_eq!(4, vector.len());
        for (i, _t) in times.iter().enumerate() {
            let v = vector.get(i);
            if i == 1 || i == 3 {
                assert_eq!(Value::Null, v);
                continue;
            }
            match v {
                Value::Int64(ts) => {
                    assert_eq!(ts, (*results.get(i).unwrap()).unwrap());
                }
                _ => unreachable!(),
            }
        }
        builder.finish()
    }
}

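A hedged usage sketch of the function above, mirroring the tests in this file: build a string vector, evaluate, and read the seconds back as an `Int64Vector` (the input literal is an assumption):

    let f = ToUnixtimeFunction;
    let args: Vec<VectorRef> = vec![Arc::new(StringVector::from(vec![
        "2023-03-01T06:35:02Z".to_string(),
    ]))];
    // Each parseable string becomes Some(unix seconds); failures become None.
    let seconds = f.eval(FunctionContext::default(), &args).unwrap();
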
@@ -6,19 +6,19 @@ license.workspace = true

[dependencies]
async-trait.workspace = true
common-error = { workspace = true }
common-runtime = { workspace = true }
common-telemetry = { workspace = true }
reqwest = { workspace = true }
common-error.workspace = true
common-runtime.workspace = true
common-telemetry.workspace = true
reqwest.workspace = true
serde.workspace = true
serde_json.workspace = true
tokio.workspace = true
uuid.workspace = true

[dev-dependencies]
common-test-util = { workspace = true }
common-test-util.workspace = true
hyper = { version = "0.14", features = ["full"] }
tempfile.workspace = true

[build-dependencies]
common-version = { workspace = true }
common-version.workspace = true

@@ -5,18 +5,18 @@ edition.workspace = true
license.workspace = true

[dependencies]
api = { workspace = true }
api.workspace = true
async-trait.workspace = true
common-base = { workspace = true }
common-catalog = { workspace = true }
common-error = { workspace = true }
common-macro = { workspace = true }
common-query = { workspace = true }
common-telemetry = { workspace = true }
common-time = { workspace = true }
datatypes = { workspace = true }
snafu = { version = "0.7", features = ["backtraces"] }
table = { workspace = true }
common-base.workspace = true
common-catalog.workspace = true
common-error.workspace = true
common-macro.workspace = true
common-query.workspace = true
common-telemetry.workspace = true
common-time.workspace = true
datatypes.workspace = true
snafu.workspace = true
table.workspace = true

[dev-dependencies]
paste = "1.0"

@@ -5,25 +5,25 @@ edition.workspace = true
license.workspace = true

[dependencies]
api = { workspace = true }
api.workspace = true
arrow-flight.workspace = true
async-trait = "0.1"
backtrace = "0.3"
common-base = { workspace = true }
common-error = { workspace = true }
common-macro = { workspace = true }
common-recordbatch = { workspace = true }
common-runtime = { workspace = true }
common-telemetry = { workspace = true }
common-time = { workspace = true }
common-base.workspace = true
common-error.workspace = true
common-macro.workspace = true
common-recordbatch.workspace = true
common-runtime.workspace = true
common-telemetry.workspace = true
common-time.workspace = true
dashmap = "5.4"
datafusion.workspace = true
datatypes = { workspace = true }
datatypes.workspace = true
flatbuffers = "23.1"
futures = "0.3"
lazy_static.workspace = true
prost.workspace = true
snafu = { version = "0.7", features = ["backtraces"] }
snafu.workspace = true
tokio.workspace = true
tonic.workspace = true
tower = "0.4"

@@ -71,7 +71,8 @@ macro_rules! convert_arrow_array_to_grpc_vals {
                return Ok(vals);
            },
        )+
        ConcreteDataType::Null(_) | ConcreteDataType::List(_) | ConcreteDataType::Dictionary(_) => unreachable!("Should not send {:?} in gRPC", $data_type),
        // TODO(QuenKar): support gRPC for Decimal128
        ConcreteDataType::Null(_) | ConcreteDataType::List(_) | ConcreteDataType::Dictionary(_) | ConcreteDataType::Decimal128(_) => unreachable!("Should not send {:?} in gRPC", $data_type),
        }
    }};
}

@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,

@@ -8,8 +8,6 @@ license.workspace = true
proc-macro = true

[dependencies]
backtrace = "0.3"
common-telemetry = { workspace = true }
proc-macro2 = "1.0.66"
quote = "1.0"
syn = "1.0"
@@ -25,7 +23,7 @@ syn2 = { version = "2.0", package = "syn", features = [

[dev-dependencies]
arc-swap = "1.0"
common-query = { workspace = true }
datatypes = { workspace = true }
common-query.workspace = true
datatypes.workspace = true
snafu.workspace = true
static_assertions = "1.1.0"

@@ -5,8 +5,8 @@ edition.workspace = true
license.workspace = true

[dependencies]
common-error = { workspace = true }
common-macro = { workspace = true }
common-error.workspace = true
common-macro.workspace = true
snafu.workspace = true
tempfile = "3.4"
tokio.workspace = true

@@ -8,22 +8,22 @@ license.workspace = true
testing = []

[dependencies]
api = { workspace = true }
arrow-flight.workspace = true
api.workspace = true
async-recursion = "1.0"
async-stream.workspace = true
async-trait.workspace = true
base64 = "0.21"
base64.workspace = true
bytes = "1.4"
common-catalog = { workspace = true }
common-error = { workspace = true }
common-catalog.workspace = true
common-error.workspace = true
common-grpc-expr.workspace = true
common-macro = { workspace = true }
common-procedure = { workspace = true }
common-recordbatch = { workspace = true }
common-runtime = { workspace = true }
common-telemetry = { workspace = true }
common-time = { workspace = true }
datatypes = { workspace = true }
common-macro.workspace = true
common-procedure.workspace = true
common-recordbatch.workspace = true
common-runtime.workspace = true
common-telemetry.workspace = true
common-time.workspace = true
datatypes.workspace = true
etcd-client.workspace = true
futures.workspace = true
humantime-serde.workspace = true
@@ -34,12 +34,13 @@ regex.workspace = true
serde.workspace = true
serde_json.workspace = true
snafu.workspace = true
store-api = { workspace = true }
store-api.workspace = true
strum.workspace = true
table = { workspace = true }
table.workspace = true
tokio.workspace = true
tonic.workspace = true

[dev-dependencies]
chrono.workspace = true
datatypes = { workspace = true }
datatypes.workspace = true
hyper = { version = "0.14", features = ["full"] }

@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,

@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,

@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,

@@ -28,6 +28,26 @@ use crate::peer::Peer;
#[snafu(visibility(pub))]
#[stack_trace_debug]
pub enum Error {
    #[snafu(display("Empty key is not allowed"))]
    EmptyKey { location: Location },

    #[snafu(display("Invalid result with a txn response: {}", err_msg))]
    InvalidTxnResult { err_msg: String, location: Location },

    #[snafu(display("Failed to connect to Etcd"))]
    ConnectEtcd {
        #[snafu(source)]
        error: etcd_client::Error,
        location: Location,
    },

    #[snafu(display("Failed to execute via Etcd"))]
    EtcdFailed {
        #[snafu(source)]
        error: etcd_client::Error,
        location: Location,
    },

    #[snafu(display("Failed to get sequence: {}", err_msg))]
    NextSequence { err_msg: String, location: Location },

@@ -254,7 +274,10 @@ impl ErrorExt for Error {
    fn status_code(&self) -> StatusCode {
        use Error::*;
        match self {
            IllegalServerState { .. } | EtcdTxnOpResponse { .. } => StatusCode::Internal,
            IllegalServerState { .. }
            | EtcdTxnOpResponse { .. }
            | EtcdFailed { .. }
            | ConnectEtcd { .. } => StatusCode::Internal,

            SerdeJson { .. }
            | ParseOption { .. }
@@ -267,7 +290,8 @@ impl ErrorExt for Error {
            | NextSequence { .. }
            | SequenceOutOfRange { .. }
            | UnexpectedSequenceValue { .. }
            | InvalidHeartbeatResponse { .. } => StatusCode::Unexpected,
            | InvalidHeartbeatResponse { .. }
            | InvalidTxnResult { .. } => StatusCode::Unexpected,

            SendMessage { .. }
            | GetKvCache { .. }
@@ -277,7 +301,7 @@ impl ErrorExt for Error {
            | RenameTable { .. }
            | Unsupported { .. } => StatusCode::Internal,

            PrimaryKeyNotFound { .. } => StatusCode::InvalidArguments,
            PrimaryKeyNotFound { .. } | EmptyKey { .. } => StatusCode::InvalidArguments,

            TableNotFound { .. } => StatusCode::TableNotFound,
            TableAlreadyExists { .. } => StatusCode::TableAlreadyExists,
@@ -319,4 +343,16 @@ impl Error {
    pub fn is_retry_later(&self) -> bool {
        matches!(self, Error::RetryLater { .. })
    }

    /// Returns true if the response exceeds the size limit.
    pub fn is_exceeded_size_limit(&self) -> bool {
        if let Error::EtcdFailed {
            error: etcd_client::Error::GRpcStatus(status),
            ..
        } = self
        {
            return status.code() == tonic::Code::OutOfRange;
        }
        false
    }
}

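A hypothetical caller pattern for the new helper: when etcd reports that a response exceeded its size limit (`tonic::Code::OutOfRange`), retry the read with a smaller page. `req` and `halve_limit` are assumed names, not part of this commit:

    let resp = loop {
        match kv_backend.range(req.clone()).await {
            Ok(resp) => break resp,
            // Shrink the page and try again on an oversized response.
            Err(e) if e.is_exceeded_size_limit() => req = halve_limit(req),
            Err(e) => return Err(e),
        }
    };
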
@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,

@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -54,6 +54,8 @@ pub mod table_region;
// TODO(weny): removes it.
#[allow(deprecated)]
pub mod table_route;
#[cfg(any(test, feature = "testing"))]
pub mod test_utils;

use std::collections::{BTreeMap, HashMap};
use std::fmt::Debug;
@@ -62,6 +64,7 @@ use std::sync::Arc;

use bytes::Bytes;
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_telemetry::warn;
use datanode_table::{DatanodeTableKey, DatanodeTableManager, DatanodeTableValue};
use lazy_static::lazy_static;
use regex::Regex;
@@ -81,7 +84,7 @@ use crate::ddl::utils::region_storage_path;
use crate::error::{self, Result, SerdeJsonSnafu};
use crate::kv_backend::txn::Txn;
use crate::kv_backend::KvBackendRef;
use crate::rpc::router::{region_distribution, RegionRoute};
use crate::rpc::router::{region_distribution, RegionRoute, RegionStatus};
use crate::DatanodeId;

pub const REMOVED_PREFIX: &str = "__removed";
@@ -89,12 +92,20 @@ pub const REMOVED_PREFIX: &str = "__removed";
const NAME_PATTERN: &str = r"[a-zA-Z_:-][a-zA-Z0-9_:\-\.]*";

const DATANODE_TABLE_KEY_PREFIX: &str = "__dn_table";
const TABLE_INFO_KEY_PREFIX: &str = "__table_info";
const TABLE_NAME_KEY_PREFIX: &str = "__table_name";
const TABLE_REGION_KEY_PREFIX: &str = "__table_region";
const CATALOG_NAME_KEY_PREFIX: &str = "__catalog_name";
const SCHEMA_NAME_KEY_PREFIX: &str = "__schema_name";
const TABLE_ROUTE_PREFIX: &str = "__table_route";

pub const TABLE_INFO_KEY_PREFIX: &str = "__table_info";
pub const TABLE_NAME_KEY_PREFIX: &str = "__table_name";
pub const CATALOG_NAME_KEY_PREFIX: &str = "__catalog_name";
pub const SCHEMA_NAME_KEY_PREFIX: &str = "__schema_name";
pub const TABLE_ROUTE_PREFIX: &str = "__table_route";

pub const CACHE_KEY_PREFIXES: [&str; 4] = [
    TABLE_NAME_KEY_PREFIX,
    CATALOG_NAME_KEY_PREFIX,
    SCHEMA_NAME_KEY_PREFIX,
    TABLE_ROUTE_PREFIX,
];

pub type RegionDistribution = BTreeMap<DatanodeId, Vec<RegionNumber>>;

@@ -615,6 +626,56 @@ impl TableMetadataManager {

        Ok(())
    }

    /// Updates the leader status of the [RegionRoute].
    pub async fn update_leader_region_status<F>(
        &self,
        table_id: TableId,
        current_table_route_value: DeserializedValueWithBytes<TableRouteValue>,
        next_region_route_status: F,
    ) -> Result<()>
    where
        F: Fn(&RegionRoute) -> Option<Option<RegionStatus>>,
    {
        let mut new_region_routes = current_table_route_value.region_routes.clone();

        let mut updated = 0;
        for route in &mut new_region_routes {
            if let Some(status) = next_region_route_status(route) {
                if route.set_leader_status(status) {
                    updated += 1;
                }
            }
        }

        if updated == 0 {
            warn!("No leader status updated");
            return Ok(());
        }

        // Updates the table_route.
        let new_table_route_value = current_table_route_value.update(new_region_routes);

        let (update_table_route_txn, on_update_table_route_failure) = self
            .table_route_manager()
            .build_update_txn(table_id, &current_table_route_value, &new_table_route_value)?;

        let r = self.kv_backend.txn(update_table_route_txn).await?;

        // Checks whether metadata was already updated.
        if !r.succeeded {
            let remote_table_route = on_update_table_route_failure(&r.responses)?
                .context(error::UnexpectedSnafu {
                    err_msg: "Reads the empty table route during the updating leader region status",
                })?
                .into_inner();

            let op_name = "the updating leader region status";
            ensure_values!(remote_table_route, new_table_route_value, op_name);
        }

        Ok(())
    }
}

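The closure contract above is easy to misread: returning `None` leaves a route untouched, `Some(Some(status))` sets the leader status, and `Some(None)` clears it. A one-line sketch (illustrative only):

    // Downgrade every leader, regardless of its current status.
    let downgrade_all = |_route: &RegionRoute| Some(Some(RegionStatus::Downgraded));
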
#[macro_export]
@@ -684,12 +745,11 @@ mod tests {
    use std::sync::Arc;

    use bytes::Bytes;
    use datatypes::prelude::ConcreteDataType;
    use datatypes::schema::{ColumnSchema, SchemaBuilder};
    use futures::TryStreamExt;
    use table::metadata::{RawTableInfo, TableInfo, TableInfoBuilder, TableMetaBuilder};
    use table::metadata::{RawTableInfo, TableInfo};

    use super::datanode_table::DatanodeTableKey;
    use super::test_utils;
    use crate::ddl::utils::region_storage_path;
    use crate::key::datanode_table::RegionInfo;
    use crate::key::table_info::TableInfoValue;
@@ -698,7 +758,7 @@ mod tests {
    use crate::key::{to_removed_key, DeserializedValueWithBytes, TableMetadataManager};
    use crate::kv_backend::memory::MemoryKvBackend;
    use crate::peer::Peer;
    use crate::rpc::router::{region_distribution, Region, RegionRoute};
    use crate::rpc::router::{region_distribution, Region, RegionRoute, RegionStatus};

    #[test]
    fn test_deserialized_value_with_bytes() {
@@ -735,40 +795,6 @@ mod tests {
        assert_eq!(removed, to_removed_key(key));
    }

    fn new_test_table_info(region_numbers: impl Iterator<Item = u32>) -> TableInfo {
        let column_schemas = vec![
            ColumnSchema::new("col1", ConcreteDataType::int32_datatype(), true),
            ColumnSchema::new(
                "ts",
                ConcreteDataType::timestamp_millisecond_datatype(),
                false,
            )
            .with_time_index(true),
            ColumnSchema::new("col2", ConcreteDataType::int32_datatype(), true),
        ];
        let schema = SchemaBuilder::try_from(column_schemas)
            .unwrap()
            .version(123)
            .build()
            .unwrap();

        let meta = TableMetaBuilder::default()
            .schema(Arc::new(schema))
            .primary_key_indices(vec![0])
            .engine("engine")
            .next_column_id(3)
            .region_numbers(region_numbers.collect::<Vec<_>>())
            .build()
            .unwrap();
        TableInfoBuilder::default()
            .table_id(10)
            .table_version(5)
            .name("mytable")
            .meta(meta)
            .build()
            .unwrap()
    }

    fn new_test_region_route() -> RegionRoute {
        new_region_route(1, 2)
    }
@@ -787,6 +813,10 @@ mod tests {
        }
    }

    fn new_test_table_info(region_numbers: impl Iterator<Item = u32>) -> TableInfo {
        test_utils::new_test_table_info(10, region_numbers)
    }

    #[tokio::test]
    async fn test_create_table_metadata() {
        let mem_kv = Arc::new(MemoryKvBackend::default());
@@ -1023,6 +1053,74 @@ mod tests {
        .is_err())
    }

    #[tokio::test]
    async fn test_update_table_leader_region_status() {
        let mem_kv = Arc::new(MemoryKvBackend::default());
        let table_metadata_manager = TableMetadataManager::new(mem_kv);
        let datanode = 1;
        let region_routes = vec![
            RegionRoute {
                region: Region {
                    id: 1.into(),
                    name: "r1".to_string(),
                    partition: None,
                    attrs: BTreeMap::new(),
                },
                leader_peer: Some(Peer::new(datanode, "a2")),
                leader_status: Some(RegionStatus::Downgraded),
                follower_peers: vec![],
            },
            RegionRoute {
                region: Region {
                    id: 2.into(),
                    name: "r2".to_string(),
                    partition: None,
                    attrs: BTreeMap::new(),
                },
                leader_peer: Some(Peer::new(datanode, "a1")),
                leader_status: None,
                follower_peers: vec![],
            },
        ];
        let table_info: RawTableInfo =
            new_test_table_info(region_routes.iter().map(|r| r.region.id.region_number())).into();
        let table_id = table_info.ident.table_id;
        let current_table_route_value =
            DeserializedValueWithBytes::from_inner(TableRouteValue::new(region_routes.clone()));
        // creates metadata.
        table_metadata_manager
            .create_table_metadata(table_info.clone(), region_routes.clone())
            .await
            .unwrap();

        table_metadata_manager
            .update_leader_region_status(table_id, current_table_route_value, |region_route| {
                if region_route.leader_status.is_some() {
                    None
                } else {
                    Some(Some(RegionStatus::Downgraded))
                }
            })
            .await
            .unwrap();

        let updated_route_value = table_metadata_manager
            .table_route_manager()
            .get(table_id)
            .await
            .unwrap()
            .unwrap();

        assert_eq!(
            updated_route_value.region_routes[0].leader_status,
            Some(RegionStatus::Downgraded)
        );
        assert_eq!(
            updated_route_value.region_routes[1].leader_status,
            Some(RegionStatus::Downgraded)
        );
    }

    async fn assert_datanode_table(
        table_metadata_manager: &TableMetadataManager,
        table_id: u32,

@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,

@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,

@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,

@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -12,6 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use std::collections::HashMap;

use serde::{Deserialize, Serialize};
use table::engine::TableReference;
use table::metadata::{RawTableInfo, TableId};
@@ -21,6 +23,7 @@ use crate::error::Result;
use crate::key::{to_removed_key, TableMetaKey};
use crate::kv_backend::txn::{Compare, CompareOp, Txn, TxnOp, TxnOpResponse};
use crate::kv_backend::KvBackendRef;
use crate::rpc::store::BatchGetRequest;
use crate::table_name::TableName;

pub struct TableInfoKey {
@@ -233,6 +236,37 @@ impl TableInfoManager {
            .map(|x| DeserializedValueWithBytes::from_inner_slice(&x.value))
            .transpose()
    }

    pub async fn batch_get(
        &self,
        table_ids: &[TableId],
    ) -> Result<HashMap<TableId, TableInfoValue>> {
        let lookup_table = table_ids
            .iter()
            .map(|id| (TableInfoKey::new(*id).as_raw_key(), id))
            .collect::<HashMap<_, _>>();

        let resp = self
            .kv_backend
            .batch_get(BatchGetRequest {
                keys: lookup_table.keys().cloned().collect::<Vec<_>>(),
            })
            .await?;

        let values = resp
            .kvs
            .iter()
            .map(|kv| {
                Ok((
                    // Safety: must exist.
                    **lookup_table.get(kv.key()).unwrap(),
                    TableInfoValue::try_from_raw_value(&kv.value)?,
                ))
            })
            .collect::<Result<HashMap<_, _>>>()?;

        Ok(values)
    }
}

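A sketch of the intended use of `batch_get` above: one `BatchGetRequest` round trip instead of one `get` per table id, with only the ids present in the backend coming back. The manager variable is assumed to be in scope:

    let infos = table_info_manager.batch_get(&[1024, 1025, 1026]).await?;
    for (table_id, value) in &infos {
        println!("table {table_id}: {}", value.table_info.name);
    }
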
#[cfg(test)]

@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,

@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,

@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use std::collections::HashMap;
use std::fmt::Display;

use serde::{Deserialize, Serialize};
@@ -23,6 +24,7 @@ use crate::key::{to_removed_key, RegionDistribution, TableMetaKey, TABLE_ROUTE_P
use crate::kv_backend::txn::{Compare, CompareOp, Txn, TxnOp, TxnOpResponse};
use crate::kv_backend::KvBackendRef;
use crate::rpc::router::{region_distribution, RegionRoute};
use crate::rpc::store::BatchGetRequest;

pub struct TableRouteKey {
    pub table_id: TableId,
@@ -197,6 +199,38 @@ impl TableRouteManager {
            .transpose()
    }

    /// It may return a subset of the `table_ids`.
    pub async fn batch_get(
        &self,
        table_ids: &[TableId],
    ) -> Result<HashMap<TableId, TableRouteValue>> {
        let lookup_table = table_ids
            .iter()
            .map(|id| (TableRouteKey::new(*id).as_raw_key(), id))
            .collect::<HashMap<_, _>>();

        let resp = self
            .kv_backend
            .batch_get(BatchGetRequest {
                keys: lookup_table.keys().cloned().collect::<Vec<_>>(),
            })
            .await?;

        let values = resp
            .kvs
            .iter()
            .map(|kv| {
                Ok((
                    // Safety: must exist.
                    **lookup_table.get(kv.key()).unwrap(),
                    TableRouteValue::try_from_raw_value(&kv.value)?,
                ))
            })
            .collect::<Result<HashMap<_, _>>>()?;

        Ok(values)
    }

    #[cfg(test)]
    pub async fn get_removed(
        &self,

57
src/common/meta/src/key/test_utils.rs
Normal file
@@ -0,0 +1,57 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::sync::Arc;

use datatypes::prelude::ConcreteDataType;
use datatypes::schema::{ColumnSchema, SchemaBuilder};
use store_api::storage::TableId;
use table::metadata::{TableInfo, TableInfoBuilder, TableMetaBuilder};

pub fn new_test_table_info<I: IntoIterator<Item = u32>>(
    table_id: TableId,
    region_numbers: I,
) -> TableInfo {
    let column_schemas = vec![
        ColumnSchema::new("col1", ConcreteDataType::int32_datatype(), true),
        ColumnSchema::new(
            "ts",
            ConcreteDataType::timestamp_millisecond_datatype(),
            false,
        )
        .with_time_index(true),
        ColumnSchema::new("col2", ConcreteDataType::int32_datatype(), true),
    ];
    let schema = SchemaBuilder::try_from(column_schemas)
        .unwrap()
        .version(123)
        .build()
        .unwrap();

    let meta = TableMetaBuilder::default()
        .schema(Arc::new(schema))
        .primary_key_indices(vec![0])
        .engine("engine")
        .next_column_id(3)
        .region_numbers(region_numbers.into_iter().collect::<Vec<_>>())
        .build()
        .unwrap();
    TableInfoBuilder::default()
        .table_id(table_id)
        .table_version(5)
        .name("mytable")
        .meta(meta)
        .build()
        .unwrap()
}

@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

pub mod etcd;
pub mod memory;
pub mod test;
pub mod txn;
@@ -127,3 +128,12 @@ where
        }
    }
}

pub trait ResettableKvBackend: KvBackend
where
    Self::Error: ErrorExt,
{
    fn reset(&self);
}

pub type ResettableKvBackendRef = Arc<dyn ResettableKvBackend<Error = Error> + Send + Sync>;

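A plausible test-side use of the new trait (the `MemoryKvBackend` implementation appears further below): reuse one in-memory backend across cases and wipe it between them:

    let backend = MemoryKvBackend::<Error>::new();
    // ... exercise the backend in one test case ...
    backend.reset(); // drops all key-values before the next case
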
@@ -15,25 +15,47 @@
use std::any::Any;
use std::sync::Arc;

use common_meta::kv_backend::txn::{Txn as KvTxn, TxnResponse as KvTxnResponse};
use common_meta::kv_backend::{KvBackend, TxnService};
use common_meta::metrics::METRIC_META_TXN_REQUEST;
use common_meta::rpc::store::{
    BatchDeleteRequest, BatchDeleteResponse, BatchGetRequest, BatchGetResponse, BatchPutRequest,
    BatchPutResponse, CompareAndPutRequest, CompareAndPutResponse, DeleteRangeRequest,
    DeleteRangeResponse, PutRequest, PutResponse, RangeRequest, RangeResponse,
};
use common_meta::rpc::KeyValue;
use etcd_client::{
    Client, Compare, CompareOp, DeleteOptions, GetOptions, PutOptions, Txn, TxnOp, TxnOpResponse,
    TxnResponse,
};
use snafu::{ensure, OptionExt, ResultExt};

use crate::error;
use crate::error::{ConvertEtcdTxnObjectSnafu, Error, Result};
use crate::service::store::etcd_util::KvPair;
use crate::service::store::kv::KvStoreRef;
use super::KvBackendRef;
use crate::error::{self, Error, Result};
use crate::kv_backend::txn::{Txn as KvTxn, TxnResponse as KvTxnResponse};
use crate::kv_backend::{KvBackend, TxnService};
use crate::metrics::METRIC_META_TXN_REQUEST;
use crate::rpc::store::{
    BatchDeleteRequest, BatchDeleteResponse, BatchGetRequest, BatchGetResponse, BatchPutRequest,
    BatchPutResponse, CompareAndPutRequest, CompareAndPutResponse, DeleteRangeRequest,
    DeleteRangeResponse, PutRequest, PutResponse, RangeRequest, RangeResponse,
};
use crate::rpc::KeyValue;

pub struct KvPair<'a>(&'a etcd_client::KeyValue);

impl<'a> KvPair<'a> {
    /// Creates a `KvPair` from etcd KeyValue
    #[inline]
    pub fn new(kv: &'a etcd_client::KeyValue) -> Self {
        Self(kv)
    }

    #[inline]
    pub fn from_etcd_kv(kv: &etcd_client::KeyValue) -> KeyValue {
        KeyValue::from(KvPair::new(kv))
    }
}

impl<'a> From<KvPair<'a>> for KeyValue {
    fn from(kv: KvPair<'a>) -> Self {
        Self {
            key: kv.0.key().to_vec(),
            value: kv.0.value().to_vec(),
        }
    }
}

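A conversion sketch for the relocated `KvPair` adapter; `etcd_kv` is an assumed `&etcd_client::KeyValue` already in scope:

    let kv: KeyValue = KvPair::from_etcd_kv(etcd_kv); // copies key and value bytes
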
// Maximum number of operations permitted in a transaction.
// The etcd default configuration's `--max-txn-ops` is 128.
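Given that cap, a batch larger than 128 operations has to be split across several transactions; a minimal sketch, with `MAX_TXN_OPS` assumed to mirror the limit noted above:

    for chunk in ops.chunks(MAX_TXN_OPS) {
        // Build one etcd `Txn` from `chunk` and submit it via the client.
    }
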
@@ -46,7 +68,7 @@ pub struct EtcdStore {
|
||||
}
|
||||
|
||||
impl EtcdStore {
|
||||
pub async fn with_endpoints<E, S>(endpoints: S) -> Result<KvStoreRef>
|
||||
pub async fn with_endpoints<E, S>(endpoints: S) -> Result<KvBackendRef>
|
||||
where
|
||||
E: AsRef<str>,
|
||||
S: AsRef<[E]>,
|
||||
@@ -58,7 +80,7 @@ impl EtcdStore {
|
||||
Ok(Self::with_etcd_client(client))
|
||||
}
|
||||
|
||||
pub fn with_etcd_client(client: Client) -> KvStoreRef {
|
||||
pub fn with_etcd_client(client: Client) -> KvBackendRef {
|
||||
Arc::new(Self { client })
|
||||
}
|
||||
|
||||
@@ -305,7 +327,7 @@ impl TxnService for EtcdStore {
|
||||
.txn(etcd_txn)
|
||||
.await
|
||||
.context(error::EtcdFailedSnafu)?;
|
||||
txn_res.try_into().context(ConvertEtcdTxnObjectSnafu)
|
||||
txn_res.try_into()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
@@ -23,6 +23,7 @@ use async_trait::async_trait;
|
||||
use common_error::ext::ErrorExt;
|
||||
use serde::Serializer;
|
||||
|
||||
use super::ResettableKvBackend;
|
||||
use crate::kv_backend::txn::{Txn, TxnOp, TxnOpResponse, TxnRequest, TxnResponse};
|
||||
use crate::kv_backend::{KvBackend, TxnService};
|
||||
use crate::metrics::METRIC_META_TXN_REQUEST;
|
||||
@@ -324,6 +325,12 @@ impl<T: ErrorExt + Send + Sync> TxnService for MemoryKvBackend<T> {
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: ErrorExt + Send + Sync + 'static> ResettableKvBackend for MemoryKvBackend<T> {
|
||||
fn reset(&self) {
|
||||
self.clear();
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::sync::Arc;
|
||||
@@ -336,24 +343,24 @@ mod tests {
|
||||
};
|
||||
|
||||
async fn mock_mem_store_with_data() -> MemoryKvBackend<Error> {
|
||||
let kv_store = MemoryKvBackend::<Error>::new();
|
||||
prepare_kv(&kv_store).await;
|
||||
let kv_backend = MemoryKvBackend::<Error>::new();
|
||||
prepare_kv(&kv_backend).await;
|
||||
|
||||
kv_store
|
||||
kv_backend
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_put() {
|
||||
let kv_store = mock_mem_store_with_data().await;
|
||||
let kv_backend = mock_mem_store_with_data().await;
|
||||
|
||||
test_kv_put(kv_store).await;
|
||||
test_kv_put(kv_backend).await;
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_range() {
|
||||
let kv_store = mock_mem_store_with_data().await;
|
||||
let kv_backend = mock_mem_store_with_data().await;
|
||||
|
||||
test_kv_range(kv_store).await;
|
||||
test_kv_range(kv_backend).await;
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
@@ -365,29 +372,29 @@ mod tests {
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_batch_get() {
|
||||
let kv_store = mock_mem_store_with_data().await;
|
||||
let kv_backend = mock_mem_store_with_data().await;
|
||||
|
||||
test_kv_batch_get(kv_store).await;
|
||||
test_kv_batch_get(kv_backend).await;
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread")]
|
||||
async fn test_compare_and_put() {
|
||||
let kv_store = Arc::new(MemoryKvBackend::<Error>::new());
|
||||
let kv_backend = Arc::new(MemoryKvBackend::<Error>::new());
|
||||
|
||||
test_kv_compare_and_put(kv_store).await;
|
||||
test_kv_compare_and_put(kv_backend).await;
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_delete_range() {
|
||||
let kv_store = mock_mem_store_with_data().await;
|
||||
let kv_backend = mock_mem_store_with_data().await;
|
||||
|
||||
test_kv_delete_range(kv_store).await;
|
||||
test_kv_delete_range(kv_backend).await;
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_batch_delete() {
|
||||
let kv_store = mock_mem_store_with_data().await;
|
||||
let kv_backend = mock_mem_store_with_data().await;
|
||||
|
||||
test_kv_batch_delete(kv_store).await;
|
||||
test_kv_batch_delete(kv_backend).await;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -38,9 +38,9 @@ pub fn mock_kvs() -> Vec<KeyValue> {
|
||||
]
|
||||
}
|
||||
|
||||
pub async fn prepare_kv(kv_store: &impl KvBackend) {
|
||||
pub async fn prepare_kv(kv_backend: &impl KvBackend) {
|
||||
let kvs = mock_kvs();
|
||||
assert!(kv_store
|
||||
assert!(kv_backend
|
||||
.batch_put(BatchPutRequest {
|
||||
kvs,
|
||||
..Default::default()
|
||||
@@ -48,7 +48,7 @@ pub async fn prepare_kv(kv_store: &impl KvBackend) {
|
||||
.await
|
||||
.is_ok());
|
||||
|
||||
assert!(kv_store
|
||||
assert!(kv_backend
|
||||
.put(PutRequest {
|
||||
key: b"key11".to_vec(),
|
||||
value: b"val11".to_vec(),
|
||||
@@ -58,8 +58,8 @@ pub async fn prepare_kv(kv_store: &impl KvBackend) {
|
||||
.is_ok());
|
||||
}
|
||||
|
||||
pub async fn test_kv_put(kv_store: impl KvBackend) {
|
||||
let resp = kv_store
|
||||
pub async fn test_kv_put(kv_backend: impl KvBackend) {
|
||||
let resp = kv_backend
|
||||
.put(PutRequest {
|
||||
key: b"key11".to_vec(),
|
||||
value: b"val12".to_vec(),
|
||||
@@ -69,7 +69,7 @@ pub async fn test_kv_put(kv_store: impl KvBackend) {
|
||||
.unwrap();
|
||||
assert!(resp.prev_kv.is_none());
|
||||
|
||||
let resp = kv_store
|
||||
let resp = kv_backend
|
||||
.put(PutRequest {
|
||||
key: b"key11".to_vec(),
|
||||
value: b"val13".to_vec(),
|
||||
@@ -82,11 +82,11 @@ pub async fn test_kv_put(kv_store: impl KvBackend) {
|
||||
assert_eq!(b"val12", prev_kv.value());
|
||||
}
|
||||
|
||||
pub async fn test_kv_range(kv_store: impl KvBackend) {
|
||||
pub async fn test_kv_range(kv_backend: impl KvBackend) {
|
||||
let key = b"key1".to_vec();
|
||||
let range_end = util::get_prefix_end_key(b"key1");
|
||||
|
||||
let resp = kv_store
|
||||
let resp = kv_backend
|
||||
.range(RangeRequest {
|
||||
key: key.clone(),
|
||||
range_end: range_end.clone(),
|
||||
@@ -102,7 +102,7 @@ pub async fn test_kv_range(kv_store: impl KvBackend) {
|
||||
assert_eq!(b"key11", resp.kvs[1].key());
|
||||
assert_eq!(b"val11", resp.kvs[1].value());
|
||||
|
||||
let resp = kv_store
|
||||
let resp = kv_backend
|
||||
.range(RangeRequest {
|
||||
key: key.clone(),
|
||||
range_end: range_end.clone(),
|
||||
@@ -118,7 +118,7 @@ pub async fn test_kv_range(kv_store: impl KvBackend) {
|
||||
assert_eq!(b"key11", resp.kvs[1].key());
|
||||
assert_eq!(b"", resp.kvs[1].value());
|
||||
|
||||
let resp = kv_store
|
||||
let resp = kv_backend
|
||||
.range(RangeRequest {
|
||||
key: key.clone(),
|
||||
limit: 0,
|
||||
@@ -132,7 +132,7 @@ pub async fn test_kv_range(kv_store: impl KvBackend) {
|
||||
assert_eq!(b"key1", resp.kvs[0].key());
|
||||
assert_eq!(b"val1", resp.kvs[0].value());
|
||||
|
||||
let resp = kv_store
|
||||
let resp = kv_backend
|
||||
.range(RangeRequest {
|
||||
key,
|
||||
range_end,
|
||||
@@ -147,19 +147,19 @@ pub async fn test_kv_range(kv_store: impl KvBackend) {
|
||||
assert_eq!(b"val1", resp.kvs[0].value());
|
||||
}
|
||||
|
||||
pub async fn test_kv_range_2(kv_store: impl KvBackend) {
|
||||
kv_store
|
||||
pub async fn test_kv_range_2(kv_backend: impl KvBackend) {
|
||||
kv_backend
|
||||
.put(PutRequest::new().with_key("atest").with_value("value"))
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
kv_store
|
||||
kv_backend
|
||||
.put(PutRequest::new().with_key("test").with_value("value"))
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
// If both key and range_end are ‘\0’, then range represents all keys.
|
||||
let result = kv_store
|
||||
let result = kv_backend
|
||||
.range(RangeRequest::new().with_range(b"\0".to_vec(), b"\0".to_vec()))
|
||||
.await
|
||||
.unwrap();
|
||||
@@ -168,14 +168,14 @@ pub async fn test_kv_range_2(kv_store: impl KvBackend) {
|
||||
assert!(!result.more);
|
||||
|
||||
// If range_end is ‘\0’, the range is all keys greater than or equal to the key argument.
|
||||
let result = kv_store
|
||||
let result = kv_backend
|
||||
.range(RangeRequest::new().with_range(b"a".to_vec(), b"\0".to_vec()))
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
assert_eq!(result.kvs.len(), 2);
|
||||
|
||||
let result = kv_store
|
||||
let result = kv_backend
|
||||
.range(RangeRequest::new().with_range(b"b".to_vec(), b"\0".to_vec()))
|
||||
.await
|
||||
.unwrap();
|
||||
@@ -184,7 +184,7 @@ pub async fn test_kv_range_2(kv_store: impl KvBackend) {
|
||||
assert_eq!(result.kvs[0].key, b"test");
|
||||
|
||||
// Fetches the keys >= "a", set limit to 1, the `more` should be true.
|
||||
let result = kv_store
|
||||
let result = kv_backend
|
||||
.range(
|
||||
RangeRequest::new()
|
||||
.with_range(b"a".to_vec(), b"\0".to_vec())
|
||||
@@ -196,7 +196,7 @@ pub async fn test_kv_range_2(kv_store: impl KvBackend) {
|
||||
assert!(result.more);
|
||||
|
||||
// Fetches the keys >= "a", set limit to 2, the `more` should be false.
|
||||
let result = kv_store
|
||||
let result = kv_backend
|
||||
.range(
|
||||
RangeRequest::new()
|
||||
.with_range(b"a".to_vec(), b"\0".to_vec())
|
||||
@@ -208,7 +208,7 @@ pub async fn test_kv_range_2(kv_store: impl KvBackend) {
|
||||
assert!(!result.more);
|
||||
|
||||
// Fetches the keys >= "a", set limit to 3, the `more` should be false.
|
||||
let result = kv_store
|
||||
let result = kv_backend
|
||||
.range(
|
||||
RangeRequest::new()
|
||||
.with_range(b"a".to_vec(), b"\0".to_vec())
|
||||
@@ -220,19 +220,28 @@ pub async fn test_kv_range_2(kv_store: impl KvBackend) {
|
||||
assert!(!result.more);
|
||||
}
|
||||
|
||||
pub async fn test_kv_batch_get(kv_store: impl KvBackend) {
|
||||
pub async fn test_kv_batch_get(kv_backend: impl KvBackend) {
|
||||
let keys = vec![];
|
||||
let resp = kv_store.batch_get(BatchGetRequest { keys }).await.unwrap();
|
||||
let resp = kv_backend
|
||||
.batch_get(BatchGetRequest { keys })
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
assert!(resp.kvs.is_empty());
|
||||
|
||||
let keys = vec![b"key10".to_vec()];
|
||||
let resp = kv_store.batch_get(BatchGetRequest { keys }).await.unwrap();
|
||||
let resp = kv_backend
|
||||
.batch_get(BatchGetRequest { keys })
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
assert!(resp.kvs.is_empty());
|
||||
|
||||
let keys = vec![b"key1".to_vec(), b"key3".to_vec(), b"key4".to_vec()];
|
||||
let resp = kv_store.batch_get(BatchGetRequest { keys }).await.unwrap();
|
||||
let resp = kv_backend
|
||||
.batch_get(BatchGetRequest { keys })
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
assert_eq!(2, resp.kvs.len());
|
||||
assert_eq!(b"key1", resp.kvs[0].key());
|
||||
@@ -241,12 +250,12 @@ pub async fn test_kv_batch_get(kv_store: impl KvBackend) {
|
||||
assert_eq!(b"val3", resp.kvs[1].value());
|
||||
}
|
||||
|
||||
pub async fn test_kv_compare_and_put(kv_store: Arc<dyn KvBackend<Error = Error>>) {
|
||||
pub async fn test_kv_compare_and_put(kv_backend: Arc<dyn KvBackend<Error = Error>>) {
|
||||
let success = Arc::new(AtomicU8::new(0));
|
||||
|
||||
let mut joins = vec![];
|
||||
for _ in 0..20 {
|
||||
let kv_store_clone = kv_store.clone();
|
||||
let kv_backend_clone = kv_backend.clone();
|
||||
let success_clone = success.clone();
|
||||
let join = tokio::spawn(async move {
|
||||
let req = CompareAndPutRequest {
|
||||
@@ -254,7 +263,7 @@ pub async fn test_kv_compare_and_put(kv_store: Arc<dyn KvBackend<Error = Error>>
|
||||
expect: vec![],
|
||||
value: b"val_new".to_vec(),
|
||||
};
|
||||
let resp = kv_store_clone.compare_and_put(req).await.unwrap();
|
||||
let resp = kv_backend_clone.compare_and_put(req).await.unwrap();
|
||||
if resp.success {
|
||||
            success_clone.fetch_add(1, Ordering::SeqCst);
        }
@@ -269,20 +278,20 @@ pub async fn test_kv_compare_and_put(kv_store: Arc<dyn KvBackend<Error = Error>>
    assert_eq!(1, success.load(Ordering::SeqCst));
 }

-pub async fn test_kv_delete_range(kv_store: impl KvBackend) {
+pub async fn test_kv_delete_range(kv_backend: impl KvBackend) {
     let req = DeleteRangeRequest {
         key: b"key3".to_vec(),
         range_end: vec![],
         prev_kv: true,
     };

-    let resp = kv_store.delete_range(req).await.unwrap();
+    let resp = kv_backend.delete_range(req).await.unwrap();
     assert_eq!(1, resp.prev_kvs.len());
     assert_eq!(1, resp.deleted);
     assert_eq!(b"key3", resp.prev_kvs[0].key());
     assert_eq!(b"val3", resp.prev_kvs[0].value());

-    let resp = kv_store.get(b"key3").await.unwrap();
+    let resp = kv_backend.get(b"key3").await.unwrap();
     assert!(resp.is_none());

     let req = DeleteRangeRequest {
@@ -291,11 +300,11 @@ pub async fn test_kv_delete_range(kv_store: impl KvBackend) {
         prev_kv: false,
     };

-    let resp = kv_store.delete_range(req).await.unwrap();
+    let resp = kv_backend.delete_range(req).await.unwrap();
     assert_eq!(1, resp.deleted);
     assert!(resp.prev_kvs.is_empty());

-    let resp = kv_store.get(b"key2").await.unwrap();
+    let resp = kv_backend.get(b"key2").await.unwrap();
     assert!(resp.is_none());

     let key = b"key1".to_vec();
@@ -306,7 +315,7 @@ pub async fn test_kv_delete_range(kv_store: impl KvBackend) {
         range_end: range_end.clone(),
         prev_kv: true,
     };
-    let resp = kv_store.delete_range(req).await.unwrap();
+    let resp = kv_backend.delete_range(req).await.unwrap();
     assert_eq!(2, resp.prev_kvs.len());

     let req = RangeRequest {
@@ -314,19 +323,19 @@ pub async fn test_kv_delete_range(kv_store: impl KvBackend) {
         range_end,
         ..Default::default()
     };
-    let resp = kv_store.range(req).await.unwrap();
+    let resp = kv_backend.range(req).await.unwrap();
     assert!(resp.kvs.is_empty());
 }

-pub async fn test_kv_batch_delete(kv_store: impl KvBackend) {
-    assert!(kv_store.get(b"key1").await.unwrap().is_some());
-    assert!(kv_store.get(b"key100").await.unwrap().is_none());
+pub async fn test_kv_batch_delete(kv_backend: impl KvBackend) {
+    assert!(kv_backend.get(b"key1").await.unwrap().is_some());
+    assert!(kv_backend.get(b"key100").await.unwrap().is_none());

     let req = BatchDeleteRequest {
         keys: vec![b"key1".to_vec(), b"key100".to_vec()],
         prev_kv: true,
     };
-    let resp = kv_store.batch_delete(req).await.unwrap();
+    let resp = kv_backend.batch_delete(req).await.unwrap();
     assert_eq!(1, resp.prev_kvs.len());
     assert_eq!(
         vec![KeyValue {
@@ -335,18 +344,18 @@ pub async fn test_kv_batch_delete(kv_store: impl KvBackend) {
         }],
         resp.prev_kvs
     );
-    assert!(kv_store.get(b"key1").await.unwrap().is_none());
+    assert!(kv_backend.get(b"key1").await.unwrap().is_none());

-    assert!(kv_store.get(b"key2").await.unwrap().is_some());
-    assert!(kv_store.get(b"key3").await.unwrap().is_some());
+    assert!(kv_backend.get(b"key2").await.unwrap().is_some());
+    assert!(kv_backend.get(b"key3").await.unwrap().is_some());

     let req = BatchDeleteRequest {
         keys: vec![b"key2".to_vec(), b"key3".to_vec()],
         prev_kv: false,
     };
-    let resp = kv_store.batch_delete(req).await.unwrap();
+    let resp = kv_backend.batch_delete(req).await.unwrap();
     assert!(resp.prev_kvs.is_empty());

-    assert!(kv_store.get(b"key2").await.unwrap().is_none());
-    assert!(kv_store.get(b"key3").await.unwrap().is_none());
+    assert!(kv_backend.get(b"key2").await.unwrap().is_none());
+    assert!(kv_backend.get(b"key3").await.unwrap().is_none());
 }

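The renamed helpers above still assume the backend was seeded with key1..key3 (values val1..val3) before they run. A minimal sketch of how a concrete backend's test module typically drives them; `prepare_kv` here is a hypothetical seeding step, not part of this diff:

    #[tokio::test]
    async fn test_memory_kv_delete_range() {
        let kv_backend = MemoryKvBackend::<Error>::new();
        prepare_kv(&kv_backend).await; // hypothetical: puts key1..key3 -> val1..val3
        test_kv_delete_range(kv_backend).await;
    }
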
@@ -268,9 +268,9 @@ mod tests {

     #[tokio::test]
     async fn test_txn_one_compare_op() {
-        let kv_store = create_kv_store().await;
+        let kv_backend = create_kv_backend().await;

-        let _ = kv_store
+        let _ = kv_backend
             .put(PutRequest {
                 key: vec![11],
                 value: vec![3],
@@ -288,7 +288,7 @@ mod tests {
             .and_then(vec![TxnOp::Put(vec![11], vec![1])])
             .or_else(vec![TxnOp::Put(vec![11], vec![2])]);

-        let txn_response = kv_store.txn(txn).await.unwrap();
+        let txn_response = kv_backend.txn(txn).await.unwrap();

         assert!(txn_response.succeeded);
         assert_eq!(txn_response.responses.len(), 1);
@@ -296,10 +296,10 @@ mod tests {

     #[tokio::test]
     async fn test_txn_multi_compare_op() {
-        let kv_store = create_kv_store().await;
+        let kv_backend = create_kv_backend().await;

         for i in 1..3 {
-            let _ = kv_store
+            let _ = kv_backend
                 .put(PutRequest {
                     key: vec![i],
                     value: vec![i],
@@ -321,7 +321,7 @@ mod tests {
             ])
             .or_else(vec![TxnOp::Put(vec![1], vec![11])]);

-        let txn_response = kv_store.txn(txn).await.unwrap();
+        let txn_response = kv_backend.txn(txn).await.unwrap();

         assert!(txn_response.succeeded);
         assert_eq!(txn_response.responses.len(), 2);
@@ -329,9 +329,9 @@ mod tests {

     #[tokio::test]
     async fn test_txn_compare_equal() {
-        let kv_store = create_kv_store().await;
+        let kv_backend = create_kv_backend().await;
         let key = vec![101u8];
-        kv_store.delete(&key, false).await.unwrap();
+        kv_backend.delete(&key, false).await.unwrap();

         let txn = Txn::new()
             .when(vec![Compare::with_not_exist_value(
@@ -340,10 +340,10 @@ mod tests {
             )])
             .and_then(vec![TxnOp::Put(key.clone(), vec![1])])
             .or_else(vec![TxnOp::Put(key.clone(), vec![2])]);
-        let txn_response = kv_store.txn(txn.clone()).await.unwrap();
+        let txn_response = kv_backend.txn(txn.clone()).await.unwrap();
         assert!(txn_response.succeeded);

-        let txn_response = kv_store.txn(txn).await.unwrap();
+        let txn_response = kv_backend.txn(txn).await.unwrap();
         assert!(!txn_response.succeeded);

         let txn = Txn::new()
@@ -354,15 +354,15 @@ mod tests {
             )])
             .and_then(vec![TxnOp::Put(key.clone(), vec![3])])
             .or_else(vec![TxnOp::Put(key, vec![4])]);
-        let txn_response = kv_store.txn(txn).await.unwrap();
+        let txn_response = kv_backend.txn(txn).await.unwrap();
         assert!(txn_response.succeeded);
     }

     #[tokio::test]
     async fn test_txn_compare_greater() {
-        let kv_store = create_kv_store().await;
+        let kv_backend = create_kv_backend().await;
         let key = vec![102u8];
-        kv_store.delete(&key, false).await.unwrap();
+        kv_backend.delete(&key, false).await.unwrap();

         let txn = Txn::new()
             .when(vec![Compare::with_not_exist_value(
@@ -371,10 +371,10 @@ mod tests {
             )])
             .and_then(vec![TxnOp::Put(key.clone(), vec![1])])
             .or_else(vec![TxnOp::Put(key.clone(), vec![2])]);
-        let txn_response = kv_store.txn(txn.clone()).await.unwrap();
+        let txn_response = kv_backend.txn(txn.clone()).await.unwrap();
         assert!(!txn_response.succeeded);

-        let txn_response = kv_store.txn(txn).await.unwrap();
+        let txn_response = kv_backend.txn(txn).await.unwrap();
         assert!(txn_response.succeeded);

         let txn = Txn::new()
@@ -385,7 +385,7 @@ mod tests {
             )])
             .and_then(vec![TxnOp::Put(key.clone(), vec![3])])
             .or_else(vec![TxnOp::Get(key.clone())]);
-        let mut txn_response = kv_store.txn(txn).await.unwrap();
+        let mut txn_response = kv_backend.txn(txn).await.unwrap();
         assert!(!txn_response.succeeded);
         let res = txn_response.responses.pop().unwrap();
         assert_eq!(
@@ -402,9 +402,9 @@ mod tests {

     #[tokio::test]
     async fn test_txn_compare_less() {
-        let kv_store = create_kv_store().await;
+        let kv_backend = create_kv_backend().await;
         let key = vec![103u8];
-        kv_store.delete(&[3], false).await.unwrap();
+        kv_backend.delete(&[3], false).await.unwrap();

         let txn = Txn::new()
             .when(vec![Compare::with_not_exist_value(
@@ -413,10 +413,10 @@ mod tests {
             )])
             .and_then(vec![TxnOp::Put(key.clone(), vec![1])])
             .or_else(vec![TxnOp::Put(key.clone(), vec![2])]);
-        let txn_response = kv_store.txn(txn.clone()).await.unwrap();
+        let txn_response = kv_backend.txn(txn.clone()).await.unwrap();
         assert!(!txn_response.succeeded);

-        let txn_response = kv_store.txn(txn).await.unwrap();
+        let txn_response = kv_backend.txn(txn).await.unwrap();
         assert!(!txn_response.succeeded);

         let txn = Txn::new()
@@ -427,7 +427,7 @@ mod tests {
             )])
             .and_then(vec![TxnOp::Put(key.clone(), vec![3])])
             .or_else(vec![TxnOp::Get(key.clone())]);
-        let mut txn_response = kv_store.txn(txn).await.unwrap();
+        let mut txn_response = kv_backend.txn(txn).await.unwrap();
         assert!(!txn_response.succeeded);
         let res = txn_response.responses.pop().unwrap();
         assert_eq!(
@@ -444,9 +444,9 @@ mod tests {

     #[tokio::test]
     async fn test_txn_compare_not_equal() {
-        let kv_store = create_kv_store().await;
+        let kv_backend = create_kv_backend().await;
         let key = vec![104u8];
-        kv_store.delete(&key, false).await.unwrap();
+        kv_backend.delete(&key, false).await.unwrap();

         let txn = Txn::new()
             .when(vec![Compare::with_not_exist_value(
@@ -455,10 +455,10 @@ mod tests {
             )])
             .and_then(vec![TxnOp::Put(key.clone(), vec![1])])
             .or_else(vec![TxnOp::Put(key.clone(), vec![2])]);
-        let txn_response = kv_store.txn(txn.clone()).await.unwrap();
+        let txn_response = kv_backend.txn(txn.clone()).await.unwrap();
         assert!(!txn_response.succeeded);

-        let txn_response = kv_store.txn(txn).await.unwrap();
+        let txn_response = kv_backend.txn(txn).await.unwrap();
         assert!(txn_response.succeeded);

         let txn = Txn::new()
@@ -469,7 +469,7 @@ mod tests {
             )])
             .and_then(vec![TxnOp::Put(key.clone(), vec![3])])
             .or_else(vec![TxnOp::Get(key.clone())]);
-        let mut txn_response = kv_store.txn(txn).await.unwrap();
+        let mut txn_response = kv_backend.txn(txn).await.unwrap();
         assert!(!txn_response.succeeded);
         let res = txn_response.responses.pop().unwrap();
         assert_eq!(
@@ -484,7 +484,7 @@ mod tests {
         );
     }

-    async fn create_kv_store() -> KvBackendRef {
+    async fn create_kv_backend() -> KvBackendRef {
         Arc::new(MemoryKvBackend::<Error>::new())
         // TODO(jiachun): Add a feature to test against etcd in github CI
         //

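The tests above all exercise the same compare-and-set shape. A hedged sketch of that shape (the `Compare::with_not_exist_value` arguments are elided by the diff, so they are abbreviated here): put an initial value only while the key is absent, otherwise read it back.

    let txn = Txn::new()
        .when(vec![/* Compare::with_not_exist_value(..) — condition on `key` */])
        .and_then(vec![TxnOp::Put(key.clone(), vec![1])])
        .or_else(vec![TxnOp::Get(key.clone())]);
    let txn_response = kv_backend.txn(txn).await?;
    // `succeeded` tells which branch ran: `and_then` on true, `or_else` on false.
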
@@ -4,7 +4,7 @@
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at
 //
-// http://www.apache.org/licenses/LICENSE-2.0
+//     http://www.apache.org/licenses/LICENSE-2.0
 //
 // Unless required by applicable law or agreed to in writing, software
 // distributed under the License is distributed on an "AS IS" BASIS,
@@ -12,6 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

+#![feature(assert_matches)]
 #![feature(btree_extract_if)]
 #![feature(async_closure)]

@@ -4,7 +4,7 @@
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at
 //
-// http://www.apache.org/licenses/LICENSE-2.0
+//     http://www.apache.org/licenses/LICENSE-2.0
 //
 // Unless required by applicable law or agreed to in writing, software
 // distributed under the License is distributed on an "AS IS" BASIS,
@@ -49,6 +49,14 @@ impl Peer {
             addr: addr.into(),
         }
     }
+
+    #[cfg(any(test, feature = "testing"))]
+    pub fn empty(id: u64) -> Self {
+        Self {
+            id,
+            addr: String::new(),
+        }
+    }
 }

 impl Display for Peer {

@@ -4,7 +4,7 @@
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at
 //
-// http://www.apache.org/licenses/LICENSE-2.0
+//     http://www.apache.org/licenses/LICENSE-2.0
 //
 // Unless required by applicable law or agreed to in writing, software
 // distributed under the License is distributed on an "AS IS" BASIS,
@@ -17,10 +17,12 @@ use std::pin::Pin;
 use std::sync::Arc;
 use std::task::{Context, Poll};

+use common_telemetry::debug;
 use futures::future::BoxFuture;
 use futures::{ready, FutureExt, Stream};
+use snafu::ensure;

-use crate::error::Result;
+use crate::error::{self, Result};
 use crate::kv_backend::KvBackendRef;
 use crate::rpc::store::{RangeRequest, RangeResponse};
 use crate::rpc::KeyValue;
@@ -39,7 +41,16 @@ enum PaginationStreamState<K, V> {
     Error,
 }

-pub const DEFAULT_PAGE_SIZE: usize = 512;
+/// The Range Request's default page size.
+///
+/// It depends on the upstream KvStore's server-side gRPC message size limit
+/// (e.g., etcd's default gRPC message size limit is 4MiB).
+///
+/// Generally, almost all metadata is smaller than 2700 bytes.
+/// Therefore, we can set the [DEFAULT_PAGE_SIZE] to 1536 statically.
+///
+/// TODO(weny): Consider updating the default page size dynamically.
+pub const DEFAULT_PAGE_SIZE: usize = 1536;

 struct PaginationStreamFactory {
     kv: KvBackendRef,
@@ -55,10 +66,13 @@ struct PaginationStreamFactory {
     pub range_end: Vec<u8>,

     /// page_size is the pagination page size.
-    pub page_size: usize,
+    page_size: usize,
     /// keys_only when set returns only the keys and not the values.
     pub keys_only: bool,

+    /// It reduces the page size if the response size exceeds the limit.
+    pub adaptive_page_size: usize,
+
     pub more: bool,
 }

@@ -78,19 +92,58 @@ impl PaginationStreamFactory {
             page_size,
             keys_only,
             more,
+            adaptive_page_size: if page_size == 0 {
+                DEFAULT_ADAPTIVE_PAGE_SIZE
+            } else {
+                page_size
+            },
         }
     }
 }

+const DEFAULT_ADAPTIVE_PAGE_SIZE: usize = 1024;
+
 impl PaginationStreamFactory {
-    pub async fn read_next(self) -> Result<(Self, Option<RangeResponse>)> {
+    fn try_reduce_adaptive_page_size(&mut self) -> Result<()> {
+        self.adaptive_page_size /= 2;
+
+        ensure!(
+            self.adaptive_page_size != 0,
+            error::UnexpectedSnafu {
+                err_msg: "Exceeded maximum number of adaptive range retries"
+            }
+        );
+
+        Ok(())
+    }
+
+    /// Decreases the `page size` if the response message size exceeds the limit.
+    /// TODO(weny): Consider adding an e2e test.
+    #[async_recursion::async_recursion]
+    async fn adaptive_range(&mut self, req: RangeRequest) -> Result<RangeResponse> {
+        match self.kv.range(req.clone()).await {
+            Ok(resp) => Ok(resp),
+            Err(err) => {
+                if err.is_exceeded_size_limit() {
+                    self.try_reduce_adaptive_page_size()?;
+                    debug!("Reset page_size to {}", self.adaptive_page_size);
+
+                    self.adaptive_range(req.with_limit(self.adaptive_page_size as i64))
+                        .await
+                } else {
+                    Err(err)
+                }
+            }
+        }
+    }
+
+    pub async fn read_next(mut self) -> Result<(Self, Option<RangeResponse>)> {
         if self.more {
             let resp = self
-                .kv
-                .range(RangeRequest {
+                .adaptive_range(RangeRequest {
                     key: self.key.clone(),
                     range_end: self.range_end.clone(),
-                    limit: self.page_size as i64,
+                    limit: self.adaptive_page_size as i64,
                     keys_only: self.keys_only,
                 })
                 .await?;
@@ -111,6 +164,7 @@ impl PaginationStreamFactory {
                 page_size: self.page_size,
                 keys_only: self.keys_only,
                 more: resp.more,
+                adaptive_page_size: self.adaptive_page_size,
             },
             Some(resp),
         ))

@@ -214,6 +268,7 @@ impl<K, V> Stream for PaginationStream<K, V> {
 #[cfg(test)]
 mod tests {

+    use std::assert_matches::assert_matches;
     use std::collections::BTreeMap;

     use futures::TryStreamExt;
@@ -228,12 +283,47 @@ mod tests {
         Ok((kv.key.clone(), kv.value))
     }

+    #[test]
+    fn test_try_reduce_page_size() {
+        let kv_backend = Arc::new(MemoryKvBackend::<Error>::new()) as _;
+
+        let mut factory =
+            PaginationStreamFactory::new(&kv_backend, vec![], vec![], 2, false, false);
+
+        // new adaptive page size: 1
+        factory.try_reduce_adaptive_page_size().unwrap();
+
+        // new adaptive page size: 0
+        assert_matches!(
+            factory.try_reduce_adaptive_page_size().unwrap_err(),
+            error::Error::Unexpected { .. }
+        );
+
+        let mut factory =
+            PaginationStreamFactory::new(&kv_backend, vec![], vec![], 1024, false, false);
+
+        factory.try_reduce_adaptive_page_size().unwrap();
+
+        assert_eq!(factory.adaptive_page_size, 512);
+
+        factory.try_reduce_adaptive_page_size().unwrap();
+
+        assert_eq!(factory.adaptive_page_size, 256);
+
+        let mut factory =
+            PaginationStreamFactory::new(&kv_backend, vec![], vec![], 0, false, false);
+
+        factory.try_reduce_adaptive_page_size().unwrap();
+
+        assert_eq!(factory.adaptive_page_size, DEFAULT_ADAPTIVE_PAGE_SIZE / 2);
+    }
+
     #[tokio::test]
     async fn test_range_empty() {
-        let kv_store = Arc::new(MemoryKvBackend::<Error>::new());
+        let kv_backend = Arc::new(MemoryKvBackend::<Error>::new());

         let stream = PaginationStream::new(
-            kv_store.clone(),
+            kv_backend.clone(),
             RangeRequest {
                 key: b"a".to_vec(),
                 ..Default::default()
@@ -248,14 +338,14 @@ mod tests {

     #[tokio::test]
     async fn test_range() {
-        let kv_store = Arc::new(MemoryKvBackend::<Error>::new());
+        let kv_backend = Arc::new(MemoryKvBackend::<Error>::new());
         let total = 26;

         let mut expected = BTreeMap::<Vec<u8>, ()>::new();
         for i in 0..total {
             let key = vec![97 + i];

-            assert!(kv_store
+            assert!(kv_backend
                 .put(PutRequest {
                     key: key.clone(),
                     value: key.clone(),
@@ -271,7 +361,7 @@ mod tests {
         let range_end = b"f".to_vec();

         let stream = PaginationStream::new(
-            kv_store.clone(),
+            kv_backend.clone(),
             RangeRequest {
                 key,
                 range_end,

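A standalone sketch of the halving strategy those tests pin down: every size-limit retry halves the adaptive page size, so a request starting at the new DEFAULT_PAGE_SIZE of 1536 can shrink about ten times before the size reaches zero and the stream gives up with an Unexpected error.

    // Dependency-free illustration; not part of the diff.
    fn halved_page_sizes(mut page_size: usize) -> Vec<usize> {
        let mut sizes = Vec::new();
        while page_size > 1 {
            page_size /= 2;
            sizes.push(page_size);
        }
        sizes
    }

    // halved_page_sizes(1536) == [768, 384, 192, 96, 48, 24, 12, 6, 3, 1]
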
@@ -58,7 +58,10 @@ pub fn find_leaders(region_routes: &[RegionRoute]) -> HashSet<Peer> {
         .collect()
 }

-pub fn convert_to_region_map(region_routes: &[RegionRoute]) -> HashMap<u32, &Peer> {
+/// Returns the HashMap<[RegionNumber], &[Peer]>;
+///
+/// If the region doesn't have a leader peer, the [Region] will be omitted.
+pub fn convert_to_region_leader_map(region_routes: &[RegionRoute]) -> HashMap<RegionNumber, &Peer> {
     region_routes
         .iter()
         .filter_map(|x| {
@@ -69,7 +72,43 @@ pub fn convert_to_region_map(region_routes: &[RegionRoute]) -> HashMap<u32, &Pee
         .collect::<HashMap<_, _>>()
 }

-pub fn find_region_leader(region_routes: &[RegionRoute], region_number: u32) -> Option<&Peer> {
+/// Returns the HashMap<[RegionNumber], HashSet<DatanodeId>>
+pub fn convert_to_region_peer_map(
+    region_routes: &[RegionRoute],
+) -> HashMap<RegionNumber, HashSet<u64>> {
+    region_routes
+        .iter()
+        .map(|x| {
+            let set = x
+                .follower_peers
+                .iter()
+                .map(|p| p.id)
+                .chain(x.leader_peer.as_ref().map(|p| p.id))
+                .collect::<HashSet<_>>();
+
+            (x.region.id.region_number(), set)
+        })
+        .collect::<HashMap<_, _>>()
+}
+
+/// Returns the HashMap<[RegionNumber], [RegionStatus]>;
+pub fn convert_to_region_leader_status_map(
+    region_routes: &[RegionRoute],
+) -> HashMap<RegionNumber, RegionStatus> {
+    region_routes
+        .iter()
+        .filter_map(|x| {
+            x.leader_status
+                .as_ref()
+                .map(|status| (x.region.id.region_number(), *status))
+        })
+        .collect::<HashMap<_, _>>()
+}
+
+pub fn find_region_leader(
+    region_routes: &[RegionRoute],
+    region_number: RegionNumber,
+) -> Option<&Peer> {
     region_routes
         .iter()
         .find(|x| x.region.id.region_number() == region_number)
@@ -203,7 +242,7 @@ pub struct RegionRoute {
 }

 /// The status of the [Region].
-#[derive(Debug, Clone, Deserialize, Serialize, PartialEq)]
+#[derive(Debug, Clone, Copy, Deserialize, Serialize, PartialEq)]
 pub enum RegionStatus {
     /// The following cases in which the [Region] will be downgraded.
     ///
@@ -236,17 +275,27 @@ impl RegionRoute {
     pub fn downgrade_leader(&mut self) {
         self.leader_status = Some(RegionStatus::Downgraded)
     }
+
+    /// Sets the leader status.
+    ///
+    /// Returns true if updated.
+    pub fn set_leader_status(&mut self, status: Option<RegionStatus>) -> bool {
+        let updated = self.leader_status != status;
+
+        self.leader_status = status;
+        updated
+    }
 }

 pub struct RegionRoutes(pub Vec<RegionRoute>);

 impl RegionRoutes {
-    pub fn region_map(&self) -> HashMap<u32, &Peer> {
-        convert_to_region_map(&self.0)
+    pub fn region_leader_map(&self) -> HashMap<RegionNumber, &Peer> {
+        convert_to_region_leader_map(&self.0)
     }

-    pub fn find_region_leader(&self, region_number: u32) -> Option<&Peer> {
-        self.region_map().get(&region_number).copied()
+    pub fn find_region_leader(&self, region_number: RegionNumber) -> Option<&Peer> {
+        self.region_leader_map().get(&region_number).copied()
     }
 }

@@ -258,6 +307,16 @@ pub struct Region {
     pub attrs: BTreeMap<String, String>,
 }

+impl Region {
+    #[cfg(any(test, feature = "testing"))]
+    pub fn new_test(id: RegionId) -> Self {
+        Self {
+            id,
+            ..Default::default()
+        }
+    }
+}
+
 impl From<PbRegion> for Region {
     fn from(r: PbRegion) -> Self {
         Self {

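A hedged usage fragment for the renamed helper (`region_routes` and `region_number` stand in for values from the surrounding code): look up the leader peer of one region, tolerating regions that currently have no leader and are therefore absent from the map.

    let leader_map = convert_to_region_leader_map(&region_routes);
    match leader_map.get(&region_number) {
        Some(peer) => println!("region {region_number} is led by datanode {}", peer.id),
        None => println!("region {region_number} has no leader at the moment"),
    }
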
@@ -171,9 +171,9 @@ mod tests {

     #[tokio::test]
     async fn test_sequence() {
-        let kv_store = Arc::new(MemoryKvBackend::default());
+        let kv_backend = Arc::new(MemoryKvBackend::default());
         let initial = 1024;
-        let seq = Sequence::new("test_seq", initial, 10, kv_store);
+        let seq = Sequence::new("test_seq", initial, 10, kv_backend);

         for i in initial..initial + 100 {
             assert_eq!(i, seq.next().await.unwrap());
@@ -182,9 +182,9 @@ mod tests {

     #[tokio::test]
     async fn test_sequence_out_of_rage() {
-        let kv_store = Arc::new(MemoryKvBackend::default());
+        let kv_backend = Arc::new(MemoryKvBackend::default());
         let initial = u64::MAX - 10;
-        let seq = Sequence::new("test_seq", initial, 10, kv_store);
+        let seq = Sequence::new("test_seq", initial, 10, kv_backend);

         for _ in 0..10 {
             let _ = seq.next().await.unwrap();
@@ -248,8 +248,8 @@ mod tests {
         }
     }

-    let kv_store = Arc::new(Noop {});
-    let seq = Sequence::new("test_seq", 0, 10, kv_store);
+    let kv_backend = Arc::new(Noop {});
+    let seq = Sequence::new("test_seq", 0, 10, kv_backend);

     let next = seq.next().await;
     assert!(next.is_err());

@@ -4,7 +4,7 @@
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at
 //
-// http://www.apache.org/licenses/LICENSE-2.0
+//     http://www.apache.org/licenses/LICENSE-2.0
 //
 // Unless required by applicable law or agreed to in writing, software
 // distributed under the License is distributed on an "AS IS" BASIS,

@@ -6,4 +6,4 @@ license.workspace = true

 [dependencies]
 async-trait.workspace = true
-common-procedure = { workspace = true }
+common-procedure.workspace = true

@@ -8,13 +8,13 @@ license.workspace = true
 async-stream.workspace = true
 async-trait.workspace = true
 backon = "0.4"
-common-error = { workspace = true }
-common-macro = { workspace = true }
-common-runtime = { workspace = true }
-common-telemetry = { workspace = true }
+common-error.workspace = true
+common-macro.workspace = true
+common-runtime.workspace = true
+common-telemetry.workspace = true
 futures.workspace = true
 humantime-serde.workspace = true
-object-store = { workspace = true }
+object-store.workspace = true
 serde.workspace = true
 serde_json = "1.0"
 smallvec.workspace = true
@@ -23,5 +23,5 @@ tokio.workspace = true
 uuid.workspace = true

 [dev-dependencies]
-common-test-util = { workspace = true }
+common-test-util.workspace = true
 futures-util.workspace = true

@@ -5,16 +5,16 @@ edition.workspace = true
 license.workspace = true

 [dependencies]
-api = { workspace = true }
+api.workspace = true
 async-trait.workspace = true
-common-error = { workspace = true }
-common-macro = { workspace = true }
-common-recordbatch = { workspace = true }
-common-time = { workspace = true }
+common-error.workspace = true
+common-macro.workspace = true
+common-recordbatch.workspace = true
+common-time.workspace = true
 datafusion-common.workspace = true
 datafusion-expr.workspace = true
 datafusion.workspace = true
-datatypes = { workspace = true }
+datatypes.workspace = true
 serde.workspace = true
 snafu.workspace = true
 sqlparser.workspace = true
@@ -22,5 +22,5 @@ sqlparser_derive = "0.1"
 statrs = "0.16"

 [dev-dependencies]
-common-base = { workspace = true }
+common-base.workspace = true
 tokio.workspace = true

@@ -5,15 +5,15 @@ edition.workspace = true
 license.workspace = true

 [dependencies]
-common-error = { workspace = true }
-common-macro = { workspace = true }
+common-error.workspace = true
+common-macro.workspace = true
 datafusion-common.workspace = true
 datafusion.workspace = true
-datatypes = { workspace = true }
+datatypes.workspace = true
 futures.workspace = true
 paste = "1.0"
 serde.workspace = true
-snafu = { version = "0.7", features = ["backtraces"] }
+snafu.workspace = true

 [dev-dependencies]
 serde_json = "1.0"

@@ -43,7 +43,7 @@ pub trait RecordBatchStream: Stream<Item = Result<RecordBatch>> {

 pub type SendableRecordBatchStream = Pin<Box<dyn RecordBatchStream + Send>>;

-#[derive(Debug, Clone)]
+#[derive(Debug, Clone, PartialEq, Eq)]
 pub struct OrderOption {
     pub name: String,
     pub options: SortOptions,

@@ -6,9 +6,9 @@ license.workspace = true

 [dependencies]
 async-trait.workspace = true
-common-error = { workspace = true }
-common-macro = { workspace = true }
-common-telemetry = { workspace = true }
+common-error.workspace = true
+common-macro.workspace = true
+common-telemetry.workspace = true
 lazy_static.workspace = true
 once_cell.workspace = true
 paste.workspace = true

@@ -8,28 +8,28 @@ license.workspace = true
 async-recursion = "1.0"
 async-trait.workspace = true
 bytes = "1.1"
-catalog = { workspace = true }
-common-catalog = { workspace = true }
-common-error = { workspace = true }
-common-macro = { workspace = true }
-common-telemetry = { workspace = true }
+catalog.workspace = true
+common-catalog.workspace = true
+common-error.workspace = true
+common-macro.workspace = true
+common-telemetry.workspace = true
 datafusion-common.workspace = true
 datafusion-expr.workspace = true
 datafusion-substrait.workspace = true
 datafusion.workspace = true
-datatypes = { workspace = true }
+datatypes.workspace = true
 futures = "0.3"
-promql = { workspace = true }
+promql.workspace = true
 prost.workspace = true
-session = { workspace = true }
+session.workspace = true
 snafu.workspace = true
-table = { workspace = true }
+table.workspace = true

 [dependencies.substrait_proto]
 package = "substrait"
 version = "0.17"

 [dev-dependencies]
-datatypes = { workspace = true }
-table = { workspace = true }
+datatypes.workspace = true
+table.workspace = true
 tokio.workspace = true

@@ -10,7 +10,7 @@ deadlock_detection = ["parking_lot/deadlock_detection"]

 [dependencies]
 backtrace = "0.3"
-common-error = { workspace = true }
+common-error.workspace = true
 console-subscriber = { version = "0.1", optional = true }
 lazy_static.workspace = true
 once_cell.workspace = true
@@ -27,7 +27,6 @@ serde.workspace = true
 tokio.workspace = true
 tracing = "0.1"
 tracing-appender = "0.2"
-tracing-bunyan-formatter = "0.3"
 tracing-futures = { version = "0.2", features = ["futures-03"] }
 tracing-log = "0.1"
 tracing-opentelemetry = "0.17"

@@ -23,7 +23,6 @@ use serde::{Deserialize, Serialize};
 pub use tracing::{event, span, Level};
 use tracing_appender::non_blocking::WorkerGuard;
 use tracing_appender::rolling::{RollingFileAppender, Rotation};
-use tracing_bunyan_formatter::{BunyanFormattingLayer, JsonStorageLayer};
 use tracing_log::LogTracer;
 use tracing_subscriber::fmt::Layer;
 use tracing_subscriber::layer::SubscriberExt;
@@ -140,7 +139,7 @@ pub fn init_global_logging(
     // JSON log layer.
     let rolling_appender = RollingFileAppender::new(Rotation::HOURLY, dir, app_name);
     let (rolling_writer, rolling_writer_guard) = tracing_appender::non_blocking(rolling_appender);
-    let file_logging_layer = BunyanFormattingLayer::new(app_name.to_string(), rolling_writer);
+    let file_logging_layer = Layer::new().with_writer(rolling_writer);
     guards.push(rolling_writer_guard);

     // error JSON log layer.
@@ -148,8 +147,7 @@ pub fn init_global_logging(
         RollingFileAppender::new(Rotation::HOURLY, dir, format!("{}-{}", app_name, "err"));
     let (err_rolling_writer, err_rolling_writer_guard) =
         tracing_appender::non_blocking(err_rolling_appender);
-    let err_file_logging_layer =
-        BunyanFormattingLayer::new(app_name.to_string(), err_rolling_writer);
+    let err_file_logging_layer = Layer::new().with_writer(err_rolling_writer);
     guards.push(err_rolling_writer_guard);

     // resolve log level settings from:
@@ -191,7 +189,6 @@ pub fn init_global_logging(

     Registry::default()
         .with(tokio_console_layer)
-        .with(JsonStorageLayer)
         .with(stdout_logging_layer)
         .with(file_logging_layer)
         .with(err_file_logging_layer.with_filter(filter::LevelFilter::ERROR))
@@ -203,7 +200,6 @@ pub fn init_global_logging(
     #[cfg(not(feature = "tokio-console"))]
     let subscriber = Registry::default()
         .with(filter)
-        .with(JsonStorageLayer)
         .with(stdout_logging_layer)
         .with(file_logging_layer)
         .with(err_file_logging_layer.with_filter(filter::LevelFilter::ERROR));

@@ -11,6 +11,7 @@
 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 // See the License for the specific language governing permissions and
 // limitations under the License.

+use std::sync::atomic::{AtomicUsize, Ordering};

 use once_cell::sync::OnceCell;

@@ -8,11 +8,11 @@ license.workspace = true
 arrow.workspace = true
 chrono-tz = "0.8"
 chrono.workspace = true
-common-error = { workspace = true }
-common-macro = { workspace = true }
+common-error.workspace = true
+common-macro.workspace = true
 serde = { version = "1.0", features = ["derive"] }
 serde_json = "1.0"
-snafu = { version = "0.7", features = ["backtraces"] }
+snafu.workspace = true

 [dev-dependencies]
 rand.workspace = true

@@ -182,7 +182,7 @@ impl Timestamp {

     /// Split a [Timestamp] into seconds part and nanoseconds part.
     /// Notice the seconds part of split result is always rounded down to floor.
-    fn split(&self) -> (i64, u32) {
+    pub fn split(&self) -> (i64, u32) {
         let sec_mul = (TimeUnit::Second.factor() / self.unit.factor()) as i64;
         let nsec_mul = (self.unit.factor() / TimeUnit::Nanosecond.factor()) as i64;

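A worked example of the now-public `split`, assuming the `Timestamp::new(value, unit)` constructor: per the floor rule in the doc comment, -1500 ms splits into -2 seconds plus a non-negative 500_000_000 ns remainder, since -1500 = (-2) * 1000 + 500.

    let ts = Timestamp::new(-1500, TimeUnit::Millisecond);
    assert_eq!(ts.split(), (-2, 500_000_000));
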
@@ -8,7 +8,7 @@ license.workspace = true
 testing = []

 [dependencies]
-api = { workspace = true }
+api.workspace = true
 arrow-flight.workspace = true
 async-compat = "0.2"
 async-stream.workspace = true
@@ -16,55 +16,55 @@ async-trait.workspace = true
 axum = "0.6"
 axum-macros = "0.3"
 bytes = "1.1"
-catalog = { workspace = true }
-common-base = { workspace = true }
-common-catalog = { workspace = true }
-common-config = { workspace = true }
-common-datasource = { workspace = true }
-common-error = { workspace = true }
-common-function = { workspace = true }
-common-greptimedb-telemetry = { workspace = true }
-common-grpc = { workspace = true }
-common-grpc-expr = { workspace = true }
-common-macro = { workspace = true }
-common-meta = { workspace = true }
-common-procedure = { workspace = true }
-common-query = { workspace = true }
-common-recordbatch = { workspace = true }
-common-runtime = { workspace = true }
-common-telemetry = { workspace = true }
-common-time = { workspace = true }
+catalog.workspace = true
+common-base.workspace = true
+common-catalog.workspace = true
+common-config.workspace = true
+common-datasource.workspace = true
+common-error.workspace = true
+common-function.workspace = true
+common-greptimedb-telemetry.workspace = true
+common-grpc-expr.workspace = true
+common-grpc.workspace = true
+common-macro.workspace = true
+common-meta.workspace = true
+common-procedure.workspace = true
+common-query.workspace = true
+common-recordbatch.workspace = true
+common-runtime.workspace = true
+common-telemetry.workspace = true
+common-time.workspace = true
 dashmap = "5.4"
 datafusion-common.workspace = true
 datafusion-expr.workspace = true
 datafusion.workspace = true
-datatypes = { workspace = true }
-file-engine = { workspace = true }
+datatypes.workspace = true
+file-engine.workspace = true
 futures = "0.3"
 futures-util.workspace = true
 humantime-serde.workspace = true
 hyper = { version = "0.14", features = ["full"] }
 lazy_static.workspace = true
-log-store = { workspace = true }
-meta-client = { workspace = true }
-mito2 = { workspace = true }
-object-store = { workspace = true }
+log-store.workspace = true
+meta-client.workspace = true
+mito2.workspace = true
+object-store.workspace = true
 pin-project = "1.0"
 prometheus.workspace = true
 prost.workspace = true
-query = { workspace = true }
-reqwest = { workspace = true }
+query.workspace = true
+reqwest.workspace = true
 secrecy = { version = "0.8", features = ["serde", "alloc"] }
 serde.workspace = true
 serde_json = "1.0"
-servers = { workspace = true }
-session = { workspace = true }
-snafu = { version = "0.7", features = ["backtraces"] }
-sql = { workspace = true }
-storage = { workspace = true }
-store-api = { workspace = true }
-substrait = { workspace = true }
-table = { workspace = true }
+servers.workspace = true
+session.workspace = true
+snafu.workspace = true
+sql.workspace = true
+storage.workspace = true
+store-api.workspace = true
+substrait.workspace = true
+table.workspace = true
 tokio-stream = { version = "0.1", features = ["net"] }
 tokio.workspace = true
 toml.workspace = true
@@ -76,8 +76,8 @@ uuid.workspace = true

 [dev-dependencies]
 axum-test-helper = { git = "https://github.com/sunng87/axum-test-helper.git", branch = "patch-1" }
-client = { workspace = true }
-common-query = { workspace = true }
-common-test-util = { workspace = true }
+client.workspace = true
+common-query.workspace = true
+common-test-util.workspace = true
 datafusion-common.workspace = true
-session = { workspace = true }
+session.workspace = true

@@ -17,6 +17,7 @@ use std::future::Future;
 use std::sync::atomic::{AtomicBool, Ordering};
 use std::sync::Arc;

+use api::v1::meta::GrantedRegion;
 use async_trait::async_trait;
 use common_error::ext::ErrorExt;
 use common_error::status_code::StatusCode;
@@ -26,6 +27,7 @@ use common_meta::heartbeat::handler::{
 };
 use common_telemetry::{debug, error, info, trace, warn};
 use snafu::OptionExt;
+use store_api::region_engine::RegionRole;
 use store_api::region_request::{RegionCloseRequest, RegionRequest};
 use store_api::storage::RegionId;
 #[cfg(test)]
@@ -122,10 +124,11 @@ impl RegionAliveKeeper {
         }
     }

-    async fn keep_lived(&self, designated_regions: Vec<RegionId>, deadline: Instant) {
-        for region_id in designated_regions {
+    async fn keep_lived(&self, regions: &[GrantedRegion], deadline: Instant) {
+        for region in regions {
+            let (role, region_id) = (region.role().into(), RegionId::from(region.region_id));
             if let Some(handle) = self.find_handle(region_id).await {
-                handle.reset_deadline(deadline).await;
+                handle.reset_deadline(role, deadline).await;
             }
             // Else the region alive keeper might be triggered by lagging messages, we can safely ignore it.
         }
@@ -235,12 +238,8 @@ impl HeartbeatResponseHandler for RegionAliveKeeper {
         })?;
         let start_instant = self.epoch + Duration::from_millis(region_lease.duration_since_epoch);
         let deadline = start_instant + Duration::from_secs(region_lease.lease_seconds);
-        let region_ids = region_lease
-            .region_ids
-            .iter()
-            .map(|id| RegionId::from_u64(*id))
-            .collect();
-        self.keep_lived(region_ids, deadline).await;

+        self.keep_lived(&region_lease.regions, deadline).await;
         Ok(HandleControl::Continue)
     }
 }
@@ -251,7 +250,8 @@ enum CountdownCommand {
     /// 4 * `heartbeat_interval_millis`
     Start(u64),
-    /// Reset countdown deadline to the given instance.
-    Reset(Instant),
+    /// (NextRole, Deadline)
+    Reset((RegionRole, Instant)),
     /// Returns the current deadline of the countdown task.
     #[cfg(test)]
     Deadline(oneshot::Sender<Instant>),
@@ -319,8 +319,12 @@ impl CountdownTaskHandle {
         None
     }

-    async fn reset_deadline(&self, deadline: Instant) {
-        if let Err(e) = self.tx.send(CountdownCommand::Reset(deadline)).await {
+    async fn reset_deadline(&self, role: RegionRole, deadline: Instant) {
+        if let Err(e) = self
+            .tx
+            .send(CountdownCommand::Reset((role, deadline)))
+            .await
+        {
             warn!(
                 "Failed to reset region alive keeper deadline: {e}. \
                 Maybe the task is stopped due to region been closed."
@@ -368,13 +372,17 @@ impl CountdownTask {
                     let first_deadline = Instant::now() + Duration::from_millis(heartbeat_interval_millis) * 4;
                     countdown.set(tokio::time::sleep_until(first_deadline));
                 },
-                Some(CountdownCommand::Reset(deadline)) => {
+                Some(CountdownCommand::Reset((role, deadline))) => {
+                    // The first-time granted regions might be ignored because the `first_deadline` is larger than the `region_lease_timeout`.
+                    // Therefore, we set writable at the outside.
+                    // TODO(weny): Considers setting `first_deadline` to `region_lease_timeout`.
+                    let _ = self.region_server.set_writable(self.region_id, role.writable());
+
                     if countdown.deadline() < deadline {
                         trace!(
                             "Reset deadline of region {region_id} to approximately {} seconds later",
                             (deadline - Instant::now()).as_secs_f32(),
                         );
-                        let _ = self.region_server.set_writable(self.region_id, true);
                         countdown.set(tokio::time::sleep_until(deadline));
                     }
                     // Else the countdown could be either:
@@ -434,6 +442,8 @@ impl CountdownTask {

 #[cfg(test)]
 mod test {
+    use api::v1::meta::RegionRole;
+
     use super::*;
     use crate::tests::mock_region_server;

@@ -455,7 +465,13 @@ mod test {

         // extend lease then sleep
         alive_keeper
-            .keep_lived(vec![region_id], Instant::now() + Duration::from_millis(500))
+            .keep_lived(
+                &[GrantedRegion {
+                    region_id: region_id.as_u64(),
+                    role: RegionRole::Leader.into(),
+                }],
+                Instant::now() + Duration::from_millis(500),
+            )
             .await;
         tokio::time::sleep(Duration::from_millis(500)).await;
         assert!(alive_keeper.find_handle(region_id).await.is_some());
@@ -499,7 +515,10 @@ mod test {
         // reset deadline
         // a nearer deadline will be ignored
         countdown_handle
-            .reset_deadline(Instant::now() + Duration::from_millis(heartbeat_interval_millis))
+            .reset_deadline(
+                RegionRole::Leader.into(),
+                Instant::now() + Duration::from_millis(heartbeat_interval_millis),
+            )
             .await;
         assert!(
             countdown_handle.deadline().await.unwrap()
@@ -508,7 +527,10 @@ mod test {

         // only a farther deadline will be accepted
         countdown_handle
-            .reset_deadline(Instant::now() + Duration::from_millis(heartbeat_interval_millis * 5))
+            .reset_deadline(
+                RegionRole::Leader.into(),
+                Instant::now() + Duration::from_millis(heartbeat_interval_millis * 5),
+            )
             .await;
         assert!(
             countdown_handle.deadline().await.unwrap()

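The deadline handling those assertions rely on can be condensed into a dependency-free sketch (not the actual task code): the writable flag follows the granted role on every reset, while the countdown deadline only ever moves forward.

    use std::time::{Duration, Instant};

    fn apply_reset(current: Instant, proposed: Instant) -> Instant {
        // Nearer deadlines are ignored; only farther ones extend the lease.
        if proposed > current { proposed } else { current }
    }

    fn main() {
        let now = Instant::now();
        let current = now + Duration::from_secs(4);
        assert_eq!(apply_reset(current, now + Duration::from_secs(1)), current);
        assert_eq!(apply_reset(current, now + Duration::from_secs(20)), now + Duration::from_secs(20));
    }
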
@@ -250,8 +250,6 @@ pub struct CompactionConfig {
     pub max_files_in_level0: usize,
-    /// Max task number for SST purge task after compaction.
-    pub max_purge_tasks: usize,
     /// Buffer threshold while writing SST files
     pub sst_write_buffer_size: ReadableSize,
 }

 impl Default for CompactionConfig {
@@ -260,7 +258,6 @@ impl Default for CompactionConfig {
             max_inflight_tasks: 4,
             max_files_in_level0: 8,
-            max_purge_tasks: 32,
             sst_write_buffer_size: ReadableSize::mb(8),
         }
     }
 }
@@ -312,7 +309,6 @@ impl From<&DatanodeOptions> for StorageEngineConfig {
             manifest_gc_duration: value.storage.manifest.gc_duration,
             max_files_in_l0: value.storage.compaction.max_files_in_level0,
-            max_purge_tasks: value.storage.compaction.max_purge_tasks,
             sst_write_buffer_size: value.storage.compaction.sst_write_buffer_size,
             max_flush_tasks: value.storage.flush.max_flush_tasks,
             region_write_buffer_size: value.storage.flush.region_write_buffer_size,
             picker_schedule_interval: value.storage.flush.picker_schedule_interval,

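With `max_purge_tasks` gone, a default `CompactionConfig` carries just the values from the `Default` impl above; a small sketch of what downstream code can still rely on:

    let config = CompactionConfig::default();
    assert_eq!(config.max_inflight_tasks, 4);
    assert_eq!(config.max_files_in_level0, 8);
    assert_eq!(config.sst_write_buffer_size, ReadableSize::mb(8));
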
@@ -293,7 +293,9 @@ impl HeartbeatTask {
                 role: RegionRole::from(stat.role).into(),
                 approximate_bytes,
-                // TODO(ruihang): scratch more info
-                ..Default::default()
+                rcus: 0,
+                wcus: 0,
+                approximate_rows: 0,
             };
             region_stats.push(region_stat);
         }

Some files were not shown because too many files have changed in this diff.