Mirror of https://github.com/GreptimeTeam/greptimedb.git (synced 2025-12-24 23:19:57 +00:00)

Compare commits: v0.15.0 ... poc/create (43 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 7e79b4b2f6 | |
| | 4ad40af468 | |
| | e4b048e788 | |
| | ecbf372de3 | |
| | 3d81a17360 | |
| | 025cae3679 | |
| | 68409e28ea | |
| | 699406ae32 | |
| | 344006deca | |
| | 63803f2b43 | |
| | cf62767b98 | |
| | 4e53c1531d | |
| | 892cb66c53 | |
| | 8b392477c8 | |
| | 905593dc16 | |
| | 6c04cb9b19 | |
| | 24da3367c1 | |
| | 80b14965a6 | |
| | 5da3f86d0c | |
| | 151273d1df | |
| | b0289dbdde | |
| | c51730a954 | |
| | 207709c727 | |
| | deca8c44fa | |
| | 2edd861ce9 | |
| | 14f3a4ab05 | |
| | 34875c0346 | |
| | 1d07864b29 | |
| | 9be75361a4 | |
| | 9c1df68a5f | |
| | 0209461155 | |
| | e728cb33fb | |
| | cde7e11983 | |
| | 944b4b3e49 | |
| | 7953b090c0 | |
| | 7aa9af5ba6 | |
| | 7a9444c85b | |
| | bb12be3310 | |
| | 24019334ee | |
| | 116d5cf82b | |
| | 90a3894564 | |
| | 39d3e0651d | |
| | a49edc6ca6 | |
Cargo.lock (generated file, 140 changed lines)
@@ -1986,7 +1986,7 @@ dependencies = [
|
||||
"operator",
|
||||
"query",
|
||||
"rand 0.9.0",
|
||||
"reqwest",
|
||||
"reqwest 0.12.9",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"servers",
|
||||
@@ -2118,13 +2118,14 @@ dependencies = [
|
||||
"mito2",
|
||||
"moka",
|
||||
"nu-ansi-term",
|
||||
"object-store",
|
||||
"plugins",
|
||||
"prometheus",
|
||||
"prost 0.13.5",
|
||||
"query",
|
||||
"rand 0.9.0",
|
||||
"regex",
|
||||
"reqwest",
|
||||
"reqwest 0.12.9",
|
||||
"rexpect",
|
||||
"serde",
|
||||
"serde_json",
|
||||
@@ -2220,6 +2221,7 @@ dependencies = [
|
||||
"humantime-serde",
|
||||
"meta-client",
|
||||
"num_cpus",
|
||||
"object-store",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"serde_with",
|
||||
@@ -2332,6 +2334,7 @@ dependencies = [
|
||||
"datafusion",
|
||||
"datafusion-common",
|
||||
"datafusion-expr",
|
||||
"datafusion-functions-aggregate-common",
|
||||
"datatypes",
|
||||
"derive_more",
|
||||
"geo",
|
||||
@@ -2370,7 +2373,7 @@ dependencies = [
|
||||
"common-test-util",
|
||||
"common-version",
|
||||
"hyper 0.14.30",
|
||||
"reqwest",
|
||||
"reqwest 0.12.9",
|
||||
"serde",
|
||||
"tempfile",
|
||||
"tokio",
|
||||
@@ -3762,7 +3765,7 @@ dependencies = [
|
||||
"prometheus",
|
||||
"prost 0.13.5",
|
||||
"query",
|
||||
"reqwest",
|
||||
"reqwest 0.12.9",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"servers",
|
||||
@@ -4699,7 +4702,6 @@ version = "0.15.0"
|
||||
dependencies = [
|
||||
"api",
|
||||
"arc-swap",
|
||||
"async-stream",
|
||||
"async-trait",
|
||||
"auth",
|
||||
"bytes",
|
||||
@@ -4736,6 +4738,7 @@ dependencies = [
|
||||
"log-store",
|
||||
"meta-client",
|
||||
"num_cpus",
|
||||
"object-store",
|
||||
"opentelemetry-proto 0.27.0",
|
||||
"operator",
|
||||
"otel-arrow-rust",
|
||||
@@ -5145,7 +5148,7 @@ dependencies = [
|
||||
[[package]]
|
||||
name = "greptime-proto"
|
||||
version = "0.1.0"
|
||||
source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=96c733f8472284d3c83a4c011dc6de9cf830c353#96c733f8472284d3c83a4c011dc6de9cf830c353"
|
||||
source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=464226cf8a4a22696503536a123d0b9e318582f4#464226cf8a4a22696503536a123d0b9e318582f4"
|
||||
dependencies = [
|
||||
"prost 0.13.5",
|
||||
"serde",
|
||||
@@ -6696,7 +6699,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "4979f22fdb869068da03c9f7528f8297c6fd2606bc3a4affe42e6a823fdb8da4"
|
||||
dependencies = [
|
||||
"cfg-if",
|
||||
"windows-targets 0.48.5",
|
||||
"windows-targets 0.52.6",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -7232,6 +7235,7 @@ dependencies = [
|
||||
name = "metric-engine"
|
||||
version = "0.15.0"
|
||||
dependencies = [
|
||||
"ahash 0.8.11",
|
||||
"api",
|
||||
"aquamarine",
|
||||
"async-stream",
|
||||
@@ -8097,14 +8101,21 @@ version = "0.15.0"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"bytes",
|
||||
"common-base",
|
||||
"common-error",
|
||||
"common-macro",
|
||||
"common-telemetry",
|
||||
"common-test-util",
|
||||
"futures",
|
||||
"humantime-serde",
|
||||
"lazy_static",
|
||||
"md5",
|
||||
"moka",
|
||||
"opendal",
|
||||
"prometheus",
|
||||
"reqwest 0.12.9",
|
||||
"serde",
|
||||
"snafu 0.8.5",
|
||||
"tokio",
|
||||
"uuid",
|
||||
]
|
||||
@@ -8239,7 +8250,7 @@ dependencies = [
|
||||
"prometheus",
|
||||
"quick-xml 0.36.2",
|
||||
"reqsign",
|
||||
"reqwest",
|
||||
"reqwest 0.12.9",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"sha2",
|
||||
@@ -8311,6 +8322,19 @@ dependencies = [
|
||||
"tracing",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "opentelemetry-http"
|
||||
version = "0.10.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "7f51189ce8be654f9b5f7e70e49967ed894e84a06fc35c6c042e64ac1fc5399e"
|
||||
dependencies = [
|
||||
"async-trait",
|
||||
"bytes",
|
||||
"http 0.2.12",
|
||||
"opentelemetry 0.21.0",
|
||||
"reqwest 0.11.27",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "opentelemetry-otlp"
|
||||
version = "0.14.0"
|
||||
@@ -8321,10 +8345,12 @@ dependencies = [
|
||||
"futures-core",
|
||||
"http 0.2.12",
|
||||
"opentelemetry 0.21.0",
|
||||
"opentelemetry-http",
|
||||
"opentelemetry-proto 0.4.0",
|
||||
"opentelemetry-semantic-conventions",
|
||||
"opentelemetry_sdk 0.21.2",
|
||||
"prost 0.11.9",
|
||||
"reqwest 0.11.27",
|
||||
"thiserror 1.0.64",
|
||||
"tokio",
|
||||
"tonic 0.9.2",
|
||||
@@ -9569,7 +9595,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "be769465445e8c1474e9c5dac2018218498557af32d9ed057325ec9a41ae81bf"
|
||||
dependencies = [
|
||||
"heck 0.5.0",
|
||||
"itertools 0.11.0",
|
||||
"itertools 0.14.0",
|
||||
"log",
|
||||
"multimap",
|
||||
"once_cell",
|
||||
@@ -9615,7 +9641,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8a56d757972c98b346a9b766e3f02746cde6dd1cd1d1d563472929fdd74bec4d"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"itertools 0.11.0",
|
||||
"itertools 0.14.0",
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn 2.0.100",
|
||||
@@ -10311,7 +10337,7 @@ dependencies = [
|
||||
"percent-encoding",
|
||||
"quick-xml 0.35.0",
|
||||
"rand 0.8.5",
|
||||
"reqwest",
|
||||
"reqwest 0.12.9",
|
||||
"rsa",
|
||||
"rust-ini 0.21.1",
|
||||
"serde",
|
||||
@@ -10320,6 +10346,42 @@ dependencies = [
|
||||
"sha2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "reqwest"
|
||||
version = "0.11.27"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "dd67538700a17451e7cba03ac727fb961abb7607553461627b97de0b89cf4a62"
|
||||
dependencies = [
|
||||
"base64 0.21.7",
|
||||
"bytes",
|
||||
"encoding_rs",
|
||||
"futures-core",
|
||||
"futures-util",
|
||||
"h2 0.3.26",
|
||||
"http 0.2.12",
|
||||
"http-body 0.4.6",
|
||||
"hyper 0.14.30",
|
||||
"ipnet",
|
||||
"js-sys",
|
||||
"log",
|
||||
"mime",
|
||||
"once_cell",
|
||||
"percent-encoding",
|
||||
"pin-project-lite",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"serde_urlencoded",
|
||||
"sync_wrapper 0.1.2",
|
||||
"system-configuration",
|
||||
"tokio",
|
||||
"tower-service",
|
||||
"url",
|
||||
"wasm-bindgen",
|
||||
"wasm-bindgen-futures",
|
||||
"web-sys",
|
||||
"winreg",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "reqwest"
|
||||
version = "0.12.9"
|
||||
@@ -11170,6 +11232,7 @@ dependencies = [
|
||||
"common-base",
|
||||
"common-catalog",
|
||||
"common-config",
|
||||
"common-datasource",
|
||||
"common-error",
|
||||
"common-frontend",
|
||||
"common-grpc",
|
||||
@@ -11212,16 +11275,23 @@ dependencies = [
|
||||
"local-ip-address",
|
||||
"log-query",
|
||||
"loki-proto",
|
||||
"metric-engine",
|
||||
"mime_guess",
|
||||
"mito-codec",
|
||||
"mito2",
|
||||
"mysql_async",
|
||||
"notify",
|
||||
"object-pool",
|
||||
"object-store",
|
||||
"once_cell",
|
||||
"openmetrics-parser",
|
||||
"opensrv-mysql",
|
||||
"opentelemetry-proto 0.27.0",
|
||||
"operator",
|
||||
"otel-arrow-rust",
|
||||
"parking_lot 0.12.3",
|
||||
"parquet",
|
||||
"partition",
|
||||
"permutation",
|
||||
"pgwire",
|
||||
"pin-project",
|
||||
@@ -11235,7 +11305,7 @@ dependencies = [
|
||||
"quoted-string",
|
||||
"rand 0.9.0",
|
||||
"regex",
|
||||
"reqwest",
|
||||
"reqwest 0.12.9",
|
||||
"rust-embed",
|
||||
"rustls",
|
||||
"rustls-pemfile",
|
||||
@@ -11679,7 +11749,7 @@ dependencies = [
|
||||
"local-ip-address",
|
||||
"mysql",
|
||||
"num_cpus",
|
||||
"reqwest",
|
||||
"reqwest 0.12.9",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"sha2",
|
||||
@@ -12329,6 +12399,27 @@ dependencies = [
|
||||
"nom",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "system-configuration"
|
||||
version = "0.5.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ba3a3adc5c275d719af8cb4272ea1c4a6d668a777f37e115f6d11ddbc1c8e0e7"
|
||||
dependencies = [
|
||||
"bitflags 1.3.2",
|
||||
"core-foundation",
|
||||
"system-configuration-sys",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "system-configuration-sys"
|
||||
version = "0.5.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "a75fb188eb626b924683e3b95e3a48e63551fcfb51949de2f06a9d91dbee93c9"
|
||||
dependencies = [
|
||||
"core-foundation-sys",
|
||||
"libc",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "table"
|
||||
version = "0.15.0"
|
||||
@@ -12619,7 +12710,7 @@ dependencies = [
|
||||
"paste",
|
||||
"rand 0.9.0",
|
||||
"rand_chacha 0.9.0",
|
||||
"reqwest",
|
||||
"reqwest 0.12.9",
|
||||
"schemars",
|
||||
"serde",
|
||||
"serde_json",
|
||||
@@ -13775,12 +13866,13 @@ checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821"
|
||||
|
||||
[[package]]
|
||||
name = "uuid"
|
||||
version = "1.10.0"
|
||||
version = "1.17.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "81dfa00651efa65069b0b6b651f4aaa31ba9e3c3ce0137aaad053604ee7e0314"
|
||||
checksum = "3cf4199d1e5d15ddd86a694e4d0dffa9c323ce759fea589f00fef9d81cc1931d"
|
||||
dependencies = [
|
||||
"getrandom 0.2.15",
|
||||
"rand 0.8.5",
|
||||
"getrandom 0.3.2",
|
||||
"js-sys",
|
||||
"rand 0.9.0",
|
||||
"serde",
|
||||
"wasm-bindgen",
|
||||
]
|
||||
@@ -14184,7 +14276,7 @@ version = "0.1.9"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb"
|
||||
dependencies = [
|
||||
"windows-sys 0.48.0",
|
||||
"windows-sys 0.59.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -14502,6 +14594,16 @@ dependencies = [
|
||||
"memchr",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "winreg"
|
||||
version = "0.50.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "524e57b2c537c0f9b1e69f1965311ec12182b4122e45035b1508cd24d2adadb1"
|
||||
dependencies = [
|
||||
"cfg-if",
|
||||
"windows-sys 0.48.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "wit-bindgen-rt"
|
||||
version = "0.39.0"
|
||||
|
||||
@@ -121,6 +121,7 @@ datafusion = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "
|
||||
datafusion-common = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "12c0381babd52c681043957e9d6ee083a03f7646" }
|
||||
datafusion-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "12c0381babd52c681043957e9d6ee083a03f7646" }
|
||||
datafusion-functions = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "12c0381babd52c681043957e9d6ee083a03f7646" }
|
||||
datafusion-functions-aggregate-common = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "12c0381babd52c681043957e9d6ee083a03f7646" }
|
||||
datafusion-optimizer = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "12c0381babd52c681043957e9d6ee083a03f7646" }
|
||||
datafusion-physical-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "12c0381babd52c681043957e9d6ee083a03f7646" }
|
||||
datafusion-physical-plan = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "12c0381babd52c681043957e9d6ee083a03f7646" }
|
||||
@@ -134,7 +135,7 @@ etcd-client = "0.14"
|
||||
fst = "0.4.7"
|
||||
futures = "0.3"
|
||||
futures-util = "0.3"
|
||||
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "96c733f8472284d3c83a4c011dc6de9cf830c353" }
|
||||
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "464226cf8a4a22696503536a123d0b9e318582f4" }
|
||||
hex = "0.4"
|
||||
http = "1"
|
||||
humantime = "2.1"
|
||||
|
||||
@@ -185,10 +185,11 @@
|
||||
| `logging.dir` | String | `./greptimedb_data/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
|
||||
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
|
||||
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
|
||||
| `logging.otlp_endpoint` | String | `http://localhost:4317` | The OTLP tracing endpoint. |
|
||||
| `logging.otlp_endpoint` | String | `http://localhost:4318` | The OTLP tracing endpoint. |
|
||||
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
|
||||
| `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. |
|
||||
| `logging.max_log_files` | Integer | `720` | The maximum amount of log files. |
|
||||
| `logging.otlp_export_protocol` | String | `http` | The OTLP tracing export protocol. Can be `grpc`/`http`. |
|
||||
| `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing will be sampled and exported.<br/>Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.<br/>ratio > 1 are treated as 1. Fractions < 0 are treated as 0 |
|
||||
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
|
||||
| `slow_query` | -- | -- | The slow query log options. |
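
The table above moves the default `logging.otlp_endpoint` from the OTLP/gRPC port (4317) to the OTLP/HTTP port (4318), matching the new `logging.otlp_export_protocol = "http"` default. A minimal sketch of that relationship; the enum and helper below are illustrative only, not GreptimeDB's actual `common-telemetry` types:

```rust
/// Illustrative only: mirrors the documented defaults, not the real config types.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum OtlpExportProtocol {
    /// OTLP over gRPC, conventionally served on port 4317.
    Grpc,
    /// OTLP over HTTP/protobuf, conventionally served on port 4318.
    Http,
}

fn default_otlp_endpoint(protocol: OtlpExportProtocol) -> &'static str {
    match protocol {
        OtlpExportProtocol::Grpc => "http://localhost:4317",
        OtlpExportProtocol::Http => "http://localhost:4318",
    }
}

fn main() {
    // The documented default protocol is now `http`, hence the 4318 endpoint.
    assert_eq!(
        default_otlp_endpoint(OtlpExportProtocol::Http),
        "http://localhost:4318"
    );
    assert_eq!(
        default_otlp_endpoint(OtlpExportProtocol::Grpc),
        "http://localhost:4317"
    );
}
```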
|
||||
@@ -288,10 +289,11 @@
|
||||
| `logging.dir` | String | `./greptimedb_data/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
|
||||
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
|
||||
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
|
||||
| `logging.otlp_endpoint` | String | `http://localhost:4317` | The OTLP tracing endpoint. |
|
||||
| `logging.otlp_endpoint` | String | `http://localhost:4318` | The OTLP tracing endpoint. |
|
||||
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
|
||||
| `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. |
|
||||
| `logging.max_log_files` | Integer | `720` | The maximum amount of log files. |
|
||||
| `logging.otlp_export_protocol` | String | `http` | The OTLP tracing export protocol. Can be `grpc`/`http`. |
|
||||
| `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing will be sampled and exported.<br/>Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.<br/>ratio > 1 are treated as 1. Fractions < 0 are treated as 0 |
|
||||
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
|
||||
| `slow_query` | -- | -- | The slow query log options. |
|
||||
@@ -323,6 +325,7 @@
|
||||
| `selector` | String | `round_robin` | Datanode selector type.<br/>- `round_robin` (default value)<br/>- `lease_based`<br/>- `load_based`<br/>For details, please see "https://docs.greptime.com/developer-guide/metasrv/selector". |
|
||||
| `use_memory_store` | Bool | `false` | Store data in memory. |
|
||||
| `enable_region_failover` | Bool | `false` | Whether to enable region failover.<br/>This feature is only available on GreptimeDB running on cluster mode and<br/>- Using Remote WAL<br/>- Using shared storage (e.g., s3). |
|
||||
| `region_failure_detector_initialization_delay` | String | `10m` | Delay before initializing region failure detectors.<br/>This delay helps prevent premature initialization of region failure detectors in cases where<br/>cluster maintenance mode is enabled right after metasrv starts, especially when the cluster<br/>is not deployed via the recommended GreptimeDB Operator. Without this delay, early detector registration<br/>may trigger unnecessary region failovers during datanode startup. |
|
||||
| `allow_region_failover_on_local_wal` | Bool | `false` | Whether to allow region failover on local WAL.<br/>**This option is not recommended to be set to true, because it may lead to data loss during failover.** |
|
||||
| `node_max_idle_time` | String | `24hours` | Max allowed idle time before removing node info from metasrv memory. |
|
||||
| `enable_telemetry` | Bool | `true` | Whether to enable greptimedb telemetry. Enabled by default. |
|
||||
@@ -370,10 +373,11 @@
|
||||
| `logging.dir` | String | `./greptimedb_data/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
|
||||
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
|
||||
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
|
||||
| `logging.otlp_endpoint` | String | `http://localhost:4317` | The OTLP tracing endpoint. |
|
||||
| `logging.otlp_endpoint` | String | `http://localhost:4318` | The OTLP tracing endpoint. |
|
||||
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
|
||||
| `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. |
|
||||
| `logging.max_log_files` | Integer | `720` | The maximum amount of log files. |
|
||||
| `logging.otlp_export_protocol` | String | `http` | The OTLP tracing export protocol. Can be `grpc`/`http`. |
|
||||
| `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing will be sampled and exported.<br/>Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.<br/>ratio > 1 are treated as 1. Fractions < 0 are treated as 0 |
|
||||
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
|
||||
| `export_metrics` | -- | -- | The metasrv can export its metrics and send to Prometheus compatible service (e.g. `greptimedb` itself) from remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape. |
|
||||
@@ -534,10 +538,11 @@
|
||||
| `logging.dir` | String | `./greptimedb_data/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
|
||||
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
|
||||
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
|
||||
| `logging.otlp_endpoint` | String | `http://localhost:4317` | The OTLP tracing endpoint. |
|
||||
| `logging.otlp_endpoint` | String | `http://localhost:4318` | The OTLP tracing endpoint. |
|
||||
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
|
||||
| `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. |
|
||||
| `logging.max_log_files` | Integer | `720` | The maximum amount of log files. |
|
||||
| `logging.otlp_export_protocol` | String | `http` | The OTLP tracing export protocol. Can be `grpc`/`http`. |
|
||||
| `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing will be sampled and exported.<br/>Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.<br/>ratio > 1 are treated as 1. Fractions < 0 are treated as 0 |
|
||||
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
|
||||
| `export_metrics` | -- | -- | The datanode can export its metrics and send to Prometheus compatible service (e.g. `greptimedb` itself) from remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape. |
|
||||
@@ -584,10 +589,11 @@
|
||||
| `logging.dir` | String | `./greptimedb_data/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
|
||||
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
|
||||
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
|
||||
| `logging.otlp_endpoint` | String | `http://localhost:4317` | The OTLP tracing endpoint. |
|
||||
| `logging.otlp_endpoint` | String | `http://localhost:4318` | The OTLP tracing endpoint. |
|
||||
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
|
||||
| `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. |
|
||||
| `logging.max_log_files` | Integer | `720` | The maximum amount of log files. |
|
||||
| `logging.otlp_export_protocol` | String | `http` | The OTLP tracing export protocol. Can be `grpc`/`http`. |
|
||||
| `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing will be sampled and exported.<br/>Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.<br/>ratio > 1 are treated as 1. Fractions < 0 are treated as 0 |
|
||||
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
|
||||
| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
|
||||
|
||||
@@ -629,7 +629,7 @@ level = "info"
|
||||
enable_otlp_tracing = false
|
||||
|
||||
## The OTLP tracing endpoint.
|
||||
otlp_endpoint = "http://localhost:4317"
|
||||
otlp_endpoint = "http://localhost:4318"
|
||||
|
||||
## Whether to append logs to stdout.
|
||||
append_stdout = true
|
||||
@@ -640,6 +640,9 @@ log_format = "text"
|
||||
## The maximum amount of log files.
|
||||
max_log_files = 720
|
||||
|
||||
## The OTLP tracing export protocol. Can be `grpc`/`http`.
|
||||
otlp_export_protocol = "http"
|
||||
|
||||
## The percentage of tracing will be sampled and exported.
|
||||
## Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.
|
||||
## ratio > 1 are treated as 1. Fractions < 0 are treated as 0
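
The sampling-ratio comment above describes a clamp into `[0, 1]`. A one-function sketch of that rule (hypothetical helper, not the actual `common-telemetry` code):

```rust
/// Ratios above 1 are treated as 1, ratios below 0 as 0, per the comment above.
fn effective_sample_ratio(configured: f64) -> f64 {
    configured.clamp(0.0, 1.0)
}

fn main() {
    assert_eq!(effective_sample_ratio(1.5), 1.0);
    assert_eq!(effective_sample_ratio(-0.2), 0.0);
    assert_eq!(effective_sample_ratio(0.25), 0.25);
}
```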
|
||||
|
||||
@@ -83,7 +83,7 @@ level = "info"
|
||||
enable_otlp_tracing = false
|
||||
|
||||
## The OTLP tracing endpoint.
|
||||
otlp_endpoint = "http://localhost:4317"
|
||||
otlp_endpoint = "http://localhost:4318"
|
||||
|
||||
## Whether to append logs to stdout.
|
||||
append_stdout = true
|
||||
@@ -94,6 +94,9 @@ log_format = "text"
|
||||
## The maximum amount of log files.
|
||||
max_log_files = 720
|
||||
|
||||
## The OTLP tracing export protocol. Can be `grpc`/`http`.
|
||||
otlp_export_protocol = "http"
|
||||
|
||||
## The percentage of tracing will be sampled and exported.
|
||||
## Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.
|
||||
## ratio > 1 are treated as 1. Fractions < 0 are treated as 0
|
||||
|
||||
@@ -218,7 +218,7 @@ level = "info"
|
||||
enable_otlp_tracing = false
|
||||
|
||||
## The OTLP tracing endpoint.
|
||||
otlp_endpoint = "http://localhost:4317"
|
||||
otlp_endpoint = "http://localhost:4318"
|
||||
|
||||
## Whether to append logs to stdout.
|
||||
append_stdout = true
|
||||
@@ -229,6 +229,9 @@ log_format = "text"
|
||||
## The maximum amount of log files.
|
||||
max_log_files = 720
|
||||
|
||||
## The OTLP tracing export protocol. Can be `grpc`/`http`.
|
||||
otlp_export_protocol = "http"
|
||||
|
||||
## The percentage of tracing will be sampled and exported.
|
||||
## Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.
|
||||
## ratio > 1 are treated as 1. Fractions < 0 are treated as 0
|
||||
|
||||
@@ -43,6 +43,13 @@ use_memory_store = false
|
||||
## - Using shared storage (e.g., s3).
|
||||
enable_region_failover = false
|
||||
|
||||
## Delay before initializing region failure detectors.
|
||||
## This delay helps prevent premature initialization of region failure detectors in cases where
|
||||
## cluster maintenance mode is enabled right after metasrv starts, especially when the cluster
|
||||
## is not deployed via the recommended GreptimeDB Operator. Without this delay, early detector registration
|
||||
## may trigger unnecessary region failovers during datanode startup.
|
||||
region_failure_detector_initialization_delay = '10m'
|
||||
|
||||
## Whether to allow region failover on local WAL.
|
||||
## **This option is not recommended to be set to true, because it may lead to data loss during failover.**
|
||||
allow_region_failover_on_local_wal = false
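
`region_failure_detector_initialization_delay = '10m'` is a humantime-style duration, and the workspace already depends on `humantime-serde`. A hedged sketch of how such a field can be deserialized, assuming the `serde` (with derive), `toml`, and `humantime-serde` crates; the struct below is hypothetical and only stands in for the real metasrv options:

```rust
use std::time::Duration;

use serde::Deserialize;

/// Hypothetical fragment of the metasrv options, just to show the duration parsing.
#[derive(Debug, Deserialize)]
struct FailoverOptions {
    enable_region_failover: bool,
    allow_region_failover_on_local_wal: bool,
    #[serde(with = "humantime_serde")]
    region_failure_detector_initialization_delay: Duration,
}

fn main() {
    let config = r#"
        enable_region_failover = false
        allow_region_failover_on_local_wal = false
        region_failure_detector_initialization_delay = '10m'
    "#;
    let opts: FailoverOptions = toml::from_str(config).expect("valid TOML");
    // '10m' parses to ten minutes.
    assert_eq!(
        opts.region_failure_detector_initialization_delay,
        Duration::from_secs(10 * 60)
    );
    assert!(!opts.enable_region_failover);
    assert!(!opts.allow_region_failover_on_local_wal);
}
```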
|
||||
@@ -220,7 +227,7 @@ level = "info"
|
||||
enable_otlp_tracing = false
|
||||
|
||||
## The OTLP tracing endpoint.
|
||||
otlp_endpoint = "http://localhost:4317"
|
||||
otlp_endpoint = "http://localhost:4318"
|
||||
|
||||
## Whether to append logs to stdout.
|
||||
append_stdout = true
|
||||
@@ -231,6 +238,9 @@ log_format = "text"
|
||||
## The maximum amount of log files.
|
||||
max_log_files = 720
|
||||
|
||||
## The OTLP tracing export protocol. Can be `grpc`/`http`.
|
||||
otlp_export_protocol = "http"
|
||||
|
||||
## The percentage of tracing will be sampled and exported.
|
||||
## Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.
|
||||
## ratio > 1 are treated as 1. Fractions < 0 are treated as 0
|
||||
|
||||
@@ -720,7 +720,7 @@ level = "info"
|
||||
enable_otlp_tracing = false
|
||||
|
||||
## The OTLP tracing endpoint.
|
||||
otlp_endpoint = "http://localhost:4317"
|
||||
otlp_endpoint = "http://localhost:4318"
|
||||
|
||||
## Whether to append logs to stdout.
|
||||
append_stdout = true
|
||||
@@ -731,6 +731,9 @@ log_format = "text"
|
||||
## The maximum amount of log files.
|
||||
max_log_files = 720
|
||||
|
||||
## The OTLP tracing export protocol. Can be `grpc`/`http`.
|
||||
otlp_export_protocol = "http"
|
||||
|
||||
## The percentage of tracing will be sampled and exported.
|
||||
## Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.
|
||||
## ratio > 1 are treated as 1. Fractions < 0 are treated as 0
|
||||
|
||||
@@ -31,6 +31,7 @@ excludes = [
|
||||
"src/operator/src/expr_helper/trigger.rs",
|
||||
"src/sql/src/statements/create/trigger.rs",
|
||||
"src/sql/src/statements/show/trigger.rs",
|
||||
"src/sql/src/statements/drop/trigger.rs",
|
||||
"src/sql/src/parsers/create_parser/trigger.rs",
|
||||
"src/sql/src/parsers/show_parser/trigger.rs",
|
||||
]
|
||||
|
||||
@@ -22,6 +22,7 @@ use greptime_proto::v1::region::RegionResponse as RegionResponseV1;
pub struct RegionResponse {
    pub affected_rows: AffectedRows,
    pub extensions: HashMap<String, Vec<u8>>,
    pub metadata: Vec<u8>,
}

impl RegionResponse {
@@ -29,6 +30,7 @@ impl RegionResponse {
        Self {
            affected_rows: region_response.affected_rows as _,
            extensions: region_response.extensions,
            metadata: region_response.metadata,
        }
    }

@@ -37,6 +39,16 @@ impl RegionResponse {
        Self {
            affected_rows,
            extensions: Default::default(),
            metadata: Vec::new(),
        }
    }

    /// Creates one response with metadata.
    pub fn from_metadata(metadata: Vec<u8>) -> Self {
        Self {
            affected_rows: 0,
            extensions: Default::default(),
            metadata,
        }
    }
}
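
A small usage sketch for the new `metadata` field and `from_metadata` constructor. The struct is re-declared inline (with `AffectedRows` as a plain alias) so the example is self-contained; the payload is made up:

```rust
use std::collections::HashMap;

type AffectedRows = usize; // stand-in for the real alias

#[derive(Debug, Default)]
pub struct RegionResponse {
    pub affected_rows: AffectedRows,
    pub extensions: HashMap<String, Vec<u8>>,
    pub metadata: Vec<u8>,
}

impl RegionResponse {
    /// Creates one response with metadata, mirroring the constructor added above.
    pub fn from_metadata(metadata: Vec<u8>) -> Self {
        Self {
            affected_rows: 0,
            extensions: Default::default(),
            metadata,
        }
    }
}

fn main() {
    // A metadata-only response: no rows affected, no extensions, just an
    // engine-specific payload carried back to the caller.
    let resp = RegionResponse::from_metadata(b"engine-specific payload".to_vec());
    assert_eq!(resp.affected_rows, 0);
    assert!(resp.extensions.is_empty());
    assert_eq!(resp.metadata, b"engine-specific payload");
}
```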
@@ -226,20 +226,18 @@ mod tests {
|
||||
assert!(options.is_none());
|
||||
|
||||
let mut schema = ColumnSchema::new("test", ConcreteDataType::string_datatype(), true)
|
||||
.with_fulltext_options(FulltextOptions::new_unchecked(
|
||||
true,
|
||||
FulltextAnalyzer::English,
|
||||
false,
|
||||
FulltextBackend::Bloom,
|
||||
10240,
|
||||
0.01,
|
||||
))
|
||||
.with_fulltext_options(FulltextOptions {
|
||||
enable: true,
|
||||
analyzer: FulltextAnalyzer::English,
|
||||
case_sensitive: false,
|
||||
backend: FulltextBackend::Bloom,
|
||||
})
|
||||
.unwrap();
|
||||
schema.set_inverted_index(true);
|
||||
let options = options_from_column_schema(&schema).unwrap();
|
||||
assert_eq!(
|
||||
options.options.get(FULLTEXT_GRPC_KEY).unwrap(),
|
||||
"{\"enable\":true,\"analyzer\":\"English\",\"case-sensitive\":false,\"backend\":\"bloom\",\"granularity\":10240,\"false-positive-rate-in-10000\":100}"
|
||||
"{\"enable\":true,\"analyzer\":\"English\",\"case-sensitive\":false,\"backend\":\"bloom\"}"
|
||||
);
|
||||
assert_eq!(
|
||||
options.options.get(INVERTED_INDEX_GRPC_KEY).unwrap(),
|
||||
@@ -249,18 +247,16 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_options_with_fulltext() {
|
||||
let fulltext = FulltextOptions::new_unchecked(
|
||||
true,
|
||||
FulltextAnalyzer::English,
|
||||
false,
|
||||
FulltextBackend::Bloom,
|
||||
10240,
|
||||
0.01,
|
||||
);
|
||||
let fulltext = FulltextOptions {
|
||||
enable: true,
|
||||
analyzer: FulltextAnalyzer::English,
|
||||
case_sensitive: false,
|
||||
backend: FulltextBackend::Bloom,
|
||||
};
|
||||
let options = options_from_fulltext(&fulltext).unwrap().unwrap();
|
||||
assert_eq!(
|
||||
options.options.get(FULLTEXT_GRPC_KEY).unwrap(),
|
||||
"{\"enable\":true,\"analyzer\":\"English\",\"case-sensitive\":false,\"backend\":\"bloom\",\"granularity\":10240,\"false-positive-rate-in-10000\":100}"
|
||||
"{\"enable\":true,\"analyzer\":\"English\",\"case-sensitive\":false,\"backend\":\"bloom\"}"
|
||||
);
|
||||
}
|
||||
|
||||
|
||||
@@ -28,7 +28,7 @@ use common_meta::cache::{
|
||||
use common_meta::key::catalog_name::CatalogNameKey;
|
||||
use common_meta::key::flow::FlowMetadataManager;
|
||||
use common_meta::key::schema_name::SchemaNameKey;
|
||||
use common_meta::key::table_info::{TableInfoManager, TableInfoValue};
|
||||
use common_meta::key::table_info::TableInfoValue;
|
||||
use common_meta::key::table_name::TableNameKey;
|
||||
use common_meta::key::{TableMetadataManager, TableMetadataManagerRef};
|
||||
use common_meta::kv_backend::KvBackendRef;
|
||||
@@ -39,7 +39,6 @@ use moka::sync::Cache;
|
||||
use partition::manager::{PartitionRuleManager, PartitionRuleManagerRef};
|
||||
use session::context::{Channel, QueryContext};
|
||||
use snafu::prelude::*;
|
||||
use store_api::metric_engine_consts::METRIC_ENGINE_NAME;
|
||||
use table::dist_table::DistTable;
|
||||
use table::metadata::TableId;
|
||||
use table::table::numbers::{NumbersTable, NUMBERS_TABLE_NAME};
|
||||
@@ -143,61 +142,6 @@ impl KvBackendCatalogManager {
|
||||
pub fn procedure_manager(&self) -> Option<ProcedureManagerRef> {
|
||||
self.procedure_manager.clone()
|
||||
}
|
||||
|
||||
// Override logical table's partition key indices with physical table's.
|
||||
async fn override_logical_table_partition_key_indices(
|
||||
table_route_cache: &TableRouteCacheRef,
|
||||
table_info_manager: &TableInfoManager,
|
||||
table: TableRef,
|
||||
) -> Result<TableRef> {
|
||||
// If the table is not a metric table, return the table directly.
|
||||
if table.table_info().meta.engine != METRIC_ENGINE_NAME {
|
||||
return Ok(table);
|
||||
}
|
||||
|
||||
if let Some(table_route_value) = table_route_cache
|
||||
.get(table.table_info().table_id())
|
||||
.await
|
||||
.context(TableMetadataManagerSnafu)?
|
||||
&& let TableRoute::Logical(logical_route) = &*table_route_value
|
||||
&& let Some(physical_table_info_value) = table_info_manager
|
||||
.get(logical_route.physical_table_id())
|
||||
.await
|
||||
.context(TableMetadataManagerSnafu)?
|
||||
{
|
||||
let mut new_table_info = (*table.table_info()).clone();
|
||||
|
||||
// Remap partition key indices from physical table to logical table
|
||||
new_table_info.meta.partition_key_indices = physical_table_info_value
|
||||
.table_info
|
||||
.meta
|
||||
.partition_key_indices
|
||||
.iter()
|
||||
.filter_map(|&physical_index| {
|
||||
// Get the column name from the physical table using the physical index
|
||||
physical_table_info_value
|
||||
.table_info
|
||||
.meta
|
||||
.schema
|
||||
.column_schemas
|
||||
.get(physical_index)
|
||||
.and_then(|physical_column| {
|
||||
// Find the corresponding index in the logical table schema
|
||||
new_table_info
|
||||
.meta
|
||||
.schema
|
||||
.column_index_by_name(physical_column.name.as_str())
|
||||
})
|
||||
})
|
||||
.collect();
|
||||
|
||||
let new_table = DistTable::table(Arc::new(new_table_info));
|
||||
|
||||
return Ok(new_table);
|
||||
}
|
||||
|
||||
Ok(table)
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
@@ -324,7 +268,10 @@ impl CatalogManager for KvBackendCatalogManager {
|
||||
let table_cache: TableCacheRef = self.cache_registry.get().context(CacheNotFoundSnafu {
|
||||
name: "table_cache",
|
||||
})?;
|
||||
|
||||
let table_route_cache: TableRouteCacheRef =
|
||||
self.cache_registry.get().context(CacheNotFoundSnafu {
|
||||
name: "table_route_cache",
|
||||
})?;
|
||||
let table = table_cache
|
||||
.get_by_ref(&TableName {
|
||||
catalog_name: catalog_name.to_string(),
|
||||
@@ -334,18 +281,55 @@ impl CatalogManager for KvBackendCatalogManager {
|
||||
.await
|
||||
.context(GetTableCacheSnafu)?;
|
||||
|
||||
if let Some(table) = table {
|
||||
let table_route_cache: TableRouteCacheRef =
|
||||
self.cache_registry.get().context(CacheNotFoundSnafu {
|
||||
name: "table_route_cache",
|
||||
})?;
|
||||
return Self::override_logical_table_partition_key_indices(
|
||||
&table_route_cache,
|
||||
self.table_metadata_manager.table_info_manager(),
|
||||
table,
|
||||
)
|
||||
.await
|
||||
.map(Some);
|
||||
// Override logical table's partition key indices with physical table's.
|
||||
if let Some(table) = &table
|
||||
&& let Some(table_route_value) = table_route_cache
|
||||
.get(table.table_info().table_id())
|
||||
.await
|
||||
.context(TableMetadataManagerSnafu)?
|
||||
&& let TableRoute::Logical(logical_route) = &*table_route_value
|
||||
&& let Some(physical_table_info_value) = self
|
||||
.table_metadata_manager
|
||||
.table_info_manager()
|
||||
.get(logical_route.physical_table_id())
|
||||
.await
|
||||
.context(TableMetadataManagerSnafu)?
|
||||
{
|
||||
let mut new_table_info = (*table.table_info()).clone();
|
||||
// Gather all column names from the logical table
|
||||
let logical_column_names: std::collections::HashSet<_> = new_table_info
|
||||
.meta
|
||||
.schema
|
||||
.column_schemas()
|
||||
.iter()
|
||||
.map(|col| &col.name)
|
||||
.collect();
|
||||
|
||||
// Only preserve partition key indices where the corresponding columns exist in logical table
|
||||
new_table_info.meta.partition_key_indices = physical_table_info_value
|
||||
.table_info
|
||||
.meta
|
||||
.partition_key_indices
|
||||
.iter()
|
||||
.filter(|&&index| {
|
||||
if let Some(physical_column) = physical_table_info_value
|
||||
.table_info
|
||||
.meta
|
||||
.schema
|
||||
.column_schemas
|
||||
.get(index)
|
||||
{
|
||||
logical_column_names.contains(&physical_column.name)
|
||||
} else {
|
||||
false
|
||||
}
|
||||
})
|
||||
.cloned()
|
||||
.collect();
|
||||
|
||||
let new_table = DistTable::table(Arc::new(new_table_info));
|
||||
|
||||
return Ok(Some(new_table));
|
||||
}
|
||||
|
||||
if channel == Channel::Postgres {
|
||||
@@ -358,7 +342,7 @@ impl CatalogManager for KvBackendCatalogManager {
|
||||
}
|
||||
}
|
||||
|
||||
Ok(None)
|
||||
Ok(table)
|
||||
}
|
||||
|
||||
async fn tables_by_ids(
|
||||
@@ -410,20 +394,8 @@ impl CatalogManager for KvBackendCatalogManager {
|
||||
let catalog = catalog.to_string();
|
||||
let schema = schema.to_string();
|
||||
let semaphore = Arc::new(Semaphore::new(CONCURRENCY));
|
||||
let table_route_cache: Result<TableRouteCacheRef> =
|
||||
self.cache_registry.get().context(CacheNotFoundSnafu {
|
||||
name: "table_route_cache",
|
||||
});
|
||||
|
||||
common_runtime::spawn_global(async move {
|
||||
let table_route_cache = match table_route_cache {
|
||||
Ok(table_route_cache) => table_route_cache,
|
||||
Err(e) => {
|
||||
let _ = tx.send(Err(e)).await;
|
||||
return;
|
||||
}
|
||||
};
|
||||
|
||||
let table_id_stream = metadata_manager
|
||||
.table_name_manager()
|
||||
.tables(&catalog, &schema)
|
||||
@@ -450,7 +422,6 @@ impl CatalogManager for KvBackendCatalogManager {
|
||||
let metadata_manager = metadata_manager.clone();
|
||||
let tx = tx.clone();
|
||||
let semaphore = semaphore.clone();
|
||||
let table_route_cache = table_route_cache.clone();
|
||||
common_runtime::spawn_global(async move {
|
||||
// we don't explicitly close the semaphore so just ignore the potential error.
|
||||
let _ = semaphore.acquire().await;
|
||||
@@ -468,16 +439,6 @@ impl CatalogManager for KvBackendCatalogManager {
|
||||
};
|
||||
|
||||
for table in table_info_values.into_values().map(build_table) {
|
||||
let table = if let Ok(table) = table {
|
||||
Self::override_logical_table_partition_key_indices(
|
||||
&table_route_cache,
|
||||
metadata_manager.table_info_manager(),
|
||||
table,
|
||||
)
|
||||
.await
|
||||
} else {
|
||||
table
|
||||
};
|
||||
if tx.send(table).await.is_err() {
|
||||
return;
|
||||
}
|
||||
|
||||
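
The partition-key handling above remaps the physical (metric-engine) table's `partition_key_indices` onto the logical table's schema, keeping only columns the logical table actually has. A self-contained toy version of that remapping; the column names are made up:

```rust
/// Toy illustration of the remapping above: keep only the physical table's
/// partition-key indices whose columns also exist in the logical table, and
/// translate them into the logical table's column order.
fn remap_partition_key_indices(
    physical_partition_key_indices: &[usize],
    physical_columns: &[&str],
    logical_columns: &[&str],
) -> Vec<usize> {
    physical_partition_key_indices
        .iter()
        .filter_map(|&physical_index| {
            // Column name in the physical schema at this index (skip if out of range).
            let column = physical_columns.get(physical_index)?;
            // Position of the same column in the logical schema, if present.
            logical_columns.iter().position(|c| c == column)
        })
        .collect()
}

fn main() {
    // Physical table: partition keys are `host` (index 1) and `dc` (index 3).
    let physical_columns = ["ts", "host", "value", "dc"];
    let physical_keys: [usize; 2] = [1, 3];
    // Logical table only exposes `ts`, `dc`, `host`.
    let logical_columns = ["ts", "dc", "host"];

    let remapped =
        remap_partition_key_indices(&physical_keys, &physical_columns, &logical_columns);
    assert_eq!(remapped, vec![2, 1]); // `host` -> 2, `dc` -> 1 in the logical schema
}
```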
@@ -21,7 +21,7 @@ use std::sync::{Arc, RwLock};
|
||||
use api::v1::frontend::{KillProcessRequest, ListProcessRequest, ProcessInfo};
|
||||
use common_base::cancellation::CancellationHandle;
|
||||
use common_frontend::selector::{FrontendSelector, MetaClientSelector};
|
||||
use common_telemetry::{debug, info, warn};
|
||||
use common_telemetry::{debug, info};
|
||||
use common_time::util::current_time_millis;
|
||||
use meta_client::MetaClientRef;
|
||||
use snafu::{ensure, OptionExt, ResultExt};
|
||||
@@ -141,20 +141,14 @@ impl ProcessManager {
|
||||
.await
|
||||
.context(error::InvokeFrontendSnafu)?;
|
||||
for mut f in frontends {
|
||||
let result = f
|
||||
.list_process(ListProcessRequest {
|
||||
processes.extend(
|
||||
f.list_process(ListProcessRequest {
|
||||
catalog: catalog.unwrap_or_default().to_string(),
|
||||
})
|
||||
.await
|
||||
.context(error::InvokeFrontendSnafu);
|
||||
match result {
|
||||
Ok(resp) => {
|
||||
processes.extend(resp.processes);
|
||||
}
|
||||
Err(e) => {
|
||||
warn!(e; "Skipping failing node: {:?}", f)
|
||||
}
|
||||
}
|
||||
.context(error::InvokeFrontendSnafu)?
|
||||
.processes,
|
||||
);
|
||||
}
|
||||
}
|
||||
processes.extend(self.local_processes(catalog)?);
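
The rewritten `list_process` loop above now propagates the first frontend error with `?` instead of logging a warning and skipping the failing node. A minimal sketch of that fail-fast collection pattern, with plain `Result`s standing in for the RPC calls:

```rust
/// Stand-in for one frontend's `list_process` RPC.
fn list_from_node(node: &str) -> Result<Vec<String>, String> {
    if node == "bad-node" {
        Err(format!("{node} unreachable"))
    } else {
        Ok(vec![format!("{node}: SELECT 1")])
    }
}

/// Fail-fast: the first error aborts the whole listing, like the new code above.
fn list_all(nodes: &[&str]) -> Result<Vec<String>, String> {
    let mut processes = Vec::new();
    for node in nodes {
        processes.extend(list_from_node(node)?);
    }
    Ok(processes)
}

fn main() {
    assert!(list_all(&["fe-0", "fe-1"]).is_ok());
    assert!(list_all(&["fe-0", "bad-node"]).is_err());
}
```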
|
||||
|
||||
@@ -19,7 +19,7 @@ mod information_memory_table;
|
||||
pub mod key_column_usage;
|
||||
mod partitions;
|
||||
mod procedure_info;
|
||||
mod process_list;
|
||||
pub mod process_list;
|
||||
pub mod region_peers;
|
||||
mod region_statistics;
|
||||
mod runtime_metrics;
|
||||
|
||||
@@ -39,14 +39,14 @@ use crate::process_manager::ProcessManagerRef;
|
||||
use crate::system_schema::information_schema::InformationTable;
|
||||
|
||||
/// Column names of `information_schema.process_list`
|
||||
const ID: &str = "id";
|
||||
const CATALOG: &str = "catalog";
|
||||
const SCHEMAS: &str = "schemas";
|
||||
const QUERY: &str = "query";
|
||||
const CLIENT: &str = "client";
|
||||
const FRONTEND: &str = "frontend";
|
||||
const START_TIMESTAMP: &str = "start_timestamp";
|
||||
const ELAPSED_TIME: &str = "elapsed_time";
|
||||
pub const ID: &str = "id";
|
||||
pub const CATALOG: &str = "catalog";
|
||||
pub const SCHEMAS: &str = "schemas";
|
||||
pub const QUERY: &str = "query";
|
||||
pub const CLIENT: &str = "client";
|
||||
pub const FRONTEND: &str = "frontend";
|
||||
pub const START_TIMESTAMP: &str = "start_timestamp";
|
||||
pub const ELAPSED_TIME: &str = "elapsed_time";
|
||||
|
||||
/// `information_schema.process_list` table implementation that tracks running
|
||||
/// queries in current cluster.
|
||||
|
||||
@@ -67,6 +67,7 @@ metric-engine.workspace = true
|
||||
mito2.workspace = true
|
||||
moka.workspace = true
|
||||
nu-ansi-term = "0.46"
|
||||
object-store.workspace = true
|
||||
plugins.workspace = true
|
||||
prometheus.workspace = true
|
||||
prost.workspace = true
|
||||
|
||||
@@ -280,7 +280,7 @@ mod tests {
|
||||
|
||||
use common_config::ENV_VAR_SEP;
|
||||
use common_test_util::temp_dir::create_named_temp_file;
|
||||
use datanode::config::{FileConfig, GcsConfig, ObjectStoreConfig, S3Config};
|
||||
use object_store::config::{FileConfig, GcsConfig, ObjectStoreConfig, S3Config};
|
||||
use servers::heartbeat_options::HeartbeatOptions;
|
||||
|
||||
use super::*;
|
||||
|
||||
@@ -257,15 +257,34 @@ pub struct Instance {
|
||||
flownode: FlownodeInstance,
|
||||
procedure_manager: ProcedureManagerRef,
|
||||
wal_options_allocator: WalOptionsAllocatorRef,
|
||||
|
||||
// The components of the standalone instance, kept here so that functionality
// built on top of them is easier to extend.
|
||||
#[cfg(feature = "enterprise")]
|
||||
components: Components,
|
||||
|
||||
// Keep the logging guard to prevent the worker from being dropped.
|
||||
_guard: Vec<WorkerGuard>,
|
||||
}
|
||||
|
||||
#[cfg(feature = "enterprise")]
|
||||
pub struct Components {
|
||||
pub plugins: Plugins,
|
||||
pub kv_backend: KvBackendRef,
|
||||
pub frontend_client: Arc<FrontendClient>,
|
||||
pub catalog_manager: catalog::CatalogManagerRef,
|
||||
}
|
||||
|
||||
impl Instance {
|
||||
/// Find the socket addr of a server by its `name`.
|
||||
pub fn server_addr(&self, name: &str) -> Option<SocketAddr> {
|
||||
self.frontend.server_handlers().addr(name)
|
||||
}
|
||||
|
||||
#[cfg(feature = "enterprise")]
|
||||
pub fn components(&self) -> &Components {
|
||||
&self.components
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
@@ -546,13 +565,14 @@ impl StartCommand {
|
||||
// actually make a connection
|
||||
let (frontend_client, frontend_instance_handler) =
|
||||
FrontendClient::from_empty_grpc_handler();
|
||||
let frontend_client = Arc::new(frontend_client);
|
||||
let flow_builder = FlownodeBuilder::new(
|
||||
flownode_options,
|
||||
plugins.clone(),
|
||||
table_metadata_manager.clone(),
|
||||
catalog_manager.clone(),
|
||||
flow_metadata_manager.clone(),
|
||||
Arc::new(frontend_client.clone()),
|
||||
frontend_client.clone(),
|
||||
);
|
||||
let flownode = flow_builder
|
||||
.build()
|
||||
@@ -662,7 +682,7 @@ impl StartCommand {
|
||||
let export_metrics_task = ExportMetricsTask::try_new(&opts.export_metrics, Some(&plugins))
|
||||
.context(error::ServersSnafu)?;
|
||||
|
||||
let servers = Services::new(opts, fe_instance.clone(), plugins)
|
||||
let servers = Services::new(opts, fe_instance.clone(), plugins.clone())
|
||||
.build()
|
||||
.context(error::StartFrontendSnafu)?;
|
||||
|
||||
@@ -673,12 +693,22 @@ impl StartCommand {
|
||||
export_metrics_task,
|
||||
};
|
||||
|
||||
#[cfg(feature = "enterprise")]
|
||||
let components = Components {
|
||||
plugins,
|
||||
kv_backend,
|
||||
frontend_client,
|
||||
catalog_manager,
|
||||
};
|
||||
|
||||
Ok(Instance {
|
||||
datanode,
|
||||
frontend,
|
||||
flownode,
|
||||
procedure_manager,
|
||||
wal_options_allocator,
|
||||
#[cfg(feature = "enterprise")]
|
||||
components,
|
||||
_guard: guard,
|
||||
})
|
||||
}
|
||||
@@ -818,7 +848,7 @@ mod tests {
|
||||
use common_config::ENV_VAR_SEP;
|
||||
use common_test_util::temp_dir::create_named_temp_file;
|
||||
use common_wal::config::DatanodeWalConfig;
|
||||
use datanode::config::{FileConfig, GcsConfig};
|
||||
use object_store::config::{FileConfig, GcsConfig};
|
||||
|
||||
use super::*;
|
||||
use crate::options::GlobalOptions;
|
||||
@@ -937,15 +967,15 @@ mod tests {
|
||||
|
||||
assert!(matches!(
|
||||
&dn_opts.storage.store,
|
||||
datanode::config::ObjectStoreConfig::File(FileConfig { .. })
|
||||
object_store::config::ObjectStoreConfig::File(FileConfig { .. })
|
||||
));
|
||||
assert_eq!(dn_opts.storage.providers.len(), 2);
|
||||
assert!(matches!(
|
||||
dn_opts.storage.providers[0],
|
||||
datanode::config::ObjectStoreConfig::Gcs(GcsConfig { .. })
|
||||
object_store::config::ObjectStoreConfig::Gcs(GcsConfig { .. })
|
||||
));
|
||||
match &dn_opts.storage.providers[1] {
|
||||
datanode::config::ObjectStoreConfig::S3(s3_config) => {
|
||||
object_store::config::ObjectStoreConfig::S3(s3_config) => {
|
||||
assert_eq!(
|
||||
"SecretBox<alloc::string::String>([REDACTED])".to_string(),
|
||||
format!("{:?}", s3_config.access_key_id)
|
||||
|
||||
@@ -18,7 +18,7 @@ use cmd::options::GreptimeOptions;
|
||||
use cmd::standalone::StandaloneOptions;
|
||||
use common_config::{Configurable, DEFAULT_DATA_HOME};
|
||||
use common_options::datanode::{ClientOptions, DatanodeClientOptions};
|
||||
use common_telemetry::logging::{LoggingOptions, DEFAULT_LOGGING_DIR, DEFAULT_OTLP_ENDPOINT};
|
||||
use common_telemetry::logging::{LoggingOptions, DEFAULT_LOGGING_DIR, DEFAULT_OTLP_HTTP_ENDPOINT};
|
||||
use common_wal::config::raft_engine::RaftEngineConfig;
|
||||
use common_wal::config::DatanodeWalConfig;
|
||||
use datanode::config::{DatanodeOptions, RegionEngineConfig, StorageConfig};
|
||||
@@ -81,7 +81,7 @@ fn test_load_datanode_example_config() {
|
||||
logging: LoggingOptions {
|
||||
level: Some("info".to_string()),
|
||||
dir: format!("{}/{}", DEFAULT_DATA_HOME, DEFAULT_LOGGING_DIR),
|
||||
otlp_endpoint: Some(DEFAULT_OTLP_ENDPOINT.to_string()),
|
||||
otlp_endpoint: Some(DEFAULT_OTLP_HTTP_ENDPOINT.to_string()),
|
||||
tracing_sample_ratio: Some(Default::default()),
|
||||
..Default::default()
|
||||
},
|
||||
@@ -124,7 +124,7 @@ fn test_load_frontend_example_config() {
|
||||
logging: LoggingOptions {
|
||||
level: Some("info".to_string()),
|
||||
dir: format!("{}/{}", DEFAULT_DATA_HOME, DEFAULT_LOGGING_DIR),
|
||||
otlp_endpoint: Some(DEFAULT_OTLP_ENDPOINT.to_string()),
|
||||
otlp_endpoint: Some(DEFAULT_OTLP_HTTP_ENDPOINT.to_string()),
|
||||
tracing_sample_ratio: Some(Default::default()),
|
||||
..Default::default()
|
||||
},
|
||||
@@ -172,7 +172,7 @@ fn test_load_metasrv_example_config() {
|
||||
logging: LoggingOptions {
|
||||
dir: format!("{}/{}", DEFAULT_DATA_HOME, DEFAULT_LOGGING_DIR),
|
||||
level: Some("info".to_string()),
|
||||
otlp_endpoint: Some(DEFAULT_OTLP_ENDPOINT.to_string()),
|
||||
otlp_endpoint: Some(DEFAULT_OTLP_HTTP_ENDPOINT.to_string()),
|
||||
tracing_sample_ratio: Some(Default::default()),
|
||||
..Default::default()
|
||||
},
|
||||
@@ -229,7 +229,7 @@ fn test_load_standalone_example_config() {
|
||||
logging: LoggingOptions {
|
||||
level: Some("info".to_string()),
|
||||
dir: format!("{}/{}", DEFAULT_DATA_HOME, DEFAULT_LOGGING_DIR),
|
||||
otlp_endpoint: Some(DEFAULT_OTLP_ENDPOINT.to_string()),
|
||||
otlp_endpoint: Some(DEFAULT_OTLP_HTTP_ENDPOINT.to_string()),
|
||||
tracing_sample_ratio: Some(Default::default()),
|
||||
..Default::default()
|
||||
},
|
||||
|
||||
@@ -14,6 +14,7 @@ common-macro.workspace = true
|
||||
config.workspace = true
|
||||
humantime-serde.workspace = true
|
||||
num_cpus.workspace = true
|
||||
object-store.workspace = true
|
||||
serde.workspace = true
|
||||
serde_json.workspace = true
|
||||
serde_with.workspace = true
|
||||
|
||||
@@ -106,7 +106,7 @@ mod tests {
|
||||
use common_telemetry::logging::LoggingOptions;
|
||||
use common_test_util::temp_dir::create_named_temp_file;
|
||||
use common_wal::config::DatanodeWalConfig;
|
||||
use datanode::config::{ObjectStoreConfig, StorageConfig};
|
||||
use datanode::config::StorageConfig;
|
||||
use meta_client::MetaClientOptions;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
@@ -212,7 +212,7 @@ mod tests {
|
||||
|
||||
// Check the configs from environment variables.
|
||||
match &opts.storage.store {
|
||||
ObjectStoreConfig::S3(s3_config) => {
|
||||
object_store::config::ObjectStoreConfig::S3(s3_config) => {
|
||||
assert_eq!(s3_config.bucket, "mybucket".to_string());
|
||||
}
|
||||
_ => panic!("unexpected store type"),
|
||||
|
||||
@@ -21,6 +21,7 @@ pub mod error;
|
||||
pub mod file_format;
|
||||
pub mod lister;
|
||||
pub mod object_store;
|
||||
pub mod parquet_writer;
|
||||
pub mod share_buffer;
|
||||
#[cfg(test)]
|
||||
pub mod test_util;
|
||||
|
||||
src/common/datasource/src/parquet_writer.rs (new file, 52 lines)
@@ -0,0 +1,52 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use bytes::Bytes;
use futures::future::BoxFuture;
use object_store::Writer;
use parquet::arrow::async_writer::AsyncFileWriter;
use parquet::errors::ParquetError;

/// Bridges opendal [Writer] with parquet [AsyncFileWriter].
pub struct AsyncWriter {
    inner: Writer,
}

impl AsyncWriter {
    /// Creates an [`AsyncWriter`] from the given [`Writer`].
    pub fn new(writer: Writer) -> Self {
        Self { inner: writer }
    }
}

impl AsyncFileWriter for AsyncWriter {
    fn write(&mut self, bs: Bytes) -> BoxFuture<'_, parquet::errors::Result<()>> {
        Box::pin(async move {
            self.inner
                .write(bs)
                .await
                .map_err(|err| ParquetError::External(Box::new(err)))
        })
    }

    fn complete(&mut self) -> BoxFuture<'_, parquet::errors::Result<()>> {
        Box::pin(async move {
            self.inner
                .close()
                .await
                .map(|_| ())
                .map_err(|err| ParquetError::External(Box::new(err)))
        })
    }
}
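
A hedged usage sketch for the bridge: wrapping an OpenDAL writer so parquet's `AsyncArrowWriter` can stream a `RecordBatch` to object storage. It assumes `object_store::Writer` is OpenDAL's `Writer` re-exported (as the module doc suggests), plus the `opendal`, `parquet`, `arrow-array`, `arrow-schema`, and `tokio` crates; exact constructor signatures vary across parquet and OpenDAL versions:

```rust
use std::sync::Arc;

use arrow_array::{Int64Array, RecordBatch};
use arrow_schema::{DataType, Field, Schema};
use common_datasource::parquet_writer::AsyncWriter; // path from the diff above
use opendal::services::Memory;
use opendal::Operator;
use parquet::arrow::AsyncArrowWriter;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // An in-memory OpenDAL operator stands in for S3/GCS/etc.
    let op = Operator::new(Memory::default())?.finish();
    let opendal_writer = op.writer("demo.parquet").await?;

    let schema = Arc::new(Schema::new(vec![Field::new("v", DataType::Int64, false)]));
    let batch = RecordBatch::try_new(
        schema.clone(),
        vec![Arc::new(Int64Array::from(vec![1, 2, 3]))],
    )?;

    // The bridge lets the OpenDAL writer act as parquet's AsyncFileWriter.
    let mut writer = AsyncArrowWriter::try_new(AsyncWriter::new(opendal_writer), schema, None)?;
    writer.write(&batch).await?;
    writer.close().await?;
    Ok(())
}
```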
@@ -12,7 +12,6 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::fmt::Debug;
|
||||
use std::time::Duration;
|
||||
|
||||
use common_grpc::channel_manager::{ChannelConfig, ChannelManager};
|
||||
@@ -31,7 +30,7 @@ use crate::error::{MetaSnafu, Result};
|
||||
pub type FrontendClientPtr = Box<dyn FrontendClient>;
|
||||
|
||||
#[async_trait::async_trait]
|
||||
pub trait FrontendClient: Send + Debug {
|
||||
pub trait FrontendClient: Send {
|
||||
async fn list_process(&mut self, req: ListProcessRequest) -> Result<ListProcessResponse>;
|
||||
|
||||
async fn kill_process(&mut self, req: KillProcessRequest) -> Result<KillProcessResponse>;
|
||||
|
||||
@@ -33,6 +33,7 @@ common-version.workspace = true
|
||||
datafusion.workspace = true
|
||||
datafusion-common.workspace = true
|
||||
datafusion-expr.workspace = true
|
||||
datafusion-functions-aggregate-common.workspace = true
|
||||
datatypes.workspace = true
|
||||
derive_more = { version = "1", default-features = false, features = ["display"] }
|
||||
geo = { version = "0.29", optional = true }
|
||||
|
||||
@@ -13,6 +13,7 @@
|
||||
// limitations under the License.
|
||||
|
||||
pub mod approximate;
|
||||
pub mod count_hash;
|
||||
#[cfg(feature = "geo")]
|
||||
pub mod geo;
|
||||
pub mod vector;
|
||||
|
||||
src/common/function/src/aggrs/count_hash.rs (new file, 647 lines)
@@ -0,0 +1,647 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//! `CountHash` / `count_hash` is a hash-based approximate distinct count function.
|
||||
//!
|
||||
//! It is a variant of `CountDistinct` that uses a hash function to approximate the
|
||||
//! distinct count.
|
||||
//! It is designed to be more efficient than `CountDistinct` for large datasets,
|
||||
//! but it is not as accurate, since different values may hash to the same value (collisions).
|
||||
|
||||
use std::collections::HashSet;
|
||||
use std::fmt::Debug;
|
||||
use std::sync::Arc;
|
||||
|
||||
use ahash::RandomState;
|
||||
use datafusion_common::cast::as_list_array;
|
||||
use datafusion_common::error::Result;
|
||||
use datafusion_common::hash_utils::create_hashes;
|
||||
use datafusion_common::utils::SingleRowListArrayBuilder;
|
||||
use datafusion_common::{internal_err, not_impl_err, ScalarValue};
|
||||
use datafusion_expr::function::{AccumulatorArgs, StateFieldsArgs};
|
||||
use datafusion_expr::utils::{format_state_name, AggregateOrderSensitivity};
|
||||
use datafusion_expr::{
|
||||
Accumulator, AggregateUDF, AggregateUDFImpl, EmitTo, GroupsAccumulator, ReversedUDAF,
|
||||
SetMonotonicity, Signature, TypeSignature, Volatility,
|
||||
};
|
||||
use datafusion_functions_aggregate_common::aggregate::groups_accumulator::nulls::filtered_null_mask;
|
||||
use datatypes::arrow;
|
||||
use datatypes::arrow::array::{
|
||||
Array, ArrayRef, AsArray, BooleanArray, Int64Array, ListArray, UInt64Array,
|
||||
};
|
||||
use datatypes::arrow::buffer::{OffsetBuffer, ScalarBuffer};
|
||||
use datatypes::arrow::datatypes::{DataType, Field};
|
||||
|
||||
use crate::function_registry::FunctionRegistry;
|
||||
|
||||
type HashValueType = u64;
|
||||
|
||||
// read from /dev/urandom 4047821dc6144e4b2abddf23ad4171126a52eeecd26eff2191cf673b965a7875
|
||||
const RANDOM_SEED_0: u64 = 0x4047821dc6144e4b;
|
||||
const RANDOM_SEED_1: u64 = 0x2abddf23ad417112;
|
||||
const RANDOM_SEED_2: u64 = 0x6a52eeecd26eff21;
|
||||
const RANDOM_SEED_3: u64 = 0x91cf673b965a7875;
|
||||
|
||||
impl CountHash {
|
||||
pub fn register(registry: &FunctionRegistry) {
|
||||
registry.register_aggr(CountHash::udf_impl());
|
||||
}
|
||||
|
||||
pub fn udf_impl() -> AggregateUDF {
|
||||
AggregateUDF::new_from_impl(CountHash {
|
||||
signature: Signature::one_of(
|
||||
vec![TypeSignature::VariadicAny, TypeSignature::Nullary],
|
||||
Volatility::Immutable,
|
||||
),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct CountHash {
|
||||
signature: Signature,
|
||||
}
|
||||
|
||||
impl AggregateUDFImpl for CountHash {
    fn as_any(&self) -> &dyn std::any::Any {
        self
    }

    fn name(&self) -> &str {
        "count_hash"
    }

    fn signature(&self) -> &Signature {
        &self.signature
    }

    fn return_type(&self, _arg_types: &[DataType]) -> Result<DataType> {
        Ok(DataType::Int64)
    }

    fn is_nullable(&self) -> bool {
        false
    }

    fn state_fields(&self, args: StateFieldsArgs) -> Result<Vec<Field>> {
        Ok(vec![Field::new_list(
            format_state_name(args.name, "count_hash"),
            Field::new_list_field(DataType::UInt64, true),
            // For the count_hash accumulator, a null list item stands for an
            // empty value set (i.e., all NULL values so far for that group).
            true,
        )])
    }

    fn accumulator(&self, acc_args: AccumulatorArgs) -> Result<Box<dyn Accumulator>> {
        if acc_args.exprs.len() > 1 {
            return not_impl_err!("count_hash with multiple arguments");
        }

        Ok(Box::new(CountHashAccumulator {
            values: HashSet::default(),
            random_state: RandomState::with_seeds(
                RANDOM_SEED_0,
                RANDOM_SEED_1,
                RANDOM_SEED_2,
                RANDOM_SEED_3,
            ),
            batch_hashes: vec![],
        }))
    }

    fn aliases(&self) -> &[String] {
        &[]
    }

    fn groups_accumulator_supported(&self, _args: AccumulatorArgs) -> bool {
        true
    }

    fn create_groups_accumulator(
        &self,
        args: AccumulatorArgs,
    ) -> Result<Box<dyn GroupsAccumulator>> {
        if args.exprs.len() > 1 {
            return not_impl_err!("count_hash with multiple arguments");
        }

        Ok(Box::new(CountHashGroupAccumulator::new()))
    }

    fn reverse_expr(&self) -> ReversedUDAF {
        ReversedUDAF::Identical
    }

    fn order_sensitivity(&self) -> AggregateOrderSensitivity {
        AggregateOrderSensitivity::Insensitive
    }

    fn default_value(&self, _data_type: &DataType) -> Result<ScalarValue> {
        Ok(ScalarValue::Int64(Some(0)))
    }

    fn set_monotonicity(&self, _data_type: &DataType) -> SetMonotonicity {
        SetMonotonicity::Increasing
    }
}

/// GroupsAccumulator for `count_hash` aggregate function
#[derive(Debug)]
pub struct CountHashGroupAccumulator {
    /// One HashSet per group to track distinct values
    distinct_sets: Vec<HashSet<HashValueType, RandomState>>,
    random_state: RandomState,
    batch_hashes: Vec<HashValueType>,
}

impl Default for CountHashGroupAccumulator {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
|
||||
impl CountHashGroupAccumulator {
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
distinct_sets: vec![],
|
||||
random_state: RandomState::with_seeds(
|
||||
RANDOM_SEED_0,
|
||||
RANDOM_SEED_1,
|
||||
RANDOM_SEED_2,
|
||||
RANDOM_SEED_3,
|
||||
),
|
||||
batch_hashes: vec![],
|
||||
}
|
||||
}
|
||||
|
||||
fn ensure_sets(&mut self, total_num_groups: usize) {
|
||||
if self.distinct_sets.len() < total_num_groups {
|
||||
self.distinct_sets
|
||||
.resize_with(total_num_groups, HashSet::default);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl GroupsAccumulator for CountHashGroupAccumulator {
|
||||
fn update_batch(
|
||||
&mut self,
|
||||
values: &[ArrayRef],
|
||||
group_indices: &[usize],
|
||||
opt_filter: Option<&BooleanArray>,
|
||||
total_num_groups: usize,
|
||||
) -> Result<()> {
|
||||
assert_eq!(values.len(), 1, "count_hash expects a single argument");
|
||||
self.ensure_sets(total_num_groups);
|
||||
|
||||
let array = &values[0];
|
||||
self.batch_hashes.clear();
|
||||
self.batch_hashes.resize(array.len(), 0);
|
||||
let hashes = create_hashes(
|
||||
&[ArrayRef::clone(array)],
|
||||
&self.random_state,
|
||||
&mut self.batch_hashes,
|
||||
)?;
|
||||
|
||||
// Use a pattern similar to accumulate_indices to process rows
|
||||
// that are not null and pass the filter
|
||||
let nulls = array.logical_nulls();
|
||||
|
||||
match (nulls.as_ref(), opt_filter) {
|
||||
(None, None) => {
|
||||
// No nulls, no filter - process all rows
|
||||
for (row_idx, &group_idx) in group_indices.iter().enumerate() {
|
||||
self.distinct_sets[group_idx].insert(hashes[row_idx]);
|
||||
}
|
||||
}
|
||||
(Some(nulls), None) => {
|
||||
// Has nulls, no filter
|
||||
for (row_idx, (&group_idx, is_valid)) in
|
||||
group_indices.iter().zip(nulls.iter()).enumerate()
|
||||
{
|
||||
if is_valid {
|
||||
self.distinct_sets[group_idx].insert(hashes[row_idx]);
|
||||
}
|
||||
}
|
||||
}
|
||||
(None, Some(filter)) => {
|
||||
// No nulls, has filter
|
||||
for (row_idx, (&group_idx, filter_value)) in
|
||||
group_indices.iter().zip(filter.iter()).enumerate()
|
||||
{
|
||||
if let Some(true) = filter_value {
|
||||
self.distinct_sets[group_idx].insert(hashes[row_idx]);
|
||||
}
|
||||
}
|
||||
}
|
||||
(Some(nulls), Some(filter)) => {
|
||||
// Has nulls and filter
|
||||
let iter = filter
|
||||
.iter()
|
||||
.zip(group_indices.iter())
|
||||
.zip(nulls.iter())
|
||||
.enumerate();
|
||||
|
||||
for (row_idx, ((filter_value, &group_idx), is_valid)) in iter {
|
||||
if is_valid && filter_value == Some(true) {
|
||||
self.distinct_sets[group_idx].insert(hashes[row_idx]);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn evaluate(&mut self, emit_to: EmitTo) -> Result<ArrayRef> {
|
||||
let distinct_sets: Vec<HashSet<u64, RandomState>> =
|
||||
emit_to.take_needed(&mut self.distinct_sets);
|
||||
|
||||
let counts = distinct_sets
|
||||
.iter()
|
||||
.map(|set| set.len() as i64)
|
||||
.collect::<Vec<_>>();
|
||||
Ok(Arc::new(Int64Array::from(counts)))
|
||||
}
|
||||
|
||||
fn merge_batch(
|
||||
&mut self,
|
||||
values: &[ArrayRef],
|
||||
group_indices: &[usize],
|
||||
_opt_filter: Option<&BooleanArray>,
|
||||
total_num_groups: usize,
|
||||
) -> Result<()> {
|
||||
assert_eq!(
|
||||
values.len(),
|
||||
1,
|
||||
"count_hash merge expects a single state array"
|
||||
);
|
||||
self.ensure_sets(total_num_groups);
|
||||
|
||||
let list_array = as_list_array(&values[0])?;
|
||||
|
||||
// For each group in the incoming batch
|
||||
for (i, &group_idx) in group_indices.iter().enumerate() {
|
||||
if i < list_array.len() {
|
||||
let inner_array = list_array.value(i);
|
||||
let inner_array = inner_array.as_any().downcast_ref::<UInt64Array>().unwrap();
|
||||
// Add each value to our set for this group
|
||||
for j in 0..inner_array.len() {
|
||||
if !inner_array.is_null(j) {
|
||||
self.distinct_sets[group_idx].insert(inner_array.value(j));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn state(&mut self, emit_to: EmitTo) -> Result<Vec<ArrayRef>> {
|
||||
let distinct_sets: Vec<HashSet<u64, RandomState>> =
|
||||
emit_to.take_needed(&mut self.distinct_sets);
|
||||
|
||||
let mut offsets = Vec::with_capacity(distinct_sets.len() + 1);
|
||||
offsets.push(0);
|
||||
let mut curr_len = 0i32;
|
||||
|
||||
let mut value_iter = distinct_sets
|
||||
.into_iter()
|
||||
.flat_map(|set| {
|
||||
// build offset
|
||||
curr_len += set.len() as i32;
|
||||
offsets.push(curr_len);
|
||||
// convert into iter
|
||||
set.into_iter()
|
||||
})
|
||||
.peekable();
|
||||
let data_array: ArrayRef = if value_iter.peek().is_none() {
|
||||
arrow::array::new_empty_array(&DataType::UInt64) as _
|
||||
} else {
|
||||
Arc::new(UInt64Array::from_iter_values(value_iter))
|
||||
};
|
||||
let offset_buffer = OffsetBuffer::new(ScalarBuffer::from(offsets));
|
||||
|
||||
let list_array = ListArray::new(
|
||||
Arc::new(Field::new_list_field(DataType::UInt64, true)),
|
||||
offset_buffer,
|
||||
data_array,
|
||||
None,
|
||||
);
|
||||
|
||||
Ok(vec![Arc::new(list_array) as _])
|
||||
}
|
||||
|
||||
fn convert_to_state(
|
||||
&self,
|
||||
values: &[ArrayRef],
|
||||
opt_filter: Option<&BooleanArray>,
|
||||
) -> Result<Vec<ArrayRef>> {
|
||||
// For a single hash value per row, create a list array with that value
|
||||
assert_eq!(values.len(), 1, "count_hash expects a single argument");
|
||||
let values = ArrayRef::clone(&values[0]);
|
||||
|
||||
let offsets = OffsetBuffer::new(ScalarBuffer::from_iter(0..values.len() as i32 + 1));
|
||||
let nulls = filtered_null_mask(opt_filter, &values);
|
||||
let list_array = ListArray::new(
|
||||
Arc::new(Field::new_list_field(DataType::UInt64, true)),
|
||||
offsets,
|
||||
values,
|
||||
nulls,
|
||||
);
|
||||
|
||||
Ok(vec![Arc::new(list_array)])
|
||||
}
|
||||
|
||||
fn supports_convert_to_state(&self) -> bool {
|
||||
true
|
||||
}
|
||||
|
||||
    fn size(&self) -> usize {
        // Base size of the struct
        let mut size = size_of::<Self>();

        // Size of the vector holding the HashSets
        size += size_of::<Vec<HashSet<HashValueType, RandomState>>>()
            + self.distinct_sets.capacity() * size_of::<HashSet<HashValueType, RandomState>>();

        // Estimate HashSet contents size more efficiently
        // Instead of iterating through all values which is expensive, use an approximation
        for set in &self.distinct_sets {
            // Base size of the HashSet
            size += set.capacity() * size_of::<HashValueType>();
        }

        size
    }
}
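// Sketch only (not part of this file): how the flattened `state()` encoding
// above looks for two groups with distinct-hash sets {h1, h2} and {h3}. The
// offsets delimit each group's slice in the flat values buffer; the hash
// values 11/22/33 are placeholders. Types come from the imports at the top of
// this file.
fn state_layout_sketch() {
    let values = Arc::new(UInt64Array::from(vec![11u64, 22, 33])); // h1, h2, h3
    let offsets = OffsetBuffer::new(ScalarBuffer::from(vec![0i32, 2, 3])); // group 0 -> [0..2), group 1 -> [2..3)
    let list = ListArray::new(
        Arc::new(Field::new_list_field(DataType::UInt64, true)),
        offsets,
        values,
        None,
    );
    // One list row per group.
    assert_eq!(list.len(), 2);
}
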
|
||||
|
||||
#[derive(Debug)]
struct CountHashAccumulator {
    values: HashSet<HashValueType, RandomState>,
    random_state: RandomState,
    batch_hashes: Vec<HashValueType>,
}

impl CountHashAccumulator {
    // Estimates the accumulator's memory footprint: the struct itself plus the
    // capacity reserved for the fixed-length (u64) hash values.
    fn fixed_size(&self) -> usize {
        size_of_val(self) + (size_of::<HashValueType>() * self.values.capacity())
    }
}
|
||||
|
||||
impl Accumulator for CountHashAccumulator {
|
||||
/// Returns the distinct values seen so far as (one element) ListArray.
|
||||
fn state(&mut self) -> Result<Vec<ScalarValue>> {
|
||||
let values = self.values.iter().cloned().collect::<Vec<_>>();
|
||||
let arr = Arc::new(UInt64Array::from(values)) as _;
|
||||
let list_scalar = SingleRowListArrayBuilder::new(arr).build_list_scalar();
|
||||
Ok(vec![list_scalar])
|
||||
}
|
||||
|
||||
fn update_batch(&mut self, values: &[ArrayRef]) -> Result<()> {
|
||||
if values.is_empty() {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let arr = &values[0];
|
||||
if arr.data_type() == &DataType::Null {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
self.batch_hashes.clear();
|
||||
self.batch_hashes.resize(arr.len(), 0);
|
||||
let hashes = create_hashes(
|
||||
&[ArrayRef::clone(arr)],
|
||||
&self.random_state,
|
||||
&mut self.batch_hashes,
|
||||
)?;
|
||||
for hash in hashes.as_slice() {
|
||||
self.values.insert(*hash);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
    /// Merges multiple sets of distinct values into the current set.
    ///
    /// The input to this function is a `ListArray` with **multiple** rows,
    /// where each row contains the values from one partial aggregation phase
    /// (e.g. the result of calling `Self::state` on multiple accumulators).
    fn merge_batch(&mut self, states: &[ArrayRef]) -> Result<()> {
        if states.is_empty() {
            return Ok(());
        }
        assert_eq!(states.len(), 1, "count_hash states must be singleton!");
        let array = &states[0];
        let list_array = array.as_list::<i32>();
|
||||
for inner_array in list_array.iter() {
|
||||
let Some(inner_array) = inner_array else {
|
||||
return internal_err!(
|
||||
"Intermediate results of count_hash should always be non null"
|
||||
);
|
||||
};
|
||||
let hash_array = inner_array.as_any().downcast_ref::<UInt64Array>().unwrap();
|
||||
for i in 0..hash_array.len() {
|
||||
self.values.insert(hash_array.value(i));
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
    fn evaluate(&mut self) -> Result<ScalarValue> {
        Ok(ScalarValue::Int64(Some(self.values.len() as i64)))
    }

    fn size(&self) -> usize {
        self.fixed_size()
    }
}
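// Sketch only (not part of this patch): the seeds defined above are fixed so
// that the same input value always maps to the same 64-bit hash regardless of
// which accumulator (or node) produced it. That determinism is what makes the
// partial states (sets of hashes) mergeable across accumulators.
fn seeds_are_deterministic_sketch() {
    use std::hash::BuildHasher;

    let a = RandomState::with_seeds(RANDOM_SEED_0, RANDOM_SEED_1, RANDOM_SEED_2, RANDOM_SEED_3);
    let b = RandomState::with_seeds(RANDOM_SEED_0, RANDOM_SEED_1, RANDOM_SEED_2, RANDOM_SEED_3);
    // Two independently constructed states with the same seeds hash identically.
    assert_eq!(a.hash_one(42u64), b.hash_one(42u64));
}
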
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use datatypes::arrow::array::{Array, BooleanArray, Int32Array, Int64Array};
|
||||
|
||||
use super::*;
|
||||
|
||||
fn create_test_accumulator() -> CountHashAccumulator {
|
||||
CountHashAccumulator {
|
||||
values: HashSet::default(),
|
||||
random_state: RandomState::with_seeds(
|
||||
RANDOM_SEED_0,
|
||||
RANDOM_SEED_1,
|
||||
RANDOM_SEED_2,
|
||||
RANDOM_SEED_3,
|
||||
),
|
||||
batch_hashes: vec![],
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_count_hash_accumulator() -> Result<()> {
|
||||
let mut acc = create_test_accumulator();
|
||||
|
||||
// Test with some data
|
||||
let array = Arc::new(Int32Array::from(vec![
|
||||
Some(1),
|
||||
Some(2),
|
||||
Some(3),
|
||||
Some(1),
|
||||
Some(2),
|
||||
None,
|
||||
])) as ArrayRef;
|
||||
acc.update_batch(&[array])?;
|
||||
let result = acc.evaluate()?;
|
||||
assert_eq!(result, ScalarValue::Int64(Some(4)));
|
||||
|
||||
// Test with empty data
|
||||
let mut acc = create_test_accumulator();
|
||||
let array = Arc::new(Int32Array::from(vec![] as Vec<Option<i32>>)) as ArrayRef;
|
||||
acc.update_batch(&[array])?;
|
||||
let result = acc.evaluate()?;
|
||||
assert_eq!(result, ScalarValue::Int64(Some(0)));
|
||||
|
||||
// Test with only nulls
|
||||
let mut acc = create_test_accumulator();
|
||||
let array = Arc::new(Int32Array::from(vec![None, None, None])) as ArrayRef;
|
||||
acc.update_batch(&[array])?;
|
||||
let result = acc.evaluate()?;
|
||||
assert_eq!(result, ScalarValue::Int64(Some(1)));
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_count_hash_accumulator_merge() -> Result<()> {
|
||||
// Accumulator 1
|
||||
let mut acc1 = create_test_accumulator();
|
||||
let array1 = Arc::new(Int32Array::from(vec![Some(1), Some(2), Some(3)])) as ArrayRef;
|
||||
acc1.update_batch(&[array1])?;
|
||||
let state1 = acc1.state()?;
|
||||
|
||||
// Accumulator 2
|
||||
let mut acc2 = create_test_accumulator();
|
||||
let array2 = Arc::new(Int32Array::from(vec![Some(3), Some(4), Some(5)])) as ArrayRef;
|
||||
acc2.update_batch(&[array2])?;
|
||||
let state2 = acc2.state()?;
|
||||
|
||||
// Merge state1 and state2 into a new accumulator
|
||||
let mut acc_merged = create_test_accumulator();
|
||||
let state_array1 = state1[0].to_array()?;
|
||||
let state_array2 = state2[0].to_array()?;
|
||||
|
||||
acc_merged.merge_batch(&[state_array1])?;
|
||||
acc_merged.merge_batch(&[state_array2])?;
|
||||
|
||||
let result = acc_merged.evaluate()?;
|
||||
// Distinct values are {1, 2, 3, 4, 5}, so count is 5
|
||||
assert_eq!(result, ScalarValue::Int64(Some(5)));
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn create_test_group_accumulator() -> CountHashGroupAccumulator {
|
||||
CountHashGroupAccumulator::new()
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_count_hash_group_accumulator() -> Result<()> {
|
||||
let mut acc = create_test_group_accumulator();
|
||||
let values = Arc::new(Int32Array::from(vec![1, 2, 1, 3, 2, 4, 5])) as ArrayRef;
|
||||
let group_indices = vec![0, 1, 0, 0, 1, 2, 0];
|
||||
let total_num_groups = 3;
|
||||
|
||||
acc.update_batch(&[values], &group_indices, None, total_num_groups)?;
|
||||
|
||||
let result_array = acc.evaluate(EmitTo::All)?;
|
||||
let result = result_array.as_any().downcast_ref::<Int64Array>().unwrap();
|
||||
|
||||
// Group 0: {1, 3, 5} -> 3
|
||||
// Group 1: {2} -> 1
|
||||
// Group 2: {4} -> 1
|
||||
assert_eq!(result.value(0), 3);
|
||||
assert_eq!(result.value(1), 1);
|
||||
assert_eq!(result.value(2), 1);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_count_hash_group_accumulator_with_filter() -> Result<()> {
|
||||
let mut acc = create_test_group_accumulator();
|
||||
let values = Arc::new(Int32Array::from(vec![1, 2, 3, 4, 5, 6])) as ArrayRef;
|
||||
let group_indices = vec![0, 0, 1, 1, 2, 2];
|
||||
let filter = BooleanArray::from(vec![true, false, true, true, false, true]);
|
||||
let total_num_groups = 3;
|
||||
|
||||
acc.update_batch(&[values], &group_indices, Some(&filter), total_num_groups)?;
|
||||
|
||||
let result_array = acc.evaluate(EmitTo::All)?;
|
||||
let result = result_array.as_any().downcast_ref::<Int64Array>().unwrap();
|
||||
|
||||
// Group 0: {1} (2 is filtered out) -> 1
|
||||
// Group 1: {3, 4} -> 2
|
||||
// Group 2: {6} (5 is filtered out) -> 1
|
||||
assert_eq!(result.value(0), 1);
|
||||
assert_eq!(result.value(1), 2);
|
||||
assert_eq!(result.value(2), 1);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_count_hash_group_accumulator_merge() -> Result<()> {
|
||||
// Accumulator 1
|
||||
let mut acc1 = create_test_group_accumulator();
|
||||
let values1 = Arc::new(Int32Array::from(vec![1, 2, 3, 4])) as ArrayRef;
|
||||
let group_indices1 = vec![0, 0, 1, 1];
|
||||
acc1.update_batch(&[values1], &group_indices1, None, 2)?;
|
||||
// acc1 state: group 0 -> {1, 2}, group 1 -> {3, 4}
|
||||
let state1 = acc1.state(EmitTo::All)?;
|
||||
|
||||
// Accumulator 2
|
||||
let mut acc2 = create_test_group_accumulator();
|
||||
let values2 = Arc::new(Int32Array::from(vec![5, 6, 1, 3])) as ArrayRef;
|
||||
// Merge into different group indices
|
||||
let group_indices2 = vec![2, 2, 0, 1];
|
||||
acc2.update_batch(&[values2], &group_indices2, None, 3)?;
|
||||
// acc2 state: group 0 -> {1}, group 1 -> {3}, group 2 -> {5, 6}
|
||||
|
||||
// Merge state from acc1 into acc2
|
||||
// We will merge acc1's group 0 into acc2's group 0
|
||||
// and acc1's group 1 into acc2's group 2
|
||||
let merge_group_indices = vec![0, 2];
|
||||
acc2.merge_batch(&state1, &merge_group_indices, None, 3)?;
|
||||
|
||||
let result_array = acc2.evaluate(EmitTo::All)?;
|
||||
let result = result_array.as_any().downcast_ref::<Int64Array>().unwrap();
|
||||
|
||||
// Final state of acc2:
|
||||
// Group 0: {1} U {1, 2} -> {1, 2}, count = 2
|
||||
// Group 1: {3}, count = 1
|
||||
// Group 2: {5, 6} U {3, 4} -> {3, 4, 5, 6}, count = 4
|
||||
assert_eq!(result.value(0), 2);
|
||||
assert_eq!(result.value(1), 1);
|
||||
assert_eq!(result.value(2), 4);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_size() {
|
||||
let acc = create_test_group_accumulator();
|
||||
// Just test it doesn't crash and returns a value.
|
||||
assert!(acc.size() > 0);
|
||||
}
|
||||
}
|
||||
@@ -21,6 +21,7 @@ use once_cell::sync::Lazy;
|
||||
|
||||
use crate::admin::AdminFunction;
|
||||
use crate::aggrs::approximate::ApproximateFunction;
|
||||
use crate::aggrs::count_hash::CountHash;
|
||||
use crate::aggrs::vector::VectorFunction as VectorAggrFunction;
|
||||
use crate::function::{AsyncFunctionRef, Function, FunctionRef};
|
||||
use crate::function_factory::ScalarFunctionFactory;
|
||||
@@ -144,6 +145,9 @@ pub static FUNCTION_REGISTRY: Lazy<Arc<FunctionRegistry>> = Lazy::new(|| {
|
||||
// Approximate functions
|
||||
ApproximateFunction::register(&function_registry);
|
||||
|
||||
// CountHash function
|
||||
CountHash::register(&function_registry);
|
||||
|
||||
Arc::new(function_registry)
|
||||
});
|
||||
|
||||
|
||||
@@ -34,7 +34,7 @@ use table::requests::{
|
||||
};
|
||||
|
||||
use crate::error::{
|
||||
InvalidColumnDefSnafu, InvalidIndexOptionSnafu, InvalidSetFulltextOptionRequestSnafu,
|
||||
InvalidColumnDefSnafu, InvalidSetFulltextOptionRequestSnafu,
|
||||
InvalidSetSkippingIndexOptionRequestSnafu, InvalidSetTableOptionRequestSnafu,
|
||||
InvalidUnsetTableOptionRequestSnafu, MissingAlterIndexOptionSnafu, MissingFieldSnafu,
|
||||
MissingTimestampColumnSnafu, Result, UnknownLocationTypeSnafu,
|
||||
@@ -126,21 +126,18 @@ pub fn alter_expr_to_request(table_id: TableId, expr: AlterTableExpr) -> Result<
|
||||
api::v1::set_index::Options::Fulltext(f) => AlterKind::SetIndex {
|
||||
options: SetIndexOptions::Fulltext {
|
||||
column_name: f.column_name.clone(),
|
||||
options: FulltextOptions::new(
|
||||
f.enable,
|
||||
as_fulltext_option_analyzer(
|
||||
options: FulltextOptions {
|
||||
enable: f.enable,
|
||||
analyzer: as_fulltext_option_analyzer(
|
||||
Analyzer::try_from(f.analyzer)
|
||||
.context(InvalidSetFulltextOptionRequestSnafu)?,
|
||||
),
|
||||
f.case_sensitive,
|
||||
as_fulltext_option_backend(
|
||||
case_sensitive: f.case_sensitive,
|
||||
backend: as_fulltext_option_backend(
|
||||
PbFulltextBackend::try_from(f.backend)
|
||||
.context(InvalidSetFulltextOptionRequestSnafu)?,
|
||||
),
|
||||
f.granularity as u32,
|
||||
f.false_positive_rate,
|
||||
)
|
||||
.context(InvalidIndexOptionSnafu)?,
|
||||
},
|
||||
},
|
||||
},
|
||||
api::v1::set_index::Options::Inverted(i) => AlterKind::SetIndex {
|
||||
@@ -151,15 +148,13 @@ pub fn alter_expr_to_request(table_id: TableId, expr: AlterTableExpr) -> Result<
|
||||
api::v1::set_index::Options::Skipping(s) => AlterKind::SetIndex {
|
||||
options: SetIndexOptions::Skipping {
|
||||
column_name: s.column_name,
|
||||
options: SkippingIndexOptions::new(
|
||||
s.granularity as u32,
|
||||
s.false_positive_rate,
|
||||
as_skipping_index_type(
|
||||
options: SkippingIndexOptions {
|
||||
granularity: s.granularity as u32,
|
||||
index_type: as_skipping_index_type(
|
||||
PbSkippingIndexType::try_from(s.skipping_index_type)
|
||||
.context(InvalidSetSkippingIndexOptionRequestSnafu)?,
|
||||
),
|
||||
)
|
||||
.context(InvalidIndexOptionSnafu)?,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
@@ -153,14 +153,6 @@ pub enum Error {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Invalid index option"))]
|
||||
InvalidIndexOption {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
#[snafu(source)]
|
||||
error: datatypes::error::Error,
|
||||
},
|
||||
}
|
||||
|
||||
pub type Result<T> = std::result::Result<T, Error>;
|
||||
@@ -188,8 +180,7 @@ impl ErrorExt for Error {
|
||||
| Error::InvalidUnsetTableOptionRequest { .. }
|
||||
| Error::InvalidSetFulltextOptionRequest { .. }
|
||||
| Error::InvalidSetSkippingIndexOptionRequest { .. }
|
||||
| Error::MissingAlterIndexOption { .. }
|
||||
| Error::InvalidIndexOption { .. } => StatusCode::InvalidArguments,
|
||||
| Error::MissingAlterIndexOption { .. } => StatusCode::InvalidArguments,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -32,6 +32,7 @@ impl MockDatanodeHandler for () {
|
||||
Ok(RegionResponse {
|
||||
affected_rows: 0,
|
||||
extensions: Default::default(),
|
||||
metadata: Vec::new(),
|
||||
})
|
||||
}
|
||||
|
||||
|
||||
@@ -50,7 +50,11 @@ use crate::key::{DeserializedValueWithBytes, TableMetadataManagerRef};
|
||||
#[cfg(feature = "enterprise")]
|
||||
use crate::rpc::ddl::trigger::CreateTriggerTask;
|
||||
#[cfg(feature = "enterprise")]
|
||||
use crate::rpc::ddl::trigger::DropTriggerTask;
|
||||
#[cfg(feature = "enterprise")]
|
||||
use crate::rpc::ddl::DdlTask::CreateTrigger;
|
||||
#[cfg(feature = "enterprise")]
|
||||
use crate::rpc::ddl::DdlTask::DropTrigger;
|
||||
use crate::rpc::ddl::DdlTask::{
|
||||
AlterDatabase, AlterLogicalTables, AlterTable, CreateDatabase, CreateFlow, CreateLogicalTables,
|
||||
CreateTable, CreateView, DropDatabase, DropFlow, DropLogicalTables, DropTable, DropView,
|
||||
@@ -91,6 +95,14 @@ pub trait TriggerDdlManager: Send + Sync {
|
||||
query_context: QueryContext,
|
||||
) -> Result<SubmitDdlTaskResponse>;
|
||||
|
||||
async fn drop_trigger(
|
||||
&self,
|
||||
drop_trigger_task: DropTriggerTask,
|
||||
procedure_manager: ProcedureManagerRef,
|
||||
ddl_context: DdlContext,
|
||||
query_context: QueryContext,
|
||||
) -> Result<SubmitDdlTaskResponse>;
|
||||
|
||||
fn as_any(&self) -> &dyn std::any::Any;
|
||||
}
|
||||
|
||||
@@ -648,6 +660,28 @@ async fn handle_drop_flow_task(
|
||||
})
|
||||
}
|
||||
|
||||
#[cfg(feature = "enterprise")]
|
||||
async fn handle_drop_trigger_task(
|
||||
ddl_manager: &DdlManager,
|
||||
drop_trigger_task: DropTriggerTask,
|
||||
query_context: QueryContext,
|
||||
) -> Result<SubmitDdlTaskResponse> {
|
||||
let Some(m) = ddl_manager.trigger_ddl_manager.as_ref() else {
|
||||
return UnsupportedSnafu {
|
||||
operation: "drop trigger",
|
||||
}
|
||||
.fail();
|
||||
};
|
||||
|
||||
m.drop_trigger(
|
||||
drop_trigger_task,
|
||||
ddl_manager.procedure_manager.clone(),
|
||||
ddl_manager.ddl_context.clone(),
|
||||
query_context,
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
||||
async fn handle_drop_view_task(
|
||||
ddl_manager: &DdlManager,
|
||||
drop_view_task: DropViewTask,
|
||||
@@ -835,6 +869,11 @@ impl ProcedureExecutor for DdlManager {
|
||||
handle_create_flow_task(self, create_flow_task, request.query_context.into())
|
||||
.await
|
||||
}
|
||||
DropFlow(drop_flow_task) => handle_drop_flow_task(self, drop_flow_task).await,
|
||||
CreateView(create_view_task) => {
|
||||
handle_create_view_task(self, create_view_task).await
|
||||
}
|
||||
DropView(drop_view_task) => handle_drop_view_task(self, drop_view_task).await,
|
||||
#[cfg(feature = "enterprise")]
|
||||
CreateTrigger(create_trigger_task) => {
|
||||
handle_create_trigger_task(
|
||||
@@ -844,11 +883,11 @@ impl ProcedureExecutor for DdlManager {
|
||||
)
|
||||
.await
|
||||
}
|
||||
DropFlow(drop_flow_task) => handle_drop_flow_task(self, drop_flow_task).await,
|
||||
CreateView(create_view_task) => {
|
||||
handle_create_view_task(self, create_view_task).await
|
||||
#[cfg(feature = "enterprise")]
|
||||
DropTrigger(drop_trigger_task) => {
|
||||
handle_drop_trigger_task(self, drop_trigger_task, request.query_context.into())
|
||||
.await
|
||||
}
|
||||
DropView(drop_view_task) => handle_drop_view_task(self, drop_view_task).await,
|
||||
}
|
||||
}
|
||||
.trace(span)
|
||||
|
||||
@@ -48,6 +48,11 @@ impl TableRouteKey {
|
||||
pub fn new(table_id: TableId) -> Self {
|
||||
Self { table_id }
|
||||
}
|
||||
|
||||
/// Returns the range prefix of the table route key.
|
||||
pub fn range_prefix() -> Vec<u8> {
|
||||
format!("{}/", TABLE_ROUTE_PREFIX).into_bytes()
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, PartialEq, Serialize, Deserialize, Clone)]
|
||||
|
||||
@@ -14,14 +14,13 @@
|
||||
|
||||
use std::collections::HashMap;
|
||||
|
||||
use common_telemetry::debug;
|
||||
use snafu::ensure;
|
||||
|
||||
use crate::error::{self, Result};
|
||||
use crate::key::txn_helper::TxnOpGetResponseSet;
|
||||
use crate::kv_backend::txn::{Compare, CompareOp, Txn, TxnOp};
|
||||
use crate::kv_backend::KvBackendRef;
|
||||
use crate::rpc::store::{BatchDeleteRequest, BatchGetRequest};
|
||||
use crate::rpc::store::BatchGetRequest;
|
||||
|
||||
/// [TombstoneManager] provides the ability to:
|
||||
/// - logically delete values
|
||||
@@ -29,9 +28,6 @@ use crate::rpc::store::{BatchDeleteRequest, BatchGetRequest};
|
||||
pub struct TombstoneManager {
|
||||
kv_backend: KvBackendRef,
|
||||
tombstone_prefix: String,
|
||||
// Only used for testing.
|
||||
#[cfg(test)]
|
||||
max_txn_ops: Option<usize>,
|
||||
}
|
||||
|
||||
const TOMBSTONE_PREFIX: &str = "__tombstone/";
|
||||
@@ -39,7 +35,10 @@ const TOMBSTONE_PREFIX: &str = "__tombstone/";
|
||||
impl TombstoneManager {
|
||||
/// Returns [TombstoneManager].
|
||||
pub fn new(kv_backend: KvBackendRef) -> Self {
|
||||
Self::new_with_prefix(kv_backend, TOMBSTONE_PREFIX)
|
||||
Self {
|
||||
kv_backend,
|
||||
tombstone_prefix: TOMBSTONE_PREFIX.to_string(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns [TombstoneManager] with a custom tombstone prefix.
|
||||
@@ -47,8 +46,6 @@ impl TombstoneManager {
|
||||
Self {
|
||||
kv_backend,
|
||||
tombstone_prefix: prefix.to_string(),
|
||||
#[cfg(test)]
|
||||
max_txn_ops: None,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -56,11 +53,6 @@ impl TombstoneManager {
        [self.tombstone_prefix.as_bytes(), key].concat()
    }
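// Sketch only (not from this diff): with the default "__tombstone/" prefix, a
// logically-deleted key is simply the original key with that prefix prepended.
// The key bytes below are placeholders.
fn tombstone_key_layout_sketch() {
    let prefix = b"__tombstone/".as_slice();
    let key = b"greptime/table/1024".as_slice();
    let tombstone_key: Vec<u8> = [prefix, key].concat();
    assert_eq!(tombstone_key, b"__tombstone/greptime/table/1024".to_vec());
}
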
|
||||
|
||||
#[cfg(test)]
|
||||
pub fn set_max_txn_ops(&mut self, max_txn_ops: usize) {
|
||||
self.max_txn_ops = Some(max_txn_ops);
|
||||
}
|
||||
|
||||
/// Moves value to `dest_key`.
|
||||
///
|
||||
/// Puts `value` to `dest_key` if the value of `src_key` equals `value`.
|
||||
@@ -91,11 +83,7 @@ impl TombstoneManager {
|
||||
ensure!(
|
||||
keys.len() == dest_keys.len(),
|
||||
error::UnexpectedSnafu {
|
||||
err_msg: format!(
|
||||
"The length of keys({}) does not match the length of dest_keys({}).",
|
||||
keys.len(),
|
||||
dest_keys.len()
|
||||
),
|
||||
err_msg: "The length of keys does not match the length of dest_keys."
|
||||
}
|
||||
);
|
||||
// The key -> dest key mapping.
|
||||
@@ -148,45 +136,19 @@ impl TombstoneManager {
|
||||
.fail()
|
||||
}
|
||||
|
||||
fn max_txn_ops(&self) -> usize {
|
||||
#[cfg(test)]
|
||||
if let Some(max_txn_ops) = self.max_txn_ops {
|
||||
return max_txn_ops;
|
||||
}
|
||||
self.kv_backend.max_txn_ops()
|
||||
}
|
||||
|
||||
/// Moves values to `dest_key`.
|
||||
///
|
||||
/// Returns the number of keys that were moved.
|
||||
async fn move_values(&self, keys: Vec<Vec<u8>>, dest_keys: Vec<Vec<u8>>) -> Result<usize> {
|
||||
ensure!(
|
||||
keys.len() == dest_keys.len(),
|
||||
error::UnexpectedSnafu {
|
||||
err_msg: format!(
|
||||
"The length of keys({}) does not match the length of dest_keys({}).",
|
||||
keys.len(),
|
||||
dest_keys.len()
|
||||
),
|
||||
}
|
||||
);
|
||||
if keys.is_empty() {
|
||||
return Ok(0);
|
||||
}
|
||||
let chunk_size = self.max_txn_ops() / 2;
|
||||
let chunk_size = self.kv_backend.max_txn_ops() / 2;
|
||||
if keys.len() > chunk_size {
|
||||
debug!(
|
||||
"Moving values with multiple chunks, keys len: {}, chunk_size: {}",
|
||||
keys.len(),
|
||||
chunk_size
|
||||
);
|
||||
let mut moved_keys = 0;
|
||||
let keys_chunks = keys.chunks(chunk_size).collect::<Vec<_>>();
|
||||
let dest_keys_chunks = dest_keys.chunks(chunk_size).collect::<Vec<_>>();
|
||||
let dest_keys_chunks = keys.chunks(chunk_size).collect::<Vec<_>>();
|
||||
for (keys, dest_keys) in keys_chunks.into_iter().zip(dest_keys_chunks) {
|
||||
moved_keys += self.move_values_inner(keys, dest_keys).await?;
|
||||
self.move_values_inner(keys, dest_keys).await?;
|
||||
}
|
||||
Ok(moved_keys)
|
||||
|
||||
Ok(keys.len())
|
||||
} else {
|
||||
self.move_values_inner(&keys, &dest_keys).await
|
||||
}
|
||||
@@ -234,18 +196,15 @@ impl TombstoneManager {
|
||||
///
|
||||
/// Returns the number of keys that were deleted.
|
||||
pub async fn delete(&self, keys: Vec<Vec<u8>>) -> Result<usize> {
|
||||
let keys = keys
|
||||
let operations = keys
|
||||
.iter()
|
||||
.map(|key| self.to_tombstone(key))
|
||||
.map(|key| TxnOp::Delete(self.to_tombstone(key)))
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
let num_keys = keys.len();
|
||||
let _ = self
|
||||
.kv_backend
|
||||
.batch_delete(BatchDeleteRequest::new().with_keys(keys))
|
||||
.await?;
|
||||
|
||||
Ok(num_keys)
|
||||
let txn = Txn::new().and_then(operations);
|
||||
// Always success.
|
||||
let _ = self.kv_backend.txn(txn).await?;
|
||||
Ok(keys.len())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -433,73 +392,16 @@ mod tests {
|
||||
.into_iter()
|
||||
.map(|kv| (kv.key, kv.dest_key))
|
||||
.unzip();
|
||||
let moved_keys = tombstone_manager
|
||||
tombstone_manager
|
||||
.move_values(keys.clone(), dest_keys.clone())
|
||||
.await
|
||||
.unwrap();
|
||||
assert_eq!(kvs.len(), moved_keys);
|
||||
check_moved_values(kv_backend.clone(), &move_values).await;
|
||||
// Moves again
|
||||
let moved_keys = tombstone_manager
|
||||
tombstone_manager
|
||||
.move_values(keys.clone(), dest_keys.clone())
|
||||
.await
|
||||
.unwrap();
|
||||
assert_eq!(0, moved_keys);
|
||||
check_moved_values(kv_backend.clone(), &move_values).await;
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_move_values_with_max_txn_ops() {
|
||||
common_telemetry::init_default_ut_logging();
|
||||
let kv_backend = Arc::new(MemoryKvBackend::default());
|
||||
let mut tombstone_manager = TombstoneManager::new(kv_backend.clone());
|
||||
tombstone_manager.set_max_txn_ops(4);
|
||||
let kvs = HashMap::from([
|
||||
(b"bar".to_vec(), b"baz".to_vec()),
|
||||
(b"foo".to_vec(), b"hi".to_vec()),
|
||||
(b"baz".to_vec(), b"hello".to_vec()),
|
||||
(b"qux".to_vec(), b"world".to_vec()),
|
||||
(b"quux".to_vec(), b"world".to_vec()),
|
||||
(b"quuux".to_vec(), b"world".to_vec()),
|
||||
(b"quuuux".to_vec(), b"world".to_vec()),
|
||||
(b"quuuuux".to_vec(), b"world".to_vec()),
|
||||
(b"quuuuuux".to_vec(), b"world".to_vec()),
|
||||
]);
|
||||
for (key, value) in &kvs {
|
||||
kv_backend
|
||||
.put(
|
||||
PutRequest::new()
|
||||
.with_key(key.clone())
|
||||
.with_value(value.clone()),
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
}
|
||||
let move_values = kvs
|
||||
.iter()
|
||||
.map(|(key, value)| MoveValue {
|
||||
key: key.clone(),
|
||||
dest_key: tombstone_manager.to_tombstone(key),
|
||||
value: value.clone(),
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
let (keys, dest_keys): (Vec<_>, Vec<_>) = move_values
|
||||
.clone()
|
||||
.into_iter()
|
||||
.map(|kv| (kv.key, kv.dest_key))
|
||||
.unzip();
|
||||
let moved_keys = tombstone_manager
|
||||
.move_values(keys.clone(), dest_keys.clone())
|
||||
.await
|
||||
.unwrap();
|
||||
assert_eq!(kvs.len(), moved_keys);
|
||||
check_moved_values(kv_backend.clone(), &move_values).await;
|
||||
// Moves again
|
||||
let moved_keys = tombstone_manager
|
||||
.move_values(keys.clone(), dest_keys.clone())
|
||||
.await
|
||||
.unwrap();
|
||||
assert_eq!(0, moved_keys);
|
||||
check_moved_values(kv_backend.clone(), &move_values).await;
|
||||
}
|
||||
|
||||
@@ -537,19 +439,17 @@ mod tests {
|
||||
.unzip();
|
||||
keys.push(b"non-exists".to_vec());
|
||||
dest_keys.push(b"hi/non-exists".to_vec());
|
||||
let moved_keys = tombstone_manager
|
||||
tombstone_manager
|
||||
.move_values(keys.clone(), dest_keys.clone())
|
||||
.await
|
||||
.unwrap();
|
||||
check_moved_values(kv_backend.clone(), &move_values).await;
|
||||
assert_eq!(3, moved_keys);
|
||||
// Moves again
|
||||
let moved_keys = tombstone_manager
|
||||
tombstone_manager
|
||||
.move_values(keys.clone(), dest_keys.clone())
|
||||
.await
|
||||
.unwrap();
|
||||
check_moved_values(kv_backend.clone(), &move_values).await;
|
||||
assert_eq!(0, moved_keys);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
@@ -590,11 +490,10 @@ mod tests {
|
||||
.into_iter()
|
||||
.map(|kv| (kv.key, kv.dest_key))
|
||||
.unzip();
|
||||
let moved_keys = tombstone_manager
|
||||
tombstone_manager
|
||||
.move_values(keys, dest_keys)
|
||||
.await
|
||||
.unwrap();
|
||||
assert_eq!(kvs.len(), moved_keys);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
@@ -672,24 +571,4 @@ mod tests {
|
||||
.unwrap();
|
||||
check_moved_values(kv_backend.clone(), &move_values).await;
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_move_values_with_different_lengths() {
|
||||
let kv_backend = Arc::new(MemoryKvBackend::default());
|
||||
let tombstone_manager = TombstoneManager::new(kv_backend.clone());
|
||||
|
||||
let keys = vec![b"bar".to_vec(), b"foo".to_vec()];
|
||||
let dest_keys = vec![b"bar".to_vec(), b"foo".to_vec(), b"baz".to_vec()];
|
||||
|
||||
let err = tombstone_manager
|
||||
.move_values(keys, dest_keys)
|
||||
.await
|
||||
.unwrap_err();
|
||||
assert!(err
|
||||
.to_string()
|
||||
.contains("The length of keys(2) does not match the length of dest_keys(3)."),);
|
||||
|
||||
let moved_keys = tombstone_manager.move_values(vec![], vec![]).await.unwrap();
|
||||
assert_eq!(0, moved_keys);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -69,6 +69,8 @@ pub enum DdlTask {
|
||||
AlterDatabase(AlterDatabaseTask),
|
||||
CreateFlow(CreateFlowTask),
|
||||
DropFlow(DropFlowTask),
|
||||
#[cfg(feature = "enterprise")]
|
||||
DropTrigger(trigger::DropTriggerTask),
|
||||
CreateView(CreateViewTask),
|
||||
DropView(DropViewTask),
|
||||
#[cfg(feature = "enterprise")]
|
||||
@@ -259,6 +261,18 @@ impl TryFrom<Task> for DdlTask {
|
||||
.fail()
|
||||
}
|
||||
}
|
||||
Task::DropTriggerTask(drop_trigger) => {
|
||||
#[cfg(feature = "enterprise")]
|
||||
return Ok(DdlTask::DropTrigger(drop_trigger.try_into()?));
|
||||
#[cfg(not(feature = "enterprise"))]
|
||||
{
|
||||
let _ = drop_trigger;
|
||||
crate::error::UnsupportedSnafu {
|
||||
operation: "drop trigger",
|
||||
}
|
||||
.fail()
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -311,6 +325,8 @@ impl TryFrom<SubmitDdlTaskRequest> for PbDdlTaskRequest {
|
||||
DdlTask::DropView(task) => Task::DropViewTask(task.into()),
|
||||
#[cfg(feature = "enterprise")]
|
||||
DdlTask::CreateTrigger(task) => Task::CreateTriggerTask(task.into()),
|
||||
#[cfg(feature = "enterprise")]
|
||||
DdlTask::DropTrigger(task) => Task::DropTriggerTask(task.into()),
|
||||
};
|
||||
|
||||
Ok(Self {
|
||||
|
||||
@@ -1,10 +1,13 @@
|
||||
use std::collections::HashMap;
|
||||
use std::time::Duration;
|
||||
|
||||
use api::v1::meta::CreateTriggerTask as PbCreateTriggerTask;
|
||||
use api::v1::meta::{
|
||||
CreateTriggerTask as PbCreateTriggerTask, DropTriggerTask as PbDropTriggerTask,
|
||||
};
|
||||
use api::v1::notify_channel::ChannelType as PbChannelType;
|
||||
use api::v1::{
|
||||
CreateTriggerExpr, NotifyChannel as PbNotifyChannel, WebhookOptions as PbWebhookOptions,
|
||||
CreateTriggerExpr as PbCreateTriggerExpr, DropTriggerExpr as PbDropTriggerExpr,
|
||||
NotifyChannel as PbNotifyChannel, WebhookOptions as PbWebhookOptions,
|
||||
};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use snafu::OptionExt;
|
||||
@@ -56,7 +59,7 @@ impl From<CreateTriggerTask> for PbCreateTriggerTask {
|
||||
.map(PbNotifyChannel::from)
|
||||
.collect();
|
||||
|
||||
let expr = CreateTriggerExpr {
|
||||
let expr = PbCreateTriggerExpr {
|
||||
catalog_name: task.catalog_name,
|
||||
trigger_name: task.trigger_name,
|
||||
create_if_not_exists: task.if_not_exists,
|
||||
@@ -139,17 +142,86 @@ impl TryFrom<PbNotifyChannel> for NotifyChannel {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct DropTriggerTask {
|
||||
pub catalog_name: String,
|
||||
pub trigger_name: String,
|
||||
pub drop_if_exists: bool,
|
||||
}
|
||||
|
||||
impl From<DropTriggerTask> for PbDropTriggerTask {
|
||||
fn from(task: DropTriggerTask) -> Self {
|
||||
let expr = PbDropTriggerExpr {
|
||||
catalog_name: task.catalog_name,
|
||||
trigger_name: task.trigger_name,
|
||||
drop_if_exists: task.drop_if_exists,
|
||||
};
|
||||
|
||||
PbDropTriggerTask {
|
||||
drop_trigger: Some(expr),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl TryFrom<PbDropTriggerTask> for DropTriggerTask {
|
||||
type Error = error::Error;
|
||||
|
||||
fn try_from(task: PbDropTriggerTask) -> Result<Self> {
|
||||
let expr = task.drop_trigger.context(error::InvalidProtoMsgSnafu {
|
||||
err_msg: "expected drop_trigger",
|
||||
})?;
|
||||
|
||||
Ok(DropTriggerTask {
|
||||
catalog_name: expr.catalog_name,
|
||||
trigger_name: expr.trigger_name,
|
||||
drop_if_exists: expr.drop_if_exists,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl DdlTask {
|
||||
/// Creates a [`DdlTask`] to create a trigger.
|
||||
pub fn new_create_trigger(expr: CreateTriggerTask) -> Self {
|
||||
DdlTask::CreateTrigger(expr)
|
||||
}
|
||||
|
||||
/// Creates a [`DdlTask`] to drop a trigger.
|
||||
pub fn new_drop_trigger(expr: DropTriggerTask) -> Self {
|
||||
DdlTask::DropTrigger(expr)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_convert_drop_trigger_task() {
|
||||
let original = DropTriggerTask {
|
||||
catalog_name: "test_catalog".to_string(),
|
||||
trigger_name: "test_trigger".to_string(),
|
||||
drop_if_exists: true,
|
||||
};
|
||||
|
||||
let pb_task: PbDropTriggerTask = original.clone().into();
|
||||
|
||||
let expr = pb_task.drop_trigger.as_ref().unwrap();
|
||||
assert_eq!(expr.catalog_name, "test_catalog");
|
||||
assert_eq!(expr.trigger_name, "test_trigger");
|
||||
assert!(expr.drop_if_exists);
|
||||
|
||||
let round_tripped = DropTriggerTask::try_from(pb_task).unwrap();
|
||||
|
||||
assert_eq!(original.catalog_name, round_tripped.catalog_name);
|
||||
assert_eq!(original.trigger_name, round_tripped.trigger_name);
|
||||
assert_eq!(original.drop_if_exists, round_tripped.drop_if_exists);
|
||||
|
||||
// Test invalid case where drop_trigger is None
|
||||
let invalid_task = PbDropTriggerTask { drop_trigger: None };
|
||||
let result = DropTriggerTask::try_from(invalid_task);
|
||||
assert!(result.is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_convert_create_trigger_task() {
|
||||
let original = CreateTriggerTask {
|
||||
|
||||
@@ -14,7 +14,7 @@
|
||||
|
||||
pub mod columnar_value;
|
||||
pub mod error;
|
||||
mod function;
|
||||
pub mod function;
|
||||
pub mod logical_plan;
|
||||
pub mod prelude;
|
||||
pub mod request;
|
||||
|
||||
@@ -178,6 +178,8 @@ pub enum Error {
|
||||
StreamTimeout {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
#[snafu(source)]
|
||||
error: tokio::time::error::Elapsed,
|
||||
},
|
||||
|
||||
#[snafu(display("RecordBatch slice index overflow: {visit_index} > {size}"))]
|
||||
|
||||
@@ -22,7 +22,7 @@ once_cell.workspace = true
|
||||
opentelemetry = { version = "0.21.0", default-features = false, features = [
|
||||
"trace",
|
||||
] }
|
||||
opentelemetry-otlp = { version = "0.14.0", features = ["tokio"] }
|
||||
opentelemetry-otlp = { version = "0.14.0", features = ["tokio", "http-proto", "reqwest-client"] }
|
||||
opentelemetry-semantic-conventions = "0.13.0"
|
||||
opentelemetry_sdk = { version = "0.21.0", features = ["rt-tokio"] }
|
||||
parking_lot.workspace = true
|
||||
|
||||
@@ -20,7 +20,7 @@ use std::time::Duration;
|
||||
|
||||
use once_cell::sync::{Lazy, OnceCell};
|
||||
use opentelemetry::{global, KeyValue};
|
||||
use opentelemetry_otlp::WithExportConfig;
|
||||
use opentelemetry_otlp::{Protocol, SpanExporterBuilder, WithExportConfig};
|
||||
use opentelemetry_sdk::propagation::TraceContextPropagator;
|
||||
use opentelemetry_sdk::trace::Sampler;
|
||||
use opentelemetry_semantic_conventions::resource;
|
||||
@@ -36,7 +36,11 @@ use tracing_subscriber::{filter, EnvFilter, Registry};
|
||||
|
||||
use crate::tracing_sampler::{create_sampler, TracingSampleOptions};
|
||||
|
||||
pub const DEFAULT_OTLP_ENDPOINT: &str = "http://localhost:4317";
|
||||
/// The default endpoint when using the gRPC exporter protocol.
pub const DEFAULT_OTLP_GRPC_ENDPOINT: &str = "http://localhost:4317";

/// The default endpoint when using the HTTP exporter protocol.
pub const DEFAULT_OTLP_HTTP_ENDPOINT: &str = "http://localhost:4318";
|
||||
|
||||
/// The default logs directory.
|
||||
pub const DEFAULT_LOGGING_DIR: &str = "logs";
|
||||
@@ -67,11 +71,25 @@ pub struct LoggingOptions {
|
||||
/// Whether to enable tracing with OTLP. Default is false.
|
||||
pub enable_otlp_tracing: bool,
|
||||
|
||||
/// The endpoint of OTLP. Default is "http://localhost:4317".
|
||||
/// The endpoint of OTLP. Default is "http://localhost:4318".
|
||||
pub otlp_endpoint: Option<String>,
|
||||
|
||||
/// The tracing sample ratio.
|
||||
pub tracing_sample_ratio: Option<TracingSampleOptions>,
|
||||
|
||||
/// The protocol of OTLP export.
|
||||
pub otlp_export_protocol: Option<OtlpExportProtocol>,
|
||||
}
|
||||
|
||||
/// The protocol of OTLP export.
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum OtlpExportProtocol {
    /// GRPC protocol.
    Grpc,

    /// HTTP protocol with binary protobuf.
    Http,
}
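// Sketch only (not part of this diff): selecting the HTTP exporter with a
// custom collector address. Field names mirror `LoggingOptions` above; the
// endpoint value is a placeholder, and a missing scheme gets "http://"
// prepended by the exporter builder.
fn http_otlp_logging_options_sketch() -> LoggingOptions {
    LoggingOptions {
        enable_otlp_tracing: true,
        otlp_endpoint: Some("my-collector:4318".to_string()),
        otlp_export_protocol: Some(OtlpExportProtocol::Http),
        ..Default::default()
    }
}
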
|
||||
|
||||
/// The options of slow query.
|
||||
@@ -147,6 +165,7 @@ impl Default for LoggingOptions {
|
||||
append_stdout: true,
|
||||
// Rotation hourly, 24 files per day, keeps info log files of 30 days
|
||||
max_log_files: 720,
|
||||
otlp_export_protocol: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -388,22 +407,9 @@ pub fn init_global_logging(
|
||||
KeyValue::new(resource::PROCESS_PID, std::process::id().to_string()),
|
||||
]));
|
||||
|
||||
let exporter = opentelemetry_otlp::new_exporter().tonic().with_endpoint(
|
||||
opts.otlp_endpoint
|
||||
.as_ref()
|
||||
.map(|e| {
|
||||
if e.starts_with("http") {
|
||||
e.to_string()
|
||||
} else {
|
||||
format!("http://{}", e)
|
||||
}
|
||||
})
|
||||
.unwrap_or(DEFAULT_OTLP_ENDPOINT.to_string()),
|
||||
);
|
||||
|
||||
let tracer = opentelemetry_otlp::new_pipeline()
|
||||
.tracing()
|
||||
.with_exporter(exporter)
|
||||
.with_exporter(build_otlp_exporter(opts))
|
||||
.with_trace_config(trace_config)
|
||||
.install_batch(opentelemetry_sdk::runtime::Tokio)
|
||||
.expect("otlp tracer install failed");
|
||||
@@ -421,6 +427,42 @@ pub fn init_global_logging(
|
||||
guards
|
||||
}
|
||||
|
||||
fn build_otlp_exporter(opts: &LoggingOptions) -> SpanExporterBuilder {
|
||||
let protocol = opts
|
||||
.otlp_export_protocol
|
||||
.clone()
|
||||
.unwrap_or(OtlpExportProtocol::Http);
|
||||
|
||||
let endpoint = opts
|
||||
.otlp_endpoint
|
||||
.as_ref()
|
||||
.map(|e| {
|
||||
if e.starts_with("http") {
|
||||
e.to_string()
|
||||
} else {
|
||||
format!("http://{}", e)
|
||||
}
|
||||
})
|
||||
.unwrap_or_else(|| match protocol {
|
||||
OtlpExportProtocol::Grpc => DEFAULT_OTLP_GRPC_ENDPOINT.to_string(),
|
||||
OtlpExportProtocol::Http => DEFAULT_OTLP_HTTP_ENDPOINT.to_string(),
|
||||
});
|
||||
|
||||
match protocol {
|
||||
OtlpExportProtocol::Grpc => SpanExporterBuilder::Tonic(
|
||||
opentelemetry_otlp::new_exporter()
|
||||
.tonic()
|
||||
.with_endpoint(endpoint),
|
||||
),
|
||||
OtlpExportProtocol::Http => SpanExporterBuilder::Http(
|
||||
opentelemetry_otlp::new_exporter()
|
||||
.http()
|
||||
.with_endpoint(endpoint)
|
||||
.with_protocol(Protocol::HttpBinary),
|
||||
),
|
||||
}
|
||||
}
|
||||
|
||||
fn build_slow_query_logger<S>(
|
||||
opts: &LoggingOptions,
|
||||
slow_query_opts: Option<&SlowQueryOptions>,
|
||||
|
||||
@@ -475,7 +475,7 @@ mod test {
|
||||
async fn region_alive_keeper() {
|
||||
common_telemetry::init_default_ut_logging();
|
||||
let mut region_server = mock_region_server();
|
||||
let mut engine_env = TestEnv::with_prefix("region-alive-keeper").await;
|
||||
let mut engine_env = TestEnv::with_prefix("region-alive-keeper");
|
||||
let engine = engine_env.create_engine(MitoConfig::default()).await;
|
||||
let engine = Arc::new(engine);
|
||||
region_server.register_engine(engine.clone());
|
||||
|
||||
@@ -14,10 +14,7 @@
|
||||
|
||||
//! Datanode configurations
|
||||
|
||||
use core::time::Duration;
|
||||
|
||||
use common_base::readable_size::ReadableSize;
|
||||
use common_base::secrets::{ExposeSecret, SecretString};
|
||||
use common_config::{Configurable, DEFAULT_DATA_HOME};
|
||||
pub use common_procedure::options::ProcedureConfig;
|
||||
use common_telemetry::logging::{LoggingOptions, TracingOptions};
|
||||
@@ -27,6 +24,7 @@ use file_engine::config::EngineConfig as FileEngineConfig;
|
||||
use meta_client::MetaClientOptions;
|
||||
use metric_engine::config::EngineConfig as MetricEngineConfig;
|
||||
use mito2::config::MitoConfig;
|
||||
pub(crate) use object_store::config::ObjectStoreConfig;
|
||||
use query::options::QueryOptions;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use servers::export_metrics::ExportMetricsOption;
|
||||
@@ -36,53 +34,6 @@ use servers::http::HttpOptions;
|
||||
|
||||
pub const DEFAULT_OBJECT_STORE_CACHE_SIZE: ReadableSize = ReadableSize::gb(5);
|
||||
|
||||
/// Object storage config
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
|
||||
#[serde(tag = "type")]
|
||||
pub enum ObjectStoreConfig {
|
||||
File(FileConfig),
|
||||
S3(S3Config),
|
||||
Oss(OssConfig),
|
||||
Azblob(AzblobConfig),
|
||||
Gcs(GcsConfig),
|
||||
}
|
||||
|
||||
impl ObjectStoreConfig {
|
||||
/// Returns the object storage type name, such as `S3`, `Oss` etc.
|
||||
pub fn provider_name(&self) -> &'static str {
|
||||
match self {
|
||||
Self::File(_) => "File",
|
||||
Self::S3(_) => "S3",
|
||||
Self::Oss(_) => "Oss",
|
||||
Self::Azblob(_) => "Azblob",
|
||||
Self::Gcs(_) => "Gcs",
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns true when it's a remote object storage such as AWS s3 etc.
|
||||
pub fn is_object_storage(&self) -> bool {
|
||||
!matches!(self, Self::File(_))
|
||||
}
|
||||
|
||||
    /// Returns the object storage configuration name; returns the provider name if it's empty.
    pub fn config_name(&self) -> &str {
|
||||
let name = match self {
|
||||
// file storage doesn't support name
|
||||
Self::File(_) => self.provider_name(),
|
||||
Self::S3(s3) => &s3.name,
|
||||
Self::Oss(oss) => &oss.name,
|
||||
Self::Azblob(az) => &az.name,
|
||||
Self::Gcs(gcs) => &gcs.name,
|
||||
};
|
||||
|
||||
if name.trim().is_empty() {
|
||||
return self.provider_name();
|
||||
}
|
||||
|
||||
name
|
||||
}
|
||||
}
|
||||
|
||||
/// Storage engine config
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
|
||||
#[serde(default)]
|
||||
@@ -112,252 +63,6 @@ impl Default for StorageConfig {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Default, Deserialize, Eq, PartialEq)]
|
||||
#[serde(default)]
|
||||
pub struct FileConfig {}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, Default, PartialEq)]
|
||||
#[serde(default)]
|
||||
pub struct ObjectStorageCacheConfig {
|
||||
/// The local file cache directory
|
||||
pub cache_path: Option<String>,
|
||||
/// The cache capacity in bytes
|
||||
pub cache_capacity: Option<ReadableSize>,
|
||||
}
|
||||
|
||||
/// The http client options to the storage.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
|
||||
#[serde(default)]
|
||||
pub struct HttpClientConfig {
|
||||
/// The maximum idle connection per host allowed in the pool.
|
||||
pub(crate) pool_max_idle_per_host: u32,
|
||||
|
||||
/// The timeout for only the connect phase of a http client.
|
||||
#[serde(with = "humantime_serde")]
|
||||
pub(crate) connect_timeout: Duration,
|
||||
|
||||
/// The total request timeout, applied from when the request starts connecting until the response body has finished.
|
||||
/// Also considered a total deadline.
|
||||
#[serde(with = "humantime_serde")]
|
||||
pub(crate) timeout: Duration,
|
||||
|
||||
/// The timeout for idle sockets being kept-alive.
|
||||
#[serde(with = "humantime_serde")]
|
||||
pub(crate) pool_idle_timeout: Duration,
|
||||
|
||||
/// Skip SSL certificate validation (insecure)
|
||||
pub skip_ssl_validation: bool,
|
||||
}
|
||||
|
||||
impl Default for HttpClientConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
pool_max_idle_per_host: 1024,
|
||||
connect_timeout: Duration::from_secs(30),
|
||||
timeout: Duration::from_secs(30),
|
||||
pool_idle_timeout: Duration::from_secs(90),
|
||||
skip_ssl_validation: false,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(default)]
pub struct S3Config {
    pub name: String,
    pub bucket: String,
    pub root: String,
    #[serde(skip_serializing)]
    pub access_key_id: SecretString,
    #[serde(skip_serializing)]
    pub secret_access_key: SecretString,
    pub endpoint: Option<String>,
    pub region: Option<String>,
    /// Enable virtual host style so that opendal sends API requests in virtual host style instead of path style.
    /// By default, opendal sends requests to https://s3.us-east-1.amazonaws.com/bucket_name;
    /// when enabled, it sends them to https://bucket_name.s3.us-east-1.amazonaws.com.
    pub enable_virtual_host_style: bool,
    #[serde(flatten)]
    pub cache: ObjectStorageCacheConfig,
    pub http_client: HttpClientConfig,
}
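// Sketch only (not part of this diff): an S3 object storage config that opts
// in to virtual-host-style requests. Bucket and region values are placeholders.
fn s3_virtual_host_config_sketch() -> S3Config {
    S3Config {
        bucket: "my-bucket".to_string(),
        region: Some("us-east-1".to_string()),
        // Requests go to https://my-bucket.s3.us-east-1.amazonaws.com instead of
        // https://s3.us-east-1.amazonaws.com/my-bucket.
        enable_virtual_host_style: true,
        ..Default::default()
    }
}
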
|
||||
|
||||
impl PartialEq for S3Config {
|
||||
fn eq(&self, other: &Self) -> bool {
|
||||
self.name == other.name
|
||||
&& self.bucket == other.bucket
|
||||
&& self.root == other.root
|
||||
&& self.access_key_id.expose_secret() == other.access_key_id.expose_secret()
|
||||
&& self.secret_access_key.expose_secret() == other.secret_access_key.expose_secret()
|
||||
&& self.endpoint == other.endpoint
|
||||
&& self.region == other.region
|
||||
&& self.enable_virtual_host_style == other.enable_virtual_host_style
|
||||
&& self.cache == other.cache
|
||||
&& self.http_client == other.http_client
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
#[serde(default)]
|
||||
pub struct OssConfig {
|
||||
pub name: String,
|
||||
pub bucket: String,
|
||||
pub root: String,
|
||||
#[serde(skip_serializing)]
|
||||
pub access_key_id: SecretString,
|
||||
#[serde(skip_serializing)]
|
||||
pub access_key_secret: SecretString,
|
||||
pub endpoint: String,
|
||||
#[serde(flatten)]
|
||||
pub cache: ObjectStorageCacheConfig,
|
||||
pub http_client: HttpClientConfig,
|
||||
}
|
||||
|
||||
impl PartialEq for OssConfig {
|
||||
fn eq(&self, other: &Self) -> bool {
|
||||
self.name == other.name
|
||||
&& self.bucket == other.bucket
|
||||
&& self.root == other.root
|
||||
&& self.access_key_id.expose_secret() == other.access_key_id.expose_secret()
|
||||
&& self.access_key_secret.expose_secret() == other.access_key_secret.expose_secret()
|
||||
&& self.endpoint == other.endpoint
|
||||
&& self.cache == other.cache
|
||||
&& self.http_client == other.http_client
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
#[serde(default)]
|
||||
pub struct AzblobConfig {
|
||||
pub name: String,
|
||||
pub container: String,
|
||||
pub root: String,
|
||||
#[serde(skip_serializing)]
|
||||
pub account_name: SecretString,
|
||||
#[serde(skip_serializing)]
|
||||
pub account_key: SecretString,
|
||||
pub endpoint: String,
|
||||
pub sas_token: Option<String>,
|
||||
#[serde(flatten)]
|
||||
pub cache: ObjectStorageCacheConfig,
|
||||
pub http_client: HttpClientConfig,
|
||||
}
|
||||
|
||||
impl PartialEq for AzblobConfig {
|
||||
fn eq(&self, other: &Self) -> bool {
|
||||
self.name == other.name
|
||||
&& self.container == other.container
|
||||
&& self.root == other.root
|
||||
&& self.account_name.expose_secret() == other.account_name.expose_secret()
|
||||
&& self.account_key.expose_secret() == other.account_key.expose_secret()
|
||||
&& self.endpoint == other.endpoint
|
||||
&& self.sas_token == other.sas_token
|
||||
&& self.cache == other.cache
|
||||
&& self.http_client == other.http_client
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
#[serde(default)]
|
||||
pub struct GcsConfig {
|
||||
pub name: String,
|
||||
pub root: String,
|
||||
pub bucket: String,
|
||||
pub scope: String,
|
||||
#[serde(skip_serializing)]
|
||||
pub credential_path: SecretString,
|
||||
#[serde(skip_serializing)]
|
||||
pub credential: SecretString,
|
||||
pub endpoint: String,
|
||||
#[serde(flatten)]
|
||||
pub cache: ObjectStorageCacheConfig,
|
||||
pub http_client: HttpClientConfig,
|
||||
}
|
||||
|
||||
impl PartialEq for GcsConfig {
|
||||
fn eq(&self, other: &Self) -> bool {
|
||||
self.name == other.name
|
||||
&& self.root == other.root
|
||||
&& self.bucket == other.bucket
|
||||
&& self.scope == other.scope
|
||||
&& self.credential_path.expose_secret() == other.credential_path.expose_secret()
|
||||
&& self.credential.expose_secret() == other.credential.expose_secret()
|
||||
&& self.endpoint == other.endpoint
|
||||
&& self.cache == other.cache
|
||||
&& self.http_client == other.http_client
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for S3Config {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
name: String::default(),
|
||||
bucket: String::default(),
|
||||
root: String::default(),
|
||||
access_key_id: SecretString::from(String::default()),
|
||||
secret_access_key: SecretString::from(String::default()),
|
||||
enable_virtual_host_style: false,
|
||||
endpoint: Option::default(),
|
||||
region: Option::default(),
|
||||
cache: ObjectStorageCacheConfig::default(),
|
||||
http_client: HttpClientConfig::default(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for OssConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
name: String::default(),
|
||||
bucket: String::default(),
|
||||
root: String::default(),
|
||||
access_key_id: SecretString::from(String::default()),
|
||||
access_key_secret: SecretString::from(String::default()),
|
||||
endpoint: String::default(),
|
||||
cache: ObjectStorageCacheConfig::default(),
|
||||
http_client: HttpClientConfig::default(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for AzblobConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
name: String::default(),
|
||||
container: String::default(),
|
||||
root: String::default(),
|
||||
account_name: SecretString::from(String::default()),
|
||||
account_key: SecretString::from(String::default()),
|
||||
endpoint: String::default(),
|
||||
sas_token: Option::default(),
|
||||
cache: ObjectStorageCacheConfig::default(),
|
||||
http_client: HttpClientConfig::default(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for GcsConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
name: String::default(),
|
||||
root: String::default(),
|
||||
bucket: String::default(),
|
||||
scope: String::default(),
|
||||
credential_path: SecretString::from(String::default()),
|
||||
credential: SecretString::from(String::default()),
|
||||
endpoint: String::default(),
|
||||
cache: ObjectStorageCacheConfig::default(),
|
||||
http_client: HttpClientConfig::default(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for ObjectStoreConfig {
|
||||
fn default() -> Self {
|
||||
ObjectStoreConfig::File(FileConfig {})
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)]
|
||||
#[serde(default)]
|
||||
pub struct DatanodeOptions {
|
||||
@@ -467,37 +172,6 @@ mod tests {
        let _parsed: DatanodeOptions = toml::from_str(&toml_string).unwrap();
    }

    #[test]
    fn test_config_name() {
        let object_store_config = ObjectStoreConfig::default();
        assert_eq!("File", object_store_config.config_name());

        let s3_config = ObjectStoreConfig::S3(S3Config::default());
        assert_eq!("S3", s3_config.config_name());
        assert_eq!("S3", s3_config.provider_name());

        let s3_config = ObjectStoreConfig::S3(S3Config {
            name: "test".to_string(),
            ..Default::default()
        });
        assert_eq!("test", s3_config.config_name());
        assert_eq!("S3", s3_config.provider_name());
    }

    #[test]
    fn test_is_object_storage() {
        let store = ObjectStoreConfig::default();
        assert!(!store.is_object_storage());
        let s3_config = ObjectStoreConfig::S3(S3Config::default());
        assert!(s3_config.is_object_storage());
        let oss_config = ObjectStoreConfig::Oss(OssConfig::default());
        assert!(oss_config.is_object_storage());
        let gcs_config = ObjectStoreConfig::Gcs(GcsConfig::default());
        assert!(gcs_config.is_object_storage());
        let azblob_config = ObjectStoreConfig::Azblob(AzblobConfig::default());
        assert!(azblob_config.is_object_storage());
    }

    #[test]
    fn test_secstr() {
        let toml_str = r#"
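The two tests above pin down how `config_name` and `provider_name` are expected to behave once the storage config types move into the object-store crate: the provider name is fixed per variant, and the config name falls back to it when no custom name is set. A minimal sketch of methods that would satisfy those assertions (variant and field names taken from this diff; illustrative only, not the code in this change):

    impl ObjectStoreConfig {
        /// The provider name is fixed per storage backend variant.
        pub fn provider_name(&self) -> &'static str {
            match self {
                Self::File(_) => "File",
                Self::S3(_) => "S3",
                Self::Oss(_) => "Oss",
                Self::Azblob(_) => "Azblob",
                Self::Gcs(_) => "Gcs",
            }
        }

        /// Falls back to the provider name when no custom `name` was configured.
        pub fn config_name(&self) -> &str {
            let name = match self {
                Self::File(_) => "",
                Self::S3(c) => c.name.as_str(),
                Self::Oss(c) => c.name.as_str(),
                Self::Azblob(c) => c.name.as_str(),
                Self::Gcs(c) => c.name.as_str(),
            };
            if name.is_empty() {
                self.provider_name()
            } else {
                name
            }
        }
    }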
@@ -142,14 +142,6 @@ pub enum Error {
        source: Box<log_store::error::Error>,
    },

    #[snafu(display("Failed to init backend"))]
    InitBackend {
        #[snafu(source)]
        error: object_store::Error,
        #[snafu(implicit)]
        location: Location,
    },

    #[snafu(display("Invalid SQL, error: {}", msg))]
    InvalidSql { msg: String },

@@ -387,6 +379,29 @@ pub enum Error {
        #[snafu(implicit)]
        location: Location,
    },

    #[snafu(display("Failed to serialize json"))]
    SerializeJson {
        #[snafu(source)]
        error: serde_json::Error,
        #[snafu(implicit)]
        location: Location,
    },

    #[snafu(display("Failed object store operation"))]
    ObjectStore {
        source: object_store::error::Error,
        #[snafu(implicit)]
        location: Location,
    },

    #[snafu(display("Failed to build cache store"))]
    BuildCacheStore {
        #[snafu(source)]
        error: object_store::Error,
        #[snafu(implicit)]
        location: Location,
    },
}

pub type Result<T> = std::result::Result<T, Error>;
@@ -439,8 +454,6 @@ impl ErrorExt for Error {

            StartServer { source, .. } | ShutdownServer { source, .. } => source.status_code(),

            InitBackend { .. } => StatusCode::StorageUnavailable,

            OpenLogStore { source, .. } => source.status_code(),
            MetaClientInit { source, .. } => source.status_code(),
            UnsupportedOutput { .. } => StatusCode::Unsupported,
@@ -457,6 +470,10 @@ impl ErrorExt for Error {
                StatusCode::RegionBusy
            }
            MissingCache { .. } => StatusCode::Internal,
            SerializeJson { .. } => StatusCode::Internal,

            ObjectStore { source, .. } => source.status_code(),
            BuildCacheStore { .. } => StatusCode::StorageUnavailable,
        }
    }
@@ -278,7 +278,7 @@ mod tests {
        let mut region_server = mock_region_server();
        let heartbeat_handler = RegionHeartbeatResponseHandler::new(region_server.clone());

        let mut engine_env = TestEnv::with_prefix("close-region").await;
        let mut engine_env = TestEnv::with_prefix("close-region");
        let engine = engine_env.create_engine(MitoConfig::default()).await;
        region_server.register_engine(Arc::new(engine));
        let region_id = RegionId::new(1024, 1);
@@ -326,7 +326,7 @@ mod tests {
        let mut region_server = mock_region_server();
        let heartbeat_handler = RegionHeartbeatResponseHandler::new(region_server.clone());

        let mut engine_env = TestEnv::with_prefix("open-region").await;
        let mut engine_env = TestEnv::with_prefix("open-region");
        let engine = engine_env.create_engine(MitoConfig::default()).await;
        region_server.register_engine(Arc::new(engine));
        let region_id = RegionId::new(1024, 1);
@@ -374,7 +374,7 @@ mod tests {
        let mut region_server = mock_region_server();
        let heartbeat_handler = RegionHeartbeatResponseHandler::new(region_server.clone());

        let mut engine_env = TestEnv::with_prefix("open-not-exists-region").await;
        let mut engine_env = TestEnv::with_prefix("open-not-exists-region");
        let engine = engine_env.create_engine(MitoConfig::default()).await;
        region_server.register_engine(Arc::new(engine));
        let region_id = RegionId::new(1024, 1);
@@ -406,7 +406,7 @@ mod tests {
        let mut region_server = mock_region_server();
        let heartbeat_handler = RegionHeartbeatResponseHandler::new(region_server.clone());

        let mut engine_env = TestEnv::with_prefix("downgrade-region").await;
        let mut engine_env = TestEnv::with_prefix("downgrade-region");
        let engine = engine_env.create_engine(MitoConfig::default()).await;
        region_server.register_engine(Arc::new(engine));
        let region_id = RegionId::new(1024, 1);
@@ -20,12 +20,14 @@ use std::time::Duration;

use api::region::RegionResponse;
use api::v1::region::sync_request::ManifestInfo;
use api::v1::region::{region_request, RegionResponse as RegionResponseV1, SyncRequest};
use api::v1::region::{
    region_request, ListMetadataRequest, RegionResponse as RegionResponseV1, SyncRequest,
};
use api::v1::{ResponseHeader, Status};
use arrow_flight::{FlightData, Ticket};
use async_trait::async_trait;
use bytes::Bytes;
use common_error::ext::BoxedError;
use common_error::ext::{BoxedError, ErrorExt};
use common_error::status_code::StatusCode;
use common_query::request::QueryRequest;
use common_query::OutputData;
@@ -47,6 +49,7 @@ pub use query::dummy_catalog::{
    DummyCatalogList, DummyTableProviderFactory, TableProviderFactoryRef,
};
use query::QueryEngineRef;
use serde_json;
use servers::error::{self as servers_error, ExecuteGrpcRequestSnafu, Result as ServerResult};
use servers::grpc::flight::{FlightCraft, FlightRecordBatchStream, TonicStream};
use servers::grpc::region_server::RegionServerHandler;
@@ -71,10 +74,10 @@ use tonic::{Request, Response, Result as TonicResult};
use crate::error::{
    self, BuildRegionRequestsSnafu, ConcurrentQueryLimiterClosedSnafu,
    ConcurrentQueryLimiterTimeoutSnafu, DataFusionSnafu, DecodeLogicalPlanSnafu,
    ExecuteLogicalPlanSnafu, FindLogicalRegionsSnafu, HandleBatchDdlRequestSnafu,
    HandleBatchOpenRequestSnafu, HandleRegionRequestSnafu, NewPlanDecoderSnafu,
    RegionEngineNotFoundSnafu, RegionNotFoundSnafu, RegionNotReadySnafu, Result,
    StopRegionEngineSnafu, UnexpectedSnafu, UnsupportedOutputSnafu,
    ExecuteLogicalPlanSnafu, FindLogicalRegionsSnafu, GetRegionMetadataSnafu,
    HandleBatchDdlRequestSnafu, HandleBatchOpenRequestSnafu, HandleRegionRequestSnafu,
    NewPlanDecoderSnafu, RegionEngineNotFoundSnafu, RegionNotFoundSnafu, RegionNotReadySnafu,
    Result, SerializeJsonSnafu, StopRegionEngineSnafu, UnexpectedSnafu, UnsupportedOutputSnafu,
};
use crate::event_listener::RegionServerEventListenerRef;

@@ -138,12 +141,12 @@ impl RegionServer {

    /// Finds the region's engine by its id. If the region is not ready, returns `None`.
    pub fn find_engine(&self, region_id: RegionId) -> Result<Option<RegionEngineRef>> {
        self.inner
            .get_engine(region_id, &RegionChange::None)
            .map(|x| match x {
                CurrentEngine::Engine(engine) => Some(engine),
                CurrentEngine::EarlyReturn(_) => None,
            })
        match self.inner.get_engine(region_id, &RegionChange::None) {
            Ok(CurrentEngine::Engine(engine)) => Ok(Some(engine)),
            Ok(CurrentEngine::EarlyReturn(_)) => Ok(None),
            Err(error::Error::RegionNotFound { .. }) => Ok(None),
            Err(err) => Err(err),
        }
    }

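With `RegionNotFound` now mapped to `Ok(None)`, a caller can treat "region unknown on this datanode" as an ordinary miss instead of a hard error. A hypothetical call site (illustrative only):

    // Skip regions this datanode does not host instead of failing the whole request.
    if let Some(engine) = region_server.find_engine(region_id)? {
        // The engine is registered and ready; it is now safe to ask it for metadata.
        let _metadata = engine.get_metadata(region_id).await;
    }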
    #[tracing::instrument(skip_all)]
@@ -412,6 +415,7 @@ impl RegionServer {
        Ok(RegionResponse {
            affected_rows,
            extensions,
            metadata: Vec::new(),
        })
    }

@@ -441,6 +445,7 @@ impl RegionServer {
        Ok(RegionResponse {
            affected_rows,
            extensions,
            metadata: Vec::new(),
        })
    }

@@ -473,6 +478,48 @@ impl RegionServer {
            .map(|_| RegionResponse::new(AffectedRows::default()))
    }

    /// Handles the ListMetadata request and retrieves metadata for specified regions.
    ///
    /// Returns the results as a JSON-serialized list in the [RegionResponse]. It serializes
    /// non-existing regions as `null`.
    #[tracing::instrument(skip_all)]
    async fn handle_list_metadata_request(
        &self,
        request: &ListMetadataRequest,
    ) -> Result<RegionResponse> {
        let mut region_metadatas = Vec::new();
        // Collect metadata for each region
        for region_id in &request.region_ids {
            let region_id = RegionId::from_u64(*region_id);
            // Get the engine.
            let Some(engine) = self.find_engine(region_id)? else {
                region_metadatas.push(None);
                continue;
            };

            match engine.get_metadata(region_id).await {
                Ok(metadata) => region_metadatas.push(Some(metadata)),
                Err(err) => {
                    if err.status_code() == StatusCode::RegionNotFound {
                        region_metadatas.push(None);
                    } else {
                        Err(err).with_context(|_| GetRegionMetadataSnafu {
                            engine: engine.name(),
                            region_id,
                        })?;
                    }
                }
            }
        }

        // Serialize metadata to JSON
        let json_result = serde_json::to_vec(&region_metadatas).context(SerializeJsonSnafu)?;

        let response = RegionResponse::from_metadata(json_result);

        Ok(response)
    }
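On the receiving side, the JSON payload carried in `RegionResponse::metadata` can be decoded back into the same shape the handler produced; the unit tests for this change do exactly that. A minimal sketch, where `response` is assumed to come from `handle_list_metadata_request`:

    // Entries are `None` for regions the datanode does not know about.
    let decoded: Vec<Option<RegionMetadata>> =
        serde_json::from_slice(&response.metadata).expect("datanode returns valid JSON");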

    /// Sync region manifest and registers new opened logical regions.
    pub async fn sync_region(
        &self,
@@ -504,6 +551,10 @@ impl RegionServerHandler for RegionServer {
            region_request::Body::Sync(sync_request) => {
                self.handle_sync_region_request(sync_request).await
            }
            region_request::Body::ListMetadata(list_metadata_request) => {
                self.handle_list_metadata_request(list_metadata_request)
                    .await
            }
            _ => self.handle_requests_in_serial(request).await,
        }
        .map_err(BoxedError::new)
@@ -518,6 +569,7 @@ impl RegionServerHandler for RegionServer {
            }),
            affected_rows: response.affected_rows as _,
            extensions: response.extensions,
            metadata: response.metadata,
        })
    }
}
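For context, the new gRPC body variant is what a frontend or metasrv side would populate to ask a datanode for metadata in bulk. A rough sketch of building such a request, using the field and type names shown above (illustrative only):

    // Region ids travel as packed u64 values.
    let request = ListMetadataRequest {
        region_ids: vec![
            RegionId::new(1024, 1).as_u64(),
            RegionId::new(1024, 2).as_u64(),
        ],
    };
    let body = region_request::Body::ListMetadata(request);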
@@ -897,6 +949,7 @@ impl RegionServerInner {
                Ok(RegionResponse {
                    affected_rows: result.affected_rows,
                    extensions: result.extensions,
                    metadata: Vec::new(),
                })
            }
            Err(err) => {
@@ -967,6 +1020,7 @@ impl RegionServerInner {
                Ok(RegionResponse {
                    affected_rows: result.affected_rows,
                    extensions: result.extensions,
                    metadata: Vec::new(),
                })
            }
            Err(err) => {
@@ -1242,8 +1296,11 @@ mod tests {

    use std::assert_matches::assert_matches;

    use api::v1::SemanticType;
    use common_error::ext::ErrorExt;
    use datatypes::prelude::ConcreteDataType;
    use mito2::test_util::CreateRequestBuilder;
    use store_api::metadata::{ColumnMetadata, RegionMetadata, RegionMetadataBuilder};
    use store_api::region_engine::RegionEngine;
    use store_api::region_request::{RegionDropRequest, RegionOpenRequest, RegionTruncateRequest};
    use store_api::storage::RegionId;
@@ -1605,4 +1662,175 @@ mod tests {
        let forth_query = p.acquire().await;
        assert!(forth_query.is_ok());
    }

    fn mock_region_metadata(region_id: RegionId) -> RegionMetadata {
        let mut metadata_builder = RegionMetadataBuilder::new(region_id);
        metadata_builder.push_column_metadata(ColumnMetadata {
            column_schema: datatypes::schema::ColumnSchema::new(
                "timestamp",
                ConcreteDataType::timestamp_nanosecond_datatype(),
                false,
            ),
            semantic_type: SemanticType::Timestamp,
            column_id: 0,
        });
        metadata_builder.push_column_metadata(ColumnMetadata {
            column_schema: datatypes::schema::ColumnSchema::new(
                "file",
                ConcreteDataType::string_datatype(),
                true,
            ),
            semantic_type: SemanticType::Tag,
            column_id: 1,
        });
        metadata_builder.push_column_metadata(ColumnMetadata {
            column_schema: datatypes::schema::ColumnSchema::new(
                "message",
                ConcreteDataType::string_datatype(),
                true,
            ),
            semantic_type: SemanticType::Field,
            column_id: 2,
        });
        metadata_builder.primary_key(vec![1]);
        metadata_builder.build().unwrap()
    }
#[tokio::test]
|
||||
async fn test_handle_list_metadata_request() {
|
||||
common_telemetry::init_default_ut_logging();
|
||||
|
||||
let mut mock_region_server = mock_region_server();
|
||||
let region_id_1 = RegionId::new(1, 0);
|
||||
let region_id_2 = RegionId::new(2, 0);
|
||||
|
||||
let metadata_1 = mock_region_metadata(region_id_1);
|
||||
let metadata_2 = mock_region_metadata(region_id_2);
|
||||
let metadatas = vec![Some(metadata_1.clone()), Some(metadata_2.clone())];
|
||||
|
||||
let metadata_1 = Arc::new(metadata_1);
|
||||
let metadata_2 = Arc::new(metadata_2);
|
||||
let (engine, _) = MockRegionEngine::with_metadata_mock_fn(
|
||||
MITO_ENGINE_NAME,
|
||||
Box::new(move |region_id| {
|
||||
if region_id == region_id_1 {
|
||||
Ok(metadata_1.clone())
|
||||
} else if region_id == region_id_2 {
|
||||
Ok(metadata_2.clone())
|
||||
} else {
|
||||
error::RegionNotFoundSnafu { region_id }.fail()
|
||||
}
|
||||
}),
|
||||
);
|
||||
|
||||
mock_region_server.register_engine(engine.clone());
|
||||
mock_region_server
|
||||
.inner
|
||||
.region_map
|
||||
.insert(region_id_1, RegionEngineWithStatus::Ready(engine.clone()));
|
||||
mock_region_server
|
||||
.inner
|
||||
.region_map
|
||||
.insert(region_id_2, RegionEngineWithStatus::Ready(engine.clone()));
|
||||
|
||||
// All regions exist.
|
||||
let list_metadata_request = ListMetadataRequest {
|
||||
region_ids: vec![region_id_1.as_u64(), region_id_2.as_u64()],
|
||||
};
|
||||
let response = mock_region_server
|
||||
.handle_list_metadata_request(&list_metadata_request)
|
||||
.await
|
||||
.unwrap();
|
||||
let decoded_metadata: Vec<Option<RegionMetadata>> =
|
||||
serde_json::from_slice(&response.metadata).unwrap();
|
||||
assert_eq!(metadatas, decoded_metadata);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_handle_list_metadata_not_found() {
|
||||
common_telemetry::init_default_ut_logging();
|
||||
|
||||
let mut mock_region_server = mock_region_server();
|
||||
let region_id_1 = RegionId::new(1, 0);
|
||||
let region_id_2 = RegionId::new(2, 0);
|
||||
|
||||
let metadata_1 = mock_region_metadata(region_id_1);
|
||||
let metadatas = vec![Some(metadata_1.clone()), None];
|
||||
|
||||
let metadata_1 = Arc::new(metadata_1);
|
||||
let (engine, _) = MockRegionEngine::with_metadata_mock_fn(
|
||||
MITO_ENGINE_NAME,
|
||||
Box::new(move |region_id| {
|
||||
if region_id == region_id_1 {
|
||||
Ok(metadata_1.clone())
|
||||
} else {
|
||||
error::RegionNotFoundSnafu { region_id }.fail()
|
||||
}
|
||||
}),
|
||||
);
|
||||
|
||||
mock_region_server.register_engine(engine.clone());
|
||||
mock_region_server
|
||||
.inner
|
||||
.region_map
|
||||
.insert(region_id_1, RegionEngineWithStatus::Ready(engine.clone()));
|
||||
|
||||
// Not in region map.
|
||||
let list_metadata_request = ListMetadataRequest {
|
||||
region_ids: vec![region_id_1.as_u64(), region_id_2.as_u64()],
|
||||
};
|
||||
let response = mock_region_server
|
||||
.handle_list_metadata_request(&list_metadata_request)
|
||||
.await
|
||||
.unwrap();
|
||||
let decoded_metadata: Vec<Option<RegionMetadata>> =
|
||||
serde_json::from_slice(&response.metadata).unwrap();
|
||||
assert_eq!(metadatas, decoded_metadata);
|
||||
|
||||
// Not in region engine.
|
||||
mock_region_server
|
||||
.inner
|
||||
.region_map
|
||||
.insert(region_id_2, RegionEngineWithStatus::Ready(engine.clone()));
|
||||
let response = mock_region_server
|
||||
.handle_list_metadata_request(&list_metadata_request)
|
||||
.await
|
||||
.unwrap();
|
||||
let decoded_metadata: Vec<Option<RegionMetadata>> =
|
||||
serde_json::from_slice(&response.metadata).unwrap();
|
||||
assert_eq!(metadatas, decoded_metadata);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_handle_list_metadata_failed() {
|
||||
common_telemetry::init_default_ut_logging();
|
||||
|
||||
let mut mock_region_server = mock_region_server();
|
||||
let region_id_1 = RegionId::new(1, 0);
|
||||
|
||||
let (engine, _) = MockRegionEngine::with_metadata_mock_fn(
|
||||
MITO_ENGINE_NAME,
|
||||
Box::new(move |region_id| {
|
||||
error::UnexpectedSnafu {
|
||||
violated: format!("Failed to get region {region_id}"),
|
||||
}
|
||||
.fail()
|
||||
}),
|
||||
);
|
||||
|
||||
mock_region_server.register_engine(engine.clone());
|
||||
mock_region_server
|
||||
.inner
|
||||
.region_map
|
||||
.insert(region_id_1, RegionEngineWithStatus::Ready(engine.clone()));
|
||||
|
||||
// Failed to get.
|
||||
let list_metadata_request = ListMetadataRequest {
|
||||
region_ids: vec![region_id_1.as_u64()],
|
||||
};
|
||||
mock_region_server
|
||||
.handle_list_metadata_request(&list_metadata_request)
|
||||
.await
|
||||
.unwrap_err();
|
||||
}
|
||||
}
|
||||
|
||||
@@ -14,45 +14,22 @@

//! object storage utilities

mod azblob;
pub mod fs;
mod gcs;
mod oss;
mod s3;
use std::path;
use std::path::Path;
use std::sync::Arc;
use std::time::Duration;

use common_telemetry::{info, warn};
use mito2::access_layer::{ATOMIC_WRITE_DIR, OLD_ATOMIC_WRITE_DIR};
use object_store::factory::new_raw_object_store;
use object_store::layers::{LruCacheLayer, RetryInterceptor, RetryLayer};
use object_store::services::Fs;
use object_store::util::{join_dir, normalize_dir, with_instrument_layers};
use object_store::{Access, Error, HttpClient, ObjectStore, ObjectStoreBuilder};
use object_store::util::{clean_temp_dir, join_dir, with_instrument_layers};
use object_store::{
    Access, Error, ObjectStore, ObjectStoreBuilder, ATOMIC_WRITE_DIR, OLD_ATOMIC_WRITE_DIR,
};
use snafu::prelude::*;

use crate::config::{HttpClientConfig, ObjectStoreConfig, DEFAULT_OBJECT_STORE_CACHE_SIZE};
use crate::error::{self, BuildHttpClientSnafu, CreateDirSnafu, Result};

pub(crate) async fn new_raw_object_store(
    store: &ObjectStoreConfig,
    data_home: &str,
) -> Result<ObjectStore> {
    let data_home = normalize_dir(data_home);
    let object_store = match store {
        ObjectStoreConfig::File(file_config) => {
            fs::new_fs_object_store(&data_home, file_config).await
        }
        ObjectStoreConfig::S3(s3_config) => s3::new_s3_object_store(s3_config).await,
        ObjectStoreConfig::Oss(oss_config) => oss::new_oss_object_store(oss_config).await,
        ObjectStoreConfig::Azblob(azblob_config) => {
            azblob::new_azblob_object_store(azblob_config).await
        }
        ObjectStoreConfig::Gcs(gcs_config) => gcs::new_gcs_object_store(gcs_config).await,
    }?;
    Ok(object_store)
}
use crate::config::{ObjectStoreConfig, DEFAULT_OBJECT_STORE_CACHE_SIZE};
use crate::error::{self, CreateDirSnafu, Result};

fn with_retry_layers(object_store: ObjectStore) -> ObjectStore {
    object_store.layer(
@@ -66,7 +43,9 @@ pub(crate) async fn new_object_store_without_cache(
    store: &ObjectStoreConfig,
    data_home: &str,
) -> Result<ObjectStore> {
    let object_store = new_raw_object_store(store, data_home).await?;
    let object_store = new_raw_object_store(store, data_home)
        .await
        .context(error::ObjectStoreSnafu)?;
    // Enable retry layer and cache layer for non-fs object storages
    let object_store = if store.is_object_storage() {
        // Adds retry layer
@@ -83,7 +62,9 @@ pub(crate) async fn new_object_store(
    store: ObjectStoreConfig,
    data_home: &str,
) -> Result<ObjectStore> {
    let object_store = new_raw_object_store(&store, data_home).await?;
    let object_store = new_raw_object_store(&store, data_home)
        .await
        .context(error::ObjectStoreSnafu)?;
    // Enable retry layer and cache layer for non-fs object storages
    let object_store = if store.is_object_storage() {
    let object_store = if let Some(cache_layer) = build_cache_layer(&store, data_home).await? {
@@ -170,20 +151,20 @@ async fn build_cache_layer(
        && !path.trim().is_empty()
    {
        let atomic_temp_dir = join_dir(path, ATOMIC_WRITE_DIR);
        clean_temp_dir(&atomic_temp_dir)?;
        clean_temp_dir(&atomic_temp_dir).context(error::ObjectStoreSnafu)?;

        // Compatible code. Remove this after a major release.
        let old_atomic_temp_dir = join_dir(path, OLD_ATOMIC_WRITE_DIR);
        clean_temp_dir(&old_atomic_temp_dir)?;
        clean_temp_dir(&old_atomic_temp_dir).context(error::ObjectStoreSnafu)?;

        let cache_store = Fs::default()
            .root(path)
            .atomic_write_dir(&atomic_temp_dir)
            .build()
            .context(error::InitBackendSnafu)?;
            .context(error::BuildCacheStoreSnafu)?;

        let cache_layer = LruCacheLayer::new(Arc::new(cache_store), cache_capacity.0 as usize)
            .context(error::InitBackendSnafu)?;
            .context(error::BuildCacheStoreSnafu)?;
        cache_layer.recover_cache(false).await;
        info!(
            "Enabled local object storage cache, path: {}, capacity: {}.",
@@ -196,31 +177,6 @@ async fn build_cache_layer(
    }
}

pub(crate) fn clean_temp_dir(dir: &str) -> Result<()> {
    if path::Path::new(&dir).exists() {
        info!("Begin to clean temp storage directory: {}", dir);
        std::fs::remove_dir_all(dir).context(error::RemoveDirSnafu { dir })?;
        info!("Cleaned temp storage directory: {}", dir);
    }

    Ok(())
}

pub(crate) fn build_http_client(config: &HttpClientConfig) -> Result<HttpClient> {
    if config.skip_ssl_validation {
        common_telemetry::warn!("Skipping SSL validation for object storage HTTP client. Please ensure the environment is trusted.");
    }

    let client = reqwest::ClientBuilder::new()
        .pool_max_idle_per_host(config.pool_max_idle_per_host as usize)
        .connect_timeout(config.connect_timeout)
        .pool_idle_timeout(config.pool_idle_timeout)
        .timeout(config.timeout)
        .danger_accept_invalid_certs(config.skip_ssl_validation)
        .build()
        .context(BuildHttpClientSnafu)?;
    Ok(HttpClient::with(client))
}
struct PrintDetailedError;

// PrintDetailedError is a retry interceptor that prints error in Debug format in retrying.
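Taken together, the factory path above means a datanode-side component only needs a config value and a data home to obtain a fully layered store (raw backend, then optional LRU cache, retry, and instrumentation layers). A hypothetical call, with the bucket name and path made up for illustration:

    // Builds the raw backend from config, then applies the cache/retry layers as configured.
    let config = ObjectStoreConfig::S3(S3Config {
        bucket: "example-bucket".to_string(),
        ..Default::default()
    });
    let object_store = new_object_store(config, "/var/lib/greptimedb").await?;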
@@ -1,50 +0,0 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use common_base::secrets::ExposeSecret;
|
||||
use common_telemetry::info;
|
||||
use object_store::services::Azblob;
|
||||
use object_store::{util, ObjectStore};
|
||||
use snafu::prelude::*;
|
||||
|
||||
use crate::config::AzblobConfig;
|
||||
use crate::error::{self, Result};
|
||||
use crate::store::build_http_client;
|
||||
|
||||
pub(crate) async fn new_azblob_object_store(azblob_config: &AzblobConfig) -> Result<ObjectStore> {
|
||||
let root = util::normalize_dir(&azblob_config.root);
|
||||
|
||||
info!(
|
||||
"The azure storage container is: {}, root is: {}",
|
||||
azblob_config.container, &root
|
||||
);
|
||||
|
||||
let client = build_http_client(&azblob_config.http_client)?;
|
||||
|
||||
let mut builder = Azblob::default()
|
||||
.root(&root)
|
||||
.container(&azblob_config.container)
|
||||
.endpoint(&azblob_config.endpoint)
|
||||
.account_name(azblob_config.account_name.expose_secret())
|
||||
.account_key(azblob_config.account_key.expose_secret())
|
||||
.http_client(client);
|
||||
|
||||
if let Some(token) = &azblob_config.sas_token {
|
||||
builder = builder.sas_token(token);
|
||||
};
|
||||
|
||||
Ok(ObjectStore::new(builder)
|
||||
.context(error::InitBackendSnafu)?
|
||||
.finish())
|
||||
}
|
||||
@@ -1,53 +0,0 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::{fs, path};
|
||||
|
||||
use common_telemetry::info;
|
||||
use mito2::access_layer::{ATOMIC_WRITE_DIR, OLD_ATOMIC_WRITE_DIR};
|
||||
use object_store::services::Fs;
|
||||
use object_store::util::join_dir;
|
||||
use object_store::ObjectStore;
|
||||
use snafu::prelude::*;
|
||||
|
||||
use crate::config::FileConfig;
|
||||
use crate::error::{self, Result};
|
||||
use crate::store;
|
||||
|
||||
/// A helper function to create a file system object store.
|
||||
pub async fn new_fs_object_store(
|
||||
data_home: &str,
|
||||
_file_config: &FileConfig,
|
||||
) -> Result<ObjectStore> {
|
||||
fs::create_dir_all(path::Path::new(&data_home))
|
||||
.context(error::CreateDirSnafu { dir: data_home })?;
|
||||
info!("The file storage home is: {}", data_home);
|
||||
|
||||
let atomic_write_dir = join_dir(data_home, ATOMIC_WRITE_DIR);
|
||||
store::clean_temp_dir(&atomic_write_dir)?;
|
||||
|
||||
// Compatible code. Remove this after a major release.
|
||||
let old_atomic_temp_dir = join_dir(data_home, OLD_ATOMIC_WRITE_DIR);
|
||||
store::clean_temp_dir(&old_atomic_temp_dir)?;
|
||||
|
||||
let builder = Fs::default()
|
||||
.root(data_home)
|
||||
.atomic_write_dir(&atomic_write_dir);
|
||||
|
||||
let object_store = ObjectStore::new(builder)
|
||||
.context(error::InitBackendSnafu)?
|
||||
.finish();
|
||||
|
||||
Ok(object_store)
|
||||
}
|
||||
@@ -1,46 +0,0 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use common_base::secrets::ExposeSecret;
|
||||
use common_telemetry::info;
|
||||
use object_store::services::Gcs;
|
||||
use object_store::{util, ObjectStore};
|
||||
use snafu::prelude::*;
|
||||
|
||||
use crate::config::GcsConfig;
|
||||
use crate::error::{self, Result};
|
||||
use crate::store::build_http_client;
|
||||
|
||||
pub(crate) async fn new_gcs_object_store(gcs_config: &GcsConfig) -> Result<ObjectStore> {
|
||||
let root = util::normalize_dir(&gcs_config.root);
|
||||
info!(
|
||||
"The gcs storage bucket is: {}, root is: {}",
|
||||
gcs_config.bucket, &root
|
||||
);
|
||||
|
||||
let client = build_http_client(&gcs_config.http_client);
|
||||
|
||||
let builder = Gcs::default()
|
||||
.root(&root)
|
||||
.bucket(&gcs_config.bucket)
|
||||
.scope(&gcs_config.scope)
|
||||
.credential_path(gcs_config.credential_path.expose_secret())
|
||||
.credential(gcs_config.credential.expose_secret())
|
||||
.endpoint(&gcs_config.endpoint)
|
||||
.http_client(client?);
|
||||
|
||||
Ok(ObjectStore::new(builder)
|
||||
.context(error::InitBackendSnafu)?
|
||||
.finish())
|
||||
}
|
||||
@@ -1,45 +0,0 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use common_base::secrets::ExposeSecret;
|
||||
use common_telemetry::info;
|
||||
use object_store::services::Oss;
|
||||
use object_store::{util, ObjectStore};
|
||||
use snafu::prelude::*;
|
||||
|
||||
use crate::config::OssConfig;
|
||||
use crate::error::{self, Result};
|
||||
use crate::store::build_http_client;
|
||||
|
||||
pub(crate) async fn new_oss_object_store(oss_config: &OssConfig) -> Result<ObjectStore> {
|
||||
let root = util::normalize_dir(&oss_config.root);
|
||||
info!(
|
||||
"The oss storage bucket is: {}, root is: {}",
|
||||
oss_config.bucket, &root
|
||||
);
|
||||
|
||||
let client = build_http_client(&oss_config.http_client)?;
|
||||
|
||||
let builder = Oss::default()
|
||||
.root(&root)
|
||||
.bucket(&oss_config.bucket)
|
||||
.endpoint(&oss_config.endpoint)
|
||||
.access_key_id(oss_config.access_key_id.expose_secret())
|
||||
.access_key_secret(oss_config.access_key_secret.expose_secret())
|
||||
.http_client(client);
|
||||
|
||||
Ok(ObjectStore::new(builder)
|
||||
.context(error::InitBackendSnafu)?
|
||||
.finish())
|
||||
}
|
||||
@@ -1,55 +0,0 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use common_base::secrets::ExposeSecret;
|
||||
use common_telemetry::info;
|
||||
use object_store::services::S3;
|
||||
use object_store::{util, ObjectStore};
|
||||
use snafu::prelude::*;
|
||||
|
||||
use crate::config::S3Config;
|
||||
use crate::error::{self, Result};
|
||||
use crate::store::build_http_client;
|
||||
|
||||
pub(crate) async fn new_s3_object_store(s3_config: &S3Config) -> Result<ObjectStore> {
|
||||
let root = util::normalize_dir(&s3_config.root);
|
||||
|
||||
info!(
|
||||
"The s3 storage bucket is: {}, root is: {}",
|
||||
s3_config.bucket, &root
|
||||
);
|
||||
|
||||
let client = build_http_client(&s3_config.http_client)?;
|
||||
|
||||
let mut builder = S3::default()
|
||||
.root(&root)
|
||||
.bucket(&s3_config.bucket)
|
||||
.access_key_id(s3_config.access_key_id.expose_secret())
|
||||
.secret_access_key(s3_config.secret_access_key.expose_secret())
|
||||
.http_client(client);
|
||||
|
||||
if s3_config.endpoint.is_some() {
|
||||
builder = builder.endpoint(s3_config.endpoint.as_ref().unwrap());
|
||||
}
|
||||
if s3_config.region.is_some() {
|
||||
builder = builder.region(s3_config.region.as_ref().unwrap());
|
||||
}
|
||||
if s3_config.enable_virtual_host_style {
|
||||
builder = builder.enable_virtual_host_style();
|
||||
}
|
||||
|
||||
Ok(ObjectStore::new(builder)
|
||||
.context(error::InitBackendSnafu)?
|
||||
.finish())
|
||||
}
|
||||
@@ -108,11 +108,15 @@ pub type MockRequestHandler =
pub type MockSetReadonlyGracefullyHandler =
    Box<dyn Fn(RegionId) -> Result<SetRegionRoleStateResponse, Error> + Send + Sync>;

pub type MockGetMetadataHandler =
    Box<dyn Fn(RegionId) -> Result<RegionMetadataRef, Error> + Send + Sync>;

pub struct MockRegionEngine {
    sender: Sender<(RegionId, RegionRequest)>,
    pub(crate) handle_request_delay: Option<Duration>,
    pub(crate) handle_request_mock_fn: Option<MockRequestHandler>,
    pub(crate) handle_set_readonly_gracefully_mock_fn: Option<MockSetReadonlyGracefullyHandler>,
    pub(crate) handle_get_metadata_mock_fn: Option<MockGetMetadataHandler>,
    pub(crate) mock_role: Option<Option<RegionRole>>,
    engine: String,
}
@@ -127,6 +131,7 @@ impl MockRegionEngine {
                sender: tx,
                handle_request_mock_fn: None,
                handle_set_readonly_gracefully_mock_fn: None,
                handle_get_metadata_mock_fn: None,
                mock_role: None,
                engine: engine.to_string(),
            }),
@@ -146,6 +151,27 @@ impl MockRegionEngine {
                sender: tx,
                handle_request_mock_fn: Some(mock_fn),
                handle_set_readonly_gracefully_mock_fn: None,
                handle_get_metadata_mock_fn: None,
                mock_role: None,
                engine: engine.to_string(),
            }),
            rx,
        )
    }

    pub fn with_metadata_mock_fn(
        engine: &str,
        mock_fn: MockGetMetadataHandler,
    ) -> (Arc<Self>, Receiver<(RegionId, RegionRequest)>) {
        let (tx, rx) = tokio::sync::mpsc::channel(8);

        (
            Arc::new(Self {
                handle_request_delay: None,
                sender: tx,
                handle_request_mock_fn: None,
                handle_set_readonly_gracefully_mock_fn: None,
                handle_get_metadata_mock_fn: Some(mock_fn),
                mock_role: None,
                engine: engine.to_string(),
            }),
@@ -166,6 +192,7 @@ impl MockRegionEngine {
            sender: tx,
            handle_request_mock_fn: None,
            handle_set_readonly_gracefully_mock_fn: None,
            handle_get_metadata_mock_fn: None,
            mock_role: None,
            engine: engine.to_string(),
        };
@@ -208,7 +235,11 @@ impl RegionEngine for MockRegionEngine {
        unimplemented!()
    }

    async fn get_metadata(&self, _region_id: RegionId) -> Result<RegionMetadataRef, BoxedError> {
    async fn get_metadata(&self, region_id: RegionId) -> Result<RegionMetadataRef, BoxedError> {
        if let Some(mock_fn) = &self.handle_get_metadata_mock_fn {
            return mock_fn(region_id).map_err(BoxedError::new);
        };

        unimplemented!()
    }

@@ -31,10 +31,9 @@ pub use crate::schema::column_schema::{
    ColumnSchema, FulltextAnalyzer, FulltextBackend, FulltextOptions, Metadata,
    SkippingIndexOptions, SkippingIndexType, COLUMN_FULLTEXT_CHANGE_OPT_KEY_ENABLE,
    COLUMN_FULLTEXT_OPT_KEY_ANALYZER, COLUMN_FULLTEXT_OPT_KEY_BACKEND,
    COLUMN_FULLTEXT_OPT_KEY_CASE_SENSITIVE, COLUMN_FULLTEXT_OPT_KEY_FALSE_POSITIVE_RATE,
    COLUMN_FULLTEXT_OPT_KEY_GRANULARITY, COLUMN_SKIPPING_INDEX_OPT_KEY_FALSE_POSITIVE_RATE,
    COLUMN_SKIPPING_INDEX_OPT_KEY_GRANULARITY, COLUMN_SKIPPING_INDEX_OPT_KEY_TYPE, COMMENT_KEY,
    FULLTEXT_KEY, INVERTED_INDEX_KEY, SKIPPING_INDEX_KEY, TIME_INDEX_KEY,
    COLUMN_FULLTEXT_OPT_KEY_CASE_SENSITIVE, COLUMN_SKIPPING_INDEX_OPT_KEY_GRANULARITY,
    COLUMN_SKIPPING_INDEX_OPT_KEY_TYPE, COMMENT_KEY, FULLTEXT_KEY, INVERTED_INDEX_KEY,
    SKIPPING_INDEX_KEY, TIME_INDEX_KEY,
};
pub use crate::schema::constraint::ColumnDefaultConstraint;
pub use crate::schema::raw::RawSchema;

@@ -47,18 +47,13 @@ pub const COLUMN_FULLTEXT_CHANGE_OPT_KEY_ENABLE: &str = "enable";
pub const COLUMN_FULLTEXT_OPT_KEY_ANALYZER: &str = "analyzer";
pub const COLUMN_FULLTEXT_OPT_KEY_CASE_SENSITIVE: &str = "case_sensitive";
pub const COLUMN_FULLTEXT_OPT_KEY_BACKEND: &str = "backend";
pub const COLUMN_FULLTEXT_OPT_KEY_GRANULARITY: &str = "granularity";
pub const COLUMN_FULLTEXT_OPT_KEY_FALSE_POSITIVE_RATE: &str = "false_positive_rate";

/// Keys used in SKIPPING index options
pub const COLUMN_SKIPPING_INDEX_OPT_KEY_GRANULARITY: &str = "granularity";
pub const COLUMN_SKIPPING_INDEX_OPT_KEY_FALSE_POSITIVE_RATE: &str = "false_positive_rate";
pub const COLUMN_SKIPPING_INDEX_OPT_KEY_TYPE: &str = "type";

pub const DEFAULT_GRANULARITY: u32 = 10240;

pub const DEFAULT_FALSE_POSITIVE_RATE: f64 = 0.01;
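These string keys are what the `TryFrom<HashMap<String, String>>` impls later in this file parse into typed options. A small hypothetical illustration of that round trip (the analyzer value "English" is an assumption for the example):

    use std::collections::HashMap;

    let mut options = HashMap::new();
    options.insert(COLUMN_FULLTEXT_OPT_KEY_ANALYZER.to_string(), "English".to_string());
    options.insert(COLUMN_FULLTEXT_OPT_KEY_CASE_SENSITIVE.to_string(), "true".to_string());
    // Unspecified keys fall back to the defaults declared above.
    let fulltext = FulltextOptions::try_from(options).unwrap();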
/// Schema of a column, used as an immutable struct.
|
||||
#[derive(Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||
pub struct ColumnSchema {
|
||||
@@ -509,7 +504,7 @@ impl TryFrom<&ColumnSchema> for Field {
|
||||
}
|
||||
|
||||
/// Fulltext options for a column.
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Visit, VisitMut)]
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Default, Visit, VisitMut)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
pub struct FulltextOptions {
|
||||
/// Whether the fulltext index is enabled.
|
||||
@@ -523,92 +518,6 @@ pub struct FulltextOptions {
|
||||
/// The fulltext backend to use.
|
||||
#[serde(default)]
|
||||
pub backend: FulltextBackend,
|
||||
/// The granularity of the fulltext index (for bloom backend only)
|
||||
#[serde(default = "fulltext_options_default_granularity")]
|
||||
pub granularity: u32,
|
||||
/// The false positive rate of the fulltext index (for bloom backend only)
|
||||
#[serde(default = "fulltext_options_default_false_positive_rate_in_10000")]
|
||||
pub false_positive_rate_in_10000: u32,
|
||||
}
|
||||
|
||||
fn fulltext_options_default_granularity() -> u32 {
|
||||
DEFAULT_GRANULARITY
|
||||
}
|
||||
|
||||
fn fulltext_options_default_false_positive_rate_in_10000() -> u32 {
|
||||
(DEFAULT_FALSE_POSITIVE_RATE * 10000.0) as u32
|
||||
}
|
||||
|
||||
impl FulltextOptions {
|
||||
/// Creates a new fulltext options.
|
||||
pub fn new(
|
||||
enable: bool,
|
||||
analyzer: FulltextAnalyzer,
|
||||
case_sensitive: bool,
|
||||
backend: FulltextBackend,
|
||||
granularity: u32,
|
||||
false_positive_rate: f64,
|
||||
) -> Result<Self> {
|
||||
ensure!(
|
||||
0.0 < false_positive_rate && false_positive_rate <= 1.0,
|
||||
error::InvalidFulltextOptionSnafu {
|
||||
msg: format!(
|
||||
"Invalid false positive rate: {false_positive_rate}, expected: 0.0 < rate <= 1.0"
|
||||
),
|
||||
}
|
||||
);
|
||||
ensure!(
|
||||
granularity > 0,
|
||||
error::InvalidFulltextOptionSnafu {
|
||||
msg: format!("Invalid granularity: {granularity}, expected: positive integer"),
|
||||
}
|
||||
);
|
||||
Ok(Self::new_unchecked(
|
||||
enable,
|
||||
analyzer,
|
||||
case_sensitive,
|
||||
backend,
|
||||
granularity,
|
||||
false_positive_rate,
|
||||
))
|
||||
}
|
||||
|
||||
/// Creates a new fulltext options without checking `false_positive_rate` and `granularity`.
|
||||
pub fn new_unchecked(
|
||||
enable: bool,
|
||||
analyzer: FulltextAnalyzer,
|
||||
case_sensitive: bool,
|
||||
backend: FulltextBackend,
|
||||
granularity: u32,
|
||||
false_positive_rate: f64,
|
||||
) -> Self {
|
||||
Self {
|
||||
enable,
|
||||
analyzer,
|
||||
case_sensitive,
|
||||
backend,
|
||||
granularity,
|
||||
false_positive_rate_in_10000: (false_positive_rate * 10000.0) as u32,
|
||||
}
|
||||
}
|
||||
|
||||
/// Gets the false positive rate.
|
||||
pub fn false_positive_rate(&self) -> f64 {
|
||||
self.false_positive_rate_in_10000 as f64 / 10000.0
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for FulltextOptions {
|
||||
fn default() -> Self {
|
||||
Self::new_unchecked(
|
||||
false,
|
||||
FulltextAnalyzer::default(),
|
||||
false,
|
||||
FulltextBackend::default(),
|
||||
DEFAULT_GRANULARITY,
|
||||
DEFAULT_FALSE_POSITIVE_RATE,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for FulltextOptions {
|
||||
@@ -618,10 +527,6 @@ impl fmt::Display for FulltextOptions {
|
||||
write!(f, ", analyzer={}", self.analyzer)?;
|
||||
write!(f, ", case_sensitive={}", self.case_sensitive)?;
|
||||
write!(f, ", backend={}", self.backend)?;
|
||||
if self.backend == FulltextBackend::Bloom {
|
||||
write!(f, ", granularity={}", self.granularity)?;
|
||||
write!(f, ", false_positive_rate={}", self.false_positive_rate())?;
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
@@ -706,45 +611,6 @@ impl TryFrom<HashMap<String, String>> for FulltextOptions {
|
||||
}
|
||||
}
|
||||
|
||||
if fulltext_options.backend == FulltextBackend::Bloom {
|
||||
// Parse granularity with default value 10240
|
||||
let granularity = match options.get(COLUMN_FULLTEXT_OPT_KEY_GRANULARITY) {
|
||||
Some(value) => value
|
||||
.parse::<u32>()
|
||||
.ok()
|
||||
.filter(|&v| v > 0)
|
||||
.ok_or_else(|| {
|
||||
error::InvalidFulltextOptionSnafu {
|
||||
msg: format!(
|
||||
"Invalid granularity: {value}, expected: positive integer"
|
||||
),
|
||||
}
|
||||
.build()
|
||||
})?,
|
||||
None => DEFAULT_GRANULARITY,
|
||||
};
|
||||
fulltext_options.granularity = granularity;
|
||||
|
||||
// Parse false positive rate with default value 0.01
|
||||
let false_positive_rate = match options.get(COLUMN_FULLTEXT_OPT_KEY_FALSE_POSITIVE_RATE)
|
||||
{
|
||||
Some(value) => value
|
||||
.parse::<f64>()
|
||||
.ok()
|
||||
.filter(|&v| v > 0.0 && v <= 1.0)
|
||||
.ok_or_else(|| {
|
||||
error::InvalidFulltextOptionSnafu {
|
||||
msg: format!(
|
||||
"Invalid false positive rate: {value}, expected: 0.0 < rate <= 1.0"
|
||||
),
|
||||
}
|
||||
.build()
|
||||
})?,
|
||||
None => DEFAULT_FALSE_POSITIVE_RATE,
|
||||
};
|
||||
fulltext_options.false_positive_rate_in_10000 = (false_positive_rate * 10000.0) as u32;
|
||||
}
|
||||
|
||||
Ok(fulltext_options)
|
||||
}
|
||||
}
|
||||
@@ -772,72 +638,23 @@ impl fmt::Display for FulltextAnalyzer {
|
||||
pub struct SkippingIndexOptions {
|
||||
/// The granularity of the skip index.
|
||||
pub granularity: u32,
|
||||
/// The false positive rate of the skip index (in ten-thousandths, e.g., 100 = 1%).
|
||||
pub false_positive_rate_in_10000: u32,
|
||||
/// The type of the skip index.
|
||||
#[serde(default)]
|
||||
pub index_type: SkippingIndexType,
|
||||
}
|
||||
|
||||
impl SkippingIndexOptions {
|
||||
/// Creates a new skipping index options without checking `false_positive_rate` and `granularity`.
|
||||
pub fn new_unchecked(
|
||||
granularity: u32,
|
||||
false_positive_rate: f64,
|
||||
index_type: SkippingIndexType,
|
||||
) -> Self {
|
||||
Self {
|
||||
granularity,
|
||||
false_positive_rate_in_10000: (false_positive_rate * 10000.0) as u32,
|
||||
index_type,
|
||||
}
|
||||
}
|
||||
|
||||
/// Creates a new skipping index options.
|
||||
pub fn new(
|
||||
granularity: u32,
|
||||
false_positive_rate: f64,
|
||||
index_type: SkippingIndexType,
|
||||
) -> Result<Self> {
|
||||
ensure!(
|
||||
0.0 < false_positive_rate && false_positive_rate <= 1.0,
|
||||
error::InvalidSkippingIndexOptionSnafu {
|
||||
msg: format!("Invalid false positive rate: {false_positive_rate}, expected: 0.0 < rate <= 1.0"),
|
||||
}
|
||||
);
|
||||
ensure!(
|
||||
granularity > 0,
|
||||
error::InvalidSkippingIndexOptionSnafu {
|
||||
msg: format!("Invalid granularity: {granularity}, expected: positive integer"),
|
||||
}
|
||||
);
|
||||
Ok(Self::new_unchecked(
|
||||
granularity,
|
||||
false_positive_rate,
|
||||
index_type,
|
||||
))
|
||||
}
|
||||
|
||||
/// Gets the false positive rate.
|
||||
pub fn false_positive_rate(&self) -> f64 {
|
||||
self.false_positive_rate_in_10000 as f64 / 10000.0
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for SkippingIndexOptions {
|
||||
fn default() -> Self {
|
||||
Self::new_unchecked(
|
||||
DEFAULT_GRANULARITY,
|
||||
DEFAULT_FALSE_POSITIVE_RATE,
|
||||
SkippingIndexType::default(),
|
||||
)
|
||||
Self {
|
||||
granularity: DEFAULT_GRANULARITY,
|
||||
index_type: SkippingIndexType::default(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for SkippingIndexOptions {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(f, "granularity={}", self.granularity)?;
|
||||
write!(f, ", false_positive_rate={}", self.false_positive_rate())?;
|
||||
write!(f, ", index_type={}", self.index_type)?;
|
||||
Ok(())
|
||||
}
|
||||
@@ -864,37 +681,15 @@ impl TryFrom<HashMap<String, String>> for SkippingIndexOptions {
|
||||
fn try_from(options: HashMap<String, String>) -> Result<Self> {
|
||||
// Parse granularity with default value 1
|
||||
let granularity = match options.get(COLUMN_SKIPPING_INDEX_OPT_KEY_GRANULARITY) {
|
||||
Some(value) => value
|
||||
.parse::<u32>()
|
||||
.ok()
|
||||
.filter(|&v| v > 0)
|
||||
.ok_or_else(|| {
|
||||
error::InvalidSkippingIndexOptionSnafu {
|
||||
msg: format!("Invalid granularity: {value}, expected: positive integer"),
|
||||
}
|
||||
.build()
|
||||
})?,
|
||||
Some(value) => value.parse::<u32>().map_err(|_| {
|
||||
error::InvalidSkippingIndexOptionSnafu {
|
||||
msg: format!("Invalid granularity: {value}, expected: positive integer"),
|
||||
}
|
||||
.build()
|
||||
})?,
|
||||
None => DEFAULT_GRANULARITY,
|
||||
};
|
||||
|
||||
// Parse false positive rate with default value 100
|
||||
let false_positive_rate =
|
||||
match options.get(COLUMN_SKIPPING_INDEX_OPT_KEY_FALSE_POSITIVE_RATE) {
|
||||
Some(value) => value
|
||||
.parse::<f64>()
|
||||
.ok()
|
||||
.filter(|&v| v > 0.0 && v <= 1.0)
|
||||
.ok_or_else(|| {
|
||||
error::InvalidSkippingIndexOptionSnafu {
|
||||
msg: format!(
|
||||
"Invalid false positive rate: {value}, expected: 0.0 < rate <= 1.0"
|
||||
),
|
||||
}
|
||||
.build()
|
||||
})?,
|
||||
None => DEFAULT_FALSE_POSITIVE_RATE,
|
||||
};
|
||||
|
||||
// Parse index type with default value BloomFilter
|
||||
let index_type = match options.get(COLUMN_SKIPPING_INDEX_OPT_KEY_TYPE) {
|
||||
Some(typ) => match typ.to_ascii_uppercase().as_str() {
|
||||
@@ -909,11 +704,10 @@ impl TryFrom<HashMap<String, String>> for SkippingIndexOptions {
|
||||
None => SkippingIndexType::default(),
|
||||
};
|
||||
|
||||
Ok(SkippingIndexOptions::new_unchecked(
|
||||
Ok(SkippingIndexOptions {
|
||||
granularity,
|
||||
false_positive_rate,
|
||||
index_type,
|
||||
))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -899,7 +899,7 @@ impl StreamingEngine {
        let rows_send = self.run_available(true).await?;
        let row = self.send_writeback_requests().await?;
        debug!(
            "Done to flush flow_id={:?} with {} input rows flushed, {} rows sent and {} output rows flushed",
            "Done to flush flow_id={:?} with {} input rows flushed, {} rows sended and {} output rows flushed",
            flow_id, flushed_input_rows, rows_send, row
        );
        Ok(row)
@@ -26,7 +26,7 @@ use common_error::ext::BoxedError;
|
||||
use common_meta::cache::{LayeredCacheRegistryRef, TableFlownodeSetCacheRef, TableRouteCacheRef};
|
||||
use common_meta::ddl::ProcedureExecutorRef;
|
||||
use common_meta::key::flow::FlowMetadataManagerRef;
|
||||
use common_meta::key::TableMetadataManagerRef;
|
||||
use common_meta::key::{TableMetadataManager, TableMetadataManagerRef};
|
||||
use common_meta::kv_backend::KvBackendRef;
|
||||
use common_meta::node_manager::{Flownode, NodeManagerRef};
|
||||
use common_query::Output;
|
||||
@@ -37,6 +37,7 @@ use greptime_proto::v1::flow::{flow_server, FlowRequest, FlowResponse, InsertReq
|
||||
use itertools::Itertools;
|
||||
use operator::delete::Deleter;
|
||||
use operator::insert::Inserter;
|
||||
use operator::schema_helper::SchemaHelper;
|
||||
use operator::statement::StatementExecutor;
|
||||
use partition::manager::PartitionRuleManager;
|
||||
use query::{QueryEngine, QueryEngineFactory};
|
||||
@@ -546,8 +547,14 @@ impl FrontendInvoker {
|
||||
name: TABLE_FLOWNODE_SET_CACHE_NAME,
|
||||
})?;
|
||||
|
||||
let inserter = Arc::new(Inserter::new(
|
||||
let schema_helper = SchemaHelper::new(
|
||||
catalog_manager.clone(),
|
||||
Arc::new(TableMetadataManager::new(kv_backend.clone())),
|
||||
procedure_executor.clone(),
|
||||
layered_cache_registry.clone(),
|
||||
);
|
||||
let inserter = Arc::new(Inserter::new(
|
||||
schema_helper,
|
||||
partition_manager.clone(),
|
||||
node_manager.clone(),
|
||||
table_flownode_cache,
|
||||
@@ -588,7 +595,7 @@ impl FrontendInvoker {
|
||||
.start_timer();
|
||||
|
||||
self.inserter
|
||||
.handle_row_inserts(requests, ctx, &self.statement_executor, false, false)
|
||||
.handle_row_inserts(requests, ctx, false, false)
|
||||
.await
|
||||
.map_err(BoxedError::new)
|
||||
.context(common_frontend::error::ExternalSnafu)
|
||||
|
||||
@@ -14,7 +14,6 @@ workspace = true
|
||||
[dependencies]
|
||||
api.workspace = true
|
||||
arc-swap = "1.0"
|
||||
async-stream.workspace = true
|
||||
async-trait.workspace = true
|
||||
auth.workspace = true
|
||||
bytes.workspace = true
|
||||
@@ -50,6 +49,7 @@ log-query.workspace = true
|
||||
log-store.workspace = true
|
||||
meta-client.workspace = true
|
||||
num_cpus.workspace = true
|
||||
object-store.workspace = true
|
||||
opentelemetry-proto.workspace = true
|
||||
operator.workspace = true
|
||||
otel-arrow-rust.workspace = true
|
||||
|
||||
@@ -363,12 +363,6 @@ pub enum Error {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Canceling statement due to statement timeout"))]
|
||||
StatementTimeout {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
}
|
||||
|
||||
pub type Result<T> = std::result::Result<T, Error>;
|
||||
@@ -449,8 +443,6 @@ impl ErrorExt for Error {
|
||||
Error::DataFusion { error, .. } => datafusion_status_code::<Self>(error, None),
|
||||
|
||||
Error::Cancelled { .. } => StatusCode::Cancelled,
|
||||
|
||||
Error::StatementTimeout { .. } => StatusCode::Cancelled,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -19,6 +19,7 @@ use common_config::config::Configurable;
|
||||
use common_options::datanode::DatanodeClientOptions;
|
||||
use common_telemetry::logging::{LoggingOptions, SlowQueryOptions, TracingOptions};
|
||||
use meta_client::MetaClientOptions;
|
||||
use object_store::config::ObjectStoreConfig;
|
||||
use query::options::QueryOptions;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use servers::export_metrics::{ExportMetricsOption, ExportMetricsTask};
|
||||
@@ -62,6 +63,7 @@ pub struct FrontendOptions {
|
||||
pub query: QueryOptions,
|
||||
pub max_in_flight_write_bytes: Option<ReadableSize>,
|
||||
pub slow_query: Option<SlowQueryOptions>,
|
||||
pub store: ObjectStoreConfig,
|
||||
}
|
||||
|
||||
impl Default for FrontendOptions {
|
||||
@@ -88,6 +90,7 @@ impl Default for FrontendOptions {
|
||||
query: QueryOptions::default(),
|
||||
max_in_flight_write_bytes: None,
|
||||
slow_query: Some(SlowQueryOptions::default()),
|
||||
store: ObjectStoreConfig::default(),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -116,8 +119,7 @@ impl Frontend {
|
||||
if let Some(t) = self.export_metrics_task.as_ref() {
|
||||
if t.send_by_handler {
|
||||
let inserter = self.instance.inserter().clone();
|
||||
let statement_executor = self.instance.statement_executor().clone();
|
||||
let handler = ExportMetricHandler::new_handler(inserter, statement_executor);
|
||||
let handler = ExportMetricHandler::new_handler(inserter);
|
||||
t.start(Some(handler)).context(error::StartServerSnafu)?
|
||||
} else {
|
||||
t.start(None).context(error::StartServerSnafu)?;
|
||||
|
||||
@@ -25,11 +25,9 @@ mod promql;
|
||||
mod region_query;
|
||||
pub mod standalone;
|
||||
|
||||
use std::pin::Pin;
|
||||
use std::sync::Arc;
|
||||
use std::time::{Duration, SystemTime};
|
||||
use std::time::SystemTime;
|
||||
|
||||
use async_stream::stream;
|
||||
use async_trait::async_trait;
|
||||
use auth::{PermissionChecker, PermissionCheckerRef, PermissionReq};
|
||||
use catalog::process_manager::ProcessManagerRef;
|
||||
@@ -41,20 +39,20 @@ use common_config::KvBackendConfig;
|
||||
use common_error::ext::{BoxedError, ErrorExt};
|
||||
use common_meta::key::TableMetadataManagerRef;
|
||||
use common_meta::kv_backend::KvBackendRef;
|
||||
use common_meta::node_manager::NodeManagerRef;
|
||||
use common_meta::state_store::KvStateStore;
|
||||
use common_procedure::local::{LocalManager, ManagerConfig};
|
||||
use common_procedure::options::ProcedureConfig;
|
||||
use common_procedure::ProcedureManagerRef;
|
||||
use common_query::Output;
|
||||
use common_recordbatch::error::StreamTimeoutSnafu;
|
||||
use common_recordbatch::RecordBatchStreamWrapper;
|
||||
use common_telemetry::{debug, error, info, tracing};
|
||||
use datafusion_expr::LogicalPlan;
|
||||
use futures::{Stream, StreamExt};
|
||||
use log_store::raft_engine::RaftEngineBackend;
|
||||
use operator::delete::DeleterRef;
|
||||
use operator::insert::InserterRef;
|
||||
use operator::schema_helper::SchemaHelper;
|
||||
use operator::statement::{StatementExecutor, StatementExecutorRef};
|
||||
use partition::manager::PartitionRuleManagerRef;
|
||||
use pipeline::pipeline_operator::PipelineOperator;
|
||||
use prometheus::HistogramTimer;
|
||||
use promql_parser::label::Matcher;
|
||||
@@ -63,6 +61,7 @@ use query::parser::{PromQuery, QueryLanguageParser, QueryStatement};
|
||||
use query::query_engine::options::{validate_catalog_and_schema, QueryOptions};
|
||||
use query::query_engine::DescribeResult;
|
||||
use query::QueryEngineRef;
|
||||
use servers::access_layer::AccessLayerFactory;
|
||||
use servers::error as server_error;
|
||||
use servers::error::{AuthSnafu, ExecuteQuerySnafu, ParsePromQLSnafu};
|
||||
use servers::interceptor::{
|
||||
@@ -70,21 +69,20 @@ use servers::interceptor::{
|
||||
};
|
||||
use servers::prometheus_handler::PrometheusHandler;
|
||||
use servers::query_handler::sql::SqlQueryHandler;
|
||||
use session::context::{Channel, QueryContextRef};
|
||||
use session::context::QueryContextRef;
|
||||
use session::table_name::table_idents_to_full_name;
|
||||
use snafu::prelude::*;
|
||||
use sql::dialect::Dialect;
|
||||
use sql::parser::{ParseOptions, ParserContext};
|
||||
use sql::statements::copy::{CopyDatabase, CopyTable};
|
||||
use sql::statements::statement::Statement;
|
||||
use sql::statements::tql::Tql;
|
||||
use sqlparser::ast::ObjectName;
|
||||
pub use standalone::StandaloneDatanodeManager;
|
||||
|
||||
use crate::error::{
|
||||
self, Error, ExecLogicalPlanSnafu, ExecutePromqlSnafu, ExternalSnafu, InvalidSqlSnafu,
|
||||
ParseSqlSnafu, PermissionSnafu, PlanStatementSnafu, Result, SqlExecInterceptedSnafu,
|
||||
StatementTimeoutSnafu, TableOperationSnafu,
|
||||
TableOperationSnafu,
|
||||
};
|
||||
use crate::limiter::LimiterRef;
|
||||
use crate::slow_query_recorder::SlowQueryRecorder;
|
||||
@@ -106,6 +104,7 @@ pub struct Instance {
|
||||
slow_query_recorder: Option<SlowQueryRecorder>,
|
||||
limiter: Option<LimiterRef>,
|
||||
process_manager: ProcessManagerRef,
|
||||
access_layer_factory: AccessLayerFactory,
|
||||
}
|
||||
|
||||
impl Instance {
|
||||
@@ -167,6 +166,27 @@ impl Instance {
|
||||
pub fn process_manager(&self) -> &ProcessManagerRef {
|
||||
&self.process_manager
|
||||
}
|
||||
|
||||
pub fn create_schema_helper(&self) -> SchemaHelper {
|
||||
SchemaHelper::new(
|
||||
self.catalog_manager.clone(),
|
||||
self.table_metadata_manager.clone(),
|
||||
self.statement_executor.procedure_executor().clone(),
|
||||
self.statement_executor.cache_invalidator().clone(),
|
||||
)
|
||||
}
|
||||
|
||||
pub fn partition_manager(&self) -> &PartitionRuleManagerRef {
|
||||
self.inserter.partition_manager()
|
||||
}
|
||||
|
||||
pub fn node_manager(&self) -> &NodeManagerRef {
|
||||
self.inserter.node_manager()
|
||||
}
|
||||
|
||||
pub fn access_layer_factory(&self) -> &AccessLayerFactory {
|
||||
&self.access_layer_factory
|
||||
}
|
||||
}
|
||||
|
||||
fn parse_stmt(sql: &str, dialect: &(dyn Dialect + Send + Sync)) -> Result<Vec<Statement>> {
|
||||
@@ -194,7 +214,56 @@ impl Instance {
|
||||
Some(query_ctx.process_id()),
|
||||
);
|
||||
|
||||
let query_fut = self.exec_statement_with_timeout(stmt, query_ctx, query_interceptor);
|
||||
let query_fut = async {
|
||||
match stmt {
|
||||
Statement::Query(_) | Statement::Explain(_) | Statement::Delete(_) => {
|
||||
// TODO: remove this when format is supported in datafusion
|
||||
if let Statement::Explain(explain) = &stmt {
|
||||
if let Some(format) = explain.format() {
|
||||
query_ctx.set_explain_format(format.to_string());
|
||||
}
|
||||
}
|
||||
|
||||
let stmt = QueryStatement::Sql(stmt);
|
||||
let plan = self
|
||||
.statement_executor
|
||||
.plan(&stmt, query_ctx.clone())
|
||||
.await?;
|
||||
|
||||
let QueryStatement::Sql(stmt) = stmt else {
|
||||
unreachable!()
|
||||
};
|
||||
query_interceptor.pre_execute(&stmt, Some(&plan), query_ctx.clone())?;
|
||||
self.statement_executor
|
||||
.exec_plan(plan, query_ctx)
|
||||
.await
|
||||
.context(TableOperationSnafu)
|
||||
}
|
||||
Statement::Tql(tql) => {
|
||||
let plan = self
|
||||
.statement_executor
|
||||
.plan_tql(tql.clone(), &query_ctx)
|
||||
.await?;
|
||||
|
||||
query_interceptor.pre_execute(
|
||||
&Statement::Tql(tql),
|
||||
Some(&plan),
|
||||
query_ctx.clone(),
|
||||
)?;
|
||||
self.statement_executor
|
||||
.exec_plan(plan, query_ctx)
|
||||
.await
|
||||
.context(TableOperationSnafu)
|
||||
}
|
||||
_ => {
|
||||
query_interceptor.pre_execute(&stmt, None, query_ctx.clone())?;
|
||||
self.statement_executor
|
||||
.execute_sql(stmt, query_ctx)
|
||||
.await
|
||||
.context(TableOperationSnafu)
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
CancellableFuture::new(query_fut, ticket.cancellation_handle.clone())
|
||||
.await
|
||||
@@ -211,153 +280,6 @@ impl Instance {
|
||||
Output { data, meta }
|
||||
})
|
||||
}
|
||||
|
||||
async fn exec_statement_with_timeout(
|
||||
&self,
|
||||
stmt: Statement,
|
||||
query_ctx: QueryContextRef,
|
||||
query_interceptor: Option<&SqlQueryInterceptorRef<Error>>,
|
||||
) -> Result<Output> {
|
||||
let timeout = derive_timeout(&stmt, &query_ctx);
|
||||
match timeout {
|
||||
Some(timeout) => {
|
||||
let start = tokio::time::Instant::now();
|
||||
let output = tokio::time::timeout(
|
||||
timeout,
|
||||
self.exec_statement(stmt, query_ctx, query_interceptor),
|
||||
)
|
||||
.await
|
||||
.map_err(|_| StatementTimeoutSnafu.build())??;
|
||||
// compute remaining timeout
|
||||
let remaining_timeout = timeout.checked_sub(start.elapsed()).unwrap_or_default();
|
||||
attach_timeout(output, remaining_timeout)
|
||||
}
|
||||
None => {
|
||||
self.exec_statement(stmt, query_ctx, query_interceptor)
|
||||
.await
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn exec_statement(
|
||||
&self,
|
||||
stmt: Statement,
|
||||
query_ctx: QueryContextRef,
|
||||
query_interceptor: Option<&SqlQueryInterceptorRef<Error>>,
|
||||
) -> Result<Output> {
|
||||
match stmt {
|
||||
Statement::Query(_) | Statement::Explain(_) | Statement::Delete(_) => {
|
||||
// TODO: remove this when format is supported in datafusion
|
||||
if let Statement::Explain(explain) = &stmt {
|
||||
if let Some(format) = explain.format() {
|
||||
query_ctx.set_explain_format(format.to_string());
|
||||
}
|
||||
}
|
||||
|
||||
self.plan_and_exec_sql(stmt, &query_ctx, query_interceptor)
|
||||
.await
|
||||
}
|
||||
Statement::Tql(tql) => {
|
||||
self.plan_and_exec_tql(&query_ctx, query_interceptor, tql)
|
||||
.await
|
||||
}
|
||||
_ => {
|
||||
query_interceptor.pre_execute(&stmt, None, query_ctx.clone())?;
|
||||
self.statement_executor
|
||||
.execute_sql(stmt, query_ctx)
|
||||
.await
|
||||
.context(TableOperationSnafu)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn plan_and_exec_sql(
|
||||
&self,
|
||||
stmt: Statement,
|
||||
query_ctx: &QueryContextRef,
|
||||
query_interceptor: Option<&SqlQueryInterceptorRef<Error>>,
|
||||
) -> Result<Output> {
|
||||
let stmt = QueryStatement::Sql(stmt);
|
||||
let plan = self
|
||||
.statement_executor
|
||||
.plan(&stmt, query_ctx.clone())
|
||||
.await?;
|
||||
let QueryStatement::Sql(stmt) = stmt else {
|
||||
unreachable!()
|
||||
};
|
||||
query_interceptor.pre_execute(&stmt, Some(&plan), query_ctx.clone())?;
|
||||
self.statement_executor
|
||||
.exec_plan(plan, query_ctx.clone())
|
||||
.await
|
||||
.context(TableOperationSnafu)
|
||||
}
|
||||
|
||||
async fn plan_and_exec_tql(
|
||||
&self,
|
||||
query_ctx: &QueryContextRef,
|
||||
query_interceptor: Option<&SqlQueryInterceptorRef<Error>>,
|
||||
tql: Tql,
|
||||
) -> Result<Output> {
|
||||
let plan = self
|
||||
.statement_executor
|
||||
.plan_tql(tql.clone(), query_ctx)
|
||||
.await?;
|
||||
query_interceptor.pre_execute(&Statement::Tql(tql), Some(&plan), query_ctx.clone())?;
|
||||
self.statement_executor
|
||||
.exec_plan(plan, query_ctx.clone())
|
||||
.await
|
||||
.context(TableOperationSnafu)
|
||||
}
|
||||
}
|
||||
|
||||
/// If the relevant variables are set, the timeout is enforced for all PostgreSQL statements.
|
||||
/// For MySQL, it applies only to read-only statements.
|
||||
fn derive_timeout(stmt: &Statement, query_ctx: &QueryContextRef) -> Option<Duration> {
|
||||
let query_timeout = query_ctx.query_timeout()?;
|
||||
if query_timeout.is_zero() {
|
||||
return None;
|
||||
}
|
||||
match query_ctx.channel() {
|
||||
Channel::Mysql if stmt.is_readonly() => Some(query_timeout),
|
||||
Channel::Postgres => Some(query_timeout),
|
||||
_ => None,
|
||||
}
|
||||
}
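Illustrative sketch (not part of the diff): how the channel-based rule documented above plays out. `effective_timeout`, the string channel names, and `is_readonly` are stand-ins for the real `QueryContext`/`Statement` methods used by `derive_timeout`.

use std::time::Duration;

// Hypothetical, simplified mirror of derive_timeout's decision table:
// PostgreSQL always honors the session timeout, MySQL only for read-only
// statements, and other channels ignore it.
fn effective_timeout(channel: &str, is_readonly: bool, query_timeout: Duration) -> Option<Duration> {
    if query_timeout.is_zero() {
        return None;
    }
    match channel {
        "mysql" if is_readonly => Some(query_timeout),
        "postgres" => Some(query_timeout),
        _ => None,
    }
}

fn main() {
    let t = Duration::from_secs(5);
    assert_eq!(effective_timeout("postgres", false, t), Some(t));
    assert_eq!(effective_timeout("mysql", false, t), None);
    assert_eq!(effective_timeout("mysql", true, t), Some(t));
    assert_eq!(effective_timeout("mysql", true, Duration::ZERO), None);
}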
|
||||
|
||||
fn attach_timeout(output: Output, mut timeout: Duration) -> Result<Output> {
|
||||
if timeout.is_zero() {
|
||||
return StatementTimeoutSnafu.fail();
|
||||
}
|
||||
|
||||
let output = match output.data {
|
||||
OutputData::AffectedRows(_) | OutputData::RecordBatches(_) => output,
|
||||
OutputData::Stream(mut stream) => {
|
||||
let schema = stream.schema();
|
||||
let s = Box::pin(stream! {
|
||||
let mut start = tokio::time::Instant::now();
|
||||
while let Some(item) = tokio::time::timeout(timeout, stream.next()).await.map_err(|_| StreamTimeoutSnafu.build())? {
|
||||
yield item;
|
||||
|
||||
let now = tokio::time::Instant::now();
|
||||
timeout = timeout.checked_sub(now - start).unwrap_or(Duration::ZERO);
|
||||
start = now;
|
||||
// tokio::time::timeout may not return an error immediately when timeout is 0.
|
||||
if timeout.is_zero() {
|
||||
StreamTimeoutSnafu.fail()?;
|
||||
}
|
||||
}
|
||||
}) as Pin<Box<dyn Stream<Item = _> + Send>>;
|
||||
let stream = RecordBatchStreamWrapper {
|
||||
schema,
|
||||
stream: s,
|
||||
output_ordering: None,
|
||||
metrics: Default::default(),
|
||||
};
|
||||
Output::new(OutputData::Stream(Box::pin(stream)), output.meta)
|
||||
}
|
||||
};
|
||||
|
||||
Ok(output)
|
||||
}
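Illustrative sketch (not part of the diff): the remaining-budget arithmetic that both the timeout wrapper and the stream above rely on; `checked_sub` saturates to zero, and a zero budget is treated as a timeout. The helper name is hypothetical.

use std::time::Duration;

// Hypothetical helper mirroring `timeout.checked_sub(elapsed).unwrap_or_default()`.
fn remaining_budget(total: Duration, elapsed: Duration) -> Duration {
    total.checked_sub(elapsed).unwrap_or(Duration::ZERO)
}

fn main() {
    let total = Duration::from_secs(10);
    // Planning and execution took 4s: 6s are left for streaming the result.
    assert_eq!(remaining_budget(total, Duration::from_secs(4)), Duration::from_secs(6));
    // Exceeding the budget saturates to zero, which the stream above maps to a timeout error.
    assert_eq!(remaining_budget(total, Duration::from_secs(12)), Duration::ZERO);
}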
|
||||
|
||||
#[async_trait]
|
||||
@@ -617,6 +539,8 @@ pub fn check_permission(
|
||||
| Statement::AlterDatabase(_)
|
||||
| Statement::DropFlow(_)
|
||||
| Statement::Use(_) => {}
|
||||
#[cfg(feature = "enterprise")]
|
||||
Statement::DropTrigger(_) => {}
|
||||
Statement::ShowCreateDatabase(stmt) => {
|
||||
validate_database(&stmt.database_name, query_ctx)?;
|
||||
}
|
||||
@@ -720,6 +644,8 @@ pub fn check_permission(
|
||||
Statement::FetchCursor(_) | Statement::CloseCursor(_) => {}
|
||||
// Users can only kill processes in their own catalog.
|
||||
Statement::Kill(_) => {}
|
||||
// SHOW PROCESSLIST
|
||||
Statement::ShowProcesslist(_) => {}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -30,12 +30,14 @@ use operator::flow::FlowServiceOperator;
|
||||
use operator::insert::Inserter;
|
||||
use operator::procedure::ProcedureServiceOperator;
|
||||
use operator::request::Requester;
|
||||
use operator::schema_helper::SchemaHelper;
|
||||
use operator::statement::{StatementExecutor, StatementExecutorRef};
|
||||
use operator::table::TableMutationOperator;
|
||||
use partition::manager::PartitionRuleManager;
|
||||
use pipeline::pipeline_operator::PipelineOperator;
|
||||
use query::region_query::RegionQueryHandlerFactoryRef;
|
||||
use query::QueryEngineFactory;
|
||||
use servers::access_layer::AccessLayerFactory;
|
||||
use snafu::OptionExt;
|
||||
|
||||
use crate::error::{self, Result};
|
||||
@@ -130,8 +132,15 @@ impl FrontendBuilder {
|
||||
name: TABLE_FLOWNODE_SET_CACHE_NAME,
|
||||
})?;
|
||||
|
||||
let inserter = Arc::new(Inserter::new(
|
||||
let table_metadata_manager = Arc::new(TableMetadataManager::new(kv_backend.clone()));
|
||||
let schema_helper = SchemaHelper::new(
|
||||
self.catalog_manager.clone(),
|
||||
table_metadata_manager.clone(),
|
||||
self.procedure_executor.clone(),
|
||||
local_cache_invalidator.clone(),
|
||||
);
|
||||
let inserter = Arc::new(Inserter::new(
|
||||
schema_helper,
|
||||
partition_manager.clone(),
|
||||
node_manager.clone(),
|
||||
table_flownode_cache,
|
||||
@@ -176,7 +185,7 @@ impl FrontendBuilder {
|
||||
self.catalog_manager.clone(),
|
||||
query_engine.clone(),
|
||||
self.procedure_executor,
|
||||
kv_backend.clone(),
|
||||
kv_backend,
|
||||
local_cache_invalidator,
|
||||
inserter.clone(),
|
||||
table_route_cache,
|
||||
@@ -211,6 +220,7 @@ impl FrontendBuilder {
|
||||
Arc::new(Limiter::new(max_in_flight_write_bytes.as_bytes()))
|
||||
});
|
||||
|
||||
let access_layer_factory = AccessLayerFactory::new(&self.options.store).await.unwrap();
|
||||
Ok(Instance {
|
||||
catalog_manager: self.catalog_manager,
|
||||
pipeline_operator,
|
||||
@@ -219,10 +229,11 @@ impl FrontendBuilder {
|
||||
plugins,
|
||||
inserter,
|
||||
deleter,
|
||||
table_metadata_manager: Arc::new(TableMetadataManager::new(kv_backend)),
|
||||
table_metadata_manager,
|
||||
slow_query_recorder,
|
||||
limiter,
|
||||
process_manager,
|
||||
access_layer_factory,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -408,7 +408,7 @@ impl Instance {
|
||||
ctx: QueryContextRef,
|
||||
) -> Result<Output> {
|
||||
self.inserter
|
||||
.handle_column_inserts(requests, ctx, self.statement_executor.as_ref())
|
||||
.handle_column_inserts(requests, ctx)
|
||||
.await
|
||||
.context(TableOperationSnafu)
|
||||
}
|
||||
@@ -422,13 +422,7 @@ impl Instance {
|
||||
is_single_value: bool,
|
||||
) -> Result<Output> {
|
||||
self.inserter
|
||||
.handle_row_inserts(
|
||||
requests,
|
||||
ctx,
|
||||
self.statement_executor.as_ref(),
|
||||
accommodate_existing_schema,
|
||||
is_single_value,
|
||||
)
|
||||
.handle_row_inserts(requests, ctx, accommodate_existing_schema, is_single_value)
|
||||
.await
|
||||
.context(TableOperationSnafu)
|
||||
}
|
||||
@@ -441,10 +435,7 @@ impl Instance {
|
||||
) -> Result<Output> {
|
||||
self.inserter
|
||||
.handle_last_non_null_inserts(
|
||||
requests,
|
||||
ctx,
|
||||
self.statement_executor.as_ref(),
|
||||
true,
|
||||
requests, ctx, true,
|
||||
// Influx protocol may write multiple fields (values).
|
||||
false,
|
||||
)
|
||||
@@ -460,7 +451,7 @@ impl Instance {
|
||||
physical_table: String,
|
||||
) -> Result<Output> {
|
||||
self.inserter
|
||||
.handle_metric_row_inserts(requests, ctx, &self.statement_executor, physical_table)
|
||||
.handle_metric_row_inserts(requests, ctx, physical_table)
|
||||
.await
|
||||
.context(TableOperationSnafu)
|
||||
}
|
||||
|
||||
@@ -135,7 +135,7 @@ impl Instance {
|
||||
};
|
||||
|
||||
self.inserter
|
||||
.handle_log_inserts(log, ctx, self.statement_executor.as_ref())
|
||||
.handle_log_inserts(log, ctx)
|
||||
.await
|
||||
.map_err(BoxedError::new)
|
||||
.context(ExecuteGrpcRequestSnafu)
|
||||
@@ -157,7 +157,7 @@ impl Instance {
|
||||
};
|
||||
|
||||
self.inserter
|
||||
.handle_trace_inserts(rows, ctx, self.statement_executor.as_ref())
|
||||
.handle_trace_inserts(rows, ctx)
|
||||
.await
|
||||
.map_err(BoxedError::new)
|
||||
.context(ExecuteGrpcRequestSnafu)
|
||||
|
||||
@@ -28,7 +28,6 @@ use common_query::Output;
|
||||
use common_recordbatch::RecordBatches;
|
||||
use common_telemetry::{debug, tracing};
|
||||
use operator::insert::InserterRef;
|
||||
use operator::statement::StatementExecutor;
|
||||
use prost::Message;
|
||||
use servers::error::{self, AuthSnafu, InFlightWriteBytesExceededSnafu, Result as ServerResult};
|
||||
use servers::http::header::{collect_plan_metrics, CONTENT_ENCODING_SNAPPY, CONTENT_TYPE_PROTOBUF};
|
||||
@@ -271,18 +270,11 @@ impl PromStoreProtocolHandler for Instance {
|
||||
/// so only implement `PromStoreProtocolHandler::write` method.
|
||||
pub struct ExportMetricHandler {
|
||||
inserter: InserterRef,
|
||||
statement_executor: Arc<StatementExecutor>,
|
||||
}
|
||||
|
||||
impl ExportMetricHandler {
|
||||
pub fn new_handler(
|
||||
inserter: InserterRef,
|
||||
statement_executor: Arc<StatementExecutor>,
|
||||
) -> PromStoreProtocolHandlerRef {
|
||||
Arc::new(Self {
|
||||
inserter,
|
||||
statement_executor,
|
||||
})
|
||||
pub fn new_handler(inserter: InserterRef) -> PromStoreProtocolHandlerRef {
|
||||
Arc::new(Self { inserter })
|
||||
}
|
||||
}
|
||||
|
||||
@@ -295,12 +287,7 @@ impl PromStoreProtocolHandler for ExportMetricHandler {
|
||||
_: bool,
|
||||
) -> ServerResult<Output> {
|
||||
self.inserter
|
||||
.handle_metric_row_inserts(
|
||||
request,
|
||||
ctx,
|
||||
&self.statement_executor,
|
||||
GREPTIME_PHYSICAL_TABLE.to_string(),
|
||||
)
|
||||
.handle_metric_row_inserts(request, ctx, GREPTIME_PHYSICAL_TABLE.to_string())
|
||||
.await
|
||||
.map_err(BoxedError::new)
|
||||
.context(error::ExecuteGrpcQuerySnafu)
|
||||
|
||||
@@ -24,6 +24,7 @@ use servers::grpc::frontend_grpc_handler::FrontendGrpcHandler;
|
||||
use servers::grpc::greptime_handler::GreptimeRequestHandler;
|
||||
use servers::grpc::{GrpcOptions, GrpcServer};
|
||||
use servers::http::event::LogValidatorRef;
|
||||
use servers::http::prom_store::{PromBulkState, PromStoreState};
|
||||
use servers::http::{HttpServer, HttpServerBuilder};
|
||||
use servers::interceptor::LogIngestInterceptorRef;
|
||||
use servers::metrics_handler::MetricsHandler;
|
||||
@@ -95,13 +96,30 @@ where
|
||||
}
|
||||
|
||||
if opts.prom_store.enable {
|
||||
let bulk_state = if opts.prom_store.bulk_mode {
|
||||
let mut state = PromBulkState {
|
||||
schema_helper: self.instance.create_schema_helper(),
|
||||
partition_manager: self.instance.partition_manager().clone(),
|
||||
node_manager: self.instance.node_manager().clone(),
|
||||
access_layer_factory: self.instance.access_layer_factory().clone(),
|
||||
tx: None,
|
||||
};
|
||||
state.start_background_task();
|
||||
Some(state)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
let state = PromStoreState {
|
||||
prom_store_handler: self.instance.clone(),
|
||||
pipeline_handler: Some(self.instance.clone()),
|
||||
prom_store_with_metric_engine: opts.prom_store.with_metric_engine,
|
||||
prom_validation_mode: opts.http.prom_validation_mode,
|
||||
bulk_state,
|
||||
};
|
||||
|
||||
builder = builder
|
||||
.with_prom_handler(
|
||||
self.instance.clone(),
|
||||
Some(self.instance.clone()),
|
||||
opts.prom_store.with_metric_engine,
|
||||
opts.http.prom_validation_mode,
|
||||
)
|
||||
.with_prom_handler(state)
|
||||
.with_prometheus_handler(self.instance.clone());
|
||||
}
|
||||
|
||||
|
||||
@@ -18,6 +18,7 @@ use serde::{Deserialize, Serialize};
|
||||
pub struct PromStoreOptions {
|
||||
pub enable: bool,
|
||||
pub with_metric_engine: bool,
|
||||
pub bulk_mode: bool,
|
||||
}
|
||||
|
||||
impl Default for PromStoreOptions {
|
||||
@@ -25,6 +26,7 @@ impl Default for PromStoreOptions {
|
||||
Self {
|
||||
enable: true,
|
||||
with_metric_engine: true,
|
||||
bulk_mode: false,
|
||||
}
|
||||
}
|
||||
}
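Illustrative sketch (not part of the diff): opting into the new bulk path. Field names follow the `PromStoreOptions` struct above; the helper function itself is assumed context.

// Assumes the PromStoreOptions struct above is in scope.
fn bulk_mode_opts() -> PromStoreOptions {
    PromStoreOptions {
        enable: true,
        with_metric_engine: true,
        // Switches the Prometheus remote-write endpoint onto the new bulk ingestion path.
        bulk_mode: true,
    }
}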
|
||||
@@ -37,6 +39,7 @@ mod tests {
|
||||
fn test_prom_store_options() {
|
||||
let default = PromStoreOptions::default();
|
||||
assert!(default.enable);
|
||||
assert!(default.with_metric_engine)
|
||||
assert!(default.with_metric_engine);
|
||||
assert!(!default.bulk_mode);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -233,7 +233,7 @@ impl SlowQueryEventHandler {
|
||||
.into();
|
||||
|
||||
self.inserter
|
||||
.handle_row_inserts(requests, query_ctx, &self.statement_executor, false, false)
|
||||
.handle_row_inserts(requests, query_ctx, false, false)
|
||||
.await
|
||||
.context(TableOperationSnafu)?;
|
||||
|
||||
|
||||
@@ -218,7 +218,6 @@ mod tests {
|
||||
let mut writer = Cursor::new(Vec::new());
|
||||
let mut creator = BloomFilterCreator::new(
|
||||
4,
|
||||
0.01,
|
||||
Arc::new(MockExternalTempFileProvider::new()),
|
||||
Arc::new(AtomicUsize::new(0)),
|
||||
None,
|
||||
|
||||
@@ -30,6 +30,9 @@ use crate::bloom_filter::SEED;
|
||||
use crate::external_provider::ExternalTempFileProvider;
|
||||
use crate::Bytes;
|
||||
|
||||
/// The false positive rate of the Bloom filter.
|
||||
pub const FALSE_POSITIVE_RATE: f64 = 0.01;
|
||||
|
||||
/// `BloomFilterCreator` is responsible for creating and managing bloom filters
|
||||
/// for a set of elements. It divides the rows into segments and creates
|
||||
/// bloom filters for each segment.
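Illustrative sketch (not part of the diff): the segmenting arithmetic implied above. Each segment of `rows_per_segment` rows gets its own Bloom filter, now always built with the hard-coded `FALSE_POSITIVE_RATE` rather than a per-creator rate; the helper below is hypothetical.

// Hypothetical helper: number of per-segment Bloom filters for a batch of rows.
fn num_segments(total_rows: usize, rows_per_segment: usize) -> usize {
    assert!(rows_per_segment > 0);
    (total_rows + rows_per_segment - 1) / rows_per_segment
}

fn main() {
    // 10 rows with 4 rows per segment -> 3 segments, each built with FALSE_POSITIVE_RATE (0.01).
    assert_eq!(num_segments(10, 4), 3);
}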
|
||||
@@ -76,7 +79,6 @@ impl BloomFilterCreator {
|
||||
/// `rows_per_segment` <= 0
|
||||
pub fn new(
|
||||
rows_per_segment: usize,
|
||||
false_positive_rate: f64,
|
||||
intermediate_provider: Arc<dyn ExternalTempFileProvider>,
|
||||
global_memory_usage: Arc<AtomicUsize>,
|
||||
global_memory_usage_threshold: Option<usize>,
|
||||
@@ -93,7 +95,6 @@ impl BloomFilterCreator {
|
||||
cur_seg_distinct_elems_mem_usage: 0,
|
||||
global_memory_usage: global_memory_usage.clone(),
|
||||
finalized_bloom_filters: FinalizedBloomFilterStorage::new(
|
||||
false_positive_rate,
|
||||
intermediate_provider,
|
||||
global_memory_usage,
|
||||
global_memory_usage_threshold,
|
||||
@@ -262,7 +263,6 @@ mod tests {
|
||||
let mut writer = Cursor::new(Vec::new());
|
||||
let mut creator = BloomFilterCreator::new(
|
||||
2,
|
||||
0.01,
|
||||
Arc::new(MockExternalTempFileProvider::new()),
|
||||
Arc::new(AtomicUsize::new(0)),
|
||||
None,
|
||||
@@ -337,7 +337,6 @@ mod tests {
|
||||
let mut writer = Cursor::new(Vec::new());
|
||||
let mut creator: BloomFilterCreator = BloomFilterCreator::new(
|
||||
2,
|
||||
0.01,
|
||||
Arc::new(MockExternalTempFileProvider::new()),
|
||||
Arc::new(AtomicUsize::new(0)),
|
||||
None,
|
||||
@@ -419,7 +418,6 @@ mod tests {
|
||||
let mut writer = Cursor::new(Vec::new());
|
||||
let mut creator = BloomFilterCreator::new(
|
||||
2,
|
||||
0.01,
|
||||
Arc::new(MockExternalTempFileProvider::new()),
|
||||
Arc::new(AtomicUsize::new(0)),
|
||||
None,
|
||||
|
||||
@@ -23,7 +23,7 @@ use futures::{stream, AsyncWriteExt, Stream};
|
||||
use snafu::ResultExt;
|
||||
|
||||
use crate::bloom_filter::creator::intermediate_codec::IntermediateBloomFilterCodecV1;
|
||||
use crate::bloom_filter::creator::SEED;
|
||||
use crate::bloom_filter::creator::{FALSE_POSITIVE_RATE, SEED};
|
||||
use crate::bloom_filter::error::{IntermediateSnafu, IoSnafu, Result};
|
||||
use crate::external_provider::ExternalTempFileProvider;
|
||||
use crate::Bytes;
|
||||
@@ -33,9 +33,6 @@ const MIN_MEMORY_USAGE_THRESHOLD: usize = 1024 * 1024; // 1MB
|
||||
|
||||
/// Storage for finalized Bloom filters.
|
||||
pub struct FinalizedBloomFilterStorage {
|
||||
/// The false positive rate of the Bloom filter.
|
||||
false_positive_rate: f64,
|
||||
|
||||
/// Indices of the segments in the sequence of finalized Bloom filters.
|
||||
segment_indices: Vec<usize>,
|
||||
|
||||
@@ -68,14 +65,12 @@ pub struct FinalizedBloomFilterStorage {
|
||||
impl FinalizedBloomFilterStorage {
|
||||
/// Creates a new `FinalizedBloomFilterStorage`.
|
||||
pub fn new(
|
||||
false_positive_rate: f64,
|
||||
intermediate_provider: Arc<dyn ExternalTempFileProvider>,
|
||||
global_memory_usage: Arc<AtomicUsize>,
|
||||
global_memory_usage_threshold: Option<usize>,
|
||||
) -> Self {
|
||||
let external_prefix = format!("intm-bloom-filters-{}", uuid::Uuid::new_v4());
|
||||
Self {
|
||||
false_positive_rate,
|
||||
segment_indices: Vec::new(),
|
||||
in_memory: Vec::new(),
|
||||
intermediate_file_id_counter: 0,
|
||||
@@ -101,7 +96,7 @@ impl FinalizedBloomFilterStorage {
|
||||
elems: impl IntoIterator<Item = Bytes>,
|
||||
element_count: usize,
|
||||
) -> Result<()> {
|
||||
let mut bf = BloomFilter::with_false_pos(self.false_positive_rate)
|
||||
let mut bf = BloomFilter::with_false_pos(FALSE_POSITIVE_RATE)
|
||||
.seed(&SEED)
|
||||
.expected_items(element_count);
|
||||
for elem in elems.into_iter() {
|
||||
@@ -289,7 +284,6 @@ mod tests {
|
||||
let global_memory_usage_threshold = Some(1024 * 1024); // 1MB
|
||||
let provider = Arc::new(mock_provider);
|
||||
let mut storage = FinalizedBloomFilterStorage::new(
|
||||
0.01,
|
||||
provider,
|
||||
global_memory_usage.clone(),
|
||||
global_memory_usage_threshold,
|
||||
@@ -346,7 +340,6 @@ mod tests {
|
||||
let global_memory_usage_threshold = Some(1024 * 1024); // 1MB
|
||||
let provider = Arc::new(mock_provider);
|
||||
let mut storage = FinalizedBloomFilterStorage::new(
|
||||
0.01,
|
||||
provider,
|
||||
global_memory_usage.clone(),
|
||||
global_memory_usage_threshold,
|
||||
|
||||
@@ -222,7 +222,6 @@ mod tests {
|
||||
let mut writer = Cursor::new(vec![]);
|
||||
let mut creator = BloomFilterCreator::new(
|
||||
2,
|
||||
0.01,
|
||||
Arc::new(MockExternalTempFileProvider::new()),
|
||||
Arc::new(AtomicUsize::new(0)),
|
||||
None,
|
||||
|
||||
@@ -45,7 +45,6 @@ impl BloomFilterFulltextIndexCreator {
|
||||
pub fn new(
|
||||
config: Config,
|
||||
rows_per_segment: usize,
|
||||
false_positive_rate: f64,
|
||||
intermediate_provider: Arc<dyn ExternalTempFileProvider>,
|
||||
global_memory_usage: Arc<AtomicUsize>,
|
||||
global_memory_usage_threshold: Option<usize>,
|
||||
@@ -58,7 +57,6 @@ impl BloomFilterFulltextIndexCreator {
|
||||
|
||||
let inner = BloomFilterCreator::new(
|
||||
rows_per_segment,
|
||||
false_positive_rate,
|
||||
intermediate_provider,
|
||||
global_memory_usage,
|
||||
global_memory_usage_threshold,
|
||||
|
||||
@@ -54,14 +54,6 @@ pub enum Error {
|
||||
peer_id: u64,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to lookup peer: {}", peer_id))]
|
||||
LookupPeer {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: common_meta::error::Error,
|
||||
peer_id: u64,
|
||||
},
|
||||
|
||||
#[snafu(display("Another migration procedure is running for region: {}", region_id))]
|
||||
MigrationRunning {
|
||||
#[snafu(implicit)]
|
||||
@@ -1033,7 +1025,6 @@ impl ErrorExt for Error {
|
||||
}
|
||||
|
||||
Error::Other { source, .. } => source.status_code(),
|
||||
Error::LookupPeer { source, .. } => source.status_code(),
|
||||
Error::NoEnoughAvailableNode { .. } => StatusCode::RuntimeResourcesExhausted,
|
||||
|
||||
#[cfg(feature = "pg_kvbackend")]
|
||||
|
||||
@@ -110,6 +110,14 @@ pub struct MetasrvOptions {
|
||||
pub use_memory_store: bool,
|
||||
/// Whether to enable region failover.
|
||||
pub enable_region_failover: bool,
|
||||
/// Delay before initializing region failure detectors.
|
||||
///
|
||||
/// This delay helps prevent premature initialization of region failure detectors in cases where
|
||||
/// cluster maintenance mode is enabled right after metasrv starts, especially when the cluster
|
||||
/// is not deployed via the recommended GreptimeDB Operator. Without this delay, early detector registration
|
||||
/// may trigger unnecessary region failovers during datanode startup.
|
||||
#[serde(with = "humantime_serde")]
|
||||
pub region_failure_detector_initialization_delay: Duration,
|
||||
/// Whether to allow region failover on local WAL.
|
||||
///
|
||||
/// If it's true, the region failover will be allowed even if the local WAL is used.
|
||||
@@ -219,6 +227,7 @@ impl Default for MetasrvOptions {
|
||||
selector: SelectorType::default(),
|
||||
use_memory_store: false,
|
||||
enable_region_failover: false,
|
||||
region_failure_detector_initialization_delay: Duration::from_secs(10 * 60),
|
||||
allow_region_failover_on_local_wal: false,
|
||||
grpc: GrpcOptions {
|
||||
bind_addr: format!("127.0.0.1:{}", DEFAULT_METASRV_ADDR_PORT),
|
||||
|
||||
@@ -64,7 +64,7 @@ use crate::procedure::wal_prune::manager::{WalPruneManager, WalPruneTicker};
|
||||
use crate::procedure::wal_prune::Context as WalPruneContext;
|
||||
use crate::region::supervisor::{
|
||||
HeartbeatAcceptor, RegionFailureDetectorControl, RegionSupervisor, RegionSupervisorSelector,
|
||||
RegionSupervisorTicker, DEFAULT_TICK_INTERVAL,
|
||||
RegionSupervisorTicker, DEFAULT_INITIALIZATION_RETRY_PERIOD, DEFAULT_TICK_INTERVAL,
|
||||
};
|
||||
use crate::selector::lease_based::LeaseBasedSelector;
|
||||
use crate::selector::round_robin::RoundRobinSelector;
|
||||
@@ -299,6 +299,8 @@ impl MetasrvBuilder {
|
||||
Arc::new(RegionFailureDetectorControl::new(tx.clone())) as _,
|
||||
Some(Arc::new(RegionSupervisorTicker::new(
|
||||
DEFAULT_TICK_INTERVAL,
|
||||
options.region_failure_detector_initialization_delay,
|
||||
DEFAULT_INITIALIZATION_RETRY_PERIOD,
|
||||
tx.clone(),
|
||||
))),
|
||||
)
|
||||
@@ -341,6 +343,7 @@ impl MetasrvBuilder {
|
||||
region_migration_manager.clone(),
|
||||
maintenance_mode_manager.clone(),
|
||||
peer_lookup_service.clone(),
|
||||
leader_cached_kv_backend.clone(),
|
||||
);
|
||||
|
||||
Some(RegionFailureHandler::new(
|
||||
|
||||
@@ -23,7 +23,7 @@ use common_meta::key::table_route::TableRouteValue;
|
||||
use common_meta::peer::Peer;
|
||||
use common_meta::rpc::router::RegionRoute;
|
||||
use common_procedure::{watcher, ProcedureId, ProcedureManagerRef, ProcedureWithId};
|
||||
use common_telemetry::{error, info};
|
||||
use common_telemetry::{error, info, warn};
|
||||
use snafu::{ensure, OptionExt, ResultExt};
|
||||
use store_api::storage::RegionId;
|
||||
use table::table_name::TableName;
|
||||
@@ -253,10 +253,12 @@ impl RegionMigrationManager {
|
||||
}
|
||||
|
||||
/// Throws an error if `leader_peer` is not the `from_peer`.
|
||||
///
|
||||
/// If `from_peer` is unknown, use the leader peer as the `from_peer`.
|
||||
fn verify_region_leader_peer(
|
||||
&self,
|
||||
region_route: &RegionRoute,
|
||||
task: &RegionMigrationProcedureTask,
|
||||
task: &mut RegionMigrationProcedureTask,
|
||||
) -> Result<()> {
|
||||
let leader_peer = region_route
|
||||
.leader_peer
|
||||
@@ -275,6 +277,15 @@ impl RegionMigrationManager {
|
||||
}
|
||||
);
|
||||
|
||||
if task.from_peer.addr.is_empty() {
|
||||
warn!(
|
||||
"The `from_peer` is unknown, use the leader peer({}) as the `from_peer`, region: {}",
|
||||
leader_peer, task.region_id
|
||||
);
|
||||
// The peer id is the same as the leader peer id.
|
||||
task.from_peer = leader_peer.clone();
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -300,7 +311,7 @@ impl RegionMigrationManager {
|
||||
/// Submits a new region migration procedure.
|
||||
pub async fn submit_procedure(
|
||||
&self,
|
||||
task: RegionMigrationProcedureTask,
|
||||
mut task: RegionMigrationProcedureTask,
|
||||
) -> Result<Option<ProcedureId>> {
|
||||
let Some(guard) = self.insert_running_procedure(&task) else {
|
||||
return error::MigrationRunningSnafu {
|
||||
@@ -333,7 +344,7 @@ impl RegionMigrationManager {
|
||||
.fail();
|
||||
}
|
||||
|
||||
self.verify_region_leader_peer(®ion_route, &task)?;
|
||||
self.verify_region_leader_peer(®ion_route, &mut task)?;
|
||||
self.verify_region_follower_peers(®ion_route, &task)?;
|
||||
let table_info = self.retrieve_table_info(region_id).await?;
|
||||
let TableName {
|
||||
@@ -341,12 +352,6 @@ impl RegionMigrationManager {
|
||||
schema_name,
|
||||
..
|
||||
} = table_info.table_name();
|
||||
METRIC_META_REGION_MIGRATION_DATANODES
|
||||
.with_label_values(&["src", &task.from_peer.id.to_string()])
|
||||
.inc();
|
||||
METRIC_META_REGION_MIGRATION_DATANODES
|
||||
.with_label_values(&["desc", &task.to_peer.id.to_string()])
|
||||
.inc();
|
||||
let RegionMigrationProcedureTask {
|
||||
region_id,
|
||||
from_peer,
|
||||
@@ -377,6 +382,12 @@ impl RegionMigrationManager {
|
||||
return;
|
||||
}
|
||||
};
|
||||
METRIC_META_REGION_MIGRATION_DATANODES
|
||||
.with_label_values(&["src", &task.from_peer.id.to_string()])
|
||||
.inc();
|
||||
METRIC_META_REGION_MIGRATION_DATANODES
|
||||
.with_label_values(&["desc", &task.to_peer.id.to_string()])
|
||||
.inc();
|
||||
|
||||
if let Err(e) = watcher::wait(watcher).await {
|
||||
error!(e; "Failed to wait region migration procedure {procedure_id} for {task}");
|
||||
|
||||
@@ -103,6 +103,7 @@ pub mod mock {
|
||||
}),
|
||||
affected_rows: 0,
|
||||
extensions: Default::default(),
|
||||
metadata: Vec::new(),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -15,23 +15,30 @@
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use std::fmt::Debug;
|
||||
use std::sync::{Arc, Mutex};
|
||||
use std::time::Duration;
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
use async_trait::async_trait;
|
||||
use common_meta::datanode::Stat;
|
||||
use common_meta::ddl::{DetectingRegion, RegionFailureDetectorController};
|
||||
use common_meta::key::maintenance::MaintenanceModeManagerRef;
|
||||
use common_meta::key::table_route::{TableRouteKey, TableRouteValue};
|
||||
use common_meta::key::{MetadataKey, MetadataValue};
|
||||
use common_meta::kv_backend::KvBackendRef;
|
||||
use common_meta::leadership_notifier::LeadershipChangeListener;
|
||||
use common_meta::peer::{Peer, PeerLookupServiceRef};
|
||||
use common_meta::range_stream::{PaginationStream, DEFAULT_PAGE_SIZE};
|
||||
use common_meta::rpc::store::RangeRequest;
|
||||
use common_meta::DatanodeId;
|
||||
use common_runtime::JoinHandle;
|
||||
use common_telemetry::{debug, error, info, warn};
|
||||
use common_time::util::current_time_millis;
|
||||
use error::Error::{LeaderPeerChanged, MigrationRunning, RegionMigrated, TableRouteNotFound};
|
||||
use snafu::{ensure, OptionExt, ResultExt};
|
||||
use futures::{StreamExt, TryStreamExt};
|
||||
use snafu::{ensure, ResultExt};
|
||||
use store_api::storage::RegionId;
|
||||
use tokio::sync::mpsc::{Receiver, Sender};
|
||||
use tokio::time::{interval, MissedTickBehavior};
|
||||
use tokio::sync::oneshot;
|
||||
use tokio::time::{interval, interval_at, MissedTickBehavior};
|
||||
|
||||
use crate::error::{self, Result};
|
||||
use crate::failure_detector::PhiAccrualFailureDetectorOptions;
|
||||
@@ -70,6 +77,9 @@ impl From<&Stat> for DatanodeHeartbeat {
|
||||
///
|
||||
/// Variants:
|
||||
/// - `Tick`: This event is used to trigger region failure detection periodically.
|
||||
/// - `InitializeAllRegions`: This event is used to initialize all region failure detectors.
|
||||
/// - `RegisterFailureDetectors`: This event is used to register failure detectors for regions.
|
||||
/// - `DeregisterFailureDetectors`: This event is used to deregister failure detectors for regions.
|
||||
/// - `HeartbeatArrived`: This event presents the metasrv received [`DatanodeHeartbeat`] from the datanodes.
|
||||
/// - `Clear`: This event is used to reset the state of the supervisor, typically used
|
||||
/// when a system-wide reset or reinitialization is needed.
|
||||
@@ -78,6 +88,7 @@ impl From<&Stat> for DatanodeHeartbeat {
|
||||
/// of the supervisor during tests.
|
||||
pub(crate) enum Event {
|
||||
Tick,
|
||||
InitializeAllRegions(tokio::sync::oneshot::Sender<()>),
|
||||
RegisterFailureDetectors(Vec<DetectingRegion>),
|
||||
DeregisterFailureDetectors(Vec<DetectingRegion>),
|
||||
HeartbeatArrived(DatanodeHeartbeat),
|
||||
@@ -102,6 +113,7 @@ impl Debug for Event {
|
||||
Self::Tick => write!(f, "Tick"),
|
||||
Self::HeartbeatArrived(arg0) => f.debug_tuple("HeartbeatArrived").field(arg0).finish(),
|
||||
Self::Clear => write!(f, "Clear"),
|
||||
Self::InitializeAllRegions(_) => write!(f, "InspectAndRegisterRegions"),
|
||||
Self::RegisterFailureDetectors(arg0) => f
|
||||
.debug_tuple("RegisterFailureDetectors")
|
||||
.field(arg0)
|
||||
@@ -127,6 +139,12 @@ pub struct RegionSupervisorTicker {
|
||||
/// The interval of tick.
|
||||
tick_interval: Duration,
|
||||
|
||||
/// The delay before initializing all region failure detectors.
|
||||
initialization_delay: Duration,
|
||||
|
||||
/// The retry period for initializing all region failure detectors.
|
||||
initialization_retry_period: Duration,
|
||||
|
||||
/// Sends [Event]s.
|
||||
sender: Sender<Event>,
|
||||
}
|
||||
@@ -149,10 +167,21 @@ impl LeadershipChangeListener for RegionSupervisorTicker {
|
||||
}
|
||||
|
||||
impl RegionSupervisorTicker {
|
||||
pub(crate) fn new(tick_interval: Duration, sender: Sender<Event>) -> Self {
|
||||
pub(crate) fn new(
|
||||
tick_interval: Duration,
|
||||
initialization_delay: Duration,
|
||||
initialization_retry_period: Duration,
|
||||
sender: Sender<Event>,
|
||||
) -> Self {
|
||||
info!(
|
||||
"RegionSupervisorTicker is created, tick_interval: {:?}, initialization_delay: {:?}, initialization_retry_period: {:?}",
|
||||
tick_interval, initialization_delay, initialization_retry_period
|
||||
);
|
||||
Self {
|
||||
tick_handle: Mutex::new(None),
|
||||
tick_interval,
|
||||
initialization_delay,
|
||||
initialization_retry_period,
|
||||
sender,
|
||||
}
|
||||
}
|
||||
@@ -163,15 +192,39 @@ impl RegionSupervisorTicker {
|
||||
if handle.is_none() {
|
||||
let sender = self.sender.clone();
|
||||
let tick_interval = self.tick_interval;
|
||||
let initialization_delay = self.initialization_delay;
|
||||
|
||||
let mut initialization_interval = interval_at(
|
||||
tokio::time::Instant::now() + initialization_delay,
|
||||
self.initialization_retry_period,
|
||||
);
|
||||
initialization_interval.set_missed_tick_behavior(MissedTickBehavior::Skip);
|
||||
common_runtime::spawn_global(async move {
|
||||
loop {
|
||||
initialization_interval.tick().await;
|
||||
let (tx, rx) = oneshot::channel();
|
||||
if sender.send(Event::InitializeAllRegions(tx)).await.is_err() {
|
||||
info!("EventReceiver is dropped, region failure detectors initialization loop is stopped");
|
||||
break;
|
||||
}
|
||||
if rx.await.is_ok() {
|
||||
info!("All region failure detectors are initialized.");
|
||||
break;
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
let sender = self.sender.clone();
|
||||
let ticker_loop = tokio::spawn(async move {
|
||||
let mut interval = interval(tick_interval);
|
||||
interval.set_missed_tick_behavior(MissedTickBehavior::Skip);
|
||||
let mut tick_interval = interval(tick_interval);
|
||||
tick_interval.set_missed_tick_behavior(MissedTickBehavior::Skip);
|
||||
|
||||
if let Err(err) = sender.send(Event::Clear).await {
|
||||
warn!(err; "EventReceiver is dropped, failed to send Event::Clear");
|
||||
return;
|
||||
}
|
||||
loop {
|
||||
interval.tick().await;
|
||||
tick_interval.tick().await;
|
||||
if sender.send(Event::Tick).await.is_err() {
|
||||
info!("EventReceiver is dropped, tick loop is stopped");
|
||||
break;
|
||||
@@ -202,6 +255,8 @@ pub type RegionSupervisorRef = Arc<RegionSupervisor>;
|
||||
|
||||
/// The default tick interval.
|
||||
pub const DEFAULT_TICK_INTERVAL: Duration = Duration::from_secs(1);
|
||||
/// The default initialization retry period.
|
||||
pub const DEFAULT_INITIALIZATION_RETRY_PERIOD: Duration = Duration::from_secs(60);
|
||||
|
||||
/// Selector for region supervisor.
|
||||
pub enum RegionSupervisorSelector {
|
||||
@@ -228,6 +283,8 @@ pub struct RegionSupervisor {
|
||||
maintenance_mode_manager: MaintenanceModeManagerRef,
|
||||
/// Peer lookup service
|
||||
peer_lookup: PeerLookupServiceRef,
|
||||
/// The kv backend.
|
||||
kv_backend: KvBackendRef,
|
||||
}
|
||||
|
||||
/// Controller for managing failure detectors for regions.
|
||||
@@ -290,6 +347,7 @@ impl RegionSupervisor {
|
||||
tokio::sync::mpsc::channel(1024)
|
||||
}
|
||||
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
pub(crate) fn new(
|
||||
event_receiver: Receiver<Event>,
|
||||
options: PhiAccrualFailureDetectorOptions,
|
||||
@@ -298,6 +356,7 @@ impl RegionSupervisor {
|
||||
region_migration_manager: RegionMigrationManagerRef,
|
||||
maintenance_mode_manager: MaintenanceModeManagerRef,
|
||||
peer_lookup: PeerLookupServiceRef,
|
||||
kv_backend: KvBackendRef,
|
||||
) -> Self {
|
||||
Self {
|
||||
failure_detector: RegionFailureDetector::new(options),
|
||||
@@ -308,6 +367,7 @@ impl RegionSupervisor {
|
||||
region_migration_manager,
|
||||
maintenance_mode_manager,
|
||||
peer_lookup,
|
||||
kv_backend,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -315,6 +375,26 @@ impl RegionSupervisor {
|
||||
pub(crate) async fn run(&mut self) {
|
||||
while let Some(event) = self.receiver.recv().await {
|
||||
match event {
|
||||
Event::InitializeAllRegions(sender) => {
|
||||
match self.is_maintenance_mode_enabled().await {
|
||||
Ok(false) => {}
|
||||
Ok(true) => {
|
||||
warn!("Skipping initialize all regions since maintenance mode is enabled.");
|
||||
continue;
|
||||
}
|
||||
Err(err) => {
|
||||
error!(err; "Failed to check maintenance mode during initialize all regions.");
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
if let Err(err) = self.initialize_all().await {
|
||||
error!(err; "Failed to initialize all regions.");
|
||||
} else {
|
||||
// Ignore the error.
|
||||
let _ = sender.send(());
|
||||
}
|
||||
}
|
||||
Event::Tick => {
|
||||
let regions = self.detect_region_failure();
|
||||
self.handle_region_failures(regions).await;
|
||||
@@ -336,6 +416,59 @@ impl RegionSupervisor {
|
||||
info!("RegionSupervisor is stopped!");
|
||||
}
|
||||
|
||||
async fn initialize_all(&self) -> Result<()> {
|
||||
let now = Instant::now();
|
||||
let regions = self.regions();
|
||||
let req = RangeRequest::new().with_prefix(TableRouteKey::range_prefix());
|
||||
let stream = PaginationStream::new(self.kv_backend.clone(), req, DEFAULT_PAGE_SIZE, |kv| {
|
||||
TableRouteKey::from_bytes(&kv.key).map(|v| (v.table_id, kv.value))
|
||||
})
|
||||
.into_stream();
|
||||
|
||||
let mut stream = stream
|
||||
.map_ok(|(_, value)| {
|
||||
TableRouteValue::try_from_raw_value(&value)
|
||||
.context(error::TableMetadataManagerSnafu)
|
||||
})
|
||||
.boxed();
|
||||
let mut detecting_regions = Vec::new();
|
||||
while let Some(route) = stream
|
||||
.try_next()
|
||||
.await
|
||||
.context(error::TableMetadataManagerSnafu)?
|
||||
{
|
||||
let route = route?;
|
||||
if !route.is_physical() {
|
||||
continue;
|
||||
}
|
||||
|
||||
let physical_table_route = route.into_physical_table_route();
|
||||
physical_table_route
|
||||
.region_routes
|
||||
.iter()
|
||||
.for_each(|region_route| {
|
||||
if !regions.contains(®ion_route.region.id) {
|
||||
if let Some(leader_peer) = ®ion_route.leader_peer {
|
||||
detecting_regions.push((leader_peer.id, region_route.region.id));
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
let num_detecting_regions = detecting_regions.len();
|
||||
if !detecting_regions.is_empty() {
|
||||
self.register_failure_detectors(detecting_regions).await;
|
||||
}
|
||||
|
||||
info!(
|
||||
"Initialize {} region failure detectors, elapsed: {:?}",
|
||||
num_detecting_regions,
|
||||
now.elapsed()
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn register_failure_detectors(&self, detecting_regions: Vec<DetectingRegion>) {
|
||||
let ts_millis = current_time_millis();
|
||||
for region in detecting_regions {
|
||||
@@ -497,12 +630,10 @@ impl RegionSupervisor {
|
||||
.peer_lookup
|
||||
.datanode(from_peer_id)
|
||||
.await
|
||||
.context(error::LookupPeerSnafu {
|
||||
peer_id: from_peer_id,
|
||||
})?
|
||||
.context(error::PeerUnavailableSnafu {
|
||||
peer_id: from_peer_id,
|
||||
})?;
|
||||
.ok()
|
||||
.flatten()
|
||||
.unwrap_or_else(|| Peer::empty(from_peer_id));
|
||||
|
||||
let region_peers = self
|
||||
.select_peers(from_peer_id, regions, failed_datanodes)
|
||||
.await?;
|
||||
@@ -599,6 +730,14 @@ impl RegionSupervisor {
|
||||
.collect::<Vec<_>>()
|
||||
}
|
||||
|
||||
/// Returns all regions that are registered in the failure detector.
|
||||
fn regions(&self) -> HashSet<RegionId> {
|
||||
self.failure_detector
|
||||
.iter()
|
||||
.map(|e| e.region_ident().1)
|
||||
.collect::<HashSet<_>>()
|
||||
}
|
||||
|
||||
/// Updates the state of corresponding failure detectors.
|
||||
fn on_heartbeat_arrived(&self, heartbeat: DatanodeHeartbeat) {
|
||||
for region_id in heartbeat.regions {
|
||||
@@ -618,13 +757,22 @@ impl RegionSupervisor {
|
||||
#[cfg(test)]
|
||||
pub(crate) mod tests {
|
||||
use std::assert_matches::assert_matches;
|
||||
use std::collections::HashMap;
|
||||
use std::sync::{Arc, Mutex};
|
||||
use std::time::Duration;
|
||||
|
||||
use common_meta::ddl::test_util::{
|
||||
test_create_logical_table_task, test_create_physical_table_task,
|
||||
};
|
||||
use common_meta::ddl::RegionFailureDetectorController;
|
||||
use common_meta::key::maintenance;
|
||||
use common_meta::key::table_route::{
|
||||
LogicalTableRouteValue, PhysicalTableRouteValue, TableRouteValue,
|
||||
};
|
||||
use common_meta::key::{maintenance, TableMetadataManager};
|
||||
use common_meta::peer::Peer;
|
||||
use common_meta::rpc::router::{Region, RegionRoute};
|
||||
use common_meta::test_util::NoopPeerLookupService;
|
||||
use common_telemetry::info;
|
||||
use common_time::util::current_time_millis;
|
||||
use rand::Rng;
|
||||
use store_api::storage::RegionId;
|
||||
@@ -654,6 +802,7 @@ pub(crate) mod tests {
|
||||
Arc::new(maintenance::MaintenanceModeManager::new(env.kv_backend()));
|
||||
let peer_lookup = Arc::new(NoopPeerLookupService);
|
||||
let (tx, rx) = RegionSupervisor::channel();
|
||||
let kv_backend = env.kv_backend();
|
||||
|
||||
(
|
||||
RegionSupervisor::new(
|
||||
@@ -664,6 +813,7 @@ pub(crate) mod tests {
|
||||
region_migration_manager,
|
||||
maintenance_mode_manager,
|
||||
peer_lookup,
|
||||
kv_backend,
|
||||
),
|
||||
tx,
|
||||
)
|
||||
@@ -748,6 +898,8 @@ pub(crate) mod tests {
|
||||
let ticker = RegionSupervisorTicker {
|
||||
tick_handle: Mutex::new(None),
|
||||
tick_interval: Duration::from_millis(10),
|
||||
initialization_delay: Duration::from_millis(100),
|
||||
initialization_retry_period: Duration::from_millis(100),
|
||||
sender: tx,
|
||||
};
|
||||
// It's ok if we start the ticker again.
|
||||
@@ -757,11 +909,116 @@ pub(crate) mod tests {
|
||||
ticker.stop();
|
||||
assert!(!rx.is_empty());
|
||||
while let Ok(event) = rx.try_recv() {
|
||||
assert_matches!(event, Event::Tick | Event::Clear);
|
||||
assert_matches!(
|
||||
event,
|
||||
Event::Tick | Event::Clear | Event::InitializeAllRegions(_)
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_initialize_all_regions_event_handling() {
|
||||
common_telemetry::init_default_ut_logging();
|
||||
let (tx, mut rx) = tokio::sync::mpsc::channel(128);
|
||||
let ticker = RegionSupervisorTicker {
|
||||
tick_handle: Mutex::new(None),
|
||||
tick_interval: Duration::from_millis(1000),
|
||||
initialization_delay: Duration::from_millis(50),
|
||||
initialization_retry_period: Duration::from_millis(50),
|
||||
sender: tx,
|
||||
};
|
||||
ticker.start();
|
||||
sleep(Duration::from_millis(60)).await;
|
||||
let handle = tokio::spawn(async move {
|
||||
let mut counter = 0;
|
||||
while let Some(event) = rx.recv().await {
|
||||
if let Event::InitializeAllRegions(tx) = event {
|
||||
if counter == 0 {
|
||||
// Ignore the first event
|
||||
counter += 1;
|
||||
continue;
|
||||
}
|
||||
tx.send(()).unwrap();
|
||||
info!("Responded initialize all regions event");
|
||||
break;
|
||||
}
|
||||
}
|
||||
rx
|
||||
});
|
||||
|
||||
let rx = handle.await.unwrap();
|
||||
for _ in 0..3 {
|
||||
sleep(Duration::from_millis(100)).await;
|
||||
assert!(rx.is_empty());
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_initialize_all_regions() {
|
||||
common_telemetry::init_default_ut_logging();
|
||||
let (mut supervisor, sender) = new_test_supervisor();
|
||||
let table_metadata_manager = TableMetadataManager::new(supervisor.kv_backend.clone());
|
||||
|
||||
// Create a physical table metadata
|
||||
let table_id = 1024;
|
||||
let mut create_physical_table_task = test_create_physical_table_task("my_physical_table");
|
||||
create_physical_table_task.set_table_id(table_id);
|
||||
let table_info = create_physical_table_task.table_info;
|
||||
let table_route = PhysicalTableRouteValue::new(vec![RegionRoute {
|
||||
region: Region {
|
||||
id: RegionId::new(table_id, 0),
|
||||
..Default::default()
|
||||
},
|
||||
leader_peer: Some(Peer::empty(1)),
|
||||
..Default::default()
|
||||
}]);
|
||||
let table_route_value = TableRouteValue::Physical(table_route);
|
||||
table_metadata_manager
|
||||
.create_table_metadata(table_info, table_route_value, HashMap::new())
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
// Create a logical table metadata
|
||||
let logical_table_id = 1025;
|
||||
let mut test_create_logical_table_task = test_create_logical_table_task("my_logical_table");
|
||||
test_create_logical_table_task.set_table_id(logical_table_id);
|
||||
let table_info = test_create_logical_table_task.table_info;
|
||||
let table_route = LogicalTableRouteValue::new(1024, vec![RegionId::new(1025, 0)]);
|
||||
let table_route_value = TableRouteValue::Logical(table_route);
|
||||
table_metadata_manager
|
||||
.create_table_metadata(table_info, table_route_value, HashMap::new())
|
||||
.await
|
||||
.unwrap();
|
||||
tokio::spawn(async move { supervisor.run().await });
|
||||
let (tx, rx) = oneshot::channel();
|
||||
sender.send(Event::InitializeAllRegions(tx)).await.unwrap();
|
||||
assert!(rx.await.is_ok());
|
||||
|
||||
let (tx, rx) = oneshot::channel();
|
||||
sender.send(Event::Dump(tx)).await.unwrap();
|
||||
let detector = rx.await.unwrap();
|
||||
assert_eq!(detector.len(), 1);
|
||||
assert!(detector.contains(&(1, RegionId::new(1024, 0))));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_initialize_all_regions_with_maintenance_mode() {
|
||||
common_telemetry::init_default_ut_logging();
|
||||
let (mut supervisor, sender) = new_test_supervisor();
|
||||
|
||||
supervisor
|
||||
.maintenance_mode_manager
|
||||
.set_maintenance_mode()
|
||||
.await
|
||||
.unwrap();
|
||||
tokio::spawn(async move { supervisor.run().await });
|
||||
let (tx, rx) = oneshot::channel();
|
||||
sender.send(Event::InitializeAllRegions(tx)).await.unwrap();
|
||||
// The sender is dropped, so the receiver will receive an error.
|
||||
assert!(rx.await.is_err());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_region_failure_detector_controller() {
|
||||
let (mut supervisor, sender) = new_test_supervisor();
|
||||
|
||||
@@ -8,6 +8,7 @@ license.workspace = true
|
||||
workspace = true
|
||||
|
||||
[dependencies]
|
||||
ahash.workspace = true
|
||||
api.workspace = true
|
||||
aquamarine.workspace = true
|
||||
async-stream.workspace = true
|
||||
|
||||
@@ -145,19 +145,12 @@ impl DataRegion {
|
||||
IndexOptions::Inverted => {
|
||||
c.column_schema.set_inverted_index(true);
|
||||
}
|
||||
IndexOptions::Skipping {
|
||||
granularity,
|
||||
false_positive_rate,
|
||||
} => {
|
||||
IndexOptions::Skipping { granularity } => {
|
||||
c.column_schema
|
||||
.set_skipping_options(
|
||||
&SkippingIndexOptions::new(
|
||||
granularity,
|
||||
false_positive_rate,
|
||||
SkippingIndexType::BloomFilter,
|
||||
)
|
||||
.context(SetSkippingIndexOptionSnafu)?,
|
||||
)
|
||||
.set_skipping_options(&SkippingIndexOptions {
|
||||
granularity,
|
||||
index_type: SkippingIndexType::BloomFilter,
|
||||
})
|
||||
.context(SetSkippingIndexOptionSnafu)?;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -158,6 +158,7 @@ impl RegionEngine for MetricEngine {
|
||||
Ok(RegionResponse {
|
||||
affected_rows: rows,
|
||||
extensions: extension_return_value,
|
||||
metadata: Vec::new(),
|
||||
})
|
||||
}
|
||||
BatchRegionDdlRequest::Alter(requests) => {
|
||||
@@ -171,6 +172,7 @@ impl RegionEngine for MetricEngine {
|
||||
Ok(RegionResponse {
|
||||
affected_rows: rows,
|
||||
extensions: extension_return_value,
|
||||
metadata: Vec::new(),
|
||||
})
|
||||
}
|
||||
BatchRegionDdlRequest::Drop(requests) => {
|
||||
@@ -243,6 +245,7 @@ impl RegionEngine for MetricEngine {
|
||||
result.map_err(BoxedError::new).map(|rows| RegionResponse {
|
||||
affected_rows: rows,
|
||||
extensions: extension_return_value,
|
||||
metadata: Vec::new(),
|
||||
})
|
||||
}
|
||||
|
||||
@@ -439,6 +442,7 @@ impl MetricEngine {
|
||||
Ok(RegionResponse {
|
||||
affected_rows,
|
||||
extensions,
|
||||
metadata: Vec::new(),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -55,7 +55,6 @@ use crate::utils::{
|
||||
};
|
||||
|
||||
const DEFAULT_TABLE_ID_SKIPPING_INDEX_GRANULARITY: u32 = 1024;
|
||||
const DEFAULT_TABLE_ID_SKIPPING_INDEX_FALSE_POSITIVE_RATE: f64 = 0.01;
|
||||
|
||||
impl MetricEngineInner {
|
||||
pub async fn create_regions(
|
||||
@@ -543,11 +542,10 @@ impl MetricEngineInner {
|
||||
ConcreteDataType::uint32_datatype(),
|
||||
false,
|
||||
)
|
||||
.with_skipping_options(SkippingIndexOptions::new_unchecked(
|
||||
DEFAULT_TABLE_ID_SKIPPING_INDEX_GRANULARITY,
|
||||
DEFAULT_TABLE_ID_SKIPPING_INDEX_FALSE_POSITIVE_RATE,
|
||||
datatypes::schema::SkippingIndexType::BloomFilter,
|
||||
))
|
||||
.with_skipping_options(SkippingIndexOptions {
|
||||
granularity: DEFAULT_TABLE_ID_SKIPPING_INDEX_GRANULARITY,
|
||||
index_type: datatypes::schema::SkippingIndexType::BloomFilter,
|
||||
})
|
||||
.unwrap(),
|
||||
};
|
||||
let tsid_col = ColumnMetadata {
|
||||
|
||||
@@ -17,8 +17,6 @@
|
||||
use std::collections::HashMap;
|
||||
|
||||
use store_api::metric_engine_consts::{
|
||||
METRIC_ENGINE_INDEX_SKIPPING_INDEX_FALSE_POSITIVE_RATE_OPTION,
|
||||
METRIC_ENGINE_INDEX_SKIPPING_INDEX_FALSE_POSITIVE_RATE_OPTION_DEFAULT,
|
||||
METRIC_ENGINE_INDEX_SKIPPING_INDEX_GRANULARITY_OPTION,
|
||||
METRIC_ENGINE_INDEX_SKIPPING_INDEX_GRANULARITY_OPTION_DEFAULT, METRIC_ENGINE_INDEX_TYPE_OPTION,
|
||||
};
|
||||
@@ -33,20 +31,19 @@ use crate::error::{Error, ParseRegionOptionsSnafu, Result};
|
||||
const SEG_ROW_COUNT_FOR_DATA_REGION: u32 = 256;
|
||||
|
||||
/// Physical region options.
|
||||
#[derive(Debug, Clone, Copy, PartialEq)]
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
|
||||
pub struct PhysicalRegionOptions {
|
||||
pub index: IndexOptions,
|
||||
}
|
||||
|
||||
/// Index options for auto created columns
|
||||
#[derive(Debug, Clone, Copy, Default, PartialEq)]
|
||||
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)]
|
||||
pub enum IndexOptions {
|
||||
#[default]
|
||||
None,
|
||||
Inverted,
|
||||
Skipping {
|
||||
granularity: u32,
|
||||
false_positive_rate: f64,
|
||||
},
|
||||
}
|
||||
|
||||
@@ -57,7 +54,6 @@ pub fn set_data_region_options(
|
||||
) {
|
||||
options.remove(METRIC_ENGINE_INDEX_TYPE_OPTION);
|
||||
options.remove(METRIC_ENGINE_INDEX_SKIPPING_INDEX_GRANULARITY_OPTION);
|
||||
options.remove(METRIC_ENGINE_INDEX_SKIPPING_INDEX_FALSE_POSITIVE_RATE_OPTION);
|
||||
options.insert(
|
||||
"index.inverted_index.segment_row_count".to_string(),
|
||||
SEG_ROW_COUNT_FOR_DATA_REGION.to_string(),
|
||||
@@ -97,23 +93,7 @@ impl TryFrom<&HashMap<String, String>> for PhysicalRegionOptions {
|
||||
})
|
||||
},
|
||||
)?;
|
||||
let false_positive_rate = value
|
||||
.get(METRIC_ENGINE_INDEX_SKIPPING_INDEX_FALSE_POSITIVE_RATE_OPTION)
|
||||
.map_or(
|
||||
Ok(METRIC_ENGINE_INDEX_SKIPPING_INDEX_FALSE_POSITIVE_RATE_OPTION_DEFAULT),
|
||||
|f| {
|
||||
f.parse().ok().filter(|f| *f > 0.0 && *f <= 1.0).ok_or(
|
||||
ParseRegionOptionsSnafu {
|
||||
reason: format!("Invalid false positive rate: {}", f),
|
||||
}
|
||||
.build(),
|
||||
)
|
||||
},
|
||||
)?;
|
||||
Ok(IndexOptions::Skipping {
|
||||
granularity,
|
||||
false_positive_rate,
|
||||
})
|
||||
Ok(IndexOptions::Skipping { granularity })
|
||||
}
|
||||
Some(index_type) => ParseRegionOptionsSnafu {
|
||||
reason: format!("Invalid index type: {}", index_type),
|
||||
@@ -141,16 +121,11 @@ mod tests {
|
||||
METRIC_ENGINE_INDEX_SKIPPING_INDEX_GRANULARITY_OPTION.to_string(),
|
||||
"102400".to_string(),
|
||||
);
|
||||
options.insert(
|
||||
METRIC_ENGINE_INDEX_SKIPPING_INDEX_FALSE_POSITIVE_RATE_OPTION.to_string(),
|
||||
"0.01".to_string(),
|
||||
);
|
||||
set_data_region_options(&mut options, false);
|
||||
|
||||
for key in [
|
||||
METRIC_ENGINE_INDEX_TYPE_OPTION,
|
||||
METRIC_ENGINE_INDEX_SKIPPING_INDEX_GRANULARITY_OPTION,
|
||||
METRIC_ENGINE_INDEX_SKIPPING_INDEX_FALSE_POSITIVE_RATE_OPTION,
|
||||
] {
|
||||
assert_eq!(options.get(key), None);
|
||||
}
|
||||
@@ -179,16 +154,11 @@ mod tests {
|
||||
METRIC_ENGINE_INDEX_SKIPPING_INDEX_GRANULARITY_OPTION.to_string(),
|
||||
"102400".to_string(),
|
||||
);
|
||||
options.insert(
|
||||
METRIC_ENGINE_INDEX_SKIPPING_INDEX_FALSE_POSITIVE_RATE_OPTION.to_string(),
|
||||
"0.01".to_string(),
|
||||
);
|
||||
let physical_region_options = PhysicalRegionOptions::try_from(&options).unwrap();
|
||||
assert_eq!(
|
||||
physical_region_options.index,
|
||||
IndexOptions::Skipping {
|
||||
granularity: 102400,
|
||||
false_positive_rate: 0.01,
|
||||
granularity: 102400
|
||||
}
|
||||
);
|
||||
}
|
||||
|
||||
@@ -147,7 +147,7 @@ impl MetricEngineInner {
|
||||
fn modify_rows(
|
||||
&self,
|
||||
physical_region_id: RegionId,
|
||||
table_id: TableId,
|
||||
logical_table_id: TableId,
|
||||
rows: &mut Rows,
|
||||
encoding: PrimaryKeyEncoding,
|
||||
) -> Result<()> {
|
||||
@@ -163,7 +163,9 @@ impl MetricEngineInner {
|
||||
.physical_columns();
|
||||
RowsIter::new(input, name_to_id)
|
||||
};
|
||||
let output = self.row_modifier.modify_rows(iter, table_id, encoding)?;
|
||||
let output = self
|
||||
.row_modifier
|
||||
.modify_rows(iter, logical_table_id, encoding)?;
|
||||
*rows = output;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -40,7 +40,7 @@ const TSID_HASH_SEED: u32 = 846793005;
|
||||
///
|
||||
/// - For [`PrimaryKeyEncoding::Dense`] encoding,
|
||||
/// it adds two columns(`__table_id`, `__tsid`) to the row.
|
||||
pub(crate) struct RowModifier {
|
||||
pub struct RowModifier {
|
||||
codec: SparsePrimaryKeyCodec,
|
||||
}
|
||||
|
||||
@@ -52,7 +52,7 @@ impl RowModifier {
|
||||
}
|
||||
|
||||
/// Modify rows with the given primary key encoding.
|
||||
pub(crate) fn modify_rows(
|
||||
pub fn modify_rows(
|
||||
&self,
|
||||
iter: RowsIter,
|
||||
table_id: TableId,
|
||||
@@ -74,7 +74,7 @@ impl RowModifier {
|
||||
|
||||
let mut buffer = vec![];
|
||||
for mut iter in iter.iter_mut() {
|
||||
let (table_id, tsid) = self.fill_internal_columns(table_id, &iter);
|
||||
let (table_id, tsid) = Self::fill_internal_columns(table_id, &iter);
|
||||
let mut values = Vec::with_capacity(num_output_column);
|
||||
buffer.clear();
|
||||
let internal_columns = [
|
||||
@@ -135,7 +135,7 @@ impl RowModifier {
|
||||
options: None,
|
||||
});
|
||||
for iter in iter.iter_mut() {
|
||||
let (table_id, tsid) = self.fill_internal_columns(table_id, &iter);
|
||||
let (table_id, tsid) = Self::fill_internal_columns(table_id, &iter);
|
||||
iter.row.values.push(table_id);
|
||||
iter.row.values.push(tsid);
|
||||
}
|
||||
@@ -144,7 +144,7 @@ impl RowModifier {
|
||||
}
|
||||
|
||||
/// Fills internal columns of a row with table name and a hash of tag values.
|
||||
fn fill_internal_columns(&self, table_id: TableId, iter: &RowIter<'_>) -> (Value, Value) {
|
||||
pub fn fill_internal_columns(table_id: TableId, iter: &RowIter<'_>) -> (Value, Value) {
|
||||
let mut hasher = TsidGenerator::default();
|
||||
for (name, value) in iter.primary_keys_with_name() {
|
||||
// The type is checked before. So only null is ignored.
|
||||
@@ -264,7 +264,7 @@ impl IterIndex {
|
||||
}
|
||||
|
||||
/// Iterator of rows.
|
||||
pub(crate) struct RowsIter {
|
||||
pub struct RowsIter {
|
||||
rows: Rows,
|
||||
index: IterIndex,
|
||||
}
|
||||
@@ -276,7 +276,7 @@ impl RowsIter {
|
||||
}
|
||||
|
||||
/// Returns the iterator of rows.
|
||||
fn iter_mut(&mut self) -> impl Iterator<Item = RowIter> {
|
||||
pub fn iter_mut(&mut self) -> impl Iterator<Item = RowIter> {
|
||||
self.rows.rows.iter_mut().map(|row| RowIter {
|
||||
row,
|
||||
index: &self.index,
|
||||
@@ -290,10 +290,22 @@ impl RowsIter {
|
||||
.iter()
|
||||
.map(|idx| std::mem::take(&mut self.rows.schema[idx.index]))
|
||||
}
|
||||
|
||||
pub fn num_rows(&self) -> usize {
|
||||
self.rows.rows.len()
|
||||
}
|
||||
|
||||
pub fn num_columns(&self) -> usize {
|
||||
self.rows.schema.len()
|
||||
}
|
||||
|
||||
pub fn num_primary_keys(&self) -> usize {
|
||||
self.index.num_primary_key_column
|
||||
}
|
||||
}
|
||||
|
||||
/// Iterator of a row.
|
||||
struct RowIter<'a> {
|
||||
pub struct RowIter<'a> {
|
||||
row: &'a mut Row,
|
||||
index: &'a IterIndex,
|
||||
schema: &'a Vec<ColumnSchema>,
|
||||
@@ -313,7 +325,7 @@ impl RowIter<'_> {
|
||||
}
|
||||
|
||||
/// Returns the primary keys.
|
||||
fn primary_keys(&self) -> impl Iterator<Item = (ColumnId, ValueRef)> {
|
||||
pub fn primary_keys(&self) -> impl Iterator<Item = (ColumnId, ValueRef)> {
|
||||
self.index.indices[..self.index.num_primary_key_column]
|
||||
.iter()
|
||||
.map(|idx| {
|
||||
@@ -333,6 +345,13 @@ impl RowIter<'_> {
|
||||
.iter()
|
||||
.map(|idx| std::mem::take(&mut self.row.values[idx.index]))
|
||||
}
|
||||
|
||||
/// Returns the value at the given offset.
/// # Panics
/// Panics if the offset is out of bounds.
|
||||
pub fn value_at(&self, idx: usize) -> &Value {
|
||||
&self.row.values[idx]
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
@@ -476,7 +495,6 @@ mod tests {
|
||||
#[test]
|
||||
fn test_fill_internal_columns() {
|
||||
let name_to_column_id = test_name_to_column_id();
|
||||
let encoder = RowModifier::new();
|
||||
let table_id = 1025;
|
||||
let schema = test_schema();
|
||||
let row = test_row("greptimedb", "127.0.0.1");
|
||||
@@ -486,7 +504,7 @@ mod tests {
|
||||
};
|
||||
let mut rows_iter = RowsIter::new(rows, &name_to_column_id);
|
||||
let row_iter = rows_iter.iter_mut().next().unwrap();
|
||||
let (encoded_table_id, tsid) = encoder.fill_internal_columns(table_id, &row_iter);
|
||||
let (encoded_table_id, tsid) = RowModifier::fill_internal_columns(table_id, &row_iter);
|
||||
assert_eq!(encoded_table_id, ValueData::U32Value(1025).into());
|
||||
assert_eq!(tsid, ValueData::U64Value(9442261431637846000).into());
|
||||
|
||||
@@ -514,7 +532,7 @@ mod tests {
|
||||
};
|
||||
let mut rows_iter = RowsIter::new(rows, &name_to_column_id);
|
||||
let row_iter = rows_iter.iter_mut().next().unwrap();
|
||||
let (encoded_table_id, tsid) = encoder.fill_internal_columns(table_id, &row_iter);
|
||||
let (encoded_table_id, tsid) = RowModifier::fill_internal_columns(table_id, &row_iter);
|
||||
assert_eq!(encoded_table_id, ValueData::U32Value(1025).into());
|
||||
assert_eq!(tsid, ValueData::U64Value(9442261431637846000).into());
|
||||
}
|
||||
|
||||
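The hunks above turn `fill_internal_columns` into an associated function and make `RowsIter`/`RowIter` public, with the new `num_rows`/`num_columns`/`num_primary_keys`/`value_at` helpers. The doc comment describes the tsid as a hash over the table and tag values; the snippet below is a self-contained sketch of that idea only — `DefaultHasher` stands in for the real `TsidGenerator`, and the function and tag names are invented for illustration.

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

/// Illustration only: derive a stable series id ("tsid") from tag values, in the
/// spirit of fill_internal_columns. The real code uses TsidGenerator, not DefaultHasher.
fn sketch_tsid(tags: &[(&str, &str)]) -> u64 {
    let mut hasher = DefaultHasher::new();
    for (name, value) in tags {
        // The real code skips null tag values ("only null is ignored"); here every pair is hashed.
        name.hash(&mut hasher);
        value.hash(&mut hasher);
    }
    hasher.finish()
}

fn main() {
    let a = sketch_tsid(&[("table", "greptimedb"), ("host", "127.0.0.1")]);
    let b = sketch_tsid(&[("table", "greptimedb"), ("host", "127.0.0.1")]);
    // Identical tag sets map to the same tsid, so rows of one series always get
    // the same internal columns appended.
    assert_eq!(a, b);
}
```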
@@ -59,7 +59,7 @@ impl TestEnv {

/// Returns a new env with specific `prefix` and `config` for test.
pub async fn with_prefix_and_config(prefix: &str, config: EngineConfig) -> Self {
- let mut mito_env = MitoTestEnv::with_prefix(prefix).await;
+ let mut mito_env = MitoTestEnv::with_prefix(prefix);
let mito = mito_env.create_engine(MitoConfig::default()).await;
let metric = MetricEngine::try_new(mito.clone(), config).unwrap();
Self {

@@ -16,7 +16,7 @@ use std::sync::Arc;

use object_store::services::Fs;
use object_store::util::{join_dir, with_instrument_layers};
- use object_store::{ErrorKind, ObjectStore};
+ use object_store::{ErrorKind, ObjectStore, ATOMIC_WRITE_DIR, OLD_ATOMIC_WRITE_DIR};
use smallvec::SmallVec;
use snafu::ResultExt;
use store_api::metadata::RegionMetadataRef;

@@ -42,10 +42,6 @@ pub type AccessLayerRef = Arc<AccessLayer>;
/// SST write results.
pub type SstInfoArray = SmallVec<[SstInfo; 2]>;

- pub const ATOMIC_WRITE_DIR: &str = "tmp/";
- /// For compatibility. Remove this after a major version release.
- pub const OLD_ATOMIC_WRITE_DIR: &str = ".tmp/";

/// A layer to access SST files under the same directory.
pub struct AccessLayer {
region_dir: String,
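Here the `ATOMIC_WRITE_DIR`/`OLD_ATOMIC_WRITE_DIR` constants leave `access_layer.rs`; the updated import pulls them from the `object-store` crate instead, which matches the new `object-store` dependencies elsewhere in this compare. Below is a self-contained sketch of the kind of staging-directory cleanup these constants exist for, using only the literal values shown above; the function and paths are illustrative, not the crate's actual cleanup code.

```rust
use std::fs;
use std::io;
use std::path::Path;

// Literal values from the diff; the real constants now live in the object-store crate.
const ATOMIC_WRITE_DIR: &str = "tmp/";
const OLD_ATOMIC_WRITE_DIR: &str = ".tmp/"; // kept only for compatibility

/// Illustration only: remove leftover staging directories under a cache root so
/// half-written files from a previous run do not linger.
fn clean_atomic_write_dirs(cache_root: &Path) -> io::Result<()> {
    for dir in [ATOMIC_WRITE_DIR, OLD_ATOMIC_WRITE_DIR] {
        let path = cache_root.join(dir);
        if path.exists() {
            fs::remove_dir_all(&path)?;
        }
    }
    Ok(())
}

fn main() -> io::Result<()> {
    clean_atomic_write_dirs(Path::new("/tmp/greptimedb-write-cache-demo"))
}
```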
src/mito2/src/cache/index/inverted_index.rs

@@ -245,7 +245,7 @@ mod test {
let blob = create_inverted_index_blob().await;

// Init a test range reader in local fs.
- let mut env = TestEnv::new().await;
+ let mut env = TestEnv::new();
let file_size = blob.len() as u64;
let store = env.init_object_store_manager();
let temp_path = "data";
src/mito2/src/cache/index/result_cache.rs

@@ -31,11 +31,6 @@ use crate::sst::parquet::row_selection::RowGroupSelection;
const INDEX_RESULT_TYPE: &str = "index_result";

/// Cache for storing index query results.
- ///
- /// The `RowGroupSelection` is a collection of row groups that match the predicate.
- ///
- /// Row groups can be partially searched. Row groups that not contained in `RowGroupSelection` are not searched.
- /// User can retrieve the partial results and handle uncontained row groups required by the predicate subsequently.
pub struct IndexResultCache {
cache: Cache<(PredicateKey, FileId), Arc<RowGroupSelection>>,
}

@@ -69,8 +64,6 @@ impl IndexResultCache {
}

/// Puts a query result into the cache.
- ///
- /// Allow user to put a partial result (not containing all row groups) into the cache.
pub fn put(&self, key: PredicateKey, file_id: FileId, result: Arc<RowGroupSelection>) {
let key = (key, file_id);
let size = Self::index_result_cache_weight(&key, &result);

@@ -81,9 +74,6 @@ impl IndexResultCache {
}

/// Gets a query result from the cache.
- ///
- /// Note: the returned `RowGroupSelection` only contains the row groups that are searched.
- /// Caller should handle the uncontained row groups required by the predicate subsequently.
pub fn get(&self, key: &PredicateKey, file_id: FileId) -> Option<Arc<RowGroupSelection>> {
let res = self.cache.get(&(key.clone(), file_id));
if res.is_some() {
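The doc comments trimmed here still describe the cache's contract: an entry may cover only some of a file's row groups, so a reader takes whatever is cached, searches the uncovered row groups itself, and may write a fuller result back. Below is a small self-contained analogue of that flow; a plain `HashMap` and `HashSet` stand in for the real moka `Cache` and `RowGroupSelection`, and all names are illustrative.

```rust
use std::collections::{HashMap, HashSet};

type FileId = u64;
type PredicateKey = String;
/// Stand-in for RowGroupSelection: the row groups already searched for a predicate.
type Selection = HashSet<usize>;

/// Illustration of IndexResultCache's partial-result behaviour, not its implementation.
struct SketchResultCache {
    cache: HashMap<(PredicateKey, FileId), Selection>,
}

impl SketchResultCache {
    /// Like `get`: may return a selection that covers only part of the file.
    fn get(&self, key: &PredicateKey, file_id: FileId) -> Option<Selection> {
        self.cache.get(&(key.clone(), file_id)).cloned()
    }

    /// Like `put`: partial results are allowed.
    fn put(&mut self, key: PredicateKey, file_id: FileId, selection: Selection) {
        self.cache.insert((key, file_id), selection);
    }
}

fn main() {
    let mut cache = SketchResultCache { cache: HashMap::new() };
    cache.put("host = 'a'".to_string(), 1, HashSet::from([0, 1]));

    // A later scan needs row groups 0..4: the cached entry answers 0 and 1, and the
    // caller must still search 2 and 3 (and may cache the merged result afterwards).
    let needed: HashSet<usize> = (0..4).collect();
    let cached = cache.get(&"host = 'a'".to_string(), 1).unwrap_or_default();
    let to_search: Vec<usize> = needed.difference(&cached).copied().collect();
    assert_eq!(to_search.len(), 2);
}
```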
src/mito2/src/cache/write_cache.rs

@@ -430,9 +430,10 @@ impl UploadTracker {
#[cfg(test)]
mod tests {
use common_test_util::temp_dir::create_temp_dir;
+ use object_store::ATOMIC_WRITE_DIR;

use super::*;
- use crate::access_layer::{OperationType, ATOMIC_WRITE_DIR};
+ use crate::access_layer::OperationType;
use crate::cache::test_util::new_fs_store;
use crate::cache::{CacheManager, CacheStrategy};
use crate::error::InvalidBatchSnafu;

@@ -449,7 +450,7 @@ mod tests {
async fn test_write_and_upload_sst() {
// TODO(QuenKar): maybe find a way to create some object server for testing,
// and now just use local file system to mock.
- let mut env = TestEnv::new().await;
+ let mut env = TestEnv::new();
let mock_store = env.init_object_store_manager();
let path_provider = RegionFilePathFactory::new("test".to_string());

@@ -537,7 +538,7 @@ mod tests {
#[tokio::test]
async fn test_read_metadata_from_write_cache() {
common_telemetry::init_default_ut_logging();
- let mut env = TestEnv::new().await;
+ let mut env = TestEnv::new();
let data_home = env.data_home().display().to_string();
let mock_store = env.init_object_store_manager();

@@ -606,7 +607,7 @@ mod tests {
#[tokio::test]
async fn test_write_cache_clean_tmp_files() {
common_telemetry::init_default_ut_logging();
- let mut env = TestEnv::new().await;
+ let mut env = TestEnv::new();
let data_home = env.data_home().display().to_string();
let mock_store = env.init_object_store_manager();
@@ -80,7 +80,6 @@ use snafu::{ensure, OptionExt, ResultExt};
use store_api::codec::PrimaryKeyEncoding;
use store_api::logstore::provider::Provider;
use store_api::logstore::LogStore;
- use store_api::manifest::ManifestVersion;
use store_api::metadata::RegionMetadataRef;
use store_api::metric_engine_consts::MANIFEST_INFO_EXTENSION_KEY;
use store_api::region_engine::{

@@ -89,6 +88,7 @@ use store_api::region_engine::{
};
use store_api::region_request::{AffectedRows, RegionOpenRequest, RegionRequest};
use store_api::storage::{RegionId, ScanRequest, SequenceNumber};
+ use store_api::ManifestVersion;
use tokio::sync::{oneshot, Semaphore};

use crate::cache::CacheStrategy;

@@ -101,6 +101,7 @@ use crate::manifest::action::RegionEdit;
use crate::memtable::MemtableStats;
use crate::metrics::HANDLE_REQUEST_ELAPSED;
use crate::read::scan_region::{ScanRegion, Scanner};
+ use crate::read::stream::ScanBatchStream;
use crate::region::MitoRegionRef;
use crate::request::{RegionEditRequest, WorkerRequest};
use crate::sst::file::FileMeta;

@@ -183,6 +184,18 @@ impl MitoEngine {
.await
}

+ /// Scan [`Batch`]es by [`ScanRequest`].
+ pub async fn scan_batch(
+ &self,
+ region_id: RegionId,
+ request: ScanRequest,
+ filter_deleted: bool,
+ ) -> Result<ScanBatchStream> {
+ let mut scan_region = self.scan_region(region_id, request)?;
+ scan_region.set_filter_deleted(filter_deleted);
+ scan_region.scanner().await?.scan_batch()
+ }
+
/// Returns a scanner to scan for `request`.
async fn scanner(&self, region_id: RegionId, request: ScanRequest) -> Result<Scanner> {
self.scan_region(region_id, request)?.scanner().await
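This hunk adds a new public entry point, `MitoEngine::scan_batch`, which returns the raw `Batch` stream and lets the caller decide whether deleted rows are filtered during the scan. A hedged usage sketch follows; it assumes the crate items exactly as they appear in this hunk (`MitoEngine`, `ScanBatchStream`, `Result`), assumes `ScanRequest::default()` selects everything, and the helper name is invented for illustration.

```rust
// Sketch only: exercises the scan_batch API added above, from inside the mito2 crate.
use store_api::storage::{RegionId, ScanRequest};

use crate::engine::MitoEngine;
use crate::error::Result;
use crate::read::stream::ScanBatchStream;

/// Hypothetical helper: fetch raw batches for one region. Passing
/// `filter_deleted = false` presumably keeps deletion markers in the stream,
/// which is what a replication-style reader would want.
pub async fn scan_raw_batches(
    engine: &MitoEngine,
    region_id: RegionId,
    filter_deleted: bool,
) -> Result<ScanBatchStream> {
    engine
        .scan_batch(region_id, ScanRequest::default(), filter_deleted)
        .await
}
```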
@@ -84,14 +84,12 @@ fn alter_column_fulltext_options() -> RegionAlterRequest {
kind: AlterKind::SetIndex {
options: ApiSetIndexOptions::Fulltext {
column_name: "tag_0".to_string(),
- options: FulltextOptions::new_unchecked(
- true,
- FulltextAnalyzer::English,
- false,
- FulltextBackend::Bloom,
- 1000,
- 0.01,
- ),
+ options: FulltextOptions {
+ enable: true,
+ analyzer: FulltextAnalyzer::English,
+ case_sensitive: false,
+ backend: FulltextBackend::Bloom,
+ },
},
},
}

@@ -117,7 +115,7 @@ fn check_region_version(
async fn test_alter_region() {
common_telemetry::init_default_ut_logging();

- let mut env = TestEnv::new().await;
+ let mut env = TestEnv::new();
let engine = env.create_engine(MitoConfig::default()).await;

let region_id = RegionId::new(1, 1);

@@ -213,7 +211,7 @@ fn build_rows_for_tags(

#[tokio::test]
async fn test_put_after_alter() {
- let mut env = TestEnv::new().await;
+ let mut env = TestEnv::new();
let engine = env.create_engine(MitoConfig::default()).await;
let region_id = RegionId::new(1, 1);
let request = CreateRequestBuilder::new().build();

@@ -318,7 +316,7 @@ async fn test_put_after_alter() {
async fn test_alter_region_retry() {
common_telemetry::init_default_ut_logging();

- let mut env = TestEnv::new().await;
+ let mut env = TestEnv::new();
let engine = env.create_engine(MitoConfig::default()).await;

let region_id = RegionId::new(1, 1);

@@ -376,7 +374,7 @@ async fn test_alter_region_retry() {
async fn test_alter_on_flushing() {
common_telemetry::init_default_ut_logging();

- let mut env = TestEnv::new().await;
+ let mut env = TestEnv::new();
let listener = Arc::new(AlterFlushListener::default());
let engine = env
.create_engine_with(MitoConfig::default(), None, Some(listener.clone()))

@@ -480,7 +478,7 @@ async fn test_alter_on_flushing() {
async fn test_alter_column_fulltext_options() {
common_telemetry::init_default_ut_logging();

- let mut env = TestEnv::new().await;
+ let mut env = TestEnv::new();
let listener = Arc::new(AlterFlushListener::default());
let engine = env
.create_engine_with(MitoConfig::default(), None, Some(listener.clone()))

@@ -555,14 +553,12 @@ async fn test_alter_column_fulltext_options() {
// Wait for the write job.
alter_job.await.unwrap();

- let expect_fulltext_options = FulltextOptions::new_unchecked(
- true,
- FulltextAnalyzer::English,
- false,
- FulltextBackend::Bloom,
- 1000,
- 0.01,
- );
+ let expect_fulltext_options = FulltextOptions {
+ enable: true,
+ analyzer: FulltextAnalyzer::English,
+ case_sensitive: false,
+ backend: FulltextBackend::Bloom,
+ };
let check_fulltext_options = |engine: &MitoEngine, expected: &FulltextOptions| {
let current_fulltext_options = engine
.get_region(region_id)

@@ -601,7 +597,7 @@ async fn test_alter_column_fulltext_options() {
async fn test_alter_column_set_inverted_index() {
common_telemetry::init_default_ut_logging();

- let mut env = TestEnv::new().await;
+ let mut env = TestEnv::new();
let listener = Arc::new(AlterFlushListener::default());
let engine = env
.create_engine_with(MitoConfig::default(), None, Some(listener.clone()))

@@ -711,7 +707,7 @@ async fn test_alter_column_set_inverted_index() {
async fn test_alter_region_ttl_options() {
common_telemetry::init_default_ut_logging();

- let mut env = TestEnv::new().await;
+ let mut env = TestEnv::new();
let listener = Arc::new(AlterFlushListener::default());
let engine = env
.create_engine_with(MitoConfig::default(), None, Some(listener.clone()))

@@ -761,7 +757,7 @@ async fn test_alter_region_ttl_options() {
async fn test_write_stall_on_altering() {
common_telemetry::init_default_ut_logging();

- let mut env = TestEnv::new().await;
+ let mut env = TestEnv::new();
let listener = Arc::new(NotifyRegionChangeResultListener::default());
let engine = env
.create_engine_with(MitoConfig::default(), None, Some(listener.clone()))

@@ -31,7 +31,7 @@ use crate::test_util::{
async fn test_append_mode_write_query() {
common_telemetry::init_default_ut_logging();

- let mut env = TestEnv::new().await;
+ let mut env = TestEnv::new();
let engine = env.create_engine(MitoConfig::default()).await;

let region_id = RegionId::new(1, 1);

@@ -89,7 +89,7 @@ async fn test_append_mode_write_query() {

#[tokio::test]
async fn test_append_mode_compaction() {
- let mut env = TestEnv::new().await;
+ let mut env = TestEnv::new();
let engine = env
.create_engine(MitoConfig {
..Default::default()

@@ -42,7 +42,7 @@ use crate::test_util::{

#[tokio::test]
async fn test_engine_new_stop() {
- let mut env = TestEnv::with_prefix("engine-stop").await;
+ let mut env = TestEnv::with_prefix("engine-stop");
let engine = env.create_engine(MitoConfig::default()).await;

let region_id = RegionId::new(1, 1);

@@ -69,7 +69,7 @@ async fn test_engine_new_stop() {

#[tokio::test]
async fn test_write_to_region() {
- let mut env = TestEnv::with_prefix("write-to-region").await;
+ let mut env = TestEnv::with_prefix("write-to-region");
let engine = env.create_engine(MitoConfig::default()).await;

let region_id = RegionId::new(1, 1);

@@ -97,9 +97,7 @@ async fn test_region_replay(factory: Option<LogStoreFactory>) {
let Some(factory) = factory else {
return;
};
- let mut env = TestEnv::with_prefix("region-replay")
- .await
- .with_log_store_factory(factory.clone());
+ let mut env = TestEnv::with_prefix("region-replay").with_log_store_factory(factory.clone());
let engine = env.create_engine(MitoConfig::default()).await;

let region_id = RegionId::new(1, 1);

@@ -175,7 +173,7 @@ async fn test_region_replay(factory: Option<LogStoreFactory>) {

#[tokio::test]
async fn test_write_query_region() {
- let mut env = TestEnv::new().await;
+ let mut env = TestEnv::new();
let engine = env.create_engine(MitoConfig::default()).await;

let region_id = RegionId::new(1, 1);

@@ -209,7 +207,7 @@ async fn test_write_query_region() {

#[tokio::test]
async fn test_different_order() {
- let mut env = TestEnv::new().await;
+ let mut env = TestEnv::new();
let engine = env.create_engine(MitoConfig::default()).await;

let region_id = RegionId::new(1, 1);

@@ -270,7 +268,7 @@ async fn test_different_order() {

#[tokio::test]
async fn test_different_order_and_type() {
- let mut env = TestEnv::new().await;
+ let mut env = TestEnv::new();
let engine = env.create_engine(MitoConfig::default()).await;

let region_id = RegionId::new(1, 1);

@@ -334,7 +332,7 @@ async fn test_different_order_and_type() {
async fn test_put_delete() {
common_telemetry::init_default_ut_logging();

- let mut env = TestEnv::new().await;
+ let mut env = TestEnv::new();
let engine = env.create_engine(MitoConfig::default()).await;

let region_id = RegionId::new(1, 1);

@@ -386,7 +384,7 @@ async fn test_put_delete() {

#[tokio::test]
async fn test_delete_not_null_fields() {
- let mut env = TestEnv::new().await;
+ let mut env = TestEnv::new();
let engine = env.create_engine(MitoConfig::default()).await;

let region_id = RegionId::new(1, 1);

@@ -435,7 +433,7 @@ async fn test_delete_not_null_fields() {

#[tokio::test]
async fn test_put_overwrite() {
- let mut env = TestEnv::new().await;
+ let mut env = TestEnv::new();
let engine = env.create_engine(MitoConfig::default()).await;

let region_id = RegionId::new(1, 1);

@@ -495,7 +493,7 @@ async fn test_put_overwrite() {

#[tokio::test]
async fn test_absent_and_invalid_columns() {
- let mut env = TestEnv::new().await;
+ let mut env = TestEnv::new();
let engine = env.create_engine(MitoConfig::default()).await;

let region_id = RegionId::new(1, 1);

@@ -543,7 +541,7 @@ async fn test_absent_and_invalid_columns() {

#[tokio::test]
async fn test_region_usage() {
- let mut env = TestEnv::with_prefix("region_usage").await;
+ let mut env = TestEnv::with_prefix("region_usage");
let engine = env.create_engine(MitoConfig::default()).await;

let region_id = RegionId::new(1, 1);

@@ -597,7 +595,7 @@ async fn test_region_usage() {
async fn test_engine_with_write_cache() {
common_telemetry::init_default_ut_logging();

- let mut env = TestEnv::new().await;
+ let mut env = TestEnv::new();
let path = env.data_home().to_str().unwrap().to_string();
let mito_config = MitoConfig::default().enable_write_cache(path, ReadableSize::mb(512), None);
let engine = env.create_engine(mito_config).await;

@@ -637,7 +635,7 @@ async fn test_engine_with_write_cache() {

#[tokio::test]
async fn test_cache_null_primary_key() {
- let mut env = TestEnv::new().await;
+ let mut env = TestEnv::new();
let engine = env
.create_engine(MitoConfig {
vector_cache_size: ReadableSize::mb(32),

@@ -39,9 +39,8 @@ async fn test_batch_open(factory: Option<LogStoreFactory>) {
let Some(factory) = factory else {
return;
};
- let mut env = TestEnv::with_prefix("open-batch-regions")
- .await
- .with_log_store_factory(factory.clone());
+ let mut env =
+ TestEnv::with_prefix("open-batch-regions").with_log_store_factory(factory.clone());
let engine = env.create_engine(MitoConfig::default()).await;
let topic = prepare_test_for_kafka_log_store(&factory).await;

@@ -161,9 +160,8 @@ async fn test_batch_open_err(factory: Option<LogStoreFactory>) {
let Some(factory) = factory else {
return;
};
- let mut env = TestEnv::with_prefix("open-batch-regions-err")
- .await
- .with_log_store_factory(factory.clone());
+ let mut env =
+ TestEnv::with_prefix("open-batch-regions-err").with_log_store_factory(factory.clone());
let engine = env.create_engine(MitoConfig::default()).await;
let topic = prepare_test_for_kafka_log_store(&factory).await;
let mut options = HashMap::new();

@@ -57,9 +57,7 @@ async fn test_catchup_with_last_entry_id(factory: Option<LogStoreFactory>) {
return;
};

- let mut env = TestEnv::with_prefix("last_entry_id")
- .await
- .with_log_store_factory(factory.clone());
+ let mut env = TestEnv::with_prefix("last_entry_id").with_log_store_factory(factory.clone());
let topic = prepare_test_for_kafka_log_store(&factory).await;
let leader_engine = env.create_engine(MitoConfig::default()).await;
let follower_engine = env.create_follower_engine(MitoConfig::default()).await;

@@ -177,9 +175,8 @@ async fn test_catchup_with_incorrect_last_entry_id(factory: Option<LogStoreFacto
return;
};

- let mut env = TestEnv::with_prefix("incorrect_last_entry_id")
- .await
- .with_log_store_factory(factory.clone());
+ let mut env =
+ TestEnv::with_prefix("incorrect_last_entry_id").with_log_store_factory(factory.clone());
let topic = prepare_test_for_kafka_log_store(&factory).await;
let leader_engine = env.create_engine(MitoConfig::default()).await;
let follower_engine = env.create_follower_engine(MitoConfig::default()).await;

@@ -280,9 +277,8 @@ async fn test_catchup_without_last_entry_id(factory: Option<LogStoreFactory>) {
return;
};

- let mut env = TestEnv::with_prefix("without_last_entry_id")
- .await
- .with_log_store_factory(factory.clone());
+ let mut env =
+ TestEnv::with_prefix("without_last_entry_id").with_log_store_factory(factory.clone());
let topic = prepare_test_for_kafka_log_store(&factory).await;
let leader_engine = env.create_engine(MitoConfig::default()).await;
let follower_engine = env.create_follower_engine(MitoConfig::default()).await;

@@ -384,9 +380,8 @@ async fn test_catchup_with_manifest_update(factory: Option<LogStoreFactory>) {
return;
};

- let mut env = TestEnv::with_prefix("without_manifest_update")
- .await
- .with_log_store_factory(factory.clone());
+ let mut env =
+ TestEnv::with_prefix("without_manifest_update").with_log_store_factory(factory.clone());
let topic = prepare_test_for_kafka_log_store(&factory).await;
let leader_engine = env.create_engine(MitoConfig::default()).await;
let follower_engine = env.create_follower_engine(MitoConfig::default()).await;

@@ -550,9 +545,7 @@ async fn test_local_catchup(factory: Option<LogStoreFactory>) {
return;
};

- let mut env = TestEnv::with_prefix("local_catchup")
- .await
- .with_log_store_factory(factory.clone());
+ let mut env = TestEnv::with_prefix("local_catchup").with_log_store_factory(factory.clone());
let leader_engine = env.create_engine(MitoConfig::default()).await;
let Some(LogStoreImpl::RaftEngine(log_store)) = env.get_log_store() else {
unreachable!()

@@ -693,7 +686,7 @@ async fn test_local_catchup(factory: Option<LogStoreFactory>) {

#[tokio::test]
async fn test_catchup_not_exist() {
- let mut env = TestEnv::new().await;
+ let mut env = TestEnv::new();
let engine = env.create_engine(MitoConfig::default()).await;

let non_exist_region_id = RegionId::new(1, 1);

@@ -21,7 +21,7 @@ use crate::test_util::{CreateRequestBuilder, TestEnv};

#[tokio::test]
async fn test_engine_close_region() {
- let mut env = TestEnv::with_prefix("close").await;
+ let mut env = TestEnv::with_prefix("close");
let engine = env.create_engine(MitoConfig::default()).await;

let region_id = RegionId::new(1, 1);

@@ -136,7 +136,7 @@ async fn collect_stream_ts(stream: SendableRecordBatchStream) -> Vec<i64> {
#[tokio::test]
async fn test_compaction_region() {
common_telemetry::init_default_ut_logging();
- let mut env = TestEnv::new().await;
+ let mut env = TestEnv::new();
let engine = env.create_engine(MitoConfig::default()).await;

let region_id = RegionId::new(1, 1);

@@ -202,7 +202,7 @@ async fn test_compaction_region() {
#[tokio::test]
async fn test_infer_compaction_time_window() {
common_telemetry::init_default_ut_logging();
- let mut env = TestEnv::new().await;
+ let mut env = TestEnv::new();
let engine = env.create_engine(MitoConfig::default()).await;

let region_id = RegionId::new(1, 1);

@@ -341,7 +341,7 @@ async fn test_infer_compaction_time_window() {
#[tokio::test]
async fn test_compaction_overlapping_files() {
common_telemetry::init_default_ut_logging();
- let mut env = TestEnv::new().await;
+ let mut env = TestEnv::new();
let engine = env.create_engine(MitoConfig::default()).await;

let region_id = RegionId::new(1, 1);

@@ -402,7 +402,7 @@ async fn test_compaction_overlapping_files() {
#[tokio::test]
async fn test_compaction_region_with_overlapping() {
common_telemetry::init_default_ut_logging();
- let mut env = TestEnv::new().await;
+ let mut env = TestEnv::new();
let engine = env.create_engine(MitoConfig::default()).await;
let region_id = RegionId::new(1, 1);

@@ -450,7 +450,7 @@ async fn test_compaction_region_with_overlapping() {
#[tokio::test]
async fn test_compaction_region_with_overlapping_delete_all() {
common_telemetry::init_default_ut_logging();
- let mut env = TestEnv::new().await;
+ let mut env = TestEnv::new();
let engine = env.create_engine(MitoConfig::default()).await;

let region_id = RegionId::new(1, 1);

@@ -506,7 +506,7 @@ async fn test_compaction_region_with_overlapping_delete_all() {
#[tokio::test]
async fn test_readonly_during_compaction() {
common_telemetry::init_default_ut_logging();
- let mut env = TestEnv::new().await;
+ let mut env = TestEnv::new();
let listener = Arc::new(CompactionListener::default());
let engine = env
.create_engine_with(

@@ -590,7 +590,7 @@ async fn test_readonly_during_compaction() {
#[tokio::test]
async fn test_compaction_update_time_window() {
common_telemetry::init_default_ut_logging();
- let mut env = TestEnv::new().await;
+ let mut env = TestEnv::new();
let engine = env.create_engine(MitoConfig::default()).await;

let region_id = RegionId::new(1, 1);

@@ -686,7 +686,7 @@ async fn test_compaction_update_time_window() {
#[tokio::test]
async fn test_change_region_compaction_window() {
common_telemetry::init_default_ut_logging();
- let mut env = TestEnv::new().await;
+ let mut env = TestEnv::new();
let engine = env.create_engine(MitoConfig::default()).await;

let region_id = RegionId::new(1, 1);

@@ -811,7 +811,7 @@ async fn test_change_region_compaction_window() {
#[tokio::test]
async fn test_open_overwrite_compaction_window() {
common_telemetry::init_default_ut_logging();
- let mut env = TestEnv::new().await;
+ let mut env = TestEnv::new();
let engine = env.create_engine(MitoConfig::default()).await;

let region_id = RegionId::new(1, 1);

@@ -26,7 +26,7 @@ use crate::test_util::{build_rows, put_rows, rows_schema, CreateRequestBuilder,

#[tokio::test]
async fn test_engine_create_new_region() {
- let mut env = TestEnv::with_prefix("new-region").await;
+ let mut env = TestEnv::with_prefix("new-region");
let engine = env.create_engine(MitoConfig::default()).await;

let region_id = RegionId::new(1, 1);

@@ -41,7 +41,7 @@ async fn test_engine_create_new_region() {

#[tokio::test]
async fn test_engine_create_existing_region() {
- let mut env = TestEnv::with_prefix("create-existing").await;
+ let mut env = TestEnv::with_prefix("create-existing");
let engine = env.create_engine(MitoConfig::default()).await;

let region_id = RegionId::new(1, 1);

@@ -61,7 +61,7 @@ async fn test_engine_create_existing_region() {
#[tokio::test]
async fn test_engine_create_close_create_region() {
// This test will trigger create_or_open function.
- let mut env = TestEnv::with_prefix("create-close-create").await;
+ let mut env = TestEnv::with_prefix("create-close-create");
let engine = env.create_engine(MitoConfig::default()).await;

let region_id = RegionId::new(1, 1);

@@ -91,7 +91,7 @@ async fn test_engine_create_close_create_region() {

#[tokio::test]
async fn test_engine_create_with_different_id() {
- let mut env = TestEnv::new().await;
+ let mut env = TestEnv::new();
let engine = env.create_engine(MitoConfig::default()).await;

let region_id = RegionId::new(1, 1);

@@ -110,7 +110,7 @@ async fn test_engine_create_with_different_id() {

#[tokio::test]
async fn test_engine_create_with_different_schema() {
- let mut env = TestEnv::new().await;
+ let mut env = TestEnv::new();
let engine = env.create_engine(MitoConfig::default()).await;

let region_id = RegionId::new(1, 1);

@@ -130,7 +130,7 @@ async fn test_engine_create_with_different_schema() {

#[tokio::test]
async fn test_engine_create_with_different_primary_key() {
- let mut env = TestEnv::new().await;
+ let mut env = TestEnv::new();
let engine = env.create_engine(MitoConfig::default()).await;

let region_id = RegionId::new(1, 1);

@@ -150,7 +150,7 @@ async fn test_engine_create_with_different_primary_key() {

#[tokio::test]
async fn test_engine_create_with_options() {
- let mut env = TestEnv::new().await;
+ let mut env = TestEnv::new();
let engine = env.create_engine(MitoConfig::default()).await;

let region_id = RegionId::new(1, 1);

@@ -172,7 +172,7 @@ async fn test_engine_create_with_options() {

#[tokio::test]
async fn test_engine_create_with_custom_store() {
- let mut env = TestEnv::new().await;
+ let mut env = TestEnv::new();
let engine = env
.create_engine_with_multiple_object_stores(MitoConfig::default(), None, None, &["Gcs"])
.await;

@@ -204,7 +204,7 @@ async fn test_engine_create_with_custom_store() {

#[tokio::test]
async fn test_engine_create_with_memtable_opts() {
- let mut env = TestEnv::new().await;
+ let mut env = TestEnv::new();
let engine = env.create_engine(MitoConfig::default()).await;

let region_id = RegionId::new(1, 1);

@@ -35,7 +35,7 @@ use crate::worker::DROPPING_MARKER_FILE;
async fn test_engine_drop_region() {
common_telemetry::init_default_ut_logging();

- let mut env = TestEnv::with_prefix("drop").await;
+ let mut env = TestEnv::with_prefix("drop");
let listener = Arc::new(DropListener::new(Duration::from_millis(100)));
let engine = env
.create_engine_with(MitoConfig::default(), None, Some(listener.clone()))

@@ -143,7 +143,7 @@ async fn test_engine_drop_region_for_custom_store() {
put_rows(engine, region_id, rows).await;
flush_region(engine, region_id, None).await;
}
- let mut env = TestEnv::with_prefix("drop").await;
+ let mut env = TestEnv::with_prefix("drop");
let listener = Arc::new(DropListener::new(Duration::from_millis(100)));
let engine = env
.create_engine_with_multiple_object_stores(
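Every test hunk from the write-cache tests down to here is the same mechanical rewrite: `TestEnv::new()` / `TestEnv::with_prefix(..)` stop being `async`, so only engine creation is awaited, and any log-store factory is chained directly onto the constructor. A sketch of the resulting setup shape, assuming the mito2 test utilities exactly as they appear in these hunks:

```rust
// Sketch only: the post-diff test setup pattern, using the TestEnv helpers
// from mito2's test_util as shown in the hunks above.
#[tokio::test]
async fn sketch_engine_setup() {
    // Construction is now synchronous...
    let mut env = TestEnv::with_prefix("sketch");
    // ...and only engine creation still awaits.
    let engine = env.create_engine(MitoConfig::default()).await;
    let _ = engine;
}
```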
Some files were not shown because too many files have changed in this diff.