Mirror of https://github.com/GreptimeTeam/greptimedb.git, synced 2025-12-26 16:10:02 +00:00

Compare commits: feat/bulk-... v0.15.0 (53 commits)
| SHA1 |
|---|
| 8612bb066f |
| 467593d329 |
| 9e4ae070b2 |
| d8261dda51 |
| 7ab9b335a1 |
| 60835afb47 |
| aba5bf7431 |
| 7897fe8dbe |
| cc8ec706a1 |
| 7c688718db |
| 8a0e554e5a |
| 80fae1c559 |
| c37c4df20d |
| f712c1b356 |
| 7cd6be41ce |
| 15616d0c43 |
| b43e315c67 |
| 36ab1ceef7 |
| 3fb1b726c6 |
| c423bb31fe |
| e026f766d2 |
| 9d08f2532a |
| e072726ea8 |
| e78c3e1eaa |
| 89e3c8edab |
| d4826b998d |
| d9faa5c801 |
| 12c3a3205b |
| 5231505021 |
| 6ece560f8c |
| 2ab08a8f93 |
| 086ae9cdcd |
| 6da8e00243 |
| 4b04c402b6 |
| a59b6c36d2 |
| f6ce6fe385 |
| 4d4bfb7d8b |
| 6e1e8f19e6 |
| 49cb4da6d2 |
| 0d0236ddab |
| f8edb53b30 |
| 438791b3e4 |
| 50e4c916e7 |
| 16e7f7b64b |
| 53c4fd478e |
| ecbbd2fbdb |
| 3e3a12385c |
| 079daf5db9 |
| 56b9ab5279 |
| be4e0d589e |
| 2a3445c72c |
| 9d997d593c |
| 10bf9b11f6 |
15 .github/labeler.yaml vendored Normal file

@@ -0,0 +1,15 @@
+ci:
+- changed-files:
+  - any-glob-to-any-file: .github/**
+
+docker:
+- changed-files:
+  - any-glob-to-any-file: docker/**
+
+documentation:
+- changed-files:
+  - any-glob-to-any-file: docs/**
+
+dashboard:
+- changed-files:
+  - any-glob-to-any-file: grafana/**
42 .github/workflows/pr-labeling.yaml vendored Normal file

@@ -0,0 +1,42 @@
+name: 'PR Labeling'
+
+on:
+  pull_request_target:
+    types:
+      - opened
+      - synchronize
+      - reopened
+
+permissions:
+  contents: read
+  pull-requests: write
+  issues: write
+
+jobs:
+  labeler:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout sources
+        uses: actions/checkout@v4
+
+      - uses: actions/labeler@v5
+        with:
+          configuration-path: ".github/labeler.yaml"
+          repo-token: "${{ secrets.GITHUB_TOKEN }}"
+
+  size-label:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: pascalgn/size-label-action@v0.5.5
+        env:
+          GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
+        with:
+          sizes: >
+            {
+              "0": "XS",
+              "100": "S",
+              "300": "M",
+              "1000": "L",
+              "1500": "XL",
+              "2000": "XXL"
+            }
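The `sizes` map is a set of lower bounds on changed lines: a PR gets the label of the largest threshold it reaches. A minimal Rust sketch of that lookup, assuming inclusive lower bounds as our reading of pascalgn/size-label-action's behavior (the function name is ours):

```rust
/// Map a changed-line count to a size label using inclusive lower bounds.
/// The thresholds mirror the `sizes` map in the workflow above.
fn size_label(changed_lines: u64) -> &'static str {
    // (lower bound, label), ordered ascending as in the workflow config.
    const SIZES: &[(u64, &str)] = &[
        (0, "XS"),
        (100, "S"),
        (300, "M"),
        (1000, "L"),
        (1500, "XL"),
        (2000, "XXL"),
    ];
    SIZES
        .iter()
        .rev()
        .find(|&&(min, _)| changed_lines >= min)
        .map(|&(_, label)| label)
        .unwrap_or("XS")
}

fn main() {
    assert_eq!(size_label(42), "XS");
    assert_eq!(size_label(100), "S");
    assert_eq!(size_label(1200), "L");
    println!("ok");
}
```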
27 Cargo.lock generated

@@ -1670,9 +1670,9 @@ dependencies = [

 [[package]]
 name = "cc"
-version = "1.1.24"
+version = "1.2.27"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "812acba72f0a070b003d3697490d2b55b837230ae7c6c6497f05cc2ddbb8d938"
+checksum = "d487aa071b5f64da6f19a3e848e3578944b726ee5a4854b82172f02aa876bfdc"
 dependencies = [
  "jobserver",
  "libc",
@@ -1950,6 +1950,7 @@ checksum = "1462739cb27611015575c0c11df5df7601141071f07518d56fcc1be504cbec97"
 name = "cli"
 version = "0.15.0"
 dependencies = [
+ "async-stream",
  "async-trait",
  "auth",
  "base64 0.22.1",
@@ -1982,6 +1983,7 @@
  "meta-srv",
  "nu-ansi-term",
  "object-store",
  "operator",
  "query",
+ "rand 0.9.0",
  "reqwest",
@@ -2669,7 +2671,6 @@ dependencies = [
 name = "common-telemetry"
 version = "0.15.0"
 dependencies = [
- "atty",
  "backtrace",
  "common-error",
  "console-subscriber",
@@ -3070,9 +3071,9 @@ dependencies = [

 [[package]]
 name = "crossbeam-channel"
-version = "0.5.13"
+version = "0.5.15"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "33480d6946193aa8033910124896ca395333cae7e2d1113d1fef6c3272217df2"
+checksum = "82b8f8f868b36967f9606790d1903570de9ceaf870a7bf9fbbd3016d636a2cb2"
 dependencies = [
  "crossbeam-utils",
 ]
@@ -4698,6 +4699,7 @@ version = "0.15.0"
 dependencies = [
  "api",
  "arc-swap",
+ "async-stream",
  "async-trait",
  "auth",
  "bytes",
@@ -5143,7 +5145,7 @@ dependencies = [
 [[package]]
 name = "greptime-proto"
 version = "0.1.0"
-source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=17971523673f4fbc982510d3c9d6647ff642e16f#17971523673f4fbc982510d3c9d6647ff642e16f"
+source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=96c733f8472284d3c83a4c011dc6de9cf830c353#96c733f8472284d3c83a4c011dc6de9cf830c353"
 dependencies = [
  "prost 0.13.5",
  "serde",
@@ -6694,7 +6696,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "4979f22fdb869068da03c9f7528f8297c6fd2606bc3a4affe42e6a823fdb8da4"
 dependencies = [
  "cfg-if",
- "windows-targets 0.52.6",
+ "windows-targets 0.48.5",
 ]

 [[package]]
@@ -9567,7 +9569,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "be769465445e8c1474e9c5dac2018218498557af32d9ed057325ec9a41ae81bf"
 dependencies = [
  "heck 0.5.0",
- "itertools 0.14.0",
+ "itertools 0.11.0",
  "log",
  "multimap",
  "once_cell",
@@ -9613,7 +9615,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "8a56d757972c98b346a9b766e3f02746cde6dd1cd1d1d563472929fdd74bec4d"
 dependencies = [
  "anyhow",
- "itertools 0.14.0",
+ "itertools 0.11.0",
  "proc-macro2",
  "quote",
  "syn 2.0.100",
@@ -10388,15 +10390,14 @@ dependencies = [

 [[package]]
 name = "ring"
-version = "0.17.8"
+version = "0.17.14"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c17fa4cb658e3583423e915b9f3acc01cceaee1860e33d59ebae66adc3a2dc0d"
+checksum = "a4689e6c2294d81e88dc6261c768b63bc4fcdb852be6d1352498b114f61383b7"
 dependencies = [
  "cc",
  "cfg-if",
  "getrandom 0.2.15",
  "libc",
  "spin",
  "untrusted",
- "windows-sys 0.52.0",
 ]
@@ -14183,7 +14184,7 @@ version = "0.1.9"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb"
 dependencies = [
- "windows-sys 0.59.0",
+ "windows-sys 0.48.0",
 ]

 [[package]]
@@ -134,7 +134,7 @@ etcd-client = "0.14"
 fst = "0.4.7"
 futures = "0.3"
 futures-util = "0.3"
-greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "17971523673f4fbc982510d3c9d6647ff642e16f" }
+greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "96c733f8472284d3c83a4c011dc6de9cf830c353" }
 hex = "0.4"
 http = "1"
 humantime = "2.1"
@@ -189,7 +189,8 @@ We invite you to engage and contribute!
 - [Official Website](https://greptime.com/)
 - [Blog](https://greptime.com/blogs/)
 - [LinkedIn](https://www.linkedin.com/company/greptime/)
-- [Twitter](https://twitter.com/greptime)
+- [X (Twitter)](https://X.com/greptime)
 - [YouTube](https://www.youtube.com/@greptime)

 ## License
@@ -123,6 +123,7 @@
 | `storage.http_client.connect_timeout` | String | `30s` | The timeout for only the connect phase of a http client. |
 | `storage.http_client.timeout` | String | `30s` | The total request timeout, applied from when the request starts connecting until the response body has finished.<br/>Also considered a total deadline. |
 | `storage.http_client.pool_idle_timeout` | String | `90s` | The timeout for idle sockets being kept-alive. |
+| `storage.http_client.skip_ssl_validation` | Bool | `false` | To skip the ssl verification<br/>**Security Notice**: Setting `skip_ssl_validation = true` disables certificate verification, making connections vulnerable to man-in-the-middle attacks. Only use this in development or trusted private networks. |
 | `[[region_engine]]` | -- | -- | The region engine options. You can configure multiple region engines. |
 | `region_engine.mito` | -- | -- | The Mito engine options. |
 | `region_engine.mito.num_workers` | Integer | `8` | Number of region workers. |
@@ -471,6 +472,7 @@
 | `storage.http_client.connect_timeout` | String | `30s` | The timeout for only the connect phase of a http client. |
 | `storage.http_client.timeout` | String | `30s` | The total request timeout, applied from when the request starts connecting until the response body has finished.<br/>Also considered a total deadline. |
 | `storage.http_client.pool_idle_timeout` | String | `90s` | The timeout for idle sockets being kept-alive. |
+| `storage.http_client.skip_ssl_validation` | Bool | `false` | To skip the ssl verification<br/>**Security Notice**: Setting `skip_ssl_validation = true` disables certificate verification, making connections vulnerable to man-in-the-middle attacks. Only use this in development or trusted private networks. |
 | `[[region_engine]]` | -- | -- | The region engine options. You can configure multiple region engines. |
 | `region_engine.mito` | -- | -- | The Mito engine options. |
 | `region_engine.mito.num_workers` | Integer | `8` | Number of region workers. |
@@ -367,6 +367,10 @@ timeout = "30s"
 ## The timeout for idle sockets being kept-alive.
 pool_idle_timeout = "90s"

+## To skip the ssl verification
+## **Security Notice**: Setting `skip_ssl_validation = true` disables certificate verification, making connections vulnerable to man-in-the-middle attacks. Only use this in development or trusted private networks.
+skip_ssl_validation = false
+
 # Custom storage options
 # [[storage.providers]]
 # name = "S3"
@@ -458,6 +458,10 @@ timeout = "30s"
 ## The timeout for idle sockets being kept-alive.
 pool_idle_timeout = "90s"

+## To skip the ssl verification
+## **Security Notice**: Setting `skip_ssl_validation = true` disables certificate verification, making connections vulnerable to man-in-the-middle attacks. Only use this in development or trusted private networks.
+skip_ssl_validation = false
+
 # Custom storage options
 # [[storage.providers]]
 # name = "S3"
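The three timeout knobs map onto a typical HTTP client builder: `connect_timeout` bounds only connection establishment, `timeout` is the end-to-end deadline, and `pool_idle_timeout` controls how long kept-alive sockets linger. A hedged sketch with reqwest, our illustration of the semantics only; how GreptimeDB actually wires these options internally is an assumption here:

```rust
use std::time::Duration;

fn main() -> Result<(), reqwest::Error> {
    // Mirrors the storage.http_client options above; the builder methods
    // are reqwest's, the mapping to GreptimeDB internals is our assumption.
    let client = reqwest::Client::builder()
        // Only the TCP/TLS connect phase.
        .connect_timeout(Duration::from_secs(30))
        // Total deadline from the start of the request until the body finishes.
        .timeout(Duration::from_secs(30))
        // How long idle pooled sockets are kept alive.
        .pool_idle_timeout(Duration::from_secs(90))
        // skip_ssl_validation = true would correspond to passing true here;
        // leave it off outside development or trusted private networks.
        .danger_accept_invalid_certs(false)
        .build()?;
    let _ = client;
    Ok(())
}
```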
File diff suppressed because it is too large
@@ -70,6 +70,7 @@
 | Inflight Flush | `greptime_mito_inflight_flush_count` | `timeseries` | Ongoing flush task count | `prometheus` | `none` | `[{{instance}}]-[{{pod}}]` |
 | Compaction Input/Output Bytes | `sum by(instance, pod) (greptime_mito_compaction_input_bytes)`<br/>`sum by(instance, pod) (greptime_mito_compaction_output_bytes)` | `timeseries` | Compaction input/output bytes | `prometheus` | `bytes` | `[{{instance}}]-[{{pod}}]-input` |
 | Region Worker Handle Bulk Insert Requests | `histogram_quantile(0.95, sum by(le,instance, stage, pod) (rate(greptime_region_worker_handle_write_bucket[$__rate_interval])))`<br/>`sum by(instance, stage, pod) (rate(greptime_region_worker_handle_write_sum[$__rate_interval]))/sum by(instance, stage, pod) (rate(greptime_region_worker_handle_write_count[$__rate_interval]))` | `timeseries` | Per-stage elapsed time for region worker to handle bulk insert region requests. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-P95` |
+| Active Series and Field Builders Count | `sum by(instance, pod) (greptime_mito_memtable_active_series_count)`<br/>`sum by(instance, pod) (greptime_mito_memtable_field_builder_count)` | `timeseries` | Compaction input/output bytes | `prometheus` | `none` | `[{{instance}}]-[{{pod}}]-series` |
 | Region Worker Convert Requests | `histogram_quantile(0.95, sum by(le, instance, stage, pod) (rate(greptime_datanode_convert_region_request_bucket[$__rate_interval])))`<br/>`sum by(le,instance, stage, pod) (rate(greptime_datanode_convert_region_request_sum[$__rate_interval]))/sum by(le,instance, stage, pod) (rate(greptime_datanode_convert_region_request_count[$__rate_interval]))` | `timeseries` | Per-stage elapsed time for region worker to decode requests. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-P95` |

 # OpenDAL

 | Title | Query | Type | Description | Datasource | Unit | Legend Format |
@@ -612,6 +612,21 @@ groups:
         type: prometheus
         uid: ${metrics}
       legendFormat: '[{{instance}}]-[{{pod}}]-[{{stage}}]-AVG'
+  - title: Active Series and Field Builders Count
+    type: timeseries
+    description: Compaction input/output bytes
+    unit: none
+    queries:
+      - expr: sum by(instance, pod) (greptime_mito_memtable_active_series_count)
+        datasource:
+          type: prometheus
+          uid: ${metrics}
+        legendFormat: '[{{instance}}]-[{{pod}}]-series'
+      - expr: sum by(instance, pod) (greptime_mito_memtable_field_builder_count)
+        datasource:
+          type: prometheus
+          uid: ${metrics}
+        legendFormat: '[{{instance}}]-[{{pod}}]-field_builders'
   - title: Region Worker Convert Requests
     type: timeseries
     description: Per-stage elapsed time for region worker to decode requests.
File diff suppressed because it is too large
@@ -70,6 +70,7 @@
 | Inflight Flush | `greptime_mito_inflight_flush_count` | `timeseries` | Ongoing flush task count | `prometheus` | `none` | `[{{instance}}]-[{{pod}}]` |
 | Compaction Input/Output Bytes | `sum by(instance, pod) (greptime_mito_compaction_input_bytes)`<br/>`sum by(instance, pod) (greptime_mito_compaction_output_bytes)` | `timeseries` | Compaction input/output bytes | `prometheus` | `bytes` | `[{{instance}}]-[{{pod}}]-input` |
 | Region Worker Handle Bulk Insert Requests | `histogram_quantile(0.95, sum by(le,instance, stage, pod) (rate(greptime_region_worker_handle_write_bucket[$__rate_interval])))`<br/>`sum by(instance, stage, pod) (rate(greptime_region_worker_handle_write_sum[$__rate_interval]))/sum by(instance, stage, pod) (rate(greptime_region_worker_handle_write_count[$__rate_interval]))` | `timeseries` | Per-stage elapsed time for region worker to handle bulk insert region requests. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-P95` |
+| Active Series and Field Builders Count | `sum by(instance, pod) (greptime_mito_memtable_active_series_count)`<br/>`sum by(instance, pod) (greptime_mito_memtable_field_builder_count)` | `timeseries` | Compaction input/output bytes | `prometheus` | `none` | `[{{instance}}]-[{{pod}}]-series` |
 | Region Worker Convert Requests | `histogram_quantile(0.95, sum by(le, instance, stage, pod) (rate(greptime_datanode_convert_region_request_bucket[$__rate_interval])))`<br/>`sum by(le,instance, stage, pod) (rate(greptime_datanode_convert_region_request_sum[$__rate_interval]))/sum by(le,instance, stage, pod) (rate(greptime_datanode_convert_region_request_count[$__rate_interval]))` | `timeseries` | Per-stage elapsed time for region worker to decode requests. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-P95` |

 # OpenDAL

 | Title | Query | Type | Description | Datasource | Unit | Legend Format |
@@ -612,6 +612,21 @@ groups:
         type: prometheus
         uid: ${metrics}
       legendFormat: '[{{instance}}]-[{{pod}}]-[{{stage}}]-AVG'
+  - title: Active Series and Field Builders Count
+    type: timeseries
+    description: Compaction input/output bytes
+    unit: none
+    queries:
+      - expr: sum by(instance, pod) (greptime_mito_memtable_active_series_count)
+        datasource:
+          type: prometheus
+          uid: ${metrics}
+        legendFormat: '[{{instance}}]-[{{pod}}]-series'
+      - expr: sum by(instance, pod) (greptime_mito_memtable_field_builder_count)
+        datasource:
+          type: prometheus
+          uid: ${metrics}
+        legendFormat: '[{{instance}}]-[{{pod}}]-field_builders'
   - title: Region Worker Convert Requests
    type: timeseries
    description: Per-stage elapsed time for region worker to decode requests.
@@ -226,18 +226,20 @@ mod tests {
         assert!(options.is_none());

         let mut schema = ColumnSchema::new("test", ConcreteDataType::string_datatype(), true)
-            .with_fulltext_options(FulltextOptions {
-                enable: true,
-                analyzer: FulltextAnalyzer::English,
-                case_sensitive: false,
-                backend: FulltextBackend::Bloom,
-            })
+            .with_fulltext_options(FulltextOptions::new_unchecked(
+                true,
+                FulltextAnalyzer::English,
+                false,
+                FulltextBackend::Bloom,
+                10240,
+                0.01,
+            ))
             .unwrap();
         schema.set_inverted_index(true);
         let options = options_from_column_schema(&schema).unwrap();
         assert_eq!(
             options.options.get(FULLTEXT_GRPC_KEY).unwrap(),
-            "{\"enable\":true,\"analyzer\":\"English\",\"case-sensitive\":false,\"backend\":\"bloom\"}"
+            "{\"enable\":true,\"analyzer\":\"English\",\"case-sensitive\":false,\"backend\":\"bloom\",\"granularity\":10240,\"false-positive-rate-in-10000\":100}"
         );
         assert_eq!(
             options.options.get(INVERTED_INDEX_GRPC_KEY).unwrap(),
@@ -247,16 +249,18 @@ mod tests {

     #[test]
     fn test_options_with_fulltext() {
-        let fulltext = FulltextOptions {
-            enable: true,
-            analyzer: FulltextAnalyzer::English,
-            case_sensitive: false,
-            backend: FulltextBackend::Bloom,
-        };
+        let fulltext = FulltextOptions::new_unchecked(
+            true,
+            FulltextAnalyzer::English,
+            false,
+            FulltextBackend::Bloom,
+            10240,
+            0.01,
+        );
         let options = options_from_fulltext(&fulltext).unwrap().unwrap();
         assert_eq!(
             options.options.get(FULLTEXT_GRPC_KEY).unwrap(),
-            "{\"enable\":true,\"analyzer\":\"English\",\"case-sensitive\":false,\"backend\":\"bloom\"}"
+            "{\"enable\":true,\"analyzer\":\"English\",\"case-sensitive\":false,\"backend\":\"bloom\",\"granularity\":10240,\"false-positive-rate-in-10000\":100}"
         );
     }
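The serialized form encodes the bloom filter's false-positive rate as an integer per ten thousand, which is why `0.01` in `new_unchecked` shows up as `"false-positive-rate-in-10000":100` in the expected JSON. A one-line check of that conversion (the field name is from the JSON above; the rounding choice is our assumption):

```rust
fn main() {
    // 0.01 false-positive rate, expressed per 10_000 as in the serialized options.
    let rate: f64 = 0.01;
    let rate_in_10000 = (rate * 10_000.0).round() as u32;
    assert_eq!(rate_in_10000, 100);
    println!("false-positive-rate-in-10000 = {rate_in_10000}");
}
```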
@@ -22,11 +22,13 @@ use common_catalog::consts::{
     PG_CATALOG_NAME,
 };
 use common_error::ext::BoxedError;
-use common_meta::cache::{LayeredCacheRegistryRef, ViewInfoCacheRef};
+use common_meta::cache::{
+    LayeredCacheRegistryRef, TableRoute, TableRouteCacheRef, ViewInfoCacheRef,
+};
 use common_meta::key::catalog_name::CatalogNameKey;
 use common_meta::key::flow::FlowMetadataManager;
 use common_meta::key::schema_name::SchemaNameKey;
-use common_meta::key::table_info::TableInfoValue;
+use common_meta::key::table_info::{TableInfoManager, TableInfoValue};
 use common_meta::key::table_name::TableNameKey;
 use common_meta::key::{TableMetadataManager, TableMetadataManagerRef};
 use common_meta::kv_backend::KvBackendRef;
@@ -37,6 +39,7 @@ use moka::sync::Cache;
 use partition::manager::{PartitionRuleManager, PartitionRuleManagerRef};
 use session::context::{Channel, QueryContext};
 use snafu::prelude::*;
+use store_api::metric_engine_consts::METRIC_ENGINE_NAME;
 use table::dist_table::DistTable;
 use table::metadata::TableId;
 use table::table::numbers::{NumbersTable, NUMBERS_TABLE_NAME};
@@ -140,6 +143,61 @@ impl KvBackendCatalogManager {
     pub fn procedure_manager(&self) -> Option<ProcedureManagerRef> {
         self.procedure_manager.clone()
     }
+
+    // Override logical table's partition key indices with physical table's.
+    async fn override_logical_table_partition_key_indices(
+        table_route_cache: &TableRouteCacheRef,
+        table_info_manager: &TableInfoManager,
+        table: TableRef,
+    ) -> Result<TableRef> {
+        // If the table is not a metric table, return the table directly.
+        if table.table_info().meta.engine != METRIC_ENGINE_NAME {
+            return Ok(table);
+        }
+
+        if let Some(table_route_value) = table_route_cache
+            .get(table.table_info().table_id())
+            .await
+            .context(TableMetadataManagerSnafu)?
+            && let TableRoute::Logical(logical_route) = &*table_route_value
+            && let Some(physical_table_info_value) = table_info_manager
+                .get(logical_route.physical_table_id())
+                .await
+                .context(TableMetadataManagerSnafu)?
+        {
+            let mut new_table_info = (*table.table_info()).clone();
+
+            // Remap partition key indices from physical table to logical table
+            new_table_info.meta.partition_key_indices = physical_table_info_value
+                .table_info
+                .meta
+                .partition_key_indices
+                .iter()
+                .filter_map(|&physical_index| {
+                    // Get the column name from the physical table using the physical index
+                    physical_table_info_value
+                        .table_info
+                        .meta
+                        .schema
+                        .column_schemas
+                        .get(physical_index)
+                        .and_then(|physical_column| {
+                            // Find the corresponding index in the logical table schema
+                            new_table_info
+                                .meta
+                                .schema
+                                .column_index_by_name(physical_column.name.as_str())
+                        })
+                })
+                .collect();
+
+            let new_table = DistTable::table(Arc::new(new_table_info));
+
+            return Ok(new_table);
+        }
+
+        Ok(table)
+    }
 }

 #[async_trait::async_trait]
@@ -266,16 +324,28 @@ impl CatalogManager for KvBackendCatalogManager {
         let table_cache: TableCacheRef = self.cache_registry.get().context(CacheNotFoundSnafu {
             name: "table_cache",
         })?;
-        if let Some(table) = table_cache
+
+        let table = table_cache
             .get_by_ref(&TableName {
                 catalog_name: catalog_name.to_string(),
                 schema_name: schema_name.to_string(),
                 table_name: table_name.to_string(),
             })
             .await
-            .context(GetTableCacheSnafu)?
-        {
-            return Ok(Some(table));
+            .context(GetTableCacheSnafu)?;
+
+        if let Some(table) = table {
+            let table_route_cache: TableRouteCacheRef =
+                self.cache_registry.get().context(CacheNotFoundSnafu {
+                    name: "table_route_cache",
+                })?;
+            return Self::override_logical_table_partition_key_indices(
+                &table_route_cache,
+                self.table_metadata_manager.table_info_manager(),
+                table,
+            )
+            .await
+            .map(Some);
         }

         if channel == Channel::Postgres {
@@ -288,7 +358,7 @@ impl CatalogManager for KvBackendCatalogManager {
             }
         }

-        return Ok(None);
+        Ok(None)
     }

     async fn tables_by_ids(
@@ -340,8 +410,20 @@ impl CatalogManager for KvBackendCatalogManager {
         let catalog = catalog.to_string();
         let schema = schema.to_string();
         let semaphore = Arc::new(Semaphore::new(CONCURRENCY));
+        let table_route_cache: Result<TableRouteCacheRef> =
+            self.cache_registry.get().context(CacheNotFoundSnafu {
+                name: "table_route_cache",
+            });

         common_runtime::spawn_global(async move {
+            let table_route_cache = match table_route_cache {
+                Ok(table_route_cache) => table_route_cache,
+                Err(e) => {
+                    let _ = tx.send(Err(e)).await;
+                    return;
+                }
+            };
+
             let table_id_stream = metadata_manager
                 .table_name_manager()
                 .tables(&catalog, &schema)
@@ -368,6 +450,7 @@ impl CatalogManager for KvBackendCatalogManager {
                 let metadata_manager = metadata_manager.clone();
                 let tx = tx.clone();
                 let semaphore = semaphore.clone();
+                let table_route_cache = table_route_cache.clone();
                 common_runtime::spawn_global(async move {
                     // we don't explicitly close the semaphore so just ignore the potential error.
                     let _ = semaphore.acquire().await;
@@ -385,6 +468,16 @@ impl CatalogManager for KvBackendCatalogManager {
                     };

                     for table in table_info_values.into_values().map(build_table) {
+                        let table = if let Ok(table) = table {
+                            Self::override_logical_table_partition_key_indices(
+                                &table_route_cache,
+                                metadata_manager.table_info_manager(),
+                                table,
+                            )
+                            .await
+                        } else {
+                            table
+                        };
                         if tx.send(table).await.is_err() {
                             return;
                         }
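The remapping above goes physical index → column name → logical index, silently dropping partition key columns the logical table does not carry. A standalone sketch of that mapping over plain column-name lists (hypothetical data and function name, not the repo's types):

```rust
/// Remap partition key indices from a physical schema to a logical schema by
/// column name, dropping columns the logical table lacks. This mirrors the
/// filter_map in override_logical_table_partition_key_indices above.
fn remap_indices(
    physical_columns: &[&str],
    physical_partition_keys: &[usize],
    logical_columns: &[&str],
) -> Vec<usize> {
    physical_partition_keys
        .iter()
        .filter_map(|&i| {
            let name = physical_columns.get(i)?;
            logical_columns.iter().position(|c| c == name)
        })
        .collect()
}

fn main() {
    // Physical table: [ts, host, dc, value]; partition keys: host, dc.
    // The logical table only has [ts, dc, value], so `host` is dropped.
    let mapped = remap_indices(
        &["ts", "host", "dc", "value"],
        &[1, 2],
        &["ts", "dc", "value"],
    );
    assert_eq!(mapped, vec![1]);
    println!("{mapped:?}");
}
```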
@@ -14,6 +14,7 @@

 #![feature(assert_matches)]
 #![feature(try_blocks)]
+#![feature(let_chains)]

 use std::any::Any;
 use std::fmt::{Debug, Formatter};
@@ -15,13 +15,13 @@
 use std::collections::hash_map::Entry;
 use std::collections::HashMap;
 use std::fmt::{Debug, Formatter};
-use std::sync::atomic::{AtomicU64, Ordering};
+use std::sync::atomic::{AtomicU32, Ordering};
 use std::sync::{Arc, RwLock};

 use api::v1::frontend::{KillProcessRequest, ListProcessRequest, ProcessInfo};
 use common_base::cancellation::CancellationHandle;
 use common_frontend::selector::{FrontendSelector, MetaClientSelector};
-use common_telemetry::{debug, info};
+use common_telemetry::{debug, info, warn};
 use common_time::util::current_time_millis;
 use meta_client::MetaClientRef;
 use snafu::{ensure, OptionExt, ResultExt};
@@ -29,6 +29,7 @@ use snafu::{ensure, OptionExt, ResultExt};
 use crate::error;
 use crate::metrics::{PROCESS_KILL_COUNT, PROCESS_LIST_COUNT};

+pub type ProcessId = u32;
 pub type ProcessManagerRef = Arc<ProcessManager>;

 /// Query process manager.
@@ -36,9 +37,9 @@ pub struct ProcessManager {
     /// Local frontend server address,
     server_addr: String,
     /// Next process id for local queries.
-    next_id: AtomicU64,
+    next_id: AtomicU32,
     /// Running process per catalog.
-    catalogs: RwLock<HashMap<String, HashMap<u64, CancellableProcess>>>,
+    catalogs: RwLock<HashMap<String, HashMap<ProcessId, CancellableProcess>>>,
     /// Frontend selector to locate frontend nodes.
     frontend_selector: Option<MetaClientSelector>,
 }
@@ -65,9 +66,9 @@ impl ProcessManager {
         schemas: Vec<String>,
         query: String,
         client: String,
-        id: Option<u64>,
+        query_id: Option<ProcessId>,
     ) -> Ticket {
-        let id = id.unwrap_or_else(|| self.next_id.fetch_add(1, Ordering::Relaxed));
+        let id = query_id.unwrap_or_else(|| self.next_id.fetch_add(1, Ordering::Relaxed));
         let process = ProcessInfo {
             id,
             catalog: catalog.clone(),
@@ -96,12 +97,12 @@ impl ProcessManager {
     }

     /// Generates the next process id.
-    pub fn next_id(&self) -> u64 {
+    pub fn next_id(&self) -> u32 {
         self.next_id.fetch_add(1, Ordering::Relaxed)
     }

     /// De-register a query from process list.
-    pub fn deregister_query(&self, catalog: String, id: u64) {
+    pub fn deregister_query(&self, catalog: String, id: ProcessId) {
         if let Entry::Occupied(mut o) = self.catalogs.write().unwrap().entry(catalog) {
             let process = o.get_mut().remove(&id);
             debug!("Deregister process: {:?}", process);
@@ -140,14 +141,20 @@ impl ProcessManager {
                 .await
                 .context(error::InvokeFrontendSnafu)?;
             for mut f in frontends {
-                processes.extend(
-                    f.list_process(ListProcessRequest {
+                let result = f
+                    .list_process(ListProcessRequest {
                         catalog: catalog.unwrap_or_default().to_string(),
                     })
                     .await
-                    .context(error::InvokeFrontendSnafu)?
-                    .processes,
-                );
+                    .context(error::InvokeFrontendSnafu);
+                match result {
+                    Ok(resp) => {
+                        processes.extend(resp.processes);
+                    }
+                    Err(e) => {
+                        warn!(e; "Skipping failing node: {:?}", f)
+                    }
+                }
             }
         }
         processes.extend(self.local_processes(catalog)?);
@@ -159,26 +166,10 @@ impl ProcessManager {
         &self,
         server_addr: String,
         catalog: String,
-        id: u64,
+        id: ProcessId,
     ) -> error::Result<bool> {
         if server_addr == self.server_addr {
-            if let Some(catalogs) = self.catalogs.write().unwrap().get_mut(&catalog) {
-                if let Some(process) = catalogs.remove(&id) {
-                    process.handle.cancel();
-                    info!(
-                        "Killed process, catalog: {}, id: {:?}",
-                        process.process.catalog, process.process.id
-                    );
-                    PROCESS_KILL_COUNT.with_label_values(&[&catalog]).inc();
-                    Ok(true)
-                } else {
-                    debug!("Failed to kill process, id not found: {}", id);
-                    Ok(false)
-                }
-            } else {
-                debug!("Failed to kill process, catalog not found: {}", catalog);
-                Ok(false)
-            }
+            self.kill_local_process(catalog, id).await
         } else {
             let mut nodes = self
                 .frontend_selector
@@ -204,12 +195,33 @@ impl ProcessManager {
             Ok(true)
         }
     }

+    /// Kills local query with provided catalog and id.
+    pub async fn kill_local_process(&self, catalog: String, id: ProcessId) -> error::Result<bool> {
+        if let Some(catalogs) = self.catalogs.write().unwrap().get_mut(&catalog) {
+            if let Some(process) = catalogs.remove(&id) {
+                process.handle.cancel();
+                info!(
+                    "Killed process, catalog: {}, id: {:?}",
+                    process.process.catalog, process.process.id
+                );
+                PROCESS_KILL_COUNT.with_label_values(&[&catalog]).inc();
+                Ok(true)
+            } else {
+                debug!("Failed to kill process, id not found: {}", id);
+                Ok(false)
+            }
+        } else {
+            debug!("Failed to kill process, catalog not found: {}", catalog);
+            Ok(false)
+        }
+    }
 }

 pub struct Ticket {
     pub(crate) catalog: String,
     pub(crate) manager: ProcessManagerRef,
-    pub(crate) id: u64,
+    pub(crate) id: ProcessId,
     pub cancellation_handle: Arc<CancellationHandle>,
 }
@@ -323,7 +335,7 @@ mod tests {
         assert_eq!(running_processes.len(), 2);

         // Verify both processes are present
-        let ids: Vec<u64> = running_processes.iter().map(|p| p.id).collect();
+        let ids: Vec<u32> = running_processes.iter().map(|p| p.id).collect();
         assert!(ids.contains(&ticket1.id));
         assert!(ids.contains(&ticket2.id));
     }
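`ProcessId` narrows from `u64` to `u32` and is handed out by an atomic counter; `fetch_add` with `Relaxed` ordering is enough because the counter only needs to yield distinct values, not establish any happens-before relationship. A minimal sketch of the same pattern (names are ours, not the repo's):

```rust
use std::sync::atomic::{AtomicU32, Ordering};

/// Hands out ids the way ProcessManager::next_id does: a shared atomic
/// counter. Relaxed ordering suffices because callers only need unique
/// ids. The counter wraps at u32::MAX, acceptable for short-lived queries.
struct IdGen {
    next: AtomicU32,
}

impl IdGen {
    fn next_id(&self) -> u32 {
        self.next.fetch_add(1, Ordering::Relaxed)
    }
}

fn main() {
    let ids = IdGen { next: AtomicU32::new(0) };
    let a = ids.next_id();
    let b = ids.next_id();
    assert_ne!(a, b);
    println!("{a} {b}");
}
```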
@@ -16,6 +16,7 @@ mysql_kvbackend = ["common-meta/mysql_kvbackend", "meta-srv/mysql_kvbackend"]
 workspace = true

 [dependencies]
+async-stream.workspace = true
 async-trait.workspace = true
 auth.workspace = true
 base64.workspace = true
@@ -50,6 +51,7 @@ meta-client.workspace = true
 meta-srv.workspace = true
 nu-ansi-term = "0.46"
 object-store.workspace = true
 operator.workspace = true
 query.workspace = true
+rand.workspace = true
 reqwest.workspace = true
@@ -65,6 +67,7 @@ tokio.workspace = true
 tracing-appender.workspace = true

 [dev-dependencies]
+common-meta = { workspace = true, features = ["testing"] }
 common-version.workspace = true
 serde.workspace = true
 tempfile.workspace = true
@@ -17,8 +17,10 @@ use std::any::Any;
 use common_error::ext::{BoxedError, ErrorExt};
 use common_error::status_code::StatusCode;
 use common_macro::stack_trace_debug;
+use common_meta::peer::Peer;
+use object_store::Error as ObjectStoreError;
 use snafu::{Location, Snafu};
+use store_api::storage::TableId;

 #[derive(Snafu)]
 #[snafu(visibility(pub))]
@@ -73,6 +75,20 @@
         source: common_meta::error::Error,
     },

+    #[snafu(display("Failed to get table metadata"))]
+    TableMetadata {
+        #[snafu(implicit)]
+        location: Location,
+        source: common_meta::error::Error,
+    },
+
+    #[snafu(display("Unexpected error: {}", msg))]
+    Unexpected {
+        msg: String,
+        #[snafu(implicit)]
+        location: Location,
+    },
+
     #[snafu(display("Missing config, msg: {}", msg))]
     MissingConfig {
         msg: String,
@@ -222,6 +238,13 @@
         location: Location,
     },

+    #[snafu(display("Table not found: {table_id}"))]
+    TableNotFound {
+        table_id: TableId,
+        #[snafu(implicit)]
+        location: Location,
+    },
+
     #[snafu(display("OpenDAL operator failed"))]
     OpenDal {
         #[snafu(implicit)]
@@ -267,6 +290,29 @@
         #[snafu(implicit)]
         location: Location,
     },
+
+    #[snafu(display("Failed to init backend"))]
+    InitBackend {
+        #[snafu(implicit)]
+        location: Location,
+        #[snafu(source)]
+        error: ObjectStoreError,
+    },
+
+    #[snafu(display("Covert column schemas to defs failed"))]
+    CovertColumnSchemasToDefs {
+        #[snafu(implicit)]
+        location: Location,
+        source: operator::error::Error,
+    },
+
+    #[snafu(display("Failed to send request to datanode: {}", peer))]
+    SendRequestToDatanode {
+        peer: Peer,
+        #[snafu(implicit)]
+        location: Location,
+        source: common_meta::error::Error,
+    },
 }

 pub type Result<T> = std::result::Result<T, Error>;
@@ -274,9 +320,9 @@ pub type Result<T> = std::result::Result<T, Error>;
 impl ErrorExt for Error {
     fn status_code(&self) -> StatusCode {
         match self {
-            Error::InitMetadata { source, .. } | Error::InitDdlManager { source, .. } => {
-                source.status_code()
-            }
+            Error::InitMetadata { source, .. }
+            | Error::InitDdlManager { source, .. }
+            | Error::TableMetadata { source, .. } => source.status_code(),

             Error::MissingConfig { .. }
             | Error::LoadLayeredConfig { .. }
@@ -290,6 +336,9 @@ impl ErrorExt for Error {
             | Error::InvalidArguments { .. }
             | Error::ParseProxyOpts { .. } => StatusCode::InvalidArguments,

+            Error::CovertColumnSchemasToDefs { source, .. } => source.status_code(),
+            Error::SendRequestToDatanode { source, .. } => source.status_code(),
+
             Error::StartProcedureManager { source, .. }
             | Error::StopProcedureManager { source, .. } => source.status_code(),
             Error::StartWalOptionsAllocator { source, .. } => source.status_code(),
@@ -297,6 +346,7 @@ impl ErrorExt for Error {
             Error::ParseSql { source, .. } | Error::PlanStatement { source, .. } => {
                 source.status_code()
             }
+            Error::Unexpected { .. } => StatusCode::Unexpected,

             Error::SerdeJson { .. }
             | Error::FileIo { .. }
@@ -305,7 +355,7 @@ impl ErrorExt for Error {
             | Error::BuildClient { .. } => StatusCode::Unexpected,

             Error::Other { source, .. } => source.status_code(),
-            Error::OpenDal { .. } => StatusCode::Internal,
+            Error::OpenDal { .. } | Error::InitBackend { .. } => StatusCode::Internal,
             Error::S3ConfigNotSet { .. }
             | Error::OutputDirNotSet { .. }
             | Error::EmptyStoreAddrs { .. } => StatusCode::InvalidArguments,
@@ -314,6 +364,7 @@ impl ErrorExt for Error {

             Error::CacheRequired { .. } | Error::BuildCacheRegistry { .. } => StatusCode::Internal,
             Error::MetaClientInit { source, .. } => source.status_code(),
+            Error::TableNotFound { .. } => StatusCode::TableNotFound,
             Error::SchemaNotFound { .. } => StatusCode::DatabaseNotFound,
         }
     }
@@ -14,29 +14,39 @@

 mod common;
 mod control;
+mod repair;
 mod snapshot;
+mod utils;

 use clap::Subcommand;
 use common_error::ext::BoxedError;

-use crate::metadata::control::ControlCommand;
+use crate::metadata::control::{DelCommand, GetCommand};
+use crate::metadata::repair::RepairLogicalTablesCommand;
 use crate::metadata::snapshot::SnapshotCommand;
 use crate::Tool;

-/// Command for managing metadata operations, including saving metadata snapshots and restoring metadata from snapshots.
+/// Command for managing metadata operations,
+/// including saving and restoring metadata snapshots,
+/// controlling metadata operations, and diagnosing and repairing metadata.
 #[derive(Subcommand)]
 pub enum MetadataCommand {
     #[clap(subcommand)]
     Snapshot(SnapshotCommand),
-    #[clap(subcommand)]
-    Control(ControlCommand),
+    Get(GetCommand),
+    #[clap(subcommand)]
+    Del(DelCommand),
+    RepairLogicalTables(RepairLogicalTablesCommand),
 }

 impl MetadataCommand {
     pub async fn build(&self) -> Result<Box<dyn Tool>, BoxedError> {
         match self {
             MetadataCommand::Snapshot(cmd) => cmd.build().await,
-            MetadataCommand::Control(cmd) => cmd.build().await,
+            MetadataCommand::RepairLogicalTables(cmd) => cmd.build().await,
+            MetadataCommand::Get(cmd) => cmd.build().await,
+            MetadataCommand::Del(cmd) => cmd.build().await,
         }
     }
 }
@@ -12,27 +12,11 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

+mod del;
 mod get;
+#[cfg(test)]
+mod test_utils;
 mod utils;

-use clap::Subcommand;
-use common_error::ext::BoxedError;
-use get::GetCommand;
-
-use crate::Tool;
-
-/// Subcommand for metadata control.
-#[derive(Subcommand)]
-pub enum ControlCommand {
-    /// Get the metadata from the metasrv.
-    #[clap(subcommand)]
-    Get(GetCommand),
-}
-
-impl ControlCommand {
-    pub async fn build(&self) -> Result<Box<dyn Tool>, BoxedError> {
-        match self {
-            ControlCommand::Get(cmd) => cmd.build().await,
-        }
-    }
-}
+pub(crate) use del::DelCommand;
+pub(crate) use get::GetCommand;
42 src/cli/src/metadata/control/del.rs Normal file

@@ -0,0 +1,42 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+mod key;
+mod table;
+
+use clap::Subcommand;
+use common_error::ext::BoxedError;
+
+use crate::metadata::control::del::key::DelKeyCommand;
+use crate::metadata::control::del::table::DelTableCommand;
+use crate::Tool;
+
+/// The prefix of the tombstone keys.
+pub(crate) const CLI_TOMBSTONE_PREFIX: &str = "__cli_tombstone/";
+
+/// Subcommand for deleting metadata from the metadata store.
+#[derive(Subcommand)]
+pub enum DelCommand {
+    Key(DelKeyCommand),
+    Table(DelTableCommand),
+}
+
+impl DelCommand {
+    pub async fn build(&self) -> Result<Box<dyn Tool>, BoxedError> {
+        match self {
+            DelCommand::Key(cmd) => cmd.build().await,
+            DelCommand::Table(cmd) => cmd.build().await,
+        }
+    }
+}
132 src/cli/src/metadata/control/del/key.rs Normal file

@@ -0,0 +1,132 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use async_trait::async_trait;
+use clap::Parser;
+use common_error::ext::BoxedError;
+use common_meta::key::tombstone::TombstoneManager;
+use common_meta::kv_backend::KvBackendRef;
+use common_meta::rpc::store::RangeRequest;
+
+use crate::metadata::common::StoreConfig;
+use crate::metadata::control::del::CLI_TOMBSTONE_PREFIX;
+use crate::Tool;
+
+/// Delete key-value pairs logically from the metadata store.
+#[derive(Debug, Default, Parser)]
+pub struct DelKeyCommand {
+    /// The key to delete from the metadata store.
+    key: String,
+
+    /// Delete key-value pairs with the given prefix.
+    #[clap(long)]
+    prefix: bool,
+
+    #[clap(flatten)]
+    store: StoreConfig,
+}
+
+impl DelKeyCommand {
+    pub async fn build(&self) -> Result<Box<dyn Tool>, BoxedError> {
+        let kv_backend = self.store.build().await?;
+        Ok(Box::new(DelKeyTool {
+            key: self.key.to_string(),
+            prefix: self.prefix,
+            key_deleter: KeyDeleter::new(kv_backend),
+        }))
+    }
+}
+
+struct KeyDeleter {
+    kv_backend: KvBackendRef,
+    tombstone_manager: TombstoneManager,
+}
+
+impl KeyDeleter {
+    fn new(kv_backend: KvBackendRef) -> Self {
+        Self {
+            kv_backend: kv_backend.clone(),
+            tombstone_manager: TombstoneManager::new_with_prefix(kv_backend, CLI_TOMBSTONE_PREFIX),
+        }
+    }
+
+    async fn delete(&self, key: &str, prefix: bool) -> Result<usize, BoxedError> {
+        let mut req = RangeRequest::default().with_keys_only();
+        if prefix {
+            req = req.with_prefix(key.as_bytes());
+        } else {
+            req = req.with_key(key.as_bytes());
+        }
+        let resp = self.kv_backend.range(req).await.map_err(BoxedError::new)?;
+        let keys = resp.kvs.iter().map(|kv| kv.key.clone()).collect::<Vec<_>>();
+        self.tombstone_manager
+            .create(keys)
+            .await
+            .map_err(BoxedError::new)
+    }
+}
+
+struct DelKeyTool {
+    key: String,
+    prefix: bool,
+    key_deleter: KeyDeleter,
+}
+
+#[async_trait]
+impl Tool for DelKeyTool {
+    async fn do_work(&self) -> Result<(), BoxedError> {
+        let deleted = self.key_deleter.delete(&self.key, self.prefix).await?;
+        // Print the number of deleted keys.
+        println!("{}", deleted);
+        Ok(())
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use std::sync::Arc;
+
+    use common_meta::kv_backend::chroot::ChrootKvBackend;
+    use common_meta::kv_backend::memory::MemoryKvBackend;
+    use common_meta::kv_backend::{KvBackend, KvBackendRef};
+    use common_meta::rpc::store::RangeRequest;
+
+    use crate::metadata::control::del::key::KeyDeleter;
+    use crate::metadata::control::del::CLI_TOMBSTONE_PREFIX;
+    use crate::metadata::control::test_utils::put_key;
+
+    #[tokio::test]
+    async fn test_delete_keys() {
+        let kv_backend = Arc::new(MemoryKvBackend::new()) as KvBackendRef;
+        let key_deleter = KeyDeleter::new(kv_backend.clone());
+        put_key(&kv_backend, "foo", "bar").await;
+        put_key(&kv_backend, "foo/bar", "baz").await;
+        put_key(&kv_backend, "foo/baz", "qux").await;
+        let deleted = key_deleter.delete("foo", true).await.unwrap();
+        assert_eq!(deleted, 3);
+        let deleted = key_deleter.delete("foo/bar", false).await.unwrap();
+        assert_eq!(deleted, 0);
+
+        let chroot = ChrootKvBackend::new(CLI_TOMBSTONE_PREFIX.as_bytes().to_vec(), kv_backend);
+        let req = RangeRequest::default().with_prefix(b"foo");
+        let resp = chroot.range(req).await.unwrap();
+        assert_eq!(resp.kvs.len(), 3);
+        assert_eq!(resp.kvs[0].key, b"foo");
+        assert_eq!(resp.kvs[0].value, b"bar");
+        assert_eq!(resp.kvs[1].key, b"foo/bar");
+        assert_eq!(resp.kvs[1].value, b"baz");
+        assert_eq!(resp.kvs[2].key, b"foo/baz");
+        assert_eq!(resp.kvs[2].value, b"qux");
+    }
+}
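Deletion here is logical: KeyDeleter moves matching keys under the `__cli_tombstone/` prefix instead of erasing them, which is what the test verifies via ChrootKvBackend. A minimal sketch of that move-under-prefix idea over an in-memory map (our own simplification, not the TombstoneManager API):

```rust
use std::collections::BTreeMap;

const TOMBSTONE_PREFIX: &str = "__cli_tombstone/";

/// Logically delete `key`: move the entry under the tombstone prefix so a
/// mistaken delete can be undone by copying the pair back. Returns true if
/// the key existed.
fn tombstone(store: &mut BTreeMap<String, String>, key: &str) -> bool {
    match store.remove(key) {
        Some(value) => {
            store.insert(format!("{TOMBSTONE_PREFIX}{key}"), value);
            true
        }
        None => false,
    }
}

fn main() {
    let mut store = BTreeMap::new();
    store.insert("foo/bar".to_string(), "baz".to_string());
    assert!(tombstone(&mut store, "foo/bar"));
    // The original key is gone, but the value survives under the prefix.
    assert!(store.get("foo/bar").is_none());
    assert_eq!(store["__cli_tombstone/foo/bar"], "baz");
    println!("ok");
}
```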
235 src/cli/src/metadata/control/del/table.rs Normal file

@@ -0,0 +1,235 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use async_trait::async_trait;
+use clap::Parser;
+use client::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
+use common_catalog::format_full_table_name;
+use common_error::ext::BoxedError;
+use common_meta::ddl::utils::get_region_wal_options;
+use common_meta::key::table_name::TableNameManager;
+use common_meta::key::TableMetadataManager;
+use common_meta::kv_backend::KvBackendRef;
+use store_api::storage::TableId;
+
+use crate::error::{InvalidArgumentsSnafu, TableNotFoundSnafu};
+use crate::metadata::common::StoreConfig;
+use crate::metadata::control::del::CLI_TOMBSTONE_PREFIX;
+use crate::metadata::control::utils::get_table_id_by_name;
+use crate::Tool;
+
+/// Delete table metadata logically from the metadata store.
+#[derive(Debug, Default, Parser)]
+pub struct DelTableCommand {
+    /// The table id to delete from the metadata store.
+    #[clap(long)]
+    table_id: Option<u32>,
+
+    /// The table name to delete from the metadata store.
+    #[clap(long)]
+    table_name: Option<String>,
+
+    /// The schema name of the table.
+    #[clap(long, default_value = DEFAULT_SCHEMA_NAME)]
+    schema_name: String,
+
+    /// The catalog name of the table.
+    #[clap(long, default_value = DEFAULT_CATALOG_NAME)]
+    catalog_name: String,
+
+    #[clap(flatten)]
+    store: StoreConfig,
+}
+
+impl DelTableCommand {
+    fn validate(&self) -> Result<(), BoxedError> {
+        if matches!(
+            (&self.table_id, &self.table_name),
+            (Some(_), Some(_)) | (None, None)
+        ) {
+            return Err(BoxedError::new(
+                InvalidArgumentsSnafu {
+                    msg: "You must specify either --table-id or --table-name.",
+                }
+                .build(),
+            ));
+        }
+        Ok(())
+    }
+}
+
+impl DelTableCommand {
+    pub async fn build(&self) -> Result<Box<dyn Tool>, BoxedError> {
+        self.validate()?;
+        let kv_backend = self.store.build().await?;
+        Ok(Box::new(DelTableTool {
+            table_id: self.table_id,
+            table_name: self.table_name.clone(),
+            schema_name: self.schema_name.clone(),
+            catalog_name: self.catalog_name.clone(),
+            table_name_manager: TableNameManager::new(kv_backend.clone()),
+            table_metadata_deleter: TableMetadataDeleter::new(kv_backend),
+        }))
+    }
+}
+
+struct DelTableTool {
+    table_id: Option<u32>,
+    table_name: Option<String>,
+    schema_name: String,
+    catalog_name: String,
+    table_name_manager: TableNameManager,
+    table_metadata_deleter: TableMetadataDeleter,
+}
+
+#[async_trait]
+impl Tool for DelTableTool {
+    async fn do_work(&self) -> Result<(), BoxedError> {
+        let table_id = if let Some(table_name) = &self.table_name {
+            let catalog_name = &self.catalog_name;
+            let schema_name = &self.schema_name;
+
+            let Some(table_id) = get_table_id_by_name(
+                &self.table_name_manager,
+                catalog_name,
+                schema_name,
+                table_name,
+            )
+            .await?
+            else {
+                println!(
+                    "Table({}) not found",
+                    format_full_table_name(catalog_name, schema_name, table_name)
+                );
+                return Ok(());
+            };
+            table_id
+        } else {
+            // Safety: we have validated that table_id or table_name is not None
+            self.table_id.unwrap()
+        };
+        self.table_metadata_deleter.delete(table_id).await?;
+        println!("Table({}) deleted", table_id);
+
+        Ok(())
+    }
+}
+
+struct TableMetadataDeleter {
+    table_metadata_manager: TableMetadataManager,
+}
+
+impl TableMetadataDeleter {
+    fn new(kv_backend: KvBackendRef) -> Self {
+        Self {
+            table_metadata_manager: TableMetadataManager::new_with_custom_tombstone_prefix(
+                kv_backend,
+                CLI_TOMBSTONE_PREFIX,
+            ),
+        }
+    }
+
+    async fn delete(&self, table_id: TableId) -> Result<(), BoxedError> {
+        let (table_info, table_route) = self
+            .table_metadata_manager
+            .get_full_table_info(table_id)
+            .await
+            .map_err(BoxedError::new)?;
+        let Some(table_info) = table_info else {
+            return Err(BoxedError::new(TableNotFoundSnafu { table_id }.build()));
+        };
+        let Some(table_route) = table_route else {
+            return Err(BoxedError::new(TableNotFoundSnafu { table_id }.build()));
+        };
+        let physical_table_id = self
+            .table_metadata_manager
+            .table_route_manager()
+            .get_physical_table_id(table_id)
+            .await
+            .map_err(BoxedError::new)?;
+
+        let table_name = table_info.table_name();
+        let region_wal_options = get_region_wal_options(
+            &self.table_metadata_manager,
+            &table_route,
+            physical_table_id,
+        )
+        .await
+        .map_err(BoxedError::new)?;
+
+        self.table_metadata_manager
+            .delete_table_metadata(table_id, &table_name, &table_route, &region_wal_options)
+            .await
+            .map_err(BoxedError::new)?;
+        Ok(())
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use std::collections::HashMap;
+    use std::sync::Arc;
+
+    use common_error::ext::ErrorExt;
+    use common_error::status_code::StatusCode;
+    use common_meta::key::table_route::TableRouteValue;
+    use common_meta::key::TableMetadataManager;
+    use common_meta::kv_backend::chroot::ChrootKvBackend;
+    use common_meta::kv_backend::memory::MemoryKvBackend;
+    use common_meta::kv_backend::{KvBackend, KvBackendRef};
+    use common_meta::rpc::store::RangeRequest;
+
+    use crate::metadata::control::del::table::TableMetadataDeleter;
+    use crate::metadata::control::del::CLI_TOMBSTONE_PREFIX;
+    use crate::metadata::control::test_utils::prepare_physical_table_metadata;
+
+    #[tokio::test]
+    async fn test_delete_table_not_found() {
+        let kv_backend = Arc::new(MemoryKvBackend::new()) as KvBackendRef;
+
+        let table_metadata_deleter = TableMetadataDeleter::new(kv_backend);
+        let table_id = 1;
+        let err = table_metadata_deleter.delete(table_id).await.unwrap_err();
+        assert_eq!(err.status_code(), StatusCode::TableNotFound);
+    }
+
+    #[tokio::test]
+    async fn test_delete_table_metadata() {
+        let kv_backend = Arc::new(MemoryKvBackend::new());
+        let table_metadata_manager = TableMetadataManager::new(kv_backend.clone());
+        let table_id = 1024;
+        let (table_info, table_route) = prepare_physical_table_metadata("my_table", table_id).await;
+        table_metadata_manager
+            .create_table_metadata(
+                table_info,
+                TableRouteValue::Physical(table_route),
+                HashMap::new(),
+            )
+            .await
+            .unwrap();
+
+        let total_keys = kv_backend.len();
+        assert!(total_keys > 0);
+
+        let table_metadata_deleter = TableMetadataDeleter::new(kv_backend.clone());
+        table_metadata_deleter.delete(table_id).await.unwrap();
+
+        // Check the tombstone keys are created
+        let chroot =
+            ChrootKvBackend::new(CLI_TOMBSTONE_PREFIX.as_bytes().to_vec(), kv_backend.clone());
+        let req = RangeRequest::default().with_range(vec![0], vec![0]);
+        let resp = chroot.range(req).await.unwrap();
+        assert_eq!(resp.kvs.len(), total_keys);
+    }
+}
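`validate()` rejects both "neither flag" and "both flags" with one `matches!` over the option pair: the command needs exactly one of `--table-id` and `--table-name`, which is a bit stricter than the error message's "either ... or" reads. A standalone sketch of that exclusive-one check (hypothetical, simplified types):

```rust
/// Exactly one of the two selectors must be present, as in
/// DelTableCommand::validate above.
fn validate(table_id: Option<u32>, table_name: Option<&str>) -> Result<(), String> {
    if matches!(
        (&table_id, &table_name),
        (Some(_), Some(_)) | (None, None)
    ) {
        return Err("You must specify either --table-id or --table-name.".to_string());
    }
    Ok(())
}

fn main() {
    assert!(validate(Some(1024), None).is_ok());
    assert!(validate(None, Some("my_table")).is_ok());
    assert!(validate(None, None).is_err());
    assert!(validate(Some(1024), Some("my_table")).is_err());
}
```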
@@ -20,7 +20,6 @@ use client::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
|
||||
use common_catalog::format_full_table_name;
|
||||
use common_error::ext::BoxedError;
|
||||
use common_meta::key::table_info::TableInfoKey;
|
||||
use common_meta::key::table_name::TableNameKey;
|
||||
use common_meta::key::table_route::TableRouteKey;
|
||||
use common_meta::key::TableMetadataManager;
|
||||
use common_meta::kv_backend::KvBackendRef;
|
||||
@@ -30,10 +29,10 @@ use futures::TryStreamExt;
|
||||
|
||||
use crate::error::InvalidArgumentsSnafu;
|
||||
use crate::metadata::common::StoreConfig;
|
||||
use crate::metadata::control::utils::{decode_key_value, json_fromatter};
|
||||
use crate::metadata::control::utils::{decode_key_value, get_table_id_by_name, json_fromatter};
|
||||
use crate::Tool;
|
||||
|
||||
/// Subcommand for get command.
|
||||
/// Getting metadata from metadata store.
|
||||
#[derive(Subcommand)]
|
||||
pub enum GetCommand {
|
||||
Key(GetKeyCommand),
|
||||
@@ -52,7 +51,7 @@ impl GetCommand {
|
||||
/// Get key-value pairs from the metadata store.
|
||||
#[derive(Debug, Default, Parser)]
|
||||
pub struct GetKeyCommand {
|
||||
/// The key to get from the metadata store. If empty, returns all key-value pairs.
|
||||
/// The key to get from the metadata store.
|
||||
#[clap(default_value = "")]
|
||||
key: String,
|
||||
|
||||
@@ -130,8 +129,12 @@ pub struct GetTableCommand {
|
||||
table_name: Option<String>,
|
||||
|
||||
/// The schema name of the table.
|
||||
#[clap(long)]
|
||||
schema_name: Option<String>,
|
||||
#[clap(long, default_value = DEFAULT_SCHEMA_NAME)]
|
||||
schema_name: String,
|
||||
|
||||
/// The catalog name of the table.
|
||||
#[clap(long, default_value = DEFAULT_CATALOG_NAME)]
|
||||
catalog_name: String,
|
||||
|
||||
/// Pretty print the output.
|
||||
#[clap(long, default_value = "false")]
|
||||
@@ -143,7 +146,10 @@ pub struct GetTableCommand {
|
||||
|
||||
impl GetTableCommand {
|
||||
pub fn validate(&self) -> Result<(), BoxedError> {
|
||||
if self.table_id.is_none() && self.table_name.is_none() {
|
||||
if matches!(
|
||||
(&self.table_id, &self.table_name),
|
||||
(Some(_), Some(_)) | (None, None)
|
||||
) {
|
||||
return Err(BoxedError::new(
|
||||
InvalidArgumentsSnafu {
|
||||
msg: "You must specify either --table-id or --table-name.",
|
||||
@@ -159,7 +165,8 @@ struct GetTableTool {
|
||||
kvbackend: KvBackendRef,
|
||||
table_id: Option<u32>,
|
||||
table_name: Option<String>,
|
||||
schema_name: Option<String>,
|
||||
schema_name: String,
|
||||
catalog_name: String,
|
||||
pretty: bool,
|
||||
}
|
||||
|
||||
@@ -172,23 +179,20 @@ impl Tool for GetTableTool {
        let table_route_manager = table_metadata_manager.table_route_manager();

        let table_id = if let Some(table_name) = &self.table_name {
-            let catalog = DEFAULT_CATALOG_NAME.to_string();
-            let schema_name = self
-                .schema_name
-                .clone()
-                .unwrap_or_else(|| DEFAULT_SCHEMA_NAME.to_string());
-            let key = TableNameKey::new(&catalog, &schema_name, table_name);
+            let catalog_name = &self.catalog_name;
+            let schema_name = &self.schema_name;

-            let Some(table_name) = table_name_manager.get(key).await.map_err(BoxedError::new)?
+            let Some(table_id) =
+                get_table_id_by_name(table_name_manager, catalog_name, schema_name, table_name)
+                    .await?
            else {
                println!(
                    "Table({}) not found",
-                    format_full_table_name(&catalog, &schema_name, table_name)
+                    format_full_table_name(catalog_name, schema_name, table_name)
                );
                return Ok(());
            };

-            table_name.table_id()
+            table_id
        } else {
            // Safety: we have validated that table_id or table_name is not None
            self.table_id.unwrap()
@@ -236,6 +240,7 @@ impl GetTableCommand {
            table_id: self.table_id,
            table_name: self.table_name.clone(),
            schema_name: self.schema_name.clone(),
+            catalog_name: self.catalog_name.clone(),
            pretty: self.pretty,
        }))
    }
src/cli/src/metadata/control/test_utils.rs (new file, 51 lines)
@@ -0,0 +1,51 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use common_meta::ddl::test_util::test_create_physical_table_task;
use common_meta::key::table_route::PhysicalTableRouteValue;
use common_meta::kv_backend::KvBackendRef;
use common_meta::peer::Peer;
use common_meta::rpc::router::{Region, RegionRoute};
use common_meta::rpc::store::PutRequest;
use store_api::storage::{RegionId, TableId};
use table::metadata::RawTableInfo;

/// Puts a key-value pair into the kv backend.
pub async fn put_key(kv_backend: &KvBackendRef, key: &str, value: &str) {
    let put_req = PutRequest::new()
        .with_key(key.as_bytes())
        .with_value(value.as_bytes());
    kv_backend.put(put_req).await.unwrap();
}

/// Prepares the physical table metadata for testing.
///
/// Returns the table info and the table route.
pub async fn prepare_physical_table_metadata(
    table_name: &str,
    table_id: TableId,
) -> (RawTableInfo, PhysicalTableRouteValue) {
    let mut create_physical_table_task = test_create_physical_table_task(table_name);
    let table_route = PhysicalTableRouteValue::new(vec![RegionRoute {
        region: Region {
            id: RegionId::new(table_id, 1),
            ..Default::default()
        },
        leader_peer: Some(Peer::empty(1)),
        ..Default::default()
    }]);
    create_physical_table_task.set_table_id(table_id);

    (create_physical_table_task.table_info, table_route)
}
@@ -12,9 +12,12 @@
// See the License for the specific language governing permissions and
// limitations under the License.

+use common_error::ext::BoxedError;
use common_meta::error::Result as CommonMetaResult;
+use common_meta::key::table_name::{TableNameKey, TableNameManager};
use common_meta::rpc::KeyValue;
use serde::Serialize;
+use store_api::storage::TableId;

/// Decodes a key-value pair into a string.
pub fn decode_key_value(kv: KeyValue) -> CommonMetaResult<(String, String)> {
@@ -34,3 +37,21 @@
        serde_json::to_string(value).unwrap()
    }
}

+/// Gets the table id by table name.
+pub async fn get_table_id_by_name(
+    table_name_manager: &TableNameManager,
+    catalog_name: &str,
+    schema_name: &str,
+    table_name: &str,
+) -> Result<Option<TableId>, BoxedError> {
+    let table_name_key = TableNameKey::new(catalog_name, schema_name, table_name);
+    let Some(table_name_value) = table_name_manager
+        .get(table_name_key)
+        .await
+        .map_err(BoxedError::new)?
+    else {
+        return Ok(None);
+    };
+    Ok(Some(table_name_value.table_id()))
+}
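
Side note: the new `get_table_id_by_name` helper centralizes the name-to-id lookup that `GetTableTool` previously inlined. A minimal usage sketch (hypothetical wiring; `kv_backend` here stands for any `KvBackendRef`, such as the in-memory backend used in the CLI tests):

    use common_meta::key::TableMetadataManager;

    // Ok(None) means the table does not exist; metadata-store errors are
    // boxed into the CLI's BoxedError type.
    let manager = TableMetadataManager::new(kv_backend.clone());
    let maybe_id = get_table_id_by_name(
        manager.table_name_manager(),
        "greptime", // DEFAULT_CATALOG_NAME
        "public",   // DEFAULT_SCHEMA_NAME
        "my_table",
    )
    .await?;
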
src/cli/src/metadata/repair.rs (new file, 369 lines)
@@ -0,0 +1,369 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

mod alter_table;
mod create_table;

use std::sync::Arc;
use std::time::Duration;

use async_trait::async_trait;
use clap::Parser;
use client::api::v1::CreateTableExpr;
use client::client_manager::NodeClients;
use client::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_error::ext::{BoxedError, ErrorExt};
use common_error::status_code::StatusCode;
use common_grpc::channel_manager::ChannelConfig;
use common_meta::error::Error as CommonMetaError;
use common_meta::key::TableMetadataManager;
use common_meta::kv_backend::KvBackendRef;
use common_meta::node_manager::NodeManagerRef;
use common_meta::peer::Peer;
use common_meta::rpc::router::{find_leaders, RegionRoute};
use common_telemetry::{error, info, warn};
use futures::TryStreamExt;
use snafu::{ensure, ResultExt};
use store_api::storage::TableId;

use crate::error::{
    InvalidArgumentsSnafu, Result, SendRequestToDatanodeSnafu, TableMetadataSnafu, UnexpectedSnafu,
};
use crate::metadata::common::StoreConfig;
use crate::metadata::utils::{FullTableMetadata, IteratorInput, TableMetadataIterator};
use crate::Tool;

/// Repair metadata of logical tables.
#[derive(Debug, Default, Parser)]
pub struct RepairLogicalTablesCommand {
    /// The names of the tables to repair.
    #[clap(long, value_delimiter = ',', alias = "table-name")]
    table_names: Vec<String>,

    /// The ids of the tables to repair.
    #[clap(long, value_delimiter = ',', alias = "table-id")]
    table_ids: Vec<TableId>,

    /// The schema of the tables to repair.
    #[clap(long, default_value = DEFAULT_SCHEMA_NAME)]
    schema_name: String,

    /// The catalog of the tables to repair.
    #[clap(long, default_value = DEFAULT_CATALOG_NAME)]
    catalog_name: String,

    /// Whether to fail fast if any repair operation fails.
    #[clap(long)]
    fail_fast: bool,

    #[clap(flatten)]
    store: StoreConfig,

    /// The timeout for the client to operate the datanode.
    #[clap(long, default_value_t = 30)]
    client_timeout_secs: u64,

    /// The timeout for the client to connect to the datanode.
    #[clap(long, default_value_t = 3)]
    client_connect_timeout_secs: u64,
}

impl RepairLogicalTablesCommand {
    fn validate(&self) -> Result<()> {
        ensure!(
            !self.table_names.is_empty() || !self.table_ids.is_empty(),
            InvalidArgumentsSnafu {
                msg: "You must specify --table-names or --table-ids.",
            }
        );
        Ok(())
    }
}

impl RepairLogicalTablesCommand {
    pub async fn build(&self) -> std::result::Result<Box<dyn Tool>, BoxedError> {
        self.validate().map_err(BoxedError::new)?;
        let kv_backend = self.store.build().await?;
        let node_client_channel_config = ChannelConfig::new()
            .timeout(Duration::from_secs(self.client_timeout_secs))
            .connect_timeout(Duration::from_secs(self.client_connect_timeout_secs));
        let node_manager = Arc::new(NodeClients::new(node_client_channel_config));

        Ok(Box::new(RepairTool {
            table_names: self.table_names.clone(),
            table_ids: self.table_ids.clone(),
            schema_name: self.schema_name.clone(),
            catalog_name: self.catalog_name.clone(),
            fail_fast: self.fail_fast,
            kv_backend,
            node_manager,
        }))
    }
}

struct RepairTool {
    table_names: Vec<String>,
    table_ids: Vec<TableId>,
    schema_name: String,
    catalog_name: String,
    fail_fast: bool,
    kv_backend: KvBackendRef,
    node_manager: NodeManagerRef,
}

#[async_trait]
impl Tool for RepairTool {
    async fn do_work(&self) -> std::result::Result<(), BoxedError> {
        self.repair_tables().await.map_err(BoxedError::new)
    }
}

impl RepairTool {
    fn generate_iterator_input(&self) -> Result<IteratorInput> {
        if !self.table_names.is_empty() {
            let table_names = &self.table_names;
            let catalog = &self.catalog_name;
            let schema_name = &self.schema_name;

            let table_names = table_names
                .iter()
                .map(|table_name| {
                    (
                        catalog.to_string(),
                        schema_name.to_string(),
                        table_name.to_string(),
                    )
                })
                .collect::<Vec<_>>();
            return Ok(IteratorInput::new_table_names(table_names));
        } else if !self.table_ids.is_empty() {
            return Ok(IteratorInput::new_table_ids(self.table_ids.clone()));
        };

        InvalidArgumentsSnafu {
            msg: "You must specify --table-names or --table-ids.",
        }
        .fail()
    }
    async fn repair_tables(&self) -> Result<()> {
        let input = self.generate_iterator_input()?;
        let mut table_metadata_iterator =
            Box::pin(TableMetadataIterator::new(self.kv_backend.clone(), input).into_stream());
        let table_metadata_manager = TableMetadataManager::new(self.kv_backend.clone());

        let mut skipped_table = 0;
        let mut success_table = 0;
        while let Some(full_table_metadata) = table_metadata_iterator.try_next().await? {
            let full_table_name = full_table_metadata.full_table_name();
            if !full_table_metadata.is_metric_engine() {
                warn!(
                    "Skipping repair for non-metric engine table: {}",
                    full_table_name
                );
                skipped_table += 1;
                continue;
            }

            if full_table_metadata.is_physical_table() {
                warn!("Skipping repair for physical table: {}", full_table_name);
                skipped_table += 1;
                continue;
            }

            let (physical_table_id, physical_table_route) = table_metadata_manager
                .table_route_manager()
                .get_physical_table_route(full_table_metadata.table_id)
                .await
                .context(TableMetadataSnafu)?;

            if let Err(err) = self
                .repair_table(
                    &full_table_metadata,
                    physical_table_id,
                    &physical_table_route.region_routes,
                )
                .await
            {
                error!(
                    err;
                    "Failed to repair table: {}, skipped table: {}",
                    full_table_name,
                    skipped_table,
                );

                if self.fail_fast {
                    return Err(err);
                }
            } else {
                success_table += 1;
            }
        }

        info!(
            "Repair logical tables result: {} tables repaired, {} tables skipped",
            success_table, skipped_table
        );

        Ok(())
    }

    async fn alter_table_on_datanodes(
        &self,
        full_table_metadata: &FullTableMetadata,
        physical_region_routes: &[RegionRoute],
    ) -> Result<Vec<(Peer, CommonMetaError)>> {
        let logical_table_id = full_table_metadata.table_id;
        let alter_table_expr = alter_table::generate_alter_table_expr_for_all_columns(
            &full_table_metadata.table_info,
        )?;
        let node_manager = self.node_manager.clone();

        let mut failed_peers = Vec::new();
        info!(
            "Sending alter table requests to all datanodes for table: {}, number of regions: {}.",
            full_table_metadata.full_table_name(),
            physical_region_routes.len()
        );
        let leaders = find_leaders(physical_region_routes);
        for peer in &leaders {
            let alter_table_request = alter_table::make_alter_region_request_for_peer(
                logical_table_id,
                &alter_table_expr,
                full_table_metadata.table_info.ident.version,
                peer,
                physical_region_routes,
            )?;
            let datanode = node_manager.datanode(peer).await;
            if let Err(err) = datanode.handle(alter_table_request).await {
                failed_peers.push((peer.clone(), err));
            }
        }

        Ok(failed_peers)
    }

    async fn create_table_on_datanode(
        &self,
        create_table_expr: &CreateTableExpr,
        logical_table_id: TableId,
        physical_table_id: TableId,
        peer: &Peer,
        physical_region_routes: &[RegionRoute],
    ) -> Result<()> {
        let node_manager = self.node_manager.clone();
        let datanode = node_manager.datanode(peer).await;
        let create_table_request = create_table::make_create_region_request_for_peer(
            logical_table_id,
            physical_table_id,
            create_table_expr,
            peer,
            physical_region_routes,
        )?;

        datanode
            .handle(create_table_request)
            .await
            .with_context(|_| SendRequestToDatanodeSnafu { peer: peer.clone() })?;

        Ok(())
    }

    async fn repair_table(
        &self,
        full_table_metadata: &FullTableMetadata,
        physical_table_id: TableId,
        physical_region_routes: &[RegionRoute],
    ) -> Result<()> {
        let full_table_name = full_table_metadata.full_table_name();
        // First we send alter table requests to all datanodes with all columns.
        let failed_peers = self
            .alter_table_on_datanodes(full_table_metadata, physical_region_routes)
            .await?;

        if failed_peers.is_empty() {
            info!(
                "All alter table requests sent successfully for table: {}",
                full_table_name
            );
            return Ok(());
        }
        warn!(
            "Sending alter table requests to datanodes for table: {} failed for the datanodes: {:?}",
            full_table_name,
            failed_peers.iter().map(|(peer, _)| peer.id).collect::<Vec<_>>()
        );

        let create_table_expr =
            create_table::generate_create_table_expr(&full_table_metadata.table_info)?;

        let mut errors = Vec::new();
        for (peer, err) in failed_peers {
            if err.status_code() != StatusCode::RegionNotFound {
                error!(
                    err;
                    "Sending alter table requests to datanode: {} for table: {} failed",
                    peer.id,
                    full_table_name,
                );
                continue;
            }
            info!(
                "Region not found for table: {}, datanode: {}, trying to create the logical table on that datanode",
                full_table_name,
                peer.id
            );

            // If the alter table request fails for any datanode, we attempt to create the table on that datanode
            // as a fallback mechanism to ensure table consistency across the cluster.
            if let Err(err) = self
                .create_table_on_datanode(
                    &create_table_expr,
                    full_table_metadata.table_id,
                    physical_table_id,
                    &peer,
                    physical_region_routes,
                )
                .await
            {
                error!(
                    err;
                    "Failed to create table on datanode: {} for table: {}",
                    peer.id, full_table_name
                );
                errors.push(err);
                if self.fail_fast {
                    break;
                }
            } else {
                info!(
                    "Created table on datanode: {} for table: {}",
                    peer.id, full_table_name
                );
            }
        }

        if !errors.is_empty() {
            return UnexpectedSnafu {
                msg: format!(
                    "Failed to create table on datanodes for table: {}",
                    full_table_name,
                ),
            }
            .fail();
        }

        Ok(())
    }
}
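
How the pieces fit: `build` validates the flags, opens the metadata store, and hands back a `Tool` whose `do_work` drives `repair_tables`. A rough programmatic sketch (illustrative only; in the real CLI these fields are populated by clap from the command line):

    let cmd = RepairLogicalTablesCommand {
        table_names: vec!["my_logical_table".to_string()],
        schema_name: "public".to_string(),
        catalog_name: "greptime".to_string(),
        ..Default::default()
    };
    let tool = cmd.build().await?; // builds the kv backend and datanode clients
    tool.do_work().await?;        // alter first, create on RegionNotFound
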
src/cli/src/metadata/repair/alter_table.rs (new file, 85 lines)
@@ -0,0 +1,85 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use client::api::v1::alter_table_expr::Kind;
use client::api::v1::region::{region_request, AlterRequests, RegionRequest, RegionRequestHeader};
use client::api::v1::{AddColumn, AddColumns, AlterTableExpr};
use common_meta::ddl::alter_logical_tables::make_alter_region_request;
use common_meta::peer::Peer;
use common_meta::rpc::router::{find_leader_regions, RegionRoute};
use operator::expr_helper::column_schemas_to_defs;
use snafu::ResultExt;
use store_api::storage::{RegionId, TableId};
use table::metadata::RawTableInfo;

use crate::error::{CovertColumnSchemasToDefsSnafu, Result};

/// Generates alter table expression for all columns.
pub fn generate_alter_table_expr_for_all_columns(
    table_info: &RawTableInfo,
) -> Result<AlterTableExpr> {
    let schema = &table_info.meta.schema;

    let mut alter_table_expr = AlterTableExpr {
        catalog_name: table_info.catalog_name.to_string(),
        schema_name: table_info.schema_name.to_string(),
        table_name: table_info.name.to_string(),
        ..Default::default()
    };

    let primary_keys = table_info
        .meta
        .primary_key_indices
        .iter()
        .map(|i| schema.column_schemas[*i].name.clone())
        .collect::<Vec<_>>();

    let add_columns = column_schemas_to_defs(schema.column_schemas.clone(), &primary_keys)
        .context(CovertColumnSchemasToDefsSnafu)?;

    alter_table_expr.kind = Some(Kind::AddColumns(AddColumns {
        add_columns: add_columns
            .into_iter()
            .map(|col| AddColumn {
                column_def: Some(col),
                location: None,
                add_if_not_exists: true,
            })
            .collect(),
    }));

    Ok(alter_table_expr)
}

/// Makes an alter region request for a peer.
pub fn make_alter_region_request_for_peer(
    logical_table_id: TableId,
    alter_table_expr: &AlterTableExpr,
    schema_version: u64,
    peer: &Peer,
    region_routes: &[RegionRoute],
) -> Result<RegionRequest> {
    let regions_on_this_peer = find_leader_regions(region_routes, peer);
    let mut requests = Vec::with_capacity(regions_on_this_peer.len());
    for region_number in &regions_on_this_peer {
        let region_id = RegionId::new(logical_table_id, *region_number);
        let request = make_alter_region_request(region_id, alter_table_expr, schema_version);
        requests.push(request);
    }

    Ok(RegionRequest {
        header: Some(RegionRequestHeader::default()),
        body: Some(region_request::Body::Alters(AlterRequests { requests })),
    })
}
src/cli/src/metadata/repair/create_table.rs (new file, 89 lines)
@@ -0,0 +1,89 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::collections::HashMap;

use client::api::v1::region::{region_request, CreateRequests, RegionRequest, RegionRequestHeader};
use client::api::v1::CreateTableExpr;
use common_meta::ddl::create_logical_tables::create_region_request_builder;
use common_meta::ddl::utils::region_storage_path;
use common_meta::peer::Peer;
use common_meta::rpc::router::{find_leader_regions, RegionRoute};
use operator::expr_helper::column_schemas_to_defs;
use snafu::ResultExt;
use store_api::storage::{RegionId, TableId};
use table::metadata::RawTableInfo;

use crate::error::{CovertColumnSchemasToDefsSnafu, Result};

/// Generates a `CreateTableExpr` from a `RawTableInfo`.
pub fn generate_create_table_expr(table_info: &RawTableInfo) -> Result<CreateTableExpr> {
    let schema = &table_info.meta.schema;
    let primary_keys = table_info
        .meta
        .primary_key_indices
        .iter()
        .map(|i| schema.column_schemas[*i].name.clone())
        .collect::<Vec<_>>();

    let timestamp_index = schema.timestamp_index.as_ref().unwrap();
    let time_index = schema.column_schemas[*timestamp_index].name.clone();
    let column_defs = column_schemas_to_defs(schema.column_schemas.clone(), &primary_keys)
        .context(CovertColumnSchemasToDefsSnafu)?;
    let table_options = HashMap::from(&table_info.meta.options);

    Ok(CreateTableExpr {
        catalog_name: table_info.catalog_name.to_string(),
        schema_name: table_info.schema_name.to_string(),
        table_name: table_info.name.to_string(),
        desc: String::default(),
        column_defs,
        time_index,
        primary_keys,
        create_if_not_exists: true,
        table_options,
        table_id: None,
        engine: table_info.meta.engine.to_string(),
    })
}

/// Makes a create region request for a peer.
pub fn make_create_region_request_for_peer(
    logical_table_id: TableId,
    physical_table_id: TableId,
    create_table_expr: &CreateTableExpr,
    peer: &Peer,
    region_routes: &[RegionRoute],
) -> Result<RegionRequest> {
    let regions_on_this_peer = find_leader_regions(region_routes, peer);
    let mut requests = Vec::with_capacity(regions_on_this_peer.len());
    let request_builder =
        create_region_request_builder(create_table_expr, physical_table_id).unwrap();

    let catalog = &create_table_expr.catalog_name;
    let schema = &create_table_expr.schema_name;
    let storage_path = region_storage_path(catalog, schema);

    for region_number in &regions_on_this_peer {
        let region_id = RegionId::new(logical_table_id, *region_number);
        let region_request =
            request_builder.build_one(region_id, storage_path.clone(), &HashMap::new());
        requests.push(region_request);
    }

    Ok(RegionRequest {
        header: Some(RegionRequestHeader::default()),
        body: Some(region_request::Body::Creates(CreateRequests { requests })),
    })
}
src/cli/src/metadata/utils.rs (new file, 178 lines)
@@ -0,0 +1,178 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::collections::VecDeque;

use async_stream::try_stream;
use common_catalog::consts::METRIC_ENGINE;
use common_catalog::format_full_table_name;
use common_meta::key::table_name::TableNameKey;
use common_meta::key::table_route::TableRouteValue;
use common_meta::key::TableMetadataManager;
use common_meta::kv_backend::KvBackendRef;
use futures::Stream;
use snafu::{OptionExt, ResultExt};
use store_api::storage::TableId;
use table::metadata::RawTableInfo;

use crate::error::{Result, TableMetadataSnafu, UnexpectedSnafu};

/// The input for the iterator.
pub enum IteratorInput {
    TableIds(VecDeque<TableId>),
    TableNames(VecDeque<(String, String, String)>),
}

impl IteratorInput {
    /// Creates a new iterator input from a list of table ids.
    pub fn new_table_ids(table_ids: Vec<TableId>) -> Self {
        Self::TableIds(table_ids.into())
    }

    /// Creates a new iterator input from a list of table names.
    pub fn new_table_names(table_names: Vec<(String, String, String)>) -> Self {
        Self::TableNames(table_names.into())
    }
}

/// An iterator for retrieving table metadata from the metadata store.
///
/// This struct provides functionality to iterate over table metadata based on
/// either [`TableId`]s and their associated regions or fully qualified table names.
pub struct TableMetadataIterator {
    input: IteratorInput,
    table_metadata_manager: TableMetadataManager,
}

/// The full table metadata.
pub struct FullTableMetadata {
    pub table_id: TableId,
    pub table_info: RawTableInfo,
    pub table_route: TableRouteValue,
}

impl FullTableMetadata {
    /// Returns true if it's [TableRouteValue::Physical].
    pub fn is_physical_table(&self) -> bool {
        self.table_route.is_physical()
    }

    /// Returns true if it's a metric engine table.
    pub fn is_metric_engine(&self) -> bool {
        self.table_info.meta.engine == METRIC_ENGINE
    }

    /// Returns the full table name.
    pub fn full_table_name(&self) -> String {
        format_full_table_name(
            &self.table_info.catalog_name,
            &self.table_info.schema_name,
            &self.table_info.name,
        )
    }
}

impl TableMetadataIterator {
    pub fn new(kvbackend: KvBackendRef, input: IteratorInput) -> Self {
        let table_metadata_manager = TableMetadataManager::new(kvbackend);
        Self {
            input,
            table_metadata_manager,
        }
    }

    /// Returns the next table metadata.
    ///
    /// This method handles two types of inputs:
    /// - TableIds: Returns metadata for a specific [`TableId`].
    /// - TableNames: Returns metadata for a table identified by its full name (catalog.schema.table).
    ///
    /// Returns `None` when there are no more tables to process.
    pub async fn next(&mut self) -> Result<Option<FullTableMetadata>> {
        match &mut self.input {
            IteratorInput::TableIds(table_ids) => {
                if let Some(table_id) = table_ids.pop_front() {
                    let full_table_metadata = self.get_table_metadata(table_id).await?;
                    return Ok(Some(full_table_metadata));
                }
            }

            IteratorInput::TableNames(table_names) => {
                if let Some(full_table_name) = table_names.pop_front() {
                    let table_id = self.get_table_id_by_name(full_table_name).await?;
                    let full_table_metadata = self.get_table_metadata(table_id).await?;
                    return Ok(Some(full_table_metadata));
                }
            }
        }

        Ok(None)
    }

    /// Converts the iterator into a stream of table metadata.
    pub fn into_stream(mut self) -> impl Stream<Item = Result<FullTableMetadata>> {
        try_stream!({
            while let Some(full_table_metadata) = self.next().await? {
                yield full_table_metadata;
            }
        })
    }

    async fn get_table_id_by_name(
        &mut self,
        (catalog_name, schema_name, table_name): (String, String, String),
    ) -> Result<TableId> {
        let key = TableNameKey::new(&catalog_name, &schema_name, &table_name);
        let table_id = self
            .table_metadata_manager
            .table_name_manager()
            .get(key)
            .await
            .context(TableMetadataSnafu)?
            .with_context(|| UnexpectedSnafu {
                msg: format!(
                    "Table not found: {}",
                    format_full_table_name(&catalog_name, &schema_name, &table_name)
                ),
            })?
            .table_id();
        Ok(table_id)
    }

    async fn get_table_metadata(&mut self, table_id: TableId) -> Result<FullTableMetadata> {
        let (table_info, table_route) = self
            .table_metadata_manager
            .get_full_table_info(table_id)
            .await
            .context(TableMetadataSnafu)?;

        let table_info = table_info
            .with_context(|| UnexpectedSnafu {
                msg: format!("Table info not found for table id: {table_id}"),
            })?
            .into_inner()
            .table_info;
        let table_route = table_route
            .with_context(|| UnexpectedSnafu {
                msg: format!("Table route not found for table id: {table_id}"),
            })?
            .into_inner();

        Ok(FullTableMetadata {
            table_id,
            table_info,
            table_route,
        })
    }
}
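
For orientation, consuming the iterator as a stream looks roughly like this (assumed setup; `kv_backend` is any `KvBackendRef`):

    use futures::TryStreamExt;

    let input = IteratorInput::new_table_ids(vec![1024, 1025]);
    let mut stream = Box::pin(TableMetadataIterator::new(kv_backend, input).into_stream());
    while let Some(meta) = stream.try_next().await? {
        println!("{}: physical = {}", meta.full_table_name(), meta.is_physical_table());
    }

This mirrors how `repair_tables` drives the stream above; the `Box::pin` is needed because `try_stream!` produces an unnamed, non-`Unpin` stream type.
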
@@ -93,6 +93,7 @@ impl InstanceBuilder {
            MetaClientType::Datanode { member_id },
            meta_client_options,
            Some(&plugins),
+            None,
        )
        .await
        .context(MetaClientInitSnafu)?;
@@ -55,14 +55,32 @@ type FlownodeOptions = GreptimeOptions<flow::FlownodeOptions>;
pub struct Instance {
    flownode: FlownodeInstance,

+    // The components of flownode, which make it easier to expand based
+    // on the components.
+    #[cfg(feature = "enterprise")]
+    components: Components,
+
    // Keep the logging guard to prevent the worker from being dropped.
    _guard: Vec<WorkerGuard>,
}

+#[cfg(feature = "enterprise")]
+pub struct Components {
+    pub catalog_manager: catalog::CatalogManagerRef,
+    pub fe_client: Arc<FrontendClient>,
+    pub kv_backend: common_meta::kv_backend::KvBackendRef,
+}
+
impl Instance {
-    pub fn new(flownode: FlownodeInstance, guard: Vec<WorkerGuard>) -> Self {
+    pub fn new(
+        flownode: FlownodeInstance,
+        #[cfg(feature = "enterprise")] components: Components,
+        guard: Vec<WorkerGuard>,
+    ) -> Self {
        Self {
            flownode,
+            #[cfg(feature = "enterprise")]
+            components,
            _guard: guard,
        }
    }
@@ -75,6 +93,11 @@ impl Instance {
    pub fn flownode_mut(&mut self) -> &mut FlownodeInstance {
        &mut self.flownode
    }
+
+    #[cfg(feature = "enterprise")]
+    pub fn components(&self) -> &Components {
+        &self.components
+    }
}

#[async_trait::async_trait]
@@ -283,6 +306,7 @@ impl StartCommand {
            MetaClientType::Flownode { member_id },
            meta_config,
            None,
+            None,
        )
        .await
        .context(MetaClientInitSnafu)?;
@@ -349,19 +373,20 @@ impl StartCommand {
        let flow_auth_header = get_flow_auth_options(&opts).context(StartFlownodeSnafu)?;
        let frontend_client =
            FrontendClient::from_meta_client(meta_client.clone(), flow_auth_header);
+        let frontend_client = Arc::new(frontend_client);
        let flownode_builder = FlownodeBuilder::new(
            opts.clone(),
            plugins,
            table_metadata_manager,
            catalog_manager.clone(),
            flow_metadata_manager,
-            Arc::new(frontend_client),
+            frontend_client.clone(),
        )
        .with_heartbeat_task(heartbeat_task);

        let mut flownode = flownode_builder.build().await.context(StartFlownodeSnafu)?;
        let services = FlownodeServiceBuilder::new(&opts)
-            .with_grpc_server(flownode.flownode_server().clone())
+            .with_default_grpc_server(flownode.flownode_server())
            .enable_http_service()
            .build()
            .context(StartFlownodeSnafu)?;
@@ -393,6 +418,16 @@ impl StartCommand {
            .set_frontend_invoker(invoker)
            .await;

-        Ok(Instance::new(flownode, guard))
+        #[cfg(feature = "enterprise")]
+        let components = Components {
+            catalog_manager: catalog_manager.clone(),
+            fe_client: frontend_client,
+            kv_backend: cached_meta_backend,
+        };
+
+        #[cfg(not(feature = "enterprise"))]
+        return Ok(Instance::new(flownode, guard));
+        #[cfg(feature = "enterprise")]
+        Ok(Instance::new(flownode, components, guard))
    }
}
@@ -313,6 +313,7 @@ impl StartCommand {
            MetaClientType::Frontend,
            meta_client_options,
            Some(&plugins),
+            None,
        )
        .await
        .context(error::MetaClientInitSnafu)?;
@@ -30,20 +30,16 @@ use common_catalog::consts::{MIN_USER_FLOW_ID, MIN_USER_TABLE_ID};
use common_config::{metadata_store_dir, Configurable, KvBackendConfig};
use common_error::ext::BoxedError;
use common_meta::cache::LayeredCacheRegistryBuilder;
-use common_meta::cache_invalidator::CacheInvalidatorRef;
use common_meta::cluster::{NodeInfo, NodeStatus};
use common_meta::datanode::RegionStat;
-use common_meta::ddl::flow_meta::{FlowMetadataAllocator, FlowMetadataAllocatorRef};
-use common_meta::ddl::table_meta::{TableMetadataAllocator, TableMetadataAllocatorRef};
+use common_meta::ddl::flow_meta::FlowMetadataAllocator;
+use common_meta::ddl::table_meta::TableMetadataAllocator;
use common_meta::ddl::{DdlContext, NoopRegionFailureDetectorControl, ProcedureExecutorRef};
use common_meta::ddl_manager::DdlManager;
#[cfg(feature = "enterprise")]
use common_meta::ddl_manager::TriggerDdlManagerRef;
use common_meta::key::flow::flow_state::FlowStat;
-use common_meta::key::flow::{FlowMetadataManager, FlowMetadataManagerRef};
+use common_meta::key::flow::FlowMetadataManager;
use common_meta::key::{TableMetadataManager, TableMetadataManagerRef};
use common_meta::kv_backend::KvBackendRef;
use common_meta::node_manager::NodeManagerRef;
use common_meta::peer::Peer;
use common_meta::region_keeper::MemoryRegionKeeper;
use common_meta::region_registry::LeaderRegionRegistry;
@@ -594,28 +590,36 @@ impl StartCommand {
            .await
            .context(error::BuildWalOptionsAllocatorSnafu)?;
        let wal_options_allocator = Arc::new(wal_options_allocator);
-        let table_meta_allocator = Arc::new(TableMetadataAllocator::new(
+        let table_metadata_allocator = Arc::new(TableMetadataAllocator::new(
            table_id_sequence,
            wal_options_allocator.clone(),
        ));
-        let flow_meta_allocator = Arc::new(FlowMetadataAllocator::with_noop_peer_allocator(
+        let flow_metadata_allocator = Arc::new(FlowMetadataAllocator::with_noop_peer_allocator(
            flow_id_sequence,
        ));

+        let ddl_context = DdlContext {
+            node_manager: node_manager.clone(),
+            cache_invalidator: layered_cache_registry.clone(),
+            memory_region_keeper: Arc::new(MemoryRegionKeeper::default()),
+            leader_region_registry: Arc::new(LeaderRegionRegistry::default()),
+            table_metadata_manager: table_metadata_manager.clone(),
+            table_metadata_allocator: table_metadata_allocator.clone(),
+            flow_metadata_manager: flow_metadata_manager.clone(),
+            flow_metadata_allocator: flow_metadata_allocator.clone(),
+            region_failure_detector_controller: Arc::new(NoopRegionFailureDetectorControl),
+        };
+        let procedure_manager_c = procedure_manager.clone();
+
+        let ddl_manager = DdlManager::try_new(ddl_context, procedure_manager_c, true)
+            .context(error::InitDdlManagerSnafu)?;
-        #[cfg(feature = "enterprise")]
-        let trigger_ddl_manager: Option<TriggerDdlManagerRef> = plugins.get();
-        let ddl_task_executor = Self::create_ddl_task_executor(
-            procedure_manager.clone(),
-            node_manager.clone(),
-            layered_cache_registry.clone(),
-            table_metadata_manager,
-            table_meta_allocator,
-            flow_metadata_manager,
-            flow_meta_allocator,
-            #[cfg(feature = "enterprise")]
-            trigger_ddl_manager,
-        )
-        .await?;
+        let ddl_manager = {
+            let trigger_ddl_manager: Option<common_meta::ddl_manager::TriggerDdlManagerRef> =
+                plugins.get();
+            ddl_manager.with_trigger_ddl_manager(trigger_ddl_manager)
+        };
+        let ddl_task_executor: ProcedureExecutorRef = Arc::new(ddl_manager);

        let fe_instance = FrontendBuilder::new(
            fe_opts.clone(),
@@ -679,41 +683,6 @@ impl StartCommand {
        })
    }

-    #[allow(clippy::too_many_arguments)]
-    pub async fn create_ddl_task_executor(
-        procedure_manager: ProcedureManagerRef,
-        node_manager: NodeManagerRef,
-        cache_invalidator: CacheInvalidatorRef,
-        table_metadata_manager: TableMetadataManagerRef,
-        table_metadata_allocator: TableMetadataAllocatorRef,
-        flow_metadata_manager: FlowMetadataManagerRef,
-        flow_metadata_allocator: FlowMetadataAllocatorRef,
-        #[cfg(feature = "enterprise")] trigger_ddl_manager: Option<TriggerDdlManagerRef>,
-    ) -> Result<ProcedureExecutorRef> {
-        let procedure_executor: ProcedureExecutorRef = Arc::new(
-            DdlManager::try_new(
-                DdlContext {
-                    node_manager,
-                    cache_invalidator,
-                    memory_region_keeper: Arc::new(MemoryRegionKeeper::default()),
-                    leader_region_registry: Arc::new(LeaderRegionRegistry::default()),
-                    table_metadata_manager,
-                    table_metadata_allocator,
-                    flow_metadata_manager,
-                    flow_metadata_allocator,
-                    region_failure_detector_controller: Arc::new(NoopRegionFailureDetectorControl),
-                },
-                procedure_manager,
-                true,
-                #[cfg(feature = "enterprise")]
-                trigger_ddl_manager,
-            )
-            .context(error::InitDdlManagerSnafu)?,
-        );
-
-        Ok(procedure_executor)
-    }

    pub async fn create_table_metadata_manager(
        kv_backend: KvBackendRef,
    ) -> Result<TableMetadataManagerRef> {
@@ -23,7 +23,7 @@ pub mod selector;
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct DisplayProcessId {
    pub server_addr: String,
-    pub id: u64,
+    pub id: u32,
}

impl Display for DisplayProcessId {
@@ -44,7 +44,7 @@ impl TryFrom<&str> for DisplayProcessId {
        let id = split
            .next()
            .context(error::ParseProcessIdSnafu { s: value })?;
-        let id = u64::from_str(id)
+        let id = u32::from_str(id)
            .ok()
            .context(error::ParseProcessIdSnafu { s: value })?;
        Ok(DisplayProcessId { server_addr, id })
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

+use std::fmt::Debug;
use std::time::Duration;

use common_grpc::channel_manager::{ChannelConfig, ChannelManager};
@@ -30,7 +31,7 @@ use crate::error::{MetaSnafu, Result};
pub type FrontendClientPtr = Box<dyn FrontendClient>;

#[async_trait::async_trait]
-pub trait FrontendClient: Send {
+pub trait FrontendClient: Send + Debug {
    async fn list_process(&mut self, req: ListProcessRequest) -> Result<ListProcessResponse>;

    async fn kill_process(&mut self, req: KillProcessRequest) -> Result<KillProcessResponse>;
@@ -14,8 +14,8 @@

use crate::function_registry::FunctionRegistry;

-pub(crate) mod hll;
-mod uddsketch;
+pub mod hll;
+pub mod uddsketch;

pub(crate) struct ApproximateFunction;
@@ -18,7 +18,7 @@ use std::sync::Arc;
use common_query::error::Result;
use common_query::prelude::{Signature, Volatility};
use datatypes::prelude::{ConcreteDataType, ScalarVector};
-use datatypes::vectors::{StringVector, UInt64Vector, VectorRef};
+use datatypes::vectors::{StringVector, UInt32Vector, VectorRef};
use derive_more::Display;

use crate::function::{Function, FunctionContext};
@@ -144,7 +144,7 @@ impl Function for PgBackendPidFunction {
    fn eval(&self, func_ctx: &FunctionContext, _columns: &[VectorRef]) -> Result<VectorRef> {
        let pid = func_ctx.query_ctx.process_id();

-        Ok(Arc::new(UInt64Vector::from_slice([pid])) as _)
+        Ok(Arc::new(UInt32Vector::from_slice([pid])) as _)
    }
}
@@ -164,7 +164,7 @@ impl Function for ConnectionIdFunction {
    fn eval(&self, func_ctx: &FunctionContext, _columns: &[VectorRef]) -> Result<VectorRef> {
        let pid = func_ctx.query_ctx.process_id();

-        Ok(Arc::new(UInt64Vector::from_slice([pid])) as _)
+        Ok(Arc::new(UInt32Vector::from_slice([pid])) as _)
    }
}
@@ -34,7 +34,7 @@ use table::requests::{
};

use crate::error::{
-    InvalidColumnDefSnafu, InvalidSetFulltextOptionRequestSnafu,
+    InvalidColumnDefSnafu, InvalidIndexOptionSnafu, InvalidSetFulltextOptionRequestSnafu,
    InvalidSetSkippingIndexOptionRequestSnafu, InvalidSetTableOptionRequestSnafu,
    InvalidUnsetTableOptionRequestSnafu, MissingAlterIndexOptionSnafu, MissingFieldSnafu,
    MissingTimestampColumnSnafu, Result, UnknownLocationTypeSnafu,
@@ -126,18 +126,21 @@ pub fn alter_expr_to_request(table_id: TableId, expr: AlterTableExpr) -> Result<
                api::v1::set_index::Options::Fulltext(f) => AlterKind::SetIndex {
                    options: SetIndexOptions::Fulltext {
                        column_name: f.column_name.clone(),
-                        options: FulltextOptions {
-                            enable: f.enable,
-                            analyzer: as_fulltext_option_analyzer(
+                        options: FulltextOptions::new(
+                            f.enable,
+                            as_fulltext_option_analyzer(
                                Analyzer::try_from(f.analyzer)
                                    .context(InvalidSetFulltextOptionRequestSnafu)?,
                            ),
-                            case_sensitive: f.case_sensitive,
-                            backend: as_fulltext_option_backend(
+                            f.case_sensitive,
+                            as_fulltext_option_backend(
                                PbFulltextBackend::try_from(f.backend)
                                    .context(InvalidSetFulltextOptionRequestSnafu)?,
                            ),
-                        },
+                            f.granularity as u32,
+                            f.false_positive_rate,
+                        )
+                        .context(InvalidIndexOptionSnafu)?,
                    },
                },
                api::v1::set_index::Options::Inverted(i) => AlterKind::SetIndex {
@@ -148,13 +151,15 @@ pub fn alter_expr_to_request(table_id: TableId, expr: AlterTableExpr) -> Result<
                api::v1::set_index::Options::Skipping(s) => AlterKind::SetIndex {
                    options: SetIndexOptions::Skipping {
                        column_name: s.column_name,
-                        options: SkippingIndexOptions {
-                            granularity: s.granularity as u32,
-                            index_type: as_skipping_index_type(
+                        options: SkippingIndexOptions::new(
+                            s.granularity as u32,
+                            s.false_positive_rate,
+                            as_skipping_index_type(
                                PbSkippingIndexType::try_from(s.skipping_index_type)
                                    .context(InvalidSetSkippingIndexOptionRequestSnafu)?,
                            ),
-                        },
+                        )
+                        .context(InvalidIndexOptionSnafu)?,
                    },
                },
            },
@@ -180,6 +185,22 @@ pub fn alter_expr_to_request(table_id: TableId, expr: AlterTableExpr) -> Result<
            },
            None => return MissingAlterIndexOptionSnafu.fail(),
        },
+        Kind::DropDefaults(o) => {
+            let names = o
+                .drop_defaults
+                .into_iter()
+                .map(|col| {
+                    ensure!(
+                        !col.column_name.is_empty(),
+                        MissingFieldSnafu {
+                            field: "column_name"
+                        }
+                    );
+                    Ok(col.column_name)
+                })
+                .collect::<Result<Vec<_>>>()?;
+            AlterKind::DropDefaults { names }
+        }
    };

    let request = AlterTableRequest {
@@ -153,6 +153,14 @@ pub enum Error {
        #[snafu(implicit)]
        location: Location,
    },
+
+    #[snafu(display("Invalid index option"))]
+    InvalidIndexOption {
+        #[snafu(implicit)]
+        location: Location,
+        #[snafu(source)]
+        error: datatypes::error::Error,
+    },
}

pub type Result<T> = std::result::Result<T, Error>;
@@ -180,7 +188,8 @@ impl ErrorExt for Error {
            | Error::InvalidUnsetTableOptionRequest { .. }
            | Error::InvalidSetFulltextOptionRequest { .. }
            | Error::InvalidSetSkippingIndexOptionRequest { .. }
-            | Error::MissingAlterIndexOption { .. } => StatusCode::InvalidArguments,
+            | Error::MissingAlterIndexOption { .. }
+            | Error::InvalidIndexOption { .. } => StatusCode::InvalidArguments,
        }
    }
@@ -201,8 +201,8 @@ impl ChannelManager {
            "http"
        };

-        let mut endpoint =
-            Endpoint::new(format!("{http_prefix}://{addr}")).context(CreateChannelSnafu)?;
+        let mut endpoint = Endpoint::new(format!("{http_prefix}://{addr}"))
+            .context(CreateChannelSnafu { addr })?;

        if let Some(dur) = self.config().timeout {
            endpoint = endpoint.timeout(dur);
@@ -237,7 +237,7 @@ impl ChannelManager {
        if let Some(tls_config) = &self.inner.client_tls_config {
            endpoint = endpoint
                .tls_config(tls_config.clone())
-                .context(CreateChannelSnafu)?;
+                .context(CreateChannelSnafu { addr })?;
        }

        endpoint = endpoint
@@ -52,8 +52,9 @@ pub enum Error {
        location: Location,
    },

-    #[snafu(display("Failed to create gRPC channel"))]
+    #[snafu(display("Failed to create gRPC channel from '{addr}'"))]
    CreateChannel {
+        addr: String,
        #[snafu(source)]
        error: tonic::transport::Error,
        #[snafu(implicit)]
@@ -17,7 +17,7 @@ workspace = true
anymap2 = "0.13.0"
api.workspace = true
async-recursion = "1.0"
-async-stream = "0.3"
+async-stream.workspace = true
async-trait.workspace = true
backon = { workspace = true, optional = true }
base64.workspace = true
@@ -25,6 +25,7 @@ use common_procedure::error::{FromJsonSnafu, Result as ProcedureResult, ToJsonSn
use common_procedure::{Context, LockKey, Procedure, Status};
use common_telemetry::{error, info, warn};
use futures_util::future;
+pub use region_request::make_alter_region_request;
use serde::{Deserialize, Serialize};
use snafu::{ensure, ResultExt};
use store_api::metadata::ColumnMetadata;
@@ -12,20 +12,18 @@
// See the License for the specific language governing permissions and
// limitations under the License.

-use api::v1;
use api::v1::alter_table_expr::Kind;
use api::v1::region::{
    alter_request, region_request, AddColumn, AddColumns, AlterRequest, AlterRequests,
    RegionColumnDef, RegionRequest, RegionRequestHeader,
};
+use api::v1::{self, AlterTableExpr};
use common_telemetry::tracing_context::TracingContext;
use store_api::storage::RegionId;

use crate::ddl::alter_logical_tables::AlterLogicalTablesProcedure;
use crate::error::Result;
-use crate::key::table_info::TableInfoValue;
use crate::peer::Peer;
use crate::rpc::ddl::AlterTableTask;
use crate::rpc::router::{find_leader_regions, RegionRoute};

impl AlterLogicalTablesProcedure {
@@ -62,34 +60,37 @@ impl AlterLogicalTablesProcedure {
        {
            for region_number in &regions_on_this_peer {
                let region_id = RegionId::new(table.table_info.ident.table_id, *region_number);
-                let request = self.make_alter_region_request(region_id, task, table)?;
+                let request = make_alter_region_request(
+                    region_id,
+                    &task.alter_table,
+                    table.table_info.ident.version,
+                );
                requests.push(request);
            }
        }

        Ok(AlterRequests { requests })
    }
}

-    fn make_alter_region_request(
-        &self,
-        region_id: RegionId,
-        task: &AlterTableTask,
-        table: &TableInfoValue,
-    ) -> Result<AlterRequest> {
-        let region_id = region_id.as_u64();
-        let schema_version = table.table_info.ident.version;
-        let kind = match &task.alter_table.kind {
-            Some(Kind::AddColumns(add_columns)) => Some(alter_request::Kind::AddColumns(
-                to_region_add_columns(add_columns),
-            )),
-            _ => unreachable!(), // Safety: we have checked the kind in check_input_tasks
-        };
+/// Makes an alter region request.
+pub fn make_alter_region_request(
+    region_id: RegionId,
+    alter_table_expr: &AlterTableExpr,
+    schema_version: u64,
+) -> AlterRequest {
+    let region_id = region_id.as_u64();
+    let kind = match &alter_table_expr.kind {
+        Some(Kind::AddColumns(add_columns)) => Some(alter_request::Kind::AddColumns(
+            to_region_add_columns(add_columns),
+        )),
+        _ => unreachable!(), // Safety: we have checked the kind in check_input_tasks
+    };

-        Ok(AlterRequest {
-            region_id,
-            schema_version,
-            kind,
-        })
+    AlterRequest {
+        region_id,
+        schema_version,
+        kind,
+    }
}
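
The net effect of this hunk: `make_alter_region_request` drops its `&self` and `TableInfoValue` dependencies, becoming a free, infallible function, which is what lets the new CLI repair tool (see `alter_table.rs` above) reuse it. A hedged usage sketch, with `logical_table_id`, `region_number`, `expr`, and `version` standing in for values taken from a table's `RawTableInfo` and route:

    // Build one region-level alter request from a table-level expr.
    let region_id = RegionId::new(logical_table_id, region_number);
    let request = make_alter_region_request(region_id, &expr, version);
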
@@ -135,6 +135,7 @@ fn create_proto_alter_kind(
        Kind::UnsetTableOptions(v) => Ok(Some(alter_request::Kind::UnsetTableOptions(v.clone()))),
        Kind::SetIndex(v) => Ok(Some(alter_request::Kind::SetIndex(v.clone()))),
        Kind::UnsetIndex(v) => Ok(Some(alter_request::Kind::UnsetIndex(v.clone()))),
+        Kind::DropDefaults(v) => Ok(Some(alter_request::Kind::DropDefaults(v.clone()))),
    }
}

@@ -61,7 +61,8 @@ impl AlterTableProcedure {
            | AlterKind::SetTableOptions { .. }
            | AlterKind::UnsetTableOptions { .. }
            | AlterKind::SetIndex { .. }
-            | AlterKind::UnsetIndex { .. } => {}
+            | AlterKind::UnsetIndex { .. }
+            | AlterKind::DropDefaults { .. } => {}
        }

        Ok(new_info)
@@ -25,6 +25,7 @@ use common_procedure::error::{FromJsonSnafu, Result as ProcedureResult, ToJsonSn
use common_procedure::{Context as ProcedureContext, LockKey, Procedure, Status};
use common_telemetry::{debug, error, warn};
use futures::future;
+pub use region_request::create_region_request_builder;
use serde::{Deserialize, Serialize};
use snafu::{ensure, ResultExt};
use store_api::metadata::ColumnMetadata;
@@ -15,16 +15,16 @@
use std::collections::HashMap;

use api::v1::region::{region_request, CreateRequests, RegionRequest, RegionRequestHeader};
+use api::v1::CreateTableExpr;
use common_telemetry::debug;
use common_telemetry::tracing_context::TracingContext;
-use store_api::storage::RegionId;
+use store_api::storage::{RegionId, TableId};

use crate::ddl::create_logical_tables::CreateLogicalTablesProcedure;
use crate::ddl::create_table_template::{build_template, CreateRequestBuilder};
use crate::ddl::utils::region_storage_path;
use crate::error::Result;
use crate::peer::Peer;
use crate::rpc::ddl::CreateTableTask;
use crate::rpc::router::{find_leader_regions, RegionRoute};

impl CreateLogicalTablesProcedure {
@@ -45,13 +45,15 @@ impl CreateLogicalTablesProcedure {
        let catalog = &create_table_expr.catalog_name;
        let schema = &create_table_expr.schema_name;
        let logical_table_id = task.table_info.ident.table_id;
+        let physical_table_id = self.data.physical_table_id;
        let storage_path = region_storage_path(catalog, schema);
-        let request_builder = self.create_region_request_builder(task)?;
+        let request_builder =
+            create_region_request_builder(&task.create_table, physical_table_id)?;

        for region_number in &regions_on_this_peer {
            let region_id = RegionId::new(logical_table_id, *region_number);
            let one_region_request =
-                request_builder.build_one(region_id, storage_path.clone(), &HashMap::new())?;
+                request_builder.build_one(region_id, storage_path.clone(), &HashMap::new());
            requests.push(one_region_request);
        }
    }
@@ -69,16 +71,13 @@ impl CreateLogicalTablesProcedure {
            body: Some(region_request::Body::Creates(CreateRequests { requests })),
        }))
    }

-    fn create_region_request_builder(
-        &self,
-        task: &CreateTableTask,
-    ) -> Result<CreateRequestBuilder> {
-        let create_expr = &task.create_table;
-        let template = build_template(create_expr)?;
-        Ok(CreateRequestBuilder::new(
-            template,
-            Some(self.data.physical_table_id),
-        ))
-    }
}

+/// Creates a region request builder.
+pub fn create_region_request_builder(
+    create_table_expr: &CreateTableExpr,
+    physical_table_id: TableId,
+) -> Result<CreateRequestBuilder> {
+    let template = build_template(create_table_expr)?;
+    Ok(CreateRequestBuilder::new(template, Some(physical_table_id)))
+}
@@ -218,11 +218,8 @@ impl CreateTableProcedure {
        let mut requests = Vec::with_capacity(regions.len());
        for region_number in regions {
            let region_id = RegionId::new(self.table_id(), region_number);
-            let create_region_request = request_builder.build_one(
-                region_id,
-                storage_path.clone(),
-                region_wal_options,
-            )?;
+            let create_region_request =
+                request_builder.build_one(region_id, storage_path.clone(), region_wal_options);
            requests.push(PbRegionRequest::Create(create_region_request));
        }
@@ -105,12 +105,12 @@ impl CreateRequestBuilder {
        &self.template
    }

-    pub(crate) fn build_one(
+    pub fn build_one(
        &self,
        region_id: RegionId,
        storage_path: String,
        region_wal_options: &HashMap<RegionNumber, String>,
-    ) -> Result<CreateRequest> {
+    ) -> CreateRequest {
        let mut request = self.template.clone();

        request.region_id = region_id.as_u64();
@@ -130,6 +130,6 @@ impl CreateRequestBuilder {
            );
        }

-        Ok(request)
+        request
    }
}
@@ -13,7 +13,6 @@
// limitations under the License.

use std::any::Any;
-use std::collections::HashMap;

use common_procedure::Status;
use common_telemetry::info;
@@ -25,7 +24,7 @@ use table::table_name::TableName;
use crate::ddl::drop_database::cursor::DropDatabaseCursor;
use crate::ddl::drop_database::{DropDatabaseContext, DropTableTarget, State};
use crate::ddl::drop_table::executor::DropTableExecutor;
-use crate::ddl::utils::extract_region_wal_options;
+use crate::ddl::utils::get_region_wal_options;
use crate::ddl::DdlContext;
use crate::error::{self, Result};
use crate::key::table_route::TableRouteValue;
@@ -109,17 +108,12 @@ impl State for DropDatabaseExecutor {
        );

        // Deletes topic-region mapping if dropping physical table
-        let region_wal_options =
-            if let TableRouteValue::Physical(table_route_value) = &table_route_value {
-                let datanode_table_values = ddl_ctx
-                    .table_metadata_manager
-                    .datanode_table_manager()
-                    .regions(self.physical_table_id, table_route_value)
-                    .await?;
-                extract_region_wal_options(&datanode_table_values)?
-            } else {
-                HashMap::new()
-            };
+        let region_wal_options = get_region_wal_options(
+            &ddl_ctx.table_metadata_manager,
+            &table_route_value,
+            self.physical_table_id,
+        )
+        .await?;

        executor
            .on_destroy_metadata(ddl_ctx, &table_route_value, &region_wal_options)
@@ -42,7 +42,8 @@ use crate::error::{
|
||||
};
|
||||
use crate::key::datanode_table::DatanodeTableValue;
|
||||
use crate::key::table_name::TableNameKey;
|
||||
use crate::key::TableMetadataManagerRef;
|
||||
use crate::key::table_route::TableRouteValue;
|
||||
use crate::key::{TableMetadataManager, TableMetadataManagerRef};
|
||||
use crate::peer::Peer;
|
||||
use crate::rpc::ddl::CreateTableTask;
|
||||
use crate::rpc::router::{find_follower_regions, find_followers, RegionRoute};
|
||||
@@ -187,6 +188,25 @@ pub fn parse_region_wal_options(
|
||||
Ok(region_wal_options)
|
||||
}
|
||||
|
||||
/// Gets the wal options for a table.
|
||||
pub async fn get_region_wal_options(
|
||||
table_metadata_manager: &TableMetadataManager,
|
||||
table_route_value: &TableRouteValue,
|
||||
physical_table_id: TableId,
|
||||
) -> Result<HashMap<RegionNumber, WalOptions>> {
|
||||
let region_wal_options =
|
||||
if let TableRouteValue::Physical(table_route_value) = &table_route_value {
|
||||
let datanode_table_values = table_metadata_manager
|
||||
.datanode_table_manager()
|
||||
.regions(physical_table_id, table_route_value)
|
||||
.await?;
|
||||
extract_region_wal_options(&datanode_table_values)?
|
||||
} else {
|
||||
HashMap::new()
|
||||
};
|
||||
Ok(region_wal_options)
|
||||
}
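
The new `get_region_wal_options` helper lifts the `if let TableRouteValue::Physical(..)` block that previously sat inline in `DropDatabaseExecutor` into a shared function. A rough, self-contained sketch of that helper's shape, with hypothetical stand-in types in place of `TableRouteValue` and the metadata managers:

    use std::collections::HashMap;

    // Stand-ins: logical tables have no per-region WAL options,
    // so the helper only does work for physical routes.
    enum TableRoute {
        Physical(Vec<u32>), // region numbers of a physical table
        Logical,
    }

    fn get_region_wal_options(route: &TableRoute) -> HashMap<u32, String> {
        match route {
            TableRoute::Physical(regions) => regions
                .iter()
                .map(|r| (*r, format!("wal-topic-{r}")))
                .collect(),
            TableRoute::Logical => HashMap::new(),
        }
    }

    fn main() {
        let physical = TableRoute::Physical(vec![0, 1]);
        assert_eq!(get_region_wal_options(&physical).len(), 2);
        assert!(get_region_wal_options(&TableRoute::Logical).is_empty());
    }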

/// Extracts region wal options from [DatanodeTableValue]s.
pub fn extract_region_wal_options(
datanode_table_values: &Vec<DatanodeTableValue>,

@@ -125,13 +125,12 @@ impl DdlManager {
ddl_context: DdlContext,
procedure_manager: ProcedureManagerRef,
register_loaders: bool,
#[cfg(feature = "enterprise")] trigger_ddl_manager: Option<TriggerDdlManagerRef>,
) -> Result<Self> {
let manager = Self {
ddl_context,
procedure_manager,
#[cfg(feature = "enterprise")]
trigger_ddl_manager,
trigger_ddl_manager: None,
};
if register_loaders {
manager.register_loaders()?;
@@ -139,6 +138,15 @@ impl DdlManager {
Ok(manager)
}

#[cfg(feature = "enterprise")]
pub fn with_trigger_ddl_manager(
mut self,
trigger_ddl_manager: Option<TriggerDdlManagerRef>,
) -> Self {
self.trigger_ddl_manager = trigger_ddl_manager;
self
}
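
This hunk swaps a feature-gated constructor argument for a chainable, consuming setter, which keeps the `#[cfg(feature = "enterprise")]` surface out of the constructor's signature. The pattern in isolation, with an illustrative stand-in for `DdlManager`:

    #[derive(Default)]
    struct Manager {
        trigger_ddl_manager: Option<String>, // stand-in for TriggerDdlManagerRef
    }

    impl Manager {
        // Consuming setter: takes self by value and returns it for chaining.
        fn with_trigger_ddl_manager(mut self, trigger: Option<String>) -> Self {
            self.trigger_ddl_manager = trigger;
            self
        }
    }

    fn main() {
        let manager = Manager::default().with_trigger_ddl_manager(Some("enterprise".into()));
        assert!(manager.trigger_ddl_manager.is_some());
    }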

/// Returns the [TableMetadataManagerRef].
pub fn table_metadata_manager(&self) -> &TableMetadataManagerRef {
&self.ddl_context.table_metadata_manager
@@ -964,8 +972,6 @@ mod tests {
},
procedure_manager.clone(),
true,
#[cfg(feature = "enterprise")]
None,
);

let expected_loaders = vec![

@@ -109,7 +109,7 @@ pub mod table_name;
pub mod table_route;
#[cfg(any(test, feature = "testing"))]
pub mod test_utils;
mod tombstone;
pub mod tombstone;
pub mod topic_name;
pub mod topic_region;
pub mod txn_helper;
@@ -535,6 +535,29 @@ impl TableMetadataManager {
}
}

/// Creates a new `TableMetadataManager` with a custom tombstone prefix.
pub fn new_with_custom_tombstone_prefix(
kv_backend: KvBackendRef,
tombstone_prefix: &str,
) -> Self {
Self {
table_name_manager: TableNameManager::new(kv_backend.clone()),
table_info_manager: TableInfoManager::new(kv_backend.clone()),
view_info_manager: ViewInfoManager::new(kv_backend.clone()),
datanode_table_manager: DatanodeTableManager::new(kv_backend.clone()),
catalog_manager: CatalogManager::new(kv_backend.clone()),
schema_manager: SchemaManager::new(kv_backend.clone()),
table_route_manager: TableRouteManager::new(kv_backend.clone()),
tombstone_manager: TombstoneManager::new_with_prefix(
kv_backend.clone(),
tombstone_prefix,
),
topic_name_manager: TopicNameManager::new(kv_backend.clone()),
topic_region_manager: TopicRegionManager::new(kv_backend.clone()),
kv_backend,
}
}

pub async fn init(&self) -> Result<()> {
let catalog_name = CatalogNameKey::new(DEFAULT_CATALOG_NAME);

@@ -925,7 +948,7 @@ impl TableMetadataManager {
) -> Result<()> {
let keys =
self.table_metadata_keys(table_id, table_name, table_route_value, region_wal_options)?;
self.tombstone_manager.create(keys).await
self.tombstone_manager.create(keys).await.map(|_| ())
}

/// Deletes metadata tombstone for table **permanently**.
@@ -939,7 +962,10 @@ impl TableMetadataManager {
) -> Result<()> {
let table_metadata_keys =
self.table_metadata_keys(table_id, table_name, table_route_value, region_wal_options)?;
self.tombstone_manager.delete(table_metadata_keys).await
self.tombstone_manager
.delete(table_metadata_keys)
.await
.map(|_| ())
}

/// Restores metadata for table.
@@ -953,7 +979,7 @@ impl TableMetadataManager {
) -> Result<()> {
let keys =
self.table_metadata_keys(table_id, table_name, table_route_value, region_wal_options)?;
self.tombstone_manager.restore(keys).await
self.tombstone_manager.restore(keys).await.map(|_| ())
}

/// Deletes metadata for table **permanently**.

@@ -14,31 +14,51 @@

use std::collections::HashMap;

use common_telemetry::debug;
use snafu::ensure;

use crate::error::{self, Result};
use crate::key::txn_helper::TxnOpGetResponseSet;
use crate::kv_backend::txn::{Compare, CompareOp, Txn, TxnOp};
use crate::kv_backend::KvBackendRef;
use crate::rpc::store::BatchGetRequest;
use crate::rpc::store::{BatchDeleteRequest, BatchGetRequest};

/// [TombstoneManager] provides the ability to:
/// - logically delete values
/// - restore the deleted values
pub(crate) struct TombstoneManager {
pub struct TombstoneManager {
kv_backend: KvBackendRef,
tombstone_prefix: String,
// Only used for testing.
#[cfg(test)]
max_txn_ops: Option<usize>,
}

const TOMBSTONE_PREFIX: &str = "__tombstone/";

fn to_tombstone(key: &[u8]) -> Vec<u8> {
[TOMBSTONE_PREFIX.as_bytes(), key].concat()
}

impl TombstoneManager {
/// Returns [TombstoneManager].
pub fn new(kv_backend: KvBackendRef) -> Self {
Self { kv_backend }
Self::new_with_prefix(kv_backend, TOMBSTONE_PREFIX)
}

/// Returns [TombstoneManager] with a custom tombstone prefix.
pub fn new_with_prefix(kv_backend: KvBackendRef, prefix: &str) -> Self {
Self {
kv_backend,
tombstone_prefix: prefix.to_string(),
#[cfg(test)]
max_txn_ops: None,
}
}

pub fn to_tombstone(&self, key: &[u8]) -> Vec<u8> {
[self.tombstone_prefix.as_bytes(), key].concat()
}
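
The tombstone prefix becomes per-manager state here instead of a module constant, so callers such as `new_with_custom_tombstone_prefix` can isolate key spaces. A tiny sketch of the prefix-concatenation itself, with a stand-in struct:

    struct Tombstoner {
        prefix: String,
    }

    impl Tombstoner {
        // Namespacing a key is just prefix ++ key on raw bytes.
        fn to_tombstone(&self, key: &[u8]) -> Vec<u8> {
            [self.prefix.as_bytes(), key].concat()
        }
    }

    fn main() {
        let t = Tombstoner { prefix: "__tombstone/".to_string() };
        assert_eq!(t.to_tombstone(b"foo"), b"__tombstone/foo".to_vec());

        // A custom prefix yields a disjoint key space.
        let custom = Tombstoner { prefix: "__test_tombstone/".to_string() };
        assert_ne!(t.to_tombstone(b"foo"), custom.to_tombstone(b"foo"));
    }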

#[cfg(test)]
pub fn set_max_txn_ops(&mut self, max_txn_ops: usize) {
self.max_txn_ops = Some(max_txn_ops);
}

/// Moves value to `dest_key`.
@@ -67,11 +87,15 @@ impl TombstoneManager {
(txn, TxnOpGetResponseSet::filter(src_key))
}

async fn move_values_inner(&self, keys: &[Vec<u8>], dest_keys: &[Vec<u8>]) -> Result<()> {
async fn move_values_inner(&self, keys: &[Vec<u8>], dest_keys: &[Vec<u8>]) -> Result<usize> {
ensure!(
keys.len() == dest_keys.len(),
error::UnexpectedSnafu {
err_msg: "The length of keys does not match the length of dest_keys."
err_msg: format!(
"The length of keys({}) does not match the length of dest_keys({}).",
keys.len(),
dest_keys.len()
),
}
);
// The key -> dest key mapping.
@@ -102,7 +126,7 @@ impl TombstoneManager {
.unzip();
let mut resp = self.kv_backend.txn(Txn::merge_all(txns)).await?;
if resp.succeeded {
return Ok(());
return Ok(keys.len());
}
let mut set = TxnOpGetResponseSet::from(&mut resp.responses);
// Updates results.
@@ -124,17 +148,45 @@ impl TombstoneManager {
.fail()
}

/// Moves values to `dest_key`.
async fn move_values(&self, keys: Vec<Vec<u8>>, dest_keys: Vec<Vec<u8>>) -> Result<()> {
let chunk_size = self.kv_backend.max_txn_ops() / 2;
if keys.len() > chunk_size {
let keys_chunks = keys.chunks(chunk_size).collect::<Vec<_>>();
let dest_keys_chunks = keys.chunks(chunk_size).collect::<Vec<_>>();
for (keys, dest_keys) in keys_chunks.into_iter().zip(dest_keys_chunks) {
self.move_values_inner(keys, dest_keys).await?;
}
fn max_txn_ops(&self) -> usize {
#[cfg(test)]
if let Some(max_txn_ops) = self.max_txn_ops {
return max_txn_ops;
}
self.kv_backend.max_txn_ops()
}

Ok(())
/// Moves values to `dest_key`.
///
/// Returns the number of keys that were moved.
async fn move_values(&self, keys: Vec<Vec<u8>>, dest_keys: Vec<Vec<u8>>) -> Result<usize> {
ensure!(
keys.len() == dest_keys.len(),
error::UnexpectedSnafu {
err_msg: format!(
"The length of keys({}) does not match the length of dest_keys({}).",
keys.len(),
dest_keys.len()
),
}
);
if keys.is_empty() {
return Ok(0);
}
let chunk_size = self.max_txn_ops() / 2;
if keys.len() > chunk_size {
debug!(
"Moving values with multiple chunks, keys len: {}, chunk_size: {}",
keys.len(),
chunk_size
);
let mut moved_keys = 0;
let keys_chunks = keys.chunks(chunk_size).collect::<Vec<_>>();
let dest_keys_chunks = dest_keys.chunks(chunk_size).collect::<Vec<_>>();
for (keys, dest_keys) in keys_chunks.into_iter().zip(dest_keys_chunks) {
moved_keys += self.move_values_inner(keys, dest_keys).await?;
}
Ok(moved_keys)
} else {
self.move_values_inner(&keys, &dest_keys).await
}
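
Two things are happening in the `move_values` hunk: the old code built `dest_keys_chunks` from `keys` (a bug this version fixes by chunking `dest_keys`), and the chunk size is `max_txn_ops / 2` because each moved key costs two transaction operations, a put of the destination key and a delete of the source key. A condensed, synchronous sketch of the chunking arithmetic, with a counting stand-in for the async kv-backend transaction:

    fn move_in_chunks(keys: &[Vec<u8>], dest_keys: &[Vec<u8>], max_txn_ops: usize) -> usize {
        assert_eq!(keys.len(), dest_keys.len());
        if keys.is_empty() {
            return 0;
        }
        // Each move needs 2 ops (put dest + delete source) per transaction.
        let chunk_size = max_txn_ops / 2;
        let mut moved = 0;
        for (ks, ds) in keys.chunks(chunk_size).zip(dest_keys.chunks(chunk_size)) {
            // Stand-in for move_values_inner: pretend every key in the chunk moves.
            moved += ks.len().min(ds.len());
        }
        moved
    }

    fn main() {
        let keys: Vec<Vec<u8>> = (0..9).map(|i| vec![i]).collect();
        let dest: Vec<Vec<u8>> = (0..9).map(|i| vec![i + 100]).collect();
        // max_txn_ops = 4 -> chunk_size = 2 -> five chunks, nine keys moved.
        assert_eq!(move_in_chunks(&keys, &dest, 4), 9);
    }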
@@ -145,11 +197,13 @@ impl TombstoneManager {
/// Performs the following:
/// - deletes origin values.
/// - stores tombstone values.
pub(crate) async fn create(&self, keys: Vec<Vec<u8>>) -> Result<()> {
///
/// Returns the number of keys that were moved.
pub async fn create(&self, keys: Vec<Vec<u8>>) -> Result<usize> {
let (keys, dest_keys): (Vec<_>, Vec<_>) = keys
.into_iter()
.map(|key| {
let tombstone_key = to_tombstone(&key);
let tombstone_key = self.to_tombstone(&key);
(key, tombstone_key)
})
.unzip();
@@ -162,11 +216,13 @@ impl TombstoneManager {
/// Performs the following:
/// - restores origin values.
/// - deletes tombstone values.
pub(crate) async fn restore(&self, keys: Vec<Vec<u8>>) -> Result<()> {
///
/// Returns the number of keys that were restored.
pub async fn restore(&self, keys: Vec<Vec<u8>>) -> Result<usize> {
let (keys, dest_keys): (Vec<_>, Vec<_>) = keys
.into_iter()
.map(|key| {
let tombstone_key = to_tombstone(&key);
let tombstone_key = self.to_tombstone(&key);
(tombstone_key, key)
})
.unzip();
@@ -175,16 +231,21 @@ impl TombstoneManager {
}

/// Deletes tombstone values for the specified `keys`.
pub(crate) async fn delete(&self, keys: Vec<Vec<u8>>) -> Result<()> {
let operations = keys
///
/// Returns the number of keys that were deleted.
pub async fn delete(&self, keys: Vec<Vec<u8>>) -> Result<usize> {
let keys = keys
.iter()
.map(|key| TxnOp::Delete(to_tombstone(key)))
.map(|key| self.to_tombstone(key))
.collect::<Vec<_>>();

let txn = Txn::new().and_then(operations);
// Always succeeds.
let _ = self.kv_backend.txn(txn).await?;
Ok(())
let num_keys = keys.len();
let _ = self
.kv_backend
.batch_delete(BatchDeleteRequest::new().with_keys(keys))
.await?;

Ok(num_keys)
}
}
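
The lifecycle these three methods implement is symmetric: `create` moves live keys into the tombstone namespace (a logical delete), `restore` moves them back, and `delete` drops the tombstoned copies permanently. A sketch of that lifecycle with a HashMap standing in for the kv-backend; each step reports how many keys it touched, mirroring the new `Result<usize>` signatures:

    use std::collections::HashMap;

    // Stand-in for a transactional rename in the kv-backend.
    fn rename(kv: &mut HashMap<Vec<u8>, Vec<u8>>, from: &[u8], to: &[u8]) -> usize {
        match kv.remove(from) {
            Some(v) => {
                kv.insert(to.to_vec(), v);
                1
            }
            None => 0,
        }
    }

    fn main() {
        let prefix: &[u8] = b"__tombstone/";
        let mut kv = HashMap::new();
        kv.insert(b"foo".to_vec(), b"bar".to_vec());

        let tomb = [prefix, b"foo".as_slice()].concat();
        assert_eq!(rename(&mut kv, b"foo", &tomb), 1); // create: logical delete
        assert!(!kv.contains_key(b"foo".as_slice()));
        assert_eq!(rename(&mut kv, &tomb, b"foo"), 1); // restore: undo it
        assert!(kv.remove(b"foo".as_slice()).is_some()); // delete: permanent
    }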

@@ -194,7 +255,6 @@ mod tests {
use std::collections::HashMap;
use std::sync::Arc;

use super::to_tombstone;
use crate::error::Error;
use crate::key::tombstone::TombstoneManager;
use crate::kv_backend::memory::MemoryKvBackend;
@@ -246,7 +306,7 @@ mod tests {
assert!(!kv_backend.exists(b"foo").await.unwrap());
assert_eq!(
kv_backend
.get(&to_tombstone(b"bar"))
.get(&tombstone_manager.to_tombstone(b"bar"))
.await
.unwrap()
.unwrap()
@@ -255,7 +315,7 @@ mod tests {
);
assert_eq!(
kv_backend
.get(&to_tombstone(b"foo"))
.get(&tombstone_manager.to_tombstone(b"foo"))
.await
.unwrap()
.unwrap()
@@ -287,7 +347,7 @@ mod tests {
kv_backend.clone(),
&[MoveValue {
key: b"bar".to_vec(),
dest_key: to_tombstone(b"bar"),
dest_key: tombstone_manager.to_tombstone(b"bar"),
value: b"baz".to_vec(),
}],
)
@@ -364,7 +424,7 @@ mod tests {
.iter()
.map(|(key, value)| MoveValue {
key: key.clone(),
dest_key: to_tombstone(key),
dest_key: tombstone_manager.to_tombstone(key),
value: value.clone(),
})
.collect::<Vec<_>>();
@@ -373,16 +433,73 @@ mod tests {
.into_iter()
.map(|kv| (kv.key, kv.dest_key))
.unzip();
tombstone_manager
let moved_keys = tombstone_manager
.move_values(keys.clone(), dest_keys.clone())
.await
.unwrap();
assert_eq!(kvs.len(), moved_keys);
check_moved_values(kv_backend.clone(), &move_values).await;
// Moves again
tombstone_manager
let moved_keys = tombstone_manager
.move_values(keys.clone(), dest_keys.clone())
.await
.unwrap();
assert_eq!(0, moved_keys);
check_moved_values(kv_backend.clone(), &move_values).await;
}

#[tokio::test]
async fn test_move_values_with_max_txn_ops() {
common_telemetry::init_default_ut_logging();
let kv_backend = Arc::new(MemoryKvBackend::default());
let mut tombstone_manager = TombstoneManager::new(kv_backend.clone());
tombstone_manager.set_max_txn_ops(4);
let kvs = HashMap::from([
(b"bar".to_vec(), b"baz".to_vec()),
(b"foo".to_vec(), b"hi".to_vec()),
(b"baz".to_vec(), b"hello".to_vec()),
(b"qux".to_vec(), b"world".to_vec()),
(b"quux".to_vec(), b"world".to_vec()),
(b"quuux".to_vec(), b"world".to_vec()),
(b"quuuux".to_vec(), b"world".to_vec()),
(b"quuuuux".to_vec(), b"world".to_vec()),
(b"quuuuuux".to_vec(), b"world".to_vec()),
]);
for (key, value) in &kvs {
kv_backend
.put(
PutRequest::new()
.with_key(key.clone())
.with_value(value.clone()),
)
.await
.unwrap();
}
let move_values = kvs
.iter()
.map(|(key, value)| MoveValue {
key: key.clone(),
dest_key: tombstone_manager.to_tombstone(key),
value: value.clone(),
})
.collect::<Vec<_>>();
let (keys, dest_keys): (Vec<_>, Vec<_>) = move_values
.clone()
.into_iter()
.map(|kv| (kv.key, kv.dest_key))
.unzip();
let moved_keys = tombstone_manager
.move_values(keys.clone(), dest_keys.clone())
.await
.unwrap();
assert_eq!(kvs.len(), moved_keys);
check_moved_values(kv_backend.clone(), &move_values).await;
// Moves again
let moved_keys = tombstone_manager
.move_values(keys.clone(), dest_keys.clone())
.await
.unwrap();
assert_eq!(0, moved_keys);
check_moved_values(kv_backend.clone(), &move_values).await;
}

@@ -409,7 +526,7 @@ mod tests {
.iter()
.map(|(key, value)| MoveValue {
key: key.clone(),
dest_key: to_tombstone(key),
dest_key: tombstone_manager.to_tombstone(key),
value: value.clone(),
})
.collect::<Vec<_>>();
@@ -420,17 +537,19 @@ mod tests {
.unzip();
keys.push(b"non-exists".to_vec());
dest_keys.push(b"hi/non-exists".to_vec());
tombstone_manager
let moved_keys = tombstone_manager
.move_values(keys.clone(), dest_keys.clone())
.await
.unwrap();
check_moved_values(kv_backend.clone(), &move_values).await;
assert_eq!(3, moved_keys);
// Moves again
tombstone_manager
let moved_keys = tombstone_manager
.move_values(keys.clone(), dest_keys.clone())
.await
.unwrap();
check_moved_values(kv_backend.clone(), &move_values).await;
assert_eq!(0, moved_keys);
}

#[tokio::test]
@@ -462,7 +581,7 @@ mod tests {
.iter()
.map(|(key, value)| MoveValue {
key: key.clone(),
dest_key: to_tombstone(key),
dest_key: tombstone_manager.to_tombstone(key),
value: value.clone(),
})
.collect::<Vec<_>>();
@@ -471,10 +590,11 @@ mod tests {
.into_iter()
.map(|kv| (kv.key, kv.dest_key))
.unzip();
tombstone_manager
let moved_keys = tombstone_manager
.move_values(keys, dest_keys)
.await
.unwrap();
assert_eq!(kvs.len(), moved_keys);
}

#[tokio::test]
@@ -502,7 +622,7 @@ mod tests {
.iter()
.map(|(key, value)| MoveValue {
key: key.clone(),
dest_key: to_tombstone(key),
dest_key: tombstone_manager.to_tombstone(key),
value: value.clone(),
})
.collect::<Vec<_>>();
@@ -537,7 +657,7 @@ mod tests {
.iter()
.map(|(key, value)| MoveValue {
key: key.clone(),
dest_key: to_tombstone(key),
dest_key: tombstone_manager.to_tombstone(key),
value: value.clone(),
})
.collect::<Vec<_>>();
@@ -552,4 +672,24 @@ mod tests {
.unwrap();
check_moved_values(kv_backend.clone(), &move_values).await;
}

#[tokio::test]
async fn test_move_values_with_different_lengths() {
let kv_backend = Arc::new(MemoryKvBackend::default());
let tombstone_manager = TombstoneManager::new(kv_backend.clone());

let keys = vec![b"bar".to_vec(), b"foo".to_vec()];
let dest_keys = vec![b"bar".to_vec(), b"foo".to_vec(), b"baz".to_vec()];

let err = tombstone_manager
.move_values(keys, dest_keys)
.await
.unwrap_err();
assert!(err
.to_string()
.contains("The length of keys(2) does not match the length of dest_keys(3)."),);

let moved_keys = tombstone_manager.move_values(vec![], vec![]).await.unwrap();
assert_eq!(0, moved_keys);
}
}

@@ -54,20 +54,20 @@ impl<T> MemoryKvBackend<T> {
kvs.clear();
}

#[cfg(test)]
#[cfg(any(test, feature = "testing"))]
/// Returns true if the `kvs` is empty.
pub fn is_empty(&self) -> bool {
self.kvs.read().unwrap().is_empty()
}

#[cfg(test)]
#[cfg(any(test, feature = "testing"))]
/// Returns the `kvs`.
pub fn dump(&self) -> BTreeMap<Vec<u8>, Vec<u8>> {
let kvs = self.kvs.read().unwrap();
kvs.clone()
}

#[cfg(test)]
#[cfg(any(test, feature = "testing"))]
/// Returns the length of `kvs`
pub fn len(&self) -> usize {
self.kvs.read().unwrap().len()

@@ -178,8 +178,6 @@ pub enum Error {
StreamTimeout {
#[snafu(implicit)]
location: Location,
#[snafu(source)]
error: tokio::time::error::Elapsed,
},

#[snafu(display("RecordBatch slice index overflow: {visit_index} > {size}"))]

@@ -12,7 +12,6 @@ deadlock_detection = ["parking_lot/deadlock_detection"]
workspace = true

[dependencies]
atty = "0.2"
backtrace = "0.3"
common-error.workspace = true
console-subscriber = { version = "0.1", optional = true }

@@ -14,6 +14,7 @@

//! logging stuffs, inspired by databend
use std::env;
use std::io::IsTerminal;
use std::sync::{Arc, Mutex, Once};
use std::time::Duration;

@@ -221,14 +222,14 @@ pub fn init_global_logging(
Layer::new()
.json()
.with_writer(writer)
.with_ansi(atty::is(atty::Stream::Stdout))
.with_ansi(std::io::stdout().is_terminal())
.boxed(),
)
} else {
Some(
Layer::new()
.with_writer(writer)
.with_ansi(atty::is(atty::Stream::Stdout))
.with_ansi(std::io::stdout().is_terminal())
.boxed(),
)
}

@@ -475,7 +475,7 @@ mod test {
async fn region_alive_keeper() {
common_telemetry::init_default_ut_logging();
let mut region_server = mock_region_server();
let mut engine_env = TestEnv::with_prefix("region-alive-keeper");
let mut engine_env = TestEnv::with_prefix("region-alive-keeper").await;
let engine = engine_env.create_engine(MitoConfig::default()).await;
let engine = Arc::new(engine);
region_server.register_engine(engine.clone());

@@ -144,6 +144,9 @@ pub struct HttpClientConfig {
/// The timeout for idle sockets being kept-alive.
#[serde(with = "humantime_serde")]
pub(crate) pool_idle_timeout: Duration,

/// Skip SSL certificate validation (insecure)
pub skip_ssl_validation: bool,
}

impl Default for HttpClientConfig {
@@ -153,6 +156,7 @@ impl Default for HttpClientConfig {
connect_timeout: Duration::from_secs(30),
timeout: Duration::from_secs(30),
pool_idle_timeout: Duration::from_secs(90),
skip_ssl_validation: false,
}
}
}
@@ -514,4 +518,48 @@ mod tests {
_ => unreachable!(),
}
}
#[test]
fn test_skip_ssl_validation_config() {
// Test with skip_ssl_validation = true
let toml_str_true = r#"
[storage]
type = "S3"
[storage.http_client]
skip_ssl_validation = true
"#;
let opts: DatanodeOptions = toml::from_str(toml_str_true).unwrap();
match &opts.storage.store {
ObjectStoreConfig::S3(cfg) => {
assert!(cfg.http_client.skip_ssl_validation);
}
_ => panic!("Expected S3 config"),
}

// Test with skip_ssl_validation = false
let toml_str_false = r#"
[storage]
type = "S3"
[storage.http_client]
skip_ssl_validation = false
"#;
let opts: DatanodeOptions = toml::from_str(toml_str_false).unwrap();
match &opts.storage.store {
ObjectStoreConfig::S3(cfg) => {
assert!(!cfg.http_client.skip_ssl_validation);
}
_ => panic!("Expected S3 config"),
}
// Test default value (should be false)
let toml_str_default = r#"
[storage]
type = "S3"
"#;
let opts: DatanodeOptions = toml::from_str(toml_str_default).unwrap();
match &opts.storage.store {
ObjectStoreConfig::S3(cfg) => {
assert!(!cfg.http_client.skip_ssl_validation);
}
_ => panic!("Expected S3 config"),
}
}
}
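
The test above pins down the serde behavior the new option relies on: when `skip_ssl_validation` is absent from the TOML, deserialization must fall back to `false`. A stripped-down sketch of the same round-trip, assuming the `serde` and `toml` crates; the struct here is a hypothetical stand-in, not the real `HttpClientConfig`:

    use serde::Deserialize;

    #[derive(Deserialize)]
    struct HttpClientOpts {
        // Absent keys fall back to bool::default(), i.e. false.
        #[serde(default)]
        skip_ssl_validation: bool,
    }

    fn main() {
        let explicit: HttpClientOpts = toml::from_str("skip_ssl_validation = true").unwrap();
        assert!(explicit.skip_ssl_validation);

        let fallback: HttpClientOpts = toml::from_str("").unwrap();
        assert!(!fallback.skip_ssl_validation);
    }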

@@ -278,7 +278,7 @@ mod tests {
let mut region_server = mock_region_server();
let heartbeat_handler = RegionHeartbeatResponseHandler::new(region_server.clone());

let mut engine_env = TestEnv::with_prefix("close-region");
let mut engine_env = TestEnv::with_prefix("close-region").await;
let engine = engine_env.create_engine(MitoConfig::default()).await;
region_server.register_engine(Arc::new(engine));
let region_id = RegionId::new(1024, 1);
@@ -326,7 +326,7 @@ mod tests {
let mut region_server = mock_region_server();
let heartbeat_handler = RegionHeartbeatResponseHandler::new(region_server.clone());

let mut engine_env = TestEnv::with_prefix("open-region");
let mut engine_env = TestEnv::with_prefix("open-region").await;
let engine = engine_env.create_engine(MitoConfig::default()).await;
region_server.register_engine(Arc::new(engine));
let region_id = RegionId::new(1024, 1);
@@ -374,7 +374,7 @@ mod tests {
let mut region_server = mock_region_server();
let heartbeat_handler = RegionHeartbeatResponseHandler::new(region_server.clone());

let mut engine_env = TestEnv::with_prefix("open-not-exists-region");
let mut engine_env = TestEnv::with_prefix("open-not-exists-region").await;
let engine = engine_env.create_engine(MitoConfig::default()).await;
region_server.register_engine(Arc::new(engine));
let region_id = RegionId::new(1024, 1);
@@ -406,7 +406,7 @@ mod tests {
let mut region_server = mock_region_server();
let heartbeat_handler = RegionHeartbeatResponseHandler::new(region_server.clone());

let mut engine_env = TestEnv::with_prefix("downgrade-region");
let mut engine_env = TestEnv::with_prefix("downgrade-region").await;
let engine = engine_env.create_engine(MitoConfig::default()).await;
region_server.register_engine(Arc::new(engine));
let region_id = RegionId::new(1024, 1);

@@ -17,7 +17,7 @@ use std::sync::Arc;

use common_config::Configurable;
use servers::grpc::builder::GrpcServerBuilder;
use servers::grpc::{GrpcServer, GrpcServerConfig};
use servers::grpc::GrpcServer;
use servers::http::HttpServerBuilder;
use servers::metrics_handler::MetricsHandler;
use servers::server::{ServerHandler, ServerHandlers};
@@ -92,13 +92,7 @@ impl<'a> DatanodeServiceBuilder<'a> {
opts: &DatanodeOptions,
region_server: &RegionServer,
) -> GrpcServerBuilder {
let config = GrpcServerConfig {
max_recv_message_size: opts.grpc.max_recv_message_size.as_bytes() as usize,
max_send_message_size: opts.grpc.max_send_message_size.as_bytes() as usize,
tls: opts.grpc.tls.clone(),
};

GrpcServerBuilder::new(config, region_server.runtime())
GrpcServerBuilder::new(opts.grpc.as_config(), region_server.runtime())
.flight_handler(Arc::new(region_server.clone()))
.region_server_handler(Arc::new(region_server.clone()))
}

@@ -207,11 +207,16 @@ pub(crate) fn clean_temp_dir(dir: &str) -> Result<()> {
}

pub(crate) fn build_http_client(config: &HttpClientConfig) -> Result<HttpClient> {
if config.skip_ssl_validation {
common_telemetry::warn!("Skipping SSL validation for object storage HTTP client. Please ensure the environment is trusted.");
}

let client = reqwest::ClientBuilder::new()
.pool_max_idle_per_host(config.pool_max_idle_per_host as usize)
.connect_timeout(config.connect_timeout)
.pool_idle_timeout(config.pool_idle_timeout)
.timeout(config.timeout)
.danger_accept_invalid_certs(config.skip_ssl_validation)
.build()
.context(BuildHttpClientSnafu)?;
Ok(HttpClient::with(client))

@@ -31,9 +31,10 @@ pub use crate::schema::column_schema::{
ColumnSchema, FulltextAnalyzer, FulltextBackend, FulltextOptions, Metadata,
SkippingIndexOptions, SkippingIndexType, COLUMN_FULLTEXT_CHANGE_OPT_KEY_ENABLE,
COLUMN_FULLTEXT_OPT_KEY_ANALYZER, COLUMN_FULLTEXT_OPT_KEY_BACKEND,
COLUMN_FULLTEXT_OPT_KEY_CASE_SENSITIVE, COLUMN_SKIPPING_INDEX_OPT_KEY_GRANULARITY,
COLUMN_SKIPPING_INDEX_OPT_KEY_TYPE, COMMENT_KEY, FULLTEXT_KEY, INVERTED_INDEX_KEY,
SKIPPING_INDEX_KEY, TIME_INDEX_KEY,
COLUMN_FULLTEXT_OPT_KEY_CASE_SENSITIVE, COLUMN_FULLTEXT_OPT_KEY_FALSE_POSITIVE_RATE,
COLUMN_FULLTEXT_OPT_KEY_GRANULARITY, COLUMN_SKIPPING_INDEX_OPT_KEY_FALSE_POSITIVE_RATE,
COLUMN_SKIPPING_INDEX_OPT_KEY_GRANULARITY, COLUMN_SKIPPING_INDEX_OPT_KEY_TYPE, COMMENT_KEY,
FULLTEXT_KEY, INVERTED_INDEX_KEY, SKIPPING_INDEX_KEY, TIME_INDEX_KEY,
};
pub use crate::schema::constraint::ColumnDefaultConstraint;
pub use crate::schema::raw::RawSchema;

@@ -47,13 +47,18 @@ pub const COLUMN_FULLTEXT_CHANGE_OPT_KEY_ENABLE: &str = "enable";
pub const COLUMN_FULLTEXT_OPT_KEY_ANALYZER: &str = "analyzer";
pub const COLUMN_FULLTEXT_OPT_KEY_CASE_SENSITIVE: &str = "case_sensitive";
pub const COLUMN_FULLTEXT_OPT_KEY_BACKEND: &str = "backend";
pub const COLUMN_FULLTEXT_OPT_KEY_GRANULARITY: &str = "granularity";
pub const COLUMN_FULLTEXT_OPT_KEY_FALSE_POSITIVE_RATE: &str = "false_positive_rate";

/// Keys used in SKIPPING index options
pub const COLUMN_SKIPPING_INDEX_OPT_KEY_GRANULARITY: &str = "granularity";
pub const COLUMN_SKIPPING_INDEX_OPT_KEY_FALSE_POSITIVE_RATE: &str = "false_positive_rate";
pub const COLUMN_SKIPPING_INDEX_OPT_KEY_TYPE: &str = "type";

pub const DEFAULT_GRANULARITY: u32 = 10240;

pub const DEFAULT_FALSE_POSITIVE_RATE: f64 = 0.01;

/// Schema of a column, used as an immutable struct.
#[derive(Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct ColumnSchema {
@@ -504,7 +509,7 @@ impl TryFrom<&ColumnSchema> for Field {
}

/// Fulltext options for a column.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Default, Visit, VisitMut)]
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Visit, VisitMut)]
#[serde(rename_all = "kebab-case")]
pub struct FulltextOptions {
/// Whether the fulltext index is enabled.
@@ -518,6 +523,92 @@ pub struct FulltextOptions {
/// The fulltext backend to use.
#[serde(default)]
pub backend: FulltextBackend,
/// The granularity of the fulltext index (for bloom backend only)
#[serde(default = "fulltext_options_default_granularity")]
pub granularity: u32,
/// The false positive rate of the fulltext index (for bloom backend only)
#[serde(default = "fulltext_options_default_false_positive_rate_in_10000")]
pub false_positive_rate_in_10000: u32,
}

fn fulltext_options_default_granularity() -> u32 {
DEFAULT_GRANULARITY
}

fn fulltext_options_default_false_positive_rate_in_10000() -> u32 {
(DEFAULT_FALSE_POSITIVE_RATE * 10000.0) as u32
}

impl FulltextOptions {
/// Creates a new fulltext options.
pub fn new(
enable: bool,
analyzer: FulltextAnalyzer,
case_sensitive: bool,
backend: FulltextBackend,
granularity: u32,
false_positive_rate: f64,
) -> Result<Self> {
ensure!(
0.0 < false_positive_rate && false_positive_rate <= 1.0,
error::InvalidFulltextOptionSnafu {
msg: format!(
"Invalid false positive rate: {false_positive_rate}, expected: 0.0 < rate <= 1.0"
),
}
);
ensure!(
granularity > 0,
error::InvalidFulltextOptionSnafu {
msg: format!("Invalid granularity: {granularity}, expected: positive integer"),
}
);
Ok(Self::new_unchecked(
enable,
analyzer,
case_sensitive,
backend,
granularity,
false_positive_rate,
))
}

/// Creates a new fulltext options without checking `false_positive_rate` and `granularity`.
pub fn new_unchecked(
enable: bool,
analyzer: FulltextAnalyzer,
case_sensitive: bool,
backend: FulltextBackend,
granularity: u32,
false_positive_rate: f64,
) -> Self {
Self {
enable,
analyzer,
case_sensitive,
backend,
granularity,
false_positive_rate_in_10000: (false_positive_rate * 10000.0) as u32,
}
}

/// Gets the false positive rate.
pub fn false_positive_rate(&self) -> f64 {
self.false_positive_rate_in_10000 as f64 / 10000.0
}
}

impl Default for FulltextOptions {
fn default() -> Self {
Self::new_unchecked(
false,
FulltextAnalyzer::default(),
false,
FulltextBackend::default(),
DEFAULT_GRANULARITY,
DEFAULT_FALSE_POSITIVE_RATE,
)
}
}
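
`FulltextOptions` stores the rate as `false_positive_rate_in_10000: u32` rather than an `f64`, which keeps the struct `Eq`-comparable (`f64` implements neither `Eq` nor `Hash`) and converts at the edges. The encoding arithmetic in isolation:

    // Round-trip of the fixed-point encoding: an f64 rate in (0.0, 1.0]
    // is stored as ten-thousandths in a u32.
    fn to_in_10000(rate: f64) -> u32 {
        (rate * 10000.0) as u32
    }

    fn from_in_10000(v: u32) -> f64 {
        v as f64 / 10000.0
    }

    fn main() {
        assert_eq!(to_in_10000(0.01), 100); // the 1% default
        assert_eq!(from_in_10000(100), 0.01);
        // Resolution is 1/10000: anything finer truncates to zero.
        assert_eq!(to_in_10000(0.00005), 0);
    }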

impl fmt::Display for FulltextOptions {
@@ -527,6 +618,10 @@ impl fmt::Display for FulltextOptions {
write!(f, ", analyzer={}", self.analyzer)?;
write!(f, ", case_sensitive={}", self.case_sensitive)?;
write!(f, ", backend={}", self.backend)?;
if self.backend == FulltextBackend::Bloom {
write!(f, ", granularity={}", self.granularity)?;
write!(f, ", false_positive_rate={}", self.false_positive_rate())?;
}
}
Ok(())
}
@@ -611,6 +706,45 @@ impl TryFrom<HashMap<String, String>> for FulltextOptions {
}
}

if fulltext_options.backend == FulltextBackend::Bloom {
// Parse granularity with default value 10240
let granularity = match options.get(COLUMN_FULLTEXT_OPT_KEY_GRANULARITY) {
Some(value) => value
.parse::<u32>()
.ok()
.filter(|&v| v > 0)
.ok_or_else(|| {
error::InvalidFulltextOptionSnafu {
msg: format!(
"Invalid granularity: {value}, expected: positive integer"
),
}
.build()
})?,
None => DEFAULT_GRANULARITY,
};
fulltext_options.granularity = granularity;

// Parse false positive rate with default value 0.01
let false_positive_rate = match options.get(COLUMN_FULLTEXT_OPT_KEY_FALSE_POSITIVE_RATE)
{
Some(value) => value
.parse::<f64>()
.ok()
.filter(|&v| v > 0.0 && v <= 1.0)
.ok_or_else(|| {
error::InvalidFulltextOptionSnafu {
msg: format!(
"Invalid false positive rate: {value}, expected: 0.0 < rate <= 1.0"
),
}
.build()
})?,
None => DEFAULT_FALSE_POSITIVE_RATE,
};
fulltext_options.false_positive_rate_in_10000 = (false_positive_rate * 10000.0) as u32;
}

Ok(fulltext_options)
}
}
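
Both `TryFrom` impls in this change converge on the same `parse().ok().filter(..).ok_or_else(..)` shape, which folds "unparsable" and "parsable but out of range" into a single error path. The pattern on its own, with a plain `String` error standing in for the snafu error builders:

    // Parse-and-validate in one chain: a value that fails to parse and a
    // value that parses but violates the range check hit the same error arm.
    fn parse_granularity(value: &str) -> Result<u32, String> {
        value
            .parse::<u32>()
            .ok()
            .filter(|&v| v > 0)
            .ok_or_else(|| format!("Invalid granularity: {value}, expected: positive integer"))
    }

    fn main() {
        assert_eq!(parse_granularity("10240"), Ok(10240));
        assert!(parse_granularity("0").is_err());      // parses, fails the filter
        assert!(parse_granularity("banana").is_err()); // fails the parse
    }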
@@ -638,23 +772,72 @@ impl fmt::Display for FulltextAnalyzer {
pub struct SkippingIndexOptions {
/// The granularity of the skip index.
pub granularity: u32,
/// The false positive rate of the skip index (in ten-thousandths, e.g., 100 = 1%).
pub false_positive_rate_in_10000: u32,
/// The type of the skip index.
#[serde(default)]
pub index_type: SkippingIndexType,
}

impl SkippingIndexOptions {
/// Creates a new skipping index options without checking `false_positive_rate` and `granularity`.
pub fn new_unchecked(
granularity: u32,
false_positive_rate: f64,
index_type: SkippingIndexType,
) -> Self {
Self {
granularity,
false_positive_rate_in_10000: (false_positive_rate * 10000.0) as u32,
index_type,
}
}

/// Creates a new skipping index options.
pub fn new(
granularity: u32,
false_positive_rate: f64,
index_type: SkippingIndexType,
) -> Result<Self> {
ensure!(
0.0 < false_positive_rate && false_positive_rate <= 1.0,
error::InvalidSkippingIndexOptionSnafu {
msg: format!("Invalid false positive rate: {false_positive_rate}, expected: 0.0 < rate <= 1.0"),
}
);
ensure!(
granularity > 0,
error::InvalidSkippingIndexOptionSnafu {
msg: format!("Invalid granularity: {granularity}, expected: positive integer"),
}
);
Ok(Self::new_unchecked(
granularity,
false_positive_rate,
index_type,
))
}

/// Gets the false positive rate.
pub fn false_positive_rate(&self) -> f64 {
self.false_positive_rate_in_10000 as f64 / 10000.0
}
}

impl Default for SkippingIndexOptions {
fn default() -> Self {
Self {
granularity: DEFAULT_GRANULARITY,
index_type: SkippingIndexType::default(),
}
Self::new_unchecked(
DEFAULT_GRANULARITY,
DEFAULT_FALSE_POSITIVE_RATE,
SkippingIndexType::default(),
)
}
}

impl fmt::Display for SkippingIndexOptions {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "granularity={}", self.granularity)?;
write!(f, ", false_positive_rate={}", self.false_positive_rate())?;
write!(f, ", index_type={}", self.index_type)?;
Ok(())
}
@@ -681,15 +864,37 @@ impl TryFrom<HashMap<String, String>> for SkippingIndexOptions {
fn try_from(options: HashMap<String, String>) -> Result<Self> {
// Parse granularity with default value 1
let granularity = match options.get(COLUMN_SKIPPING_INDEX_OPT_KEY_GRANULARITY) {
Some(value) => value.parse::<u32>().map_err(|_| {
error::InvalidSkippingIndexOptionSnafu {
msg: format!("Invalid granularity: {value}, expected: positive integer"),
}
.build()
})?,
Some(value) => value
.parse::<u32>()
.ok()
.filter(|&v| v > 0)
.ok_or_else(|| {
error::InvalidSkippingIndexOptionSnafu {
msg: format!("Invalid granularity: {value}, expected: positive integer"),
}
.build()
})?,
None => DEFAULT_GRANULARITY,
};

// Parse false positive rate with default value 100
let false_positive_rate =
match options.get(COLUMN_SKIPPING_INDEX_OPT_KEY_FALSE_POSITIVE_RATE) {
Some(value) => value
.parse::<f64>()
.ok()
.filter(|&v| v > 0.0 && v <= 1.0)
.ok_or_else(|| {
error::InvalidSkippingIndexOptionSnafu {
msg: format!(
"Invalid false positive rate: {value}, expected: 0.0 < rate <= 1.0"
),
}
.build()
})?,
None => DEFAULT_FALSE_POSITIVE_RATE,
};

// Parse index type with default value BloomFilter
let index_type = match options.get(COLUMN_SKIPPING_INDEX_OPT_KEY_TYPE) {
Some(typ) => match typ.to_ascii_uppercase().as_str() {
@@ -704,10 +909,11 @@ impl TryFrom<HashMap<String, String>> for SkippingIndexOptions {
None => SkippingIndexType::default(),
};

Ok(SkippingIndexOptions {
Ok(SkippingIndexOptions::new_unchecked(
granularity,
false_positive_rate,
index_type,
})
))
}
}


@@ -899,7 +899,7 @@ impl StreamingEngine {
let rows_send = self.run_available(true).await?;
let row = self.send_writeback_requests().await?;
debug!(
"Done to flush flow_id={:?} with {} input rows flushed, {} rows sended and {} output rows flushed",
"Done to flush flow_id={:?} with {} input rows flushed, {} rows sent and {} output rows flushed",
flow_id, flushed_input_rows, rows_send, row
);
Ok(row)

@@ -18,7 +18,8 @@ use std::sync::atomic::AtomicBool;
use std::sync::Arc;

use api::v1::flow::{
flow_request, CreateRequest, DropRequest, FlowRequest, FlowResponse, FlushFlow,
flow_request, CreateRequest, DirtyWindowRequests, DropRequest, FlowRequest, FlowResponse,
FlushFlow,
};
use api::v1::region::InsertRequests;
use catalog::CatalogManager;
@@ -834,8 +835,13 @@ impl common_meta::node_manager::Flownode for FlowDualEngine {
.map_err(to_meta_err(snafu::location!()))
}

async fn handle_mark_window_dirty(&self, _req: DirtyWindowRequest) -> MetaResult<FlowResponse> {
unreachable!()
async fn handle_mark_window_dirty(&self, req: DirtyWindowRequest) -> MetaResult<FlowResponse> {
self.batching_engine()
.handle_mark_dirty_time_window(DirtyWindowRequests {
requests: vec![req],
})
.await
.map_err(to_meta_err(snafu::location!()))
}
}

@@ -859,97 +865,6 @@ fn to_meta_err(
}
}

#[async_trait::async_trait]
impl common_meta::node_manager::Flownode for StreamingEngine {
async fn handle(&self, request: FlowRequest) -> MetaResult<FlowResponse> {
let query_ctx = request
.header
.and_then(|h| h.query_context)
.map(|ctx| ctx.into());
match request.body {
Some(flow_request::Body::Create(CreateRequest {
flow_id: Some(task_id),
source_table_ids,
sink_table_name: Some(sink_table_name),
create_if_not_exists,
expire_after,
comment,
sql,
flow_options,
or_replace,
})) => {
let source_table_ids = source_table_ids.into_iter().map(|id| id.id).collect_vec();
let sink_table_name = [
sink_table_name.catalog_name,
sink_table_name.schema_name,
sink_table_name.table_name,
];
let expire_after = expire_after.map(|e| e.value);
let args = CreateFlowArgs {
flow_id: task_id.id as u64,
sink_table_name,
source_table_ids,
create_if_not_exists,
or_replace,
expire_after,
comment: Some(comment),
sql: sql.clone(),
flow_options,
query_ctx,
};
let ret = self
.create_flow(args)
.await
.map_err(BoxedError::new)
.with_context(|_| CreateFlowSnafu { sql: sql.clone() })
.map_err(to_meta_err(snafu::location!()))?;
METRIC_FLOW_TASK_COUNT.inc();
Ok(FlowResponse {
affected_flows: ret
.map(|id| greptime_proto::v1::FlowId { id: id as u32 })
.into_iter()
.collect_vec(),
..Default::default()
})
}
Some(flow_request::Body::Drop(DropRequest {
flow_id: Some(flow_id),
})) => {
self.remove_flow(flow_id.id as u64)
.await
.map_err(to_meta_err(snafu::location!()))?;
METRIC_FLOW_TASK_COUNT.dec();
Ok(Default::default())
}
Some(flow_request::Body::Flush(FlushFlow {
flow_id: Some(flow_id),
})) => {
let row = self
.flush_flow_inner(flow_id.id as u64)
.await
.map_err(to_meta_err(snafu::location!()))?;
Ok(FlowResponse {
affected_flows: vec![flow_id],
affected_rows: row as u64,
..Default::default()
})
}
other => common_meta::error::InvalidFlowRequestBodySnafu { body: other }.fail(),
}
}

async fn handle_inserts(&self, request: InsertRequests) -> MetaResult<FlowResponse> {
self.handle_inserts_inner(request)
.await
.map(|_| Default::default())
.map_err(to_meta_err(snafu::location!()))
}

async fn handle_mark_window_dirty(&self, _req: DirtyWindowRequest) -> MetaResult<FlowResponse> {
unreachable!()
}
}

impl FlowEngine for StreamingEngine {
async fn create_flow(&self, args: CreateFlowArgs) -> Result<Option<FlowId>, Error> {
self.create_flow_inner(args).await

@@ -18,7 +18,8 @@ use std::sync::{Arc, Weak};
use std::time::SystemTime;

use api::v1::greptime_request::Request;
use api::v1::CreateTableExpr;
use api::v1::query_request::Query;
use api::v1::{CreateTableExpr, QueryRequest};
use client::{Client, Database};
use common_error::ext::{BoxedError, ErrorExt};
use common_grpc::channel_manager::{ChannelConfig, ChannelManager};
@@ -269,6 +270,55 @@ impl FrontendClient {
.await
}

/// Execute a SQL statement on the frontend.
pub async fn sql(&self, catalog: &str, schema: &str, sql: &str) -> Result<Output, Error> {
match self {
FrontendClient::Distributed { .. } => {
let db = self.get_random_active_frontend(catalog, schema).await?;
db.database
.sql(sql)
.await
.map_err(BoxedError::new)
.context(ExternalSnafu)
}
FrontendClient::Standalone { database_client } => {
let ctx = QueryContextBuilder::default()
.current_catalog(catalog.to_string())
.current_schema(schema.to_string())
.build();
let ctx = Arc::new(ctx);
{
let database_client = {
database_client
.lock()
.map_err(|e| {
UnexpectedSnafu {
reason: format!("Failed to lock database client: {e}"),
}
.build()
})?
.as_ref()
.context(UnexpectedSnafu {
reason: "Standalone's frontend instance is not set",
})?
.upgrade()
.context(UnexpectedSnafu {
reason: "Failed to upgrade database client",
})?
};
let req = Request::Query(QueryRequest {
query: Some(Query::Sql(sql.to_string())),
});
database_client
.do_query(req, ctx)
.await
.map_err(BoxedError::new)
.context(ExternalSnafu)
}
}
}
}

/// Handle a request to frontend
pub(crate) async fn handle(
&self,
@@ -318,7 +368,7 @@ impl FrontendClient {
})?
};
let resp: common_query::Output = database_client
.do_query(req.clone(), ctx)
.do_query(req, ctx)
.await
.map_err(BoxedError::new)
.context(ExternalSnafu)?;

@@ -32,7 +32,7 @@ use common_meta::node_manager::{Flownode, NodeManagerRef};
use common_query::Output;
use common_runtime::JoinHandle;
use common_telemetry::tracing::info;
use futures::{FutureExt, TryStreamExt};
use futures::TryStreamExt;
use greptime_proto::v1::flow::{flow_server, FlowRequest, FlowResponse, InsertRequests};
use itertools::Itertools;
use operator::delete::Deleter;
@@ -40,16 +40,16 @@ use operator::insert::Inserter;
use operator::statement::StatementExecutor;
use partition::manager::PartitionRuleManager;
use query::{QueryEngine, QueryEngineFactory};
use servers::error::{StartGrpcSnafu, TcpBindSnafu, TcpIncomingSnafu};
use servers::add_service;
use servers::grpc::builder::GrpcServerBuilder;
use servers::grpc::{GrpcServer, GrpcServerConfig};
use servers::http::HttpServerBuilder;
use servers::metrics_handler::MetricsHandler;
use servers::server::{ServerHandler, ServerHandlers};
use session::context::QueryContextRef;
use snafu::{OptionExt, ResultExt};
use tokio::net::TcpListener;
use tokio::sync::{broadcast, oneshot, Mutex};
use tonic::codec::CompressionEncoding;
use tonic::transport::server::TcpIncoming;
use tonic::{Request, Response, Status};

use crate::adapter::flownode_impl::{FlowDualEngine, FlowDualEngineRef};
@@ -231,50 +231,6 @@ impl FlownodeServer {
}
}

#[async_trait::async_trait]
impl servers::server::Server for FlownodeServer {
async fn shutdown(&self) -> Result<(), servers::error::Error> {
let tx = self.inner.server_shutdown_tx.lock().await;
if tx.send(()).is_err() {
info!("Receiver dropped, the flow node server has already shutdown");
}
info!("Shutdown flow node server");

Ok(())
}

async fn start(&mut self, addr: SocketAddr) -> Result<(), servers::error::Error> {
let mut rx_server = self.inner.server_shutdown_tx.lock().await.subscribe();

let incoming = {
let listener = TcpListener::bind(addr)
.await
.context(TcpBindSnafu { addr })?;
let addr = listener.local_addr().context(TcpBindSnafu { addr })?;
let incoming =
TcpIncoming::from_listener(listener, true, None).context(TcpIncomingSnafu)?;
info!("flow server is bound to {}", addr);

incoming
};

let builder = tonic::transport::Server::builder().add_service(self.create_flow_service());

let _handle = common_runtime::spawn_global(async move {
let _result = builder
.serve_with_incoming_shutdown(incoming, rx_server.recv().map(drop))
.await
.context(StartGrpcSnafu);
});

Ok(())
}

fn name(&self) -> &str {
FLOW_NODE_SERVER_NAME
}
}

/// The flownode server instance.
pub struct FlownodeInstance {
flownode_server: FlownodeServer,
@@ -470,7 +426,7 @@ impl FlownodeBuilder {
/// Useful in distributed mode
pub struct FlownodeServiceBuilder<'a> {
opts: &'a FlownodeOptions,
grpc_server: Option<FlownodeServer>,
grpc_server: Option<GrpcServer>,
enable_http_service: bool,
}

@@ -490,13 +446,19 @@ impl<'a> FlownodeServiceBuilder<'a> {
}
}

pub fn with_grpc_server(self, grpc_server: FlownodeServer) -> Self {
pub fn with_grpc_server(self, grpc_server: GrpcServer) -> Self {
Self {
grpc_server: Some(grpc_server),
..self
}
}

pub fn with_default_grpc_server(mut self, flownode_server: &FlownodeServer) -> Self {
let grpc_server = Self::grpc_server_builder(self.opts, flownode_server).build();
self.grpc_server = Some(grpc_server);
self
}

pub fn build(mut self) -> Result<ServerHandlers, Error> {
let handlers = ServerHandlers::default();
if let Some(grpc_server) = self.grpc_server.take() {
@@ -519,6 +481,22 @@ impl<'a> FlownodeServiceBuilder<'a> {
}
Ok(handlers)
}

pub fn grpc_server_builder(
opts: &FlownodeOptions,
flownode_server: &FlownodeServer,
) -> GrpcServerBuilder {
let config = GrpcServerConfig {
max_recv_message_size: opts.grpc.max_recv_message_size.as_bytes() as usize,
max_send_message_size: opts.grpc.max_send_message_size.as_bytes() as usize,
tls: opts.grpc.tls.clone(),
};
let service = flownode_server.create_flow_service();
let runtime = common_runtime::global_runtime();
let mut builder = GrpcServerBuilder::new(config, runtime);
add_service!(builder, service);
builder
}
}

/// Basically a tiny frontend that communicates with datanode, different from [`FrontendClient`] which

@@ -14,6 +14,7 @@ workspace = true
[dependencies]
api.workspace = true
arc-swap = "1.0"
async-stream.workspace = true
async-trait.workspace = true
auth.workspace = true
bytes.workspace = true

@@ -363,6 +363,12 @@ pub enum Error {
#[snafu(implicit)]
location: Location,
},

#[snafu(display("Canceling statement due to statement timeout"))]
StatementTimeout {
#[snafu(implicit)]
location: Location,
},
}

pub type Result<T> = std::result::Result<T, Error>;
@@ -443,6 +449,8 @@ impl ErrorExt for Error {
Error::DataFusion { error, .. } => datafusion_status_code::<Self>(error, None),

Error::Cancelled { .. } => StatusCode::Cancelled,

Error::StatementTimeout { .. } => StatusCode::Cancelled,
}
}


@@ -25,9 +25,11 @@ mod promql;
mod region_query;
pub mod standalone;

use std::pin::Pin;
use std::sync::Arc;
use std::time::SystemTime;
use std::time::{Duration, SystemTime};

use async_stream::stream;
use async_trait::async_trait;
use auth::{PermissionChecker, PermissionCheckerRef, PermissionReq};
use catalog::process_manager::ProcessManagerRef;
@@ -44,8 +46,11 @@ use common_procedure::local::{LocalManager, ManagerConfig};
use common_procedure::options::ProcedureConfig;
use common_procedure::ProcedureManagerRef;
use common_query::Output;
use common_recordbatch::error::StreamTimeoutSnafu;
use common_recordbatch::RecordBatchStreamWrapper;
use common_telemetry::{debug, error, info, tracing};
use datafusion_expr::LogicalPlan;
use futures::{Stream, StreamExt};
use log_store::raft_engine::RaftEngineBackend;
use operator::delete::DeleterRef;
use operator::insert::InserterRef;
@@ -65,20 +70,21 @@ use servers::interceptor::{
};
use servers::prometheus_handler::PrometheusHandler;
use servers::query_handler::sql::SqlQueryHandler;
use session::context::QueryContextRef;
use session::context::{Channel, QueryContextRef};
use session::table_name::table_idents_to_full_name;
use snafu::prelude::*;
use sql::dialect::Dialect;
use sql::parser::{ParseOptions, ParserContext};
use sql::statements::copy::{CopyDatabase, CopyTable};
use sql::statements::statement::Statement;
use sql::statements::tql::Tql;
use sqlparser::ast::ObjectName;
pub use standalone::StandaloneDatanodeManager;

use crate::error::{
self, Error, ExecLogicalPlanSnafu, ExecutePromqlSnafu, ExternalSnafu, InvalidSqlSnafu,
ParseSqlSnafu, PermissionSnafu, PlanStatementSnafu, Result, SqlExecInterceptedSnafu,
TableOperationSnafu,
StatementTimeoutSnafu, TableOperationSnafu,
};
use crate::limiter::LimiterRef;
use crate::slow_query_recorder::SlowQueryRecorder;
@@ -185,59 +191,10 @@ impl Instance {
vec![query_ctx.current_schema()],
stmt.to_string(),
query_ctx.conn_info().to_string(),
None,
Some(query_ctx.process_id()),
);

let query_fut = async {
match stmt {
Statement::Query(_) | Statement::Explain(_) | Statement::Delete(_) => {
// TODO: remove this when format is supported in datafusion
if let Statement::Explain(explain) = &stmt {
if let Some(format) = explain.format() {
query_ctx.set_explain_format(format.to_string());
}
}

let stmt = QueryStatement::Sql(stmt);
let plan = self
.statement_executor
.plan(&stmt, query_ctx.clone())
.await?;

let QueryStatement::Sql(stmt) = stmt else {
unreachable!()
};
query_interceptor.pre_execute(&stmt, Some(&plan), query_ctx.clone())?;
self.statement_executor
.exec_plan(plan, query_ctx)
.await
.context(TableOperationSnafu)
}
Statement::Tql(tql) => {
let plan = self
.statement_executor
.plan_tql(tql.clone(), &query_ctx)
.await?;

query_interceptor.pre_execute(
&Statement::Tql(tql),
Some(&plan),
query_ctx.clone(),
)?;
self.statement_executor
.exec_plan(plan, query_ctx)
.await
.context(TableOperationSnafu)
}
_ => {
query_interceptor.pre_execute(&stmt, None, query_ctx.clone())?;
self.statement_executor
.execute_sql(stmt, query_ctx)
.await
.context(TableOperationSnafu)
}
}
};
let query_fut = self.exec_statement_with_timeout(stmt, query_ctx, query_interceptor);

CancellableFuture::new(query_fut, ticket.cancellation_handle.clone())
.await
@@ -254,6 +211,153 @@ impl Instance {
Output { data, meta }
})
}

async fn exec_statement_with_timeout(
&self,
stmt: Statement,
query_ctx: QueryContextRef,
query_interceptor: Option<&SqlQueryInterceptorRef<Error>>,
) -> Result<Output> {
let timeout = derive_timeout(&stmt, &query_ctx);
match timeout {
Some(timeout) => {
let start = tokio::time::Instant::now();
let output = tokio::time::timeout(
timeout,
self.exec_statement(stmt, query_ctx, query_interceptor),
)
.await
.map_err(|_| StatementTimeoutSnafu.build())??;
// compute remaining timeout
let remaining_timeout = timeout.checked_sub(start.elapsed()).unwrap_or_default();
attach_timeout(output, remaining_timeout)
}
None => {
self.exec_statement(stmt, query_ctx, query_interceptor)
.await
}
}
}
|
||||
|
||||
async fn exec_statement(
|
||||
&self,
|
||||
stmt: Statement,
|
||||
query_ctx: QueryContextRef,
|
||||
query_interceptor: Option<&SqlQueryInterceptorRef<Error>>,
|
||||
) -> Result<Output> {
|
||||
match stmt {
|
||||
Statement::Query(_) | Statement::Explain(_) | Statement::Delete(_) => {
|
||||
// TODO: remove this when format is supported in datafusion
|
||||
if let Statement::Explain(explain) = &stmt {
|
||||
if let Some(format) = explain.format() {
|
||||
query_ctx.set_explain_format(format.to_string());
|
||||
}
|
||||
}
|
||||
|
||||
self.plan_and_exec_sql(stmt, &query_ctx, query_interceptor)
|
||||
.await
|
||||
}
|
||||
Statement::Tql(tql) => {
|
||||
self.plan_and_exec_tql(&query_ctx, query_interceptor, tql)
|
||||
.await
|
||||
}
|
||||
_ => {
|
||||
query_interceptor.pre_execute(&stmt, None, query_ctx.clone())?;
|
||||
self.statement_executor
|
||||
.execute_sql(stmt, query_ctx)
|
||||
.await
|
||||
.context(TableOperationSnafu)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn plan_and_exec_sql(
|
||||
&self,
|
||||
stmt: Statement,
|
||||
query_ctx: &QueryContextRef,
|
||||
query_interceptor: Option<&SqlQueryInterceptorRef<Error>>,
|
||||
) -> Result<Output> {
|
||||
let stmt = QueryStatement::Sql(stmt);
|
||||
let plan = self
|
||||
.statement_executor
|
||||
.plan(&stmt, query_ctx.clone())
|
||||
.await?;
|
||||
let QueryStatement::Sql(stmt) = stmt else {
|
||||
unreachable!()
|
||||
};
|
||||
query_interceptor.pre_execute(&stmt, Some(&plan), query_ctx.clone())?;
|
||||
self.statement_executor
|
||||
.exec_plan(plan, query_ctx.clone())
|
||||
.await
|
||||
.context(TableOperationSnafu)
|
||||
}
|
||||
|
||||
async fn plan_and_exec_tql(
|
||||
&self,
|
||||
query_ctx: &QueryContextRef,
|
||||
query_interceptor: Option<&SqlQueryInterceptorRef<Error>>,
|
||||
tql: Tql,
|
||||
) -> Result<Output> {
|
||||
let plan = self
|
||||
.statement_executor
|
||||
.plan_tql(tql.clone(), query_ctx)
|
||||
.await?;
|
||||
query_interceptor.pre_execute(&Statement::Tql(tql), Some(&plan), query_ctx.clone())?;
|
||||
self.statement_executor
|
||||
.exec_plan(plan, query_ctx.clone())
|
||||
.await
|
||||
.context(TableOperationSnafu)
|
||||
}
|
||||
}
|
||||
|
||||
/// If the relevant variables are set, the timeout is enforced for all PostgreSQL statements.
|
||||
/// For MySQL, it applies only to read-only statements.
|
||||
fn derive_timeout(stmt: &Statement, query_ctx: &QueryContextRef) -> Option<Duration> {
|
||||
let query_timeout = query_ctx.query_timeout()?;
|
||||
if query_timeout.is_zero() {
|
||||
return None;
|
||||
}
|
||||
match query_ctx.channel() {
|
||||
Channel::Mysql if stmt.is_readonly() => Some(query_timeout),
|
||||
Channel::Postgres => Some(query_timeout),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
fn attach_timeout(output: Output, mut timeout: Duration) -> Result<Output> {
|
||||
if timeout.is_zero() {
|
||||
return StatementTimeoutSnafu.fail();
|
||||
}
|
||||
|
||||
let output = match output.data {
|
||||
OutputData::AffectedRows(_) | OutputData::RecordBatches(_) => output,
|
||||
OutputData::Stream(mut stream) => {
|
||||
let schema = stream.schema();
|
||||
let s = Box::pin(stream! {
|
||||
let mut start = tokio::time::Instant::now();
|
||||
while let Some(item) = tokio::time::timeout(timeout, stream.next()).await.map_err(|_| StreamTimeoutSnafu.build())? {
|
||||
yield item;
|
||||
|
||||
let now = tokio::time::Instant::now();
|
||||
timeout = timeout.checked_sub(now - start).unwrap_or(Duration::ZERO);
|
||||
start = now;
|
||||
// tokio::time::timeout may not return an error immediately when timeout is 0.
|
||||
if timeout.is_zero() {
|
||||
StreamTimeoutSnafu.fail()?;
|
||||
}
|
||||
}
|
||||
}) as Pin<Box<dyn Stream<Item = _> + Send>>;
|
||||
let stream = RecordBatchStreamWrapper {
|
||||
schema,
|
||||
stream: s,
|
||||
output_ordering: None,
|
||||
metrics: Default::default(),
|
||||
};
|
||||
Output::new(OutputData::Stream(Box::pin(stream)), output.meta)
|
||||
}
|
||||
};
|
||||
|
||||
Ok(output)
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
|
||||
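
The two hunks above split one timeout budget across two phases: tokio::time::timeout bounds statement execution, and attach_timeout carries whatever is left of the budget into the returned stream so slow consumers are bounded too. A minimal self-contained sketch of that budget-splitting pattern, using only tokio (names here are illustrative, not GreptimeDB's API):

use std::time::Duration;
use tokio::time::{timeout, Instant};

/// Runs `fut` under `budget` and returns the result together with the
/// unused remainder, which a caller can attach to a follow-up phase
/// (e.g. draining a result stream).
async fn run_with_budget<T, F>(budget: Duration, fut: F) -> Result<(T, Duration), &'static str>
where
    F: std::future::Future<Output = T>,
{
    let start = Instant::now();
    let value = timeout(budget, fut).await.map_err(|_| "statement timeout")?;
    let remaining = budget.checked_sub(start.elapsed()).unwrap_or_default();
    Ok((value, remaining))
}

#[tokio::main]
async fn main() {
    let budget = Duration::from_millis(50);
    let work = async {
        tokio::time::sleep(Duration::from_millis(10)).await;
        42
    };
    match run_with_budget(budget, work).await {
        Ok((v, rest)) => println!("got {v}, {rest:?} left for streaming"),
        Err(e) => println!("{e}"),
    }
}

The same zero check as in attach_timeout matters here: a fully spent budget should fail fast rather than hand the next phase a zero-duration timeout that may not fire immediately.
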
@@ -24,12 +24,14 @@ use common_function::scalars::json::json_get::{
};
use common_function::scalars::udf::create_udf;
use common_function::state::FunctionState;
use common_query::Output;
use common_query::{Output, OutputData};
use common_recordbatch::adapter::RecordBatchStreamAdapter;
use common_recordbatch::util;
use datafusion::dataframe::DataFrame;
use datafusion::execution::context::SessionContext;
use datafusion::execution::SessionStateBuilder;
use datafusion_expr::{col, lit, lit_timestamp_nano, wildcard, Expr, SortExpr};
use datatypes::value::ValueRef;
use query::QueryEngineRef;
use serde_json::Value as JsonValue;
use servers::error::{
@@ -97,7 +99,7 @@ impl JaegerQueryHandler for Instance {
            filters.push(col(TIMESTAMP_COLUMN).lt_eq(lit_timestamp_nano(end_time * 1_000)));
        }

        // It's equivalent to
        // It's equivalent to the following SQL query:
        //
        // ```
        // SELECT DISTINCT span_name, span_kind
@@ -137,7 +139,7 @@ impl JaegerQueryHandler for Instance {
        start_time: Option<i64>,
        end_time: Option<i64>,
    ) -> ServerResult<Output> {
        // It's equivalent to
        // It's equivalent to the following SQL query:
        //
        // ```
        // SELECT
@@ -156,13 +158,11 @@ impl JaegerQueryHandler for Instance {
        let mut filters = vec![col(TRACE_ID_COLUMN).eq(lit(trace_id))];

        if let Some(start_time) = start_time {
            // Microseconds to nanoseconds.
            filters.push(col(TIMESTAMP_COLUMN).gt_eq(lit_timestamp_nano(start_time * 1_000)));
            filters.push(col(TIMESTAMP_COLUMN).gt_eq(lit_timestamp_nano(start_time)));
        }

        if let Some(end_time) = end_time {
            // Microseconds to nanoseconds.
            filters.push(col(TIMESTAMP_COLUMN).lt_eq(lit_timestamp_nano(end_time * 1_000)));
            filters.push(col(TIMESTAMP_COLUMN).lt_eq(lit_timestamp_nano(end_time)));
        }

        Ok(query_trace_table(
@@ -184,10 +184,11 @@ impl JaegerQueryHandler for Instance {
        ctx: QueryContextRef,
        query_params: QueryTraceParams,
    ) -> ServerResult<Output> {
        let selects = vec![wildcard()];

        let mut filters = vec![];

        // `service_name` is already validated in `from_jaeger_query_params()`, so no additional check needed here.
        filters.push(col(SERVICE_NAME_COLUMN).eq(lit(query_params.service_name)));

        if let Some(operation_name) = query_params.operation_name {
            filters.push(col(SPAN_NAME_COLUMN).eq(lit(operation_name)));
        }
@@ -208,15 +209,73 @@ impl JaegerQueryHandler for Instance {
            filters.push(col(DURATION_NANO_COLUMN).lt_eq(lit(max_duration)));
        }

        // Get all distinct trace ids that match the filters.
        // It's equivalent to the following SQL query:
        //
        // ```
        // SELECT DISTINCT trace_id
        // FROM
        //   {db}.{trace_table}
        // WHERE
        //   service_name = '{service_name}' AND
        //   operation_name = '{operation_name}' AND
        //   timestamp >= {start_time} AND
        //   timestamp <= {end_time} AND
        //   duration >= {min_duration} AND
        //   duration <= {max_duration}
        // LIMIT {limit}
        // ```.
        let output = query_trace_table(
            ctx.clone(),
            self.catalog_manager(),
            self.query_engine(),
            vec![wildcard()],
            filters,
            vec![],
            Some(query_params.limit.unwrap_or(DEFAULT_LIMIT)),
            query_params.tags,
            vec![col(TRACE_ID_COLUMN)],
        )
        .await?;

        // Get all traces that match the trace ids from the previous query.
        // It's equivalent to the following SQL query:
        //
        // ```
        // SELECT *
        // FROM
        //   {db}.{trace_table}
        // WHERE
        //   trace_id IN ({trace_ids}) AND
        //   timestamp >= {start_time} AND
        //   timestamp <= {end_time}
        // ```
        let mut filters = vec![col(TRACE_ID_COLUMN).in_list(
            trace_ids_from_output(output)
                .await?
                .iter()
                .map(lit)
                .collect::<Vec<Expr>>(),
            false,
        )];

        if let Some(start_time) = query_params.start_time {
            filters.push(col(TIMESTAMP_COLUMN).gt_eq(lit_timestamp_nano(start_time)));
        }

        if let Some(end_time) = query_params.end_time {
            filters.push(col(TIMESTAMP_COLUMN).lt_eq(lit_timestamp_nano(end_time)));
        }

        Ok(query_trace_table(
            ctx,
            self.catalog_manager(),
            self.query_engine(),
            selects,
            vec![wildcard()],
            filters,
            vec![col(TIMESTAMP_COLUMN).sort(false, false)], // Sort by timestamp in descending order.
            Some(DEFAULT_LIMIT),
            query_params.tags,
            vec![],
            None,
            None,
            vec![],
        )
        .await?)
@@ -458,3 +517,34 @@ fn tags_filters(
        json_tag_filters(dataframe, tags)
    }
}

// Get trace ids from the output in recordbatches.
async fn trace_ids_from_output(output: Output) -> ServerResult<Vec<String>> {
    if let OutputData::Stream(stream) = output.data {
        let schema = stream.schema().clone();
        let recordbatches = util::collect(stream)
            .await
            .context(CollectRecordbatchSnafu)?;

        // Only contains `trace_id` column in string type.
        if !recordbatches.is_empty()
            && schema.num_columns() == 1
            && schema.contains_column(TRACE_ID_COLUMN)
        {
            let mut trace_ids = vec![];
            for recordbatch in recordbatches {
                for col in recordbatch.columns().iter() {
                    for row_idx in 0..recordbatch.num_rows() {
                        if let ValueRef::String(value) = col.get_ref(row_idx) {
                            trace_ids.push(value.to_string());
                        }
                    }
                }
            }

            return Ok(trace_ids);
        }
    }

    Ok(vec![])
}
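
The trace search above is deliberately two queries: a DISTINCT projection on trace_id under the user's filters, then a full scan restricted to those ids. The second step's predicate is an ordinary DataFusion IN-list expression; a small sketch of assembling such a filter with datafusion_expr (the column name is illustrative):

use datafusion_expr::{col, lit, Expr};

/// Builds `trace_id IN ('id1', 'id2', ...)` as a DataFusion expression.
fn trace_id_filter(trace_ids: &[String]) -> Expr {
    col("trace_id").in_list(
        trace_ids
            .iter()
            .map(|id| lit(id.as_str()))
            .collect::<Vec<Expr>>(),
        false, // not negated
    )
}

fn main() {
    let ids = vec!["a1".to_string(), "b2".to_string()];
    println!("{}", trace_id_filter(&ids));
}
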
@@ -22,7 +22,7 @@ use servers::error::Error as ServerError;
use servers::grpc::builder::GrpcServerBuilder;
use servers::grpc::frontend_grpc_handler::FrontendGrpcHandler;
use servers::grpc::greptime_handler::GreptimeRequestHandler;
use servers::grpc::{GrpcOptions, GrpcServer, GrpcServerConfig};
use servers::grpc::{GrpcOptions, GrpcServer};
use servers::http::event::LogValidatorRef;
use servers::http::{HttpServer, HttpServerBuilder};
use servers::interceptor::LogIngestInterceptorRef;
@@ -66,12 +66,7 @@ where
    }

    pub fn grpc_server_builder(&self, opts: &GrpcOptions) -> Result<GrpcServerBuilder> {
        let grpc_config = GrpcServerConfig {
            max_recv_message_size: opts.max_recv_message_size.as_bytes() as usize,
            max_send_message_size: opts.max_send_message_size.as_bytes() as usize,
            tls: opts.tls.clone(),
        };
        let builder = GrpcServerBuilder::new(grpc_config, common_runtime::global_runtime())
        let builder = GrpcServerBuilder::new(opts.as_config(), common_runtime::global_runtime())
            .with_tls_config(opts.tls.clone())
            .context(error::InvalidTlsConfigSnafu)?;
        Ok(builder)
@@ -235,6 +230,7 @@ where
                opts.keep_alive.as_secs(),
                opts.reject_no_database.unwrap_or(false),
            )),
            Some(instance.process_manager().clone()),
        );
        handlers.insert((mysql_server, mysql_addr));
    }
@@ -257,6 +253,7 @@ where
            opts.keep_alive.as_secs(),
            common_runtime::global_runtime(),
            user_provider.clone(),
            Some(self.instance.process_manager().clone()),
        )) as Box<dyn Server>;

        handlers.insert((pg_server, pg_addr));
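
The builder hunk replaces an inline GrpcServerConfig literal with an opts.as_config() call, while TLS moves to a dedicated with_tls_config step. The conversion helper it relies on is trivial; a sketch with simplified types (the real option fields are human-readable sizes, approximated here by plain usize):

#[derive(Clone, Default)]
struct GrpcServerConfig {
    max_recv_message_size: usize,
    max_send_message_size: usize,
}

struct GrpcOptions {
    max_recv_message_size: usize,
    max_send_message_size: usize,
}

impl GrpcOptions {
    /// One place where user-facing options become a server config.
    fn as_config(&self) -> GrpcServerConfig {
        GrpcServerConfig {
            max_recv_message_size: self.max_recv_message_size,
            max_send_message_size: self.max_send_message_size,
        }
    }
}

fn main() {
    let opts = GrpcOptions {
        max_recv_message_size: 512 * 1024 * 1024,
        max_send_message_size: 512 * 1024 * 1024,
    };
    println!("recv limit: {} bytes", opts.as_config().max_recv_message_size);
}
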
@@ -218,6 +218,7 @@ mod tests {
        let mut writer = Cursor::new(Vec::new());
        let mut creator = BloomFilterCreator::new(
            4,
            0.01,
            Arc::new(MockExternalTempFileProvider::new()),
            Arc::new(AtomicUsize::new(0)),
            None,

@@ -30,9 +30,6 @@ use crate::bloom_filter::SEED;
use crate::external_provider::ExternalTempFileProvider;
use crate::Bytes;

/// The false positive rate of the Bloom filter.
pub const FALSE_POSITIVE_RATE: f64 = 0.01;

/// `BloomFilterCreator` is responsible for creating and managing bloom filters
/// for a set of elements. It divides the rows into segments and creates
/// bloom filters for each segment.
@@ -79,6 +76,7 @@ impl BloomFilterCreator {
    /// `rows_per_segment` <= 0
    pub fn new(
        rows_per_segment: usize,
        false_positive_rate: f64,
        intermediate_provider: Arc<dyn ExternalTempFileProvider>,
        global_memory_usage: Arc<AtomicUsize>,
        global_memory_usage_threshold: Option<usize>,
@@ -95,6 +93,7 @@ impl BloomFilterCreator {
            cur_seg_distinct_elems_mem_usage: 0,
            global_memory_usage: global_memory_usage.clone(),
            finalized_bloom_filters: FinalizedBloomFilterStorage::new(
                false_positive_rate,
                intermediate_provider,
                global_memory_usage,
                global_memory_usage_threshold,
@@ -263,6 +262,7 @@ mod tests {
        let mut writer = Cursor::new(Vec::new());
        let mut creator = BloomFilterCreator::new(
            2,
            0.01,
            Arc::new(MockExternalTempFileProvider::new()),
            Arc::new(AtomicUsize::new(0)),
            None,
@@ -337,6 +337,7 @@ mod tests {
        let mut writer = Cursor::new(Vec::new());
        let mut creator: BloomFilterCreator = BloomFilterCreator::new(
            2,
            0.01,
            Arc::new(MockExternalTempFileProvider::new()),
            Arc::new(AtomicUsize::new(0)),
            None,
@@ -418,6 +419,7 @@ mod tests {
        let mut writer = Cursor::new(Vec::new());
        let mut creator = BloomFilterCreator::new(
            2,
            0.01,
            Arc::new(MockExternalTempFileProvider::new()),
            Arc::new(AtomicUsize::new(0)),
            None,

@@ -23,7 +23,7 @@ use futures::{stream, AsyncWriteExt, Stream};
use snafu::ResultExt;

use crate::bloom_filter::creator::intermediate_codec::IntermediateBloomFilterCodecV1;
use crate::bloom_filter::creator::{FALSE_POSITIVE_RATE, SEED};
use crate::bloom_filter::creator::SEED;
use crate::bloom_filter::error::{IntermediateSnafu, IoSnafu, Result};
use crate::external_provider::ExternalTempFileProvider;
use crate::Bytes;
@@ -33,6 +33,9 @@ const MIN_MEMORY_USAGE_THRESHOLD: usize = 1024 * 1024; // 1MB

/// Storage for finalized Bloom filters.
pub struct FinalizedBloomFilterStorage {
    /// The false positive rate of the Bloom filter.
    false_positive_rate: f64,

    /// Indices of the segments in the sequence of finalized Bloom filters.
    segment_indices: Vec<usize>,

@@ -65,12 +68,14 @@ pub struct FinalizedBloomFilterStorage {
impl FinalizedBloomFilterStorage {
    /// Creates a new `FinalizedBloomFilterStorage`.
    pub fn new(
        false_positive_rate: f64,
        intermediate_provider: Arc<dyn ExternalTempFileProvider>,
        global_memory_usage: Arc<AtomicUsize>,
        global_memory_usage_threshold: Option<usize>,
    ) -> Self {
        let external_prefix = format!("intm-bloom-filters-{}", uuid::Uuid::new_v4());
        Self {
            false_positive_rate,
            segment_indices: Vec::new(),
            in_memory: Vec::new(),
            intermediate_file_id_counter: 0,
@@ -96,7 +101,7 @@ impl FinalizedBloomFilterStorage {
        elems: impl IntoIterator<Item = Bytes>,
        element_count: usize,
    ) -> Result<()> {
        let mut bf = BloomFilter::with_false_pos(FALSE_POSITIVE_RATE)
        let mut bf = BloomFilter::with_false_pos(self.false_positive_rate)
            .seed(&SEED)
            .expected_items(element_count);
        for elem in elems.into_iter() {
@@ -284,6 +289,7 @@ mod tests {
        let global_memory_usage_threshold = Some(1024 * 1024); // 1MB
        let provider = Arc::new(mock_provider);
        let mut storage = FinalizedBloomFilterStorage::new(
            0.01,
            provider,
            global_memory_usage.clone(),
            global_memory_usage_threshold,
@@ -340,6 +346,7 @@ mod tests {
        let global_memory_usage_threshold = Some(1024 * 1024); // 1MB
        let provider = Arc::new(mock_provider);
        let mut storage = FinalizedBloomFilterStorage::new(
            0.01,
            provider,
            global_memory_usage.clone(),
            global_memory_usage_threshold,

@@ -222,6 +222,7 @@ mod tests {
        let mut writer = Cursor::new(vec![]);
        let mut creator = BloomFilterCreator::new(
            2,
            0.01,
            Arc::new(MockExternalTempFileProvider::new()),
            Arc::new(AtomicUsize::new(0)),
            None,

@@ -45,6 +45,7 @@ impl BloomFilterFulltextIndexCreator {
    pub fn new(
        config: Config,
        rows_per_segment: usize,
        false_positive_rate: f64,
        intermediate_provider: Arc<dyn ExternalTempFileProvider>,
        global_memory_usage: Arc<AtomicUsize>,
        global_memory_usage_threshold: Option<usize>,
@@ -57,6 +58,7 @@ impl BloomFilterFulltextIndexCreator {

        let inner = BloomFilterCreator::new(
            rows_per_segment,
            false_positive_rate,
            intermediate_provider,
            global_memory_usage,
            global_memory_usage_threshold,
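
The common thread in these bloom filter hunks: the hard-coded FALSE_POSITIVE_RATE constant becomes a false_positive_rate argument, threaded from BloomFilterCreator::new down to every finalized segment filter. The fastbloom builder chain visible in the diff (with_false_pos, seed, expected_items) composes as below; a minimal sketch, assuming the fastbloom crate (the seed value is illustrative, the index uses a fixed crate-level SEED):

use fastbloom::BloomFilter;

fn main() {
    let seed: u128 = 42;
    // A per-segment filter sized for ~1000 distinct elements at a 1% false
    // positive rate; a fixed seed keeps filters comparable across processes.
    let mut bf = BloomFilter::with_false_pos(0.01)
        .seed(&seed)
        .expected_items(1000);
    bf.insert(b"host-001");
    assert!(bf.contains(b"host-001"));
    // Lookups for absent keys may rarely return true, bounded by the rate;
    // a lower rate costs more bits per element but fewer false hits on scan.
    let _maybe = bf.contains(b"host-999");
}
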
@@ -158,7 +158,7 @@ impl RaftEngineLogStore {
            .context(StartWalTaskSnafu { name: "sync_task" })
    }

    fn span(&self, provider: &RaftEngineProvider) -> (Option<u64>, Option<u64>) {
    pub fn span(&self, provider: &RaftEngineProvider) -> (Option<u64>, Option<u64>) {
        (
            self.engine.first_index(provider.id),
            self.engine.last_index(provider.id),

@@ -25,7 +25,7 @@ use std::fmt::Debug;
use std::sync::Arc;

use api::v1::meta::{ProcedureDetailResponse, Role};
pub use ask_leader::AskLeader;
pub use ask_leader::{AskLeader, LeaderProvider, LeaderProviderRef};
use cluster::Client as ClusterClient;
pub use cluster::ClusterKvBackend;
use common_error::ext::BoxedError;
@@ -247,6 +247,8 @@ pub trait RegionFollowerClient: Sync + Send + Debug {
    async fn remove_region_follower(&self, request: RemoveRegionFollowerRequest) -> Result<()>;

    async fn start(&self, urls: &[&str]) -> Result<()>;

    async fn start_with(&self, leader_provider: LeaderProviderRef) -> Result<()>;
}

#[async_trait::async_trait]
@@ -469,6 +471,43 @@ impl MetaClient {
        Ok(())
    }

    /// Start the client with a [LeaderProvider] and other Metasrv peers' addresses.
    pub(crate) async fn start_with<U, A>(
        &mut self,
        leader_provider: LeaderProviderRef,
        peers: A,
    ) -> Result<()>
    where
        U: AsRef<str>,
        A: AsRef<[U]> + Clone,
    {
        if let Some(client) = &self.region_follower {
            info!("Starting region follower client ...");
            client.start_with(leader_provider.clone()).await?;
        }

        if let Some(client) = &self.heartbeat {
            info!("Starting heartbeat client ...");
            client.start_with(leader_provider.clone()).await?;
        }

        if let Some(client) = &mut self.store {
            info!("Starting store client ...");
            client.start(peers.clone()).await?;
        }

        if let Some(client) = &self.procedure {
            info!("Starting procedure client ...");
            client.start_with(leader_provider.clone()).await?;
        }

        if let Some(client) = &mut self.cluster {
            info!("Starting cluster client ...");
            client.start_with(leader_provider).await?;
        }
        Ok(())
    }

    /// Ask the leader address of `metasrv`, and the heartbeat component
    /// needs to create a bidirectional streaming to the leader.
    pub async fn ask_leader(&self) -> Result<String> {

@@ -12,11 +12,13 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use std::fmt::Debug;
use std::sync::{Arc, RwLock};
use std::time::Duration;

use api::v1::meta::heartbeat_client::HeartbeatClient;
use api::v1::meta::{AskLeaderRequest, RequestHeader, Role};
use async_trait::async_trait;
use common_grpc::channel_manager::ChannelManager;
use common_meta::distributed_time_constants::META_KEEP_ALIVE_INTERVAL_SECS;
use common_telemetry::tracing_context::TracingContext;
@@ -30,6 +32,19 @@ use crate::client::Id;
use crate::error;
use crate::error::Result;

pub type LeaderProviderRef = Arc<dyn LeaderProvider>;

/// Provide [MetaClient] a Metasrv leader's address.
#[async_trait]
pub trait LeaderProvider: Debug + Send + Sync {
    /// Get the leader of the Metasrv. If it returns `None`, or the leader is outdated,
    /// you can use `ask_leader` to find a new one.
    fn leader(&self) -> Option<String>;

    /// Find the current leader of the Metasrv.
    async fn ask_leader(&self) -> Result<String>;
}

#[derive(Debug)]
struct LeadershipGroup {
    leader: Option<String>,
@@ -155,3 +170,14 @@ impl AskLeader {
        ))
    }
}

#[async_trait]
impl LeaderProvider for AskLeader {
    fn leader(&self) -> Option<String> {
        self.get_leader()
    }

    async fn ask_leader(&self) -> Result<String> {
        self.ask_leader().await
    }
}
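
With LeaderProvider as a trait, anything that knows the leader's address can drive the meta client; the AskLeader poller is just the default implementation. A hypothetical fixed-address provider (handy in tests or a single-metasrv deployment) takes a few lines; Result below is a stand-in for the crate's error type, and the async_trait crate is assumed:

use std::fmt::Debug;
use std::sync::Arc;

use async_trait::async_trait;

type Result<T> = std::result::Result<T, String>;

#[async_trait]
pub trait LeaderProvider: Debug + Send + Sync {
    fn leader(&self) -> Option<String>;
    async fn ask_leader(&self) -> Result<String>;
}

/// Always answers with one fixed address.
#[derive(Debug)]
struct StaticLeaderProvider {
    addr: String,
}

#[async_trait]
impl LeaderProvider for StaticLeaderProvider {
    fn leader(&self) -> Option<String> {
        Some(self.addr.clone())
    }

    async fn ask_leader(&self) -> Result<String> {
        Ok(self.addr.clone())
    }
}

fn main() {
    let provider: Arc<dyn LeaderProvider> = Arc::new(StaticLeaderProvider {
        addr: "127.0.0.1:3002".to_string(),
    });
    assert_eq!(provider.leader().as_deref(), Some("127.0.0.1:3002"));
}
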
@@ -38,7 +38,7 @@ use tonic::transport::Channel;
use tonic::Status;

use crate::client::ask_leader::AskLeader;
use crate::client::{util, Id};
use crate::client::{util, Id, LeaderProviderRef};
use crate::error::{
    ConvertMetaResponseSnafu, CreateChannelSnafu, Error, IllegalGrpcClientStateSnafu,
    ReadOnlyKvBackendSnafu, Result, RetryTimesExceededSnafu,
@@ -55,7 +55,7 @@ impl Client {
            id,
            role,
            channel_manager,
            ask_leader: None,
            leader_provider: None,
            max_retry,
        }));

@@ -68,7 +68,13 @@ impl Client {
        A: AsRef<[U]>,
    {
        let mut inner = self.inner.write().await;
        inner.start(urls).await
        inner.start(urls)
    }

    /// Start the client with a [LeaderProvider].
    pub(crate) async fn start_with(&self, leader_provider: LeaderProviderRef) -> Result<()> {
        let mut inner = self.inner.write().await;
        inner.start_with(leader_provider)
    }

    pub async fn range(&self, req: RangeRequest) -> Result<RangeResponse> {
@@ -144,37 +150,40 @@ struct Inner {
    id: Id,
    role: Role,
    channel_manager: ChannelManager,
    ask_leader: Option<AskLeader>,
    leader_provider: Option<LeaderProviderRef>,
    max_retry: usize,
}

impl Inner {
    async fn start<U, A>(&mut self, urls: A) -> Result<()>
    where
        U: AsRef<str>,
        A: AsRef<[U]>,
    {
    fn start_with(&mut self, leader_provider: LeaderProviderRef) -> Result<()> {
        ensure!(
            !self.is_started(),
            IllegalGrpcClientStateSnafu {
                err_msg: "Cluster client already started",
            }
        );
        self.leader_provider = Some(leader_provider);
        Ok(())
    }

    fn start<U, A>(&mut self, urls: A) -> Result<()>
    where
        U: AsRef<str>,
        A: AsRef<[U]>,
    {
        let peers = urls
            .as_ref()
            .iter()
            .map(|url| url.as_ref().to_string())
            .collect::<Vec<_>>();
        self.ask_leader = Some(AskLeader::new(
        let ask_leader = AskLeader::new(
            self.id,
            self.role,
            peers,
            self.channel_manager.clone(),
            self.max_retry,
        ));

        Ok(())
        );
        self.start_with(Arc::new(ask_leader))
    }

    fn make_client(&self, addr: impl AsRef<str>) -> Result<ClusterClient<Channel>> {
@@ -188,18 +197,7 @@ impl Inner {

    #[inline]
    fn is_started(&self) -> bool {
        self.ask_leader.is_some()
    }

    fn ask_leader(&self) -> Result<&AskLeader> {
        ensure!(
            self.is_started(),
            IllegalGrpcClientStateSnafu {
                err_msg: "Cluster client not start"
            }
        );

        Ok(self.ask_leader.as_ref().unwrap())
        self.leader_provider.is_some()
    }

    async fn with_retry<T, F, R, H>(&self, task: &str, body_fn: F, get_header: H) -> Result<T>
@@ -208,19 +206,25 @@ impl Inner {
        F: Fn(ClusterClient<Channel>) -> R,
        H: Fn(&T) -> &Option<ResponseHeader>,
    {
        let ask_leader = self.ask_leader()?;
        let Some(leader_provider) = self.leader_provider.as_ref() else {
            return IllegalGrpcClientStateSnafu {
                err_msg: "not started",
            }
            .fail();
        };

        let mut times = 0;
        let mut last_error = None;

        while times < self.max_retry {
            if let Some(leader) = &ask_leader.get_leader() {
            if let Some(leader) = &leader_provider.leader() {
                let client = self.make_client(leader)?;
                match body_fn(client).await {
                    Ok(res) => {
                        if util::is_not_leader(get_header(&res)) {
                            last_error = Some(format!("{leader} is not a leader"));
                            warn!("Failed to {task} to {leader}, not a leader");
                            let leader = ask_leader.ask_leader().await?;
                            let leader = leader_provider.ask_leader().await?;
                            info!("Cluster client updated to new leader addr: {leader}");
                            times += 1;
                            continue;
@@ -232,7 +236,7 @@ impl Inner {
                        if util::is_unreachable(&status) {
                            last_error = Some(status.to_string());
                            warn!("Failed to {task} to {leader}, source: {status}");
                            let leader = ask_leader.ask_leader().await?;
                            let leader = leader_provider.ask_leader().await?;
                            info!("Cluster client updated to new leader addr: {leader}");
                            times += 1;
                            continue;
@@ -242,7 +246,7 @@ impl Inner {
                        }
                    }
                }
            } else if let Err(err) = ask_leader.ask_leader().await {
            } else if let Err(err) = leader_provider.ask_leader().await {
                return Err(err);
            }
        }

@@ -28,7 +28,7 @@ use tonic::transport::Channel;
use tonic::Streaming;

use crate::client::ask_leader::AskLeader;
use crate::client::Id;
use crate::client::{Id, LeaderProviderRef};
use crate::error;
use crate::error::{InvalidResponseHeaderSnafu, Result};

@@ -116,7 +116,13 @@ impl Client {
        A: AsRef<[U]>,
    {
        let mut inner = self.inner.write().await;
        inner.start(urls).await
        inner.start(urls)
    }

    /// Start the client with a [LeaderProvider].
    pub(crate) async fn start_with(&self, leader_provider: LeaderProviderRef) -> Result<()> {
        let mut inner = self.inner.write().await;
        inner.start_with(leader_provider)
    }

    pub async fn ask_leader(&mut self) -> Result<String> {
@@ -136,7 +142,7 @@ struct Inner {
    id: Id,
    role: Role,
    channel_manager: ChannelManager,
    ask_leader: Option<AskLeader>,
    leader_provider: Option<LeaderProviderRef>,
    max_retry: usize,
}

@@ -146,48 +152,50 @@ impl Inner {
            id,
            role,
            channel_manager,
            ask_leader: None,
            leader_provider: None,
            max_retry,
        }
    }

    async fn start<U, A>(&mut self, urls: A) -> Result<()>
    where
        U: AsRef<str>,
        A: AsRef<[U]>,
    {
    fn start_with(&mut self, leader_provider: LeaderProviderRef) -> Result<()> {
        ensure!(
            !self.is_started(),
            error::IllegalGrpcClientStateSnafu {
                err_msg: "Heartbeat client already started"
            }
        );
        self.leader_provider = Some(leader_provider);
        Ok(())
    }

    fn start<U, A>(&mut self, urls: A) -> Result<()>
    where
        U: AsRef<str>,
        A: AsRef<[U]>,
    {
        let peers = urls
            .as_ref()
            .iter()
            .map(|url| url.as_ref().to_string())
            .collect::<Vec<_>>();
        self.ask_leader = Some(AskLeader::new(
        let ask_leader = AskLeader::new(
            self.id,
            self.role,
            peers,
            self.channel_manager.clone(),
            self.max_retry,
        ));

        Ok(())
        );
        self.start_with(Arc::new(ask_leader))
    }

    async fn ask_leader(&self) -> Result<String> {
        ensure!(
            self.is_started(),
            error::IllegalGrpcClientStateSnafu {
                err_msg: "Heartbeat client not start"
        let Some(leader_provider) = self.leader_provider.as_ref() else {
            return error::IllegalGrpcClientStateSnafu {
                err_msg: "not started",
            }
        );

        self.ask_leader.as_ref().unwrap().ask_leader().await
            .fail();
        };
        leader_provider.ask_leader().await
    }

    async fn heartbeat(&self) -> Result<(HeartbeatSender, HeartbeatStream)> {
@@ -199,10 +207,10 @@ impl Inner {
        );

        let leader_addr = self
            .ask_leader
            .leader_provider
            .as_ref()
            .unwrap()
            .get_leader()
            .leader()
            .context(error::NoLeaderSnafu)?;
        let mut leader = self.make_client(&leader_addr)?;

@@ -262,7 +270,7 @@ impl Inner {

    #[inline]
    pub(crate) fn is_started(&self) -> bool {
        self.ask_leader.is_some()
        self.leader_provider.is_some()
    }
}

@@ -32,7 +32,7 @@ use tonic::transport::Channel;
use tonic::Status;

use crate::client::ask_leader::AskLeader;
use crate::client::{util, Id};
use crate::client::{util, Id, LeaderProviderRef};
use crate::error;
use crate::error::Result;

@@ -47,7 +47,7 @@ impl Client {
            id,
            role,
            channel_manager,
            ask_leader: None,
            leader_provider: None,
            max_retry,
        }));

@@ -60,7 +60,13 @@ impl Client {
        A: AsRef<[U]>,
    {
        let mut inner = self.inner.write().await;
        inner.start(urls).await
        inner.start(urls)
    }

    /// Start the client with a [LeaderProvider].
    pub(crate) async fn start_with(&self, leader_provider: LeaderProviderRef) -> Result<()> {
        let mut inner = self.inner.write().await;
        inner.start_with(leader_provider)
    }

    pub async fn submit_ddl_task(&self, req: DdlTaskRequest) -> Result<DdlTaskResponse> {
@@ -103,37 +109,40 @@ struct Inner {
    id: Id,
    role: Role,
    channel_manager: ChannelManager,
    ask_leader: Option<AskLeader>,
    leader_provider: Option<LeaderProviderRef>,
    max_retry: usize,
}

impl Inner {
    async fn start<U, A>(&mut self, urls: A) -> Result<()>
    where
        U: AsRef<str>,
        A: AsRef<[U]>,
    {
    fn start_with(&mut self, leader_provider: LeaderProviderRef) -> Result<()> {
        ensure!(
            !self.is_started(),
            error::IllegalGrpcClientStateSnafu {
                err_msg: "DDL client already started",
            }
        );
        self.leader_provider = Some(leader_provider);
        Ok(())
    }

    fn start<U, A>(&mut self, urls: A) -> Result<()>
    where
        U: AsRef<str>,
        A: AsRef<[U]>,
    {
        let peers = urls
            .as_ref()
            .iter()
            .map(|url| url.as_ref().to_string())
            .collect::<Vec<_>>();
        self.ask_leader = Some(AskLeader::new(
        let ask_leader = AskLeader::new(
            self.id,
            self.role,
            peers,
            self.channel_manager.clone(),
            self.max_retry,
        ));

        Ok(())
        );
        self.start_with(Arc::new(ask_leader))
    }

    fn make_client(&self, addr: impl AsRef<str>) -> Result<ProcedureServiceClient<Channel>> {
@@ -150,18 +159,7 @@ impl Inner {

    #[inline]
    fn is_started(&self) -> bool {
        self.ask_leader.is_some()
    }

    fn ask_leader(&self) -> Result<&AskLeader> {
        ensure!(
            self.is_started(),
            error::IllegalGrpcClientStateSnafu {
                err_msg: "DDL client not start"
            }
        );

        Ok(self.ask_leader.as_ref().unwrap())
        self.leader_provider.is_some()
    }

    async fn with_retry<T, F, R, H>(&self, task: &str, body_fn: F, get_header: H) -> Result<T>
@@ -170,19 +168,25 @@ impl Inner {
        F: Fn(ProcedureServiceClient<Channel>) -> R,
        H: Fn(&T) -> &Option<ResponseHeader>,
    {
        let ask_leader = self.ask_leader()?;
        let Some(leader_provider) = self.leader_provider.as_ref() else {
            return error::IllegalGrpcClientStateSnafu {
                err_msg: "not started",
            }
            .fail();
        };

        let mut times = 0;
        let mut last_error = None;

        while times < self.max_retry {
            if let Some(leader) = &ask_leader.get_leader() {
            if let Some(leader) = &leader_provider.leader() {
                let client = self.make_client(leader)?;
                match body_fn(client).await {
                    Ok(res) => {
                        if util::is_not_leader(get_header(&res)) {
                            last_error = Some(format!("{leader} is not a leader"));
                            warn!("Failed to {task} to {leader}, not a leader");
                            let leader = ask_leader.ask_leader().await?;
                            let leader = leader_provider.ask_leader().await?;
                            info!("DDL client updated to new leader addr: {leader}");
                            times += 1;
                            continue;
@@ -194,7 +198,7 @@ impl Inner {
                        if util::is_unreachable(&status) {
                            last_error = Some(status.to_string());
                            warn!("Failed to {task} to {leader}, source: {status}");
                            let leader = ask_leader.ask_leader().await?;
                            let leader = leader_provider.ask_leader().await?;
                            info!("Procedure client updated to new leader addr: {leader}");
                            times += 1;
                            continue;
@@ -204,7 +208,7 @@ impl Inner {
                        }
                    }
                }
            } else if let Err(err) = ask_leader.ask_leader().await {
            } else if let Err(err) = leader_provider.ask_leader().await {
                return Err(err);
            }
        }
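
All three clients (cluster, heartbeat, procedure/DDL) now share one retry shape: use the cached leader if the provider has one, and on a not-a-leader response or unreachable error, re-ask and retry up to max_retry. Stripped of gRPC, the loop reduces to the following sketch (synchronous for brevity; the provider and error types are stand-ins):

trait LeaderProvider {
    /// Cached leader address, if any.
    fn leader(&self) -> Option<String>;
    /// Refreshes the cache, e.g. by polling peers.
    fn ask_leader(&self) -> Result<String, String>;
}

fn with_retry<T>(
    provider: &dyn LeaderProvider,
    max_retry: usize,
    mut call: impl FnMut(&str) -> Result<T, String>,
) -> Result<T, String> {
    let mut last_error = None;
    for _ in 0..max_retry {
        if let Some(leader) = provider.leader() {
            match call(&leader) {
                Ok(res) => return Ok(res),
                Err(e) => {
                    // Demoted or unreachable leader: remember the error, re-ask.
                    last_error = Some(e);
                    provider.ask_leader()?;
                }
            }
        } else {
            // No cached leader yet; populate the cache before retrying.
            provider.ask_leader()?;
        }
    }
    Err(last_error.unwrap_or_else(|| "retry times exceeded".to_string()))
}
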
@@ -21,7 +21,7 @@ use common_grpc::channel_manager::{ChannelConfig, ChannelManager};
use common_telemetry::{debug, info};
use serde::{Deserialize, Serialize};

use crate::client::MetaClientBuilder;
use crate::client::{LeaderProviderRef, MetaClientBuilder};

pub mod client;
pub mod error;
@@ -76,6 +76,7 @@ pub async fn create_meta_client(
    client_type: MetaClientType,
    meta_client_options: &MetaClientOptions,
    plugins: Option<&Plugins>,
    leader_provider: Option<LeaderProviderRef>,
) -> error::Result<MetaClientRef> {
    info!(
        "Creating {:?} instance with Metasrv addrs {:?}",
@@ -116,9 +117,15 @@ pub async fn create_meta_client(

    let mut meta_client = builder.build();

    meta_client
        .start(&meta_client_options.metasrv_addrs)
        .await?;
    if let Some(leader_provider) = leader_provider {
        meta_client
            .start_with(leader_provider, &meta_client_options.metasrv_addrs)
            .await?;
    } else {
        meta_client
            .start(&meta_client_options.metasrv_addrs)
            .await?;
    }

    meta_client.ask_leader().await?;

@@ -46,9 +46,7 @@ use snafu::ResultExt;
#[cfg(feature = "mysql_kvbackend")]
use sqlx::mysql::MySqlConnectOptions;
#[cfg(feature = "mysql_kvbackend")]
use sqlx::mysql::{MySqlConnection, MySqlPool};
#[cfg(feature = "mysql_kvbackend")]
use sqlx::Connection;
use sqlx::mysql::MySqlPool;
use tokio::net::TcpListener;
use tokio::sync::mpsc::{self, Receiver, Sender};
use tokio::sync::{oneshot, Mutex};
@@ -278,6 +276,7 @@ pub async fn metasrv_builder(
            let candidate_lease_ttl = Duration::from_secs(CANDIDATE_LEASE_SECS);
            let execution_timeout = Duration::from_secs(META_LEASE_SECS);
            let statement_timeout = Duration::from_secs(META_LEASE_SECS);
            let idle_session_timeout = Duration::from_secs(META_LEASE_SECS);
            let meta_lease_ttl = Duration::from_secs(META_LEASE_SECS);

            let mut cfg = Config::new();
@@ -286,8 +285,12 @@ pub async fn metasrv_builder(
            // We use a separate pool for election since we need a different session keep-alive idle time.
            let pool = create_postgres_pool_with(&opts.store_addrs, cfg).await?;

            let election_client =
                ElectionPgClient::new(pool, execution_timeout, meta_lease_ttl, statement_timeout)?;
            let election_client = ElectionPgClient::new(
                pool,
                execution_timeout,
                idle_session_timeout,
                statement_timeout,
            )?;
            let election = PgElection::with_pg_client(
                opts.grpc.server_addr.clone(),
                election_client,
@@ -308,6 +311,10 @@ pub async fn metasrv_builder(
        }
        #[cfg(feature = "mysql_kvbackend")]
        (None, BackendImpl::MysqlStore) => {
            use std::time::Duration;

            use crate::election::rds::mysql::ElectionMysqlClient;

            let pool = create_mysql_pool(&opts.store_addrs).await?;
            let kv_backend =
                MySqlStore::with_mysql_pool(pool, &opts.meta_table_name, opts.max_txn_ops)
@@ -315,13 +322,29 @@ pub async fn metasrv_builder(
                    .context(error::KvBackendSnafu)?;
            // Since election will acquire a lock of the table, we need a separate table for election.
            let election_table_name = opts.meta_table_name.clone() + "_election";
            let election_client = create_mysql_client(opts).await?;
            // We use a separate pool for election since we need a different session keep-alive idle time.
            let pool = create_mysql_pool(&opts.store_addrs).await?;
            let execution_timeout = Duration::from_secs(META_LEASE_SECS);
            let statement_timeout = Duration::from_secs(META_LEASE_SECS);
            let idle_session_timeout = Duration::from_secs(META_LEASE_SECS);
            let innode_lock_wait_timeout = Duration::from_secs(META_LEASE_SECS / 2);
            let meta_lease_ttl = Duration::from_secs(META_LEASE_SECS);
            let candidate_lease_ttl = Duration::from_secs(CANDIDATE_LEASE_SECS);

            let election_client = ElectionMysqlClient::new(
                pool,
                execution_timeout,
                statement_timeout,
                innode_lock_wait_timeout,
                idle_session_timeout,
                &election_table_name,
            );
            let election = MySqlElection::with_mysql_client(
                opts.grpc.server_addr.clone(),
                election_client,
                opts.store_key_prefix.clone(),
                CANDIDATE_LEASE_SECS,
                META_LEASE_SECS,
                candidate_lease_ttl,
                meta_lease_ttl,
                &election_table_name,
            )
            .await?;
@@ -438,14 +461,6 @@ pub async fn create_mysql_pool(store_addrs: &[String]) -> Result<MySqlPool> {
    let pool = MySqlPool::connect_with(opts)
        .await
        .context(error::CreateMySqlPoolSnafu)?;

    Ok(pool)
}

#[cfg(feature = "mysql_kvbackend")]
async fn create_mysql_client(opts: &MetasrvOptions) -> Result<MySqlConnection> {
    let opts = setup_mysql_options(&opts.store_addrs).await?;
    let client = MySqlConnection::connect_with(&opts)
        .await
        .context(error::ConnectMySqlSnafu)?;
    Ok(client)
}

File diff suppressed because it is too large
@@ -382,6 +382,14 @@ pub enum Error {
        location: Location,
    },

    #[snafu(display("Failed to decode sql value"))]
    DecodeSqlValue {
        #[snafu(source)]
        error: sqlx::error::Error,
        #[snafu(implicit)]
        location: Location,
    },

    #[snafu(display("Failed to find table route for {table_id}"))]
    TableRouteNotFound {
        table_id: TableId,
@@ -417,6 +425,18 @@ pub enum Error {
        location: Location,
    },

    #[snafu(display("Leader lease expired"))]
    LeaderLeaseExpired {
        #[snafu(implicit)]
        location: Location,
    },

    #[snafu(display("Leader lease changed during election"))]
    LeaderLeaseChanged {
        #[snafu(implicit)]
        location: Location,
    },

    #[snafu(display("Table {} not found", name))]
    TableNotFound {
        name: String,
@@ -766,7 +786,7 @@ pub enum Error {
        error: deadpool::managed::PoolError<tokio_postgres::Error>,
    },

    #[cfg(feature = "pg_kvbackend")]
    #[cfg(any(feature = "pg_kvbackend", feature = "mysql_kvbackend"))]
    #[snafu(display("Sql execution timeout, sql: {}, duration: {:?}", sql, duration))]
    SqlExecutionTimeout {
        #[snafu(implicit)]
@@ -812,8 +832,8 @@ pub enum Error {
    },

    #[cfg(feature = "mysql_kvbackend")]
    #[snafu(display("Failed to connect to mysql"))]
    ConnectMySql {
    #[snafu(display("Failed to acquire mysql client from pool"))]
    AcquireMySqlClient {
        #[snafu(source)]
        error: sqlx::Error,
        #[snafu(implicit)]
@@ -911,6 +931,8 @@ impl ErrorExt for Error {
            | Error::SerializeToJson { .. }
            | Error::DeserializeFromJson { .. }
            | Error::NoLeader { .. }
            | Error::LeaderLeaseExpired { .. }
            | Error::LeaderLeaseChanged { .. }
            | Error::CreateChannel { .. }
            | Error::BatchGet { .. }
            | Error::Range { .. }
@@ -928,7 +950,6 @@ impl ErrorExt for Error {
            | Error::RetryLater { .. }
            | Error::RetryLaterWithSource { .. }
            | Error::StartGrpc { .. }
            | Error::NoEnoughAvailableNode { .. }
            | Error::PublishMessage { .. }
            | Error::Join { .. }
            | Error::PeerUnavailable { .. }
@@ -1013,17 +1034,21 @@ impl ErrorExt for Error {

            Error::Other { source, .. } => source.status_code(),
            Error::LookupPeer { source, .. } => source.status_code(),
            Error::NoEnoughAvailableNode { .. } => StatusCode::RuntimeResourcesExhausted,

            #[cfg(feature = "pg_kvbackend")]
            Error::CreatePostgresPool { .. }
            | Error::GetPostgresClient { .. }
            | Error::GetPostgresConnection { .. }
            | Error::PostgresExecution { .. }
            | Error::SqlExecutionTimeout { .. } => StatusCode::Internal,
            | Error::PostgresExecution { .. } => StatusCode::Internal,
            #[cfg(feature = "mysql_kvbackend")]
            Error::MySqlExecution { .. }
            | Error::CreateMySqlPool { .. }
            | Error::ConnectMySql { .. }
            | Error::ParseMySqlUrl { .. } => StatusCode::Internal,
            | Error::ParseMySqlUrl { .. }
            | Error::DecodeSqlValue { .. }
            | Error::AcquireMySqlClient { .. } => StatusCode::Internal,
            #[cfg(any(feature = "pg_kvbackend", feature = "mysql_kvbackend"))]
            Error::SqlExecutionTimeout { .. } => StatusCode::Internal,
        }
    }
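
The error plumbing mostly widens cfg gates: SqlExecutionTimeout moves from pg-only to any(pg, mysql), and the new mysql variants (DecodeSqlValue, AcquireMySqlClient) join the same StatusCode::Internal bucket. The snafu pattern used throughout is compact; a minimal sketch with one cfg-gated variant (the feature gating shown is illustrative):

use snafu::Snafu;

#[derive(Debug, Snafu)]
#[snafu(visibility(pub))]
pub enum Error {
    #[snafu(display("Sql execution timeout, sql: {}, duration: {:?}", sql, duration))]
    SqlExecutionTimeout {
        sql: String,
        duration: std::time::Duration,
    },

    // Only compiled when the backend feature is enabled.
    #[cfg(feature = "mysql_kvbackend")]
    #[snafu(display("Failed to decode sql value"))]
    DecodeSqlValue,
}

fn main() {
    let err = SqlExecutionTimeoutSnafu {
        sql: "SELECT 1".to_string(),
        duration: std::time::Duration::from_secs(5),
    }
    .build();
    println!("{err}");
}
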
@@ -417,6 +417,7 @@ pub struct Metasrv {
    meta_peer_client: MetaPeerClientRef,
    // The selector is used to select a target datanode.
    selector: SelectorRef,
    selector_ctx: SelectorContext,
    // The flow selector is used to select a target flownode.
    flow_selector: SelectorRef,
    handler_group: RwLock<Option<HeartbeatHandlerGroupRef>>,
@@ -654,6 +655,10 @@ impl Metasrv {
        &self.selector
    }

    pub fn selector_ctx(&self) -> &SelectorContext {
        &self.selector_ctx
    }

    pub fn flow_selector(&self) -> &SelectorRef {
        &self.flow_selector
    }

@@ -353,30 +353,28 @@ impl MetasrvBuilder {

        let leader_region_registry = Arc::new(LeaderRegionRegistry::default());

        let ddl_context = DdlContext {
            node_manager,
            cache_invalidator: cache_invalidator.clone(),
            memory_region_keeper: memory_region_keeper.clone(),
            leader_region_registry: leader_region_registry.clone(),
            table_metadata_manager: table_metadata_manager.clone(),
            table_metadata_allocator: table_metadata_allocator.clone(),
            flow_metadata_manager: flow_metadata_manager.clone(),
            flow_metadata_allocator: flow_metadata_allocator.clone(),
            region_failure_detector_controller,
        };
        let procedure_manager_c = procedure_manager.clone();
        let ddl_manager = DdlManager::try_new(ddl_context, procedure_manager_c, true)
            .context(error::InitDdlManagerSnafu)?;
        #[cfg(feature = "enterprise")]
        let trigger_ddl_manager = plugins
            .as_ref()
            .and_then(|plugins| plugins.get::<common_meta::ddl_manager::TriggerDdlManagerRef>());
        let ddl_manager = Arc::new(
            DdlManager::try_new(
                DdlContext {
                    node_manager,
                    cache_invalidator: cache_invalidator.clone(),
                    memory_region_keeper: memory_region_keeper.clone(),
                    leader_region_registry: leader_region_registry.clone(),
                    table_metadata_manager: table_metadata_manager.clone(),
                    table_metadata_allocator: table_metadata_allocator.clone(),
                    flow_metadata_manager: flow_metadata_manager.clone(),
                    flow_metadata_allocator: flow_metadata_allocator.clone(),
                    region_failure_detector_controller,
                },
                procedure_manager.clone(),
                true,
                #[cfg(feature = "enterprise")]
                trigger_ddl_manager,
            )
            .context(error::InitDdlManagerSnafu)?,
        );
        let ddl_manager = {
            let trigger_ddl_manager = plugins.as_ref().and_then(|plugins| {
                plugins.get::<common_meta::ddl_manager::TriggerDdlManagerRef>()
            });
            ddl_manager.with_trigger_ddl_manager(trigger_ddl_manager)
        };
        let ddl_manager = Arc::new(ddl_manager);

        // remote WAL prune ticker and manager
        let wal_prune_ticker = if is_remote_wal && options.wal.enable_active_wal_pruning() {
@@ -452,6 +450,7 @@ impl MetasrvBuilder {
            leader_cached_kv_backend,
            meta_peer_client: meta_peer_client.clone(),
            selector,
            selector_ctx,
            // TODO(jeremy): We do not allow configuring the flow selector.
            flow_selector,
            handler_group: RwLock::new(None),
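
The builder hunk trades a single deeply nested constructor call for construct-then-configure: DdlManager::try_new(...) first, then an optional with_trigger_ddl_manager(...) for the enterprise-only plugin, so the cfg(feature = "enterprise") argument no longer lives inside the argument list. A sketch of that shape with simplified stand-in types:

use std::sync::Arc;

#[derive(Debug, Default)]
struct DdlManager {
    trigger: Option<String>, // stand-in for TriggerDdlManagerRef
}

impl DdlManager {
    fn try_new() -> Result<Self, String> {
        Ok(Self::default())
    }

    /// Optional wiring stays out of the constructor signature.
    fn with_trigger_ddl_manager(mut self, trigger: Option<String>) -> Self {
        self.trigger = trigger;
        self
    }
}

fn main() -> Result<(), String> {
    // Plugins may or may not supply a trigger DDL manager.
    let from_plugins: Option<String> = None;
    let ddl_manager = Arc::new(DdlManager::try_new()?.with_trigger_ddl_manager(from_plugins));
    println!("{ddl_manager:?}");
    Ok(())
}
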
@@ -145,12 +145,19 @@ impl DataRegion {
                IndexOptions::Inverted => {
                    c.column_schema.set_inverted_index(true);
                }
                IndexOptions::Skipping { granularity } => {
                IndexOptions::Skipping {
                    granularity,
                    false_positive_rate,
                } => {
                    c.column_schema
                        .set_skipping_options(&SkippingIndexOptions {
                            granularity,
                            index_type: SkippingIndexType::BloomFilter,
                        })
                        .set_skipping_options(
                            &SkippingIndexOptions::new(
                                granularity,
                                false_positive_rate,
                                SkippingIndexType::BloomFilter,
                            )
                            .context(SetSkippingIndexOptionSnafu)?,
                        )
                        .context(SetSkippingIndexOptionSnafu)?;
                }
            }
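
SkippingIndexOptions moves from a struct literal to constructors: a fallible new(...) that can reject bad inputs (hence the extra .context(SetSkippingIndexOptionSnafu)?), and new_unchecked(...) for trusted constants. A sketch of that split; the validation rule shown (rate must lie in (0, 1]) is an assumption for illustration, and the real type also carries an index type:

#[derive(Debug, Clone, Copy, PartialEq)]
pub struct SkippingIndexOptions {
    pub granularity: u32,
    pub false_positive_rate: f64,
}

impl SkippingIndexOptions {
    /// Fallible constructor: rejects out-of-range rates.
    pub fn new(granularity: u32, false_positive_rate: f64) -> Result<Self, String> {
        if !(false_positive_rate > 0.0 && false_positive_rate <= 1.0) {
            return Err(format!("invalid false positive rate: {false_positive_rate}"));
        }
        Ok(Self { granularity, false_positive_rate })
    }

    /// For trusted, compile-time constants only.
    pub fn new_unchecked(granularity: u32, false_positive_rate: f64) -> Self {
        Self { granularity, false_positive_rate }
    }
}

fn main() {
    assert!(SkippingIndexOptions::new(1024, 0.01).is_ok());
    assert!(SkippingIndexOptions::new(1024, 1.5).is_err());
}
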
@@ -79,12 +79,7 @@ impl MetricEngineInner {
            .context(MitoCatchupOperationSnafu)
            .map(|response| response.affected_rows)?;

        let primary_key_encoding = self.mito.get_primary_key_encoding(data_region_id).context(
            PhysicalRegionNotFoundSnafu {
                region_id: data_region_id,
            },
        )?;
        self.recover_states(region_id, primary_key_encoding, physical_region_options)
        self.recover_states(region_id, physical_region_options)
            .await?;
        Ok(0)
    }
@@ -33,9 +33,7 @@ use store_api::metric_engine_consts::{
    METADATA_SCHEMA_TIMESTAMP_COLUMN_INDEX, METADATA_SCHEMA_TIMESTAMP_COLUMN_NAME,
    METADATA_SCHEMA_VALUE_COLUMN_INDEX, METADATA_SCHEMA_VALUE_COLUMN_NAME,
};
use store_api::mito_engine_options::{
    APPEND_MODE_KEY, MEMTABLE_PARTITION_TREE_PRIMARY_KEY_ENCODING, SKIP_WAL_KEY, TTL_KEY,
};
use store_api::mito_engine_options::{TTL_KEY, WAL_OPTIONS_KEY};
use store_api::region_engine::RegionEngine;
use store_api::region_request::{AffectedRows, RegionCreateRequest, RegionRequest};
use store_api::storage::consts::ReservedColumnId;
@@ -57,6 +55,7 @@ use crate::utils::{
};

const DEFAULT_TABLE_ID_SKIPPING_INDEX_GRANULARITY: u32 = 1024;
const DEFAULT_TABLE_ID_SKIPPING_INDEX_FALSE_POSITIVE_RATE: f64 = 0.01;

impl MetricEngineInner {
    pub async fn create_regions(
@@ -475,7 +474,7 @@ impl MetricEngineInner {
        // concat region dir
        let metadata_region_dir = join_dir(&request.region_dir, METADATA_REGION_SUBDIR);

        let options = region_options_for_metadata_region(request.options.clone());
        let options = region_options_for_metadata_region(&request.options);
        RegionCreateRequest {
            engine: MITO_ENGINE_NAME.to_string(),
            column_metadatas: vec![
@@ -544,10 +543,11 @@ impl MetricEngineInner {
                ConcreteDataType::uint32_datatype(),
                false,
            )
            .with_skipping_options(SkippingIndexOptions {
                granularity: DEFAULT_TABLE_ID_SKIPPING_INDEX_GRANULARITY,
                index_type: datatypes::schema::SkippingIndexType::BloomFilter,
            })
            .with_skipping_options(SkippingIndexOptions::new_unchecked(
                DEFAULT_TABLE_ID_SKIPPING_INDEX_GRANULARITY,
                DEFAULT_TABLE_ID_SKIPPING_INDEX_FALSE_POSITIVE_RATE,
                datatypes::schema::SkippingIndexType::BloomFilter,
            ))
            .unwrap(),
        };
        let tsid_col = ColumnMetadata {
@@ -599,15 +599,16 @@ fn parse_physical_region_id(request: &RegionCreateRequest) -> Result<RegionId> {

/// Creates the region options for metadata region in metric engine.
pub(crate) fn region_options_for_metadata_region(
    mut original: HashMap<String, String>,
    original: &HashMap<String, String>,
) -> HashMap<String, String> {
    // TODO(ruihang, weny): add whitelist for metric engine options.
    original.remove(APPEND_MODE_KEY);
    // Don't allow to set primary key encoding for metadata region.
    original.remove(MEMTABLE_PARTITION_TREE_PRIMARY_KEY_ENCODING);
    original.insert(TTL_KEY.to_string(), FOREVER.to_string());
    original.remove(SKIP_WAL_KEY);
    original
    let mut metadata_region_options = HashMap::new();
    metadata_region_options.insert(TTL_KEY.to_string(), FOREVER.to_string());

    if let Some(wal_options) = original.get(WAL_OPTIONS_KEY) {
        metadata_region_options.insert(WAL_OPTIONS_KEY.to_string(), wal_options.to_string());
    }

    metadata_region_options
}

#[cfg(test)]
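
region_options_for_metadata_region flips from a denylist (strip APPEND_MODE_KEY, SKIP_WAL_KEY, the primary-key-encoding key) to an allowlist: start empty, force the TTL, and copy only the WAL options through. The behavior pins down nicely in a unit-test-style sketch (key names mirror the diff; FOREVER stands in for the crate's constant):

use std::collections::HashMap;

const TTL_KEY: &str = "ttl";
const WAL_OPTIONS_KEY: &str = "wal_options";
const FOREVER: &str = "forever";

/// Allowlist variant: only a forced TTL and the WAL options survive.
fn region_options_for_metadata_region(original: &HashMap<String, String>) -> HashMap<String, String> {
    let mut out = HashMap::new();
    out.insert(TTL_KEY.to_string(), FOREVER.to_string());
    if let Some(wal) = original.get(WAL_OPTIONS_KEY) {
        out.insert(WAL_OPTIONS_KEY.to_string(), wal.to_string());
    }
    out
}

fn main() {
    let mut original = HashMap::new();
    original.insert("append_mode".to_string(), "true".to_string());
    original.insert(WAL_OPTIONS_KEY.to_string(), "{\"provider\":\"kafka\"}".to_string());

    let opts = region_options_for_metadata_region(&original);
    // Unknown or denied keys never reach the metadata region.
    assert!(!opts.contains_key("append_mode"));
    assert_eq!(opts[TTL_KEY], FOREVER);
    assert!(opts.contains_key(WAL_OPTIONS_KEY));
}
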
@@ -14,14 +14,14 @@
|
||||
|
||||
//! Open a metric region.
|
||||
|
||||
use std::collections::HashSet;
|
||||
|
||||
use api::region::RegionResponse;
|
||||
use api::v1::SemanticType;
|
||||
use common_error::ext::BoxedError;
|
||||
use common_telemetry::info;
|
||||
use datafusion::common::HashMap;
|
||||
use mito2::engine::MITO_ENGINE_NAME;
|
||||
use object_store::util::join_dir;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use store_api::codec::PrimaryKeyEncoding;
|
||||
use store_api::metric_engine_consts::{DATA_REGION_SUBDIR, METADATA_REGION_SUBDIR};
|
||||
use store_api::region_engine::{BatchResponses, RegionEngine};
|
||||
use store_api::region_request::{AffectedRows, RegionOpenRequest, RegionRequest};
|
||||
@@ -31,7 +31,8 @@ use crate::engine::create::region_options_for_metadata_region;
|
||||
use crate::engine::options::{set_data_region_options, PhysicalRegionOptions};
|
||||
use crate::engine::MetricEngineInner;
|
||||
use crate::error::{
|
||||
BatchOpenMitoRegionSnafu, OpenMitoRegionSnafu, PhysicalRegionNotFoundSnafu, Result,
|
||||
BatchOpenMitoRegionSnafu, NoOpenRegionResultSnafu, OpenMitoRegionSnafu,
|
||||
PhysicalRegionNotFoundSnafu, Result,
|
||||
};
|
||||
use crate::metrics::{LOGICAL_REGION_COUNT, PHYSICAL_REGION_COUNT};
|
||||
use crate::utils;
|
||||
@@ -44,8 +45,7 @@ impl MetricEngineInner {
|
||||
) -> Result<BatchResponses> {
|
||||
// We need to open metadata region and data region for each request.
|
||||
let mut all_requests = Vec::with_capacity(requests.len() * 2);
|
||||
let mut physical_region_ids = Vec::with_capacity(requests.len());
|
||||
let mut data_region_ids = HashSet::with_capacity(requests.len());
|
||||
let mut physical_region_ids = HashMap::with_capacity(requests.len());
|
||||
|
||||
for (region_id, request) in requests {
|
||||
if !request.is_physical_table() {
|
||||
@@ -58,35 +58,70 @@ impl MetricEngineInner {
|
||||
self.transform_open_physical_region_request(request);
|
||||
all_requests.push((metadata_region_id, open_metadata_region_request));
|
||||
all_requests.push((data_region_id, open_data_region_request));
|
||||
physical_region_ids.push((region_id, physical_region_options));
|
||||
data_region_ids.insert(data_region_id);
|
||||
physical_region_ids.insert(region_id, physical_region_options);
|
||||
}
|
||||
|
||||
let results = self
|
||||
let mut results = self
|
||||
.mito
|
||||
.handle_batch_open_requests(parallelism, all_requests)
|
||||
.await
|
||||
.context(BatchOpenMitoRegionSnafu {})?
|
||||
.into_iter()
|
||||
.filter(|(region_id, _)| data_region_ids.contains(region_id))
|
||||
.collect::<Vec<_>>();
|
||||
.collect::<HashMap<_, _>>();
|
||||
|
||||
let mut responses = Vec::with_capacity(physical_region_ids.len());
|
||||
for (physical_region_id, physical_region_options) in physical_region_ids {
|
||||
let primary_key_encoding = self
|
||||
.mito
|
||||
.get_primary_key_encoding(physical_region_id)
|
||||
.context(PhysicalRegionNotFoundSnafu {
|
||||
region_id: physical_region_id,
|
||||
})?;
|
||||
self.recover_states(
|
||||
physical_region_id,
|
||||
primary_key_encoding,
|
||||
physical_region_options,
|
||||
)
|
||||
.await?;
|
||||
let metadata_region_id = utils::to_metadata_region_id(physical_region_id);
|
||||
let data_region_id = utils::to_data_region_id(physical_region_id);
|
||||
let metadata_region_result = results.remove(&metadata_region_id);
|
||||
let data_region_result = results.remove(&data_region_id);
|
||||
// Pass the optional `metadata_region_result` and `data_region_result` to
|
||||
// `open_physical_region_with_results`. This function handles errors for each
|
||||
// open physical region request, allowing the process to continue with the
|
||||
// remaining regions even if some requests fail.
|
||||
let response = self
|
||||
.open_physical_region_with_results(
|
||||
metadata_region_result,
|
||||
data_region_result,
|
||||
physical_region_id,
|
||||
physical_region_options,
|
||||
)
|
||||
.await
|
||||
.map_err(BoxedError::new);
|
||||
responses.push((physical_region_id, response));
|
||||
}
|
||||
|
||||
Ok(results)
|
||||
Ok(responses)
|
||||
}
|
||||
|
||||
    async fn open_physical_region_with_results(
        &self,
        metadata_region_result: Option<std::result::Result<RegionResponse, BoxedError>>,
        data_region_result: Option<std::result::Result<RegionResponse, BoxedError>>,
        physical_region_id: RegionId,
        physical_region_options: PhysicalRegionOptions,
    ) -> Result<RegionResponse> {
        let metadata_region_id = utils::to_metadata_region_id(physical_region_id);
        let data_region_id = utils::to_data_region_id(physical_region_id);
        let _ = metadata_region_result
            .context(NoOpenRegionResultSnafu {
                region_id: metadata_region_id,
            })?
            .context(OpenMitoRegionSnafu {
                region_type: "metadata",
            })?;

        let data_region_response = data_region_result
            .context(NoOpenRegionResultSnafu {
                region_id: data_region_id,
            })?
            .context(OpenMitoRegionSnafu {
                region_type: "data",
            })?;

        self.recover_states(physical_region_id, physical_region_options)
            .await?;
        Ok(data_region_response)
    }

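The Option<Result<..>> parameters above let the function distinguish "mito returned no result for this region" from "the open itself failed". A hedged sketch of the same two-step unwrapping, using plain ok_or/map_err in place of snafu's context selectors:

    // Unwrapping Option<Result<T, E>> in two labeled steps.
    fn unwrap_open_result(
        result: Option<Result<u64, String>>,
        region: &str,
    ) -> Result<u64, String> {
        result
            // None: mito produced no result for this region at all.
            .ok_or_else(|| format!("no open result for {region} region"))?
            // Some(Err(..)): the open failed; attach the region type.
            .map_err(|e| format!("failed to open {region} region: {e}"))
    }

    fn main() {
        assert_eq!(unwrap_open_result(Some(Ok(42)), "data"), Ok(42));
        assert!(unwrap_open_result(None, "metadata").is_err());
        assert!(unwrap_open_result(Some(Err("boom".into())), "data").is_err());
    }
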
    /// Open a metric region.
@@ -107,13 +142,7 @@ impl MetricEngineInner {
        // open physical region and recover states
        let physical_region_options = PhysicalRegionOptions::try_from(&request.options)?;
        self.open_physical_region(region_id, request).await?;
        let data_region_id = utils::to_data_region_id(region_id);
        let primary_key_encoding = self.mito.get_primary_key_encoding(data_region_id).context(
            PhysicalRegionNotFoundSnafu {
                region_id: data_region_id,
            },
        )?;
        self.recover_states(region_id, primary_key_encoding, physical_region_options)
        self.recover_states(region_id, physical_region_options)
            .await?;

        Ok(0)
@@ -138,7 +167,7 @@ impl MetricEngineInner {
        let metadata_region_dir = join_dir(&request.region_dir, METADATA_REGION_SUBDIR);
        let data_region_dir = join_dir(&request.region_dir, DATA_REGION_SUBDIR);

        let metadata_region_options = region_options_for_metadata_region(request.options.clone());
        let metadata_region_options = region_options_for_metadata_region(&request.options);
        let open_metadata_region_request = RegionOpenRequest {
            region_dir: metadata_region_dir,
            options: metadata_region_options,
@@ -208,7 +237,6 @@ impl MetricEngineInner {
    pub(crate) async fn recover_states(
        &self,
        physical_region_id: RegionId,
        primary_key_encoding: PrimaryKeyEncoding,
        physical_region_options: PhysicalRegionOptions,
    ) -> Result<Vec<RegionId>> {
        // load logical regions and physical column names
@@ -220,6 +248,12 @@ impl MetricEngineInner {
            .data_region
            .physical_columns(physical_region_id)
            .await?;
        let primary_key_encoding = self
            .mito
            .get_primary_key_encoding(physical_region_id)
            .context(PhysicalRegionNotFoundSnafu {
                region_id: physical_region_id,
            })?;

        {
            let mut state = self.state.write().unwrap();

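The hunks above move the primary-key-encoding lookup inside recover_states, so every call site shrinks from three arguments to two and the lookup sits next to the other per-region state loads. A simplified sketch of that refactor shape, with illustrative names only, not engine code:

    // Before: recover_states(region_id, primary_key_encoding, options)
    // After:  recover_states(region_id) derives the encoding itself.
    struct Engine;

    #[derive(Debug)]
    enum PrimaryKeyEncoding {
        Dense,
        Sparse,
    }

    impl Engine {
        fn get_primary_key_encoding(&self, _region_id: u64) -> Option<PrimaryKeyEncoding> {
            Some(PrimaryKeyEncoding::Sparse)
        }

        fn recover_states(&self, region_id: u64) -> Result<(), String> {
            // The derived parameter is resolved here, not by each caller.
            let encoding = self
                .get_primary_key_encoding(region_id)
                .ok_or_else(|| format!("physical region {region_id} not found"))?;
            println!("recovering region {region_id} with {encoding:?} encoding");
            Ok(())
        }
    }

    fn main() {
        Engine.recover_states(1).unwrap();
    }
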
@@ -17,6 +17,8 @@
use std::collections::HashMap;

use store_api::metric_engine_consts::{
    METRIC_ENGINE_INDEX_SKIPPING_INDEX_FALSE_POSITIVE_RATE_OPTION,
    METRIC_ENGINE_INDEX_SKIPPING_INDEX_FALSE_POSITIVE_RATE_OPTION_DEFAULT,
    METRIC_ENGINE_INDEX_SKIPPING_INDEX_GRANULARITY_OPTION,
    METRIC_ENGINE_INDEX_SKIPPING_INDEX_GRANULARITY_OPTION_DEFAULT, METRIC_ENGINE_INDEX_TYPE_OPTION,
};
@@ -31,19 +33,20 @@ use crate::error::{Error, ParseRegionOptionsSnafu, Result};
const SEG_ROW_COUNT_FOR_DATA_REGION: u32 = 256;

/// Physical region options.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[derive(Debug, Clone, Copy, PartialEq)]
pub struct PhysicalRegionOptions {
    pub index: IndexOptions,
}

/// Index options for auto created columns
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)]
#[derive(Debug, Clone, Copy, Default, PartialEq)]
pub enum IndexOptions {
    #[default]
    None,
    Inverted,
    Skipping {
        granularity: u32,
        false_positive_rate: f64,
    },
}

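Note why Eq disappears from both derives: the new false_positive_rate field is an f64, and f64 implements only PartialEq, so any type containing it cannot derive Eq. A small sketch of how these variants might drive index creation for auto-added columns; build_index and its return strings are hypothetical, only the enum shape comes from the diff:

    #[derive(Debug, Clone, Copy, Default, PartialEq)]
    enum IndexOptions {
        #[default]
        None,
        Inverted,
        Skipping { granularity: u32, false_positive_rate: f64 },
    }

    fn build_index(options: IndexOptions) -> String {
        match options {
            IndexOptions::None => "no index".to_string(),
            IndexOptions::Inverted => "inverted index".to_string(),
            // The f64 field here is what forced dropping Eq above.
            IndexOptions::Skipping { granularity, false_positive_rate } => format!(
                "skipping index (granularity: {granularity}, fpr: {false_positive_rate})"
            ),
        }
    }

    fn main() {
        println!(
            "{}",
            build_index(IndexOptions::Skipping {
                granularity: 102400,
                false_positive_rate: 0.01,
            })
        );
    }
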
@@ -54,6 +57,7 @@ pub fn set_data_region_options(
) {
    options.remove(METRIC_ENGINE_INDEX_TYPE_OPTION);
    options.remove(METRIC_ENGINE_INDEX_SKIPPING_INDEX_GRANULARITY_OPTION);
    options.remove(METRIC_ENGINE_INDEX_SKIPPING_INDEX_FALSE_POSITIVE_RATE_OPTION);
    options.insert(
        "index.inverted_index.segment_row_count".to_string(),
        SEG_ROW_COUNT_FOR_DATA_REGION.to_string(),
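The hunk above scrubs the metric-engine-only keys, now including the false-positive-rate key, so they are never forwarded to mito, and then injects a data-region default. A sketch of that scrubbing under the assumption that the METRIC_ENGINE_* constants are plain string keys; the literal key names below are stand-ins, not the real constant values:

    use std::collections::HashMap;

    fn set_data_region_options(options: &mut HashMap<String, String>) {
        // Hypothetical key names standing in for the METRIC_ENGINE_* constants.
        for key in ["index.type", "index.granularity", "index.false_positive_rate"] {
            options.remove(key);
        }
        // Data-region default, as in the diff (SEG_ROW_COUNT_FOR_DATA_REGION = 256).
        options.insert(
            "index.inverted_index.segment_row_count".to_string(),
            256.to_string(),
        );
    }

    fn main() {
        let mut options = HashMap::from([
            ("index.type".to_string(), "skipping".to_string()),
            ("compaction.type".to_string(), "twcs".to_string()),
        ]);
        set_data_region_options(&mut options);
        // Metric-engine keys are gone; unrelated options pass through.
        assert!(options.get("index.type").is_none());
        assert!(options.get("compaction.type").is_some());
    }
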
@@ -93,7 +97,23 @@ impl TryFrom<&HashMap<String, String>> for PhysicalRegionOptions {
                    })
                },
            )?;
            Ok(IndexOptions::Skipping { granularity })
            let false_positive_rate = value
                .get(METRIC_ENGINE_INDEX_SKIPPING_INDEX_FALSE_POSITIVE_RATE_OPTION)
                .map_or(
                    Ok(METRIC_ENGINE_INDEX_SKIPPING_INDEX_FALSE_POSITIVE_RATE_OPTION_DEFAULT),
                    |f| {
                        f.parse().ok().filter(|f| *f > 0.0 && *f <= 1.0).ok_or(
                            ParseRegionOptionsSnafu {
                                reason: format!("Invalid false positive rate: {}", f),
                            }
                            .build(),
                        )
                    },
                )?;
            Ok(IndexOptions::Skipping {
                granularity,
                false_positive_rate,
            })
        }
        Some(index_type) => ParseRegionOptionsSnafu {
            reason: format!("Invalid index type: {}", index_type),
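The map_or chain above parses an optional rate string, accepts only values in (0, 1], and falls back to the constant default when the option is absent. The same validation isolated as a standalone sketch, with plain String errors instead of the engine's error type:

    fn parse_false_positive_rate(raw: Option<&str>, default: f64) -> Result<f64, String> {
        raw.map_or(Ok(default), |f| {
            f.parse::<f64>()
                .ok()
                // Reject non-numeric input and out-of-range values alike.
                .filter(|f| *f > 0.0 && *f <= 1.0)
                .ok_or(format!("Invalid false positive rate: {f}"))
        })
    }

    fn main() {
        assert_eq!(parse_false_positive_rate(None, 0.01), Ok(0.01));
        assert_eq!(parse_false_positive_rate(Some("0.5"), 0.01), Ok(0.5));
        assert!(parse_false_positive_rate(Some("1.5"), 0.01).is_err());
        assert!(parse_false_positive_rate(Some("abc"), 0.01).is_err());
    }
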
@@ -121,11 +141,16 @@ mod tests {
            METRIC_ENGINE_INDEX_SKIPPING_INDEX_GRANULARITY_OPTION.to_string(),
            "102400".to_string(),
        );
        options.insert(
            METRIC_ENGINE_INDEX_SKIPPING_INDEX_FALSE_POSITIVE_RATE_OPTION.to_string(),
            "0.01".to_string(),
        );
        set_data_region_options(&mut options, false);

        for key in [
            METRIC_ENGINE_INDEX_TYPE_OPTION,
            METRIC_ENGINE_INDEX_SKIPPING_INDEX_GRANULARITY_OPTION,
            METRIC_ENGINE_INDEX_SKIPPING_INDEX_FALSE_POSITIVE_RATE_OPTION,
        ] {
            assert_eq!(options.get(key), None);
        }
@@ -154,11 +179,16 @@ mod tests {
            METRIC_ENGINE_INDEX_SKIPPING_INDEX_GRANULARITY_OPTION.to_string(),
            "102400".to_string(),
        );
        options.insert(
            METRIC_ENGINE_INDEX_SKIPPING_INDEX_FALSE_POSITIVE_RATE_OPTION.to_string(),
            "0.01".to_string(),
        );
        let physical_region_options = PhysicalRegionOptions::try_from(&options).unwrap();
        assert_eq!(
            physical_region_options.index,
            IndexOptions::Skipping {
                granularity: 102400
                granularity: 102400,
                false_positive_rate: 0.01,
            }
        );
    }

Some files were not shown because too many files have changed in this diff.