Mirror of https://github.com/GreptimeTeam/greptimedb.git, synced 2025-12-26 16:10:02 +00:00.

Comparing commits: `feat/scann` ... `fix-topk` (77 commits)
Commits in this range (SHA1):

- 8ac57368af
- 0bc5a305be
- 1afcddd5a9
- 62808b887b
- 04ddd40e00
- b4f028be5f
- da964880f5
- a35a39f726
- e0c1566e92
- f6afb10e33
- 2dfcf35fee
- f7d5c87ac0
- 9cd57e9342
- 32f9cc5286
- 5232a12a8c
- 913ac325e5
- 0c52d5bb34
- e0697790e6
- 64e74916b9
- b601781604
- bd3ad60910
- cbfdeca64c
- baffed8c6a
- 11a5e1618d
- f5e0e94e3a
- ba4eda40e5
- f06a64ff90
- 84b4777925
- a26dee0ca1
- 276f6bf026
- 1d5291b06d
- 564cc0c750
- f1abe5d215
- ab426cbf89
- cb0f1afb01
- a22d08f1b1
- 6817a376b5
- 4d1a587079
- 9f1aefe98f
- 2f9130a2de
- fa2b4e5e63
- 9197e818ec
- 36d89c3baf
- 0ebfd161d8
- 8b26a98c3b
- 7199823be9
- 60f752d306
- edb1f6086f
- 1ebcef4794
- 2147545c90
- 84e4e42ee7
- d5c616a9ff
- f02bdf5428
- f2288a86b0
- 9d35b8cad4
- cc99f9d65b
- 11ecb7a28a
- 2a760f010f
- 63dd37dca3
- 68fff3b1aa
- 0177f244e9
- 931556dbd3
- 69f0249039
- 1f91422bae
- 377373b8fd
- e107030d85
- 18875eed4d
- ee76d50569
- 5d634aeba0
- 8346acb900
- fdab75ce27
- 4c07d2d5de
- 020477994b
- afefc0c604
- e44323c433
- 0aeaf405c7
- b5cbc35a0d
```diff
@@ -51,7 +51,7 @@ runs:
       run: |
         helm upgrade \
           --install my-greptimedb \
-          --set meta.backendStorage.etcd.endpoints=${{ inputs.etcd-endpoints }} \
+          --set 'meta.backendStorage.etcd.endpoints[0]=${{ inputs.etcd-endpoints }}' \
          --set meta.enableRegionFailover=${{ inputs.enable-region-failover }} \
          --set image.registry=${{ inputs.image-registry }} \
          --set image.repository=${{ inputs.image-repository }} \
```
.github/scripts/deploy-greptimedb.sh (vendored; 4 changes)

```diff
@@ -81,7 +81,7 @@ function deploy_greptimedb_cluster() {
    --create-namespace \
    --set image.tag="$GREPTIMEDB_IMAGE_TAG" \
    --set initializer.tag="$GREPTIMEDB_INITIALIZER_IMAGE_TAG" \
-    --set meta.backendStorage.etcd.endpoints="etcd.$install_namespace:2379" \
+    --set "meta.backendStorage.etcd.endpoints[0]=etcd.$install_namespace.svc.cluster.local:2379" \
    --set meta.backendStorage.etcd.storeKeyPrefix="$cluster_name" \
    -n "$install_namespace"
@@ -119,7 +119,7 @@ function deploy_greptimedb_cluster_with_s3_storage() {
    --create-namespace \
    --set image.tag="$GREPTIMEDB_IMAGE_TAG" \
    --set initializer.tag="$GREPTIMEDB_INITIALIZER_IMAGE_TAG" \
-    --set meta.backendStorage.etcd.endpoints="etcd.$install_namespace:2379" \
+    --set "meta.backendStorage.etcd.endpoints[0]=etcd.$install_namespace.svc.cluster.local:2379" \
    --set meta.backendStorage.etcd.storeKeyPrefix="$cluster_name" \
    --set objectStorage.s3.bucket="$AWS_CI_TEST_BUCKET" \
    --set objectStorage.s3.region="$AWS_REGION" \
```
.github/workflows/release.yml (vendored; 32 changes)

```diff
@@ -49,14 +49,9 @@ on:
        description: Do not run integration tests during the build
        type: boolean
        default: true
-      build_linux_amd64_artifacts:
+      build_linux_artifacts:
        type: boolean
-        description: Build linux-amd64 artifacts
-        required: false
-        default: false
-      build_linux_arm64_artifacts:
-        type: boolean
-        description: Build linux-arm64 artifacts
+        description: Build linux artifacts (both amd64 and arm64)
        required: false
        default: false
      build_macos_artifacts:
@@ -144,7 +139,7 @@ jobs:
          ./.github/scripts/check-version.sh "${{ steps.create-version.outputs.version }}"

      - name: Allocate linux-amd64 runner
-        if: ${{ inputs.build_linux_amd64_artifacts || github.event_name == 'push' || github.event_name == 'schedule' }}
+        if: ${{ inputs.build_linux_artifacts || github.event_name == 'push' || github.event_name == 'schedule' }}
        uses: ./.github/actions/start-runner
        id: start-linux-amd64-runner
        with:
@@ -158,7 +153,7 @@ jobs:
          subnet-id: ${{ vars.EC2_RUNNER_SUBNET_ID }}

      - name: Allocate linux-arm64 runner
-        if: ${{ inputs.build_linux_arm64_artifacts || github.event_name == 'push' || github.event_name == 'schedule' }}
+        if: ${{ inputs.build_linux_artifacts || github.event_name == 'push' || github.event_name == 'schedule' }}
        uses: ./.github/actions/start-runner
        id: start-linux-arm64-runner
        with:
@@ -173,7 +168,7 @@ jobs:

  build-linux-amd64-artifacts:
    name: Build linux-amd64 artifacts
-    if: ${{ inputs.build_linux_amd64_artifacts || github.event_name == 'push' || github.event_name == 'schedule' }}
+    if: ${{ inputs.build_linux_artifacts || github.event_name == 'push' || github.event_name == 'schedule' }}
    needs: [
      allocate-runners,
    ]
@@ -195,7 +190,7 @@ jobs:

  build-linux-arm64-artifacts:
    name: Build linux-arm64 artifacts
-    if: ${{ inputs.build_linux_arm64_artifacts || github.event_name == 'push' || github.event_name == 'schedule' }}
+    if: ${{ inputs.build_linux_artifacts || github.event_name == 'push' || github.event_name == 'schedule' }}
    needs: [
      allocate-runners,
    ]
@@ -217,7 +212,7 @@ jobs:

  run-multi-lang-tests:
    name: Run Multi-language SDK Tests
-    if: ${{ inputs.build_linux_amd64_artifacts || github.event_name == 'push' || github.event_name == 'schedule' }}
+    if: ${{ inputs.build_linux_artifacts || github.event_name == 'push' || github.event_name == 'schedule' }}
    needs: [
      allocate-runners,
      build-linux-amd64-artifacts,
```
```diff
@@ -386,7 +381,18 @@ jobs:

  publish-github-release:
    name: Create GitHub release and upload artifacts
-    if: ${{ inputs.publish_github_release || github.event_name == 'push' || github.event_name == 'schedule' }}
+    # Use always() to run even when optional jobs (macos, windows) are skipped.
+    # Then check that required jobs succeeded and optional jobs didn't fail.
+    if: |
+      always() &&
+      (inputs.publish_github_release || github.event_name == 'push' || github.event_name == 'schedule') &&
+      needs.allocate-runners.result == 'success' &&
+      (needs.build-linux-amd64-artifacts.result == 'success' || needs.build-linux-amd64-artifacts.result == 'skipped') &&
+      (needs.build-linux-arm64-artifacts.result == 'success' || needs.build-linux-arm64-artifacts.result == 'skipped') &&
+      (needs.build-macos-artifacts.result == 'success' || needs.build-macos-artifacts.result == 'skipped') &&
+      (needs.build-windows-artifacts.result == 'success' || needs.build-windows-artifacts.result == 'skipped') &&
+      (needs.release-images-to-dockerhub.result == 'success' || needs.release-images-to-dockerhub.result == 'skipped') &&
+      (needs.run-multi-lang-tests.result == 'success' || needs.run-multi-lang-tests.result == 'skipped')
    needs: [ # The job have to wait for all the artifacts are built.
      allocate-runners,
      build-linux-amd64-artifacts,
```
AUTHOR.md (64 changes)

```diff
@@ -2,41 +2,41 @@

 ## Individual Committers (in alphabetical order)

-* [CookiePieWw](https://github.com/CookiePieWw)
-* [etolbakov](https://github.com/etolbakov)
-* [irenjj](https://github.com/irenjj)
-* [KKould](https://github.com/KKould)
-* [Lanqing Yang](https://github.com/lyang24)
-* [NiwakaDev](https://github.com/NiwakaDev)
-* [tisonkun](https://github.com/tisonkun)
+- [apdong2022](https://github.com/apdong2022)
+- [beryl678](https://github.com/beryl678)
+- [CookiePieWw](https://github.com/CookiePieWw)
+- [etolbakov](https://github.com/etolbakov)
+- [irenjj](https://github.com/irenjj)
+- [KKould](https://github.com/KKould)
+- [Lanqing Yang](https://github.com/lyang24)
+- [nicecui](https://github.com/nicecui)
+- [NiwakaDev](https://github.com/NiwakaDev)
+- [paomian](https://github.com/paomian)
+- [tisonkun](https://github.com/tisonkun)
+- [Wenjie0329](https://github.com/Wenjie0329)
+- [zhaoyingnan01](https://github.com/zhaoyingnan01)
+- [zhongzc](https://github.com/zhongzc)
+- [ZonaHex](https://github.com/ZonaHex)
+- [zyy17](https://github.com/zyy17)

 ## Team Members (in alphabetical order)

-* [apdong2022](https://github.com/apdong2022)
-* [beryl678](https://github.com/beryl678)
-* [daviderli614](https://github.com/daviderli614)
-* [discord9](https://github.com/discord9)
-* [evenyag](https://github.com/evenyag)
-* [fengjiachun](https://github.com/fengjiachun)
-* [fengys1996](https://github.com/fengys1996)
-* [GrepTime](https://github.com/GrepTime)
-* [holalengyu](https://github.com/holalengyu)
-* [killme2008](https://github.com/killme2008)
-* [MichaelScofield](https://github.com/MichaelScofield)
-* [nicecui](https://github.com/nicecui)
-* [paomian](https://github.com/paomian)
-* [shuiyisong](https://github.com/shuiyisong)
-* [sunchanglong](https://github.com/sunchanglong)
-* [sunng87](https://github.com/sunng87)
-* [v0y4g3r](https://github.com/v0y4g3r)
-* [waynexia](https://github.com/waynexia)
-* [Wenjie0329](https://github.com/Wenjie0329)
-* [WenyXu](https://github.com/WenyXu)
-* [xtang](https://github.com/xtang)
-* [zhaoyingnan01](https://github.com/zhaoyingnan01)
-* [zhongzc](https://github.com/zhongzc)
-* [ZonaHex](https://github.com/ZonaHex)
-* [zyy17](https://github.com/zyy17)
+- [daviderli614](https://github.com/daviderli614)
+- [discord9](https://github.com/discord9)
+- [evenyag](https://github.com/evenyag)
+- [fengjiachun](https://github.com/fengjiachun)
+- [fengys1996](https://github.com/fengys1996)
+- [GrepTime](https://github.com/GrepTime)
+- [holalengyu](https://github.com/holalengyu)
+- [killme2008](https://github.com/killme2008)
+- [MichaelScofield](https://github.com/MichaelScofield)
+- [shuiyisong](https://github.com/shuiyisong)
+- [sunchanglong](https://github.com/sunchanglong)
+- [sunng87](https://github.com/sunng87)
+- [v0y4g3r](https://github.com/v0y4g3r)
+- [waynexia](https://github.com/waynexia)
+- [WenyXu](https://github.com/WenyXu)
+- [xtang](https://github.com/xtang)

 ## All Contributors
```
Cargo.lock (generated; 332 changes) — diff suppressed because it is too large.
Cargo.toml (12 changes)

```diff
@@ -21,6 +21,7 @@ members = [
     "src/common/grpc-expr",
     "src/common/macro",
     "src/common/mem-prof",
+    "src/common/memory-manager",
     "src/common/meta",
     "src/common/options",
     "src/common/plugins",
@@ -74,7 +75,7 @@ members = [
 resolver = "2"

 [workspace.package]
-version = "1.0.0-beta.2"
+version = "1.0.0-beta.3"
 edition = "2024"
 license = "Apache-2.0"
@@ -131,7 +132,7 @@ datafusion-functions = "50"
 datafusion-functions-aggregate-common = "50"
 datafusion-optimizer = "50"
 datafusion-orc = "0.5"
-datafusion-pg-catalog = "0.12.1"
+datafusion-pg-catalog = "0.12.3"
 datafusion-physical-expr = "50"
 datafusion-physical-plan = "50"
 datafusion-sql = "50"
@@ -139,6 +140,7 @@ datafusion-substrait = "50"
 deadpool = "0.12"
 deadpool-postgres = "0.14"
 derive_builder = "0.20"
+derive_more = { version = "2.1", features = ["full"] }
 dotenv = "0.15"
 either = "1.15"
 etcd-client = { git = "https://github.com/GreptimeTeam/etcd-client", rev = "f62df834f0cffda355eba96691fe1a9a332b75a7", features = [
@@ -148,7 +150,7 @@ etcd-client = { git = "https://github.com/GreptimeTeam/etcd-client", rev = "f62d
 fst = "0.4.7"
 futures = "0.3"
 futures-util = "0.3"
-greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "0df99f09f1d6785055b2d9da96fc4ecc2bdf6803" }
+greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "0423fa30203187c75e2937a668df1da699c8b96c" }
 hex = "0.4"
 http = "1"
 humantime = "2.1"
@@ -200,7 +202,8 @@ reqwest = { version = "0.12", default-features = false, features = [
     "stream",
     "multipart",
 ] }
-rskafka = { git = "https://github.com/WenyXu/rskafka.git", rev = "7b0f31ed39db049b4ee2e5f1e95b5a30be9baf76", features = [
+# Branch: feat/request-timeout
+rskafka = { git = "https://github.com/GreptimeTeam/rskafka.git", rev = "f5688f83e7da591cda3f2674c2408b4c0ed4ed50", features = [
     "transport-tls",
 ] }
 rstest = "0.25"
@@ -264,6 +267,7 @@ common-grpc = { path = "src/common/grpc" }
 common-grpc-expr = { path = "src/common/grpc-expr" }
 common-macro = { path = "src/common/macro" }
 common-mem-prof = { path = "src/common/mem-prof" }
+common-memory-manager = { path = "src/common/memory-manager" }
 common-meta = { path = "src/common/meta" }
 common-options = { path = "src/common/options" }
 common-plugins = { path = "src/common/plugins" }
```
```diff
@@ -108,9 +108,6 @@
 | `storage` | -- | -- | The data storage options. |
 | `storage.data_home` | String | `./greptimedb_data` | The working home directory. |
 | `storage.type` | String | `File` | The storage type used to store the data.<br/>- `File`: the data is stored in the local file system.<br/>- `S3`: the data is stored in the S3 object storage.<br/>- `Gcs`: the data is stored in the Google Cloud Storage.<br/>- `Azblob`: the data is stored in the Azure Blob Storage.<br/>- `Oss`: the data is stored in the Aliyun OSS. |
-| `storage.enable_read_cache` | Bool | `true` | Whether to enable read cache. If not set, the read cache will be enabled by default when using object storage. |
-| `storage.cache_path` | String | Unset | Read cache configuration for object storage such as 'S3' etc, it's configured by default when using object storage. It is recommended to configure it when using object storage for better performance.<br/>A local file directory, defaults to `{data_home}`. An empty string means disabling. |
-| `storage.cache_capacity` | String | Unset | The local file cache capacity in bytes. If your disk space is sufficient, it is recommended to set it larger. |
 | `storage.bucket` | String | Unset | The S3 bucket name.<br/>**It's only used when the storage type is `S3`, `Oss` and `Gcs`**. |
 | `storage.root` | String | Unset | The S3 data will be stored in the specified prefix, for example, `s3://${bucket}/${root}`.<br/>**It's only used when the storage type is `S3`, `Oss` and `Azblob`**. |
 | `storage.access_key_id` | String | Unset | The access key id of the aws account.<br/>It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.<br/>**It's only used when the storage type is `S3` and `Oss`**. |
@@ -141,6 +138,8 @@
 | `region_engine.mito.max_background_flushes` | Integer | Auto | Max number of running background flush jobs (default: 1/2 of cpu cores). |
 | `region_engine.mito.max_background_compactions` | Integer | Auto | Max number of running background compaction jobs (default: 1/4 of cpu cores). |
 | `region_engine.mito.max_background_purges` | Integer | Auto | Max number of running background purge jobs (default: number of cpu cores). |
+| `region_engine.mito.experimental_compaction_memory_limit` | String | 0 | Memory budget for compaction tasks. Setting it to 0 or "unlimited" disables the limit. |
+| `region_engine.mito.experimental_compaction_on_exhausted` | String | wait | Behavior when compaction cannot acquire memory from the budget.<br/>Options: "wait" (default, 10s), "wait(<duration>)", "fail" |
 | `region_engine.mito.auto_flush_interval` | String | `1h` | Interval to auto flush a region if it has not flushed yet. |
 | `region_engine.mito.global_write_buffer_size` | String | Auto | Global write buffer size for all regions. If not set, it's default to 1/8 of OS memory with a max limitation of 1GB. |
 | `region_engine.mito.global_write_buffer_reject_size` | String | Auto | Global write buffer size threshold to reject write requests. If not set, it's default to 2 times of `global_write_buffer_size`. |
@@ -154,6 +153,8 @@
 | `region_engine.mito.write_cache_ttl` | String | Unset | TTL for write cache. |
 | `region_engine.mito.preload_index_cache` | Bool | `true` | Preload index (puffin) files into cache on region open (default: true).<br/>When enabled, index files are loaded into the write cache during region initialization,<br/>which can improve query performance at the cost of longer startup times. |
 | `region_engine.mito.index_cache_percent` | Integer | `20` | Percentage of write cache capacity allocated for index (puffin) files (default: 20).<br/>The remaining capacity is used for data (parquet) files.<br/>Must be between 0 and 100 (exclusive). For example, with a 5GiB write cache and 20% allocation,<br/>1GiB is reserved for index files and 4GiB for data files. |
+| `region_engine.mito.enable_refill_cache_on_read` | Bool | `true` | Enable refilling cache on read operations (default: true).<br/>When disabled, cache refilling on read won't happen. |
+| `region_engine.mito.manifest_cache_size` | String | `256MB` | Capacity for manifest cache (default: 256MB). |
 | `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
 | `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
 | `region_engine.mito.max_concurrent_scan_files` | Integer | `384` | Maximum number of SST files to scan concurrently. |
@@ -294,7 +295,6 @@
 | `meta_client` | -- | -- | The metasrv client options. |
 | `meta_client.metasrv_addrs` | Array | -- | The addresses of the metasrv. |
 | `meta_client.timeout` | String | `3s` | Operation timeout. |
 | `meta_client.heartbeat_timeout` | String | `500ms` | Heartbeat timeout. |
-| `meta_client.ddl_timeout` | String | `10s` | DDL timeout. |
 | `meta_client.connect_timeout` | String | `1s` | Connect server timeout. |
 | `meta_client.tcp_nodelay` | Bool | `true` | `TCP_NODELAY` option for accepted connections. |
@@ -457,7 +457,6 @@
 | `meta_client` | -- | -- | The metasrv client options. |
 | `meta_client.metasrv_addrs` | Array | -- | The addresses of the metasrv. |
 | `meta_client.timeout` | String | `3s` | Operation timeout. |
 | `meta_client.heartbeat_timeout` | String | `500ms` | Heartbeat timeout. |
-| `meta_client.ddl_timeout` | String | `10s` | DDL timeout. |
 | `meta_client.connect_timeout` | String | `1s` | Connect server timeout. |
 | `meta_client.tcp_nodelay` | Bool | `true` | `TCP_NODELAY` option for accepted connections. |
@@ -488,9 +487,6 @@
 | `storage` | -- | -- | The data storage options. |
 | `storage.data_home` | String | `./greptimedb_data` | The working home directory. |
 | `storage.type` | String | `File` | The storage type used to store the data.<br/>- `File`: the data is stored in the local file system.<br/>- `S3`: the data is stored in the S3 object storage.<br/>- `Gcs`: the data is stored in the Google Cloud Storage.<br/>- `Azblob`: the data is stored in the Azure Blob Storage.<br/>- `Oss`: the data is stored in the Aliyun OSS. |
-| `storage.cache_path` | String | Unset | Read cache configuration for object storage such as 'S3' etc, it's configured by default when using object storage. It is recommended to configure it when using object storage for better performance.<br/>A local file directory, defaults to `{data_home}`. An empty string means disabling. |
-| `storage.enable_read_cache` | Bool | `true` | Whether to enable read cache. If not set, the read cache will be enabled by default when using object storage. |
-| `storage.cache_capacity` | String | Unset | The local file cache capacity in bytes. If your disk space is sufficient, it is recommended to set it larger. |
 | `storage.bucket` | String | Unset | The S3 bucket name.<br/>**It's only used when the storage type is `S3`, `Oss` and `Gcs`**. |
 | `storage.root` | String | Unset | The S3 data will be stored in the specified prefix, for example, `s3://${bucket}/${root}`.<br/>**It's only used when the storage type is `S3`, `Oss` and `Azblob`**. |
 | `storage.access_key_id` | String | Unset | The access key id of the aws account.<br/>It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.<br/>**It's only used when the storage type is `S3` and `Oss`**. |
@@ -523,6 +519,8 @@
 | `region_engine.mito.max_background_flushes` | Integer | Auto | Max number of running background flush jobs (default: 1/2 of cpu cores). |
 | `region_engine.mito.max_background_compactions` | Integer | Auto | Max number of running background compaction jobs (default: 1/4 of cpu cores). |
 | `region_engine.mito.max_background_purges` | Integer | Auto | Max number of running background purge jobs (default: number of cpu cores). |
+| `region_engine.mito.experimental_compaction_memory_limit` | String | 0 | Memory budget for compaction tasks. Setting it to 0 or "unlimited" disables the limit. |
+| `region_engine.mito.experimental_compaction_on_exhausted` | String | wait | Behavior when compaction cannot acquire memory from the budget.<br/>Options: "wait" (default, 10s), "wait(<duration>)", "fail" |
 | `region_engine.mito.auto_flush_interval` | String | `1h` | Interval to auto flush a region if it has not flushed yet. |
 | `region_engine.mito.global_write_buffer_size` | String | Auto | Global write buffer size for all regions. If not set, it's default to 1/8 of OS memory with a max limitation of 1GB. |
 | `region_engine.mito.global_write_buffer_reject_size` | String | Auto | Global write buffer size threshold to reject write requests. If not set, it's default to 2 times of `global_write_buffer_size` |
@@ -536,6 +534,8 @@
 | `region_engine.mito.write_cache_ttl` | String | Unset | TTL for write cache. |
 | `region_engine.mito.preload_index_cache` | Bool | `true` | Preload index (puffin) files into cache on region open (default: true).<br/>When enabled, index files are loaded into the write cache during region initialization,<br/>which can improve query performance at the cost of longer startup times. |
 | `region_engine.mito.index_cache_percent` | Integer | `20` | Percentage of write cache capacity allocated for index (puffin) files (default: 20).<br/>The remaining capacity is used for data (parquet) files.<br/>Must be between 0 and 100 (exclusive). For example, with a 5GiB write cache and 20% allocation,<br/>1GiB is reserved for index files and 4GiB for data files. |
+| `region_engine.mito.enable_refill_cache_on_read` | Bool | `true` | Enable refilling cache on read operations (default: true).<br/>When disabled, cache refilling on read won't happen. |
+| `region_engine.mito.manifest_cache_size` | String | `256MB` | Capacity for manifest cache (default: 256MB). |
 | `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
 | `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
 | `region_engine.mito.max_concurrent_scan_files` | Integer | `384` | Maximum number of SST files to scan concurrently. |
@@ -629,7 +629,6 @@
 | `meta_client` | -- | -- | The metasrv client options. |
 | `meta_client.metasrv_addrs` | Array | -- | The addresses of the metasrv. |
 | `meta_client.timeout` | String | `3s` | Operation timeout. |
 | `meta_client.heartbeat_timeout` | String | `500ms` | Heartbeat timeout. |
-| `meta_client.ddl_timeout` | String | `10s` | DDL timeout. |
 | `meta_client.connect_timeout` | String | `1s` | Connect server timeout. |
 | `meta_client.tcp_nodelay` | Bool | `true` | `TCP_NODELAY` option for accepted connections. |
```
```diff
@@ -99,9 +99,6 @@ metasrv_addrs = ["127.0.0.1:3002"]
 ## Operation timeout.
 timeout = "3s"

 ## Heartbeat timeout.
 heartbeat_timeout = "500ms"

-## DDL timeout.
-ddl_timeout = "10s"
-
@@ -284,18 +281,6 @@ data_home = "./greptimedb_data"
 ## - `Oss`: the data is stored in the Aliyun OSS.
 type = "File"

-## Read cache configuration for object storage such as 'S3' etc, it's configured by default when using object storage. It is recommended to configure it when using object storage for better performance.
-## A local file directory, defaults to `{data_home}`. An empty string means disabling.
-## @toml2docs:none-default
-#+ cache_path = ""
-
-## Whether to enable read cache. If not set, the read cache will be enabled by default when using object storage.
-#+ enable_read_cache = true
-
-## The local file cache capacity in bytes. If your disk space is sufficient, it is recommended to set it larger.
-## @toml2docs:none-default
-cache_capacity = "5GiB"
-
 ## The S3 bucket name.
 ## **It's only used when the storage type is `S3`, `Oss` and `Gcs`**.
 ## @toml2docs:none-default
@@ -455,6 +440,15 @@ compress_manifest = false
 ## @toml2docs:none-default="Auto"
 #+ max_background_purges = 8

+## Memory budget for compaction tasks. Setting it to 0 or "unlimited" disables the limit.
+## @toml2docs:none-default="0"
+#+ experimental_compaction_memory_limit = "0"
+
+## Behavior when compaction cannot acquire memory from the budget.
+## Options: "wait" (default, 10s), "wait(<duration>)", "fail"
+## @toml2docs:none-default="wait"
+#+ experimental_compaction_on_exhausted = "wait"
+
 ## Interval to auto flush a region if it has not flushed yet.
 auto_flush_interval = "1h"
@@ -510,6 +504,13 @@ preload_index_cache = true
 ## 1GiB is reserved for index files and 4GiB for data files.
 index_cache_percent = 20

+## Enable refilling cache on read operations (default: true).
+## When disabled, cache refilling on read won't happen.
+enable_refill_cache_on_read = true
+
+## Capacity for manifest cache (default: 256MB).
+manifest_cache_size = "256MB"
+
 ## Buffer size for SST writing.
 sst_write_buffer_size = "8MB"
@@ -78,9 +78,6 @@ metasrv_addrs = ["127.0.0.1:3002"]
 ## Operation timeout.
 timeout = "3s"

 ## Heartbeat timeout.
 heartbeat_timeout = "500ms"

-## DDL timeout.
-ddl_timeout = "10s"
-
@@ -226,9 +226,6 @@ metasrv_addrs = ["127.0.0.1:3002"]
 ## Operation timeout.
 timeout = "3s"

 ## Heartbeat timeout.
 heartbeat_timeout = "500ms"

-## DDL timeout.
-ddl_timeout = "10s"
-
@@ -388,18 +388,6 @@ data_home = "./greptimedb_data"
 ## - `Oss`: the data is stored in the Aliyun OSS.
 type = "File"

-## Whether to enable read cache. If not set, the read cache will be enabled by default when using object storage.
-#+ enable_read_cache = true
-
-## Read cache configuration for object storage such as 'S3' etc, it's configured by default when using object storage. It is recommended to configure it when using object storage for better performance.
-## A local file directory, defaults to `{data_home}`. An empty string means disabling.
-## @toml2docs:none-default
-#+ cache_path = ""
-
-## The local file cache capacity in bytes. If your disk space is sufficient, it is recommended to set it larger.
-## @toml2docs:none-default
-cache_capacity = "5GiB"
-
 ## The S3 bucket name.
 ## **It's only used when the storage type is `S3`, `Oss` and `Gcs`**.
 ## @toml2docs:none-default
@@ -546,6 +534,15 @@ compress_manifest = false
 ## @toml2docs:none-default="Auto"
 #+ max_background_purges = 8

+## Memory budget for compaction tasks. Setting it to 0 or "unlimited" disables the limit.
+## @toml2docs:none-default="0"
+#+ experimental_compaction_memory_limit = "0"
+
+## Behavior when compaction cannot acquire memory from the budget.
+## Options: "wait" (default, 10s), "wait(<duration>)", "fail"
+## @toml2docs:none-default="wait"
+#+ experimental_compaction_on_exhausted = "wait"
+
 ## Interval to auto flush a region if it has not flushed yet.
 auto_flush_interval = "1h"
@@ -601,6 +598,13 @@ preload_index_cache = true
 ## 1GiB is reserved for index files and 4GiB for data files.
 index_cache_percent = 20

+## Enable refilling cache on read operations (default: true).
+## When disabled, cache refilling on read won't happen.
+enable_refill_cache_on_read = true
+
+## Capacity for manifest cache (default: 256MB).
+manifest_cache_size = "256MB"
+
 ## Buffer size for SST writing.
 sst_write_buffer_size = "8MB"
```
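The new `experimental_compaction_on_exhausted` option accepts `"wait"` (10s default), `"wait(<duration>)"`, or `"fail"`. A minimal Rust sketch of how such a value could be parsed — the enum and function are illustrative stand-ins, not the actual mito-engine types; only `humantime` (already a workspace dependency) is a real API here:

```rust
use std::time::Duration;

/// Illustrative option type; the real one in the mito engine may differ.
#[derive(Debug, PartialEq)]
enum OnExhausted {
    /// Wait up to the given duration for compaction budget to free up.
    Wait(Duration),
    /// Fail the compaction task immediately.
    Fail,
}

fn parse_on_exhausted(s: &str) -> Result<OnExhausted, String> {
    match s.trim() {
        // Bare "wait" uses the documented 10s default.
        "wait" => Ok(OnExhausted::Wait(Duration::from_secs(10))),
        "fail" => Ok(OnExhausted::Fail),
        other => {
            // Accept the "wait(<duration>)" form, e.g. "wait(30s)".
            if let Some(inner) = other.strip_prefix("wait(").and_then(|r| r.strip_suffix(')')) {
                humantime::parse_duration(inner)
                    .map(OnExhausted::Wait)
                    .map_err(|e| e.to_string())
            } else {
                Err(format!("invalid on_exhausted value: {other}"))
            }
        }
    }
}
```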
flake.lock (generated; 20 changes)

```diff
@@ -8,11 +8,11 @@
         "rust-analyzer-src": "rust-analyzer-src"
       },
       "locked": {
-        "lastModified": 1760078406,
-        "narHash": "sha256-JeJK0ZA845PtkCHkfo4KjeI1mYrsr2s3cxBYKhF4BoE=",
+        "lastModified": 1765252472,
+        "narHash": "sha256-byMt/uMi7DJ8tRniFopDFZMO3leSjGp6GS4zWOFT+uQ=",
         "owner": "nix-community",
         "repo": "fenix",
-        "rev": "351277c60d104944122ee389cdf581c5ce2c6732",
+        "rev": "8456b985f6652e3eef0632ee9992b439735c5544",
         "type": "github"
       },
       "original": {
@@ -41,16 +41,16 @@
     },
     "nixpkgs": {
       "locked": {
-        "lastModified": 1759994382,
-        "narHash": "sha256-wSK+3UkalDZRVHGCRikZ//CyZUJWDJkBDTQX1+G77Ow=",
+        "lastModified": 1764983851,
+        "narHash": "sha256-y7RPKl/jJ/KAP/VKLMghMgXTlvNIJMHKskl8/Uuar7o=",
         "owner": "NixOS",
         "repo": "nixpkgs",
-        "rev": "5da4a26309e796daa7ffca72df93dbe53b8164c7",
+        "rev": "d9bc5c7dceb30d8d6fafa10aeb6aa8a48c218454",
         "type": "github"
       },
       "original": {
         "owner": "NixOS",
-        "ref": "nixos-25.05",
+        "ref": "nixos-25.11",
         "repo": "nixpkgs",
         "type": "github"
       }
@@ -65,11 +65,11 @@
     "rust-analyzer-src": {
       "flake": false,
       "locked": {
-        "lastModified": 1760014945,
-        "narHash": "sha256-ySdl7F9+oeWNHVrg3QL/brazqmJvYFEdpGnF3pyoDH8=",
+        "lastModified": 1765120009,
+        "narHash": "sha256-nG76b87rkaDzibWbnB5bYDm6a52b78A+fpm+03pqYIw=",
         "owner": "rust-lang",
         "repo": "rust-analyzer",
-        "rev": "90d2e1ce4dfe7dc49250a8b88a0f08ffdb9cb23f",
+        "rev": "5e3e9c4e61bba8a5e72134b9ffefbef8f531d008",
         "type": "github"
       },
       "original": {
```
```diff
@@ -2,7 +2,7 @@
 description = "Development environment flake";

 inputs = {
-    nixpkgs.url = "github:NixOS/nixpkgs/nixos-25.05";
+    nixpkgs.url = "github:NixOS/nixpkgs/nixos-25.11";
     fenix = {
       url = "github:nix-community/fenix";
       inputs.nixpkgs.follows = "nixpkgs";
@@ -48,7 +48,7 @@
             gnuplot ## for cargo bench
           ];

           LD_LIBRARY_PATH = pkgs.lib.makeLibraryPath buildInputs;
-          buildInputs = buildInputs;
+          NIX_HARDENING_ENABLE = "";
         };
       });
```
```diff
@@ -23,11 +23,9 @@ use common_time::{Date, IntervalDayTime, IntervalMonthDayNano, IntervalYearMonth
 use datatypes::json::value::{JsonNumber, JsonValue, JsonValueRef, JsonVariant};
 use datatypes::prelude::{ConcreteDataType, ValueRef};
 use datatypes::types::{
-    IntervalType, JsonFormat, StructField, StructType, TimeType, TimestampType,
+    IntervalType, JsonFormat, JsonType, StructField, StructType, TimeType, TimestampType,
 };
-use datatypes::value::{
-    ListValue, ListValueRef, OrderedF32, OrderedF64, StructValue, StructValueRef, Value,
-};
+use datatypes::value::{ListValueRef, OrderedF32, OrderedF64, StructValueRef, Value};
 use datatypes::vectors::VectorRef;
 use greptime_proto::v1::column_data_type_extension::TypeExt;
 use greptime_proto::v1::ddl_request::Expr;
```
```diff
@@ -82,6 +80,10 @@ impl ColumnDataTypeWrapper {
     pub fn to_parts(&self) -> (ColumnDataType, Option<ColumnDataTypeExtension>) {
         (self.datatype, self.datatype_ext.clone())
     }
+
+    pub fn into_parts(self) -> (ColumnDataType, Option<ColumnDataTypeExtension>) {
+        (self.datatype, self.datatype_ext)
+    }
 }

 impl From<ColumnDataTypeWrapper> for ConcreteDataType {
```
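The new `into_parts` consumes the wrapper and moves the extension out, where `to_parts` borrows and clones it. A minimal sketch of the intended usage; the caller is hypothetical:

```rust
// Sketch only: avoids cloning the Option<ColumnDataTypeExtension> when the
// wrapper is no longer needed afterwards.
fn split(wrapper: ColumnDataTypeWrapper) -> (ColumnDataType, Option<ColumnDataTypeExtension>) {
    // to_parts(&self) would clone datatype_ext; into_parts(self) moves it.
    wrapper.into_parts()
}
```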
```diff
@@ -127,6 +129,7 @@ impl From<ColumnDataTypeWrapper> for ConcreteDataType {
                 };
                 ConcreteDataType::json_native_datatype(inner_type.into())
             }
+            None => ConcreteDataType::Json(JsonType::null()),
             _ => {
                 // invalid state, type extension is missing or invalid
                 ConcreteDataType::null_datatype()
```
```diff
@@ -441,18 +444,22 @@ impl TryFrom<ConcreteDataType> for ColumnDataTypeWrapper {
             JsonFormat::Jsonb => Some(ColumnDataTypeExtension {
                 type_ext: Some(TypeExt::JsonType(JsonTypeExtension::JsonBinary.into())),
             }),
-            JsonFormat::Native(inner) => {
-                let inner_type = ColumnDataTypeWrapper::try_from(
-                    ConcreteDataType::from(inner.as_ref()),
-                )?;
-                Some(ColumnDataTypeExtension {
-                    type_ext: Some(TypeExt::JsonNativeType(Box::new(
-                        JsonNativeTypeExtension {
-                            datatype: inner_type.datatype.into(),
-                            datatype_extension: inner_type.datatype_ext.map(Box::new),
-                        },
-                    ))),
-                })
+            JsonFormat::Native(native_type) => {
+                if native_type.is_null() {
+                    None
+                } else {
+                    let native_type = ConcreteDataType::from(native_type.as_ref());
+                    let (datatype, datatype_extension) =
+                        ColumnDataTypeWrapper::try_from(native_type)?.into_parts();
+                    Some(ColumnDataTypeExtension {
+                        type_ext: Some(TypeExt::JsonNativeType(Box::new(
+                            JsonNativeTypeExtension {
+                                datatype: datatype as i32,
+                                datatype_extension: datatype_extension.map(Box::new),
+                            },
+                        ))),
+                    })
+                }
             }
         }
     } else {
```
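Together with the `None => ConcreteDataType::Json(JsonType::null())` arm added earlier, this makes the null native JSON type round-trip: encoding it produces no type extension, and a missing extension decodes back to `JsonType::null()`. A test-style sketch of that round trip, assuming these API shapes:

```rust
// Sketch (assumed API shapes) of the round trip implied by the two hunks above.
fn roundtrip_null_native_json() {
    let json = ConcreteDataType::Json(JsonType::null());
    let wrapper = ColumnDataTypeWrapper::try_from(json.clone()).unwrap();
    // A null native JSON type now serializes with no type extension...
    let (_, ext) = wrapper.to_parts();
    assert!(ext.is_none());
    // ...and a missing extension deserializes back to JsonType::null().
    assert_eq!(ConcreteDataType::from(wrapper), json);
}
```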
```diff
@@ -701,6 +708,7 @@ fn ddl_request_type(request: &DdlRequest) -> &'static str {
         Some(Expr::CreateView(_)) => "ddl.create_view",
         Some(Expr::DropView(_)) => "ddl.drop_view",
         Some(Expr::AlterDatabase(_)) => "ddl.alter_database",
+        Some(Expr::CommentOn(_)) => "ddl.comment_on",
         None => "ddl.empty",
     }
 }
```
```diff
@@ -887,111 +895,6 @@ pub fn is_column_type_value_eq(
         .unwrap_or(false)
 }

-/// Convert value into proto's value.
-pub fn to_proto_value(value: Value) -> v1::Value {
-    match value {
-        Value::Null => v1::Value { value_data: None },
-        Value::Boolean(v) => v1::Value {
-            value_data: Some(ValueData::BoolValue(v)),
-        },
-        Value::UInt8(v) => v1::Value {
-            value_data: Some(ValueData::U8Value(v.into())),
-        },
-        Value::UInt16(v) => v1::Value {
-            value_data: Some(ValueData::U16Value(v.into())),
-        },
-        Value::UInt32(v) => v1::Value {
-            value_data: Some(ValueData::U32Value(v)),
-        },
-        Value::UInt64(v) => v1::Value {
-            value_data: Some(ValueData::U64Value(v)),
-        },
-        Value::Int8(v) => v1::Value {
-            value_data: Some(ValueData::I8Value(v.into())),
-        },
-        Value::Int16(v) => v1::Value {
-            value_data: Some(ValueData::I16Value(v.into())),
-        },
-        Value::Int32(v) => v1::Value {
-            value_data: Some(ValueData::I32Value(v)),
-        },
-        Value::Int64(v) => v1::Value {
-            value_data: Some(ValueData::I64Value(v)),
-        },
-        Value::Float32(v) => v1::Value {
-            value_data: Some(ValueData::F32Value(*v)),
-        },
-        Value::Float64(v) => v1::Value {
-            value_data: Some(ValueData::F64Value(*v)),
-        },
-        Value::String(v) => v1::Value {
-            value_data: Some(ValueData::StringValue(v.as_utf8().to_string())),
-        },
-        Value::Binary(v) => v1::Value {
-            value_data: Some(ValueData::BinaryValue(v.to_vec())),
-        },
-        Value::Date(v) => v1::Value {
-            value_data: Some(ValueData::DateValue(v.val())),
-        },
-        Value::Timestamp(v) => match v.unit() {
-            TimeUnit::Second => v1::Value {
-                value_data: Some(ValueData::TimestampSecondValue(v.value())),
-            },
-            TimeUnit::Millisecond => v1::Value {
-                value_data: Some(ValueData::TimestampMillisecondValue(v.value())),
-            },
-            TimeUnit::Microsecond => v1::Value {
-                value_data: Some(ValueData::TimestampMicrosecondValue(v.value())),
-            },
-            TimeUnit::Nanosecond => v1::Value {
-                value_data: Some(ValueData::TimestampNanosecondValue(v.value())),
-            },
-        },
-        Value::Time(v) => match v.unit() {
-            TimeUnit::Second => v1::Value {
-                value_data: Some(ValueData::TimeSecondValue(v.value())),
-            },
-            TimeUnit::Millisecond => v1::Value {
-                value_data: Some(ValueData::TimeMillisecondValue(v.value())),
-            },
-            TimeUnit::Microsecond => v1::Value {
-                value_data: Some(ValueData::TimeMicrosecondValue(v.value())),
-            },
-            TimeUnit::Nanosecond => v1::Value {
-                value_data: Some(ValueData::TimeNanosecondValue(v.value())),
-            },
-        },
-        Value::IntervalYearMonth(v) => v1::Value {
-            value_data: Some(ValueData::IntervalYearMonthValue(v.to_i32())),
-        },
-        Value::IntervalDayTime(v) => v1::Value {
-            value_data: Some(ValueData::IntervalDayTimeValue(v.to_i64())),
-        },
-        Value::IntervalMonthDayNano(v) => v1::Value {
-            value_data: Some(ValueData::IntervalMonthDayNanoValue(
-                convert_month_day_nano_to_pb(v),
-            )),
-        },
-        Value::Decimal128(v) => v1::Value {
-            value_data: Some(ValueData::Decimal128Value(convert_to_pb_decimal128(v))),
-        },
-        Value::List(list_value) => v1::Value {
-            value_data: Some(ValueData::ListValue(v1::ListValue {
-                items: convert_list_to_pb_values(list_value),
-            })),
-        },
-        Value::Struct(struct_value) => v1::Value {
-            value_data: Some(ValueData::StructValue(v1::StructValue {
-                items: convert_struct_to_pb_values(struct_value),
-            })),
-        },
-        Value::Json(v) => v1::Value {
-            value_data: Some(ValueData::JsonValue(encode_json_value(*v))),
-        },
-        Value::Duration(_) => v1::Value { value_data: None },
-    }
-}
-
 fn encode_json_value(value: JsonValue) -> v1::JsonValue {
     fn helper(json: JsonVariant) -> v1::JsonValue {
         let value = match json {
```
```diff
@@ -1052,22 +955,6 @@ fn decode_json_value(value: &v1::JsonValue) -> JsonValueRef<'_> {
     }
 }

-fn convert_list_to_pb_values(list_value: ListValue) -> Vec<v1::Value> {
-    list_value
-        .take_items()
-        .into_iter()
-        .map(to_proto_value)
-        .collect()
-}
-
-fn convert_struct_to_pb_values(struct_value: StructValue) -> Vec<v1::Value> {
-    struct_value
-        .take_items()
-        .into_iter()
-        .map(to_proto_value)
-        .collect()
-}
-
 /// Returns the [ColumnDataTypeWrapper] of the value.
 ///
 /// If value is null, returns `None`.
```
```diff
@@ -1114,14 +1001,14 @@ pub fn vectors_to_rows<'a>(
     let mut rows = vec![Row { values: vec![] }; row_count];
     for column in columns {
         for (row_index, row) in rows.iter_mut().enumerate() {
-            row.values.push(value_to_grpc_value(column.get(row_index)))
+            row.values.push(to_grpc_value(column.get(row_index)))
         }
     }

     rows
 }

-pub fn value_to_grpc_value(value: Value) -> GrpcValue {
+pub fn to_grpc_value(value: Value) -> GrpcValue {
     GrpcValue {
         value_data: match value {
             Value::Null => None,
@@ -1161,7 +1048,7 @@ pub fn value_to_grpc_value(value: Value) -> GrpcValue {
                 let items = list_value
                     .take_items()
                     .into_iter()
-                    .map(value_to_grpc_value)
+                    .map(to_grpc_value)
                     .collect();
                 Some(ValueData::ListValue(v1::ListValue { items }))
             }
@@ -1169,7 +1056,7 @@ pub fn value_to_grpc_value(value: Value) -> GrpcValue {
                 let items = struct_value
                     .take_items()
                     .into_iter()
-                    .map(value_to_grpc_value)
+                    .map(to_grpc_value)
                     .collect();
                 Some(ValueData::StructValue(v1::StructValue { items }))
             }
```
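With the duplicate `to_proto_value` and its `convert_*_to_pb_values` helpers removed, `to_grpc_value` (formerly `value_to_grpc_value`) is the single `Value`-to-proto entry point, and nested list/struct values recurse through it. A minimal usage sketch, assuming the surrounding module's types are in scope:

```rust
// Sketch: encode a row of Values through the one remaining conversion path.
fn encode_row(values: Vec<Value>) -> Vec<GrpcValue> {
    values.into_iter().map(to_grpc_value).collect()
}
```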
```diff
@@ -1269,6 +1156,7 @@ mod tests {
     use common_time::interval::IntervalUnit;
     use datatypes::scalars::ScalarVector;
     use datatypes::types::{Int8Type, Int32Type, UInt8Type, UInt32Type};
+    use datatypes::value::{ListValue, StructValue};
     use datatypes::vectors::{
         BooleanVector, DateVector, Float32Vector, PrimitiveVector, StringVector,
     };
@@ -1872,7 +1760,7 @@ mod tests {
             Arc::new(ConcreteDataType::boolean_datatype()),
         ));

-        let pb_value = to_proto_value(value);
+        let pb_value = to_grpc_value(value);

         match pb_value.value_data.unwrap() {
             ValueData::ListValue(pb_list_value) => {
@@ -1901,7 +1789,7 @@ mod tests {
             .unwrap(),
         );

-        let pb_value = to_proto_value(value);
+        let pb_value = to_grpc_value(value);

         match pb_value.value_data.unwrap() {
             ValueData::StructValue(pb_struct_value) => {
```
```diff
@@ -15,11 +15,11 @@ workspace = true
 api.workspace = true
 async-trait.workspace = true
 common-base.workspace = true
+common-config.workspace = true
 common-error.workspace = true
 common-macro.workspace = true
 common-telemetry.workspace = true
 digest = "0.10"
-notify.workspace = true
 sha1 = "0.10"
 snafu.workspace = true
 sql.workspace = true
```
```diff
@@ -75,11 +75,12 @@ pub enum Error {
         username: String,
     },

-    #[snafu(display("Failed to initialize a watcher for file {}", path))]
+    #[snafu(display("Failed to initialize a file watcher"))]
     FileWatch {
-        path: String,
-        #[snafu(source)]
-        error: notify::Error,
+        source: common_config::error::Error,
         #[snafu(implicit)]
         location: Location,
     },

     #[snafu(display("User is not authorized to perform this action"))]
```
```diff
@@ -12,16 +12,14 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-use std::path::Path;
-use std::sync::mpsc::channel;
 use std::sync::{Arc, Mutex};

 use async_trait::async_trait;
+use common_config::file_watcher::{FileWatcherBuilder, FileWatcherConfig};
 use common_telemetry::{info, warn};
-use notify::{EventKind, RecursiveMode, Watcher};
-use snafu::{ResultExt, ensure};
+use snafu::ResultExt;

-use crate::error::{FileWatchSnafu, InvalidConfigSnafu, Result};
+use crate::error::{FileWatchSnafu, Result};
 use crate::user_provider::{UserInfoMap, authenticate_with_credential, load_credential_from_file};
 use crate::{Identity, Password, UserInfoRef, UserProvider};
@@ -41,61 +39,36 @@ impl WatchFileUserProvider {
     pub fn new(filepath: &str) -> Result<Self> {
         let credential = load_credential_from_file(filepath)?;
         let users = Arc::new(Mutex::new(credential));
-        let this = WatchFileUserProvider {
-            users: users.clone(),
-        };
-
-        let (tx, rx) = channel::<notify::Result<notify::Event>>();
-        let mut debouncer =
-            notify::recommended_watcher(tx).context(FileWatchSnafu { path: "<none>" })?;
-        let mut dir = Path::new(filepath).to_path_buf();
-        ensure!(
-            dir.pop(),
-            InvalidConfigSnafu {
-                value: filepath,
-                msg: "UserProvider path must be a file path",
-            }
-        );
-        debouncer
-            .watch(&dir, RecursiveMode::NonRecursive)
-            .context(FileWatchSnafu { path: filepath })?;
-
-        let filepath = filepath.to_string();
-        std::thread::spawn(move || {
-            let filename = Path::new(&filepath).file_name();
-            let _hold = debouncer;
-            while let Ok(res) = rx.recv() {
-                if let Ok(event) = res {
-                    let is_this_file = event.paths.iter().any(|p| p.file_name() == filename);
-                    let is_relevant_event = matches!(
-                        event.kind,
-                        EventKind::Modify(_) | EventKind::Create(_) | EventKind::Remove(_)
-                    );
-                    if is_this_file && is_relevant_event {
-                        info!(?event.kind, "User provider file {} changed", &filepath);
-                        match load_credential_from_file(&filepath) {
-                            Ok(credential) => {
-                                let mut users =
-                                    users.lock().expect("users credential must be valid");
-                                #[cfg(not(test))]
-                                info!("User provider file {filepath} reloaded");
-                                #[cfg(test)]
-                                info!("User provider file {filepath} reloaded: {credential:?}");
-                                *users = credential;
-                            }
-                            Err(err) => {
-                                warn!(
-                                    ?err,
-                                    "Fail to load credential from file {filepath}; keep the old one",
-                                )
-                            }
-                        }
-                    }
-                }
-            }
-        });
-
-        Ok(this)
+        let users_clone = users.clone();
+        let filepath_owned = filepath.to_string();
+
+        FileWatcherBuilder::new()
+            .watch_path(filepath)
+            .context(FileWatchSnafu)?
+            .config(FileWatcherConfig::new())
+            .spawn(move || match load_credential_from_file(&filepath_owned) {
+                Ok(credential) => {
+                    let mut users = users_clone.lock().expect("users credential must be valid");
+                    #[cfg(not(test))]
+                    info!("User provider file {} reloaded", &filepath_owned);
+                    #[cfg(test)]
+                    info!(
+                        "User provider file {} reloaded: {:?}",
+                        &filepath_owned, credential
+                    );
+                    *users = credential;
+                }
+                Err(err) => {
+                    warn!(
+                        ?err,
+                        "Fail to load credential from file {}; keep the old one", &filepath_owned
+                    )
+                }
+            })
+            .context(FileWatchSnafu)?;
+
+        Ok(WatchFileUserProvider { users })
     }
 }
```
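The hand-rolled `notify` watcher thread is replaced by the shared `FileWatcherBuilder` from `common-config`, which owns the file-vs-directory and event-kind filtering. A sketch of the resulting pattern — the builder calls are taken from the diff above, but the exact `FileWatcherConfig` semantics are assumed:

```rust
// Sketch: the callback runs whenever the watched file changes; FileWatch errors
// are surfaced through the auth crate's Result alias via snafu context.
use common_config::file_watcher::{FileWatcherBuilder, FileWatcherConfig};
use snafu::ResultExt;

fn watch(path: &str) -> Result<()> {
    let path_owned = path.to_string();
    FileWatcherBuilder::new()
        .watch_path(path)
        .context(FileWatchSnafu)?
        .config(FileWatcherConfig::new())
        .spawn(move || {
            // Reload whatever state depends on the file here.
            println!("{path_owned} changed");
        })
        .context(FileWatchSnafu)?;
    Ok(())
}
```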
```diff
@@ -5,7 +5,6 @@ edition.workspace = true
 license.workspace = true

 [features]
-enterprise = []
 testing = []

 [lints]
```
```diff
@@ -12,13 +12,14 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-pub use client::{CachedKvBackend, CachedKvBackendBuilder, MetaKvBackend};
-
 mod builder;
 mod client;
 mod manager;
 mod table_cache;

-pub use builder::KvBackendCatalogManagerBuilder;
+pub use builder::{
+    CatalogManagerConfigurator, CatalogManagerConfiguratorRef, KvBackendCatalogManagerBuilder,
+};
+pub use client::{CachedKvBackend, CachedKvBackendBuilder, MetaKvBackend};
 pub use manager::KvBackendCatalogManager;
 pub use table_cache::{TableCache, TableCacheRef, new_table_cache};
```
```diff
@@ -12,9 +12,11 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

+use std::collections::HashMap;
 use std::sync::Arc;

 use common_catalog::consts::DEFAULT_CATALOG_NAME;
+use common_error::ext::BoxedError;
 use common_meta::cache::LayeredCacheRegistryRef;
 use common_meta::key::TableMetadataManager;
 use common_meta::key::flow::FlowMetadataManager;
@@ -23,24 +25,34 @@ use common_procedure::ProcedureManagerRef;
 use moka::sync::Cache;
 use partition::manager::PartitionRuleManager;

-#[cfg(feature = "enterprise")]
-use crate::information_schema::InformationSchemaTableFactoryRef;
-use crate::information_schema::{InformationExtensionRef, InformationSchemaProvider};
+use crate::information_schema::{
+    InformationExtensionRef, InformationSchemaProvider, InformationSchemaTableFactoryRef,
+};
 use crate::kvbackend::KvBackendCatalogManager;
 use crate::kvbackend::manager::{CATALOG_CACHE_MAX_CAPACITY, SystemCatalog};
 use crate::process_manager::ProcessManagerRef;
 use crate::system_schema::numbers_table_provider::NumbersTableProvider;
 use crate::system_schema::pg_catalog::PGCatalogProvider;

+/// The configurator that customizes or enhances the [`KvBackendCatalogManagerBuilder`].
+#[async_trait::async_trait]
+pub trait CatalogManagerConfigurator<C>: Send + Sync {
+    async fn configure(
+        &self,
+        builder: KvBackendCatalogManagerBuilder,
+        ctx: C,
+    ) -> std::result::Result<KvBackendCatalogManagerBuilder, BoxedError>;
+}
+
+pub type CatalogManagerConfiguratorRef<C> = Arc<dyn CatalogManagerConfigurator<C>>;
+
 pub struct KvBackendCatalogManagerBuilder {
     information_extension: InformationExtensionRef,
     backend: KvBackendRef,
     cache_registry: LayeredCacheRegistryRef,
     procedure_manager: Option<ProcedureManagerRef>,
     process_manager: Option<ProcessManagerRef>,
-    #[cfg(feature = "enterprise")]
-    extra_information_table_factories:
-        std::collections::HashMap<String, InformationSchemaTableFactoryRef>,
+    extra_information_table_factories: HashMap<String, InformationSchemaTableFactoryRef>,
 }

 impl KvBackendCatalogManagerBuilder {
```
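The new `CatalogManagerConfigurator` trait lets a plugin rewrite the builder before it is finalized, replacing the removed `enterprise` cfg-gates. A hypothetical implementation, for illustration only — `MyCtx` and `my_factories` are stand-ins, not repo APIs:

```rust
// Hypothetical configurator that injects extra information_schema tables.
use std::collections::HashMap;

struct MyCtx;
struct ExtraTablesConfigurator;

fn my_factories() -> HashMap<String, InformationSchemaTableFactoryRef> {
    HashMap::new() // illustrative: real code would register factories here
}

#[async_trait::async_trait]
impl CatalogManagerConfigurator<MyCtx> for ExtraTablesConfigurator {
    async fn configure(
        &self,
        builder: KvBackendCatalogManagerBuilder,
        _ctx: MyCtx,
    ) -> std::result::Result<KvBackendCatalogManagerBuilder, BoxedError> {
        Ok(builder.with_extra_information_table_factories(my_factories()))
    }
}
```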
```diff
@@ -55,8 +67,7 @@ impl KvBackendCatalogManagerBuilder {
             cache_registry,
             procedure_manager: None,
             process_manager: None,
-            #[cfg(feature = "enterprise")]
-            extra_information_table_factories: std::collections::HashMap::new(),
+            extra_information_table_factories: HashMap::new(),
         }
     }
@@ -71,10 +82,9 @@ impl KvBackendCatalogManagerBuilder {
     }

     /// Sets the extra information tables.
-    #[cfg(feature = "enterprise")]
     pub fn with_extra_information_table_factories(
         mut self,
-        factories: std::collections::HashMap<String, InformationSchemaTableFactoryRef>,
+        factories: HashMap<String, InformationSchemaTableFactoryRef>,
     ) -> Self {
         self.extra_information_table_factories = factories;
         self
@@ -87,7 +97,6 @@ impl KvBackendCatalogManagerBuilder {
             cache_registry,
             procedure_manager,
             process_manager,
-            #[cfg(feature = "enterprise")]
             extra_information_table_factories,
         } = self;
         Arc::new_cyclic(|me| KvBackendCatalogManager {
@@ -111,7 +120,6 @@ impl KvBackendCatalogManagerBuilder {
                         process_manager.clone(),
                         backend.clone(),
                     );
-                    #[cfg(feature = "enterprise")]
                     let provider = provider
                         .with_extra_table_factories(extra_information_table_factories.clone());
                     Arc::new(provider)
@@ -123,7 +131,6 @@ impl KvBackendCatalogManagerBuilder {
                 numbers_table_provider: NumbersTableProvider,
                 backend,
                 process_manager,
-                #[cfg(feature = "enterprise")]
                 extra_information_table_factories,
             },
             cache_registry,
```
```diff
@@ -53,9 +53,9 @@ use crate::error::{
     CacheNotFoundSnafu, GetTableCacheSnafu, InvalidTableInfoInCatalogSnafu, ListCatalogsSnafu,
     ListSchemasSnafu, ListTablesSnafu, Result, TableMetadataManagerSnafu,
 };
-#[cfg(feature = "enterprise")]
-use crate::information_schema::InformationSchemaTableFactoryRef;
-use crate::information_schema::{InformationExtensionRef, InformationSchemaProvider};
+use crate::information_schema::{
+    InformationExtensionRef, InformationSchemaProvider, InformationSchemaTableFactoryRef,
+};
 use crate::kvbackend::TableCacheRef;
 use crate::process_manager::ProcessManagerRef;
 use crate::system_schema::SystemSchemaProvider;
@@ -557,7 +557,6 @@ pub(super) struct SystemCatalog {
     pub(super) numbers_table_provider: NumbersTableProvider,
     pub(super) backend: KvBackendRef,
     pub(super) process_manager: Option<ProcessManagerRef>,
-    #[cfg(feature = "enterprise")]
     pub(super) extra_information_table_factories:
         std::collections::HashMap<String, InformationSchemaTableFactoryRef>,
 }
@@ -628,7 +627,6 @@ impl SystemCatalog {
             self.process_manager.clone(),
             self.backend.clone(),
         );
-        #[cfg(feature = "enterprise")]
         let provider = provider
             .with_extra_table_factories(self.extra_information_table_factories.clone());
         Arc::new(provider)
```
```diff
@@ -117,7 +117,6 @@ macro_rules! setup_memory_table {
     };
 }

-#[cfg(feature = "enterprise")]
 pub struct MakeInformationTableRequest {
     pub catalog_name: String,
     pub catalog_manager: Weak<dyn CatalogManager>,
@@ -128,12 +127,10 @@ pub struct MakeInformationTableRequest {
 ///
 /// This trait allows for extensibility of the information schema by providing
 /// a way to dynamically create custom information schema tables.
-#[cfg(feature = "enterprise")]
 pub trait InformationSchemaTableFactory {
     fn make_information_table(&self, req: MakeInformationTableRequest) -> SystemTableRef;
 }

-#[cfg(feature = "enterprise")]
 pub type InformationSchemaTableFactoryRef = Arc<dyn InformationSchemaTableFactory + Send + Sync>;

 /// The `information_schema` tables info provider.
@@ -143,9 +140,7 @@ pub struct InformationSchemaProvider {
     process_manager: Option<ProcessManagerRef>,
     flow_metadata_manager: Arc<FlowMetadataManager>,
     tables: HashMap<String, TableRef>,
-    #[allow(dead_code)]
     kv_backend: KvBackendRef,
-    #[cfg(feature = "enterprise")]
     extra_table_factories: HashMap<String, InformationSchemaTableFactoryRef>,
 }
@@ -166,7 +161,6 @@ impl SystemSchemaProviderInner for InformationSchemaProvider {
     }

     fn system_table(&self, name: &str) -> Option<SystemTableRef> {
-        #[cfg(feature = "enterprise")]
         if let Some(factory) = self.extra_table_factories.get(name) {
             let req = MakeInformationTableRequest {
                 catalog_name: self.catalog_name.clone(),
@@ -281,7 +275,6 @@ impl InformationSchemaProvider {
             process_manager,
             tables: HashMap::new(),
             kv_backend,
-            #[cfg(feature = "enterprise")]
             extra_table_factories: HashMap::new(),
         };

@@ -290,7 +283,6 @@ impl InformationSchemaProvider {
         provider
     }

-    #[cfg(feature = "enterprise")]
     pub(crate) fn with_extra_table_factories(
         mut self,
         factories: HashMap<String, InformationSchemaTableFactoryRef>,
@@ -358,7 +350,6 @@ impl InformationSchemaProvider {
         if let Some(process_list) = self.build_table(PROCESS_LIST) {
             tables.insert(PROCESS_LIST.to_string(), process_list);
         }
-        #[cfg(feature = "enterprise")]
         for name in self.extra_table_factories.keys() {
             tables.insert(name.clone(), self.build_table(name).expect(name));
         }
@@ -437,7 +428,7 @@ pub trait InformationExtension {
 }

 /// The request to inspect the datanode.
-#[derive(Debug, Clone, PartialEq, Eq)]
+#[derive(Debug, Clone, PartialEq)]
 pub struct DatanodeInspectRequest {
     /// Kind to fetch from datanode.
     pub kind: DatanodeInspectKind,
```
@@ -211,6 +211,7 @@ struct InformationSchemaPartitionsBuilder {
|
||||
partition_names: StringVectorBuilder,
|
||||
partition_ordinal_positions: Int64VectorBuilder,
|
||||
partition_expressions: StringVectorBuilder,
|
||||
partition_descriptions: StringVectorBuilder,
|
||||
create_times: TimestampSecondVectorBuilder,
|
||||
partition_ids: UInt64VectorBuilder,
|
||||
}
|
||||
@@ -231,6 +232,7 @@ impl InformationSchemaPartitionsBuilder {
|
||||
partition_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
partition_ordinal_positions: Int64VectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
partition_expressions: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
partition_descriptions: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
create_times: TimestampSecondVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
partition_ids: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
}
|
||||
@@ -319,6 +321,21 @@ impl InformationSchemaPartitionsBuilder {
|
||||
return;
|
||||
}
|
||||
|
||||
// Get partition column names (shared by all partitions)
|
||||
// In MySQL, PARTITION_EXPRESSION is the partitioning function expression (e.g., column name)
|
||||
let partition_columns: String = table_info
|
||||
.meta
|
||||
.partition_column_names()
|
||||
.cloned()
|
||||
.collect::<Vec<_>>()
|
||||
.join(", ");
|
||||
|
||||
let partition_expr_str = if partition_columns.is_empty() {
|
||||
None
|
||||
} else {
|
||||
Some(partition_columns)
|
||||
};
|
||||
|
||||
for (index, partition) in partitions.iter().enumerate() {
|
||||
let partition_name = format!("p{index}");
|
||||
|
||||
@@ -328,8 +345,12 @@ impl InformationSchemaPartitionsBuilder {
|
||||
self.partition_names.push(Some(&partition_name));
|
||||
self.partition_ordinal_positions
|
||||
.push(Some((index + 1) as i64));
|
||||
let expression = partition.partition_expr.as_ref().map(|e| e.to_string());
|
||||
self.partition_expressions.push(expression.as_deref());
|
||||
// PARTITION_EXPRESSION: partition column names (same for all partitions)
|
||||
self.partition_expressions
|
||||
.push(partition_expr_str.as_deref());
|
||||
// PARTITION_DESCRIPTION: partition boundary expression (different for each partition)
|
||||
let description = partition.partition_expr.as_ref().map(|e| e.to_string());
|
||||
self.partition_descriptions.push(description.as_deref());
|
||||
self.create_times.push(Some(TimestampSecond::from(
|
||||
table_info.meta.created_on.timestamp(),
|
||||
)));
|
||||
@@ -369,7 +390,7 @@ impl InformationSchemaPartitionsBuilder {
|
||||
null_string_vector.clone(),
|
||||
Arc::new(self.partition_expressions.finish()),
|
||||
null_string_vector.clone(),
|
||||
null_string_vector.clone(),
|
||||
Arc::new(self.partition_descriptions.finish()),
|
||||
// TODO(dennis): rows and index statistics info
|
||||
null_i64_vector.clone(),
|
||||
null_i64_vector.clone(),
|
||||
|
||||
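The PARTITION_EXPRESSION/PARTITION_DESCRIPTION split above boils down to: the expression is the joined partition column list shared by every partition row, while the description carries each partition's own boundary expression. A self-contained sketch of the column-joining step:

    fn partition_expression(columns: &[&str]) -> Option<String> {
        // Shared by every partition row; NULL (None) when the table is unpartitioned.
        let joined = columns.join(", ");
        (!joined.is_empty()).then_some(joined)
    }

    fn main() {
        assert_eq!(partition_expression(&["host", "idc"]).as_deref(), Some("host, idc"));
        assert_eq!(partition_expression(&[]), None);
    }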
@@ -16,7 +16,7 @@ default = [
"meta-srv/pg_kvbackend",
"meta-srv/mysql_kvbackend",
]
enterprise = ["common-meta/enterprise", "frontend/enterprise", "meta-srv/enterprise", "catalog/enterprise"]
enterprise = ["common-meta/enterprise", "frontend/enterprise", "meta-srv/enterprise"]
tokio-console = ["common-telemetry/tokio-console"]

[lints]

@@ -145,6 +145,17 @@ impl ObjbenchCommand {
let region_meta = extract_region_metadata(&self.source, &parquet_meta)?;
let num_rows = parquet_meta.file_metadata().num_rows() as u64;
let num_row_groups = parquet_meta.num_row_groups() as u64;
let max_row_group_uncompressed_size: u64 = parquet_meta
.row_groups()
.iter()
.map(|rg| {
rg.columns()
.iter()
.map(|c| c.uncompressed_size() as u64)
.sum::<u64>()
})
.max()
.unwrap_or(0);

println!(
"{} Metadata loaded - rows: {}, size: {} bytes",

@@ -160,10 +171,11 @@ impl ObjbenchCommand {
time_range: Default::default(),
level: 0,
file_size,
max_row_group_uncompressed_size,
available_indexes: Default::default(),
indexes: Default::default(),
index_file_size: 0,
index_file_id: None,
index_version: 0,
num_rows,
num_row_groups,
sequence: None,
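The new max_row_group_uncompressed_size field is the largest per-row-group sum of column uncompressed sizes. A self-contained sketch of that fold, with each inner Vec standing in for one row group's column sizes:

    fn max_row_group_uncompressed_size(row_groups: &[Vec<u64>]) -> u64 {
        row_groups
            .iter()
            .map(|cols| cols.iter().sum::<u64>()) // total uncompressed bytes per row group
            .max()
            .unwrap_or(0) // no row groups: empty file
    }

    fn main() {
        assert_eq!(max_row_group_uncompressed_size(&[vec![10, 20], vec![5, 41]]), 46);
        assert_eq!(max_row_group_uncompressed_size(&[]), 0);
    }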
@@ -564,7 +576,7 @@ fn new_noop_file_purger() -> FilePurgerRef {
#[derive(Debug)]
struct Noop;
impl FilePurger for Noop {
fn remove_file(&self, _file_meta: FileMeta, _is_delete: bool) {}
fn remove_file(&self, _file_meta: FileMeta, _is_delete: bool, _index_outdated: bool) {}
}
Arc::new(Noop)
}

@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use std::fmt::Debug;
use std::path::Path;
use std::sync::Arc;
use std::time::Duration;

@@ -39,12 +40,14 @@ use flow::{
get_flow_auth_options,
};
use meta_client::{MetaClientOptions, MetaClientType};
use plugins::flownode::context::GrpcConfigureContext;
use servers::configurator::GrpcBuilderConfiguratorRef;
use snafu::{OptionExt, ResultExt, ensure};
use tracing_appender::non_blocking::WorkerGuard;

use crate::error::{
BuildCacheRegistrySnafu, InitMetadataSnafu, LoadLayeredConfigSnafu, MetaClientInitSnafu,
MissingConfigSnafu, Result, ShutdownFlownodeSnafu, StartFlownodeSnafu,
MissingConfigSnafu, OtherSnafu, Result, ShutdownFlownodeSnafu, StartFlownodeSnafu,
};
use crate::options::{GlobalOptions, GreptimeOptions};
use crate::{App, create_resource_limit_metrics, log_versions, maybe_activate_heap_profile};

@@ -55,33 +58,14 @@ type FlownodeOptions = GreptimeOptions<flow::FlownodeOptions>;

pub struct Instance {
flownode: FlownodeInstance,

// The components of flownode, which make it easier to expand based
// on the components.
#[cfg(feature = "enterprise")]
components: Components,

// Keep the logging guard to prevent the worker from being dropped.
_guard: Vec<WorkerGuard>,
}

#[cfg(feature = "enterprise")]
pub struct Components {
pub catalog_manager: catalog::CatalogManagerRef,
pub fe_client: Arc<FrontendClient>,
pub kv_backend: common_meta::kv_backend::KvBackendRef,
}

impl Instance {
pub fn new(
flownode: FlownodeInstance,
#[cfg(feature = "enterprise")] components: Components,
guard: Vec<WorkerGuard>,
) -> Self {
pub fn new(flownode: FlownodeInstance, guard: Vec<WorkerGuard>) -> Self {
Self {
flownode,
#[cfg(feature = "enterprise")]
components,
_guard: guard,
}
}

@@ -94,11 +78,6 @@ impl Instance {
pub fn flownode_mut(&mut self) -> &mut FlownodeInstance {
&mut self.flownode
}

#[cfg(feature = "enterprise")]
pub fn components(&self) -> &Components {
&self.components
}
}

#[async_trait::async_trait]

@@ -396,7 +375,7 @@ impl StartCommand {
let frontend_client = Arc::new(frontend_client);
let flownode_builder = FlownodeBuilder::new(
opts.clone(),
plugins,
plugins.clone(),
table_metadata_manager,
catalog_manager.clone(),
flow_metadata_manager,

@@ -405,8 +384,29 @@ impl StartCommand {
.with_heartbeat_task(heartbeat_task);

let mut flownode = flownode_builder.build().await.context(StartFlownodeSnafu)?;

let builder =
FlownodeServiceBuilder::grpc_server_builder(&opts, flownode.flownode_server());
let builder = if let Some(configurator) =
plugins.get::<GrpcBuilderConfiguratorRef<GrpcConfigureContext>>()
{
let context = GrpcConfigureContext {
kv_backend: cached_meta_backend.clone(),
fe_client: frontend_client.clone(),
flownode_id: member_id,
catalog_manager: catalog_manager.clone(),
};
configurator
.configure(builder, context)
.await
.context(OtherSnafu)?
} else {
builder
};
let grpc_server = builder.build();

let services = FlownodeServiceBuilder::new(&opts)
.with_default_grpc_server(flownode.flownode_server())
.with_grpc_server(grpc_server)
.enable_http_service()
.build()
.context(StartFlownodeSnafu)?;

@@ -430,16 +430,6 @@ impl StartCommand {
.set_frontend_invoker(invoker)
.await;

#[cfg(feature = "enterprise")]
let components = Components {
catalog_manager: catalog_manager.clone(),
fe_client: frontend_client,
kv_backend: cached_meta_backend,
};

#[cfg(not(feature = "enterprise"))]
return Ok(Instance::new(flownode, guard));
#[cfg(feature = "enterprise")]
Ok(Instance::new(flownode, components, guard))
Ok(Instance::new(flownode, guard))
}
}
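The grpc-builder branch above is one instance of a configurator pattern this changeset applies across flownode, frontend, and standalone: look up an optional typed configurator in Plugins and, if one is registered, let it rewrite the builder with extra context. A hedged, generic sketch of the shape, assuming the async-trait crate; the trait name and error alias here are illustrative, not the exact plugin API:

    type BoxedError = Box<dyn std::error::Error + Send + Sync>;

    #[async_trait::async_trait]
    trait Configurator<B, C>: Send + Sync {
        async fn configure(&self, builder: B, ctx: C) -> Result<B, BoxedError>;
    }

    // Apply the configurator when one is registered; otherwise pass the builder through.
    async fn apply<B, C>(
        builder: B,
        ctx: C,
        configurator: Option<std::sync::Arc<dyn Configurator<B, C>>>,
    ) -> Result<B, BoxedError> {
        match configurator {
            Some(c) => c.configure(builder, ctx).await,
            None => Ok(builder),
        }
    }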
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use std::fmt::Debug;
use std::path::Path;
use std::sync::Arc;
use std::time::Duration;

@@ -19,7 +20,10 @@ use std::time::Duration;
use async_trait::async_trait;
use cache::{build_fundamental_cache_registry, with_default_composite_cache_registry};
use catalog::information_extension::DistributedInformationExtension;
use catalog::kvbackend::{CachedKvBackendBuilder, KvBackendCatalogManagerBuilder, MetaKvBackend};
use catalog::kvbackend::{
CachedKvBackendBuilder, CatalogManagerConfiguratorRef, KvBackendCatalogManagerBuilder,
MetaKvBackend,
};
use catalog::process_manager::ProcessManager;
use clap::Parser;
use client::client_manager::NodeClients;

@@ -31,6 +35,7 @@ use common_meta::cache::{CacheRegistryBuilder, LayeredCacheRegistryBuilder};
use common_meta::heartbeat::handler::HandlerGroupExecutor;
use common_meta::heartbeat::handler::invalidate_table_cache::InvalidateCacheHandler;
use common_meta::heartbeat::handler::parse_mailbox_message::ParseMailboxMessageHandler;
use common_meta::heartbeat::handler::suspend::SuspendHandler;
use common_query::prelude::set_default_prefix;
use common_stat::ResourceStatImpl;
use common_telemetry::info;

@@ -41,14 +46,17 @@ use frontend::frontend::Frontend;
use frontend::heartbeat::HeartbeatTask;
use frontend::instance::builder::FrontendBuilder;
use frontend::server::Services;
use meta_client::{MetaClientOptions, MetaClientType};
use meta_client::{MetaClientOptions, MetaClientRef, MetaClientType};
use plugins::frontend::context::{
CatalogManagerConfigureContext, DistributedCatalogManagerConfigureContext,
};
use servers::addrs;
use servers::grpc::GrpcOptions;
use servers::tls::{TlsMode, TlsOption};
use servers::tls::{TlsMode, TlsOption, merge_tls_option};
use snafu::{OptionExt, ResultExt};
use tracing_appender::non_blocking::WorkerGuard;

use crate::error::{self, Result};
use crate::error::{self, OtherSnafu, Result};
use crate::options::{GlobalOptions, GreptimeOptions};
use crate::{App, create_resource_limit_metrics, log_versions, maybe_activate_heap_profile};

@@ -248,7 +256,7 @@ impl StartCommand {

if let Some(addr) = &self.rpc_bind_addr {
opts.grpc.bind_addr.clone_from(addr);
opts.grpc.tls = tls_opts.clone();
opts.grpc.tls = merge_tls_option(&opts.grpc.tls, tls_opts.clone());
}

if let Some(addr) = &self.rpc_server_addr {

@@ -283,13 +291,13 @@ impl StartCommand {
if let Some(addr) = &self.mysql_addr {
opts.mysql.enable = true;
opts.mysql.addr.clone_from(addr);
opts.mysql.tls = tls_opts.clone();
opts.mysql.tls = merge_tls_option(&opts.mysql.tls, tls_opts.clone());
}

if let Some(addr) = &self.postgres_addr {
opts.postgres.enable = true;
opts.postgres.addr.clone_from(addr);
opts.postgres.tls = tls_opts;
opts.postgres.tls = merge_tls_option(&opts.postgres.tls, tls_opts.clone());
}

if let Some(enable) = self.influxdb_enable {

@@ -416,38 +424,30 @@ impl StartCommand {
layered_cache_registry.clone(),
)
.with_process_manager(process_manager.clone());
#[cfg(feature = "enterprise")]
let builder = if let Some(factories) = plugins.get() {
builder.with_extra_information_table_factories(factories)
let builder = if let Some(configurator) =
plugins.get::<CatalogManagerConfiguratorRef<CatalogManagerConfigureContext>>()
{
let ctx = DistributedCatalogManagerConfigureContext {
meta_client: meta_client.clone(),
};
let ctx = CatalogManagerConfigureContext::Distributed(ctx);

configurator
.configure(builder, ctx)
.await
.context(OtherSnafu)?
} else {
builder
};
let catalog_manager = builder.build();

let executor = HandlerGroupExecutor::new(vec![
Arc::new(ParseMailboxMessageHandler),
Arc::new(InvalidateCacheHandler::new(layered_cache_registry.clone())),
]);

let mut resource_stat = ResourceStatImpl::default();
resource_stat.start_collect_cpu_usage();

let heartbeat_task = HeartbeatTask::new(
&opts,
meta_client.clone(),
opts.heartbeat.clone(),
Arc::new(executor),
Arc::new(resource_stat),
);
let heartbeat_task = Some(heartbeat_task);

let instance = FrontendBuilder::new(
opts.clone(),
cached_meta_backend.clone(),
layered_cache_registry.clone(),
catalog_manager,
client,
meta_client,
meta_client.clone(),
process_manager,
)
.with_plugin(plugins.clone())

@@ -455,6 +455,9 @@ impl StartCommand {
.try_build()
.await
.context(error::StartFrontendSnafu)?;

let heartbeat_task = Some(create_heartbeat_task(&opts, meta_client, &instance));

let instance = Arc::new(instance);

let servers = Services::new(opts, instance.clone(), plugins)

@@ -471,6 +474,28 @@ impl StartCommand {
}
}

pub fn create_heartbeat_task(
options: &frontend::frontend::FrontendOptions,
meta_client: MetaClientRef,
instance: &frontend::instance::Instance,
) -> HeartbeatTask {
let executor = Arc::new(HandlerGroupExecutor::new(vec![
Arc::new(ParseMailboxMessageHandler),
Arc::new(SuspendHandler::new(instance.suspend_state())),
Arc::new(InvalidateCacheHandler::new(
instance.cache_invalidator().clone(),
)),
]));

let stat = {
let mut stat = ResourceStatImpl::default();
stat.start_collect_cpu_usage();
Arc::new(stat)
};

HeartbeatTask::new(options, meta_client, executor, stat)
}

#[cfg(test)]
mod tests {
use std::io::Write;
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use std::fmt;
use std::fmt::{self, Debug};
use std::path::Path;
use std::time::Duration;

@@ -23,7 +23,7 @@ use common_config::Configurable;
use common_telemetry::info;
use common_telemetry::logging::{DEFAULT_LOGGING_DIR, TracingOptions};
use common_version::{short_version, verbose_version};
use meta_srv::bootstrap::MetasrvInstance;
use meta_srv::bootstrap::{MetasrvInstance, metasrv_builder};
use meta_srv::metasrv::BackendImpl;
use snafu::ResultExt;
use tracing_appender::non_blocking::WorkerGuard;

@@ -177,7 +177,7 @@ pub struct StartCommand {
backend: Option<BackendImpl>,
}

impl fmt::Debug for StartCommand {
impl Debug for StartCommand {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("StartCommand")
.field("rpc_bind_addr", &self.rpc_bind_addr)

@@ -341,7 +341,7 @@ impl StartCommand {
.await
.context(StartMetaServerSnafu)?;

let builder = meta_srv::bootstrap::metasrv_builder(&opts, plugins, None)
let builder = metasrv_builder(&opts, plugins, None)
.await
.context(error::BuildMetaServerSnafu)?;
let metasrv = builder.build().await.context(error::BuildMetaServerSnafu)?;
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use std::fmt::Debug;
use std::net::SocketAddr;
use std::path::Path;
use std::sync::Arc;

@@ -20,7 +21,7 @@ use std::{fs, path};
use async_trait::async_trait;
use cache::{build_fundamental_cache_registry, with_default_composite_cache_registry};
use catalog::information_schema::InformationExtensionRef;
use catalog::kvbackend::KvBackendCatalogManagerBuilder;
use catalog::kvbackend::{CatalogManagerConfiguratorRef, KvBackendCatalogManagerBuilder};
use catalog::process_manager::ProcessManager;
use clap::Parser;
use common_base::Plugins;

@@ -31,7 +32,7 @@ use common_meta::cache::LayeredCacheRegistryBuilder;
use common_meta::ddl::flow_meta::FlowMetadataAllocator;
use common_meta::ddl::table_meta::TableMetadataAllocator;
use common_meta::ddl::{DdlContext, NoopRegionFailureDetectorControl};
use common_meta::ddl_manager::DdlManager;
use common_meta::ddl_manager::{DdlManager, DdlManagerConfiguratorRef};
use common_meta::key::flow::FlowMetadataManager;
use common_meta::key::{TableMetadataManager, TableMetadataManagerRef};
use common_meta::kv_backend::KvBackendRef;

@@ -57,13 +58,17 @@ use frontend::instance::StandaloneDatanodeManager;
use frontend::instance::builder::FrontendBuilder;
use frontend::server::Services;
use meta_srv::metasrv::{FLOW_ID_SEQ, TABLE_ID_SEQ};
use servers::tls::{TlsMode, TlsOption};
use plugins::frontend::context::{
CatalogManagerConfigureContext, StandaloneCatalogManagerConfigureContext,
};
use plugins::standalone::context::DdlManagerConfigureContext;
use servers::tls::{TlsMode, TlsOption, merge_tls_option};
use snafu::ResultExt;
use standalone::StandaloneInformationExtension;
use standalone::options::StandaloneOptions;
use tracing_appender::non_blocking::WorkerGuard;

use crate::error::{Result, StartFlownodeSnafu};
use crate::error::{OtherSnafu, Result, StartFlownodeSnafu};
use crate::options::{GlobalOptions, GreptimeOptions};
use crate::{App, create_resource_limit_metrics, error, log_versions, maybe_activate_heap_profile};

@@ -116,34 +121,15 @@ pub struct Instance {
flownode: FlownodeInstance,
procedure_manager: ProcedureManagerRef,
wal_options_allocator: WalOptionsAllocatorRef,

// The components of standalone, which make it easier to expand based
// on the components.
#[cfg(feature = "enterprise")]
components: Components,

// Keep the logging guard to prevent the worker from being dropped.
_guard: Vec<WorkerGuard>,
}

#[cfg(feature = "enterprise")]
pub struct Components {
pub plugins: Plugins,
pub kv_backend: KvBackendRef,
pub frontend_client: Arc<FrontendClient>,
pub catalog_manager: catalog::CatalogManagerRef,
}

impl Instance {
/// Find the socket addr of a server by its `name`.
pub fn server_addr(&self, name: &str) -> Option<SocketAddr> {
self.frontend.server_handlers().addr(name)
}

#[cfg(feature = "enterprise")]
pub fn components(&self) -> &Components {
&self.components
}
}

#[async_trait]

@@ -307,19 +293,20 @@ impl StartCommand {
),
}.fail();
}
opts.grpc.bind_addr.clone_from(addr)
opts.grpc.bind_addr.clone_from(addr);
opts.grpc.tls = merge_tls_option(&opts.grpc.tls, tls_opts.clone());
}

if let Some(addr) = &self.mysql_addr {
opts.mysql.enable = true;
opts.mysql.addr.clone_from(addr);
opts.mysql.tls = tls_opts.clone();
opts.mysql.tls = merge_tls_option(&opts.mysql.tls, tls_opts.clone());
}

if let Some(addr) = &self.postgres_addr {
opts.postgres.enable = true;
opts.postgres.addr.clone_from(addr);
opts.postgres.tls = tls_opts;
opts.postgres.tls = merge_tls_option(&opts.postgres.tls, tls_opts.clone());
}

if self.influxdb_enable {

@@ -415,6 +402,13 @@ impl StartCommand {
plugins.insert::<InformationExtensionRef>(information_extension.clone());

let process_manager = Arc::new(ProcessManager::new(opts.grpc.server_addr.clone(), None));

// for standalone not use grpc, but get a handler to frontend grpc client without
// actually make a connection
let (frontend_client, frontend_instance_handler) =
FrontendClient::from_empty_grpc_handler(opts.query.clone());
let frontend_client = Arc::new(frontend_client);

let builder = KvBackendCatalogManagerBuilder::new(
information_extension.clone(),
kv_backend.clone(),

@@ -422,9 +416,17 @@ impl StartCommand {
)
.with_procedure_manager(procedure_manager.clone())
.with_process_manager(process_manager.clone());
#[cfg(feature = "enterprise")]
let builder = if let Some(factories) = plugins.get() {
builder.with_extra_information_table_factories(factories)
let builder = if let Some(configurator) =
plugins.get::<CatalogManagerConfiguratorRef<CatalogManagerConfigureContext>>()
{
let ctx = StandaloneCatalogManagerConfigureContext {
fe_client: frontend_client.clone(),
};
let ctx = CatalogManagerConfigureContext::Standalone(ctx);
configurator
.configure(builder, ctx)
.await
.context(OtherSnafu)?
} else {
builder
};

@@ -439,11 +441,6 @@ impl StartCommand {
..Default::default()
};

// for standalone not use grpc, but get a handler to frontend grpc client without
// actually make a connection
let (frontend_client, frontend_instance_handler) =
FrontendClient::from_empty_grpc_handler(opts.query.clone());
let frontend_client = Arc::new(frontend_client);
let flow_builder = FlownodeBuilder::new(
flownode_options,
plugins.clone(),

@@ -514,11 +511,21 @@ impl StartCommand {

let ddl_manager = DdlManager::try_new(ddl_context, procedure_manager.clone(), true)
.context(error::InitDdlManagerSnafu)?;
#[cfg(feature = "enterprise")]
let ddl_manager = {
let trigger_ddl_manager: Option<common_meta::ddl_manager::TriggerDdlManagerRef> =
plugins.get();
ddl_manager.with_trigger_ddl_manager(trigger_ddl_manager)

let ddl_manager = if let Some(configurator) =
plugins.get::<DdlManagerConfiguratorRef<DdlManagerConfigureContext>>()
{
let ctx = DdlManagerConfigureContext {
kv_backend: kv_backend.clone(),
fe_client: frontend_client.clone(),
catalog_manager: catalog_manager.clone(),
};
configurator
.configure(ddl_manager, ctx)
.await
.context(OtherSnafu)?
} else {
ddl_manager
};

let procedure_executor = Arc::new(LocalProcedureExecutor::new(

@@ -545,9 +552,8 @@ impl StartCommand {
let grpc_handler = fe_instance.clone() as Arc<dyn GrpcQueryHandlerWithBoxedError>;
let weak_grpc_handler = Arc::downgrade(&grpc_handler);
frontend_instance_handler
.lock()
.unwrap()
.replace(weak_grpc_handler);
.set_handler(weak_grpc_handler)
.await;

// set the frontend invoker for flownode
let flow_streaming_engine = flownode.flow_engine().streaming_engine();
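The set_handler change above replaces a std Mutex lock/replace with an async setter. A minimal sketch of what the receiving side could look like, assuming a tokio RwLock holds the weak handler (the slot's payload type here is a stand-in, not the real gRPC handler):

    use std::sync::Weak;
    use tokio::sync::RwLock;

    // Hypothetical slot type for illustration only.
    struct HandlerSlot(RwLock<Option<Weak<String>>>);

    impl HandlerSlot {
        async fn set_handler(&self, handler: Weak<String>) {
            // Awaiting the lock avoids blocking the async runtime the way
            // `lock().unwrap()` on a std Mutex could.
            *self.0.write().await = Some(handler);
        }
    }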
@@ -574,22 +580,12 @@ impl StartCommand {
heartbeat_task: None,
};

#[cfg(feature = "enterprise")]
let components = Components {
plugins,
kv_backend,
frontend_client,
catalog_manager,
};

Ok(Instance {
datanode,
frontend,
flownode,
procedure_manager,
wal_options_allocator,
#[cfg(feature = "enterprise")]
components,
_guard: guard,
})
}

@@ -769,7 +765,6 @@ mod tests {
user_provider: Some("static_user_provider:cmd:test=test".to_string()),
mysql_addr: Some("127.0.0.1:4002".to_string()),
postgres_addr: Some("127.0.0.1:4003".to_string()),
tls_watch: true,
..Default::default()
};

@@ -786,8 +781,6 @@ mod tests {

assert_eq!("./greptimedb_data/test/logs", opts.logging.dir);
assert_eq!("debug", opts.logging.level.unwrap());
assert!(opts.mysql.tls.watch);
assert!(opts.postgres.tls.watch);
}

#[test]

@@ -52,7 +52,6 @@ fn test_load_datanode_example_config() {
meta_client: Some(MetaClientOptions {
metasrv_addrs: vec!["127.0.0.1:3002".to_string()],
timeout: Duration::from_secs(3),
heartbeat_timeout: Duration::from_millis(500),
ddl_timeout: Duration::from_secs(10),
connect_timeout: Duration::from_secs(1),
tcp_nodelay: true,

@@ -118,7 +117,6 @@ fn test_load_frontend_example_config() {
meta_client: Some(MetaClientOptions {
metasrv_addrs: vec!["127.0.0.1:3002".to_string()],
timeout: Duration::from_secs(3),
heartbeat_timeout: Duration::from_millis(500),
ddl_timeout: Duration::from_secs(10),
connect_timeout: Duration::from_secs(1),
tcp_nodelay: true,

@@ -241,7 +239,6 @@ fn test_load_flownode_example_config() {
meta_client: Some(MetaClientOptions {
metasrv_addrs: vec!["127.0.0.1:3002".to_string()],
timeout: Duration::from_secs(3),
heartbeat_timeout: Duration::from_millis(500),
ddl_timeout: Duration::from_secs(10),
connect_timeout: Duration::from_secs(1),
tcp_nodelay: true,

@@ -32,7 +32,12 @@ impl Plugins {

pub fn insert<T: 'static + Send + Sync>(&self, value: T) {
let last = self.write().insert(value);
assert!(last.is_none(), "each type of plugins must be one and only");
if last.is_some() {
panic!(
"Plugin of type {} already exists",
std::any::type_name::<T>()
);
}
}

pub fn get<T: 'static + Send + Sync + Clone>(&self) -> Option<T> {

@@ -140,7 +145,7 @@ mod tests {
}

#[test]
#[should_panic(expected = "each type of plugins must be one and only")]
#[should_panic(expected = "Plugin of type i32 already exists")]
fn test_plugin_uniqueness() {
let plugins = Plugins::new();
plugins.insert(1i32);
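The Plugins::insert change swaps a bare assert for a panic message that names the duplicated type. A self-contained sketch of the same uniqueness check over a type-keyed map (the real Plugins wraps such a map in a lock, per `self.write().insert(value)` above):

    use std::any::{Any, TypeId, type_name};
    use std::collections::HashMap;

    #[derive(Default)]
    struct Registry(HashMap<TypeId, Box<dyn Any>>);

    impl Registry {
        fn insert<T: 'static>(&mut self, value: T) {
            let last = self.0.insert(TypeId::of::<T>(), Box::new(value));
            if last.is_some() {
                // Mirrors the new message: names the offending type.
                panic!("Plugin of type {} already exists", type_name::<T>());
            }
        }
    }

    fn main() {
        let mut registry = Registry::default();
        registry.insert(1i32);
        registry.insert("ok"); // a different type is fine
        // registry.insert(2i32); // would panic: "Plugin of type i32 already exists"
    }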
@@ -11,8 +11,10 @@ workspace = true
common-base.workspace = true
common-error.workspace = true
common-macro.workspace = true
common-telemetry.workspace = true
config.workspace = true
humantime-serde.workspace = true
notify.workspace = true
object-store.workspace = true
serde.workspace = true
serde_json.workspace = true

@@ -49,14 +49,41 @@ pub enum Error {
#[snafu(implicit)]
location: Location,
},

#[snafu(display("Failed to watch file: {}", path))]
FileWatch {
path: String,
#[snafu(source)]
error: notify::Error,
#[snafu(implicit)]
location: Location,
},

#[snafu(display("Failed to canonicalize path: {}", path))]
CanonicalizePath {
path: String,
#[snafu(source)]
error: std::io::Error,
#[snafu(implicit)]
location: Location,
},

#[snafu(display("Invalid path '{}': expected a file, not a directory", path))]
InvalidPath {
path: String,
#[snafu(implicit)]
location: Location,
},
}

impl ErrorExt for Error {
fn status_code(&self) -> StatusCode {
match self {
Error::TomlFormat { .. } | Error::LoadLayeredConfig { .. } => {
StatusCode::InvalidArguments
}
Error::TomlFormat { .. }
| Error::LoadLayeredConfig { .. }
| Error::FileWatch { .. }
| Error::InvalidPath { .. }
| Error::CanonicalizePath { .. } => StatusCode::InvalidArguments,
Error::SerdeJson { .. } => StatusCode::Unexpected,
}
}

src/common/config/src/file_watcher.rs (new file, 355 lines)
@@ -0,0 +1,355 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Common file watching utilities for configuration hot-reloading.
//!
//! This module provides a generic file watcher that can be used to watch
//! files for changes and trigger callbacks when changes occur.
//!
//! The watcher monitors the parent directory of each file rather than the
//! file itself. This ensures that file deletions and recreations are properly
//! tracked, which is common with editors that use atomic saves or when
//! configuration files are replaced.

use std::collections::HashSet;
use std::path::{Path, PathBuf};
use std::sync::mpsc::channel;

use common_telemetry::{error, info, warn};
use notify::{EventKind, RecursiveMode, Watcher};
use snafu::ResultExt;

use crate::error::{CanonicalizePathSnafu, FileWatchSnafu, InvalidPathSnafu, Result};

/// Configuration for the file watcher behavior.
#[derive(Debug, Clone, Default)]
pub struct FileWatcherConfig {
/// Whether to include Remove events in addition to Modify and Create.
pub include_remove_events: bool,
}

impl FileWatcherConfig {
pub fn new() -> Self {
Self::default()
}

pub fn with_modify_and_create(mut self) -> Self {
self.include_remove_events = false;
self
}

pub fn with_remove_events(mut self) -> Self {
self.include_remove_events = true;
self
}
}

/// A builder for creating file watchers with flexible configuration.
///
/// The watcher monitors the parent directory of each file to handle file
/// deletion and recreation properly. Events are filtered to only trigger
/// callbacks for the specific files being watched.
pub struct FileWatcherBuilder {
config: FileWatcherConfig,
/// Canonicalized paths of files to watch.
file_paths: Vec<PathBuf>,
}

impl FileWatcherBuilder {
/// Create a new builder with default configuration.
pub fn new() -> Self {
Self {
config: FileWatcherConfig::default(),
file_paths: Vec::new(),
}
}

/// Set the watcher configuration.
pub fn config(mut self, config: FileWatcherConfig) -> Self {
self.config = config;
self
}

/// Add a file path to watch.
///
/// Returns an error if the path is a directory.
/// The path is canonicalized for reliable comparison with events.
pub fn watch_path<P: AsRef<Path>>(mut self, path: P) -> Result<Self> {
let path = path.as_ref();
snafu::ensure!(
path.is_file(),
InvalidPathSnafu {
path: path.display().to_string(),
}
);
// Canonicalize the path for reliable comparison with event paths
let canonical = path.canonicalize().context(CanonicalizePathSnafu {
path: path.display().to_string(),
})?;
self.file_paths.push(canonical);
Ok(self)
}

/// Add multiple file paths to watch.
///
/// Returns an error if any path is a directory.
pub fn watch_paths<P: AsRef<Path>, I: IntoIterator<Item = P>>(
mut self,
paths: I,
) -> Result<Self> {
for path in paths {
self = self.watch_path(path)?;
}
Ok(self)
}

/// Build and spawn the file watcher with the given callback.
///
/// The callback is invoked when relevant file events are detected for
/// the watched files. The watcher monitors the parent directories to
/// handle file deletion and recreation properly.
///
/// The spawned watcher thread runs for the lifetime of the process.
pub fn spawn<F>(self, callback: F) -> Result<()>
where
F: Fn() + Send + 'static,
{
let (tx, rx) = channel::<notify::Result<notify::Event>>();
let mut watcher =
notify::recommended_watcher(tx).context(FileWatchSnafu { path: "<none>" })?;

// Collect unique parent directories to watch
let mut watched_dirs: HashSet<PathBuf> = HashSet::new();
for file_path in &self.file_paths {
if let Some(parent) = file_path.parent()
&& watched_dirs.insert(parent.to_path_buf())
{
watcher
.watch(parent, RecursiveMode::NonRecursive)
.context(FileWatchSnafu {
path: parent.display().to_string(),
})?;
}
}

let config = self.config;
let watched_files: HashSet<PathBuf> = self.file_paths.iter().cloned().collect();

info!(
"Spawning file watcher for paths: {:?} (watching parent directories)",
self.file_paths
.iter()
.map(|p| p.display().to_string())
.collect::<Vec<_>>()
);

std::thread::spawn(move || {
// Keep watcher alive in the thread
let _watcher = watcher;

while let Ok(res) = rx.recv() {
match res {
Ok(event) => {
if !is_relevant_event(&event.kind, &config) {
continue;
}

// Check if any of the event paths match our watched files
let is_watched_file = event.paths.iter().any(|event_path| {
// Try to canonicalize the event path for comparison
// If the file was deleted, canonicalize will fail, so we also
// compare the raw path
if let Ok(canonical) = event_path.canonicalize()
&& watched_files.contains(&canonical)
{
return true;
}
// For deleted files, compare using the raw path
watched_files.contains(event_path)
});

if !is_watched_file {
continue;
}

info!(?event.kind, ?event.paths, "Detected file change");
callback();
}
Err(err) => {
warn!("File watcher error: {}", err);
}
}
}

error!("File watcher channel closed unexpectedly");
});

Ok(())
}
}

impl Default for FileWatcherBuilder {
fn default() -> Self {
Self::new()
}
}

/// Check if an event kind is relevant based on the configuration.
fn is_relevant_event(kind: &EventKind, config: &FileWatcherConfig) -> bool {
match kind {
EventKind::Modify(_) | EventKind::Create(_) => true,
EventKind::Remove(_) => config.include_remove_events,
_ => false,
}
}

#[cfg(test)]
mod tests {
use std::sync::Arc;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::time::Duration;

use common_test_util::temp_dir::create_temp_dir;

use super::*;

#[test]
fn test_file_watcher_detects_changes() {
common_telemetry::init_default_ut_logging();

let dir = create_temp_dir("test_file_watcher");
let file_path = dir.path().join("test_file.txt");

// Create initial file
std::fs::write(&file_path, "initial content").unwrap();

let counter = Arc::new(AtomicUsize::new(0));
let counter_clone = counter.clone();

FileWatcherBuilder::new()
.watch_path(&file_path)
.unwrap()
.config(FileWatcherConfig::new())
.spawn(move || {
counter_clone.fetch_add(1, Ordering::SeqCst);
})
.unwrap();

// Give watcher time to start
std::thread::sleep(Duration::from_millis(100));

// Modify the file
std::fs::write(&file_path, "modified content").unwrap();

// Wait for the event to be processed
std::thread::sleep(Duration::from_millis(500));

assert!(
counter.load(Ordering::SeqCst) >= 1,
"Watcher should have detected at least one change"
);
}

#[test]
fn test_file_watcher_detects_delete_and_recreate() {
common_telemetry::init_default_ut_logging();

let dir = create_temp_dir("test_file_watcher_recreate");
let file_path = dir.path().join("test_file.txt");

// Create initial file
std::fs::write(&file_path, "initial content").unwrap();

let counter = Arc::new(AtomicUsize::new(0));
let counter_clone = counter.clone();

FileWatcherBuilder::new()
.watch_path(&file_path)
.unwrap()
.config(FileWatcherConfig::new())
.spawn(move || {
counter_clone.fetch_add(1, Ordering::SeqCst);
})
.unwrap();

// Give watcher time to start
std::thread::sleep(Duration::from_millis(100));

// Delete the file
std::fs::remove_file(&file_path).unwrap();
std::thread::sleep(Duration::from_millis(100));

// Recreate the file - this should still be detected because we watch the directory
std::fs::write(&file_path, "recreated content").unwrap();

// Wait for the event to be processed
std::thread::sleep(Duration::from_millis(500));

assert!(
counter.load(Ordering::SeqCst) >= 1,
"Watcher should have detected file recreation"
);
}

#[test]
fn test_file_watcher_ignores_other_files() {
common_telemetry::init_default_ut_logging();

let dir = create_temp_dir("test_file_watcher_other");
let watched_file = dir.path().join("watched.txt");
let other_file = dir.path().join("other.txt");

// Create both files
std::fs::write(&watched_file, "watched content").unwrap();
std::fs::write(&other_file, "other content").unwrap();

let counter = Arc::new(AtomicUsize::new(0));
let counter_clone = counter.clone();

FileWatcherBuilder::new()
.watch_path(&watched_file)
.unwrap()
.config(FileWatcherConfig::new())
.spawn(move || {
counter_clone.fetch_add(1, Ordering::SeqCst);
})
.unwrap();

// Give watcher time to start
std::thread::sleep(Duration::from_millis(100));

// Modify the other file - should NOT trigger callback
std::fs::write(&other_file, "modified other content").unwrap();

// Wait for potential event
std::thread::sleep(Duration::from_millis(500));

assert_eq!(
counter.load(Ordering::SeqCst),
0,
"Watcher should not have detected changes to other files"
);

// Now modify the watched file - SHOULD trigger callback
std::fs::write(&watched_file, "modified watched content").unwrap();

// Wait for the event to be processed
std::thread::sleep(Duration::from_millis(500));

assert!(
counter.load(Ordering::SeqCst) >= 1,
"Watcher should have detected change to watched file"
);
}
}
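A usage sketch of the builder added above, as a caller might wire it up for TLS or config hot-reload; the reload closure body is illustrative:

    use std::path::Path;

    fn watch_config(path: &Path) -> Result<()> {
        FileWatcherBuilder::new()
            .watch_path(path)?
            // Also react to deletions, e.g. atomic-save editors that remove and recreate.
            .config(FileWatcherConfig::new().with_remove_events())
            .spawn(|| {
                // Reload TLS certificates or options here.
                common_telemetry::info!("watched config file changed; reloading");
            })
    }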
@@ -14,6 +14,7 @@

pub mod config;
pub mod error;
pub mod file_watcher;

use std::time::Duration;

@@ -21,6 +21,8 @@ pub mod status_code;
use http::{HeaderMap, HeaderValue};
pub use snafu;

use crate::status_code::StatusCode;

// HACK - these headers are here for shared in gRPC services. For common HTTP headers,
// please define in `src/servers/src/http/header.rs`.
pub const GREPTIME_DB_HEADER_ERROR_CODE: &str = "x-greptime-err-code";

@@ -46,6 +48,29 @@ pub fn from_err_code_msg_to_header(code: u32, msg: &str) -> HeaderMap {
header
}

/// Extract [StatusCode] and error message from [HeaderMap], if any.
///
/// Note that if the [StatusCode] is illegal, for example, a random number that is not pre-defined
/// as a [StatusCode], the result is still `None`.
pub fn from_header_to_err_code_msg(headers: &HeaderMap) -> Option<(StatusCode, &str)> {
let code = headers
.get(GREPTIME_DB_HEADER_ERROR_CODE)
.and_then(|value| {
value
.to_str()
.ok()
.and_then(|x| x.parse::<u32>().ok())
.and_then(StatusCode::from_u32)
});
let msg = headers
.get(GREPTIME_DB_HEADER_ERROR_MSG)
.and_then(|x| x.to_str().ok());
match (code, msg) {
(Some(code), Some(msg)) => Some((code, msg)),
_ => None,
}
}
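from_header_to_err_code_msg is the decoding half of the header pair; a round-trip sketch against the from_err_code_msg_to_header encoder shown above, assuming the encoder stores the message verbatim (1008 is DeadlineExceeded per the status-code hunk below):

    fn round_trip() {
        let headers = from_err_code_msg_to_header(1008, "query timed out");
        // Both headers must be present and the code must parse to a known
        // variant, otherwise the decoder returns None.
        let (code, msg) = from_header_to_err_code_msg(&headers).unwrap();
        assert!(matches!(code, StatusCode::DeadlineExceeded));
        assert_eq!(msg, "query timed out");
    }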
/// Returns the external root cause of the source error (exclude the current error).
pub fn root_source(err: &dyn std::error::Error) -> Option<&dyn std::error::Error> {
// There are some divergence about the behavior of the `sources()` API

@@ -42,6 +42,8 @@ pub enum StatusCode {
External = 1007,
/// The request is deadline exceeded (typically server-side).
DeadlineExceeded = 1008,
/// Service got suspended for various reason. For example, resources exceed limit.
Suspended = 1009,
// ====== End of common status code ================

// ====== Begin of SQL related status code =========

@@ -175,7 +177,8 @@ impl StatusCode {
| StatusCode::AccessDenied
| StatusCode::PermissionDenied
| StatusCode::RequestOutdated
| StatusCode::External => false,
| StatusCode::External
| StatusCode::Suspended => false,
}
}

@@ -223,7 +226,8 @@ impl StatusCode {
| StatusCode::InvalidAuthHeader
| StatusCode::AccessDenied
| StatusCode::PermissionDenied
| StatusCode::RequestOutdated => false,
| StatusCode::RequestOutdated
| StatusCode::Suspended => false,
}
}

@@ -347,7 +351,8 @@ pub fn status_to_tonic_code(status_code: StatusCode) -> Code {
| StatusCode::RegionNotReady => Code::Unavailable,
StatusCode::RuntimeResourcesExhausted
| StatusCode::RateLimited
| StatusCode::RegionBusy => Code::ResourceExhausted,
| StatusCode::RegionBusy
| StatusCode::Suspended => Code::ResourceExhausted,
StatusCode::UnsupportedPasswordType
| StatusCode::UserPasswordMismatch
| StatusCode::AuthHeaderNotFound
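The three hunks above thread the new Suspended variant through every exhaustive match on StatusCode. A tiny sketch of why this is mechanical in Rust: adding a variant forces each match to name it before the crate compiles again. Here Suspended maps to resource-exhausted, mirroring the tonic hunk above:

    enum Status {
        Ok,
        RateLimited,
        Suspended, // newly added variant
    }

    fn to_tonic_ish(s: &Status) -> &'static str {
        // Without the Suspended arm this match no longer compiles.
        match s {
            Status::Ok => "ok",
            Status::RateLimited | Status::Suspended => "resource_exhausted",
        }
    }

    fn main() {
        assert_eq!(to_tonic_ish(&Status::Suspended), "resource_exhausted");
    }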
@@ -39,7 +39,7 @@ datafusion-functions-aggregate-common.workspace = true
datafusion-pg-catalog.workspace = true
datafusion-physical-expr.workspace = true
datatypes.workspace = true
derive_more = { version = "1", default-features = false, features = ["display"] }
derive_more.workspace = true
geo = { version = "0.29", optional = true }
geo-types = { version = "0.7", optional = true }
geohash = { version = "0.13", optional = true }

@@ -14,6 +14,7 @@

mod binary;
mod ctx;
mod if_func;
mod is_null;
mod unary;

@@ -22,6 +23,7 @@ pub use ctx::EvalContext;
pub use unary::scalar_unary_op;

use crate::function_registry::FunctionRegistry;
use crate::scalars::expression::if_func::IfFunction;
use crate::scalars::expression::is_null::IsNullFunction;

pub(crate) struct ExpressionFunction;

@@ -29,5 +31,6 @@ pub(crate) struct ExpressionFunction;
impl ExpressionFunction {
pub fn register(registry: &FunctionRegistry) {
registry.register_scalar(IsNullFunction::default());
registry.register_scalar(IfFunction::default());
}
}

src/common/function/src/scalars/expression/if_func.rs (new file, 404 lines)
@@ -0,0 +1,404 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::fmt;
use std::fmt::Display;

use arrow::array::ArrowNativeTypeOp;
use arrow::datatypes::ArrowPrimitiveType;
use datafusion::arrow::array::{Array, ArrayRef, AsArray, BooleanArray, PrimitiveArray};
use datafusion::arrow::compute::kernels::zip::zip;
use datafusion::arrow::datatypes::DataType;
use datafusion_common::DataFusionError;
use datafusion_expr::type_coercion::binary::comparison_coercion;
use datafusion_expr::{ColumnarValue, ScalarFunctionArgs, Signature, Volatility};

use crate::function::Function;

const NAME: &str = "if";

/// MySQL-compatible IF function: IF(condition, true_value, false_value)
///
/// Returns true_value if condition is TRUE (not NULL and not 0),
/// otherwise returns false_value.
///
/// MySQL truthy rules:
/// - NULL -> false
/// - 0 (numeric zero) -> false
/// - Any non-zero numeric -> true
/// - Boolean true/false -> use directly
#[derive(Clone, Debug)]
pub struct IfFunction {
signature: Signature,
}

impl Default for IfFunction {
fn default() -> Self {
Self {
signature: Signature::any(3, Volatility::Immutable),
}
}
}

impl Display for IfFunction {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", NAME.to_ascii_uppercase())
}
}

impl Function for IfFunction {
fn name(&self) -> &str {
NAME
}

fn return_type(&self, input_types: &[DataType]) -> datafusion_common::Result<DataType> {
// Return the common type of true_value and false_value (args[1] and args[2])
if input_types.len() < 3 {
return Err(DataFusionError::Plan(format!(
"{} requires 3 arguments, got {}",
NAME,
input_types.len()
)));
}
let true_type = &input_types[1];
let false_type = &input_types[2];

// Use comparison_coercion to find common type
comparison_coercion(true_type, false_type).ok_or_else(|| {
DataFusionError::Plan(format!(
"Cannot find common type for IF function between {:?} and {:?}",
true_type, false_type
))
})
}

fn signature(&self) -> &Signature {
&self.signature
}

fn invoke_with_args(
&self,
args: ScalarFunctionArgs,
) -> datafusion_common::Result<ColumnarValue> {
if args.args.len() != 3 {
return Err(DataFusionError::Plan(format!(
"{} requires exactly 3 arguments, got {}",
NAME,
args.args.len()
)));
}

let condition = &args.args[0];
let true_value = &args.args[1];
let false_value = &args.args[2];

// Convert condition to boolean array using MySQL truthy rules
let bool_array = to_boolean_array(condition, args.number_rows)?;

// Convert true and false values to arrays
let true_array = true_value.to_array(args.number_rows)?;
let false_array = false_value.to_array(args.number_rows)?;

// Use zip to select values based on condition
// zip expects &dyn Datum, and ArrayRef (Arc<dyn Array>) implements Datum
let result = zip(&bool_array, &true_array, &false_array)?;
Ok(ColumnarValue::Array(result))
}
}

/// Convert a ColumnarValue to a BooleanArray using MySQL truthy rules:
/// - NULL -> false
/// - 0 (any numeric zero) -> false
/// - Non-zero numeric -> true
/// - Boolean -> use directly
fn to_boolean_array(
value: &ColumnarValue,
num_rows: usize,
) -> datafusion_common::Result<BooleanArray> {
let array = value.to_array(num_rows)?;
array_to_bool(array)
}

/// Convert an integer PrimitiveArray to BooleanArray using MySQL truthy rules:
/// NULL -> false, 0 -> false, non-zero -> true
fn int_array_to_bool<T>(array: &PrimitiveArray<T>) -> BooleanArray
where
T: ArrowPrimitiveType,
T::Native: ArrowNativeTypeOp,
{
BooleanArray::from_iter(
array
.iter()
.map(|opt| Some(opt.is_some_and(|v| !v.is_zero()))),
)
}

/// Convert a float PrimitiveArray to BooleanArray using MySQL truthy rules:
/// NULL -> false, 0 (including -0.0) -> false, NaN -> true, other non-zero -> true
fn float_array_to_bool<T>(array: &PrimitiveArray<T>) -> BooleanArray
where
T: ArrowPrimitiveType,
T::Native: ArrowNativeTypeOp + num_traits::Float,
{
use num_traits::Float;
BooleanArray::from_iter(
array
.iter()
.map(|opt| Some(opt.is_some_and(|v| v.is_nan() || !v.is_zero()))),
)
}

/// Convert an Array to BooleanArray using MySQL truthy rules
fn array_to_bool(array: ArrayRef) -> datafusion_common::Result<BooleanArray> {
use arrow::datatypes::*;

match array.data_type() {
DataType::Boolean => {
let bool_array = array.as_boolean();
Ok(BooleanArray::from_iter(
bool_array.iter().map(|opt| Some(opt.unwrap_or(false))),
))
}
DataType::Int8 => Ok(int_array_to_bool(array.as_primitive::<Int8Type>())),
DataType::Int16 => Ok(int_array_to_bool(array.as_primitive::<Int16Type>())),
DataType::Int32 => Ok(int_array_to_bool(array.as_primitive::<Int32Type>())),
DataType::Int64 => Ok(int_array_to_bool(array.as_primitive::<Int64Type>())),
DataType::UInt8 => Ok(int_array_to_bool(array.as_primitive::<UInt8Type>())),
DataType::UInt16 => Ok(int_array_to_bool(array.as_primitive::<UInt16Type>())),
DataType::UInt32 => Ok(int_array_to_bool(array.as_primitive::<UInt32Type>())),
DataType::UInt64 => Ok(int_array_to_bool(array.as_primitive::<UInt64Type>())),
// Float16 needs special handling since half::f16 doesn't implement num_traits::Float
DataType::Float16 => {
let typed_array = array.as_primitive::<Float16Type>();
Ok(BooleanArray::from_iter(typed_array.iter().map(|opt| {
Some(opt.is_some_and(|v| {
let f = v.to_f32();
f.is_nan() || !f.is_zero()
}))
})))
}
DataType::Float32 => Ok(float_array_to_bool(array.as_primitive::<Float32Type>())),
DataType::Float64 => Ok(float_array_to_bool(array.as_primitive::<Float64Type>())),
// Null type is always false.
// Note: NullArray::is_null() returns false (physical null), so we must handle it explicitly.
// See: https://github.com/apache/arrow-rs/issues/4840
DataType::Null => Ok(BooleanArray::from(vec![false; array.len()])),
// For other types, treat non-null as true
_ => {
let len = array.len();
Ok(BooleanArray::from_iter(
(0..len).map(|i| Some(!array.is_null(i))),
))
}
}
}
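A self-contained sketch of the MySQL truthiness rules the converters above implement, collapsed to Option<f64> for brevity:

    fn mysql_truthy(value: Option<f64>) -> bool {
        match value {
            None => false,                 // NULL -> false
            Some(v) if v.is_nan() => true, // NaN counts as non-zero -> true
            Some(v) => v != 0.0,           // 0.0 and -0.0 -> false, anything else -> true
        }
    }

    fn main() {
        assert!(!mysql_truthy(None));
        assert!(!mysql_truthy(Some(0.0)));
        assert!(!mysql_truthy(Some(-0.0)));
        assert!(mysql_truthy(Some(f64::NAN)));
        assert!(mysql_truthy(Some(2.5)));
    }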
#[cfg(test)]
mod tests {
use std::sync::Arc;

use arrow_schema::Field;
use datafusion_common::ScalarValue;
use datafusion_common::arrow::array::{AsArray, Int32Array, StringArray};

use super::*;

#[test]
fn test_if_function_basic() {
let if_func = IfFunction::default();
assert_eq!("if", if_func.name());

// Test IF(true, 'yes', 'no') -> 'yes'
let result = if_func
.invoke_with_args(ScalarFunctionArgs {
args: vec![
ColumnarValue::Scalar(ScalarValue::Boolean(Some(true))),
ColumnarValue::Scalar(ScalarValue::Utf8(Some("yes".to_string()))),
ColumnarValue::Scalar(ScalarValue::Utf8(Some("no".to_string()))),
],
arg_fields: vec![],
number_rows: 1,
return_field: Arc::new(Field::new("", DataType::Utf8, true)),
config_options: Arc::new(Default::default()),
})
.unwrap();

if let ColumnarValue::Array(arr) = result {
let str_arr = arr.as_string::<i32>();
assert_eq!(str_arr.value(0), "yes");
} else {
panic!("Expected Array result");
}
}

#[test]
fn test_if_function_false() {
let if_func = IfFunction::default();

// Test IF(false, 'yes', 'no') -> 'no'
let result = if_func
.invoke_with_args(ScalarFunctionArgs {
args: vec![
ColumnarValue::Scalar(ScalarValue::Boolean(Some(false))),
ColumnarValue::Scalar(ScalarValue::Utf8(Some("yes".to_string()))),
ColumnarValue::Scalar(ScalarValue::Utf8(Some("no".to_string()))),
],
arg_fields: vec![],
number_rows: 1,
return_field: Arc::new(Field::new("", DataType::Utf8, true)),
config_options: Arc::new(Default::default()),
})
.unwrap();

if let ColumnarValue::Array(arr) = result {
let str_arr = arr.as_string::<i32>();
assert_eq!(str_arr.value(0), "no");
} else {
panic!("Expected Array result");
}
}

#[test]
fn test_if_function_null_is_false() {
let if_func = IfFunction::default();

// Test IF(NULL, 'yes', 'no') -> 'no' (NULL is treated as false)
// Using Boolean(None) - typed null
let result = if_func
.invoke_with_args(ScalarFunctionArgs {
args: vec![
ColumnarValue::Scalar(ScalarValue::Boolean(None)),
ColumnarValue::Scalar(ScalarValue::Utf8(Some("yes".to_string()))),
ColumnarValue::Scalar(ScalarValue::Utf8(Some("no".to_string()))),
],
arg_fields: vec![],
number_rows: 1,
return_field: Arc::new(Field::new("", DataType::Utf8, true)),
config_options: Arc::new(Default::default()),
})
.unwrap();

if let ColumnarValue::Array(arr) = result {
let str_arr = arr.as_string::<i32>();
assert_eq!(str_arr.value(0), "no");
} else {
panic!("Expected Array result");
}

// Test IF(NULL, 'yes', 'no') -> 'no' using ScalarValue::Null (untyped null from SQL NULL literal)
let result = if_func
.invoke_with_args(ScalarFunctionArgs {
args: vec![
ColumnarValue::Scalar(ScalarValue::Null),
ColumnarValue::Scalar(ScalarValue::Utf8(Some("yes".to_string()))),
ColumnarValue::Scalar(ScalarValue::Utf8(Some("no".to_string()))),
],
arg_fields: vec![],
number_rows: 1,
return_field: Arc::new(Field::new("", DataType::Utf8, true)),
config_options: Arc::new(Default::default()),
})
.unwrap();

if let ColumnarValue::Array(arr) = result {
let str_arr = arr.as_string::<i32>();
assert_eq!(str_arr.value(0), "no");
} else {
panic!("Expected Array result");
}
}

#[test]
fn test_if_function_numeric_truthy() {
let if_func = IfFunction::default();

// Test IF(1, 'yes', 'no') -> 'yes' (non-zero is true)
let result = if_func
.invoke_with_args(ScalarFunctionArgs {
args: vec![
ColumnarValue::Scalar(ScalarValue::Int32(Some(1))),
ColumnarValue::Scalar(ScalarValue::Utf8(Some("yes".to_string()))),
ColumnarValue::Scalar(ScalarValue::Utf8(Some("no".to_string()))),
],
arg_fields: vec![],
number_rows: 1,
return_field: Arc::new(Field::new("", DataType::Utf8, true)),
config_options: Arc::new(Default::default()),
})
.unwrap();

if let ColumnarValue::Array(arr) = result {
let str_arr = arr.as_string::<i32>();
assert_eq!(str_arr.value(0), "yes");
} else {
panic!("Expected Array result");
}

// Test IF(0, 'yes', 'no') -> 'no' (zero is false)
let result = if_func
.invoke_with_args(ScalarFunctionArgs {
args: vec![
ColumnarValue::Scalar(ScalarValue::Int32(Some(0))),
ColumnarValue::Scalar(ScalarValue::Utf8(Some("yes".to_string()))),
ColumnarValue::Scalar(ScalarValue::Utf8(Some("no".to_string()))),
],
arg_fields: vec![],
number_rows: 1,
return_field: Arc::new(Field::new("", DataType::Utf8, true)),
config_options: Arc::new(Default::default()),
})
.unwrap();

if let ColumnarValue::Array(arr) = result {
let str_arr = arr.as_string::<i32>();
assert_eq!(str_arr.value(0), "no");
} else {
panic!("Expected Array result");
}
}

#[test]
fn test_if_function_with_arrays() {
let if_func = IfFunction::default();

// Test with array condition
let condition = Int32Array::from(vec![Some(1), Some(0), None, Some(5)]);
let true_val = StringArray::from(vec!["yes", "yes", "yes", "yes"]);
let false_val = StringArray::from(vec!["no", "no", "no", "no"]);

let result = if_func
.invoke_with_args(ScalarFunctionArgs {
args: vec![
ColumnarValue::Array(Arc::new(condition)),
ColumnarValue::Array(Arc::new(true_val)),
ColumnarValue::Array(Arc::new(false_val)),
],
arg_fields: vec![],
number_rows: 4,
return_field: Arc::new(Field::new("", DataType::Utf8, true)),
config_options: Arc::new(Default::default()),
})
.unwrap();

if let ColumnarValue::Array(arr) = result {
let str_arr = arr.as_string::<i32>();
|
||||
assert_eq!(str_arr.value(0), "yes"); // 1 is true
|
||||
assert_eq!(str_arr.value(1), "no"); // 0 is false
|
||||
assert_eq!(str_arr.value(2), "no"); // NULL is false
|
||||
assert_eq!(str_arr.value(3), "yes"); // 5 is true
|
||||
} else {
|
||||
panic!("Expected Array result");
|
||||
}
|
||||
}
|
||||
}
|
||||
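The explicit DataType::Null arm above is easy to miss, so here is a minimal sketch of the arrow-rs behavior it works around (an illustration assuming the post-#4840 semantics, with the arrow crate available):

use arrow::array::{Array, NullArray};

fn main() {
    // Every value of a NullArray is logically null, but no validity buffer
    // is stored, so *physical* null checks report nothing.
    let a = NullArray::new(3);
    assert_eq!(a.null_count(), 0); // physical null count is zero
    assert!(!a.is_null(0)); // hence a generic `!array.is_null(i)` would yield true
}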
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use std::fmt::Display;
use std::sync::Arc;

use datafusion_common::arrow::array::{Array, AsArray, BooleanBuilder};

@@ -17,7 +17,7 @@ use std::sync::Arc;
use common_catalog::consts::{
    DEFAULT_PRIVATE_SCHEMA_NAME, INFORMATION_SCHEMA_NAME, PG_CATALOG_NAME,
};
use datafusion::arrow::array::{ArrayRef, StringArray, as_boolean_array};
use datafusion::arrow::array::{ArrayRef, StringArray, StringBuilder, as_boolean_array};
use datafusion::catalog::TableFunction;
use datafusion::common::ScalarValue;
use datafusion::common::utils::SingleRowListArrayBuilder;
@@ -34,10 +34,15 @@ const CURRENT_SCHEMA_FUNCTION_NAME: &str = "current_schema";
const CURRENT_SCHEMAS_FUNCTION_NAME: &str = "current_schemas";
const SESSION_USER_FUNCTION_NAME: &str = "session_user";
const CURRENT_DATABASE_FUNCTION_NAME: &str = "current_database";
const OBJ_DESCRIPTION_FUNCTION_NAME: &str = "obj_description";
const COL_DESCRIPTION_FUNCTION_NAME: &str = "col_description";
const SHOBJ_DESCRIPTION_FUNCTION_NAME: &str = "shobj_description";
const PG_MY_TEMP_SCHEMA_FUNCTION_NAME: &str = "pg_my_temp_schema";

define_nullary_udf!(CurrentSchemaFunction);
define_nullary_udf!(SessionUserFunction);
define_nullary_udf!(CurrentDatabaseFunction);
define_nullary_udf!(PgMyTempSchemaFunction);

impl Function for CurrentDatabaseFunction {
    fn name(&self) -> &str {
@@ -173,6 +178,175 @@ impl Function for CurrentSchemasFunction {
    }
}

/// PostgreSQL obj_description - returns NULL for compatibility
#[derive(Display, Debug, Clone)]
#[display("{}", self.name())]
pub(super) struct ObjDescriptionFunction {
    signature: Signature,
}

impl ObjDescriptionFunction {
    pub fn new() -> Self {
        Self {
            signature: Signature::one_of(
                vec![
                    TypeSignature::Exact(vec![DataType::Int64, DataType::Utf8]),
                    TypeSignature::Exact(vec![DataType::UInt32, DataType::Utf8]),
                    TypeSignature::Exact(vec![DataType::Int64]),
                    TypeSignature::Exact(vec![DataType::UInt32]),
                ],
                Volatility::Stable,
            ),
        }
    }
}

impl Function for ObjDescriptionFunction {
    fn name(&self) -> &str {
        OBJ_DESCRIPTION_FUNCTION_NAME
    }

    fn return_type(&self, _: &[DataType]) -> datafusion_common::Result<DataType> {
        Ok(DataType::Utf8)
    }

    fn signature(&self) -> &Signature {
        &self.signature
    }

    fn invoke_with_args(
        &self,
        args: ScalarFunctionArgs,
    ) -> datafusion_common::Result<ColumnarValue> {
        let num_rows = args.number_rows;
        let mut builder = StringBuilder::with_capacity(num_rows, 0);
        for _ in 0..num_rows {
            builder.append_null();
        }
        Ok(ColumnarValue::Array(Arc::new(builder.finish())))
    }
}

/// PostgreSQL col_description - returns NULL for compatibility
#[derive(Display, Debug, Clone)]
#[display("{}", self.name())]
pub(super) struct ColDescriptionFunction {
    signature: Signature,
}

impl ColDescriptionFunction {
    pub fn new() -> Self {
        Self {
            signature: Signature::one_of(
                vec![
                    TypeSignature::Exact(vec![DataType::Int64, DataType::Int32]),
                    TypeSignature::Exact(vec![DataType::UInt32, DataType::Int32]),
                    TypeSignature::Exact(vec![DataType::Int64, DataType::Int64]),
                    TypeSignature::Exact(vec![DataType::UInt32, DataType::Int64]),
                ],
                Volatility::Stable,
            ),
        }
    }
}

impl Function for ColDescriptionFunction {
    fn name(&self) -> &str {
        COL_DESCRIPTION_FUNCTION_NAME
    }

    fn return_type(&self, _: &[DataType]) -> datafusion_common::Result<DataType> {
        Ok(DataType::Utf8)
    }

    fn signature(&self) -> &Signature {
        &self.signature
    }

    fn invoke_with_args(
        &self,
        args: ScalarFunctionArgs,
    ) -> datafusion_common::Result<ColumnarValue> {
        let num_rows = args.number_rows;
        let mut builder = StringBuilder::with_capacity(num_rows, 0);
        for _ in 0..num_rows {
            builder.append_null();
        }
        Ok(ColumnarValue::Array(Arc::new(builder.finish())))
    }
}

/// PostgreSQL shobj_description - returns NULL for compatibility
#[derive(Display, Debug, Clone)]
#[display("{}", self.name())]
pub(super) struct ShobjDescriptionFunction {
    signature: Signature,
}

impl ShobjDescriptionFunction {
    pub fn new() -> Self {
        Self {
            signature: Signature::one_of(
                vec![
                    TypeSignature::Exact(vec![DataType::Int64, DataType::Utf8]),
                    TypeSignature::Exact(vec![DataType::UInt64, DataType::Utf8]),
                    TypeSignature::Exact(vec![DataType::Int32, DataType::Utf8]),
                    TypeSignature::Exact(vec![DataType::UInt32, DataType::Utf8]),
                ],
                Volatility::Stable,
            ),
        }
    }
}

impl Function for ShobjDescriptionFunction {
    fn name(&self) -> &str {
        SHOBJ_DESCRIPTION_FUNCTION_NAME
    }

    fn return_type(&self, _: &[DataType]) -> datafusion_common::Result<DataType> {
        Ok(DataType::Utf8)
    }

    fn signature(&self) -> &Signature {
        &self.signature
    }

    fn invoke_with_args(
        &self,
        args: ScalarFunctionArgs,
    ) -> datafusion_common::Result<ColumnarValue> {
        let num_rows = args.number_rows;
        let mut builder = StringBuilder::with_capacity(num_rows, 0);
        for _ in 0..num_rows {
            builder.append_null();
        }
        Ok(ColumnarValue::Array(Arc::new(builder.finish())))
    }
}

/// PostgreSQL pg_my_temp_schema - returns 0 (no temp schema) for compatibility
impl Function for PgMyTempSchemaFunction {
    fn name(&self) -> &str {
        PG_MY_TEMP_SCHEMA_FUNCTION_NAME
    }

    fn return_type(&self, _: &[DataType]) -> datafusion_common::Result<DataType> {
        Ok(DataType::UInt32)
    }

    fn signature(&self) -> &Signature {
        &self.signature
    }

    fn invoke_with_args(
        &self,
        _args: ScalarFunctionArgs,
    ) -> datafusion_common::Result<ColumnarValue> {
        Ok(ColumnarValue::Scalar(ScalarValue::UInt32(Some(0))))
    }
}

pub(super) struct PGCatalogFunction;

impl PGCatalogFunction {
@@ -212,5 +386,100 @@ impl PGCatalogFunction {
        registry.register(pg_catalog::create_pg_total_relation_size_udf());
        registry.register(pg_catalog::create_pg_stat_get_numscans());
        registry.register(pg_catalog::create_pg_get_constraintdef());
        registry.register(pg_catalog::create_pg_get_partition_ancestors_udf());
        registry.register(pg_catalog::quote_ident_udf::create_quote_ident_udf());
        registry.register(pg_catalog::quote_ident_udf::create_parse_ident_udf());
        registry.register_scalar(ObjDescriptionFunction::new());
        registry.register_scalar(ColDescriptionFunction::new());
        registry.register_scalar(ShobjDescriptionFunction::new());
        registry.register_scalar(PgMyTempSchemaFunction::default());
    }
}

#[cfg(test)]
mod tests {
    use std::sync::Arc;

    use arrow_schema::Field;
    use datafusion::arrow::array::Array;
    use datafusion_common::ScalarValue;
    use datafusion_expr::ColumnarValue;

    use super::*;

    fn create_test_args(args: Vec<ColumnarValue>, number_rows: usize) -> ScalarFunctionArgs {
        ScalarFunctionArgs {
            args,
            arg_fields: vec![],
            number_rows,
            return_field: Arc::new(Field::new("result", DataType::Utf8, true)),
            config_options: Arc::new(Default::default()),
        }
    }

    #[test]
    fn test_obj_description_function() {
        let func = ObjDescriptionFunction::new();
        assert_eq!("obj_description", func.name());
        assert_eq!(DataType::Utf8, func.return_type(&[]).unwrap());

        let args = create_test_args(
            vec![
                ColumnarValue::Scalar(ScalarValue::Int64(Some(1234))),
                ColumnarValue::Scalar(ScalarValue::Utf8(Some("pg_class".to_string()))),
            ],
            1,
        );
        let result = func.invoke_with_args(args).unwrap();
        if let ColumnarValue::Array(arr) = result {
            assert_eq!(1, arr.len());
            assert!(arr.is_null(0));
        } else {
            panic!("Expected Array result");
        }
    }

    #[test]
    fn test_col_description_function() {
        let func = ColDescriptionFunction::new();
        assert_eq!("col_description", func.name());
        assert_eq!(DataType::Utf8, func.return_type(&[]).unwrap());

        let args = create_test_args(
            vec![
                ColumnarValue::Scalar(ScalarValue::Int64(Some(1234))),
                ColumnarValue::Scalar(ScalarValue::Int64(Some(1))),
            ],
            1,
        );
        let result = func.invoke_with_args(args).unwrap();
        if let ColumnarValue::Array(arr) = result {
            assert_eq!(1, arr.len());
            assert!(arr.is_null(0));
        } else {
            panic!("Expected Array result");
        }
    }

    #[test]
    fn test_shobj_description_function() {
        let func = ShobjDescriptionFunction::new();
        assert_eq!("shobj_description", func.name());
        assert_eq!(DataType::Utf8, func.return_type(&[]).unwrap());

        let args = create_test_args(
            vec![
                ColumnarValue::Scalar(ScalarValue::Int64(Some(1))),
                ColumnarValue::Scalar(ScalarValue::Utf8(Some("pg_database".to_string()))),
            ],
            1,
        );
        let result = func.invoke_with_args(args).unwrap();
        if let ColumnarValue::Array(arr) = result {
            assert_eq!(1, arr.len());
            assert!(arr.is_null(0));
        } else {
            panic!("Expected Array result");
        }
    }
}
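The three description stubs above share an identical invoke_with_args body. A possible shared helper that each of them could delegate to (hypothetical, not part of this change):

use std::sync::Arc;

use datafusion::arrow::array::StringBuilder;
use datafusion_expr::ColumnarValue;

/// Builds a Utf8 array of `num_rows` NULLs, matching what the
/// obj/col/shobj_description stubs return for PostgreSQL compatibility.
fn all_null_utf8(num_rows: usize) -> ColumnarValue {
    let mut builder = StringBuilder::with_capacity(num_rows, 0);
    for _ in 0..num_rows {
        builder.append_null();
    }
    ColumnarValue::Array(Arc::new(builder.finish()))
}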
@@ -12,6 +12,7 @@ api.workspace = true
arrow-flight.workspace = true
bytes.workspace = true
common-base.workspace = true
common-config.workspace = true
common-error.workspace = true
common-macro.workspace = true
common-recordbatch.workspace = true
@@ -23,7 +24,6 @@ datatypes.workspace = true
flatbuffers = "25.2"
hyper.workspace = true
lazy_static.workspace = true
notify.workspace = true
prost.workspace = true
serde.workspace = true
serde_json.workspace = true

@@ -38,11 +38,10 @@ pub enum Error {
        location: Location,
    },

    #[snafu(display("Failed to watch config file path: {}", path))]
    #[snafu(display("Failed to watch config file"))]
    FileWatch {
        path: String,
        #[snafu(source)]
        error: notify::Error,
        source: common_config::error::Error,
        #[snafu(implicit)]
        location: Location,
    },

@@ -46,13 +46,16 @@ pub struct DoPutResponse {
    request_id: i64,
    /// The number of successfully ingested rows.
    affected_rows: AffectedRows,
    /// The elapsed time in seconds for handling the bulk insert.
    elapsed_secs: f64,
}

impl DoPutResponse {
    pub fn new(request_id: i64, affected_rows: AffectedRows) -> Self {
    pub fn new(request_id: i64, affected_rows: AffectedRows, elapsed_secs: f64) -> Self {
        Self {
            request_id,
            affected_rows,
            elapsed_secs,
        }
    }

@@ -63,6 +66,10 @@ impl DoPutResponse {
    pub fn affected_rows(&self) -> AffectedRows {
        self.affected_rows
    }

    pub fn elapsed_secs(&self) -> f64 {
        self.elapsed_secs
    }
}

impl TryFrom<PutResult> for DoPutResponse {
@@ -86,8 +93,11 @@ mod tests {

    #[test]
    fn test_serde_do_put_response() {
        let x = DoPutResponse::new(42, 88);
        let x = DoPutResponse::new(42, 88, 0.123);
        let serialized = serde_json::to_string(&x).unwrap();
        assert_eq!(serialized, r#"{"request_id":42,"affected_rows":88}"#);
        assert_eq!(
            serialized,
            r#"{"request_id":42,"affected_rows":88,"elapsed_secs":0.123}"#
        );
    }
}

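A sketch of the intended call site for the new field (hypothetical names; the actual producer is the server-side bulk-insert handler):

use std::time::Instant;

let start = Instant::now();
let affected_rows = handle_bulk_insert(request)?; // hypothetical handler
let response = DoPutResponse::new(request_id, affected_rows, start.elapsed().as_secs_f64());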
@@ -15,11 +15,10 @@
use std::path::Path;
use std::result::Result as StdResult;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::mpsc::channel;
use std::sync::{Arc, RwLock};

use common_config::file_watcher::{FileWatcherBuilder, FileWatcherConfig};
use common_telemetry::{error, info};
use notify::{EventKind, RecursiveMode, Watcher};
use snafu::ResultExt;

use crate::error::{FileWatchSnafu, Result};
@@ -119,45 +118,28 @@ where
        return Ok(());
    }

    let watch_paths: Vec<_> = tls_config
        .get_tls_option()
        .watch_paths()
        .iter()
        .map(|p| p.to_path_buf())
        .collect();

    let tls_config_for_watcher = tls_config.clone();

    let (tx, rx) = channel::<notify::Result<notify::Event>>();
    let mut watcher = notify::recommended_watcher(tx).context(FileWatchSnafu { path: "<none>" })?;

    // Watch all paths returned by the TlsConfigLoader
    for path in tls_config.get_tls_option().watch_paths() {
        watcher
            .watch(path, RecursiveMode::NonRecursive)
            .with_context(|_| FileWatchSnafu {
                path: path.display().to_string(),
            })?;
    }

    info!("Spawning background task for watching TLS cert/key file changes");
    std::thread::spawn(move || {
        let _watcher = watcher;
        loop {
            match rx.recv() {
                Ok(Ok(event)) => {
                    if let EventKind::Modify(_) | EventKind::Create(_) = event.kind {
                        info!("Detected TLS cert/key file change: {:?}", event);
                        if let Err(err) = tls_config_for_watcher.reload() {
                            error!("Failed to reload TLS config: {}", err);
                        } else {
                            info!("Reloaded TLS cert/key file successfully.");
                            on_reload();
                        }
                    }
                }
                Ok(Err(err)) => {
                    error!("Failed to watch TLS cert/key file: {}", err);
                }
                Err(err) => {
                    error!("TLS cert/key file watcher channel closed: {}", err);
                }
    FileWatcherBuilder::new()
        .watch_paths(&watch_paths)
        .context(FileWatchSnafu)?
        .config(FileWatcherConfig::new())
        .spawn(move || {
            if let Err(err) = tls_config_for_watcher.reload() {
                error!("Failed to reload TLS config: {}", err);
            } else {
                info!("Reloaded TLS cert/key file successfully.");
                on_reload();
            }
            }
        }
    });
        })
        .context(FileWatchSnafu)?;

    Ok(())
}

20
src/common/memory-manager/Cargo.toml
Normal file
@@ -0,0 +1,20 @@
[package]
name = "common-memory-manager"
version.workspace = true
edition.workspace = true
license.workspace = true

[lints]
workspace = true

[dependencies]
common-error = { workspace = true }
common-macro = { workspace = true }
common-telemetry = { workspace = true }
humantime = { workspace = true }
serde = { workspace = true }
snafu = { workspace = true }
tokio = { workspace = true, features = ["sync"] }

[dev-dependencies]
tokio = { workspace = true, features = ["rt", "macros"] }
53
src/common/memory-manager/src/error.rs
Normal file
@@ -0,0 +1,53 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::any::Any;

use common_error::ext::ErrorExt;
use common_error::status_code::StatusCode;
use common_macro::stack_trace_debug;
use snafu::Snafu;

pub type Result<T> = std::result::Result<T, Error>;

#[derive(Snafu)]
#[snafu(visibility(pub))]
#[stack_trace_debug]
pub enum Error {
    #[snafu(display(
        "Memory limit exceeded: requested {requested_bytes} bytes, limit {limit_bytes} bytes"
    ))]
    MemoryLimitExceeded {
        requested_bytes: u64,
        limit_bytes: u64,
    },

    #[snafu(display("Memory semaphore unexpectedly closed"))]
    MemorySemaphoreClosed,
}

impl ErrorExt for Error {
    fn status_code(&self) -> StatusCode {
        use Error::*;

        match self {
            MemoryLimitExceeded { .. } => StatusCode::RuntimeResourcesExhausted,
            MemorySemaphoreClosed => StatusCode::Unexpected,
        }
    }

    fn as_any(&self) -> &dyn Any {
        self
    }
}
138
src/common/memory-manager/src/guard.rs
Normal file
@@ -0,0 +1,138 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::{fmt, mem};

use common_telemetry::debug;
use tokio::sync::{OwnedSemaphorePermit, TryAcquireError};

use crate::manager::{MemoryMetrics, MemoryQuota, bytes_to_permits, permits_to_bytes};

/// Guard representing a slice of reserved memory.
pub struct MemoryGuard<M: MemoryMetrics> {
    pub(crate) state: GuardState<M>,
}

pub(crate) enum GuardState<M: MemoryMetrics> {
    Unlimited,
    Limited {
        permit: OwnedSemaphorePermit,
        quota: MemoryQuota<M>,
    },
}

impl<M: MemoryMetrics> MemoryGuard<M> {
    pub(crate) fn unlimited() -> Self {
        Self {
            state: GuardState::Unlimited,
        }
    }

    pub(crate) fn limited(permit: OwnedSemaphorePermit, quota: MemoryQuota<M>) -> Self {
        Self {
            state: GuardState::Limited { permit, quota },
        }
    }

    /// Returns granted quota in bytes.
    pub fn granted_bytes(&self) -> u64 {
        match &self.state {
            GuardState::Unlimited => 0,
            GuardState::Limited { permit, .. } => permits_to_bytes(permit.num_permits() as u32),
        }
    }

    /// Tries to allocate additional memory during task execution.
    ///
    /// On success, merges the new memory into this guard and returns true.
    /// On failure, returns false and leaves this guard unchanged.
    pub fn request_additional(&mut self, bytes: u64) -> bool {
        match &mut self.state {
            GuardState::Unlimited => true,
            GuardState::Limited { permit, quota } => {
                if bytes == 0 {
                    return true;
                }

                let additional_permits = bytes_to_permits(bytes);

                match quota
                    .semaphore
                    .clone()
                    .try_acquire_many_owned(additional_permits)
                {
                    Ok(additional_permit) => {
                        permit.merge(additional_permit);
                        quota.update_in_use_metric();
                        debug!("Allocated additional {} bytes", bytes);
                        true
                    }
                    Err(TryAcquireError::NoPermits) | Err(TryAcquireError::Closed) => {
                        quota.metrics.inc_rejected("request_additional");
                        false
                    }
                }
            }
        }
    }

    /// Releases a portion of granted memory back to the pool early,
    /// before the guard is dropped.
    ///
    /// Returns true if the release succeeds or is a no-op; false if the request exceeds granted.
    pub fn early_release_partial(&mut self, bytes: u64) -> bool {
        match &mut self.state {
            GuardState::Unlimited => true,
            GuardState::Limited { permit, quota } => {
                if bytes == 0 {
                    return true;
                }

                let release_permits = bytes_to_permits(bytes);

                match permit.split(release_permits as usize) {
                    Some(released_permit) => {
                        let released_bytes = permits_to_bytes(released_permit.num_permits() as u32);
                        drop(released_permit);
                        quota.update_in_use_metric();
                        debug!("Early released {} bytes from memory guard", released_bytes);
                        true
                    }
                    None => false,
                }
            }
        }
    }
}

impl<M: MemoryMetrics> Drop for MemoryGuard<M> {
    fn drop(&mut self) {
        if let GuardState::Limited { permit, quota } =
            mem::replace(&mut self.state, GuardState::Unlimited)
        {
            let bytes = permits_to_bytes(permit.num_permits() as u32);
            drop(permit);
            quota.update_in_use_metric();
            debug!("Released memory: {} bytes", bytes);
        }
    }
}

impl<M: MemoryMetrics> fmt::Debug for MemoryGuard<M> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("MemoryGuard")
            .field("granted_bytes", &self.granted_bytes())
            .finish()
    }
}
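The guard's grow/shrink operations map directly onto tokio's owned-permit API: request_additional acquires a second OwnedSemaphorePermit and merges it into the held one, while early_release_partial splits permits off and drops them, so a guard always releases exactly what it still holds. A condensed sketch of that lifecycle, assuming a `manager` built as in the tests below:

let mut guard = manager.try_acquire(4 * PERMIT_GRANULARITY_BYTES).unwrap();
guard.request_additional(2 * PERMIT_GRANULARITY_BYTES); // merge: 6 MiB held
guard.early_release_partial(PERMIT_GRANULARITY_BYTES); // split + drop: 5 MiB held
drop(guard); // the remaining 5 MiB return to the pool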
47
src/common/memory-manager/src/lib.rs
Normal file
@@ -0,0 +1,47 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Generic memory management for resource-constrained operations.
//!
//! This crate provides a reusable memory quota system based on semaphores,
//! allowing different subsystems (compaction, flush, index build, etc.) to
//! share the same allocation logic while using their own metrics.

mod error;
mod guard;
mod manager;
mod policy;

#[cfg(test)]
mod tests;

pub use error::{Error, Result};
pub use guard::MemoryGuard;
pub use manager::{MemoryManager, MemoryMetrics, PERMIT_GRANULARITY_BYTES};
pub use policy::{DEFAULT_MEMORY_WAIT_TIMEOUT, OnExhaustedPolicy};

/// No-op metrics implementation for testing.
#[derive(Clone, Copy, Debug, Default)]
pub struct NoOpMetrics;

impl MemoryMetrics for NoOpMetrics {
    #[inline(always)]
    fn set_limit(&self, _: i64) {}

    #[inline(always)]
    fn set_in_use(&self, _: i64) {}

    #[inline(always)]
    fn inc_rejected(&self, _: &str) {}
}
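Beyond NoOpMetrics, a subsystem plugs in its own gauges by implementing the trait. A minimal sketch backed by atomics (illustrative only; real call sites would typically wrap Prometheus gauges and counters):

use std::sync::Arc;
use std::sync::atomic::{AtomicI64, AtomicU64, Ordering};

#[derive(Clone, Default)]
pub struct AtomicMetrics {
    limit: Arc<AtomicI64>,
    in_use: Arc<AtomicI64>,
    rejected: Arc<AtomicU64>,
}

impl MemoryMetrics for AtomicMetrics {
    fn set_limit(&self, bytes: i64) {
        self.limit.store(bytes, Ordering::Relaxed);
    }

    fn set_in_use(&self, bytes: i64) {
        self.in_use.store(bytes, Ordering::Relaxed);
    }

    fn inc_rejected(&self, _reason: &str) {
        self.rejected.fetch_add(1, Ordering::Relaxed);
    }
}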
173
src/common/memory-manager/src/manager.rs
Normal file
@@ -0,0 +1,173 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::sync::Arc;

use snafu::ensure;
use tokio::sync::{Semaphore, TryAcquireError};

use crate::error::{MemoryLimitExceededSnafu, MemorySemaphoreClosedSnafu, Result};
use crate::guard::MemoryGuard;

/// Minimum bytes controlled by one semaphore permit.
pub const PERMIT_GRANULARITY_BYTES: u64 = 1 << 20; // 1 MB

/// Trait for recording memory usage metrics.
pub trait MemoryMetrics: Clone + Send + Sync + 'static {
    fn set_limit(&self, bytes: i64);
    fn set_in_use(&self, bytes: i64);
    fn inc_rejected(&self, reason: &str);
}

/// Generic memory manager for quota-controlled operations.
#[derive(Clone)]
pub struct MemoryManager<M: MemoryMetrics> {
    quota: Option<MemoryQuota<M>>,
}

#[derive(Clone)]
pub(crate) struct MemoryQuota<M: MemoryMetrics> {
    pub(crate) semaphore: Arc<Semaphore>,
    pub(crate) limit_permits: u32,
    pub(crate) metrics: M,
}

impl<M: MemoryMetrics> MemoryManager<M> {
    /// Creates a new memory manager with the given limit in bytes.
    /// `limit_bytes = 0` disables the limit.
    pub fn new(limit_bytes: u64, metrics: M) -> Self {
        if limit_bytes == 0 {
            metrics.set_limit(0);
            return Self { quota: None };
        }

        let limit_permits = bytes_to_permits(limit_bytes);
        let limit_aligned_bytes = permits_to_bytes(limit_permits);
        metrics.set_limit(limit_aligned_bytes as i64);

        Self {
            quota: Some(MemoryQuota {
                semaphore: Arc::new(Semaphore::new(limit_permits as usize)),
                limit_permits,
                metrics,
            }),
        }
    }

    /// Returns the configured limit in bytes (0 if unlimited).
    pub fn limit_bytes(&self) -> u64 {
        self.quota
            .as_ref()
            .map(|quota| permits_to_bytes(quota.limit_permits))
            .unwrap_or(0)
    }

    /// Returns currently used bytes.
    pub fn used_bytes(&self) -> u64 {
        self.quota
            .as_ref()
            .map(|quota| permits_to_bytes(quota.used_permits()))
            .unwrap_or(0)
    }

    /// Returns available bytes.
    pub fn available_bytes(&self) -> u64 {
        self.quota
            .as_ref()
            .map(|quota| permits_to_bytes(quota.available_permits_clamped()))
            .unwrap_or(0)
    }

    /// Acquires memory, waiting if necessary until enough is available.
    ///
    /// # Errors
    /// - Returns error if requested bytes exceed the total limit
    /// - Returns error if the semaphore is unexpectedly closed
    pub async fn acquire(&self, bytes: u64) -> Result<MemoryGuard<M>> {
        match &self.quota {
            None => Ok(MemoryGuard::unlimited()),
            Some(quota) => {
                let permits = bytes_to_permits(bytes);

                ensure!(
                    permits <= quota.limit_permits,
                    MemoryLimitExceededSnafu {
                        requested_bytes: bytes,
                        limit_bytes: permits_to_bytes(quota.limit_permits),
                    }
                );

                let permit = quota
                    .semaphore
                    .clone()
                    .acquire_many_owned(permits)
                    .await
                    .map_err(|_| MemorySemaphoreClosedSnafu.build())?;
                quota.update_in_use_metric();
                Ok(MemoryGuard::limited(permit, quota.clone()))
            }
        }
    }

    /// Tries to acquire memory. Returns Some(guard) on success, None if insufficient.
    pub fn try_acquire(&self, bytes: u64) -> Option<MemoryGuard<M>> {
        match &self.quota {
            None => Some(MemoryGuard::unlimited()),
            Some(quota) => {
                let permits = bytes_to_permits(bytes);

                match quota.semaphore.clone().try_acquire_many_owned(permits) {
                    Ok(permit) => {
                        quota.update_in_use_metric();
                        Some(MemoryGuard::limited(permit, quota.clone()))
                    }
                    Err(TryAcquireError::NoPermits) | Err(TryAcquireError::Closed) => {
                        quota.metrics.inc_rejected("try_acquire");
                        None
                    }
                }
            }
        }
    }
}

impl<M: MemoryMetrics> MemoryQuota<M> {
    pub(crate) fn used_permits(&self) -> u32 {
        self.limit_permits
            .saturating_sub(self.available_permits_clamped())
    }

    pub(crate) fn available_permits_clamped(&self) -> u32 {
        self.semaphore
            .available_permits()
            .min(self.limit_permits as usize) as u32
    }

    pub(crate) fn update_in_use_metric(&self) {
        let bytes = permits_to_bytes(self.used_permits());
        self.metrics.set_in_use(bytes as i64);
    }
}

pub(crate) fn bytes_to_permits(bytes: u64) -> u32 {
    bytes
        .saturating_add(PERMIT_GRANULARITY_BYTES - 1)
        .saturating_div(PERMIT_GRANULARITY_BYTES)
        .min(Semaphore::MAX_PERMITS as u64)
        .min(u32::MAX as u64) as u32
}

pub(crate) fn permits_to_bytes(permits: u32) -> u64 {
    (permits as u64).saturating_mul(PERMIT_GRANULARITY_BYTES)
}
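A worked example of the rounding above (these equalities follow directly from the definitions; the helpers are crate-private, so this is illustrative rather than public API): with PERMIT_GRANULARITY_BYTES = 1 MiB, any non-zero request costs at least one full permit, and granted sizes are always MiB-aligned.

assert_eq!(bytes_to_permits(0), 0);
assert_eq!(bytes_to_permits(1), 1); // rounds up to one permit
assert_eq!(bytes_to_permits(PERMIT_GRANULARITY_BYTES), 1);
assert_eq!(bytes_to_permits(PERMIT_GRANULARITY_BYTES + 1), 2);
assert_eq!(permits_to_bytes(2), 2 * PERMIT_GRANULARITY_BYTES);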
83
src/common/memory-manager/src/policy.rs
Normal file
@@ -0,0 +1,83 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::time::Duration;

use humantime::{format_duration, parse_duration};
use serde::{Deserialize, Serialize};

/// Default wait timeout for memory acquisition.
pub const DEFAULT_MEMORY_WAIT_TIMEOUT: Duration = Duration::from_secs(10);

/// Defines how to react when memory cannot be acquired immediately.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum OnExhaustedPolicy {
    /// Wait until enough memory is released, bounded by timeout.
    Wait { timeout: Duration },

    /// Fail immediately if memory is not available.
    Fail,
}

impl Default for OnExhaustedPolicy {
    fn default() -> Self {
        OnExhaustedPolicy::Wait {
            timeout: DEFAULT_MEMORY_WAIT_TIMEOUT,
        }
    }
}

impl Serialize for OnExhaustedPolicy {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        let text = match self {
            OnExhaustedPolicy::Fail => "fail".to_string(),
            OnExhaustedPolicy::Wait { timeout } if *timeout == DEFAULT_MEMORY_WAIT_TIMEOUT => {
                "wait".to_string()
            }
            OnExhaustedPolicy::Wait { timeout } => format!("wait({})", format_duration(*timeout)),
        };
        serializer.serialize_str(&text)
    }
}

impl<'de> Deserialize<'de> for OnExhaustedPolicy {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        let raw = String::deserialize(deserializer)?;
        let lower = raw.to_ascii_lowercase();

        // Accept both "skip" (legacy) and "fail".
        if lower == "skip" || lower == "fail" {
            return Ok(OnExhaustedPolicy::Fail);
        }
        if lower == "wait" {
            return Ok(OnExhaustedPolicy::default());
        }
        if lower.starts_with("wait(") && lower.ends_with(')') {
            let inner = &raw[5..raw.len() - 1];
            let timeout = parse_duration(inner).map_err(serde::de::Error::custom)?;
            return Ok(OnExhaustedPolicy::Wait { timeout });
        }

        Err(serde::de::Error::custom(format!(
            "invalid memory policy: {}, expected wait, wait(<duration>), fail",
            raw
        )))
    }
}
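Round-trip examples for the textual form (a sketch assuming serde_json; any string-based format behaves the same):

use std::time::Duration;

let p: OnExhaustedPolicy = serde_json::from_str("\"wait(30s)\"").unwrap();
assert_eq!(p, OnExhaustedPolicy::Wait { timeout: Duration::from_secs(30) });
assert_eq!(serde_json::to_string(&p).unwrap(), "\"wait(30s)\"");

// The default timeout serializes to the bare keyword, and the legacy
// "skip" spelling still deserializes to Fail.
assert_eq!(serde_json::to_string(&OnExhaustedPolicy::default()).unwrap(), "\"wait\"");
assert_eq!(serde_json::from_str::<OnExhaustedPolicy>("\"skip\"").unwrap(), OnExhaustedPolicy::Fail);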
247
src/common/memory-manager/src/tests.rs
Normal file
@@ -0,0 +1,247 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use tokio::time::{Duration, sleep};

use crate::{MemoryManager, NoOpMetrics, PERMIT_GRANULARITY_BYTES};

#[test]
fn test_try_acquire_unlimited() {
    let manager = MemoryManager::new(0, NoOpMetrics);
    let guard = manager.try_acquire(10 * PERMIT_GRANULARITY_BYTES).unwrap();
    assert_eq!(manager.limit_bytes(), 0);
    assert_eq!(guard.granted_bytes(), 0);
}

#[test]
fn test_try_acquire_limited_success_and_release() {
    let bytes = 2 * PERMIT_GRANULARITY_BYTES;
    let manager = MemoryManager::new(bytes, NoOpMetrics);
    {
        let guard = manager.try_acquire(PERMIT_GRANULARITY_BYTES).unwrap();
        assert_eq!(guard.granted_bytes(), PERMIT_GRANULARITY_BYTES);
        assert_eq!(manager.used_bytes(), PERMIT_GRANULARITY_BYTES);
        drop(guard);
    }
    assert_eq!(manager.used_bytes(), 0);
}

#[test]
fn test_try_acquire_exceeds_limit() {
    let limit = PERMIT_GRANULARITY_BYTES;
    let manager = MemoryManager::new(limit, NoOpMetrics);
    let result = manager.try_acquire(limit + PERMIT_GRANULARITY_BYTES);
    assert!(result.is_none());
}

#[tokio::test(flavor = "current_thread")]
async fn test_acquire_blocks_and_unblocks() {
    let bytes = 2 * PERMIT_GRANULARITY_BYTES;
    let manager = MemoryManager::new(bytes, NoOpMetrics);
    let guard = manager.try_acquire(bytes).unwrap();

    // Spawn a task that will block on acquire()
    let waiter = {
        let manager = manager.clone();
        tokio::spawn(async move {
            // This will block until memory is available
            let _guard = manager.acquire(bytes).await.unwrap();
        })
    };

    sleep(Duration::from_millis(10)).await;
    // Release memory - this should unblock the waiter
    drop(guard);

    // Waiter should complete now
    waiter.await.unwrap();
}

#[test]
fn test_request_additional_success() {
    let limit = 10 * PERMIT_GRANULARITY_BYTES; // 10MB limit
    let manager = MemoryManager::new(limit, NoOpMetrics);

    // Acquire base quota (5MB)
    let base = 5 * PERMIT_GRANULARITY_BYTES;
    let mut guard = manager.try_acquire(base).unwrap();
    assert_eq!(guard.granted_bytes(), base);
    assert_eq!(manager.used_bytes(), base);

    // Request additional memory (3MB) - should succeed and merge
    assert!(guard.request_additional(3 * PERMIT_GRANULARITY_BYTES));
    assert_eq!(guard.granted_bytes(), 8 * PERMIT_GRANULARITY_BYTES);
    assert_eq!(manager.used_bytes(), 8 * PERMIT_GRANULARITY_BYTES);
}

#[test]
fn test_request_additional_exceeds_limit() {
    let limit = 10 * PERMIT_GRANULARITY_BYTES; // 10MB limit
    let manager = MemoryManager::new(limit, NoOpMetrics);

    // Acquire base quota (5MB)
    let base = 5 * PERMIT_GRANULARITY_BYTES;
    let mut guard = manager.try_acquire(base).unwrap();

    // Request additional memory (3MB) - should succeed
    assert!(guard.request_additional(3 * PERMIT_GRANULARITY_BYTES));
    assert_eq!(manager.used_bytes(), 8 * PERMIT_GRANULARITY_BYTES);

    // Request more (3MB) - should fail (would exceed 10MB limit)
    let result = guard.request_additional(3 * PERMIT_GRANULARITY_BYTES);
    assert!(!result);

    // Still at 8MB
    assert_eq!(manager.used_bytes(), 8 * PERMIT_GRANULARITY_BYTES);
    assert_eq!(guard.granted_bytes(), 8 * PERMIT_GRANULARITY_BYTES);
}

#[test]
fn test_request_additional_auto_release_on_guard_drop() {
    let limit = 10 * PERMIT_GRANULARITY_BYTES;
    let manager = MemoryManager::new(limit, NoOpMetrics);

    {
        let mut guard = manager.try_acquire(5 * PERMIT_GRANULARITY_BYTES).unwrap();

        // Request additional - memory is merged into guard
        assert!(guard.request_additional(3 * PERMIT_GRANULARITY_BYTES));
        assert_eq!(manager.used_bytes(), 8 * PERMIT_GRANULARITY_BYTES);

        // When guard drops, all memory (base + additional) is released together
    }

    // After scope, all memory should be released
    assert_eq!(manager.used_bytes(), 0);
}

#[test]
fn test_request_additional_unlimited() {
    let manager = MemoryManager::new(0, NoOpMetrics); // Unlimited
    let mut guard = manager.try_acquire(5 * PERMIT_GRANULARITY_BYTES).unwrap();

    // Should always succeed with unlimited manager
    assert!(guard.request_additional(100 * PERMIT_GRANULARITY_BYTES));
    assert_eq!(guard.granted_bytes(), 0);
    assert_eq!(manager.used_bytes(), 0);
}

#[test]
fn test_request_additional_zero_bytes() {
    let limit = 10 * PERMIT_GRANULARITY_BYTES;
    let manager = MemoryManager::new(limit, NoOpMetrics);

    let mut guard = manager.try_acquire(5 * PERMIT_GRANULARITY_BYTES).unwrap();

    // Request 0 bytes should succeed without affecting anything
    assert!(guard.request_additional(0));
    assert_eq!(guard.granted_bytes(), 5 * PERMIT_GRANULARITY_BYTES);
    assert_eq!(manager.used_bytes(), 5 * PERMIT_GRANULARITY_BYTES);
}

#[test]
fn test_early_release_partial_success() {
    let limit = 10 * PERMIT_GRANULARITY_BYTES;
    let manager = MemoryManager::new(limit, NoOpMetrics);

    let mut guard = manager.try_acquire(8 * PERMIT_GRANULARITY_BYTES).unwrap();
    assert_eq!(manager.used_bytes(), 8 * PERMIT_GRANULARITY_BYTES);

    // Release half
    assert!(guard.early_release_partial(4 * PERMIT_GRANULARITY_BYTES));
    assert_eq!(guard.granted_bytes(), 4 * PERMIT_GRANULARITY_BYTES);
    assert_eq!(manager.used_bytes(), 4 * PERMIT_GRANULARITY_BYTES);

    // Released memory should be available to others
    let _guard2 = manager.try_acquire(4 * PERMIT_GRANULARITY_BYTES).unwrap();
    assert_eq!(manager.used_bytes(), 8 * PERMIT_GRANULARITY_BYTES);
}

#[test]
fn test_early_release_partial_exceeds_granted() {
    let manager = MemoryManager::new(10 * PERMIT_GRANULARITY_BYTES, NoOpMetrics);
    let mut guard = manager.try_acquire(5 * PERMIT_GRANULARITY_BYTES).unwrap();

    // Try to release more than granted - should fail
    assert!(!guard.early_release_partial(10 * PERMIT_GRANULARITY_BYTES));
    assert_eq!(guard.granted_bytes(), 5 * PERMIT_GRANULARITY_BYTES);
    assert_eq!(manager.used_bytes(), 5 * PERMIT_GRANULARITY_BYTES);
}

#[test]
fn test_early_release_partial_unlimited() {
    let manager = MemoryManager::new(0, NoOpMetrics);
    let mut guard = manager.try_acquire(100 * PERMIT_GRANULARITY_BYTES).unwrap();

    // Unlimited guard - release should succeed (no-op)
    assert!(guard.early_release_partial(50 * PERMIT_GRANULARITY_BYTES));
    assert_eq!(guard.granted_bytes(), 0);
}

#[test]
fn test_request_and_early_release_symmetry() {
    let limit = 20 * PERMIT_GRANULARITY_BYTES;
    let manager = MemoryManager::new(limit, NoOpMetrics);

    let mut guard = manager.try_acquire(5 * PERMIT_GRANULARITY_BYTES).unwrap();

    // Request additional
    assert!(guard.request_additional(5 * PERMIT_GRANULARITY_BYTES));
    assert_eq!(guard.granted_bytes(), 10 * PERMIT_GRANULARITY_BYTES);
    assert_eq!(manager.used_bytes(), 10 * PERMIT_GRANULARITY_BYTES);

    // Early release some
    assert!(guard.early_release_partial(3 * PERMIT_GRANULARITY_BYTES));
    assert_eq!(guard.granted_bytes(), 7 * PERMIT_GRANULARITY_BYTES);
    assert_eq!(manager.used_bytes(), 7 * PERMIT_GRANULARITY_BYTES);

    // Request again
    assert!(guard.request_additional(2 * PERMIT_GRANULARITY_BYTES));
    assert_eq!(guard.granted_bytes(), 9 * PERMIT_GRANULARITY_BYTES);
    assert_eq!(manager.used_bytes(), 9 * PERMIT_GRANULARITY_BYTES);

    // Early release again
    assert!(guard.early_release_partial(4 * PERMIT_GRANULARITY_BYTES));
    assert_eq!(guard.granted_bytes(), 5 * PERMIT_GRANULARITY_BYTES);
    assert_eq!(manager.used_bytes(), 5 * PERMIT_GRANULARITY_BYTES);

    drop(guard);
    assert_eq!(manager.used_bytes(), 0);
}

#[test]
fn test_small_allocation_rounds_up() {
    // Test that allocations smaller than PERMIT_GRANULARITY_BYTES
    // round up to 1 permit and can use request_additional()
    let limit = 10 * PERMIT_GRANULARITY_BYTES;
    let manager = MemoryManager::new(limit, NoOpMetrics);

    let mut guard = manager.try_acquire(512 * 1024).unwrap(); // 512KB
    assert_eq!(guard.granted_bytes(), PERMIT_GRANULARITY_BYTES); // Rounds up to 1MB
    assert!(guard.request_additional(2 * PERMIT_GRANULARITY_BYTES)); // Can request more
    assert_eq!(guard.granted_bytes(), 3 * PERMIT_GRANULARITY_BYTES);
}

#[test]
fn test_acquire_zero_bytes_lazy_allocation() {
    // Test that try_acquire(0) returns 0 permits but can request_additional() later
    let manager = MemoryManager::new(10 * PERMIT_GRANULARITY_BYTES, NoOpMetrics);

    let mut guard = manager.try_acquire(0).unwrap();
    assert_eq!(guard.granted_bytes(), 0); // No permits consumed
    assert_eq!(manager.used_bytes(), 0);

    assert!(guard.request_additional(3 * PERMIT_GRANULARITY_BYTES)); // Lazy allocation
    assert_eq!(guard.granted_bytes(), 3 * PERMIT_GRANULARITY_BYTES);
}
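The manager itself only exposes acquire and try_acquire; callers are expected to map OnExhaustedPolicy onto them. A sketch of that glue (hypothetical helper, assuming tokio's timeout):

use common_memory_manager::{MemoryGuard, MemoryManager, MemoryMetrics, OnExhaustedPolicy};

async fn acquire_with_policy<M: MemoryMetrics>(
    manager: &MemoryManager<M>,
    bytes: u64,
    policy: OnExhaustedPolicy,
) -> Option<MemoryGuard<M>> {
    match policy {
        // Fail fast when the pool is exhausted.
        OnExhaustedPolicy::Fail => manager.try_acquire(bytes),
        // Otherwise wait, but never longer than the configured timeout.
        OnExhaustedPolicy::Wait { timeout } => {
            tokio::time::timeout(timeout, manager.acquire(bytes))
                .await
                .ok()
                .and_then(|res| res.ok())
        }
    }
}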
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use std::fmt::{Display, Formatter};
use std::hash::{DefaultHasher, Hash, Hasher};
use std::str::FromStr;

@@ -60,7 +61,7 @@ pub trait ClusterInfo {
}

/// The key of [NodeInfo] in the storage. The format is `__meta_cluster_node_info-0-{role}-{node_id}`.
#[derive(Debug, Clone, Copy, Eq, Hash, PartialEq, Serialize, Deserialize)]
#[derive(Debug, Clone, Copy, Eq, Hash, PartialEq, Serialize, Deserialize, PartialOrd, Ord)]
pub struct NodeInfoKey {
    /// The role of the node. It can be `[Role::Datanode]` or `[Role::Frontend]`.
    pub role: Role,
@@ -135,7 +136,7 @@ pub struct NodeInfo {
    pub hostname: String,
}

#[derive(Debug, Clone, Copy, Eq, Hash, PartialEq, Serialize, Deserialize)]
#[derive(Debug, Clone, Copy, Eq, Hash, PartialEq, Serialize, Deserialize, PartialOrd, Ord)]
pub enum Role {
    Datanode,
    Frontend,
@@ -241,6 +242,12 @@ impl From<&NodeInfoKey> for Vec<u8> {
    }
}

impl Display for NodeInfoKey {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        write!(f, "{:?}-{}", self.role, self.node_id)
    }
}

impl FromStr for NodeInfo {
    type Err = Error;

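Since Role derives Debug, the new Display impl renders keys as `<Role>-<node_id>` (illustrative, assuming the struct's numeric node_id field):

let key = NodeInfoKey { role: Role::Datanode, node_id: 42 };
assert_eq!(key.to_string(), "Datanode-42");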
@@ -31,6 +31,7 @@ use crate::region_registry::LeaderRegionRegistryRef;
pub mod alter_database;
pub mod alter_logical_tables;
pub mod alter_table;
pub mod comment_on;
pub mod create_database;
pub mod create_flow;
pub mod create_logical_tables;

@@ -301,8 +301,8 @@ fn build_new_table_info(
        | AlterKind::UnsetTableOptions { .. }
        | AlterKind::SetIndexes { .. }
        | AlterKind::UnsetIndexes { .. }
        | AlterKind::DropDefaults { .. } => {}
        AlterKind::SetDefaults { .. } => {}
        | AlterKind::DropDefaults { .. }
        | AlterKind::SetDefaults { .. } => {}
    }

    info!(

509
src/common/meta/src/ddl/comment_on.rs
Normal file
@@ -0,0 +1,509 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use async_trait::async_trait;
use chrono::Utc;
use common_catalog::format_full_table_name;
use common_procedure::error::{FromJsonSnafu, Result as ProcedureResult, ToJsonSnafu};
use common_procedure::{Context as ProcedureContext, LockKey, Procedure, Status};
use common_telemetry::tracing::info;
use datatypes::schema::COMMENT_KEY as COLUMN_COMMENT_KEY;
use serde::{Deserialize, Serialize};
use snafu::{OptionExt, ResultExt, ensure};
use store_api::storage::TableId;
use strum::AsRefStr;
use table::metadata::RawTableInfo;
use table::requests::COMMENT_KEY as TABLE_COMMENT_KEY;
use table::table_name::TableName;

use crate::cache_invalidator::Context;
use crate::ddl::DdlContext;
use crate::ddl::utils::map_to_procedure_error;
use crate::error::{ColumnNotFoundSnafu, FlowNotFoundSnafu, Result, TableNotFoundSnafu};
use crate::instruction::CacheIdent;
use crate::key::flow::flow_info::{FlowInfoKey, FlowInfoValue};
use crate::key::table_info::{TableInfoKey, TableInfoValue};
use crate::key::table_name::TableNameKey;
use crate::key::{DeserializedValueWithBytes, FlowId, MetadataKey, MetadataValue};
use crate::lock_key::{CatalogLock, FlowNameLock, SchemaLock, TableNameLock};
use crate::rpc::ddl::{CommentObjectType, CommentOnTask};
use crate::rpc::store::PutRequest;

pub struct CommentOnProcedure {
    pub context: DdlContext,
    pub data: CommentOnData,
}

impl CommentOnProcedure {
    pub const TYPE_NAME: &'static str = "metasrv-procedure::CommentOn";

    pub fn new(task: CommentOnTask, context: DdlContext) -> Self {
        Self {
            context,
            data: CommentOnData::new(task),
        }
    }

    pub fn from_json(json: &str, context: DdlContext) -> ProcedureResult<Self> {
        let data = serde_json::from_str(json).context(FromJsonSnafu)?;

        Ok(Self { context, data })
    }

    pub async fn on_prepare(&mut self) -> Result<Status> {
        match self.data.object_type {
            CommentObjectType::Table | CommentObjectType::Column => {
                self.prepare_table_or_column().await?;
            }
            CommentObjectType::Flow => {
                self.prepare_flow().await?;
            }
        }

        // Fast path: if comment is unchanged, skip update
        if self.data.is_unchanged {
            let object_desc = match self.data.object_type {
                CommentObjectType::Table => format!(
                    "table {}",
                    format_full_table_name(
                        &self.data.catalog_name,
                        &self.data.schema_name,
                        &self.data.object_name,
                    )
                ),
                CommentObjectType::Column => format!(
                    "column {}.{}",
                    format_full_table_name(
                        &self.data.catalog_name,
                        &self.data.schema_name,
                        &self.data.object_name,
                    ),
                    self.data.column_name.as_ref().unwrap()
                ),
                CommentObjectType::Flow => {
                    format!("flow {}.{}", self.data.catalog_name, self.data.object_name)
                }
            };
            info!("Comment unchanged for {}, skipping update", object_desc);
            return Ok(Status::done());
        }

        self.data.state = CommentOnState::UpdateMetadata;
        Ok(Status::executing(true))
    }

    async fn prepare_table_or_column(&mut self) -> Result<()> {
        let table_name_key = TableNameKey::new(
            &self.data.catalog_name,
            &self.data.schema_name,
            &self.data.object_name,
        );

        let table_id = self
            .context
            .table_metadata_manager
            .table_name_manager()
            .get(table_name_key)
            .await?
            .with_context(|| TableNotFoundSnafu {
                table_name: format_full_table_name(
                    &self.data.catalog_name,
                    &self.data.schema_name,
                    &self.data.object_name,
                ),
            })?
            .table_id();

        let table_info = self
            .context
            .table_metadata_manager
            .table_info_manager()
            .get(table_id)
            .await?
            .with_context(|| TableNotFoundSnafu {
                table_name: format_full_table_name(
                    &self.data.catalog_name,
                    &self.data.schema_name,
                    &self.data.object_name,
                ),
            })?;

        // For column comments, validate the column exists
        if self.data.object_type == CommentObjectType::Column {
            let column_name = self.data.column_name.as_ref().unwrap();
            let column_exists = table_info
                .table_info
                .meta
                .schema
                .column_schemas
                .iter()
                .any(|col| &col.name == column_name);

            ensure!(
                column_exists,
                ColumnNotFoundSnafu {
                    column_name,
                    column_id: 0u32, // column_id is not known here
                }
            );
        }

        self.data.table_id = Some(table_id);

        // Check if comment is unchanged for early exit optimization
        match self.data.object_type {
            CommentObjectType::Table => {
                let current_comment = &table_info.table_info.desc;
                if &self.data.comment == current_comment {
                    self.data.is_unchanged = true;
                }
            }
            CommentObjectType::Column => {
                let column_name = self.data.column_name.as_ref().unwrap();
                let column_schema = table_info
                    .table_info
                    .meta
                    .schema
                    .column_schemas
                    .iter()
                    .find(|col| &col.name == column_name)
                    .unwrap(); // Safe: validated above

                let current_comment = column_schema.metadata().get(COLUMN_COMMENT_KEY);
                if self.data.comment.as_deref() == current_comment.map(String::as_str) {
                    self.data.is_unchanged = true;
                }
            }
            CommentObjectType::Flow => {
                // this branch is handled in `prepare_flow`
            }
        }

        self.data.table_info = Some(table_info);

        Ok(())
    }

    async fn prepare_flow(&mut self) -> Result<()> {
        let flow_name_value = self
            .context
            .flow_metadata_manager
            .flow_name_manager()
            .get(&self.data.catalog_name, &self.data.object_name)
            .await?
            .with_context(|| FlowNotFoundSnafu {
                flow_name: &self.data.object_name,
            })?;

        let flow_id = flow_name_value.flow_id();
        let flow_info = self
            .context
            .flow_metadata_manager
            .flow_info_manager()
            .get_raw(flow_id)
            .await?
            .with_context(|| FlowNotFoundSnafu {
                flow_name: &self.data.object_name,
            })?;

        self.data.flow_id = Some(flow_id);

        // Check if comment is unchanged for early exit optimization
        let current_comment = &flow_info.get_inner_ref().comment;
        let new_comment = self.data.comment.as_deref().unwrap_or("");
        if new_comment == current_comment.as_str() {
            self.data.is_unchanged = true;
        }

        self.data.flow_info = Some(flow_info);

        Ok(())
    }

    pub async fn on_update_metadata(&mut self) -> Result<Status> {
        match self.data.object_type {
            CommentObjectType::Table => {
                self.update_table_comment().await?;
            }
            CommentObjectType::Column => {
                self.update_column_comment().await?;
            }
            CommentObjectType::Flow => {
                self.update_flow_comment().await?;
            }
        }

        self.data.state = CommentOnState::InvalidateCache;
        Ok(Status::executing(true))
    }

    async fn update_table_comment(&mut self) -> Result<()> {
        let table_info_value = self.data.table_info.as_ref().unwrap();
        let mut new_table_info = table_info_value.table_info.clone();

        new_table_info.desc = self.data.comment.clone();

        // Sync comment to table options
        sync_table_comment_option(
            &mut new_table_info.meta.options,
            new_table_info.desc.as_deref(),
        );

        self.update_table_info(table_info_value, new_table_info)
            .await?;

        info!(
            "Updated comment for table {}.{}.{}",
            self.data.catalog_name, self.data.schema_name, self.data.object_name
        );

        Ok(())
    }

    async fn update_column_comment(&mut self) -> Result<()> {
        let table_info_value = self.data.table_info.as_ref().unwrap();
        let mut new_table_info = table_info_value.table_info.clone();

        let column_name = self.data.column_name.as_ref().unwrap();
        let column_schema = new_table_info
            .meta
            .schema
            .column_schemas
            .iter_mut()
            .find(|col| &col.name == column_name)
            .unwrap(); // Safe: validated in prepare

        update_column_comment_metadata(column_schema, self.data.comment.clone());

        self.update_table_info(table_info_value, new_table_info)
            .await?;

        info!(
            "Updated comment for column {}.{}.{}.{}",
            self.data.catalog_name, self.data.schema_name, self.data.object_name, column_name
        );

        Ok(())
    }

    async fn update_flow_comment(&mut self) -> Result<()> {
        let flow_id = self.data.flow_id.unwrap();
        let flow_info_value = self.data.flow_info.as_ref().unwrap();

        let mut new_flow_info = flow_info_value.get_inner_ref().clone();
        new_flow_info.comment = self.data.comment.clone().unwrap_or_default();
        new_flow_info.updated_time = Utc::now();
|
||||
let raw_value = new_flow_info.try_as_raw_value()?;
|
||||
|
||||
self.context
|
||||
.table_metadata_manager
|
||||
.kv_backend()
|
||||
.put(
|
||||
PutRequest::new()
|
||||
.with_key(FlowInfoKey::new(flow_id).to_bytes())
|
||||
.with_value(raw_value),
|
||||
)
|
||||
.await?;
|
||||
|
||||
info!(
|
||||
"Updated comment for flow {}.{}",
|
||||
self.data.catalog_name, self.data.object_name
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn update_table_info(
|
||||
&self,
|
||||
current_table_info: &DeserializedValueWithBytes<TableInfoValue>,
|
||||
new_table_info: RawTableInfo,
|
||||
) -> Result<()> {
|
||||
let table_id = current_table_info.table_info.ident.table_id;
|
||||
let new_table_info_value = current_table_info.update(new_table_info);
|
||||
let raw_value = new_table_info_value.try_as_raw_value()?;
|
||||
|
||||
self.context
|
||||
.table_metadata_manager
|
||||
.kv_backend()
|
||||
.put(
|
||||
PutRequest::new()
|
||||
.with_key(TableInfoKey::new(table_id).to_bytes())
|
||||
.with_value(raw_value),
|
||||
)
|
||||
.await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn on_invalidate_cache(&mut self) -> Result<Status> {
|
||||
let cache_invalidator = &self.context.cache_invalidator;
|
||||
|
||||
match self.data.object_type {
|
||||
CommentObjectType::Table | CommentObjectType::Column => {
|
||||
let table_id = self.data.table_id.unwrap();
|
||||
let table_name = TableName::new(
|
||||
self.data.catalog_name.clone(),
|
||||
self.data.schema_name.clone(),
|
||||
self.data.object_name.clone(),
|
||||
);
|
||||
|
||||
let cache_ident = vec![
|
||||
CacheIdent::TableId(table_id),
|
||||
CacheIdent::TableName(table_name),
|
||||
];
|
||||
|
||||
cache_invalidator
|
||||
.invalidate(&Context::default(), &cache_ident)
|
||||
.await?;
|
||||
}
|
||||
CommentObjectType::Flow => {
|
||||
let flow_id = self.data.flow_id.unwrap();
|
||||
let cache_ident = vec![CacheIdent::FlowId(flow_id)];
|
||||
|
||||
cache_invalidator
|
||||
.invalidate(&Context::default(), &cache_ident)
|
||||
.await?;
|
||||
}
|
||||
}
|
||||
|
||||
Ok(Status::done())
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Procedure for CommentOnProcedure {
|
||||
fn type_name(&self) -> &str {
|
||||
Self::TYPE_NAME
|
||||
}
|
||||
|
||||
async fn execute(&mut self, _ctx: &ProcedureContext) -> ProcedureResult<Status> {
|
||||
match self.data.state {
|
||||
CommentOnState::Prepare => self.on_prepare().await,
|
||||
CommentOnState::UpdateMetadata => self.on_update_metadata().await,
|
||||
CommentOnState::InvalidateCache => self.on_invalidate_cache().await,
|
||||
}
|
||||
.map_err(map_to_procedure_error)
|
||||
}
|
||||
|
||||
fn dump(&self) -> ProcedureResult<String> {
|
||||
serde_json::to_string(&self.data).context(ToJsonSnafu)
|
||||
}
|
||||
|
||||
fn lock_key(&self) -> LockKey {
|
||||
let catalog = &self.data.catalog_name;
|
||||
let schema = &self.data.schema_name;
|
||||
|
||||
let lock_key = match self.data.object_type {
|
||||
CommentObjectType::Table | CommentObjectType::Column => {
|
||||
vec![
|
||||
CatalogLock::Read(catalog).into(),
|
||||
SchemaLock::read(catalog, schema).into(),
|
||||
TableNameLock::new(catalog, schema, &self.data.object_name).into(),
|
||||
]
|
||||
}
|
||||
CommentObjectType::Flow => {
|
||||
vec![
|
||||
CatalogLock::Read(catalog).into(),
|
||||
FlowNameLock::new(catalog, &self.data.object_name).into(),
|
||||
]
|
||||
}
|
||||
};
|
||||
|
||||
LockKey::new(lock_key)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, AsRefStr)]
|
||||
enum CommentOnState {
|
||||
Prepare,
|
||||
UpdateMetadata,
|
||||
InvalidateCache,
|
||||
}
|
||||
|
||||
/// The data of comment on procedure.
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub struct CommentOnData {
|
||||
state: CommentOnState,
|
||||
catalog_name: String,
|
||||
schema_name: String,
|
||||
object_type: CommentObjectType,
|
||||
object_name: String,
|
||||
/// Column name (only for Column comments)
|
||||
column_name: Option<String>,
|
||||
comment: Option<String>,
|
||||
/// Cached table ID (for Table/Column)
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
table_id: Option<TableId>,
|
||||
/// Cached table info (for Table/Column)
|
||||
#[serde(skip)]
|
||||
table_info: Option<DeserializedValueWithBytes<TableInfoValue>>,
|
||||
/// Cached flow ID (for Flow)
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
flow_id: Option<FlowId>,
|
||||
/// Cached flow info (for Flow)
|
||||
#[serde(skip)]
|
||||
flow_info: Option<DeserializedValueWithBytes<FlowInfoValue>>,
|
||||
/// Whether the comment is unchanged (optimization for early exit)
|
||||
#[serde(skip)]
|
||||
is_unchanged: bool,
|
||||
}
|
||||
|
||||
impl CommentOnData {
|
||||
pub fn new(task: CommentOnTask) -> Self {
|
||||
Self {
|
||||
state: CommentOnState::Prepare,
|
||||
catalog_name: task.catalog_name,
|
||||
schema_name: task.schema_name,
|
||||
object_type: task.object_type,
|
||||
object_name: task.object_name,
|
||||
column_name: task.column_name,
|
||||
comment: task.comment,
|
||||
table_id: None,
|
||||
table_info: None,
|
||||
flow_id: None,
|
||||
flow_info: None,
|
||||
is_unchanged: false,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn update_column_comment_metadata(
|
||||
column_schema: &mut datatypes::schema::ColumnSchema,
|
||||
comment: Option<String>,
|
||||
) {
|
||||
match comment {
|
||||
Some(value) => {
|
||||
column_schema
|
||||
.mut_metadata()
|
||||
.insert(COLUMN_COMMENT_KEY.to_string(), value);
|
||||
}
|
||||
None => {
|
||||
column_schema.mut_metadata().remove(COLUMN_COMMENT_KEY);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn sync_table_comment_option(options: &mut table::requests::TableOptions, comment: Option<&str>) {
|
||||
match comment {
|
||||
Some(value) => {
|
||||
options
|
||||
.extra_options
|
||||
.insert(TABLE_COMMENT_KEY.to_string(), value.to_string());
|
||||
}
|
||||
None => {
|
||||
options.extra_options.remove(TABLE_COMMENT_KEY);
|
||||
}
|
||||
}
|
||||
}
|
||||
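// Editor's sketch (illustrative, not part of the patch): the procedure above is a
// three-state machine, Prepare -> UpdateMetadata -> InvalidateCache. Assuming a
// `ProcedureContext` named `ctx`, a runner drives it roughly like this,
// re-entering `execute` until the procedure reports `Status::done()`:
//
// async fn drive(mut p: CommentOnProcedure, ctx: &ProcedureContext) -> ProcedureResult<()> {
//     loop {
//         match p.execute(ctx).await? {
//             // InvalidateCache (or the unchanged-comment fast path) finished.
//             Status::Done { .. } => return Ok(()),
//             // Prepare or UpdateMetadata requested another step.
//             _ => continue,
//         }
//     }
// }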
@@ -14,6 +14,7 @@

 use std::sync::Arc;

 use common_error::ext::BoxedError;
 use common_procedure::{
     BoxedProcedureLoader, Output, ProcedureId, ProcedureManagerRef, ProcedureWithId, watcher,
 };
@@ -26,6 +27,7 @@ use store_api::storage::TableId;
 use crate::ddl::alter_database::AlterDatabaseProcedure;
 use crate::ddl::alter_logical_tables::AlterLogicalTablesProcedure;
 use crate::ddl::alter_table::AlterTableProcedure;
+use crate::ddl::comment_on::CommentOnProcedure;
 use crate::ddl::create_database::CreateDatabaseProcedure;
 use crate::ddl::create_flow::CreateFlowProcedure;
 use crate::ddl::create_logical_tables::CreateLogicalTablesProcedure;
@@ -51,21 +53,34 @@ use crate::rpc::ddl::DdlTask::CreateTrigger;
 #[cfg(feature = "enterprise")]
 use crate::rpc::ddl::DdlTask::DropTrigger;
 use crate::rpc::ddl::DdlTask::{
-    AlterDatabase, AlterLogicalTables, AlterTable, CreateDatabase, CreateFlow, CreateLogicalTables,
-    CreateTable, CreateView, DropDatabase, DropFlow, DropLogicalTables, DropTable, DropView,
-    TruncateTable,
+    AlterDatabase, AlterLogicalTables, AlterTable, CommentOn, CreateDatabase, CreateFlow,
+    CreateLogicalTables, CreateTable, CreateView, DropDatabase, DropFlow, DropLogicalTables,
+    DropTable, DropView, TruncateTable,
 };
 #[cfg(feature = "enterprise")]
 use crate::rpc::ddl::trigger::CreateTriggerTask;
 #[cfg(feature = "enterprise")]
 use crate::rpc::ddl::trigger::DropTriggerTask;
 use crate::rpc::ddl::{
-    AlterDatabaseTask, AlterTableTask, CreateDatabaseTask, CreateFlowTask, CreateTableTask,
-    CreateViewTask, DropDatabaseTask, DropFlowTask, DropTableTask, DropViewTask, QueryContext,
-    SubmitDdlTaskRequest, SubmitDdlTaskResponse, TruncateTableTask,
+    AlterDatabaseTask, AlterTableTask, CommentOnTask, CreateDatabaseTask, CreateFlowTask,
+    CreateTableTask, CreateViewTask, DropDatabaseTask, DropFlowTask, DropTableTask, DropViewTask,
+    QueryContext, SubmitDdlTaskRequest, SubmitDdlTaskResponse, TruncateTableTask,
 };
 use crate::rpc::router::RegionRoute;

+/// A configurator that customizes or enhances a [`DdlManager`].
+#[async_trait::async_trait]
+pub trait DdlManagerConfigurator<C>: Send + Sync {
+    /// Configures the given [`DdlManager`] using the provided [`DdlManagerConfigureContext`].
+    async fn configure(
+        &self,
+        ddl_manager: DdlManager,
+        ctx: C,
+    ) -> std::result::Result<DdlManager, BoxedError>;
+}
+
+pub type DdlManagerConfiguratorRef<C> = Arc<dyn DdlManagerConfigurator<C>>;
+
 pub type DdlManagerRef = Arc<DdlManager>;

 pub type BoxedProcedureLoaderFactory = dyn Fn(DdlContext) -> BoxedProcedureLoader;
@@ -148,11 +163,8 @@ impl DdlManager {
     }

     #[cfg(feature = "enterprise")]
-    pub fn with_trigger_ddl_manager(
-        mut self,
-        trigger_ddl_manager: Option<TriggerDdlManagerRef>,
-    ) -> Self {
-        self.trigger_ddl_manager = trigger_ddl_manager;
+    pub fn with_trigger_ddl_manager(mut self, trigger_ddl_manager: TriggerDdlManagerRef) -> Self {
+        self.trigger_ddl_manager = Some(trigger_ddl_manager);
         self
     }

@@ -181,7 +193,8 @@ impl DdlManager {
             TruncateTableProcedure,
             CreateDatabaseProcedure,
             DropDatabaseProcedure,
-            DropViewProcedure
+            DropViewProcedure,
+            CommentOnProcedure
         );

         for (type_name, loader_factory) in loaders {
@@ -397,6 +410,19 @@ impl DdlManager {
         self.submit_procedure(procedure_with_id).await
     }

+    /// Submits and executes a comment on task.
+    #[tracing::instrument(skip_all)]
+    pub async fn submit_comment_on_task(
+        &self,
+        comment_on_task: CommentOnTask,
+    ) -> Result<(ProcedureId, Option<Output>)> {
+        let context = self.create_context();
+        let procedure = CommentOnProcedure::new(comment_on_task, context);
+        let procedure_with_id = ProcedureWithId::with_random_id(Box::new(procedure));
+
+        self.submit_procedure(procedure_with_id).await
+    }
+
     async fn submit_procedure(
         &self,
         procedure_with_id: ProcedureWithId,
@@ -465,6 +491,7 @@ impl DdlManager {
                 handle_create_view_task(self, create_view_task).await
             }
             DropView(drop_view_task) => handle_drop_view_task(self, drop_view_task).await,
+            CommentOn(comment_on_task) => handle_comment_on_task(self, comment_on_task).await,
             #[cfg(feature = "enterprise")]
             CreateTrigger(create_trigger_task) => {
                 handle_create_trigger_task(
@@ -896,6 +923,26 @@ async fn handle_create_view_task(
     })
 }

+async fn handle_comment_on_task(
+    ddl_manager: &DdlManager,
+    comment_on_task: CommentOnTask,
+) -> Result<SubmitDdlTaskResponse> {
+    let (id, _) = ddl_manager
+        .submit_comment_on_task(comment_on_task.clone())
+        .await?;
+
+    let procedure_id = id.to_string();
+    info!(
+        "Comment on {}.{}.{} is updated via procedure_id {id:?}",
+        comment_on_task.catalog_name, comment_on_task.schema_name, comment_on_task.object_name
+    );
+
+    Ok(SubmitDdlTaskResponse {
+        key: procedure_id.into(),
+        ..Default::default()
+    })
+}
+
 #[cfg(test)]
 mod tests {
     use std::sync::Arc;
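// Editor's sketch (illustrative, not part of the patch): end-to-end, a COMMENT ON
// statement reaches this module as a `CommentOnTask` (defined in rpc/ddl.rs below)
// and is submitted like so; `ddl_manager` is an assumed, already-constructed
// DdlManager:
//
// let task = CommentOnTask {
//     catalog_name: "greptime".to_string(),
//     schema_name: "public".to_string(),
//     object_type: CommentObjectType::Table,
//     object_name: "metrics".to_string(),
//     column_name: None,
//     object_id: None,
//     comment: Some("CPU metrics, 5s resolution".to_string()),
// };
// let (procedure_id, _output) = ddl_manager.submit_comment_on_task(task).await?;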
@@ -14,6 +14,8 @@

 use std::time::Duration;

+use etcd_client::ConnectOptions;
+
 /// Heartbeat interval time (is the basic unit of various time).
 pub const HEARTBEAT_INTERVAL_MILLIS: u64 = 3000;

@@ -41,6 +43,23 @@ pub const POSTGRES_KEEP_ALIVE_SECS: u64 = 30;
 /// In a lease, there are two opportunities for renewal.
 pub const META_KEEP_ALIVE_INTERVAL_SECS: u64 = META_LEASE_SECS / 2;

+/// The timeout of the heartbeat request.
+pub const HEARTBEAT_TIMEOUT: Duration = Duration::from_secs(META_KEEP_ALIVE_INTERVAL_SECS + 1);
+
+/// The keep-alive interval of the heartbeat channel.
+pub const HEARTBEAT_CHANNEL_KEEP_ALIVE_INTERVAL_SECS: Duration = Duration::from_secs(15);
+
+/// The keep-alive timeout of the heartbeat channel.
+pub const HEARTBEAT_CHANNEL_KEEP_ALIVE_TIMEOUT_SECS: Duration = Duration::from_secs(5);
+
+/// The default options for the etcd client.
+pub fn default_etcd_client_options() -> ConnectOptions {
+    ConnectOptions::new()
+        .with_keep_alive_while_idle(true)
+        .with_keep_alive(Duration::from_secs(15), Duration::from_secs(5))
+        .with_connect_timeout(Duration::from_secs(10))
+}
+
 /// The default mailbox round-trip timeout.
 pub const MAILBOX_RTT_SECS: u64 = 1;
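// Editor's sketch (illustrative, not part of the patch): these defaults are meant
// to be handed to `etcd_client::Client::connect`, which accepts an optional
// `ConnectOptions`; the endpoint below is a placeholder:
//
// let client = etcd_client::Client::connect(
//     ["127.0.0.1:2379"],
//     Some(default_etcd_client_options()),
// )
// .await?;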
@@ -272,13 +272,6 @@ pub enum Error {
         location: Location,
     },

-    #[snafu(display("Failed to send message: {err_msg}"))]
-    SendMessage {
-        err_msg: String,
-        #[snafu(implicit)]
-        location: Location,
-    },
-
     #[snafu(display("Failed to serde json"))]
     SerdeJson {
         #[snafu(source)]
@@ -1118,7 +1111,7 @@ impl ErrorExt for Error {
             | DeserializeFlexbuffers { .. }
             | ConvertTimeRanges { .. } => StatusCode::Unexpected,

-            SendMessage { .. } | GetKvCache { .. } | CacheNotGet { .. } => StatusCode::Internal,
+            GetKvCache { .. } | CacheNotGet { .. } => StatusCode::Internal,

             SchemaAlreadyExists { .. } => StatusCode::DatabaseAlreadyExists,

@@ -23,6 +23,7 @@ use crate::heartbeat::mailbox::{IncomingMessage, MailboxRef};

 pub mod invalidate_table_cache;
 pub mod parse_mailbox_message;
+pub mod suspend;
 #[cfg(test)]
 mod tests;
src/common/meta/src/heartbeat/handler/suspend.rs (new file, 69 lines)
@@ -0,0 +1,69 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};

use async_trait::async_trait;
use common_telemetry::{info, warn};

use crate::error::Result;
use crate::heartbeat::handler::{
    HandleControl, HeartbeatResponseHandler, HeartbeatResponseHandlerContext,
};
use crate::instruction::Instruction;

/// A heartbeat response handler for the special "suspend" instruction.
/// It simply sets, or clears (if previously set), the inner suspend atomic state.
pub struct SuspendHandler {
    suspend: Arc<AtomicBool>,
}

impl SuspendHandler {
    pub fn new(suspend: Arc<AtomicBool>) -> Self {
        Self { suspend }
    }
}

#[async_trait]
impl HeartbeatResponseHandler for SuspendHandler {
    fn is_acceptable(&self, context: &HeartbeatResponseHandlerContext) -> bool {
        matches!(
            context.incoming_message,
            Some((_, Instruction::Suspend)) | None
        )
    }

    async fn handle(&self, context: &mut HeartbeatResponseHandlerContext) -> Result<HandleControl> {
        let flip_state = |expect: bool| {
            self.suspend
                .compare_exchange(expect, !expect, Ordering::Relaxed, Ordering::Relaxed)
                .is_ok()
        };

        if let Some((_, Instruction::Suspend)) = context.incoming_message.take() {
            if flip_state(false) {
                warn!("Suspend instruction received from meta, entering suspension state");
            }
        } else {
            // Suspended components should always be trying to get out of this state; we
            // don't want a separate "un-suspend" instruction to resume them, as that can
            // be error-prone. So if the "suspend" instruction is not found in the
            // heartbeat, just unset the state.
            if flip_state(true) {
                info!("clear suspend state");
            }
        }
        Ok(HandleControl::Continue)
    }
}
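// Editor's sketch (illustrative, not part of the patch): the compare_exchange-based
// flip above is idempotent, so the warn!/info! lines fire only on a real state
// transition, not on every heartbeat. A minimal standalone demonstration:
//
// use std::sync::atomic::{AtomicBool, Ordering};
//
// fn flip(flag: &AtomicBool, expect: bool) -> bool {
//     flag.compare_exchange(expect, !expect, Ordering::Relaxed, Ordering::Relaxed)
//         .is_ok()
// }
//
// let suspend = AtomicBool::new(false);
// assert!(flip(&suspend, false));  // first Suspend instruction: transition, logged
// assert!(!flip(&suspend, false)); // repeated Suspend: already set, silent
// assert!(flip(&suspend, true));   // heartbeat without Suspend: state cleared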
@@ -15,8 +15,8 @@

 use std::sync::Arc;

 use tokio::sync::mpsc::Sender;
+use tokio::sync::mpsc::error::SendError;

-use crate::error::{self, Result};
 use crate::instruction::{Instruction, InstructionReply};

 pub type IncomingMessage = (MessageMeta, Instruction);
@@ -51,13 +51,8 @@ impl HeartbeatMailbox {
         Self { sender }
     }

-    pub async fn send(&self, message: OutgoingMessage) -> Result<()> {
-        self.sender.send(message).await.map_err(|e| {
-            error::SendMessageSnafu {
-                err_msg: e.to_string(),
-            }
-            .build()
-        })
+    pub async fn send(&self, message: OutgoingMessage) -> Result<(), SendError<OutgoingMessage>> {
+        self.sender.send(message).await
     }
 }

@@ -339,6 +339,16 @@ pub struct FlushRegions {
     pub error_strategy: FlushErrorStrategy,
 }

+impl Display for FlushRegions {
+    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+        write!(
+            f,
+            "FlushRegions(region_ids={:?}, strategy={:?}, error_strategy={:?})",
+            self.region_ids, self.strategy, self.error_strategy
+        )
+    }
+}
+
 impl FlushRegions {
     /// Create synchronous single-region flush
     pub fn sync_single(region_id: RegionId) -> Self {
@@ -529,6 +539,8 @@ pub enum Instruction {
     GetFileRefs(GetFileRefs),
     /// Triggers garbage collection for a region.
     GcRegions(GcRegions),
+    /// Temporarily suspends serving reads or writes.
+    Suspend,
 }

 impl Instruction {

@@ -94,7 +94,7 @@ impl TableInfoValue {
         }
     }

-    pub(crate) fn update(&self, new_table_info: RawTableInfo) -> Self {
+    pub fn update(&self, new_table_info: RawTableInfo) -> Self {
         Self {
             table_info: new_table_info,
             version: self.version + 1,

@@ -34,6 +34,8 @@ pub mod memory;
 #[cfg(any(feature = "mysql_kvbackend", feature = "pg_kvbackend"))]
 pub mod rds;
 pub mod test;
+#[cfg(any(test, feature = "testing"))]
+pub mod test_util;
 pub mod txn;
 pub mod util;
 pub type KvBackendRef<E = Error> = Arc<dyn KvBackend<Error = E> + Send + Sync>;
src/common/meta/src/kv_backend/test_util.rs (new file, 125 lines)
@@ -0,0 +1,125 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::any::Any;
use std::sync::Arc;

use derive_builder::Builder;

use crate::error::Result;
use crate::kv_backend::txn::{Txn, TxnResponse};
use crate::kv_backend::{
    BatchDeleteRequest, BatchDeleteResponse, BatchGetRequest, BatchGetResponse, BatchPutRequest,
    BatchPutResponse, DeleteRangeRequest, DeleteRangeResponse, KvBackend, PutRequest, PutResponse,
    RangeRequest, RangeResponse, TxnService,
};

pub type MockFn<Req, Resp> = Arc<dyn Fn(Req) -> Result<Resp> + Send + Sync>;

/// A mock kv backend for testing.
#[derive(Builder)]
pub struct MockKvBackend {
    #[builder(setter(strip_option), default)]
    pub range_fn: Option<MockFn<RangeRequest, RangeResponse>>,
    #[builder(setter(strip_option), default)]
    pub put_fn: Option<MockFn<PutRequest, PutResponse>>,
    #[builder(setter(strip_option), default)]
    pub batch_put_fn: Option<MockFn<BatchPutRequest, BatchPutResponse>>,
    #[builder(setter(strip_option), default)]
    pub batch_get_fn: Option<MockFn<BatchGetRequest, BatchGetResponse>>,
    #[builder(setter(strip_option), default)]
    pub delete_range_fn: Option<MockFn<DeleteRangeRequest, DeleteRangeResponse>>,
    #[builder(setter(strip_option), default)]
    pub batch_delete_fn: Option<MockFn<BatchDeleteRequest, BatchDeleteResponse>>,
    #[builder(setter(strip_option), default)]
    pub txn: Option<MockFn<Txn, TxnResponse>>,
    #[builder(setter(strip_option), default)]
    pub max_txn_ops: Option<usize>,
}

#[async_trait::async_trait]
impl TxnService for MockKvBackend {
    type Error = crate::error::Error;

    async fn txn(&self, txn: Txn) -> Result<TxnResponse> {
        if let Some(f) = &self.txn {
            f(txn)
        } else {
            unimplemented!()
        }
    }

    fn max_txn_ops(&self) -> usize {
        self.max_txn_ops.unwrap()
    }
}

#[async_trait::async_trait]
impl KvBackend for MockKvBackend {
    fn name(&self) -> &str {
        "mock_kv_backend"
    }

    fn as_any(&self) -> &dyn Any {
        self
    }

    async fn range(&self, req: RangeRequest) -> Result<RangeResponse> {
        if let Some(f) = &self.range_fn {
            f(req)
        } else {
            unimplemented!()
        }
    }

    async fn put(&self, req: PutRequest) -> Result<PutResponse> {
        if let Some(f) = &self.put_fn {
            f(req)
        } else {
            unimplemented!()
        }
    }

    async fn batch_put(&self, req: BatchPutRequest) -> Result<BatchPutResponse> {
        if let Some(f) = &self.batch_put_fn {
            f(req)
        } else {
            unimplemented!()
        }
    }

    async fn batch_get(&self, req: BatchGetRequest) -> Result<BatchGetResponse> {
        if let Some(f) = &self.batch_get_fn {
            f(req)
        } else {
            unimplemented!()
        }
    }

    async fn delete_range(&self, req: DeleteRangeRequest) -> Result<DeleteRangeResponse> {
        if let Some(f) = &self.delete_range_fn {
            f(req)
        } else {
            unimplemented!()
        }
    }

    async fn batch_delete(&self, req: BatchDeleteRequest) -> Result<BatchDeleteResponse> {
        if let Some(f) = &self.batch_delete_fn {
            f(req)
        } else {
            unimplemented!()
        }
    }
}
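// Editor's sketch (illustrative, not part of the patch): typical test usage of the
// derive_builder-generated `MockKvBackendBuilder`. Only the hooks a test needs are
// supplied; any other operation panics via `unimplemented!()`, making unexpected
// calls loud. (Assumes `PutResponse` implements `Default`.)
//
// let put_fn: MockFn<PutRequest, PutResponse> =
//     Arc::new(|_req| Ok(PutResponse::default()));
// let kv_backend = MockKvBackendBuilder::default()
//     .put_fn(put_fn)
//     .build()
//     .unwrap();
// let kv_backend: KvBackendRef = Arc::new(kv_backend);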
@@ -23,19 +23,20 @@ use api::v1::alter_database_expr::Kind as PbAlterDatabaseKind;
 use api::v1::meta::ddl_task_request::Task;
 use api::v1::meta::{
     AlterDatabaseTask as PbAlterDatabaseTask, AlterTableTask as PbAlterTableTask,
-    AlterTableTasks as PbAlterTableTasks, CreateDatabaseTask as PbCreateDatabaseTask,
-    CreateFlowTask as PbCreateFlowTask, CreateTableTask as PbCreateTableTask,
-    CreateTableTasks as PbCreateTableTasks, CreateViewTask as PbCreateViewTask,
-    DdlTaskRequest as PbDdlTaskRequest, DdlTaskResponse as PbDdlTaskResponse,
-    DropDatabaseTask as PbDropDatabaseTask, DropFlowTask as PbDropFlowTask,
-    DropTableTask as PbDropTableTask, DropTableTasks as PbDropTableTasks,
-    DropViewTask as PbDropViewTask, Partition, ProcedureId,
+    AlterTableTasks as PbAlterTableTasks, CommentOnTask as PbCommentOnTask,
+    CreateDatabaseTask as PbCreateDatabaseTask, CreateFlowTask as PbCreateFlowTask,
+    CreateTableTask as PbCreateTableTask, CreateTableTasks as PbCreateTableTasks,
+    CreateViewTask as PbCreateViewTask, DdlTaskRequest as PbDdlTaskRequest,
+    DdlTaskResponse as PbDdlTaskResponse, DropDatabaseTask as PbDropDatabaseTask,
+    DropFlowTask as PbDropFlowTask, DropTableTask as PbDropTableTask,
+    DropTableTasks as PbDropTableTasks, DropViewTask as PbDropViewTask, Partition, ProcedureId,
     TruncateTableTask as PbTruncateTableTask,
 };
 use api::v1::{
-    AlterDatabaseExpr, AlterTableExpr, CreateDatabaseExpr, CreateFlowExpr, CreateTableExpr,
-    CreateViewExpr, DropDatabaseExpr, DropFlowExpr, DropTableExpr, DropViewExpr, EvalInterval,
-    ExpireAfter, Option as PbOption, QueryContext as PbQueryContext, TruncateTableExpr,
+    AlterDatabaseExpr, AlterTableExpr, CommentObjectType as PbCommentObjectType, CommentOnExpr,
+    CreateDatabaseExpr, CreateFlowExpr, CreateTableExpr, CreateViewExpr, DropDatabaseExpr,
+    DropFlowExpr, DropTableExpr, DropViewExpr, EvalInterval, ExpireAfter, Option as PbOption,
+    QueryContext as PbQueryContext, TruncateTableExpr,
 };
 use base64::Engine as _;
 use base64::engine::general_purpose;
@@ -78,6 +79,7 @@ pub enum DdlTask {
     DropView(DropViewTask),
     #[cfg(feature = "enterprise")]
     CreateTrigger(trigger::CreateTriggerTask),
+    CommentOn(CommentOnTask),
 }

 impl DdlTask {
@@ -200,6 +202,11 @@ impl DdlTask {
             view_info,
         })
     }

+    /// Creates a [`DdlTask`] to comment on a table, column, or flow.
+    pub fn new_comment_on(task: CommentOnTask) -> Self {
+        DdlTask::CommentOn(task)
+    }
 }

 impl TryFrom<Task> for DdlTask {
@@ -278,6 +285,7 @@ impl TryFrom<Task> for DdlTask {
                 .fail()
             }
         }
+        Task::CommentOnTask(comment_on) => Ok(DdlTask::CommentOn(comment_on.try_into()?)),
         }
     }
 }
@@ -332,6 +340,7 @@ impl TryFrom<SubmitDdlTaskRequest> for PbDdlTaskRequest {
             DdlTask::CreateTrigger(task) => Task::CreateTriggerTask(task.try_into()?),
             #[cfg(feature = "enterprise")]
             DdlTask::DropTrigger(task) => Task::DropTriggerTask(task.into()),
+            DdlTask::CommentOn(task) => Task::CommentOnTask(task.into()),
         };

         Ok(Self {
@@ -1277,6 +1286,119 @@ impl From<DropFlowTask> for PbDropFlowTask {
     }
 }

/// Represents the ID of the object being commented on (Table or Flow).
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub enum CommentObjectId {
    Table(TableId),
    Flow(FlowId),
}

/// Comment on table, column, or flow
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct CommentOnTask {
    pub catalog_name: String,
    pub schema_name: String,
    pub object_type: CommentObjectType,
    pub object_name: String,
    /// Column name (only for Column comments)
    pub column_name: Option<String>,
    /// Object ID (Table or Flow) for validation and cache invalidation
    pub object_id: Option<CommentObjectId>,
    pub comment: Option<String>,
}

#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub enum CommentObjectType {
    Table,
    Column,
    Flow,
}

impl CommentOnTask {
    pub fn table_ref(&self) -> TableReference<'_> {
        TableReference {
            catalog: &self.catalog_name,
            schema: &self.schema_name,
            table: &self.object_name,
        }
    }
}

// Proto conversions for CommentObjectType
impl From<CommentObjectType> for PbCommentObjectType {
    fn from(object_type: CommentObjectType) -> Self {
        match object_type {
            CommentObjectType::Table => PbCommentObjectType::Table,
            CommentObjectType::Column => PbCommentObjectType::Column,
            CommentObjectType::Flow => PbCommentObjectType::Flow,
        }
    }
}

impl TryFrom<i32> for CommentObjectType {
    type Error = error::Error;

    fn try_from(value: i32) -> Result<Self> {
        match value {
            0 => Ok(CommentObjectType::Table),
            1 => Ok(CommentObjectType::Column),
            2 => Ok(CommentObjectType::Flow),
            _ => error::InvalidProtoMsgSnafu {
                err_msg: format!(
                    "Invalid CommentObjectType value: {}. Valid values are: 0 (Table), 1 (Column), 2 (Flow)",
                    value
                ),
            }
            .fail(),
        }
    }
}

// Proto conversions for CommentOnTask
impl TryFrom<PbCommentOnTask> for CommentOnTask {
    type Error = error::Error;

    fn try_from(pb: PbCommentOnTask) -> Result<Self> {
        let comment_on = pb.comment_on.context(error::InvalidProtoMsgSnafu {
            err_msg: "expected comment_on",
        })?;

        Ok(CommentOnTask {
            catalog_name: comment_on.catalog_name,
            schema_name: comment_on.schema_name,
            object_type: comment_on.object_type.try_into()?,
            object_name: comment_on.object_name,
            column_name: if comment_on.column_name.is_empty() {
                None
            } else {
                Some(comment_on.column_name)
            },
            comment: if comment_on.comment.is_empty() {
                None
            } else {
                Some(comment_on.comment)
            },
            object_id: None,
        })
    }
}

impl From<CommentOnTask> for PbCommentOnTask {
    fn from(task: CommentOnTask) -> Self {
        let pb_object_type: PbCommentObjectType = task.object_type.into();
        PbCommentOnTask {
            comment_on: Some(CommentOnExpr {
                catalog_name: task.catalog_name,
                schema_name: task.schema_name,
                object_type: pb_object_type as i32,
                object_name: task.object_name,
                column_name: task.column_name.unwrap_or_default(),
                comment: task.comment.unwrap_or_default(),
            }),
        }
    }
}
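// Editor's note (illustrative, not part of the patch): the two conversions above
// are designed to round-trip, with one asymmetry worth noting: `object_id` is
// never serialized into the protobuf message, so it always comes back as `None`,
// and an empty wire string for `column_name`/`comment` maps back to `None`:
//
// let pb: PbCommentOnTask = task.clone().into();
// let roundtripped = CommentOnTask::try_from(pb)?;
// assert_eq!(roundtripped.object_id, None);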
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct QueryContext {
    pub(crate) current_catalog: String,
@@ -14,7 +14,7 @@

 use common_telemetry::{debug, error, info};
 use common_wal::config::kafka::common::{
-    DEFAULT_BACKOFF_CONFIG, KafkaConnectionConfig, KafkaTopicConfig,
+    DEFAULT_BACKOFF_CONFIG, DEFAULT_CONNECT_TIMEOUT, KafkaConnectionConfig, KafkaTopicConfig,
 };
 use rskafka::client::error::Error as RsKafkaError;
 use rskafka::client::error::ProtocolError::TopicAlreadyExists;
@@ -205,11 +205,13 @@ impl KafkaTopicCreator {
         self.partition_client(topic).await.unwrap()
     }
 }

 /// Builds a kafka [Client](rskafka::client::Client).
 pub async fn build_kafka_client(connection: &KafkaConnectionConfig) -> Result<Client> {
     // Builds a Kafka controller client for creating topics.
     let mut builder = ClientBuilder::new(connection.broker_endpoints.clone())
-        .backoff_config(DEFAULT_BACKOFF_CONFIG);
+        .backoff_config(DEFAULT_BACKOFF_CONFIG)
+        .connect_timeout(Some(DEFAULT_CONNECT_TIMEOUT));
     if let Some(sasl) = &connection.sasl {
         builder = builder.sasl_config(sasl.config.clone().into_sasl_config());
     };
@@ -246,14 +246,6 @@ pub enum Error {
         #[snafu(implicit)]
         location: Location,
     },
-
-    #[snafu(display("Loader for {type_name} is not implemented: {reason}"))]
-    ProcedureLoaderNotImplemented {
-        #[snafu(implicit)]
-        location: Location,
-        type_name: String,
-        reason: String,
-    },
 }

 pub type Result<T> = std::result::Result<T, Error>;
@@ -274,8 +266,7 @@ impl ErrorExt for Error {
         Error::ToJson { .. }
         | Error::DeleteState { .. }
         | Error::FromJson { .. }
-        | Error::WaitWatcher { .. }
-        | Error::ProcedureLoaderNotImplemented { .. } => StatusCode::Internal,
+        | Error::WaitWatcher { .. } => StatusCode::Internal,

         Error::RetryTimesExceeded { .. }
         | Error::RollbackTimesExceeded { .. }
@@ -331,8 +331,29 @@ impl Runner {
         }

         match status {
-            Status::Executing { .. } => {}
+            Status::Executing { .. } => {
+                let prev_state = self.meta.state();
+                if !matches!(prev_state, ProcedureState::Running) {
+                    info!(
+                        "Set Procedure {}-{} state to running, prev_state: {:?}",
+                        self.procedure.type_name(),
+                        self.meta.id,
+                        prev_state
+                    );
+                    self.meta.set_state(ProcedureState::Running);
+                }
+            }
             Status::Suspended { subprocedures, .. } => {
+                let prev_state = self.meta.state();
+                if !matches!(prev_state, ProcedureState::Running) {
+                    info!(
+                        "Set Procedure {}-{} state to running, prev_state: {:?}",
+                        self.procedure.type_name(),
+                        self.meta.id,
+                        prev_state
+                    );
+                    self.meta.set_state(ProcedureState::Running);
+                }
                 self.on_suspended(subprocedures).await;
             }
             Status::Done { output } => {
@@ -393,8 +414,12 @@ impl Runner {
                 return;
             }

-            self.meta
-                .set_state(ProcedureState::prepare_rollback(Arc::new(e)));
+            if self.procedure.rollback_supported() {
+                self.meta
+                    .set_state(ProcedureState::prepare_rollback(Arc::new(e)));
+            } else {
+                self.meta.set_state(ProcedureState::failed(Arc::new(e)));
+            }
         }
     }
 }
@@ -1080,20 +1105,10 @@ mod tests {
         let mut runner = new_runner(meta.clone(), Box::new(fail), procedure_store.clone());
         runner.manager_ctx.start();

-        runner.execute_once(&ctx).await;
-        let state = runner.meta.state();
-        assert!(state.is_prepare_rollback(), "{state:?}");
-
         runner.execute_once(&ctx).await;
         let state = runner.meta.state();
         assert!(state.is_failed(), "{state:?}");
-        check_files(
-            &object_store,
-            &procedure_store,
-            ctx.procedure_id,
-            &["0000000000.rollback"],
-        )
-        .await;
+        check_files(&object_store, &procedure_store, ctx.procedure_id, &[]).await;
     }

     #[tokio::test]
@@ -1146,6 +1161,8 @@ mod tests {
             async move {
                 if times == 1 {
                     Err(Error::retry_later(MockError::new(StatusCode::Unexpected)))
+                } else if times == 2 {
+                    Ok(Status::executing(false))
                 } else {
                     Ok(Status::done())
                 }
@@ -1172,6 +1189,10 @@ mod tests {
         let state = runner.meta.state();
         assert!(state.is_retrying(), "{state:?}");

+        runner.execute_once(&ctx).await;
+        let state = runner.meta.state();
+        assert!(state.is_running(), "{state:?}");
+
         runner.execute_once(&ctx).await;
         let state = runner.meta.state();
         assert!(state.is_done(), "{state:?}");
@@ -1185,6 +1206,86 @@ mod tests {
         .await;
     }

    #[tokio::test(flavor = "multi_thread")]
    async fn test_execute_on_retry_later_error_with_child() {
        common_telemetry::init_default_ut_logging();
        let mut times = 0;
        let child_id = ProcedureId::random();

        let exec_fn = move |_| {
            times += 1;
            async move {
                debug!("times: {}", times);
                if times == 1 {
                    Err(Error::retry_later(MockError::new(StatusCode::Unexpected)))
                } else if times == 2 {
                    let exec_fn = |_| {
                        async { Err(Error::external(MockError::new(StatusCode::Unexpected))) }
                            .boxed()
                    };
                    let fail = ProcedureAdapter {
                        data: "fail".to_string(),
                        lock_key: LockKey::single_exclusive("catalog.schema.table.region-0"),
                        poison_keys: PoisonKeys::default(),
                        exec_fn,
                        rollback_fn: None,
                    };

                    Ok(Status::Suspended {
                        subprocedures: vec![ProcedureWithId {
                            id: child_id,
                            procedure: Box::new(fail),
                        }],
                        persist: true,
                    })
                } else {
                    Ok(Status::done())
                }
            }
            .boxed()
        };

        let retry_later = ProcedureAdapter {
            data: "retry_later".to_string(),
            lock_key: LockKey::single_exclusive("catalog.schema.table"),
            poison_keys: PoisonKeys::default(),
            exec_fn,
            rollback_fn: None,
        };

        let dir = create_temp_dir("retry_later");
        let meta = retry_later.new_meta(ROOT_ID);
        let ctx = context_without_provider(meta.id);
        let object_store = test_util::new_object_store(&dir);
        let procedure_store = Arc::new(ProcedureStore::from_object_store(object_store.clone()));
        let mut runner = new_runner(meta.clone(), Box::new(retry_later), procedure_store.clone());
        runner.manager_ctx.start();
        debug!("execute_once 1");
        runner.execute_once(&ctx).await;
        let state = runner.meta.state();
        assert!(state.is_retrying(), "{state:?}");

        let moved_meta = meta.clone();
        tokio::spawn(async move {
            moved_meta.child_notify.notify_one();
        });
        runner.execute_once(&ctx).await;
        let state = runner.meta.state();
        assert!(state.is_running(), "{state:?}");

        runner.execute_once(&ctx).await;
        let state = runner.meta.state();
        assert!(state.is_done(), "{state:?}");
        assert!(meta.state().is_done());
        check_files(
            &object_store,
            &procedure_store,
            ctx.procedure_id,
            &["0000000000.step", "0000000001.commit"],
        )
        .await;
    }

    #[tokio::test]
    async fn test_execute_exceed_max_retry_later() {
        let exec_fn =
@@ -1304,7 +1405,7 @@ mod tests {
    async fn test_child_error() {
        let mut times = 0;
        let child_id = ProcedureId::random();

        common_telemetry::init_default_ut_logging();
        let exec_fn = move |ctx: Context| {
            times += 1;
            async move {
@@ -1529,7 +1630,7 @@ mod tests {

         runner.execute_once(&ctx).await;
         let state = runner.meta.state();
-        assert!(state.is_prepare_rollback(), "{state:?}");
+        assert!(state.is_failed(), "{state:?}");

         let procedure_id = runner
             .manager_ctx
@@ -1596,11 +1697,6 @@ mod tests {
         let state = runner.meta.state();
         assert!(state.is_running(), "{state:?}");

-        runner.execute_once(&ctx).await;
-        let state = runner.meta.state();
-        assert!(state.is_prepare_rollback(), "{state:?}");
-        assert!(meta.state().is_prepare_rollback());
-
         runner.execute_once(&ctx).await;
         let state = runner.meta.state();
         assert!(state.is_failed(), "{state:?}");
@@ -46,6 +46,22 @@ pub enum OutputData {
     Stream(SendableRecordBatchStream),
 }

+impl OutputData {
+    /// Consume the data to pretty printed string.
+    pub async fn pretty_print(self) -> String {
+        match self {
+            OutputData::AffectedRows(x) => {
+                format!("Affected Rows: {x}")
+            }
+            OutputData::RecordBatches(x) => x.pretty_print().unwrap_or_else(|e| e.to_string()),
+            OutputData::Stream(x) => common_recordbatch::util::collect_batches(x)
+                .await
+                .and_then(|x| x.pretty_print())
+                .unwrap_or_else(|e| e.to_string()),
+        }
+    }
+}
+
 /// OutputMeta stores meta information produced/generated during the execution
 #[derive(Debug, Default)]
 pub struct OutputMeta {
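// Editor's sketch (illustrative, not part of the patch): how the new helper reads
// at a call site, e.g. in a test:
//
// let output = OutputData::AffectedRows(3);
// assert_eq!(output.pretty_print().await, "Affected Rows: 3");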
@@ -188,6 +188,13 @@ pub enum Error {
         #[snafu(implicit)]
         location: Location,
     },
+
+    #[snafu(display("Failed to align JSON array, reason: {reason}"))]
+    AlignJsonArray {
+        reason: String,
+        #[snafu(implicit)]
+        location: Location,
+    },
 }

 impl ErrorExt for Error {
@@ -203,7 +210,8 @@ impl ErrorExt for Error {
         | Error::ToArrowScalar { .. }
         | Error::ProjectArrowRecordBatch { .. }
         | Error::PhysicalExpr { .. }
-        | Error::RecordBatchSliceIndexOverflow { .. } => StatusCode::Internal,
+        | Error::RecordBatchSliceIndexOverflow { .. }
+        | Error::AlignJsonArray { .. } => StatusCode::Internal,

         Error::PollStream { .. } => StatusCode::EngineExecuteQuery,

@@ -18,7 +18,7 @@ pub mod adapter;
 pub mod cursor;
 pub mod error;
 pub mod filter;
-mod recordbatch;
+pub mod recordbatch;
 pub mod util;

 use std::fmt;
@@ -20,7 +20,8 @@ use datafusion::arrow::util::pretty::pretty_format_batches;
 use datafusion_common::arrow::array::ArrayRef;
 use datafusion_common::arrow::compute;
 use datafusion_common::arrow::datatypes::{DataType as ArrowDataType, SchemaRef as ArrowSchemaRef};
-use datatypes::arrow::array::{Array, AsArray, RecordBatchOptions};
+use datatypes::arrow::array::{Array, AsArray, RecordBatchOptions, StructArray, new_null_array};
+use datatypes::extension::json::is_json_extension_type;
 use datatypes::prelude::DataType;
 use datatypes::schema::SchemaRef;
 use datatypes::vectors::{Helper, VectorRef};
@@ -30,8 +31,8 @@ use snafu::{OptionExt, ResultExt, ensure};

 use crate::DfRecordBatch;
 use crate::error::{
-    self, ArrowComputeSnafu, ColumnNotExistsSnafu, DataTypesSnafu, ProjectArrowRecordBatchSnafu,
-    Result,
+    self, AlignJsonArraySnafu, ArrowComputeSnafu, ColumnNotExistsSnafu, DataTypesSnafu,
+    NewDfRecordBatchSnafu, ProjectArrowRecordBatchSnafu, Result,
 };

 /// A two-dimensional batch of column-oriented data with a defined schema.
@@ -59,6 +60,8 @@ impl RecordBatch {
         // TODO(LFC): Remove the casting here once `Batch` is no longer used.
         let arrow_arrays = Self::cast_view_arrays(schema.arrow_schema(), arrow_arrays)?;

+        let arrow_arrays = maybe_align_json_array_with_schema(schema.arrow_schema(), arrow_arrays)?;
+
         let df_record_batch = DfRecordBatch::try_new(schema.arrow_schema().clone(), arrow_arrays)
             .context(error::NewDfRecordBatchSnafu)?;

@@ -327,12 +330,111 @@ pub fn merge_record_batches(schema: SchemaRef, batches: &[RecordBatch]) -> Resul
     Ok(RecordBatch::from_df_record_batch(schema, record_batch))
 }

/// Align a json array `json_array` to the json type `schema_type`. The `schema_type` is often the
/// "largest" json type after some insertions in the table schema, while the json array previously
/// written in the SST could lag behind it. So it's important to "amend" the json array's missing
/// fields with null arrays, to align the array's data type with the provided one.
///
/// # Panics
///
/// - The json array is not an Arrow [StructArray], or the provided data type `schema_type` is not
///   of Struct type. Neither should happen unless we switch our implementation of how a json
///   array is physically stored.
pub fn align_json_array(json_array: &ArrayRef, schema_type: &ArrowDataType) -> Result<ArrayRef> {
    let json_type = json_array.data_type();
    if json_type == schema_type {
        return Ok(json_array.clone());
    }

    let json_array = json_array.as_struct();
    let array_fields = json_array.fields();
    let array_columns = json_array.columns();
    let ArrowDataType::Struct(schema_fields) = schema_type else {
        unreachable!()
    };
    let mut aligned = Vec::with_capacity(schema_fields.len());

    // Compare the fields in the json array and the to-be-aligned schema, amending with null arrays
    // on the way. It's very important to note that fields in the json array and in the json type
    // are both SORTED.

    let mut i = 0; // point to the schema fields
    let mut j = 0; // point to the array fields
    while i < schema_fields.len() && j < array_fields.len() {
        let schema_field = &schema_fields[i];
        let array_field = &array_fields[j];
        if schema_field.name() == array_field.name() {
            if matches!(schema_field.data_type(), ArrowDataType::Struct(_)) {
                // A `StructArray` in a json array must be another json array. (Like a nested
                // json object in a json value.)
                aligned.push(align_json_array(
                    &array_columns[j],
                    schema_field.data_type(),
                )?);
            } else {
                aligned.push(array_columns[j].clone());
            }
            j += 1;
        } else {
            aligned.push(new_null_array(schema_field.data_type(), json_array.len()));
        }
        i += 1;
    }
    if i < schema_fields.len() {
        for field in &schema_fields[i..] {
            aligned.push(new_null_array(field.data_type(), json_array.len()));
        }
    }
    ensure!(
        j == array_fields.len(),
        AlignJsonArraySnafu {
            reason: format!(
                "this json array has more fields {:?}",
                array_fields[j..]
                    .iter()
                    .map(|x| x.name())
                    .collect::<Vec<_>>(),
            )
        }
    );

    let json_array =
        StructArray::try_new(schema_fields.clone(), aligned, json_array.nulls().cloned())
            .context(NewDfRecordBatchSnafu)?;
    Ok(Arc::new(json_array))
}

fn maybe_align_json_array_with_schema(
    schema: &ArrowSchemaRef,
    arrays: Vec<ArrayRef>,
) -> Result<Vec<ArrayRef>> {
    if schema.fields().iter().all(|f| !is_json_extension_type(f)) {
        return Ok(arrays);
    }

    let mut aligned = Vec::with_capacity(arrays.len());
    for (field, array) in schema.fields().iter().zip(arrays.into_iter()) {
        if !is_json_extension_type(field) {
            aligned.push(array);
            continue;
        }

        let json_array = align_json_array(&array, field.data_type())?;
        aligned.push(json_array);
    }
    Ok(aligned)
}

#[cfg(test)]
mod tests {
    use std::sync::Arc;

-    use datatypes::arrow::array::{AsArray, UInt32Array};
-    use datatypes::arrow::datatypes::{DataType, Field, Schema as ArrowSchema, UInt32Type};
+    use datatypes::arrow::array::{
+        AsArray, BooleanArray, Float64Array, Int64Array, ListArray, UInt32Array,
+    };
+    use datatypes::arrow::datatypes::{
+        DataType, Field, Fields, Int64Type, Schema as ArrowSchema, UInt32Type,
+    };
     use datatypes::arrow_array::StringArray;
     use datatypes::data_type::ConcreteDataType;
     use datatypes::schema::{ColumnSchema, Schema};
@@ -340,6 +442,165 @@ mod tests {

     use super::*;

    #[test]
    fn test_align_json_array() -> Result<()> {
        struct TestCase {
            json_array: ArrayRef,
            schema_type: DataType,
            expected: std::result::Result<ArrayRef, String>,
        }

        impl TestCase {
            fn new(
                json_array: StructArray,
                schema_type: Fields,
                expected: std::result::Result<Vec<ArrayRef>, String>,
            ) -> Self {
                Self {
                    json_array: Arc::new(json_array),
                    schema_type: DataType::Struct(schema_type.clone()),
                    expected: expected
                        .map(|x| Arc::new(StructArray::new(schema_type, x, None)) as ArrayRef),
                }
            }

            fn test(self) -> Result<()> {
                let result = align_json_array(&self.json_array, &self.schema_type);
                match (result, self.expected) {
                    (Ok(json_array), Ok(expected)) => assert_eq!(&json_array, &expected),
                    (Ok(json_array), Err(e)) => {
                        panic!("expecting error {e} but actually get: {json_array:?}")
                    }
                    (Err(e), Err(expected)) => assert_eq!(e.to_string(), expected),
                    (Err(e), Ok(_)) => return Err(e),
                }
                Ok(())
            }
        }

        // Test empty json array can be aligned with a complex json type.
        TestCase::new(
            StructArray::new_empty_fields(2, None),
            Fields::from(vec![
                Field::new("int", DataType::Int64, true),
                Field::new_struct(
                    "nested",
                    vec![Field::new("bool", DataType::Boolean, true)],
                    true,
                ),
                Field::new("string", DataType::Utf8, true),
            ]),
            Ok(vec![
                Arc::new(Int64Array::new_null(2)) as ArrayRef,
                Arc::new(StructArray::new_null(
                    Fields::from(vec![Arc::new(Field::new("bool", DataType::Boolean, true))]),
                    2,
                )),
                Arc::new(StringArray::new_null(2)),
            ]),
        )
        .test()?;

        // Test simple json array alignment.
        TestCase::new(
            StructArray::from(vec![(
                Arc::new(Field::new("float", DataType::Float64, true)),
                Arc::new(Float64Array::from(vec![1.0, 2.0, 3.0])) as ArrayRef,
            )]),
            Fields::from(vec![
                Field::new("float", DataType::Float64, true),
                Field::new("string", DataType::Utf8, true),
            ]),
            Ok(vec![
                Arc::new(Float64Array::from(vec![1.0, 2.0, 3.0])) as ArrayRef,
                Arc::new(StringArray::new_null(3)),
            ]),
        )
        .test()?;

        // Test complex json array alignment.
        TestCase::new(
            StructArray::from(vec![
                (
                    Arc::new(Field::new_list(
                        "list",
                        Field::new_list_field(DataType::Int64, true),
                        true,
                    )),
                    Arc::new(ListArray::from_iter_primitive::<Int64Type, _, _>(vec![
                        Some(vec![Some(1)]),
                        None,
                        Some(vec![Some(2), Some(3)]),
                    ])) as ArrayRef,
                ),
                (
                    Arc::new(Field::new_struct(
                        "nested",
                        vec![Field::new("int", DataType::Int64, true)],
                        true,
                    )),
                    Arc::new(StructArray::from(vec![(
                        Arc::new(Field::new("int", DataType::Int64, true)),
                        Arc::new(Int64Array::from(vec![-1, -2, -3])) as ArrayRef,
                    )])),
                ),
                (
                    Arc::new(Field::new("string", DataType::Utf8, true)),
                    Arc::new(StringArray::from(vec!["a", "b", "c"])),
                ),
            ]),
            Fields::from(vec![
                Field::new("bool", DataType::Boolean, true),
                Field::new_list("list", Field::new_list_field(DataType::Int64, true), true),
                Field::new_struct(
                    "nested",
                    vec![
                        Field::new("float", DataType::Float64, true),
                        Field::new("int", DataType::Int64, true),
                    ],
                    true,
                ),
                Field::new("string", DataType::Utf8, true),
            ]),
            Ok(vec![
                Arc::new(BooleanArray::new_null(3)) as ArrayRef,
                Arc::new(ListArray::from_iter_primitive::<Int64Type, _, _>(vec![
                    Some(vec![Some(1)]),
                    None,
                    Some(vec![Some(2), Some(3)]),
                ])),
                Arc::new(StructArray::from(vec![
                    (
                        Arc::new(Field::new("float", DataType::Float64, true)),
                        Arc::new(Float64Array::new_null(3)) as ArrayRef,
                    ),
                    (
                        Arc::new(Field::new("int", DataType::Int64, true)),
                        Arc::new(Int64Array::from(vec![-1, -2, -3])),
                    ),
                ])),
                Arc::new(StringArray::from(vec!["a", "b", "c"])),
            ]),
        )
        .test()?;

        // Test align failed.
        TestCase::new(
            StructArray::try_from(vec![
                ("i", Arc::new(Int64Array::from(vec![1])) as ArrayRef),
                ("j", Arc::new(Int64Array::from(vec![2])) as ArrayRef),
            ])
            .unwrap(),
            Fields::from(vec![Field::new("i", DataType::Int64, true)]),
            Err(
                r#"Failed to align JSON array, reason: this json array has more fields ["j"]"#
                    .to_string(),
            ),
        )
        .test()?;
        Ok(())
    }

    #[test]
    fn test_record_batch() {
        let arrow_schema = Arc::new(ArrowSchema::new(vec![
@@ -231,13 +231,15 @@ pub fn sql_value_to_value(
         }
     }

-    if value.data_type() != *data_type {
+    let value_datatype = value.data_type();
+    // The datatype of json value is determined by its actual data, so we can't simply "cast" it here.
+    if value_datatype.is_json() || value_datatype == *data_type {
+        Ok(value)
+    } else {
         datatypes::types::cast(value, data_type).with_context(|_| InvalidCastSnafu {
             sql_value: sql_val.clone(),
             datatype: data_type,
         })
-    } else {
-        Ok(value)
     }
 }
@@ -16,6 +16,7 @@ use common_time::timezone::Timezone;
 use datatypes::prelude::ConcreteDataType;
 use datatypes::schema::ColumnDefaultConstraint;
 use datatypes::schema::constraint::{CURRENT_TIMESTAMP, CURRENT_TIMESTAMP_FN};
+use snafu::ensure;
 use sqlparser::ast::ValueWithSpan;
 pub use sqlparser::ast::{
     BinaryOperator, ColumnDef, ColumnOption, ColumnOptionDef, DataType, Expr, Function,
@@ -37,6 +38,14 @@ pub fn parse_column_default_constraint(
         .iter()
         .find(|o| matches!(o.option, ColumnOption::Default(_)))
     {
+        ensure!(
+            !data_type.is_json(),
+            UnsupportedDefaultValueSnafu {
+                column_name,
+                reason: "json column cannot have a default value",
+            }
+        );
+
         let default_constraint = match &opt.option {
             ColumnOption::Default(Expr::Value(v)) => ColumnDefaultConstraint::Value(
                 sql_value_to_value(column_name, data_type, &v.value, timezone, None, false)?,
@@ -82,7 +91,7 @@ pub fn parse_column_default_constraint(
             } else {
                 return UnsupportedDefaultValueSnafu {
                     column_name,
-                    expr: *expr.clone(),
+                    reason: format!("expr '{expr}' not supported"),
                 }
                 .fail();
             }
@@ -90,14 +99,14 @@ pub fn parse_column_default_constraint(
             ColumnOption::Default(others) => {
                 return UnsupportedDefaultValueSnafu {
                     column_name,
-                    expr: others.clone(),
+                    reason: format!("expr '{others}' not supported"),
                 }
                 .fail();
             }
             _ => {
                 return UnsupportedDefaultValueSnafu {
                     column_name,
-                    expr: Expr::Value(SqlValue::Null.into()),
+                    reason: format!("option '{}' not supported", opt.option),
                 }
                 .fail();
             }
@@ -55,13 +55,11 @@ pub enum Error {
|
||||
},
|
||||
|
||||
#[snafu(display(
|
||||
"Unsupported expr in default constraint: {} for column: {}",
|
||||
expr,
|
||||
column_name
|
||||
"Unsupported default constraint for column: '{column_name}', reason: {reason}"
|
||||
))]
|
||||
UnsupportedDefaultValue {
|
||||
column_name: String,
|
||||
expr: Expr,
|
||||
reason: String,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
@@ -58,10 +58,14 @@ pub fn get_total_memory_bytes() -> i64 {
    }
}

/// Get the total CPU cores. The result will be rounded to the nearest integer.
/// For example, if the total CPU is 1.5 cores(1500 millicores), the result will be 2.
/// Get the total CPU cores. The result will be rounded up to the next integer (ceiling).
/// For example, if the total CPU is 1.1 cores (1100 millicores) or 1.5 cores (1500 millicores), the result will be 2.
pub fn get_total_cpu_cores() -> usize {
    ((get_total_cpu_millicores() as f64) / 1000.0).round() as usize
    cpu_cores(get_total_cpu_millicores())
}

fn cpu_cores(cpu_millicores: i64) -> usize {
    ((cpu_millicores as f64) / 1_000.0).ceil() as usize
}

/// Get the total memory in readable size.

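The switch from `round` to `ceil` matters for fractional CPU quotas: with rounding, a 1.1-core cgroup limit (1100 millicores) would report 1 core and under-size thread pools. A standalone check of the two behaviors:

fn main() {
    let millicores = 1100_i64;
    let rounded = ((millicores as f64) / 1000.0).round() as usize;
    let ceiled = ((millicores as f64) / 1000.0).ceil() as usize;
    // round() under-reports the 1.1-core quota; ceil() never does.
    assert_eq!(rounded, 1);
    assert_eq!(ceiled, 2);
}
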
@@ -178,6 +182,13 @@ mod tests {
    #[test]
    fn test_get_total_cpu_cores() {
        assert!(get_total_cpu_cores() > 0);
        assert_eq!(cpu_cores(1), 1);
        assert_eq!(cpu_cores(100), 1);
        assert_eq!(cpu_cores(500), 1);
        assert_eq!(cpu_cores(1000), 1);
        assert_eq!(cpu_cores(1100), 2);
        assert_eq!(cpu_cores(1900), 2);
        assert_eq!(cpu_cores(10_000), 10);
    }

    #[test]

@@ -36,6 +36,9 @@ pub const DEFAULT_BACKOFF_CONFIG: BackoffConfig = BackoffConfig {
    deadline: Some(Duration::from_secs(3)),
};

/// The default connect timeout for kafka client.
pub const DEFAULT_CONNECT_TIMEOUT: Duration = Duration::from_secs(10);

/// Default interval for auto WAL pruning.
pub const DEFAULT_AUTO_PRUNE_INTERVAL: Duration = Duration::from_mins(30);
/// Default limit for concurrent auto pruning tasks.

@@ -22,6 +22,7 @@ use common_base::Plugins;
use common_error::ext::BoxedError;
use common_greptimedb_telemetry::GreptimeDBTelemetryTask;
use common_meta::cache::{LayeredCacheRegistry, SchemaCacheRef, TableSchemaCacheRef};
use common_meta::cache_invalidator::CacheInvalidatorRef;
use common_meta::datanode::TopicStatsReporter;
use common_meta::key::runtime_switch::RuntimeSwitchManager;
use common_meta::key::{SchemaMetadataManager, SchemaMetadataManagerRef};

@@ -281,21 +282,11 @@ impl DatanodeBuilder {
            open_all_regions.await?;
        }

        let mut resource_stat = ResourceStatImpl::default();
        resource_stat.start_collect_cpu_usage();

        let heartbeat_task = if let Some(meta_client) = meta_client {
            Some(
                HeartbeatTask::try_new(
                    &self.opts,
                    region_server.clone(),
                    meta_client,
                    cache_registry,
                    self.plugins.clone(),
                    Arc::new(resource_stat),
                )
                .await?,
            )
            let task = self
                .create_heartbeat_task(&region_server, meta_client, cache_registry)
                .await?;
            Some(task)
        } else {
            None
        };

@@ -324,6 +315,29 @@ impl DatanodeBuilder {
        })
    }

    async fn create_heartbeat_task(
        &self,
        region_server: &RegionServer,
        meta_client: MetaClientRef,
        cache_invalidator: CacheInvalidatorRef,
    ) -> Result<HeartbeatTask> {
        let stat = {
            let mut stat = ResourceStatImpl::default();
            stat.start_collect_cpu_usage();
            Arc::new(stat)
        };

        HeartbeatTask::try_new(
            &self.opts,
            region_server.clone(),
            meta_client,
            cache_invalidator,
            self.plugins.clone(),
            stat,
        )
        .await
    }

    /// Builds [ObjectStoreManager] from [StorageConfig].
    pub async fn build_object_store_manager(cfg: &StorageConfig) -> Result<ObjectStoreManagerRef> {
        let object_store = store::new_object_store(cfg.store.clone(), &cfg.data_home).await?;

@@ -410,14 +410,6 @@ pub enum Error {
        location: Location,
    },

    #[snafu(display("Failed to build cache store"))]
    BuildCacheStore {
        #[snafu(source)]
        error: object_store::Error,
        #[snafu(implicit)]
        location: Location,
    },

    #[snafu(display("Not yet implemented: {what}"))]
    NotYetImplemented { what: String },
}

@@ -493,7 +485,6 @@ impl ErrorExt for Error {
            SerializeJson { .. } => StatusCode::Internal,

            ObjectStore { source, .. } => source.status_code(),
            BuildCacheStore { .. } => StatusCode::StorageUnavailable,
        }
    }

@@ -25,6 +25,7 @@ use common_meta::datanode::REGION_STATISTIC_KEY;
use common_meta::distributed_time_constants::META_KEEP_ALIVE_INTERVAL_SECS;
use common_meta::heartbeat::handler::invalidate_table_cache::InvalidateCacheHandler;
use common_meta::heartbeat::handler::parse_mailbox_message::ParseMailboxMessageHandler;
use common_meta::heartbeat::handler::suspend::SuspendHandler;
use common_meta::heartbeat::handler::{
    HandlerGroupExecutor, HeartbeatResponseHandlerContext, HeartbeatResponseHandlerExecutorRef,
};

@@ -91,6 +92,7 @@ impl HeartbeatTask {
        let resp_handler_executor = Arc::new(HandlerGroupExecutor::new(vec![
            region_alive_keeper.clone(),
            Arc::new(ParseMailboxMessageHandler),
            Arc::new(SuspendHandler::new(region_server.suspend_state())),
            Arc::new(
                RegionHeartbeatResponseHandler::new(region_server.clone())
                    .with_open_region_parallelism(opts.init_regions_parallelism),

@@ -99,26 +99,30 @@ impl RegionHeartbeatResponseHandler {
        self
    }

    fn build_handler(&self, instruction: &Instruction) -> MetaResult<Box<InstructionHandlers>> {
    fn build_handler(
        &self,
        instruction: &Instruction,
    ) -> MetaResult<Option<Box<InstructionHandlers>>> {
        match instruction {
            Instruction::CloseRegions(_) => Ok(Box::new(CloseRegionsHandler.into())),
            Instruction::OpenRegions(_) => Ok(Box::new(
            Instruction::CloseRegions(_) => Ok(Some(Box::new(CloseRegionsHandler.into()))),
            Instruction::OpenRegions(_) => Ok(Some(Box::new(
                OpenRegionsHandler {
                    open_region_parallelism: self.open_region_parallelism,
                }
                .into(),
            )),
            Instruction::FlushRegions(_) => Ok(Box::new(FlushRegionsHandler.into())),
            Instruction::DowngradeRegions(_) => Ok(Box::new(DowngradeRegionsHandler.into())),
            Instruction::UpgradeRegions(_) => Ok(Box::new(
            ))),
            Instruction::FlushRegions(_) => Ok(Some(Box::new(FlushRegionsHandler.into()))),
            Instruction::DowngradeRegions(_) => Ok(Some(Box::new(DowngradeRegionsHandler.into()))),
            Instruction::UpgradeRegions(_) => Ok(Some(Box::new(
                UpgradeRegionsHandler {
                    upgrade_region_parallelism: self.open_region_parallelism,
                }
                .into(),
            )),
            Instruction::GetFileRefs(_) => Ok(Box::new(GetFileRefsHandler.into())),
            Instruction::GcRegions(_) => Ok(Box::new(GcRegionsHandler.into())),
            ))),
            Instruction::GetFileRefs(_) => Ok(Some(Box::new(GetFileRefsHandler.into()))),
            Instruction::GcRegions(_) => Ok(Some(Box::new(GcRegionsHandler.into()))),
            Instruction::InvalidateCaches(_) => InvalidHeartbeatResponseSnafu.fail(),
            Instruction::Suspend => Ok(None),
        }
    }
}

@@ -216,30 +220,24 @@ impl HeartbeatResponseHandler for RegionHeartbeatResponseHandler {
            .context(InvalidHeartbeatResponseSnafu)?;

        let mailbox = ctx.mailbox.clone();
        let region_server = self.region_server.clone();
        let downgrade_tasks = self.downgrade_tasks.clone();
        let flush_tasks = self.flush_tasks.clone();
        let gc_tasks = self.gc_tasks.clone();
        let handler = self.build_handler(&instruction)?;
        let _handle = common_runtime::spawn_global(async move {
            let reply = handler
                .handle(
                    &HandlerContext {
                        region_server,
                        downgrade_tasks,
                        flush_tasks,
                        gc_tasks,
                    },
                    instruction,
                )
                .await;

            if let Some(reply) = reply
                && let Err(e) = mailbox.send((meta, reply)).await
            {
                error!(e; "Failed to send reply to mailbox");
            }
        });
        if let Some(handler) = self.build_handler(&instruction)? {
            let context = HandlerContext {
                region_server: self.region_server.clone(),
                downgrade_tasks: self.downgrade_tasks.clone(),
                flush_tasks: self.flush_tasks.clone(),
                gc_tasks: self.gc_tasks.clone(),
            };
            let _handle = common_runtime::spawn_global(async move {
                let reply = handler.handle(&context, instruction).await;
                if let Some(reply) = reply
                    && let Err(e) = mailbox.send((meta, reply)).await
                {
                    let error = e.to_string();
                    let (meta, reply) = e.0;
                    error!("Failed to send reply {reply} to {meta:?}: {error}");
                }
            });
        }

        Ok(HandleControl::Continue)
    }

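The shape of this refactor: `build_handler` now returns an `Option` so an instruction such as `Suspend` (consumed by the dedicated `SuspendHandler` registered earlier in this diff) can be accepted without spawning a reply task. A toy sketch of that dispatch pattern, with hypothetical types:

enum Instr { Open, Close, Suspend }

// Sketch: Some(handler) spawns a task, None is a deliberate no-op.
fn handler_for(instr: &Instr) -> Option<&'static str> {
    match instr {
        Instr::Open => Some("open"),
        Instr::Close => Some("close"),
        Instr::Suspend => None, // handled elsewhere; nothing to spawn
    }
}

fn dispatch(instr: Instr) {
    if let Some(name) = handler_for(&instr) {
        println!("spawning {name} task");
    }
}
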
@@ -320,4 +320,15 @@ mod tests {
        assert!(flush_reply.results[0].1.is_ok());
        assert!(flush_reply.results[1].1.is_err());
    }

    #[test]
    fn test_flush_regions_display() {
        let region_id = RegionId::new(1024, 1);
        let flush_regions = FlushRegions::sync_single(region_id);
        let display = format!("{}", flush_regions);
        assert_eq!(
            display,
            "FlushRegions(region_ids=[4398046511105(1024, 1)], strategy=Sync, error_strategy=FailFast)"
        );
    }
}

@@ -17,6 +17,7 @@ mod catalog;
use std::collections::HashMap;
use std::fmt::Debug;
use std::ops::Deref;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, RwLock};
use std::time::Duration;

@@ -52,7 +53,9 @@ pub use query::dummy_catalog::{
    DummyCatalogList, DummyTableProviderFactory, TableProviderFactoryRef,
};
use serde_json;
use servers::error::{self as servers_error, ExecuteGrpcRequestSnafu, Result as ServerResult};
use servers::error::{
    self as servers_error, ExecuteGrpcRequestSnafu, Result as ServerResult, SuspendedSnafu,
};
use servers::grpc::FlightCompression;
use servers::grpc::flight::{FlightCraft, FlightRecordBatchStream, TonicStream};
use servers::grpc::region_server::RegionServerHandler;

@@ -89,6 +92,7 @@ use crate::region_server::catalog::{NameAwareCatalogList, NameAwareDataSourceInj
pub struct RegionServer {
    inner: Arc<RegionServerInner>,
    flight_compression: FlightCompression,
    suspend: Arc<AtomicBool>,
}

pub struct RegionStat {

@@ -136,6 +140,7 @@ impl RegionServer {
                ),
            )),
            flight_compression,
            suspend: Arc::new(AtomicBool::new(false)),
        }
    }

@@ -595,6 +600,14 @@ impl RegionServer {
            .handle_sync_region(engine_with_status.engine(), region_id, manifest_info)
            .await
    }

    fn is_suspended(&self) -> bool {
        self.suspend.load(Ordering::Relaxed)
    }

    pub(crate) fn suspend_state(&self) -> Arc<AtomicBool> {
        self.suspend.clone()
    }
}

#[async_trait]

@@ -644,6 +657,8 @@ impl FlightCraft for RegionServer {
        &self,
        request: Request<Ticket>,
    ) -> TonicResult<Response<TonicStream<FlightData>>> {
        ensure!(!self.is_suspended(), SuspendedSnafu);

        let ticket = request.into_inner().ticket;
        let request = api::v1::region::QueryRequest::decode(ticket.as_ref())
            .context(servers_error::InvalidFlightTicketSnafu)?;

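The suspend gate is a single `AtomicBool` shared between the heartbeat handler (which flips it) and the Flight entry point (which checks it before serving a query). A self-contained sketch of the same pattern; relaxed ordering is assumed to be sufficient here because the flag guards no other data:

use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};

struct Gate {
    suspend: Arc<AtomicBool>,
}

impl Gate {
    fn serve(&self, query: &str) -> Result<String, String> {
        // Mirrors the ensure!(!self.is_suspended(), ...) guard above.
        if self.suspend.load(Ordering::Relaxed) {
            return Err("suspended".into());
        }
        Ok(format!("handled {query}"))
    }
}

fn main() {
    let gate = Gate { suspend: Arc::new(AtomicBool::new(false)) };
    assert!(gate.serve("q1").is_ok());
    gate.suspend.store(true, Ordering::Relaxed);
    assert!(gate.serve("q2").is_err());
}
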
@@ -1200,7 +1215,8 @@ impl RegionServerInner {
            | RegionRequest::Flush(_)
            | RegionRequest::Compact(_)
            | RegionRequest::Truncate(_)
            | RegionRequest::BuildIndex(_) => RegionChange::None,
            | RegionRequest::BuildIndex(_)
            | RegionRequest::EnterStaging(_) => RegionChange::None,
            RegionRequest::Catchup(_) => RegionChange::Catchup,
        };

@@ -1260,7 +1276,6 @@ impl RegionServerInner {
            .with_context(|_| HandleRegionRequestSnafu { region_id })?
            .new_opened_logical_region_ids()
        else {
            warn!("No new opened logical regions");
            return Ok(());
        };

@@ -14,15 +14,10 @@

//! object storage utilities

use std::sync::Arc;

use common_telemetry::info;
use object_store::config::ObjectStorageCacheConfig;
use common_telemetry::{info, warn};
use object_store::factory::new_raw_object_store;
use object_store::layers::LruCacheLayer;
use object_store::services::Fs;
use object_store::util::{clean_temp_dir, join_dir, with_instrument_layers, with_retry_layers};
use object_store::{ATOMIC_WRITE_DIR, Access, ObjectStore, ObjectStoreBuilder};
use object_store::{ATOMIC_WRITE_DIR, ObjectStore};
use snafu::prelude::*;

use crate::config::ObjectStoreConfig;

@@ -47,23 +42,58 @@ pub(crate) async fn new_object_store_without_cache(
    Ok(object_store)
}

/// Cleans up old LRU read cache directories that were removed.
fn clean_old_read_cache(store: &ObjectStoreConfig, data_home: &str) {
    if !store.is_object_storage() {
        return;
    }

    let Some(cache_config) = store.cache_config() else {
        return;
    };

    // Only cleans if read cache was enabled
    if !cache_config.enable_read_cache {
        return;
    }

    let cache_base_dir = if cache_config.cache_path.is_empty() {
        data_home
    } else {
        &cache_config.cache_path
    };

    // Cleans up the old read cache directory
    let old_read_cache_dir = join_dir(cache_base_dir, "cache/object/read");
    info!(
        "Cleaning up old read cache directory: {}",
        old_read_cache_dir
    );
    if let Err(e) = clean_temp_dir(&old_read_cache_dir) {
        warn!(e; "Failed to clean old read cache directory {}", old_read_cache_dir);
    }

    // Cleans up the atomic temp dir used by the cache layer
    let cache_atomic_temp_dir = join_dir(cache_base_dir, ATOMIC_WRITE_DIR);
    info!(
        "Cleaning up old cache atomic temp directory: {}",
        cache_atomic_temp_dir
    );
    if let Err(e) = clean_temp_dir(&cache_atomic_temp_dir) {
        warn!(e; "Failed to clean old cache atomic temp directory {}", cache_atomic_temp_dir);
    }
}

pub async fn new_object_store(store: ObjectStoreConfig, data_home: &str) -> Result<ObjectStore> {
    // Cleans up old LRU read cache directories.
    // TODO: Remove this line after the 1.0 release.
    clean_old_read_cache(&store, data_home);

    let object_store = new_raw_object_store(&store, data_home)
        .await
        .context(error::ObjectStoreSnafu)?;
    // Enable retry layer and cache layer for non-fs object storages
    // Enables retry layer for non-fs object storages
    let object_store = if store.is_object_storage() {
        let object_store = {
            // It's safe to unwrap here because we already checked above.
            let cache_config = store.cache_config().unwrap();
            if let Some(cache_layer) = build_cache_layer(cache_config, data_home).await? {
                // Adds cache layer
                object_store.layer(cache_layer)
            } else {
                object_store
            }
        };

        // Adds retry layer
        with_retry_layers(object_store)
    } else {

@@ -73,40 +103,3 @@ pub async fn new_object_store(store: ObjectStoreConfig, data_home: &str) -> Resu
    let object_store = with_instrument_layers(object_store, true);
    Ok(object_store)
}

async fn build_cache_layer(
    cache_config: &ObjectStorageCacheConfig,
    data_home: &str,
) -> Result<Option<LruCacheLayer<impl Access>>> {
    // No need to build cache layer if read cache is disabled.
    if !cache_config.enable_read_cache {
        return Ok(None);
    }
    let cache_base_dir = if cache_config.cache_path.is_empty() {
        data_home
    } else {
        &cache_config.cache_path
    };
    let atomic_temp_dir = join_dir(cache_base_dir, ATOMIC_WRITE_DIR);
    clean_temp_dir(&atomic_temp_dir).context(error::ObjectStoreSnafu)?;

    let cache_store = Fs::default()
        .root(cache_base_dir)
        .atomic_write_dir(&atomic_temp_dir)
        .build()
        .context(error::BuildCacheStoreSnafu)?;

    let cache_layer = LruCacheLayer::new(
        Arc::new(cache_store),
        cache_config.cache_capacity.0 as usize,
    )
    .context(error::BuildCacheStoreSnafu)?;
    cache_layer.recover_cache(false).await;

    info!(
        "Enabled local object storage cache, path: {}, capacity: {}.",
        cache_config.cache_path, cache_config.cache_capacity
    );

    Ok(Some(cache_layer))
}

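One detail worth calling out from the two object-store hunks above: both the one-time cleanup and the removed cache-layer builder resolve the cache base directory the same way, falling back to `data_home` when `cache_path` is empty. A tiny sketch of that resolution rule, with hypothetical field names standing in for the config:

// Sketch; `cache_path`/`data_home` mirror the config values used above.
fn cache_base_dir<'a>(cache_path: &'a str, data_home: &'a str) -> &'a str {
    if cache_path.is_empty() { data_home } else { cache_path }
}

fn main() {
    assert_eq!(cache_base_dir("", "/data"), "/data");
    assert_eq!(cache_base_dir("/fast-ssd/cache", "/data"), "/fast-ssd/cache");
}
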
@@ -24,8 +24,8 @@ use common_query::Output;
use common_runtime::Runtime;
use common_runtime::runtime::{BuilderBuild, RuntimeTrait};
use datafusion::catalog::TableFunction;
use datafusion::dataframe::DataFrame;
use datafusion_expr::{AggregateUDF, LogicalPlan};
use query::dataframe::DataFrame;
use query::planner::LogicalPlanner;
use query::query_engine::{DescribeResult, QueryEngineState};
use query::{QueryEngine, QueryEngineContext};

@@ -33,9 +33,9 @@ use servers::grpc::FlightCompression;
use session::context::QueryContextRef;
use store_api::metadata::RegionMetadataRef;
use store_api::region_engine::{
    RegionEngine, RegionManifestInfo, RegionRole, RegionScannerRef, RegionStatistic,
    RemapManifestsRequest, RemapManifestsResponse, SetRegionRoleStateResponse,
    SettableRegionRoleState, SyncManifestResponse,
    CopyRegionFromRequest, CopyRegionFromResponse, RegionEngine, RegionManifestInfo, RegionRole,
    RegionScannerRef, RegionStatistic, RemapManifestsRequest, RemapManifestsResponse,
    SetRegionRoleStateResponse, SettableRegionRoleState, SyncManifestResponse,
};
use store_api::region_request::{AffectedRows, RegionRequest};
use store_api::storage::{RegionId, ScanRequest, SequenceNumber};

@@ -299,6 +299,14 @@ impl RegionEngine for MockRegionEngine {
        unimplemented!()
    }

    async fn copy_region_from(
        &self,
        _region_id: RegionId,
        _request: CopyRegionFromRequest,
    ) -> Result<CopyRegionFromResponse, BoxedError> {
        unimplemented!()
    }

    fn as_any(&self) -> &dyn Any {
        self
    }

@@ -15,7 +15,6 @@
use std::fmt;
use std::sync::Arc;

use arrow::compute::cast as arrow_array_cast;
use arrow::datatypes::{
    DataType as ArrowDataType, IntervalUnit as ArrowIntervalUnit, TimeUnit as ArrowTimeUnit,
};

@@ -368,8 +367,10 @@ impl ConcreteDataType {

    /// Checks if the data type can cast to another data type.
    pub fn can_arrow_type_cast_to(&self, to_type: &ConcreteDataType) -> bool {
        let array = arrow_array::new_empty_array(&self.as_arrow_type());
        arrow_array_cast(array.as_ref(), &to_type.as_arrow_type()).is_ok()
        match (self, to_type) {
            (ConcreteDataType::Json(this), ConcreteDataType::Json(that)) => that.is_include(this),
            _ => arrow::compute::can_cast_types(&self.as_arrow_type(), &to_type.as_arrow_type()),
        }
    }

    /// Try to cast data type as a [`DurationType`].

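For JSON-to-JSON casts the new rule is structural rather than arrow-based: the target must include every field of the source, which makes castability asymmetric. A toy model of that inclusion check (hypothetical `Ty` enum, not the GreptimeDB types; the real `is_include` appears later in this diff):

use std::collections::BTreeMap;

#[derive(PartialEq)]
enum Ty {
    Null,
    Str,
    Obj(BTreeMap<String, Ty>),
}

// `this` includes `that` if every field of `that` is included by `this`;
// Null is included by anything.
fn includes(this: &Ty, that: &Ty) -> bool {
    match (this, that) {
        (_, Ty::Null) => true,
        (Ty::Obj(a), Ty::Obj(b)) => b
            .iter()
            .all(|(k, t)| a.get(k).is_some_and(|s| includes(s, t))),
        (a, b) => a == b,
    }
}

fn main() {
    let wide = Ty::Obj(BTreeMap::from([
        ("a".to_string(), Ty::Str),
        ("b".to_string(), Ty::Str),
    ]));
    let narrow = Ty::Obj(BTreeMap::from([("a".to_string(), Ty::Str)]));
    // {a, b} can absorb {a}, but not the other way around.
    assert!(includes(&wide, &narrow));
    assert!(!includes(&narrow, &wide));
}
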
@@ -15,7 +15,7 @@
use std::sync::Arc;

use arrow_schema::extension::ExtensionType;
use arrow_schema::{ArrowError, DataType};
use arrow_schema::{ArrowError, DataType, FieldRef};
use serde::{Deserialize, Serialize};

use crate::json::JsonStructureSettings;

@@ -102,3 +102,8 @@ impl ExtensionType for JsonExtensionType {
        Ok(json)
    }
}

/// Check if this field is to be treated as json extension type.
pub fn is_json_extension_type(field: &FieldRef) -> bool {
    field.extension_type_name() == Some(JsonExtensionType::NAME)
}

@@ -260,7 +260,7 @@ impl JsonValue {
        ConcreteDataType::Json(self.json_type().clone())
    }

    pub(crate) fn json_type(&self) -> &JsonType {
    pub fn json_type(&self) -> &JsonType {
        self.json_type.get_or_init(|| self.json_variant.json_type())
    }

@@ -268,6 +268,14 @@ impl JsonValue {
        matches!(self.json_variant, JsonVariant::Null)
    }

    /// Check if this JSON value is an empty object.
    pub fn is_empty_object(&self) -> bool {
        match &self.json_variant {
            JsonVariant::Object(object) => object.is_empty(),
            _ => false,
        }
    }

    pub(crate) fn as_i64(&self) -> Option<i64> {
        match self.json_variant {
            JsonVariant::Number(n) => n.as_i64(),

@@ -33,7 +33,8 @@ pub use crate::schema::column_schema::{
    COLUMN_SKIPPING_INDEX_OPT_KEY_FALSE_POSITIVE_RATE, COLUMN_SKIPPING_INDEX_OPT_KEY_GRANULARITY,
    COLUMN_SKIPPING_INDEX_OPT_KEY_TYPE, COMMENT_KEY, ColumnExtType, ColumnSchema, FULLTEXT_KEY,
    FulltextAnalyzer, FulltextBackend, FulltextOptions, INVERTED_INDEX_KEY, Metadata,
    SKIPPING_INDEX_KEY, SkippingIndexOptions, SkippingIndexType, TIME_INDEX_KEY,
    SKIPPING_INDEX_KEY, SkippingIndexOptions, SkippingIndexType, TIME_INDEX_KEY, VECTOR_INDEX_KEY,
    VectorDistanceMetric, VectorIndexEngineType, VectorIndexOptions,
};
pub use crate::schema::constraint::ColumnDefaultConstraint;
pub use crate::schema::raw::RawSchema;

@@ -273,8 +274,9 @@ fn collect_fields(column_schemas: &[ColumnSchema]) -> Result<FieldsAndIndices> {
        _ => None,
    };
    if let Some(extype) = extype {
        let metadata = HashMap::from([(TYPE_KEY.to_string(), extype.to_string())]);
        field = field.with_metadata(metadata);
        field
            .metadata_mut()
            .insert(TYPE_KEY.to_string(), extype.to_string());
    }
    fields.push(field);
    ensure!(

@@ -46,6 +46,8 @@ pub const FULLTEXT_KEY: &str = "greptime:fulltext";
pub const INVERTED_INDEX_KEY: &str = "greptime:inverted_index";
/// Key used to store skip options in arrow field's metadata.
pub const SKIPPING_INDEX_KEY: &str = "greptime:skipping_index";
/// Key used to store vector index options in arrow field's metadata.
pub const VECTOR_INDEX_KEY: &str = "greptime:vector_index";

/// Keys used in fulltext options
pub const COLUMN_FULLTEXT_CHANGE_OPT_KEY_ENABLE: &str = "enable";

@@ -216,6 +218,53 @@ impl ColumnSchema {
        self.metadata.contains_key(INVERTED_INDEX_KEY)
    }

    /// Checks if this column has a vector index.
    pub fn is_vector_indexed(&self) -> bool {
        match self.vector_index_options() {
            Ok(opts) => opts.is_some(),
            Err(e) => {
                common_telemetry::warn!(
                    "Failed to deserialize vector_index_options for column '{}': {}",
                    self.name,
                    e
                );
                false
            }
        }
    }

    /// Gets the vector index options.
    pub fn vector_index_options(&self) -> Result<Option<VectorIndexOptions>> {
        match self.metadata.get(VECTOR_INDEX_KEY) {
            None => Ok(None),
            Some(json) => {
                let options =
                    serde_json::from_str(json).context(error::DeserializeSnafu { json })?;
                Ok(Some(options))
            }
        }
    }

    /// Sets the vector index options.
    pub fn set_vector_index_options(&mut self, options: &VectorIndexOptions) -> Result<()> {
        self.metadata.insert(
            VECTOR_INDEX_KEY.to_string(),
            serde_json::to_string(options).context(error::SerializeSnafu)?,
        );
        Ok(())
    }

    /// Removes the vector index options.
    pub fn unset_vector_index_options(&mut self) {
        self.metadata.remove(VECTOR_INDEX_KEY);
    }

    /// Sets vector index options and returns self for chaining.
    pub fn with_vector_index_options(mut self, options: &VectorIndexOptions) -> Result<Self> {
        self.set_vector_index_options(options)?;
        Ok(self)
    }

    /// Set default constraint.
    ///
    /// If a default constraint exists for the column, this method will

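A round-trip through the accessors added above (a sketch only; the crate and module paths are assumed from the `datatypes::schema` re-exports shown earlier in this diff):

use datatypes::error::Result;
use datatypes::schema::{ColumnSchema, VectorIndexOptions};

// Sketch: attach, read back, and remove vector-index metadata on a column.
fn toggle_vector_index(mut schema: ColumnSchema) -> Result<ColumnSchema> {
    let opts = VectorIndexOptions::default(); // usearch, l2sq, 16/128/64
    schema.set_vector_index_options(&opts)?;
    assert!(schema.is_vector_indexed());
    assert_eq!(schema.vector_index_options()?, Some(opts));
    schema.unset_vector_index_options();
    assert!(!schema.is_vector_indexed());
    Ok(schema)
}
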
@@ -964,6 +1013,181 @@ impl TryFrom<HashMap<String, String>> for SkippingIndexOptions {
    }
}

/// Distance metric for vector similarity search.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize, Default, Visit, VisitMut)]
#[serde(rename_all = "lowercase")]
pub enum VectorDistanceMetric {
    /// Squared Euclidean distance (L2^2).
    #[default]
    L2sq,
    /// Cosine distance (1 - cosine similarity).
    Cosine,
    /// Inner product (negative, for maximum inner product search).
    #[serde(alias = "ip")]
    InnerProduct,
}

impl fmt::Display for VectorDistanceMetric {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            VectorDistanceMetric::L2sq => write!(f, "l2sq"),
            VectorDistanceMetric::Cosine => write!(f, "cosine"),
            VectorDistanceMetric::InnerProduct => write!(f, "ip"),
        }
    }
}

impl std::str::FromStr for VectorDistanceMetric {
    type Err = String;

    fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
        match s.to_lowercase().as_str() {
            "l2sq" | "l2" | "euclidean" => Ok(VectorDistanceMetric::L2sq),
            "cosine" | "cos" => Ok(VectorDistanceMetric::Cosine),
            "inner_product" | "ip" | "dot" => Ok(VectorDistanceMetric::InnerProduct),
            _ => Err(format!(
                "Unknown distance metric: {}. Expected: l2sq, cosine, or ip",
                s
            )),
        }
    }
}

impl VectorDistanceMetric {
    /// Returns the metric as u8 for blob serialization.
    pub fn as_u8(&self) -> u8 {
        match self {
            Self::L2sq => 0,
            Self::Cosine => 1,
            Self::InnerProduct => 2,
        }
    }

    /// Parses metric from u8 (used when reading blob).
    pub fn try_from_u8(v: u8) -> Option<Self> {
        match v {
            0 => Some(Self::L2sq),
            1 => Some(Self::Cosine),
            2 => Some(Self::InnerProduct),
            _ => None,
        }
    }
}

/// Default HNSW connectivity parameter.
const DEFAULT_VECTOR_INDEX_CONNECTIVITY: u32 = 16;
/// Default expansion factor during index construction.
const DEFAULT_VECTOR_INDEX_EXPANSION_ADD: u32 = 128;
/// Default expansion factor during search.
const DEFAULT_VECTOR_INDEX_EXPANSION_SEARCH: u32 = 64;

fn default_vector_index_connectivity() -> u32 {
    DEFAULT_VECTOR_INDEX_CONNECTIVITY
}

fn default_vector_index_expansion_add() -> u32 {
    DEFAULT_VECTOR_INDEX_EXPANSION_ADD
}

fn default_vector_index_expansion_search() -> u32 {
    DEFAULT_VECTOR_INDEX_EXPANSION_SEARCH
}

/// Supported vector index engine types.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default, Serialize, Deserialize, Visit, VisitMut)]
#[serde(rename_all = "lowercase")]
pub enum VectorIndexEngineType {
    /// USearch HNSW implementation.
    #[default]
    Usearch,
    // Future: Vsag,
}

impl VectorIndexEngineType {
    /// Returns the engine type as u8 for blob serialization.
    pub fn as_u8(&self) -> u8 {
        match self {
            Self::Usearch => 0,
        }
    }

    /// Parses engine type from u8 (used when reading blob).
    pub fn try_from_u8(v: u8) -> Option<Self> {
        match v {
            0 => Some(Self::Usearch),
            _ => None,
        }
    }
}

impl fmt::Display for VectorIndexEngineType {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Self::Usearch => write!(f, "usearch"),
        }
    }
}

impl std::str::FromStr for VectorIndexEngineType {
    type Err = String;

    fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
        match s.to_lowercase().as_str() {
            "usearch" => Ok(Self::Usearch),
            _ => Err(format!(
                "Unknown vector index engine: {}. Expected: usearch",
                s
            )),
        }
    }
}

/// Options for vector index (HNSW).
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Visit, VisitMut)]
#[serde(rename_all = "kebab-case")]
pub struct VectorIndexOptions {
    /// Vector index engine type (default: usearch).
    #[serde(default)]
    pub engine: VectorIndexEngineType,
    /// Distance metric for similarity search.
    #[serde(default)]
    pub metric: VectorDistanceMetric,
    /// HNSW connectivity parameter (M in the paper).
    /// Higher values improve recall but increase memory usage.
    #[serde(default = "default_vector_index_connectivity")]
    pub connectivity: u32,
    /// Expansion factor during index construction (ef_construction).
    /// Higher values improve index quality but slow down construction.
    #[serde(default = "default_vector_index_expansion_add")]
    pub expansion_add: u32,
    /// Expansion factor during search (ef_search).
    /// Higher values improve recall but slow down search.
    #[serde(default = "default_vector_index_expansion_search")]
    pub expansion_search: u32,
}

impl Default for VectorIndexOptions {
    fn default() -> Self {
        Self {
            engine: VectorIndexEngineType::default(),
            metric: VectorDistanceMetric::default(),
            connectivity: DEFAULT_VECTOR_INDEX_CONNECTIVITY,
            expansion_add: DEFAULT_VECTOR_INDEX_EXPANSION_ADD,
            expansion_search: DEFAULT_VECTOR_INDEX_EXPANSION_SEARCH,
        }
    }
}

impl fmt::Display for VectorIndexOptions {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(
            f,
            "engine={}, metric={}, connectivity={}, expansion_add={}, expansion_search={}",
            self.engine, self.metric, self.connectivity, self.expansion_add, self.expansion_search
        )
    }
}

#[cfg(test)]
mod tests {
    use std::sync::Arc;

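Two behaviors of these types worth noting: `FromStr` accepts several aliases per metric, and the serde attributes mean a partially specified kebab-case document fills in the declared defaults. A sketch, assuming the types above are in scope and `serde_json` is available:

use std::str::FromStr;

fn main() {
    // Aliases from the FromStr impls above.
    assert_eq!(VectorDistanceMetric::from_str("dot"),
               Ok(VectorDistanceMetric::InnerProduct));
    assert_eq!(VectorDistanceMetric::from_str("L2"),
               Ok(VectorDistanceMetric::L2sq));

    // Omitted kebab-case fields fall back to the defaults (16 / 128 / 64).
    let opts: VectorIndexOptions =
        serde_json::from_str(r#"{"metric":"cosine"}"#).unwrap();
    assert_eq!(opts.metric, VectorDistanceMetric::Cosine);
    assert_eq!(opts.connectivity, 16);
    assert_eq!(opts.expansion_add, 128);
    assert_eq!(opts.expansion_search, 64);
}
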
@@ -20,7 +20,7 @@ mod decimal_type;
mod dictionary_type;
mod duration_type;
mod interval_type;
pub(crate) mod json_type;
pub mod json_type;
mod list_type;
mod null_type;
mod primitive_type;

@@ -18,7 +18,6 @@ use std::str::FromStr;
use std::sync::Arc;

use arrow::datatypes::DataType as ArrowDataType;
use arrow_schema::Fields;
use common_base::bytes::Bytes;
use serde::{Deserialize, Serialize};
use snafu::ResultExt;

@@ -36,7 +35,7 @@ use crate::vectors::json::builder::JsonVectorBuilder;
use crate::vectors::{BinaryVectorBuilder, MutableVector};

pub const JSON_TYPE_NAME: &str = "Json";
const JSON_PLAIN_FIELD_NAME: &str = "__plain__";
const JSON_PLAIN_FIELD_NAME: &str = "__json_plain__";
const JSON_PLAIN_FIELD_METADATA_KEY: &str = "is_plain_json";

pub type JsonObjectType = BTreeMap<String, JsonNativeType>;

@@ -59,6 +58,10 @@ pub enum JsonNativeType {
}

impl JsonNativeType {
    pub fn is_null(&self) -> bool {
        matches!(self, JsonNativeType::Null)
    }

    pub fn u64() -> Self {
        Self::Number(JsonNumberType::U64)
    }

@@ -187,7 +190,7 @@ impl JsonType {
        }
    }

    pub(crate) fn empty() -> Self {
    pub fn null() -> Self {
        Self {
            format: JsonFormat::Native(Box::new(JsonNativeType::Null)),
        }

@@ -208,7 +211,7 @@ impl JsonType {
    }

    /// Try to merge this json type with others, error on datatype conflict.
    pub(crate) fn merge(&mut self, other: &JsonType) -> Result<()> {
    pub fn merge(&mut self, other: &JsonType) -> Result<()> {
        match (&self.format, &other.format) {
            (JsonFormat::Jsonb, JsonFormat::Jsonb) => Ok(()),
            (JsonFormat::Native(this), JsonFormat::Native(that)) => {

@@ -223,7 +226,8 @@ impl JsonType {
        }
    }

    pub(crate) fn is_mergeable(&self, other: &JsonType) -> bool {
    /// Check if it can merge with `other` json type.
    pub fn is_mergeable(&self, other: &JsonType) -> bool {
        match (&self.format, &other.format) {
            (JsonFormat::Jsonb, JsonFormat::Jsonb) => true,
            (JsonFormat::Native(this), JsonFormat::Native(that)) => {

@@ -232,6 +236,43 @@ impl JsonType {
            _ => false,
        }
    }

    /// Check if it includes all fields in `other` json type.
    pub fn is_include(&self, other: &JsonType) -> bool {
        match (&self.format, &other.format) {
            (JsonFormat::Jsonb, JsonFormat::Jsonb) => true,
            (JsonFormat::Native(this), JsonFormat::Native(that)) => {
                is_include(this.as_ref(), that.as_ref())
            }
            _ => false,
        }
    }
}

fn is_include(this: &JsonNativeType, that: &JsonNativeType) -> bool {
    fn is_include_object(this: &JsonObjectType, that: &JsonObjectType) -> bool {
        for (type_name, that_type) in that {
            let Some(this_type) = this.get(type_name) else {
                return false;
            };
            if !is_include(this_type, that_type) {
                return false;
            }
        }
        true
    }

    match (this, that) {
        (this, that) if this == that => true,
        (JsonNativeType::Array(this), JsonNativeType::Array(that)) => {
            is_include(this.as_ref(), that.as_ref())
        }
        (JsonNativeType::Object(this), JsonNativeType::Object(that)) => {
            is_include_object(this, that)
        }
        (_, JsonNativeType::Null) => true,
        _ => false,
    }
}

/// A special struct type for denoting "plain"(not object) json value. It has only one field, with

@@ -317,14 +358,14 @@ impl DataType for JsonType {
    fn as_arrow_type(&self) -> ArrowDataType {
        match self.format {
            JsonFormat::Jsonb => ArrowDataType::Binary,
            JsonFormat::Native(_) => ArrowDataType::Struct(Fields::empty()),
            JsonFormat::Native(_) => self.as_struct_type().as_arrow_type(),
        }
    }

    fn create_mutable_vector(&self, capacity: usize) -> Box<dyn MutableVector> {
        match self.format {
        match &self.format {
            JsonFormat::Jsonb => Box::new(BinaryVectorBuilder::with_capacity(capacity)),
            JsonFormat::Native(_) => Box::new(JsonVectorBuilder::with_capacity(capacity)),
            JsonFormat::Native(x) => Box::new(JsonVectorBuilder::new(*x.clone(), capacity)),
        }
    }

@@ -336,6 +377,12 @@ impl DataType for JsonType {
    }
}

impl Display for JsonType {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", self.name())
    }
}

/// Converts a json type value to string
pub fn jsonb_to_string(val: &[u8]) -> Result<String> {
    match jsonb::from_slice(val) {

@@ -366,6 +413,204 @@ mod tests {
    use super::*;
    use crate::json::JsonStructureSettings;

    #[test]
    fn test_json_type_include() {
        fn test(this: &JsonNativeType, that: &JsonNativeType, expected: bool) {
            assert_eq!(is_include(this, that), expected);
        }

        test(&JsonNativeType::Null, &JsonNativeType::Null, true);
        test(&JsonNativeType::Null, &JsonNativeType::Bool, false);

        test(&JsonNativeType::Bool, &JsonNativeType::Null, true);
        test(&JsonNativeType::Bool, &JsonNativeType::Bool, true);
        test(&JsonNativeType::Bool, &JsonNativeType::u64(), false);

        test(&JsonNativeType::u64(), &JsonNativeType::Null, true);
        test(&JsonNativeType::u64(), &JsonNativeType::u64(), true);
        test(&JsonNativeType::u64(), &JsonNativeType::String, false);

        test(&JsonNativeType::String, &JsonNativeType::Null, true);
        test(&JsonNativeType::String, &JsonNativeType::String, true);
        test(
            &JsonNativeType::String,
            &JsonNativeType::Array(Box::new(JsonNativeType::f64())),
            false,
        );

        test(
            &JsonNativeType::Array(Box::new(JsonNativeType::f64())),
            &JsonNativeType::Null,
            true,
        );
        test(
            &JsonNativeType::Array(Box::new(JsonNativeType::f64())),
            &JsonNativeType::Array(Box::new(JsonNativeType::Null)),
            true,
        );
        test(
            &JsonNativeType::Array(Box::new(JsonNativeType::f64())),
            &JsonNativeType::Array(Box::new(JsonNativeType::f64())),
            true,
        );
        test(
            &JsonNativeType::Array(Box::new(JsonNativeType::f64())),
            &JsonNativeType::String,
            false,
        );
        test(
            &JsonNativeType::Array(Box::new(JsonNativeType::f64())),
            &JsonNativeType::Object(JsonObjectType::new()),
            false,
        );

        let simple_json_object = &JsonNativeType::Object(JsonObjectType::from([(
            "foo".to_string(),
            JsonNativeType::String,
        )]));
        test(simple_json_object, &JsonNativeType::Null, true);
        test(simple_json_object, simple_json_object, true);
        test(simple_json_object, &JsonNativeType::i64(), false);
        test(
            simple_json_object,
            &JsonNativeType::Object(JsonObjectType::from([(
                "bar".to_string(),
                JsonNativeType::i64(),
            )])),
            false,
        );

        let complex_json_object = &JsonNativeType::Object(JsonObjectType::from([
            (
                "nested".to_string(),
                JsonNativeType::Object(JsonObjectType::from([(
                    "a".to_string(),
                    JsonNativeType::Object(JsonObjectType::from([(
                        "b".to_string(),
                        JsonNativeType::Object(JsonObjectType::from([(
                            "c".to_string(),
                            JsonNativeType::String,
                        )])),
                    )])),
                )])),
            ),
            ("bar".to_string(), JsonNativeType::i64()),
        ]));
        test(complex_json_object, &JsonNativeType::Null, true);
        test(complex_json_object, &JsonNativeType::String, false);
        test(complex_json_object, complex_json_object, true);
        test(
            complex_json_object,
            &JsonNativeType::Object(JsonObjectType::from([(
                "bar".to_string(),
                JsonNativeType::i64(),
            )])),
            true,
        );
        test(
            complex_json_object,
            &JsonNativeType::Object(JsonObjectType::from([
                (
                    "nested".to_string(),
                    JsonNativeType::Object(JsonObjectType::from([(
                        "a".to_string(),
                        JsonNativeType::Null,
                    )])),
                ),
                ("bar".to_string(), JsonNativeType::i64()),
            ])),
            true,
        );
        test(
            complex_json_object,
            &JsonNativeType::Object(JsonObjectType::from([
                (
                    "nested".to_string(),
                    JsonNativeType::Object(JsonObjectType::from([(
                        "a".to_string(),
                        JsonNativeType::String,
                    )])),
                ),
                ("bar".to_string(), JsonNativeType::i64()),
            ])),
            false,
        );
        test(
            complex_json_object,
            &JsonNativeType::Object(JsonObjectType::from([
                (
                    "nested".to_string(),
                    JsonNativeType::Object(JsonObjectType::from([(
                        "a".to_string(),
                        JsonNativeType::Object(JsonObjectType::from([(
                            "b".to_string(),
                            JsonNativeType::String,
                        )])),
                    )])),
                ),
                ("bar".to_string(), JsonNativeType::i64()),
            ])),
            false,
        );
        test(
            complex_json_object,
            &JsonNativeType::Object(JsonObjectType::from([
                (
                    "nested".to_string(),
                    JsonNativeType::Object(JsonObjectType::from([(
                        "a".to_string(),
                        JsonNativeType::Object(JsonObjectType::from([(
                            "b".to_string(),
                            JsonNativeType::Object(JsonObjectType::from([(
                                "c".to_string(),
                                JsonNativeType::Null,
                            )])),
                        )])),
                    )])),
                ),
                ("bar".to_string(), JsonNativeType::i64()),
            ])),
            true,
        );
        test(
            complex_json_object,
            &JsonNativeType::Object(JsonObjectType::from([
                (
                    "nested".to_string(),
                    JsonNativeType::Object(JsonObjectType::from([(
                        "a".to_string(),
                        JsonNativeType::Object(JsonObjectType::from([(
                            "b".to_string(),
                            JsonNativeType::Object(JsonObjectType::from([(
                                "c".to_string(),
                                JsonNativeType::Bool,
                            )])),
                        )])),
                    )])),
                ),
                ("bar".to_string(), JsonNativeType::i64()),
            ])),
            false,
        );
        test(
            complex_json_object,
            &JsonNativeType::Object(JsonObjectType::from([(
                "nested".to_string(),
                JsonNativeType::Object(JsonObjectType::from([(
                    "a".to_string(),
                    JsonNativeType::Object(JsonObjectType::from([(
                        "b".to_string(),
                        JsonNativeType::Object(JsonObjectType::from([(
                            "c".to_string(),
                            JsonNativeType::String,
                        )])),
                    )])),
                )])),
            )])),
            true,
        );
    }

    #[test]
    fn test_merge_json_type() -> Result<()> {
        fn test(

@@ -20,6 +20,7 @@ use crate::data_type::ConcreteDataType;
use crate::error::{Result, TryFromValueSnafu, UnsupportedOperationSnafu};
use crate::json::value::JsonValueRef;
use crate::prelude::{ValueRef, Vector, VectorRef};
use crate::types::json_type::JsonNativeType;
use crate::types::{JsonType, json_type};
use crate::value::StructValueRef;
use crate::vectors::{MutableVector, StructVectorBuilder};

@@ -181,9 +182,9 @@ pub(crate) struct JsonVectorBuilder {
}

impl JsonVectorBuilder {
    pub(crate) fn with_capacity(capacity: usize) -> Self {
    pub(crate) fn new(json_type: JsonNativeType, capacity: usize) -> Self {
        Self {
            merged_type: JsonType::empty(),
            merged_type: JsonType::new_native(json_type),
            capacity,
            builders: vec![],
        }

@@ -326,18 +327,18 @@ mod tests {
                "Failed to merge JSON datatype: datatypes have conflict, this: Number(I64), that: Array[Bool]",
            ),
        ];
        let mut builder = JsonVectorBuilder::with_capacity(1);
        let mut builder = JsonVectorBuilder::new(JsonNativeType::Null, 1);
        for (json, result) in jsons.into_iter().zip(results.into_iter()) {
            push(json, &mut builder, result);
        }
        let vector = builder.to_vector();
        let expected = r#"
+----------------+
| StructVector   |
+----------------+
| {__plain__: 1} |
| {__plain__: 2} |
+----------------+"#;
+---------------------+
| StructVector        |
+---------------------+
| {__json_plain__: 1} |
| {__json_plain__: 2} |
+---------------------+"#;
        assert_eq!(pretty_print(vector), expected.trim());
        Ok(())
    }

@@ -386,7 +387,7 @@ mod tests {
            "object": {"timestamp": 1761523203000}
        }"#,
        ];
        let mut builder = JsonVectorBuilder::with_capacity(1);
        let mut builder = JsonVectorBuilder::new(JsonNativeType::Null, 1);
        for json in jsons {
            push(json, &mut builder, Ok(()));
        }

@@ -379,10 +379,8 @@ impl MutableVector for StructVectorBuilder {
            },
            StructValueRef::Ref(val) => self.push_struct_value(val)?,
            StructValueRef::RefList { val, fields } => {
                let struct_value = StructValue::try_new(
                    val.iter().map(|v| Value::from(v.clone())).collect(),
                    fields.clone(),
                )?;
                let struct_value =
                    StructValue::try_new(val.into_iter().map(Value::from).collect(), fields)?;
                self.push_struct_value(&struct_value)?;
            }
        }

@@ -429,12 +427,17 @@ impl ScalarVectorBuilder for StructVectorBuilder {
            .value_builders
            .iter_mut()
            .map(|b| b.to_vector().to_arrow_array())
            .collect();
        let struct_array = StructArray::new(
            self.fields.as_arrow_fields(),
            arrays,
            self.null_buffer.finish(),
        );
            .collect::<Vec<_>>();

        let struct_array = if arrays.is_empty() {
            StructArray::new_empty_fields(self.len(), self.null_buffer.finish())
        } else {
            StructArray::new(
                self.fields.as_arrow_fields(),
                arrays,
                self.null_buffer.finish(),
            )
        };

        StructVector::try_new(self.fields.clone(), struct_array).unwrap()
    }

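The new branch exists because a `StructArray` with zero child arrays cannot infer its row count, so arrow carries the length explicitly in a dedicated constructor. A minimal sketch against the arrow-rs API (assuming the `arrow-array` crate):

use arrow_array::{Array, StructArray};

// Zero columns, `len` rows, no validity buffer.
fn empty_struct_rows(len: usize) -> StructArray {
    StructArray::new_empty_fields(len, None)
}

fn main() {
    assert_eq!(empty_struct_rows(3).len(), 3);
}
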
@@ -26,10 +26,10 @@ use object_store::ObjectStore;
use snafu::{OptionExt, ensure};
use store_api::metadata::RegionMetadataRef;
use store_api::region_engine::{
    RegionEngine, RegionManifestInfo, RegionRole, RegionScannerRef, RegionStatistic,
    RemapManifestsRequest, RemapManifestsResponse, SetRegionRoleStateResponse,
    SetRegionRoleStateSuccess, SettableRegionRoleState, SinglePartitionScanner,
    SyncManifestResponse,
    CopyRegionFromRequest, CopyRegionFromResponse, RegionEngine, RegionManifestInfo, RegionRole,
    RegionScannerRef, RegionStatistic, RemapManifestsRequest, RemapManifestsResponse,
    SetRegionRoleStateResponse, SetRegionRoleStateSuccess, SettableRegionRoleState,
    SinglePartitionScanner, SyncManifestResponse,
};
use store_api::region_request::{
    AffectedRows, RegionCloseRequest, RegionCreateRequest, RegionDropRequest, RegionOpenRequest,

@@ -163,6 +163,19 @@ impl RegionEngine for FileRegionEngine {
        ))
    }

    async fn copy_region_from(
        &self,
        _region_id: RegionId,
        _request: CopyRegionFromRequest,
    ) -> Result<CopyRegionFromResponse, BoxedError> {
        Err(BoxedError::new(
            UnsupportedSnafu {
                operation: "copy_region_from",
            }
            .build(),
        ))
    }

    fn role(&self, region_id: RegionId) -> Option<RegionRole> {
        self.inner.state(region_id)
    }

@@ -15,7 +15,7 @@
//! Frontend client to run flow as batching task which is time-window-aware normal query triggered every tick set by user

use std::collections::HashMap;
use std::sync::{Arc, Weak};
use std::sync::{Arc, Mutex, Weak};
use std::time::SystemTime;

use api::v1::greptime_request::Request;

@@ -38,6 +38,7 @@ use servers::query_handler::grpc::GrpcQueryHandler;
use session::context::{QueryContextBuilder, QueryContextRef};
use session::hints::READ_PREFERENCE_HINT;
use snafu::{OptionExt, ResultExt};
use tokio::sync::SetOnce;

use crate::batching_mode::BatchingModeOptions;
use crate::error::{

@@ -75,7 +76,19 @@ impl<E: ErrorExt + Send + Sync + 'static, T: GrpcQueryHandler<Error = E> + Send
    }
}

type HandlerMutable = Arc<std::sync::Mutex<Option<Weak<dyn GrpcQueryHandlerWithBoxedError>>>>;
#[derive(Debug, Clone)]
pub struct HandlerMutable {
    handler: Arc<Mutex<Option<Weak<dyn GrpcQueryHandlerWithBoxedError>>>>,
    is_initialized: Arc<SetOnce<()>>,
}

impl HandlerMutable {
    pub async fn set_handler(&self, handler: Weak<dyn GrpcQueryHandlerWithBoxedError>) {
        *self.handler.lock().unwrap() = Some(handler);
        // Ignore the error, as we allow the handler to be set multiple times.
        let _ = self.is_initialized.set(());
    }
}

/// A simple frontend client able to execute sql using grpc protocol
///

@@ -100,7 +113,11 @@ impl FrontendClient {
    /// Create a new empty frontend client, with a `HandlerMutable` to set the grpc handler later
    pub fn from_empty_grpc_handler(query: QueryOptions) -> (Self, HandlerMutable) {
        let handler = Arc::new(std::sync::Mutex::new(None));
        let is_initialized = Arc::new(SetOnce::new());
        let handler = HandlerMutable {
            handler: Arc::new(Mutex::new(None)),
            is_initialized,
        };
        (
            Self::Standalone {
                database_client: handler.clone(),

@@ -110,6 +127,16 @@ impl FrontendClient {
        )
    }

    /// Waits until the frontend client is initialized.
    pub async fn wait_initialized(&self) {
        if let FrontendClient::Standalone {
            database_client, ..
        } = self
        {
            database_client.is_initialized.wait().await;
        }
    }

    pub fn from_meta_client(
        meta_client: Arc<MetaClient>,
        auth: Option<FlowAuthHeader>,

@@ -138,8 +165,14 @@ impl FrontendClient {
        grpc_handler: Weak<dyn GrpcQueryHandlerWithBoxedError>,
        query: QueryOptions,
    ) -> Self {
        let is_initialized = Arc::new(SetOnce::new_with(Some(())));
        let handler = HandlerMutable {
            handler: Arc::new(Mutex::new(Some(grpc_handler))),
            is_initialized: is_initialized.clone(),
        };

        Self::Standalone {
            database_client: Arc::new(std::sync::Mutex::new(Some(grpc_handler))),
            database_client: handler,
            query,
        }
    }

@@ -321,6 +354,7 @@ impl FrontendClient {
        {
            let database_client = {
                database_client
                    .handler
                    .lock()
                    .map_err(|e| {
                        UnexpectedSnafu {

@@ -398,6 +432,7 @@ impl FrontendClient {
        {
            let database_client = {
                database_client
                    .handler
                    .lock()
                    .map_err(|e| {
                        UnexpectedSnafu {

@@ -460,3 +495,73 @@ impl std::fmt::Display for PeerDesc {
        }
    }
}

#[cfg(test)]
mod tests {
    use std::time::Duration;

    use common_query::Output;
    use tokio::time::timeout;

    use super::*;

    #[derive(Debug)]
    struct NoopHandler;

    #[async_trait::async_trait]
    impl GrpcQueryHandlerWithBoxedError for NoopHandler {
        async fn do_query(
            &self,
            _query: Request,
            _ctx: QueryContextRef,
        ) -> std::result::Result<Output, BoxedError> {
            Ok(Output::new_with_affected_rows(0))
        }
    }

    #[tokio::test]
    async fn wait_initialized() {
        let (client, handler_mut) =
            FrontendClient::from_empty_grpc_handler(QueryOptions::default());

        assert!(
            timeout(Duration::from_millis(50), client.wait_initialized())
                .await
                .is_err()
        );

        let handler: Arc<dyn GrpcQueryHandlerWithBoxedError> = Arc::new(NoopHandler);
        handler_mut.set_handler(Arc::downgrade(&handler)).await;

        timeout(Duration::from_secs(1), client.wait_initialized())
            .await
            .expect("wait_initialized should complete after handler is set");

        timeout(Duration::from_millis(10), client.wait_initialized())
            .await
            .expect("wait_initialized should be a no-op once initialized");

        let handler: Arc<dyn GrpcQueryHandlerWithBoxedError> = Arc::new(NoopHandler);
        let client =
            FrontendClient::from_grpc_handler(Arc::downgrade(&handler), QueryOptions::default());
        assert!(
            timeout(Duration::from_millis(10), client.wait_initialized())
                .await
                .is_ok()
        );

        let meta_client = Arc::new(MetaClient::default());
        let client = FrontendClient::from_meta_client(
            meta_client,
            None,
            QueryOptions::default(),
            BatchingModeOptions::default(),
        )
        .unwrap();
        assert!(
            timeout(Duration::from_millis(10), client.wait_initialized())
                .await
                .is_ok()
        );
    }
}

@@ -17,7 +17,7 @@

mod relation;

use api::helper::{pb_value_to_value_ref, value_to_grpc_value};
use api::helper::{pb_value_to_value_ref, to_grpc_value};
use api::v1::Row as ProtoRow;
use datatypes::data_type::ConcreteDataType;
use datatypes::types::cast;

@@ -201,11 +201,7 @@ impl From<ProtoRow> for Row {

impl From<Row> for ProtoRow {
    fn from(row: Row) -> Self {
        let values = row
            .unpack()
            .into_iter()
            .map(value_to_grpc_value)
            .collect_vec();
        let values = row.unpack().into_iter().map(to_grpc_value).collect_vec();
        ProtoRow { values }
    }
}
Some files were not shown because too many files have changed in this diff.