Mirror of https://github.com/GreptimeTeam/greptimedb.git, synced 2025-12-22 22:20:02 +00:00
Compare commits
118 Commits
v0.16.0-ni...8f9674440f
| SHA1 |
|---|
| 8f9674440f |
| a29681eb12 |
| c112cdf241 |
| 4d33b9687a |
| b0d2d26ad8 |
| 4ed048c735 |
| af4465e543 |
| 7c2cdccb22 |
| 3655bbe032 |
| 5501f63a71 |
| c5c9b263e1 |
| 9e6b19d301 |
| f7f52592b4 |
| 4a936d7320 |
| 83f566ad20 |
| ed2dff6d27 |
| af483335b2 |
| 70852a01a3 |
| 8f3c6f72f5 |
| d3f15e72bf |
| b25a6527ed |
| 7b48e53261 |
| ebe78f668e |
| 8456949749 |
| f21bedd141 |
| 469c3140fe |
| 569d93c599 |
| b91b520f54 |
| 7a12585af9 |
| 89b661c98a |
| e0b1ebdfb6 |
| eaaf9448c7 |
| dc37382946 |
| b31d307eb6 |
| d9f177ba9f |
| b940906d86 |
| 9ccc8da231 |
| 0d603bfc96 |
| 62eedbb6cd |
| dec6da0e8a |
| a84cf5ec67 |
| 3b7652039f |
| 9164e8f50d |
| 3854e2edb4 |
| f3dd7cccd3 |
| b2112c3f5c |
| e98f2facd4 |
| 98ef92bb0a |
| 1352a5b637 |
| 42ed5042b4 |
| 20af73ec44 |
| f3221a3b18 |
| 164afb26da |
| 7d8473e9bc |
| 51dc393371 |
| a265499325 |
| 2b4fb2f32a |
| 1df605ec4b |
| ac8493ab4a |
| d9d1773913 |
| 6afdf672b3 |
| 12c43ee27b |
| a10b1d9885 |
| f07b1daed4 |
| 5377db5392 |
| b6cef77a5c |
| 8fef177575 |
| 4cfc878067 |
| 086777d938 |
| b7fd4ca65d |
| 2e571e351f |
| 6ded2d267a |
| 3465bedc7f |
| 052033d436 |
| c3201c32c3 |
| dc01511adc |
| b01be5efc5 |
| 5908febd6c |
| 076e20081b |
| 61d83bc36b |
| 8d71213ea6 |
| 2298227e0c |
| b51bafe9d5 |
| 6e324adf02 |
| 02a9edef8b |
| 71b564b4aa |
| 2d67f9ae8b |
| 6501b0b13c |
| dfe9eeb15c |
| 78b1c6c554 |
| 1c8e8b96c1 |
| 73a3ac1320 |
| efefddbc85 |
| 58d2b7764f |
| 0dad8fc4a8 |
| 370d27587a |
| 77b540ff68 |
| 0cf25f7b05 |
| c23b26461c |
| 90ababca97 |
| 37dc057423 |
| 8237646055 |
| 011dd51495 |
| eb99e439c7 |
| 50148b25b5 |
| 639b3ddc3e |
| 139a36a459 |
| e6b9d09901 |
| bbc9f3ea1e |
| fac8c3e62c |
| 411eb768b1 |
| 95d2549007 |
| 6744f5470b |
| 9d05c7c5fa |
| 9442284fd4 |
| 1065db9518 |
| 2f9a10ab74 |
| d82bc98717 |
@@ -12,7 +12,7 @@ runs:
steps:
- name: Install Etcd cluster
shell: bash
run: |
run: |
helm upgrade \
--install etcd oci://registry-1.docker.io/bitnamicharts/etcd \
--set replicaCount=${{ inputs.etcd-replicas }} \
@@ -24,4 +24,9 @@ runs:
--set auth.rbac.token.enabled=false \
--set persistence.size=2Gi \
--create-namespace \
--set global.security.allowInsecureImages=true \
--set image.registry=docker.io \
--set image.repository=greptime/etcd \
--set image.tag=3.6.1-debian-12-r3 \
--version 12.0.8 \
-n ${{ inputs.namespace }}
@@ -51,7 +51,7 @@ runs:
run: |
helm upgrade \
--install my-greptimedb \
--set meta.backendStorage.etcd.endpoints=${{ inputs.etcd-endpoints }} \
--set 'meta.backendStorage.etcd.endpoints[0]=${{ inputs.etcd-endpoints }}' \
--set meta.enableRegionFailover=${{ inputs.enable-region-failover }} \
--set image.registry=${{ inputs.image-registry }} \
--set image.repository=${{ inputs.image-repository }} \
@@ -12,7 +12,7 @@ runs:
steps:
- name: Install Kafka cluster
shell: bash
run: |
run: |
helm upgrade \
--install kafka oci://registry-1.docker.io/bitnamicharts/kafka \
--set controller.replicaCount=${{ inputs.controller-replicas }} \
@@ -23,4 +23,8 @@ runs:
--set listeners.controller.protocol=PLAINTEXT \
--set listeners.client.protocol=PLAINTEXT \
--create-namespace \
--set image.registry=docker.io \
--set image.repository=greptime/kafka \
--set image.tag=3.9.0-debian-12-r1 \
--version 31.0.0 \
-n ${{ inputs.namespace }}
@@ -6,9 +6,7 @@ inputs:
description: "Number of PostgreSQL replicas"
namespace:
default: "postgres-namespace"
postgres-version:
default: "14.2"
description: "PostgreSQL version"
description: "The PostgreSQL namespace"
storage-size:
default: "1Gi"
description: "Storage size for PostgreSQL"
@@ -22,7 +20,11 @@ runs:
helm upgrade \
--install postgresql oci://registry-1.docker.io/bitnamicharts/postgresql \
--set replicaCount=${{ inputs.postgres-replicas }} \
--set image.tag=${{ inputs.postgres-version }} \
--set global.security.allowInsecureImages=true \
--set image.registry=docker.io \
--set image.repository=greptime/postgresql \
--set image.tag=17.5.0-debian-12-r3 \
--version 16.7.4 \
--set persistence.size=${{ inputs.storage-size }} \
--set postgresql.username=greptimedb \
--set postgresql.password=admin \
5  .github/scripts/deploy-greptimedb.sh (vendored)
@@ -68,7 +68,7 @@ function deploy_greptimedb_cluster() {

helm install "$cluster_name" greptime/greptimedb-cluster \
--set image.tag="$GREPTIMEDB_IMAGE_TAG" \
--set meta.backendStorage.etcd.endpoints="etcd.$install_namespace:2379" \
--set meta.backendStorage.etcd.endpoints[0]="etcd.$install_namespace:2379" \
-n "$install_namespace"

# Wait for greptimedb cluster to be ready.
@@ -103,14 +103,13 @@ function deploy_greptimedb_cluster_with_s3_storage() {

helm install "$cluster_name" greptime/greptimedb-cluster -n "$install_namespace" \
--set image.tag="$GREPTIMEDB_IMAGE_TAG" \
--set meta.backendStorage.etcd.endpoints="etcd.$install_namespace:2379" \
--set meta.backendStorage.etcd.endpoints[0]="etcd.$install_namespace:2379" \
--set storage.s3.bucket="$AWS_CI_TEST_BUCKET" \
--set storage.s3.region="$AWS_REGION" \
--set storage.s3.root="$DATA_ROOT" \
--set storage.credentials.secretName=s3-credentials \
--set storage.credentials.accessKeyId="$AWS_ACCESS_KEY_ID" \
--set storage.credentials.secretAccessKey="$AWS_SECRET_ACCESS_KEY"

# Wait for greptimedb cluster to be ready.
while true; do
PHASE=$(kubectl -n "$install_namespace" get gtc "$cluster_name" -o jsonpath='{.status.clusterPhase}')
34  .github/scripts/pull-test-deps-images.sh (vendored, executable file)
@@ -0,0 +1,34 @@
#!/bin/bash

# This script is used to pull the test dependency images that are stored in public ECR one by one to avoid rate limiting.

set -e

MAX_RETRIES=3

IMAGES=(
  "greptime/zookeeper:3.7"
  "greptime/kafka:3.9.0-debian-12-r1"
  "greptime/etcd:3.6.1-debian-12-r3"
  "greptime/minio:2024"
  "greptime/mysql:5.7"
)

for image in "${IMAGES[@]}"; do
  for ((attempt=1; attempt<=MAX_RETRIES; attempt++)); do
    if docker pull "$image"; then
      # Successfully pulled the image.
      break
    else
      # Use a simple linear backoff between attempts to avoid rate limiting.
      if [ $attempt -lt $MAX_RETRIES ]; then
        sleep_seconds=$((attempt * 5))
        echo "Attempt $attempt failed for $image, waiting $sleep_seconds seconds"
        sleep $sleep_seconds # 5s, 10s delays
      else
        echo "Failed to pull $image after $MAX_RETRIES attempts"
        exit 1
      fi
    fi
  done
done
4  .github/workflows/develop.yml (vendored)
@@ -719,6 +719,10 @@ jobs:
save-if: ${{ github.ref == 'refs/heads/main' }}
- name: Install latest nextest release
uses: taiki-e/install-action@nextest

- name: Pull test dependencies images
run: ./.github/scripts/pull-test-deps-images.sh

- name: Setup external services
working-directory: tests-integration/fixtures
run: docker compose up -d --wait
5  .gitignore (vendored)
@@ -60,4 +60,7 @@ tests-fuzz/corpus/
greptimedb_data

# github
!/.github
!/.github

# Claude code
CLAUDE.md
@@ -10,12 +10,10 @@
* [NiwakaDev](https://github.com/NiwakaDev)
* [tisonkun](https://github.com/tisonkun)

## Team Members (in alphabetical order)

* [apdong2022](https://github.com/apdong2022)
* [beryl678](https://github.com/beryl678)
* [Breeze-P](https://github.com/Breeze-P)
* [daviderli614](https://github.com/daviderli614)
* [discord9](https://github.com/discord9)
* [evenyag](https://github.com/evenyag)
477  Cargo.lock (generated; file diff suppressed because it is too large)
12  Cargo.toml
@@ -13,6 +13,7 @@ members = [
"src/common/datasource",
"src/common/decimal",
"src/common/error",
"src/common/event-recorder",
"src/common/frontend",
"src/common/function",
"src/common/greptimedb-telemetry",
@@ -139,7 +140,7 @@ etcd-client = "0.14"
fst = "0.4.7"
futures = "0.3"
futures-util = "0.3"
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "ceb1af4fa9309ce65bda0367db7b384df2bb4d4f" }
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "ccfd4da48bc0254ed865e479cd981a3581b02d84" }
hex = "0.4"
http = "1"
humantime = "2.1"
@@ -172,6 +173,7 @@ parking_lot = "0.12"
parquet = { version = "54.2", default-features = false, features = ["arrow", "async", "object_store"] }
paste = "1.0"
pin-project = "1.0"
pretty_assertions = "1.4.0"
prometheus = { version = "0.13.3", features = ["process"] }
promql-parser = { version = "0.6", features = ["ser"] }
prost = { version = "0.13", features = ["no-recursion-limit"] }
@@ -186,7 +188,8 @@ reqwest = { version = "0.12", default-features = false, features = [
"stream",
"multipart",
] }
rskafka = { git = "https://github.com/influxdata/rskafka.git", rev = "8dbd01ed809f5a791833a594e85b144e36e45820", features = [
# Branch: feat/request-timeout-port
rskafka = { git = "https://github.com/WenyXu/rskafka.git", rev = "9494304ae3947b07e660b5d08549ad4a39c84a26", features = [
"transport-tls",
] }
rstest = "0.25"
@@ -195,6 +198,7 @@ rust_decimal = "1.33"
rustc-hash = "2.0"
# It is worth noting that we should try to avoid using aws-lc-rs until it can be compiled on various platforms.
rustls = { version = "0.23.25", default-features = false }
sea-query = "0.32"
serde = { version = "1.0", features = ["derive"] }
serde_json = { version = "1.0", features = ["float_roundtrip"] }
serde_with = "3"
@@ -203,7 +207,7 @@ simd-json = "0.15"
similar-asserts = "1.6.0"
smallvec = { version = "1", features = ["serde"] }
snafu = "0.8"
sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "0cf6c04490d59435ee965edd2078e8855bd8471e", features = [
sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "df6fcca80ce903f5beef7002cd2c1b062e7024f8", features = [
"visitor",
"serde",
] } # branch = "v0.54.x"
@@ -224,6 +228,7 @@ tokio-util = { version = "0.7", features = ["io-util", "compat"] }
toml = "0.8.8"
tonic = { version = "0.12", features = ["tls", "gzip", "zstd"] }
tower = "0.5"
tower-http = "0.6"
tracing = "0.1"
tracing-appender = "0.2"
tracing-subscriber = { version = "0.3", features = ["env-filter", "json", "fmt"] }
@@ -247,6 +252,7 @@ common-config = { path = "src/common/config" }
common-datasource = { path = "src/common/datasource" }
common-decimal = { path = "src/common/decimal" }
common-error = { path = "src/common/error" }
common-event-recorder = { path = "src/common/event-recorder" }
common-frontend = { path = "src/common/frontend" }
common-function = { path = "src/common/function" }
common-greptimedb-telemetry = { path = "src/common/greptimedb-telemetry" }
@@ -78,6 +78,8 @@
| `wal.sync_period` | String | `10s` | Duration for fsyncing log files.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.recovery_parallelism` | Integer | `2` | Parallelism during WAL recovery. |
| `wal.broker_endpoints` | Array | -- | The Kafka broker endpoints.<br/>**It's only used when the provider is `kafka`**. |
| `wal.connect_timeout` | String | `3s` | The connect timeout for kafka client.<br/>**It's only used when the provider is `kafka`**. |
| `wal.timeout` | String | `3s` | The timeout for kafka client.<br/>**It's only used when the provider is `kafka`**. |
| `wal.auto_create_topics` | Bool | `true` | Automatically create topics for WAL.<br/>Set to `true` to automatically create topics for WAL.<br/>Otherwise, use topics named `topic_name_prefix_[0..num_topics)` |
| `wal.num_topics` | Integer | `64` | Number of topics.<br/>**It's only used when the provider is `kafka`**. |
| `wal.selector_type` | String | `round_robin` | Topic selector type.<br/>Available selector types:<br/>- `round_robin` (default)<br/>**It's only used when the provider is `kafka`**. |
@@ -147,6 +149,7 @@
| `region_engine.mito.write_cache_ttl` | String | Unset | TTL for write cache. |
| `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
| `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
| `region_engine.mito.max_concurrent_scan_files` | Integer | `128` | Maximum number of SST files to scan concurrently. |
| `region_engine.mito.allow_stale_entries` | Bool | `false` | Whether to allow stale WAL entries read during replay. |
| `region_engine.mito.min_compaction_interval` | String | `0m` | Minimum time interval between two compactions.<br/>To align with the old behavior, the default value is 0 (no restrictions). |
| `region_engine.mito.index` | -- | -- | The options for index in Mito engine. |
@@ -207,6 +210,8 @@
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
| `memory` | -- | -- | The memory options. |
| `memory.enable_heap_profiling` | Bool | `true` | Whether to enable heap profiling activation during startup.<br/>When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable<br/>is set to "prof:true,prof_active:false". The official image adds this env variable.<br/>Default is true. |
## Distributed Mode

@@ -272,7 +277,6 @@
| `meta_client` | -- | -- | The metasrv client options. |
| `meta_client.metasrv_addrs` | Array | -- | The addresses of the metasrv. |
| `meta_client.timeout` | String | `3s` | Operation timeout. |
| `meta_client.heartbeat_timeout` | String | `500ms` | Heartbeat timeout. |
| `meta_client.ddl_timeout` | String | `10s` | DDL timeout. |
| `meta_client.connect_timeout` | String | `1s` | Connect server timeout. |
| `meta_client.tcp_nodelay` | Bool | `true` | `TCP_NODELAY` option for accepted connections. |
@@ -281,6 +285,7 @@
| `meta_client.metadata_cache_tti` | String | `5m` | -- |
| `query` | -- | -- | The query engine options. |
| `query.parallelism` | Integer | `0` | Parallelism of the query engine.<br/>Default to 0, which means the number of CPU cores. |
| `query.allow_query_fallback` | Bool | `false` | Whether to allow query fallback when push down optimize fails.<br/>Default to false, meaning when push down optimize failed, return error msg |
| `datanode` | -- | -- | Datanode options. |
| `datanode.client` | -- | -- | Datanode client options. |
| `datanode.client.connect_timeout` | String | `10s` | -- |
@@ -310,6 +315,8 @@
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
| `memory` | -- | -- | The memory options. |
| `memory.enable_heap_profiling` | Bool | `true` | Whether to enable heap profiling activation during startup.<br/>When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable<br/>is set to "prof:true,prof_active:false". The official image adds this env variable.<br/>Default is true. |
### Metasrv

@@ -328,16 +335,29 @@
| `region_failure_detector_initialization_delay` | String | `10m` | The delay before starting region failure detection.<br/>This delay helps prevent Metasrv from triggering unnecessary region failovers before all Datanodes are fully started.<br/>Especially useful when the cluster is not deployed with GreptimeDB Operator and maintenance mode is not enabled. |
| `allow_region_failover_on_local_wal` | Bool | `false` | Whether to allow region failover on local WAL.<br/>**This option is not recommended to be set to true, because it may lead to data loss during failover.** |
| `node_max_idle_time` | String | `24hours` | Max allowed idle time before removing node info from metasrv memory. |
| `heartbeat_interval` | String | `3s` | Base heartbeat interval for calculating distributed time constants.<br/>The frontend heartbeat interval is 6 times of the base heartbeat interval.<br/>The flownode/datanode heartbeat interval is 1 times of the base heartbeat interval.<br/>e.g., If the base heartbeat interval is 3s, the frontend heartbeat interval is 18s, the flownode/datanode heartbeat interval is 3s.<br/>If you change this value, you need to change the heartbeat interval of the flownode/frontend/datanode accordingly. |
| `enable_telemetry` | Bool | `true` | Whether to enable greptimedb telemetry. Enabled by default. |
| `runtime` | -- | -- | The runtime options. |
| `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
| `runtime.compact_rt_size` | Integer | `4` | The number of threads to execute the runtime for global write operations. |
| `backend_tls` | -- | -- | TLS configuration for kv store backend (only applicable for PostgreSQL/MySQL backends)<br/>When using PostgreSQL or MySQL as metadata store, you can configure TLS here |
| `backend_tls.mode` | String | `prefer` | TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html<br/>- "disable" - No TLS<br/>- "prefer" (default) - Try TLS, fallback to plain<br/>- "require" - Require TLS<br/>- "verify_ca" - Require TLS and verify CA<br/>- "verify_full" - Require TLS and verify hostname |
| `backend_tls.cert_path` | String | `""` | Path to client certificate file (for client authentication)<br/>Like "/path/to/client.crt" |
| `backend_tls.key_path` | String | `""` | Path to client private key file (for client authentication)<br/>Like "/path/to/client.key" |
| `backend_tls.ca_cert_path` | String | `""` | Path to CA certificate file (for server certificate verification)<br/>Required when using custom CAs or self-signed certificates<br/>Leave empty to use system root certificates only<br/>Like "/path/to/ca.crt" |
| `backend_tls.watch` | Bool | `false` | Watch for certificate file changes and auto reload |
| `backend_client` | -- | -- | The backend client options.<br/>Currently, only applicable when using etcd as the metadata store. |
| `backend_client.keep_alive_timeout` | String | `3s` | The keep alive timeout for backend client. |
| `backend_client.keep_alive_interval` | String | `10s` | The keep alive interval for backend client. |
| `backend_client.connect_timeout` | String | `3s` | The connect timeout for backend client. |
| `grpc` | -- | -- | The gRPC server options. |
| `grpc.bind_addr` | String | `127.0.0.1:3002` | The address to bind the gRPC server. |
| `grpc.server_addr` | String | `127.0.0.1:3002` | The communication server address for the frontend and datanode to connect to metasrv.<br/>If left empty or unset, the server will automatically use the IP address of the first network interface<br/>on the host, with the same port number as the one specified in `bind_addr`. |
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
| `grpc.max_recv_message_size` | String | `512MB` | The maximum receive message size for gRPC server. |
| `grpc.max_send_message_size` | String | `512MB` | The maximum send message size for gRPC server. |
| `grpc.http2_keep_alive_interval` | String | `10s` | The server side HTTP/2 keep-alive interval |
| `grpc.http2_keep_alive_timeout` | String | `3s` | The server side HTTP/2 keep-alive timeout. |
| `http` | -- | -- | The HTTP server options. |
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
| `http.timeout` | String | `0s` | HTTP request timeout. Set to 0 to disable timeout. |
@@ -369,6 +389,8 @@
| `wal.topic_name_prefix` | String | `greptimedb_wal_topic` | A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`.<br/>Only accepts strings that match the following regular expression pattern:<br/>[a-zA-Z_:-][a-zA-Z0-9_:\-\.@#]*<br/>i.g., greptimedb_wal_topic_0, greptimedb_wal_topic_1. |
| `wal.replication_factor` | Integer | `1` | Expected number of replicas of each partition. |
| `wal.create_topic_timeout` | String | `30s` | Above which a topic creation operation will be cancelled. |
| `event_recorder` | -- | -- | Configuration options for the event recorder. |
| `event_recorder.ttl` | String | `30d` | TTL for the events table that will be used to store the events. |
| `logging` | -- | -- | The logging options. |
| `logging.dir` | String | `./greptimedb_data/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
@@ -388,6 +410,8 @@
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
| `memory` | -- | -- | The memory options. |
| `memory.enable_heap_profiling` | Bool | `true` | Whether to enable heap profiling activation during startup.<br/>When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable<br/>is set to "prof:true,prof_active:false". The official image adds this env variable.<br/>Default is true. |
### Datanode

@@ -425,7 +449,6 @@
| `meta_client` | -- | -- | The metasrv client options. |
| `meta_client.metasrv_addrs` | Array | -- | The addresses of the metasrv. |
| `meta_client.timeout` | String | `3s` | Operation timeout. |
| `meta_client.heartbeat_timeout` | String | `500ms` | Heartbeat timeout. |
| `meta_client.ddl_timeout` | String | `10s` | DDL timeout. |
| `meta_client.connect_timeout` | String | `1s` | Connect server timeout. |
| `meta_client.tcp_nodelay` | Bool | `true` | `TCP_NODELAY` option for accepted connections. |
@@ -445,6 +468,8 @@
| `wal.sync_period` | String | `10s` | Duration for fsyncing log files.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.recovery_parallelism` | Integer | `2` | Parallelism during WAL recovery. |
| `wal.broker_endpoints` | Array | -- | The Kafka broker endpoints.<br/>**It's only used when the provider is `kafka`**. |
| `wal.connect_timeout` | String | `3s` | The connect timeout for kafka client.<br/>**It's only used when the provider is `kafka`**. |
| `wal.timeout` | String | `3s` | The timeout for kafka client.<br/>**It's only used when the provider is `kafka`**. |
| `wal.max_batch_bytes` | String | `1MB` | The max size of a single producer batch.<br/>Warning: Kafka has a default limit of 1MB per message in a topic.<br/>**It's only used when the provider is `kafka`**. |
| `wal.consumer_wait_timeout` | String | `100ms` | The consumer wait timeout.<br/>**It's only used when the provider is `kafka`**. |
| `wal.create_index` | Bool | `true` | Whether to enable WAL index creation.<br/>**It's only used when the provider is `kafka`**. |
@@ -500,6 +525,7 @@
| `region_engine.mito.write_cache_ttl` | String | Unset | TTL for write cache. |
| `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
| `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
| `region_engine.mito.max_concurrent_scan_files` | Integer | `128` | Maximum number of SST files to scan concurrently. |
| `region_engine.mito.allow_stale_entries` | Bool | `false` | Whether to allow stale WAL entries read during replay. |
| `region_engine.mito.min_compaction_interval` | String | `0m` | Minimum time interval between two compactions.<br/>To align with the old behavior, the default value is 0 (no restrictions). |
| `region_engine.mito.index` | -- | -- | The options for index in Mito engine. |
@@ -553,6 +579,8 @@
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
| `memory` | -- | -- | The memory options. |
| `memory.enable_heap_profiling` | Bool | `true` | Whether to enable heap profiling activation during startup.<br/>When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable<br/>is set to "prof:true,prof_active:false". The official image adds this env variable.<br/>Default is true. |
### Flownode

@@ -562,6 +590,16 @@
| `node_id` | Integer | Unset | The flownode identifier and should be unique in the cluster. |
| `flow` | -- | -- | flow engine options. |
| `flow.num_workers` | Integer | `0` | The number of flow worker in flownode.<br/>Not setting(or set to 0) this value will use the number of CPU cores divided by 2. |
| `flow.batching_mode` | -- | -- | -- |
| `flow.batching_mode.query_timeout` | String | `600s` | The default batching engine query timeout is 10 minutes. |
| `flow.batching_mode.slow_query_threshold` | String | `60s` | will output a warn log for any query that runs for more that this threshold |
| `flow.batching_mode.experimental_min_refresh_duration` | String | `5s` | The minimum duration between two queries execution by batching mode task |
| `flow.batching_mode.grpc_conn_timeout` | String | `5s` | The gRPC connection timeout |
| `flow.batching_mode.experimental_grpc_max_retries` | Integer | `3` | The gRPC max retry number |
| `flow.batching_mode.experimental_frontend_scan_timeout` | String | `30s` | Flow wait for available frontend timeout,<br/>if failed to find available frontend after frontend_scan_timeout elapsed, return error<br/>which prevent flownode from starting |
| `flow.batching_mode.experimental_frontend_activity_timeout` | String | `60s` | Frontend activity timeout<br/>if frontend is down(not sending heartbeat) for more than frontend_activity_timeout,<br/>it will be removed from the list that flownode use to connect |
| `flow.batching_mode.experimental_max_filter_num_per_query` | Integer | `20` | Maximum number of filters allowed in a single query |
| `flow.batching_mode.experimental_time_window_merge_threshold` | Integer | `3` | Time window merge distance |
| `grpc` | -- | -- | The gRPC server options. |
| `grpc.bind_addr` | String | `127.0.0.1:6800` | The address to bind the gRPC server. |
| `grpc.server_addr` | String | `127.0.0.1:6800` | The address advertised to the metasrv,<br/>and used for connections from outside the host |
@@ -575,7 +613,6 @@
| `meta_client` | -- | -- | The metasrv client options. |
| `meta_client.metasrv_addrs` | Array | -- | The addresses of the metasrv. |
| `meta_client.timeout` | String | `3s` | Operation timeout. |
| `meta_client.heartbeat_timeout` | String | `500ms` | Heartbeat timeout. |
| `meta_client.ddl_timeout` | String | `10s` | DDL timeout. |
| `meta_client.connect_timeout` | String | `1s` | Connect server timeout. |
| `meta_client.tcp_nodelay` | Bool | `true` | `TCP_NODELAY` option for accepted connections. |
@@ -600,3 +637,5 @@
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
| `query` | -- | -- | -- |
| `query.parallelism` | Integer | `1` | Parallelism of the query engine for query sent by flownode.<br/>Default to 1, so it won't use too much cpu or memory |
| `memory` | -- | -- | The memory options. |
| `memory.enable_heap_profiling` | Bool | `true` | Whether to enable heap profiling activation during startup.<br/>When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable<br/>is set to "prof:true,prof_active:false". The official image adds this env variable.<br/>Default is true. |
@@ -92,9 +92,6 @@ metasrv_addrs = ["127.0.0.1:3002"]
## Operation timeout.
timeout = "3s"

## Heartbeat timeout.
heartbeat_timeout = "500ms"

## DDL timeout.
ddl_timeout = "10s"

@@ -164,6 +161,14 @@ recovery_parallelism = 2
## **It's only used when the provider is `kafka`**.
broker_endpoints = ["127.0.0.1:9092"]

## The connect timeout for kafka client.
## **It's only used when the provider is `kafka`**.
#+ connect_timeout = "3s"

## The timeout for kafka client.
## **It's only used when the provider is `kafka`**.
#+ timeout = "3s"

## The max size of a single producer batch.
## Warning: Kafka has a default limit of 1MB per message in a topic.
## **It's only used when the provider is `kafka`**.
@@ -474,6 +479,9 @@ sst_write_buffer_size = "8MB"
## Capacity of the channel to send data from parallel scan tasks to the main task.
parallel_scan_channel_size = 32

## Maximum number of SST files to scan concurrently.
max_concurrent_scan_files = 128

## Whether to allow stale WAL entries read during replay.
allow_stale_entries = false

@@ -669,3 +677,11 @@ headers = { }
## The tokio console address.
## @toml2docs:none-default
#+ tokio_console_addr = "127.0.0.1"

## The memory options.
[memory]
## Whether to enable heap profiling activation during startup.
## When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable
## is set to "prof:true,prof_active:false". The official image adds this env variable.
## Default is true.
enable_heap_profiling = true
@@ -7,6 +7,29 @@ node_id = 14
## The number of flow worker in flownode.
## Not setting(or set to 0) this value will use the number of CPU cores divided by 2.
#+num_workers=0
[flow.batching_mode]
## The default batching engine query timeout is 10 minutes.
#+query_timeout="600s"
## will output a warn log for any query that runs for more that this threshold
#+slow_query_threshold="60s"
## The minimum duration between two queries execution by batching mode task
#+experimental_min_refresh_duration="5s"
## The gRPC connection timeout
#+grpc_conn_timeout="5s"
## The gRPC max retry number
#+experimental_grpc_max_retries=3
## Flow wait for available frontend timeout,
## if failed to find available frontend after frontend_scan_timeout elapsed, return error
## which prevent flownode from starting
#+experimental_frontend_scan_timeout="30s"
## Frontend activity timeout
## if frontend is down(not sending heartbeat) for more than frontend_activity_timeout,
## it will be removed from the list that flownode use to connect
#+experimental_frontend_activity_timeout="60s"
## Maximum number of filters allowed in a single query
#+experimental_max_filter_num_per_query=20
## Time window merge distance
#+experimental_time_window_merge_threshold=3

## The gRPC server options.
[grpc]
@@ -41,9 +64,6 @@ metasrv_addrs = ["127.0.0.1:3002"]
## Operation timeout.
timeout = "3s"

## Heartbeat timeout.
heartbeat_timeout = "500ms"

## DDL timeout.
ddl_timeout = "10s"

@@ -113,3 +133,11 @@ default_ratio = 1.0
## Parallelism of the query engine for query sent by flownode.
## Default to 1, so it won't use too much cpu or memory
parallelism = 1

## The memory options.
[memory]
## Whether to enable heap profiling activation during startup.
## When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable
## is set to "prof:true,prof_active:false". The official image adds this env variable.
## Default is true.
enable_heap_profiling = true
@@ -171,9 +171,6 @@ metasrv_addrs = ["127.0.0.1:3002"]
## Operation timeout.
timeout = "3s"

## Heartbeat timeout.
heartbeat_timeout = "500ms"

## DDL timeout.
ddl_timeout = "10s"

@@ -197,6 +194,9 @@ metadata_cache_tti = "5m"
## Parallelism of the query engine.
## Default to 0, which means the number of CPU cores.
parallelism = 0
## Whether to allow query fallback when push down optimize fails.
## Default to false, meaning when push down optimize failed, return error msg
allow_query_fallback = false

## Datanode options.
[datanode]
@@ -277,3 +277,11 @@ headers = { }
## The tokio console address.
## @toml2docs:none-default
#+ tokio_console_addr = "127.0.0.1"

## The memory options.
[memory]
## Whether to enable heap profiling activation during startup.
## When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable
## is set to "prof:true,prof_active:false". The official image adds this env variable.
## Default is true.
enable_heap_profiling = true
@@ -55,6 +55,13 @@ allow_region_failover_on_local_wal = false
## Max allowed idle time before removing node info from metasrv memory.
node_max_idle_time = "24hours"

## Base heartbeat interval for calculating distributed time constants.
## The frontend heartbeat interval is 6 times of the base heartbeat interval.
## The flownode/datanode heartbeat interval is 1 times of the base heartbeat interval.
## e.g., If the base heartbeat interval is 3s, the frontend heartbeat interval is 18s, the flownode/datanode heartbeat interval is 3s.
## If you change this value, you need to change the heartbeat interval of the flownode/frontend/datanode accordingly.
#+ heartbeat_interval = "3s"

## Whether to enable greptimedb telemetry. Enabled by default.
#+ enable_telemetry = true

@@ -65,6 +72,44 @@ node_max_idle_time = "24hours"
## The number of threads to execute the runtime for global write operations.
#+ compact_rt_size = 4

## TLS configuration for kv store backend (only applicable for PostgreSQL/MySQL backends)
## When using PostgreSQL or MySQL as metadata store, you can configure TLS here
[backend_tls]
## TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html
## - "disable" - No TLS
## - "prefer" (default) - Try TLS, fallback to plain
## - "require" - Require TLS
## - "verify_ca" - Require TLS and verify CA
## - "verify_full" - Require TLS and verify hostname
mode = "prefer"

## Path to client certificate file (for client authentication)
## Like "/path/to/client.crt"
cert_path = ""

## Path to client private key file (for client authentication)
## Like "/path/to/client.key"
key_path = ""

## Path to CA certificate file (for server certificate verification)
## Required when using custom CAs or self-signed certificates
## Leave empty to use system root certificates only
## Like "/path/to/ca.crt"
ca_cert_path = ""

## Watch for certificate file changes and auto reload
watch = false

## The backend client options.
## Currently, only applicable when using etcd as the metadata store.
#+ [backend_client]
## The keep alive timeout for backend client.
#+ keep_alive_timeout = "3s"
## The keep alive interval for backend client.
#+ keep_alive_interval = "10s"
## The connect timeout for backend client.
#+ connect_timeout = "3s"

## The gRPC server options.
[grpc]
## The address to bind the gRPC server.
@@ -79,6 +124,10 @@ runtime_size = 8
max_recv_message_size = "512MB"
## The maximum send message size for gRPC server.
max_send_message_size = "512MB"
## The server side HTTP/2 keep-alive interval
#+ http2_keep_alive_interval = "10s"
## The server side HTTP/2 keep-alive timeout.
#+ http2_keep_alive_timeout = "3s"

## The HTTP server options.
[http]
@@ -212,6 +261,11 @@ create_topic_timeout = "30s"
# client_cert_path = "/path/to/client_cert"
# client_key_path = "/path/to/key"

## Configuration options for the event recorder.
[event_recorder]
## TTL for the events table that will be used to store the events.
ttl = "30d"

## The logging options.
[logging]
## The directory to store the log files. If set to empty, logs will not be written to files.
@@ -265,3 +319,11 @@ headers = { }
## The tokio console address.
## @toml2docs:none-default
#+ tokio_console_addr = "127.0.0.1"

## The memory options.
[memory]
## Whether to enable heap profiling activation during startup.
## When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable
## is set to "prof:true,prof_active:false". The official image adds this env variable.
## Default is true.
enable_heap_profiling = true
@@ -209,6 +209,14 @@ recovery_parallelism = 2
## **It's only used when the provider is `kafka`**.
broker_endpoints = ["127.0.0.1:9092"]

## The connect timeout for kafka client.
## **It's only used when the provider is `kafka`**.
#+ connect_timeout = "3s"

## The timeout for kafka client.
## **It's only used when the provider is `kafka`**.
#+ timeout = "3s"

## Automatically create topics for WAL.
## Set to `true` to automatically create topics for WAL.
## Otherwise, use topics named `topic_name_prefix_[0..num_topics)`
@@ -565,6 +573,9 @@ sst_write_buffer_size = "8MB"
## Capacity of the channel to send data from parallel scan tasks to the main task.
parallel_scan_channel_size = 32

## Maximum number of SST files to scan concurrently.
max_concurrent_scan_files = 128

## Whether to allow stale WAL entries read during replay.
allow_stale_entries = false

@@ -783,3 +794,11 @@ headers = { }
## The tokio console address.
## @toml2docs:none-default
#+ tokio_console_addr = "127.0.0.1"

## The memory options.
[memory]
## Whether to enable heap profiling activation during startup.
## When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable
## is set to "prof:true,prof_active:false". The official image adds this env variable.
## Default is true.
enable_heap_profiling = true
@@ -47,4 +47,6 @@ WORKDIR /greptime
COPY --from=builder /out/target/${OUTPUT_DIR}/greptime /greptime/bin/
ENV PATH /greptime/bin/:$PATH

ENV MALLOC_CONF="prof:true,prof_active:false"

ENTRYPOINT ["greptime"]

@@ -47,4 +47,6 @@ WORKDIR /greptime
COPY --from=builder /out/target/${OUTPUT_DIR}/greptime /greptime/bin/
ENV PATH /greptime/bin/:$PATH

ENV MALLOC_CONF="prof:true,prof_active:false"

ENTRYPOINT ["greptime"]

@@ -15,4 +15,6 @@ ADD $TARGETARCH/greptime /greptime/bin/

ENV PATH /greptime/bin/:$PATH

ENV MALLOC_CONF="prof:true,prof_active:false"

ENTRYPOINT ["greptime"]

@@ -18,4 +18,6 @@ ENV PATH /greptime/bin/:$PATH

ENV TARGET_BIN=$TARGET_BIN

ENV MALLOC_CONF="prof:true,prof_active:false"

ENTRYPOINT ["sh", "-c", "exec $TARGET_BIN \"$@\"", "--"]
@@ -30,6 +30,23 @@ curl https://raw.githubusercontent.com/brendangregg/FlameGraph/master/flamegraph

## Profiling

### Configuration

You can control heap profiling activation through configuration. Add the following to your configuration file:

```toml
[memory]
# Whether to enable heap profiling activation during startup.
# When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable
# is set to "prof:true,prof_active:false". The official image adds this env variable.
# Default is true.
enable_heap_profiling = true
```

By default, if you set `MALLOC_CONF=prof:true,prof_active:false`, the database will enable profiling during startup. You can disable this behavior by setting `enable_heap_profiling = false` in the configuration.

### Starting with environment variables

Start GreptimeDB instance with environment variables:

```bash
@@ -40,6 +57,23 @@ MALLOC_CONF=prof:true ./target/debug/greptime standalone start
_RJEM_MALLOC_CONF=prof:true ./target/debug/greptime standalone start
```

### Memory profiling control

You can control heap profiling activation using the new HTTP APIs:

```bash
# Check current profiling status
curl -X GET localhost:4000/debug/prof/mem/status

# Activate heap profiling (if not already active)
curl -X POST localhost:4000/debug/prof/mem/activate

# Deactivate heap profiling
curl -X POST localhost:4000/debug/prof/mem/deactivate
```

### Dump memory profiling data

Dump memory profiling data through HTTP API:

```bash
@@ -1,4 +1,4 @@
Currently, our query engine is based on DataFusion, so all aggregate functions are executed by DataFusion through its UDAF interface. You can find DataFusion's UDAF example [here](https://github.com/apache/arrow-datafusion/blob/arrow2/datafusion-examples/examples/simple_udaf.rs). Basically, we provide the same way as DataFusion to write aggregate functions: both are centered on a struct called "Accumulator" that accumulates state along the way during aggregation.
Currently, our query engine is based on DataFusion, so all aggregate functions are executed by DataFusion through its UDAF interface. You can find DataFusion's UDAF example [here](https://github.com/apache/datafusion/tree/main/datafusion-examples/examples/simple_udaf.rs). Basically, we provide the same way as DataFusion to write aggregate functions: both are centered on a struct called "Accumulator" that accumulates state along the way during aggregation.

However, DataFusion's UDAF implementation has a huge restriction: it requires the user to provide a concrete "Accumulator". Take the `Median` aggregate function for example: to aggregate a `u32` datatype column, you have to write a `MedianU32` and use `SELECT MEDIANU32(x)` in SQL. `MedianU32` cannot be used to aggregate an `i32` datatype column. Or, there's another way: you can use a special type that can hold all kinds of data (like our `Value` enum or Arrow's `ScalarValue`), and `match` all the way up to do the aggregate calculations. It might work, though it is rather tedious. (But I think it's DataFusion's preferred way to write UDAFs.)
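To make the "match all the way up" option concrete, here is a minimal, self-contained Rust sketch. It does not use DataFusion's actual `Accumulator` trait or `ScalarValue`; the `Value` enum, `MedianAccumulator`, and their methods are illustrative stand-ins for one accumulator that dispatches on a dynamic value type instead of one concrete accumulator per column datatype.

```rust
// Hypothetical, simplified sketch (not DataFusion's real API): a single
// accumulator that works over a dynamic `Value` enum by matching on the
// variant, instead of a `MedianU32`, `MedianI32`, ... per datatype.
#[derive(Debug, Clone, PartialEq)]
enum Value {
    Int32(i32),
    UInt32(u32),
    Float64(f64),
    Null,
}

#[derive(Default)]
struct MedianAccumulator {
    values: Vec<f64>, // normalize every numeric variant to f64 for simplicity
}

impl MedianAccumulator {
    fn update(&mut self, value: &Value) {
        // The "match all the way up" style the text refers to.
        match value {
            Value::Int32(v) => self.values.push(*v as f64),
            Value::UInt32(v) => self.values.push(*v as f64),
            Value::Float64(v) => self.values.push(*v),
            Value::Null => {}
        }
    }

    fn evaluate(&mut self) -> Value {
        if self.values.is_empty() {
            return Value::Null;
        }
        self.values.sort_by(|a, b| a.partial_cmp(b).unwrap());
        // Upper median is good enough for this illustration.
        Value::Float64(self.values[self.values.len() / 2])
    }
}

fn main() {
    let mut acc = MedianAccumulator::default();
    for v in [Value::UInt32(3), Value::UInt32(1), Value::UInt32(2)] {
        acc.update(&v);
    }
    assert_eq!(acc.evaluate(), Value::Float64(2.0));
}
```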
@@ -76,7 +76,7 @@ pub trait CompactionStrategy {
```

The most suitable compaction strategy for the time-series scenario would be
a hybrid strategy that combines time window compaction with size-tiered compaction, just like [Cassandra](https://cassandra.apache.org/doc/latest/cassandra/operating/compaction/twcs.html) and [ScyllaDB](https://docs.scylladb.com/stable/architecture/compaction/compaction-strategies.html#time-window-compaction-strategy-twcs) do.
a hybrid strategy that combines time window compaction with size-tiered compaction, just like [Cassandra](https://cassandra.apache.org/doc/latest/cassandra/managing/operating/compaction/twcs.html) and [ScyllaDB](https://docs.scylladb.com/stable/architecture/compaction/compaction-strategies.html#time-window-compaction-strategy-twcs) do.

We can first group SSTs in level n into buckets according to some predefined time window. Within that window,
SSTs are compacted in a size-tiered manner (find SSTs with similar size and compact them to level n+1).
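A minimal sketch of the hybrid idea described above, with assumed types (`SstFile`, the window size, and the size ratio are illustrative, not the engine's real structures): bucket SSTs of level n by a fixed time window, then inside each window select a run of similarly sized files as one compaction task.

```rust
use std::collections::BTreeMap;

#[derive(Debug, Clone)]
struct SstFile {
    id: u64,
    size_bytes: u64,
    min_ts: i64, // seconds
}

/// Group files into fixed time-window buckets keyed by the window start.
fn bucket_by_time_window(files: &[SstFile], window_secs: i64) -> BTreeMap<i64, Vec<SstFile>> {
    let mut buckets: BTreeMap<i64, Vec<SstFile>> = BTreeMap::new();
    for f in files {
        let window_start = f.min_ts.div_euclid(window_secs) * window_secs;
        buckets.entry(window_start).or_insert_with(Vec::new).push(f.clone());
    }
    buckets
}

/// Inside one window, pick a run of files whose sizes are within `ratio` of
/// each other (a crude size-tiered selection).
fn pick_size_tier(mut files: Vec<SstFile>, ratio: f64, min_files: usize) -> Option<Vec<SstFile>> {
    files.sort_by_key(|f| f.size_bytes);
    for start in 0..files.len() {
        let mut run = vec![files[start].clone()];
        for f in &files[start + 1..] {
            if (f.size_bytes as f64) <= (run[0].size_bytes as f64) * ratio {
                run.push(f.clone());
            }
        }
        if run.len() >= min_files {
            return Some(run);
        }
    }
    None
}

fn main() {
    let files = vec![
        SstFile { id: 1, size_bytes: 10 << 20, min_ts: 0 },
        SstFile { id: 2, size_bytes: 12 << 20, min_ts: 100 },
        SstFile { id: 3, size_bytes: 300 << 20, min_ts: 200 },
        SstFile { id: 4, size_bytes: 11 << 20, min_ts: 4000 },
    ];
    for (window, group) in bucket_by_time_window(&files, 3600) {
        if let Some(task) = pick_size_tier(group, 1.5, 2) {
            println!("window {window}: compact {:?}", task.iter().map(|f| f.id).collect::<Vec<_>>());
        }
    }
}
```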
@@ -28,7 +28,7 @@ In order to do those things while maintaining a low memory footprint, you need to
- Greptime Flow is built on top of [Hydroflow](https://github.com/hydro-project/hydroflow).
- We have three choices for the Dataflow/Streaming process framework for our simple continuous aggregation feature:
1. Based on the timely/differential dataflow crate that [materialize](https://github.com/MaterializeInc/materialize) is based on. Later, it proved too obscure for simple usage, and it is hard to customize memory usage control.
2. Based on a simple dataflow framework that we write from the ground up, like what [arroyo](https://www.arroyo.dev/) or [risingwave](https://www.risingwave.dev/) did; for example, the core streaming logic of [arroyo](https://github.com/ArroyoSystems/arroyo/blob/master/arroyo-datastream/src/lib.rs) only takes up to 2000 lines of code. However, it means maintaining another layer of dataflow framework, which might seem easy in the beginning, but I fear it might be too burdensome to maintain once we need more features.
2. Based on a simple dataflow framework that we write from the ground up, like what [arroyo](https://www.arroyo.dev/) or [risingwave](https://www.risingwave.dev/) did; for example, the core streaming logic of [arroyo](https://github.com/ArroyoSystems/arroyo/blob/master/crates/arroyo-datastream/src/lib.rs) only takes up to 2000 lines of code. However, it means maintaining another layer of dataflow framework, which might seem easy in the beginning, but I fear it might be too burdensome to maintain once we need more features.
3. Based on a simple and lower-level dataflow framework that someone else wrote, like [hydroflow](https://github.com/hydro-project/hydroflow); this approach combines the best of both worlds. Firstly, it boasts ease of comprehension and customization. Secondly, the dataflow framework offers precisely the necessary features for crafting uncomplicated single-node dataflow programs while delivering decent performance.

Hence, we choose the third option and use a simple logical plan that is agnostic to the underlying dataflow framework, as it only describes what the dataflow graph should be doing, not how it does it. We build operators in Hydroflow to execute the plan, and the resulting Hydroflow graph is wrapped in an engine that only supports data in/out and a tick event to flush and compute the result. This provides a thin middle layer that is easy to maintain and allows switching to another dataflow framework if necessary.
154  docs/rfcs/2025-06-20-repartition.md (new file)
@@ -0,0 +1,154 @@
---
Feature Name: Repartition
Tracking Issue: https://github.com/GreptimeTeam/greptimedb/issues/6558
Date: 2025-06-20
Author: "Ruihang Xia <waynestxia@gmail.com>"
---

# Summary

This RFC proposes a method for repartitioning a table, to adjust the partition rule and data distribution.

# Motivation

As time passes, the data distribution and skew pattern of a table might change. We need a way to repartition the table to suit the new pattern.

# Details

Here is a rough workflow diagram of the entire repartition process; each step is described in detail below.

```mermaid
sequenceDiagram
    participant Frontend
    participant Metasrv
    participant Datanodes
    participant Region0 as Region 0

    Frontend->>Frontend: Process request, validation etc.
    Frontend->>Metasrv: Submit procedure
    Metasrv->>Metasrv: Compute diff and generate migration plan
    Metasrv->>Metasrv: Allocate necessary region resources (with PaaS)
    Metasrv->>Datanodes: Stop compaction and snapshot
    rect rgb(255, 225, 225)
    note over Frontend, Region0: No Ingestion Period
    Metasrv->>Frontend: Stop processing write requests
    Metasrv->>Metasrv: Update metadata
    Metasrv->>Frontend: Start processing read requests
    end
    Metasrv->>Datanodes: Update region rule, stage version changes from now on
    Region0->>Region0: Compute new manifests for all regions
    Region0->>Datanodes: Submit manifest changes
    Metasrv->>Datanodes: Recover compaction and snapshot, make staged changes visible

    note over Frontend, Datanodes: Reload Cache
    Metasrv->>Metasrv: Release resources (with PaaS)
    Metasrv->>Metasrv: Schedule optional compaction (to remote compactor)
```

## Preprocessing

This phase is for static analysis of the new partition rule. The server can know whether the repartitioning is possible, how to do it, and how many resources are needed.

In theory, the input and output partition rules for repartitioning can be completely unrelated. But in practice, to avoid a very large change set, we'll only allow two simple kinds of change: one splits one region into two regions (region split) and the other merges two regions into one (region merge).

After validating the new partition rule using the same validation logic as table creation, we compute the difference between the old and new partition rules. The resulting diff may contain several independent groups of changes. During subsequent processing, each group of changes can be handled independently and can succeed or fail without affecting other groups or creating non-idempotently retryable scenarios.

Next, we generate a repartition plan for each group of changes. Each plan contains the information for all regions involved in that particular plan, and one target region will only be referenced by a single plan.

With those plans, we can determine the resource requirements for the repartition operation, where resources here primarily refer to Regions. Metasrv will coordinate with the PaaS layer to pre-allocate the necessary regions at this stage. These new regions start completely empty, and their metadata and manifests will be populated during the subsequent modification steps.
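As a rough illustration of the plan groups described above (the type and field names are hypothetical, not the actual implementation), each group is either a region split or a region merge, and every target region may be referenced by only one plan:

```rust
// Hypothetical data model for repartition plan groups.
#[derive(Debug)]
enum RepartitionPlan {
    /// One source region becomes two target regions.
    Split { source: u64, targets: [u64; 2] },
    /// Several source regions become one target region.
    Merge { sources: Vec<u64>, target: u64 },
}

impl RepartitionPlan {
    fn target_regions(&self) -> Vec<u64> {
        match self {
            RepartitionPlan::Split { targets, .. } => targets.to_vec(),
            RepartitionPlan::Merge { target, .. } => vec![*target],
        }
    }
}

fn main() {
    let plans = vec![
        RepartitionPlan::Split { source: 1, targets: [10, 11] },
        RepartitionPlan::Merge { sources: vec![2, 3], target: 12 },
    ];
    // Independent groups: target sets must not overlap.
    let mut all_targets: Vec<u64> = plans.iter().flat_map(|p| p.target_regions()).collect();
    all_targets.sort_unstable();
    let before = all_targets.len();
    all_targets.dedup();
    assert_eq!(before, all_targets.len(), "a target region must appear in one plan only");
    println!("{plans:?}");
}
```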
## Data Processing

This phase primarily covers region changes, including region metadata (the route table and the corresponding rule) and manifests.

Once we start processing one plan through a procedure, we'll first stop the regions' compaction and snapshot. This is to avoid any state being removed by compaction (which may remove old SST files) or snapshot (which may remove old manifest files).

Metasrv will try to update the partition metadata, i.e., the region route table (related to `PartitionRuleManager`). This step is in the "no ingestion" scope, so no new data will be ingested. Since this won't take much time, the impact on the cluster is minimized. Metasrv will also push the updated region rule to the corresponding regions on Datanodes.

Every region and every ingestion request to the region server will carry a version of the region rule, to identify under which rule the request is processed. The version can be something like `hash(region_rule)`. Once the region rule on the region server is updated, all ingestion requests with the old rule will be rejected, and all requests with the new rule will be accepted but not visible. They can still be flushed to persistent storage, but their version change (new manifest) will be staged.
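A small, hypothetical sketch of the rule-version gating described above: the version is a hash of the region rule, and the region server accepts (but stages) writes that carry the current version while rejecting writes tagged with a stale one. All names are illustrative.

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

#[derive(Hash)]
struct RegionRule {
    region_id: u64,
    expr: String, // e.g. "host >= 'a' AND host < 'm'"
}

/// The version described in the text: something like `hash(region_rule)`.
fn rule_version(rule: &RegionRule) -> u64 {
    let mut hasher = DefaultHasher::new();
    rule.hash(&mut hasher);
    hasher.finish()
}

struct RegionServer {
    current_version: u64,
}

enum WriteOutcome {
    /// Accepted, but its manifest change stays staged until the repartition
    /// procedure acknowledges it.
    AcceptedStaged,
    /// The client is still using the pre-repartition rule.
    RejectedStaleRule,
}

impl RegionServer {
    fn handle_write(&self, request_rule_version: u64) -> WriteOutcome {
        if request_rule_version == self.current_version {
            WriteOutcome::AcceptedStaged
        } else {
            WriteOutcome::RejectedStaleRule
        }
    }
}

fn main() {
    let new_rule = RegionRule { region_id: 0, expr: "host >= 'a' AND host < 'm'".into() };
    let server = RegionServer { current_version: rule_version(&new_rule) };
    assert!(matches!(server.handle_write(rule_version(&new_rule)), WriteOutcome::AcceptedStaged));
    assert!(matches!(server.handle_write(0xdead_beef), WriteOutcome::RejectedStaleRule));
}
```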
Then region 0 (or let metasrv to pick any operational region) will compute the new manifests for all target regions. This step is done by first reading all old manifests, and remapping the files with new partition rule, to get the content of new manifests. Notice this step only handles the manifests before region rule change on region server, and won't touch those staged manifests, as they are already with the new rule.
|
||||
|
||||
Those new manifest will be submitted to the corresponding target regions by region 0 via a `RegionEdit` request. If this request falls after a few retries, region 0 will try to rollback this change by directly overwriting the manifest on object storage. and report this failure to metasrv and let the entire repartition procedure to fail. And we can also optionally compute the new manifest for those staged version changes (like another repartition) and submit them to the target regions to make the also visible even if the repartition fails.
|
||||
|
||||
In the other hand, a successful `RegionEdit` request also acknowledges those staged version changes and make them visible.
|
||||
|
||||
After this step, the repartition is done in the data plane. We can start to process compaction and snapshot again.
|
||||
|
||||
## Postprocessing
|
||||
|
||||
After the main processing is done, we can do some extra postprocessing to reduce the performance impact of repartition. Including reloading caches in frontend's route table, metasrv's kv cache and datanode's read/write/page cache etc.
|
||||
|
||||
We can also schedule an optional compaction to reorganize all the data file under the new partition rule to reduce potential fragmentation or read amplification.
|
||||
|
||||
## Procedure
|
||||
|
||||
Here describe the repartition procedure step by step:
|
||||
|
||||
- <on frontend> Validate the repartition request
|
||||
- <on frontend> Initialize the repartition procedure
|
||||
- Calculate rule diff and repartition plan group
|
||||
- Allocate necessary new regions
|
||||
- Lock the table key
|
||||
- For each repartition subprocedure
|
||||
- Stop compaction and snapshot
|
||||
- Forbid new ingestion requests, update metadata, allow ingestion requests.
|
||||
- Update region rule to regions
|
||||
  - Pick one region to calculate the new manifests for all regions in this repartition group
|
||||
  - Let that region apply the new manifest to each region via `RegionEdit`
|
||||
  - If it fails after some retries, revert the manifest change on the regions that succeeded and mark the failure.
|
||||
- If all succeeded, acknowledge those staged version changes and make them visible.
|
||||
- Return result
|
||||
- Collect results from the subprocedures.
|
||||
  - For those that failed, restart the affected regions to force them to reconstruct their state from manifests
|
||||
  - For those that succeeded, collect and merge their rule diffs
|
||||
- Unlock the table key
|
||||
- Report the result to the user.
|
||||
- <in background> Reload cache
|
||||
- <in background> Maybe trigger a special compaction
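As a rough illustration only, the sub-procedure steps above could be modeled as a resumable state machine; the state names below are hypothetical and do not correspond to existing code:

```rust
// Illustrative state machine for one repartition sub-procedure.
enum RepartitionSubState {
    StopCompactionAndSnapshot,
    UpdateMetadata,          // inside the "no ingestion" window
    UpdateRegionRule,        // push the new rule to region servers
    ComputeNewManifests,     // on the picked "central" region
    ApplyRegionEdits,        // submit new manifests via `RegionEdit`
    AcknowledgeStagedChanges,
    Done,
    Failed,                  // triggers region reload / rollback
}
```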
|
||||
|
||||
In addition to the sequential steps, rollback is an important part of this procedure. There are three steps that can be rolled back when an unrecoverable failure occurs.
|
||||
|
||||
If the metadata update is not committed, we can overwrite the metadata back to the previous version. This step is scoped within the "no ingestion" period, so no new data will be ingested and the state of both the datanode and metasrv stays consistent.
|
||||
|
||||
If the `RegionEdit` to other regions is not acknowledged, or only partially acknowledged, we can directly overwrite the manifests on object storage from the central region (which computes the new manifests), and force the region server to reload the corresponding regions from object storage to recover.
|
||||
|
||||
If the staged version changes are not acknowledged, we can re-compute manifests based on the old rule for the staged data and apply them directly as above. This is effectively another, smaller repartition for the staged data.
|
||||
|
||||
## Region rule validation and diff calculation
|
||||
|
||||
In the current codebase, the rule checker is not complete: it can't check the uniqueness and completeness of the rule. This RFC also proposes a new way to validate the rule.
|
||||
|
||||
The proposed validation is based on a check-point system, which first generates a group of check-points from the rule, and then checks that every point is covered by one and only one rule.
|
||||
|
||||
All partition rule expressions are limited to the form `<column> <operator> <value>`, where the operator must be a comparison operator. These expressions may be nested with `AND` and `OR` operators. Based on this, we can first extract all the unique values on each column, adding and subtracting a small epsilon to cover each value's left and right boundaries.
|
||||
|
||||
Since we accept integer, float and string as value types, computing on them directly is not convenient. So we first normalize them to a common type that only needs to preserve the relative partial ordering. This also avoids the problems of "what is the next/previous value" for strings and "what is a good precision" for floats.
|
||||
|
||||
After normalization, we get a set of scatter points for each column. We can then generate a set of check-points by combining all the scatter points, like building a cartesian product. This might produce a large number of check-points, so we can apply a pruning optimization that removes some of them by merging expression zones: expression zones that have identical sub-expressions on N-1 edges and share one adjacent edge can be merged together. This pruning check has a time complexity of O(N * M * log(M)), where N is the number of active dimensions and M is the number of expression zones. Diff calculation is also done by finding differing expression zones between the old and new rule sets and checking whether one can be transformed into the other by merging some of the expression zones.
|
||||
|
||||
The step of validating the check-point set against the expressions can be treated as a tiny `PhysicalExpr` evaluation. This evaluation gives a boolean matrix of shape K*M, where K is the number of check-points. We then check that each row of the matrix contains one and only one true value.
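A minimal sketch of this check-point validation, assuming values have already been normalized to integers; the helper names and the closure-based expressions are illustrative, not the real `PhysicalExpr` machinery:

```rust
type CheckPoint = Vec<i64>; // values normalized to a totally ordered type

/// Build check-points: per column, take each unique value plus its +/-1
/// "epsilon" neighbors, then form the cartesian product across columns.
fn check_points_from_column_values(columns: &[Vec<i64>]) -> Vec<CheckPoint> {
    let mut dims: Vec<Vec<i64>> = Vec::new();
    for values in columns {
        let mut pts: Vec<i64> = values.iter().flat_map(|v| [v - 1, *v, v + 1]).collect();
        pts.sort_unstable();
        pts.dedup();
        dims.push(pts);
    }
    dims.into_iter().fold(vec![vec![]], |acc, dim| {
        acc.into_iter()
            .flat_map(|prefix| {
                dim.iter().map(move |v| {
                    let mut p = prefix.clone();
                    p.push(*v);
                    p
                })
            })
            .collect()
    })
}

/// Each expression plays the role of one column of the K x M boolean matrix;
/// validity means every row has exactly one `true`.
fn validate(points: &[CheckPoint], exprs: &[Box<dyn Fn(&CheckPoint) -> bool>]) -> bool {
    points.iter().all(|p| exprs.iter().filter(|e| e(p)).count() == 1)
}
```

With this matrix view, a coverage gap shows up as an all-false row and an overlap shows up as a row with multiple true values.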
|
||||
|
||||
## Compute and use new manifest
|
||||
|
||||
We can generate a new set of manifest files based on the old manifests and the two versions of the rule. From the rule processing described above, we can tell how a new rule & region is derived from the previous ones. So a simple way to get the new manifest is to apply the same change steps to the manifest files. E.g., if region A is derived from regions B and C, we simply combine all file IDs from B and C to generate the content of A.
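A simplified sketch of this manifest construction, with a hypothetical `FileMeta` standing in for the real manifest types; the min-max filter anticipates the optimization described in the next paragraph:

```rust
#[derive(Clone)]
struct FileMeta {
    file_id: String,
    min_key: i64,
    max_key: i64,
}

/// Region A comes from regions B and C: take the union of their files,
/// optionally dropping files whose min-max range cannot overlap the target
/// region's key range under the new rule (treated as [start, end) here).
fn new_manifest_files(
    sources: &[Vec<FileMeta>],  // manifests of the source regions
    target_range: (i64, i64),   // the target region's range under the new rule
) -> Vec<FileMeta> {
    sources
        .iter()
        .flatten()
        .filter(|f| f.max_key >= target_range.0 && f.min_key < target_range.1)
        .cloned()
        .collect()
}
```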
|
||||
|
||||
If necessary, we can do better by involving some metadata about the data, like the min-max statistics of each file, and pre-evaluating over the min-max values to filter out unneeded files when generating the new manifest.
|
||||
|
||||
Using the new manifest needs one extra step compared with the current implementation. We'll need to record, either in the manifest or in the file metadata, which rule was in effect when an SST file was generated (by flush or compaction). Then, in every read request, we need to append the current region rule as a predicate, to ensure no data belonging to other regions is read. We can use the stored region rule to reduce the number of new predicates to apply, by removing predicates that are identical between the current region rule and the stored region rule. So ideally, in a table that has not been repartitioned recently, the overhead of checking the region rule is minimal.
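The predicate reduction could conceptually work like the sketch below, where a rule is represented as a set of expression strings purely for illustration:

```rust
use std::collections::HashSet;

/// Only the parts of the current region rule that the file's recorded rule
/// does not already guarantee need to be re-checked at read time.
fn extra_read_predicates(
    current_rule: &HashSet<String>, // rule of the region serving the read
    file_rule: &HashSet<String>,    // rule recorded when the SST was written
) -> HashSet<String> {
    // For a table that has not been repartitioned recently, the two sets are
    // equal and this returns an empty set, so the read path pays no extra cost.
    current_rule.difference(file_rule).cloned().collect()
}
```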
|
||||
|
||||
## Pre-required tasks
|
||||
|
||||
The above steps assume some functionalities are already implemented. Here we list them, along with where they are used and how to implement them.
|
||||
|
||||
### Cross-region read
|
||||
|
||||
The current data directory structure is `{table_id}/{region_id}/[data/metadata]/{file_id}`, and every region can only access files under its own directory. After repartition, data files may be placed under other, previously existing regions. So we need to support cross-region reads. This new access method allows a region to access any file under the same table. The related tracking issue is <https://github.com/GreptimeTeam/greptimedb/issues/6409>.
|
||||
|
||||
### Global GC worker
|
||||
|
||||
This is to simplify the state management of data files, as one file may be referenced by multiple manifests, or by no manifest at all. With this in place, every region and the repartition process only need to care about generating and using new files, without tracking whether a file should be deleted, leaving the deletion to the global GC worker. This worker basically works by counting references from manifest files and removing unreferenced ones. The related tracking issue is **TBD**.
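A rough sketch of the reference-counting idea; the inputs are placeholders for whatever manifest- and storage-listing APIs the worker ends up using:

```rust
use std::collections::HashSet;

/// Files present on object storage that no manifest references are garbage
/// and can be deleted by the GC worker.
fn find_unreferenced_files(
    manifest_file_ids: &[Vec<String>], // file ids listed in each region manifest
    stored_file_ids: &[String],        // file ids actually present on object storage
) -> Vec<String> {
    let referenced: HashSet<&String> = manifest_file_ids.iter().flatten().collect();
    stored_file_ids
        .iter()
        .filter(|id| !referenced.contains(id))
        .cloned()
        .collect()
}
```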
|
||||
|
||||
# Alternatives
|
||||
|
||||
In the "Data Processing" section, we can enlarge the "no ingestion" period to include almost all the steps. This can simplify the entire procedure by a lot, but will bring a longer time of ingestion pause which may not be acceptable.
|
||||
151
docs/rfcs/2025-07-04-compatibility-test-framework.md
Normal file
@@ -0,0 +1,151 @@
|
||||
---
|
||||
Feature Name: Compatibility Test Framework
|
||||
Tracking Issue: TBD
|
||||
Date: 2025-07-04
|
||||
Author: "Ruihang Xia <waynestxia@gmail.com>"
|
||||
---
|
||||
|
||||
# Summary
|
||||
|
||||
This RFC proposes a compatibility test framework for GreptimeDB to ensure backward/forward compatibility for different versions of GreptimeDB.
|
||||
|
||||
# Motivation
|
||||
|
||||
In current practice, we don't have a systematic way to test and ensure the compatibility of different versions of GreptimeDB. Each time we release a new version, we need to manually test the compatibility with ad-hoc cases. This is not only time-consuming, but also error-prone and unmaintainable, and it relies heavily on the release manager to ensure the compatibility of different versions of GreptimeDB.
|
||||
|
||||
We don't have a detailed guide in the release SOP on how to test and ensure the compatibility of a new version, and we have broken compatibility many times (`v0.14.1` and `v0.15.1` are two examples, both released right after a major release).
|
||||
|
||||
# Details
|
||||
|
||||
This RFC proposes a compatibility test framework that is easy to maintain, extend and run. It can tell the compatibility between any given two versions of GreptimeDB, both backward and forward. It's based on the Sqlness library but used in a different way.
|
||||
|
||||
Generally speaking, the framework is composed of two parts:
|
||||
|
||||
1. Test cases: A set of test cases that are maintained specifically for the compatibility test, still in the `.sql` and `.result` format.
|
||||
2. Test framework: A new sqlness runner that is used to run the test cases, with some new features that are not required by the integration sqlness test.
|
||||
|
||||
## Test Cases
|
||||
|
||||
### Structure
|
||||
|
||||
The case set is organized in three parts:
|
||||
|
||||
- `1.feature`: Use a new feature
|
||||
- `2.verify`: Verify database behavior
|
||||
- `3.cleanup`: Paired with `1.feature`, cleans up the test environment.
|
||||
|
||||
These three parts are organized in a tree structure, and should be run in sequence:
|
||||
|
||||
```
|
||||
compatibility_test/
|
||||
├── 1.feature/
|
||||
│ ├── feature-a/
|
||||
│ ├── feature-b/
|
||||
│ └── feature-c/
|
||||
├── 2.verify/
|
||||
│ ├── verify-metadata/
|
||||
│ ├── verify-data/
|
||||
│ └── verify-schema/
|
||||
└── 3.cleanup/
|
||||
├── cleanup-a/
|
||||
├── cleanup-b/
|
||||
└── cleanup-c/
|
||||
```
|
||||
|
||||
### Example
|
||||
|
||||
For example, for a new feature like adding new index option ([#6416](https://github.com/GreptimeTeam/greptimedb/pull/6416)), we (who implement the feature) create a new test case like this:
|
||||
|
||||
```sql
|
||||
-- path: compatibility_test/1.feature/index-option/granularity_and_false_positive_rate.sql
|
||||
|
||||
-- SQLNESS ARG since=0.15.0
|
||||
-- SQLNESS IGNORE_RESULT
|
||||
CREATE TABLE granularity_and_false_positive_rate (ts timestamp time index, val double) with ("index.granularity" = "8192", "index.false_positive_rate" = "0.01");
|
||||
```
|
||||
|
||||
And
|
||||
|
||||
```sql
|
||||
-- path: compatibility_test/3.cleanup/index-option/granularity_and_false_positive_rate.sql
|
||||
drop table granularity_and_false_positive_rate;
|
||||
```
|
||||
|
||||
Since this new feature doesn't require a special way to verify the database behavior, we can reuse existing test cases in `2.verify/`. For example, we can reuse the `verify-metadata` test case to verify the metadata of the table.
|
||||
|
||||
```sql
|
||||
-- path: compatibility_test/2.verify/verify-metadata/show-create-table.sql
|
||||
|
||||
-- SQLNESS TEMPLATE TABLE="SHOW TABLES";
|
||||
SHOW CREATE TABLE $TABLE;
|
||||
```
|
||||
|
||||
In this example, we use some new sqlness features that will be introduced in the next section (`since`, `IGNORE_RESULT`, `TEMPLATE`).
|
||||
|
||||
### Maintenance
|
||||
|
||||
Each time we implement a new feature that should be covered by the compatibility test, we should create new test cases in `1.feature/` and `3.cleanup/` for it, and check whether existing cases in `2.verify/` can be reused to verify the database behavior.
|
||||
|
||||
This simulates an enthusiastic user who uses all the new features for the first time. The new maintenance burden falls on the feature implementer, who writes one more test case for the new feature to pin down its behavior. Once there is a breaking change in the future, it can be detected by the compatibility test framework automatically.
|
||||
|
||||
Another topic is deprecation. If a feature is deprecated, we should also mark it in the test case. Still using the above example, assume we deprecate the `index.granularity` and `index.false_positive_rate` index options in `v0.99.0`; we can mark them as:
|
||||
```sql
|
||||
-- SQLNESS ARG since=0.15.0 till=0.99.0
|
||||
...
|
||||
```
|
||||
|
||||
This tells the framework to ignore this feature in version `v0.99.0` and later. Since we currently have many experimental features that are scheduled to change in the future, this is a good way to mark them.
|
||||
|
||||
## Test Framework
|
||||
|
||||
This section is about new sqlness features required by this framework.
|
||||
|
||||
### Since and Till
|
||||
|
||||
Following the `ARG` interceptor in sqlness, we can mark a feature as available between two given versions. Only `since` is required:
|
||||
|
||||
```sql
|
||||
-- SQLNESS ARG since=VERSION_STRING [till=VERSION_STRING]
|
||||
```
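As a hedged sketch (not the actual sqlness implementation), the runner could interpret `since`/`till` as: a case runs only when the target version is at least `since` and, if `till` is present, below `till`:

```rust
// Parse "0.15.0" or "v0.15.0" into a comparable (major, minor, patch) tuple.
fn parse_version(v: &str) -> Option<(u64, u64, u64)> {
    let mut parts = v.trim_start_matches('v').splitn(3, '.');
    Some((
        parts.next()?.parse().ok()?,
        parts.next()?.parse().ok()?,
        parts.next()?.parse().ok()?,
    ))
}

/// Decide whether a test case applies to the target version under test.
fn case_applies(target: &str, since: &str, till: Option<&str>) -> bool {
    let (t, s) = match (parse_version(target), parse_version(since)) {
        (Some(t), Some(s)) => (t, s),
        _ => return false,
    };
    let before_till = match till.and_then(parse_version) {
        Some(till) => t < till,
        None => true,
    };
    t >= s && before_till
}

// e.g. case_applies("0.16.0", "0.15.0", Some("0.99.0")) == true
```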
|
||||
|
||||
### IGNORE_RESULT
|
||||
|
||||
`IGNORE_RESULT` is a new interceptor that tells the runner to ignore the result of the query and only check whether the query executes successfully.
|
||||
|
||||
This is useful for reducing the maintenance burden of the test cases: unlike the integration sqlness test, in most cases we don't care about the result of the query and only need to make sure it executes successfully.
|
||||
|
||||
### TEMPLATE
|
||||
|
||||
`TEMPLATE` is another new interceptor; it can generate queries from a template based on runtime data.
|
||||
|
||||
In the above example, we need to run the `SHOW CREATE TABLE` query for all existing tables, so we use the `TEMPLATE` interceptor to generate the queries from a dynamic table list.
|
||||
|
||||
### RUNNER
|
||||
|
||||
There are also some extra requirements for the runner itself:
|
||||
|
||||
- It should run the test cases in sequence, first `1.feature/`, then `2.verify/`, and finally `3.cleanup/`.
|
||||
- It should be able to fetch required version automatically to finish the test.
|
||||
- It should handle the `since` and `till` properly.
|
||||
|
||||
In the `1.feature` phase, the runner needs to identify all features that need to be tested based on the version number, and then restart with a new version (the `to` version) to run the `2.verify/` and `3.cleanup/` phases.
|
||||
|
||||
## Test Report
|
||||
|
||||
Finally, we can run the compatibility test to verify the compatibility between any given two versions of GreptimeDB, for example:
|
||||
|
||||
```bash
|
||||
# check backward compatibility between v0.15.0 and v0.16.0 when releasing v0.16.0
|
||||
./sqlness run --from=0.15.0 --to=0.16.0
|
||||
|
||||
# check forward compatibility when downgrading from v0.15.0 to v0.13.0
|
||||
./sqlness run --from=0.15.0 --to=0.13.0
|
||||
```
|
||||
|
||||
We can also use a script to run the compatibility test for all versions in a given range, producing a quick report covering all the versions we need; a sketch follows.
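For illustration, such a script could simply iterate over version pairs and invoke the runner command shown above; everything here except the `./sqlness run --from --to` invocation is an assumption:

```rust
use std::process::Command;

/// Run the compatibility test for every (from, to) pair in the given version
/// list and collect pass/fail results into a simple report.
fn compatibility_report(versions: &[&str]) -> Vec<(String, String, bool)> {
    let mut report = Vec::new();
    for (i, from) in versions.iter().enumerate() {
        for to in &versions[i + 1..] {
            let ok = Command::new("./sqlness")
                .arg("run")
                .arg(format!("--from={from}"))
                .arg(format!("--to={to}"))
                .status()
                .map(|s| s.success())
                .unwrap_or(false);
            report.push((from.to_string(), to.to_string(), ok));
        }
    }
    report
}
```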
|
||||
|
||||
Since we always bump the version in `Cargo.toml` to the next major release version, that version can be used as the "latest" unpublished version for scenarios like local testing.
|
||||
|
||||
# Alternatives
|
||||
|
||||
There was a previous attempt to implement a compatibility test framework that was later disabled for several reasons ([#3728](https://github.com/GreptimeTeam/greptimedb/issues/3728)).
|
||||
@@ -83,7 +83,7 @@ If you use the [Helm Chart](https://github.com/GreptimeTeam/helm-charts) to depl
|
||||
- `monitoring.enabled=true`: Deploys a standalone GreptimeDB instance dedicated to monitoring the cluster;
|
||||
- `grafana.enabled=true`: Deploys Grafana and automatically imports the monitoring dashboard;
|
||||
|
||||
The standalone GreptimeDB instance will collect metrics from your cluster, and the dashboard will be available in the Grafana UI. For detailed deployment instructions, please refer to our [Kubernetes deployment guide](https://docs.greptime.com/user-guide/deployments-administration-administration/deploy-on-kubernetes/getting-started).
|
||||
The standalone GreptimeDB instance will collect metrics from your cluster, and the dashboard will be available in the Grafana UI. For detailed deployment instructions, please refer to our [Kubernetes deployment guide](https://docs.greptime.com/user-guide/deployments-administration/deploy-on-kubernetes/overview).
|
||||
|
||||
### Self-host Prometheus and import dashboards manually
|
||||
|
||||
|
||||
File diff suppressed because it is too large
@@ -21,14 +21,14 @@
|
||||
# Resources
|
||||
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
|
||||
| --- | --- | --- | --- | --- | --- | --- |
|
||||
| Datanode Memory per Instance | `sum(process_resident_memory_bytes{instance=~"$datanode"}) by (instance, pod)` | `timeseries` | Current memory usage by instance | `prometheus` | `decbytes` | `[{{instance}}]-[{{ pod }}]` |
|
||||
| Datanode CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{instance=~"$datanode"}[$__rate_interval]) * 1000) by (instance, pod)` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]` |
|
||||
| Frontend Memory per Instance | `sum(process_resident_memory_bytes{instance=~"$frontend"}) by (instance, pod)` | `timeseries` | Current memory usage by instance | `prometheus` | `decbytes` | `[{{ instance }}]-[{{ pod }}]` |
|
||||
| Frontend CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{instance=~"$frontend"}[$__rate_interval]) * 1000) by (instance, pod)` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]-cpu` |
|
||||
| Metasrv Memory per Instance | `sum(process_resident_memory_bytes{instance=~"$metasrv"}) by (instance, pod)` | `timeseries` | Current memory usage by instance | `prometheus` | `decbytes` | `[{{ instance }}]-[{{ pod }}]-resident` |
|
||||
| Metasrv CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{instance=~"$metasrv"}[$__rate_interval]) * 1000) by (instance, pod)` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]` |
|
||||
| Flownode Memory per Instance | `sum(process_resident_memory_bytes{instance=~"$flownode"}) by (instance, pod)` | `timeseries` | Current memory usage by instance | `prometheus` | `decbytes` | `[{{ instance }}]-[{{ pod }}]` |
|
||||
| Flownode CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{instance=~"$flownode"}[$__rate_interval]) * 1000) by (instance, pod)` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]` |
|
||||
| Datanode Memory per Instance | `sum(process_resident_memory_bytes{instance=~"$datanode"}) by (instance, pod)`<br/>`max(greptime_memory_limit_in_bytes{app="greptime-datanode"})` | `timeseries` | Current memory usage by instance | `prometheus` | `bytes` | `[{{instance}}]-[{{ pod }}]` |
|
||||
| Datanode CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{instance=~"$datanode"}[$__rate_interval]) * 1000) by (instance, pod)`<br/>`max(greptime_cpu_limit_in_millicores{app="greptime-datanode"})` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]` |
|
||||
| Frontend Memory per Instance | `sum(process_resident_memory_bytes{instance=~"$frontend"}) by (instance, pod)`<br/>`max(greptime_memory_limit_in_bytes{app="greptime-frontend"})` | `timeseries` | Current memory usage by instance | `prometheus` | `bytes` | `[{{ instance }}]-[{{ pod }}]` |
|
||||
| Frontend CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{instance=~"$frontend"}[$__rate_interval]) * 1000) by (instance, pod)`<br/>`max(greptime_cpu_limit_in_millicores{app="greptime-frontend"})` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]-cpu` |
|
||||
| Metasrv Memory per Instance | `sum(process_resident_memory_bytes{instance=~"$metasrv"}) by (instance, pod)`<br/>`max(greptime_memory_limit_in_bytes{app="greptime-metasrv"})` | `timeseries` | Current memory usage by instance | `prometheus` | `bytes` | `[{{ instance }}]-[{{ pod }}]-resident` |
|
||||
| Metasrv CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{instance=~"$metasrv"}[$__rate_interval]) * 1000) by (instance, pod)`<br/>`max(greptime_cpu_limit_in_millicores{app="greptime-metasrv"})` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]` |
|
||||
| Flownode Memory per Instance | `sum(process_resident_memory_bytes{instance=~"$flownode"}) by (instance, pod)`<br/>`max(greptime_memory_limit_in_bytes{app="greptime-flownode"})` | `timeseries` | Current memory usage by instance | `prometheus` | `bytes` | `[{{ instance }}]-[{{ pod }}]` |
|
||||
| Flownode CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{instance=~"$flownode"}[$__rate_interval]) * 1000) by (instance, pod)`<br/>`max(greptime_cpu_limit_in_millicores{app="greptime-flownode"})` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]` |
|
||||
# Frontend Requests
|
||||
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
|
||||
| --- | --- | --- | --- | --- | --- | --- |
|
||||
@@ -72,18 +72,19 @@
|
||||
| Region Worker Handle Bulk Insert Requests | `histogram_quantile(0.95, sum by(le,instance, stage, pod) (rate(greptime_region_worker_handle_write_bucket[$__rate_interval])))`<br/>`sum by(instance, stage, pod) (rate(greptime_region_worker_handle_write_sum[$__rate_interval]))/sum by(instance, stage, pod) (rate(greptime_region_worker_handle_write_count[$__rate_interval]))` | `timeseries` | Per-stage elapsed time for region worker to handle bulk insert region requests. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-P95` |
|
||||
| Active Series and Field Builders Count | `sum by(instance, pod) (greptime_mito_memtable_active_series_count)`<br/>`sum by(instance, pod) (greptime_mito_memtable_field_builder_count)` | `timeseries` | Compaction oinput output bytes | `prometheus` | `none` | `[{{instance}}]-[{{pod}}]-series` |
|
||||
| Region Worker Convert Requests | `histogram_quantile(0.95, sum by(le, instance, stage, pod) (rate(greptime_datanode_convert_region_request_bucket[$__rate_interval])))`<br/>`sum by(le,instance, stage, pod) (rate(greptime_datanode_convert_region_request_sum[$__rate_interval]))/sum by(le,instance, stage, pod) (rate(greptime_datanode_convert_region_request_count[$__rate_interval]))` | `timeseries` | Per-stage elapsed time for region worker to decode requests. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-P95` |
|
||||
| Cache Miss | `sum by (instance,pod, type) (rate(greptime_mito_cache_miss{instance=~"$datanode"}[$__rate_interval]))` | `timeseries` | The local cache miss of the datanode. | `prometheus` | -- | `[{{instance}}]-[{{pod}}]-[{{type}}]` |
|
||||
# OpenDAL
|
||||
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
|
||||
| --- | --- | --- | --- | --- | --- | --- |
|
||||
| QPS per Instance | `sum by(instance, pod, scheme, operation) (rate(opendal_operation_duration_seconds_count{instance=~"$datanode"}[$__rate_interval]))` | `timeseries` | QPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
|
||||
| Read QPS per Instance | `sum by(instance, pod, scheme) (rate(opendal_operation_duration_seconds_count{instance=~"$datanode", operation="read"}[$__rate_interval]))` | `timeseries` | Read QPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]` |
|
||||
| Read P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, scheme) (rate(opendal_operation_duration_seconds_bucket{instance=~"$datanode",operation="read"}[$__rate_interval])))` | `timeseries` | Read P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-{{scheme}}` |
|
||||
| Write QPS per Instance | `sum by(instance, pod, scheme) (rate(opendal_operation_duration_seconds_count{instance=~"$datanode", operation="write"}[$__rate_interval]))` | `timeseries` | Write QPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-{{scheme}}` |
|
||||
| Write P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, scheme) (rate(opendal_operation_duration_seconds_bucket{instance=~"$datanode", operation="write"}[$__rate_interval])))` | `timeseries` | Write P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]` |
|
||||
| Read QPS per Instance | `sum by(instance, pod, scheme, operation) (rate(opendal_operation_duration_seconds_count{instance=~"$datanode", operation=~"read\|Reader::read"}[$__rate_interval]))` | `timeseries` | Read QPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
|
||||
| Read P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, scheme, operation) (rate(opendal_operation_duration_seconds_bucket{instance=~"$datanode",operation=~"read\|Reader::read"}[$__rate_interval])))` | `timeseries` | Read P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
|
||||
| Write QPS per Instance | `sum by(instance, pod, scheme, operation) (rate(opendal_operation_duration_seconds_count{instance=~"$datanode", operation=~"write\|Writer::write\|Writer::close"}[$__rate_interval]))` | `timeseries` | Write QPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
|
||||
| Write P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, scheme, operation) (rate(opendal_operation_duration_seconds_bucket{instance=~"$datanode", operation =~ "Writer::write\|Writer::close\|write"}[$__rate_interval])))` | `timeseries` | Write P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
|
||||
| List QPS per Instance | `sum by(instance, pod, scheme) (rate(opendal_operation_duration_seconds_count{instance=~"$datanode", operation="list"}[$__rate_interval]))` | `timeseries` | List QPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]` |
|
||||
| List P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, scheme) (rate(opendal_operation_duration_seconds_bucket{instance=~"$datanode", operation="list"}[$__rate_interval])))` | `timeseries` | List P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]` |
|
||||
| Other Requests per Instance | `sum by(instance, pod, scheme, operation) (rate(opendal_operation_duration_seconds_count{instance=~"$datanode",operation!~"read\|write\|list\|stat"}[$__rate_interval]))` | `timeseries` | Other Requests per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
|
||||
| Other Request P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, scheme, operation) (rate(opendal_operation_duration_seconds_bucket{instance=~"$datanode", operation!~"read\|write\|list"}[$__rate_interval])))` | `timeseries` | Other Request P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
|
||||
| Other Request P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, scheme, operation) (rate(opendal_operation_duration_seconds_bucket{instance=~"$datanode", operation!~"read\|write\|list\|Writer::write\|Writer::close\|Reader::read"}[$__rate_interval])))` | `timeseries` | Other Request P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
|
||||
| Opendal traffic | `sum by(instance, pod, scheme, operation) (rate(opendal_operation_bytes_sum{instance=~"$datanode"}[$__rate_interval]))` | `timeseries` | Total traffic as in bytes by instance and operation | `prometheus` | `decbytes` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
|
||||
| OpenDAL errors per Instance | `sum by(instance, pod, scheme, operation, error) (rate(opendal_operation_errors_total{instance=~"$datanode", error!="NotFound"}[$__rate_interval]))` | `timeseries` | OpenDAL error counts per Instance. | `prometheus` | -- | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]-[{{error}}]` |
|
||||
# Metasrv
|
||||
|
||||
@@ -180,13 +180,18 @@ groups:
|
||||
- title: Datanode Memory per Instance
|
||||
type: timeseries
|
||||
description: Current memory usage by instance
|
||||
unit: decbytes
|
||||
unit: bytes
|
||||
queries:
|
||||
- expr: sum(process_resident_memory_bytes{instance=~"$datanode"}) by (instance, pod)
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{instance}}]-[{{ pod }}]'
|
||||
- expr: max(greptime_memory_limit_in_bytes{app="greptime-datanode"})
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: limit
|
||||
- title: Datanode CPU Usage per Instance
|
||||
type: timeseries
|
||||
description: Current cpu usage by instance
|
||||
@@ -197,16 +202,26 @@ groups:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{ instance }}]-[{{ pod }}]'
|
||||
- expr: max(greptime_cpu_limit_in_millicores{app="greptime-datanode"})
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: limit
|
||||
- title: Frontend Memory per Instance
|
||||
type: timeseries
|
||||
description: Current memory usage by instance
|
||||
unit: decbytes
|
||||
unit: bytes
|
||||
queries:
|
||||
- expr: sum(process_resident_memory_bytes{instance=~"$frontend"}) by (instance, pod)
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{ instance }}]-[{{ pod }}]'
|
||||
- expr: max(greptime_memory_limit_in_bytes{app="greptime-frontend"})
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: limit
|
||||
- title: Frontend CPU Usage per Instance
|
||||
type: timeseries
|
||||
description: Current cpu usage by instance
|
||||
@@ -217,16 +232,26 @@ groups:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{ instance }}]-[{{ pod }}]-cpu'
|
||||
- expr: max(greptime_cpu_limit_in_millicores{app="greptime-frontend"})
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: limit
|
||||
- title: Metasrv Memory per Instance
|
||||
type: timeseries
|
||||
description: Current memory usage by instance
|
||||
unit: decbytes
|
||||
unit: bytes
|
||||
queries:
|
||||
- expr: sum(process_resident_memory_bytes{instance=~"$metasrv"}) by (instance, pod)
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{ instance }}]-[{{ pod }}]-resident'
|
||||
- expr: max(greptime_memory_limit_in_bytes{app="greptime-metasrv"})
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: limit
|
||||
- title: Metasrv CPU Usage per Instance
|
||||
type: timeseries
|
||||
description: Current cpu usage by instance
|
||||
@@ -237,16 +262,26 @@ groups:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{ instance }}]-[{{ pod }}]'
|
||||
- expr: max(greptime_cpu_limit_in_millicores{app="greptime-metasrv"})
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: limit
|
||||
- title: Flownode Memory per Instance
|
||||
type: timeseries
|
||||
description: Current memory usage by instance
|
||||
unit: decbytes
|
||||
unit: bytes
|
||||
queries:
|
||||
- expr: sum(process_resident_memory_bytes{instance=~"$flownode"}) by (instance, pod)
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{ instance }}]-[{{ pod }}]'
|
||||
- expr: max(greptime_memory_limit_in_bytes{app="greptime-flownode"})
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: limit
|
||||
- title: Flownode CPU Usage per Instance
|
||||
type: timeseries
|
||||
description: Current cpu usage by instance
|
||||
@@ -257,6 +292,11 @@ groups:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{ instance }}]-[{{ pod }}]'
|
||||
- expr: max(greptime_cpu_limit_in_millicores{app="greptime-flownode"})
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: limit
|
||||
- title: Frontend Requests
|
||||
panels:
|
||||
- title: HTTP QPS per Instance
|
||||
@@ -642,6 +682,15 @@ groups:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{instance}}]-[{{pod}}]-[{{stage}}]-AVG'
|
||||
- title: Cache Miss
|
||||
type: timeseries
|
||||
description: The local cache miss of the datanode.
|
||||
queries:
|
||||
- expr: sum by (instance,pod, type) (rate(greptime_mito_cache_miss{instance=~"$datanode"}[$__rate_interval]))
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{instance}}]-[{{pod}}]-[{{type}}]'
|
||||
- title: OpenDAL
|
||||
panels:
|
||||
- title: QPS per Instance
|
||||
@@ -659,41 +708,41 @@ groups:
|
||||
description: Read QPS per Instance.
|
||||
unit: ops
|
||||
queries:
|
||||
- expr: sum by(instance, pod, scheme) (rate(opendal_operation_duration_seconds_count{instance=~"$datanode", operation="read"}[$__rate_interval]))
|
||||
- expr: sum by(instance, pod, scheme, operation) (rate(opendal_operation_duration_seconds_count{instance=~"$datanode", operation=~"read|Reader::read"}[$__rate_interval]))
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{instance}}]-[{{pod}}]-[{{scheme}}]'
|
||||
legendFormat: '[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]'
|
||||
- title: Read P99 per Instance
|
||||
type: timeseries
|
||||
description: Read P99 per Instance.
|
||||
unit: s
|
||||
queries:
|
||||
- expr: histogram_quantile(0.99, sum by(instance, pod, le, scheme) (rate(opendal_operation_duration_seconds_bucket{instance=~"$datanode",operation="read"}[$__rate_interval])))
|
||||
- expr: histogram_quantile(0.99, sum by(instance, pod, le, scheme, operation) (rate(opendal_operation_duration_seconds_bucket{instance=~"$datanode",operation=~"read|Reader::read"}[$__rate_interval])))
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{instance}}]-[{{pod}}]-{{scheme}}'
|
||||
legendFormat: '[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]'
|
||||
- title: Write QPS per Instance
|
||||
type: timeseries
|
||||
description: Write QPS per Instance.
|
||||
unit: ops
|
||||
queries:
|
||||
- expr: sum by(instance, pod, scheme) (rate(opendal_operation_duration_seconds_count{instance=~"$datanode", operation="write"}[$__rate_interval]))
|
||||
- expr: sum by(instance, pod, scheme, operation) (rate(opendal_operation_duration_seconds_count{instance=~"$datanode", operation=~"write|Writer::write|Writer::close"}[$__rate_interval]))
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{instance}}]-[{{pod}}]-{{scheme}}'
|
||||
legendFormat: '[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]'
|
||||
- title: Write P99 per Instance
|
||||
type: timeseries
|
||||
description: Write P99 per Instance.
|
||||
unit: s
|
||||
queries:
|
||||
- expr: histogram_quantile(0.99, sum by(instance, pod, le, scheme) (rate(opendal_operation_duration_seconds_bucket{instance=~"$datanode", operation="write"}[$__rate_interval])))
|
||||
- expr: histogram_quantile(0.99, sum by(instance, pod, le, scheme, operation) (rate(opendal_operation_duration_seconds_bucket{instance=~"$datanode", operation =~ "Writer::write|Writer::close|write"}[$__rate_interval])))
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{instance}}]-[{{pod}}]-[{{scheme}}]'
|
||||
legendFormat: '[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]'
|
||||
- title: List QPS per Instance
|
||||
type: timeseries
|
||||
description: List QPS per Instance.
|
||||
@@ -729,7 +778,7 @@ groups:
|
||||
description: Other Request P99 per Instance.
|
||||
unit: s
|
||||
queries:
|
||||
- expr: histogram_quantile(0.99, sum by(instance, pod, le, scheme, operation) (rate(opendal_operation_duration_seconds_bucket{instance=~"$datanode", operation!~"read|write|list"}[$__rate_interval])))
|
||||
- expr: histogram_quantile(0.99, sum by(instance, pod, le, scheme, operation) (rate(opendal_operation_duration_seconds_bucket{instance=~"$datanode", operation!~"read|write|list|Writer::write|Writer::close|Reader::read"}[$__rate_interval])))
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
|
||||
File diff suppressed because it is too large
@@ -21,14 +21,14 @@
|
||||
# Resources
|
||||
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
|
||||
| --- | --- | --- | --- | --- | --- | --- |
|
||||
| Datanode Memory per Instance | `sum(process_resident_memory_bytes{}) by (instance, pod)` | `timeseries` | Current memory usage by instance | `prometheus` | `decbytes` | `[{{instance}}]-[{{ pod }}]` |
|
||||
| Datanode CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{}[$__rate_interval]) * 1000) by (instance, pod)` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]` |
|
||||
| Frontend Memory per Instance | `sum(process_resident_memory_bytes{}) by (instance, pod)` | `timeseries` | Current memory usage by instance | `prometheus` | `decbytes` | `[{{ instance }}]-[{{ pod }}]` |
|
||||
| Frontend CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{}[$__rate_interval]) * 1000) by (instance, pod)` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]-cpu` |
|
||||
| Metasrv Memory per Instance | `sum(process_resident_memory_bytes{}) by (instance, pod)` | `timeseries` | Current memory usage by instance | `prometheus` | `decbytes` | `[{{ instance }}]-[{{ pod }}]-resident` |
|
||||
| Metasrv CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{}[$__rate_interval]) * 1000) by (instance, pod)` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]` |
|
||||
| Flownode Memory per Instance | `sum(process_resident_memory_bytes{}) by (instance, pod)` | `timeseries` | Current memory usage by instance | `prometheus` | `decbytes` | `[{{ instance }}]-[{{ pod }}]` |
|
||||
| Flownode CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{}[$__rate_interval]) * 1000) by (instance, pod)` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]` |
|
||||
| Datanode Memory per Instance | `sum(process_resident_memory_bytes{}) by (instance, pod)`<br/>`max(greptime_memory_limit_in_bytes{app="greptime-datanode"})` | `timeseries` | Current memory usage by instance | `prometheus` | `bytes` | `[{{instance}}]-[{{ pod }}]` |
|
||||
| Datanode CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{}[$__rate_interval]) * 1000) by (instance, pod)`<br/>`max(greptime_cpu_limit_in_millicores{app="greptime-datanode"})` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]` |
|
||||
| Frontend Memory per Instance | `sum(process_resident_memory_bytes{}) by (instance, pod)`<br/>`max(greptime_memory_limit_in_bytes{app="greptime-frontend"})` | `timeseries` | Current memory usage by instance | `prometheus` | `bytes` | `[{{ instance }}]-[{{ pod }}]` |
|
||||
| Frontend CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{}[$__rate_interval]) * 1000) by (instance, pod)`<br/>`max(greptime_cpu_limit_in_millicores{app="greptime-frontend"})` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]-cpu` |
|
||||
| Metasrv Memory per Instance | `sum(process_resident_memory_bytes{}) by (instance, pod)`<br/>`max(greptime_memory_limit_in_bytes{app="greptime-metasrv"})` | `timeseries` | Current memory usage by instance | `prometheus` | `bytes` | `[{{ instance }}]-[{{ pod }}]-resident` |
|
||||
| Metasrv CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{}[$__rate_interval]) * 1000) by (instance, pod)`<br/>`max(greptime_cpu_limit_in_millicores{app="greptime-metasrv"})` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]` |
|
||||
| Flownode Memory per Instance | `sum(process_resident_memory_bytes{}) by (instance, pod)`<br/>`max(greptime_memory_limit_in_bytes{app="greptime-flownode"})` | `timeseries` | Current memory usage by instance | `prometheus` | `bytes` | `[{{ instance }}]-[{{ pod }}]` |
|
||||
| Flownode CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{}[$__rate_interval]) * 1000) by (instance, pod)`<br/>`max(greptime_cpu_limit_in_millicores{app="greptime-flownode"})` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]` |
|
||||
# Frontend Requests
|
||||
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
|
||||
| --- | --- | --- | --- | --- | --- | --- |
|
||||
@@ -72,18 +72,19 @@
|
||||
| Region Worker Handle Bulk Insert Requests | `histogram_quantile(0.95, sum by(le,instance, stage, pod) (rate(greptime_region_worker_handle_write_bucket[$__rate_interval])))`<br/>`sum by(instance, stage, pod) (rate(greptime_region_worker_handle_write_sum[$__rate_interval]))/sum by(instance, stage, pod) (rate(greptime_region_worker_handle_write_count[$__rate_interval]))` | `timeseries` | Per-stage elapsed time for region worker to handle bulk insert region requests. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-P95` |
|
||||
| Active Series and Field Builders Count | `sum by(instance, pod) (greptime_mito_memtable_active_series_count)`<br/>`sum by(instance, pod) (greptime_mito_memtable_field_builder_count)` | `timeseries` | Compaction oinput output bytes | `prometheus` | `none` | `[{{instance}}]-[{{pod}}]-series` |
|
||||
| Region Worker Convert Requests | `histogram_quantile(0.95, sum by(le, instance, stage, pod) (rate(greptime_datanode_convert_region_request_bucket[$__rate_interval])))`<br/>`sum by(le,instance, stage, pod) (rate(greptime_datanode_convert_region_request_sum[$__rate_interval]))/sum by(le,instance, stage, pod) (rate(greptime_datanode_convert_region_request_count[$__rate_interval]))` | `timeseries` | Per-stage elapsed time for region worker to decode requests. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-P95` |
|
||||
| Cache Miss | `sum by (instance,pod, type) (rate(greptime_mito_cache_miss{}[$__rate_interval]))` | `timeseries` | The local cache miss of the datanode. | `prometheus` | -- | `[{{instance}}]-[{{pod}}]-[{{type}}]` |
|
||||
# OpenDAL
|
||||
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
|
||||
| --- | --- | --- | --- | --- | --- | --- |
|
||||
| QPS per Instance | `sum by(instance, pod, scheme, operation) (rate(opendal_operation_duration_seconds_count{}[$__rate_interval]))` | `timeseries` | QPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
|
||||
| Read QPS per Instance | `sum by(instance, pod, scheme) (rate(opendal_operation_duration_seconds_count{ operation="read"}[$__rate_interval]))` | `timeseries` | Read QPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]` |
|
||||
| Read P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, scheme) (rate(opendal_operation_duration_seconds_bucket{operation="read"}[$__rate_interval])))` | `timeseries` | Read P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-{{scheme}}` |
|
||||
| Write QPS per Instance | `sum by(instance, pod, scheme) (rate(opendal_operation_duration_seconds_count{ operation="write"}[$__rate_interval]))` | `timeseries` | Write QPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-{{scheme}}` |
|
||||
| Write P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, scheme) (rate(opendal_operation_duration_seconds_bucket{ operation="write"}[$__rate_interval])))` | `timeseries` | Write P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]` |
|
||||
| Read QPS per Instance | `sum by(instance, pod, scheme, operation) (rate(opendal_operation_duration_seconds_count{ operation=~"read\|Reader::read"}[$__rate_interval]))` | `timeseries` | Read QPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
|
||||
| Read P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, scheme, operation) (rate(opendal_operation_duration_seconds_bucket{operation=~"read\|Reader::read"}[$__rate_interval])))` | `timeseries` | Read P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
|
||||
| Write QPS per Instance | `sum by(instance, pod, scheme, operation) (rate(opendal_operation_duration_seconds_count{ operation=~"write\|Writer::write\|Writer::close"}[$__rate_interval]))` | `timeseries` | Write QPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
|
||||
| Write P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, scheme, operation) (rate(opendal_operation_duration_seconds_bucket{ operation =~ "Writer::write\|Writer::close\|write"}[$__rate_interval])))` | `timeseries` | Write P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
|
||||
| List QPS per Instance | `sum by(instance, pod, scheme) (rate(opendal_operation_duration_seconds_count{ operation="list"}[$__rate_interval]))` | `timeseries` | List QPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]` |
|
||||
| List P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, scheme) (rate(opendal_operation_duration_seconds_bucket{ operation="list"}[$__rate_interval])))` | `timeseries` | List P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]` |
|
||||
| Other Requests per Instance | `sum by(instance, pod, scheme, operation) (rate(opendal_operation_duration_seconds_count{operation!~"read\|write\|list\|stat"}[$__rate_interval]))` | `timeseries` | Other Requests per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
|
||||
| Other Request P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, scheme, operation) (rate(opendal_operation_duration_seconds_bucket{ operation!~"read\|write\|list"}[$__rate_interval])))` | `timeseries` | Other Request P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
|
||||
| Other Request P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, scheme, operation) (rate(opendal_operation_duration_seconds_bucket{ operation!~"read\|write\|list\|Writer::write\|Writer::close\|Reader::read"}[$__rate_interval])))` | `timeseries` | Other Request P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
|
||||
| Opendal traffic | `sum by(instance, pod, scheme, operation) (rate(opendal_operation_bytes_sum{}[$__rate_interval]))` | `timeseries` | Total traffic as in bytes by instance and operation | `prometheus` | `decbytes` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
|
||||
| OpenDAL errors per Instance | `sum by(instance, pod, scheme, operation, error) (rate(opendal_operation_errors_total{ error!="NotFound"}[$__rate_interval]))` | `timeseries` | OpenDAL error counts per Instance. | `prometheus` | -- | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]-[{{error}}]` |
|
||||
# Metasrv
|
||||
|
||||
@@ -180,13 +180,18 @@ groups:
|
||||
- title: Datanode Memory per Instance
|
||||
type: timeseries
|
||||
description: Current memory usage by instance
|
||||
unit: decbytes
|
||||
unit: bytes
|
||||
queries:
|
||||
- expr: sum(process_resident_memory_bytes{}) by (instance, pod)
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{instance}}]-[{{ pod }}]'
|
||||
- expr: max(greptime_memory_limit_in_bytes{app="greptime-datanode"})
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: limit
|
||||
- title: Datanode CPU Usage per Instance
|
||||
type: timeseries
|
||||
description: Current cpu usage by instance
|
||||
@@ -197,16 +202,26 @@ groups:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{ instance }}]-[{{ pod }}]'
|
||||
- expr: max(greptime_cpu_limit_in_millicores{app="greptime-datanode"})
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: limit
|
||||
- title: Frontend Memory per Instance
|
||||
type: timeseries
|
||||
description: Current memory usage by instance
|
||||
unit: decbytes
|
||||
unit: bytes
|
||||
queries:
|
||||
- expr: sum(process_resident_memory_bytes{}) by (instance, pod)
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{ instance }}]-[{{ pod }}]'
|
||||
- expr: max(greptime_memory_limit_in_bytes{app="greptime-frontend"})
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: limit
|
||||
- title: Frontend CPU Usage per Instance
|
||||
type: timeseries
|
||||
description: Current cpu usage by instance
|
||||
@@ -217,16 +232,26 @@ groups:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{ instance }}]-[{{ pod }}]-cpu'
|
||||
- expr: max(greptime_cpu_limit_in_millicores{app="greptime-frontend"})
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: limit
|
||||
- title: Metasrv Memory per Instance
|
||||
type: timeseries
|
||||
description: Current memory usage by instance
|
||||
unit: decbytes
|
||||
unit: bytes
|
||||
queries:
|
||||
- expr: sum(process_resident_memory_bytes{}) by (instance, pod)
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{ instance }}]-[{{ pod }}]-resident'
|
||||
- expr: max(greptime_memory_limit_in_bytes{app="greptime-metasrv"})
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: limit
|
||||
- title: Metasrv CPU Usage per Instance
|
||||
type: timeseries
|
||||
description: Current cpu usage by instance
|
||||
@@ -237,16 +262,26 @@ groups:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{ instance }}]-[{{ pod }}]'
|
||||
- expr: max(greptime_cpu_limit_in_millicores{app="greptime-metasrv"})
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: limit
|
||||
- title: Flownode Memory per Instance
|
||||
type: timeseries
|
||||
description: Current memory usage by instance
|
||||
unit: decbytes
|
||||
unit: bytes
|
||||
queries:
|
||||
- expr: sum(process_resident_memory_bytes{}) by (instance, pod)
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{ instance }}]-[{{ pod }}]'
|
||||
- expr: max(greptime_memory_limit_in_bytes{app="greptime-flownode"})
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: limit
|
||||
- title: Flownode CPU Usage per Instance
|
||||
type: timeseries
|
||||
description: Current cpu usage by instance
|
||||
@@ -257,6 +292,11 @@ groups:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{ instance }}]-[{{ pod }}]'
|
||||
- expr: max(greptime_cpu_limit_in_millicores{app="greptime-flownode"})
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: limit
|
||||
- title: Frontend Requests
|
||||
panels:
|
||||
- title: HTTP QPS per Instance
|
||||
@@ -642,6 +682,15 @@ groups:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{instance}}]-[{{pod}}]-[{{stage}}]-AVG'
|
||||
- title: Cache Miss
|
||||
type: timeseries
|
||||
description: The local cache miss of the datanode.
|
||||
queries:
|
||||
- expr: sum by (instance,pod, type) (rate(greptime_mito_cache_miss{}[$__rate_interval]))
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{instance}}]-[{{pod}}]-[{{type}}]'
|
||||
- title: OpenDAL
|
||||
panels:
|
||||
- title: QPS per Instance
|
||||
@@ -659,41 +708,41 @@ groups:
|
||||
description: Read QPS per Instance.
|
||||
unit: ops
|
||||
queries:
|
||||
- expr: sum by(instance, pod, scheme) (rate(opendal_operation_duration_seconds_count{ operation="read"}[$__rate_interval]))
|
||||
- expr: sum by(instance, pod, scheme, operation) (rate(opendal_operation_duration_seconds_count{ operation=~"read|Reader::read"}[$__rate_interval]))
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{instance}}]-[{{pod}}]-[{{scheme}}]'
|
||||
legendFormat: '[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]'
|
||||
- title: Read P99 per Instance
|
||||
type: timeseries
|
||||
description: Read P99 per Instance.
|
||||
unit: s
|
||||
queries:
|
||||
- expr: histogram_quantile(0.99, sum by(instance, pod, le, scheme) (rate(opendal_operation_duration_seconds_bucket{operation="read"}[$__rate_interval])))
|
||||
- expr: histogram_quantile(0.99, sum by(instance, pod, le, scheme, operation) (rate(opendal_operation_duration_seconds_bucket{operation=~"read|Reader::read"}[$__rate_interval])))
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{instance}}]-[{{pod}}]-{{scheme}}'
|
||||
legendFormat: '[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]'
|
||||
- title: Write QPS per Instance
|
||||
type: timeseries
|
||||
description: Write QPS per Instance.
|
||||
unit: ops
|
||||
queries:
|
||||
- expr: sum by(instance, pod, scheme) (rate(opendal_operation_duration_seconds_count{ operation="write"}[$__rate_interval]))
|
||||
- expr: sum by(instance, pod, scheme, operation) (rate(opendal_operation_duration_seconds_count{ operation=~"write|Writer::write|Writer::close"}[$__rate_interval]))
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{instance}}]-[{{pod}}]-{{scheme}}'
|
||||
legendFormat: '[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]'
|
||||
- title: Write P99 per Instance
|
||||
type: timeseries
|
||||
description: Write P99 per Instance.
|
||||
unit: s
|
||||
queries:
|
||||
- expr: histogram_quantile(0.99, sum by(instance, pod, le, scheme) (rate(opendal_operation_duration_seconds_bucket{ operation="write"}[$__rate_interval])))
|
||||
- expr: histogram_quantile(0.99, sum by(instance, pod, le, scheme, operation) (rate(opendal_operation_duration_seconds_bucket{ operation =~ "Writer::write|Writer::close|write"}[$__rate_interval])))
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{instance}}]-[{{pod}}]-[{{scheme}}]'
|
||||
legendFormat: '[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]'
|
||||
- title: List QPS per Instance
|
||||
type: timeseries
|
||||
description: List QPS per Instance.
|
||||
@@ -729,7 +778,7 @@ groups:
|
||||
description: Other Request P99 per Instance.
|
||||
unit: s
|
||||
queries:
|
||||
- expr: histogram_quantile(0.99, sum by(instance, pod, le, scheme, operation) (rate(opendal_operation_duration_seconds_bucket{ operation!~"read|write|list"}[$__rate_interval])))
|
||||
- expr: histogram_quantile(0.99, sum by(instance, pod, le, scheme, operation) (rate(opendal_operation_duration_seconds_bucket{ operation!~"read|write|list|Writer::write|Writer::close|Reader::read"}[$__rate_interval])))
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
|
||||
@@ -26,7 +26,7 @@ check_dashboards_generation() {
|
||||
./grafana/scripts/gen-dashboards.sh
|
||||
|
||||
if [[ -n "$(git diff --name-only grafana/dashboards/metrics)" ]]; then
|
||||
echo "Error: The dashboards are not generated correctly. You should execute the `make dashboards` command."
|
||||
echo "Error: The dashboards are not generated correctly. You should execute the 'make dashboards' command."
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
@@ -29,9 +29,11 @@ excludes = [
|
||||
# enterprise
|
||||
"src/common/meta/src/rpc/ddl/trigger.rs",
|
||||
"src/operator/src/expr_helper/trigger.rs",
|
||||
"src/sql/src/statements/alter/trigger.rs",
|
||||
"src/sql/src/statements/create/trigger.rs",
|
||||
"src/sql/src/statements/show/trigger.rs",
|
||||
"src/sql/src/statements/drop/trigger.rs",
|
||||
"src/sql/src/parsers/alter_parser/trigger.rs",
|
||||
"src/sql/src/parsers/create_parser/trigger.rs",
|
||||
"src/sql/src/parsers/show_parser/trigger.rs",
|
||||
"src/mito2/src/extension.rs",
|
||||
|
||||
@@ -13,8 +13,8 @@
|
||||
# limitations under the License.
|
||||
|
||||
import os
|
||||
import re
|
||||
from multiprocessing import Pool
|
||||
from pathlib import Path
|
||||
|
||||
|
||||
def find_rust_files(directory):
|
||||
@@ -24,6 +24,10 @@ def find_rust_files(directory):
|
||||
if "test" in root.lower():
|
||||
continue
|
||||
|
||||
# Skip the target directory
|
||||
if "target" in Path(root).parts:
|
||||
continue
|
||||
|
||||
for file in files:
|
||||
# Skip files with "test" in the filename
|
||||
if "test" in file.lower():
|
||||
|
||||
@@ -17,6 +17,7 @@ use std::any::Any;
|
||||
use common_error::ext::ErrorExt;
|
||||
use common_error::status_code::StatusCode;
|
||||
use common_macro::stack_trace_debug;
|
||||
use common_time::timestamp::TimeUnit;
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use snafu::prelude::*;
|
||||
use snafu::Location;
|
||||
@@ -66,12 +67,28 @@ pub enum Error {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Invalid time unit: {time_unit}"))]
|
||||
InvalidTimeUnit {
|
||||
time_unit: i32,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Inconsistent time unit: {:?}", units))]
|
||||
InconsistentTimeUnit {
|
||||
units: Vec<TimeUnit>,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
}
|
||||
|
||||
impl ErrorExt for Error {
|
||||
fn status_code(&self) -> StatusCode {
|
||||
match self {
|
||||
Error::UnknownColumnDataType { .. } => StatusCode::InvalidArguments,
|
||||
Error::UnknownColumnDataType { .. }
|
||||
| Error::InvalidTimeUnit { .. }
|
||||
| Error::InconsistentTimeUnit { .. } => StatusCode::InvalidArguments,
|
||||
Error::IntoColumnDataType { .. } | Error::SerializeJson { .. } => {
|
||||
StatusCode::Unexpected
|
||||
}
|
||||
|
||||
@@ -12,6 +12,7 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::collections::HashSet;
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_base::BitVec;
|
||||
@@ -46,7 +47,7 @@ use greptime_proto::v1::{
|
||||
use paste::paste;
|
||||
use snafu::prelude::*;
|
||||
|
||||
use crate::error::{self, Result};
|
||||
use crate::error::{self, InconsistentTimeUnitSnafu, InvalidTimeUnitSnafu, Result};
|
||||
use crate::v1::column::Values;
|
||||
use crate::v1::{Column, ColumnDataType, Value as GrpcValue};
|
||||
|
||||
@@ -291,6 +292,7 @@ impl TryFrom<ConcreteDataType> for ColumnDataTypeWrapper {
|
||||
ConcreteDataType::Vector(_) => ColumnDataType::Vector,
|
||||
ConcreteDataType::Null(_)
|
||||
| ConcreteDataType::List(_)
|
||||
| ConcreteDataType::Struct(_)
|
||||
| ConcreteDataType::Dictionary(_)
|
||||
| ConcreteDataType::Duration(_) => {
|
||||
return error::IntoColumnDataTypeSnafu { from: datatype }.fail()
|
||||
@@ -703,6 +705,7 @@ pub fn pb_values_to_vector_ref(data_type: &ConcreteDataType, values: Values) ->
|
||||
ConcreteDataType::Vector(_) => Arc::new(BinaryVector::from_vec(values.binary_values)),
|
||||
ConcreteDataType::Null(_)
|
||||
| ConcreteDataType::List(_)
|
||||
| ConcreteDataType::Struct(_)
|
||||
| ConcreteDataType::Dictionary(_)
|
||||
| ConcreteDataType::Duration(_)
|
||||
| ConcreteDataType::Json(_) => {
|
||||
@@ -864,6 +867,7 @@ pub fn pb_values_to_values(data_type: &ConcreteDataType, values: Values) -> Vec<
|
||||
ConcreteDataType::Vector(_) => values.binary_values.into_iter().map(|v| v.into()).collect(),
|
||||
ConcreteDataType::Null(_)
|
||||
| ConcreteDataType::List(_)
|
||||
| ConcreteDataType::Struct(_)
|
||||
| ConcreteDataType::Dictionary(_)
|
||||
| ConcreteDataType::Duration(_)
|
||||
| ConcreteDataType::Json(_) => {
|
||||
@@ -1076,6 +1080,89 @@ pub fn value_to_grpc_value(value: Value) -> GrpcValue {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn from_pb_time_unit(unit: v1::TimeUnit) -> TimeUnit {
|
||||
match unit {
|
||||
v1::TimeUnit::Second => TimeUnit::Second,
|
||||
v1::TimeUnit::Millisecond => TimeUnit::Millisecond,
|
||||
v1::TimeUnit::Microsecond => TimeUnit::Microsecond,
|
||||
v1::TimeUnit::Nanosecond => TimeUnit::Nanosecond,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn to_pb_time_unit(unit: TimeUnit) -> v1::TimeUnit {
|
||||
match unit {
|
||||
TimeUnit::Second => v1::TimeUnit::Second,
|
||||
TimeUnit::Millisecond => v1::TimeUnit::Millisecond,
|
||||
TimeUnit::Microsecond => v1::TimeUnit::Microsecond,
|
||||
TimeUnit::Nanosecond => v1::TimeUnit::Nanosecond,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn from_pb_time_ranges(time_ranges: v1::TimeRanges) -> Result<Vec<(Timestamp, Timestamp)>> {
|
||||
if time_ranges.time_ranges.is_empty() {
|
||||
return Ok(vec![]);
|
||||
}
|
||||
let proto_time_unit = v1::TimeUnit::try_from(time_ranges.time_unit).map_err(|_| {
|
||||
InvalidTimeUnitSnafu {
|
||||
time_unit: time_ranges.time_unit,
|
||||
}
|
||||
.build()
|
||||
})?;
|
||||
let time_unit = from_pb_time_unit(proto_time_unit);
|
||||
Ok(time_ranges
|
||||
.time_ranges
|
||||
.into_iter()
|
||||
.map(|r| {
|
||||
(
|
||||
Timestamp::new(r.start, time_unit),
|
||||
Timestamp::new(r.end, time_unit),
|
||||
)
|
||||
})
|
||||
.collect())
|
||||
}
|
||||
|
||||
/// All time_ranges must be of the same time unit.
|
||||
///
|
||||
/// If the input `time_ranges` is empty, it will return a default `TimeRanges` with `Millisecond` as the time unit.
|
||||
pub fn to_pb_time_ranges(time_ranges: &[(Timestamp, Timestamp)]) -> Result<v1::TimeRanges> {
|
||||
let is_same_time_unit = time_ranges.windows(2).all(|x| {
|
||||
x[0].0.unit() == x[1].0.unit()
|
||||
&& x[0].1.unit() == x[1].1.unit()
|
||||
&& x[0].0.unit() == x[0].1.unit()
|
||||
});
|
||||
|
||||
if !is_same_time_unit {
|
||||
let all_time_units: Vec<_> = time_ranges
|
||||
.iter()
|
||||
.map(|(s, e)| [s.unit(), e.unit()])
|
||||
.clone()
|
||||
.flatten()
|
||||
.collect::<HashSet<_>>()
|
||||
.into_iter()
|
||||
.collect();
|
||||
InconsistentTimeUnitSnafu {
|
||||
units: all_time_units,
|
||||
}
|
||||
.fail()?
|
||||
}
|
||||
|
||||
let mut pb_time_ranges = v1::TimeRanges {
|
||||
// default time unit is Millisecond
|
||||
time_unit: v1::TimeUnit::Millisecond as i32,
|
||||
time_ranges: Vec::with_capacity(time_ranges.len()),
|
||||
};
|
||||
if let Some((start, _end)) = time_ranges.first() {
|
||||
pb_time_ranges.time_unit = to_pb_time_unit(start.unit()) as i32;
|
||||
}
|
||||
for (start, end) in time_ranges {
|
||||
pb_time_ranges.time_ranges.push(v1::TimeRange {
|
||||
start: start.value(),
|
||||
end: end.value(),
|
||||
});
|
||||
}
|
||||
Ok(pb_time_ranges)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::sync::Arc;
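
    As a quick illustration of the round-trip contract described above, here is a minimal test-style sketch (illustrative only, not part of this commit; it assumes the test module also pulls the parent items into scope, e.g. via `use super::*;`, so that `Timestamp`, `TimeUnit`, and the `v1` protos are visible):

    // Hypothetical round-trip sketch for the new time range helpers.
    #[test]
    fn test_time_ranges_round_trip_sketch() {
        let ranges = vec![
            (
                Timestamp::new(1_000, TimeUnit::Millisecond),
                Timestamp::new(2_000, TimeUnit::Millisecond),
            ),
            (
                Timestamp::new(3_000, TimeUnit::Millisecond),
                Timestamp::new(4_000, TimeUnit::Millisecond),
            ),
        ];
        let pb = to_pb_time_ranges(&ranges).unwrap();
        assert_eq!(pb.time_unit, v1::TimeUnit::Millisecond as i32);
        assert_eq!(from_pb_time_ranges(pb).unwrap(), ranges);

        // Ranges with mixed units are rejected with an InconsistentTimeUnit error.
        let mixed = vec![
            (
                Timestamp::new(1, TimeUnit::Second),
                Timestamp::new(2, TimeUnit::Second),
            ),
            (
                Timestamp::new(3_000, TimeUnit::Millisecond),
                Timestamp::new(4_000, TimeUnit::Millisecond),
            ),
        ];
        assert!(to_pb_time_ranges(&mixed).is_err());
    }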
|
||||
|
||||
@@ -24,7 +24,7 @@ use greptime_proto::v1::{
|
||||
};
|
||||
use snafu::ResultExt;
|
||||
|
||||
use crate::error::{self, Result};
|
||||
use crate::error::{self, ConvertColumnDefaultConstraintSnafu, Result};
|
||||
use crate::helper::ColumnDataTypeWrapper;
|
||||
use crate::v1::{ColumnDef, ColumnOptions, SemanticType};
|
||||
|
||||
@@ -77,6 +77,48 @@ pub fn try_as_column_schema(column_def: &ColumnDef) -> Result<ColumnSchema> {
|
||||
})
|
||||
}
|
||||
|
||||
/// Tries to construct a `ColumnDef` from the given `ColumnSchema`.
|
||||
///
|
||||
/// TODO(weny): Add tests for this function.
|
||||
pub fn try_as_column_def(column_schema: &ColumnSchema, is_primary_key: bool) -> Result<ColumnDef> {
|
||||
let column_datatype =
|
||||
ColumnDataTypeWrapper::try_from(column_schema.data_type.clone()).map(|w| w.to_parts())?;
|
||||
|
||||
let semantic_type = if column_schema.is_time_index() {
|
||||
SemanticType::Timestamp
|
||||
} else if is_primary_key {
|
||||
SemanticType::Tag
|
||||
} else {
|
||||
SemanticType::Field
|
||||
} as i32;
|
||||
let comment = column_schema
|
||||
.metadata()
|
||||
.get(COMMENT_KEY)
|
||||
.cloned()
|
||||
.unwrap_or_default();
|
||||
|
||||
let default_constraint = match column_schema.default_constraint() {
|
||||
None => vec![],
|
||||
Some(v) => v
|
||||
.clone()
|
||||
.try_into()
|
||||
.context(ConvertColumnDefaultConstraintSnafu {
|
||||
column: &column_schema.name,
|
||||
})?,
|
||||
};
|
||||
let options = options_from_column_schema(column_schema);
|
||||
Ok(ColumnDef {
|
||||
name: column_schema.name.clone(),
|
||||
data_type: column_datatype.0 as i32,
|
||||
is_nullable: column_schema.is_nullable(),
|
||||
default_constraint,
|
||||
semantic_type,
|
||||
comment,
|
||||
datatype_extension: column_datatype.1,
|
||||
options,
|
||||
})
|
||||
}
|
||||
|
||||
/// Constructs a `ColumnOptions` from the given `ColumnSchema`.
|
||||
pub fn options_from_column_schema(column_schema: &ColumnSchema) -> Option<ColumnOptions> {
|
||||
let mut options = ColumnOptions::default();
|
||||
|
||||
@@ -44,6 +44,8 @@ moka = { workspace = true, features = ["future", "sync"] }
|
||||
partition.workspace = true
|
||||
paste.workspace = true
|
||||
prometheus.workspace = true
|
||||
promql-parser.workspace = true
|
||||
rand.workspace = true
|
||||
rustc-hash.workspace = true
|
||||
serde_json.workspace = true
|
||||
session.workspace = true
|
||||
|
||||
@@ -16,8 +16,8 @@ use api::v1::meta::ProcedureStatus;
|
||||
use common_error::ext::BoxedError;
|
||||
use common_meta::cluster::{ClusterInfo, NodeInfo};
|
||||
use common_meta::datanode::RegionStat;
|
||||
use common_meta::ddl::{ExecutorContext, ProcedureExecutor};
|
||||
use common_meta::key::flow::flow_state::FlowStat;
|
||||
use common_meta::procedure_executor::{ExecutorContext, ProcedureExecutor};
|
||||
use common_meta::rpc::procedure;
|
||||
use common_procedure::{ProcedureInfo, ProcedureState};
|
||||
use meta_client::MetaClientRef;
|
||||
|
||||
@@ -29,6 +29,7 @@ use crate::information_schema::{InformationExtensionRef, InformationSchemaProvid
|
||||
use crate::kvbackend::manager::{SystemCatalog, CATALOG_CACHE_MAX_CAPACITY};
|
||||
use crate::kvbackend::KvBackendCatalogManager;
|
||||
use crate::process_manager::ProcessManagerRef;
|
||||
use crate::system_schema::numbers_table_provider::NumbersTableProvider;
|
||||
use crate::system_schema::pg_catalog::PGCatalogProvider;
|
||||
|
||||
pub struct KvBackendCatalogManagerBuilder {
|
||||
@@ -119,6 +120,7 @@ impl KvBackendCatalogManagerBuilder {
|
||||
DEFAULT_CATALOG_NAME.to_string(),
|
||||
me.clone(),
|
||||
)),
|
||||
numbers_table_provider: NumbersTableProvider,
|
||||
backend,
|
||||
process_manager,
|
||||
#[cfg(feature = "enterprise")]
|
||||
|
||||
@@ -18,12 +18,12 @@ use std::sync::{Arc, Weak};
|
||||
|
||||
use async_stream::try_stream;
|
||||
use common_catalog::consts::{
|
||||
DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, INFORMATION_SCHEMA_NAME, NUMBERS_TABLE_ID,
|
||||
PG_CATALOG_NAME,
|
||||
DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, INFORMATION_SCHEMA_NAME, PG_CATALOG_NAME,
|
||||
};
|
||||
use common_error::ext::BoxedError;
|
||||
use common_meta::cache::{
|
||||
LayeredCacheRegistryRef, TableRoute, TableRouteCacheRef, ViewInfoCacheRef,
|
||||
LayeredCacheRegistryRef, TableInfoCacheRef, TableNameCacheRef, TableRoute, TableRouteCacheRef,
|
||||
ViewInfoCacheRef,
|
||||
};
|
||||
use common_meta::key::catalog_name::CatalogNameKey;
|
||||
use common_meta::key::flow::FlowMetadataManager;
|
||||
@@ -41,8 +41,7 @@ use session::context::{Channel, QueryContext};
|
||||
use snafu::prelude::*;
|
||||
use store_api::metric_engine_consts::METRIC_ENGINE_NAME;
|
||||
use table::dist_table::DistTable;
|
||||
use table::metadata::TableId;
|
||||
use table::table::numbers::{NumbersTable, NUMBERS_TABLE_NAME};
|
||||
use table::metadata::{TableId, TableInfoRef};
|
||||
use table::table_name::TableName;
|
||||
use table::TableRef;
|
||||
use tokio::sync::Semaphore;
|
||||
@@ -57,6 +56,7 @@ use crate::information_schema::InformationSchemaTableFactoryRef;
|
||||
use crate::information_schema::{InformationExtensionRef, InformationSchemaProvider};
|
||||
use crate::kvbackend::TableCacheRef;
|
||||
use crate::process_manager::ProcessManagerRef;
|
||||
use crate::system_schema::numbers_table_provider::NumbersTableProvider;
|
||||
use crate::system_schema::pg_catalog::PGCatalogProvider;
|
||||
use crate::system_schema::SystemSchemaProvider;
|
||||
use crate::CatalogManager;
|
||||
@@ -325,6 +325,63 @@ impl CatalogManager for KvBackendCatalogManager {
|
||||
Ok(None)
|
||||
}
|
||||
|
||||
async fn table_id(
|
||||
&self,
|
||||
catalog_name: &str,
|
||||
schema_name: &str,
|
||||
table_name: &str,
|
||||
query_ctx: Option<&QueryContext>,
|
||||
) -> Result<Option<TableId>> {
|
||||
let channel = query_ctx.map_or(Channel::Unknown, |ctx| ctx.channel());
|
||||
if let Some(table) =
|
||||
self.system_catalog
|
||||
.table(catalog_name, schema_name, table_name, query_ctx)
|
||||
{
|
||||
return Ok(Some(table.table_info().table_id()));
|
||||
}
|
||||
|
||||
let table_cache: TableNameCacheRef =
|
||||
self.cache_registry.get().context(CacheNotFoundSnafu {
|
||||
name: "table_name_cache",
|
||||
})?;
|
||||
|
||||
let table = table_cache
|
||||
.get_by_ref(&TableName {
|
||||
catalog_name: catalog_name.to_string(),
|
||||
schema_name: schema_name.to_string(),
|
||||
table_name: table_name.to_string(),
|
||||
})
|
||||
.await
|
||||
.context(GetTableCacheSnafu)?;
|
||||
|
||||
if let Some(table) = table {
|
||||
return Ok(Some(table));
|
||||
}
|
||||
|
||||
if channel == Channel::Postgres {
|
||||
// Fall back to pg_catalog.
|
||||
if let Some(table) =
|
||||
self.system_catalog
|
||||
.table(catalog_name, PG_CATALOG_NAME, table_name, query_ctx)
|
||||
{
|
||||
return Ok(Some(table.table_info().table_id()));
|
||||
}
|
||||
}
|
||||
|
||||
Ok(None)
|
||||
}
|
||||
|
||||
async fn table_info_by_id(&self, table_id: TableId) -> Result<Option<TableInfoRef>> {
|
||||
let table_info_cache: TableInfoCacheRef =
|
||||
self.cache_registry.get().context(CacheNotFoundSnafu {
|
||||
name: "table_info_cache",
|
||||
})?;
|
||||
table_info_cache
|
||||
.get_by_ref(&table_id)
|
||||
.await
|
||||
.context(GetTableCacheSnafu)
|
||||
}
|
||||
|
||||
async fn tables_by_ids(
|
||||
&self,
|
||||
catalog: &str,
|
||||
@@ -479,6 +536,7 @@ pub(super) struct SystemCatalog {
|
||||
// system_schema_provider for default catalog
|
||||
pub(super) information_schema_provider: Arc<InformationSchemaProvider>,
|
||||
pub(super) pg_catalog_provider: Arc<PGCatalogProvider>,
|
||||
pub(super) numbers_table_provider: NumbersTableProvider,
|
||||
pub(super) backend: KvBackendRef,
|
||||
pub(super) process_manager: Option<ProcessManagerRef>,
|
||||
#[cfg(feature = "enterprise")]
|
||||
@@ -508,9 +566,7 @@ impl SystemCatalog {
|
||||
PG_CATALOG_NAME if channel == Channel::Postgres => {
|
||||
self.pg_catalog_provider.table_names()
|
||||
}
|
||||
DEFAULT_SCHEMA_NAME => {
|
||||
vec![NUMBERS_TABLE_NAME.to_string()]
|
||||
}
|
||||
DEFAULT_SCHEMA_NAME => self.numbers_table_provider.table_names(),
|
||||
_ => vec![],
|
||||
}
|
||||
}
|
||||
@@ -528,7 +584,7 @@ impl SystemCatalog {
|
||||
if schema == INFORMATION_SCHEMA_NAME {
|
||||
self.information_schema_provider.table(table).is_some()
|
||||
} else if schema == DEFAULT_SCHEMA_NAME {
|
||||
table == NUMBERS_TABLE_NAME
|
||||
self.numbers_table_provider.table_exists(table)
|
||||
} else if schema == PG_CATALOG_NAME && channel == Channel::Postgres {
|
||||
self.pg_catalog_provider.table(table).is_some()
|
||||
} else {
|
||||
@@ -573,8 +629,8 @@ impl SystemCatalog {
|
||||
});
|
||||
pg_catalog_provider.table(table_name)
|
||||
}
|
||||
} else if schema == DEFAULT_SCHEMA_NAME && table_name == NUMBERS_TABLE_NAME {
|
||||
Some(NumbersTable::table(NUMBERS_TABLE_ID))
|
||||
} else if schema == DEFAULT_SCHEMA_NAME {
|
||||
self.numbers_table_provider.table(table_name)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
|
||||
@@ -25,7 +25,7 @@ use common_catalog::consts::{INFORMATION_SCHEMA_NAME, PG_CATALOG_NAME};
|
||||
use futures::future::BoxFuture;
|
||||
use futures_util::stream::BoxStream;
|
||||
use session::context::QueryContext;
|
||||
use table::metadata::TableId;
|
||||
use table::metadata::{TableId, TableInfoRef};
|
||||
use table::TableRef;
|
||||
|
||||
use crate::error::Result;
|
||||
@@ -89,6 +89,23 @@ pub trait CatalogManager: Send + Sync {
|
||||
query_ctx: Option<&QueryContext>,
|
||||
) -> Result<Option<TableRef>>;
|
||||
|
||||
/// Returns the table id of the provided table ident.
|
||||
async fn table_id(
|
||||
&self,
|
||||
catalog: &str,
|
||||
schema: &str,
|
||||
table_name: &str,
|
||||
query_ctx: Option<&QueryContext>,
|
||||
) -> Result<Option<TableId>> {
|
||||
Ok(self
|
||||
.table(catalog, schema, table_name, query_ctx)
|
||||
.await?
|
||||
.map(|t| t.table_info().ident.table_id))
|
||||
}
|
||||
|
||||
/// Returns the table info of the provided table id.
|
||||
async fn table_info_by_id(&self, table_id: TableId) -> Result<Option<TableInfoRef>>;
|
||||
|
||||
/// Returns the tables by table ids.
|
||||
async fn tables_by_ids(
|
||||
&self,
|
||||
|
||||
@@ -28,7 +28,7 @@ use common_meta::kv_backend::memory::MemoryKvBackend;
|
||||
use futures_util::stream::BoxStream;
|
||||
use session::context::QueryContext;
|
||||
use snafu::OptionExt;
|
||||
use table::metadata::TableId;
|
||||
use table::metadata::{TableId, TableInfoRef};
|
||||
use table::TableRef;
|
||||
|
||||
use crate::error::{CatalogNotFoundSnafu, Result, SchemaNotFoundSnafu, TableExistsSnafu};
|
||||
@@ -144,6 +144,18 @@ impl CatalogManager for MemoryCatalogManager {
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
async fn table_info_by_id(&self, table_id: TableId) -> Result<Option<TableInfoRef>> {
|
||||
Ok(self
|
||||
.catalogs
|
||||
.read()
|
||||
.unwrap()
|
||||
.iter()
|
||||
.flat_map(|(_, schema_entries)| schema_entries.values())
|
||||
.flat_map(|tables| tables.values())
|
||||
.find(|t| t.table_info().ident.table_id == table_id)
|
||||
.map(|t| t.table_info()))
|
||||
}
|
||||
|
||||
async fn tables_by_ids(
|
||||
&self,
|
||||
catalog: &str,
|
||||
|
||||
@@ -14,17 +14,24 @@
|
||||
|
||||
use std::collections::hash_map::Entry;
|
||||
use std::collections::HashMap;
|
||||
use std::fmt::{Debug, Formatter};
|
||||
use std::fmt::{Debug, Display, Formatter};
|
||||
use std::sync::atomic::{AtomicU32, Ordering};
|
||||
use std::sync::{Arc, RwLock};
|
||||
use std::time::{Duration, Instant, UNIX_EPOCH};
|
||||
|
||||
use api::v1::frontend::{KillProcessRequest, ListProcessRequest, ProcessInfo};
|
||||
use common_base::cancellation::CancellationHandle;
|
||||
use common_frontend::selector::{FrontendSelector, MetaClientSelector};
|
||||
use common_telemetry::{debug, info, warn};
|
||||
use common_frontend::slow_query_event::SlowQueryEvent;
|
||||
use common_telemetry::{debug, error, info, warn};
|
||||
use common_time::util::current_time_millis;
|
||||
use meta_client::MetaClientRef;
|
||||
use promql_parser::parser::EvalStmt;
|
||||
use rand::random;
|
||||
use session::context::QueryContextRef;
|
||||
use snafu::{ensure, OptionExt, ResultExt};
|
||||
use sql::statements::statement::Statement;
|
||||
use tokio::sync::mpsc::Sender;
|
||||
|
||||
use crate::error;
|
||||
use crate::metrics::{PROCESS_KILL_COUNT, PROCESS_LIST_COUNT};
|
||||
@@ -44,6 +51,23 @@ pub struct ProcessManager {
|
||||
frontend_selector: Option<MetaClientSelector>,
|
||||
}
|
||||
|
||||
/// Represents a parsed query statement, functionally equivalent to [query::parser::QueryStatement].
|
||||
/// This enum is defined here to avoid cyclic dependencies with the query parser module.
|
||||
#[derive(Debug, Clone)]
|
||||
pub enum QueryStatement {
|
||||
Sql(Statement),
|
||||
Promql(EvalStmt),
|
||||
}
|
||||
|
||||
impl Display for QueryStatement {
|
||||
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
QueryStatement::Sql(stmt) => write!(f, "{}", stmt),
|
||||
QueryStatement::Promql(eval_stmt) => write!(f, "{}", eval_stmt),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl ProcessManager {
|
||||
/// Create a [ProcessManager] instance with server address and kv client.
|
||||
pub fn new(server_addr: String, meta_client: Option<MetaClientRef>) -> Self {
|
||||
@@ -67,6 +91,7 @@ impl ProcessManager {
|
||||
query: String,
|
||||
client: String,
|
||||
query_id: Option<ProcessId>,
|
||||
_slow_query_timer: Option<SlowQueryTimer>,
|
||||
) -> Ticket {
|
||||
let id = query_id.unwrap_or_else(|| self.next_id.fetch_add(1, Ordering::Relaxed));
|
||||
let process = ProcessInfo {
|
||||
@@ -93,6 +118,7 @@ impl ProcessManager {
|
||||
manager: self.clone(),
|
||||
id,
|
||||
cancellation_handle,
|
||||
_slow_query_timer,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -223,6 +249,7 @@ pub struct Ticket {
|
||||
pub(crate) manager: ProcessManagerRef,
|
||||
pub(crate) id: ProcessId,
|
||||
pub cancellation_handle: Arc<CancellationHandle>,
|
||||
_slow_query_timer: Option<SlowQueryTimer>,
|
||||
}
|
||||
|
||||
impl Drop for Ticket {
|
||||
@@ -263,6 +290,107 @@ impl Debug for CancellableProcess {
|
||||
}
|
||||
}
|
||||
|
||||
/// SlowQueryTimer logs slow queries when it is dropped.
/// In drop(), it checks whether the query exceeded the slow query threshold and, if so, sends a slow query event to the handler.
|
||||
pub struct SlowQueryTimer {
|
||||
start: Instant,
|
||||
stmt: QueryStatement,
|
||||
query_ctx: QueryContextRef,
|
||||
threshold: Option<Duration>,
|
||||
sample_ratio: Option<f64>,
|
||||
tx: Sender<SlowQueryEvent>,
|
||||
}
|
||||
|
||||
impl SlowQueryTimer {
|
||||
pub fn new(
|
||||
stmt: QueryStatement,
|
||||
query_ctx: QueryContextRef,
|
||||
threshold: Option<Duration>,
|
||||
sample_ratio: Option<f64>,
|
||||
tx: Sender<SlowQueryEvent>,
|
||||
) -> Self {
|
||||
Self {
|
||||
start: Instant::now(),
|
||||
stmt,
|
||||
query_ctx,
|
||||
threshold,
|
||||
sample_ratio,
|
||||
tx,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl SlowQueryTimer {
|
||||
fn send_slow_query_event(&self, elapsed: Duration, threshold: Duration) {
|
||||
let mut slow_query_event = SlowQueryEvent {
|
||||
cost: elapsed.as_millis() as u64,
|
||||
threshold: threshold.as_millis() as u64,
|
||||
query: "".to_string(),
|
||||
query_ctx: self.query_ctx.clone(),
|
||||
|
||||
// The following fields are only used for PromQL queries.
|
||||
is_promql: false,
|
||||
promql_range: None,
|
||||
promql_step: None,
|
||||
promql_start: None,
|
||||
promql_end: None,
|
||||
};
|
||||
|
||||
match &self.stmt {
|
||||
QueryStatement::Promql(stmt) => {
|
||||
slow_query_event.is_promql = true;
|
||||
slow_query_event.query = stmt.expr.to_string();
|
||||
slow_query_event.promql_step = Some(stmt.interval.as_millis() as u64);
|
||||
|
||||
let start = stmt
|
||||
.start
|
||||
.duration_since(UNIX_EPOCH)
|
||||
.unwrap_or_default()
|
||||
.as_millis() as i64;
|
||||
|
||||
let end = stmt
|
||||
.end
|
||||
.duration_since(UNIX_EPOCH)
|
||||
.unwrap_or_default()
|
||||
.as_millis() as i64;
|
||||
|
||||
slow_query_event.promql_range = Some((end - start) as u64);
|
||||
slow_query_event.promql_start = Some(start);
|
||||
slow_query_event.promql_end = Some(end);
|
||||
}
|
||||
QueryStatement::Sql(stmt) => {
|
||||
slow_query_event.query = stmt.to_string();
|
||||
}
|
||||
}
|
||||
|
||||
// Send SlowQueryEvent to the handler.
|
||||
if let Err(e) = self.tx.try_send(slow_query_event) {
|
||||
error!(e; "Failed to send slow query event");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for SlowQueryTimer {
|
||||
fn drop(&mut self) {
|
||||
if let Some(threshold) = self.threshold {
|
||||
// Calculate the elapsed duration since the timer was created.
|
||||
let elapsed = self.start.elapsed();
|
||||
if elapsed > threshold {
|
||||
if let Some(ratio) = self.sample_ratio {
|
||||
// Only capture a portion of slow queries based on sample_ratio.
|
||||
// Generate a random number in [0, 1) and compare it with sample_ratio.
|
||||
if ratio >= 1.0 || random::<f64>() <= ratio {
|
||||
self.send_slow_query_event(elapsed, threshold);
|
||||
}
|
||||
} else {
|
||||
// Capture all slow queries if sample_ratio is not set.
|
||||
self.send_slow_query_event(elapsed, threshold);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::sync::Arc;
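
    To make the sampling rule in SlowQueryTimer::drop concrete, here is a minimal sketch of the same decision logic as a standalone helper (illustrative only, not part of this commit; `should_report_slow_query` is a hypothetical name):

    // Hypothetical sketch of the sampling rule applied in SlowQueryTimer::drop:
    // always report when no ratio is configured or when ratio >= 1.0, otherwise
    // report with probability `ratio`.
    fn should_report_slow_query(sample_ratio: Option<f64>) -> bool {
        match sample_ratio {
            Some(ratio) => ratio >= 1.0 || rand::random::<f64>() <= ratio,
            None => true,
        }
    }

    #[test]
    fn test_slow_query_sampling_sketch() {
        // Only the deterministic branches are asserted here.
        assert!(should_report_slow_query(None));
        assert!(should_report_slow_query(Some(1.0)));
        assert!(should_report_slow_query(Some(2.5)));
    }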
|
||||
@@ -278,6 +406,7 @@ mod tests {
|
||||
"SELECT * FROM table".to_string(),
|
||||
"".to_string(),
|
||||
None,
|
||||
None,
|
||||
);
|
||||
|
||||
let running_processes = process_manager.local_processes(None).unwrap();
|
||||
@@ -301,6 +430,7 @@ mod tests {
|
||||
"SELECT * FROM table".to_string(),
|
||||
"client1".to_string(),
|
||||
Some(custom_id),
|
||||
None,
|
||||
);
|
||||
|
||||
assert_eq!(ticket.id, custom_id);
|
||||
@@ -321,6 +451,7 @@ mod tests {
|
||||
"SELECT * FROM table1".to_string(),
|
||||
"client1".to_string(),
|
||||
None,
|
||||
None,
|
||||
);
|
||||
|
||||
let ticket2 = process_manager.clone().register_query(
|
||||
@@ -329,6 +460,7 @@ mod tests {
|
||||
"SELECT * FROM table2".to_string(),
|
||||
"client2".to_string(),
|
||||
None,
|
||||
None,
|
||||
);
|
||||
|
||||
let running_processes = process_manager.local_processes(Some("public")).unwrap();
|
||||
@@ -350,6 +482,7 @@ mod tests {
|
||||
"SELECT * FROM table1".to_string(),
|
||||
"client1".to_string(),
|
||||
None,
|
||||
None,
|
||||
);
|
||||
|
||||
let _ticket2 = process_manager.clone().register_query(
|
||||
@@ -358,6 +491,7 @@ mod tests {
|
||||
"SELECT * FROM table2".to_string(),
|
||||
"client2".to_string(),
|
||||
None,
|
||||
None,
|
||||
);
|
||||
|
||||
// Test listing processes for specific catalog
|
||||
@@ -384,6 +518,7 @@ mod tests {
|
||||
"SELECT * FROM table".to_string(),
|
||||
"client1".to_string(),
|
||||
None,
|
||||
None,
|
||||
);
|
||||
assert_eq!(process_manager.local_processes(None).unwrap().len(), 1);
|
||||
process_manager.deregister_query("public".to_string(), ticket.id);
|
||||
@@ -400,6 +535,7 @@ mod tests {
|
||||
"SELECT * FROM table".to_string(),
|
||||
"client1".to_string(),
|
||||
None,
|
||||
None,
|
||||
);
|
||||
|
||||
assert!(!ticket.cancellation_handle.is_cancelled());
|
||||
@@ -417,6 +553,7 @@ mod tests {
|
||||
"SELECT * FROM table".to_string(),
|
||||
"client1".to_string(),
|
||||
None,
|
||||
None,
|
||||
);
|
||||
assert!(!ticket.cancellation_handle.is_cancelled());
|
||||
let killed = process_manager
|
||||
@@ -462,6 +599,7 @@ mod tests {
|
||||
"SELECT COUNT(*) FROM users WHERE age > 18".to_string(),
|
||||
"test_client".to_string(),
|
||||
Some(42),
|
||||
None,
|
||||
);
|
||||
|
||||
let processes = process_manager.local_processes(None).unwrap();
|
||||
@@ -488,6 +626,7 @@ mod tests {
|
||||
"SELECT * FROM table".to_string(),
|
||||
"client1".to_string(),
|
||||
None,
|
||||
None,
|
||||
);
|
||||
|
||||
// Process should be registered
|
||||
|
||||
@@ -14,6 +14,7 @@
|
||||
|
||||
pub mod information_schema;
|
||||
mod memory_table;
|
||||
pub mod numbers_table_provider;
|
||||
pub mod pg_catalog;
|
||||
pub mod predicate;
|
||||
mod utils;
|
||||
|
||||
@@ -162,6 +162,16 @@ impl SystemSchemaProviderInner for InformationSchemaProvider {
|
||||
}
|
||||
|
||||
fn system_table(&self, name: &str) -> Option<SystemTableRef> {
|
||||
#[cfg(feature = "enterprise")]
|
||||
if let Some(factory) = self.extra_table_factories.get(name) {
|
||||
let req = MakeInformationTableRequest {
|
||||
catalog_name: self.catalog_name.clone(),
|
||||
catalog_manager: self.catalog_manager.clone(),
|
||||
kv_backend: self.kv_backend.clone(),
|
||||
};
|
||||
return Some(factory.make_information_table(req));
|
||||
}
|
||||
|
||||
match name.to_ascii_lowercase().as_str() {
|
||||
TABLES => Some(Arc::new(InformationSchemaTables::new(
|
||||
self.catalog_name.clone(),
|
||||
@@ -240,22 +250,7 @@ impl SystemSchemaProviderInner for InformationSchemaProvider {
|
||||
.process_manager
|
||||
.as_ref()
|
||||
.map(|p| Arc::new(InformationSchemaProcessList::new(p.clone())) as _),
|
||||
table_name => {
|
||||
#[cfg(feature = "enterprise")]
|
||||
return self.extra_table_factories.get(table_name).map(|factory| {
|
||||
let req = MakeInformationTableRequest {
|
||||
catalog_name: self.catalog_name.clone(),
|
||||
catalog_manager: self.catalog_manager.clone(),
|
||||
kv_backend: self.kv_backend.clone(),
|
||||
};
|
||||
factory.make_information_table(req)
|
||||
});
|
||||
#[cfg(not(feature = "enterprise"))]
|
||||
{
|
||||
let _ = table_name;
|
||||
None
|
||||
}
|
||||
}
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -15,7 +15,8 @@
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_catalog::consts::{METRIC_ENGINE, MITO_ENGINE};
|
||||
use datatypes::schema::{Schema, SchemaRef};
|
||||
use datatypes::data_type::ConcreteDataType;
|
||||
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
|
||||
use datatypes::vectors::{Int64Vector, StringVector, VectorRef};
|
||||
|
||||
use crate::system_schema::information_schema::table_names::*;
|
||||
@@ -367,28 +368,18 @@ pub(super) fn get_schema_columns(table_name: &str) -> (SchemaRef, Vec<VectorRef>
|
||||
|
||||
TRIGGERS => (
|
||||
vec![
|
||||
string_column("TRIGGER_CATALOG"),
|
||||
string_column("TRIGGER_SCHEMA"),
|
||||
string_column("TRIGGER_NAME"),
|
||||
string_column("EVENT_MANIPULATION"),
|
||||
string_column("EVENT_OBJECT_CATALOG"),
|
||||
string_column("EVENT_OBJECT_SCHEMA"),
|
||||
string_column("EVENT_OBJECT_TABLE"),
|
||||
bigint_column("ACTION_ORDER"),
|
||||
string_column("ACTION_CONDITION"),
|
||||
string_column("ACTION_STATEMENT"),
|
||||
string_column("ACTION_ORIENTATION"),
|
||||
string_column("ACTION_TIMING"),
|
||||
string_column("ACTION_REFERENCE_OLD_TABLE"),
|
||||
string_column("ACTION_REFERENCE_NEW_TABLE"),
|
||||
string_column("ACTION_REFERENCE_OLD_ROW"),
|
||||
string_column("ACTION_REFERENCE_NEW_ROW"),
|
||||
timestamp_micro_column("CREATED"),
|
||||
string_column("SQL_MODE"),
|
||||
string_column("DEFINER"),
|
||||
string_column("CHARACTER_SET_CLIENT"),
|
||||
string_column("COLLATION_CONNECTION"),
|
||||
string_column("DATABASE_COLLATION"),
|
||||
ColumnSchema::new(
|
||||
"trigger_id",
|
||||
ConcreteDataType::uint64_datatype(),
|
||||
false,
|
||||
),
|
||||
string_column("TRIGGER_DEFINITION"),
|
||||
ColumnSchema::new(
|
||||
"flownode_id",
|
||||
ConcreteDataType::uint64_datatype(),
|
||||
true,
|
||||
),
|
||||
],
|
||||
vec![],
|
||||
),
|
||||
|
||||
@@ -329,13 +329,8 @@ impl InformationSchemaPartitionsBuilder {
|
||||
self.partition_names.push(Some(&partition_name));
|
||||
self.partition_ordinal_positions
|
||||
.push(Some((index + 1) as i64));
|
||||
let expressions = if partition.partition.partition_columns().is_empty() {
|
||||
None
|
||||
} else {
|
||||
Some(partition.partition.to_string())
|
||||
};
|
||||
|
||||
self.partition_expressions.push(expressions.as_deref());
|
||||
let expression = partition.partition_expr.as_ref().map(|e| e.to_string());
|
||||
self.partition_expressions.push(expression.as_deref());
|
||||
self.create_times.push(Some(TimestampMicrosecond::from(
|
||||
table_info.meta.created_on.timestamp_millis(),
|
||||
)));
|
||||
|
||||
@@ -44,6 +44,7 @@ const DISK_SIZE: &str = "disk_size";
|
||||
const MEMTABLE_SIZE: &str = "memtable_size";
|
||||
const MANIFEST_SIZE: &str = "manifest_size";
|
||||
const SST_SIZE: &str = "sst_size";
|
||||
const SST_NUM: &str = "sst_num";
|
||||
const INDEX_SIZE: &str = "index_size";
|
||||
const ENGINE: &str = "engine";
|
||||
const REGION_ROLE: &str = "region_role";
|
||||
@@ -87,6 +88,7 @@ impl InformationSchemaRegionStatistics {
|
||||
ColumnSchema::new(MEMTABLE_SIZE, ConcreteDataType::uint64_datatype(), true),
|
||||
ColumnSchema::new(MANIFEST_SIZE, ConcreteDataType::uint64_datatype(), true),
|
||||
ColumnSchema::new(SST_SIZE, ConcreteDataType::uint64_datatype(), true),
|
||||
ColumnSchema::new(SST_NUM, ConcreteDataType::uint64_datatype(), true),
|
||||
ColumnSchema::new(INDEX_SIZE, ConcreteDataType::uint64_datatype(), true),
|
||||
ColumnSchema::new(ENGINE, ConcreteDataType::string_datatype(), true),
|
||||
ColumnSchema::new(REGION_ROLE, ConcreteDataType::string_datatype(), true),
|
||||
@@ -149,6 +151,7 @@ struct InformationSchemaRegionStatisticsBuilder {
|
||||
memtable_sizes: UInt64VectorBuilder,
|
||||
manifest_sizes: UInt64VectorBuilder,
|
||||
sst_sizes: UInt64VectorBuilder,
|
||||
sst_nums: UInt64VectorBuilder,
|
||||
index_sizes: UInt64VectorBuilder,
|
||||
engines: StringVectorBuilder,
|
||||
region_roles: StringVectorBuilder,
|
||||
@@ -167,6 +170,7 @@ impl InformationSchemaRegionStatisticsBuilder {
|
||||
memtable_sizes: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
manifest_sizes: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
sst_sizes: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
sst_nums: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
index_sizes: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
engines: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
region_roles: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
@@ -197,6 +201,7 @@ impl InformationSchemaRegionStatisticsBuilder {
|
||||
(MEMTABLE_SIZE, &Value::from(region_stat.memtable_size)),
|
||||
(MANIFEST_SIZE, &Value::from(region_stat.manifest_size)),
|
||||
(SST_SIZE, &Value::from(region_stat.sst_size)),
|
||||
(SST_NUM, &Value::from(region_stat.sst_num)),
|
||||
(INDEX_SIZE, &Value::from(region_stat.index_size)),
|
||||
(ENGINE, &Value::from(region_stat.engine.as_str())),
|
||||
(REGION_ROLE, &Value::from(region_stat.role.to_string())),
|
||||
@@ -215,6 +220,7 @@ impl InformationSchemaRegionStatisticsBuilder {
|
||||
self.memtable_sizes.push(Some(region_stat.memtable_size));
|
||||
self.manifest_sizes.push(Some(region_stat.manifest_size));
|
||||
self.sst_sizes.push(Some(region_stat.sst_size));
|
||||
self.sst_nums.push(Some(region_stat.sst_num));
|
||||
self.index_sizes.push(Some(region_stat.index_size));
|
||||
self.engines.push(Some(®ion_stat.engine));
|
||||
self.region_roles.push(Some(®ion_stat.role.to_string()));
|
||||
@@ -230,6 +236,7 @@ impl InformationSchemaRegionStatisticsBuilder {
|
||||
Arc::new(self.memtable_sizes.finish()),
|
||||
Arc::new(self.manifest_sizes.finish()),
|
||||
Arc::new(self.sst_sizes.finish()),
|
||||
Arc::new(self.sst_nums.finish()),
|
||||
Arc::new(self.index_sizes.finish()),
|
||||
Arc::new(self.engines.finish()),
|
||||
Arc::new(self.region_roles.finish()),
|
||||
|
||||
@@ -48,4 +48,3 @@ pub const FLOWS: &str = "flows";
|
||||
pub const PROCEDURE_INFO: &str = "procedure_info";
|
||||
pub const REGION_STATISTICS: &str = "region_statistics";
|
||||
pub const PROCESS_LIST: &str = "process_list";
|
||||
pub const TRIGGER_LIST: &str = "trigger_list";
|
||||
|
||||
src/catalog/src/system_schema/numbers_table_provider.rs (new file, 59 lines)
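The new provider below feature-gates the numbers table so it is only resolvable in test and debug builds; a hedged caller-side sketch (illustrative only; the literal table name "numbers" is assumed to match `NUMBERS_TABLE_NAME`):

// Hypothetical caller-side sketch: resolves the numbers table through the provider,
// which yields Some(table) in test/debug builds and None in release builds.
fn resolve_numbers_table(provider: &NumbersTableProvider) -> Option<TableRef> {
    if provider.table_exists("numbers") {
        provider.table("numbers")
    } else {
        None
    }
}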
@@ -0,0 +1,59 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#[cfg(any(test, feature = "testing", debug_assertions))]
|
||||
use common_catalog::consts::NUMBERS_TABLE_ID;
|
||||
#[cfg(any(test, feature = "testing", debug_assertions))]
|
||||
use table::table::numbers::NumbersTable;
|
||||
#[cfg(any(test, feature = "testing", debug_assertions))]
|
||||
use table::table::numbers::NUMBERS_TABLE_NAME;
|
||||
use table::TableRef;
|
||||
|
||||
/// A dedicated provider that feature-gates the numbers table behind test and debug builds.
|
||||
#[derive(Clone)]
|
||||
pub struct NumbersTableProvider;
|
||||
|
||||
#[cfg(any(test, feature = "testing", debug_assertions))]
|
||||
impl NumbersTableProvider {
|
||||
pub(crate) fn table_exists(&self, name: &str) -> bool {
|
||||
name == NUMBERS_TABLE_NAME
|
||||
}
|
||||
|
||||
pub(crate) fn table_names(&self) -> Vec<String> {
|
||||
vec![NUMBERS_TABLE_NAME.to_string()]
|
||||
}
|
||||
|
||||
pub(crate) fn table(&self, name: &str) -> Option<TableRef> {
|
||||
if name == NUMBERS_TABLE_NAME {
|
||||
Some(NumbersTable::table(NUMBERS_TABLE_ID))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(not(any(test, feature = "testing", debug_assertions)))]
|
||||
impl NumbersTableProvider {
|
||||
pub(crate) fn table_exists(&self, _name: &str) -> bool {
|
||||
false
|
||||
}
|
||||
|
||||
pub(crate) fn table_names(&self) -> Vec<String> {
|
||||
vec![]
|
||||
}
|
||||
|
||||
pub(crate) fn table(&self, _name: &str) -> Option<TableRef> {
|
||||
None
|
||||
}
|
||||
}
|
||||
@@ -70,6 +70,11 @@ impl DfTableSourceProvider {
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the query context.
|
||||
pub fn query_ctx(&self) -> &QueryContextRef {
|
||||
&self.query_ctx
|
||||
}
|
||||
|
||||
pub fn resolve_table_ref(&self, table_ref: TableReference) -> Result<ResolvedTableReference> {
|
||||
if self.disallow_cross_catalog_query {
|
||||
match &table_ref {
|
||||
|
||||
@@ -188,6 +188,7 @@ fn create_region_routes(regions: Vec<RegionNumber>) -> Vec<RegionRoute> {
|
||||
name: String::new(),
|
||||
partition: None,
|
||||
attrs: BTreeMap::new(),
|
||||
partition_expr: Default::default(),
|
||||
},
|
||||
leader_peer: Some(Peer {
|
||||
id: rng.random_range(0..10),
|
||||
|
||||
@@ -20,7 +20,7 @@ use common_meta::kv_backend::chroot::ChrootKvBackend;
|
||||
use common_meta::kv_backend::etcd::EtcdStore;
|
||||
use common_meta::kv_backend::KvBackendRef;
|
||||
use meta_srv::bootstrap::create_etcd_client;
|
||||
use meta_srv::metasrv::BackendImpl;
|
||||
use meta_srv::metasrv::{BackendClientOptions, BackendImpl};
|
||||
|
||||
use crate::error::{EmptyStoreAddrsSnafu, UnsupportedMemoryBackendSnafu};
|
||||
|
||||
@@ -67,15 +67,16 @@ impl StoreConfig {
|
||||
} else {
|
||||
let kvbackend = match self.backend {
|
||||
BackendImpl::EtcdStore => {
|
||||
let etcd_client = create_etcd_client(store_addrs)
|
||||
.await
|
||||
.map_err(BoxedError::new)?;
|
||||
let etcd_client =
|
||||
create_etcd_client(store_addrs, &BackendClientOptions::default())
|
||||
.await
|
||||
.map_err(BoxedError::new)?;
|
||||
Ok(EtcdStore::with_etcd_client(etcd_client, max_txn_ops))
|
||||
}
|
||||
#[cfg(feature = "pg_kvbackend")]
|
||||
BackendImpl::PostgresStore => {
|
||||
let table_name = &self.meta_table_name;
|
||||
let pool = meta_srv::bootstrap::create_postgres_pool(store_addrs)
|
||||
let pool = meta_srv::bootstrap::create_postgres_pool(store_addrs, None)
|
||||
.await
|
||||
.map_err(BoxedError::new)?;
|
||||
Ok(common_meta::kv_backend::rds::PgStore::with_pg_pool(
|
||||
|
||||
@@ -29,7 +29,7 @@ use futures::TryStreamExt;
|
||||
|
||||
use crate::error::InvalidArgumentsSnafu;
|
||||
use crate::metadata::common::StoreConfig;
|
||||
use crate::metadata::control::utils::{decode_key_value, get_table_id_by_name, json_fromatter};
|
||||
use crate::metadata::control::utils::{decode_key_value, get_table_id_by_name, json_formatter};
|
||||
use crate::Tool;
|
||||
|
||||
/// Getting metadata from metadata store.
|
||||
@@ -206,7 +206,7 @@ impl Tool for GetTableTool {
|
||||
println!(
|
||||
"{}\n{}",
|
||||
TableInfoKey::new(table_id),
|
||||
json_fromatter(self.pretty, &*table_info)
|
||||
json_formatter(self.pretty, &*table_info)
|
||||
);
|
||||
} else {
|
||||
println!("Table info not found");
|
||||
@@ -221,7 +221,7 @@ impl Tool for GetTableTool {
|
||||
println!(
|
||||
"{}\n{}",
|
||||
TableRouteKey::new(table_id),
|
||||
json_fromatter(self.pretty, &table_route)
|
||||
json_formatter(self.pretty, &table_route)
|
||||
);
|
||||
} else {
|
||||
println!("Table route not found");
|
||||
|
||||
@@ -27,7 +27,7 @@ pub fn decode_key_value(kv: KeyValue) -> CommonMetaResult<(String, String)> {
|
||||
}
|
||||
|
||||
/// Formats a value as a JSON string.
|
||||
pub fn json_fromatter<T>(pretty: bool, value: &T) -> String
|
||||
pub fn json_formatter<T>(pretty: bool, value: &T) -> String
|
||||
where
|
||||
T: Serialize,
|
||||
{
|
||||
|
||||
@@ -42,7 +42,7 @@ use common_telemetry::{error, warn};
|
||||
use futures::future;
|
||||
use futures_util::{Stream, StreamExt, TryStreamExt};
|
||||
use prost::Message;
|
||||
use snafu::{ensure, ResultExt};
|
||||
use snafu::{ensure, OptionExt, ResultExt};
|
||||
use tonic::metadata::{AsciiMetadataKey, AsciiMetadataValue, MetadataMap, MetadataValue};
|
||||
use tonic::transport::Channel;
|
||||
|
||||
@@ -250,12 +250,18 @@ impl Database {
|
||||
retries += 1;
|
||||
warn!("Retrying {} times with error = {:?}", retries, err);
|
||||
continue;
|
||||
} else {
|
||||
error!(
|
||||
err; "Failed to send request to grpc handle, retries = {}, not retryable error, aborting",
|
||||
retries
|
||||
);
|
||||
return Err(err.into());
|
||||
}
|
||||
}
|
||||
(Err(err), false) => {
|
||||
error!(
|
||||
"Failed to send request to grpc handle after {} retries, error = {:?}",
|
||||
retries, err
|
||||
err; "Failed to send request to grpc handle after {} retries",
|
||||
retries,
|
||||
);
|
||||
return Err(err.into());
|
||||
}
|
||||
@@ -355,7 +361,10 @@ impl Database {
|
||||
let mut flight_message_stream = flight_data_stream.map(move |flight_data| {
|
||||
flight_data
|
||||
.map_err(Error::from)
|
||||
.and_then(|data| decoder.try_decode(&data).context(ConvertFlightDataSnafu))
|
||||
.and_then(|data| decoder.try_decode(&data).context(ConvertFlightDataSnafu))?
|
||||
.context(IllegalFlightMessagesSnafu {
|
||||
reason: "none message",
|
||||
})
|
||||
});
|
||||
|
||||
let Some(first_flight_message) = flight_message_stream.next().await else {
|
||||
|
||||
@@ -128,7 +128,10 @@ impl RegionRequester {
|
||||
let mut flight_message_stream = flight_data_stream.map(move |flight_data| {
|
||||
flight_data
|
||||
.map_err(Error::from)
|
||||
.and_then(|data| decoder.try_decode(&data).context(ConvertFlightDataSnafu))
|
||||
.and_then(|data| decoder.try_decode(&data).context(ConvertFlightDataSnafu))?
|
||||
.context(IllegalFlightMessagesSnafu {
|
||||
reason: "none message",
|
||||
})
|
||||
});
|
||||
|
||||
let Some(first_flight_message) = flight_message_stream.next().await else {
|
||||
@@ -157,19 +160,70 @@ impl RegionRequester {
|
||||
let _span = tracing_context.attach(common_telemetry::tracing::info_span!(
|
||||
"poll_flight_data_stream"
|
||||
));
|
||||
while let Some(flight_message) = flight_message_stream.next().await {
|
||||
let flight_message = flight_message
|
||||
.map_err(BoxedError::new)
|
||||
.context(ExternalSnafu)?;
|
||||
|
||||
let mut buffered_message: Option<FlightMessage> = None;
|
||||
let mut stream_ended = false;
|
||||
|
||||
while !stream_ended {
|
||||
// Get the next message from the buffer, or read the next one from the flight message stream.
|
||||
let flight_message_item = if let Some(msg) = buffered_message.take() {
|
||||
Some(Ok(msg))
|
||||
} else {
|
||||
flight_message_stream.next().await
|
||||
};
|
||||
|
||||
let flight_message = match flight_message_item {
|
||||
Some(Ok(message)) => message,
|
||||
Some(Err(e)) => {
|
||||
yield Err(BoxedError::new(e)).context(ExternalSnafu);
|
||||
break;
|
||||
}
|
||||
None => break,
|
||||
};
|
||||
|
||||
match flight_message {
|
||||
FlightMessage::RecordBatch(record_batch) => {
|
||||
yield RecordBatch::try_from_df_record_batch(
|
||||
let result_to_yield = RecordBatch::try_from_df_record_batch(
|
||||
schema_cloned.clone(),
|
||||
record_batch,
|
||||
)
|
||||
);
|
||||
|
||||
// Get the next message from the stream; normally it should be a metrics message.
|
||||
if let Some(next_flight_message_result) = flight_message_stream.next().await
|
||||
{
|
||||
match next_flight_message_result {
|
||||
Ok(FlightMessage::Metrics(s)) => {
|
||||
let m = serde_json::from_str(&s).ok().map(Arc::new);
|
||||
metrics_ref.swap(m);
|
||||
}
|
||||
Ok(FlightMessage::RecordBatch(rb)) => {
|
||||
// For some reason it's not a metrics message, so buffer this record batch
|
||||
// and yield it in the next iteration.
|
||||
buffered_message = Some(FlightMessage::RecordBatch(rb));
|
||||
}
|
||||
Ok(_) => {
|
||||
yield IllegalFlightMessagesSnafu {
|
||||
reason: "A RecordBatch message can only be succeeded by a Metrics message or another RecordBatch message"
|
||||
}
|
||||
.fail()
|
||||
.map_err(BoxedError::new)
|
||||
.context(ExternalSnafu);
|
||||
break;
|
||||
}
|
||||
Err(e) => {
|
||||
yield Err(BoxedError::new(e)).context(ExternalSnafu);
|
||||
break;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// The stream has ended.
|
||||
stream_ended = true;
|
||||
}
|
||||
|
||||
yield result_to_yield;
|
||||
}
|
||||
FlightMessage::Metrics(s) => {
|
||||
// Just a branch in case a metrics message arrives after other messages.
|
||||
let m = serde_json::from_str(&s).ok().map(Arc::new);
|
||||
metrics_ref.swap(m);
|
||||
break;
|
||||
|
||||
@@ -38,6 +38,7 @@ common-config.workspace = true
|
||||
common-error.workspace = true
|
||||
common-grpc.workspace = true
|
||||
common-macro.workspace = true
|
||||
common-mem-prof.workspace = true
|
||||
common-meta.workspace = true
|
||||
common-options.workspace = true
|
||||
common-procedure.workspace = true
|
||||
|
||||
@@ -28,7 +28,7 @@ use tracing_appender::non_blocking::WorkerGuard;
|
||||
|
||||
use crate::datanode::{DatanodeOptions, Instance, APP_NAME};
|
||||
use crate::error::{MetaClientInitSnafu, MissingConfigSnafu, Result, StartDatanodeSnafu};
|
||||
use crate::{create_resource_limit_metrics, log_versions};
|
||||
use crate::{create_resource_limit_metrics, log_versions, maybe_activate_heap_profile};
|
||||
|
||||
/// Builder for Datanode instance.
|
||||
pub struct InstanceBuilder {
|
||||
@@ -68,6 +68,7 @@ impl InstanceBuilder {
|
||||
);
|
||||
|
||||
log_versions(verbose_version(), short_version(), APP_NAME);
|
||||
maybe_activate_heap_profile(&dn_opts.memory);
|
||||
create_resource_limit_metrics(APP_NAME);
|
||||
|
||||
plugins::setup_datanode_plugins(plugins, &opts.plugins, dn_opts)
|
||||
|
||||
@@ -46,7 +46,7 @@ use crate::error::{
|
||||
MissingConfigSnafu, Result, ShutdownFlownodeSnafu, StartFlownodeSnafu,
|
||||
};
|
||||
use crate::options::{GlobalOptions, GreptimeOptions};
|
||||
use crate::{create_resource_limit_metrics, log_versions, App};
|
||||
use crate::{create_resource_limit_metrics, log_versions, maybe_activate_heap_profile, App};
|
||||
|
||||
pub const APP_NAME: &str = "greptime-flownode";
|
||||
|
||||
@@ -280,6 +280,7 @@ impl StartCommand {
|
||||
);
|
||||
|
||||
log_versions(verbose_version(), short_version(), APP_NAME);
|
||||
maybe_activate_heap_profile(&opts.component.memory);
|
||||
create_resource_limit_metrics(APP_NAME);
|
||||
|
||||
info!("Flownode start command: {:#?}", self);
|
||||
@@ -374,6 +375,7 @@ impl StartCommand {
|
||||
meta_client.clone(),
|
||||
flow_auth_header,
|
||||
opts.query.clone(),
|
||||
opts.flow.batching_mode.clone(),
|
||||
);
|
||||
let frontend_client = Arc::new(frontend_client);
|
||||
let flownode_builder = FlownodeBuilder::new(
|
||||
|
||||
@@ -47,7 +47,7 @@ use tracing_appender::non_blocking::WorkerGuard;
|
||||
|
||||
use crate::error::{self, Result};
|
||||
use crate::options::{GlobalOptions, GreptimeOptions};
|
||||
use crate::{create_resource_limit_metrics, log_versions, App};
|
||||
use crate::{create_resource_limit_metrics, log_versions, maybe_activate_heap_profile, App};
|
||||
|
||||
type FrontendOptions = GreptimeOptions<frontend::frontend::FrontendOptions>;
|
||||
|
||||
@@ -283,6 +283,7 @@ impl StartCommand {
|
||||
);
|
||||
|
||||
log_versions(verbose_version(), short_version(), APP_NAME);
|
||||
maybe_activate_heap_profile(&opts.component.memory);
|
||||
create_resource_limit_metrics(APP_NAME);
|
||||
|
||||
info!("Frontend start command: {:#?}", self);
|
||||
|
||||
@@ -15,7 +15,10 @@
|
||||
#![feature(assert_matches, let_chains)]
|
||||
|
||||
use async_trait::async_trait;
|
||||
use common_telemetry::{error, info};
|
||||
use common_error::ext::ErrorExt;
|
||||
use common_error::status_code::StatusCode;
|
||||
use common_mem_prof::activate_heap_profile;
|
||||
use common_telemetry::{error, info, warn};
|
||||
use stat::{get_cpu_limit, get_memory_limit};
|
||||
|
||||
use crate::error::Result;
|
||||
@@ -145,3 +148,20 @@ fn log_env_flags() {
|
||||
info!("argument: {}", argument);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn maybe_activate_heap_profile(memory_options: &common_options::memory::MemoryOptions) {
|
||||
if memory_options.enable_heap_profiling {
|
||||
match activate_heap_profile() {
|
||||
Ok(()) => {
|
||||
info!("Heap profile is active");
|
||||
}
|
||||
Err(err) => {
|
||||
if err.status_code() == StatusCode::Unsupported {
|
||||
info!("Heap profile is not supported");
|
||||
} else {
|
||||
warn!(err; "Failed to activate heap profile");
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -20,6 +20,7 @@ use async_trait::async_trait;
|
||||
use clap::Parser;
|
||||
use common_base::Plugins;
|
||||
use common_config::Configurable;
|
||||
use common_meta::distributed_time_constants::init_distributed_time_constants;
|
||||
use common_telemetry::info;
|
||||
use common_telemetry::logging::{TracingOptions, DEFAULT_LOGGING_DIR};
|
||||
use common_version::{short_version, verbose_version};
|
||||
@@ -30,7 +31,7 @@ use tracing_appender::non_blocking::WorkerGuard;
|
||||
|
||||
use crate::error::{self, LoadLayeredConfigSnafu, Result, StartMetaServerSnafu};
|
||||
use crate::options::{GlobalOptions, GreptimeOptions};
|
||||
use crate::{create_resource_limit_metrics, log_versions, App};
|
||||
use crate::{create_resource_limit_metrics, log_versions, maybe_activate_heap_profile, App};
|
||||
|
||||
type MetasrvOptions = GreptimeOptions<meta_srv::metasrv::MetasrvOptions>;
|
||||
|
||||
@@ -325,7 +326,9 @@ impl StartCommand {
|
||||
);
|
||||
|
||||
log_versions(verbose_version(), short_version(), APP_NAME);
|
||||
maybe_activate_heap_profile(&opts.component.memory);
|
||||
create_resource_limit_metrics(APP_NAME);
|
||||
init_distributed_time_constants(opts.component.heartbeat_interval);
|
||||
|
||||
info!("Metasrv start command: {:#?}", self);
|
||||
|
||||
|
||||
@@ -34,17 +34,19 @@ use common_meta::cluster::{NodeInfo, NodeStatus};
|
||||
use common_meta::datanode::RegionStat;
|
||||
use common_meta::ddl::flow_meta::FlowMetadataAllocator;
|
||||
use common_meta::ddl::table_meta::TableMetadataAllocator;
|
||||
use common_meta::ddl::{DdlContext, NoopRegionFailureDetectorControl, ProcedureExecutorRef};
|
||||
use common_meta::ddl::{DdlContext, NoopRegionFailureDetectorControl};
|
||||
use common_meta::ddl_manager::DdlManager;
|
||||
use common_meta::key::flow::flow_state::FlowStat;
|
||||
use common_meta::key::flow::FlowMetadataManager;
|
||||
use common_meta::key::{TableMetadataManager, TableMetadataManagerRef};
|
||||
use common_meta::kv_backend::KvBackendRef;
|
||||
use common_meta::peer::Peer;
|
||||
use common_meta::procedure_executor::LocalProcedureExecutor;
|
||||
use common_meta::region_keeper::MemoryRegionKeeper;
|
||||
use common_meta::region_registry::LeaderRegionRegistry;
|
||||
use common_meta::sequence::SequenceBuilder;
|
||||
use common_meta::wal_options_allocator::{build_wal_options_allocator, WalOptionsAllocatorRef};
|
||||
use common_options::memory::MemoryOptions;
|
||||
use common_procedure::{ProcedureInfo, ProcedureManagerRef};
|
||||
use common_telemetry::info;
|
||||
use common_telemetry::logging::{
|
||||
@@ -83,7 +85,7 @@ use tracing_appender::non_blocking::WorkerGuard;
|
||||
|
||||
use crate::error::{Result, StartFlownodeSnafu};
|
||||
use crate::options::{GlobalOptions, GreptimeOptions};
|
||||
use crate::{create_resource_limit_metrics, error, log_versions, App};
|
||||
use crate::{create_resource_limit_metrics, error, log_versions, maybe_activate_heap_profile, App};
|
||||
|
||||
pub const APP_NAME: &str = "greptime-standalone";
|
||||
|
||||
@@ -157,6 +159,7 @@ pub struct StandaloneOptions {
|
||||
pub max_in_flight_write_bytes: Option<ReadableSize>,
|
||||
pub slow_query: Option<SlowQueryOptions>,
|
||||
pub query: QueryOptions,
|
||||
pub memory: MemoryOptions,
|
||||
}
|
||||
|
||||
impl Default for StandaloneOptions {
|
||||
@@ -190,6 +193,7 @@ impl Default for StandaloneOptions {
|
||||
max_in_flight_write_bytes: None,
|
||||
slow_query: Some(SlowQueryOptions::default()),
|
||||
query: QueryOptions::default(),
|
||||
memory: MemoryOptions::default(),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -486,6 +490,7 @@ impl StartCommand {
|
||||
);
|
||||
|
||||
log_versions(verbose_version(), short_version(), APP_NAME);
|
||||
maybe_activate_heap_profile(&opts.component.memory);
|
||||
create_resource_limit_metrics(APP_NAME);
|
||||
|
||||
info!("Standalone start command: {:#?}", self);
|
||||
@@ -636,9 +641,8 @@ impl StartCommand {
|
||||
flow_metadata_allocator: flow_metadata_allocator.clone(),
|
||||
region_failure_detector_controller: Arc::new(NoopRegionFailureDetectorControl),
|
||||
};
|
||||
let procedure_manager_c = procedure_manager.clone();
|
||||
|
||||
let ddl_manager = DdlManager::try_new(ddl_context, procedure_manager_c, true)
|
||||
let ddl_manager = DdlManager::try_new(ddl_context, procedure_manager.clone(), true)
|
||||
.context(error::InitDdlManagerSnafu)?;
|
||||
#[cfg(feature = "enterprise")]
|
||||
let ddl_manager = {
|
||||
@@ -646,7 +650,11 @@ impl StartCommand {
|
||||
plugins.get();
|
||||
ddl_manager.with_trigger_ddl_manager(trigger_ddl_manager)
|
||||
};
|
||||
let ddl_task_executor: ProcedureExecutorRef = Arc::new(ddl_manager);
|
||||
|
||||
let procedure_executor = Arc::new(LocalProcedureExecutor::new(
|
||||
Arc::new(ddl_manager),
|
||||
procedure_manager.clone(),
|
||||
));
|
||||
|
||||
let fe_instance = FrontendBuilder::new(
|
||||
fe_opts.clone(),
|
||||
@@ -654,7 +662,7 @@ impl StartCommand {
|
||||
layered_cache_registry.clone(),
|
||||
catalog_manager.clone(),
|
||||
node_manager.clone(),
|
||||
ddl_task_executor.clone(),
|
||||
procedure_executor.clone(),
|
||||
process_manager,
|
||||
)
|
||||
.with_plugin(plugins.clone())
|
||||
@@ -679,7 +687,7 @@ impl StartCommand {
|
||||
catalog_manager.clone(),
|
||||
kv_backend.clone(),
|
||||
layered_cache_registry.clone(),
|
||||
ddl_task_executor.clone(),
|
||||
procedure_executor,
|
||||
node_manager,
|
||||
)
|
||||
.await
|
||||
@@ -821,6 +829,7 @@ impl InformationExtension for StandaloneInformationExtension {
|
||||
memtable_size: region_stat.memtable_size,
|
||||
manifest_size: region_stat.manifest_size,
|
||||
sst_size: region_stat.sst_size,
|
||||
sst_num: region_stat.sst_num,
|
||||
index_size: region_stat.index_size,
|
||||
region_manifest: region_stat.manifest.into(),
|
||||
data_topic_latest_entry_id: region_stat.data_topic_latest_entry_id,
|
||||
|
||||
@@ -34,6 +34,7 @@ use query::options::QueryOptions;
|
||||
use servers::export_metrics::ExportMetricsOption;
|
||||
use servers::grpc::GrpcOptions;
|
||||
use servers::http::HttpOptions;
|
||||
use servers::tls::{TlsMode, TlsOption};
|
||||
use store_api::path_utils::WAL_DIR;
|
||||
|
||||
#[allow(deprecated)]
|
||||
@@ -50,7 +51,6 @@ fn test_load_datanode_example_config() {
|
||||
meta_client: Some(MetaClientOptions {
|
||||
metasrv_addrs: vec!["127.0.0.1:3002".to_string()],
|
||||
timeout: Duration::from_secs(3),
|
||||
heartbeat_timeout: Duration::from_millis(500),
|
||||
ddl_timeout: Duration::from_secs(10),
|
||||
connect_timeout: Duration::from_secs(1),
|
||||
tcp_nodelay: true,
|
||||
@@ -115,7 +115,6 @@ fn test_load_frontend_example_config() {
|
||||
meta_client: Some(MetaClientOptions {
|
||||
metasrv_addrs: vec!["127.0.0.1:3002".to_string()],
|
||||
timeout: Duration::from_secs(3),
|
||||
heartbeat_timeout: Duration::from_millis(500),
|
||||
ddl_timeout: Duration::from_secs(10),
|
||||
connect_timeout: Duration::from_secs(1),
|
||||
tcp_nodelay: true,
|
||||
@@ -190,6 +189,13 @@ fn test_load_metasrv_example_config() {
|
||||
remote_write: Some(Default::default()),
|
||||
..Default::default()
|
||||
},
|
||||
backend_tls: Some(TlsOption {
|
||||
mode: TlsMode::Prefer,
|
||||
cert_path: String::new(),
|
||||
key_path: String::new(),
|
||||
ca_cert_path: String::new(),
|
||||
watch: false,
|
||||
}),
|
||||
..Default::default()
|
||||
},
|
||||
..Default::default()
|
||||
@@ -225,11 +231,13 @@ fn test_load_flownode_example_config() {
|
||||
heartbeat: Default::default(),
|
||||
// Flownode deliberately uses a lower query parallelism
|
||||
// to avoid overwhelming the frontend with too many queries
|
||||
query: QueryOptions { parallelism: 1 },
|
||||
query: QueryOptions {
|
||||
parallelism: 1,
|
||||
allow_query_fallback: false,
|
||||
},
|
||||
meta_client: Some(MetaClientOptions {
|
||||
metasrv_addrs: vec!["127.0.0.1:3002".to_string()],
|
||||
timeout: Duration::from_secs(3),
|
||||
heartbeat_timeout: Duration::from_millis(500),
|
||||
ddl_timeout: Duration::from_secs(10),
|
||||
connect_timeout: Duration::from_secs(1),
|
||||
tcp_nodelay: true,
|
||||
@@ -242,6 +250,7 @@ fn test_load_flownode_example_config() {
|
||||
..Default::default()
|
||||
},
|
||||
user_provider: None,
|
||||
memory: Default::default(),
|
||||
},
|
||||
..Default::default()
|
||||
};
|
||||
@@ -295,6 +304,7 @@ fn test_load_standalone_example_config() {
|
||||
cors_allowed_origins: vec!["https://example.com".to_string()],
|
||||
..Default::default()
|
||||
},
|
||||
|
||||
..Default::default()
|
||||
},
|
||||
..Default::default()
|
||||
|
||||
@@ -104,8 +104,6 @@ pub const INFORMATION_SCHEMA_PROCEDURE_INFO_TABLE_ID: u32 = 34;
pub const INFORMATION_SCHEMA_REGION_STATISTICS_TABLE_ID: u32 = 35;
/// id for information_schema.process_list
pub const INFORMATION_SCHEMA_PROCESS_LIST_TABLE_ID: u32 = 36;
/// id for information_schema.trigger_list (for greptimedb trigger)
pub const INFORMATION_SCHEMA_TRIGGER_TABLE_ID: u32 = 37;

// ----- End of information_schema tables -----

@@ -21,6 +21,7 @@ pub mod error;
pub mod file_format;
pub mod lister;
pub mod object_store;
pub mod parquet_writer;
pub mod share_buffer;
#[cfg(test)]
pub mod test_util;

@@ -77,6 +77,11 @@ pub fn build_oss_backend(

let op = ObjectStore::new(builder)
.context(error::BuildBackendSnafu)?
.layer(
object_store::layers::RetryLayer::new()
.with_jitter()
.with_notify(object_store::util::PrintDetailedError),
)
.layer(object_store::layers::LoggingLayer::default())
.layer(object_store::layers::TracingLayer)
.layer(object_store::layers::build_prometheus_metrics_layer(true))

@@ -85,6 +85,11 @@ pub fn build_s3_backend(
// TODO(weny): Consider finding a better way to eliminate duplicate code.
Ok(ObjectStore::new(builder)
.context(error::BuildBackendSnafu)?
.layer(
object_store::layers::RetryLayer::new()
.with_jitter()
.with_notify(object_store::util::PrintDetailedError),
)
.layer(object_store::layers::LoggingLayer::new(
DefaultLoggingInterceptor,
))

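Both backends wrap the raw store in the same retry/logging layer stack. As a rough illustration of that pattern outside of this diff, the sketch below builds an in-memory opendal operator with a similar stack; the `Memory` service, `with_max_times`, and the plain `opendal` paths are assumptions for illustration only and are not taken from this change.

use opendal::layers::{LoggingLayer, RetryLayer};
use opendal::{services, Operator};

fn build_memory_backend() -> opendal::Result<Operator> {
    // Mirror the layering above: retry transient failures with jitter, then log operations.
    let op = Operator::new(services::Memory::default())?
        .layer(RetryLayer::new().with_jitter().with_max_times(3))
        .layer(LoggingLayer::default())
        .finish();
    Ok(op)
}
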
52 src/common/datasource/src/parquet_writer.rs Normal file
@@ -0,0 +1,52 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use bytes::Bytes;
use futures::future::BoxFuture;
use object_store::Writer;
use parquet::arrow::async_writer::AsyncFileWriter;
use parquet::errors::ParquetError;

/// Bridges opendal [Writer] with parquet [AsyncFileWriter].
pub struct AsyncWriter {
inner: Writer,
}

impl AsyncWriter {
/// Creates an [`AsyncWriter`] from the given [`Writer`].
pub fn new(writer: Writer) -> Self {
Self { inner: writer }
}
}

impl AsyncFileWriter for AsyncWriter {
fn write(&mut self, bs: Bytes) -> BoxFuture<'_, parquet::errors::Result<()>> {
Box::pin(async move {
self.inner
.write(bs)
.await
.map_err(|err| ParquetError::External(Box::new(err)))
})
}

fn complete(&mut self) -> BoxFuture<'_, parquet::errors::Result<()>> {
Box::pin(async move {
self.inner
.close()
.await
.map(|_| ())
.map_err(|err| ParquetError::External(Box::new(err)))
})
}
}

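A brief usage sketch (not part of the diff) of how this bridge is typically wired into parquet's async writer. The `AsyncArrowWriter::try_new(writer, schema, props)` signature assumed here matches recent `parquet` releases, and the crate path `common_datasource::parquet_writer` is inferred from the file location above.

use std::sync::Arc;

use arrow_schema::{DataType, Field, Schema};
use common_datasource::parquet_writer::AsyncWriter;
use object_store::ObjectStore;
use parquet::arrow::AsyncArrowWriter;

async fn write_parquet(store: &ObjectStore) -> Result<(), Box<dyn std::error::Error>> {
    let schema = Arc::new(Schema::new(vec![Field::new("v", DataType::Int64, false)]));
    // Obtain an opendal `Writer` for the target path and wrap it for parquet.
    let writer = AsyncWriter::new(store.writer("demo.parquet").await?);
    let parquet_writer = AsyncArrowWriter::try_new(writer, schema, None)?;
    // For real data, bind the writer as `mut` and call `parquet_writer.write(&batch).await?`
    // once per RecordBatch before closing.
    parquet_writer.close().await?;
    Ok(())
}
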
@@ -84,24 +84,33 @@ fn test_to_string() {
assert_eq!(result.unwrap_err().to_string(), "<root cause>");
}

fn normalize_path(s: &str) -> String {
s.replace('\\', "/")
}

#[test]
fn test_debug_format() {
let result = normal_error();
let debug_output = format!("{:?}", result.unwrap_err());
let normalized_output = debug_output.replace('\\', "/");

assert_eq!(
normalized_output,
r#"0: A normal error with "display" attribute, message "blabla", at src/common/error/tests/ext.rs:55:22
1: PlainError { msg: "<root cause>", status_code: Unexpected }"#
normalize_path(&debug_output),
format!(
r#"0: A normal error with "display" attribute, message "blabla", at {}:55:22
1: PlainError {{ msg: "<root cause>", status_code: Unexpected }}"#,
normalize_path(file!())
)
);

let result = transparent_error();
let debug_output = format!("{:?}", result.unwrap_err());
let normalized_output = debug_output.replace('\\', "/");
assert_eq!(
normalized_output,
r#"0: <transparent>, at src/common/error/tests/ext.rs:60:5
1: PlainError { msg: "<root cause>", status_code: Unexpected }"#
normalize_path(&debug_output),
format!(
r#"0: <transparent>, at {}:60:5
1: PlainError {{ msg: "<root cause>", status_code: Unexpected }}"#,
normalize_path(file!())
)
);
}

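The normalization above exists because `file!()` expands to a backslash-separated path on Windows. A one-line check of the helper (illustrative only, not part of the diff) makes the intent explicit:

#[test]
fn test_normalize_path_windows_style() {
    // Backslash-separated input, as produced by `file!()` on Windows, maps to the portable form.
    assert_eq!(
        normalize_path(r"src\common\error\tests\ext.rs"),
        "src/common/error/tests/ext.rs"
    );
}
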
23 src/common/event-recorder/Cargo.toml Normal file
@@ -0,0 +1,23 @@
[package]
name = "common-event-recorder"
version.workspace = true
edition.workspace = true
license.workspace = true

[dependencies]
api.workspace = true
async-trait.workspace = true
backon.workspace = true
common-error.workspace = true
common-macro.workspace = true
common-telemetry.workspace = true
common-time.workspace = true
serde.workspace = true
serde_json.workspace = true
snafu.workspace = true
store-api.workspace = true
tokio.workspace = true
tokio-util.workspace = true

[lints]
workspace = true
81 src/common/event-recorder/src/error.rs Normal file
@@ -0,0 +1,81 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use api::v1::ColumnSchema;
|
||||
use common_error::ext::{BoxedError, ErrorExt};
|
||||
use common_error::status_code::StatusCode;
|
||||
use common_macro::stack_trace_debug;
|
||||
use snafu::{Location, Snafu};
|
||||
|
||||
#[derive(Snafu)]
|
||||
#[snafu(visibility(pub))]
|
||||
#[stack_trace_debug]
|
||||
pub enum Error {
|
||||
#[snafu(display("No available frontend"))]
|
||||
NoAvailableFrontend {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Mismatched schema, expected: {:?}, actual: {:?}", expected, actual))]
|
||||
MismatchedSchema {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
expected: Vec<ColumnSchema>,
|
||||
actual: Vec<ColumnSchema>,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to serialize event"))]
|
||||
SerializeEvent {
|
||||
#[snafu(source)]
|
||||
error: serde_json::error::Error,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to insert events"))]
|
||||
InsertEvents {
|
||||
// BoxedError is utilized here to prevent introducing a circular dependency that would arise from directly referencing `client::error::Error`.
|
||||
source: BoxedError,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Keyvalue backend error"))]
|
||||
KvBackend {
|
||||
// BoxedError is utilized here to prevent introducing a circular dependency that would arise from directly referencing `common_meta::error::Error`.
|
||||
source: BoxedError,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
}
|
||||
|
||||
pub type Result<T> = std::result::Result<T, Error>;
|
||||
|
||||
impl ErrorExt for Error {
|
||||
fn status_code(&self) -> StatusCode {
|
||||
match self {
|
||||
Error::MismatchedSchema { .. } | Error::SerializeEvent { .. } => {
|
||||
StatusCode::InvalidArguments
|
||||
}
|
||||
Error::NoAvailableFrontend { .. }
|
||||
| Error::InsertEvents { .. }
|
||||
| Error::KvBackend { .. } => StatusCode::Internal,
|
||||
}
|
||||
}
|
||||
|
||||
fn as_any(&self) -> &dyn std::any::Any {
|
||||
self
|
||||
}
|
||||
}
|
||||
@@ -12,16 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

//! Format to store in parquet.
//!
//! We store two additional internal columns at last:
//! - `__sequence`, the sequence number of a row. Type: uint64
//! - `__op_type`, the op type of the row. Type: uint8
//!
//! We store other columns in the same order as [RegionMetadata::field_columns()](store_api::metadata::RegionMetadata::field_columns()).
//!
pub mod error;
pub mod recorder;

/// Number of columns that have fixed positions.
///
/// Contains all internal columns.
pub(crate) const PLAIN_FIXED_POS_COLUMN_NUM: usize = 2;
pub use recorder::*;
538 src/common/event-recorder/src/recorder.rs Normal file
@@ -0,0 +1,538 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::any::Any;
|
||||
use std::collections::HashMap;
|
||||
use std::fmt::Debug;
|
||||
use std::sync::{Arc, OnceLock};
|
||||
use std::time::Duration;
|
||||
|
||||
use api::v1::column_data_type_extension::TypeExt;
|
||||
use api::v1::value::ValueData;
|
||||
use api::v1::{
|
||||
ColumnDataType, ColumnDataTypeExtension, ColumnSchema, JsonTypeExtension, Row,
|
||||
RowInsertRequest, RowInsertRequests, Rows, SemanticType,
|
||||
};
|
||||
use async_trait::async_trait;
|
||||
use backon::{BackoffBuilder, ExponentialBuilder};
|
||||
use common_telemetry::{debug, error, info, warn};
|
||||
use common_time::timestamp::{TimeUnit, Timestamp};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use store_api::mito_engine_options::{APPEND_MODE_KEY, TTL_KEY};
|
||||
use tokio::sync::mpsc::{channel, Receiver, Sender};
|
||||
use tokio::task::JoinHandle;
|
||||
use tokio::time::sleep;
|
||||
use tokio_util::sync::CancellationToken;
|
||||
|
||||
use crate::error::{MismatchedSchemaSnafu, Result};
|
||||
|
||||
/// The default table name for storing the events.
|
||||
pub const DEFAULT_EVENTS_TABLE_NAME: &str = "events";
|
||||
|
||||
/// The column name for the event type.
|
||||
pub const EVENTS_TABLE_TYPE_COLUMN_NAME: &str = "type";
|
||||
/// The column name for the event payload.
|
||||
pub const EVENTS_TABLE_PAYLOAD_COLUMN_NAME: &str = "payload";
|
||||
/// The column name for the event timestamp.
|
||||
pub const EVENTS_TABLE_TIMESTAMP_COLUMN_NAME: &str = "timestamp";
|
||||
|
||||
/// EventRecorderRef is the reference to the event recorder.
|
||||
pub type EventRecorderRef = Arc<dyn EventRecorder>;
|
||||
|
||||
static EVENTS_TABLE_TTL: OnceLock<String> = OnceLock::new();
|
||||
|
||||
/// The time interval for flushing batched events to the event handler.
|
||||
pub const DEFAULT_FLUSH_INTERVAL_SECONDS: Duration = Duration::from_secs(5);
|
||||
// The default TTL for the events table.
|
||||
const DEFAULT_EVENTS_TABLE_TTL: &str = "30d";
|
||||
// The capacity of the tokio channel for transmitting events to background processor.
|
||||
const DEFAULT_CHANNEL_SIZE: usize = 2048;
|
||||
// The size of the buffer for batching events before flushing to event handler.
|
||||
const DEFAULT_BUFFER_SIZE: usize = 100;
|
||||
// The maximum number of retry attempts when event handler processing fails.
|
||||
const DEFAULT_MAX_RETRY_TIMES: u64 = 3;
|
||||
|
||||
/// Event trait defines the interface for events that can be recorded and persisted as the system table.
|
||||
/// By default, the event will be persisted as the system table with the following schema:
|
||||
///
|
||||
/// - `type`: the type of the event.
|
||||
/// - `payload`: the JSON bytes of the event.
|
||||
/// - `timestamp`: the timestamp of the event.
|
||||
///
|
||||
/// An event can also provide extra columns by overriding the `extra_schema` and `extra_row` methods.
|
||||
pub trait Event: Send + Sync + Debug {
|
||||
/// Returns the type of the event.
|
||||
fn event_type(&self) -> &str;
|
||||
|
||||
/// Returns the timestamp of the event. Defaults to the current time.
|
||||
fn timestamp(&self) -> Timestamp {
|
||||
Timestamp::current_time(TimeUnit::Nanosecond)
|
||||
}
|
||||
|
||||
/// Returns the JSON bytes of the event as the payload. It will use JSON type to store the payload.
|
||||
fn json_payload(&self) -> Result<String> {
|
||||
Ok("".to_string())
|
||||
}
|
||||
|
||||
/// Returns extra columns to append to the default schema.
|
||||
fn extra_schema(&self) -> Vec<ColumnSchema> {
|
||||
vec![]
|
||||
}
|
||||
|
||||
/// Returns extra values to append to the default row.
|
||||
fn extra_row(&self) -> Result<Row> {
|
||||
Ok(Row { values: vec![] })
|
||||
}
|
||||
|
||||
/// Returns the event as any type.
|
||||
fn as_any(&self) -> &dyn Any;
|
||||
}
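// Illustrative only (not part of this change): a hypothetical event type showing how the
// trait above is meant to be implemented. A real event would likely derive `Serialize` and
// use `serde_json` for the payload; the name and fields here are made up.
#[derive(Debug)]
pub struct ExampleMigrationEvent {
    pub from_peer: u64,
    pub to_peer: u64,
}

impl Event for ExampleMigrationEvent {
    fn event_type(&self) -> &str {
        "example_migration"
    }

    fn json_payload(&self) -> Result<String> {
        // Hand-rolled JSON keeps the sketch dependency-free.
        Ok(format!(
            "{{\"from_peer\": {}, \"to_peer\": {}}}",
            self.from_peer, self.to_peer
        ))
    }

    fn as_any(&self) -> &dyn Any {
        self
    }
}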
|
||||
|
||||
/// Eventable trait defines the interface for objects that can be converted to [Event].
|
||||
pub trait Eventable: Send + Sync + Debug {
|
||||
/// Converts the object to an [Event].
|
||||
fn to_event(&self) -> Option<Box<dyn Event>> {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the hints for the insert operation.
|
||||
pub fn insert_hints() -> Vec<(&'static str, &'static str)> {
|
||||
vec![
|
||||
(
|
||||
TTL_KEY,
|
||||
EVENTS_TABLE_TTL
|
||||
.get()
|
||||
.map(|s| s.as_str())
|
||||
.unwrap_or(DEFAULT_EVENTS_TABLE_TTL),
|
||||
),
|
||||
(APPEND_MODE_KEY, "true"),
|
||||
]
|
||||
}
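// Illustration (not part of this change): with the default options, and assuming the global
// TTL has not been overridden, the hints above resolve to the 30-day TTL plus append mode.
#[cfg(test)]
mod insert_hints_example {
    use super::*;

    #[test]
    fn default_insert_hints() {
        let hints = insert_hints();
        assert_eq!(hints, vec![(TTL_KEY, "30d"), (APPEND_MODE_KEY, "true")]);
    }
}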
|
||||
|
||||
/// Builds the row inserts request for the events that will be persisted to the events table.
|
||||
pub fn build_row_inserts_request(events: &[Box<dyn Event>]) -> Result<RowInsertRequests> {
|
||||
// Aggregate the events by the event type.
|
||||
let mut event_groups: HashMap<&str, Vec<&Box<dyn Event>>> = HashMap::new();
|
||||
|
||||
for event in events {
|
||||
event_groups
|
||||
.entry(event.event_type())
|
||||
.or_default()
|
||||
.push(event);
|
||||
}
|
||||
|
||||
let mut row_insert_requests = RowInsertRequests {
|
||||
inserts: Vec::with_capacity(event_groups.len()),
|
||||
};
|
||||
|
||||
for (_, events) in event_groups {
|
||||
validate_events(&events)?;
|
||||
|
||||
// We already validated the events, so it's safe to get the first event to build the schema for the RowInsertRequest.
|
||||
let event = &events[0];
|
||||
let mut schema = vec![
|
||||
ColumnSchema {
|
||||
column_name: EVENTS_TABLE_TYPE_COLUMN_NAME.to_string(),
|
||||
datatype: ColumnDataType::String.into(),
|
||||
semantic_type: SemanticType::Tag.into(),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnSchema {
|
||||
column_name: EVENTS_TABLE_PAYLOAD_COLUMN_NAME.to_string(),
|
||||
datatype: ColumnDataType::Binary as i32,
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
datatype_extension: Some(ColumnDataTypeExtension {
|
||||
type_ext: Some(TypeExt::JsonType(JsonTypeExtension::JsonBinary.into())),
|
||||
}),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnSchema {
|
||||
column_name: EVENTS_TABLE_TIMESTAMP_COLUMN_NAME.to_string(),
|
||||
datatype: ColumnDataType::TimestampNanosecond.into(),
|
||||
semantic_type: SemanticType::Timestamp.into(),
|
||||
..Default::default()
|
||||
},
|
||||
];
|
||||
schema.extend(event.extra_schema());
|
||||
|
||||
let rows = events
|
||||
.iter()
|
||||
.map(|event| {
|
||||
let mut row = Row {
|
||||
values: vec![
|
||||
ValueData::StringValue(event.event_type().to_string()).into(),
|
||||
ValueData::BinaryValue(event.json_payload()?.as_bytes().to_vec()).into(),
|
||||
ValueData::TimestampNanosecondValue(event.timestamp().value()).into(),
|
||||
],
|
||||
};
|
||||
row.values.extend(event.extra_row()?.values);
|
||||
Ok(row)
|
||||
})
|
||||
.collect::<Result<Vec<_>>>()?;
|
||||
|
||||
row_insert_requests.inserts.push(RowInsertRequest {
|
||||
table_name: DEFAULT_EVENTS_TABLE_NAME.to_string(),
|
||||
rows: Some(Rows { schema, rows }),
|
||||
});
|
||||
}
|
||||
|
||||
Ok(row_insert_requests)
|
||||
}
|
||||
|
||||
// Ensure the events with the same event type have the same extra schema.
|
||||
#[allow(clippy::borrowed_box)]
|
||||
fn validate_events(events: &[&Box<dyn Event>]) -> Result<()> {
|
||||
// It's safe to get the first event because the events are already grouped by the event type.
|
||||
let extra_schema = events[0].extra_schema();
|
||||
for event in events {
|
||||
if event.extra_schema() != extra_schema {
|
||||
MismatchedSchemaSnafu {
|
||||
expected: extra_schema.clone(),
|
||||
actual: event.extra_schema(),
|
||||
}
|
||||
.fail()?;
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// EventRecorder trait defines the interface for recording events.
|
||||
pub trait EventRecorder: Send + Sync + Debug + 'static {
|
||||
/// Records an event for persistence and processing by [EventHandler].
|
||||
fn record(&self, event: Box<dyn Event>);
|
||||
|
||||
/// Cancels the event recorder.
|
||||
fn close(&self);
|
||||
}
|
||||
|
||||
/// EventHandler trait defines the interface for how to handle the event.
|
||||
#[async_trait]
|
||||
pub trait EventHandler: Send + Sync + 'static {
|
||||
/// Processes and handles incoming events. The [DefaultEventHandlerImpl] implementation forwards events to frontend instances for persistence.
|
||||
/// We use `&[Box<dyn Event>]` to avoid consuming the events, so the caller can buffer the events and retry if the handler fails.
|
||||
async fn handle(&self, events: &[Box<dyn Event>]) -> Result<()>;
|
||||
}
|
||||
|
||||
/// Configuration options for the event recorder.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
|
||||
pub struct EventRecorderOptions {
|
||||
/// TTL for the events table that will be used to store the events.
|
||||
pub ttl: String,
|
||||
}
|
||||
|
||||
impl Default for EventRecorderOptions {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
ttl: DEFAULT_EVENTS_TABLE_TTL.to_string(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Implementation of [EventRecorder] that records the events and processes them in the background by the [EventHandler].
|
||||
#[derive(Debug)]
|
||||
pub struct EventRecorderImpl {
|
||||
// The channel to send the events to the background processor.
|
||||
tx: Sender<Box<dyn Event>>,
|
||||
// The cancel token to cancel the background processor.
|
||||
cancel_token: CancellationToken,
|
||||
// The background processor to process the events.
|
||||
handle: Option<JoinHandle<()>>,
|
||||
}
|
||||
|
||||
impl EventRecorderImpl {
|
||||
pub fn new(event_handler: Box<dyn EventHandler>, opts: EventRecorderOptions) -> Self {
|
||||
info!("Creating event recorder with options: {:?}", opts);
|
||||
|
||||
let (tx, rx) = channel(DEFAULT_CHANNEL_SIZE);
|
||||
let cancel_token = CancellationToken::new();
|
||||
|
||||
let mut recorder = Self {
|
||||
tx,
|
||||
handle: None,
|
||||
cancel_token: cancel_token.clone(),
|
||||
};
|
||||
|
||||
let processor = EventProcessor::new(
|
||||
rx,
|
||||
event_handler,
|
||||
DEFAULT_FLUSH_INTERVAL_SECONDS,
|
||||
DEFAULT_MAX_RETRY_TIMES,
|
||||
)
|
||||
.with_cancel_token(cancel_token);
|
||||
|
||||
// Spawn a background task to process the events.
|
||||
let handle = tokio::spawn(async move {
|
||||
processor.process(DEFAULT_BUFFER_SIZE).await;
|
||||
});
|
||||
|
||||
recorder.handle = Some(handle);
|
||||
|
||||
// It only sets the ttl once, so it's safe to skip the error.
|
||||
if EVENTS_TABLE_TTL.set(opts.ttl.clone()).is_err() {
|
||||
info!(
|
||||
"Events table ttl already set to {}, skip setting it",
|
||||
opts.ttl
|
||||
);
|
||||
}
|
||||
|
||||
recorder
|
||||
}
|
||||
}
|
||||
|
||||
impl EventRecorder for EventRecorderImpl {
|
||||
// Accepts an event and sends it to the background handler.
|
||||
fn record(&self, event: Box<dyn Event>) {
|
||||
if let Err(e) = self.tx.try_send(event) {
|
||||
error!("Failed to send event to the background processor: {}", e);
|
||||
}
|
||||
}
|
||||
|
||||
// Closes the event recorder. It will stop the background processor and flush the buffer.
|
||||
fn close(&self) {
|
||||
self.cancel_token.cancel();
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for EventRecorderImpl {
|
||||
fn drop(&mut self) {
|
||||
if let Some(handle) = self.handle.take() {
|
||||
handle.abort();
|
||||
info!("Aborted the background processor in event recorder");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
struct EventProcessor {
|
||||
rx: Receiver<Box<dyn Event>>,
|
||||
event_handler: Box<dyn EventHandler>,
|
||||
max_retry_times: u64,
|
||||
process_interval: Duration,
|
||||
cancel_token: CancellationToken,
|
||||
}
|
||||
|
||||
impl EventProcessor {
|
||||
fn new(
|
||||
rx: Receiver<Box<dyn Event>>,
|
||||
event_handler: Box<dyn EventHandler>,
|
||||
process_interval: Duration,
|
||||
max_retry_times: u64,
|
||||
) -> Self {
|
||||
Self {
|
||||
rx,
|
||||
event_handler,
|
||||
max_retry_times,
|
||||
process_interval,
|
||||
cancel_token: CancellationToken::new(),
|
||||
}
|
||||
}
|
||||
|
||||
fn with_cancel_token(mut self, cancel_token: CancellationToken) -> Self {
|
||||
self.cancel_token = cancel_token;
|
||||
self
|
||||
}
|
||||
|
||||
async fn process(mut self, buffer_size: usize) {
|
||||
info!("Start the background processor in event recorder to handle the received events.");
|
||||
|
||||
let mut buffer = Vec::with_capacity(buffer_size);
|
||||
let mut interval = tokio::time::interval(self.process_interval);
|
||||
|
||||
loop {
|
||||
tokio::select! {
|
||||
maybe_event = self.rx.recv() => {
|
||||
if let Some(maybe_event) = maybe_event {
|
||||
debug!("Received event: {:?}", maybe_event);
|
||||
|
||||
if buffer.len() >= buffer_size {
|
||||
debug!(
|
||||
"Flushing events to the event handler because the buffer is full with {} events",
|
||||
buffer.len()
|
||||
);
|
||||
self.flush_events_to_handler(&mut buffer).await;
|
||||
}
|
||||
|
||||
// Push the event to the buffer, the buffer will be flushed when the interval is triggered or received a closed signal.
|
||||
buffer.push(maybe_event);
|
||||
} else {
|
||||
// When received a closed signal, flush the buffer and exit the loop.
|
||||
self.flush_events_to_handler(&mut buffer).await;
|
||||
break;
|
||||
}
|
||||
}
|
||||
// Cancel the processor through the cancel token.
|
||||
_ = self.cancel_token.cancelled() => {
|
||||
warn!("Received a cancel signal, flushing the buffer and exiting the loop");
|
||||
self.flush_events_to_handler(&mut buffer).await;
|
||||
break;
|
||||
}
|
||||
// When the interval is triggered, flush the buffer and send the events to the event handler.
|
||||
_ = interval.tick() => {
|
||||
self.flush_events_to_handler(&mut buffer).await;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// NOTE: While we implement a retry mechanism for failed event handling, there is no guarantee that all events will be processed successfully.
|
||||
async fn flush_events_to_handler(&self, buffer: &mut Vec<Box<dyn Event>>) {
|
||||
if !buffer.is_empty() {
|
||||
debug!("Flushing {} events to the event handler", buffer.len());
|
||||
|
||||
let mut backoff = ExponentialBuilder::default()
|
||||
.with_min_delay(Duration::from_millis(
|
||||
DEFAULT_FLUSH_INTERVAL_SECONDS.as_millis() as u64 / self.max_retry_times.max(1),
|
||||
))
|
||||
.with_max_delay(Duration::from_millis(
|
||||
DEFAULT_FLUSH_INTERVAL_SECONDS.as_millis() as u64,
|
||||
))
|
||||
.with_max_times(self.max_retry_times as usize)
|
||||
.build();
|
||||
|
||||
loop {
|
||||
match self.event_handler.handle(buffer).await {
|
||||
Ok(()) => {
|
||||
debug!("Successfully handled {} events", buffer.len());
|
||||
break;
|
||||
}
|
||||
Err(e) => {
|
||||
if let Some(d) = backoff.next() {
|
||||
warn!(e; "Failed to handle events, retrying...");
|
||||
sleep(d).await;
|
||||
continue;
|
||||
} else {
|
||||
warn!(
|
||||
e; "Failed to handle events after {} retries",
|
||||
self.max_retry_times
|
||||
);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Clear the buffer to prevent unbounded memory growth, regardless of whether event processing succeeded or failed.
|
||||
buffer.clear();
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[derive(Debug)]
|
||||
struct TestEvent {}
|
||||
|
||||
impl Event for TestEvent {
|
||||
fn event_type(&self) -> &str {
|
||||
"test_event"
|
||||
}
|
||||
|
||||
fn json_payload(&self) -> Result<String> {
|
||||
Ok("{\"procedure_id\": \"1234567890\"}".to_string())
|
||||
}
|
||||
|
||||
fn as_any(&self) -> &dyn Any {
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
struct TestEventHandlerImpl {}
|
||||
|
||||
#[async_trait]
|
||||
impl EventHandler for TestEventHandlerImpl {
|
||||
async fn handle(&self, events: &[Box<dyn Event>]) -> Result<()> {
|
||||
let event = events
|
||||
.first()
|
||||
.unwrap()
|
||||
.as_any()
|
||||
.downcast_ref::<TestEvent>()
|
||||
.unwrap();
|
||||
assert_eq!(
|
||||
event.json_payload().unwrap(),
|
||||
"{\"procedure_id\": \"1234567890\"}"
|
||||
);
|
||||
assert_eq!(event.event_type(), "test_event");
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_event_recorder() {
|
||||
let mut event_recorder = EventRecorderImpl::new(
|
||||
Box::new(TestEventHandlerImpl {}),
|
||||
EventRecorderOptions::default(),
|
||||
);
|
||||
event_recorder.record(Box::new(TestEvent {}));
|
||||
|
||||
// Sleep for a while to let the event be sent to the event handler.
|
||||
sleep(Duration::from_millis(500)).await;
|
||||
|
||||
// Close the event recorder to flush the buffer.
|
||||
event_recorder.close();
|
||||
|
||||
// Sleep for a while to let the background task process the event.
|
||||
sleep(Duration::from_millis(500)).await;
|
||||
|
||||
if let Some(handle) = event_recorder.handle.take() {
|
||||
assert!(handle.await.is_ok());
|
||||
}
|
||||
}
|
||||
|
||||
struct TestEventHandlerImplShouldPanic {}
|
||||
|
||||
#[async_trait]
|
||||
impl EventHandler for TestEventHandlerImplShouldPanic {
|
||||
async fn handle(&self, events: &[Box<dyn Event>]) -> Result<()> {
|
||||
let event = events
|
||||
.first()
|
||||
.unwrap()
|
||||
.as_any()
|
||||
.downcast_ref::<TestEvent>()
|
||||
.unwrap();
|
||||
|
||||
// Set the incorrect payload and event type to trigger the panic.
|
||||
assert_eq!(
|
||||
event.json_payload().unwrap(),
|
||||
"{\"procedure_id\": \"should_panic\"}"
|
||||
);
|
||||
assert_eq!(event.event_type(), "should_panic");
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_event_recorder_should_panic() {
|
||||
let mut event_recorder = EventRecorderImpl::new(
|
||||
Box::new(TestEventHandlerImplShouldPanic {}),
|
||||
EventRecorderOptions::default(),
|
||||
);
|
||||
|
||||
event_recorder.record(Box::new(TestEvent {}));
|
||||
|
||||
// Sleep for a while to let the event be sent to the event handler.
|
||||
sleep(Duration::from_millis(500)).await;
|
||||
|
||||
// Close the event recorder to flush the buffer.
|
||||
event_recorder.close();
|
||||
|
||||
// Sleep for a while to let the background task process the event.
|
||||
sleep(Duration::from_millis(500)).await;
|
||||
|
||||
if let Some(handle) = event_recorder.handle.take() {
|
||||
assert!(handle.await.unwrap_err().is_panic());
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -12,6 +12,7 @@ common-macro.workspace = true
common-meta.workspace = true
greptime-proto.workspace = true
meta-client.workspace = true
session.workspace = true
snafu.workspace = true
tonic.workspace = true


@@ -19,6 +19,7 @@ use snafu::OptionExt;

pub mod error;
pub mod selector;
pub mod slow_query_event;

#[derive(Debug, Clone, Eq, PartialEq)]
pub struct DisplayProcessId {

28 src/common/frontend/src/slow_query_event.rs Normal file
@@ -0,0 +1,28 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use session::context::QueryContextRef;

#[derive(Debug)]
pub struct SlowQueryEvent {
pub cost: u64,
pub threshold: u64,
pub query: String,
pub is_promql: bool,
pub query_ctx: QueryContextRef,
pub promql_range: Option<u64>,
pub promql_step: Option<u64>,
pub promql_start: Option<i64>,
pub promql_end: Option<i64>,
}
@@ -16,6 +16,8 @@ geo = ["geohash", "h3o", "s2", "wkt", "geo-types", "dep:geo"]
ahash.workspace = true
api.workspace = true
arc-swap = "1.0"
arrow.workspace = true
arrow-schema.workspace = true
async-trait.workspace = true
bincode = "1.3"
catalog.workspace = true
@@ -34,6 +36,7 @@ datafusion.workspace = true
datafusion-common.workspace = true
datafusion-expr.workspace = true
datafusion-functions-aggregate-common.workspace = true
datafusion-physical-expr.workspace = true
datatypes.workspace = true
derive_more = { version = "1", default-features = false, features = ["display"] }
geo = { version = "0.29", optional = true }
@@ -62,5 +65,7 @@ wkt = { version = "0.11", optional = true }

[dev-dependencies]
approx = "0.5"
futures.workspace = true
pretty_assertions.workspace = true
serde = { version = "1.0", features = ["derive"] }
tokio.workspace = true

@@ -16,6 +16,9 @@ mod add_region_follower;
mod flush_compact_region;
mod flush_compact_table;
mod migrate_region;
mod reconcile_catalog;
mod reconcile_database;
mod reconcile_table;
mod remove_region_follower;

use std::sync::Arc;
@@ -24,6 +27,9 @@ use add_region_follower::AddRegionFollowerFunction;
use flush_compact_region::{CompactRegionFunction, FlushRegionFunction};
use flush_compact_table::{CompactTableFunction, FlushTableFunction};
use migrate_region::MigrateRegionFunction;
use reconcile_catalog::ReconcileCatalogFunction;
use reconcile_database::ReconcileDatabaseFunction;
use reconcile_table::ReconcileTableFunction;
use remove_region_follower::RemoveRegionFollowerFunction;

use crate::flush_flow::FlushFlowFunction;
@@ -43,5 +49,8 @@ impl AdminFunction {
registry.register_async(Arc::new(FlushTableFunction));
registry.register_async(Arc::new(CompactTableFunction));
registry.register_async(Arc::new(FlushFlowFunction));
registry.register_async(Arc::new(ReconcileCatalogFunction));
registry.register_async(Arc::new(ReconcileDatabaseFunction));
registry.register_async(Arc::new(ReconcileTableFunction));
}
}

179 src/common/function/src/admin/reconcile_catalog.rs Normal file
@@ -0,0 +1,179 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use api::v1::meta::reconcile_request::Target;
|
||||
use api::v1::meta::{ReconcileCatalog, ReconcileRequest};
|
||||
use common_macro::admin_fn;
|
||||
use common_query::error::{
|
||||
InvalidFuncArgsSnafu, MissingProcedureServiceHandlerSnafu, Result,
|
||||
UnsupportedInputDataTypeSnafu,
|
||||
};
|
||||
use common_query::prelude::{Signature, TypeSignature, Volatility};
|
||||
use common_telemetry::info;
|
||||
use datatypes::prelude::*;
|
||||
use session::context::QueryContextRef;
|
||||
|
||||
use crate::handlers::ProcedureServiceHandlerRef;
|
||||
use crate::helper::{
|
||||
cast_u32, default_parallelism, default_resolve_strategy, get_string_from_params,
|
||||
parse_resolve_strategy,
|
||||
};
|
||||
|
||||
const FN_NAME: &str = "reconcile_catalog";
|
||||
|
||||
/// A function to reconcile a catalog.
|
||||
/// Returns the procedure id on success.
|
||||
///
|
||||
/// - `reconcile_catalog(resolve_strategy)`.
|
||||
/// - `reconcile_catalog(resolve_strategy, parallelism)`.
|
||||
///
|
||||
/// - `reconcile_catalog()`.
|
||||
#[admin_fn(
|
||||
name = ReconcileCatalogFunction,
|
||||
display_name = reconcile_catalog,
|
||||
sig_fn = signature,
|
||||
ret = string
|
||||
)]
|
||||
pub(crate) async fn reconcile_catalog(
|
||||
procedure_service_handler: &ProcedureServiceHandlerRef,
|
||||
query_ctx: &QueryContextRef,
|
||||
params: &[ValueRef<'_>],
|
||||
) -> Result<Value> {
|
||||
let (resolve_strategy, parallelism) = match params.len() {
|
||||
0 => (default_resolve_strategy(), default_parallelism()),
|
||||
1 => (
|
||||
parse_resolve_strategy(get_string_from_params(params, 0, FN_NAME)?)?,
|
||||
default_parallelism(),
|
||||
),
|
||||
2 => {
|
||||
let Some(parallelism) = cast_u32(¶ms[1])? else {
|
||||
return UnsupportedInputDataTypeSnafu {
|
||||
function: FN_NAME,
|
||||
datatypes: params.iter().map(|v| v.data_type()).collect::<Vec<_>>(),
|
||||
}
|
||||
.fail();
|
||||
};
|
||||
(
|
||||
parse_resolve_strategy(get_string_from_params(params, 0, FN_NAME)?)?,
|
||||
parallelism,
|
||||
)
|
||||
}
|
||||
size => {
|
||||
return InvalidFuncArgsSnafu {
|
||||
err_msg: format!(
|
||||
"The length of the args is not correct, expect 0, 1 or 2, have: {}",
|
||||
size
|
||||
),
|
||||
}
|
||||
.fail();
|
||||
}
|
||||
};
|
||||
info!(
|
||||
"Reconciling catalog with resolve_strategy: {:?}, parallelism: {}",
|
||||
resolve_strategy, parallelism
|
||||
);
|
||||
let pid = procedure_service_handler
|
||||
.reconcile(ReconcileRequest {
|
||||
target: Some(Target::ReconcileCatalog(ReconcileCatalog {
|
||||
catalog_name: query_ctx.current_catalog().to_string(),
|
||||
parallelism,
|
||||
resolve_strategy: resolve_strategy as i32,
|
||||
})),
|
||||
..Default::default()
|
||||
})
|
||||
.await?;
|
||||
match pid {
|
||||
Some(pid) => Ok(Value::from(pid)),
|
||||
None => Ok(Value::Null),
|
||||
}
|
||||
}
|
||||
|
||||
fn signature() -> Signature {
|
||||
let nums = ConcreteDataType::numerics();
|
||||
let mut signs = Vec::with_capacity(2 + nums.len());
|
||||
signs.extend([
|
||||
// reconcile_catalog()
|
||||
TypeSignature::NullAry,
|
||||
// reconcile_catalog(resolve_strategy)
|
||||
TypeSignature::Exact(vec![ConcreteDataType::string_datatype()]),
|
||||
]);
|
||||
for sign in nums {
|
||||
// reconcile_catalog(resolve_strategy, parallelism)
|
||||
signs.push(TypeSignature::Exact(vec![
|
||||
ConcreteDataType::string_datatype(),
|
||||
sign,
|
||||
]));
|
||||
}
|
||||
Signature::one_of(signs, Volatility::Immutable)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::assert_matches::assert_matches;
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_query::error::Error;
|
||||
use datatypes::vectors::{StringVector, UInt64Vector, VectorRef};
|
||||
|
||||
use crate::admin::reconcile_catalog::ReconcileCatalogFunction;
|
||||
use crate::function::{AsyncFunction, FunctionContext};
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_reconcile_catalog() {
|
||||
common_telemetry::init_default_ut_logging();
|
||||
|
||||
// reconcile_catalog()
|
||||
let f = ReconcileCatalogFunction;
|
||||
let args = vec![];
|
||||
let result = f.eval(FunctionContext::mock(), &args).await.unwrap();
|
||||
let expect: VectorRef = Arc::new(StringVector::from(vec!["test_pid"]));
|
||||
assert_eq!(expect, result);
|
||||
|
||||
// reconcile_catalog(resolve_strategy)
|
||||
let f = ReconcileCatalogFunction;
|
||||
let args = vec![Arc::new(StringVector::from(vec!["UseMetasrv"])) as _];
|
||||
let result = f.eval(FunctionContext::mock(), &args).await.unwrap();
|
||||
let expect: VectorRef = Arc::new(StringVector::from(vec!["test_pid"]));
|
||||
assert_eq!(expect, result);
|
||||
|
||||
// reconcile_catalog(resolve_strategy, parallelism)
|
||||
let f = ReconcileCatalogFunction;
|
||||
let args = vec![
|
||||
Arc::new(StringVector::from(vec!["UseLatest"])) as _,
|
||||
Arc::new(UInt64Vector::from_slice([10])) as _,
|
||||
];
|
||||
let result = f.eval(FunctionContext::mock(), &args).await.unwrap();
|
||||
let expect: VectorRef = Arc::new(StringVector::from(vec!["test_pid"]));
|
||||
assert_eq!(expect, result);
|
||||
|
||||
// unsupported input data type
|
||||
let f = ReconcileCatalogFunction;
|
||||
let args = vec![
|
||||
Arc::new(StringVector::from(vec!["UseLatest"])) as _,
|
||||
Arc::new(StringVector::from(vec!["test"])) as _,
|
||||
];
|
||||
let err = f.eval(FunctionContext::mock(), &args).await.unwrap_err();
|
||||
assert_matches!(err, Error::UnsupportedInputDataType { .. });
|
||||
|
||||
// invalid function args
|
||||
let f = ReconcileCatalogFunction;
|
||||
let args = vec![
|
||||
Arc::new(StringVector::from(vec!["UseLatest"])) as _,
|
||||
Arc::new(UInt64Vector::from_slice([10])) as _,
|
||||
Arc::new(StringVector::from(vec!["10"])) as _,
|
||||
];
|
||||
let err = f.eval(FunctionContext::mock(), &args).await.unwrap_err();
|
||||
assert_matches!(err, Error::InvalidFuncArgs { .. });
|
||||
}
|
||||
}
|
||||
198 src/common/function/src/admin/reconcile_database.rs Normal file
@@ -0,0 +1,198 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use api::v1::meta::reconcile_request::Target;
|
||||
use api::v1::meta::{ReconcileDatabase, ReconcileRequest};
|
||||
use common_macro::admin_fn;
|
||||
use common_query::error::{
|
||||
InvalidFuncArgsSnafu, MissingProcedureServiceHandlerSnafu, Result,
|
||||
UnsupportedInputDataTypeSnafu,
|
||||
};
|
||||
use common_query::prelude::{Signature, TypeSignature, Volatility};
|
||||
use common_telemetry::info;
|
||||
use datatypes::prelude::*;
|
||||
use session::context::QueryContextRef;
|
||||
|
||||
use crate::handlers::ProcedureServiceHandlerRef;
|
||||
use crate::helper::{
|
||||
cast_u32, default_parallelism, default_resolve_strategy, get_string_from_params,
|
||||
parse_resolve_strategy,
|
||||
};
|
||||
|
||||
const FN_NAME: &str = "reconcile_database";
|
||||
|
||||
/// A function to reconcile a database.
|
||||
/// Returns the procedure id on success.
|
||||
///
|
||||
/// - `reconcile_database(database_name)`.
|
||||
/// - `reconcile_database(database_name, resolve_strategy)`.
|
||||
/// - `reconcile_database(database_name, resolve_strategy, parallelism)`.
|
||||
///
|
||||
/// The parameters:
|
||||
/// - `database_name`: the database name
|
||||
#[admin_fn(
|
||||
name = ReconcileDatabaseFunction,
|
||||
display_name = reconcile_database,
|
||||
sig_fn = signature,
|
||||
ret = string
|
||||
)]
|
||||
pub(crate) async fn reconcile_database(
|
||||
procedure_service_handler: &ProcedureServiceHandlerRef,
|
||||
query_ctx: &QueryContextRef,
|
||||
params: &[ValueRef<'_>],
|
||||
) -> Result<Value> {
|
||||
let (database_name, resolve_strategy, parallelism) = match params.len() {
|
||||
1 => (
|
||||
get_string_from_params(params, 0, FN_NAME)?,
|
||||
default_resolve_strategy(),
|
||||
default_parallelism(),
|
||||
),
|
||||
2 => (
|
||||
get_string_from_params(params, 0, FN_NAME)?,
|
||||
parse_resolve_strategy(get_string_from_params(params, 1, FN_NAME)?)?,
|
||||
default_parallelism(),
|
||||
),
|
||||
3 => {
|
||||
let Some(parallelism) = cast_u32(¶ms[2])? else {
|
||||
return UnsupportedInputDataTypeSnafu {
|
||||
function: FN_NAME,
|
||||
datatypes: params.iter().map(|v| v.data_type()).collect::<Vec<_>>(),
|
||||
}
|
||||
.fail();
|
||||
};
|
||||
(
|
||||
get_string_from_params(params, 0, FN_NAME)?,
|
||||
parse_resolve_strategy(get_string_from_params(params, 1, FN_NAME)?)?,
|
||||
parallelism,
|
||||
)
|
||||
}
|
||||
size => {
|
||||
return InvalidFuncArgsSnafu {
|
||||
err_msg: format!(
|
||||
"The length of the args is not correct, expect 1, 2 or 3, have: {}",
|
||||
size
|
||||
),
|
||||
}
|
||||
.fail();
|
||||
}
|
||||
};
|
||||
info!(
|
||||
"Reconciling database: {}, resolve_strategy: {:?}, parallelism: {}",
|
||||
database_name, resolve_strategy, parallelism
|
||||
);
|
||||
let pid = procedure_service_handler
|
||||
.reconcile(ReconcileRequest {
|
||||
target: Some(Target::ReconcileDatabase(ReconcileDatabase {
|
||||
catalog_name: query_ctx.current_catalog().to_string(),
|
||||
database_name: database_name.to_string(),
|
||||
parallelism,
|
||||
resolve_strategy: resolve_strategy as i32,
|
||||
})),
|
||||
..Default::default()
|
||||
})
|
||||
.await?;
|
||||
match pid {
|
||||
Some(pid) => Ok(Value::from(pid)),
|
||||
None => Ok(Value::Null),
|
||||
}
|
||||
}
|
||||
|
||||
fn signature() -> Signature {
|
||||
let nums = ConcreteDataType::numerics();
|
||||
let mut signs = Vec::with_capacity(2 + nums.len());
|
||||
signs.extend([
|
||||
// reconcile_database(database_name)
|
||||
TypeSignature::Exact(vec![ConcreteDataType::string_datatype()]),
|
||||
// reconcile_database(database_name, resolve_strategy)
|
||||
TypeSignature::Exact(vec![
|
||||
ConcreteDataType::string_datatype(),
|
||||
ConcreteDataType::string_datatype(),
|
||||
]),
|
||||
]);
|
||||
for sign in nums {
|
||||
// reconcile_database(database_name, resolve_strategy, parallelism)
|
||||
signs.push(TypeSignature::Exact(vec![
|
||||
ConcreteDataType::string_datatype(),
|
||||
ConcreteDataType::string_datatype(),
|
||||
sign,
|
||||
]));
|
||||
}
|
||||
Signature::one_of(signs, Volatility::Immutable)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::assert_matches::assert_matches;
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_query::error::Error;
|
||||
use datatypes::vectors::{StringVector, UInt32Vector, VectorRef};
|
||||
|
||||
use crate::admin::reconcile_database::ReconcileDatabaseFunction;
|
||||
use crate::function::{AsyncFunction, FunctionContext};
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_reconcile_catalog() {
|
||||
common_telemetry::init_default_ut_logging();
|
||||
|
||||
// reconcile_database(database_name)
|
||||
let f = ReconcileDatabaseFunction;
|
||||
let args = vec![Arc::new(StringVector::from(vec!["test"])) as _];
|
||||
let result = f.eval(FunctionContext::mock(), &args).await.unwrap();
|
||||
let expect: VectorRef = Arc::new(StringVector::from(vec!["test_pid"]));
|
||||
assert_eq!(expect, result);
|
||||
|
||||
// reconcile_database(database_name, resolve_strategy)
|
||||
let f = ReconcileDatabaseFunction;
|
||||
let args = vec![
|
||||
Arc::new(StringVector::from(vec!["test"])) as _,
|
||||
Arc::new(StringVector::from(vec!["UseLatest"])) as _,
|
||||
];
|
||||
let result = f.eval(FunctionContext::mock(), &args).await.unwrap();
|
||||
let expect: VectorRef = Arc::new(StringVector::from(vec!["test_pid"]));
|
||||
assert_eq!(expect, result);
|
||||
|
||||
// reconcile_database(database_name, resolve_strategy, parallelism)
|
||||
let f = ReconcileDatabaseFunction;
|
||||
let args = vec![
|
||||
Arc::new(StringVector::from(vec!["test"])) as _,
|
||||
Arc::new(StringVector::from(vec!["UseLatest"])) as _,
|
||||
Arc::new(UInt32Vector::from_slice([10])) as _,
|
||||
];
|
||||
let result = f.eval(FunctionContext::mock(), &args).await.unwrap();
|
||||
let expect: VectorRef = Arc::new(StringVector::from(vec!["test_pid"]));
|
||||
assert_eq!(expect, result);
|
||||
|
||||
// invalid function args
|
||||
let f = ReconcileDatabaseFunction;
|
||||
let args = vec![
|
||||
Arc::new(StringVector::from(vec!["UseLatest"])) as _,
|
||||
Arc::new(UInt32Vector::from_slice([10])) as _,
|
||||
Arc::new(StringVector::from(vec!["v1"])) as _,
|
||||
Arc::new(StringVector::from(vec!["v2"])) as _,
|
||||
];
|
||||
let err = f.eval(FunctionContext::mock(), &args).await.unwrap_err();
|
||||
assert_matches!(err, Error::InvalidFuncArgs { .. });
|
||||
|
||||
// unsupported input data type
|
||||
let f = ReconcileDatabaseFunction;
|
||||
let args = vec![
|
||||
Arc::new(StringVector::from(vec!["UseLatest"])) as _,
|
||||
Arc::new(UInt32Vector::from_slice([10])) as _,
|
||||
Arc::new(StringVector::from(vec!["v1"])) as _,
|
||||
];
|
||||
let err = f.eval(FunctionContext::mock(), &args).await.unwrap_err();
|
||||
assert_matches!(err, Error::UnsupportedInputDataType { .. });
|
||||
}
|
||||
}
|
||||
149 src/common/function/src/admin/reconcile_table.rs Normal file
@@ -0,0 +1,149 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use api::v1::meta::reconcile_request::Target;
|
||||
use api::v1::meta::{ReconcileRequest, ReconcileTable, ResolveStrategy};
|
||||
use common_catalog::format_full_table_name;
|
||||
use common_error::ext::BoxedError;
|
||||
use common_macro::admin_fn;
|
||||
use common_query::error::{
|
||||
MissingProcedureServiceHandlerSnafu, Result, TableMutationSnafu, UnsupportedInputDataTypeSnafu,
|
||||
};
|
||||
use common_query::prelude::{Signature, TypeSignature, Volatility};
|
||||
use common_telemetry::info;
|
||||
use datatypes::prelude::*;
|
||||
use session::context::QueryContextRef;
|
||||
use session::table_name::table_name_to_full_name;
|
||||
use snafu::ResultExt;
|
||||
|
||||
use crate::handlers::ProcedureServiceHandlerRef;
|
||||
use crate::helper::parse_resolve_strategy;
|
||||
|
||||
const FN_NAME: &str = "reconcile_table";
|
||||
|
||||
/// A function to reconcile a table.
|
||||
/// Returns the procedure id on success.
|
||||
///
|
||||
/// - `reconcile_table(table_name)`.
|
||||
/// - `reconcile_table(table_name, resolve_strategy)`.
|
||||
///
|
||||
/// The parameters:
|
||||
/// - `table_name`: the table name
|
||||
#[admin_fn(
|
||||
name = ReconcileTableFunction,
|
||||
display_name = reconcile_table,
|
||||
sig_fn = signature,
|
||||
ret = string
|
||||
)]
|
||||
pub(crate) async fn reconcile_table(
|
||||
procedure_service_handler: &ProcedureServiceHandlerRef,
|
||||
query_ctx: &QueryContextRef,
|
||||
params: &[ValueRef<'_>],
|
||||
) -> Result<Value> {
|
||||
let (table_name, resolve_strategy) = match params {
|
||||
[ValueRef::String(table_name)] => (table_name, ResolveStrategy::UseLatest),
|
||||
[ValueRef::String(table_name), ValueRef::String(resolve_strategy)] => {
|
||||
(table_name, parse_resolve_strategy(resolve_strategy)?)
|
||||
}
|
||||
_ => {
|
||||
return UnsupportedInputDataTypeSnafu {
|
||||
function: FN_NAME,
|
||||
datatypes: params.iter().map(|v| v.data_type()).collect::<Vec<_>>(),
|
||||
}
|
||||
.fail()
|
||||
}
|
||||
};
|
||||
let (catalog_name, schema_name, table_name) = table_name_to_full_name(table_name, query_ctx)
|
||||
.map_err(BoxedError::new)
|
||||
.context(TableMutationSnafu)?;
|
||||
info!(
|
||||
"Reconciling table: {} with resolve_strategy: {:?}",
|
||||
format_full_table_name(&catalog_name, &schema_name, &table_name),
|
||||
resolve_strategy
|
||||
);
|
||||
let pid = procedure_service_handler
|
||||
.reconcile(ReconcileRequest {
|
||||
target: Some(Target::ReconcileTable(ReconcileTable {
|
||||
catalog_name,
|
||||
schema_name,
|
||||
table_name,
|
||||
resolve_strategy: resolve_strategy as i32,
|
||||
})),
|
||||
..Default::default()
|
||||
})
|
||||
.await?;
|
||||
match pid {
|
||||
Some(pid) => Ok(Value::from(pid)),
|
||||
None => Ok(Value::Null),
|
||||
}
|
||||
}
|
||||
|
||||
fn signature() -> Signature {
|
||||
Signature::one_of(
|
||||
vec![
|
||||
// reconcile_table(table_name)
|
||||
TypeSignature::Exact(vec![ConcreteDataType::string_datatype()]),
|
||||
// reconcile_table(table_name, resolve_strategy)
|
||||
TypeSignature::Exact(vec![
|
||||
ConcreteDataType::string_datatype(),
|
||||
ConcreteDataType::string_datatype(),
|
||||
]),
|
||||
],
|
||||
Volatility::Immutable,
|
||||
)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::assert_matches::assert_matches;
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_query::error::Error;
|
||||
use datatypes::vectors::{StringVector, VectorRef};
|
||||
|
||||
use crate::admin::reconcile_table::ReconcileTableFunction;
|
||||
use crate::function::{AsyncFunction, FunctionContext};
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_reconcile_table() {
|
||||
common_telemetry::init_default_ut_logging();
|
||||
|
||||
// reconcile_table(table_name)
|
||||
let f = ReconcileTableFunction;
|
||||
let args = vec![Arc::new(StringVector::from(vec!["test"])) as _];
|
||||
let result = f.eval(FunctionContext::mock(), &args).await.unwrap();
|
||||
let expect: VectorRef = Arc::new(StringVector::from(vec!["test_pid"]));
|
||||
assert_eq!(expect, result);
|
||||
|
||||
// reconcile_table(table_name, resolve_strategy)
|
||||
let f = ReconcileTableFunction;
|
||||
let args = vec![
|
||||
Arc::new(StringVector::from(vec!["test"])) as _,
|
||||
Arc::new(StringVector::from(vec!["UseMetasrv"])) as _,
|
||||
];
|
||||
let result = f.eval(FunctionContext::mock(), &args).await.unwrap();
|
||||
let expect: VectorRef = Arc::new(StringVector::from(vec!["test_pid"]));
|
||||
assert_eq!(expect, result);
|
||||
|
||||
// unsupported input data type
|
||||
let f = ReconcileTableFunction;
|
||||
let args = vec![
|
||||
Arc::new(StringVector::from(vec!["test"])) as _,
|
||||
Arc::new(StringVector::from(vec!["UseMetasrv"])) as _,
|
||||
Arc::new(StringVector::from(vec!["10"])) as _,
|
||||
];
|
||||
let err = f.eval(FunctionContext::mock(), &args).await.unwrap_err();
|
||||
assert_matches!(err, Error::UnsupportedInputDataType { .. });
|
||||
}
|
||||
}
|
||||
@@ -17,3 +17,5 @@ pub mod count_hash;
|
||||
#[cfg(feature = "geo")]
|
||||
pub mod geo;
|
||||
pub mod vector;
|
||||
|
||||
pub mod aggr_wrapper;
|
||||
|
||||
575 src/common/function/src/aggrs/aggr_wrapper.rs Normal file
@@ -0,0 +1,575 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//! Wrapper for making aggregate functions out of state/merge functions of original aggregate functions.
|
||||
//!
|
||||
//! i.e. for an aggregate function `foo`, we will have a state function `foo_state` and a merge function `foo_merge`.
|
||||
//!
|
||||
//! `foo_state`'s input args is the same as `foo`'s, and its output is a state object.
|
||||
//! Note that `foo_state` might have multiple output columns, so it's a struct array
|
||||
//! that each output column is a struct field.
|
||||
//! `foo_merge`'s input arg is the same as `foo_state`'s output, and its output is the same as `foo`'s input.
|
||||
//!
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use arrow::array::StructArray;
|
||||
use arrow_schema::Fields;
|
||||
use common_telemetry::debug;
|
||||
use datafusion::functions_aggregate::all_default_aggregate_functions;
|
||||
use datafusion::optimizer::analyzer::type_coercion::TypeCoercion;
|
||||
use datafusion::optimizer::AnalyzerRule;
|
||||
use datafusion::physical_planner::create_aggregate_expr_and_maybe_filter;
|
||||
use datafusion_common::{Column, ScalarValue};
|
||||
use datafusion_expr::expr::AggregateFunction;
|
||||
use datafusion_expr::function::StateFieldsArgs;
|
||||
use datafusion_expr::{
|
||||
Accumulator, Aggregate, AggregateUDF, AggregateUDFImpl, Expr, ExprSchemable, LogicalPlan,
|
||||
Signature,
|
||||
};
|
||||
use datafusion_physical_expr::aggregate::AggregateFunctionExpr;
|
||||
use datatypes::arrow::datatypes::{DataType, Field};
|
||||
|
||||
use crate::function_registry::FunctionRegistry;
|
||||
|
||||
/// Returns the name of the state function for the given aggregate function name.
|
||||
/// The state function is used to compute the state of the aggregate function.
|
||||
/// The state function's name is in the format `__<aggr_name>_state`.
|
||||
pub fn aggr_state_func_name(aggr_name: &str) -> String {
|
||||
format!("__{}_state", aggr_name)
|
||||
}
|
||||
|
||||
/// Returns the name of the merge function for the given aggregate function name.
|
||||
/// The merge function is used to merge the states of the state functions.
|
||||
/// The merge function's name is in the format `__<aggr_name>_merge`.
|
||||
pub fn aggr_merge_func_name(aggr_name: &str) -> String {
|
||||
format!("__{}_merge", aggr_name)
|
||||
}
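// Quick illustration (not part of this change) of the naming scheme defined above.
#[cfg(test)]
mod naming_example {
    use super::{aggr_merge_func_name, aggr_state_func_name};

    #[test]
    fn state_and_merge_names() {
        assert_eq!(aggr_state_func_name("sum"), "__sum_state");
        assert_eq!(aggr_merge_func_name("sum"), "__sum_merge");
    }
}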
|
||||
|
||||
/// A wrapper to make an aggregate function out of the state and merge functions of the original aggregate function.
|
||||
/// It contains the original aggregate function, the state functions, and the merge function.
|
||||
///
|
||||
/// Notice a state function may have multiple output columns, so its return type is always a struct array, and the merge function is used to merge the states produced by the state function.
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct StateMergeHelper;
|
||||
|
||||
/// A struct to hold the two aggregate plans, one for the state function(lower) and one for the merge function(upper).
|
||||
#[allow(unused)]
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct StepAggrPlan {
|
||||
/// Upper merge plan, which is the aggregate plan that merges the states of the state function.
|
||||
pub upper_merge: LogicalPlan,
|
||||
/// Lower state plan, which is the aggregate plan that computes the state of the aggregate function.
|
||||
pub lower_state: LogicalPlan,
|
||||
}
|
||||
|
||||
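// Illustration (not in the original source): for `SELECT sum(number) FROM t`, the
// lower plan computes `__sum_state(number)` and the upper plan computes
// `__sum_merge(__sum_state(number))`, aliased back to `sum(number)` so that
// parent plans keep seeing the original output schema.
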
pub fn get_aggr_func(expr: &Expr) -> Option<&datafusion_expr::expr::AggregateFunction> {
|
||||
let mut expr_ref = expr;
|
||||
while let Expr::Alias(alias) = expr_ref {
|
||||
expr_ref = &alias.expr;
|
||||
}
|
||||
if let Expr::AggregateFunction(aggr_func) = expr_ref {
|
||||
Some(aggr_func)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
impl StateMergeHelper {
|
||||
/// Registers the `state` function for every supported aggregate function.
/// Note that the `merge` function can't be registered here, as it has to be created from the original aggregate function with concrete input types.
pub fn register(registry: &FunctionRegistry) {
|
||||
let all_default = all_default_aggregate_functions();
|
||||
let greptime_custom_aggr_functions = registry.aggregate_functions();
|
||||
|
||||
// If one of our custom aggregate functions has the same name as a default aggregate function, ours overrides it.
|
||||
let supported = all_default
|
||||
.into_iter()
|
||||
.chain(greptime_custom_aggr_functions.into_iter().map(Arc::new))
|
||||
.collect::<Vec<_>>();
|
||||
debug!(
|
||||
"Registering state functions for supported: {:?}",
|
||||
supported.iter().map(|f| f.name()).collect::<Vec<_>>()
|
||||
);
|
||||
|
||||
let state_func = supported.into_iter().filter_map(|f| {
|
||||
StateWrapper::new((*f).clone())
|
||||
.inspect_err(
|
||||
|e| common_telemetry::error!(e; "Failed to register state function for {:?}", f),
|
||||
)
|
||||
.ok()
|
||||
.map(AggregateUDF::new_from_impl)
|
||||
});
|
||||
|
||||
for func in state_func {
|
||||
registry.register_aggr(func);
|
||||
}
|
||||
}
|
||||
|
||||
/// Split an aggregate plan into two aggregate plans, one for the state function and one for the merge function.
|
||||
pub fn split_aggr_node(aggr_plan: Aggregate) -> datafusion_common::Result<StepAggrPlan> {
|
||||
let aggr = {
|
||||
// Certain aggregate functions need type coercion to work correctly, so analyze the plan first.
|
||||
let aggr_plan = TypeCoercion::new().analyze(
|
||||
LogicalPlan::Aggregate(aggr_plan).clone(),
|
||||
&Default::default(),
|
||||
)?;
|
||||
if let LogicalPlan::Aggregate(aggr) = aggr_plan {
|
||||
aggr
|
||||
} else {
|
||||
return Err(datafusion_common::DataFusionError::Internal(format!(
|
||||
"Failed to coerce expressions in aggregate plan, expected Aggregate, got: {:?}",
|
||||
aggr_plan
|
||||
)));
|
||||
}
|
||||
};
|
||||
let mut lower_aggr_exprs = vec![];
|
||||
let mut upper_aggr_exprs = vec![];
|
||||
|
||||
for aggr_expr in aggr.aggr_expr.iter() {
|
||||
let Some(aggr_func) = get_aggr_func(aggr_expr) else {
|
||||
return Err(datafusion_common::DataFusionError::NotImplemented(format!(
|
||||
"Unsupported aggregate expression for step aggr optimize: {:?}",
|
||||
aggr_expr
|
||||
)));
|
||||
};
|
||||
|
||||
let original_input_types = aggr_func
|
||||
.args
|
||||
.iter()
|
||||
.map(|e| e.get_type(&aggr.input.schema()))
|
||||
.collect::<Result<Vec<_>, _>>()?;
|
||||
|
||||
// first create the state function from the original aggregate function.
|
||||
let state_func = StateWrapper::new((*aggr_func.func).clone())?;
|
||||
|
||||
let expr = AggregateFunction {
|
||||
func: Arc::new(state_func.into()),
|
||||
args: aggr_func.args.clone(),
|
||||
distinct: aggr_func.distinct,
|
||||
filter: aggr_func.filter.clone(),
|
||||
order_by: aggr_func.order_by.clone(),
|
||||
null_treatment: aggr_func.null_treatment,
|
||||
};
|
||||
let expr = Expr::AggregateFunction(expr);
|
||||
let lower_state_output_col_name = expr.schema_name().to_string();
|
||||
|
||||
lower_aggr_exprs.push(expr);
|
||||
|
||||
let (original_phy_expr, _filter, _ordering) = create_aggregate_expr_and_maybe_filter(
|
||||
aggr_expr,
|
||||
aggr.input.schema(),
|
||||
aggr.input.schema().as_arrow(),
|
||||
&Default::default(),
|
||||
)?;
|
||||
|
||||
let merge_func = MergeWrapper::new(
|
||||
(*aggr_func.func).clone(),
|
||||
original_phy_expr,
|
||||
original_input_types,
|
||||
)?;
|
||||
let arg = Expr::Column(Column::new_unqualified(lower_state_output_col_name));
|
||||
let expr = AggregateFunction {
|
||||
func: Arc::new(merge_func.into()),
|
||||
args: vec![arg],
|
||||
distinct: aggr_func.distinct,
|
||||
filter: aggr_func.filter.clone(),
|
||||
order_by: aggr_func.order_by.clone(),
|
||||
null_treatment: aggr_func.null_treatment,
|
||||
};
|
||||
|
||||
// alias to the original aggregate expr's schema name, so parent plan can refer to it
|
||||
// correctly.
|
||||
let expr = Expr::AggregateFunction(expr).alias(aggr_expr.schema_name().to_string());
|
||||
upper_aggr_exprs.push(expr);
|
||||
}
|
||||
|
||||
let mut lower = aggr.clone();
|
||||
lower.aggr_expr = lower_aggr_exprs;
|
||||
let lower_plan = LogicalPlan::Aggregate(lower);
|
||||
|
||||
// update aggregate's output schema
|
||||
let lower_plan = lower_plan.recompute_schema()?;
|
||||
|
||||
let mut upper = aggr.clone();
|
||||
let aggr_plan = LogicalPlan::Aggregate(aggr);
|
||||
upper.aggr_expr = upper_aggr_exprs;
|
||||
upper.input = Arc::new(lower_plan.clone());
|
||||
// the upper plan's output schema should be the same as the original aggregate plan's output schema
|
||||
let upper_check = upper;
|
||||
let upper_plan = LogicalPlan::Aggregate(upper_check).recompute_schema()?;
|
||||
if *upper_plan.schema() != *aggr_plan.schema() {
|
||||
return Err(datafusion_common::DataFusionError::Internal(format!(
|
||||
"Upper aggregate plan's schema is not the same as the original aggregate plan's schema: \n[transformed]:{}\n[original]:{}",
|
||||
upper_plan.schema(), aggr_plan.schema()
|
||||
)));
|
||||
}
|
||||
|
||||
Ok(StepAggrPlan {
|
||||
lower_state: lower_plan,
|
||||
upper_merge: upper_plan,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/// Wrapper to make an aggregate function out of a state function.
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
pub struct StateWrapper {
|
||||
inner: AggregateUDF,
|
||||
name: String,
|
||||
}
|
||||
|
||||
impl StateWrapper {
|
||||
/// Creates a `StateWrapper` from the original aggregate function.
|
||||
pub fn new(inner: AggregateUDF) -> datafusion_common::Result<Self> {
|
||||
let name = aggr_state_func_name(inner.name());
|
||||
Ok(Self { inner, name })
|
||||
}
|
||||
|
||||
pub fn inner(&self) -> &AggregateUDF {
|
||||
&self.inner
|
||||
}
|
||||
|
||||
/// Deduce the return type of the original aggregate function
|
||||
/// based on the accumulator arguments.
|
||||
///
|
||||
pub fn deduce_aggr_return_type(
|
||||
&self,
|
||||
acc_args: &datafusion_expr::function::AccumulatorArgs,
|
||||
) -> datafusion_common::Result<DataType> {
|
||||
let input_exprs = acc_args.exprs;
|
||||
let input_schema = acc_args.schema;
|
||||
let input_types = input_exprs
|
||||
.iter()
|
||||
.map(|e| e.data_type(input_schema))
|
||||
.collect::<Result<Vec<_>, _>>()?;
|
||||
let return_type = self.inner.return_type(&input_types)?;
|
||||
Ok(return_type)
|
||||
}
|
||||
}
|
||||
|
||||
impl AggregateUDFImpl for StateWrapper {
|
||||
fn accumulator<'a, 'b>(
|
||||
&'a self,
|
||||
acc_args: datafusion_expr::function::AccumulatorArgs<'b>,
|
||||
) -> datafusion_common::Result<Box<dyn Accumulator>> {
|
||||
// fix and recover proper acc args for the original aggregate function.
|
||||
let state_type = acc_args.return_type.clone();
|
||||
let inner = {
|
||||
let old_return_type = self.deduce_aggr_return_type(&acc_args)?;
|
||||
let acc_args = datafusion_expr::function::AccumulatorArgs {
|
||||
return_type: &old_return_type,
|
||||
schema: acc_args.schema,
|
||||
ignore_nulls: acc_args.ignore_nulls,
|
||||
ordering_req: acc_args.ordering_req,
|
||||
is_reversed: acc_args.is_reversed,
|
||||
name: acc_args.name,
|
||||
is_distinct: acc_args.is_distinct,
|
||||
exprs: acc_args.exprs,
|
||||
};
|
||||
self.inner.accumulator(acc_args)?
|
||||
};
|
||||
Ok(Box::new(StateAccum::new(inner, state_type)?))
|
||||
}
|
||||
|
||||
fn as_any(&self) -> &dyn std::any::Any {
|
||||
self
|
||||
}
|
||||
fn name(&self) -> &str {
|
||||
self.name.as_str()
|
||||
}
|
||||
|
||||
fn is_nullable(&self) -> bool {
|
||||
self.inner.is_nullable()
|
||||
}
|
||||
|
||||
/// Return state_fields as the output struct type.
|
||||
///
|
||||
fn return_type(&self, arg_types: &[DataType]) -> datafusion_common::Result<DataType> {
|
||||
let old_return_type = self.inner.return_type(arg_types)?;
|
||||
let state_fields_args = StateFieldsArgs {
|
||||
name: self.inner().name(),
|
||||
input_types: arg_types,
|
||||
return_type: &old_return_type,
|
||||
// TODO(discord9): how to get this?, probably ok?
|
||||
ordering_fields: &[],
|
||||
is_distinct: false,
|
||||
};
|
||||
let state_fields = self.inner.state_fields(state_fields_args)?;
|
||||
let struct_field = DataType::Struct(state_fields.into());
|
||||
Ok(struct_field)
|
||||
}
|
||||
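// For example (illustrative, mirroring the avg test below): wrapping `avg` over a
// Float64 column yields the struct type
// Struct("avg[count]": UInt64, "avg[sum]": Float64) as the state function's return type.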
|
||||
/// The state function's output fields are the same as the original aggregate function's state fields.
|
||||
fn state_fields(
|
||||
&self,
|
||||
args: datafusion_expr::function::StateFieldsArgs,
|
||||
) -> datafusion_common::Result<Vec<Field>> {
|
||||
let old_return_type = self.inner.return_type(args.input_types)?;
|
||||
let state_fields_args = StateFieldsArgs {
|
||||
name: args.name,
|
||||
input_types: args.input_types,
|
||||
return_type: &old_return_type,
|
||||
ordering_fields: args.ordering_fields,
|
||||
is_distinct: args.is_distinct,
|
||||
};
|
||||
self.inner.state_fields(state_fields_args)
|
||||
}
|
||||
|
||||
/// The state function's signature is the same as the original aggregate function's signature,
|
||||
fn signature(&self) -> &Signature {
|
||||
self.inner.signature()
|
||||
}
|
||||
|
||||
/// Type coercion simply delegates to the original aggregate function's coercion rules.
|
||||
fn coerce_types(&self, arg_types: &[DataType]) -> datafusion_common::Result<Vec<DataType>> {
|
||||
self.inner.coerce_types(arg_types)
|
||||
}
|
||||
}
|
||||
|
||||
/// The wrapper's input is the same as the original aggregate function's input,
|
||||
/// and the output is the state function's output.
|
||||
#[derive(Debug)]
|
||||
pub struct StateAccum {
|
||||
inner: Box<dyn Accumulator>,
|
||||
state_fields: Fields,
|
||||
}
|
||||
|
||||
impl StateAccum {
|
||||
pub fn new(
|
||||
inner: Box<dyn Accumulator>,
|
||||
state_type: DataType,
|
||||
) -> datafusion_common::Result<Self> {
|
||||
let DataType::Struct(fields) = state_type else {
|
||||
return Err(datafusion_common::DataFusionError::Internal(format!(
|
||||
"Expected a struct type for state, got: {:?}",
|
||||
state_type
|
||||
)));
|
||||
};
|
||||
Ok(Self {
|
||||
inner,
|
||||
state_fields: fields,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl Accumulator for StateAccum {
|
||||
fn evaluate(&mut self) -> datafusion_common::Result<ScalarValue> {
|
||||
let state = self.inner.state()?;
|
||||
|
||||
let array = state
|
||||
.iter()
|
||||
.map(|s| s.to_array())
|
||||
.collect::<Result<Vec<_>, _>>()?;
|
||||
let struct_array = StructArray::try_new(self.state_fields.clone(), array, None)?;
|
||||
Ok(ScalarValue::Struct(Arc::new(struct_array)))
|
||||
}
|
||||
|
||||
fn merge_batch(
|
||||
&mut self,
|
||||
states: &[datatypes::arrow::array::ArrayRef],
|
||||
) -> datafusion_common::Result<()> {
|
||||
self.inner.merge_batch(states)
|
||||
}
|
||||
|
||||
fn update_batch(
|
||||
&mut self,
|
||||
values: &[datatypes::arrow::array::ArrayRef],
|
||||
) -> datafusion_common::Result<()> {
|
||||
self.inner.update_batch(values)
|
||||
}
|
||||
|
||||
fn size(&self) -> usize {
|
||||
self.inner.size()
|
||||
}
|
||||
|
||||
fn state(&mut self) -> datafusion_common::Result<Vec<ScalarValue>> {
|
||||
self.inner.state()
|
||||
}
|
||||
}
|
||||
|
||||
/// TODO(discord9): mark this function as non-ser/de-able.
///
/// This wrapper shouldn't be registered as a UDAF, as it contains extra data that is not
/// serializable and changes between logical plans.
#[derive(Debug, Clone)]
|
||||
pub struct MergeWrapper {
|
||||
inner: AggregateUDF,
|
||||
name: String,
|
||||
merge_signature: Signature,
|
||||
/// The original physical expression of the aggregate function. We can't store the original aggregate function directly here, as `PhysicalExpr` doesn't implement `Any`.
|
||||
original_phy_expr: Arc<AggregateFunctionExpr>,
|
||||
original_input_types: Vec<DataType>,
|
||||
}
|
||||
impl MergeWrapper {
|
||||
pub fn new(
|
||||
inner: AggregateUDF,
|
||||
original_phy_expr: Arc<AggregateFunctionExpr>,
|
||||
original_input_types: Vec<DataType>,
|
||||
) -> datafusion_common::Result<Self> {
|
||||
let name = aggr_merge_func_name(inner.name());
|
||||
// the input type is actually struct type, which is the state fields of the original aggregate function.
|
||||
let merge_signature = Signature::user_defined(datafusion_expr::Volatility::Immutable);
|
||||
|
||||
Ok(Self {
|
||||
inner,
|
||||
name,
|
||||
merge_signature,
|
||||
original_phy_expr,
|
||||
original_input_types,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn inner(&self) -> &AggregateUDF {
|
||||
&self.inner
|
||||
}
|
||||
}
|
||||
|
||||
impl AggregateUDFImpl for MergeWrapper {
|
||||
fn accumulator<'a, 'b>(
|
||||
&'a self,
|
||||
acc_args: datafusion_expr::function::AccumulatorArgs<'b>,
|
||||
) -> datafusion_common::Result<Box<dyn Accumulator>> {
|
||||
if acc_args.exprs.len() != 1
|
||||
|| !matches!(
|
||||
acc_args.exprs[0].data_type(acc_args.schema)?,
|
||||
DataType::Struct(_)
|
||||
)
|
||||
{
|
||||
return Err(datafusion_common::DataFusionError::Internal(format!(
|
||||
"Expected one struct type as input, got: {:?}",
|
||||
acc_args.schema
|
||||
)));
|
||||
}
|
||||
let input_type = acc_args.exprs[0].data_type(acc_args.schema)?;
|
||||
let DataType::Struct(fields) = input_type else {
|
||||
return Err(datafusion_common::DataFusionError::Internal(format!(
|
||||
"Expected a struct type for input, got: {:?}",
|
||||
input_type
|
||||
)));
|
||||
};
|
||||
|
||||
let inner_accum = self.original_phy_expr.create_accumulator()?;
|
||||
Ok(Box::new(MergeAccum::new(inner_accum, &fields)))
|
||||
}
|
||||
|
||||
fn as_any(&self) -> &dyn std::any::Any {
|
||||
self
|
||||
}
|
||||
fn name(&self) -> &str {
|
||||
self.name.as_str()
|
||||
}
|
||||
|
||||
fn is_nullable(&self) -> bool {
|
||||
self.inner.is_nullable()
|
||||
}
|
||||
|
||||
/// Notice that here `arg_types` are actually the data types of the `state_fields`,
/// so we return a fixed return type instead of deriving it from `arg_types`.
|
||||
fn return_type(&self, _arg_types: &[DataType]) -> datafusion_common::Result<DataType> {
|
||||
// The return type is the same as the original aggregate function's return type.
|
||||
let ret_type = self.inner.return_type(&self.original_input_types)?;
|
||||
Ok(ret_type)
|
||||
}
|
||||
fn signature(&self) -> &Signature {
|
||||
&self.merge_signature
|
||||
}
|
||||
|
||||
/// Type coercion only verifies that the input is a single struct type (the state struct); the optimizer should already have produced it.
|
||||
fn coerce_types(&self, arg_types: &[DataType]) -> datafusion_common::Result<Vec<DataType>> {
|
||||
// just check if the arg_types are only one and is struct array
|
||||
if arg_types.len() != 1 || !matches!(arg_types.first(), Some(DataType::Struct(_))) {
|
||||
return Err(datafusion_common::DataFusionError::Internal(format!(
|
||||
"Expected one struct type as input, got: {:?}",
|
||||
arg_types
|
||||
)));
|
||||
}
|
||||
Ok(arg_types.to_vec())
|
||||
}
|
||||
|
||||
/// Just return the original aggregate function's state fields.
|
||||
fn state_fields(
|
||||
&self,
|
||||
_args: datafusion_expr::function::StateFieldsArgs,
|
||||
) -> datafusion_common::Result<Vec<Field>> {
|
||||
self.original_phy_expr.state_fields()
|
||||
}
|
||||
}
|
||||
|
||||
/// The merge accumulator. It changes `update_batch`'s behavior to accept a single struct array
/// containing the state fields of the original aggregate function and merges those states into
/// the original accumulator; its output is the same as the original aggregate function's.
#[derive(Debug)]
|
||||
pub struct MergeAccum {
|
||||
inner: Box<dyn Accumulator>,
|
||||
state_fields: Fields,
|
||||
}
|
||||
|
||||
impl MergeAccum {
|
||||
pub fn new(inner: Box<dyn Accumulator>, state_fields: &Fields) -> Self {
|
||||
Self {
|
||||
inner,
|
||||
state_fields: state_fields.clone(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Accumulator for MergeAccum {
|
||||
fn evaluate(&mut self) -> datafusion_common::Result<ScalarValue> {
|
||||
self.inner.evaluate()
|
||||
}
|
||||
|
||||
fn merge_batch(&mut self, states: &[arrow::array::ArrayRef]) -> datafusion_common::Result<()> {
|
||||
self.inner.merge_batch(states)
|
||||
}
|
||||
|
||||
fn update_batch(&mut self, values: &[arrow::array::ArrayRef]) -> datafusion_common::Result<()> {
|
||||
let value = values.first().ok_or_else(|| {
|
||||
datafusion_common::DataFusionError::Internal("No values provided for merge".to_string())
|
||||
})?;
|
||||
// The input values are states from other accumulators, so we merge them.
|
||||
let struct_arr = value
|
||||
.as_any()
|
||||
.downcast_ref::<StructArray>()
|
||||
.ok_or_else(|| {
|
||||
datafusion_common::DataFusionError::Internal(format!(
|
||||
"Expected StructArray, got: {:?}",
|
||||
value.data_type()
|
||||
))
|
||||
})?;
|
||||
let fields = struct_arr.fields();
|
||||
if fields != &self.state_fields {
|
||||
return Err(datafusion_common::DataFusionError::Internal(format!(
|
||||
"Expected state fields: {:?}, got: {:?}",
|
||||
self.state_fields, fields
|
||||
)));
|
||||
}
|
||||
|
||||
// Now the fields are known to match, so we can merge the batch
// by passing the columns through directly, as their order is the same.
|
||||
let state_columns = struct_arr.columns();
|
||||
self.inner.merge_batch(state_columns)
|
||||
}
|
||||
|
||||
fn size(&self) -> usize {
|
||||
self.inner.size()
|
||||
}
|
||||
|
||||
fn state(&mut self) -> datafusion_common::Result<Vec<ScalarValue>> {
|
||||
self.inner.state()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests;
|
||||
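A minimal sketch of how the wrapper above is meant to be driven, assuming the `aggr_wrapper` items (`StateMergeHelper`, `StepAggrPlan`) are in scope; the `scan` input stands for any logical plan producing an Int64 column named `number`, mirroring the tests below:

use std::sync::Arc;

use datafusion_common::Column;
use datafusion_expr::expr::AggregateFunction;
use datafusion_expr::{Aggregate, Expr, LogicalPlan};

/// Builds `SELECT sum(number) FROM scan` and splits it into the lower
/// `__sum_state` aggregate and the upper `__sum_merge` aggregate.
fn split_sum(scan: LogicalPlan) -> datafusion_common::Result<StepAggrPlan> {
    let sum = (*datafusion::functions_aggregate::sum::sum_udaf()).clone();
    let aggr = Aggregate::try_new(
        Arc::new(scan),
        vec![],
        vec![Expr::AggregateFunction(AggregateFunction::new_udf(
            Arc::new(sum),
            vec![Expr::Column(Column::new_unqualified("number"))],
            false,
            None,
            None,
            None,
        ))],
    )?;
    StateMergeHelper::split_aggr_node(aggr)
}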
src/common/function/src/aggrs/aggr_wrapper/tests.rs (new file, 804 lines)
@@ -0,0 +1,804 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::any::Any;
|
||||
use std::pin::Pin;
|
||||
use std::sync::{Arc, Mutex};
|
||||
use std::task::{Context, Poll};
|
||||
|
||||
use arrow::array::{ArrayRef, Float64Array, Int64Array, UInt64Array};
|
||||
use arrow::record_batch::RecordBatch;
|
||||
use arrow_schema::SchemaRef;
|
||||
use datafusion::catalog::{Session, TableProvider};
|
||||
use datafusion::datasource::DefaultTableSource;
|
||||
use datafusion::execution::{RecordBatchStream, SendableRecordBatchStream, TaskContext};
|
||||
use datafusion::functions_aggregate::average::avg_udaf;
|
||||
use datafusion::functions_aggregate::sum::sum_udaf;
|
||||
use datafusion::optimizer::analyzer::type_coercion::TypeCoercion;
|
||||
use datafusion::optimizer::AnalyzerRule;
|
||||
use datafusion::physical_plan::aggregates::AggregateExec;
|
||||
use datafusion::physical_plan::execution_plan::{Boundedness, EmissionType};
|
||||
use datafusion::physical_plan::{DisplayAs, DisplayFormatType, ExecutionPlan, PlanProperties};
|
||||
use datafusion::physical_planner::{DefaultPhysicalPlanner, PhysicalPlanner};
|
||||
use datafusion::prelude::SessionContext;
|
||||
use datafusion_common::{Column, TableReference};
|
||||
use datafusion_expr::expr::AggregateFunction;
|
||||
use datafusion_expr::sqlparser::ast::NullTreatment;
|
||||
use datafusion_expr::{Aggregate, Expr, LogicalPlan, SortExpr, TableScan};
|
||||
use datafusion_physical_expr::aggregate::AggregateExprBuilder;
|
||||
use datafusion_physical_expr::{EquivalenceProperties, Partitioning};
|
||||
use datatypes::arrow_array::StringArray;
|
||||
use futures::{Stream, StreamExt as _};
|
||||
use pretty_assertions::assert_eq;
|
||||
|
||||
use super::*;
|
||||
use crate::aggrs::approximate::hll::HllState;
|
||||
use crate::aggrs::approximate::uddsketch::UddSketchState;
|
||||
use crate::aggrs::count_hash::CountHash;
|
||||
use crate::function::Function as _;
|
||||
use crate::scalars::hll_count::HllCalcFunction;
|
||||
use crate::scalars::uddsketch_calc::UddSketchCalcFunction;
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct MockInputExec {
|
||||
input: Vec<RecordBatch>,
|
||||
schema: SchemaRef,
|
||||
properties: PlanProperties,
|
||||
}
|
||||
|
||||
impl MockInputExec {
|
||||
pub fn new(input: Vec<RecordBatch>, schema: SchemaRef) -> Self {
|
||||
Self {
|
||||
properties: PlanProperties::new(
|
||||
EquivalenceProperties::new(schema.clone()),
|
||||
Partitioning::UnknownPartitioning(1),
|
||||
EmissionType::Incremental,
|
||||
Boundedness::Bounded,
|
||||
),
|
||||
input,
|
||||
schema,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl DisplayAs for MockInputExec {
|
||||
fn fmt_as(&self, _t: DisplayFormatType, _f: &mut std::fmt::Formatter) -> std::fmt::Result {
|
||||
unimplemented!()
|
||||
}
|
||||
}
|
||||
|
||||
impl ExecutionPlan for MockInputExec {
|
||||
fn name(&self) -> &str {
|
||||
"MockInputExec"
|
||||
}
|
||||
|
||||
fn as_any(&self) -> &dyn Any {
|
||||
self
|
||||
}
|
||||
|
||||
fn properties(&self) -> &PlanProperties {
|
||||
&self.properties
|
||||
}
|
||||
|
||||
fn children(&self) -> Vec<&Arc<dyn ExecutionPlan>> {
|
||||
vec![]
|
||||
}
|
||||
|
||||
fn with_new_children(
|
||||
self: Arc<Self>,
|
||||
_children: Vec<Arc<dyn ExecutionPlan>>,
|
||||
) -> datafusion_common::Result<Arc<dyn ExecutionPlan>> {
|
||||
Ok(self)
|
||||
}
|
||||
|
||||
fn execute(
|
||||
&self,
|
||||
_partition: usize,
|
||||
_context: Arc<TaskContext>,
|
||||
) -> datafusion_common::Result<SendableRecordBatchStream> {
|
||||
let stream = MockStream {
|
||||
stream: self.input.clone(),
|
||||
schema: self.schema.clone(),
|
||||
idx: 0,
|
||||
};
|
||||
Ok(Box::pin(stream))
|
||||
}
|
||||
}
|
||||
|
||||
struct MockStream {
|
||||
stream: Vec<RecordBatch>,
|
||||
schema: SchemaRef,
|
||||
idx: usize,
|
||||
}
|
||||
|
||||
impl Stream for MockStream {
|
||||
type Item = datafusion_common::Result<RecordBatch>;
|
||||
fn poll_next(
|
||||
mut self: Pin<&mut Self>,
|
||||
_cx: &mut Context<'_>,
|
||||
) -> Poll<Option<datafusion_common::Result<RecordBatch>>> {
|
||||
if self.idx < self.stream.len() {
|
||||
let ret = self.stream[self.idx].clone();
|
||||
self.idx += 1;
|
||||
Poll::Ready(Some(Ok(ret)))
|
||||
} else {
|
||||
Poll::Ready(None)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl RecordBatchStream for MockStream {
|
||||
fn schema(&self) -> SchemaRef {
|
||||
self.schema.clone()
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
struct DummyTableProvider {
|
||||
schema: Arc<arrow_schema::Schema>,
|
||||
record_batch: Mutex<Option<RecordBatch>>,
|
||||
}
|
||||
|
||||
impl DummyTableProvider {
|
||||
#[allow(unused)]
|
||||
pub fn new(schema: Arc<arrow_schema::Schema>, record_batch: Option<RecordBatch>) -> Self {
|
||||
Self {
|
||||
schema,
|
||||
record_batch: Mutex::new(record_batch),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for DummyTableProvider {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
schema: Arc::new(arrow_schema::Schema::new(vec![Field::new(
|
||||
"number",
|
||||
DataType::Int64,
|
||||
true,
|
||||
)])),
|
||||
record_batch: Mutex::new(None),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl TableProvider for DummyTableProvider {
|
||||
fn as_any(&self) -> &dyn std::any::Any {
|
||||
self
|
||||
}
|
||||
|
||||
fn schema(&self) -> Arc<arrow_schema::Schema> {
|
||||
self.schema.clone()
|
||||
}
|
||||
|
||||
fn table_type(&self) -> datafusion_expr::TableType {
|
||||
datafusion_expr::TableType::Base
|
||||
}
|
||||
|
||||
async fn scan(
|
||||
&self,
|
||||
_state: &dyn Session,
|
||||
_projection: Option<&Vec<usize>>,
|
||||
_filters: &[Expr],
|
||||
_limit: Option<usize>,
|
||||
) -> datafusion::error::Result<Arc<dyn ExecutionPlan>> {
|
||||
let input: Vec<RecordBatch> = self
|
||||
.record_batch
|
||||
.lock()
|
||||
.unwrap()
|
||||
.clone()
|
||||
.map(|r| vec![r])
|
||||
.unwrap_or_default();
|
||||
Ok(Arc::new(MockInputExec::new(input, self.schema.clone())))
|
||||
}
|
||||
}
|
||||
|
||||
fn dummy_table_scan() -> LogicalPlan {
|
||||
let table_provider = Arc::new(DummyTableProvider::default());
|
||||
let table_source = DefaultTableSource::new(table_provider);
|
||||
LogicalPlan::TableScan(
|
||||
TableScan::try_new(
|
||||
TableReference::bare("Number"),
|
||||
Arc::new(table_source),
|
||||
None,
|
||||
vec![],
|
||||
None,
|
||||
)
|
||||
.unwrap(),
|
||||
)
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_sum_udaf() {
|
||||
let ctx = SessionContext::new();
|
||||
|
||||
let sum = datafusion::functions_aggregate::sum::sum_udaf();
|
||||
let sum = (*sum).clone();
|
||||
let original_aggr = Aggregate::try_new(
|
||||
Arc::new(dummy_table_scan()),
|
||||
vec![],
|
||||
vec![Expr::AggregateFunction(AggregateFunction::new_udf(
|
||||
Arc::new(sum.clone()),
|
||||
vec![Expr::Column(Column::new_unqualified("number"))],
|
||||
false,
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
))],
|
||||
)
|
||||
.unwrap();
|
||||
let res = StateMergeHelper::split_aggr_node(original_aggr).unwrap();
|
||||
|
||||
let expected_lower_plan = LogicalPlan::Aggregate(
|
||||
Aggregate::try_new(
|
||||
Arc::new(dummy_table_scan()),
|
||||
vec![],
|
||||
vec![Expr::AggregateFunction(AggregateFunction::new_udf(
|
||||
Arc::new(StateWrapper::new(sum.clone()).unwrap().into()),
|
||||
vec![Expr::Column(Column::new_unqualified("number"))],
|
||||
false,
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
))],
|
||||
)
|
||||
.unwrap(),
|
||||
)
|
||||
.recompute_schema()
|
||||
.unwrap();
|
||||
assert_eq!(&res.lower_state, &expected_lower_plan);
|
||||
|
||||
let expected_merge_plan = LogicalPlan::Aggregate(
|
||||
Aggregate::try_new(
|
||||
Arc::new(expected_lower_plan),
|
||||
vec![],
|
||||
vec![Expr::AggregateFunction(AggregateFunction::new_udf(
|
||||
Arc::new(
|
||||
MergeWrapper::new(
|
||||
sum.clone(),
|
||||
Arc::new(
|
||||
AggregateExprBuilder::new(
|
||||
Arc::new(sum.clone()),
|
||||
vec![Arc::new(
|
||||
datafusion::physical_expr::expressions::Column::new(
|
||||
"number", 0,
|
||||
),
|
||||
)],
|
||||
)
|
||||
.schema(Arc::new(dummy_table_scan().schema().as_arrow().clone()))
|
||||
.alias("sum(number)")
|
||||
.build()
|
||||
.unwrap(),
|
||||
),
|
||||
vec![DataType::Int64],
|
||||
)
|
||||
.unwrap()
|
||||
.into(),
|
||||
),
|
||||
vec![Expr::Column(Column::new_unqualified("__sum_state(number)"))],
|
||||
false,
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
))
|
||||
.alias("sum(number)")],
|
||||
)
|
||||
.unwrap(),
|
||||
);
|
||||
assert_eq!(&res.upper_merge, &expected_merge_plan);
|
||||
|
||||
let phy_aggr_state_plan = DefaultPhysicalPlanner::default()
|
||||
.create_physical_plan(&res.lower_state, &ctx.state())
|
||||
.await
|
||||
.unwrap();
|
||||
let aggr_exec = phy_aggr_state_plan
|
||||
.as_any()
|
||||
.downcast_ref::<AggregateExec>()
|
||||
.unwrap();
|
||||
let aggr_func_expr = &aggr_exec.aggr_expr()[0];
|
||||
let mut state_accum = aggr_func_expr.create_accumulator().unwrap();
|
||||
|
||||
// evaluate the state function
|
||||
let input = Int64Array::from(vec![Some(1), Some(2), None, Some(3)]);
|
||||
let values = vec![Arc::new(input) as arrow::array::ArrayRef];
|
||||
|
||||
state_accum.update_batch(&values).unwrap();
|
||||
let state = state_accum.state().unwrap();
|
||||
assert_eq!(state.len(), 1);
|
||||
assert_eq!(state[0], ScalarValue::Int64(Some(6)));
|
||||
|
||||
let eval_res = state_accum.evaluate().unwrap();
|
||||
assert_eq!(
|
||||
eval_res,
|
||||
ScalarValue::Struct(Arc::new(
|
||||
StructArray::try_new(
|
||||
vec![Field::new("sum[sum]", DataType::Int64, true)].into(),
|
||||
vec![Arc::new(Int64Array::from(vec![Some(6)]))],
|
||||
None,
|
||||
)
|
||||
.unwrap(),
|
||||
))
|
||||
);
|
||||
|
||||
let phy_aggr_merge_plan = DefaultPhysicalPlanner::default()
|
||||
.create_physical_plan(&res.upper_merge, &ctx.state())
|
||||
.await
|
||||
.unwrap();
|
||||
let aggr_exec = phy_aggr_merge_plan
|
||||
.as_any()
|
||||
.downcast_ref::<AggregateExec>()
|
||||
.unwrap();
|
||||
let aggr_func_expr = &aggr_exec.aggr_expr()[0];
|
||||
let mut merge_accum = aggr_func_expr.create_accumulator().unwrap();
|
||||
|
||||
let merge_input =
|
||||
vec![Arc::new(Int64Array::from(vec![Some(6), Some(42), None])) as arrow::array::ArrayRef];
|
||||
let merge_input_struct_arr = StructArray::try_new(
|
||||
vec![Field::new("sum[sum]", DataType::Int64, true)].into(),
|
||||
merge_input,
|
||||
None,
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
merge_accum
|
||||
.update_batch(&[Arc::new(merge_input_struct_arr)])
|
||||
.unwrap();
|
||||
let merge_state = merge_accum.state().unwrap();
|
||||
assert_eq!(merge_state.len(), 1);
|
||||
assert_eq!(merge_state[0], ScalarValue::Int64(Some(48)));
|
||||
|
||||
let merge_eval_res = merge_accum.evaluate().unwrap();
|
||||
assert_eq!(merge_eval_res, ScalarValue::Int64(Some(48)));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_avg_udaf() {
|
||||
let ctx = SessionContext::new();
|
||||
|
||||
let avg = datafusion::functions_aggregate::average::avg_udaf();
|
||||
let avg = (*avg).clone();
|
||||
|
||||
let original_aggr = Aggregate::try_new(
|
||||
Arc::new(dummy_table_scan()),
|
||||
vec![],
|
||||
vec![Expr::AggregateFunction(AggregateFunction::new_udf(
|
||||
Arc::new(avg.clone()),
|
||||
vec![Expr::Column(Column::new_unqualified("number"))],
|
||||
false,
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
))],
|
||||
)
|
||||
.unwrap();
|
||||
let res = StateMergeHelper::split_aggr_node(original_aggr).unwrap();
|
||||
|
||||
let state_func: Arc<AggregateUDF> = Arc::new(StateWrapper::new(avg.clone()).unwrap().into());
|
||||
let expected_aggr_state_plan = LogicalPlan::Aggregate(
|
||||
Aggregate::try_new(
|
||||
Arc::new(dummy_table_scan()),
|
||||
vec![],
|
||||
vec![Expr::AggregateFunction(AggregateFunction::new_udf(
|
||||
state_func,
|
||||
vec![Expr::Column(Column::new_unqualified("number"))],
|
||||
false,
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
))],
|
||||
)
|
||||
.unwrap(),
|
||||
);
|
||||
// type coerced so avg aggr function can function correctly
|
||||
let coerced_aggr_state_plan = TypeCoercion::new()
|
||||
.analyze(expected_aggr_state_plan.clone(), &Default::default())
|
||||
.unwrap();
|
||||
assert_eq!(&res.lower_state, &coerced_aggr_state_plan);
|
||||
assert_eq!(
|
||||
res.lower_state.schema().as_arrow(),
|
||||
&arrow_schema::Schema::new(vec![Field::new(
|
||||
"__avg_state(number)",
|
||||
DataType::Struct(
|
||||
vec![
|
||||
Field::new("avg[count]", DataType::UInt64, true),
|
||||
Field::new("avg[sum]", DataType::Float64, true)
|
||||
]
|
||||
.into()
|
||||
),
|
||||
true,
|
||||
)])
|
||||
);
|
||||
|
||||
let expected_merge_fn = MergeWrapper::new(
|
||||
avg.clone(),
|
||||
Arc::new(
|
||||
AggregateExprBuilder::new(
|
||||
Arc::new(avg.clone()),
|
||||
vec![Arc::new(
|
||||
datafusion::physical_expr::expressions::Column::new("number", 0),
|
||||
)],
|
||||
)
|
||||
.schema(Arc::new(dummy_table_scan().schema().as_arrow().clone()))
|
||||
.alias("avg(number)")
|
||||
.build()
|
||||
.unwrap(),
|
||||
),
|
||||
// coerced to float64
|
||||
vec![DataType::Float64],
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
let expected_merge_plan = LogicalPlan::Aggregate(
|
||||
Aggregate::try_new(
|
||||
Arc::new(coerced_aggr_state_plan.clone()),
|
||||
vec![],
|
||||
vec![Expr::AggregateFunction(AggregateFunction::new_udf(
|
||||
Arc::new(expected_merge_fn.into()),
|
||||
vec![Expr::Column(Column::new_unqualified("__avg_state(number)"))],
|
||||
false,
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
))
|
||||
.alias("avg(number)")],
|
||||
)
|
||||
.unwrap(),
|
||||
);
|
||||
assert_eq!(&res.upper_merge, &expected_merge_plan);
|
||||
|
||||
let phy_aggr_state_plan = DefaultPhysicalPlanner::default()
|
||||
.create_physical_plan(&coerced_aggr_state_plan, &ctx.state())
|
||||
.await
|
||||
.unwrap();
|
||||
let aggr_exec = phy_aggr_state_plan
|
||||
.as_any()
|
||||
.downcast_ref::<AggregateExec>()
|
||||
.unwrap();
|
||||
let aggr_func_expr = &aggr_exec.aggr_expr()[0];
|
||||
let mut state_accum = aggr_func_expr.create_accumulator().unwrap();
|
||||
|
||||
// evaluate the state function
|
||||
let input = Float64Array::from(vec![Some(1.), Some(2.), None, Some(3.)]);
|
||||
let values = vec![Arc::new(input) as arrow::array::ArrayRef];
|
||||
|
||||
state_accum.update_batch(&values).unwrap();
|
||||
let state = state_accum.state().unwrap();
|
||||
assert_eq!(state.len(), 2);
|
||||
assert_eq!(state[0], ScalarValue::UInt64(Some(3)));
|
||||
assert_eq!(state[1], ScalarValue::Float64(Some(6.)));
|
||||
|
||||
let eval_res = state_accum.evaluate().unwrap();
|
||||
let expected = Arc::new(
|
||||
StructArray::try_new(
|
||||
vec![
|
||||
Field::new("avg[count]", DataType::UInt64, true),
|
||||
Field::new("avg[sum]", DataType::Float64, true),
|
||||
]
|
||||
.into(),
|
||||
vec![
|
||||
Arc::new(UInt64Array::from(vec![Some(3)])),
|
||||
Arc::new(Float64Array::from(vec![Some(6.)])),
|
||||
],
|
||||
None,
|
||||
)
|
||||
.unwrap(),
|
||||
);
|
||||
assert_eq!(eval_res, ScalarValue::Struct(expected));
|
||||
|
||||
let phy_aggr_merge_plan = DefaultPhysicalPlanner::default()
|
||||
.create_physical_plan(&res.upper_merge, &ctx.state())
|
||||
.await
|
||||
.unwrap();
|
||||
let aggr_exec = phy_aggr_merge_plan
|
||||
.as_any()
|
||||
.downcast_ref::<AggregateExec>()
|
||||
.unwrap();
|
||||
let aggr_func_expr = &aggr_exec.aggr_expr()[0];
|
||||
|
||||
let mut merge_accum = aggr_func_expr.create_accumulator().unwrap();
|
||||
|
||||
let merge_input = vec![
|
||||
Arc::new(UInt64Array::from(vec![Some(3), Some(42), None])) as arrow::array::ArrayRef,
|
||||
Arc::new(Float64Array::from(vec![Some(48.), Some(84.), None])),
|
||||
];
|
||||
let merge_input_struct_arr = StructArray::try_new(
|
||||
vec![
|
||||
Field::new("avg[count]", DataType::UInt64, true),
|
||||
Field::new("avg[sum]", DataType::Float64, true),
|
||||
]
|
||||
.into(),
|
||||
merge_input,
|
||||
None,
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
merge_accum
|
||||
.update_batch(&[Arc::new(merge_input_struct_arr)])
|
||||
.unwrap();
|
||||
let merge_state = merge_accum.state().unwrap();
|
||||
assert_eq!(merge_state.len(), 2);
|
||||
assert_eq!(merge_state[0], ScalarValue::UInt64(Some(45)));
|
||||
assert_eq!(merge_state[1], ScalarValue::Float64(Some(132.)));
|
||||
|
||||
let merge_eval_res = merge_accum.evaluate().unwrap();
|
||||
// the merge function returns the average, which is 132 / 45
|
||||
assert_eq!(merge_eval_res, ScalarValue::Float64(Some(132. / 45_f64)));
|
||||
}
|
||||
|
||||
/// Tests that UDAF state fields are correctly implemented,
/// especially for our own custom UDAFs,
/// by comparing eval results before and after splitting into state/merge functions.
#[tokio::test]
|
||||
async fn test_udaf_correct_eval_result() {
|
||||
struct TestCase {
|
||||
func: Arc<AggregateUDF>,
|
||||
args: Vec<Expr>,
|
||||
input_schema: SchemaRef,
|
||||
input: Vec<ArrayRef>,
|
||||
expected_output: Option<ScalarValue>,
|
||||
expected_fn: Option<ExpectedFn>,
|
||||
distinct: bool,
|
||||
filter: Option<Box<Expr>>,
|
||||
order_by: Option<Vec<SortExpr>>,
|
||||
null_treatment: Option<NullTreatment>,
|
||||
}
|
||||
type ExpectedFn = fn(ArrayRef) -> bool;
|
||||
|
||||
let test_cases = vec![
|
||||
TestCase {
|
||||
func: sum_udaf(),
|
||||
input_schema: Arc::new(arrow_schema::Schema::new(vec![Field::new(
|
||||
"number",
|
||||
DataType::Int64,
|
||||
true,
|
||||
)])),
|
||||
args: vec![Expr::Column(Column::new_unqualified("number"))],
|
||||
input: vec![Arc::new(Int64Array::from(vec![
|
||||
Some(1),
|
||||
Some(2),
|
||||
None,
|
||||
Some(3),
|
||||
]))],
|
||||
expected_output: Some(ScalarValue::Int64(Some(6))),
|
||||
expected_fn: None,
|
||||
distinct: false,
|
||||
filter: None,
|
||||
order_by: None,
|
||||
null_treatment: None,
|
||||
},
|
||||
TestCase {
|
||||
func: avg_udaf(),
|
||||
input_schema: Arc::new(arrow_schema::Schema::new(vec![Field::new(
|
||||
"number",
|
||||
DataType::Int64,
|
||||
true,
|
||||
)])),
|
||||
args: vec![Expr::Column(Column::new_unqualified("number"))],
|
||||
input: vec![Arc::new(Int64Array::from(vec![
|
||||
Some(1),
|
||||
Some(2),
|
||||
None,
|
||||
Some(3),
|
||||
]))],
|
||||
expected_output: Some(ScalarValue::Float64(Some(2.0))),
|
||||
expected_fn: None,
|
||||
distinct: false,
|
||||
filter: None,
|
||||
order_by: None,
|
||||
null_treatment: None,
|
||||
},
|
||||
TestCase {
|
||||
func: Arc::new(CountHash::udf_impl()),
|
||||
input_schema: Arc::new(arrow_schema::Schema::new(vec![Field::new(
|
||||
"number",
|
||||
DataType::Int64,
|
||||
true,
|
||||
)])),
|
||||
args: vec![Expr::Column(Column::new_unqualified("number"))],
|
||||
input: vec![Arc::new(Int64Array::from(vec![
|
||||
Some(1),
|
||||
Some(2),
|
||||
None,
|
||||
Some(3),
|
||||
Some(3),
|
||||
Some(3),
|
||||
]))],
|
||||
expected_output: Some(ScalarValue::Int64(Some(4))),
|
||||
expected_fn: None,
|
||||
distinct: false,
|
||||
filter: None,
|
||||
order_by: None,
|
||||
null_treatment: None,
|
||||
},
|
||||
TestCase {
|
||||
func: Arc::new(UddSketchState::state_udf_impl()),
|
||||
input_schema: Arc::new(arrow_schema::Schema::new(vec![Field::new(
|
||||
"number",
|
||||
DataType::Float64,
|
||||
true,
|
||||
)])),
|
||||
args: vec![
|
||||
Expr::Literal(ScalarValue::Int64(Some(128))),
|
||||
Expr::Literal(ScalarValue::Float64(Some(0.05))),
|
||||
Expr::Column(Column::new_unqualified("number")),
|
||||
],
|
||||
input: vec![Arc::new(Float64Array::from(vec![
|
||||
Some(1.),
|
||||
Some(2.),
|
||||
None,
|
||||
Some(3.),
|
||||
Some(3.),
|
||||
Some(3.),
|
||||
]))],
|
||||
expected_output: None,
|
||||
expected_fn: Some(|arr| {
|
||||
let percent = ScalarValue::Float64(Some(0.5)).to_array().unwrap();
|
||||
let percent = datatypes::vectors::Helper::try_into_vector(percent).unwrap();
|
||||
let state = datatypes::vectors::Helper::try_into_vector(arr).unwrap();
|
||||
let udd_calc = UddSketchCalcFunction;
|
||||
let res = udd_calc
|
||||
.eval(&Default::default(), &[percent, state])
|
||||
.unwrap();
|
||||
let binding = res.to_arrow_array();
|
||||
let res_arr = binding.as_any().downcast_ref::<Float64Array>().unwrap();
|
||||
assert!(res_arr.len() == 1);
|
||||
assert!((res_arr.value(0) - 2.856578984907706f64).abs() <= f64::EPSILON);
|
||||
true
|
||||
}),
|
||||
distinct: false,
|
||||
filter: None,
|
||||
order_by: None,
|
||||
null_treatment: None,
|
||||
},
|
||||
TestCase {
|
||||
func: Arc::new(HllState::state_udf_impl()),
|
||||
input_schema: Arc::new(arrow_schema::Schema::new(vec![Field::new(
|
||||
"word",
|
||||
DataType::Utf8,
|
||||
true,
|
||||
)])),
|
||||
args: vec![Expr::Column(Column::new_unqualified("word"))],
|
||||
input: vec![Arc::new(StringArray::from(vec![
|
||||
Some("foo"),
|
||||
Some("bar"),
|
||||
None,
|
||||
Some("baz"),
|
||||
Some("baz"),
|
||||
]))],
|
||||
expected_output: None,
|
||||
expected_fn: Some(|arr| {
|
||||
let state = datatypes::vectors::Helper::try_into_vector(arr).unwrap();
|
||||
let hll_calc = HllCalcFunction;
|
||||
let res = hll_calc.eval(&Default::default(), &[state]).unwrap();
|
||||
let binding = res.to_arrow_array();
|
||||
let res_arr = binding.as_any().downcast_ref::<UInt64Array>().unwrap();
|
||||
assert!(res_arr.len() == 1);
|
||||
assert_eq!(res_arr.value(0), 3);
|
||||
true
|
||||
}),
|
||||
distinct: false,
|
||||
filter: None,
|
||||
order_by: None,
|
||||
null_treatment: None,
|
||||
},
|
||||
// TODO(discord9): udd_merge/hll_merge/geo_path/quantile_aggr tests
|
||||
];
|
||||
let test_table_ref = TableReference::bare("TestTable");
|
||||
|
||||
for case in test_cases {
|
||||
let ctx = SessionContext::new();
|
||||
let table_provider = DummyTableProvider::new(
|
||||
case.input_schema.clone(),
|
||||
Some(RecordBatch::try_new(case.input_schema.clone(), case.input.clone()).unwrap()),
|
||||
);
|
||||
let table_source = DefaultTableSource::new(Arc::new(table_provider));
|
||||
let logical_plan = LogicalPlan::TableScan(
|
||||
TableScan::try_new(
|
||||
test_table_ref.clone(),
|
||||
Arc::new(table_source),
|
||||
None,
|
||||
vec![],
|
||||
None,
|
||||
)
|
||||
.unwrap(),
|
||||
);
|
||||
|
||||
let args = case.args;
|
||||
|
||||
let aggr_expr = Expr::AggregateFunction(AggregateFunction::new_udf(
|
||||
case.func.clone(),
|
||||
args,
|
||||
case.distinct,
|
||||
case.filter,
|
||||
case.order_by,
|
||||
case.null_treatment,
|
||||
));
|
||||
|
||||
let aggr_plan = LogicalPlan::Aggregate(
|
||||
Aggregate::try_new(Arc::new(logical_plan), vec![], vec![aggr_expr]).unwrap(),
|
||||
);
|
||||
|
||||
// make sure the aggr_plan is type coerced
|
||||
let aggr_plan = TypeCoercion::new()
|
||||
.analyze(aggr_plan, &Default::default())
|
||||
.unwrap();
|
||||
|
||||
// first eval the original aggregate function
|
||||
let phy_full_aggr_plan = DefaultPhysicalPlanner::default()
|
||||
.create_physical_plan(&aggr_plan, &ctx.state())
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
{
|
||||
let unsplit_result = execute_phy_plan(&phy_full_aggr_plan).await.unwrap();
|
||||
assert_eq!(unsplit_result.len(), 1);
|
||||
let unsplit_batch = &unsplit_result[0];
|
||||
assert_eq!(unsplit_batch.num_columns(), 1);
|
||||
assert_eq!(unsplit_batch.num_rows(), 1);
|
||||
let unsplit_col = unsplit_batch.column(0);
|
||||
if let Some(expected_output) = &case.expected_output {
|
||||
assert_eq!(unsplit_col.data_type(), &expected_output.data_type());
|
||||
assert_eq!(unsplit_col.len(), 1);
|
||||
assert_eq!(unsplit_col, &expected_output.to_array().unwrap());
|
||||
}
|
||||
|
||||
if let Some(expected_fn) = &case.expected_fn {
|
||||
assert!(expected_fn(unsplit_col.clone()));
|
||||
}
|
||||
}
|
||||
let LogicalPlan::Aggregate(aggr_plan) = aggr_plan else {
|
||||
panic!("Expected Aggregate plan");
|
||||
};
|
||||
let split_plan = StateMergeHelper::split_aggr_node(aggr_plan).unwrap();
|
||||
|
||||
let phy_upper_plan = DefaultPhysicalPlanner::default()
|
||||
.create_physical_plan(&split_plan.upper_merge, &ctx.state())
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
// Since the upper plan uses the lower plan as input, executing the upper plan also executes
// the lower plan, which should give the same result as the original aggregate function.
{
|
||||
let split_res = execute_phy_plan(&phy_upper_plan).await.unwrap();
|
||||
|
||||
assert_eq!(split_res.len(), 1);
|
||||
let split_batch = &split_res[0];
|
||||
assert_eq!(split_batch.num_columns(), 1);
|
||||
assert_eq!(split_batch.num_rows(), 1);
|
||||
let split_col = split_batch.column(0);
|
||||
if let Some(expected_output) = &case.expected_output {
|
||||
assert_eq!(split_col.data_type(), &expected_output.data_type());
|
||||
assert_eq!(split_col.len(), 1);
|
||||
assert_eq!(split_col, &expected_output.to_array().unwrap());
|
||||
}
|
||||
|
||||
if let Some(expected_fn) = &case.expected_fn {
|
||||
assert!(expected_fn(split_col.clone()));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn execute_phy_plan(
|
||||
phy_plan: &Arc<dyn ExecutionPlan>,
|
||||
) -> datafusion_common::Result<Vec<RecordBatch>> {
|
||||
let task_ctx = Arc::new(TaskContext::default());
|
||||
let mut stream = phy_plan.execute(0, task_ctx)?;
|
||||
let mut batches = Vec::new();
|
||||
while let Some(batch) = stream.next().await {
|
||||
batches.push(batch?);
|
||||
}
|
||||
Ok(batches)
|
||||
}
|
||||
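The next hunk wires the wrapper into the global function registry at startup. A minimal sketch of what that enables, assuming the `FUNCTION_REGISTRY` static and the wrapper helpers are in scope (the test name is illustrative):

#[test]
fn state_variant_is_registered() {
    // After `StateMergeHelper::register` runs inside the registry initializer,
    // the generated `__sum_state` name resolves as an aggregate function.
    assert!(FUNCTION_REGISTRY.is_aggr_func_exist(&aggr_state_func_name("sum")));
}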
@@ -20,6 +20,7 @@ use datafusion_expr::AggregateUDF;
|
||||
use once_cell::sync::Lazy;
|
||||
|
||||
use crate::admin::AdminFunction;
|
||||
use crate::aggrs::aggr_wrapper::StateMergeHelper;
|
||||
use crate::aggrs::approximate::ApproximateFunction;
|
||||
use crate::aggrs::count_hash::CountHash;
|
||||
use crate::aggrs::vector::VectorFunction as VectorAggrFunction;
|
||||
@@ -105,6 +106,10 @@ impl FunctionRegistry {
|
||||
.cloned()
|
||||
.collect()
|
||||
}
|
||||
|
||||
pub fn is_aggr_func_exist(&self, name: &str) -> bool {
|
||||
self.aggregate_functions.read().unwrap().contains_key(name)
|
||||
}
|
||||
}
|
||||
|
||||
pub static FUNCTION_REGISTRY: Lazy<Arc<FunctionRegistry>> = Lazy::new(|| {
|
||||
@@ -148,6 +153,9 @@ pub static FUNCTION_REGISTRY: Lazy<Arc<FunctionRegistry>> = Lazy::new(|| {
|
||||
// CountHash function
|
||||
CountHash::register(&function_registry);
|
||||
|
||||
// state function of supported aggregate functions
|
||||
StateMergeHelper::register(&function_registry);
|
||||
|
||||
Arc::new(function_registry)
|
||||
});
|
||||
|
||||
|
||||
@@ -14,6 +14,7 @@
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use api::v1::meta::ReconcileRequest;
|
||||
use async_trait::async_trait;
|
||||
use catalog::CatalogManagerRef;
|
||||
use common_base::AffectedRows;
|
||||
@@ -65,6 +66,9 @@ pub trait ProcedureServiceHandler: Send + Sync {
|
||||
/// Migrate a region from source peer to target peer, returns the procedure id if success.
|
||||
async fn migrate_region(&self, request: MigrateRegionRequest) -> Result<Option<String>>;
|
||||
|
||||
/// Reconcile a table, database or catalog, returns the procedure id if success.
|
||||
async fn reconcile(&self, request: ReconcileRequest) -> Result<Option<String>>;
|
||||
|
||||
/// Query the procedure' state by its id
|
||||
async fn query_procedure_state(&self, pid: &str) -> Result<ProcedureStateResponse>;
|
||||
|
||||
|
||||
@@ -12,12 +12,15 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use common_query::error::{InvalidInputTypeSnafu, Result};
|
||||
use api::v1::meta::ResolveStrategy;
|
||||
use common_query::error::{
|
||||
InvalidFuncArgsSnafu, InvalidInputTypeSnafu, Result, UnsupportedInputDataTypeSnafu,
|
||||
};
|
||||
use common_query::prelude::{Signature, TypeSignature, Volatility};
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use datatypes::types::cast::cast;
|
||||
use datatypes::value::ValueRef;
|
||||
use snafu::ResultExt;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
|
||||
/// Create a function signature with oneof signatures of interleaving two arguments.
|
||||
pub fn one_of_sigs2(args1: Vec<ConcreteDataType>, args2: Vec<ConcreteDataType>) -> Signature {
|
||||
@@ -43,3 +46,64 @@ pub fn cast_u64(value: &ValueRef) -> Result<Option<u64>> {
|
||||
})
|
||||
.map(|v| v.as_u64())
|
||||
}
|
||||
|
||||
/// Cast a [`ValueRef`] to u32, returns `None` if fails
|
||||
pub fn cast_u32(value: &ValueRef) -> Result<Option<u32>> {
|
||||
cast((*value).into(), &ConcreteDataType::uint32_datatype())
|
||||
.context(InvalidInputTypeSnafu {
|
||||
err_msg: format!(
|
||||
"Failed to cast input into uint32, actual type: {:#?}",
|
||||
value.data_type(),
|
||||
),
|
||||
})
|
||||
.map(|v| v.as_u64().map(|v| v as u32))
|
||||
}
|
||||
|
||||
/// Parse a resolve strategy from a string.
|
||||
pub fn parse_resolve_strategy(strategy: &str) -> Result<ResolveStrategy> {
|
||||
ResolveStrategy::from_str_name(strategy).context(InvalidFuncArgsSnafu {
|
||||
err_msg: format!("Invalid resolve strategy: {}", strategy),
|
||||
})
|
||||
}
|
||||
|
||||
/// Default parallelism for reconcile operations.
|
||||
pub fn default_parallelism() -> u32 {
|
||||
64
|
||||
}
|
||||
|
||||
/// Default resolve strategy for reconcile operations.
|
||||
pub fn default_resolve_strategy() -> ResolveStrategy {
|
||||
ResolveStrategy::UseLatest
|
||||
}
|
||||
|
||||
/// Get the string value from the params.
|
||||
///
|
||||
/// # Errors
|
||||
/// Returns an error if the input type is not a string.
|
||||
pub fn get_string_from_params<'a>(
|
||||
params: &'a [ValueRef<'a>],
|
||||
index: usize,
|
||||
fn_name: &'a str,
|
||||
) -> Result<&'a str> {
|
||||
let ValueRef::String(s) = ¶ms[index] else {
|
||||
return UnsupportedInputDataTypeSnafu {
|
||||
function: fn_name,
|
||||
datatypes: params.iter().map(|v| v.data_type()).collect::<Vec<_>>(),
|
||||
}
|
||||
.fail();
|
||||
};
|
||||
Ok(s)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_parse_resolve_strategy() {
|
||||
assert_eq!(
|
||||
parse_resolve_strategy("UseLatest").unwrap(),
|
||||
ResolveStrategy::UseLatest
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
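A short sketch of how the helpers above could be combined when parsing admin-function arguments; the function and argument names are illustrative, not taken from the diff:

use api::v1::meta::ResolveStrategy;
use common_query::error::Result;
use datatypes::value::ValueRef;

/// Reads the optional resolve-strategy argument at `index`, falling back to the
/// documented default when the caller omits it.
fn resolve_strategy_arg<'a>(params: &'a [ValueRef<'a>], index: usize) -> Result<ResolveStrategy> {
    if params.len() <= index {
        return Ok(default_resolve_strategy());
    }
    let name = get_string_from_params(params, index, "reconcile")?;
    parse_resolve_strategy(name)
}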
@@ -14,6 +14,7 @@
|
||||
|
||||
#![feature(let_chains)]
|
||||
#![feature(try_blocks)]
|
||||
#![feature(assert_matches)]
|
||||
|
||||
mod admin;
|
||||
mod flush_flow;
|
||||
|
||||
@@ -81,7 +81,8 @@ impl Function for ClampFunction {
|
||||
}
|
||||
);
|
||||
ensure!(
|
||||
columns[1].len() == 1 && columns[2].len() == 1,
|
||||
(columns[1].len() == 1 || columns[1].is_const())
|
||||
&& (columns[2].len() == 1 || columns[2].is_const()),
|
||||
InvalidFuncArgsSnafu {
|
||||
err_msg: format!(
|
||||
"The second and third args should be scalar, have: {:?}, {:?}",
|
||||
@@ -204,7 +205,7 @@ impl Function for ClampMinFunction {
|
||||
}
|
||||
);
|
||||
ensure!(
|
||||
columns[1].len() == 1,
|
||||
columns[1].len() == 1 || columns[1].is_const(),
|
||||
InvalidFuncArgsSnafu {
|
||||
err_msg: format!(
|
||||
"The second arg (min) should be scalar, have: {:?}",
|
||||
@@ -292,7 +293,7 @@ impl Function for ClampMaxFunction {
|
||||
}
|
||||
);
|
||||
ensure!(
|
||||
columns[1].len() == 1,
|
||||
columns[1].len() == 1 || columns[1].is_const(),
|
||||
InvalidFuncArgsSnafu {
|
||||
err_msg: format!(
|
||||
"The second arg (max) should be scalar, have: {:?}",
|
||||
|
||||
@@ -32,7 +32,7 @@ impl FunctionState {
|
||||
pub fn mock() -> Self {
|
||||
use std::sync::Arc;
|
||||
|
||||
use api::v1::meta::ProcedureStatus;
|
||||
use api::v1::meta::{ProcedureStatus, ReconcileRequest};
|
||||
use async_trait::async_trait;
|
||||
use catalog::CatalogManagerRef;
|
||||
use common_base::AffectedRows;
|
||||
@@ -63,6 +63,10 @@ impl FunctionState {
|
||||
Ok(Some("test_pid".to_string()))
|
||||
}
|
||||
|
||||
async fn reconcile(&self, _request: ReconcileRequest) -> Result<Option<String>> {
|
||||
Ok(Some("test_pid".to_string()))
|
||||
}
|
||||
|
||||
async fn query_procedure_state(&self, _pid: &str) -> Result<ProcedureStateResponse> {
|
||||
Ok(ProcedureStateResponse {
|
||||
status: ProcedureStatus::Done.into(),
|
||||
|
||||
@@ -16,8 +16,8 @@ use std::env;
|
||||
use std::io::ErrorKind;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::sync::atomic::{AtomicBool, Ordering};
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
use std::sync::{Arc, LazyLock};
|
||||
use std::time::{Duration, SystemTime};
|
||||
|
||||
use common_runtime::error::{Error, Result};
|
||||
use common_runtime::{BoxedTaskFunction, RepeatedTask, TaskFunction};
|
||||
@@ -31,6 +31,9 @@ pub const TELEMETRY_URL: &str = "https://telemetry.greptimestats.com/db/otel/sta
|
||||
/// The local installation uuid cache file
|
||||
const UUID_FILE_NAME: &str = ".greptimedb-telemetry-uuid";
|
||||
|
||||
/// System start time for uptime calculation
|
||||
static START_TIME: LazyLock<SystemTime> = LazyLock::new(SystemTime::now);
|
||||
|
||||
/// The default interval of reporting telemetry data to greptime cloud
|
||||
pub static TELEMETRY_INTERVAL: Duration = Duration::from_secs(60 * 30);
|
||||
/// The default connect timeout to greptime cloud.
|
||||
@@ -103,6 +106,8 @@ struct StatisticData {
|
||||
pub nodes: Option<i32>,
|
||||
/// The local installation uuid
|
||||
pub uuid: String,
|
||||
/// System uptime range (e.g., "hours", "days", "weeks")
|
||||
pub uptime: String,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Eq, PartialEq)]
|
||||
@@ -171,6 +176,25 @@ fn print_anonymous_usage_data_disclaimer() {
|
||||
info!("https://docs.greptime.com/reference/telemetry");
|
||||
}
|
||||
|
||||
/// Format uptime duration into a general time range string
|
||||
/// Returns privacy-friendly descriptions like "hours", "days", etc.
|
||||
fn format_uptime() -> String {
|
||||
let uptime_duration = START_TIME.elapsed().unwrap_or(Duration::ZERO);
|
||||
let total_seconds = uptime_duration.as_secs();
|
||||
|
||||
if total_seconds < 86400 {
|
||||
"hours".to_string()
|
||||
} else if total_seconds < 604800 {
|
||||
"days".to_string()
|
||||
} else if total_seconds < 2629746 {
|
||||
"weeks".to_string()
|
||||
} else if total_seconds < 31556952 {
|
||||
"months".to_string()
|
||||
} else {
|
||||
"years".to_string()
|
||||
}
|
||||
}
|
||||
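// For reference (illustrative note, not part of the patch): the thresholds above are
// 86_400 s = 1 day, 604_800 s = 7 days, 2_629_746 s ≈ one average month, and
// 31_556_952 s ≈ one average Gregorian year, so e.g. an uptime of 3 days falls in
// the "days" bucket and an uptime of 10 days in the "weeks" bucket.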
|
||||
pub fn default_get_uuid(working_home: &Option<String>) -> Option<String> {
|
||||
let temp_dir = env::temp_dir();
|
||||
|
||||
@@ -260,6 +284,7 @@ impl GreptimeDBTelemetry {
|
||||
mode: self.statistics.get_mode(),
|
||||
nodes: self.statistics.get_nodes().await,
|
||||
uuid,
|
||||
uptime: format_uptime(),
|
||||
};
|
||||
|
||||
if let Some(client) = self.client.as_ref() {
|
||||
@@ -294,7 +319,9 @@ mod tests {
|
||||
use reqwest::{Client, Response};
|
||||
use tokio::spawn;
|
||||
|
||||
use crate::{default_get_uuid, Collector, GreptimeDBTelemetry, Mode, StatisticData};
|
||||
use crate::{
|
||||
default_get_uuid, format_uptime, Collector, GreptimeDBTelemetry, Mode, StatisticData,
|
||||
};
|
||||
|
||||
static COUNT: AtomicUsize = std::sync::atomic::AtomicUsize::new(0);
|
||||
|
||||
@@ -438,6 +465,7 @@ mod tests {
|
||||
assert_eq!(build_info().commit, body.git_commit);
|
||||
assert_eq!(Mode::Standalone, body.mode);
|
||||
assert_eq!(1, body.nodes.unwrap());
|
||||
assert!(!body.uptime.is_empty());
|
||||
|
||||
let failed_statistic = Box::new(FailedStatistic);
|
||||
let failed_report = GreptimeDBTelemetry::new(
|
||||
@@ -477,4 +505,18 @@ mod tests {
|
||||
assert_eq!(uuid, default_get_uuid(&Some(working_home.clone())));
|
||||
assert_eq!(uuid, default_get_uuid(&Some(working_home)));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_format_uptime() {
|
||||
let uptime = format_uptime();
|
||||
assert!(!uptime.is_empty());
|
||||
// Should be a valid general time range (no specific numbers)
|
||||
assert!(
|
||||
uptime == "hours"
|
||||
|| uptime == "days"
|
||||
|| uptime == "weeks"
|
||||
|| uptime == "months"
|
||||
|| uptime == "years"
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -14,6 +14,7 @@ common-catalog.workspace = true
|
||||
common-error.workspace = true
|
||||
common-macro.workspace = true
|
||||
common-query.workspace = true
|
||||
common-sql.workspace = true
|
||||
common-time.workspace = true
|
||||
datatypes.workspace = true
|
||||
prost.workspace = true
|
||||
|
||||
@@ -27,24 +27,84 @@ use common_query::AddColumnLocation;
 use datatypes::schema::{ColumnSchema, FulltextOptions, RawSchema, SkippingIndexOptions};
 use snafu::{ensure, OptionExt, ResultExt};
 use store_api::region_request::{SetRegionOption, UnsetRegionOption};
-use table::metadata::TableId;
+use table::metadata::{TableId, TableMeta};
 use table::requests::{
-    AddColumnRequest, AlterKind, AlterTableRequest, ModifyColumnTypeRequest, SetIndexOptions,
-    UnsetIndexOptions,
+    AddColumnRequest, AlterKind, AlterTableRequest, ModifyColumnTypeRequest, SetDefaultRequest,
+    SetIndexOption, UnsetIndexOption,
 };

 use crate::error::{
-    InvalidColumnDefSnafu, InvalidIndexOptionSnafu, InvalidSetFulltextOptionRequestSnafu,
-    InvalidSetSkippingIndexOptionRequestSnafu, InvalidSetTableOptionRequestSnafu,
-    InvalidUnsetTableOptionRequestSnafu, MissingAlterIndexOptionSnafu, MissingFieldSnafu,
+    ColumnNotFoundSnafu, InvalidColumnDefSnafu, InvalidIndexOptionSnafu,
+    InvalidSetFulltextOptionRequestSnafu, InvalidSetSkippingIndexOptionRequestSnafu,
+    InvalidSetTableOptionRequestSnafu, InvalidUnsetTableOptionRequestSnafu,
+    MissingAlterIndexOptionSnafu, MissingFieldSnafu, MissingTableMetaSnafu,
     MissingTimestampColumnSnafu, Result, UnknownLocationTypeSnafu,
 };

 const LOCATION_TYPE_FIRST: i32 = LocationType::First as i32;
 const LOCATION_TYPE_AFTER: i32 = LocationType::After as i32;

+fn set_index_option_from_proto(set_index: api::v1::SetIndex) -> Result<SetIndexOption> {
+    let options = set_index.options.context(MissingAlterIndexOptionSnafu)?;
+    Ok(match options {
+        api::v1::set_index::Options::Fulltext(f) => SetIndexOption::Fulltext {
+            column_name: f.column_name.clone(),
+            options: FulltextOptions::new(
+                f.enable,
+                as_fulltext_option_analyzer(
+                    Analyzer::try_from(f.analyzer).context(InvalidSetFulltextOptionRequestSnafu)?,
+                ),
+                f.case_sensitive,
+                as_fulltext_option_backend(
+                    PbFulltextBackend::try_from(f.backend)
+                        .context(InvalidSetFulltextOptionRequestSnafu)?,
+                ),
+                f.granularity as u32,
+                f.false_positive_rate,
+            )
+            .context(InvalidIndexOptionSnafu)?,
+        },
+        api::v1::set_index::Options::Inverted(i) => SetIndexOption::Inverted {
+            column_name: i.column_name,
+        },
+        api::v1::set_index::Options::Skipping(s) => SetIndexOption::Skipping {
+            column_name: s.column_name,
+            options: SkippingIndexOptions::new(
+                s.granularity as u32,
+                s.false_positive_rate,
+                as_skipping_index_type(
+                    PbSkippingIndexType::try_from(s.skipping_index_type)
+                        .context(InvalidSetSkippingIndexOptionRequestSnafu)?,
+                ),
+            )
+            .context(InvalidIndexOptionSnafu)?,
+        },
+    })
+}
+
+fn unset_index_option_from_proto(unset_index: api::v1::UnsetIndex) -> Result<UnsetIndexOption> {
+    let options = unset_index.options.context(MissingAlterIndexOptionSnafu)?;
+    Ok(match options {
+        api::v1::unset_index::Options::Fulltext(f) => UnsetIndexOption::Fulltext {
+            column_name: f.column_name,
+        },
+        api::v1::unset_index::Options::Inverted(i) => UnsetIndexOption::Inverted {
+            column_name: i.column_name,
+        },
+        api::v1::unset_index::Options::Skipping(s) => UnsetIndexOption::Skipping {
+            column_name: s.column_name,
+        },
+    })
+}
+
 /// Convert an [`AlterTableExpr`] to an [`AlterTableRequest`]
-pub fn alter_expr_to_request(table_id: TableId, expr: AlterTableExpr) -> Result<AlterTableRequest> {
+///
+/// note: `table_meta` must not be None if [`AlterTableExpr`] is `SetDefault`
+pub fn alter_expr_to_request(
+    table_id: TableId,
+    expr: AlterTableExpr,
+    table_meta: Option<&TableMeta>,
+) -> Result<AlterTableRequest> {
     let catalog_name = expr.catalog_name;
     let schema_name = expr.schema_name;
     let kind = expr.kind.context(MissingFieldSnafu { field: "kind" })?;
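
Note: with the widened signature above, the caller decides whether to pass table metadata. A rough calling sketch follows, assuming the caller has already resolved the table; apart from alter_expr_to_request itself and the types it names, everything here (the wrapper function, the import path for Kind) is an illustrative assumption, and the updated tests below simply pass None.

use api::v1::alter_table_expr::Kind; // assumed import path for the oneof kind
use api::v1::AlterTableExpr;
use table::metadata::{TableId, TableMeta};
use table::requests::AlterTableRequest;

use crate::error::Result;

// Hypothetical wrapper living next to alter_expr_to_request (the function updated
// in the hunk above). Only ALTER ... SET DEFAULT needs the schema to decode
// default constraints, so every other kind keeps passing None.
pub fn convert_alter(
    table_id: TableId,
    expr: AlterTableExpr,
    table_meta: Option<&TableMeta>,
) -> Result<AlterTableRequest> {
    let needs_meta = matches!(&expr.kind, Some(Kind::SetDefaults(_)));
    alter_expr_to_request(table_id, expr, if needs_meta { table_meta } else { None })
}
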
@@ -121,70 +181,34 @@ pub fn alter_expr_to_request(table_id: TableId, expr: AlterTableExpr) -> Result<
                     .context(InvalidUnsetTableOptionRequestSnafu)?,
             }
         }
-        Kind::SetIndex(o) => match o.options {
-            Some(opt) => match opt {
-                api::v1::set_index::Options::Fulltext(f) => AlterKind::SetIndex {
-                    options: SetIndexOptions::Fulltext {
-                        column_name: f.column_name.clone(),
-                        options: FulltextOptions::new(
-                            f.enable,
-                            as_fulltext_option_analyzer(
-                                Analyzer::try_from(f.analyzer)
-                                    .context(InvalidSetFulltextOptionRequestSnafu)?,
-                            ),
-                            f.case_sensitive,
-                            as_fulltext_option_backend(
-                                PbFulltextBackend::try_from(f.backend)
-                                    .context(InvalidSetFulltextOptionRequestSnafu)?,
-                            ),
-                            f.granularity as u32,
-                            f.false_positive_rate,
-                        )
-                        .context(InvalidIndexOptionSnafu)?,
-                    },
-                },
-                api::v1::set_index::Options::Inverted(i) => AlterKind::SetIndex {
-                    options: SetIndexOptions::Inverted {
-                        column_name: i.column_name,
-                    },
-                },
-                api::v1::set_index::Options::Skipping(s) => AlterKind::SetIndex {
-                    options: SetIndexOptions::Skipping {
-                        column_name: s.column_name,
-                        options: SkippingIndexOptions::new(
-                            s.granularity as u32,
-                            s.false_positive_rate,
-                            as_skipping_index_type(
-                                PbSkippingIndexType::try_from(s.skipping_index_type)
-                                    .context(InvalidSetSkippingIndexOptionRequestSnafu)?,
-                            ),
-                        )
-                        .context(InvalidIndexOptionSnafu)?,
-                    },
-                },
-            },
-            None => return MissingAlterIndexOptionSnafu.fail(),
-        },
-        Kind::UnsetIndex(o) => match o.options {
-            Some(opt) => match opt {
-                api::v1::unset_index::Options::Fulltext(f) => AlterKind::UnsetIndex {
-                    options: UnsetIndexOptions::Fulltext {
-                        column_name: f.column_name,
-                    },
-                },
-                api::v1::unset_index::Options::Inverted(i) => AlterKind::UnsetIndex {
-                    options: UnsetIndexOptions::Inverted {
-                        column_name: i.column_name,
-                    },
-                },
-                api::v1::unset_index::Options::Skipping(s) => AlterKind::UnsetIndex {
-                    options: UnsetIndexOptions::Skipping {
-                        column_name: s.column_name,
-                    },
-                },
-            },
-            None => return MissingAlterIndexOptionSnafu.fail(),
-        },
+        Kind::SetIndex(o) => {
+            let option = set_index_option_from_proto(o)?;
+            AlterKind::SetIndexes {
+                options: vec![option],
+            }
+        }
+        Kind::UnsetIndex(o) => {
+            let option = unset_index_option_from_proto(o)?;
+            AlterKind::UnsetIndexes {
+                options: vec![option],
+            }
+        }
+        Kind::SetIndexes(o) => {
+            let options = o
+                .set_indexes
+                .into_iter()
+                .map(set_index_option_from_proto)
+                .collect::<Result<Vec<_>>>()?;
+            AlterKind::SetIndexes { options }
+        }
+        Kind::UnsetIndexes(o) => {
+            let options = o
+                .unset_indexes
+                .into_iter()
+                .map(unset_index_option_from_proto)
+                .collect::<Result<Vec<_>>>()?;
+            AlterKind::UnsetIndexes { options }
+        }
         Kind::DropDefaults(o) => {
             let names = o
                 .drop_defaults
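
Note: the reworked arms above collapse the single-option SetIndex/UnsetIndex paths into the list-based SetIndexes/UnsetIndexes kinds, so one ALTER request can now carry several index changes. A small sketch of the resulting AlterKind values, using only variants and fields visible in this diff (the column names are placeholders):

use table::requests::{AlterKind, SetIndexOption, UnsetIndexOption};

// Illustrative only: builds the list-based kinds directly, the way a converted
// request looks after several index options have been collected.
fn example_index_kinds() -> (AlterKind, AlterKind) {
    let set = AlterKind::SetIndexes {
        options: vec![
            SetIndexOption::Inverted {
                column_name: "host".to_string(),
            },
            SetIndexOption::Inverted {
                column_name: "region".to_string(),
            },
        ],
    };
    let unset = AlterKind::UnsetIndexes {
        options: vec![UnsetIndexOption::Inverted {
            column_name: "host".to_string(),
        }],
    };
    (set, unset)
}
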
@@ -201,6 +225,32 @@ pub fn alter_expr_to_request(table_id: TableId, expr: AlterTableExpr) -> Result<
                 .collect::<Result<Vec<_>>>()?;
             AlterKind::DropDefaults { names }
         }
+        Kind::SetDefaults(o) => {
+            let table_meta = table_meta.context(MissingTableMetaSnafu { table_id })?;
+            let defaults = o
+                .set_defaults
+                .into_iter()
+                .map(|col| {
+                    let column_scheme = table_meta
+                        .schema
+                        .column_schema_by_name(&col.column_name)
+                        .context(ColumnNotFoundSnafu {
+                            column_name: &col.column_name,
+                        })?;
+                    let default_constraint = common_sql::convert::deserialize_default_constraint(
+                        col.default_constraint.as_slice(),
+                        &col.column_name,
+                        &column_scheme.data_type,
+                    )
+                    .context(crate::error::SqlCommonSnafu)?;
+                    Ok(SetDefaultRequest {
+                        column_name: col.column_name,
+                        default_constraint,
+                    })
+                })
+                .collect::<Result<Vec<_>>>()?;
+            AlterKind::SetDefaults { defaults }
+        }
     };

     let request = AlterTableRequest {
@@ -300,7 +350,7 @@ mod tests {
             })),
         };

-        let alter_request = alter_expr_to_request(1, expr).unwrap();
+        let alter_request = alter_expr_to_request(1, expr, None).unwrap();
         assert_eq!(alter_request.catalog_name, "");
         assert_eq!(alter_request.schema_name, "");
         assert_eq!("monitor".to_string(), alter_request.table_name);
@@ -364,7 +414,7 @@ mod tests {
             })),
         };

-        let alter_request = alter_expr_to_request(1, expr).unwrap();
+        let alter_request = alter_expr_to_request(1, expr, None).unwrap();
         assert_eq!(alter_request.catalog_name, "");
         assert_eq!(alter_request.schema_name, "");
         assert_eq!("monitor".to_string(), alter_request.table_name);
@@ -416,7 +466,7 @@ mod tests {
             })),
         };

-        let alter_request = alter_expr_to_request(1, expr).unwrap();
+        let alter_request = alter_expr_to_request(1, expr, None).unwrap();
         assert_eq!(alter_request.catalog_name, "test_catalog");
         assert_eq!(alter_request.schema_name, "test_schema");
         assert_eq!("monitor".to_string(), alter_request.table_name);
@@ -448,7 +498,7 @@ mod tests {
             })),
         };

-        let alter_request = alter_expr_to_request(1, expr).unwrap();
+        let alter_request = alter_expr_to_request(1, expr, None).unwrap();
         assert_eq!(alter_request.catalog_name, "test_catalog");
         assert_eq!(alter_request.schema_name, "test_schema");
         assert_eq!("monitor".to_string(), alter_request.table_name);

@@ -161,6 +161,27 @@ pub enum Error {
         #[snafu(source)]
         error: datatypes::error::Error,
     },
+
+    #[snafu(display("Sql common error"))]
+    SqlCommon {
+        source: common_sql::error::Error,
+        #[snafu(implicit)]
+        location: Location,
+    },
+
+    #[snafu(display("Missing required field in protobuf, column name: {}", column_name))]
+    ColumnNotFound {
+        column_name: String,
+        #[snafu(implicit)]
+        location: Location,
+    },
+
+    #[snafu(display("Need table metadata, but not found, table_id: {}", table_id))]
+    MissingTableMeta {
+        table_id: u32,
+        #[snafu(implicit)]
+        location: Location,
+    },
 }

 pub type Result<T> = std::result::Result<T, Error>;
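
Note: a brief sketch of how a caller might branch on the variants added above; the match is illustrative only (real call sites wrap these errors in their own error types), and only the variant names and fields come from the enum in this hunk.

use crate::error::Error;

// Hypothetical caller-side handling of the new variants.
fn describe_failure(err: &Error) -> String {
    match err {
        Error::MissingTableMeta { table_id, .. } => format!(
            "SET DEFAULT needs the table metadata for table {table_id}; fetch TableMeta and retry"
        ),
        Error::ColumnNotFound { column_name, .. } => {
            format!("column `{column_name}` does not exist in the target table")
        }
        Error::SqlCommon { source, .. } => format!("default constraint rejected: {source}"),
        other => other.to_string(),
    }
}
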
@@ -190,6 +211,9 @@ impl ErrorExt for Error {
             | Error::InvalidSetSkippingIndexOptionRequest { .. }
             | Error::MissingAlterIndexOption { .. }
             | Error::InvalidIndexOption { .. } => StatusCode::InvalidArguments,
+            Error::ColumnNotFound { .. } => StatusCode::TableColumnNotFound,
+            Error::SqlCommon { source, .. } => source.status_code(),
+            Error::MissingTableMeta { .. } => StatusCode::Unexpected,
         }
     }

Some files were not shown because too many files have changed in this diff.