Mirror of https://github.com/GreptimeTeam/greptimedb.git, synced 2026-01-04 12:22:55 +00:00

Compare commits: v0.15.0-ni ... flow/adjus (15 commits)

| SHA1 |
| --- |
| 4534e4c31d |
| 3d8278dc4c |
| e4328380b2 |
| e962076207 |
| 9ef8ba6460 |
| c26138963e |
| 12648f388a |
| 2979aa048e |
| 74222c3070 |
| 0311db3089 |
| e434294a0c |
| 8d2c1b7f6a |
| c50e84095e |
| d3d233257d |
| fdf32a8f46 |
@@ -59,7 +59,7 @@ runs:
--set base.podTemplate.main.resources.requests.cpu=50m \
--set base.podTemplate.main.resources.requests.memory=256Mi \
--set base.podTemplate.main.resources.limits.cpu=2000m \
--set base.podTemplate.main.resources.limits.memory=2Gi \
--set base.podTemplate.main.resources.limits.memory=3Gi \
--set frontend.replicas=${{ inputs.frontend-replicas }} \
--set datanode.replicas=${{ inputs.datanode-replicas }} \
--set meta.replicas=${{ inputs.meta-replicas }} \

15 .github/workflows/develop.yml vendored
@@ -250,6 +250,11 @@ jobs:
name: unstable-fuzz-logs
path: /tmp/unstable-greptime/
retention-days: 3
- name: Describe pods
if: failure()
shell: bash
run: |
kubectl describe pod -n my-greptimedb

build-greptime-ci:
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
@@ -405,6 +410,11 @@ jobs:
shell: bash
run: |
kubectl describe nodes
- name: Describe pod
if: failure()
shell: bash
run: |
kubectl describe pod -n my-greptimedb
- name: Export kind logs
if: failure()
shell: bash
@@ -554,6 +564,11 @@ jobs:
shell: bash
run: |
kubectl describe nodes
- name: Describe pods
if: failure()
shell: bash
run: |
kubectl describe pod -n my-greptimedb
- name: Export kind logs
if: failure()
shell: bash

34 .github/workflows/release.yml vendored
@@ -441,8 +441,8 @@ jobs:
aws-region: ${{ vars.EC2_RUNNER_REGION }}
github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}

bump-doc-version:
name: Bump doc version
bump-downstream-repo-versions:
name: Bump downstream repo versions
if: ${{ github.event_name == 'push' || github.event_name == 'schedule' }}
needs: [allocate-runners, publish-github-release]
runs-on: ubuntu-latest
@@ -456,36 +456,16 @@ jobs:
fetch-depth: 0
persist-credentials: false
- uses: ./.github/actions/setup-cyborg
- name: Bump doc version
- name: Bump downstream repo versions
working-directory: cyborg
run: pnpm tsx bin/bump-doc-version.ts
env:
VERSION: ${{ needs.allocate-runners.outputs.version }}
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
DOCS_REPO_TOKEN: ${{ secrets.DOCS_REPO_TOKEN }}

bump-website-version:
name: Bump website version
if: ${{ github.ref_type == 'tag' && !contains(github.ref_name, 'nightly') && github.event_name != 'schedule' }}
needs: [allocate-runners, publish-github-release]
runs-on: ubuntu-latest
# Permission reference: https://docs.github.com/en/actions/using-jobs/assigning-permissions-to-jobs
permissions:
issues: write # Allows the action to create issues for cyborg.
contents: write # Allows the action to create a release.
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
persist-credentials: false
- uses: ./.github/actions/setup-cyborg
- name: Bump website version
working-directory: cyborg
run: pnpm tsx bin/bump-website-version.ts
run: pnpm tsx bin/bump-versions.ts
env:
TARGET_REPOS: website,docs,demo
VERSION: ${{ needs.allocate-runners.outputs.version }}
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
WEBSITE_REPO_TOKEN: ${{ secrets.WEBSITE_REPO_TOKEN }}
DOCS_REPO_TOKEN: ${{ secrets.DOCS_REPO_TOKEN }}
DEMO_REPO_TOKEN: ${{ secrets.DEMO_REPO_TOKEN }}

bump-helm-charts-version:
name: Bump helm charts version

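The consolidated `bump-downstream-repo-versions` job above drives a single script through a `TARGET_REPOS` list instead of one job per repository. As a minimal sketch only (assuming the parsing used by `cyborg/bin/bump-versions.ts` later in this diff), the job's environment maps to a repository list roughly like this:

```typescript
// Sketch only: mirrors how bump-versions.ts (added later in this diff) reads the job env.
// VERSION comes from allocate-runners; TARGET_REPOS is "website,docs,demo" in release.yml.
const version = process.env.VERSION ?? "";
const cleanVersion = version.startsWith("v") ? version.slice(1) : version;
const targetRepos = process.env.TARGET_REPOS?.split(",").map((r) => r.trim()) || ["website", "docs"];
console.log(`Bumping ${targetRepos.join(", ")} to version ${cleanVersion}`);
```
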
22 Cargo.lock generated
@@ -4511,9 +4511,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "flate2"
|
||||
version = "1.1.1"
|
||||
version = "1.1.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "7ced92e76e966ca2fd84c8f7aa01a4aea65b0eb6648d72f7c8f3e2764a67fece"
|
||||
checksum = "4a3d7db9596fecd151c5f638c0ee5d5bd487b6e0ea232e5dc96d5250f6f94b1d"
|
||||
dependencies = [
|
||||
"crc32fast",
|
||||
"libz-rs-sys",
|
||||
@@ -5133,7 +5133,7 @@ dependencies = [
|
||||
[[package]]
|
||||
name = "greptime-proto"
|
||||
version = "0.1.0"
|
||||
source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=454c52634c3bac27de10bf0d85d5533eed1cf03f#454c52634c3bac27de10bf0d85d5533eed1cf03f"
|
||||
source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=2dca1dc67862d7b410838aef81232274c019b3f6#2dca1dc67862d7b410838aef81232274c019b3f6"
|
||||
dependencies = [
|
||||
"prost 0.13.5",
|
||||
"serde",
|
||||
@@ -5146,9 +5146,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "grok"
|
||||
version = "2.0.0"
|
||||
version = "2.1.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "273797968160270573071022613fc4aa28b91fe68f3eef6c96a1b2a1947ddfbd"
|
||||
checksum = "6c52724b609896f661a3f4641dd3a44dc602958ef615857c12d00756b4e9355b"
|
||||
dependencies = [
|
||||
"glob",
|
||||
"onig",
|
||||
@@ -6716,9 +6716,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "libz-rs-sys"
|
||||
version = "0.5.0"
|
||||
version = "0.5.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "6489ca9bd760fe9642d7644e827b0c9add07df89857b0416ee15c1cc1a3b8c5a"
|
||||
checksum = "172a788537a2221661b480fee8dc5f96c580eb34fa88764d3205dc356c7e4221"
|
||||
dependencies = [
|
||||
"zlib-rs",
|
||||
]
|
||||
@@ -9664,9 +9664,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "psl"
|
||||
version = "2.1.112"
|
||||
version = "2.1.119"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "1c6b4c497a0c6bfb466f75167c728b1a861b0cdc39de9c35b877208a270a9590"
|
||||
checksum = "d0e49aa528239f2ca13ad87387977c208e59c3fb8c437609f95f1b3898ec6ef1"
|
||||
dependencies = [
|
||||
"psl-types",
|
||||
]
|
||||
@@ -14701,9 +14701,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "zlib-rs"
|
||||
version = "0.5.0"
|
||||
version = "0.5.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "868b928d7949e09af2f6086dfc1e01936064cc7a819253bce650d4e2a2d63ba8"
|
||||
checksum = "626bd9fa9734751fc50d6060752170984d7053f5a39061f524cda68023d4db8a"
|
||||
|
||||
[[package]]
|
||||
name = "zstd"
|
||||
|
||||
@@ -133,7 +133,7 @@ etcd-client = "0.14"
fst = "0.4.7"
futures = "0.3"
futures-util = "0.3"
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "454c52634c3bac27de10bf0d85d5533eed1cf03f" }
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "2dca1dc67862d7b410838aef81232274c019b3f6" }
hex = "0.4"
http = "1"
humantime = "2.1"

@@ -100,7 +100,7 @@
|
||||
| `query` | -- | -- | The query engine options. |
|
||||
| `query.parallelism` | Integer | `0` | Parallelism of the query engine.<br/>Default to 0, which means the number of CPU cores. |
|
||||
| `storage` | -- | -- | The data storage options. |
|
||||
| `storage.data_home` | String | `./greptimedb_data/` | The working home directory. |
|
||||
| `storage.data_home` | String | `./greptimedb_data` | The working home directory. |
|
||||
| `storage.type` | String | `File` | The storage type used to store the data.<br/>- `File`: the data is stored in the local file system.<br/>- `S3`: the data is stored in the S3 object storage.<br/>- `Gcs`: the data is stored in the Google Cloud Storage.<br/>- `Azblob`: the data is stored in the Azure Blob Storage.<br/>- `Oss`: the data is stored in the Aliyun OSS. |
|
||||
| `storage.cache_path` | String | Unset | Read cache configuration for object storage such as 'S3' etc, it's configured by default when using object storage. It is recommended to configure it when using object storage for better performance.<br/>A local file directory, defaults to `{data_home}`. An empty string means disabling. |
|
||||
| `storage.cache_capacity` | String | Unset | The local file cache capacity in bytes. If your disk space is sufficient, it is recommended to set it larger. |
|
||||
@@ -195,13 +195,13 @@
|
||||
| `slow_query.record_type` | String | Unset | The record type of slow queries. It can be `system_table` or `log`. |
|
||||
| `slow_query.threshold` | String | Unset | The threshold of slow query. |
|
||||
| `slow_query.sample_ratio` | Float | Unset | The sampling ratio of slow query log. The value should be in the range of (0, 1]. |
|
||||
| `export_metrics` | -- | -- | The datanode can export its metrics and send to Prometheus compatible service (e.g. send to `greptimedb` itself) from remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape. |
|
||||
| `export_metrics` | -- | -- | The standalone can export its metrics and send to Prometheus compatible service (e.g. `greptimedb`) from remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape. |
|
||||
| `export_metrics.enable` | Bool | `false` | whether enable export metrics. |
|
||||
| `export_metrics.write_interval` | String | `30s` | The interval of export metrics. |
|
||||
| `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommended to collect metrics generated by itself<br/>You must create the database before enabling it. |
|
||||
| `export_metrics.self_import.db` | String | Unset | -- |
|
||||
| `export_metrics.remote_write` | -- | -- | -- |
|
||||
| `export_metrics.remote_write.url` | String | `""` | The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. |
|
||||
| `export_metrics.remote_write.url` | String | `""` | The prometheus remote write endpoint that the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. |
|
||||
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
|
||||
| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
|
||||
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
|
||||
@@ -232,6 +232,7 @@
|
||||
| `grpc.bind_addr` | String | `127.0.0.1:4001` | The address to bind the gRPC server. |
|
||||
| `grpc.server_addr` | String | `127.0.0.1:4001` | The address advertised to the metasrv, and used for connections from outside the host.<br/>If left empty or unset, the server will automatically use the IP address of the first network interface<br/>on the host, with the same port number as the one specified in `grpc.bind_addr`. |
|
||||
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
|
||||
| `grpc.flight_compression` | String | `arrow_ipc` | Compression mode for frontend side Arrow IPC service. Available options:<br/>- `none`: disable all compression<br/>- `transport`: only enable gRPC transport compression (zstd)<br/>- `arrow_ipc`: only enable Arrow IPC compression (lz4)<br/>- `all`: enable all compression. |
|
||||
| `grpc.tls` | -- | -- | gRPC server TLS options, see `mysql.tls` section. |
|
||||
| `grpc.tls.mode` | String | `disable` | TLS mode. |
|
||||
| `grpc.tls.cert_path` | String | Unset | Certificate file path. |
|
||||
@@ -298,13 +299,11 @@
|
||||
| `slow_query.threshold` | String | `30s` | The threshold of slow query. It can be human readable time string, for example: `10s`, `100ms`, `1s`. |
|
||||
| `slow_query.sample_ratio` | Float | `1.0` | The sampling ratio of slow query log. The value should be in the range of (0, 1]. For example, `0.1` means 10% of the slow queries will be logged and `1.0` means all slow queries will be logged. |
|
||||
| `slow_query.ttl` | String | `30d` | The TTL of the `slow_queries` system table. Default is `30d` when `record_type` is `system_table`. |
|
||||
| `export_metrics` | -- | -- | The datanode can export its metrics and send to Prometheus compatible service (e.g. send to `greptimedb` itself) from remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape. |
|
||||
| `export_metrics` | -- | -- | The frontend can export its metrics and send to Prometheus compatible service (e.g. `greptimedb` itself) from remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape. |
|
||||
| `export_metrics.enable` | Bool | `false` | whether enable export metrics. |
|
||||
| `export_metrics.write_interval` | String | `30s` | The interval of export metrics. |
|
||||
| `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommend to collect metrics generated by itself<br/>You must create the database before enabling it. |
|
||||
| `export_metrics.self_import.db` | String | Unset | -- |
|
||||
| `export_metrics.remote_write` | -- | -- | -- |
|
||||
| `export_metrics.remote_write.url` | String | `""` | The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. |
|
||||
| `export_metrics.remote_write.url` | String | `""` | The prometheus remote write endpoint that the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. |
|
||||
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
|
||||
| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
|
||||
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
|
||||
@@ -314,12 +313,10 @@
|
||||
|
||||
| Key | Type | Default | Descriptions |
|
||||
| --- | -----| ------- | ----------- |
|
||||
| `data_home` | String | `./greptimedb_data/metasrv/` | The working home directory. |
|
||||
| `bind_addr` | String | `127.0.0.1:3002` | The bind address of metasrv. |
|
||||
| `server_addr` | String | `127.0.0.1:3002` | The communication server address for the frontend and datanode to connect to metasrv.<br/>If left empty or unset, the server will automatically use the IP address of the first network interface<br/>on the host, with the same port number as the one specified in `bind_addr`. |
|
||||
| `data_home` | String | `./greptimedb_data` | The working home directory. |
|
||||
| `store_addrs` | Array | -- | Store server address default to etcd store.<br/>For postgres store, the format is:<br/>"password=password dbname=postgres user=postgres host=localhost port=5432"<br/>For etcd store, the format is:<br/>"127.0.0.1:2379" |
|
||||
| `store_key_prefix` | String | `""` | If it's not empty, the metasrv will store all data with this key prefix. |
|
||||
| `backend` | String | `etcd_store` | The datastore for meta server.<br/>Available values:<br/>- `etcd_store` (default value)<br/>- `memory_store`<br/>- `postgres_store` |
|
||||
| `backend` | String | `etcd_store` | The datastore for meta server.<br/>Available values:<br/>- `etcd_store` (default value)<br/>- `memory_store`<br/>- `postgres_store`<br/>- `mysql_store` |
|
||||
| `meta_table_name` | String | `greptime_metakv` | Table name in RDS to store metadata. Effect when using a RDS kvbackend.<br/>**Only used when backend is `postgres_store`.** |
|
||||
| `meta_election_lock_id` | Integer | `1` | Advisory lock id in PostgreSQL for election. Effect when using PostgreSQL as kvbackend<br/>Only used when backend is `postgres_store`. |
|
||||
| `selector` | String | `round_robin` | Datanode selector type.<br/>- `round_robin` (default value)<br/>- `lease_based`<br/>- `load_based`<br/>For details, please see "https://docs.greptime.com/developer-guide/metasrv/selector". |
|
||||
@@ -331,6 +328,12 @@
|
||||
| `runtime` | -- | -- | The runtime options. |
|
||||
| `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
|
||||
| `runtime.compact_rt_size` | Integer | `4` | The number of threads to execute the runtime for global write operations. |
|
||||
| `grpc` | -- | -- | The gRPC server options. |
|
||||
| `grpc.bind_addr` | String | `127.0.0.1:3002` | The address to bind the gRPC server. |
|
||||
| `grpc.server_addr` | String | `127.0.0.1:3002` | The communication server address for the frontend and datanode to connect to metasrv.<br/>If left empty or unset, the server will automatically use the IP address of the first network interface<br/>on the host, with the same port number as the one specified in `bind_addr`. |
|
||||
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
|
||||
| `grpc.max_recv_message_size` | String | `512MB` | The maximum receive message size for gRPC server. |
|
||||
| `grpc.max_send_message_size` | String | `512MB` | The maximum send message size for gRPC server. |
|
||||
| `http` | -- | -- | The HTTP server options. |
|
||||
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
|
||||
| `http.timeout` | String | `0s` | HTTP request timeout. Set to 0 to disable timeout. |
|
||||
@@ -372,13 +375,11 @@
|
||||
| `logging.max_log_files` | Integer | `720` | The maximum amount of log files. |
|
||||
| `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing will be sampled and exported.<br/>Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.<br/>ratio > 1 are treated as 1. Fractions < 0 are treated as 0 |
|
||||
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
|
||||
| `export_metrics` | -- | -- | The datanode can export its metrics and send to Prometheus compatible service (e.g. send to `greptimedb` itself) from remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape. |
|
||||
| `export_metrics` | -- | -- | The metasrv can export its metrics and send to Prometheus compatible service (e.g. `greptimedb` itself) from remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape. |
|
||||
| `export_metrics.enable` | Bool | `false` | whether enable export metrics. |
|
||||
| `export_metrics.write_interval` | String | `30s` | The interval of export metrics. |
|
||||
| `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommend to collect metrics generated by itself<br/>You must create the database before enabling it. |
|
||||
| `export_metrics.self_import.db` | String | Unset | -- |
|
||||
| `export_metrics.remote_write` | -- | -- | -- |
|
||||
| `export_metrics.remote_write.url` | String | `""` | The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. |
|
||||
| `export_metrics.remote_write.url` | String | `""` | The prometheus remote write endpoint that the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. |
|
||||
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
|
||||
| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
|
||||
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
|
||||
@@ -404,6 +405,7 @@
|
||||
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
|
||||
| `grpc.max_recv_message_size` | String | `512MB` | The maximum receive message size for gRPC server. |
|
||||
| `grpc.max_send_message_size` | String | `512MB` | The maximum send message size for gRPC server. |
|
||||
| `grpc.flight_compression` | String | `arrow_ipc` | Compression mode for datanode side Arrow IPC service. Available options:<br/>- `none`: disable all compression<br/>- `transport`: only enable gRPC transport compression (zstd)<br/>- `arrow_ipc`: only enable Arrow IPC compression (lz4)<br/>- `all`: enable all compression. |
|
||||
| `grpc.tls` | -- | -- | gRPC server TLS options, see `mysql.tls` section. |
|
||||
| `grpc.tls.mode` | String | `disable` | TLS mode. |
|
||||
| `grpc.tls.cert_path` | String | Unset | Certificate file path. |
|
||||
@@ -446,7 +448,7 @@
|
||||
| `query` | -- | -- | The query engine options. |
|
||||
| `query.parallelism` | Integer | `0` | Parallelism of the query engine.<br/>Default to 0, which means the number of CPU cores. |
|
||||
| `storage` | -- | -- | The data storage options. |
|
||||
| `storage.data_home` | String | `./greptimedb_data/` | The working home directory. |
|
||||
| `storage.data_home` | String | `./greptimedb_data` | The working home directory. |
|
||||
| `storage.type` | String | `File` | The storage type used to store the data.<br/>- `File`: the data is stored in the local file system.<br/>- `S3`: the data is stored in the S3 object storage.<br/>- `Gcs`: the data is stored in the Google Cloud Storage.<br/>- `Azblob`: the data is stored in the Azure Blob Storage.<br/>- `Oss`: the data is stored in the Aliyun OSS. |
|
||||
| `storage.cache_path` | String | Unset | Read cache configuration for object storage such as 'S3' etc, it's configured by default when using object storage. It is recommended to configure it when using object storage for better performance.<br/>A local file directory, defaults to `{data_home}`. An empty string means disabling. |
|
||||
| `storage.cache_capacity` | String | Unset | The local file cache capacity in bytes. If your disk space is sufficient, it is recommended to set it larger. |
|
||||
@@ -536,13 +538,11 @@
|
||||
| `logging.max_log_files` | Integer | `720` | The maximum amount of log files. |
|
||||
| `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing will be sampled and exported.<br/>Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.<br/>ratio > 1 are treated as 1. Fractions < 0 are treated as 0 |
|
||||
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
|
||||
| `export_metrics` | -- | -- | The datanode can export its metrics and send to Prometheus compatible service (e.g. send to `greptimedb` itself) from remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape. |
|
||||
| `export_metrics` | -- | -- | The datanode can export its metrics and send to Prometheus compatible service (e.g. `greptimedb` itself) from remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape. |
|
||||
| `export_metrics.enable` | Bool | `false` | whether enable export metrics. |
|
||||
| `export_metrics.write_interval` | String | `30s` | The interval of export metrics. |
|
||||
| `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommend to collect metrics generated by itself<br/>You must create the database before enabling it. |
|
||||
| `export_metrics.self_import.db` | String | Unset | -- |
|
||||
| `export_metrics.remote_write` | -- | -- | -- |
|
||||
| `export_metrics.remote_write.url` | String | `""` | The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. |
|
||||
| `export_metrics.remote_write.url` | String | `""` | The prometheus remote write endpoint that the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. |
|
||||
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
|
||||
| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
|
||||
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
|
||||
|
||||
@@ -44,6 +44,12 @@ runtime_size = 8
|
||||
max_recv_message_size = "512MB"
|
||||
## The maximum send message size for gRPC server.
|
||||
max_send_message_size = "512MB"
|
||||
## Compression mode for datanode side Arrow IPC service. Available options:
|
||||
## - `none`: disable all compression
|
||||
## - `transport`: only enable gRPC transport compression (zstd)
|
||||
## - `arrow_ipc`: only enable Arrow IPC compression (lz4)
|
||||
## - `all`: enable all compression.
|
||||
flight_compression = "arrow_ipc"
|
||||
|
||||
## gRPC server TLS options, see `mysql.tls` section.
|
||||
[grpc.tls]
|
||||
@@ -252,7 +258,7 @@ parallelism = 0
|
||||
## The data storage options.
|
||||
[storage]
|
||||
## The working home directory.
|
||||
data_home = "./greptimedb_data/"
|
||||
data_home = "./greptimedb_data"
|
||||
|
||||
## The storage type used to store the data.
|
||||
## - `File`: the data is stored in the local file system.
|
||||
@@ -635,24 +641,16 @@ max_log_files = 720
|
||||
[logging.tracing_sample_ratio]
|
||||
default_ratio = 1.0
|
||||
|
||||
## The datanode can export its metrics and send to Prometheus compatible service (e.g. send to `greptimedb` itself) from remote-write API.
|
||||
## The datanode can export its metrics and send to Prometheus compatible service (e.g. `greptimedb` itself) from remote-write API.
|
||||
## This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
|
||||
[export_metrics]
|
||||
|
||||
## whether enable export metrics.
|
||||
enable = false
|
||||
|
||||
## The interval of export metrics.
|
||||
write_interval = "30s"
|
||||
|
||||
## For `standalone` mode, `self_import` is recommend to collect metrics generated by itself
|
||||
## You must create the database before enabling it.
|
||||
[export_metrics.self_import]
|
||||
## @toml2docs:none-default
|
||||
db = "greptime_metrics"
|
||||
|
||||
[export_metrics.remote_write]
|
||||
## The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
|
||||
## The prometheus remote write endpoint that the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
|
||||
url = ""
|
||||
|
||||
## HTTP headers of Prometheus remote-write carry.
|
||||
|
||||
@@ -54,6 +54,12 @@ bind_addr = "127.0.0.1:4001"
|
||||
server_addr = "127.0.0.1:4001"
|
||||
## The number of server worker threads.
|
||||
runtime_size = 8
|
||||
## Compression mode for frontend side Arrow IPC service. Available options:
|
||||
## - `none`: disable all compression
|
||||
## - `transport`: only enable gRPC transport compression (zstd)
|
||||
## - `arrow_ipc`: only enable Arrow IPC compression (lz4)
|
||||
## - `all`: enable all compression.
|
||||
flight_compression = "arrow_ipc"
|
||||
|
||||
## gRPC server TLS options, see `mysql.tls` section.
|
||||
[grpc.tls]
|
||||
@@ -247,24 +253,16 @@ sample_ratio = 1.0
|
||||
## The TTL of the `slow_queries` system table. Default is `30d` when `record_type` is `system_table`.
|
||||
ttl = "30d"
|
||||
|
||||
## The datanode can export its metrics and send to Prometheus compatible service (e.g. send to `greptimedb` itself) from remote-write API.
|
||||
## The frontend can export its metrics and send to Prometheus compatible service (e.g. `greptimedb` itself) from remote-write API.
|
||||
## This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
|
||||
[export_metrics]
|
||||
|
||||
## whether enable export metrics.
|
||||
enable = false
|
||||
|
||||
## The interval of export metrics.
|
||||
write_interval = "30s"
|
||||
|
||||
## For `standalone` mode, `self_import` is recommend to collect metrics generated by itself
|
||||
## You must create the database before enabling it.
|
||||
[export_metrics.self_import]
|
||||
## @toml2docs:none-default
|
||||
db = "greptime_metrics"
|
||||
|
||||
[export_metrics.remote_write]
|
||||
## The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
|
||||
## The prometheus remote write endpoint that the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
|
||||
url = ""
|
||||
|
||||
## HTTP headers of Prometheus remote-write carry.
|
||||
|
||||
@@ -1,13 +1,5 @@
|
||||
## The working home directory.
|
||||
data_home = "./greptimedb_data/metasrv/"
|
||||
|
||||
## The bind address of metasrv.
|
||||
bind_addr = "127.0.0.1:3002"
|
||||
|
||||
## The communication server address for the frontend and datanode to connect to metasrv.
|
||||
## If left empty or unset, the server will automatically use the IP address of the first network interface
|
||||
## on the host, with the same port number as the one specified in `bind_addr`.
|
||||
server_addr = "127.0.0.1:3002"
|
||||
data_home = "./greptimedb_data"
|
||||
|
||||
## Store server address default to etcd store.
|
||||
## For postgres store, the format is:
|
||||
@@ -24,6 +16,7 @@ store_key_prefix = ""
|
||||
## - `etcd_store` (default value)
|
||||
## - `memory_store`
|
||||
## - `postgres_store`
|
||||
## - `mysql_store`
|
||||
backend = "etcd_store"
|
||||
|
||||
## Table name in RDS to store metadata. Effect when using a RDS kvbackend.
|
||||
@@ -67,6 +60,21 @@ node_max_idle_time = "24hours"
|
||||
## The number of threads to execute the runtime for global write operations.
|
||||
#+ compact_rt_size = 4
|
||||
|
||||
## The gRPC server options.
|
||||
[grpc]
|
||||
## The address to bind the gRPC server.
|
||||
bind_addr = "127.0.0.1:3002"
|
||||
## The communication server address for the frontend and datanode to connect to metasrv.
|
||||
## If left empty or unset, the server will automatically use the IP address of the first network interface
|
||||
## on the host, with the same port number as the one specified in `bind_addr`.
|
||||
server_addr = "127.0.0.1:3002"
|
||||
## The number of server worker threads.
|
||||
runtime_size = 8
|
||||
## The maximum receive message size for gRPC server.
|
||||
max_recv_message_size = "512MB"
|
||||
## The maximum send message size for gRPC server.
|
||||
max_send_message_size = "512MB"
|
||||
|
||||
## The HTTP server options.
|
||||
[http]
|
||||
## The address to bind the HTTP server.
|
||||
@@ -229,24 +237,16 @@ max_log_files = 720
|
||||
[logging.tracing_sample_ratio]
|
||||
default_ratio = 1.0
|
||||
|
||||
## The datanode can export its metrics and send to Prometheus compatible service (e.g. send to `greptimedb` itself) from remote-write API.
|
||||
## The metasrv can export its metrics and send to Prometheus compatible service (e.g. `greptimedb` itself) from remote-write API.
|
||||
## This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
|
||||
[export_metrics]
|
||||
|
||||
## whether enable export metrics.
|
||||
enable = false
|
||||
|
||||
## The interval of export metrics.
|
||||
write_interval = "30s"
|
||||
|
||||
## For `standalone` mode, `self_import` is recommend to collect metrics generated by itself
|
||||
## You must create the database before enabling it.
|
||||
[export_metrics.self_import]
|
||||
## @toml2docs:none-default
|
||||
db = "greptime_metrics"
|
||||
|
||||
[export_metrics.remote_write]
|
||||
## The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
|
||||
## The prometheus remote write endpoint that the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
|
||||
url = ""
|
||||
|
||||
## HTTP headers of Prometheus remote-write carry.
|
||||
|
||||
@@ -350,7 +350,7 @@ parallelism = 0
|
||||
## The data storage options.
|
||||
[storage]
|
||||
## The working home directory.
|
||||
data_home = "./greptimedb_data/"
|
||||
data_home = "./greptimedb_data"
|
||||
|
||||
## The storage type used to store the data.
|
||||
## - `File`: the data is stored in the local file system.
|
||||
@@ -750,13 +750,11 @@ default_ratio = 1.0
|
||||
## @toml2docs:none-default
|
||||
#+ sample_ratio = 1.0
|
||||
|
||||
## The datanode can export its metrics and send to Prometheus compatible service (e.g. send to `greptimedb` itself) from remote-write API.
|
||||
## The standalone can export its metrics and send to Prometheus compatible service (e.g. `greptimedb`) from remote-write API.
|
||||
## This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
|
||||
[export_metrics]
|
||||
|
||||
## whether enable export metrics.
|
||||
enable = false
|
||||
|
||||
## The interval of export metrics.
|
||||
write_interval = "30s"
|
||||
|
||||
@@ -767,7 +765,7 @@ write_interval = "30s"
|
||||
db = "greptime_metrics"
|
||||
|
||||
[export_metrics.remote_write]
|
||||
## The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
|
||||
## The prometheus remote write endpoint that the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
|
||||
url = ""
|
||||
|
||||
## HTTP headers of Prometheus remote-write carry.
|
||||
|
||||
@@ -1,75 +0,0 @@
|
||||
/*
|
||||
* Copyright 2023 Greptime Team
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import * as core from "@actions/core";
|
||||
import {obtainClient} from "@/common";
|
||||
|
||||
async function triggerWorkflow(workflowId: string, version: string) {
|
||||
const docsClient = obtainClient("DOCS_REPO_TOKEN")
|
||||
try {
|
||||
await docsClient.rest.actions.createWorkflowDispatch({
|
||||
owner: "GreptimeTeam",
|
||||
repo: "docs",
|
||||
workflow_id: workflowId,
|
||||
ref: "main",
|
||||
inputs: {
|
||||
version,
|
||||
},
|
||||
});
|
||||
console.log(`Successfully triggered ${workflowId} workflow with version ${version}`);
|
||||
} catch (error) {
|
||||
core.setFailed(`Failed to trigger workflow: ${error.message}`);
|
||||
}
|
||||
}
|
||||
|
||||
function determineWorkflow(version: string): [string, string] {
|
||||
// Check if it's a nightly version
|
||||
if (version.includes('nightly')) {
|
||||
return ['bump-nightly-version.yml', version];
|
||||
}
|
||||
|
||||
const parts = version.split('.');
|
||||
|
||||
if (parts.length !== 3) {
|
||||
throw new Error('Invalid version format');
|
||||
}
|
||||
|
||||
// If patch version (last number) is 0, it's a major version
|
||||
// Return only major.minor version
|
||||
if (parts[2] === '0') {
|
||||
return ['bump-version.yml', `${parts[0]}.${parts[1]}`];
|
||||
}
|
||||
|
||||
// Otherwise it's a patch version, use full version
|
||||
return ['bump-patch-version.yml', version];
|
||||
}
|
||||
|
||||
const version = process.env.VERSION;
|
||||
if (!version) {
|
||||
core.setFailed("VERSION environment variable is required");
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
// Remove 'v' prefix if exists
|
||||
const cleanVersion = version.startsWith('v') ? version.slice(1) : version;
|
||||
|
||||
try {
|
||||
const [workflowId, apiVersion] = determineWorkflow(cleanVersion);
|
||||
triggerWorkflow(workflowId, apiVersion);
|
||||
} catch (error) {
|
||||
core.setFailed(`Error processing version: ${error.message}`);
|
||||
process.exit(1);
|
||||
}
|
||||
156 cyborg/bin/bump-versions.ts Normal file
@@ -0,0 +1,156 @@
/*
 * Copyright 2023 Greptime Team
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import * as core from "@actions/core";
import {obtainClient} from "@/common";

interface RepoConfig {
  tokenEnv: string;
  repo: string;
  workflowLogic: (version: string) => [string, string] | null;
}

const REPO_CONFIGS: Record<string, RepoConfig> = {
  website: {
    tokenEnv: "WEBSITE_REPO_TOKEN",
    repo: "website",
    workflowLogic: (version: string) => {
      // Skip nightly versions for website
      if (version.includes('nightly')) {
        console.log('Nightly version detected for website, skipping workflow trigger.');
        return null;
      }
      return ['bump-patch-version.yml', version];
    }
  },
  demo: {
    tokenEnv: "DEMO_REPO_TOKEN",
    repo: "demo-scene",
    workflowLogic: (version: string) => {
      // Skip nightly versions for demo
      if (version.includes('nightly')) {
        console.log('Nightly version detected for demo, skipping workflow trigger.');
        return null;
      }
      return ['bump-patch-version.yml', version];
    }
  },
  docs: {
    tokenEnv: "DOCS_REPO_TOKEN",
    repo: "docs",
    workflowLogic: (version: string) => {
      // Check if it's a nightly version
      if (version.includes('nightly')) {
        return ['bump-nightly-version.yml', version];
      }

      const parts = version.split('.');
      if (parts.length !== 3) {
        throw new Error('Invalid version format');
      }

      // If patch version (last number) is 0, it's a major version
      // Return only major.minor version
      if (parts[2] === '0') {
        return ['bump-version.yml', `${parts[0]}.${parts[1]}`];
      }

      // Otherwise it's a patch version, use full version
      return ['bump-patch-version.yml', version];
    }
  }
};

async function triggerWorkflow(repoConfig: RepoConfig, workflowId: string, version: string) {
  const client = obtainClient(repoConfig.tokenEnv);
  try {
    await client.rest.actions.createWorkflowDispatch({
      owner: "GreptimeTeam",
      repo: repoConfig.repo,
      workflow_id: workflowId,
      ref: "main",
      inputs: {
        version,
      },
    });
    console.log(`Successfully triggered ${workflowId} workflow for ${repoConfig.repo} with version ${version}`);
  } catch (error) {
    core.setFailed(`Failed to trigger workflow for ${repoConfig.repo}: ${error.message}`);
    throw error;
  }
}

async function processRepo(repoName: string, version: string) {
  const repoConfig = REPO_CONFIGS[repoName];
  if (!repoConfig) {
    throw new Error(`Unknown repository: ${repoName}`);
  }

  try {
    const workflowResult = repoConfig.workflowLogic(version);
    if (workflowResult === null) {
      // Skip this repo (e.g., nightly version for website)
      return;
    }

    const [workflowId, apiVersion] = workflowResult;
    await triggerWorkflow(repoConfig, workflowId, apiVersion);
  } catch (error) {
    core.setFailed(`Error processing ${repoName} with version ${version}: ${error.message}`);
    throw error;
  }
}

async function main() {
  const version = process.env.VERSION;
  if (!version) {
    core.setFailed("VERSION environment variable is required");
    process.exit(1);
  }

  // Remove 'v' prefix if exists
  const cleanVersion = version.startsWith('v') ? version.slice(1) : version;

  // Get target repositories from environment variable
  // Default to both if not specified
  const targetRepos = process.env.TARGET_REPOS?.split(',').map(repo => repo.trim()) || ['website', 'docs'];

  console.log(`Processing version ${cleanVersion} for repositories: ${targetRepos.join(', ')}`);

  const errors: string[] = [];

  // Process each repository
  for (const repo of targetRepos) {
    try {
      await processRepo(repo, cleanVersion);
    } catch (error) {
      errors.push(`${repo}: ${error.message}`);
    }
  }

  if (errors.length > 0) {
    core.setFailed(`Failed to process some repositories: ${errors.join('; ')}`);
    process.exit(1);
  }

  console.log('All repositories processed successfully');
}

// Execute main function
main().catch((error) => {
  core.setFailed(`Unexpected error: ${error.message}`);
  process.exit(1);
});
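For illustration only (not part of the diff): the `docs` entry in `REPO_CONFIGS` above routes a release version to a workflow roughly as follows, where `pickDocsWorkflow` is a hypothetical stand-in for `REPO_CONFIGS.docs.workflowLogic`, shown with a few sample versions.

```typescript
// Hypothetical stand-in for REPO_CONFIGS.docs.workflowLogic above (illustration only).
function pickDocsWorkflow(version: string): [string, string] {
  if (version.includes("nightly")) return ["bump-nightly-version.yml", version];
  const parts = version.split(".");
  if (parts.length !== 3) throw new Error("Invalid version format");
  // An x.y.0 release bumps the docs to the new major.minor version.
  if (parts[2] === "0") return ["bump-version.yml", `${parts[0]}.${parts[1]}`];
  // Any other release bumps the full patch version.
  return ["bump-patch-version.yml", version];
}

console.log(pickDocsWorkflow("0.15.0"));         // -> bump-version.yml with "0.15"
console.log(pickDocsWorkflow("0.15.3"));         // -> bump-patch-version.yml with "0.15.3"
console.log(pickDocsWorkflow("0.15.0-nightly")); // -> bump-nightly-version.yml
```
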
@@ -1,57 +0,0 @@
|
||||
/*
|
||||
* Copyright 2023 Greptime Team
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import * as core from "@actions/core";
|
||||
import {obtainClient} from "@/common";
|
||||
|
||||
async function triggerWorkflow(workflowId: string, version: string) {
|
||||
const websiteClient = obtainClient("WEBSITE_REPO_TOKEN")
|
||||
try {
|
||||
await websiteClient.rest.actions.createWorkflowDispatch({
|
||||
owner: "GreptimeTeam",
|
||||
repo: "website",
|
||||
workflow_id: workflowId,
|
||||
ref: "main",
|
||||
inputs: {
|
||||
version,
|
||||
},
|
||||
});
|
||||
console.log(`Successfully triggered ${workflowId} workflow with version ${version}`);
|
||||
} catch (error) {
|
||||
core.setFailed(`Failed to trigger workflow: ${error.message}`);
|
||||
}
|
||||
}
|
||||
|
||||
const version = process.env.VERSION;
|
||||
if (!version) {
|
||||
core.setFailed("VERSION environment variable is required");
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
// Remove 'v' prefix if exists
|
||||
const cleanVersion = version.startsWith('v') ? version.slice(1) : version;
|
||||
|
||||
if (cleanVersion.includes('nightly')) {
|
||||
console.log('Nightly version detected, skipping workflow trigger.');
|
||||
process.exit(0);
|
||||
}
|
||||
|
||||
try {
|
||||
triggerWorkflow('bump-patch-version.yml', cleanVersion);
|
||||
} catch (error) {
|
||||
core.setFailed(`Error processing version: ${error.message}`);
|
||||
process.exit(1);
|
||||
}
|
||||
File diff suppressed because it is too large
@@ -60,7 +60,7 @@
|
||||
| Read Stage P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, stage) (rate(greptime_mito_read_stage_elapsed_bucket{instance=~"$datanode"}[$__rate_interval])))` | `timeseries` | Read Stage P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]` |
|
||||
| Write Stage P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, stage) (rate(greptime_mito_write_stage_elapsed_bucket{instance=~"$datanode"}[$__rate_interval])))` | `timeseries` | Write Stage P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]` |
|
||||
| Compaction OPS per Instance | `sum by(instance, pod) (rate(greptime_mito_compaction_total_elapsed_count{instance=~"$datanode"}[$__rate_interval]))` | `timeseries` | Compaction OPS per Instance. | `prometheus` | `ops` | `[{{ instance }}]-[{{pod}}]` |
|
||||
| Compaction Elapsed Time per Instance by Stage | `histogram_quantile(0.99, sum by(instance, pod, le, stage) (rate(greptime_mito_compaction_stage_elapsed_bucket{instance=~"$datanode"}[$__rate_interval])))`<br/>`sum by(instance, pod, le, stage) (rate(greptime_mito_compaction_stage_elapsed_sum{instance=~"$datanode"}[$__rate_interval]))/sum by(instance, pod, le, stage) (rate(greptime_mito_compaction_stage_elapsed_count{instance=~"$datanode"}[$__rate_interval]))` | `timeseries` | Compaction latency by stage | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-p99` |
|
||||
| Compaction Elapsed Time per Instance by Stage | `histogram_quantile(0.99, sum by(instance, pod, le, stage) (rate(greptime_mito_compaction_stage_elapsed_bucket{instance=~"$datanode"}[$__rate_interval])))`<br/>`sum by(instance, pod, stage) (rate(greptime_mito_compaction_stage_elapsed_sum{instance=~"$datanode"}[$__rate_interval]))/sum by(instance, pod, stage) (rate(greptime_mito_compaction_stage_elapsed_count{instance=~"$datanode"}[$__rate_interval]))` | `timeseries` | Compaction latency by stage | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-p99` |
|
||||
| Compaction P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le,stage) (rate(greptime_mito_compaction_total_elapsed_bucket{instance=~"$datanode"}[$__rate_interval])))` | `timeseries` | Compaction P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-compaction` |
|
||||
| WAL write size | `histogram_quantile(0.95, sum by(le,instance, pod) (rate(raft_engine_write_size_bucket[$__rate_interval])))`<br/>`histogram_quantile(0.99, sum by(le,instance,pod) (rate(raft_engine_write_size_bucket[$__rate_interval])))`<br/>`sum by (instance, pod)(rate(raft_engine_write_size_sum[$__rate_interval]))` | `timeseries` | Write-ahead logs write size as bytes. This chart includes stats of p95 and p99 size by instance, total WAL write rate. | `prometheus` | `bytes` | `[{{instance}}]-[{{pod}}]-req-size-p95` |
|
||||
| Cached Bytes per Instance | `greptime_mito_cache_bytes{instance=~"$datanode"}` | `timeseries` | Cached Bytes per Instance. | `prometheus` | `decbytes` | `[{{instance}}]-[{{pod}}]-[{{type}}]` |
|
||||
@@ -69,7 +69,7 @@
|
||||
| Log Store op duration seconds | `histogram_quantile(0.99, sum by(le,logstore,optype,instance, pod) (rate(greptime_logstore_op_elapsed_bucket[$__rate_interval])))` | `timeseries` | Write-ahead log operations latency at p99 | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{logstore}}]-[{{optype}}]-p99` |
|
||||
| Inflight Flush | `greptime_mito_inflight_flush_count` | `timeseries` | Ongoing flush task count | `prometheus` | `none` | `[{{instance}}]-[{{pod}}]` |
|
||||
| Compaction Input/Output Bytes | `sum by(instance, pod) (greptime_mito_compaction_input_bytes)`<br/>`sum by(instance, pod) (greptime_mito_compaction_output_bytes)` | `timeseries` | Compaction oinput output bytes | `prometheus` | `bytes` | `[{{instance}}]-[{{pod}}]-input` |
|
||||
| Region Worker Handle Bulk Insert Requests | `histogram_quantile(0.95, sum by(le,instance, stage, pod) (rate(greptime_region_worker_handle_write_bucket[$__rate_interval])))`<br/>`sum by(le,instance, stage, pod) (rate(greptime_region_worker_handle_write_sum[$__rate_interval]))/sum by(le,instance, stage, pod) (rate(greptime_region_worker_handle_write_count[$__rate_interval]))` | `timeseries` | Per-stage elapsed time for region worker to handle bulk insert region requests. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-P95` |
|
||||
| Region Worker Handle Bulk Insert Requests | `histogram_quantile(0.95, sum by(le,instance, stage, pod) (rate(greptime_region_worker_handle_write_bucket[$__rate_interval])))`<br/>`sum by(instance, stage, pod) (rate(greptime_region_worker_handle_write_sum[$__rate_interval]))/sum by(instance, stage, pod) (rate(greptime_region_worker_handle_write_count[$__rate_interval]))` | `timeseries` | Per-stage elapsed time for region worker to handle bulk insert region requests. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-P95` |
|
||||
| Region Worker Convert Requests | `histogram_quantile(0.95, sum by(le, instance, stage, pod) (rate(greptime_datanode_convert_region_request_bucket[$__rate_interval])))`<br/>`sum by(le,instance, stage, pod) (rate(greptime_datanode_convert_region_request_sum[$__rate_interval]))/sum by(le,instance, stage, pod) (rate(greptime_datanode_convert_region_request_count[$__rate_interval]))` | `timeseries` | Per-stage elapsed time for region worker to decode requests. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-P95` |
|
||||
# OpenDAL
|
||||
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
|
||||
@@ -88,9 +88,19 @@
|
||||
# Metasrv
|
||||
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
|
||||
| --- | --- | --- | --- | --- | --- | --- |
|
||||
| Region migration datanode | `greptime_meta_region_migration_stat{datanode_type="src"}`<br/>`greptime_meta_region_migration_stat{datanode_type="desc"}` | `state-timeline` | Counter of region migration by source and destination | `prometheus` | `none` | `from-datanode-{{datanode_id}}` |
|
||||
| Region migration error | `greptime_meta_region_migration_error` | `timeseries` | Counter of region migration error | `prometheus` | `none` | `__auto` |
|
||||
| Datanode load | `greptime_datanode_load` | `timeseries` | Gauge of load information of each datanode, collected via heartbeat between datanode and metasrv. This information is for metasrv to schedule workloads. | `prometheus` | `none` | `__auto` |
|
||||
| Region migration datanode | `greptime_meta_region_migration_stat{datanode_type="src"}`<br/>`greptime_meta_region_migration_stat{datanode_type="desc"}` | `status-history` | Counter of region migration by source and destination | `prometheus` | -- | `from-datanode-{{datanode_id}}` |
|
||||
| Region migration error | `greptime_meta_region_migration_error` | `timeseries` | Counter of region migration error | `prometheus` | `none` | `{{pod}}-{{state}}-{{error_type}}` |
|
||||
| Datanode load | `greptime_datanode_load` | `timeseries` | Gauge of load information of each datanode, collected via heartbeat between datanode and metasrv. This information is for metasrv to schedule workloads. | `prometheus` | `binBps` | `Datanode-{{datanode_id}}-writeload` |
|
||||
| Rate of SQL Executions (RDS) | `rate(greptime_meta_rds_pg_sql_execute_elapsed_ms_count[$__rate_interval])` | `timeseries` | Displays the rate of SQL executions processed by the Meta service using the RDS backend. | `prometheus` | `none` | `{{pod}} {{op}} {{type}} {{result}} ` |
|
||||
| SQL Execution Latency (RDS) | `histogram_quantile(0.90, sum by(pod, op, type, result, le) (rate(greptime_meta_rds_pg_sql_execute_elapsed_ms_bucket[$__rate_interval])))` | `timeseries` | Measures the response time of SQL executions via the RDS backend. | `prometheus` | `ms` | `{{pod}} {{op}} {{type}} {{result}} p90` |
|
||||
| Handler Execution Latency | `histogram_quantile(0.90, sum by(pod, le, name) (
|
||||
rate(greptime_meta_handler_execute_bucket[$__rate_interval])
|
||||
))` | `timeseries` | Shows latency of Meta handlers by pod and handler name, useful for monitoring handler performance and detecting latency spikes.<br/> | `prometheus` | `s` | `{{pod}} {{name}} p90` |
|
||||
| Heartbeat Packet Size | `histogram_quantile(0.9, sum by(pod, le) (greptime_meta_heartbeat_stat_memory_size_bucket))` | `timeseries` | Shows p90 heartbeat message sizes, helping track network usage and identify anomalies in heartbeat payload.<br/> | `prometheus` | `bytes` | `{{pod}}` |
|
||||
| Meta Heartbeat Receive Rate | `rate(greptime_meta_heartbeat_rate[$__rate_interval])` | `timeseries` | Gauge of load information of each datanode, collected via heartbeat between datanode and metasrv. This information is for metasrv to schedule workloads. | `prometheus` | `s` | `{{pod}}` |
|
||||
| Meta KV Ops Latency | `histogram_quantile(0.99, sum by(pod, le, op, target) (greptime_meta_kv_request_elapsed_bucket))` | `timeseries` | Gauge of load information of each datanode, collected via heartbeat between datanode and metasrv. This information is for metasrv to schedule workloads. | `prometheus` | `s` | `{{pod}}-{{op}} p99` |
|
||||
| Rate of meta KV Ops | `rate(greptime_meta_kv_request_elapsed_count[$__rate_interval])` | `timeseries` | Gauge of load information of each datanode, collected via heartbeat between datanode and metasrv. This information is for metasrv to schedule workloads. | `prometheus` | `none` | `{{pod}}-{{op}} p99` |
|
||||
| DDL Latency | `histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_create_tables_bucket))`<br/>`histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_create_table))`<br/>`histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_create_view))`<br/>`histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_create_flow))`<br/>`histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_drop_table))`<br/>`histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_alter_table))` | `timeseries` | Gauge of load information of each datanode, collected via heartbeat between datanode and metasrv. This information is for metasrv to schedule workloads. | `prometheus` | `s` | `CreateLogicalTables-{{step}} p90` |
|
||||
# Flownode
|
||||
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
|
||||
| --- | --- | --- | --- | --- | --- | --- |
|
||||
|
||||
@@ -497,7 +497,7 @@ groups:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{instance}}]-[{{pod}}]-[{{stage}}]-p99'
|
||||
- expr: sum by(instance, pod, le, stage) (rate(greptime_mito_compaction_stage_elapsed_sum{instance=~"$datanode"}[$__rate_interval]))/sum by(instance, pod, le, stage) (rate(greptime_mito_compaction_stage_elapsed_count{instance=~"$datanode"}[$__rate_interval]))
|
||||
- expr: sum by(instance, pod, stage) (rate(greptime_mito_compaction_stage_elapsed_sum{instance=~"$datanode"}[$__rate_interval]))/sum by(instance, pod, stage) (rate(greptime_mito_compaction_stage_elapsed_count{instance=~"$datanode"}[$__rate_interval]))
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
@@ -607,7 +607,7 @@ groups:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{instance}}]-[{{pod}}]-[{{stage}}]-P95'
|
||||
- expr: sum by(le,instance, stage, pod) (rate(greptime_region_worker_handle_write_sum[$__rate_interval]))/sum by(le,instance, stage, pod) (rate(greptime_region_worker_handle_write_count[$__rate_interval]))
|
||||
- expr: sum by(instance, stage, pod) (rate(greptime_region_worker_handle_write_sum[$__rate_interval]))/sum by(instance, stage, pod) (rate(greptime_region_worker_handle_write_count[$__rate_interval]))
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
@@ -741,9 +741,8 @@ groups:
|
||||
- title: Metasrv
|
||||
panels:
|
||||
- title: Region migration datanode
|
||||
type: state-timeline
|
||||
type: status-history
|
||||
description: Counter of region migration by source and destination
|
||||
unit: none
|
||||
queries:
|
||||
- expr: greptime_meta_region_migration_stat{datanode_type="src"}
|
||||
datasource:
|
||||
@@ -764,17 +763,127 @@ groups:
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: __auto
|
||||
legendFormat: '{{pod}}-{{state}}-{{error_type}}'
|
||||
- title: Datanode load
|
||||
type: timeseries
|
||||
description: Gauge of load information of each datanode, collected via heartbeat between datanode and metasrv. This information is for metasrv to schedule workloads.
|
||||
unit: none
|
||||
unit: binBps
|
||||
queries:
|
||||
- expr: greptime_datanode_load
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: __auto
|
||||
legendFormat: Datanode-{{datanode_id}}-writeload
|
||||
- title: Rate of SQL Executions (RDS)
|
||||
type: timeseries
|
||||
description: Displays the rate of SQL executions processed by the Meta service using the RDS backend.
|
||||
unit: none
|
||||
queries:
|
||||
- expr: rate(greptime_meta_rds_pg_sql_execute_elapsed_ms_count[$__rate_interval])
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '{{pod}} {{op}} {{type}} {{result}} '
|
||||
- title: SQL Execution Latency (RDS)
|
||||
type: timeseries
|
||||
description: 'Measures the response time of SQL executions via the RDS backend. '
|
||||
unit: ms
|
||||
queries:
|
||||
- expr: histogram_quantile(0.90, sum by(pod, op, type, result, le) (rate(greptime_meta_rds_pg_sql_execute_elapsed_ms_bucket[$__rate_interval])))
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '{{pod}} {{op}} {{type}} {{result}} p90'
|
||||
- title: Handler Execution Latency
|
||||
type: timeseries
|
||||
description: |
|
||||
Shows latency of Meta handlers by pod and handler name, useful for monitoring handler performance and detecting latency spikes.
|
||||
unit: s
|
||||
queries:
|
||||
- expr: |-
|
||||
histogram_quantile(0.90, sum by(pod, le, name) (
|
||||
rate(greptime_meta_handler_execute_bucket[$__rate_interval])
|
||||
))
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '{{pod}} {{name}} p90'
|
||||
- title: Heartbeat Packet Size
|
||||
type: timeseries
|
||||
description: |
|
||||
Shows p90 heartbeat message sizes, helping track network usage and identify anomalies in heartbeat payload.
|
||||
unit: bytes
|
||||
queries:
|
||||
- expr: histogram_quantile(0.9, sum by(pod, le) (greptime_meta_heartbeat_stat_memory_size_bucket))
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '{{pod}}'
|
||||
- title: Meta Heartbeat Receive Rate
|
||||
type: timeseries
|
||||
description: Gauge of load information of each datanode, collected via heartbeat between datanode and metasrv. This information is for metasrv to schedule workloads.
|
||||
unit: s
|
||||
queries:
|
||||
- expr: rate(greptime_meta_heartbeat_rate[$__rate_interval])
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '{{pod}}'
|
||||
- title: Meta KV Ops Latency
|
||||
type: timeseries
|
||||
description: Gauge of load information of each datanode, collected via heartbeat between datanode and metasrv. This information is for metasrv to schedule workloads.
|
||||
unit: s
|
||||
queries:
|
||||
- expr: histogram_quantile(0.99, sum by(pod, le, op, target) (greptime_meta_kv_request_elapsed_bucket))
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '{{pod}}-{{op}} p99'
|
||||
- title: Rate of meta KV Ops
|
||||
type: timeseries
|
||||
description: Gauge of load information of each datanode, collected via heartbeat between datanode and metasrv. This information is for metasrv to schedule workloads.
|
||||
unit: none
|
||||
queries:
|
||||
- expr: rate(greptime_meta_kv_request_elapsed_count[$__rate_interval])
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '{{pod}}-{{op}} p99'
|
||||
- title: DDL Latency
|
||||
type: timeseries
|
||||
description: Gauge of load information of each datanode, collected via heartbeat between datanode and metasrv. This information is for metasrv to schedule workloads.
|
||||
unit: s
|
||||
queries:
|
||||
- expr: histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_create_tables_bucket))
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: CreateLogicalTables-{{step}} p90
|
||||
- expr: histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_create_table))
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: CreateTable-{{step}} p90
|
||||
- expr: histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_create_view))
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: CreateView-{{step}} p90
|
||||
- expr: histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_create_flow))
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: CreateFlow-{{step}} p90
|
||||
- expr: histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_drop_table))
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: DropTable-{{step}} p90
|
||||
- expr: histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_alter_table))
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: AlterTable-{{step}} p90
|
||||
- title: Flownode
|
||||
panels:
|
||||
- title: Flow Ingest / Output Rate
|
||||
|
||||
File diff suppressed because it is too large
@@ -60,7 +60,7 @@
|
||||
| Read Stage P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, stage) (rate(greptime_mito_read_stage_elapsed_bucket{}[$__rate_interval])))` | `timeseries` | Read Stage P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]` |
|
||||
| Write Stage P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, stage) (rate(greptime_mito_write_stage_elapsed_bucket{}[$__rate_interval])))` | `timeseries` | Write Stage P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]` |
|
||||
| Compaction OPS per Instance | `sum by(instance, pod) (rate(greptime_mito_compaction_total_elapsed_count{}[$__rate_interval]))` | `timeseries` | Compaction OPS per Instance. | `prometheus` | `ops` | `[{{ instance }}]-[{{pod}}]` |
|
||||
| Compaction Elapsed Time per Instance by Stage | `histogram_quantile(0.99, sum by(instance, pod, le, stage) (rate(greptime_mito_compaction_stage_elapsed_bucket{}[$__rate_interval])))`<br/>`sum by(instance, pod, le, stage) (rate(greptime_mito_compaction_stage_elapsed_sum{}[$__rate_interval]))/sum by(instance, pod, le, stage) (rate(greptime_mito_compaction_stage_elapsed_count{}[$__rate_interval]))` | `timeseries` | Compaction latency by stage | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-p99` |
|
||||
| Compaction Elapsed Time per Instance by Stage | `histogram_quantile(0.99, sum by(instance, pod, le, stage) (rate(greptime_mito_compaction_stage_elapsed_bucket{}[$__rate_interval])))`<br/>`sum by(instance, pod, stage) (rate(greptime_mito_compaction_stage_elapsed_sum{}[$__rate_interval]))/sum by(instance, pod, stage) (rate(greptime_mito_compaction_stage_elapsed_count{}[$__rate_interval]))` | `timeseries` | Compaction latency by stage | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-p99` |
|
||||
| Compaction P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le,stage) (rate(greptime_mito_compaction_total_elapsed_bucket{}[$__rate_interval])))` | `timeseries` | Compaction P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-compaction` |
|
||||
| WAL write size | `histogram_quantile(0.95, sum by(le,instance, pod) (rate(raft_engine_write_size_bucket[$__rate_interval])))`<br/>`histogram_quantile(0.99, sum by(le,instance,pod) (rate(raft_engine_write_size_bucket[$__rate_interval])))`<br/>`sum by (instance, pod)(rate(raft_engine_write_size_sum[$__rate_interval]))` | `timeseries` | Write-ahead logs write size as bytes. This chart includes stats of p95 and p99 size by instance, total WAL write rate. | `prometheus` | `bytes` | `[{{instance}}]-[{{pod}}]-req-size-p95` |
|
||||
| Cached Bytes per Instance | `greptime_mito_cache_bytes{}` | `timeseries` | Cached Bytes per Instance. | `prometheus` | `decbytes` | `[{{instance}}]-[{{pod}}]-[{{type}}]` |
|
||||
@@ -69,7 +69,7 @@
|
||||
| Log Store op duration seconds | `histogram_quantile(0.99, sum by(le,logstore,optype,instance, pod) (rate(greptime_logstore_op_elapsed_bucket[$__rate_interval])))` | `timeseries` | Write-ahead log operations latency at p99 | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{logstore}}]-[{{optype}}]-p99` |
|
||||
| Inflight Flush | `greptime_mito_inflight_flush_count` | `timeseries` | Ongoing flush task count | `prometheus` | `none` | `[{{instance}}]-[{{pod}}]` |
|
||||
| Compaction Input/Output Bytes | `sum by(instance, pod) (greptime_mito_compaction_input_bytes)`<br/>`sum by(instance, pod) (greptime_mito_compaction_output_bytes)` | `timeseries` | Compaction oinput output bytes | `prometheus` | `bytes` | `[{{instance}}]-[{{pod}}]-input` |
|
||||
| Region Worker Handle Bulk Insert Requests | `histogram_quantile(0.95, sum by(le,instance, stage, pod) (rate(greptime_region_worker_handle_write_bucket[$__rate_interval])))`<br/>`sum by(le,instance, stage, pod) (rate(greptime_region_worker_handle_write_sum[$__rate_interval]))/sum by(le,instance, stage, pod) (rate(greptime_region_worker_handle_write_count[$__rate_interval]))` | `timeseries` | Per-stage elapsed time for region worker to handle bulk insert region requests. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-P95` |
|
||||
| Region Worker Handle Bulk Insert Requests | `histogram_quantile(0.95, sum by(le,instance, stage, pod) (rate(greptime_region_worker_handle_write_bucket[$__rate_interval])))`<br/>`sum by(instance, stage, pod) (rate(greptime_region_worker_handle_write_sum[$__rate_interval]))/sum by(instance, stage, pod) (rate(greptime_region_worker_handle_write_count[$__rate_interval]))` | `timeseries` | Per-stage elapsed time for region worker to handle bulk insert region requests. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-P95` |
|
||||
| Region Worker Convert Requests | `histogram_quantile(0.95, sum by(le, instance, stage, pod) (rate(greptime_datanode_convert_region_request_bucket[$__rate_interval])))`<br/>`sum by(le,instance, stage, pod) (rate(greptime_datanode_convert_region_request_sum[$__rate_interval]))/sum by(le,instance, stage, pod) (rate(greptime_datanode_convert_region_request_count[$__rate_interval]))` | `timeseries` | Per-stage elapsed time for region worker to decode requests. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-P95` |
|
||||
# OpenDAL
|
||||
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
|
||||
@@ -88,9 +88,19 @@

# Metasrv

| Title | Query | Type | Description | Datasource | Unit | Legend Format |
| --- | --- | --- | --- | --- | --- | --- |
| Region migration datanode | `greptime_meta_region_migration_stat{datanode_type="src"}`<br/>`greptime_meta_region_migration_stat{datanode_type="desc"}` | `state-timeline` | Counter of region migration by source and destination | `prometheus` | `none` | `from-datanode-{{datanode_id}}` |
| Region migration error | `greptime_meta_region_migration_error` | `timeseries` | Counter of region migration error | `prometheus` | `none` | `__auto` |
| Datanode load | `greptime_datanode_load` | `timeseries` | Gauge of load information of each datanode, collected via heartbeat between datanode and metasrv. This information is for metasrv to schedule workloads. | `prometheus` | `none` | `__auto` |
| Region migration datanode | `greptime_meta_region_migration_stat{datanode_type="src"}`<br/>`greptime_meta_region_migration_stat{datanode_type="desc"}` | `status-history` | Counter of region migration by source and destination | `prometheus` | -- | `from-datanode-{{datanode_id}}` |
| Region migration error | `greptime_meta_region_migration_error` | `timeseries` | Counter of region migration error | `prometheus` | `none` | `{{pod}}-{{state}}-{{error_type}}` |
| Datanode load | `greptime_datanode_load` | `timeseries` | Gauge of load information of each datanode, collected via heartbeat between datanode and metasrv. This information is for metasrv to schedule workloads. | `prometheus` | `binBps` | `Datanode-{{datanode_id}}-writeload` |
| Rate of SQL Executions (RDS) | `rate(greptime_meta_rds_pg_sql_execute_elapsed_ms_count[$__rate_interval])` | `timeseries` | Displays the rate of SQL executions processed by the Meta service using the RDS backend. | `prometheus` | `none` | `{{pod}} {{op}} {{type}} {{result}} ` |
| SQL Execution Latency (RDS) | `histogram_quantile(0.90, sum by(pod, op, type, result, le) (rate(greptime_meta_rds_pg_sql_execute_elapsed_ms_bucket[$__rate_interval])))` | `timeseries` | Measures the response time of SQL executions via the RDS backend. | `prometheus` | `ms` | `{{pod}} {{op}} {{type}} {{result}} p90` |
| Handler Execution Latency | `histogram_quantile(0.90, sum by(pod, le, name) (
rate(greptime_meta_handler_execute_bucket[$__rate_interval])
))` | `timeseries` | Shows latency of Meta handlers by pod and handler name, useful for monitoring handler performance and detecting latency spikes.<br/> | `prometheus` | `s` | `{{pod}} {{name}} p90` |
| Heartbeat Packet Size | `histogram_quantile(0.9, sum by(pod, le) (greptime_meta_heartbeat_stat_memory_size_bucket))` | `timeseries` | Shows p90 heartbeat message sizes, helping track network usage and identify anomalies in heartbeat payload.<br/> | `prometheus` | `bytes` | `{{pod}}` |
| Meta Heartbeat Receive Rate | `rate(greptime_meta_heartbeat_rate[$__rate_interval])` | `timeseries` | Gauge of load information of each datanode, collected via heartbeat between datanode and metasrv. This information is for metasrv to schedule workloads. | `prometheus` | `s` | `{{pod}}` |
| Meta KV Ops Latency | `histogram_quantile(0.99, sum by(pod, le, op, target) (greptime_meta_kv_request_elapsed_bucket))` | `timeseries` | Gauge of load information of each datanode, collected via heartbeat between datanode and metasrv. This information is for metasrv to schedule workloads. | `prometheus` | `s` | `{{pod}}-{{op}} p99` |
| Rate of meta KV Ops | `rate(greptime_meta_kv_request_elapsed_count[$__rate_interval])` | `timeseries` | Gauge of load information of each datanode, collected via heartbeat between datanode and metasrv. This information is for metasrv to schedule workloads. | `prometheus` | `none` | `{{pod}}-{{op}} p99` |
| DDL Latency | `histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_create_tables_bucket))`<br/>`histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_create_table))`<br/>`histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_create_view))`<br/>`histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_create_flow))`<br/>`histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_drop_table))`<br/>`histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_alter_table))` | `timeseries` | Gauge of load information of each datanode, collected via heartbeat between datanode and metasrv. This information is for metasrv to schedule workloads. | `prometheus` | `s` | `CreateLogicalTables-{{step}} p90` |

# Flownode

| Title | Query | Type | Description | Datasource | Unit | Legend Format |
| --- | --- | --- | --- | --- | --- | --- |

@@ -497,7 +497,7 @@ groups:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{instance}}]-[{{pod}}]-[{{stage}}]-p99'
|
||||
- expr: sum by(instance, pod, le, stage) (rate(greptime_mito_compaction_stage_elapsed_sum{}[$__rate_interval]))/sum by(instance, pod, le, stage) (rate(greptime_mito_compaction_stage_elapsed_count{}[$__rate_interval]))
|
||||
- expr: sum by(instance, pod, stage) (rate(greptime_mito_compaction_stage_elapsed_sum{}[$__rate_interval]))/sum by(instance, pod, stage) (rate(greptime_mito_compaction_stage_elapsed_count{}[$__rate_interval]))
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
@@ -607,7 +607,7 @@ groups:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{instance}}]-[{{pod}}]-[{{stage}}]-P95'
|
||||
- expr: sum by(le,instance, stage, pod) (rate(greptime_region_worker_handle_write_sum[$__rate_interval]))/sum by(le,instance, stage, pod) (rate(greptime_region_worker_handle_write_count[$__rate_interval]))
|
||||
- expr: sum by(instance, stage, pod) (rate(greptime_region_worker_handle_write_sum[$__rate_interval]))/sum by(instance, stage, pod) (rate(greptime_region_worker_handle_write_count[$__rate_interval]))
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
@@ -741,9 +741,8 @@ groups:
|
||||
- title: Metasrv
|
||||
panels:
|
||||
- title: Region migration datanode
|
||||
type: state-timeline
|
||||
type: status-history
|
||||
description: Counter of region migration by source and destination
|
||||
unit: none
|
||||
queries:
|
||||
- expr: greptime_meta_region_migration_stat{datanode_type="src"}
|
||||
datasource:
|
||||
@@ -764,17 +763,127 @@ groups:
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: __auto
|
||||
legendFormat: '{{pod}}-{{state}}-{{error_type}}'
|
||||
- title: Datanode load
|
||||
type: timeseries
|
||||
description: Gauge of load information of each datanode, collected via heartbeat between datanode and metasrv. This information is for metasrv to schedule workloads.
|
||||
unit: none
|
||||
unit: binBps
|
||||
queries:
|
||||
- expr: greptime_datanode_load
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: __auto
|
||||
legendFormat: Datanode-{{datanode_id}}-writeload
|
||||
- title: Rate of SQL Executions (RDS)
|
||||
type: timeseries
|
||||
description: Displays the rate of SQL executions processed by the Meta service using the RDS backend.
|
||||
unit: none
|
||||
queries:
|
||||
- expr: rate(greptime_meta_rds_pg_sql_execute_elapsed_ms_count[$__rate_interval])
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '{{pod}} {{op}} {{type}} {{result}} '
|
||||
- title: SQL Execution Latency (RDS)
|
||||
type: timeseries
|
||||
description: 'Measures the response time of SQL executions via the RDS backend. '
|
||||
unit: ms
|
||||
queries:
|
||||
- expr: histogram_quantile(0.90, sum by(pod, op, type, result, le) (rate(greptime_meta_rds_pg_sql_execute_elapsed_ms_bucket[$__rate_interval])))
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '{{pod}} {{op}} {{type}} {{result}} p90'
|
||||
- title: Handler Execution Latency
|
||||
type: timeseries
|
||||
description: |
|
||||
Shows latency of Meta handlers by pod and handler name, useful for monitoring handler performance and detecting latency spikes.
|
||||
unit: s
|
||||
queries:
|
||||
- expr: |-
|
||||
histogram_quantile(0.90, sum by(pod, le, name) (
|
||||
rate(greptime_meta_handler_execute_bucket[$__rate_interval])
|
||||
))
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '{{pod}} {{name}} p90'
|
||||
- title: Heartbeat Packet Size
|
||||
type: timeseries
|
||||
description: |
|
||||
Shows p90 heartbeat message sizes, helping track network usage and identify anomalies in heartbeat payload.
|
||||
unit: bytes
|
||||
queries:
|
||||
- expr: histogram_quantile(0.9, sum by(pod, le) (greptime_meta_heartbeat_stat_memory_size_bucket))
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '{{pod}}'
|
||||
- title: Meta Heartbeat Receive Rate
|
||||
type: timeseries
|
||||
description: Gauge of load information of each datanode, collected via heartbeat between datanode and metasrv. This information is for metasrv to schedule workloads.
|
||||
unit: s
|
||||
queries:
|
||||
- expr: rate(greptime_meta_heartbeat_rate[$__rate_interval])
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '{{pod}}'
|
||||
- title: Meta KV Ops Latency
|
||||
type: timeseries
|
||||
description: Gauge of load information of each datanode, collected via heartbeat between datanode and metasrv. This information is for metasrv to schedule workloads.
|
||||
unit: s
|
||||
queries:
|
||||
- expr: histogram_quantile(0.99, sum by(pod, le, op, target) (greptime_meta_kv_request_elapsed_bucket))
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '{{pod}}-{{op}} p99'
|
||||
- title: Rate of meta KV Ops
|
||||
type: timeseries
|
||||
description: Gauge of load information of each datanode, collected via heartbeat between datanode and metasrv. This information is for metasrv to schedule workloads.
|
||||
unit: none
|
||||
queries:
|
||||
- expr: rate(greptime_meta_kv_request_elapsed_count[$__rate_interval])
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '{{pod}}-{{op}} p99'
|
||||
- title: DDL Latency
|
||||
type: timeseries
|
||||
description: Gauge of load information of each datanode, collected via heartbeat between datanode and metasrv. This information is for metasrv to schedule workloads.
|
||||
unit: s
|
||||
queries:
|
||||
- expr: histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_create_tables_bucket))
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: CreateLogicalTables-{{step}} p90
|
||||
- expr: histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_create_table))
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: CreateTable-{{step}} p90
|
||||
- expr: histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_create_view))
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: CreateView-{{step}} p90
|
||||
- expr: histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_create_flow))
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: CreateFlow-{{step}} p90
|
||||
- expr: histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_drop_table))
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: DropTable-{{step}} p90
|
||||
- expr: histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_alter_table))
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: AlterTable-{{step}} p90
|
||||
- title: Flownode
|
||||
panels:
|
||||
- title: Flow Ingest / Output Rate
|
||||
|
||||
@@ -167,9 +167,7 @@ impl Client {
|
||||
|
||||
let client = FlightServiceClient::new(channel)
|
||||
.max_decoding_message_size(self.max_grpc_recv_message_size())
|
||||
.max_encoding_message_size(self.max_grpc_send_message_size())
|
||||
.accept_compressed(CompressionEncoding::Zstd)
|
||||
.send_compressed(CompressionEncoding::Zstd);
|
||||
.max_encoding_message_size(self.max_grpc_send_message_size());
|
||||
|
||||
Ok(FlightClient { addr, client })
|
||||
}
|
||||
@@ -178,9 +176,7 @@ impl Client {
|
||||
let (addr, channel) = self.find_channel()?;
|
||||
let client = PbRegionClient::new(channel)
|
||||
.max_decoding_message_size(self.max_grpc_recv_message_size())
|
||||
.max_encoding_message_size(self.max_grpc_send_message_size())
|
||||
.accept_compressed(CompressionEncoding::Zstd)
|
||||
.send_compressed(CompressionEncoding::Zstd);
|
||||
.max_encoding_message_size(self.max_grpc_send_message_size());
|
||||
Ok((addr, client))
|
||||
}
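
These two hunks drop zstd request/response compression from the Flight and Region gRPC clients while keeping the message-size limits. For reference, a minimal sketch of what the Flight client construction looks like after the change, assuming the tonic-generated `FlightServiceClient` from `arrow-flight` and an already-connected `Channel`; the size caps are illustrative values, and the Region client follows the same shape.

```rust
use arrow_flight::flight_service_client::FlightServiceClient;
use tonic::transport::Channel;

/// Sketch only: a Flight client with explicit message-size limits and no
/// compression configured, mirroring the hunks above.
fn plain_flight_client(channel: Channel) -> FlightServiceClient<Channel> {
    FlightServiceClient::new(channel)
        .max_decoding_message_size(512 * 1024 * 1024) // illustrative 512 MiB cap
        .max_encoding_message_size(512 * 1024 * 1024) // illustrative 512 MiB cap
    // No `.accept_compressed(..)` / `.send_compressed(..)` calls here, so
    // request and response payloads go over the wire uncompressed.
}
```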
|
||||
|
||||
|
||||
@@ -14,12 +14,13 @@
|
||||
|
||||
pub mod builder;
|
||||
|
||||
use std::path::Path;
|
||||
use std::time::Duration;
|
||||
|
||||
use async_trait::async_trait;
|
||||
use clap::Parser;
|
||||
use common_config::Configurable;
|
||||
use common_telemetry::logging::TracingOptions;
|
||||
use common_telemetry::logging::{TracingOptions, DEFAULT_LOGGING_DIR};
|
||||
use common_telemetry::{info, warn};
|
||||
use common_wal::config::DatanodeWalConfig;
|
||||
use datanode::datanode::Datanode;
|
||||
@@ -248,6 +249,14 @@ impl StartCommand {
|
||||
raft_engine_config.dir.replace(wal_dir.clone());
|
||||
}
|
||||
|
||||
// If the logging dir is not set, use the default logs dir in the data home.
|
||||
if opts.logging.dir.is_empty() {
|
||||
opts.logging.dir = Path::new(&opts.storage.data_home)
|
||||
.join(DEFAULT_LOGGING_DIR)
|
||||
.to_string_lossy()
|
||||
.to_string();
|
||||
}
|
||||
|
||||
if let Some(http_addr) = &self.http_addr {
|
||||
opts.http.addr.clone_from(http_addr);
|
||||
}
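
The same "fall back to `<data home>/logs` when no logging directory is configured" block recurs below in the flownode, frontend, metasrv and standalone commands. A hedged sketch of a shared helper that captures the pattern; the helper itself is hypothetical, while `LoggingOptions` and `DEFAULT_LOGGING_DIR` come from `common_telemetry::logging` as imported above.

```rust
use std::path::Path;

use common_telemetry::logging::{LoggingOptions, DEFAULT_LOGGING_DIR};

/// Hypothetical helper: default the logging dir to
/// `<data_home>/<DEFAULT_LOGGING_DIR>` when it was not set explicitly,
/// as each Start command below does inline.
fn apply_default_logging_dir(logging: &mut LoggingOptions, data_home: &str) {
    if logging.dir.is_empty() {
        logging.dir = Path::new(data_home)
            .join(DEFAULT_LOGGING_DIR)
            .to_string_lossy()
            .to_string();
    }
}
```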
|
||||
|
||||
@@ -12,6 +12,7 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::path::Path;
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
@@ -21,7 +22,7 @@ use catalog::kvbackend::{CachedKvBackendBuilder, KvBackendCatalogManager, MetaKv
|
||||
use clap::Parser;
|
||||
use client::client_manager::NodeClients;
|
||||
use common_base::Plugins;
|
||||
use common_config::Configurable;
|
||||
use common_config::{Configurable, DEFAULT_DATA_HOME};
|
||||
use common_grpc::channel_manager::ChannelConfig;
|
||||
use common_meta::cache::{CacheRegistryBuilder, LayeredCacheRegistryBuilder};
|
||||
use common_meta::heartbeat::handler::invalidate_table_cache::InvalidateCacheHandler;
|
||||
@@ -30,7 +31,7 @@ use common_meta::heartbeat::handler::HandlerGroupExecutor;
|
||||
use common_meta::key::flow::FlowMetadataManager;
|
||||
use common_meta::key::TableMetadataManager;
|
||||
use common_telemetry::info;
|
||||
use common_telemetry::logging::TracingOptions;
|
||||
use common_telemetry::logging::{TracingOptions, DEFAULT_LOGGING_DIR};
|
||||
use common_version::{short_version, version};
|
||||
use flow::{
|
||||
get_flow_auth_options, FlownodeBuilder, FlownodeInstance, FlownodeServiceBuilder,
|
||||
@@ -186,6 +187,14 @@ impl StartCommand {
|
||||
opts.logging.dir.clone_from(dir);
|
||||
}
|
||||
|
||||
// If the logging dir is not set, use the default logs dir in the data home.
|
||||
if opts.logging.dir.is_empty() {
|
||||
opts.logging.dir = Path::new(DEFAULT_DATA_HOME)
|
||||
.join(DEFAULT_LOGGING_DIR)
|
||||
.to_string_lossy()
|
||||
.to_string();
|
||||
}
|
||||
|
||||
if global_options.log_level.is_some() {
|
||||
opts.logging.level.clone_from(&global_options.log_level);
|
||||
}
|
||||
|
||||
@@ -12,6 +12,7 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::path::Path;
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
@@ -22,14 +23,14 @@ use catalog::kvbackend::{CachedKvBackendBuilder, KvBackendCatalogManager, MetaKv
|
||||
use clap::Parser;
|
||||
use client::client_manager::NodeClients;
|
||||
use common_base::Plugins;
|
||||
use common_config::Configurable;
|
||||
use common_config::{Configurable, DEFAULT_DATA_HOME};
|
||||
use common_grpc::channel_manager::ChannelConfig;
|
||||
use common_meta::cache::{CacheRegistryBuilder, LayeredCacheRegistryBuilder};
|
||||
use common_meta::heartbeat::handler::invalidate_table_cache::InvalidateCacheHandler;
|
||||
use common_meta::heartbeat::handler::parse_mailbox_message::ParseMailboxMessageHandler;
|
||||
use common_meta::heartbeat::handler::HandlerGroupExecutor;
|
||||
use common_telemetry::info;
|
||||
use common_telemetry::logging::TracingOptions;
|
||||
use common_telemetry::logging::{TracingOptions, DEFAULT_LOGGING_DIR};
|
||||
use common_time::timezone::set_default_timezone;
|
||||
use common_version::{short_version, version};
|
||||
use frontend::frontend::Frontend;
|
||||
@@ -194,6 +195,14 @@ impl StartCommand {
|
||||
opts.logging.dir.clone_from(dir);
|
||||
}
|
||||
|
||||
// If the logging dir is not set, use the default logs dir in the data home.
|
||||
if opts.logging.dir.is_empty() {
|
||||
opts.logging.dir = Path::new(DEFAULT_DATA_HOME)
|
||||
.join(DEFAULT_LOGGING_DIR)
|
||||
.to_string_lossy()
|
||||
.to_string();
|
||||
}
|
||||
|
||||
if global_options.log_level.is_some() {
|
||||
opts.logging.level.clone_from(&global_options.log_level);
|
||||
}
|
||||
|
||||
@@ -13,6 +13,7 @@
|
||||
// limitations under the License.
|
||||
|
||||
use std::fmt;
|
||||
use std::path::Path;
|
||||
use std::time::Duration;
|
||||
|
||||
use async_trait::async_trait;
|
||||
@@ -20,7 +21,7 @@ use clap::Parser;
|
||||
use common_base::Plugins;
|
||||
use common_config::Configurable;
|
||||
use common_telemetry::info;
|
||||
use common_telemetry::logging::TracingOptions;
|
||||
use common_telemetry::logging::{TracingOptions, DEFAULT_LOGGING_DIR};
|
||||
use common_version::{short_version, version};
|
||||
use meta_srv::bootstrap::MetasrvInstance;
|
||||
use meta_srv::metasrv::BackendImpl;
|
||||
@@ -236,12 +237,20 @@ impl StartCommand {
|
||||
tokio_console_addr: global_options.tokio_console_addr.clone(),
|
||||
};
|
||||
|
||||
#[allow(deprecated)]
|
||||
if let Some(addr) = &self.rpc_bind_addr {
|
||||
opts.bind_addr.clone_from(addr);
|
||||
opts.grpc.bind_addr.clone_from(addr);
|
||||
} else if !opts.bind_addr.is_empty() {
|
||||
opts.grpc.bind_addr.clone_from(&opts.bind_addr);
|
||||
}
|
||||
|
||||
#[allow(deprecated)]
|
||||
if let Some(addr) = &self.rpc_server_addr {
|
||||
opts.server_addr.clone_from(addr);
|
||||
opts.grpc.server_addr.clone_from(addr);
|
||||
} else if !opts.server_addr.is_empty() {
|
||||
opts.grpc.server_addr.clone_from(&opts.server_addr);
|
||||
}
|
||||
|
||||
if let Some(addrs) = &self.store_addrs {
|
||||
@@ -274,6 +283,14 @@ impl StartCommand {
|
||||
opts.data_home.clone_from(data_home);
|
||||
}
|
||||
|
||||
// If the logging dir is not set, use the default logs dir in the data home.
|
||||
if opts.logging.dir.is_empty() {
|
||||
opts.logging.dir = Path::new(&opts.data_home)
|
||||
.join(DEFAULT_LOGGING_DIR)
|
||||
.to_string_lossy()
|
||||
.to_string();
|
||||
}
|
||||
|
||||
if !self.store_key_prefix.is_empty() {
|
||||
opts.store_key_prefix.clone_from(&self.store_key_prefix)
|
||||
}
|
||||
@@ -310,7 +327,7 @@ impl StartCommand {
|
||||
|
||||
let plugin_opts = opts.plugins;
|
||||
let mut opts = opts.component;
|
||||
opts.detect_server_addr();
|
||||
opts.grpc.detect_server_addr();
|
||||
|
||||
info!("Metasrv options: {:#?}", opts);
|
||||
|
||||
@@ -354,7 +371,7 @@ mod tests {
|
||||
};
|
||||
|
||||
let options = cmd.load_options(&Default::default()).unwrap().component;
|
||||
assert_eq!("127.0.0.1:3002".to_string(), options.bind_addr);
|
||||
assert_eq!("127.0.0.1:3002".to_string(), options.grpc.bind_addr);
|
||||
assert_eq!(vec!["127.0.0.1:2380".to_string()], options.store_addrs);
|
||||
assert_eq!(SelectorType::LoadBased, options.selector);
|
||||
}
|
||||
@@ -387,8 +404,8 @@ mod tests {
|
||||
};
|
||||
|
||||
let options = cmd.load_options(&Default::default()).unwrap().component;
|
||||
assert_eq!("127.0.0.1:3002".to_string(), options.bind_addr);
|
||||
assert_eq!("127.0.0.1:3002".to_string(), options.server_addr);
|
||||
assert_eq!("127.0.0.1:3002".to_string(), options.grpc.bind_addr);
|
||||
assert_eq!("127.0.0.1:3002".to_string(), options.grpc.server_addr);
|
||||
assert_eq!(vec!["127.0.0.1:2379".to_string()], options.store_addrs);
|
||||
assert_eq!(SelectorType::LeaseBased, options.selector);
|
||||
assert_eq!("debug", options.logging.level.as_ref().unwrap());
|
||||
@@ -500,10 +517,10 @@ mod tests {
|
||||
let opts = command.load_options(&Default::default()).unwrap().component;
|
||||
|
||||
// Should be read from env, env > default values.
|
||||
assert_eq!(opts.bind_addr, "127.0.0.1:14002");
|
||||
assert_eq!(opts.grpc.bind_addr, "127.0.0.1:14002");
|
||||
|
||||
// Should be read from config file, config file > env > default values.
|
||||
assert_eq!(opts.server_addr, "127.0.0.1:3002");
|
||||
assert_eq!(opts.grpc.server_addr, "127.0.0.1:3002");
|
||||
|
||||
// Should be read from cli, cli > config file > env > default values.
|
||||
assert_eq!(opts.http.addr, "127.0.0.1:14000");
|
||||
|
||||
@@ -13,6 +13,7 @@
|
||||
// limitations under the License.
|
||||
|
||||
use std::net::SocketAddr;
|
||||
use std::path::Path;
|
||||
use std::sync::Arc;
|
||||
use std::{fs, path};
|
||||
|
||||
@@ -49,7 +50,9 @@ use common_meta::sequence::SequenceBuilder;
|
||||
use common_meta::wal_options_allocator::{build_wal_options_allocator, WalOptionsAllocatorRef};
|
||||
use common_procedure::{ProcedureInfo, ProcedureManagerRef};
|
||||
use common_telemetry::info;
|
||||
use common_telemetry::logging::{LoggingOptions, SlowQueryOptions, TracingOptions};
|
||||
use common_telemetry::logging::{
|
||||
LoggingOptions, SlowQueryOptions, TracingOptions, DEFAULT_LOGGING_DIR,
|
||||
};
|
||||
use common_time::timezone::set_default_timezone;
|
||||
use common_version::{short_version, version};
|
||||
use common_wal::config::DatanodeWalConfig;
|
||||
@@ -407,6 +410,14 @@ impl StartCommand {
|
||||
opts.storage.data_home.clone_from(data_home);
|
||||
}
|
||||
|
||||
// If the logging dir is not set, use the default logs dir in the data home.
|
||||
if opts.logging.dir.is_empty() {
|
||||
opts.logging.dir = Path::new(&opts.storage.data_home)
|
||||
.join(DEFAULT_LOGGING_DIR)
|
||||
.to_string_lossy()
|
||||
.to_string();
|
||||
}
|
||||
|
||||
if let Some(addr) = &self.rpc_bind_addr {
|
||||
// frontend grpc addr conflict with datanode default grpc addr
|
||||
let datanode_grpc_addr = DatanodeOptions::default().grpc.bind_addr;
|
||||
|
||||
@@ -12,13 +12,14 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::path::Path;
|
||||
use std::time::Duration;
|
||||
|
||||
use cmd::options::GreptimeOptions;
|
||||
use cmd::standalone::StandaloneOptions;
|
||||
use common_config::Configurable;
|
||||
use common_config::{Configurable, DEFAULT_DATA_HOME};
|
||||
use common_options::datanode::{ClientOptions, DatanodeClientOptions};
|
||||
use common_telemetry::logging::{LoggingOptions, DEFAULT_OTLP_ENDPOINT};
|
||||
use common_telemetry::logging::{LoggingOptions, DEFAULT_LOGGING_DIR, DEFAULT_OTLP_ENDPOINT};
|
||||
use common_wal::config::raft_engine::RaftEngineConfig;
|
||||
use common_wal::config::DatanodeWalConfig;
|
||||
use datanode::config::{DatanodeOptions, RegionEngineConfig, StorageConfig};
|
||||
@@ -32,6 +33,7 @@ use mito2::config::MitoConfig;
|
||||
use servers::export_metrics::ExportMetricsOption;
|
||||
use servers::grpc::GrpcOptions;
|
||||
use servers::http::HttpOptions;
|
||||
use store_api::path_utils::WAL_DIR;
|
||||
|
||||
#[allow(deprecated)]
|
||||
#[test]
|
||||
@@ -56,13 +58,18 @@ fn test_load_datanode_example_config() {
|
||||
metadata_cache_tti: Duration::from_secs(300),
|
||||
}),
|
||||
wal: DatanodeWalConfig::RaftEngine(RaftEngineConfig {
|
||||
dir: Some("./greptimedb_data/wal".to_string()),
|
||||
dir: Some(
|
||||
Path::new(DEFAULT_DATA_HOME)
|
||||
.join(WAL_DIR)
|
||||
.to_string_lossy()
|
||||
.to_string(),
|
||||
),
|
||||
sync_period: Some(Duration::from_secs(10)),
|
||||
recovery_parallelism: 2,
|
||||
..Default::default()
|
||||
}),
|
||||
storage: StorageConfig {
|
||||
data_home: "./greptimedb_data/".to_string(),
|
||||
data_home: DEFAULT_DATA_HOME.to_string(),
|
||||
..Default::default()
|
||||
},
|
||||
region_engine: vec![
|
||||
@@ -79,12 +86,16 @@ fn test_load_datanode_example_config() {
|
||||
],
|
||||
logging: LoggingOptions {
|
||||
level: Some("info".to_string()),
|
||||
dir: Path::new(DEFAULT_DATA_HOME)
|
||||
.join(DEFAULT_LOGGING_DIR)
|
||||
.to_string_lossy()
|
||||
.to_string(),
|
||||
otlp_endpoint: Some(DEFAULT_OTLP_ENDPOINT.to_string()),
|
||||
tracing_sample_ratio: Some(Default::default()),
|
||||
..Default::default()
|
||||
},
|
||||
export_metrics: ExportMetricsOption {
|
||||
self_import: Some(Default::default()),
|
||||
self_import: None,
|
||||
remote_write: Some(Default::default()),
|
||||
..Default::default()
|
||||
},
|
||||
@@ -121,6 +132,10 @@ fn test_load_frontend_example_config() {
|
||||
}),
|
||||
logging: LoggingOptions {
|
||||
level: Some("info".to_string()),
|
||||
dir: Path::new(DEFAULT_DATA_HOME)
|
||||
.join(DEFAULT_LOGGING_DIR)
|
||||
.to_string_lossy()
|
||||
.to_string(),
|
||||
otlp_endpoint: Some(DEFAULT_OTLP_ENDPOINT.to_string()),
|
||||
tracing_sample_ratio: Some(Default::default()),
|
||||
..Default::default()
|
||||
@@ -133,7 +148,7 @@ fn test_load_frontend_example_config() {
|
||||
},
|
||||
},
|
||||
export_metrics: ExportMetricsOption {
|
||||
self_import: Some(Default::default()),
|
||||
self_import: None,
|
||||
remote_write: Some(Default::default()),
|
||||
..Default::default()
|
||||
},
|
||||
@@ -160,10 +175,17 @@ fn test_load_metasrv_example_config() {
|
||||
let expected = GreptimeOptions::<MetasrvOptions> {
|
||||
component: MetasrvOptions {
|
||||
selector: SelectorType::default(),
|
||||
data_home: "./greptimedb_data/metasrv/".to_string(),
|
||||
server_addr: "127.0.0.1:3002".to_string(),
|
||||
data_home: DEFAULT_DATA_HOME.to_string(),
|
||||
grpc: GrpcOptions {
|
||||
bind_addr: "127.0.0.1:3002".to_string(),
|
||||
server_addr: "127.0.0.1:3002".to_string(),
|
||||
..Default::default()
|
||||
},
|
||||
logging: LoggingOptions {
|
||||
dir: "./greptimedb_data/logs".to_string(),
|
||||
dir: Path::new(DEFAULT_DATA_HOME)
|
||||
.join(DEFAULT_LOGGING_DIR)
|
||||
.to_string_lossy()
|
||||
.to_string(),
|
||||
level: Some("info".to_string()),
|
||||
otlp_endpoint: Some(DEFAULT_OTLP_ENDPOINT.to_string()),
|
||||
tracing_sample_ratio: Some(Default::default()),
|
||||
@@ -177,7 +199,7 @@ fn test_load_metasrv_example_config() {
|
||||
},
|
||||
},
|
||||
export_metrics: ExportMetricsOption {
|
||||
self_import: Some(Default::default()),
|
||||
self_import: None,
|
||||
remote_write: Some(Default::default()),
|
||||
..Default::default()
|
||||
},
|
||||
@@ -198,7 +220,12 @@ fn test_load_standalone_example_config() {
|
||||
component: StandaloneOptions {
|
||||
default_timezone: Some("UTC".to_string()),
|
||||
wal: DatanodeWalConfig::RaftEngine(RaftEngineConfig {
|
||||
dir: Some("./greptimedb_data/wal".to_string()),
|
||||
dir: Some(
|
||||
Path::new(DEFAULT_DATA_HOME)
|
||||
.join(WAL_DIR)
|
||||
.to_string_lossy()
|
||||
.to_string(),
|
||||
),
|
||||
sync_period: Some(Duration::from_secs(10)),
|
||||
recovery_parallelism: 2,
|
||||
..Default::default()
|
||||
@@ -216,11 +243,15 @@ fn test_load_standalone_example_config() {
|
||||
}),
|
||||
],
|
||||
storage: StorageConfig {
|
||||
data_home: "./greptimedb_data/".to_string(),
|
||||
data_home: DEFAULT_DATA_HOME.to_string(),
|
||||
..Default::default()
|
||||
},
|
||||
logging: LoggingOptions {
|
||||
level: Some("info".to_string()),
|
||||
dir: Path::new(DEFAULT_DATA_HOME)
|
||||
.join(DEFAULT_LOGGING_DIR)
|
||||
.to_string_lossy()
|
||||
.to_string(),
|
||||
otlp_endpoint: Some(DEFAULT_OTLP_ENDPOINT.to_string()),
|
||||
tracing_sample_ratio: Some(Default::default()),
|
||||
..Default::default()
|
||||
|
||||
@@ -26,6 +26,9 @@ pub fn metadata_store_dir(store_dir: &str) -> String {
|
||||
format!("{store_dir}/metadata")
|
||||
}
|
||||
|
||||
/// The default data home directory.
|
||||
pub const DEFAULT_DATA_HOME: &str = "./greptimedb_data";
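
For reference, this is how the new constant combines with `WAL_DIR` and `DEFAULT_LOGGING_DIR` to form the default paths asserted in the config tests above; a standalone sketch, not code from this hunk.

```rust
use std::path::Path;

use common_config::DEFAULT_DATA_HOME;
use common_telemetry::logging::DEFAULT_LOGGING_DIR;
use store_api::path_utils::WAL_DIR;

/// Sketch: the default WAL and log directories derived from the data home,
/// i.e. `./greptimedb_data/<WAL_DIR>` and `./greptimedb_data/<DEFAULT_LOGGING_DIR>`.
fn default_dirs() -> (String, String) {
    let wal = Path::new(DEFAULT_DATA_HOME).join(WAL_DIR);
    let logs = Path::new(DEFAULT_DATA_HOME).join(DEFAULT_LOGGING_DIR);
    (
        wal.to_string_lossy().to_string(),
        logs.to_string_lossy().to_string(),
    )
}
```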
|
||||
|
||||
#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)]
|
||||
#[serde(default)]
|
||||
pub struct KvBackendConfig {
|
||||
|
||||
90  src/common/function/src/adjust_flow.rs  Normal file
@@ -0,0 +1,90 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use common_macro::admin_fn;
|
||||
use common_query::error::{
|
||||
InvalidFuncArgsSnafu, MissingFlowServiceHandlerSnafu, Result, UnsupportedInputDataTypeSnafu,
|
||||
};
|
||||
use common_query::prelude::Signature;
|
||||
use datafusion::logical_expr::Volatility;
|
||||
use datatypes::value::{Value, ValueRef};
|
||||
use session::context::QueryContextRef;
|
||||
use snafu::ensure;
|
||||
use store_api::storage::ConcreteDataType;
|
||||
|
||||
use crate::handlers::FlowServiceHandlerRef;
|
||||
use crate::helper::parse_catalog_flow;
|
||||
|
||||
fn adjust_signature() -> Signature {
|
||||
Signature::exact(
|
||||
vec![
|
||||
ConcreteDataType::string_datatype(), // flow name
|
||||
ConcreteDataType::uint64_datatype(), // min_run_interval in seconds
|
||||
ConcreteDataType::uint64_datatype(), // max filter number per query
|
||||
],
|
||||
Volatility::Immutable,
|
||||
)
|
||||
}
|
||||
|
||||
#[admin_fn(
|
||||
name = AdjustFlowFunction,
|
||||
display_name = adjust_flow,
|
||||
sig_fn = adjust_signature,
|
||||
ret = uint64
|
||||
)]
|
||||
pub(crate) async fn adjust_flow(
|
||||
flow_service_handler: &FlowServiceHandlerRef,
|
||||
query_ctx: &QueryContextRef,
|
||||
params: &[ValueRef<'_>],
|
||||
) -> Result<Value> {
|
||||
ensure!(
|
||||
params.len() == 3,
|
||||
InvalidFuncArgsSnafu {
|
||||
err_msg: format!(
|
||||
"The length of the args is not correct, expect 3, have: {}",
|
||||
params.len()
|
||||
),
|
||||
}
|
||||
);
|
||||
|
||||
let (flow_name, min_run_interval, max_filter_num) = match (params[0], params[1], params[2]) {
|
||||
(
|
||||
ValueRef::String(flow_name),
|
||||
ValueRef::UInt64(min_run_interval),
|
||||
ValueRef::UInt64(max_filter_num),
|
||||
) => (flow_name, min_run_interval, max_filter_num),
|
||||
_ => {
|
||||
return UnsupportedInputDataTypeSnafu {
|
||||
function: "adjust_flow",
|
||||
datatypes: params.iter().map(|v| v.data_type()).collect::<Vec<_>>(),
|
||||
}
|
||||
.fail();
|
||||
}
|
||||
};
|
||||
|
||||
let (catalog_name, flow_name) = parse_catalog_flow(flow_name, query_ctx)?;
|
||||
|
||||
let res = flow_service_handler
|
||||
.adjust(
|
||||
&catalog_name,
|
||||
&flow_name,
|
||||
min_run_interval,
|
||||
max_filter_num as usize,
|
||||
query_ctx.clone(),
|
||||
)
|
||||
.await?;
|
||||
let affected_rows = res.affected_rows;
|
||||
|
||||
Ok(Value::from(affected_rows))
|
||||
}
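
`parse_catalog_flow` (from `crate::helper`) is referenced here and in the reworked `flush_flow` below, but its body is not part of this diff. The updated tests further down pin its contract: a bare name resolves against the session's current catalog, while `catalog.flow_name` splits into an explicit catalog. A simplified sketch of that contract only — the real helper goes through the SQL parser and the session's dialect.

```rust
/// Simplified sketch of the `parse_catalog_flow` contract inferred from the
/// flush_flow tests below; not the real implementation.
fn parse_catalog_flow_sketch(name: &str, current_catalog: &str) -> (String, String) {
    match name.split_once('.') {
        // "catalog.flow_name" -> explicit catalog
        Some((catalog, flow)) => (catalog.to_string(), flow.to_string()),
        // "flow_name" -> fall back to the session's current catalog
        None => (current_catalog.to_string(), name.to_string()),
    }
}
```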
|
||||
@@ -26,6 +26,7 @@ use flush_compact_table::{CompactTableFunction, FlushTableFunction};
|
||||
use migrate_region::MigrateRegionFunction;
|
||||
use remove_region_follower::RemoveRegionFollowerFunction;
|
||||
|
||||
use crate::adjust_flow::AdjustFlowFunction;
|
||||
use crate::flush_flow::FlushFlowFunction;
|
||||
use crate::function_registry::FunctionRegistry;
|
||||
|
||||
@@ -43,5 +44,6 @@ impl AdminFunction {
|
||||
registry.register_async(Arc::new(FlushTableFunction));
|
||||
registry.register_async(Arc::new(CompactTableFunction));
|
||||
registry.register_async(Arc::new(FlushFlowFunction));
|
||||
registry.register_async(Arc::new(AdjustFlowFunction));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -12,11 +12,7 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
mod geo_path;
|
||||
mod hll;
|
||||
mod uddsketch_state;
|
||||
|
||||
pub use geo_path::{GeoPathAccumulator, GEO_PATH_NAME};
|
||||
pub(crate) use hll::HllStateType;
|
||||
pub use hll::{HllState, HLL_MERGE_NAME, HLL_NAME};
|
||||
pub use uddsketch_state::{UddSketchState, UDDSKETCH_MERGE_NAME, UDDSKETCH_STATE_NAME};
|
||||
pub mod approximate;
|
||||
#[cfg(feature = "geo")]
|
||||
pub mod geo;
|
||||
pub mod vector;
|
||||
32  src/common/function/src/aggrs/approximate.rs  Normal file
@@ -0,0 +1,32 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::function_registry::FunctionRegistry;
|
||||
|
||||
pub(crate) mod hll;
|
||||
mod uddsketch;
|
||||
|
||||
pub(crate) struct ApproximateFunction;
|
||||
|
||||
impl ApproximateFunction {
|
||||
pub fn register(registry: &FunctionRegistry) {
|
||||
// uddsketch
|
||||
registry.register_aggr(uddsketch::UddSketchState::state_udf_impl());
|
||||
registry.register_aggr(uddsketch::UddSketchState::merge_udf_impl());
|
||||
|
||||
// hll
|
||||
registry.register_aggr(hll::HllState::state_udf_impl());
|
||||
registry.register_aggr(hll::HllState::merge_udf_impl());
|
||||
}
|
||||
}
|
||||
27  src/common/function/src/aggrs/geo.rs  Normal file
@@ -0,0 +1,27 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::function_registry::FunctionRegistry;
|
||||
|
||||
mod encoding;
|
||||
mod geo_path;
|
||||
|
||||
pub(crate) struct GeoFunction;
|
||||
|
||||
impl GeoFunction {
|
||||
pub fn register(registry: &FunctionRegistry) {
|
||||
registry.register_aggr(geo_path::GeoPathAccumulator::uadf_impl());
|
||||
registry.register_aggr(encoding::JsonPathAccumulator::uadf_impl());
|
||||
}
|
||||
}
|
||||
@@ -19,9 +19,12 @@ use common_error::status_code::StatusCode;
|
||||
use common_macro::{as_aggr_func_creator, AggrFuncTypeStore};
|
||||
use common_query::error::{self, InvalidInputStateSnafu, Result};
|
||||
use common_query::logical_plan::accumulator::AggrFuncTypeStore;
|
||||
use common_query::logical_plan::{Accumulator, AggregateFunctionCreator};
|
||||
use common_query::logical_plan::{
|
||||
create_aggregate_function, Accumulator, AggregateFunctionCreator,
|
||||
};
|
||||
use common_query::prelude::AccumulatorCreatorFunction;
|
||||
use common_time::Timestamp;
|
||||
use datafusion_expr::AggregateUDF;
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use datatypes::value::{ListValue, Value};
|
||||
use datatypes::vectors::VectorRef;
|
||||
@@ -47,6 +50,16 @@ impl JsonPathAccumulator {
|
||||
timestamp_type,
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a new `AggregateUDF` for the `json_encode_path` aggregate function.
|
||||
pub fn uadf_impl() -> AggregateUDF {
|
||||
create_aggregate_function(
|
||||
"json_encode_path".to_string(),
|
||||
3,
|
||||
Arc::new(JsonPathEncodeFunctionCreator::default()),
|
||||
)
|
||||
.into()
|
||||
}
|
||||
}
|
||||
|
||||
impl Accumulator for JsonPathAccumulator {
|
||||
@@ -47,7 +47,7 @@ impl GeoPathAccumulator {
|
||||
Self::default()
|
||||
}
|
||||
|
||||
pub fn udf_impl() -> AggregateUDF {
|
||||
pub fn uadf_impl() -> AggregateUDF {
|
||||
create_udaf(
|
||||
GEO_PATH_NAME,
|
||||
// Input types: lat, lng, timestamp
|
||||
29  src/common/function/src/aggrs/vector.rs  Normal file
@@ -0,0 +1,29 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::aggrs::vector::product::VectorProduct;
|
||||
use crate::aggrs::vector::sum::VectorSum;
|
||||
use crate::function_registry::FunctionRegistry;
|
||||
|
||||
mod product;
|
||||
mod sum;
|
||||
|
||||
pub(crate) struct VectorFunction;
|
||||
|
||||
impl VectorFunction {
|
||||
pub fn register(registry: &FunctionRegistry) {
|
||||
registry.register_aggr(VectorSum::uadf_impl());
|
||||
registry.register_aggr(VectorProduct::uadf_impl());
|
||||
}
|
||||
}
|
||||
@@ -16,8 +16,11 @@ use std::sync::Arc;
|
||||
|
||||
use common_macro::{as_aggr_func_creator, AggrFuncTypeStore};
|
||||
use common_query::error::{CreateAccumulatorSnafu, Error, InvalidFuncArgsSnafu};
|
||||
use common_query::logical_plan::{Accumulator, AggregateFunctionCreator};
|
||||
use common_query::logical_plan::{
|
||||
create_aggregate_function, Accumulator, AggregateFunctionCreator,
|
||||
};
|
||||
use common_query::prelude::AccumulatorCreatorFunction;
|
||||
use datafusion_expr::AggregateUDF;
|
||||
use datatypes::prelude::{ConcreteDataType, Value, *};
|
||||
use datatypes::vectors::VectorRef;
|
||||
use nalgebra::{Const, DVectorView, Dyn, OVector};
|
||||
@@ -75,6 +78,16 @@ impl AggregateFunctionCreator for VectorProductCreator {
|
||||
}
|
||||
|
||||
impl VectorProduct {
|
||||
/// Create a new `AggregateUDF` for the `vec_product` aggregate function.
|
||||
pub fn uadf_impl() -> AggregateUDF {
|
||||
create_aggregate_function(
|
||||
"vec_product".to_string(),
|
||||
1,
|
||||
Arc::new(VectorProductCreator::default()),
|
||||
)
|
||||
.into()
|
||||
}
|
||||
|
||||
fn inner(&mut self, len: usize) -> &mut OVector<f32, Dyn> {
|
||||
self.product.get_or_insert_with(|| {
|
||||
OVector::from_iterator_generic(Dyn(len), Const::<1>, (0..len).map(|_| 1.0))
|
||||
@@ -16,8 +16,11 @@ use std::sync::Arc;
|
||||
|
||||
use common_macro::{as_aggr_func_creator, AggrFuncTypeStore};
|
||||
use common_query::error::{CreateAccumulatorSnafu, Error, InvalidFuncArgsSnafu};
|
||||
use common_query::logical_plan::{Accumulator, AggregateFunctionCreator};
|
||||
use common_query::logical_plan::{
|
||||
create_aggregate_function, Accumulator, AggregateFunctionCreator,
|
||||
};
|
||||
use common_query::prelude::AccumulatorCreatorFunction;
|
||||
use datafusion_expr::AggregateUDF;
|
||||
use datatypes::prelude::{ConcreteDataType, Value, *};
|
||||
use datatypes::vectors::VectorRef;
|
||||
use nalgebra::{Const, DVectorView, Dyn, OVector};
|
||||
@@ -25,6 +28,7 @@ use snafu::ensure;
|
||||
|
||||
use crate::scalars::vector::impl_conv::{as_veclit, as_veclit_if_const, veclit_to_binlit};
|
||||
|
||||
/// The accumulator for the `vec_sum` aggregate function.
|
||||
#[derive(Debug, Default)]
|
||||
pub struct VectorSum {
|
||||
sum: Option<OVector<f32, Dyn>>,
|
||||
@@ -74,6 +78,16 @@ impl AggregateFunctionCreator for VectorSumCreator {
|
||||
}
|
||||
|
||||
impl VectorSum {
|
||||
/// Create a new `AggregateUDF` for the `vec_sum` aggregate function.
|
||||
pub fn uadf_impl() -> AggregateUDF {
|
||||
create_aggregate_function(
|
||||
"vec_sum".to_string(),
|
||||
1,
|
||||
Arc::new(VectorSumCreator::default()),
|
||||
)
|
||||
.into()
|
||||
}
|
||||
|
||||
fn inner(&mut self, len: usize) -> &mut OVector<f32, Dyn> {
|
||||
self.sum
|
||||
.get_or_insert_with(|| OVector::zeros_generic(Dyn(len), Const::<1>))
|
||||
@@ -12,21 +12,19 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use common_error::ext::BoxedError;
|
||||
use common_macro::admin_fn;
|
||||
use common_query::error::{
|
||||
ExecuteSnafu, InvalidFuncArgsSnafu, MissingFlowServiceHandlerSnafu, Result,
|
||||
UnsupportedInputDataTypeSnafu,
|
||||
InvalidFuncArgsSnafu, MissingFlowServiceHandlerSnafu, Result, UnsupportedInputDataTypeSnafu,
|
||||
};
|
||||
use common_query::prelude::Signature;
|
||||
use datafusion::logical_expr::Volatility;
|
||||
use datatypes::value::{Value, ValueRef};
|
||||
use session::context::QueryContextRef;
|
||||
use snafu::{ensure, ResultExt};
|
||||
use sql::parser::ParserContext;
|
||||
use snafu::ensure;
|
||||
use store_api::storage::ConcreteDataType;
|
||||
|
||||
use crate::handlers::FlowServiceHandlerRef;
|
||||
use crate::helper::parse_catalog_flow;
|
||||
|
||||
fn flush_signature() -> Signature {
|
||||
Signature::uniform(
|
||||
@@ -47,20 +45,6 @@ pub(crate) async fn flush_flow(
|
||||
query_ctx: &QueryContextRef,
|
||||
params: &[ValueRef<'_>],
|
||||
) -> Result<Value> {
|
||||
let (catalog_name, flow_name) = parse_flush_flow(params, query_ctx)?;
|
||||
|
||||
let res = flow_service_handler
|
||||
.flush(&catalog_name, &flow_name, query_ctx.clone())
|
||||
.await?;
|
||||
let affected_rows = res.affected_rows;
|
||||
|
||||
Ok(Value::from(affected_rows))
|
||||
}
|
||||
|
||||
fn parse_flush_flow(
|
||||
params: &[ValueRef<'_>],
|
||||
query_ctx: &QueryContextRef,
|
||||
) -> Result<(String, String)> {
|
||||
ensure!(
|
||||
params.len() == 1,
|
||||
InvalidFuncArgsSnafu {
|
||||
@@ -70,7 +54,6 @@ fn parse_flush_flow(
|
||||
),
|
||||
}
|
||||
);
|
||||
|
||||
let ValueRef::String(flow_name) = params[0] else {
|
||||
return UnsupportedInputDataTypeSnafu {
|
||||
function: "flush_flow",
|
||||
@@ -78,27 +61,14 @@ fn parse_flush_flow(
|
||||
}
|
||||
.fail();
|
||||
};
|
||||
let obj_name = ParserContext::parse_table_name(flow_name, query_ctx.sql_dialect())
|
||||
.map_err(BoxedError::new)
|
||||
.context(ExecuteSnafu)?;
|
||||
let (catalog_name, flow_name) = parse_catalog_flow(flow_name, query_ctx)?;
|
||||
|
||||
let (catalog_name, flow_name) = match &obj_name.0[..] {
|
||||
[flow_name] => (
|
||||
query_ctx.current_catalog().to_string(),
|
||||
flow_name.value.clone(),
|
||||
),
|
||||
[catalog, flow_name] => (catalog.value.clone(), flow_name.value.clone()),
|
||||
_ => {
|
||||
return InvalidFuncArgsSnafu {
|
||||
err_msg: format!(
|
||||
"expect flow name to be <catalog>.<flow-name> or <flow-name>, actual: {}",
|
||||
obj_name
|
||||
),
|
||||
}
|
||||
.fail()
|
||||
}
|
||||
};
|
||||
Ok((catalog_name, flow_name))
|
||||
let res = flow_service_handler
|
||||
.flush(&catalog_name, &flow_name, query_ctx.clone())
|
||||
.await?;
|
||||
let affected_rows = res.affected_rows;
|
||||
|
||||
Ok(Value::from(affected_rows))
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
@@ -154,10 +124,7 @@ mod test {
|
||||
("catalog.flow_name", ("catalog", "flow_name")),
|
||||
];
|
||||
for (input, expected) in testcases.iter() {
|
||||
let args = vec![*input];
|
||||
let args = args.into_iter().map(ValueRef::String).collect::<Vec<_>>();
|
||||
|
||||
let result = parse_flush_flow(&args, &QueryContext::arc()).unwrap();
|
||||
let result = parse_catalog_flow(input, &QueryContext::arc()).unwrap();
|
||||
assert_eq!(*expected, (result.0.as_str(), result.1.as_str()));
|
||||
}
|
||||
}
|
||||
|
||||
63  src/common/function/src/function_factory.rs  Normal file
@@ -0,0 +1,63 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::sync::Arc;

use datafusion_expr::ScalarUDF;

use crate::function::{FunctionContext, FunctionRef};
use crate::scalars::udf::create_udf;

/// A factory for creating `ScalarUDF`s that require a function context.
#[derive(Clone)]
pub struct ScalarFunctionFactory {
name: String,
factory: Arc<dyn Fn(FunctionContext) -> ScalarUDF + Send + Sync>,
}

impl ScalarFunctionFactory {
/// Returns the name of the function.
pub fn name(&self) -> &str {
&self.name
}

/// Returns a `ScalarUDF` when given a function context.
pub fn provide(&self, ctx: FunctionContext) -> ScalarUDF {
(self.factory)(ctx)
}
}

impl From<ScalarUDF> for ScalarFunctionFactory {
fn from(df_udf: ScalarUDF) -> Self {
let name = df_udf.name().to_string();
let func = Arc::new(move |_ctx| df_udf.clone());
Self {
name,
factory: func,
}
}
}

impl From<FunctionRef> for ScalarFunctionFactory {
fn from(func: FunctionRef) -> Self {
let name = func.name().to_string();
let func = Arc::new(move |ctx: FunctionContext| {
create_udf(func.clone(), ctx.query_ctx, ctx.state)
});
Self {
name,
factory: func,
}
}
}
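For orientation only (not part of the diff): the factory can be driven either from an existing DataFusion `ScalarUDF` or from a `FunctionRef`, in which case the UDF is built lazily from a `FunctionContext`. A minimal sketch, assuming the caller supplies both the UDF and the context, and that the crate paths mirror the `use crate::...` imports in the new file above:

```rust
use common_function::function::FunctionContext;
use common_function::function_factory::ScalarFunctionFactory;
use datafusion_expr::ScalarUDF;

// `my_udf` and `ctx` are assumed to be supplied by the caller.
fn provide_example(my_udf: ScalarUDF, ctx: FunctionContext) -> ScalarUDF {
    // A context-free UDF converts directly via `From<ScalarUDF>`;
    // `provide` then ignores the context and clones the wrapped UDF.
    let factory: ScalarFunctionFactory = my_udf.into();
    factory.provide(ctx)
}
```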
@@ -16,11 +16,14 @@
use std::collections::HashMap;
use std::sync::{Arc, RwLock};

use datafusion_expr::AggregateUDF;
use once_cell::sync::Lazy;

use crate::admin::AdminFunction;
use crate::function::{AsyncFunctionRef, FunctionRef};
use crate::scalars::aggregate::{AggregateFunctionMetaRef, AggregateFunctions};
use crate::aggrs::approximate::ApproximateFunction;
use crate::aggrs::vector::VectorFunction as VectorAggrFunction;
use crate::function::{AsyncFunctionRef, Function, FunctionRef};
use crate::function_factory::ScalarFunctionFactory;
use crate::scalars::date::DateFunction;
use crate::scalars::expression::ExpressionFunction;
use crate::scalars::hll_count::HllCalcFunction;
@@ -31,18 +34,19 @@ use crate::scalars::matches_term::MatchesTermFunction;
use crate::scalars::math::MathFunction;
use crate::scalars::timestamp::TimestampFunction;
use crate::scalars::uddsketch_calc::UddSketchCalcFunction;
use crate::scalars::vector::VectorFunction;
use crate::scalars::vector::VectorFunction as VectorScalarFunction;
use crate::system::SystemFunction;

#[derive(Default)]
pub struct FunctionRegistry {
functions: RwLock<HashMap<String, FunctionRef>>,
functions: RwLock<HashMap<String, ScalarFunctionFactory>>,
async_functions: RwLock<HashMap<String, AsyncFunctionRef>>,
aggregate_functions: RwLock<HashMap<String, AggregateFunctionMetaRef>>,
aggregate_functions: RwLock<HashMap<String, AggregateUDF>>,
}

impl FunctionRegistry {
pub fn register(&self, func: FunctionRef) {
pub fn register(&self, func: impl Into<ScalarFunctionFactory>) {
let func = func.into();
let _ = self
.functions
.write()
@@ -50,6 +54,10 @@ impl FunctionRegistry {
.insert(func.name().to_string(), func);
}

pub fn register_scalar(&self, func: impl Function + 'static) {
self.register(Arc::new(func) as FunctionRef);
}

pub fn register_async(&self, func: AsyncFunctionRef) {
let _ = self
.async_functions
@@ -58,6 +66,14 @@ impl FunctionRegistry {
.insert(func.name().to_string(), func);
}

pub fn register_aggr(&self, func: AggregateUDF) {
let _ = self
.aggregate_functions
.write()
.unwrap()
.insert(func.name().to_string(), func);
}

pub fn get_async_function(&self, name: &str) -> Option<AsyncFunctionRef> {
self.async_functions.read().unwrap().get(name).cloned()
}
@@ -71,27 +87,16 @@ impl FunctionRegistry {
.collect()
}

pub fn register_aggregate_function(&self, func: AggregateFunctionMetaRef) {
let _ = self
.aggregate_functions
.write()
.unwrap()
.insert(func.name(), func);
}

pub fn get_aggr_function(&self, name: &str) -> Option<AggregateFunctionMetaRef> {
self.aggregate_functions.read().unwrap().get(name).cloned()
}

pub fn get_function(&self, name: &str) -> Option<FunctionRef> {
#[cfg(test)]
pub fn get_function(&self, name: &str) -> Option<ScalarFunctionFactory> {
self.functions.read().unwrap().get(name).cloned()
}

pub fn functions(&self) -> Vec<FunctionRef> {
pub fn scalar_functions(&self) -> Vec<ScalarFunctionFactory> {
self.functions.read().unwrap().values().cloned().collect()
}

pub fn aggregate_functions(&self) -> Vec<AggregateFunctionMetaRef> {
pub fn aggregate_functions(&self) -> Vec<AggregateUDF> {
self.aggregate_functions
.read()
.unwrap()
@@ -112,9 +117,6 @@ pub static FUNCTION_REGISTRY: Lazy<Arc<FunctionRegistry>> = Lazy::new(|| {
UddSketchCalcFunction::register(&function_registry);
HllCalcFunction::register(&function_registry);

// Aggregate functions
AggregateFunctions::register(&function_registry);

// Full text search function
MatchesFunction::register(&function_registry);
MatchesTermFunction::register(&function_registry);
@@ -127,15 +129,21 @@ pub static FUNCTION_REGISTRY: Lazy<Arc<FunctionRegistry>> = Lazy::new(|| {
JsonFunction::register(&function_registry);

// Vector related functions
VectorFunction::register(&function_registry);
VectorScalarFunction::register(&function_registry);
VectorAggrFunction::register(&function_registry);

// Geo functions
#[cfg(feature = "geo")]
crate::scalars::geo::GeoFunctions::register(&function_registry);
#[cfg(feature = "geo")]
crate::aggrs::geo::GeoFunction::register(&function_registry);

// Ip functions
IpFunctions::register(&function_registry);

// Approximate functions
ApproximateFunction::register(&function_registry);

Arc::new(function_registry)
});

@@ -147,12 +155,11 @@ mod tests {
#[test]
fn test_function_registry() {
let registry = FunctionRegistry::default();
let func = Arc::new(TestAndFunction);

assert!(registry.get_function("test_and").is_none());
assert!(registry.functions().is_empty());
registry.register(func);
assert!(registry.scalar_functions().is_empty());
registry.register_scalar(TestAndFunction);
let _ = registry.get_function("test_and").unwrap();
assert_eq!(1, registry.functions().len());
assert_eq!(1, registry.scalar_functions().len());
}
}

@@ -87,6 +87,15 @@ pub trait FlowServiceHandler: Send + Sync {
flow: &str,
ctx: QueryContextRef,
) -> Result<api::v1::flow::FlowResponse>;

async fn adjust(
&self,
catalog: &str,
flow: &str,
min_run_interval_secs: u64,
max_filter_num_per_query: usize,
ctx: QueryContextRef,
) -> Result<api::v1::flow::FlowResponse>;
}

pub type TableMutationHandlerRef = Arc<dyn TableMutationHandler>;

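For orientation (not part of the diff): `adjust` sits next to `flush` on the same handler, so an admin function that has already resolved the `(catalog, flow)` pair can forward both tuning knobs in one call. A hedged sketch mirroring the flush_flow admin path earlier in this diff; the error type, handler binding, and the values 5 and 20 are illustrative:

```rust
use common_query::error::Result;
use session::context::QueryContextRef;

// Assumes `FlowServiceHandler` is object-safe here (async_trait) and that the
// handler's `Result` alias is the common_query one; both are assumptions.
async fn adjust_flow_example(
    handler: &dyn FlowServiceHandler,
    catalog_name: &str,
    flow_name: &str,
    query_ctx: QueryContextRef,
) -> Result<u64> {
    // 5 = min_run_interval_secs, 20 = max_filter_num_per_query (placeholders).
    let res = handler
        .adjust(catalog_name, flow_name, 5, 20, query_ctx)
        .await?;
    Ok(res.affected_rows)
}
```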
@@ -12,12 +12,15 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use common_query::error::{InvalidInputTypeSnafu, Result};
use common_error::ext::BoxedError;
use common_query::error::{ExecuteSnafu, InvalidFuncArgsSnafu, InvalidInputTypeSnafu, Result};
use common_query::prelude::{Signature, TypeSignature, Volatility};
use datatypes::prelude::ConcreteDataType;
use datatypes::types::cast::cast;
use datatypes::value::ValueRef;
use session::context::QueryContextRef;
use snafu::ResultExt;
use sql::parser::ParserContext;

/// Create a function signature with oneof signatures of interleaving two arguments.
pub fn one_of_sigs2(args1: Vec<ConcreteDataType>, args2: Vec<ConcreteDataType>) -> Signature {
@@ -43,3 +46,30 @@ pub fn cast_u64(value: &ValueRef) -> Result<Option<u64>> {
})
.map(|v| v.as_u64())
}

pub fn parse_catalog_flow(
flow_name: &str,
query_ctx: &QueryContextRef,
) -> Result<(String, String)> {
let obj_name = ParserContext::parse_table_name(flow_name, query_ctx.sql_dialect())
.map_err(BoxedError::new)
.context(ExecuteSnafu)?;

let (catalog_name, flow_name) = match &obj_name.0[..] {
[flow_name] => (
query_ctx.current_catalog().to_string(),
flow_name.value.clone(),
),
[catalog, flow_name] => (catalog.value.clone(), flow_name.value.clone()),
_ => {
return InvalidFuncArgsSnafu {
err_msg: format!(
"expect flow name to be <catalog>.<flow-name> or <flow-name>, actual: {}",
obj_name
),
}
.fail()
}
};
Ok((catalog_name, flow_name))
}

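The behaviour of `parse_catalog_flow` is pinned down by the test case `("catalog.flow_name", ("catalog", "flow_name"))` shown earlier in this diff: a dotted name is split into `(catalog, flow)`, while a bare name falls back to the session's current catalog. Roughly (assuming the default catalog of `QueryContext::arc()` is "greptime"):

```rust
// Illustrative expectations only; `ctx` is a QueryContextRef.
let (catalog, flow) = parse_catalog_flow("catalog.flow_name", &ctx)?;
assert_eq!((catalog.as_str(), flow.as_str()), ("catalog", "flow_name"));

let (catalog, flow) = parse_catalog_flow("flow_name", &ctx)?;
assert_eq!(catalog, "greptime"); // falls back to ctx.current_catalog()
```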
@@ -15,16 +15,18 @@
#![feature(let_chains)]
#![feature(try_blocks)]

mod adjust_flow;
mod admin;
mod flush_flow;
mod macros;
pub mod scalars;
mod system;

pub mod aggr;
pub mod aggrs;
pub mod function;
pub mod function_factory;
pub mod function_registry;
pub mod handlers;
pub mod helper;
pub mod scalars;
pub mod state;
pub mod utils;

@@ -12,7 +12,6 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
pub mod aggregate;
|
||||
pub(crate) mod date;
|
||||
pub mod expression;
|
||||
#[cfg(feature = "geo")]
|
||||
|
||||
@@ -1,89 +0,0 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//! # Deprecate Warning:
|
||||
//!
|
||||
//! This module is deprecated and will be removed in the future.
|
||||
//! All UDAF implementation here are not maintained and should
|
||||
//! not be used before they are refactored into the `src/aggr`
|
||||
//! version.
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_query::logical_plan::AggregateFunctionCreatorRef;
|
||||
|
||||
use crate::function_registry::FunctionRegistry;
|
||||
use crate::scalars::vector::product::VectorProductCreator;
|
||||
use crate::scalars::vector::sum::VectorSumCreator;
|
||||
|
||||
/// A function creates `AggregateFunctionCreator`.
|
||||
/// "Aggregator" *is* AggregatorFunction. Since the latter is long, we use this short alias for it.
|
||||
/// The two names might be used interchangeably.
|
||||
type AggregatorCreatorFunction = Arc<dyn Fn() -> AggregateFunctionCreatorRef + Send + Sync>;
|
||||
|
||||
/// `AggregateFunctionMeta` dynamically creates AggregateFunctionCreator.
|
||||
#[derive(Clone)]
|
||||
pub struct AggregateFunctionMeta {
|
||||
name: String,
|
||||
args_count: u8,
|
||||
creator: AggregatorCreatorFunction,
|
||||
}
|
||||
|
||||
pub type AggregateFunctionMetaRef = Arc<AggregateFunctionMeta>;
|
||||
|
||||
impl AggregateFunctionMeta {
|
||||
pub fn new(name: &str, args_count: u8, creator: AggregatorCreatorFunction) -> Self {
|
||||
Self {
|
||||
name: name.to_string(),
|
||||
args_count,
|
||||
creator,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn name(&self) -> String {
|
||||
self.name.to_string()
|
||||
}
|
||||
|
||||
pub fn args_count(&self) -> u8 {
|
||||
self.args_count
|
||||
}
|
||||
|
||||
pub fn create(&self) -> AggregateFunctionCreatorRef {
|
||||
(self.creator)()
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) struct AggregateFunctions;
|
||||
|
||||
impl AggregateFunctions {
|
||||
pub fn register(registry: &FunctionRegistry) {
|
||||
registry.register_aggregate_function(Arc::new(AggregateFunctionMeta::new(
|
||||
"vec_sum",
|
||||
1,
|
||||
Arc::new(|| Arc::new(VectorSumCreator::default())),
|
||||
)));
|
||||
registry.register_aggregate_function(Arc::new(AggregateFunctionMeta::new(
|
||||
"vec_product",
|
||||
1,
|
||||
Arc::new(|| Arc::new(VectorProductCreator::default())),
|
||||
)));
|
||||
|
||||
#[cfg(feature = "geo")]
|
||||
registry.register_aggregate_function(Arc::new(AggregateFunctionMeta::new(
|
||||
"json_encode_path",
|
||||
3,
|
||||
Arc::new(|| Arc::new(super::geo::encoding::JsonPathEncodeFunctionCreator::default())),
|
||||
)));
|
||||
}
|
||||
}
|
||||
@@ -12,7 +12,6 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::Arc;
|
||||
mod date_add;
|
||||
mod date_format;
|
||||
mod date_sub;
|
||||
@@ -27,8 +26,8 @@ pub(crate) struct DateFunction;
|
||||
|
||||
impl DateFunction {
|
||||
pub fn register(registry: &FunctionRegistry) {
|
||||
registry.register(Arc::new(DateAddFunction));
|
||||
registry.register(Arc::new(DateSubFunction));
|
||||
registry.register(Arc::new(DateFormatFunction));
|
||||
registry.register_scalar(DateAddFunction);
|
||||
registry.register_scalar(DateSubFunction);
|
||||
registry.register_scalar(DateFormatFunction);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -17,8 +17,6 @@ mod ctx;
|
||||
mod is_null;
|
||||
mod unary;
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
pub use binary::scalar_binary_op;
|
||||
pub use ctx::EvalContext;
|
||||
pub use unary::scalar_unary_op;
|
||||
@@ -30,6 +28,6 @@ pub(crate) struct ExpressionFunction;
|
||||
|
||||
impl ExpressionFunction {
|
||||
pub fn register(registry: &FunctionRegistry) {
|
||||
registry.register(Arc::new(IsNullFunction));
|
||||
registry.register_scalar(IsNullFunction);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -12,11 +12,9 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::Arc;
|
||||
pub(crate) mod encoding;
|
||||
mod geohash;
|
||||
mod h3;
|
||||
mod helpers;
|
||||
pub(crate) mod helpers;
|
||||
mod measure;
|
||||
mod relation;
|
||||
mod s2;
|
||||
@@ -29,57 +27,57 @@ pub(crate) struct GeoFunctions;
|
||||
impl GeoFunctions {
|
||||
pub fn register(registry: &FunctionRegistry) {
|
||||
// geohash
|
||||
registry.register(Arc::new(geohash::GeohashFunction));
|
||||
registry.register(Arc::new(geohash::GeohashNeighboursFunction));
|
||||
registry.register_scalar(geohash::GeohashFunction);
|
||||
registry.register_scalar(geohash::GeohashNeighboursFunction);
|
||||
|
||||
// h3 index
|
||||
registry.register(Arc::new(h3::H3LatLngToCell));
|
||||
registry.register(Arc::new(h3::H3LatLngToCellString));
|
||||
registry.register_scalar(h3::H3LatLngToCell);
|
||||
registry.register_scalar(h3::H3LatLngToCellString);
|
||||
|
||||
// h3 index inspection
|
||||
registry.register(Arc::new(h3::H3CellBase));
|
||||
registry.register(Arc::new(h3::H3CellIsPentagon));
|
||||
registry.register(Arc::new(h3::H3StringToCell));
|
||||
registry.register(Arc::new(h3::H3CellToString));
|
||||
registry.register(Arc::new(h3::H3CellCenterLatLng));
|
||||
registry.register(Arc::new(h3::H3CellResolution));
|
||||
registry.register_scalar(h3::H3CellBase);
|
||||
registry.register_scalar(h3::H3CellIsPentagon);
|
||||
registry.register_scalar(h3::H3StringToCell);
|
||||
registry.register_scalar(h3::H3CellToString);
|
||||
registry.register_scalar(h3::H3CellCenterLatLng);
|
||||
registry.register_scalar(h3::H3CellResolution);
|
||||
|
||||
// h3 hierarchical grid
|
||||
registry.register(Arc::new(h3::H3CellCenterChild));
|
||||
registry.register(Arc::new(h3::H3CellParent));
|
||||
registry.register(Arc::new(h3::H3CellToChildren));
|
||||
registry.register(Arc::new(h3::H3CellToChildrenSize));
|
||||
registry.register(Arc::new(h3::H3CellToChildPos));
|
||||
registry.register(Arc::new(h3::H3ChildPosToCell));
|
||||
registry.register(Arc::new(h3::H3CellContains));
|
||||
registry.register_scalar(h3::H3CellCenterChild);
|
||||
registry.register_scalar(h3::H3CellParent);
|
||||
registry.register_scalar(h3::H3CellToChildren);
|
||||
registry.register_scalar(h3::H3CellToChildrenSize);
|
||||
registry.register_scalar(h3::H3CellToChildPos);
|
||||
registry.register_scalar(h3::H3ChildPosToCell);
|
||||
registry.register_scalar(h3::H3CellContains);
|
||||
|
||||
// h3 grid traversal
|
||||
registry.register(Arc::new(h3::H3GridDisk));
|
||||
registry.register(Arc::new(h3::H3GridDiskDistances));
|
||||
registry.register(Arc::new(h3::H3GridDistance));
|
||||
registry.register(Arc::new(h3::H3GridPathCells));
|
||||
registry.register_scalar(h3::H3GridDisk);
|
||||
registry.register_scalar(h3::H3GridDiskDistances);
|
||||
registry.register_scalar(h3::H3GridDistance);
|
||||
registry.register_scalar(h3::H3GridPathCells);
|
||||
|
||||
// h3 measurement
|
||||
registry.register(Arc::new(h3::H3CellDistanceSphereKm));
|
||||
registry.register(Arc::new(h3::H3CellDistanceEuclideanDegree));
|
||||
registry.register_scalar(h3::H3CellDistanceSphereKm);
|
||||
registry.register_scalar(h3::H3CellDistanceEuclideanDegree);
|
||||
|
||||
// s2
|
||||
registry.register(Arc::new(s2::S2LatLngToCell));
|
||||
registry.register(Arc::new(s2::S2CellLevel));
|
||||
registry.register(Arc::new(s2::S2CellToToken));
|
||||
registry.register(Arc::new(s2::S2CellParent));
|
||||
registry.register_scalar(s2::S2LatLngToCell);
|
||||
registry.register_scalar(s2::S2CellLevel);
|
||||
registry.register_scalar(s2::S2CellToToken);
|
||||
registry.register_scalar(s2::S2CellParent);
|
||||
|
||||
// spatial data type
|
||||
registry.register(Arc::new(wkt::LatLngToPointWkt));
|
||||
registry.register_scalar(wkt::LatLngToPointWkt);
|
||||
|
||||
// spatial relation
|
||||
registry.register(Arc::new(relation::STContains));
|
||||
registry.register(Arc::new(relation::STWithin));
|
||||
registry.register(Arc::new(relation::STIntersects));
|
||||
registry.register_scalar(relation::STContains);
|
||||
registry.register_scalar(relation::STWithin);
|
||||
registry.register_scalar(relation::STIntersects);
|
||||
|
||||
// spatial measure
|
||||
registry.register(Arc::new(measure::STDistance));
|
||||
registry.register(Arc::new(measure::STDistanceSphere));
|
||||
registry.register(Arc::new(measure::STArea));
|
||||
registry.register_scalar(measure::STDistance);
|
||||
registry.register_scalar(measure::STDistanceSphere);
|
||||
registry.register_scalar(measure::STArea);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -37,7 +37,7 @@ macro_rules! ensure_columns_len {
|
||||
};
|
||||
}
|
||||
|
||||
pub(super) use ensure_columns_len;
|
||||
pub(crate) use ensure_columns_len;
|
||||
|
||||
macro_rules! ensure_columns_n {
|
||||
($columns:ident, $n:literal) => {
|
||||
@@ -58,7 +58,7 @@ macro_rules! ensure_columns_n {
|
||||
};
|
||||
}
|
||||
|
||||
pub(super) use ensure_columns_n;
|
||||
pub(crate) use ensure_columns_n;
|
||||
|
||||
macro_rules! ensure_and_coerce {
|
||||
($compare:expr, $coerce:expr) => {{
|
||||
@@ -72,4 +72,4 @@ macro_rules! ensure_and_coerce {
|
||||
}};
|
||||
}
|
||||
|
||||
pub(super) use ensure_and_coerce;
|
||||
pub(crate) use ensure_and_coerce;
|
||||
|
||||
@@ -16,7 +16,6 @@
|
||||
|
||||
use std::fmt;
|
||||
use std::fmt::Display;
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_query::error::{DowncastVectorSnafu, InvalidFuncArgsSnafu, Result};
|
||||
use common_query::prelude::{Signature, Volatility};
|
||||
@@ -27,7 +26,7 @@ use datatypes::vectors::{BinaryVector, MutableVector, UInt64VectorBuilder, Vecto
|
||||
use hyperloglogplus::HyperLogLog;
|
||||
use snafu::OptionExt;
|
||||
|
||||
use crate::aggr::HllStateType;
|
||||
use crate::aggrs::approximate::hll::HllStateType;
|
||||
use crate::function::{Function, FunctionContext};
|
||||
use crate::function_registry::FunctionRegistry;
|
||||
|
||||
@@ -44,7 +43,7 @@ pub struct HllCalcFunction;
|
||||
|
||||
impl HllCalcFunction {
|
||||
pub fn register(registry: &FunctionRegistry) {
|
||||
registry.register(Arc::new(HllCalcFunction));
|
||||
registry.register_scalar(HllCalcFunction);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -117,6 +116,8 @@ impl Function for HllCalcFunction {
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::sync::Arc;
|
||||
|
||||
use datatypes::vectors::BinaryVector;
|
||||
|
||||
use super::*;
|
||||
|
||||
@@ -17,8 +17,6 @@ mod ipv4;
|
||||
mod ipv6;
|
||||
mod range;
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use cidr::{Ipv4ToCidr, Ipv6ToCidr};
|
||||
use ipv4::{Ipv4NumToString, Ipv4StringToNum};
|
||||
use ipv6::{Ipv6NumToString, Ipv6StringToNum};
|
||||
@@ -31,15 +29,15 @@ pub(crate) struct IpFunctions;
|
||||
impl IpFunctions {
|
||||
pub fn register(registry: &FunctionRegistry) {
|
||||
// Register IPv4 functions
|
||||
registry.register(Arc::new(Ipv4NumToString));
|
||||
registry.register(Arc::new(Ipv4StringToNum));
|
||||
registry.register(Arc::new(Ipv4ToCidr));
|
||||
registry.register(Arc::new(Ipv4InRange));
|
||||
registry.register_scalar(Ipv4NumToString);
|
||||
registry.register_scalar(Ipv4StringToNum);
|
||||
registry.register_scalar(Ipv4ToCidr);
|
||||
registry.register_scalar(Ipv4InRange);
|
||||
|
||||
// Register IPv6 functions
|
||||
registry.register(Arc::new(Ipv6NumToString));
|
||||
registry.register(Arc::new(Ipv6StringToNum));
|
||||
registry.register(Arc::new(Ipv6ToCidr));
|
||||
registry.register(Arc::new(Ipv6InRange));
|
||||
registry.register_scalar(Ipv6NumToString);
|
||||
registry.register_scalar(Ipv6StringToNum);
|
||||
registry.register_scalar(Ipv6ToCidr);
|
||||
registry.register_scalar(Ipv6InRange);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -12,7 +12,6 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::Arc;
|
||||
pub mod json_get;
|
||||
mod json_is;
|
||||
mod json_path_exists;
|
||||
@@ -33,23 +32,23 @@ pub(crate) struct JsonFunction;
|
||||
|
||||
impl JsonFunction {
|
||||
pub fn register(registry: &FunctionRegistry) {
|
||||
registry.register(Arc::new(JsonToStringFunction));
|
||||
registry.register(Arc::new(ParseJsonFunction));
|
||||
registry.register_scalar(JsonToStringFunction);
|
||||
registry.register_scalar(ParseJsonFunction);
|
||||
|
||||
registry.register(Arc::new(JsonGetInt));
|
||||
registry.register(Arc::new(JsonGetFloat));
|
||||
registry.register(Arc::new(JsonGetString));
|
||||
registry.register(Arc::new(JsonGetBool));
|
||||
registry.register_scalar(JsonGetInt);
|
||||
registry.register_scalar(JsonGetFloat);
|
||||
registry.register_scalar(JsonGetString);
|
||||
registry.register_scalar(JsonGetBool);
|
||||
|
||||
registry.register(Arc::new(JsonIsNull));
|
||||
registry.register(Arc::new(JsonIsInt));
|
||||
registry.register(Arc::new(JsonIsFloat));
|
||||
registry.register(Arc::new(JsonIsString));
|
||||
registry.register(Arc::new(JsonIsBool));
|
||||
registry.register(Arc::new(JsonIsArray));
|
||||
registry.register(Arc::new(JsonIsObject));
|
||||
registry.register_scalar(JsonIsNull);
|
||||
registry.register_scalar(JsonIsInt);
|
||||
registry.register_scalar(JsonIsFloat);
|
||||
registry.register_scalar(JsonIsString);
|
||||
registry.register_scalar(JsonIsBool);
|
||||
registry.register_scalar(JsonIsArray);
|
||||
registry.register_scalar(JsonIsObject);
|
||||
|
||||
registry.register(Arc::new(json_path_exists::JsonPathExistsFunction));
|
||||
registry.register(Arc::new(json_path_match::JsonPathMatchFunction));
|
||||
registry.register_scalar(json_path_exists::JsonPathExistsFunction);
|
||||
registry.register_scalar(json_path_match::JsonPathMatchFunction);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -38,11 +38,11 @@ use crate::function_registry::FunctionRegistry;
|
||||
///
|
||||
/// Usage: matches(`<col>`, `<pattern>`) -> boolean
|
||||
#[derive(Clone, Debug, Default)]
|
||||
pub(crate) struct MatchesFunction;
|
||||
pub struct MatchesFunction;
|
||||
|
||||
impl MatchesFunction {
|
||||
pub fn register(registry: &FunctionRegistry) {
|
||||
registry.register(Arc::new(MatchesFunction));
|
||||
registry.register_scalar(MatchesFunction);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -77,7 +77,7 @@ pub struct MatchesTermFunction;
|
||||
|
||||
impl MatchesTermFunction {
|
||||
pub fn register(registry: &FunctionRegistry) {
|
||||
registry.register(Arc::new(MatchesTermFunction));
|
||||
registry.register_scalar(MatchesTermFunction);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -18,7 +18,6 @@ mod pow;
|
||||
mod rate;
|
||||
|
||||
use std::fmt;
|
||||
use std::sync::Arc;
|
||||
|
||||
pub use clamp::{ClampFunction, ClampMaxFunction, ClampMinFunction};
|
||||
use common_query::error::{GeneralDataFusionSnafu, Result};
|
||||
@@ -39,13 +38,13 @@ pub(crate) struct MathFunction;
|
||||
|
||||
impl MathFunction {
|
||||
pub fn register(registry: &FunctionRegistry) {
|
||||
registry.register(Arc::new(ModuloFunction));
|
||||
registry.register(Arc::new(PowFunction));
|
||||
registry.register(Arc::new(RateFunction));
|
||||
registry.register(Arc::new(RangeFunction));
|
||||
registry.register(Arc::new(ClampFunction));
|
||||
registry.register(Arc::new(ClampMinFunction));
|
||||
registry.register(Arc::new(ClampMaxFunction));
|
||||
registry.register_scalar(ModuloFunction);
|
||||
registry.register_scalar(PowFunction);
|
||||
registry.register_scalar(RateFunction);
|
||||
registry.register_scalar(RangeFunction);
|
||||
registry.register_scalar(ClampFunction);
|
||||
registry.register_scalar(ClampMinFunction);
|
||||
registry.register_scalar(ClampMaxFunction);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -12,7 +12,6 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::Arc;
|
||||
mod to_unixtime;
|
||||
|
||||
use to_unixtime::ToUnixtimeFunction;
|
||||
@@ -23,6 +22,6 @@ pub(crate) struct TimestampFunction;
|
||||
|
||||
impl TimestampFunction {
|
||||
pub fn register(registry: &FunctionRegistry) {
|
||||
registry.register(Arc::new(ToUnixtimeFunction));
|
||||
registry.register_scalar(ToUnixtimeFunction);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -16,7 +16,6 @@
|
||||
|
||||
use std::fmt;
|
||||
use std::fmt::Display;
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_query::error::{DowncastVectorSnafu, InvalidFuncArgsSnafu, Result};
|
||||
use common_query::prelude::{Signature, Volatility};
|
||||
@@ -44,7 +43,7 @@ pub struct UddSketchCalcFunction;
|
||||
|
||||
impl UddSketchCalcFunction {
|
||||
pub fn register(registry: &FunctionRegistry) {
|
||||
registry.register(Arc::new(UddSketchCalcFunction));
|
||||
registry.register_scalar(UddSketchCalcFunction);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -17,10 +17,8 @@ mod distance;
|
||||
mod elem_product;
|
||||
mod elem_sum;
|
||||
pub mod impl_conv;
|
||||
pub(crate) mod product;
|
||||
mod scalar_add;
|
||||
mod scalar_mul;
|
||||
pub(crate) mod sum;
|
||||
mod vector_add;
|
||||
mod vector_dim;
|
||||
mod vector_div;
|
||||
@@ -30,37 +28,34 @@ mod vector_norm;
|
||||
mod vector_sub;
|
||||
mod vector_subvector;
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use crate::function_registry::FunctionRegistry;
|
||||
|
||||
pub(crate) struct VectorFunction;
|
||||
|
||||
impl VectorFunction {
|
||||
pub fn register(registry: &FunctionRegistry) {
|
||||
// conversion
|
||||
registry.register(Arc::new(convert::ParseVectorFunction));
|
||||
registry.register(Arc::new(convert::VectorToStringFunction));
|
||||
registry.register_scalar(convert::ParseVectorFunction);
|
||||
registry.register_scalar(convert::VectorToStringFunction);
|
||||
|
||||
// distance
|
||||
registry.register(Arc::new(distance::CosDistanceFunction));
|
||||
registry.register(Arc::new(distance::DotProductFunction));
|
||||
registry.register(Arc::new(distance::L2SqDistanceFunction));
|
||||
registry.register_scalar(distance::CosDistanceFunction);
|
||||
registry.register_scalar(distance::DotProductFunction);
|
||||
registry.register_scalar(distance::L2SqDistanceFunction);
|
||||
|
||||
// scalar calculation
|
||||
registry.register(Arc::new(scalar_add::ScalarAddFunction));
|
||||
registry.register(Arc::new(scalar_mul::ScalarMulFunction));
|
||||
registry.register_scalar(scalar_add::ScalarAddFunction);
|
||||
registry.register_scalar(scalar_mul::ScalarMulFunction);
|
||||
|
||||
// vector calculation
|
||||
registry.register(Arc::new(vector_add::VectorAddFunction));
|
||||
registry.register(Arc::new(vector_sub::VectorSubFunction));
|
||||
registry.register(Arc::new(vector_mul::VectorMulFunction));
|
||||
registry.register(Arc::new(vector_div::VectorDivFunction));
|
||||
registry.register(Arc::new(vector_norm::VectorNormFunction));
|
||||
registry.register(Arc::new(vector_dim::VectorDimFunction));
|
||||
registry.register(Arc::new(vector_kth_elem::VectorKthElemFunction));
|
||||
registry.register(Arc::new(vector_subvector::VectorSubvectorFunction));
|
||||
registry.register(Arc::new(elem_sum::ElemSumFunction));
|
||||
registry.register(Arc::new(elem_product::ElemProductFunction));
|
||||
registry.register_scalar(vector_add::VectorAddFunction);
|
||||
registry.register_scalar(vector_sub::VectorSubFunction);
|
||||
registry.register_scalar(vector_mul::VectorMulFunction);
|
||||
registry.register_scalar(vector_div::VectorDivFunction);
|
||||
registry.register_scalar(vector_norm::VectorNormFunction);
|
||||
registry.register_scalar(vector_dim::VectorDimFunction);
|
||||
registry.register_scalar(vector_kth_elem::VectorKthElemFunction);
|
||||
registry.register_scalar(vector_subvector::VectorSubvectorFunction);
|
||||
registry.register_scalar(elem_sum::ElemSumFunction);
|
||||
registry.register_scalar(elem_product::ElemProductFunction);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -148,6 +148,17 @@ impl FunctionState {
|
||||
) -> Result<api::v1::flow::FlowResponse> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn adjust(
|
||||
&self,
|
||||
_catalog: &str,
|
||||
_flow: &str,
|
||||
_min_run_interval_secs: u64,
|
||||
_max_filter_num_per_query: usize,
|
||||
_ctx: QueryContextRef,
|
||||
) -> Result<api::v1::flow::FlowResponse> {
|
||||
todo!()
|
||||
}
|
||||
}
|
||||
|
||||
Self {
|
||||
|
||||
@@ -36,13 +36,13 @@ pub(crate) struct SystemFunction;
|
||||
|
||||
impl SystemFunction {
|
||||
pub fn register(registry: &FunctionRegistry) {
|
||||
registry.register(Arc::new(BuildFunction));
|
||||
registry.register(Arc::new(VersionFunction));
|
||||
registry.register(Arc::new(CurrentSchemaFunction));
|
||||
registry.register(Arc::new(DatabaseFunction));
|
||||
registry.register(Arc::new(SessionUserFunction));
|
||||
registry.register(Arc::new(ReadPreferenceFunction));
|
||||
registry.register(Arc::new(TimezoneFunction));
|
||||
registry.register_scalar(BuildFunction);
|
||||
registry.register_scalar(VersionFunction);
|
||||
registry.register_scalar(CurrentSchemaFunction);
|
||||
registry.register_scalar(DatabaseFunction);
|
||||
registry.register_scalar(SessionUserFunction);
|
||||
registry.register_scalar(ReadPreferenceFunction);
|
||||
registry.register_scalar(TimezoneFunction);
|
||||
registry.register_async(Arc::new(ProcedureStateFunction));
|
||||
PGCatalogFunction::register(registry);
|
||||
}
|
||||
|
||||
@@ -16,8 +16,6 @@ mod pg_get_userbyid;
|
||||
mod table_is_visible;
|
||||
mod version;
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use pg_get_userbyid::PGGetUserByIdFunction;
|
||||
use table_is_visible::PGTableIsVisibleFunction;
|
||||
use version::PGVersionFunction;
|
||||
@@ -35,8 +33,8 @@ pub(super) struct PGCatalogFunction;
|
||||
|
||||
impl PGCatalogFunction {
|
||||
pub fn register(registry: &FunctionRegistry) {
|
||||
registry.register(Arc::new(PGTableIsVisibleFunction));
|
||||
registry.register(Arc::new(PGGetUserByIdFunction));
|
||||
registry.register(Arc::new(PGVersionFunction));
|
||||
registry.register_scalar(PGTableIsVisibleFunction);
|
||||
registry.register_scalar(PGGetUserByIdFunction);
|
||||
registry.register_scalar(PGVersionFunction);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -64,6 +64,19 @@ impl Default for FlightEncoder {
|
||||
}
|
||||
|
||||
impl FlightEncoder {
|
||||
/// Creates new [FlightEncoder] with compression disabled.
|
||||
pub fn with_compression_disabled() -> Self {
|
||||
let write_options = writer::IpcWriteOptions::default()
|
||||
.try_with_compression(None)
|
||||
.unwrap();
|
||||
|
||||
Self {
|
||||
write_options,
|
||||
data_gen: writer::IpcDataGenerator::default(),
|
||||
dictionary_tracker: writer::DictionaryTracker::new(false),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn encode(&mut self, flight_message: FlightMessage) -> FlightData {
|
||||
match flight_message {
|
||||
FlightMessage::Schema(schema) => SchemaAsIpc::new(&schema, &self.write_options).into(),
|
||||
|
||||
@@ -35,6 +35,9 @@ pub const FLOWNODE_LEASE_SECS: u64 = DATANODE_LEASE_SECS;
|
||||
/// The lease seconds of metasrv leader.
|
||||
pub const META_LEASE_SECS: u64 = 5;
|
||||
|
||||
/// The keep-alive interval of the Postgres connection.
|
||||
pub const POSTGRES_KEEP_ALIVE_SECS: u64 = 30;
|
||||
|
||||
/// In a lease, there are two opportunities for renewal.
|
||||
pub const META_KEEP_ALIVE_INTERVAL_SECS: u64 = META_LEASE_SECS / 2;
|
||||
|
||||
|
||||
@@ -37,6 +37,9 @@ use crate::tracing_sampler::{create_sampler, TracingSampleOptions};
|
||||
|
||||
pub const DEFAULT_OTLP_ENDPOINT: &str = "http://localhost:4317";
|
||||
|
||||
/// The default logs directory.
|
||||
pub const DEFAULT_LOGGING_DIR: &str = "logs";
|
||||
|
||||
// Handle for reloading log level
|
||||
pub static RELOAD_HANDLE: OnceCell<tracing_subscriber::reload::Handle<Targets, Registry>> =
|
||||
OnceCell::new();
|
||||
@@ -133,7 +136,8 @@ impl Eq for LoggingOptions {}
|
||||
impl Default for LoggingOptions {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
dir: "./greptimedb_data/logs".to_string(),
|
||||
// The directory path will be configured at application startup, typically using the data home directory as a base.
|
||||
dir: "".to_string(),
|
||||
level: None,
|
||||
log_format: LogFormat::Text,
|
||||
enable_otlp_tracing: false,
|
||||
|
||||
@@ -18,7 +18,7 @@ use core::time::Duration;
|
||||
|
||||
use common_base::readable_size::ReadableSize;
|
||||
use common_base::secrets::{ExposeSecret, SecretString};
|
||||
use common_config::Configurable;
|
||||
use common_config::{Configurable, DEFAULT_DATA_HOME};
|
||||
pub use common_procedure::options::ProcedureConfig;
|
||||
use common_telemetry::logging::{LoggingOptions, TracingOptions};
|
||||
use common_wal::config::DatanodeWalConfig;
|
||||
@@ -36,9 +36,6 @@ use servers::http::HttpOptions;
|
||||
|
||||
pub const DEFAULT_OBJECT_STORE_CACHE_SIZE: ReadableSize = ReadableSize::gb(5);
|
||||
|
||||
/// Default data home in file storage
|
||||
const DEFAULT_DATA_HOME: &str = "./greptimedb_data";
|
||||
|
||||
/// Object storage config
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
|
||||
#[serde(tag = "type")]
|
||||
|
||||
@@ -372,6 +372,7 @@ impl DatanodeBuilder {
|
||||
opts.max_concurrent_queries,
|
||||
//TODO: revaluate the hardcoded timeout on the next version of datanode concurrency limiter.
|
||||
Duration::from_millis(100),
|
||||
opts.grpc.flight_compression,
|
||||
);
|
||||
|
||||
let object_store_manager = Self::build_object_store_manager(&opts.storage).await?;
|
||||
|
||||
@@ -50,6 +50,7 @@ use query::QueryEngineRef;
|
||||
use servers::error::{self as servers_error, ExecuteGrpcRequestSnafu, Result as ServerResult};
|
||||
use servers::grpc::flight::{FlightCraft, FlightRecordBatchStream, TonicStream};
|
||||
use servers::grpc::region_server::RegionServerHandler;
|
||||
use servers::grpc::FlightCompression;
|
||||
use session::context::{QueryContextBuilder, QueryContextRef};
|
||||
use snafu::{ensure, OptionExt, ResultExt};
|
||||
use store_api::metric_engine_consts::{
|
||||
@@ -80,6 +81,7 @@ use crate::event_listener::RegionServerEventListenerRef;
|
||||
#[derive(Clone)]
|
||||
pub struct RegionServer {
|
||||
inner: Arc<RegionServerInner>,
|
||||
flight_compression: FlightCompression,
|
||||
}
|
||||
|
||||
pub struct RegionStat {
|
||||
@@ -93,6 +95,7 @@ impl RegionServer {
|
||||
query_engine: QueryEngineRef,
|
||||
runtime: Runtime,
|
||||
event_listener: RegionServerEventListenerRef,
|
||||
flight_compression: FlightCompression,
|
||||
) -> Self {
|
||||
Self::with_table_provider(
|
||||
query_engine,
|
||||
@@ -101,6 +104,7 @@ impl RegionServer {
|
||||
Arc::new(DummyTableProviderFactory),
|
||||
0,
|
||||
Duration::from_millis(0),
|
||||
flight_compression,
|
||||
)
|
||||
}
|
||||
|
||||
@@ -111,6 +115,7 @@ impl RegionServer {
|
||||
table_provider_factory: TableProviderFactoryRef,
|
||||
max_concurrent_queries: usize,
|
||||
concurrent_query_limiter_timeout: Duration,
|
||||
flight_compression: FlightCompression,
|
||||
) -> Self {
|
||||
Self {
|
||||
inner: Arc::new(RegionServerInner::new(
|
||||
@@ -123,6 +128,7 @@ impl RegionServer {
|
||||
concurrent_query_limiter_timeout,
|
||||
),
|
||||
)),
|
||||
flight_compression,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -536,7 +542,11 @@ impl FlightCraft for RegionServer {
|
||||
.trace(tracing_context.attach(info_span!("RegionServer::handle_read")))
|
||||
.await?;
|
||||
|
||||
let stream = Box::pin(FlightRecordBatchStream::new(result, tracing_context));
|
||||
let stream = Box::pin(FlightRecordBatchStream::new(
|
||||
result,
|
||||
tracing_context,
|
||||
self.flight_compression,
|
||||
));
|
||||
Ok(Response::new(stream))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -19,16 +19,16 @@ use std::time::Duration;
|
||||
use api::region::RegionResponse;
|
||||
use async_trait::async_trait;
|
||||
use common_error::ext::BoxedError;
|
||||
use common_function::function::FunctionRef;
|
||||
use common_function::scalars::aggregate::AggregateFunctionMetaRef;
|
||||
use common_function::function_factory::ScalarFunctionFactory;
|
||||
use common_query::Output;
|
||||
use common_runtime::runtime::{BuilderBuild, RuntimeTrait};
|
||||
use common_runtime::Runtime;
|
||||
use datafusion_expr::LogicalPlan;
|
||||
use datafusion_expr::{AggregateUDF, LogicalPlan};
|
||||
use query::dataframe::DataFrame;
|
||||
use query::planner::LogicalPlanner;
|
||||
use query::query_engine::{DescribeResult, QueryEngineState};
|
||||
use query::{QueryEngine, QueryEngineContext};
|
||||
use servers::grpc::FlightCompression;
|
||||
use session::context::QueryContextRef;
|
||||
use store_api::metadata::RegionMetadataRef;
|
||||
use store_api::region_engine::{
|
||||
@@ -76,9 +76,9 @@ impl QueryEngine for MockQueryEngine {
|
||||
unimplemented!()
|
||||
}
|
||||
|
||||
fn register_aggregate_function(&self, _func: AggregateFunctionMetaRef) {}
|
||||
fn register_aggregate_function(&self, _func: AggregateUDF) {}
|
||||
|
||||
fn register_function(&self, _func: FunctionRef) {}
|
||||
fn register_scalar_function(&self, _func: ScalarFunctionFactory) {}
|
||||
|
||||
fn read_table(&self, _table: TableRef) -> query::error::Result<DataFrame> {
|
||||
unimplemented!()
|
||||
@@ -98,6 +98,7 @@ pub fn mock_region_server() -> RegionServer {
|
||||
Arc::new(MockQueryEngine),
|
||||
Runtime::builder().build().unwrap(),
|
||||
Box::new(NoopRegionServerEventListener),
|
||||
FlightCompression::default(),
|
||||
)
|
||||
}
|
||||
|
||||
|
||||
@@ -61,6 +61,7 @@ prost.workspace = true
|
||||
query.workspace = true
|
||||
rand.workspace = true
|
||||
serde.workspace = true
|
||||
serde_json.workspace = true
|
||||
servers.workspace = true
|
||||
session.workspace = true
|
||||
smallvec.workspace = true
|
||||
|
||||
@@ -18,7 +18,7 @@ use std::sync::atomic::AtomicBool;
use std::sync::Arc;

use api::v1::flow::{
flow_request, CreateRequest, DropRequest, FlowRequest, FlowResponse, FlushFlow,
flow_request, AdjustFlow, CreateRequest, DropRequest, FlowRequest, FlowResponse, FlushFlow,
};
use api::v1::region::InsertRequests;
use catalog::CatalogManager;
@@ -32,6 +32,7 @@ use common_telemetry::{error, info, trace, warn};
use datatypes::value::Value;
use futures::TryStreamExt;
use itertools::Itertools;
use serde::{Deserialize, Serialize};
use session::context::QueryContextBuilder;
use snafu::{ensure, IntoError, OptionExt, ResultExt};
use store_api::storage::{RegionId, TableId};
@@ -809,6 +810,25 @@ impl common_meta::node_manager::Flownode for FlowDualEngine {
..Default::default()
})
}
Some(flow_request::Body::Adjust(AdjustFlow { flow_id, options })) => {
#[derive(Debug, Serialize, Deserialize)]
struct Options {
min_run_interval_secs: u64,
max_filter_num_per_query: usize,
}
let options: Options = serde_json::from_str(&options).with_context(|_| {
common_meta::error::DeserializeFromJsonSnafu { input: options }
})?;
self.batching_engine
.adjust_flow(
flow_id.unwrap().id as u64,
options.min_run_interval_secs,
options.max_filter_num_per_query,
)
.await
.map_err(to_meta_err(snafu::location!()))?;
Ok(Default::default())
}
other => common_meta::error::InvalidFlowRequestBodySnafu { body: other }.fail(),
}
}
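For reference, the `options` field carried by `AdjustFlow` is a JSON document that deserializes into the inline `Options` struct above, so a well-formed payload looks like the following (values are illustrative):

```rust
// Matches Options { min_run_interval_secs, max_filter_num_per_query } defined in the match arm.
let options = r#"{"min_run_interval_secs": 5, "max_filter_num_per_query": 20}"#;
```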
@@ -841,93 +861,6 @@ fn to_meta_err(
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl common_meta::node_manager::Flownode for StreamingEngine {
|
||||
async fn handle(&self, request: FlowRequest) -> MetaResult<FlowResponse> {
|
||||
let query_ctx = request
|
||||
.header
|
||||
.and_then(|h| h.query_context)
|
||||
.map(|ctx| ctx.into());
|
||||
match request.body {
|
||||
Some(flow_request::Body::Create(CreateRequest {
|
||||
flow_id: Some(task_id),
|
||||
source_table_ids,
|
||||
sink_table_name: Some(sink_table_name),
|
||||
create_if_not_exists,
|
||||
expire_after,
|
||||
comment,
|
||||
sql,
|
||||
flow_options,
|
||||
or_replace,
|
||||
})) => {
|
||||
let source_table_ids = source_table_ids.into_iter().map(|id| id.id).collect_vec();
|
||||
let sink_table_name = [
|
||||
sink_table_name.catalog_name,
|
||||
sink_table_name.schema_name,
|
||||
sink_table_name.table_name,
|
||||
];
|
||||
let expire_after = expire_after.map(|e| e.value);
|
||||
let args = CreateFlowArgs {
|
||||
flow_id: task_id.id as u64,
|
||||
sink_table_name,
|
||||
source_table_ids,
|
||||
create_if_not_exists,
|
||||
or_replace,
|
||||
expire_after,
|
||||
comment: Some(comment),
|
||||
sql: sql.clone(),
|
||||
flow_options,
|
||||
query_ctx,
|
||||
};
|
||||
let ret = self
|
||||
.create_flow(args)
|
||||
.await
|
||||
.map_err(BoxedError::new)
|
||||
.with_context(|_| CreateFlowSnafu { sql: sql.clone() })
|
||||
.map_err(to_meta_err(snafu::location!()))?;
|
||||
METRIC_FLOW_TASK_COUNT.inc();
|
||||
Ok(FlowResponse {
|
||||
affected_flows: ret
|
||||
.map(|id| greptime_proto::v1::FlowId { id: id as u32 })
|
||||
.into_iter()
|
||||
.collect_vec(),
|
||||
..Default::default()
|
||||
})
|
||||
}
|
||||
Some(flow_request::Body::Drop(DropRequest {
|
||||
flow_id: Some(flow_id),
|
||||
})) => {
|
||||
self.remove_flow(flow_id.id as u64)
|
||||
.await
|
||||
.map_err(to_meta_err(snafu::location!()))?;
|
||||
METRIC_FLOW_TASK_COUNT.dec();
|
||||
Ok(Default::default())
|
||||
}
|
||||
Some(flow_request::Body::Flush(FlushFlow {
|
||||
flow_id: Some(flow_id),
|
||||
})) => {
|
||||
let row = self
|
||||
.flush_flow_inner(flow_id.id as u64)
|
||||
.await
|
||||
.map_err(to_meta_err(snafu::location!()))?;
|
||||
Ok(FlowResponse {
|
||||
affected_flows: vec![flow_id],
|
||||
affected_rows: row as u64,
|
||||
..Default::default()
|
||||
})
|
||||
}
|
||||
other => common_meta::error::InvalidFlowRequestBodySnafu { body: other }.fail(),
|
||||
}
|
||||
}
|
||||
|
||||
async fn handle_inserts(&self, request: InsertRequests) -> MetaResult<FlowResponse> {
|
||||
self.handle_inserts_inner(request)
|
||||
.await
|
||||
.map(|_| Default::default())
|
||||
.map_err(to_meta_err(snafu::location!()))
|
||||
}
|
||||
}
|
||||
|
||||
impl FlowEngine for StreamingEngine {
|
||||
async fn create_flow(&self, args: CreateFlowArgs) -> Result<Option<FlowId>, Error> {
|
||||
self.create_flow_inner(args).await
|
||||
|
||||
@@ -388,6 +388,20 @@ impl BatchingEngine {
pub async fn flow_exist_inner(&self, flow_id: FlowId) -> bool {
self.tasks.read().await.contains_key(&flow_id)
}

pub async fn adjust_flow(
&self,
flow_id: FlowId,
min_run_interval_secs: u64,
max_filter_num_per_query: usize,
) -> Result<(), Error> {
let task = self.tasks.read().await.get(&flow_id).cloned();
let task = task.with_context(|| FlowNotFoundSnafu { id: flow_id })?;
debug!("Adjusting flow {flow_id} with min_run_interval_secs={} and max_filter_num_per_query={}", min_run_interval_secs, max_filter_num_per_query);
task.adjust(min_run_interval_secs, max_filter_num_per_query);

Ok(())
}
}

impl FlowEngine for BatchingEngine {

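A hedged usage sketch of the new method (the engine binding and all values are placeholders): after `adjust_flow` returns, the task's stored `min_run_interval` is read by `get_next_start_query_time`, shown later in this diff.

```rust
// `engine: &BatchingEngine` is assumed to be in scope; 1024, 5 and 20 are placeholders.
engine.adjust_flow(1024, 5, 20).await?;
```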
@@ -14,8 +14,9 @@
|
||||
|
||||
//! Frontend client used to run a flow as a batching task: a time-window-aware normal query triggered on every tick configured by the user.
|
||||
|
||||
use std::sync::{Arc, Weak};
|
||||
use std::time::SystemTime;
|
||||
use std::collections::HashMap;
|
||||
use std::sync::{Arc, Mutex, Weak};
|
||||
use std::time::{Duration, Instant, SystemTime};
|
||||
|
||||
use api::v1::greptime_request::Request;
|
||||
use api::v1::CreateTableExpr;
|
||||
@@ -26,20 +27,21 @@ use common_meta::cluster::{NodeInfo, NodeInfoKey, Role};
|
||||
use common_meta::peer::Peer;
|
||||
use common_meta::rpc::store::RangeRequest;
|
||||
use common_query::Output;
|
||||
use common_telemetry::warn;
|
||||
use common_telemetry::{debug, warn};
|
||||
use itertools::Itertools;
|
||||
use meta_client::client::MetaClient;
|
||||
use rand::rng;
|
||||
use rand::seq::SliceRandom;
|
||||
use servers::query_handler::grpc::GrpcQueryHandler;
|
||||
use session::context::{QueryContextBuilder, QueryContextRef};
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
|
||||
use crate::batching_mode::task::BatchingTask;
|
||||
use crate::batching_mode::{
|
||||
DEFAULT_BATCHING_ENGINE_QUERY_TIMEOUT, FRONTEND_ACTIVITY_TIMEOUT, GRPC_CONN_TIMEOUT,
|
||||
GRPC_MAX_RETRIES,
|
||||
};
|
||||
use crate::error::{ExternalSnafu, InvalidRequestSnafu, NoAvailableFrontendSnafu, UnexpectedSnafu};
|
||||
use crate::{Error, FlowAuthHeader};
|
||||
use crate::metrics::METRIC_FLOW_BATCHING_ENGINE_GUESS_FE_LOAD;
|
||||
use crate::{Error, FlowAuthHeader, FlowId};
|
||||
|
||||
/// Just like [`GrpcQueryHandler`] but use BoxedError
|
||||
///
|
||||
@@ -74,6 +76,105 @@ impl<
|
||||
|
||||
type HandlerMutable = Arc<std::sync::Mutex<Option<Weak<dyn GrpcQueryHandlerWithBoxedError>>>>;
|
||||
|
||||
/// Statistics about running query on this frontend from flownode
|
||||
#[derive(Debug, Default, Clone)]
|
||||
struct FrontendStat {
|
||||
/// The query for flow id has been running since this timestamp
|
||||
since: HashMap<FlowId, Instant>,
|
||||
/// Query count and total duration of past queries for each flow id,
/// used to compute the average query time per flow
|
||||
past_query_avg: HashMap<FlowId, (usize, Duration)>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Default, Clone)]
|
||||
pub struct FrontendStats {
|
||||
/// The statistics for each flow id
|
||||
stats: Arc<Mutex<HashMap<String, FrontendStat>>>,
|
||||
}
|
||||
|
||||
impl FrontendStats {
|
||||
pub fn observe(&self, frontend_addr: &str, flow_id: FlowId) -> FrontendStatsGuard {
|
||||
let mut stats = self.stats.lock().expect("Failed to lock frontend stats");
|
||||
let stat = stats.entry(frontend_addr.to_string()).or_default();
|
||||
stat.since.insert(flow_id, Instant::now());
|
||||
|
||||
FrontendStatsGuard {
|
||||
stats: self.stats.clone(),
|
||||
frontend_addr: frontend_addr.to_string(),
|
||||
cur: flow_id,
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns frontend addrs sorted by load, from lightest to heaviest.
/// The load is the sum of the average past query time of each flow id plus the elapsed time of all currently running queries.
|
||||
pub fn sort_by_load(&self) -> Vec<String> {
|
||||
let stats = self.stats.lock().expect("Failed to lock frontend stats");
|
||||
let fe_load_factor = stats
|
||||
.iter()
|
||||
.map(|(node_addr, stat)| {
|
||||
// total expected avg running time for all currently running queries
|
||||
let total_expect_avg_run_time = stat
|
||||
.since
|
||||
.keys()
|
||||
.map(|f| {
|
||||
let (count, total_duration) =
|
||||
stat.past_query_avg.get(f).unwrap_or(&(0, Duration::ZERO));
|
||||
if *count == 0 {
|
||||
0.0
|
||||
} else {
|
||||
total_duration.as_secs_f64() / *count as f64
|
||||
}
|
||||
})
|
||||
.sum::<f64>();
|
||||
let total_cur_running_time = stat
|
||||
.since
|
||||
.values()
|
||||
.map(|since| since.elapsed().as_secs_f64())
|
||||
.sum::<f64>();
|
||||
(
|
||||
node_addr.to_string(),
|
||||
total_expect_avg_run_time + total_cur_running_time,
|
||||
)
|
||||
})
|
||||
.sorted_by(|(_, load_a), (_, load_b)| {
|
||||
load_a
|
||||
.partial_cmp(load_b)
|
||||
.unwrap_or(std::cmp::Ordering::Equal)
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
debug!("Frontend load factor: {:?}", fe_load_factor);
|
||||
for (node_addr, load) in &fe_load_factor {
|
||||
METRIC_FLOW_BATCHING_ENGINE_GUESS_FE_LOAD
|
||||
.with_label_values(&[&node_addr.to_string()])
|
||||
.observe(*load);
|
||||
}
|
||||
fe_load_factor
|
||||
.into_iter()
|
||||
.map(|(addr, _)| addr)
|
||||
.collect::<Vec<_>>()
|
||||
}
|
||||
}
|
||||
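To make the scoring in `sort_by_load` concrete (all numbers invented for illustration):

```rust
// Toy restatement of the per-frontend score: average past query time per flow,
// plus elapsed time of queries currently running on that frontend.
let avg_past = 4.0_f64 / 2.0; // two past queries totalling 4s -> 2s average
let running = 3.0_f64;        // one query has been running for 3s
let load = avg_past + running; // 5.0 -> sorts after an idle frontend with load 0.0
```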
|
||||
pub struct FrontendStatsGuard {
|
||||
stats: Arc<Mutex<HashMap<String, FrontendStat>>>,
|
||||
frontend_addr: String,
|
||||
cur: FlowId,
|
||||
}
|
||||
|
||||
impl Drop for FrontendStatsGuard {
|
||||
fn drop(&mut self) {
|
||||
let mut stats = self.stats.lock().expect("Failed to lock frontend stats");
|
||||
if let Some(stat) = stats.get_mut(&self.frontend_addr) {
|
||||
if let Some(since) = stat.since.remove(&self.cur) {
|
||||
let elapsed = since.elapsed();
|
||||
let (count, total_duration) = stat.past_query_avg.entry(self.cur).or_default();
|
||||
*count += 1;
|
||||
*total_duration += elapsed;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A simple frontend client able to execute sql using grpc protocol
|
||||
///
|
||||
/// This is for computation-heavy queries that need to offload computation to the frontend, lifting the load from the flownode
|
||||
@@ -83,6 +184,7 @@ pub enum FrontendClient {
|
||||
meta_client: Arc<MetaClient>,
|
||||
chnl_mgr: ChannelManager,
|
||||
auth: Option<FlowAuthHeader>,
|
||||
fe_stats: FrontendStats,
|
||||
},
|
||||
Standalone {
|
||||
/// for the sake of simplicity still use grpc even in standalone mode
|
||||
@@ -114,6 +216,7 @@ impl FrontendClient {
|
||||
ChannelManager::with_config(cfg)
|
||||
},
|
||||
auth,
|
||||
fe_stats: Default::default(),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -183,7 +286,7 @@ impl FrontendClient {
|
||||
|
||||
/// Get the frontend with recent enough(less than 1 minute from now) `last_activity_ts`
|
||||
/// and is able to process query
|
||||
async fn get_random_active_frontend(
|
||||
pub(crate) async fn get_random_active_frontend(
|
||||
&self,
|
||||
catalog: &str,
|
||||
schema: &str,
|
||||
@@ -192,6 +295,7 @@ impl FrontendClient {
|
||||
meta_client: _,
|
||||
chnl_mgr,
|
||||
auth,
|
||||
fe_stats,
|
||||
} = self
|
||||
else {
|
||||
return UnexpectedSnafu {
|
||||
@@ -208,8 +312,21 @@ impl FrontendClient {
|
||||
.duration_since(SystemTime::UNIX_EPOCH)
|
||||
.unwrap()
|
||||
.as_millis() as i64;
|
||||
// shuffle the frontends to avoid always pick the same one
|
||||
frontends.shuffle(&mut rng());
|
||||
let node_addrs_by_load = fe_stats.sort_by_load();
|
||||
// Map addr to (index + 1) in ascending load order, so the lightest node gets 1 and nodes without stats default to 0
|
||||
let addr2load = node_addrs_by_load
|
||||
.iter()
|
||||
.enumerate()
|
||||
.map(|(i, id)| (id.clone(), i + 1))
|
||||
.collect::<HashMap<_, _>>();
|
||||
// sort frontends by load, from lightest to heaviest
|
||||
frontends.sort_by(|(_, a), (_, b)| {
|
||||
// if not even in stats, treat as 0 load since never been queried
|
||||
let load_a = addr2load.get(&a.peer.addr).unwrap_or(&0);
|
||||
let load_b = addr2load.get(&b.peer.addr).unwrap_or(&0);
|
||||
load_a.cmp(load_b)
|
||||
});
|
||||
debug!("Frontend nodes sorted by load: {:?}", frontends);
|
||||
|
||||
// found node with maximum last_activity_ts
|
||||
for (_, node_info) in frontends
|
||||
@@ -257,6 +374,7 @@ impl FrontendClient {
|
||||
create: CreateTableExpr,
|
||||
catalog: &str,
|
||||
schema: &str,
|
||||
task: Option<&BatchingTask>,
|
||||
) -> Result<u32, Error> {
|
||||
self.handle(
|
||||
Request::Ddl(api::v1::DdlRequest {
|
||||
@@ -264,7 +382,8 @@ impl FrontendClient {
|
||||
}),
|
||||
catalog,
|
||||
schema,
|
||||
&mut None,
|
||||
None,
|
||||
task,
|
||||
)
|
||||
.await
|
||||
}
|
||||
@@ -275,15 +394,31 @@ impl FrontendClient {
|
||||
req: api::v1::greptime_request::Request,
|
||||
catalog: &str,
|
||||
schema: &str,
|
||||
peer_desc: &mut Option<PeerDesc>,
|
||||
use_peer: Option<Peer>,
|
||||
task: Option<&BatchingTask>,
|
||||
) -> Result<u32, Error> {
|
||||
match self {
|
||||
FrontendClient::Distributed { .. } => {
|
||||
let db = self.get_random_active_frontend(catalog, schema).await?;
|
||||
FrontendClient::Distributed {
|
||||
fe_stats, chnl_mgr, ..
|
||||
} => {
|
||||
let db = if let Some(peer) = use_peer {
|
||||
DatabaseWithPeer::new(
|
||||
Database::new(
|
||||
catalog,
|
||||
schema,
|
||||
Client::with_manager_and_urls(
|
||||
chnl_mgr.clone(),
|
||||
vec![peer.addr.clone()],
|
||||
),
|
||||
),
|
||||
peer,
|
||||
)
|
||||
} else {
|
||||
self.get_random_active_frontend(catalog, schema).await?
|
||||
};
|
||||
|
||||
*peer_desc = Some(PeerDesc::Dist {
|
||||
peer: db.peer.clone(),
|
||||
});
|
||||
let flow_id = task.map(|t| t.config.flow_id).unwrap_or_default();
|
||||
let _guard = fe_stats.observe(&db.peer.addr, flow_id);
|
||||
|
||||
db.database
|
||||
.handle_with_retry(req.clone(), GRPC_MAX_RETRIES)
|
||||
|
||||
@@ -32,6 +32,7 @@ use crate::batching_mode::MIN_REFRESH_DURATION;
|
||||
use crate::error::{DatatypesSnafu, InternalSnafu, TimeSnafu, UnexpectedSnafu};
|
||||
use crate::metrics::{
|
||||
METRIC_FLOW_BATCHING_ENGINE_QUERY_TIME_RANGE, METRIC_FLOW_BATCHING_ENGINE_QUERY_WINDOW_CNT,
|
||||
METRIC_FLOW_BATCHING_ENGINE_STALLED_QUERY_WINDOW_CNT,
|
||||
};
|
||||
use crate::{Error, FlowId};
|
||||
|
||||
@@ -52,6 +53,13 @@ pub struct TaskState {
|
||||
pub(crate) shutdown_rx: oneshot::Receiver<()>,
|
||||
/// Task handle
|
||||
pub(crate) task_handle: Option<tokio::task::JoinHandle<()>>,
|
||||
/// Slow Query metrics update task handle
|
||||
pub(crate) slow_query_metric_task: Option<tokio::task::JoinHandle<()>>,
|
||||
|
||||
/// min run interval in seconds
|
||||
pub(crate) min_run_interval: Option<u64>,
|
||||
/// max filter number per query
|
||||
pub(crate) max_filter_num: Option<usize>,
|
||||
}
|
||||
impl TaskState {
|
||||
pub fn new(query_ctx: QueryContextRef, shutdown_rx: oneshot::Receiver<()>) -> Self {
|
||||
@@ -63,6 +71,9 @@ impl TaskState {
|
||||
exec_state: ExecState::Idle,
|
||||
shutdown_rx,
|
||||
task_handle: None,
|
||||
slow_query_metric_task: None,
|
||||
min_run_interval: None,
|
||||
max_filter_num: None,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -87,20 +98,17 @@ impl TaskState {
pub fn get_next_start_query_time(
&self,
flow_id: FlowId,
time_window_size: &Option<Duration>,
_time_window_size: &Option<Duration>,
max_timeout: Option<Duration>,
) -> Instant {
let last_duration = max_timeout
let next_duration = max_timeout
.unwrap_or(self.last_query_duration)
.min(self.last_query_duration)
.max(MIN_REFRESH_DURATION);

let next_duration = time_window_size
.map(|t| {
let half = t / 2;
half.max(last_duration)
})
.unwrap_or(last_duration);
.max(
self.min_run_interval
.map(Duration::from_secs)
.unwrap_or(MIN_REFRESH_DURATION),
);

// if there are dirty time windows, execute immediately to clean them
if self.dirty_time_windows.windows.is_empty() {
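With the rework above, the next run waits for the smaller of `max_timeout` and the last query duration, floored by the per-flow `min_run_interval` set via adjust (or `MIN_REFRESH_DURATION` when none is set). A compact, hedged restatement of that computation with illustrative names:

```rust
use std::time::Duration;

// `min_refresh` stands in for MIN_REFRESH_DURATION; `min_run_interval` is the
// per-flow value (in seconds) configured through adjust_flow.
fn next_wait(
    last_query: Duration,
    max_timeout: Option<Duration>,
    min_refresh: Duration,
    min_run_interval: Option<u64>,
) -> Duration {
    max_timeout
        .unwrap_or(last_query)
        .min(last_query)
        .max(min_run_interval.map(Duration::from_secs).unwrap_or(min_refresh))
}
```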
@@ -206,47 +214,69 @@ impl DirtyTimeWindows {
|
||||
|
||||
// get the first `window_cnt` time windows
|
||||
let max_time_range = window_size * window_cnt as i32;
|
||||
let nth = {
|
||||
let mut cur_time_range = chrono::Duration::zero();
|
||||
let mut nth_key = None;
|
||||
for (idx, (start, end)) in self.windows.iter().enumerate() {
|
||||
// if time range is too long, stop
|
||||
if cur_time_range > max_time_range {
|
||||
nth_key = Some(*start);
|
||||
break;
|
||||
}
|
||||
|
||||
// if we have enough time windows, stop
|
||||
if idx >= window_cnt {
|
||||
nth_key = Some(*start);
|
||||
break;
|
||||
}
|
||||
let mut to_be_query = BTreeMap::new();
|
||||
let mut new_windows = self.windows.clone();
|
||||
let mut cur_time_range = chrono::Duration::zero();
|
||||
for (idx, (start, end)) in self.windows.iter().enumerate() {
|
||||
let first_end = start
|
||||
.add_duration(window_size.to_std().unwrap())
|
||||
.context(TimeSnafu)?;
|
||||
let end = end.unwrap_or(first_end);
|
||||
|
||||
if let Some(end) = end {
|
||||
if let Some(x) = end.sub(start) {
|
||||
cur_time_range += x;
|
||||
}
|
||||
}
|
||||
// if time range is too long, stop
|
||||
if cur_time_range >= max_time_range {
|
||||
break;
|
||||
}
|
||||
|
||||
nth_key
|
||||
};
|
||||
let first_nth = {
|
||||
if let Some(nth) = nth {
|
||||
let mut after = self.windows.split_off(&nth);
|
||||
std::mem::swap(&mut self.windows, &mut after);
|
||||
|
||||
after
|
||||
} else {
|
||||
std::mem::take(&mut self.windows)
|
||||
// if we have enough time windows, stop
|
||||
if idx >= window_cnt {
|
||||
break;
|
||||
}
|
||||
};
|
||||
|
||||
if let Some(x) = end.sub(start) {
|
||||
if cur_time_range + x <= max_time_range {
|
||||
to_be_query.insert(*start, Some(end));
|
||||
new_windows.remove(start);
|
||||
cur_time_range += x;
|
||||
} else {
|
||||
// too large a window, split it
|
||||
// split at window_size * times
|
||||
let surplus = max_time_range - cur_time_range;
|
||||
let times = surplus.num_seconds() / window_size.num_seconds();
|
||||
|
||||
let split_offset = window_size * times as i32;
|
||||
let split_at = start
|
||||
.add_duration(split_offset.to_std().unwrap())
|
||||
.context(TimeSnafu)?;
|
||||
to_be_query.insert(*start, Some(split_at));
|
||||
|
||||
// remove the original window
|
||||
new_windows.remove(start);
|
||||
new_windows.insert(split_at, Some(end));
|
||||
cur_time_range += split_offset;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
self.windows = new_windows;
|
||||
|
||||
METRIC_FLOW_BATCHING_ENGINE_QUERY_WINDOW_CNT
|
||||
.with_label_values(&[flow_id.to_string().as_str()])
|
||||
.observe(first_nth.len() as f64);
|
||||
.with_label_values(&[
|
||||
flow_id.to_string().as_str(),
|
||||
format!("{}", window_size).as_str(),
|
||||
])
|
||||
.observe(to_be_query.len() as f64);
|
||||
|
||||
let full_time_range = first_nth
|
||||
METRIC_FLOW_BATCHING_ENGINE_STALLED_QUERY_WINDOW_CNT
|
||||
.with_label_values(&[
|
||||
flow_id.to_string().as_str(),
|
||||
format!("{}", window_size).as_str(),
|
||||
])
|
||||
.observe(self.windows.len() as f64);
|
||||
|
||||
let full_time_range = to_be_query
|
||||
.iter()
|
||||
.fold(chrono::Duration::zero(), |acc, (start, end)| {
|
||||
if let Some(end) = end {
|
||||
@@ -257,11 +287,14 @@ impl DirtyTimeWindows {
|
||||
})
|
||||
.num_seconds() as f64;
|
||||
METRIC_FLOW_BATCHING_ENGINE_QUERY_TIME_RANGE
|
||||
.with_label_values(&[flow_id.to_string().as_str()])
|
||||
.with_label_values(&[
|
||||
flow_id.to_string().as_str(),
|
||||
format!("{}", window_size).as_str(),
|
||||
])
|
||||
.observe(full_time_range);
|
||||
|
||||
let mut expr_lst = vec![];
|
||||
for (start, end) in first_nth.into_iter() {
|
||||
for (start, end) in to_be_query.into_iter() {
|
||||
// align using time window exprs
|
||||
let (start, end) = if let Some(ctx) = task_ctx {
|
||||
let Some(time_window_expr) = &ctx.config.time_window_expr else {
|
||||
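The reworked gen_filter_exprs above accumulates dirty windows into to_be_query until max_time_range is reached; when a window would overshoot the budget, it is split at a whole multiple of window_size and the remainder is kept back for a later run (and counted by the stalled-window metric). A hedged sketch of just the split arithmetic with chrono durations and simplified types; the real code additionally realigns the bounds with the flow's time window expression:

use chrono::Duration;

// Given the remaining budget (`surplus`) and the flow's window size, compute where an
// oversized dirty window should be split: the largest whole multiple of `window_size`
// that still fits. This mirrors the arithmetic in the diff; the free function itself
// is an illustrative stand-in, not the crate's API.
fn split_offset(surplus: Duration, window_size: Duration) -> Duration {
    let times = surplus.num_seconds() / window_size.num_seconds();
    window_size * times as i32
}

fn main() {
    // Budget of 50s left, 3s windows: split after 48s (16 whole windows).
    let offset = split_offset(Duration::seconds(50), Duration::seconds(3));
    assert_eq!(offset.num_seconds(), 48);
}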
@@ -495,6 +528,64 @@ mod test {
|
||||
"((ts >= CAST('1970-01-01 00:00:00' AS TIMESTAMP)) AND (ts < CAST('1970-01-01 00:00:21' AS TIMESTAMP)))",
|
||||
)
|
||||
),
|
||||
// split range
|
||||
(
|
||||
Vec::from_iter((0..20).map(|i|Timestamp::new_second(i*3)).chain(std::iter::once(
|
||||
Timestamp::new_second(60 + 3 * (DirtyTimeWindows::MERGE_DIST as i64 + 1)),
|
||||
))),
|
||||
(chrono::Duration::seconds(3), None),
|
||||
BTreeMap::from([
|
||||
(
|
||||
Timestamp::new_second(0),
|
||||
Some(Timestamp::new_second(
|
||||
60
|
||||
)),
|
||||
),
|
||||
(
|
||||
Timestamp::new_second(60 + 3 * (DirtyTimeWindows::MERGE_DIST as i64 + 1)),
|
||||
Some(Timestamp::new_second(
|
||||
60 + 3 * (DirtyTimeWindows::MERGE_DIST as i64 + 1) + 3
|
||||
)),
|
||||
)]),
|
||||
Some(
|
||||
"((ts >= CAST('1970-01-01 00:00:00' AS TIMESTAMP)) AND (ts < CAST('1970-01-01 00:01:00' AS TIMESTAMP)))",
|
||||
)
|
||||
),
|
||||
// split 2 min into 1 min
|
||||
(
|
||||
Vec::from_iter((0..40).map(|i|Timestamp::new_second(i*3))),
|
||||
(chrono::Duration::seconds(3), None),
|
||||
BTreeMap::from([
|
||||
(
|
||||
Timestamp::new_second(0),
|
||||
Some(Timestamp::new_second(
|
||||
40 * 3
|
||||
)),
|
||||
)]),
|
||||
Some(
|
||||
"((ts >= CAST('1970-01-01 00:00:00' AS TIMESTAMP)) AND (ts < CAST('1970-01-01 00:01:00' AS TIMESTAMP)))",
|
||||
)
|
||||
),
|
||||
// split 3s + 1min into 3s + 57s
|
||||
(
|
||||
Vec::from_iter(std::iter::once(Timestamp::new_second(0)).chain((0..40).map(|i|Timestamp::new_second(20+i*3)))),
|
||||
(chrono::Duration::seconds(3), None),
|
||||
BTreeMap::from([
|
||||
(
|
||||
Timestamp::new_second(0),
|
||||
Some(Timestamp::new_second(
|
||||
3
|
||||
)),
|
||||
),(
|
||||
Timestamp::new_second(20),
|
||||
Some(Timestamp::new_second(
|
||||
140
|
||||
)),
|
||||
)]),
|
||||
Some(
|
||||
"(((ts >= CAST('1970-01-01 00:00:00' AS TIMESTAMP)) AND (ts < CAST('1970-01-01 00:00:03' AS TIMESTAMP))) OR ((ts >= CAST('1970-01-01 00:00:20' AS TIMESTAMP)) AND (ts < CAST('1970-01-01 00:01:17' AS TIMESTAMP))))",
|
||||
)
|
||||
),
|
||||
// expired
|
||||
(
|
||||
vec![
|
||||
@@ -511,6 +602,8 @@ mod test {
|
||||
None
|
||||
),
|
||||
];
|
||||
// let len = testcases.len();
|
||||
// let testcases = testcases[(len - 2)..(len - 1)].to_vec();
|
||||
for (lower_bounds, (window_size, expire_lower_bound), expected, expected_filter_expr) in
|
||||
testcases
|
||||
{
|
||||
|
||||
@@ -61,7 +61,8 @@ use crate::error::{
|
||||
SubstraitEncodeLogicalPlanSnafu, UnexpectedSnafu,
|
||||
};
|
||||
use crate::metrics::{
|
||||
METRIC_FLOW_BATCHING_ENGINE_QUERY_TIME, METRIC_FLOW_BATCHING_ENGINE_SLOW_QUERY,
|
||||
METRIC_FLOW_BATCHING_ENGINE_QUERY_TIME, METRIC_FLOW_BATCHING_ENGINE_REAL_TIME_SLOW_QUERY_CNT,
|
||||
METRIC_FLOW_BATCHING_ENGINE_SLOW_QUERY,
|
||||
};
|
||||
use crate::{Error, FlowId};
|
||||
|
||||
@@ -81,6 +82,14 @@ pub struct TaskConfig {
|
||||
query_type: QueryType,
|
||||
}
|
||||
|
||||
impl TaskConfig {
|
||||
pub fn time_window_size(&self) -> Option<Duration> {
|
||||
self.time_window_expr
|
||||
.as_ref()
|
||||
.and_then(|expr| *expr.time_window_size())
|
||||
}
|
||||
}
|
||||
|
||||
fn determine_query_type(query: &str, query_ctx: &QueryContextRef) -> Result<QueryType, Error> {
|
||||
let stmts =
|
||||
ParserContext::create_with_dialect(query, query_ctx.sql_dialect(), ParseOptions::default())
|
||||
@@ -144,6 +153,12 @@ impl BatchingTask {
        })
    }

    pub fn adjust(&self, min_run_interval_secs: u64, max_filter_num_per_query: usize) {
        let mut state = self.state.write().unwrap();
        state.min_run_interval = Some(min_run_interval_secs);
        state.max_filter_num = Some(max_filter_num_per_query);
    }
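The new adjust hook lets the flownode tune a running task: min_run_interval feeds the scheduler shown earlier, and max_filter_num caps how many dirty-window filters a single query may carry, falling back to DirtyTimeWindows::MAX_FILTER_NUM when unset. An illustrative sketch of both sides under assumed, simplified types (the real state also holds the dirty windows and task handles):

use std::sync::{Arc, RwLock};

// Simplified stand-in for the task state touched by `adjust` in the diff.
#[derive(Default)]
struct State {
    min_run_interval: Option<u64>, // seconds between runs
    max_filter_num: Option<usize>, // cap on time-window filters per query
}

struct Task {
    state: Arc<RwLock<State>>,
}

impl Task {
    // Assumed default, mirroring DirtyTimeWindows::MAX_FILTER_NUM in spirit only.
    const MAX_FILTER_NUM: usize = 20;

    fn adjust(&self, min_run_interval_secs: u64, max_filter_num_per_query: usize) {
        let mut state = self.state.write().unwrap();
        state.min_run_interval = Some(min_run_interval_secs);
        state.max_filter_num = Some(max_filter_num_per_query);
    }

    fn effective_filter_cap(&self) -> usize {
        // Readers fall back to the compile-time default when no override was set.
        self.state
            .read()
            .unwrap()
            .max_filter_num
            .unwrap_or(Self::MAX_FILTER_NUM)
    }
}

fn main() {
    let task = Task {
        state: Arc::new(RwLock::new(State::default())),
    };
    task.adjust(30, 10);
    assert_eq!(task.effective_filter_cap(), 10);
}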
|
||||
/// mark time window range (now - expire_after, now) as dirty (or (0, now) if expire_after not set)
|
||||
///
|
||||
/// useful for flush_flow to flush dirty time windows range
|
||||
@@ -280,7 +295,7 @@ impl BatchingTask {
|
||||
let catalog = &self.config.sink_table_name[0];
|
||||
let schema = &self.config.sink_table_name[1];
|
||||
frontend_client
|
||||
.create(expr.clone(), catalog, schema)
|
||||
.create(expr.clone(), catalog, schema, Some(self))
|
||||
.await?;
|
||||
Ok(())
|
||||
}
|
||||
@@ -328,11 +343,53 @@ impl BatchingTask {
|
||||
})?;
|
||||
|
||||
let plan = expanded_plan;
|
||||
let mut peer_desc = None;
|
||||
|
||||
let db = frontend_client
|
||||
.get_random_active_frontend(catalog, schema)
|
||||
.await?;
|
||||
let peer_desc = db.peer.clone();
|
||||
|
||||
let (tx, mut rx) = oneshot::channel();
|
||||
let peer_inner = peer_desc.clone();
|
||||
let window_size_pretty = format!(
|
||||
"{}s",
|
||||
self.config.time_window_size().unwrap_or_default().as_secs()
|
||||
);
|
||||
let inner_window_size_pretty = window_size_pretty.clone();
|
||||
let flow_id = self.config.flow_id;
|
||||
let slow_query_metric_task = tokio::task::spawn(async move {
|
||||
tokio::time::sleep(SLOW_QUERY_THRESHOLD).await;
|
||||
METRIC_FLOW_BATCHING_ENGINE_REAL_TIME_SLOW_QUERY_CNT
|
||||
.with_label_values(&[
|
||||
flow_id.to_string().as_str(),
|
||||
&peer_inner.to_string(),
|
||||
inner_window_size_pretty.as_str(),
|
||||
])
|
||||
.add(1.0);
|
||||
while rx.try_recv() == Err(TryRecvError::Empty) {
|
||||
// sleep for a while before next update
|
||||
tokio::time::sleep(MIN_REFRESH_DURATION).await;
|
||||
}
|
||||
METRIC_FLOW_BATCHING_ENGINE_REAL_TIME_SLOW_QUERY_CNT
|
||||
.with_label_values(&[
|
||||
flow_id.to_string().as_str(),
|
||||
&peer_inner.to_string(),
|
||||
inner_window_size_pretty.as_str(),
|
||||
])
|
||||
.sub(1.0);
|
||||
});
|
||||
self.state.write().unwrap().slow_query_metric_task = Some(slow_query_metric_task);
|
||||
|
||||
let res = {
|
||||
let _timer = METRIC_FLOW_BATCHING_ENGINE_QUERY_TIME
|
||||
.with_label_values(&[flow_id.to_string().as_str()])
|
||||
.with_label_values(&[
|
||||
flow_id.to_string().as_str(),
|
||||
format!(
|
||||
"{}s",
|
||||
self.config.time_window_size().unwrap_or_default().as_secs()
|
||||
)
|
||||
.as_str(),
|
||||
])
|
||||
.start_timer();
|
||||
|
||||
// hack and special handling the insert logical plan
|
||||
@@ -361,10 +418,12 @@ impl BatchingTask {
|
||||
};
|
||||
|
||||
frontend_client
|
||||
.handle(req, catalog, schema, &mut peer_desc)
|
||||
.handle(req, catalog, schema, Some(db.peer), Some(self))
|
||||
.await
|
||||
};
|
||||
|
||||
// signaling the slow query metric task to stop
|
||||
let _ = tx.send(());
|
||||
let elapsed = instant.elapsed();
|
||||
if let Ok(affected_rows) = &res {
|
||||
debug!(
|
||||
@@ -387,7 +446,12 @@ impl BatchingTask {
|
||||
METRIC_FLOW_BATCHING_ENGINE_SLOW_QUERY
|
||||
.with_label_values(&[
|
||||
flow_id.to_string().as_str(),
|
||||
&peer_desc.unwrap_or_default().to_string(),
|
||||
&peer_desc.to_string(),
|
||||
format!(
|
||||
"{}s",
|
||||
self.config.time_window_size().unwrap_or_default().as_secs()
|
||||
)
|
||||
.as_str(),
|
||||
])
|
||||
.observe(elapsed.as_secs_f64());
|
||||
}
|
||||
@@ -580,19 +644,20 @@ impl BatchingTask {
|
||||
),
|
||||
})?;
|
||||
|
||||
let expr = self
|
||||
.state
|
||||
.write()
|
||||
.unwrap()
|
||||
.dirty_time_windows
|
||||
.gen_filter_exprs(
|
||||
let expr = {
|
||||
let mut state = self.state.write().unwrap();
|
||||
let max_window_cnt = state
|
||||
.max_filter_num
|
||||
.unwrap_or(DirtyTimeWindows::MAX_FILTER_NUM);
|
||||
state.dirty_time_windows.gen_filter_exprs(
|
||||
&col_name,
|
||||
Some(l),
|
||||
window_size,
|
||||
DirtyTimeWindows::MAX_FILTER_NUM,
|
||||
max_window_cnt,
|
||||
self.config.flow_id,
|
||||
Some(self),
|
||||
)?;
|
||||
)?
|
||||
};
|
||||
|
||||
debug!(
|
||||
"Flow id={:?}, Generated filter expr: {:?}",
|
||||
|
||||
@@ -31,22 +31,37 @@ lazy_static! {
|
||||
pub static ref METRIC_FLOW_BATCHING_ENGINE_QUERY_TIME: HistogramVec = register_histogram_vec!(
|
||||
"greptime_flow_batching_engine_query_time_secs",
|
||||
"flow batching engine query time(seconds)",
|
||||
&["flow_id"],
|
||||
&["flow_id", "time_window_granularity"],
|
||||
vec![0.0, 5., 10., 20., 40., 80., 160., 320., 640.,]
|
||||
)
|
||||
.unwrap();
|
||||
pub static ref METRIC_FLOW_BATCHING_ENGINE_SLOW_QUERY: HistogramVec = register_histogram_vec!(
|
||||
"greptime_flow_batching_engine_slow_query_secs",
|
||||
"flow batching engine slow query(seconds)",
|
||||
&["flow_id", "peer"],
|
||||
"flow batching engine slow query(seconds), updated after query finished",
|
||||
&["flow_id", "peer", "time_window_granularity"],
|
||||
vec![60., 2. * 60., 3. * 60., 5. * 60., 10. * 60.]
|
||||
)
|
||||
.unwrap();
|
||||
pub static ref METRIC_FLOW_BATCHING_ENGINE_REAL_TIME_SLOW_QUERY_CNT: GaugeVec =
|
||||
register_gauge_vec!(
|
||||
"greptime_flow_batching_engine_real_time_slow_query_number",
|
||||
"flow batching engine real time slow query number, updated in real time",
|
||||
&["flow_id", "peer", "time_window_granularity"],
|
||||
)
|
||||
.unwrap();
|
||||
pub static ref METRIC_FLOW_BATCHING_ENGINE_STALLED_QUERY_WINDOW_CNT: HistogramVec =
|
||||
register_histogram_vec!(
|
||||
"greptime_flow_batching_engine_stalled_query_window_cnt",
|
||||
"flow batching engine stalled query time window count",
|
||||
&["flow_id", "time_window_granularity"],
|
||||
vec![0.0, 5., 10., 20., 40.]
|
||||
)
|
||||
.unwrap();
|
||||
pub static ref METRIC_FLOW_BATCHING_ENGINE_QUERY_WINDOW_CNT: HistogramVec =
|
||||
register_histogram_vec!(
|
||||
"greptime_flow_batching_engine_query_window_cnt",
|
||||
"flow batching engine query time window count",
|
||||
&["flow_id"],
|
||||
&["flow_id", "time_window_granularity"],
|
||||
vec![0.0, 5., 10., 20., 40.]
|
||||
)
|
||||
.unwrap();
|
||||
@@ -54,7 +69,15 @@ lazy_static! {
|
||||
register_histogram_vec!(
|
||||
"greptime_flow_batching_engine_query_time_range_secs",
|
||||
"flow batching engine query time range(seconds)",
|
||||
&["flow_id"],
|
||||
&["flow_id", "time_window_granularity"],
|
||||
vec![60., 4. * 60., 16. * 60., 64. * 60., 256. * 60.]
|
||||
)
|
||||
.unwrap();
|
||||
pub static ref METRIC_FLOW_BATCHING_ENGINE_GUESS_FE_LOAD: HistogramVec =
|
||||
register_histogram_vec!(
|
||||
"greptime_flow_batching_engine_guess_fe_load",
|
||||
"flow batching engine guessed frontend load",
|
||||
&["fe_addr"],
|
||||
vec![60., 4. * 60., 16. * 60., 64. * 60., 256. * 60.]
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
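Most of the batching-engine histograms above gain a time_window_granularity label, and a new gauge tracks slow queries while they are still in flight: the spawned watcher adds 1 once the threshold elapses and subtracts 1 when the query returns. A minimal prometheus-crate sketch of that gauge pattern, with an illustrative metric name rather than the crate's real registry wiring:

use lazy_static::lazy_static;
use prometheus::{register_gauge_vec, GaugeVec};

lazy_static! {
    // Illustrative gauge mirroring the label set added in the diff.
    static ref REAL_TIME_SLOW_QUERY: GaugeVec = register_gauge_vec!(
        "example_real_time_slow_query_number",
        "queries currently running past the slow-query threshold",
        &["flow_id", "peer", "time_window_granularity"]
    )
    .unwrap();
}

fn main() {
    let labels = ["42", "127.0.0.1:4001", "3s"];
    // Marked as slow once the threshold elapses...
    REAL_TIME_SLOW_QUERY.with_label_values(&labels).add(1.0);
    // ...and cleared again when the query finally returns.
    REAL_TIME_SLOW_QUERY.with_label_values(&labels).sub(1.0);
}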
@@ -17,7 +17,7 @@ use std::collections::BTreeMap;
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_error::ext::BoxedError;
|
||||
use common_function::function::FunctionContext;
|
||||
use common_function::function::{FunctionContext, FunctionRef};
|
||||
use datafusion_substrait::extensions::Extensions;
|
||||
use datatypes::data_type::ConcreteDataType as CDT;
|
||||
use query::QueryEngine;
|
||||
@@ -108,9 +108,13 @@ impl FunctionExtensions {
|
||||
|
||||
/// register flow-specific functions to the query engine
|
||||
pub fn register_function_to_query_engine(engine: &Arc<dyn QueryEngine>) {
|
||||
engine.register_function(Arc::new(TumbleFunction::new("tumble")));
|
||||
engine.register_function(Arc::new(TumbleFunction::new(TUMBLE_START)));
|
||||
engine.register_function(Arc::new(TumbleFunction::new(TUMBLE_END)));
|
||||
let tumble_fn = Arc::new(TumbleFunction::new("tumble")) as FunctionRef;
|
||||
let tumble_start_fn = Arc::new(TumbleFunction::new(TUMBLE_START)) as FunctionRef;
|
||||
let tumble_end_fn = Arc::new(TumbleFunction::new(TUMBLE_END)) as FunctionRef;
|
||||
|
||||
engine.register_scalar_function(tumble_fn.into());
|
||||
engine.register_scalar_function(tumble_start_fn.into());
|
||||
engine.register_scalar_function(tumble_end_fn.into());
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
|
||||
@@ -130,7 +130,13 @@ impl JaegerQueryHandler for Instance {
            .await?)
    }

    async fn get_trace(&self, ctx: QueryContextRef, trace_id: &str) -> ServerResult<Output> {
    async fn get_trace(
        &self,
        ctx: QueryContextRef,
        trace_id: &str,
        start_time: Option<i64>,
        end_time: Option<i64>,
    ) -> ServerResult<Output> {
        // It's equivalent to
        //
        // ```
@@ -139,13 +145,25 @@ impl JaegerQueryHandler for Instance {
        // FROM
        // {db}.{trace_table}
        // WHERE
        // trace_id = '{trace_id}'
        // trace_id = '{trace_id}' AND
        // timestamp >= {start_time} AND
        // timestamp <= {end_time}
        // ORDER BY
        // timestamp DESC
        // ```.
        let selects = vec![wildcard()];

        let filters = vec![col(TRACE_ID_COLUMN).eq(lit(trace_id))];
        let mut filters = vec![col(TRACE_ID_COLUMN).eq(lit(trace_id))];

        if let Some(start_time) = start_time {
            // Microseconds to nanoseconds.
            filters.push(col(TIMESTAMP_COLUMN).gt_eq(lit_timestamp_nano(start_time * 1_000)));
        }

        if let Some(end_time) = end_time {
            // Microseconds to nanoseconds.
            filters.push(col(TIMESTAMP_COLUMN).lt_eq(lit_timestamp_nano(end_time * 1_000)));
        }

        Ok(query_trace_table(
            ctx,

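The Jaeger change above narrows get_trace with optional start/end bounds; the API hands over microseconds while the trace table stores nanoseconds, hence the * 1_000. A small DataFusion sketch of building those filters, with literal column names standing in for TRACE_ID_COLUMN and TIMESTAMP_COLUMN:

use datafusion_expr::{col, lit, lit_timestamp_nano, Expr};

// Build the WHERE clauses for a trace lookup; `start_time`/`end_time` arrive in
// microseconds (Jaeger API) and the table stores nanoseconds.
// Column names here are illustrative assumptions.
fn trace_filters(trace_id: &str, start_time: Option<i64>, end_time: Option<i64>) -> Vec<Expr> {
    let mut filters = vec![col("trace_id").eq(lit(trace_id))];
    if let Some(start) = start_time {
        filters.push(col("timestamp").gt_eq(lit_timestamp_nano(start * 1_000)));
    }
    if let Some(end) = end_time {
        filters.push(col("timestamp").lt_eq(lit_timestamp_nano(end * 1_000)));
    }
    filters
}

fn main() {
    let exprs = trace_filters("abc123", Some(1_700_000_000_000_000), None);
    assert_eq!(exprs.len(), 2);
}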
@@ -154,6 +154,7 @@ where
            ServerGrpcQueryHandlerAdapter::arc(self.instance.clone()),
            user_provider.clone(),
            runtime,
            opts.grpc.flight_compression,
        );

        let grpc_server = builder

@@ -31,8 +31,6 @@ use common_meta::kv_backend::rds::MySqlStore;
|
||||
#[cfg(feature = "pg_kvbackend")]
|
||||
use common_meta::kv_backend::rds::PgStore;
|
||||
use common_meta::kv_backend::{KvBackendRef, ResettableKvBackendRef};
|
||||
#[cfg(feature = "pg_kvbackend")]
|
||||
use common_telemetry::error;
|
||||
use common_telemetry::info;
|
||||
#[cfg(feature = "pg_kvbackend")]
|
||||
use deadpool_postgres::{Config, Runtime};
|
||||
@@ -144,7 +142,8 @@ impl MetasrvInstance {
|
||||
let (serve_state_tx, serve_state_rx) = oneshot::channel();
|
||||
|
||||
let socket_addr =
|
||||
bootstrap_metasrv_with_router(&self.opts.bind_addr, router, serve_state_tx, rx).await?;
|
||||
bootstrap_metasrv_with_router(&self.opts.grpc.bind_addr, router, serve_state_tx, rx)
|
||||
.await?;
|
||||
self.bind_addr = Some(socket_addr);
|
||||
|
||||
let addr = self.opts.http.addr.parse().context(error::ParseAddrSnafu {
|
||||
@@ -260,7 +259,7 @@ pub async fn metasrv_builder(
|
||||
let etcd_client = create_etcd_client(&opts.store_addrs).await?;
|
||||
let kv_backend = EtcdStore::with_etcd_client(etcd_client.clone(), opts.max_txn_ops);
|
||||
let election = EtcdElection::with_etcd_client(
|
||||
&opts.server_addr,
|
||||
&opts.grpc.server_addr,
|
||||
etcd_client,
|
||||
opts.store_key_prefix.clone(),
|
||||
)
|
||||
@@ -270,22 +269,41 @@ pub async fn metasrv_builder(
|
||||
}
|
||||
#[cfg(feature = "pg_kvbackend")]
|
||||
(None, BackendImpl::PostgresStore) => {
|
||||
let pool = create_postgres_pool(&opts.store_addrs).await?;
|
||||
let kv_backend = PgStore::with_pg_pool(pool, &opts.meta_table_name, opts.max_txn_ops)
|
||||
.await
|
||||
.context(error::KvBackendSnafu)?;
|
||||
// Client for election should be created separately since we need a different session keep-alive idle time.
|
||||
let election_client = create_postgres_client(opts).await?;
|
||||
use std::time::Duration;
|
||||
|
||||
use common_meta::distributed_time_constants::POSTGRES_KEEP_ALIVE_SECS;
|
||||
|
||||
use crate::election::rds::postgres::ElectionPgClient;
|
||||
|
||||
let candidate_lease_ttl = Duration::from_secs(CANDIDATE_LEASE_SECS);
|
||||
let execution_timeout = Duration::from_secs(META_LEASE_SECS);
|
||||
let statement_timeout = Duration::from_secs(META_LEASE_SECS);
|
||||
let meta_lease_ttl = Duration::from_secs(META_LEASE_SECS);
|
||||
|
||||
let mut cfg = Config::new();
|
||||
cfg.keepalives = Some(true);
|
||||
cfg.keepalives_idle = Some(Duration::from_secs(POSTGRES_KEEP_ALIVE_SECS));
|
||||
// We use a separate pool for election since we need a different session keep-alive idle time.
|
||||
let pool = create_postgres_pool_with(&opts.store_addrs, cfg).await?;
|
||||
|
||||
let election_client =
|
||||
ElectionPgClient::new(pool, execution_timeout, meta_lease_ttl, statement_timeout)?;
|
||||
let election = PgElection::with_pg_client(
|
||||
opts.server_addr.clone(),
|
||||
opts.grpc.server_addr.clone(),
|
||||
election_client,
|
||||
opts.store_key_prefix.clone(),
|
||||
CANDIDATE_LEASE_SECS,
|
||||
META_LEASE_SECS,
|
||||
candidate_lease_ttl,
|
||||
meta_lease_ttl,
|
||||
&opts.meta_table_name,
|
||||
opts.meta_election_lock_id,
|
||||
)
|
||||
.await?;
|
||||
|
||||
let pool = create_postgres_pool(&opts.store_addrs).await?;
|
||||
let kv_backend = PgStore::with_pg_pool(pool, &opts.meta_table_name, opts.max_txn_ops)
|
||||
.await
|
||||
.context(error::KvBackendSnafu)?;
|
||||
|
||||
(kv_backend, Some(election))
|
||||
}
|
||||
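The PostgresStore arm now builds a dedicated deadpool-postgres pool for the election session so it can carry its own TCP keep-alive settings, separate from the kv-backend pool. A hedged sketch of that pool construction; the connection URL and keep-alive value are placeholders, not the project's configuration:

use std::time::Duration;

use deadpool_postgres::{Config, Runtime};
use tokio_postgres::NoTls;

// Assumed value for illustration; the real constant lives in
// common_meta::distributed_time_constants::POSTGRES_KEEP_ALIVE_SECS.
const POSTGRES_KEEP_ALIVE_SECS: u64 = 30;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // A separate pool just for the election session, with TCP keep-alives enabled.
    let mut cfg = Config::new();
    cfg.url = Some("postgres://user:pass@127.0.0.1:5432/metasrv".to_string());
    cfg.keepalives = Some(true);
    cfg.keepalives_idle = Some(Duration::from_secs(POSTGRES_KEEP_ALIVE_SECS));

    let pool = cfg.create_pool(Some(Runtime::Tokio1), NoTls)?;
    let client = pool.get().await?;
    client.execute("SELECT 1", &[]).await?;
    Ok(())
}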
#[cfg(feature = "mysql_kvbackend")]
|
||||
@@ -299,7 +317,7 @@ pub async fn metasrv_builder(
|
||||
let election_table_name = opts.meta_table_name.clone() + "_election";
|
||||
let election_client = create_mysql_client(opts).await?;
|
||||
let election = MySqlElection::with_mysql_client(
|
||||
opts.server_addr.clone(),
|
||||
opts.grpc.server_addr.clone(),
|
||||
election_client,
|
||||
opts.store_key_prefix.clone(),
|
||||
CANDIDATE_LEASE_SECS,
|
||||
@@ -372,31 +390,24 @@ pub async fn create_etcd_client(store_addrs: &[String]) -> Result<Client> {
|
||||
}
|
||||
|
||||
#[cfg(feature = "pg_kvbackend")]
|
||||
async fn create_postgres_client(opts: &MetasrvOptions) -> Result<tokio_postgres::Client> {
|
||||
let postgres_url = opts
|
||||
.store_addrs
|
||||
.first()
|
||||
.context(error::InvalidArgumentsSnafu {
|
||||
err_msg: "empty store addrs",
|
||||
})?;
|
||||
let (client, connection) = tokio_postgres::connect(postgres_url, NoTls)
|
||||
.await
|
||||
.context(error::ConnectPostgresSnafu)?;
|
||||
|
||||
tokio::spawn(async move {
|
||||
if let Err(e) = connection.await {
|
||||
error!(e; "connection error");
|
||||
}
|
||||
});
|
||||
Ok(client)
|
||||
/// Creates a pool for the Postgres backend.
|
||||
///
|
||||
/// It only use first store addr to create a pool.
|
||||
pub async fn create_postgres_pool(store_addrs: &[String]) -> Result<deadpool_postgres::Pool> {
|
||||
create_postgres_pool_with(store_addrs, Config::new()).await
|
||||
}
|
||||
|
||||
#[cfg(feature = "pg_kvbackend")]
|
||||
pub async fn create_postgres_pool(store_addrs: &[String]) -> Result<deadpool_postgres::Pool> {
|
||||
/// Creates a pool for the Postgres backend.
|
||||
///
|
||||
/// It only use first store addr to create a pool, and use the given config to create a pool.
|
||||
pub async fn create_postgres_pool_with(
|
||||
store_addrs: &[String],
|
||||
mut cfg: Config,
|
||||
) -> Result<deadpool_postgres::Pool> {
|
||||
let postgres_url = store_addrs.first().context(error::InvalidArgumentsSnafu {
|
||||
err_msg: "empty store addrs",
|
||||
})?;
|
||||
let mut cfg = Config::new();
|
||||
cfg.url = Some(postgres_url.to_string());
|
||||
let pool = cfg
|
||||
.create_pool(Some(Runtime::Tokio1), NoTls)
|
||||
|
||||
@@ -157,6 +157,11 @@ pub trait Election: Send + Sync {
    /// but only one can be the leader at a time.
    async fn campaign(&self) -> Result<()>;

    /// Resets the campaign.
    ///
    /// Reset the client and the leader flag if needed.
    async fn reset_campaign(&self) {}

    /// Returns the leader value for the current election.
    async fn leader(&self) -> Result<Self::Leader>;

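reset_campaign ships with an empty default body, so only backends whose client can go stale need to override it (the Postgres election below does). A sketch of that trait shape using async-trait, which is an assumption about how the trait is declared; the error type is simplified:

use async_trait::async_trait;

#[async_trait]
pub trait Election: Send + Sync {
    async fn campaign(&self) -> Result<(), String>;

    /// Default no-op: backends that never need to rebuild their client keep this.
    async fn reset_campaign(&self) {}
}

pub struct NoopElection;

#[async_trait]
impl Election for NoopElection {
    async fn campaign(&self) -> Result<(), String> {
        Ok(())
    }
    // `reset_campaign` falls back to the trait's empty default.
}

#[tokio::main]
async fn main() {
    let election = NoopElection;
    election.campaign().await.unwrap();
    election.reset_campaign().await;
}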
@@ -18,11 +18,12 @@ use std::time::Duration;
|
||||
|
||||
use common_telemetry::{error, warn};
|
||||
use common_time::Timestamp;
|
||||
use deadpool_postgres::{Manager, Pool};
|
||||
use snafu::{ensure, OptionExt, ResultExt};
|
||||
use tokio::sync::broadcast;
|
||||
use tokio::sync::{broadcast, RwLock};
|
||||
use tokio::time::MissedTickBehavior;
|
||||
use tokio_postgres::types::ToSql;
|
||||
use tokio_postgres::Client;
|
||||
use tokio_postgres::Row;
|
||||
|
||||
use crate::election::rds::{parse_value_and_expire_time, Lease, RdsLeaderKey, LEASE_SEP};
|
||||
use crate::election::{
|
||||
@@ -30,15 +31,14 @@ use crate::election::{
|
||||
CANDIDATES_ROOT, ELECTION_KEY,
|
||||
};
|
||||
use crate::error::{
|
||||
DeserializeFromJsonSnafu, NoLeaderSnafu, PostgresExecutionSnafu, Result, SerializeToJsonSnafu,
|
||||
UnexpectedSnafu,
|
||||
DeserializeFromJsonSnafu, GetPostgresClientSnafu, NoLeaderSnafu, PostgresExecutionSnafu,
|
||||
Result, SerializeToJsonSnafu, SqlExecutionTimeoutSnafu, UnexpectedSnafu,
|
||||
};
|
||||
use crate::metasrv::{ElectionRef, LeaderValue, MetasrvNodeInfo};
|
||||
|
||||
struct ElectionSqlFactory<'a> {
|
||||
lock_id: u64,
|
||||
table_name: &'a str,
|
||||
meta_lease_ttl_secs: u64,
|
||||
}
|
||||
|
||||
struct ElectionSqlSet {
|
||||
@@ -88,11 +88,10 @@ struct ElectionSqlSet {
|
||||
}
|
||||
|
||||
impl<'a> ElectionSqlFactory<'a> {
|
||||
fn new(lock_id: u64, table_name: &'a str, meta_lease_ttl_secs: u64) -> Self {
|
||||
fn new(lock_id: u64, table_name: &'a str) -> Self {
|
||||
Self {
|
||||
lock_id,
|
||||
table_name,
|
||||
meta_lease_ttl_secs,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -108,15 +107,6 @@ impl<'a> ElectionSqlFactory<'a> {
|
||||
}
|
||||
}
|
||||
|
||||
// Currently the session timeout is longer than the leader lease time.
|
||||
// So the leader will renew the lease twice before the session timeout if everything goes well.
|
||||
fn set_idle_session_timeout_sql(&self) -> String {
|
||||
format!(
|
||||
"SET idle_session_timeout = '{}s';",
|
||||
self.meta_lease_ttl_secs + 1
|
||||
)
|
||||
}
|
||||
|
||||
fn campaign_sql(&self) -> String {
|
||||
format!("SELECT pg_try_advisory_lock({})", self.lock_id)
|
||||
}
|
||||
@@ -171,46 +161,165 @@ impl<'a> ElectionSqlFactory<'a> {
|
||||
}
|
||||
}
|
||||
|
||||
/// PgClient for election.
|
||||
pub struct ElectionPgClient {
|
||||
current: Option<deadpool::managed::Object<Manager>>,
|
||||
pool: Pool,
|
||||
/// The client-side timeout for statement execution.
|
||||
///
|
||||
/// This timeout is enforced by the client application and is independent of any server-side timeouts.
|
||||
/// If a statement takes longer than this duration to execute, the client will abort the operation.
|
||||
execution_timeout: Duration,
|
||||
|
||||
/// The idle session timeout.
|
||||
///
|
||||
/// This timeout is configured per client session and is enforced by the PostgreSQL server.
|
||||
/// If a session remains idle for longer than this duration, the server will terminate it.
|
||||
idle_session_timeout: Duration,
|
||||
|
||||
/// The statement timeout.
|
||||
///
|
||||
/// This timeout is configured per client session and is enforced by the PostgreSQL server.
|
||||
/// If a statement takes longer than this duration to execute, the server will abort it.
|
||||
statement_timeout: Duration,
|
||||
}
|
||||
|
||||
impl ElectionPgClient {
|
||||
pub fn new(
|
||||
pool: Pool,
|
||||
execution_timeout: Duration,
|
||||
idle_session_timeout: Duration,
|
||||
statement_timeout: Duration,
|
||||
) -> Result<ElectionPgClient> {
|
||||
Ok(ElectionPgClient {
|
||||
current: None,
|
||||
pool,
|
||||
execution_timeout,
|
||||
idle_session_timeout,
|
||||
statement_timeout,
|
||||
})
|
||||
}
|
||||
|
||||
fn set_idle_session_timeout_sql(&self) -> String {
|
||||
format!(
|
||||
"SET idle_session_timeout = '{}s';",
|
||||
self.idle_session_timeout.as_secs()
|
||||
)
|
||||
}
|
||||
|
||||
fn set_statement_timeout_sql(&self) -> String {
|
||||
format!(
|
||||
"SET statement_timeout = '{}s';",
|
||||
self.statement_timeout.as_secs()
|
||||
)
|
||||
}
|
||||
|
||||
async fn reset_client(&mut self) -> Result<()> {
|
||||
self.current = None;
|
||||
self.maybe_init_client().await
|
||||
}
|
||||
|
||||
async fn maybe_init_client(&mut self) -> Result<()> {
|
||||
if self.current.is_none() {
|
||||
let client = self.pool.get().await.context(GetPostgresClientSnafu)?;
|
||||
|
||||
self.current = Some(client);
|
||||
// Set idle session timeout and statement timeout.
|
||||
let idle_session_timeout_sql = self.set_idle_session_timeout_sql();
|
||||
self.execute(&idle_session_timeout_sql, &[]).await?;
|
||||
let statement_timeout_sql = self.set_statement_timeout_sql();
|
||||
self.execute(&statement_timeout_sql, &[]).await?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Returns the result of the query.
|
||||
///
|
||||
/// # Panics
|
||||
/// if `current` is `None`.
|
||||
async fn execute(&self, sql: &str, params: &[&(dyn ToSql + Sync)]) -> Result<u64> {
|
||||
let result = tokio::time::timeout(
|
||||
self.execution_timeout,
|
||||
self.current.as_ref().unwrap().execute(sql, params),
|
||||
)
|
||||
.await
|
||||
.map_err(|_| {
|
||||
SqlExecutionTimeoutSnafu {
|
||||
sql: sql.to_string(),
|
||||
duration: self.execution_timeout,
|
||||
}
|
||||
.build()
|
||||
})?;
|
||||
|
||||
result.context(PostgresExecutionSnafu { sql })
|
||||
}
|
||||
|
||||
/// Returns the result of the query.
|
||||
///
|
||||
/// # Panics
|
||||
/// if `current` is `None`.
|
||||
async fn query(&self, sql: &str, params: &[&(dyn ToSql + Sync)]) -> Result<Vec<Row>> {
|
||||
let result = tokio::time::timeout(
|
||||
self.execution_timeout,
|
||||
self.current.as_ref().unwrap().query(sql, params),
|
||||
)
|
||||
.await
|
||||
.map_err(|_| {
|
||||
SqlExecutionTimeoutSnafu {
|
||||
sql: sql.to_string(),
|
||||
duration: self.execution_timeout,
|
||||
}
|
||||
.build()
|
||||
})?;
|
||||
|
||||
result.context(PostgresExecutionSnafu { sql })
|
||||
}
|
||||
}
|
||||
|
||||
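Both execute and query above race the statement against execution_timeout with tokio::time::timeout, turning an elapsed timer into a dedicated error instead of letting a hung connection stall the campaign loop. A minimal sketch of that pattern with a stand-in future and a simplified error type:

use std::time::Duration;

use tokio::time::timeout;

#[derive(Debug)]
enum ExecError {
    Timeout { sql: String, duration: Duration },
}

// Run a future that talks to the database, but give up after `execution_timeout`
// and report which statement stalled. The inner future is a stand-in for
// `client.execute(sql, params)`.
async fn run_with_timeout(sql: &str, execution_timeout: Duration) -> Result<u64, ExecError> {
    let fake_query = async {
        tokio::time::sleep(Duration::from_millis(10)).await;
        Ok::<u64, ExecError>(1)
    };

    timeout(execution_timeout, fake_query)
        .await
        .map_err(|_| ExecError::Timeout {
            sql: sql.to_string(),
            duration: execution_timeout,
        })?
}

#[tokio::main]
async fn main() {
    let rows = run_with_timeout("SELECT 1", Duration::from_secs(1))
        .await
        .unwrap();
    assert_eq!(rows, 1);
}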
/// PostgreSql implementation of Election.
|
||||
pub struct PgElection {
|
||||
leader_value: String,
|
||||
client: Client,
|
||||
pg_client: RwLock<ElectionPgClient>,
|
||||
is_leader: AtomicBool,
|
||||
leader_infancy: AtomicBool,
|
||||
leader_watcher: broadcast::Sender<LeaderChangeMessage>,
|
||||
store_key_prefix: String,
|
||||
candidate_lease_ttl_secs: u64,
|
||||
meta_lease_ttl_secs: u64,
|
||||
candidate_lease_ttl: Duration,
|
||||
meta_lease_ttl: Duration,
|
||||
sql_set: ElectionSqlSet,
|
||||
}
|
||||
|
||||
impl PgElection {
|
||||
async fn maybe_init_client(&self) -> Result<()> {
|
||||
if self.pg_client.read().await.current.is_none() {
|
||||
self.pg_client.write().await.maybe_init_client().await?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn with_pg_client(
|
||||
leader_value: String,
|
||||
client: Client,
|
||||
pg_client: ElectionPgClient,
|
||||
store_key_prefix: String,
|
||||
candidate_lease_ttl_secs: u64,
|
||||
meta_lease_ttl_secs: u64,
|
||||
candidate_lease_ttl: Duration,
|
||||
meta_lease_ttl: Duration,
|
||||
table_name: &str,
|
||||
lock_id: u64,
|
||||
) -> Result<ElectionRef> {
|
||||
let sql_factory = ElectionSqlFactory::new(lock_id, table_name, meta_lease_ttl_secs);
|
||||
// Set idle session timeout to IDLE_SESSION_TIMEOUT to avoid dead advisory lock.
|
||||
client
|
||||
.execute(&sql_factory.set_idle_session_timeout_sql(), &[])
|
||||
.await
|
||||
.context(PostgresExecutionSnafu)?;
|
||||
let sql_factory = ElectionSqlFactory::new(lock_id, table_name);
|
||||
|
||||
let tx = listen_leader_change(leader_value.clone());
|
||||
Ok(Arc::new(Self {
|
||||
leader_value,
|
||||
client,
|
||||
pg_client: RwLock::new(pg_client),
|
||||
is_leader: AtomicBool::new(false),
|
||||
leader_infancy: AtomicBool::new(false),
|
||||
leader_watcher: tx,
|
||||
store_key_prefix,
|
||||
candidate_lease_ttl_secs,
|
||||
meta_lease_ttl_secs,
|
||||
candidate_lease_ttl,
|
||||
meta_lease_ttl,
|
||||
sql_set: sql_factory.build(),
|
||||
}))
|
||||
}
|
||||
@@ -249,18 +358,17 @@ impl Election for PgElection {
|
||||
input: format!("{node_info:?}"),
|
||||
})?;
|
||||
let res = self
|
||||
.put_value_with_lease(&key, &node_info, self.candidate_lease_ttl_secs)
|
||||
.put_value_with_lease(&key, &node_info, self.candidate_lease_ttl)
|
||||
.await?;
|
||||
// May registered before, just update the lease.
|
||||
if !res {
|
||||
self.delete_value(&key).await?;
|
||||
self.put_value_with_lease(&key, &node_info, self.candidate_lease_ttl_secs)
|
||||
self.put_value_with_lease(&key, &node_info, self.candidate_lease_ttl)
|
||||
.await?;
|
||||
}
|
||||
|
||||
// Check if the current lease has expired and renew the lease.
|
||||
let mut keep_alive_interval =
|
||||
tokio::time::interval(Duration::from_secs(self.candidate_lease_ttl_secs / 2));
|
||||
let mut keep_alive_interval = tokio::time::interval(self.candidate_lease_ttl / 2);
|
||||
loop {
|
||||
let _ = keep_alive_interval.tick().await;
|
||||
|
||||
@@ -282,13 +390,8 @@ impl Election for PgElection {
|
||||
);
|
||||
|
||||
// Safety: origin is Some since we are using `get_value_with_lease` with `true`.
|
||||
self.update_value_with_lease(
|
||||
&key,
|
||||
&lease.origin,
|
||||
&node_info,
|
||||
self.candidate_lease_ttl_secs,
|
||||
)
|
||||
.await?;
|
||||
self.update_value_with_lease(&key, &lease.origin, &node_info, self.candidate_lease_ttl)
|
||||
.await?;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -321,16 +424,17 @@ impl Election for PgElection {
|
||||
/// - If the lock is not acquired (result is false), it calls the `follower_action` method
|
||||
/// to perform actions as a follower.
|
||||
async fn campaign(&self) -> Result<()> {
|
||||
let mut keep_alive_interval =
|
||||
tokio::time::interval(Duration::from_secs(self.meta_lease_ttl_secs / 2));
|
||||
let mut keep_alive_interval = tokio::time::interval(self.meta_lease_ttl / 2);
|
||||
keep_alive_interval.set_missed_tick_behavior(MissedTickBehavior::Delay);
|
||||
|
||||
self.maybe_init_client().await?;
|
||||
loop {
|
||||
let res = self
|
||||
.client
|
||||
.query(&self.sql_set.campaign, &[])
|
||||
.pg_client
|
||||
.read()
|
||||
.await
|
||||
.context(PostgresExecutionSnafu)?;
|
||||
.query(&self.sql_set.campaign, &[])
|
||||
.await?;
|
||||
let row = res.first().context(UnexpectedSnafu {
|
||||
violated: "Failed to get the result of acquiring advisory lock",
|
||||
})?;
|
||||
@@ -349,6 +453,12 @@ impl Election for PgElection {
|
||||
}
|
||||
}
|
||||
|
||||
async fn reset_campaign(&self) {
|
||||
if let Err(err) = self.pg_client.write().await.reset_client().await {
|
||||
error!(err; "Failed to reset client");
|
||||
}
|
||||
}
|
||||
|
||||
async fn leader(&self) -> Result<Self::Leader> {
|
||||
if self.is_leader.load(Ordering::Relaxed) {
|
||||
Ok(self.leader_value.as_bytes().into())
|
||||
@@ -376,11 +486,13 @@ impl PgElection {
|
||||
/// Returns value, expire time and current time. If `with_origin` is true, the origin string is also returned.
|
||||
async fn get_value_with_lease(&self, key: &str) -> Result<Option<Lease>> {
|
||||
let key = key.as_bytes();
|
||||
self.maybe_init_client().await?;
|
||||
let res = self
|
||||
.client
|
||||
.query(&self.sql_set.get_value_with_lease, &[&key])
|
||||
.pg_client
|
||||
.read()
|
||||
.await
|
||||
.context(PostgresExecutionSnafu)?;
|
||||
.query(&self.sql_set.get_value_with_lease, &[&key])
|
||||
.await?;
|
||||
|
||||
if res.is_empty() {
|
||||
Ok(None)
|
||||
@@ -414,11 +526,13 @@ impl PgElection {
|
||||
key_prefix: &str,
|
||||
) -> Result<(Vec<(String, Timestamp)>, Timestamp)> {
|
||||
let key_prefix = format!("{}%", key_prefix).as_bytes().to_vec();
|
||||
self.maybe_init_client().await?;
|
||||
let res = self
|
||||
.client
|
||||
.query(&self.sql_set.get_value_with_lease_by_prefix, &[&key_prefix])
|
||||
.pg_client
|
||||
.read()
|
||||
.await
|
||||
.context(PostgresExecutionSnafu)?;
|
||||
.query(&self.sql_set.get_value_with_lease_by_prefix, &[&key_prefix])
|
||||
.await?;
|
||||
|
||||
let mut values_with_leases = vec![];
|
||||
let mut current = Timestamp::default();
|
||||
@@ -445,18 +559,21 @@ impl PgElection {
|
||||
key: &str,
|
||||
prev: &str,
|
||||
updated: &str,
|
||||
lease_ttl: u64,
|
||||
lease_ttl: Duration,
|
||||
) -> Result<()> {
|
||||
let key = key.as_bytes();
|
||||
let prev = prev.as_bytes();
|
||||
self.maybe_init_client().await?;
|
||||
let lease_ttl_secs = lease_ttl.as_secs() as f64;
|
||||
let res = self
|
||||
.client
|
||||
.pg_client
|
||||
.read()
|
||||
.await
|
||||
.execute(
|
||||
&self.sql_set.update_value_with_lease,
|
||||
&[&key, &prev, &updated, &(lease_ttl as f64)],
|
||||
&[&key, &prev, &updated, &lease_ttl_secs],
|
||||
)
|
||||
.await
|
||||
.context(PostgresExecutionSnafu)?;
|
||||
.await?;
|
||||
|
||||
ensure!(
|
||||
res == 1,
|
||||
@@ -473,16 +590,18 @@ impl PgElection {
|
||||
&self,
|
||||
key: &str,
|
||||
value: &str,
|
||||
lease_ttl_secs: u64,
|
||||
lease_ttl: Duration,
|
||||
) -> Result<bool> {
|
||||
let key = key.as_bytes();
|
||||
let lease_ttl_secs = lease_ttl_secs as f64;
|
||||
let lease_ttl_secs = lease_ttl.as_secs() as f64;
|
||||
let params: Vec<&(dyn ToSql + Sync)> = vec![&key, &value, &lease_ttl_secs];
|
||||
self.maybe_init_client().await?;
|
||||
let res = self
|
||||
.client
|
||||
.query(&self.sql_set.put_value_with_lease, ¶ms)
|
||||
.pg_client
|
||||
.read()
|
||||
.await
|
||||
.context(PostgresExecutionSnafu)?;
|
||||
.query(&self.sql_set.put_value_with_lease, ¶ms)
|
||||
.await?;
|
||||
Ok(res.is_empty())
|
||||
}
|
||||
|
||||
@@ -490,11 +609,13 @@ impl PgElection {
|
||||
/// Caution: Should only delete the key if the lease is expired.
|
||||
async fn delete_value(&self, key: &str) -> Result<bool> {
|
||||
let key = key.as_bytes();
|
||||
self.maybe_init_client().await?;
|
||||
let res = self
|
||||
.client
|
||||
.query(&self.sql_set.delete_value, &[&key])
|
||||
.pg_client
|
||||
.read()
|
||||
.await
|
||||
.context(PostgresExecutionSnafu)?;
|
||||
.query(&self.sql_set.delete_value, &[&key])
|
||||
.await?;
|
||||
|
||||
Ok(res.len() == 1)
|
||||
}
|
||||
@@ -536,7 +657,7 @@ impl PgElection {
|
||||
&key,
|
||||
&lease.origin,
|
||||
&self.leader_value,
|
||||
self.meta_lease_ttl_secs,
|
||||
self.meta_lease_ttl,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
@@ -605,10 +726,12 @@ impl PgElection {
|
||||
..Default::default()
|
||||
};
|
||||
self.delete_value(&key).await?;
|
||||
self.client
|
||||
.query(&self.sql_set.step_down, &[])
|
||||
self.maybe_init_client().await?;
|
||||
self.pg_client
|
||||
.read()
|
||||
.await
|
||||
.context(PostgresExecutionSnafu)?;
|
||||
.query(&self.sql_set.step_down, &[])
|
||||
.await?;
|
||||
send_leader_change_and_set_flags(
|
||||
&self.is_leader,
|
||||
&self.leader_infancy,
|
||||
@@ -651,7 +774,7 @@ impl PgElection {
|
||||
..Default::default()
|
||||
};
|
||||
self.delete_value(&key).await?;
|
||||
self.put_value_with_lease(&key, &self.leader_value, self.meta_lease_ttl_secs)
|
||||
self.put_value_with_lease(&key, &self.leader_value, self.meta_lease_ttl)
|
||||
.await?;
|
||||
|
||||
if self
|
||||
@@ -674,15 +797,21 @@ impl PgElection {
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::assert_matches::assert_matches;
|
||||
use std::env;
|
||||
|
||||
use common_meta::maybe_skip_postgres_integration_test;
|
||||
use tokio_postgres::{Client, NoTls};
|
||||
|
||||
use super::*;
|
||||
use crate::error::PostgresExecutionSnafu;
|
||||
use crate::bootstrap::create_postgres_pool;
|
||||
use crate::error;
|
||||
|
||||
async fn create_postgres_client(table_name: Option<&str>) -> Result<Client> {
|
||||
async fn create_postgres_client(
|
||||
table_name: Option<&str>,
|
||||
execution_timeout: Duration,
|
||||
idle_session_timeout: Duration,
|
||||
statement_timeout: Duration,
|
||||
) -> Result<ElectionPgClient> {
|
||||
let endpoint = env::var("GT_POSTGRES_ENDPOINTS").unwrap_or_default();
|
||||
if endpoint.is_empty() {
|
||||
return UnexpectedSnafu {
|
||||
@@ -690,25 +819,34 @@ mod tests {
|
||||
}
|
||||
.fail();
|
||||
}
|
||||
let (client, connection) = tokio_postgres::connect(&endpoint, NoTls)
|
||||
.await
|
||||
.context(PostgresExecutionSnafu)?;
|
||||
tokio::spawn(async move {
|
||||
connection.await.context(PostgresExecutionSnafu).unwrap();
|
||||
});
|
||||
let pool = create_postgres_pool(&[endpoint]).await.unwrap();
|
||||
let mut pg_client = ElectionPgClient::new(
|
||||
pool,
|
||||
execution_timeout,
|
||||
idle_session_timeout,
|
||||
statement_timeout,
|
||||
)
|
||||
.unwrap();
|
||||
pg_client.maybe_init_client().await?;
|
||||
if let Some(table_name) = table_name {
|
||||
let create_table_sql = format!(
|
||||
"CREATE TABLE IF NOT EXISTS \"{}\"(k bytea PRIMARY KEY, v bytea);",
|
||||
table_name
|
||||
);
|
||||
client.execute(&create_table_sql, &[]).await.unwrap();
|
||||
pg_client.execute(&create_table_sql, &[]).await?;
|
||||
}
|
||||
Ok(client)
|
||||
Ok(pg_client)
|
||||
}
|
||||
|
||||
async fn drop_table(client: &Client, table_name: &str) {
|
||||
async fn drop_table(pg_election: &PgElection, table_name: &str) {
|
||||
let sql = format!("DROP TABLE IF EXISTS \"{}\";", table_name);
|
||||
client.execute(&sql, &[]).await.unwrap();
|
||||
pg_election
|
||||
.pg_client
|
||||
.read()
|
||||
.await
|
||||
.execute(&sql, &[])
|
||||
.await
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
@@ -719,23 +857,35 @@ mod tests {
|
||||
|
||||
let uuid = uuid::Uuid::new_v4().to_string();
|
||||
let table_name = "test_postgres_crud_greptime_metakv";
|
||||
let client = create_postgres_client(Some(table_name)).await.unwrap();
|
||||
let candidate_lease_ttl = Duration::from_secs(10);
|
||||
let execution_timeout = Duration::from_secs(10);
|
||||
let statement_timeout = Duration::from_secs(10);
|
||||
let meta_lease_ttl = Duration::from_secs(2);
|
||||
let idle_session_timeout = Duration::from_secs(0);
|
||||
let client = create_postgres_client(
|
||||
Some(table_name),
|
||||
execution_timeout,
|
||||
idle_session_timeout,
|
||||
statement_timeout,
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let (tx, _) = broadcast::channel(100);
|
||||
let pg_election = PgElection {
|
||||
leader_value: "test_leader".to_string(),
|
||||
client,
|
||||
pg_client: RwLock::new(client),
|
||||
is_leader: AtomicBool::new(false),
|
||||
leader_infancy: AtomicBool::new(true),
|
||||
leader_watcher: tx,
|
||||
store_key_prefix: uuid,
|
||||
candidate_lease_ttl_secs: 10,
|
||||
meta_lease_ttl_secs: 2,
|
||||
sql_set: ElectionSqlFactory::new(28319, table_name, 2).build(),
|
||||
candidate_lease_ttl,
|
||||
meta_lease_ttl,
|
||||
sql_set: ElectionSqlFactory::new(28319, table_name).build(),
|
||||
};
|
||||
|
||||
let res = pg_election
|
||||
.put_value_with_lease(&key, &value, 10)
|
||||
.put_value_with_lease(&key, &value, candidate_lease_ttl)
|
||||
.await
|
||||
.unwrap();
|
||||
assert!(res);
|
||||
@@ -748,7 +898,7 @@ mod tests {
|
||||
assert_eq!(lease.leader_value, value);
|
||||
|
||||
pg_election
|
||||
.update_value_with_lease(&key, &lease.origin, &value, pg_election.meta_lease_ttl_secs)
|
||||
.update_value_with_lease(&key, &lease.origin, &value, pg_election.meta_lease_ttl)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
@@ -762,7 +912,7 @@ mod tests {
|
||||
let key = format!("test_key_{}", i);
|
||||
let value = format!("test_value_{}", i);
|
||||
pg_election
|
||||
.put_value_with_lease(&key, &value, 10)
|
||||
.put_value_with_lease(&key, &value, candidate_lease_ttl)
|
||||
.await
|
||||
.unwrap();
|
||||
}
|
||||
@@ -787,28 +937,39 @@ mod tests {
|
||||
assert!(res.is_empty());
|
||||
assert!(current == Timestamp::default());
|
||||
|
||||
drop_table(&pg_election.client, table_name).await;
|
||||
drop_table(&pg_election, table_name).await;
|
||||
}
|
||||
|
||||
async fn candidate(
|
||||
leader_value: String,
|
||||
candidate_lease_ttl_secs: u64,
|
||||
candidate_lease_ttl: Duration,
|
||||
store_key_prefix: String,
|
||||
table_name: String,
|
||||
) {
|
||||
let client = create_postgres_client(None).await.unwrap();
|
||||
let execution_timeout = Duration::from_secs(10);
|
||||
let statement_timeout = Duration::from_secs(10);
|
||||
let meta_lease_ttl = Duration::from_secs(2);
|
||||
let idle_session_timeout = Duration::from_secs(0);
|
||||
let client = create_postgres_client(
|
||||
None,
|
||||
execution_timeout,
|
||||
idle_session_timeout,
|
||||
statement_timeout,
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let (tx, _) = broadcast::channel(100);
|
||||
let pg_election = PgElection {
|
||||
leader_value,
|
||||
client,
|
||||
pg_client: RwLock::new(client),
|
||||
is_leader: AtomicBool::new(false),
|
||||
leader_infancy: AtomicBool::new(true),
|
||||
leader_watcher: tx,
|
||||
store_key_prefix,
|
||||
candidate_lease_ttl_secs,
|
||||
meta_lease_ttl_secs: 2,
|
||||
sql_set: ElectionSqlFactory::new(28319, &table_name, 2).build(),
|
||||
candidate_lease_ttl,
|
||||
meta_lease_ttl,
|
||||
sql_set: ElectionSqlFactory::new(28319, &table_name).build(),
|
||||
};
|
||||
|
||||
let node_info = MetasrvNodeInfo {
|
||||
@@ -824,17 +985,28 @@ mod tests {
|
||||
async fn test_candidate_registration() {
|
||||
maybe_skip_postgres_integration_test!();
|
||||
let leader_value_prefix = "test_leader".to_string();
|
||||
let candidate_lease_ttl_secs = 5;
|
||||
let uuid = uuid::Uuid::new_v4().to_string();
|
||||
let table_name = "test_candidate_registration_greptime_metakv";
|
||||
let mut handles = vec![];
|
||||
let client = create_postgres_client(Some(table_name)).await.unwrap();
|
||||
let candidate_lease_ttl = Duration::from_secs(5);
|
||||
let execution_timeout = Duration::from_secs(10);
|
||||
let statement_timeout = Duration::from_secs(10);
|
||||
let meta_lease_ttl = Duration::from_secs(2);
|
||||
let idle_session_timeout = Duration::from_secs(0);
|
||||
let client = create_postgres_client(
|
||||
Some(table_name),
|
||||
execution_timeout,
|
||||
idle_session_timeout,
|
||||
statement_timeout,
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
for i in 0..10 {
|
||||
let leader_value = format!("{}{}", leader_value_prefix, i);
|
||||
let handle = tokio::spawn(candidate(
|
||||
leader_value,
|
||||
candidate_lease_ttl_secs,
|
||||
candidate_lease_ttl,
|
||||
uuid.clone(),
|
||||
table_name.to_string(),
|
||||
));
|
||||
@@ -847,14 +1019,14 @@ mod tests {
|
||||
let leader_value = "test_leader".to_string();
|
||||
let pg_election = PgElection {
|
||||
leader_value,
|
||||
client,
|
||||
pg_client: RwLock::new(client),
|
||||
is_leader: AtomicBool::new(false),
|
||||
leader_infancy: AtomicBool::new(true),
|
||||
leader_watcher: tx,
|
||||
store_key_prefix: uuid.clone(),
|
||||
candidate_lease_ttl_secs,
|
||||
meta_lease_ttl_secs: 2,
|
||||
sql_set: ElectionSqlFactory::new(28319, table_name, 2).build(),
|
||||
candidate_lease_ttl,
|
||||
meta_lease_ttl,
|
||||
sql_set: ElectionSqlFactory::new(28319, table_name).build(),
|
||||
};
|
||||
|
||||
let candidates = pg_election.all_candidates().await.unwrap();
|
||||
@@ -876,29 +1048,40 @@ mod tests {
|
||||
assert!(res);
|
||||
}
|
||||
|
||||
drop_table(&pg_election.client, table_name).await;
|
||||
drop_table(&pg_election, table_name).await;
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_elected_and_step_down() {
|
||||
maybe_skip_postgres_integration_test!();
|
||||
let leader_value = "test_leader".to_string();
|
||||
let candidate_lease_ttl_secs = 5;
|
||||
let uuid = uuid::Uuid::new_v4().to_string();
|
||||
let table_name = "test_elected_and_step_down_greptime_metakv";
|
||||
let client = create_postgres_client(Some(table_name)).await.unwrap();
|
||||
let candidate_lease_ttl = Duration::from_secs(5);
|
||||
let execution_timeout = Duration::from_secs(10);
|
||||
let statement_timeout = Duration::from_secs(10);
|
||||
let meta_lease_ttl = Duration::from_secs(2);
|
||||
let idle_session_timeout = Duration::from_secs(0);
|
||||
let client = create_postgres_client(
|
||||
Some(table_name),
|
||||
execution_timeout,
|
||||
idle_session_timeout,
|
||||
statement_timeout,
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let (tx, mut rx) = broadcast::channel(100);
|
||||
let leader_pg_election = PgElection {
|
||||
leader_value: leader_value.clone(),
|
||||
client,
|
||||
pg_client: RwLock::new(client),
|
||||
is_leader: AtomicBool::new(false),
|
||||
leader_infancy: AtomicBool::new(true),
|
||||
leader_watcher: tx,
|
||||
store_key_prefix: uuid,
|
||||
candidate_lease_ttl_secs,
|
||||
meta_lease_ttl_secs: 2,
|
||||
sql_set: ElectionSqlFactory::new(28320, table_name, 2).build(),
|
||||
candidate_lease_ttl,
|
||||
meta_lease_ttl,
|
||||
sql_set: ElectionSqlFactory::new(28320, table_name).build(),
|
||||
};
|
||||
|
||||
leader_pg_election.elected().await.unwrap();
|
||||
@@ -990,7 +1173,7 @@ mod tests {
|
||||
_ => panic!("Expected LeaderChangeMessage::StepDown"),
|
||||
}
|
||||
|
||||
drop_table(&leader_pg_election.client, table_name).await;
|
||||
drop_table(&leader_pg_election, table_name).await;
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
@@ -999,25 +1182,38 @@ mod tests {
|
||||
let leader_value = "test_leader".to_string();
|
||||
let uuid = uuid::Uuid::new_v4().to_string();
|
||||
let table_name = "test_leader_action_greptime_metakv";
|
||||
let candidate_lease_ttl_secs = 5;
|
||||
let client = create_postgres_client(Some(table_name)).await.unwrap();
|
||||
let candidate_lease_ttl = Duration::from_secs(5);
|
||||
let execution_timeout = Duration::from_secs(10);
|
||||
let statement_timeout = Duration::from_secs(10);
|
||||
let meta_lease_ttl = Duration::from_secs(2);
|
||||
let idle_session_timeout = Duration::from_secs(0);
|
||||
let client = create_postgres_client(
|
||||
Some(table_name),
|
||||
execution_timeout,
|
||||
idle_session_timeout,
|
||||
statement_timeout,
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let (tx, mut rx) = broadcast::channel(100);
|
||||
let leader_pg_election = PgElection {
|
||||
leader_value: leader_value.clone(),
|
||||
client,
|
||||
pg_client: RwLock::new(client),
|
||||
is_leader: AtomicBool::new(false),
|
||||
leader_infancy: AtomicBool::new(true),
|
||||
leader_watcher: tx,
|
||||
store_key_prefix: uuid,
|
||||
candidate_lease_ttl_secs,
|
||||
meta_lease_ttl_secs: 2,
|
||||
sql_set: ElectionSqlFactory::new(28321, table_name, 2).build(),
|
||||
candidate_lease_ttl,
|
||||
meta_lease_ttl,
|
||||
sql_set: ElectionSqlFactory::new(28321, table_name).build(),
|
||||
};
|
||||
|
||||
// Step 1: No leader exists, campaign and elected.
|
||||
let res = leader_pg_election
|
||||
.client
|
||||
.pg_client
|
||||
.read()
|
||||
.await
|
||||
.query(&leader_pg_election.sql_set.campaign, &[])
|
||||
.await
|
||||
.unwrap();
|
||||
@@ -1048,7 +1244,9 @@ mod tests {
|
||||
|
||||
// Step 2: As a leader, renew the lease.
|
||||
let res = leader_pg_election
|
||||
.client
|
||||
.pg_client
|
||||
.read()
|
||||
.await
|
||||
.query(&leader_pg_election.sql_set.campaign, &[])
|
||||
.await
|
||||
.unwrap();
|
||||
@@ -1070,7 +1268,9 @@ mod tests {
|
||||
tokio::time::sleep(Duration::from_secs(2)).await;
|
||||
|
||||
let res = leader_pg_election
|
||||
.client
|
||||
.pg_client
|
||||
.read()
|
||||
.await
|
||||
.query(&leader_pg_election.sql_set.campaign, &[])
|
||||
.await
|
||||
.unwrap();
|
||||
@@ -1098,7 +1298,9 @@ mod tests {
|
||||
|
||||
// Step 4: Re-campaign and elected.
|
||||
let res = leader_pg_election
|
||||
.client
|
||||
.pg_client
|
||||
.read()
|
||||
.await
|
||||
.query(&leader_pg_election.sql_set.campaign, &[])
|
||||
.await
|
||||
.unwrap();
|
||||
@@ -1155,7 +1357,9 @@ mod tests {
|
||||
|
||||
// Step 6: Re-campaign and elected.
|
||||
let res = leader_pg_election
|
||||
.client
|
||||
.pg_client
|
||||
.read()
|
||||
.await
|
||||
.query(&leader_pg_election.sql_set.campaign, &[])
|
||||
.await
|
||||
.unwrap();
|
||||
@@ -1186,7 +1390,9 @@ mod tests {
|
||||
|
||||
// Step 7: Something wrong, the leader key changed by others.
|
||||
let res = leader_pg_election
|
||||
.client
|
||||
.pg_client
|
||||
.read()
|
||||
.await
|
||||
.query(&leader_pg_election.sql_set.campaign, &[])
|
||||
.await
|
||||
.unwrap();
|
||||
@@ -1197,7 +1403,11 @@ mod tests {
|
||||
.await
|
||||
.unwrap();
|
||||
leader_pg_election
|
||||
.put_value_with_lease(&leader_pg_election.election_key(), "test", 10)
|
||||
.put_value_with_lease(
|
||||
&leader_pg_election.election_key(),
|
||||
"test",
|
||||
Duration::from_secs(10),
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
leader_pg_election.leader_action().await.unwrap();
|
||||
@@ -1223,52 +1433,74 @@ mod tests {
|
||||
|
||||
// Clean up
|
||||
leader_pg_election
|
||||
.client
|
||||
.pg_client
|
||||
.read()
|
||||
.await
|
||||
.query(&leader_pg_election.sql_set.step_down, &[])
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
drop_table(&leader_pg_election.client, table_name).await;
|
||||
drop_table(&leader_pg_election, table_name).await;
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_follower_action() {
|
||||
maybe_skip_postgres_integration_test!();
|
||||
common_telemetry::init_default_ut_logging();
|
||||
let candidate_lease_ttl_secs = 5;
|
||||
let uuid = uuid::Uuid::new_v4().to_string();
|
||||
let table_name = "test_follower_action_greptime_metakv";
|
||||
|
||||
let follower_client = create_postgres_client(Some(table_name)).await.unwrap();
|
||||
let candidate_lease_ttl = Duration::from_secs(5);
|
||||
let execution_timeout = Duration::from_secs(10);
|
||||
let statement_timeout = Duration::from_secs(10);
|
||||
let meta_lease_ttl = Duration::from_secs(2);
|
||||
let idle_session_timeout = Duration::from_secs(0);
|
||||
let follower_client = create_postgres_client(
|
||||
Some(table_name),
|
||||
execution_timeout,
|
||||
idle_session_timeout,
|
||||
statement_timeout,
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
let (tx, mut rx) = broadcast::channel(100);
|
||||
let follower_pg_election = PgElection {
|
||||
leader_value: "test_follower".to_string(),
|
||||
client: follower_client,
|
||||
pg_client: RwLock::new(follower_client),
|
||||
is_leader: AtomicBool::new(false),
|
||||
leader_infancy: AtomicBool::new(true),
|
||||
leader_watcher: tx,
|
||||
store_key_prefix: uuid.clone(),
|
||||
candidate_lease_ttl_secs,
|
||||
meta_lease_ttl_secs: 2,
|
||||
sql_set: ElectionSqlFactory::new(28322, table_name, 2).build(),
|
||||
candidate_lease_ttl,
|
||||
meta_lease_ttl,
|
||||
sql_set: ElectionSqlFactory::new(28322, table_name).build(),
|
||||
};
|
||||
|
||||
let leader_client = create_postgres_client(Some(table_name)).await.unwrap();
|
||||
let leader_client = create_postgres_client(
|
||||
Some(table_name),
|
||||
execution_timeout,
|
||||
idle_session_timeout,
|
||||
statement_timeout,
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
let (tx, _) = broadcast::channel(100);
|
||||
let leader_pg_election = PgElection {
|
||||
leader_value: "test_leader".to_string(),
|
||||
client: leader_client,
|
||||
pg_client: RwLock::new(leader_client),
|
||||
is_leader: AtomicBool::new(false),
|
||||
leader_infancy: AtomicBool::new(true),
|
||||
leader_watcher: tx,
|
||||
store_key_prefix: uuid,
|
||||
candidate_lease_ttl_secs,
|
||||
meta_lease_ttl_secs: 2,
|
||||
sql_set: ElectionSqlFactory::new(28322, table_name, 2).build(),
|
||||
candidate_lease_ttl,
|
||||
meta_lease_ttl,
|
||||
sql_set: ElectionSqlFactory::new(28322, table_name).build(),
|
||||
};
|
||||
|
||||
leader_pg_election
|
||||
.client
|
||||
.pg_client
|
||||
.read()
|
||||
.await
|
||||
.query(&leader_pg_election.sql_set.campaign, &[])
|
||||
.await
|
||||
.unwrap();
|
||||
@@ -1309,11 +1541,41 @@ mod tests {
|
||||
|
||||
// Clean up
|
||||
leader_pg_election
|
||||
.client
|
||||
.pg_client
|
||||
.read()
|
||||
.await
|
||||
.query(&leader_pg_election.sql_set.step_down, &[])
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
drop_table(&follower_pg_election.client, table_name).await;
|
||||
drop_table(&follower_pg_election, table_name).await;
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_idle_session_timeout() {
|
||||
maybe_skip_postgres_integration_test!();
|
||||
common_telemetry::init_default_ut_logging();
|
||||
let execution_timeout = Duration::from_secs(10);
|
||||
let statement_timeout = Duration::from_secs(10);
|
||||
let idle_session_timeout = Duration::from_secs(1);
|
||||
let mut client = create_postgres_client(
|
||||
None,
|
||||
execution_timeout,
|
||||
idle_session_timeout,
|
||||
statement_timeout,
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
tokio::time::sleep(Duration::from_millis(1100)).await;
|
||||
// Wait for the idle session timeout.
|
||||
let err = client.query("SELECT 1", &[]).await.unwrap_err();
|
||||
assert_matches!(err, error::Error::PostgresExecution { .. });
|
||||
let error::Error::PostgresExecution { error, .. } = err else {
|
||||
panic!("Expected PostgresExecution error");
|
||||
};
|
||||
assert!(error.is_closed());
|
||||
// Reset the client and try again.
|
||||
client.reset_client().await.unwrap();
|
||||
let _ = client.query("SELECT 1", &[]).await.unwrap();
|
||||
}
|
||||
}
|
||||
|
||||
@@ -748,21 +748,31 @@ pub enum Error {
},

#[cfg(feature = "pg_kvbackend")]
#[snafu(display("Failed to execute via postgres"))]
#[snafu(display("Failed to execute via postgres, sql: {}", sql))]
PostgresExecution {
#[snafu(source)]
error: tokio_postgres::Error,
sql: String,
#[snafu(implicit)]
location: Location,
},

#[cfg(feature = "pg_kvbackend")]
#[snafu(display("Failed to connect to Postgres"))]
ConnectPostgres {
#[snafu(source)]
error: tokio_postgres::Error,
#[snafu(display("Failed to get Postgres client"))]
GetPostgresClient {
#[snafu(implicit)]
location: Location,
#[snafu(source)]
error: deadpool::managed::PoolError<tokio_postgres::Error>,
},

#[cfg(feature = "pg_kvbackend")]
#[snafu(display("Sql execution timeout, sql: {}, duration: {:?}", sql, duration))]
SqlExecutionTimeout {
#[snafu(implicit)]
location: Location,
sql: String,
duration: std::time::Duration,
},

#[cfg(feature = "pg_kvbackend")]
@@ -1005,9 +1015,10 @@ impl ErrorExt for Error {
Error::LookupPeer { source, .. } => source.status_code(),
#[cfg(feature = "pg_kvbackend")]
Error::CreatePostgresPool { .. }
| Error::GetPostgresClient { .. }
| Error::GetPostgresConnection { .. }
| Error::PostgresExecution { .. }
| Error::ConnectPostgres { .. } => StatusCode::Internal,
| Error::SqlExecutionTimeout { .. } => StatusCode::Internal,
#[cfg(feature = "mysql_kvbackend")]
Error::MySqlExecution { .. }
| Error::CreateMySqlPool { .. }
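The new `SqlExecutionTimeout` variant carries the statement text and the elapsed budget. A sketch of how such an error is typically produced, wrapping the query future in `tokio::time::timeout`; the `ExecError` enum below is a stand-in for the real snafu-generated type, not the crate's API:

use std::time::Duration;

#[derive(Debug)]
enum ExecError {
    Timeout { sql: String, duration: Duration },
    Other(String),
}

async fn run_with_timeout<F, T>(sql: &str, timeout: Duration, fut: F) -> Result<T, ExecError>
where
    F: std::future::Future<Output = Result<T, String>>,
{
    match tokio::time::timeout(timeout, fut).await {
        // The query finished before the deadline; propagate its own result.
        Ok(res) => res.map_err(ExecError::Other),
        // The deadline elapsed first; report which statement timed out and the budget used.
        Err(_elapsed) => Err(ExecError::Timeout {
            sql: sql.to_string(),
            duration: timeout,
        }),
    }
}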
@@ -22,7 +22,7 @@ use std::time::Duration;
use clap::ValueEnum;
use common_base::readable_size::ReadableSize;
use common_base::Plugins;
use common_config::Configurable;
use common_config::{Configurable, DEFAULT_DATA_HOME};
use common_greptimedb_telemetry::GreptimeDBTelemetryTask;
use common_meta::cache_invalidator::CacheInvalidatorRef;
use common_meta::ddl::ProcedureExecutorRef;
@@ -46,6 +46,7 @@ use common_telemetry::{error, info, warn};
use common_wal::config::MetasrvWalConfig;
use serde::{Deserialize, Serialize};
use servers::export_metrics::ExportMetricsOption;
use servers::grpc::GrpcOptions;
use servers::http::HttpOptions;
use snafu::{OptionExt, ResultExt};
use store_api::storage::RegionId;
@@ -73,7 +74,7 @@ use crate::state::{become_follower, become_leader, StateRef};

pub const TABLE_ID_SEQ: &str = "table_id";
pub const FLOW_ID_SEQ: &str = "flow_id";
pub const METASRV_HOME: &str = "./greptimedb_data/metasrv";
pub const METASRV_DATA_DIR: &str = "metasrv";

// The datastores that implements metadata kvbackend.
#[derive(Clone, Debug, PartialEq, Serialize, Default, Deserialize, ValueEnum)]
@@ -96,8 +97,10 @@ pub enum BackendImpl {
#[serde(default)]
pub struct MetasrvOptions {
/// The address the server listens on.
#[deprecated(note = "Use grpc.bind_addr instead")]
pub bind_addr: String,
/// The address the server advertises to the clients.
#[deprecated(note = "Use grpc.server_addr instead")]
pub server_addr: String,
/// The address of the store, e.g., etcd.
pub store_addrs: Vec<String>,
@@ -112,6 +115,7 @@ pub struct MetasrvOptions {
/// If it's true, the region failover will be allowed even if the local WAL is used.
/// Note that this option is not recommended to be set to true, because it may lead to data loss during failover.
pub allow_region_failover_on_local_wal: bool,
pub grpc: GrpcOptions,
/// The HTTP server options.
pub http: HttpOptions,
/// The logging options.
@@ -166,8 +170,6 @@ impl fmt::Debug for MetasrvOptions {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let mut debug_struct = f.debug_struct("MetasrvOptions");
debug_struct
.field("bind_addr", &self.bind_addr)
.field("server_addr", &self.server_addr)
.field("store_addrs", &self.sanitize_store_addrs())
.field("selector", &self.selector)
.field("use_memory_store", &self.use_memory_store)
@@ -176,6 +178,7 @@ impl fmt::Debug for MetasrvOptions {
"allow_region_failover_on_local_wal",
&self.allow_region_failover_on_local_wal,
)
.field("grpc", &self.grpc)
.field("http", &self.http)
.field("logging", &self.logging)
.field("procedure", &self.procedure)
@@ -208,19 +211,21 @@ const DEFAULT_METASRV_ADDR_PORT: &str = "3002";
impl Default for MetasrvOptions {
fn default() -> Self {
Self {
bind_addr: format!("127.0.0.1:{}", DEFAULT_METASRV_ADDR_PORT),
// If server_addr is not set, the server will use the local ip address as the server address.
#[allow(deprecated)]
bind_addr: String::new(),
#[allow(deprecated)]
server_addr: String::new(),
store_addrs: vec!["127.0.0.1:2379".to_string()],
selector: SelectorType::default(),
use_memory_store: false,
enable_region_failover: false,
allow_region_failover_on_local_wal: false,
http: HttpOptions::default(),
logging: LoggingOptions {
dir: format!("{METASRV_HOME}/logs"),
grpc: GrpcOptions {
bind_addr: format!("127.0.0.1:{}", DEFAULT_METASRV_ADDR_PORT),
..Default::default()
},
http: HttpOptions::default(),
logging: LoggingOptions::default(),
procedure: ProcedureConfig {
max_retry_times: 12,
retry_delay: Duration::from_millis(500),
@@ -232,7 +237,7 @@ impl Default for MetasrvOptions {
failure_detector: PhiAccrualFailureDetectorOptions::default(),
datanode: DatanodeClientOptions::default(),
enable_telemetry: true,
data_home: METASRV_HOME.to_string(),
data_home: DEFAULT_DATA_HOME.to_string(),
wal: MetasrvWalConfig::default(),
export_metrics: ExportMetricsOption::default(),
store_key_prefix: String::new(),
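With `bind_addr` and `server_addr` deprecated on `MetasrvOptions`, addresses now live on the nested `GrpcOptions`, as the mock setups later in this diff also show. A sketch of constructing the options the new way; only the shown fields are taken from the diff, everything else falls back to `Default`:

let options = MetasrvOptions {
    grpc: GrpcOptions {
        bind_addr: "127.0.0.1:3002".to_string(),
        server_addr: "127.0.0.1:3002".to_string(),
        ..Default::default()
    },
    // The deprecated top-level bind_addr/server_addr are left at their defaults.
    ..Default::default()
};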
@@ -256,37 +261,6 @@ impl Configurable for MetasrvOptions {
}

impl MetasrvOptions {
/// Detect server address.
#[cfg(not(target_os = "android"))]
pub fn detect_server_addr(&mut self) {
if self.server_addr.is_empty() {
match local_ip_address::local_ip() {
Ok(ip) => {
let detected_addr = format!(
"{}:{}",
ip,
self.bind_addr
.split(':')
.nth(1)
.unwrap_or(DEFAULT_METASRV_ADDR_PORT)
);
info!("Using detected: {} as server address", detected_addr);
self.server_addr = detected_addr;
}
Err(e) => {
error!("Failed to detect local ip address: {}", e);
}
}
}
}

#[cfg(target_os = "android")]
pub fn detect_server_addr(&mut self) {
if self.server_addr.is_empty() {
common_telemetry::debug!("detect local IP is not supported on Android");
}
}

fn sanitize_store_addrs(&self) -> Vec<String> {
self.store_addrs
.iter()
@@ -585,6 +559,7 @@ impl Metasrv {
if let Err(e) = res {
warn!(e; "Metasrv election error");
}
election.reset_campaign().await;
info!("Metasrv re-initiate election");
}
info!("Metasrv stopped");
@@ -641,7 +616,7 @@ impl Metasrv {
pub fn node_info(&self) -> MetasrvNodeInfo {
let build_info = common_version::build_info();
MetasrvNodeInfo {
addr: self.options().server_addr.clone(),
addr: self.options().grpc.server_addr.clone(),
version: build_info.version.to_string(),
git_commit: build_info.commit_short.to_string(),
start_time_ms: self.start_time_ms(),
@@ -733,7 +708,7 @@ impl Metasrv {

#[inline]
pub fn new_ctx(&self) -> Context {
let server_addr = self.options().server_addr.clone();
let server_addr = self.options().grpc.server_addr.clone();
let in_memory = self.in_memory.clone();
let kv_backend = self.kv_backend.clone();
let leader_cached_kv_backend = self.leader_cached_kv_backend.clone();
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use std::path::Path;
use std::sync::atomic::AtomicBool;
use std::sync::{Arc, Mutex, RwLock};

@@ -55,7 +56,7 @@ use crate::handler::{HeartbeatHandlerGroupBuilder, HeartbeatMailbox, Pushers};
use crate::lease::MetaPeerLookupService;
use crate::metasrv::{
ElectionRef, Metasrv, MetasrvInfo, MetasrvOptions, RegionStatAwareSelectorRef, SelectTarget,
SelectorContext, SelectorRef, FLOW_ID_SEQ, TABLE_ID_SEQ,
SelectorContext, SelectorRef, FLOW_ID_SEQ, METASRV_DATA_DIR, TABLE_ID_SEQ,
};
use crate::procedure::region_migration::manager::RegionMigrationManager;
use crate::procedure::region_migration::DefaultContextFactory;
@@ -178,8 +179,8 @@ impl MetasrvBuilder {
let in_memory = in_memory.unwrap_or_else(|| Arc::new(MemoryKvBackend::new()));

let state = Arc::new(RwLock::new(match election {
None => State::leader(options.server_addr.to_string(), true),
Some(_) => State::follower(options.server_addr.to_string()),
None => State::leader(options.grpc.server_addr.to_string(), true),
Some(_) => State::follower(options.grpc.server_addr.to_string()),
}));

let leader_cached_kv_backend = Arc::new(LeaderCachedKvBackend::new(
@@ -202,7 +203,7 @@ impl MetasrvBuilder {
));
let maintenance_mode_manager = Arc::new(MaintenanceModeManager::new(kv_backend.clone()));
let selector_ctx = SelectorContext {
server_addr: options.server_addr.clone(),
server_addr: options.grpc.server_addr.clone(),
datanode_lease_secs: distributed_time_constants::DATANODE_LEASE_SECS,
flownode_lease_secs: distributed_time_constants::FLOWNODE_LEASE_SECS,
kv_backend: kv_backend.clone(),
@@ -271,7 +272,7 @@ impl MetasrvBuilder {
let cache_invalidator = Arc::new(MetasrvCacheInvalidator::new(
mailbox.clone(),
MetasrvInfo {
server_addr: options.server_addr.clone(),
server_addr: options.grpc.server_addr.clone(),
},
));
let peer_lookup_service = Arc::new(MetaPeerLookupService::new(meta_peer_client.clone()));
@@ -314,7 +315,7 @@ impl MetasrvBuilder {
memory_region_keeper.clone(),
region_failure_detector_controller.clone(),
mailbox.clone(),
options.server_addr.clone(),
options.grpc.server_addr.clone(),
cache_invalidator.clone(),
),
));
@@ -389,7 +390,7 @@ impl MetasrvBuilder {
client: Arc::new(kafka_client),
table_metadata_manager: table_metadata_manager.clone(),
leader_region_registry: leader_region_registry.clone(),
server_addr: options.server_addr.clone(),
server_addr: options.grpc.server_addr.clone(),
mailbox: mailbox.clone(),
};
let wal_prune_manager = WalPruneManager::new(
@@ -436,7 +437,10 @@ impl MetasrvBuilder {
};

let enable_telemetry = options.enable_telemetry;
let metasrv_home = options.data_home.to_string();
let metasrv_home = Path::new(&options.data_home)
.join(METASRV_DATA_DIR)
.to_string_lossy()
.to_string();

Ok(Metasrv {
state,
@@ -26,6 +26,7 @@ use common_meta::kv_backend::etcd::EtcdStore;
use common_meta::kv_backend::memory::MemoryKvBackend;
use common_meta::kv_backend::{KvBackendRef, ResettableKvBackendRef};
use hyper_util::rt::TokioIo;
use servers::grpc::GrpcOptions;
use tonic::codec::CompressionEncoding;
use tower::service_fn;

@@ -47,7 +48,10 @@ pub async fn mock_with_memstore() -> MockInfo {
let in_memory = Arc::new(MemoryKvBackend::new());
mock(
MetasrvOptions {
server_addr: "127.0.0.1:3002".to_string(),
grpc: GrpcOptions {
server_addr: "127.0.0.1:3002".to_string(),
..Default::default()
},
..Default::default()
},
kv_backend,
@@ -62,7 +66,10 @@ pub async fn mock_with_etcdstore(addr: &str) -> MockInfo {
let kv_backend = EtcdStore::with_endpoints([addr], 128).await.unwrap();
mock(
MetasrvOptions {
server_addr: "127.0.0.1:3002".to_string(),
grpc: GrpcOptions {
server_addr: "127.0.0.1:3002".to_string(),
..Default::default()
},
..Default::default()
},
kv_backend,
@@ -80,7 +87,7 @@ pub async fn mock(
datanode_clients: Option<Arc<NodeClients>>,
in_memory: Option<ResettableKvBackendRef>,
) -> MockInfo {
let server_addr = opts.server_addr.clone();
let server_addr = opts.grpc.server_addr.clone();
let table_metadata_manager = Arc::new(TableMetadataManager::new(kv_backend.clone()));

table_metadata_manager.init().await.unwrap();
@@ -88,7 +88,7 @@ impl cluster_server::Cluster for Metasrv {
return Ok(Response::new(resp));
}

let leader_addr = &self.options().server_addr;
let leader_addr = &self.options().grpc.server_addr;
let (leader, followers) = match self.election() {
Some(election) => {
let nodes = election.all_candidates().await?;

@@ -190,6 +190,7 @@ mod tests {
use api::v1::meta::*;
use common_meta::kv_backend::memory::MemoryKvBackend;
use common_telemetry::tracing_context::W3cTrace;
use servers::grpc::GrpcOptions;
use tonic::IntoRequest;

use super::get_node_id;
@@ -203,7 +204,10 @@ mod tests {
let metasrv = MetasrvBuilder::new()
.kv_backend(kv_backend)
.options(MetasrvOptions {
server_addr: "127.0.0.1:3002".to_string(),
grpc: GrpcOptions {
server_addr: "127.0.0.1:3002".to_string(),
..Default::default()
},
..Default::default()
})
.build()
@@ -216,7 +220,7 @@ mod tests {

let res = metasrv.ask_leader(req.into_request()).await.unwrap();
let res = res.into_inner();
assert_eq!(metasrv.options().bind_addr, res.leader.unwrap().addr);
assert_eq!(metasrv.options().grpc.server_addr, res.leader.unwrap().addr);
}

#[test]
@@ -282,14 +282,15 @@ mod tests {
use std::sync::Arc;

use api::v1::SemanticType;
use common_function::function_registry::FUNCTION_REGISTRY;
use common_function::scalars::udf::create_udf;
use common_function::function::FunctionRef;
use common_function::function_factory::ScalarFunctionFactory;
use common_function::scalars::matches::MatchesFunction;
use common_function::scalars::matches_term::MatchesTermFunction;
use datafusion::functions::string::lower;
use datafusion_common::Column;
use datafusion_expr::expr::ScalarFunction;
use datafusion_expr::ScalarUDF;
use datatypes::schema::ColumnSchema;
use session::context::QueryContext;
use store_api::metadata::{ColumnMetadata, RegionMetadataBuilder};
use store_api::storage::RegionId;

@@ -317,19 +318,17 @@ mod tests {
}

fn matches_func() -> Arc<ScalarUDF> {
Arc::new(create_udf(
FUNCTION_REGISTRY.get_function("matches").unwrap(),
QueryContext::arc(),
Default::default(),
))
Arc::new(
ScalarFunctionFactory::from(Arc::new(MatchesFunction) as FunctionRef)
.provide(Default::default()),
)
}

fn matches_term_func() -> Arc<ScalarUDF> {
Arc::new(create_udf(
FUNCTION_REGISTRY.get_function("matches_term").unwrap(),
QueryContext::arc(),
Default::default(),
))
Arc::new(
ScalarFunctionFactory::from(Arc::new(MatchesTermFunction) as FunctionRef)
.provide(Default::default()),
)
}

#[test]
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use api::v1::flow::FlowRequestHeader;
use api::v1::flow::{AdjustFlow, FlowRequestHeader};
use async_trait::async_trait;
use common_error::ext::BoxedError;
use common_function::handlers::FlowServiceHandler;
@@ -22,6 +22,7 @@ use common_query::error::Result;
use common_telemetry::tracing_context::TracingContext;
use futures::stream::FuturesUnordered;
use futures::StreamExt;
use serde_json::json;
use session::context::QueryContextRef;
use snafu::{OptionExt, ResultExt};
@@ -57,9 +58,96 @@ impl FlowServiceHandler for FlowServiceOperator {
) -> Result<api::v1::flow::FlowResponse> {
self.flush_inner(catalog, flow, ctx).await
}

async fn adjust(
&self,
catalog: &str,
flow: &str,
min_run_interval_secs: u64,
max_filter_num_per_query: usize,
ctx: QueryContextRef,
) -> Result<api::v1::flow::FlowResponse> {
self.adjust_inner(
catalog,
flow,
min_run_interval_secs,
max_filter_num_per_query,
ctx,
)
.await
}
}

impl FlowServiceOperator {
async fn adjust_inner(
&self,
catalog: &str,
flow: &str,
min_run_interval_secs: u64,
max_filter_num_per_query: usize,
ctx: QueryContextRef,
) -> Result<api::v1::flow::FlowResponse> {
let id = self
.flow_metadata_manager
.flow_name_manager()
.get(catalog, flow)
.await
.map_err(BoxedError::new)
.context(common_query::error::ExecuteSnafu)?
.context(common_meta::error::FlowNotFoundSnafu {
flow_name: format!("{}.{}", catalog, flow),
})
.map_err(BoxedError::new)
.context(common_query::error::ExecuteSnafu)?
.flow_id();

let all_flownode_peers = self
.flow_metadata_manager
.flow_route_manager()
.routes(id)
.await
.map_err(BoxedError::new)
.context(common_query::error::ExecuteSnafu)?;

// order of flownodes doesn't matter here
let all_flow_nodes = FuturesUnordered::from_iter(
all_flownode_peers
.iter()
.map(|(_key, peer)| self.node_manager.flownode(peer.peer())),
)
.collect::<Vec<_>>()
.await;

// TODO(discord9): use proper type for flow options
let options = json!({
"min_run_interval_secs": min_run_interval_secs,
"max_filter_num_per_query": max_filter_num_per_query,
});

for node in all_flow_nodes {
let _res = {
use api::v1::flow::{flow_request, FlowRequest};
let flush_req = FlowRequest {
header: Some(FlowRequestHeader {
tracing_context: TracingContext::from_current_span().to_w3c(),
query_context: Some(
common_meta::rpc::ddl::QueryContext::from(ctx.clone()).into(),
),
}),
body: Some(flow_request::Body::Adjust(AdjustFlow {
flow_id: Some(api::v1::FlowId { id }),
options: options.to_string(),
})),
};
node.handle(flush_req)
.await
.map_err(BoxedError::new)
.context(common_query::error::ExecuteSnafu)?
};
}
Ok(Default::default())
}

/// Flush the flownodes according to the flow id.
async fn flush_inner(
&self,
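A sketch of calling the new `adjust` entry point from the handler side; the catalog, flow name, and knob values are placeholders:

async fn throttle_flow(
    handler: &dyn FlowServiceHandler,
    ctx: QueryContextRef,
) -> common_query::error::Result<()> {
    // Ask every flownode hosting `my_flow` to run at most once every 30 seconds
    // and to cap generated filters at 20 per query.
    let _resp = handler
        .adjust("greptime", "my_flow", 30, 20, ctx)
        .await?;
    Ok(())
}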
@@ -25,8 +25,7 @@ use async_trait::async_trait;
use common_base::Plugins;
use common_catalog::consts::is_readonly_schema;
use common_error::ext::BoxedError;
use common_function::function::FunctionRef;
use common_function::scalars::aggregate::AggregateFunctionMetaRef;
use common_function::function_factory::ScalarFunctionFactory;
use common_query::{Output, OutputData, OutputMeta};
use common_recordbatch::adapter::RecordBatchStreamAdapter;
use common_recordbatch::{EmptyRecordBatchStream, SendableRecordBatchStream};
@@ -35,7 +34,9 @@ use datafusion::physical_plan::analyze::AnalyzeExec;
use datafusion::physical_plan::coalesce_partitions::CoalescePartitionsExec;
use datafusion::physical_plan::ExecutionPlan;
use datafusion_common::ResolvedTableReference;
use datafusion_expr::{DmlStatement, LogicalPlan as DfLogicalPlan, LogicalPlan, WriteOp};
use datafusion_expr::{
AggregateUDF, DmlStatement, LogicalPlan as DfLogicalPlan, LogicalPlan, WriteOp,
};
use datatypes::prelude::VectorRef;
use datatypes::schema::Schema;
use futures_util::StreamExt;
@@ -454,14 +455,14 @@ impl QueryEngine for DatafusionQueryEngine {
/// `SELECT "my_UDAF"(x)` will look for an aggregate named `"my_UDAF"`
///
/// So it's better to make UDAF name lowercase when creating one.
fn register_aggregate_function(&self, func: AggregateFunctionMetaRef) {
self.state.register_aggregate_function(func);
fn register_aggregate_function(&self, func: AggregateUDF) {
self.state.register_aggr_function(func);
}

/// Register an UDF function.
/// Register an scalar function.
/// Will override if the function with same name is already registered.
fn register_function(&self, func: FunctionRef) {
self.state.register_function(func);
fn register_scalar_function(&self, func: ScalarFunctionFactory) {
self.state.register_scalar_function(func);
}

fn read_table(&self, table: TableRef) -> Result<DataFrame> {
@@ -18,12 +18,7 @@ use std::sync::Arc;

use arrow_schema::DataType;
use catalog::table_source::DfTableSourceProvider;
use common_function::aggr::{
GeoPathAccumulator, HllState, UddSketchState, GEO_PATH_NAME, HLL_MERGE_NAME, HLL_NAME,
UDDSKETCH_MERGE_NAME, UDDSKETCH_STATE_NAME,
};
use common_function::scalars::udf::create_udf;
use common_query::logical_plan::create_aggregate_function;
use common_function::function::FunctionContext;
use datafusion::common::TableReference;
use datafusion::datasource::cte_worktable::CteWorkTable;
use datafusion::datasource::file_format::{format_as_file_type, FileFormatFactory};
@@ -151,38 +146,21 @@ impl ContextProvider for DfContextProviderAdapter {
}

fn get_function_meta(&self, name: &str) -> Option<Arc<ScalarUDF>> {
self.engine_state.udf_function(name).map_or_else(
self.engine_state.scalar_function(name).map_or_else(
|| self.session_state.scalar_functions().get(name).cloned(),
|func| {
Some(Arc::new(create_udf(
func,
self.query_ctx.clone(),
self.engine_state.function_state(),
)))
Some(Arc::new(func.provide(FunctionContext {
query_ctx: self.query_ctx.clone(),
state: self.engine_state.function_state(),
})))
},
)
}

fn get_aggregate_meta(&self, name: &str) -> Option<Arc<AggregateUDF>> {
if name == UDDSKETCH_STATE_NAME {
return Some(Arc::new(UddSketchState::state_udf_impl()));
} else if name == UDDSKETCH_MERGE_NAME {
return Some(Arc::new(UddSketchState::merge_udf_impl()));
} else if name == HLL_NAME {
return Some(Arc::new(HllState::state_udf_impl()));
} else if name == HLL_MERGE_NAME {
return Some(Arc::new(HllState::merge_udf_impl()));
} else if name == GEO_PATH_NAME {
return Some(Arc::new(GeoPathAccumulator::udf_impl()));
}

self.engine_state.aggregate_function(name).map_or_else(
self.engine_state.aggr_function(name).map_or_else(
|| self.session_state.aggregate_functions().get(name).cloned(),
|func| {
Some(Arc::new(
create_aggregate_function(func.name(), func.args_count(), func.create()).into(),
))
},
|func| Some(Arc::new(func)),
)
}

@@ -213,13 +191,13 @@ impl ContextProvider for DfContextProviderAdapter {
}

fn udf_names(&self) -> Vec<String> {
let mut names = self.engine_state.udf_names();
let mut names = self.engine_state.scalar_names();
names.extend(self.session_state.scalar_functions().keys().cloned());
names
}

fn udaf_names(&self) -> Vec<String> {
let mut names = self.engine_state.udaf_names();
let mut names = self.engine_state.aggr_names();
names.extend(self.session_state.aggregate_functions().keys().cloned());
names
}
@@ -22,14 +22,13 @@ use std::sync::Arc;
use async_trait::async_trait;
use catalog::CatalogManagerRef;
use common_base::Plugins;
use common_function::function::FunctionRef;
use common_function::function_factory::ScalarFunctionFactory;
use common_function::function_registry::FUNCTION_REGISTRY;
use common_function::handlers::{
FlowServiceHandlerRef, ProcedureServiceHandlerRef, TableMutationHandlerRef,
};
use common_function::scalars::aggregate::AggregateFunctionMetaRef;
use common_query::Output;
use datafusion_expr::LogicalPlan;
use datafusion_expr::{AggregateUDF, LogicalPlan};
use datatypes::schema::Schema;
pub use default_serializer::{DefaultPlanDecoder, DefaultSerializer};
use session::context::QueryContextRef;
@@ -79,11 +78,11 @@ pub trait QueryEngine: Send + Sync {
///
/// # Panics
/// Will panic if the function with same name is already registered.
fn register_aggregate_function(&self, func: AggregateFunctionMetaRef);
fn register_aggregate_function(&self, func: AggregateUDF);

/// Register a SQL function.
/// Register a scalar function.
/// Will override if the function with same name is already registered.
fn register_function(&self, func: FunctionRef);
fn register_scalar_function(&self, func: ScalarFunctionFactory);

/// Create a DataFrame from a table.
fn read_table(&self, table: TableRef) -> Result<DataFrame>;
@@ -154,8 +153,8 @@ impl QueryEngineFactory {

/// Register all functions implemented by GreptimeDB
fn register_functions(query_engine: &Arc<DatafusionQueryEngine>) {
for func in FUNCTION_REGISTRY.functions() {
query_engine.register_function(func);
for func in FUNCTION_REGISTRY.scalar_functions() {
query_engine.register_scalar_function(func);
}

for accumulator in FUNCTION_REGISTRY.aggregate_functions() {
@@ -15,9 +15,8 @@
use std::sync::Arc;

use common_error::ext::BoxedError;
use common_function::aggr::{GeoPathAccumulator, HllState, UddSketchState};
use common_function::function::FunctionContext;
use common_function::function_registry::FUNCTION_REGISTRY;
use common_function::scalars::udf::create_udf;
use common_query::error::RegisterUdfSnafu;
use common_query::logical_plan::SubstraitPlanDecoder;
use datafusion::catalog::CatalogProviderList;
@@ -124,43 +123,46 @@ impl SubstraitPlanDecoder for DefaultPlanDecoder {
// if they have the same name as the default UDFs or their alias.
// e.g. The default UDF `to_char()` has an alias `date_format()`, if we register a UDF with the name `date_format()`
// before we build the session state, the UDF will be lost.
for func in FUNCTION_REGISTRY.functions() {
let udf = Arc::new(create_udf(
func.clone(),
self.query_ctx.clone(),
Default::default(),
));
for func in FUNCTION_REGISTRY.scalar_functions() {
let udf = func.provide(FunctionContext {
query_ctx: self.query_ctx.clone(),
state: Default::default(),
});
session_state
.register_udf(udf)
.register_udf(Arc::new(udf))
.context(RegisterUdfSnafu { name: func.name() })?;
let _ = session_state.register_udaf(Arc::new(UddSketchState::state_udf_impl()));
let _ = session_state.register_udaf(Arc::new(UddSketchState::merge_udf_impl()));
let _ = session_state.register_udaf(Arc::new(HllState::state_udf_impl()));
let _ = session_state.register_udaf(Arc::new(HllState::merge_udf_impl()));
let _ = session_state.register_udaf(Arc::new(GeoPathAccumulator::udf_impl()));
let _ = session_state.register_udaf(quantile_udaf());

let _ = session_state.register_udf(Arc::new(IDelta::<false>::scalar_udf()));
let _ = session_state.register_udf(Arc::new(IDelta::<true>::scalar_udf()));
let _ = session_state.register_udf(Arc::new(Rate::scalar_udf()));
let _ = session_state.register_udf(Arc::new(Increase::scalar_udf()));
let _ = session_state.register_udf(Arc::new(Delta::scalar_udf()));
let _ = session_state.register_udf(Arc::new(Resets::scalar_udf()));
let _ = session_state.register_udf(Arc::new(Changes::scalar_udf()));
let _ = session_state.register_udf(Arc::new(Deriv::scalar_udf()));
let _ = session_state.register_udf(Arc::new(Round::scalar_udf()));
let _ = session_state.register_udf(Arc::new(AvgOverTime::scalar_udf()));
let _ = session_state.register_udf(Arc::new(MinOverTime::scalar_udf()));
let _ = session_state.register_udf(Arc::new(MaxOverTime::scalar_udf()));
let _ = session_state.register_udf(Arc::new(SumOverTime::scalar_udf()));
let _ = session_state.register_udf(Arc::new(CountOverTime::scalar_udf()));
let _ = session_state.register_udf(Arc::new(LastOverTime::scalar_udf()));
let _ = session_state.register_udf(Arc::new(AbsentOverTime::scalar_udf()));
let _ = session_state.register_udf(Arc::new(PresentOverTime::scalar_udf()));
let _ = session_state.register_udf(Arc::new(StddevOverTime::scalar_udf()));
let _ = session_state.register_udf(Arc::new(StdvarOverTime::scalar_udf()));
// TODO(ruihang): add quantile_over_time, predict_linear, holt_winters, round
}

for func in FUNCTION_REGISTRY.aggregate_functions() {
let name = func.name().to_string();
session_state
.register_udaf(Arc::new(func))
.context(RegisterUdfSnafu { name })?;
}

let _ = session_state.register_udaf(quantile_udaf());

let _ = session_state.register_udf(Arc::new(IDelta::<false>::scalar_udf()));
let _ = session_state.register_udf(Arc::new(IDelta::<true>::scalar_udf()));
let _ = session_state.register_udf(Arc::new(Rate::scalar_udf()));
let _ = session_state.register_udf(Arc::new(Increase::scalar_udf()));
let _ = session_state.register_udf(Arc::new(Delta::scalar_udf()));
let _ = session_state.register_udf(Arc::new(Resets::scalar_udf()));
let _ = session_state.register_udf(Arc::new(Changes::scalar_udf()));
let _ = session_state.register_udf(Arc::new(Deriv::scalar_udf()));
let _ = session_state.register_udf(Arc::new(Round::scalar_udf()));
let _ = session_state.register_udf(Arc::new(AvgOverTime::scalar_udf()));
let _ = session_state.register_udf(Arc::new(MinOverTime::scalar_udf()));
let _ = session_state.register_udf(Arc::new(MaxOverTime::scalar_udf()));
let _ = session_state.register_udf(Arc::new(SumOverTime::scalar_udf()));
let _ = session_state.register_udf(Arc::new(CountOverTime::scalar_udf()));
let _ = session_state.register_udf(Arc::new(LastOverTime::scalar_udf()));
let _ = session_state.register_udf(Arc::new(AbsentOverTime::scalar_udf()));
let _ = session_state.register_udf(Arc::new(PresentOverTime::scalar_udf()));
let _ = session_state.register_udf(Arc::new(StddevOverTime::scalar_udf()));
let _ = session_state.register_udf(Arc::new(StdvarOverTime::scalar_udf()));
// TODO(ruihang): add quantile_over_time, predict_linear, holt_winters, round

let logical_plan = DFLogicalSubstraitConvertor
.decode(message, session_state)
.await
@@ -19,11 +19,10 @@ use std::sync::{Arc, RwLock};
use async_trait::async_trait;
use catalog::CatalogManagerRef;
use common_base::Plugins;
use common_function::function::FunctionRef;
use common_function::function_factory::ScalarFunctionFactory;
use common_function::handlers::{
FlowServiceHandlerRef, ProcedureServiceHandlerRef, TableMutationHandlerRef,
};
use common_function::scalars::aggregate::AggregateFunctionMetaRef;
use common_function::state::FunctionState;
use common_telemetry::warn;
use datafusion::dataframe::DataFrame;
@@ -37,7 +36,7 @@ use datafusion::physical_optimizer::sanity_checker::SanityCheckPlan;
use datafusion::physical_optimizer::PhysicalOptimizerRule;
use datafusion::physical_plan::ExecutionPlan;
use datafusion::physical_planner::{DefaultPhysicalPlanner, ExtensionPlanner, PhysicalPlanner};
use datafusion_expr::LogicalPlan as DfLogicalPlan;
use datafusion_expr::{AggregateUDF, LogicalPlan as DfLogicalPlan};
use datafusion_optimizer::analyzer::count_wildcard_rule::CountWildcardRule;
use datafusion_optimizer::analyzer::{Analyzer, AnalyzerRule};
use datafusion_optimizer::optimizer::Optimizer;
@@ -70,8 +69,8 @@ pub struct QueryEngineState {
df_context: SessionContext,
catalog_manager: CatalogManagerRef,
function_state: Arc<FunctionState>,
udf_functions: Arc<RwLock<HashMap<String, FunctionRef>>>,
aggregate_functions: Arc<RwLock<HashMap<String, AggregateFunctionMetaRef>>>,
scalar_functions: Arc<RwLock<HashMap<String, ScalarFunctionFactory>>>,
aggr_functions: Arc<RwLock<HashMap<String, AggregateUDF>>>,
extension_rules: Vec<Arc<dyn ExtensionAnalyzerRule + Send + Sync>>,
plugins: Plugins,
}
@@ -186,10 +185,10 @@ impl QueryEngineState {
procedure_service_handler,
flow_service_handler,
}),
aggregate_functions: Arc::new(RwLock::new(HashMap::new())),
aggr_functions: Arc::new(RwLock::new(HashMap::new())),
extension_rules,
plugins,
udf_functions: Arc::new(RwLock::new(HashMap::new())),
scalar_functions: Arc::new(RwLock::new(HashMap::new())),
}
}
@@ -222,38 +221,28 @@ impl QueryEngineState {
self.session_state().optimize(&plan)
}

/// Register an udf function.
/// Will override if the function with same name is already registered.
pub fn register_function(&self, func: FunctionRef) {
let name = func.name().to_string();
let x = self
.udf_functions
.write()
.unwrap()
.insert(name.clone(), func);

if x.is_some() {
warn!("Already registered udf function '{name}'");
}
}

/// Retrieve the udf function by name
pub fn udf_function(&self, function_name: &str) -> Option<FunctionRef> {
self.udf_functions
/// Retrieve the scalar function by name
pub fn scalar_function(&self, function_name: &str) -> Option<ScalarFunctionFactory> {
self.scalar_functions
.read()
.unwrap()
.get(function_name)
.cloned()
}

/// Retrieve udf function names.
pub fn udf_names(&self) -> Vec<String> {
self.udf_functions.read().unwrap().keys().cloned().collect()
/// Retrieve scalar function names.
pub fn scalar_names(&self) -> Vec<String> {
self.scalar_functions
.read()
.unwrap()
.keys()
.cloned()
.collect()
}

/// Retrieve the aggregate function by name
pub fn aggregate_function(&self, function_name: &str) -> Option<AggregateFunctionMetaRef> {
self.aggregate_functions
pub fn aggr_function(&self, function_name: &str) -> Option<AggregateUDF> {
self.aggr_functions
.read()
.unwrap()
.get(function_name)
@@ -261,8 +250,8 @@ impl QueryEngineState {
}

/// Retrieve aggregate function names.
pub fn udaf_names(&self) -> Vec<String> {
self.aggregate_functions
pub fn aggr_names(&self) -> Vec<String> {
self.aggr_functions
.read()
.unwrap()
.keys()
@@ -270,6 +259,21 @@ impl QueryEngineState {
.collect()
}

/// Register an scalar function.
/// Will override if the function with same name is already registered.
pub fn register_scalar_function(&self, func: ScalarFunctionFactory) {
let name = func.name().to_string();
let x = self
.scalar_functions
.write()
.unwrap()
.insert(name.clone(), func);

if x.is_some() {
warn!("Already registered scalar function '{name}'");
}
}

/// Register an aggregate function.
///
/// # Panics
@@ -278,10 +282,10 @@ impl QueryEngineState {
/// Panicking consideration: currently the aggregated functions are all statically registered,
/// user cannot define their own aggregate functions on the fly. So we can panic here. If that
/// invariant is broken in the future, we should return an error instead of panicking.
pub fn register_aggregate_function(&self, func: AggregateFunctionMetaRef) {
let name = func.name();
pub fn register_aggr_function(&self, func: AggregateUDF) {
let name = func.name().to_string();
let x = self
.aggregate_functions
.aggr_functions
.write()
.unwrap()
.insert(name.clone(), func);
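A sketch of the renamed registration and lookup paths on `QueryEngineState`; `MyFunction` and `my_udaf()` stand in for a concrete `Function` implementation and a DataFusion `AggregateUDF`, and are not part of the diff:

// Scalar functions are now registered through a factory instead of a bare FunctionRef.
let factory = ScalarFunctionFactory::from(Arc::new(MyFunction) as FunctionRef);
state.register_scalar_function(factory);

// Aggregates are plain DataFusion AggregateUDFs now.
state.register_aggr_function(my_udaf());

// Lookups follow the same renaming.
assert!(state.scalar_function("my_function").is_some());
assert!(state.aggr_function(my_udaf().name()).is_some());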
@@ -16,11 +16,12 @@ use std::fmt::Debug;
use std::marker::PhantomData;
use std::sync::Arc;

use common_function::scalars::aggregate::AggregateFunctionMeta;
use common_macro::{as_aggr_func_creator, AggrFuncTypeStore};
use common_query::error::{CreateAccumulatorSnafu, Result as QueryResult};
use common_query::logical_plan::accumulator::AggrFuncTypeStore;
use common_query::logical_plan::{Accumulator, AggregateFunctionCreator};
use common_query::logical_plan::{
create_aggregate_function, Accumulator, AggregateFunctionCreator,
};
use common_query::prelude::*;
use common_recordbatch::{RecordBatch, RecordBatches};
use datatypes::prelude::*;
@@ -207,11 +208,14 @@ where

let engine = new_query_engine_with_table(testing_table);

engine.register_aggregate_function(Arc::new(AggregateFunctionMeta::new(
"my_sum",
1,
Arc::new(|| Arc::new(MySumAccumulatorCreator::default())),
)));
engine.register_aggregate_function(
create_aggregate_function(
"my_sum".to_string(),
1,
Arc::new(MySumAccumulatorCreator::default()),
)
.into(),
);

let sql = format!("select MY_SUM({column_name}) as my_sum from {table_name}");
let batches = exec_selection(engine, &sql).await;
@@ -66,6 +66,8 @@ pub struct GrpcOptions {
pub max_recv_message_size: ReadableSize,
/// Max gRPC sending(encoding) message size
pub max_send_message_size: ReadableSize,
/// Compression mode in Arrow Flight service.
pub flight_compression: FlightCompression,
pub runtime_size: usize,
#[serde(default = "Default::default")]
pub tls: TlsOption,
@@ -114,6 +116,7 @@ impl Default for GrpcOptions {
server_addr: String::new(),
max_recv_message_size: DEFAULT_MAX_GRPC_RECV_MESSAGE_SIZE,
max_send_message_size: DEFAULT_MAX_GRPC_SEND_MESSAGE_SIZE,
flight_compression: FlightCompression::ArrowIpc,
runtime_size: 8,
tls: TlsOption::default(),
}
@@ -132,6 +135,30 @@ impl GrpcOptions {
}
}

#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Eq, Default)]
#[serde(rename_all = "snake_case")]
pub enum FlightCompression {
/// Disable all compression in Arrow Flight service.
None,
/// Enable only transport layer compression (zstd).
Transport,
/// Enable only payload compression (lz4)
#[default]
ArrowIpc,
/// Enable all compression.
All,
}

impl FlightCompression {
pub fn transport_compression(&self) -> bool {
self == &FlightCompression::Transport || self == &FlightCompression::All
}

pub fn arrow_compression(&self) -> bool {
self == &FlightCompression::ArrowIpc || self == &FlightCompression::All
}
}

pub struct GrpcServer {
// states
shutdown_tx: Mutex<Option<Sender<()>>>,
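The two predicates split a single mode into a transport decision and a payload decision. A small sketch of how they are meant to be read; the zstd/lz4 wording reflects the doc comments above, and the encoder branch mirrors `FlightRecordBatchStream::new` later in this diff:

fn describe(mode: FlightCompression) -> (&'static str, &'static str) {
    let transport = if mode.transport_compression() {
        "zstd at the gRPC layer"
    } else {
        "no gRPC-level compression"
    };
    let payload = if mode.arrow_compression() {
        "lz4-compressed Arrow IPC buffers"
    } else {
        "uncompressed Arrow IPC buffers"
    };
    (transport, payload)
}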
@@ -45,7 +45,7 @@ use tonic::{Request, Response, Status, Streaming};
use crate::error::{InvalidParameterSnafu, ParseJsonSnafu, Result, ToJsonSnafu};
pub use crate::grpc::flight::stream::FlightRecordBatchStream;
use crate::grpc::greptime_handler::{get_request_type, GreptimeRequestHandler};
use crate::grpc::TonicResult;
use crate::grpc::{FlightCompression, TonicResult};
use crate::http::header::constants::GREPTIME_DB_HEADER_NAME;
use crate::http::AUTHORIZATION_HEADER;
use crate::{error, hint_headers};
@@ -195,9 +195,14 @@ impl FlightCraft for GreptimeRequestHandler {
protocol = "grpc",
request_type = get_request_type(&request)
);
let flight_compression = self.flight_compression;
async {
let output = self.handle_request(request, hints).await?;
let stream = to_flight_data_stream(output, TracingContext::from_current_span());
let stream = to_flight_data_stream(
output,
TracingContext::from_current_span(),
flight_compression,
);
Ok(Response::new(stream))
}
.trace(span)
@@ -365,14 +370,16 @@ impl Stream for PutRecordBatchRequestStream {
fn to_flight_data_stream(
output: Output,
tracing_context: TracingContext,
flight_compression: FlightCompression,
) -> TonicStream<FlightData> {
match output.data {
OutputData::Stream(stream) => {
let stream = FlightRecordBatchStream::new(stream, tracing_context);
let stream = FlightRecordBatchStream::new(stream, tracing_context, flight_compression);
Box::pin(stream) as _
}
OutputData::RecordBatches(x) => {
let stream = FlightRecordBatchStream::new(x.as_stream(), tracing_context);
let stream =
FlightRecordBatchStream::new(x.as_stream(), tracing_context, flight_compression);
Box::pin(stream) as _
}
OutputData::AffectedRows(rows) => {
@@ -30,6 +30,7 @@ use tokio::task::JoinHandle;

use crate::error;
use crate::grpc::flight::TonicResult;
use crate::grpc::FlightCompression;

#[pin_project(PinnedDrop)]
pub struct FlightRecordBatchStream {
@@ -41,18 +42,27 @@ pub struct FlightRecordBatchStream {
}

impl FlightRecordBatchStream {
pub fn new(recordbatches: SendableRecordBatchStream, tracing_context: TracingContext) -> Self {
pub fn new(
recordbatches: SendableRecordBatchStream,
tracing_context: TracingContext,
compression: FlightCompression,
) -> Self {
let (tx, rx) = mpsc::channel::<TonicResult<FlightMessage>>(1);
let join_handle = common_runtime::spawn_global(async move {
Self::flight_data_stream(recordbatches, tx)
.trace(tracing_context.attach(info_span!("flight_data_stream")))
.await
});
let encoder = if compression.arrow_compression() {
FlightEncoder::default()
} else {
FlightEncoder::with_compression_disabled()
};
Self {
rx,
join_handle,
done: false,
encoder: FlightEncoder::default(),
encoder,
}
}

@@ -161,7 +171,11 @@ mod test {
let recordbatches = RecordBatches::try_new(schema.clone(), vec![recordbatch.clone()])
.unwrap()
.as_stream();
let mut stream = FlightRecordBatchStream::new(recordbatches, TracingContext::default());
let mut stream = FlightRecordBatchStream::new(
recordbatches,
TracingContext::default(),
FlightCompression::default(),
);

let mut raw_data = Vec::with_capacity(2);
raw_data.push(stream.next().await.unwrap().unwrap());