Mirror of https://github.com/GreptimeTeam/greptimedb.git (synced 2026-01-05 12:52:57 +00:00)

Compare commits: v0.11.0-ni ... windows_pd

80 Commits
SHA1:
334dbee590
f33b378e45
267941bbb5
074846bbc2
88d46a38ae
de0beabf34
68dd2916fb
d51b65a8bf
2082c4b6e4
c623404fff
fa3b7ed5ea
8ece853076
4245bff8f2
3d4121aefb
1910d71cb3
a578eea801
6bf574f098
a4d61bcaf1
7ea8a44d3a
2d6f63a504
422d18da8b
66f0581f5b
c9ad8c7101
2107737db1
548e1988ab
218236cc5b
f04d380259
fa773cf480
9b4e8555e2
c6b7caa2ec
58d6982c93
e662c241e6
266919c226
7d1bcc9d49
18e8c45384
c33cf59398
421088a868
d821dc5a3e
bfc777e6ac
8a5384697b
d0245473a9
043d0bd7c2
acedff030b
88f7075a2a
54698325b6
5ffda7e971
f82af15eba
9d7fea902e
358d5e1d63
579059d99f
53d55c0b6b
bef6896280
4b4c6dbb66
e8e9526738
fee75a1fad
b8a78b7838
2137c53274
03ad6e2a8d
d53fbcb936
8c1959c580
e2a41ccaec
a8012147ab
60f8dbf7f0
9da2e17d0e
1a8e77a480
e1e39993f7
a30d918df2
2c4ac76754
a6893aad42
d91517688a
3d1b8c4fac
7c69ca0502
03a28320d6
ce86ba3425
2fcb95f50a
1b642ea6a9
b35221ccb6
bac7e7bac9
903da8f4cb
c0f498b00c
@@ -54,7 +54,7 @@ runs:
PROFILE_TARGET: ${{ inputs.cargo-profile == 'dev' && 'debug' || inputs.cargo-profile }}
with:
artifacts-dir: ${{ inputs.artifacts-dir }}
target-file: ./target/$PROFILE_TARGET/greptime
target-files: ./target/$PROFILE_TARGET/greptime
version: ${{ inputs.version }}
working-dir: ${{ inputs.working-dir }}

@@ -72,6 +72,6 @@ runs:
if: ${{ inputs.build-android-artifacts == 'true' }}
with:
artifacts-dir: ${{ inputs.artifacts-dir }}
target-file: ./target/aarch64-linux-android/release/greptime
target-files: ./target/aarch64-linux-android/release/greptime
version: ${{ inputs.version }}
working-dir: ${{ inputs.working-dir }}

@@ -90,5 +90,5 @@ runs:
uses: ./.github/actions/upload-artifacts
with:
artifacts-dir: ${{ inputs.artifacts-dir }}
target-file: target/${{ inputs.arch }}/${{ inputs.cargo-profile }}/greptime
target-files: target/${{ inputs.arch }}/${{ inputs.cargo-profile }}/greptime
version: ${{ inputs.version }}

@@ -76,5 +76,5 @@ runs:
uses: ./.github/actions/upload-artifacts
with:
artifacts-dir: ${{ inputs.artifacts-dir }}
target-file: target/${{ inputs.arch }}/${{ inputs.cargo-profile }}/greptime
target-files: target/${{ inputs.arch }}/${{ inputs.cargo-profile }}/greptime,target/${{ inputs.arch }}/${{ inputs.cargo-profile }}/greptime.pdb
version: ${{ inputs.version }}
@@ -8,7 +8,7 @@ inputs:
default: 2
description: "Number of Datanode replicas"
meta-replicas:
default: 3
default: 1
description: "Number of Metasrv replicas"
image-registry:
default: "docker.io"

@@ -58,7 +58,7 @@ runs:
--set image.tag=${{ inputs.image-tag }} \
--set base.podTemplate.main.resources.requests.cpu=50m \
--set base.podTemplate.main.resources.requests.memory=256Mi \
--set base.podTemplate.main.resources.limits.cpu=1000m \
--set base.podTemplate.main.resources.limits.cpu=2000m \
--set base.podTemplate.main.resources.limits.memory=2Gi \
--set frontend.replicas=${{ inputs.frontend-replicas }} \
--set datanode.replicas=${{ inputs.datanode-replicas }} \
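As a hedged sketch (not part of this changeset), a CI step might invoke this cluster-setup composite action with the inputs referenced in the hunks above; the action path and all concrete values below are assumed for illustration only.

```yaml
- name: Setup GreptimeDB cluster
  # Assumed action path; the file name is not shown in this compare view.
  uses: ./.github/actions/setup-greptimedb-cluster
  with:
    frontend-replicas: 1     # hypothetical value
    datanode-replicas: 2     # matches the default shown above
    meta-replicas: 1         # new default after this change
    image-registry: docker.io
    image-tag: latest        # hypothetical tag
```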
@@ -5,7 +5,7 @@ meta:

[datanode]
[datanode.client]
timeout = "60s"
timeout = "120s"
datanode:
configData: |-
[runtime]

@@ -21,7 +21,7 @@ frontend:
global_rt_size = 4

[meta_client]
ddl_timeout = "60s"
ddl_timeout = "120s"
objectStorage:
s3:
bucket: default

@@ -5,7 +5,7 @@ meta:

[datanode]
[datanode.client]
timeout = "60s"
timeout = "120s"
datanode:
configData: |-
[runtime]

@@ -17,7 +17,7 @@ frontend:
global_rt_size = 4

[meta_client]
ddl_timeout = "60s"
ddl_timeout = "120s"
objectStorage:
s3:
bucket: default

@@ -11,7 +11,7 @@ meta:

[datanode]
[datanode.client]
timeout = "60s"
timeout = "120s"
datanode:
configData: |-
[runtime]

@@ -28,7 +28,7 @@ frontend:
global_rt_size = 4

[meta_client]
ddl_timeout = "60s"
ddl_timeout = "120s"
objectStorage:
s3:
bucket: default
@@ -18,6 +18,8 @@ runs:
--set controller.replicaCount=${{ inputs.controller-replicas }} \
--set controller.resources.requests.cpu=50m \
--set controller.resources.requests.memory=128Mi \
--set controller.resources.limits.cpu=2000m \
--set controller.resources.limits.memory=2Gi \
--set listeners.controller.protocol=PLAINTEXT \
--set listeners.client.protocol=PLAINTEXT \
--create-namespace \
.github/actions/upload-artifacts/action.yml (vendored, 14 lines changed)
@@ -4,8 +4,8 @@ inputs:
artifacts-dir:
description: Directory to store artifacts
required: true
target-file:
description: The path of the target artifact
target-files:
description: The multiple target files to upload, separated by comma
required: false
version:
description: Version of the artifact

@@ -18,12 +18,16 @@ runs:
using: composite
steps:
- name: Create artifacts directory
if: ${{ inputs.target-file != '' }}
if: ${{ inputs.target-files != '' }}
working-directory: ${{ inputs.working-dir }}
shell: bash
run: |
mkdir -p ${{ inputs.artifacts-dir }} && \
cp ${{ inputs.target-file }} ${{ inputs.artifacts-dir }}
set -e
mkdir -p ${{ inputs.artifacts-dir }}
IFS=',' read -ra FILES <<< "${{ inputs.target-files }}"
for file in "${FILES[@]}"; do
cp "$file" ${{ inputs.artifacts-dir }}/
done

# The compressed artifacts will use the following layout:
# greptime-linux-amd64-pyo3-v0.3.0sha256sum
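For illustration only, here is a minimal sketch of how a workflow step might call the updated action with several comma-separated files. The action path and input names come from the diffs above; the step name and the concrete file paths are hypothetical.

```yaml
- name: Upload artifacts
  uses: ./.github/actions/upload-artifacts
  with:
    artifacts-dir: greptime-linux-amd64-artifacts   # hypothetical directory name
    # Comma-separated list; the bash loop above copies each entry into artifacts-dir.
    target-files: ./target/release/greptime,./target/release/greptime.sha256sum
    version: v0.12.0        # hypothetical version string
    working-dir: .          # hypothetical working directory
```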
.github/cargo-blacklist.txt (vendored, 1 line changed)
@@ -1,2 +1,3 @@
native-tls
openssl
aws-lc-sys
.github/pull_request_template.md (vendored, 10 lines changed)
@@ -4,7 +4,8 @@ I hereby agree to the terms of the [GreptimeDB CLA](https://github.com/GreptimeT

## What's changed and what's your intention?

__!!! DO NOT LEAVE THIS BLOCK EMPTY !!!__
<!--
__!!! DO NOT LEAVE THIS BLOCK EMPTY !!!__

Please explain IN DETAIL what the changes are in this PR and why they are needed:

@@ -12,9 +13,14 @@ Please explain IN DETAIL what the changes are in this PR and why they are needed
- How does this PR work? Need a brief introduction for the changed logic (optional)
- Describe clearly one logical change and avoid lazy messages (optional)
- Describe any limitations of the current code (optional)
- Describe if this PR will break **API or data compatibility** (optional)
-->

## Checklist
## PR Checklist
Please convert it to a draft if some of the following conditions are not met.

- [ ] I have written the necessary rustdoc comments.
- [ ] I have added the necessary unit tests and integration tests.
- [ ] This PR requires documentation updates.
- [ ] API changes are backward compatible.
- [ ] Schema or data changes are backward compatible.
.github/workflows/develop.yml (vendored, 11 lines changed)
@@ -269,13 +269,6 @@ jobs:
- name: Install cargo-gc-bin
shell: bash
run: cargo install cargo-gc-bin
- name: Check aws-lc-sys will not build
shell: bash
run: |
if cargo tree -i aws-lc-sys -e features | grep -q aws-lc-sys; then
echo "Found aws-lc-sys, which has compilation problems on older gcc versions. Please replace it with ring until its building experience improves."
exit 1
fi
- name: Build greptime bianry
shell: bash
# `cargo gc` will invoke `cargo build` with specified args

@@ -330,8 +323,6 @@ jobs:
uses: ./.github/actions/setup-kafka-cluster
- name: Setup Etcd cluser
uses: ./.github/actions/setup-etcd-cluster
- name: Setup Postgres cluser
uses: ./.github/actions/setup-postgres-cluster
# Prepares for fuzz tests
- uses: arduino/setup-protoc@v3
with:

@@ -481,8 +472,6 @@ jobs:
uses: ./.github/actions/setup-kafka-cluster
- name: Setup Etcd cluser
uses: ./.github/actions/setup-etcd-cluster
- name: Setup Postgres cluser
uses: ./.github/actions/setup-postgres-cluster
# Prepares for fuzz tests
- uses: arduino/setup-protoc@v3
with:
.github/workflows/nightly-build.yml (vendored, 4 lines changed)
@@ -12,7 +12,7 @@ on:
linux_amd64_runner:
type: choice
description: The runner uses to build linux-amd64 artifacts
default: ec2-c6i.2xlarge-amd64
default: ec2-c6i.4xlarge-amd64
options:
- ubuntu-20.04
- ubuntu-20.04-8-cores

@@ -27,7 +27,7 @@ on:
linux_arm64_runner:
type: choice
description: The runner uses to build linux-arm64 artifacts
default: ec2-c6g.2xlarge-arm64
default: ec2-c6g.4xlarge-arm64
options:
- ec2-c6g.xlarge-arm64 # 4C8G
- ec2-c6g.2xlarge-arm64 # 8C16G
.github/workflows/nightly-ci.yml (vendored, 10 lines changed)
@@ -114,6 +114,16 @@ jobs:
GT_S3_REGION: ${{ vars.AWS_CI_TEST_BUCKET_REGION }}
UNITTEST_LOG_DIR: "__unittest_logs"

cleanbuild-linux-nix:
runs-on: ubuntu-latest-8-cores
timeout-minutes: 60
steps:
- uses: actions/checkout@v4
- uses: cachix/install-nix-action@v27
with:
nix_path: nixpkgs=channel:nixos-unstable
- run: nix-shell --pure --run "cargo build"

check-status:
name: Check status
needs: [sqlness-test, sqlness-windows, test-on-windows]
.github/workflows/release.yml (vendored, 4 lines changed)
@@ -31,7 +31,7 @@ on:
linux_arm64_runner:
type: choice
description: The runner uses to build linux-arm64 artifacts
default: ec2-c6g.4xlarge-arm64
default: ec2-c6g.8xlarge-arm64
options:
- ubuntu-2204-32-cores-arm
- ec2-c6g.xlarge-arm64 # 4C8G

@@ -91,7 +91,7 @@ env:
# The scheduled version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-YYYYMMDD', like v0.2.0-nigthly-20230313;
NIGHTLY_RELEASE_PREFIX: nightly
# Note: The NEXT_RELEASE_VERSION should be modified manually by every formal release.
NEXT_RELEASE_VERSION: v0.11.0
NEXT_RELEASE_VERSION: v0.12.0

# Permission reference: https://docs.github.com/en/actions/using-jobs/assigning-permissions-to-jobs
permissions:
.gitignore (vendored, 6 lines changed)
@@ -47,6 +47,10 @@ benchmarks/data

venv/

# Fuzz tests
tests-fuzz/artifacts/
tests-fuzz/corpus/

# Nix
.direnv
.envrc
Cargo.lock (generated, 878 lines changed): file diff suppressed because it is too large.
@@ -68,7 +68,7 @@ members = [
resolver = "2"

[workspace.package]
version = "0.11.0"
version = "0.12.0"
edition = "2021"
license = "Apache-2.0"

@@ -180,6 +180,7 @@ sysinfo = "0.30"
# on branch v0.44.x
sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "54a267ac89c09b11c0c88934690530807185d3e7", features = [
"visitor",
"serde",
] }
strum = { version = "0.25", features = ["derive"] }
tempfile = "3"

@@ -237,6 +238,7 @@ file-engine = { path = "src/file-engine" }
flow = { path = "src/flow" }
frontend = { path = "src/frontend", default-features = false }
index = { path = "src/index" }
log-query = { path = "src/log-query" }
log-store = { path = "src/log-store" }
meta-client = { path = "src/meta-client" }
meta-srv = { path = "src/meta-srv" }
README.md (18 lines changed)
@@ -70,23 +70,23 @@ Our core developers have been building time-series data platforms for years. Bas

* **Unified Processing of Metrics, Logs, and Events**

GreptimeDB unifies time series data processing by treating all data - whether metrics, logs, or events - as timestamped events with context. Users can analyze this data using either [SQL](https://docs.greptime.com/user-guide/query-data/sql) or [PromQL](https://docs.greptime.com/user-guide/query-data/promql) and leverage stream processing ([Flow](https://docs.greptime.com/user-guide/continuous-aggregation/overview)) to enable continuous aggregation. [Read more](https://docs.greptime.com/user-guide/concepts/data-model).
GreptimeDB unifies time series data processing by treating all data - whether metrics, logs, or events - as timestamped events with context. Users can analyze this data using either [SQL](https://docs.greptime.com/user-guide/query-data/sql) or [PromQL](https://docs.greptime.com/user-guide/query-data/promql) and leverage stream processing ([Flow](https://docs.greptime.com/user-guide/flow-computation/overview)) to enable continuous aggregation. [Read more](https://docs.greptime.com/user-guide/concepts/data-model).

* **Cloud-native Distributed Database**

Built for [Kubernetes](https://docs.greptime.com/user-guide/deployments/deploy-on-kubernetes/greptimedb-operator-management). GreptimeDB achieves seamless scalability with its [cloud-native architecture](https://docs.greptime.com/user-guide/concepts/architecture) of separated compute and storage, built on object storage (AWS S3, Azure Blob Storage, etc.) while enabling cross-cloud deployment through a unified data access layer.

* **Performance and Cost-effective**

Written in pure Rust for superior performance and reliability. GreptimeDB features a distributed query engine with intelligent indexing to handle high cardinality data efficiently. Its optimized columnar storage achieves 50x cost efficiency on cloud object storage through advanced compression. [Benchmark reports](https://www.greptime.com/blogs/2024-09-09-report-summary).

* **Cloud-Edge Collaboration**

GreptimeDB seamlessly operates across cloud and edge (ARM/Android/Linux), providing consistent APIs and control plane for unified data management and efficient synchronization. [Learn how to run on Android](https://docs.greptime.com/user-guide/deployments/run-on-android/).

* **Multi-protocol Ingestion, SQL & PromQL Ready**

Widely adopted database protocols and APIs, including MySQL, PostgreSQL, InfluxDB, OpenTelemetry, Loki and Prometheus, etc. Effortless Adoption & Seamless Migration. [Supported Protocols Overview](https://docs.greptime.com/user-guide/protocols/overview).

For more detailed info please read [Why GreptimeDB](https://docs.greptime.com/user-guide/concepts/why-greptimedb).

@@ -138,7 +138,7 @@ Check the prerequisite:

* [Rust toolchain](https://www.rust-lang.org/tools/install) (nightly)
* [Protobuf compiler](https://grpc.io/docs/protoc-installation/) (>= 3.15)
* Python toolchain (optional): Required only if built with PyO3 backend. More detail for compiling with PyO3 can be found in its [documentation](https://pyo3.rs/v0.18.1/building_and_distribution#configuring-the-python-version).
* Python toolchain (optional): Required only if built with PyO3 backend. More details for compiling with PyO3 can be found in its [documentation](https://pyo3.rs/v0.18.1/building_and_distribution#configuring-the-python-version).

Build GreptimeDB binary:

@@ -154,6 +154,10 @@ cargo run -- standalone start

## Tools & Extensions

### Kubernetes

- [GreptimeDB Operator](https://github.com/GrepTimeTeam/greptimedb-operator)

### Dashboard

- [The dashboard UI for GreptimeDB](https://github.com/GreptimeTeam/dashboard)

@@ -173,7 +177,7 @@ Our official Grafana dashboard for monitoring GreptimeDB is available at [grafan

## Project Status

GreptimeDB is currently in Beta. We are targeting GA (General Availability) with v1.0 release by Early 2025.

While in Beta, GreptimeDB is already:
@@ -13,11 +13,11 @@
| Key | Type | Default | Descriptions |
| --- | -----| ------- | ----------- |
| `mode` | String | `standalone` | The running mode of the datanode. It can be `standalone` or `distributed`. |
| `enable_telemetry` | Bool | `true` | Enable telemetry to collect anonymous usage data. |
| `default_timezone` | String | Unset | The default timezone of the server. |
| `init_regions_in_background` | Bool | `false` | Initialize all regions in the background during the startup.<br/>By default, it provides services after all regions have been initialized. |
| `init_regions_parallelism` | Integer | `16` | Parallelism of initializing regions. |
| `max_concurrent_queries` | Integer | `0` | The maximum current queries allowed to be executed. Zero means unlimited. |
| `enable_telemetry` | Bool | `true` | Enable telemetry to collect anonymous usage data. Enabled by default. |
| `runtime` | -- | -- | The runtime options. |
| `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
| `runtime.compact_rt_size` | Integer | `4` | The number of threads to execute the runtime for global write operations. |
@@ -61,9 +61,9 @@
| `wal` | -- | -- | The WAL options. |
| `wal.provider` | String | `raft_engine` | The provider of the WAL.<br/>- `raft_engine`: the wal is stored in the local file system by raft-engine.<br/>- `kafka`: it's remote wal that data is stored in Kafka. |
| `wal.dir` | String | Unset | The directory to store the WAL files.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.file_size` | String | `256MB` | The size of the WAL segment file.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.purge_threshold` | String | `4GB` | The threshold of the WAL size to trigger a flush.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.purge_interval` | String | `10m` | The interval to trigger a flush.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.file_size` | String | `128MB` | The size of the WAL segment file.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.purge_threshold` | String | `1GB` | The threshold of the WAL size to trigger a flush.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.purge_interval` | String | `1m` | The interval to trigger a flush.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.read_batch_size` | Integer | `128` | The read batch size.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.sync_write` | Bool | `false` | Whether to use sync write.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.enable_log_recycle` | Bool | `true` | Whether to reuse logically truncated log files.<br/>**It's only used when the provider is `raft_engine`**. |
@@ -93,7 +93,7 @@
| `storage` | -- | -- | The data storage options. |
| `storage.data_home` | String | `/tmp/greptimedb/` | The working home directory. |
| `storage.type` | String | `File` | The storage type used to store the data.<br/>- `File`: the data is stored in the local file system.<br/>- `S3`: the data is stored in the S3 object storage.<br/>- `Gcs`: the data is stored in the Google Cloud Storage.<br/>- `Azblob`: the data is stored in the Azure Blob Storage.<br/>- `Oss`: the data is stored in the Aliyun OSS. |
| `storage.cache_path` | String | Unset | Cache configuration for object storage such as 'S3' etc. It is recommended to configure it when using object storage for better performance.<br/>The local file cache directory. |
| `storage.cache_path` | String | Unset | Read cache configuration for object storage such as 'S3' etc, it's configured by default when using object storage. It is recommended to configure it when using object storage for better performance.<br/>A local file directory, defaults to `{data_home}/object_cache/read`. An empty string means disabling. |
| `storage.cache_capacity` | String | Unset | The local file cache capacity in bytes. If your disk space is sufficient, it is recommended to set it larger. |
| `storage.bucket` | String | Unset | The S3 bucket name.<br/>**It's only used when the storage type is `S3`, `Oss` and `Gcs`**. |
| `storage.root` | String | Unset | The S3 data will be stored in the specified prefix, for example, `s3://${bucket}/${root}`.<br/>**It's only used when the storage type is `S3`, `Oss` and `Azblob`**. |
@@ -131,12 +131,11 @@
| `region_engine.mito.vector_cache_size` | String | Auto | Cache size for vectors and arrow arrays. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
| `region_engine.mito.page_cache_size` | String | Auto | Cache size for pages of SST row groups. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/8 of OS memory. |
| `region_engine.mito.selector_result_cache_size` | String | Auto | Cache size for time series selector (e.g. `last_value()`). Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
| `region_engine.mito.enable_experimental_write_cache` | Bool | `false` | Whether to enable the experimental write cache. It is recommended to enable it when using object storage for better performance. |
| `region_engine.mito.experimental_write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}/write_cache`. |
| `region_engine.mito.experimental_write_cache_size` | String | `1GiB` | Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger. |
| `region_engine.mito.enable_experimental_write_cache` | Bool | `false` | Whether to enable the experimental write cache, it's enabled by default when using object storage. It is recommended to enable it when using object storage for better performance. |
| `region_engine.mito.experimental_write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}/object_cache/write`. |
| `region_engine.mito.experimental_write_cache_size` | String | `5GiB` | Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger. |
| `region_engine.mito.experimental_write_cache_ttl` | String | Unset | TTL for write cache. |
| `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
| `region_engine.mito.scan_parallelism` | Integer | `0` | Parallelism to scan a region (default: 1/4 of cpu cores).<br/>- `0`: using the default value (1/4 of cpu cores).<br/>- `1`: scan in current thread.<br/>- `n`: scan in parallelism n. |
| `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
| `region_engine.mito.allow_stale_entries` | Bool | `false` | Whether to allow stale WAL entries read during replay. |
| `region_engine.mito.min_compaction_interval` | String | `0m` | Minimum time interval between two compactions.<br/>To align with the old behavior, the default value is 0 (no restrictions). |
@@ -151,6 +150,7 @@
| `region_engine.mito.inverted_index.intermediate_path` | String | `""` | Deprecated, use `region_engine.mito.index.aux_path` instead. |
| `region_engine.mito.inverted_index.metadata_cache_size` | String | `64MiB` | Cache size for inverted index metadata. |
| `region_engine.mito.inverted_index.content_cache_size` | String | `128MiB` | Cache size for inverted index content. |
| `region_engine.mito.inverted_index.content_cache_page_size` | String | `8MiB` | Page size for inverted index content cache. |
| `region_engine.mito.fulltext_index` | -- | -- | The options for full-text index in Mito engine. |
| `region_engine.mito.fulltext_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.fulltext_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
@@ -287,12 +287,12 @@
| `bind_addr` | String | `127.0.0.1:3002` | The bind address of metasrv. |
| `server_addr` | String | `127.0.0.1:3002` | The communication server address for frontend and datanode to connect to metasrv, "127.0.0.1:3002" by default for localhost. |
| `store_addrs` | Array | -- | Store server address default to etcd store. |
| `store_key_prefix` | String | `""` | If it's not empty, the metasrv will store all data with this key prefix. |
| `backend` | String | `EtcdStore` | The datastore for meta server. |
| `selector` | String | `round_robin` | Datanode selector type.<br/>- `round_robin` (default value)<br/>- `lease_based`<br/>- `load_based`<br/>For details, please see "https://docs.greptime.com/developer-guide/metasrv/selector". |
| `use_memory_store` | Bool | `false` | Store data in memory. |
| `enable_telemetry` | Bool | `true` | Whether to enable greptimedb telemetry. |
| `store_key_prefix` | String | `""` | If it's not empty, the metasrv will store all data with this key prefix. |
| `enable_region_failover` | Bool | `false` | Whether to enable region failover.<br/>This feature is only available on GreptimeDB running on cluster mode and<br/>- Using Remote WAL<br/>- Using shared storage (e.g., s3). |
| `backend` | String | `EtcdStore` | The datastore for meta server. |
| `enable_telemetry` | Bool | `true` | Whether to enable greptimedb telemetry. Enabled by default. |
| `runtime` | -- | -- | The runtime options. |
| `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
| `runtime.compact_rt_size` | Integer | `4` | The number of threads to execute the runtime for global write operations. |
@@ -357,7 +357,6 @@
| `node_id` | Integer | Unset | The datanode identifier and should be unique in the cluster. |
| `require_lease_before_startup` | Bool | `false` | Start services after regions have obtained leases.<br/>It will block the datanode start if it can't receive leases in the heartbeat from metasrv. |
| `init_regions_in_background` | Bool | `false` | Initialize all regions in the background during the startup.<br/>By default, it provides services after all regions have been initialized. |
| `enable_telemetry` | Bool | `true` | Enable telemetry to collect anonymous usage data. |
| `init_regions_parallelism` | Integer | `16` | Parallelism of initializing regions. |
| `max_concurrent_queries` | Integer | `0` | The maximum current queries allowed to be executed. Zero means unlimited. |
| `rpc_addr` | String | Unset | Deprecated, use `grpc.addr` instead. |
@@ -365,6 +364,7 @@
| `rpc_runtime_size` | Integer | Unset | Deprecated, use `grpc.runtime_size` instead. |
| `rpc_max_recv_message_size` | String | Unset | Deprecated, use `grpc.rpc_max_recv_message_size` instead. |
| `rpc_max_send_message_size` | String | Unset | Deprecated, use `grpc.rpc_max_send_message_size` instead. |
| `enable_telemetry` | Bool | `true` | Enable telemetry to collect anonymous usage data. Enabled by default. |
| `http` | -- | -- | The HTTP server options. |
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
| `http.timeout` | String | `30s` | HTTP request timeout. Set to 0 to disable timeout. |
@@ -399,9 +399,9 @@
| `wal` | -- | -- | The WAL options. |
| `wal.provider` | String | `raft_engine` | The provider of the WAL.<br/>- `raft_engine`: the wal is stored in the local file system by raft-engine.<br/>- `kafka`: it's remote wal that data is stored in Kafka. |
| `wal.dir` | String | Unset | The directory to store the WAL files.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.file_size` | String | `256MB` | The size of the WAL segment file.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.purge_threshold` | String | `4GB` | The threshold of the WAL size to trigger a flush.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.purge_interval` | String | `10m` | The interval to trigger a flush.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.file_size` | String | `128MB` | The size of the WAL segment file.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.purge_threshold` | String | `1GB` | The threshold of the WAL size to trigger a flush.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.purge_interval` | String | `1m` | The interval to trigger a flush.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.read_batch_size` | Integer | `128` | The read batch size.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.sync_write` | Bool | `false` | Whether to use sync write.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.enable_log_recycle` | Bool | `true` | Whether to reuse logically truncated log files.<br/>**It's only used when the provider is `raft_engine`**. |
@@ -421,7 +421,7 @@
| `storage` | -- | -- | The data storage options. |
| `storage.data_home` | String | `/tmp/greptimedb/` | The working home directory. |
| `storage.type` | String | `File` | The storage type used to store the data.<br/>- `File`: the data is stored in the local file system.<br/>- `S3`: the data is stored in the S3 object storage.<br/>- `Gcs`: the data is stored in the Google Cloud Storage.<br/>- `Azblob`: the data is stored in the Azure Blob Storage.<br/>- `Oss`: the data is stored in the Aliyun OSS. |
| `storage.cache_path` | String | Unset | Cache configuration for object storage such as 'S3' etc. It is recommended to configure it when using object storage for better performance.<br/>The local file cache directory. |
| `storage.cache_path` | String | Unset | Read cache configuration for object storage such as 'S3' etc, it's configured by default when using object storage. It is recommended to configure it when using object storage for better performance.<br/>A local file directory, defaults to `{data_home}/object_cache/read`. An empty string means disabling. |
| `storage.cache_capacity` | String | Unset | The local file cache capacity in bytes. If your disk space is sufficient, it is recommended to set it larger. |
| `storage.bucket` | String | Unset | The S3 bucket name.<br/>**It's only used when the storage type is `S3`, `Oss` and `Gcs`**. |
| `storage.root` | String | Unset | The S3 data will be stored in the specified prefix, for example, `s3://${bucket}/${root}`.<br/>**It's only used when the storage type is `S3`, `Oss` and `Azblob`**. |
@@ -459,12 +459,11 @@
| `region_engine.mito.vector_cache_size` | String | Auto | Cache size for vectors and arrow arrays. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
| `region_engine.mito.page_cache_size` | String | Auto | Cache size for pages of SST row groups. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/8 of OS memory. |
| `region_engine.mito.selector_result_cache_size` | String | Auto | Cache size for time series selector (e.g. `last_value()`). Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
| `region_engine.mito.enable_experimental_write_cache` | Bool | `false` | Whether to enable the experimental write cache. It is recommended to enable it when using object storage for better performance. |
| `region_engine.mito.experimental_write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}/write_cache`. |
| `region_engine.mito.experimental_write_cache_size` | String | `1GiB` | Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger. |
| `region_engine.mito.enable_experimental_write_cache` | Bool | `false` | Whether to enable the experimental write cache, it's enabled by default when using object storage. It is recommended to enable it when using object storage for better performance. |
| `region_engine.mito.experimental_write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}/object_cache/write`. |
| `region_engine.mito.experimental_write_cache_size` | String | `5GiB` | Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger. |
| `region_engine.mito.experimental_write_cache_ttl` | String | Unset | TTL for write cache. |
| `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
| `region_engine.mito.scan_parallelism` | Integer | `0` | Parallelism to scan a region (default: 1/4 of cpu cores).<br/>- `0`: using the default value (1/4 of cpu cores).<br/>- `1`: scan in current thread.<br/>- `n`: scan in parallelism n. |
| `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
| `region_engine.mito.allow_stale_entries` | Bool | `false` | Whether to allow stale WAL entries read during replay. |
| `region_engine.mito.min_compaction_interval` | String | `0m` | Minimum time interval between two compactions.<br/>To align with the old behavior, the default value is 0 (no restrictions). |
@@ -477,6 +476,9 @@
| `region_engine.mito.inverted_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.inverted_index.mem_threshold_on_create` | String | `auto` | Memory threshold for performing an external sort during index creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold |
| `region_engine.mito.inverted_index.intermediate_path` | String | `""` | Deprecated, use `region_engine.mito.index.aux_path` instead. |
| `region_engine.mito.inverted_index.metadata_cache_size` | String | `64MiB` | Cache size for inverted index metadata. |
| `region_engine.mito.inverted_index.content_cache_size` | String | `128MiB` | Cache size for inverted index content. |
| `region_engine.mito.inverted_index.content_cache_page_size` | String | `8MiB` | Page size for inverted index content cache. |
| `region_engine.mito.fulltext_index` | -- | -- | The options for full-text index in Mito engine. |
| `region_engine.mito.fulltext_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.fulltext_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
@@ -13,9 +13,6 @@ require_lease_before_startup = false
## By default, it provides services after all regions have been initialized.
init_regions_in_background = false

## Enable telemetry to collect anonymous usage data.
enable_telemetry = true

## Parallelism of initializing regions.
init_regions_parallelism = 16

@@ -42,6 +39,8 @@ rpc_max_recv_message_size = "512MB"
## @toml2docs:none-default
rpc_max_send_message_size = "512MB"

## Enable telemetry to collect anonymous usage data. Enabled by default.
#+ enable_telemetry = true

## The HTTP server options.
[http]

@@ -143,15 +142,15 @@ dir = "/tmp/greptimedb/wal"

## The size of the WAL segment file.
## **It's only used when the provider is `raft_engine`**.
file_size = "256MB"
file_size = "128MB"

## The threshold of the WAL size to trigger a flush.
## **It's only used when the provider is `raft_engine`**.
purge_threshold = "4GB"
purge_threshold = "1GB"

## The interval to trigger a flush.
## **It's only used when the provider is `raft_engine`**.
purge_interval = "10m"
purge_interval = "1m"

## The read batch size.
## **It's only used when the provider is `raft_engine`**.

@@ -294,14 +293,14 @@ data_home = "/tmp/greptimedb/"
## - `Oss`: the data is stored in the Aliyun OSS.
type = "File"

## Cache configuration for object storage such as 'S3' etc. It is recommended to configure it when using object storage for better performance.
## The local file cache directory.
## Read cache configuration for object storage such as 'S3' etc, it's configured by default when using object storage. It is recommended to configure it when using object storage for better performance.
## A local file directory, defaults to `{data_home}/object_cache/read`. An empty string means disabling.
## @toml2docs:none-default
cache_path = "/path/local_cache"
#+ cache_path = ""

## The local file cache capacity in bytes. If your disk space is sufficient, it is recommended to set it larger.
## @toml2docs:none-default
cache_capacity = "1GiB"
cache_capacity = "5GiB"

## The S3 bucket name.
## **It's only used when the storage type is `S3`, `Oss` and `Gcs`**.

@@ -476,14 +475,14 @@ auto_flush_interval = "1h"
## @toml2docs:none-default="Auto"
#+ selector_result_cache_size = "512MB"

## Whether to enable the experimental write cache. It is recommended to enable it when using object storage for better performance.
## Whether to enable the experimental write cache, it's enabled by default when using object storage. It is recommended to enable it when using object storage for better performance.
enable_experimental_write_cache = false

## File system path for write cache, defaults to `{data_home}/write_cache`.
## File system path for write cache, defaults to `{data_home}/object_cache/write`.
experimental_write_cache_path = ""

## Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger.
experimental_write_cache_size = "1GiB"
experimental_write_cache_size = "5GiB"

## TTL for write cache.
## @toml2docs:none-default

@@ -492,12 +491,6 @@ experimental_write_cache_ttl = "8h"
## Buffer size for SST writing.
sst_write_buffer_size = "8MB"

## Parallelism to scan a region (default: 1/4 of cpu cores).
## - `0`: using the default value (1/4 of cpu cores).
## - `1`: scan in current thread.
## - `n`: scan in parallelism n.
scan_parallelism = 0

## Capacity of the channel to send data from parallel scan tasks to the main task.
parallel_scan_channel_size = 32

@@ -550,6 +543,15 @@ mem_threshold_on_create = "auto"
## Deprecated, use `region_engine.mito.index.aux_path` instead.
intermediate_path = ""

## Cache size for inverted index metadata.
metadata_cache_size = "64MiB"

## Cache size for inverted index content.
content_cache_size = "128MiB"

## Page size for inverted index content cache.
content_cache_page_size = "8MiB"

## The options for full-text index in Mito engine.
[region_engine.mito.fulltext_index]
@@ -10,6 +10,12 @@ server_addr = "127.0.0.1:3002"
## Store server address default to etcd store.
store_addrs = ["127.0.0.1:2379"]

## If it's not empty, the metasrv will store all data with this key prefix.
store_key_prefix = ""

## The datastore for meta server.
backend = "EtcdStore"

## Datanode selector type.
## - `round_robin` (default value)
## - `lease_based`

@@ -20,20 +26,14 @@ selector = "round_robin"
## Store data in memory.
use_memory_store = false

## Whether to enable greptimedb telemetry.
enable_telemetry = true

## If it's not empty, the metasrv will store all data with this key prefix.
store_key_prefix = ""

## Whether to enable region failover.
## This feature is only available on GreptimeDB running on cluster mode and
## - Using Remote WAL
## - Using shared storage (e.g., s3).
enable_region_failover = false

## The datastore for meta server.
backend = "EtcdStore"
## Whether to enable greptimedb telemetry. Enabled by default.
#+ enable_telemetry = true

## The runtime options.
#+ [runtime]
@@ -1,9 +1,6 @@
## The running mode of the datanode. It can be `standalone` or `distributed`.
mode = "standalone"

## Enable telemetry to collect anonymous usage data.
enable_telemetry = true

## The default timezone of the server.
## @toml2docs:none-default
default_timezone = "UTC"

@@ -18,6 +15,9 @@ init_regions_parallelism = 16
## The maximum current queries allowed to be executed. Zero means unlimited.
max_concurrent_queries = 0

## Enable telemetry to collect anonymous usage data. Enabled by default.
#+ enable_telemetry = true

## The runtime options.
#+ [runtime]
## The number of threads to execute the runtime for global read operations.

@@ -147,15 +147,15 @@ dir = "/tmp/greptimedb/wal"

## The size of the WAL segment file.
## **It's only used when the provider is `raft_engine`**.
file_size = "256MB"
file_size = "128MB"

## The threshold of the WAL size to trigger a flush.
## **It's only used when the provider is `raft_engine`**.
purge_threshold = "4GB"
purge_threshold = "1GB"

## The interval to trigger a flush.
## **It's only used when the provider is `raft_engine`**.
purge_interval = "10m"
purge_interval = "1m"

## The read batch size.
## **It's only used when the provider is `raft_engine`**.

@@ -332,14 +332,14 @@ data_home = "/tmp/greptimedb/"
## - `Oss`: the data is stored in the Aliyun OSS.
type = "File"

## Cache configuration for object storage such as 'S3' etc. It is recommended to configure it when using object storage for better performance.
## The local file cache directory.
## Read cache configuration for object storage such as 'S3' etc, it's configured by default when using object storage. It is recommended to configure it when using object storage for better performance.
## A local file directory, defaults to `{data_home}/object_cache/read`. An empty string means disabling.
## @toml2docs:none-default
cache_path = "/path/local_cache"
#+ cache_path = ""

## The local file cache capacity in bytes. If your disk space is sufficient, it is recommended to set it larger.
## @toml2docs:none-default
cache_capacity = "1GiB"
cache_capacity = "5GiB"

## The S3 bucket name.
## **It's only used when the storage type is `S3`, `Oss` and `Gcs`**.

@@ -514,14 +514,14 @@ auto_flush_interval = "1h"
## @toml2docs:none-default="Auto"
#+ selector_result_cache_size = "512MB"

## Whether to enable the experimental write cache. It is recommended to enable it when using object storage for better performance.
## Whether to enable the experimental write cache, it's enabled by default when using object storage. It is recommended to enable it when using object storage for better performance.
enable_experimental_write_cache = false

## File system path for write cache, defaults to `{data_home}/write_cache`.
## File system path for write cache, defaults to `{data_home}/object_cache/write`.
experimental_write_cache_path = ""

## Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger.
experimental_write_cache_size = "1GiB"
experimental_write_cache_size = "5GiB"

## TTL for write cache.
## @toml2docs:none-default

@@ -530,12 +530,6 @@ experimental_write_cache_ttl = "8h"
## Buffer size for SST writing.
sst_write_buffer_size = "8MB"

## Parallelism to scan a region (default: 1/4 of cpu cores).
## - `0`: using the default value (1/4 of cpu cores).
## - `1`: scan in current thread.
## - `n`: scan in parallelism n.
scan_parallelism = 0

## Capacity of the channel to send data from parallel scan tasks to the main task.
parallel_scan_channel_size = 32

@@ -594,6 +588,9 @@ metadata_cache_size = "64MiB"
## Cache size for inverted index content.
content_cache_size = "128MiB"

## Page size for inverted index content cache.
content_cache_page_size = "8MiB"

## The options for full-text index in Mito engine.
[region_engine.mito.fulltext_index]
@@ -15,8 +15,8 @@ RUN apt-get update && \
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
libssl-dev \
tzdata \
protobuf-compiler \
curl \
unzip \
ca-certificates \
git \
build-essential \

@@ -24,6 +24,20 @@ RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
python3.10 \
python3.10-dev

ARG TARGETPLATFORM
RUN echo "target platform: $TARGETPLATFORM"

# Install protobuf, because the one in the apt is too old (v3.12).
RUN if [ "$TARGETPLATFORM" = "linux/arm64" ]; then \
curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v29.1/protoc-29.1-linux-aarch_64.zip && \
unzip protoc-29.1-linux-aarch_64.zip -d protoc3; \
elif [ "$TARGETPLATFORM" = "linux/amd64" ]; then \
curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v29.1/protoc-29.1-linux-x86_64.zip && \
unzip protoc-29.1-linux-x86_64.zip -d protoc3; \
fi
RUN mv protoc3/bin/* /usr/local/bin/
RUN mv protoc3/include/* /usr/local/include/

# https://github.com/GreptimeTeam/greptimedb/actions/runs/10935485852/job/30357457188#step:3:7106
# `aws-lc-sys` require gcc >= 10.3.0 to work, hence alias to use gcc-10
RUN apt-get remove -y gcc-9 g++-9 cpp-9 && \

@@ -49,7 +63,7 @@ RUN apt-get -y purge python3.8 && \
# wildcard here. However, that requires the git's config files and the submodules all owned by the very same user.
# It's troublesome to do this since the dev build runs in Docker, which is under user "root"; while outside the Docker,
# it can be a different user that have prepared the submodules.
RUN git config --global --add safe.directory *
RUN git config --global --add safe.directory '*'

# Install Python dependencies.
COPY $DOCKER_BUILD_ROOT/docker/python/requirements.txt /etc/greptime/requirements.txt
File diff suppressed because it is too large
@@ -1,2 +1,3 @@
[toolchain]
channel = "nightly-2024-10-19"
components = ["rust-analyzer"]

@@ -58,8 +58,10 @@ def main():
if not check_snafu_in_files(branch_name, other_rust_files)
]

for name in unused_snafu:
print(name)
if unused_snafu:
print("Unused error variants:")
for name in unused_snafu:
print(name)

if unused_snafu:
raise SystemExit(1)
shell.nix (new file, 27 lines)
@@ -0,0 +1,27 @@
let
nixpkgs = fetchTarball "https://github.com/NixOS/nixpkgs/tarball/nixos-unstable";
fenix = import (fetchTarball "https://github.com/nix-community/fenix/archive/main.tar.gz") {};
pkgs = import nixpkgs { config = {}; overlays = []; };
in

pkgs.mkShell rec {
nativeBuildInputs = with pkgs; [
pkg-config
git
clang
gcc
protobuf
mold
(fenix.fromToolchainFile {
dir = ./.;
})
cargo-nextest
taplo
];

buildInputs = with pkgs; [
libgit2
];

LD_LIBRARY_PATH = pkgs.lib.makeLibraryPath buildInputs;
}
@@ -16,7 +16,7 @@ use std::collections::HashMap;

use datatypes::schema::{
ColumnDefaultConstraint, ColumnSchema, FulltextAnalyzer, FulltextOptions, COMMENT_KEY,
FULLTEXT_KEY, INVERTED_INDEX_KEY,
FULLTEXT_KEY, INVERTED_INDEX_KEY, SKIPPING_INDEX_KEY,
};
use greptime_proto::v1::Analyzer;
use snafu::ResultExt;

@@ -29,6 +29,8 @@ use crate::v1::{ColumnDef, ColumnOptions, SemanticType};
const FULLTEXT_GRPC_KEY: &str = "fulltext";
/// Key used to store inverted index options in gRPC column options.
const INVERTED_INDEX_GRPC_KEY: &str = "inverted_index";
/// Key used to store skip index options in gRPC column options.
const SKIPPING_INDEX_GRPC_KEY: &str = "skipping_index";

/// Tries to construct a `ColumnSchema` from the given `ColumnDef`.
pub fn try_as_column_schema(column_def: &ColumnDef) -> Result<ColumnSchema> {

@@ -60,6 +62,9 @@ pub fn try_as_column_schema(column_def: &ColumnDef) -> Result<ColumnSchema> {
if let Some(inverted_index) = options.options.get(INVERTED_INDEX_GRPC_KEY) {
metadata.insert(INVERTED_INDEX_KEY.to_string(), inverted_index.clone());
}
if let Some(skipping_index) = options.options.get(SKIPPING_INDEX_GRPC_KEY) {
metadata.insert(SKIPPING_INDEX_KEY.to_string(), skipping_index.clone());
}
}

ColumnSchema::new(&column_def.name, data_type.into(), column_def.is_nullable)

@@ -84,6 +89,11 @@ pub fn options_from_column_schema(column_schema: &ColumnSchema) -> Option<Column
.options
.insert(INVERTED_INDEX_GRPC_KEY.to_string(), inverted_index.clone());
}
if let Some(skipping_index) = column_schema.metadata().get(SKIPPING_INDEX_KEY) {
options
.options
.insert(SKIPPING_INDEX_GRPC_KEY.to_string(), skipping_index.clone());
}

(!options.options.is_empty()).then_some(options)
}

@@ -25,6 +25,7 @@ pub enum PermissionReq<'a> {
GrpcRequest(&'a Request),
SqlStatement(&'a Statement),
PromQuery,
LogQuery,
Opentsdb,
LineProtocol,
PromStoreWrite,
src/cache/Cargo.toml (vendored, 1 line changed)
@@ -11,4 +11,3 @@ common-macro.workspace = true
|
||||
common-meta.workspace = true
|
||||
moka.workspace = true
|
||||
snafu.workspace = true
|
||||
substrait.workspace = true
|
||||
|
||||
@@ -18,7 +18,6 @@ async-stream.workspace = true
|
||||
async-trait = "0.1"
|
||||
bytes.workspace = true
|
||||
common-catalog.workspace = true
|
||||
common-config.workspace = true
|
||||
common-error.workspace = true
|
||||
common-macro.workspace = true
|
||||
common-meta.workspace = true
|
||||
@@ -58,7 +57,5 @@ catalog = { workspace = true, features = ["testing"] }
|
||||
chrono.workspace = true
|
||||
common-meta = { workspace = true, features = ["testing"] }
|
||||
common-query = { workspace = true, features = ["testing"] }
|
||||
common-test-util.workspace = true
|
||||
log-store.workspace = true
|
||||
object-store.workspace = true
|
||||
tokio.workspace = true
|
||||
|
||||
@@ -64,6 +64,13 @@ pub enum Error {
|
||||
source: BoxedError,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to list flow stats"))]
|
||||
ListFlowStats {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: BoxedError,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to list flows in catalog {catalog}"))]
|
||||
ListFlows {
|
||||
#[snafu(implicit)]
|
||||
@@ -326,6 +333,7 @@ impl ErrorExt for Error {
|
||||
| Error::ListSchemas { source, .. }
|
||||
| Error::ListTables { source, .. }
|
||||
| Error::ListFlows { source, .. }
|
||||
| Error::ListFlowStats { source, .. }
|
||||
| Error::ListProcedures { source, .. }
|
||||
| Error::ListRegionStats { source, .. }
|
||||
| Error::ConvertProtoData { source, .. } => source.status_code(),
|
||||
|
||||
@@ -17,6 +17,7 @@ use common_error::ext::BoxedError;
use common_meta::cluster::{ClusterInfo, NodeInfo};
use common_meta::datanode::RegionStat;
use common_meta::ddl::{ExecutorContext, ProcedureExecutor};
use common_meta::key::flow::flow_state::FlowStat;
use common_meta::rpc::procedure;
use common_procedure::{ProcedureInfo, ProcedureState};
use meta_client::MetaClientRef;
@@ -89,4 +90,12 @@ impl InformationExtension for DistributedInformationExtension {
            .map_err(BoxedError::new)
            .context(error::ListRegionStatsSnafu)
    }

    async fn flow_stats(&self) -> std::result::Result<Option<FlowStat>, Self::Error> {
        self.meta_client
            .list_flow_stats()
            .await
            .map_err(BoxedError::new)
            .context(crate::error::ListFlowStatsSnafu)
    }
}
@@ -38,7 +38,7 @@ pub fn new_table_cache(
|
||||
) -> TableCache {
|
||||
let init = init_factory(table_info_cache, table_name_cache);
|
||||
|
||||
CacheContainer::new(name, cache, Box::new(invalidator), init, Box::new(filter))
|
||||
CacheContainer::new(name, cache, Box::new(invalidator), init, filter)
|
||||
}
|
||||
|
||||
fn init_factory(
|
||||
|
||||
@@ -35,6 +35,7 @@ use common_catalog::consts::{self, DEFAULT_CATALOG_NAME, INFORMATION_SCHEMA_NAME
|
||||
use common_error::ext::ErrorExt;
|
||||
use common_meta::cluster::NodeInfo;
|
||||
use common_meta::datanode::RegionStat;
|
||||
use common_meta::key::flow::flow_state::FlowStat;
|
||||
use common_meta::key::flow::FlowMetadataManager;
|
||||
use common_procedure::ProcedureInfo;
|
||||
use common_recordbatch::SendableRecordBatchStream;
|
||||
@@ -192,6 +193,7 @@ impl SystemSchemaProviderInner for InformationSchemaProvider {
|
||||
)) as _),
|
||||
FLOWS => Some(Arc::new(InformationSchemaFlows::new(
|
||||
self.catalog_name.clone(),
|
||||
self.catalog_manager.clone(),
|
||||
self.flow_metadata_manager.clone(),
|
||||
)) as _),
|
||||
PROCEDURE_INFO => Some(
|
||||
@@ -338,6 +340,9 @@ pub trait InformationExtension {
|
||||
|
||||
/// Gets the region statistics.
|
||||
async fn region_stats(&self) -> std::result::Result<Vec<RegionStat>, Self::Error>;
|
||||
|
||||
/// Gets the flow statistics. If no flownode is available, returns `None`.
|
||||
async fn flow_stats(&self) -> std::result::Result<Option<FlowStat>, Self::Error>;
|
||||
}
|
||||
|
||||
pub struct NoopInformationExtension;
|
||||
@@ -357,4 +362,8 @@ impl InformationExtension for NoopInformationExtension {
|
||||
async fn region_stats(&self) -> std::result::Result<Vec<RegionStat>, Self::Error> {
|
||||
Ok(vec![])
|
||||
}
|
||||
|
||||
async fn flow_stats(&self) -> std::result::Result<Option<FlowStat>, Self::Error> {
|
||||
Ok(None)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -12,11 +12,12 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::Arc;
|
||||
use std::sync::{Arc, Weak};
|
||||
|
||||
use common_catalog::consts::INFORMATION_SCHEMA_FLOW_TABLE_ID;
|
||||
use common_error::ext::BoxedError;
|
||||
use common_meta::key::flow::flow_info::FlowInfoValue;
|
||||
use common_meta::key::flow::flow_state::FlowStat;
|
||||
use common_meta::key::flow::FlowMetadataManager;
|
||||
use common_meta::key::FlowId;
|
||||
use common_recordbatch::adapter::RecordBatchStreamAdapter;
|
||||
@@ -28,7 +29,9 @@ use datatypes::prelude::ConcreteDataType as CDT;
|
||||
use datatypes::scalars::ScalarVectorBuilder;
|
||||
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
|
||||
use datatypes::value::Value;
|
||||
use datatypes::vectors::{Int64VectorBuilder, StringVectorBuilder, UInt32VectorBuilder, VectorRef};
|
||||
use datatypes::vectors::{
|
||||
Int64VectorBuilder, StringVectorBuilder, UInt32VectorBuilder, UInt64VectorBuilder, VectorRef,
|
||||
};
|
||||
use futures::TryStreamExt;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use store_api::storage::{ScanRequest, TableId};
|
||||
@@ -38,6 +41,8 @@ use crate::error::{
|
||||
};
|
||||
use crate::information_schema::{Predicates, FLOWS};
|
||||
use crate::system_schema::information_schema::InformationTable;
|
||||
use crate::system_schema::utils;
|
||||
use crate::CatalogManager;
|
||||
|
||||
const INIT_CAPACITY: usize = 42;
|
||||
|
||||
@@ -45,6 +50,7 @@ const INIT_CAPACITY: usize = 42;
|
||||
// pk is (flow_name, flow_id, table_catalog)
|
||||
pub const FLOW_NAME: &str = "flow_name";
|
||||
pub const FLOW_ID: &str = "flow_id";
|
||||
pub const STATE_SIZE: &str = "state_size";
|
||||
pub const TABLE_CATALOG: &str = "table_catalog";
|
||||
pub const FLOW_DEFINITION: &str = "flow_definition";
|
||||
pub const COMMENT: &str = "comment";
|
||||
@@ -55,20 +61,24 @@ pub const FLOWNODE_IDS: &str = "flownode_ids";
|
||||
pub const OPTIONS: &str = "options";
|
||||
|
||||
/// The `information_schema.flows` table provides information about flows in databases.
|
||||
///
|
||||
pub(super) struct InformationSchemaFlows {
|
||||
schema: SchemaRef,
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
flow_metadata_manager: Arc<FlowMetadataManager>,
|
||||
}
|
||||
|
||||
impl InformationSchemaFlows {
|
||||
pub(super) fn new(
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
flow_metadata_manager: Arc<FlowMetadataManager>,
|
||||
) -> Self {
|
||||
Self {
|
||||
schema: Self::schema(),
|
||||
catalog_name,
|
||||
catalog_manager,
|
||||
flow_metadata_manager,
|
||||
}
|
||||
}
|
||||
@@ -80,6 +90,7 @@ impl InformationSchemaFlows {
|
||||
vec![
|
||||
(FLOW_NAME, CDT::string_datatype(), false),
|
||||
(FLOW_ID, CDT::uint32_datatype(), false),
|
||||
(STATE_SIZE, CDT::uint64_datatype(), true),
|
||||
(TABLE_CATALOG, CDT::string_datatype(), false),
|
||||
(FLOW_DEFINITION, CDT::string_datatype(), false),
|
||||
(COMMENT, CDT::string_datatype(), true),
|
||||
@@ -99,6 +110,7 @@ impl InformationSchemaFlows {
|
||||
InformationSchemaFlowsBuilder::new(
|
||||
self.schema.clone(),
|
||||
self.catalog_name.clone(),
|
||||
self.catalog_manager.clone(),
|
||||
&self.flow_metadata_manager,
|
||||
)
|
||||
}
|
||||
@@ -144,10 +156,12 @@ impl InformationTable for InformationSchemaFlows {
|
||||
struct InformationSchemaFlowsBuilder {
|
||||
schema: SchemaRef,
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
flow_metadata_manager: Arc<FlowMetadataManager>,
|
||||
|
||||
flow_names: StringVectorBuilder,
|
||||
flow_ids: UInt32VectorBuilder,
|
||||
state_sizes: UInt64VectorBuilder,
|
||||
table_catalogs: StringVectorBuilder,
|
||||
raw_sqls: StringVectorBuilder,
|
||||
comments: StringVectorBuilder,
|
||||
@@ -162,15 +176,18 @@ impl InformationSchemaFlowsBuilder {
|
||||
fn new(
|
||||
schema: SchemaRef,
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
flow_metadata_manager: &Arc<FlowMetadataManager>,
|
||||
) -> Self {
|
||||
Self {
|
||||
schema,
|
||||
catalog_name,
|
||||
catalog_manager,
|
||||
flow_metadata_manager: flow_metadata_manager.clone(),
|
||||
|
||||
flow_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
flow_ids: UInt32VectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
state_sizes: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
table_catalogs: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
raw_sqls: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
comments: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
@@ -195,6 +212,11 @@ impl InformationSchemaFlowsBuilder {
|
||||
.flow_names(&catalog_name)
|
||||
.await;
|
||||
|
||||
let flow_stat = {
|
||||
let information_extension = utils::information_extension(&self.catalog_manager)?;
|
||||
information_extension.flow_stats().await?
|
||||
};
|
||||
|
||||
while let Some((flow_name, flow_id)) = stream
|
||||
.try_next()
|
||||
.await
|
||||
@@ -213,7 +235,7 @@ impl InformationSchemaFlowsBuilder {
|
||||
catalog_name: catalog_name.to_string(),
|
||||
flow_name: flow_name.to_string(),
|
||||
})?;
|
||||
self.add_flow(&predicates, flow_id.flow_id(), flow_info)?;
|
||||
self.add_flow(&predicates, flow_id.flow_id(), flow_info, &flow_stat)?;
|
||||
}
|
||||
|
||||
self.finish()
|
||||
@@ -224,6 +246,7 @@ impl InformationSchemaFlowsBuilder {
|
||||
predicates: &Predicates,
|
||||
flow_id: FlowId,
|
||||
flow_info: FlowInfoValue,
|
||||
flow_stat: &Option<FlowStat>,
|
||||
) -> Result<()> {
|
||||
let row = [
|
||||
(FLOW_NAME, &Value::from(flow_info.flow_name().to_string())),
|
||||
@@ -238,6 +261,11 @@ impl InformationSchemaFlowsBuilder {
|
||||
}
|
||||
self.flow_names.push(Some(flow_info.flow_name()));
|
||||
self.flow_ids.push(Some(flow_id));
|
||||
self.state_sizes.push(
|
||||
flow_stat
|
||||
.as_ref()
|
||||
.and_then(|state| state.state_size.get(&flow_id).map(|v| *v as u64)),
|
||||
);
|
||||
self.table_catalogs.push(Some(flow_info.catalog_name()));
|
||||
self.raw_sqls.push(Some(flow_info.raw_sql()));
|
||||
self.comments.push(Some(flow_info.comment()));
|
||||
@@ -270,6 +298,7 @@ impl InformationSchemaFlowsBuilder {
        let columns: Vec<VectorRef> = vec![
            Arc::new(self.flow_names.finish()),
            Arc::new(self.flow_ids.finish()),
            Arc::new(self.state_sizes.finish()),
            Arc::new(self.table_catalogs.finish()),
            Arc::new(self.raw_sqls.finish()),
            Arc::new(self.comments.finish()),
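The new `state_size` column exposes the per-flow state size gathered through `InformationExtension::flow_stats`. A query sketch against the updated table (output illustrative; `state_size` is `NULL` when no flownode has reported stats):

```sql
-- Inspect flow state sizes reported by the flownode.
SELECT flow_name, flow_id, state_size
FROM information_schema.flows
ORDER BY state_size DESC;
```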
@@ -54,6 +54,10 @@ const INIT_CAPACITY: usize = 42;
pub(crate) const PRI_CONSTRAINT_NAME: &str = "PRIMARY";
/// Time index constraint name
pub(crate) const TIME_INDEX_CONSTRAINT_NAME: &str = "TIME INDEX";
/// Inverted index constraint name
pub(crate) const INVERTED_INDEX_CONSTRAINT_NAME: &str = "INVERTED INDEX";
/// Fulltext index constraint name
pub(crate) const FULLTEXT_INDEX_CONSTRAINT_NAME: &str = "FULLTEXT INDEX";

/// The virtual table implementation for `information_schema.KEY_COLUMN_USAGE`.
pub(super) struct InformationSchemaKeyColumnUsage {
@@ -216,14 +220,13 @@ impl InformationSchemaKeyColumnUsageBuilder {
        let mut stream = catalog_manager.tables(&catalog_name, &schema_name, None);

        while let Some(table) = stream.try_next().await? {
            let mut primary_constraints = vec![];

            let table_info = table.table_info();
            let table_name = &table_info.name;
            let keys = &table_info.meta.primary_key_indices;
            let schema = table.schema();

            for (idx, column) in schema.column_schemas().iter().enumerate() {
                let mut constraints = vec![];
                if column.is_time_index() {
                    self.add_key_column_usage(
                        &predicates,
@@ -236,30 +239,31 @@ impl InformationSchemaKeyColumnUsageBuilder {
                        1, //always 1 for time index
                    );
                }
                if keys.contains(&idx) {
                    primary_constraints.push((
                        catalog_name.clone(),
                        schema_name.clone(),
                        table_name.to_string(),
                        column.name.clone(),
                    ));
                }
                // TODO(dimbtp): foreign key constraint not supported yet
            }
                if keys.contains(&idx) {
                    constraints.push(PRI_CONSTRAINT_NAME);
                }
                if column.is_inverted_indexed() {
                    constraints.push(INVERTED_INDEX_CONSTRAINT_NAME);
                }

            for (i, (catalog_name, schema_name, table_name, column_name)) in
                primary_constraints.into_iter().enumerate()
            {
                self.add_key_column_usage(
                    &predicates,
                    &schema_name,
                    PRI_CONSTRAINT_NAME,
                    &catalog_name,
                    &schema_name,
                    &table_name,
                    &column_name,
                    i as u32 + 1,
                );
                if column.has_fulltext_index_key() {
                    constraints.push(FULLTEXT_INDEX_CONSTRAINT_NAME);
                }

                if !constraints.is_empty() {
                    let aggregated_constraints = constraints.join(", ");
                    self.add_key_column_usage(
                        &predicates,
                        &schema_name,
                        &aggregated_constraints,
                        &catalog_name,
                        &schema_name,
                        table_name,
                        &column.name,
                        idx as u32 + 1,
                    );
                }
            }
        }
    }
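With the rewrite above, a column's constraints are aggregated into a single `KEY_COLUMN_USAGE` row whose constraint name joins the applicable constants (for example `PRIMARY, INVERTED INDEX`). A query sketch, assuming the standard `KEY_COLUMN_USAGE` column names:

```sql
-- A tag column that is both part of the primary key and inverted-indexed now
-- appears once, with an aggregated constraint_name such as 'PRIMARY, INVERTED INDEX'.
SELECT constraint_name, table_name, column_name
FROM information_schema.key_column_usage
WHERE table_schema = 'public';
```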
@@ -23,7 +23,6 @@ common-error.workspace = true
|
||||
common-grpc.workspace = true
|
||||
common-macro.workspace = true
|
||||
common-meta.workspace = true
|
||||
common-options.workspace = true
|
||||
common-procedure.workspace = true
|
||||
common-query.workspace = true
|
||||
common-recordbatch.workspace = true
|
||||
@@ -61,5 +60,4 @@ client = { workspace = true, features = ["testing"] }
|
||||
common-test-util.workspace = true
|
||||
common-version.workspace = true
|
||||
serde.workspace = true
|
||||
temp-env = "0.3"
|
||||
tempfile.workspace = true
|
||||
|
||||
@@ -34,7 +34,7 @@ use common_query::Output;
|
||||
use common_recordbatch::RecordBatches;
|
||||
use common_telemetry::debug;
|
||||
use either::Either;
|
||||
use meta_client::client::MetaClientBuilder;
|
||||
use meta_client::client::{ClusterKvBackend, MetaClientBuilder};
|
||||
use query::datafusion::DatafusionQueryEngine;
|
||||
use query::parser::QueryLanguageParser;
|
||||
use query::query_engine::{DefaultSerializer, QueryEngineState};
|
||||
|
||||
@@ -42,8 +42,6 @@ tonic.workspace = true
|
||||
|
||||
[dev-dependencies]
|
||||
common-grpc-expr.workspace = true
|
||||
datanode.workspace = true
|
||||
derive-new = "0.5"
|
||||
tracing = "0.1"
|
||||
|
||||
[dev-dependencies.substrait_proto]
|
||||
|
||||
@@ -59,10 +59,6 @@ impl Instance {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn datanode_mut(&mut self) -> &mut Datanode {
|
||||
&mut self.datanode
|
||||
}
|
||||
|
||||
pub fn datanode(&self) -> &Datanode {
|
||||
&self.datanode
|
||||
}
|
||||
|
||||
@@ -63,10 +63,6 @@ impl Instance {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn flownode_mut(&mut self) -> &mut FlownodeInstance {
|
||||
&mut self.flownode
|
||||
}
|
||||
|
||||
pub fn flownode(&self) -> &FlownodeInstance {
|
||||
&self.flownode
|
||||
}
|
||||
|
||||
@@ -34,6 +34,7 @@ use common_meta::ddl::flow_meta::{FlowMetadataAllocator, FlowMetadataAllocatorRe
|
||||
use common_meta::ddl::table_meta::{TableMetadataAllocator, TableMetadataAllocatorRef};
|
||||
use common_meta::ddl::{DdlContext, NoopRegionFailureDetectorControl, ProcedureExecutorRef};
|
||||
use common_meta::ddl_manager::DdlManager;
|
||||
use common_meta::key::flow::flow_state::FlowStat;
|
||||
use common_meta::key::flow::{FlowMetadataManager, FlowMetadataManagerRef};
|
||||
use common_meta::key::{TableMetadataManager, TableMetadataManagerRef};
|
||||
use common_meta::kv_backend::KvBackendRef;
|
||||
@@ -70,7 +71,7 @@ use servers::http::HttpOptions;
|
||||
use servers::tls::{TlsMode, TlsOption};
|
||||
use servers::Mode;
|
||||
use snafu::ResultExt;
|
||||
use tokio::sync::broadcast;
|
||||
use tokio::sync::{broadcast, RwLock};
|
||||
use tracing_appender::non_blocking::WorkerGuard;
|
||||
|
||||
use crate::error::{
|
||||
@@ -507,7 +508,7 @@ impl StartCommand {
|
||||
procedure_manager.clone(),
|
||||
));
|
||||
let catalog_manager = KvBackendCatalogManager::new(
|
||||
information_extension,
|
||||
information_extension.clone(),
|
||||
kv_backend.clone(),
|
||||
layered_cache_registry.clone(),
|
||||
Some(procedure_manager.clone()),
|
||||
@@ -532,6 +533,14 @@ impl StartCommand {
|
||||
.context(OtherSnafu)?,
|
||||
);
|
||||
|
||||
// Set the reference used to query the local flow state.
|
||||
{
|
||||
let flow_worker_manager = flownode.flow_worker_manager();
|
||||
information_extension
|
||||
.set_flow_worker_manager(flow_worker_manager.clone())
|
||||
.await;
|
||||
}
|
||||
|
||||
let node_manager = Arc::new(StandaloneDatanodeManager {
|
||||
region_server: datanode.region_server(),
|
||||
flow_server: flownode.flow_worker_manager(),
|
||||
@@ -669,6 +678,7 @@ pub struct StandaloneInformationExtension {
|
||||
region_server: RegionServer,
|
||||
procedure_manager: ProcedureManagerRef,
|
||||
start_time_ms: u64,
|
||||
flow_worker_manager: RwLock<Option<Arc<FlowWorkerManager>>>,
|
||||
}
|
||||
|
||||
impl StandaloneInformationExtension {
|
||||
@@ -677,8 +687,15 @@ impl StandaloneInformationExtension {
|
||||
region_server,
|
||||
procedure_manager,
|
||||
start_time_ms: common_time::util::current_time_millis() as u64,
|
||||
flow_worker_manager: RwLock::new(None),
|
||||
}
|
||||
}
|
||||
|
||||
/// Set the flow worker manager for the standalone instance.
|
||||
pub async fn set_flow_worker_manager(&self, flow_worker_manager: Arc<FlowWorkerManager>) {
|
||||
let mut guard = self.flow_worker_manager.write().await;
|
||||
*guard = Some(flow_worker_manager);
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
@@ -750,6 +767,18 @@ impl InformationExtension for StandaloneInformationExtension {
|
||||
.collect::<Vec<_>>();
|
||||
Ok(stats)
|
||||
}
|
||||
|
||||
async fn flow_stats(&self) -> std::result::Result<Option<FlowStat>, Self::Error> {
|
||||
Ok(Some(
|
||||
self.flow_worker_manager
|
||||
.read()
|
||||
.await
|
||||
.as_ref()
|
||||
.unwrap()
|
||||
.gen_state_report()
|
||||
.await,
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
|
||||
@@ -69,7 +69,6 @@ fn test_load_datanode_example_config() {
|
||||
region_engine: vec![
|
||||
RegionEngineConfig::Mito(MitoConfig {
|
||||
auto_flush_interval: Duration::from_secs(3600),
|
||||
scan_parallelism: 0,
|
||||
experimental_write_cache_ttl: Some(Duration::from_secs(60 * 60 * 8)),
|
||||
..Default::default()
|
||||
}),
|
||||
@@ -205,7 +204,6 @@ fn test_load_standalone_example_config() {
|
||||
RegionEngineConfig::Mito(MitoConfig {
|
||||
auto_flush_interval: Duration::from_secs(3600),
|
||||
experimental_write_cache_ttl: Some(Duration::from_secs(60 * 60 * 8)),
|
||||
scan_parallelism: 0,
|
||||
..Default::default()
|
||||
}),
|
||||
RegionEngineConfig::File(EngineConfig {}),
|
||||
|
||||
@@ -17,6 +17,7 @@ common-macro.workspace = true
|
||||
futures.workspace = true
|
||||
paste = "1.0"
|
||||
pin-project.workspace = true
|
||||
rand.workspace = true
|
||||
serde = { version = "1.0", features = ["derive"] }
|
||||
snafu.workspace = true
|
||||
tokio.workspace = true
|
||||
|
||||
@@ -36,6 +36,11 @@ pub struct Metadata {
/// `RangeReader` reads a range of bytes from a source.
#[async_trait]
pub trait RangeReader: Send + Unpin {
    /// Sets the file size hint for the reader.
    ///
    /// It's used to optimize the reading process by reducing the number of remote requests.
    fn with_file_size_hint(&mut self, file_size_hint: u64);

    /// Returns the metadata of the source.
    async fn metadata(&mut self) -> io::Result<Metadata>;

@@ -70,6 +75,10 @@ pub trait RangeReader: Send + Unpin {

#[async_trait]
impl<R: ?Sized + RangeReader> RangeReader for &mut R {
    fn with_file_size_hint(&mut self, file_size_hint: u64) {
        (*self).with_file_size_hint(file_size_hint)
    }

    async fn metadata(&mut self) -> io::Result<Metadata> {
        (*self).metadata().await
    }
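A caller-side sketch of the new size hint, assuming the trait's existing `read(Range<u64>)` method and a caller that already knows the object length (say, from a manifest), which lets the reader avoid an extra remote metadata request; `read_all` is a hypothetical helper, not part of the crate:

```rust
// RangeReader, Metadata and Bytes come from this crate / the `bytes` crate.
async fn read_all<R: RangeReader>(
    reader: &mut R,
    known_size: Option<u64>,
) -> std::io::Result<bytes::Bytes> {
    if let Some(size) = known_size {
        // Hint the size up front so metadata()/read() can skip remote requests.
        reader.with_file_size_hint(size);
    }
    let meta = reader.metadata().await?;
    reader.read(0..meta.content_length).await
}
```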
@@ -186,15 +195,17 @@ impl<R: RangeReader + 'static> AsyncRead for AsyncReadAdapter<R> {
|
||||
|
||||
#[async_trait]
|
||||
impl RangeReader for Vec<u8> {
|
||||
fn with_file_size_hint(&mut self, _file_size_hint: u64) {
|
||||
// do nothing
|
||||
}
|
||||
|
||||
async fn metadata(&mut self) -> io::Result<Metadata> {
|
||||
Ok(Metadata {
|
||||
content_length: self.len() as u64,
|
||||
})
|
||||
}
|
||||
|
||||
async fn read(&mut self, mut range: Range<u64>) -> io::Result<Bytes> {
|
||||
range.end = range.end.min(self.len() as u64);
|
||||
|
||||
async fn read(&mut self, range: Range<u64>) -> io::Result<Bytes> {
|
||||
let bytes = Bytes::copy_from_slice(&self[range.start as usize..range.end as usize]);
|
||||
Ok(bytes)
|
||||
}
|
||||
@@ -222,6 +233,10 @@ impl FileReader {
|
||||
|
||||
#[async_trait]
|
||||
impl RangeReader for FileReader {
|
||||
fn with_file_size_hint(&mut self, _file_size_hint: u64) {
|
||||
// do nothing
|
||||
}
|
||||
|
||||
async fn metadata(&mut self) -> io::Result<Metadata> {
|
||||
Ok(Metadata {
|
||||
content_length: self.content_length,
|
||||
|
||||
@@ -19,7 +19,7 @@ pub const GIB: u64 = MIB * BINARY_DATA_MAGNITUDE;
|
||||
pub const TIB: u64 = GIB * BINARY_DATA_MAGNITUDE;
|
||||
pub const PIB: u64 = TIB * BINARY_DATA_MAGNITUDE;
|
||||
|
||||
#[derive(Clone, Copy, PartialEq, Eq, Ord, PartialOrd)]
|
||||
#[derive(Clone, Copy, PartialEq, Eq, Ord, PartialOrd, Default)]
|
||||
pub struct ReadableSize(pub u64);
|
||||
|
||||
impl ReadableSize {
|
||||
|
||||
@@ -8,10 +8,5 @@ license.workspace = true
|
||||
workspace = true
|
||||
|
||||
[dependencies]
|
||||
common-error.workspace = true
|
||||
common-macro.workspace = true
|
||||
snafu.workspace = true
|
||||
|
||||
[dev-dependencies]
|
||||
chrono.workspace = true
|
||||
tokio.workspace = true
|
||||
|
||||
@@ -48,5 +48,4 @@ url = "2.3"
|
||||
[dev-dependencies]
|
||||
common-telemetry.workspace = true
|
||||
common-test-util.workspace = true
|
||||
dotenv.workspace = true
|
||||
uuid.workspace = true
|
||||
|
||||
@@ -27,7 +27,7 @@ pub fn build_fs_backend(root: &str) -> Result<ObjectStore> {
|
||||
DefaultLoggingInterceptor,
|
||||
))
|
||||
.layer(object_store::layers::TracingLayer)
|
||||
.layer(object_store::layers::PrometheusMetricsLayer::new(true))
|
||||
.layer(object_store::layers::build_prometheus_metrics_layer(true))
|
||||
.finish();
|
||||
Ok(object_store)
|
||||
}
|
||||
|
||||
@@ -89,7 +89,7 @@ pub fn build_s3_backend(
|
||||
DefaultLoggingInterceptor,
|
||||
))
|
||||
.layer(object_store::layers::TracingLayer)
|
||||
.layer(object_store::layers::PrometheusMetricsLayer::new(true))
|
||||
.layer(object_store::layers::build_prometheus_metrics_layer(true))
|
||||
.finish())
|
||||
}
|
||||
|
||||
|
||||
@@ -5,12 +5,7 @@ edition.workspace = true
|
||||
license.workspace = true
|
||||
|
||||
[dependencies]
|
||||
api.workspace = true
|
||||
async-trait.workspace = true
|
||||
common-base.workspace = true
|
||||
common-error.workspace = true
|
||||
common-macro.workspace = true
|
||||
common-query.workspace = true
|
||||
session.workspace = true
|
||||
snafu.workspace = true
|
||||
sql.workspace = true
|
||||
|
||||
@@ -51,6 +51,5 @@ wkt = { version = "0.11", optional = true }
|
||||
|
||||
[dev-dependencies]
|
||||
approx = "0.5"
|
||||
ron = "0.7"
|
||||
serde = { version = "1.0", features = ["derive"] }
|
||||
tokio.workspace = true
|
||||
|
||||
@@ -26,3 +26,4 @@ pub mod function_registry;
|
||||
pub mod handlers;
|
||||
pub mod helper;
|
||||
pub mod state;
|
||||
pub mod utils;
|
||||
|
||||
@@ -204,20 +204,10 @@ impl PatternAst {
    fn convert_literal(column: &str, pattern: &str) -> Expr {
        logical_expr::col(column).like(logical_expr::lit(format!(
            "%{}%",
            Self::escape_pattern(pattern)
            crate::utils::escape_like_pattern(pattern)
        )))
    }

    fn escape_pattern(pattern: &str) -> String {
        pattern
            .chars()
            .flat_map(|c| match c {
                '\\' | '%' | '_' => vec!['\\', c],
                _ => vec![c],
            })
            .collect::<String>()
    }

    /// Transform this AST with preset rules to make it correct.
    fn transform_ast(self) -> Result<Self> {
        self.transform_up(Self::collapse_binary_branch_fn)
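The private `escape_pattern` helper is replaced by the shared `utils::escape_like_pattern` added later in this diff, so the escaping can be reused outside `PatternAst`. A small sketch of what the escaping guarantees for the generated `LIKE` expression (behaviour mirrors the helper's unit tests; `escape_like_pattern` is assumed to be in scope):

```rust
#[test]
fn like_pattern_is_escaped() {
    // '%' and '_' are LIKE wildcards; escaping keeps user input literal.
    assert_eq!(escape_like_pattern("100%_done"), "100\\%\\_done");
    // convert_literal then wraps the escaped text in wildcards: col LIKE '%100\%\_done%'.
}
```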
@@ -14,6 +14,9 @@
|
||||
|
||||
mod convert;
|
||||
mod distance;
|
||||
pub(crate) mod impl_conv;
|
||||
mod scalar_add;
|
||||
mod scalar_mul;
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
@@ -31,5 +34,9 @@ impl VectorFunction {
|
||||
registry.register(Arc::new(distance::CosDistanceFunction));
|
||||
registry.register(Arc::new(distance::DotProductFunction));
|
||||
registry.register(Arc::new(distance::L2SqDistanceFunction));
|
||||
|
||||
// scalar calculation
|
||||
registry.register(Arc::new(scalar_add::ScalarAddFunction));
|
||||
registry.register(Arc::new(scalar_mul::ScalarMulFunction));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -18,18 +18,17 @@ mod l2sq;
|
||||
|
||||
use std::borrow::Cow;
|
||||
use std::fmt::Display;
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_query::error::{InvalidFuncArgsSnafu, Result};
|
||||
use common_query::prelude::Signature;
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use datatypes::scalars::ScalarVectorBuilder;
|
||||
use datatypes::value::ValueRef;
|
||||
use datatypes::vectors::{Float32VectorBuilder, MutableVector, Vector, VectorRef};
|
||||
use datatypes::vectors::{Float32VectorBuilder, MutableVector, VectorRef};
|
||||
use snafu::ensure;
|
||||
|
||||
use crate::function::{Function, FunctionContext};
|
||||
use crate::helper;
|
||||
use crate::scalars::vector::impl_conv::{as_veclit, as_veclit_if_const};
|
||||
|
||||
macro_rules! define_distance_function {
|
||||
($StructName:ident, $display_name:expr, $similarity_method:path) => {
|
||||
@@ -80,17 +79,17 @@ macro_rules! define_distance_function {
|
||||
return Ok(result.to_vector());
|
||||
}
|
||||
|
||||
let arg0_const = parse_if_constant_string(arg0)?;
|
||||
let arg1_const = parse_if_constant_string(arg1)?;
|
||||
let arg0_const = as_veclit_if_const(arg0)?;
|
||||
let arg1_const = as_veclit_if_const(arg1)?;
|
||||
|
||||
for i in 0..size {
|
||||
let vec0 = match arg0_const.as_ref() {
|
||||
Some(a) => Some(Cow::Borrowed(a.as_slice())),
|
||||
None => as_vector(arg0.get_ref(i))?,
|
||||
Some(a) => Some(Cow::Borrowed(a.as_ref())),
|
||||
None => as_veclit(arg0.get_ref(i))?,
|
||||
};
|
||||
let vec1 = match arg1_const.as_ref() {
|
||||
Some(b) => Some(Cow::Borrowed(b.as_slice())),
|
||||
None => as_vector(arg1.get_ref(i))?,
|
||||
Some(b) => Some(Cow::Borrowed(b.as_ref())),
|
||||
None => as_veclit(arg1.get_ref(i))?,
|
||||
};
|
||||
|
||||
if let (Some(vec0), Some(vec1)) = (vec0, vec1) {
|
||||
@@ -129,98 +128,6 @@ define_distance_function!(CosDistanceFunction, "vec_cos_distance", cos::cos);
|
||||
define_distance_function!(L2SqDistanceFunction, "vec_l2sq_distance", l2sq::l2sq);
|
||||
define_distance_function!(DotProductFunction, "vec_dot_product", dot::dot);
|
||||
|
||||
/// Parse a vector value if the value is a constant string.
|
||||
fn parse_if_constant_string(arg: &Arc<dyn Vector>) -> Result<Option<Vec<f32>>> {
|
||||
if !arg.is_const() {
|
||||
return Ok(None);
|
||||
}
|
||||
if arg.data_type() != ConcreteDataType::string_datatype() {
|
||||
return Ok(None);
|
||||
}
|
||||
arg.get_ref(0)
|
||||
.as_string()
|
||||
.unwrap() // Safe: checked if it is a string
|
||||
.map(parse_f32_vector_from_string)
|
||||
.transpose()
|
||||
}
|
||||
|
||||
/// Convert a value to a vector value.
|
||||
/// Supported data types are binary and string.
|
||||
fn as_vector(arg: ValueRef<'_>) -> Result<Option<Cow<'_, [f32]>>> {
|
||||
match arg.data_type() {
|
||||
ConcreteDataType::Binary(_) => arg
|
||||
.as_binary()
|
||||
.unwrap() // Safe: checked if it is a binary
|
||||
.map(binary_as_vector)
|
||||
.transpose(),
|
||||
ConcreteDataType::String(_) => arg
|
||||
.as_string()
|
||||
.unwrap() // Safe: checked if it is a string
|
||||
.map(|s| Ok(Cow::Owned(parse_f32_vector_from_string(s)?)))
|
||||
.transpose(),
|
||||
ConcreteDataType::Null(_) => Ok(None),
|
||||
_ => InvalidFuncArgsSnafu {
|
||||
err_msg: format!("Unsupported data type: {:?}", arg.data_type()),
|
||||
}
|
||||
.fail(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Convert a u8 slice to a vector value.
|
||||
fn binary_as_vector(bytes: &[u8]) -> Result<Cow<'_, [f32]>> {
|
||||
if bytes.len() % std::mem::size_of::<f32>() != 0 {
|
||||
return InvalidFuncArgsSnafu {
|
||||
err_msg: format!("Invalid binary length of vector: {}", bytes.len()),
|
||||
}
|
||||
.fail();
|
||||
}
|
||||
|
||||
if cfg!(target_endian = "little") {
|
||||
Ok(unsafe {
|
||||
let vec = std::slice::from_raw_parts(
|
||||
bytes.as_ptr() as *const f32,
|
||||
bytes.len() / std::mem::size_of::<f32>(),
|
||||
);
|
||||
Cow::Borrowed(vec)
|
||||
})
|
||||
} else {
|
||||
let v = bytes
|
||||
.chunks_exact(std::mem::size_of::<f32>())
|
||||
.map(|chunk| f32::from_le_bytes(chunk.try_into().unwrap()))
|
||||
.collect::<Vec<f32>>();
|
||||
Ok(Cow::Owned(v))
|
||||
}
|
||||
}
|
||||
|
||||
/// Parse a string to a vector value.
|
||||
/// Valid inputs are strings like "[1.0, 2.0, 3.0]".
|
||||
fn parse_f32_vector_from_string(s: &str) -> Result<Vec<f32>> {
|
||||
let trimmed = s.trim();
|
||||
if !trimmed.starts_with('[') || !trimmed.ends_with(']') {
|
||||
return InvalidFuncArgsSnafu {
|
||||
err_msg: format!(
|
||||
"Failed to parse {s} to Vector value: not properly enclosed in brackets"
|
||||
),
|
||||
}
|
||||
.fail();
|
||||
}
|
||||
let content = trimmed[1..trimmed.len() - 1].trim();
|
||||
if content.is_empty() {
|
||||
return Ok(Vec::new());
|
||||
}
|
||||
|
||||
content
|
||||
.split(',')
|
||||
.map(|s| s.trim().parse::<f32>())
|
||||
.collect::<std::result::Result<_, _>>()
|
||||
.map_err(|e| {
|
||||
InvalidFuncArgsSnafu {
|
||||
err_msg: format!("Failed to parse {s} to Vector value: {e}"),
|
||||
}
|
||||
.build()
|
||||
})
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::sync::Arc;
|
||||
@@ -456,27 +363,4 @@ mod tests {
|
||||
assert!(result.is_err());
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_vector_from_string() {
|
||||
let result = parse_f32_vector_from_string("[1.0, 2.0, 3.0]").unwrap();
|
||||
assert_eq!(result, vec![1.0, 2.0, 3.0]);
|
||||
|
||||
let result = parse_f32_vector_from_string("[]").unwrap();
|
||||
assert_eq!(result, Vec::<f32>::new());
|
||||
|
||||
let result = parse_f32_vector_from_string("[1.0, a, 3.0]");
|
||||
assert!(result.is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_binary_as_vector() {
|
||||
let bytes = [0, 0, 128, 63];
|
||||
let result = binary_as_vector(&bytes).unwrap();
|
||||
assert_eq!(result.as_ref(), &[1.0]);
|
||||
|
||||
let invalid_bytes = [0, 0, 128];
|
||||
let result = binary_as_vector(&invalid_bytes);
|
||||
assert!(result.is_err());
|
||||
}
|
||||
}
|
||||
|
||||
src/common/function/src/scalars/vector/impl_conv.rs (new file, 155 lines)
@@ -0,0 +1,155 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::borrow::Cow;
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_query::error::{InvalidFuncArgsSnafu, Result};
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use datatypes::value::ValueRef;
|
||||
use datatypes::vectors::Vector;
|
||||
|
||||
/// Convert a constant string or binary literal to a vector literal.
|
||||
pub fn as_veclit_if_const(arg: &Arc<dyn Vector>) -> Result<Option<Cow<'_, [f32]>>> {
|
||||
if !arg.is_const() {
|
||||
return Ok(None);
|
||||
}
|
||||
if arg.data_type() != ConcreteDataType::string_datatype()
|
||||
&& arg.data_type() != ConcreteDataType::binary_datatype()
|
||||
{
|
||||
return Ok(None);
|
||||
}
|
||||
as_veclit(arg.get_ref(0))
|
||||
}
|
||||
|
||||
/// Convert a string or binary literal to a vector literal.
|
||||
pub fn as_veclit(arg: ValueRef<'_>) -> Result<Option<Cow<'_, [f32]>>> {
|
||||
match arg.data_type() {
|
||||
ConcreteDataType::Binary(_) => arg
|
||||
.as_binary()
|
||||
.unwrap() // Safe: checked if it is a binary
|
||||
.map(binlit_as_veclit)
|
||||
.transpose(),
|
||||
ConcreteDataType::String(_) => arg
|
||||
.as_string()
|
||||
.unwrap() // Safe: checked if it is a string
|
||||
.map(|s| Ok(Cow::Owned(parse_veclit_from_strlit(s)?)))
|
||||
.transpose(),
|
||||
ConcreteDataType::Null(_) => Ok(None),
|
||||
_ => InvalidFuncArgsSnafu {
|
||||
err_msg: format!("Unsupported data type: {:?}", arg.data_type()),
|
||||
}
|
||||
.fail(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Convert a u8 slice to a vector literal.
|
||||
pub fn binlit_as_veclit(bytes: &[u8]) -> Result<Cow<'_, [f32]>> {
|
||||
if bytes.len() % std::mem::size_of::<f32>() != 0 {
|
||||
return InvalidFuncArgsSnafu {
|
||||
err_msg: format!("Invalid binary length of vector: {}", bytes.len()),
|
||||
}
|
||||
.fail();
|
||||
}
|
||||
|
||||
if cfg!(target_endian = "little") {
|
||||
Ok(unsafe {
|
||||
let vec = std::slice::from_raw_parts(
|
||||
bytes.as_ptr() as *const f32,
|
||||
bytes.len() / std::mem::size_of::<f32>(),
|
||||
);
|
||||
Cow::Borrowed(vec)
|
||||
})
|
||||
} else {
|
||||
let v = bytes
|
||||
.chunks_exact(std::mem::size_of::<f32>())
|
||||
.map(|chunk| f32::from_le_bytes(chunk.try_into().unwrap()))
|
||||
.collect::<Vec<f32>>();
|
||||
Ok(Cow::Owned(v))
|
||||
}
|
||||
}
|
||||
|
||||
/// Parse a string literal to a vector literal.
|
||||
/// Valid inputs are strings like "[1.0, 2.0, 3.0]".
|
||||
pub fn parse_veclit_from_strlit(s: &str) -> Result<Vec<f32>> {
|
||||
let trimmed = s.trim();
|
||||
if !trimmed.starts_with('[') || !trimmed.ends_with(']') {
|
||||
return InvalidFuncArgsSnafu {
|
||||
err_msg: format!(
|
||||
"Failed to parse {s} to Vector value: not properly enclosed in brackets"
|
||||
),
|
||||
}
|
||||
.fail();
|
||||
}
|
||||
let content = trimmed[1..trimmed.len() - 1].trim();
|
||||
if content.is_empty() {
|
||||
return Ok(Vec::new());
|
||||
}
|
||||
|
||||
content
|
||||
.split(',')
|
||||
.map(|s| s.trim().parse::<f32>())
|
||||
.collect::<std::result::Result<_, _>>()
|
||||
.map_err(|e| {
|
||||
InvalidFuncArgsSnafu {
|
||||
err_msg: format!("Failed to parse {s} to Vector value: {e}"),
|
||||
}
|
||||
.build()
|
||||
})
|
||||
}
|
||||
|
||||
/// Convert a vector literal to a binary literal.
|
||||
pub fn veclit_to_binlit(vec: &[f32]) -> Vec<u8> {
|
||||
if cfg!(target_endian = "little") {
|
||||
unsafe {
|
||||
std::slice::from_raw_parts(vec.as_ptr() as *const u8, std::mem::size_of_val(vec))
|
||||
.to_vec()
|
||||
}
|
||||
} else {
|
||||
let mut bytes = Vec::with_capacity(std::mem::size_of_val(vec));
|
||||
for e in vec {
|
||||
bytes.extend_from_slice(&e.to_le_bytes());
|
||||
}
|
||||
bytes
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_parse_veclit_from_strlit() {
|
||||
let result = parse_veclit_from_strlit("[1.0, 2.0, 3.0]").unwrap();
|
||||
assert_eq!(result, vec![1.0, 2.0, 3.0]);
|
||||
|
||||
let result = parse_veclit_from_strlit("[]").unwrap();
|
||||
assert_eq!(result, Vec::<f32>::new());
|
||||
|
||||
let result = parse_veclit_from_strlit("[1.0, a, 3.0]");
|
||||
assert!(result.is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_binlit_as_veclit() {
|
||||
let vec = &[1.0, 2.0, 3.0];
|
||||
let bytes = veclit_to_binlit(vec);
|
||||
let result = binlit_as_veclit(&bytes).unwrap();
|
||||
assert_eq!(result.as_ref(), vec);
|
||||
|
||||
let invalid_bytes = [0, 0, 128];
|
||||
let result = binlit_as_veclit(&invalid_bytes);
|
||||
assert!(result.is_err());
|
||||
}
|
||||
}
|
||||
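Taken together, the `impl_conv` helpers let vector functions accept literals either as strings or as little-endian `f32` binaries. A round-trip sketch using only the functions defined above (assumed to be in scope):

```rust
use common_query::error::Result;

// Round trip: string literal -> Vec<f32> -> binary literal -> borrowed [f32] slice.
fn roundtrip() -> Result<()> {
    let vec = parse_veclit_from_strlit("[1.0, 2.0, 3.0]")?;
    let bin = veclit_to_binlit(&vec);
    let back = binlit_as_veclit(&bin)?;
    assert_eq!(back.as_ref(), vec.as_slice());
    Ok(())
}
```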
src/common/function/src/scalars/vector/scalar_add.rs (new file, 173 lines)
@@ -0,0 +1,173 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::borrow::Cow;
|
||||
use std::fmt::Display;
|
||||
|
||||
use common_query::error::{InvalidFuncArgsSnafu, Result};
|
||||
use common_query::prelude::Signature;
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use datatypes::scalars::ScalarVectorBuilder;
|
||||
use datatypes::vectors::{BinaryVectorBuilder, MutableVector, VectorRef};
|
||||
use nalgebra::DVectorView;
|
||||
use snafu::ensure;
|
||||
|
||||
use crate::function::{Function, FunctionContext};
|
||||
use crate::helper;
|
||||
use crate::scalars::vector::impl_conv::{as_veclit, as_veclit_if_const, veclit_to_binlit};
|
||||
|
||||
const NAME: &str = "vec_scalar_add";
|
||||
|
||||
/// Adds a scalar to each element of a vector.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```sql
|
||||
/// SELECT vec_to_string(vec_scalar_add(1, "[1, 2, 3]")) as result;
|
||||
///
|
||||
/// +---------+
|
||||
/// | result |
|
||||
/// +---------+
|
||||
/// | [2,3,4] |
|
||||
/// +---------+
|
||||
///
|
||||
/// -- Negative scalar to simulate subtraction
|
||||
/// SELECT vec_to_string(vec_scalar_add(-1, "[1, 2, 3]")) as result;
|
||||
///
|
||||
/// +---------+
|
||||
/// | result |
|
||||
/// +---------+
|
||||
/// | [0,1,2] |
|
||||
/// +---------+
|
||||
/// ```
|
||||
#[derive(Debug, Clone, Default)]
|
||||
pub struct ScalarAddFunction;
|
||||
|
||||
impl Function for ScalarAddFunction {
|
||||
fn name(&self) -> &str {
|
||||
NAME
|
||||
}
|
||||
|
||||
fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
|
||||
Ok(ConcreteDataType::binary_datatype())
|
||||
}
|
||||
|
||||
fn signature(&self) -> Signature {
|
||||
helper::one_of_sigs2(
|
||||
vec![ConcreteDataType::float64_datatype()],
|
||||
vec![
|
||||
ConcreteDataType::string_datatype(),
|
||||
ConcreteDataType::binary_datatype(),
|
||||
],
|
||||
)
|
||||
}
|
||||
|
||||
fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
|
||||
ensure!(
|
||||
columns.len() == 2,
|
||||
InvalidFuncArgsSnafu {
|
||||
err_msg: format!(
|
||||
"The length of the args is not correct, expect exactly two, have: {}",
|
||||
columns.len()
|
||||
),
|
||||
}
|
||||
);
|
||||
let arg0 = &columns[0];
|
||||
let arg1 = &columns[1];
|
||||
|
||||
let len = arg0.len();
|
||||
let mut result = BinaryVectorBuilder::with_capacity(len);
|
||||
if len == 0 {
|
||||
return Ok(result.to_vector());
|
||||
}
|
||||
|
||||
let arg1_const = as_veclit_if_const(arg1)?;
|
||||
|
||||
for i in 0..len {
|
||||
let arg0 = arg0.get(i).as_f64_lossy();
|
||||
let Some(arg0) = arg0 else {
|
||||
result.push_null();
|
||||
continue;
|
||||
};
|
||||
|
||||
let arg1 = match arg1_const.as_ref() {
|
||||
Some(arg1) => Some(Cow::Borrowed(arg1.as_ref())),
|
||||
None => as_veclit(arg1.get_ref(i))?,
|
||||
};
|
||||
let Some(arg1) = arg1 else {
|
||||
result.push_null();
|
||||
continue;
|
||||
};
|
||||
|
||||
let vec = DVectorView::from_slice(&arg1, arg1.len());
|
||||
let vec_res = vec.add_scalar(arg0 as _);
|
||||
|
||||
let veclit = vec_res.as_slice();
|
||||
let binlit = veclit_to_binlit(veclit);
|
||||
result.push(Some(&binlit));
|
||||
}
|
||||
|
||||
Ok(result.to_vector())
|
||||
}
|
||||
}
|
||||
|
||||
impl Display for ScalarAddFunction {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(f, "{}", NAME.to_ascii_uppercase())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::sync::Arc;
|
||||
|
||||
use datatypes::vectors::{Float32Vector, StringVector};
|
||||
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_scalar_add() {
|
||||
let func = ScalarAddFunction;
|
||||
|
||||
let input0 = Arc::new(Float32Vector::from(vec![
|
||||
Some(1.0),
|
||||
Some(-1.0),
|
||||
None,
|
||||
Some(3.0),
|
||||
]));
|
||||
let input1 = Arc::new(StringVector::from(vec![
|
||||
Some("[1.0,2.0,3.0]".to_string()),
|
||||
Some("[4.0,5.0,6.0]".to_string()),
|
||||
Some("[7.0,8.0,9.0]".to_string()),
|
||||
None,
|
||||
]));
|
||||
|
||||
let result = func
|
||||
.eval(FunctionContext::default(), &[input0, input1])
|
||||
.unwrap();
|
||||
|
||||
let result = result.as_ref();
|
||||
assert_eq!(result.len(), 4);
|
||||
assert_eq!(
|
||||
result.get_ref(0).as_binary().unwrap(),
|
||||
Some(veclit_to_binlit(&[2.0, 3.0, 4.0]).as_slice())
|
||||
);
|
||||
assert_eq!(
|
||||
result.get_ref(1).as_binary().unwrap(),
|
||||
Some(veclit_to_binlit(&[3.0, 4.0, 5.0]).as_slice())
|
||||
);
|
||||
assert!(result.get_ref(2).is_null());
|
||||
assert!(result.get_ref(3).is_null());
|
||||
}
|
||||
}
|
||||
src/common/function/src/scalars/vector/scalar_mul.rs (new file, 173 lines)
@@ -0,0 +1,173 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::borrow::Cow;
|
||||
use std::fmt::Display;
|
||||
|
||||
use common_query::error::{InvalidFuncArgsSnafu, Result};
|
||||
use common_query::prelude::Signature;
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use datatypes::scalars::ScalarVectorBuilder;
|
||||
use datatypes::vectors::{BinaryVectorBuilder, MutableVector, VectorRef};
|
||||
use nalgebra::DVectorView;
|
||||
use snafu::ensure;
|
||||
|
||||
use crate::function::{Function, FunctionContext};
|
||||
use crate::helper;
|
||||
use crate::scalars::vector::impl_conv::{as_veclit, as_veclit_if_const, veclit_to_binlit};
|
||||
|
||||
const NAME: &str = "vec_scalar_mul";
|
||||
|
||||
/// Multiplies each element of a vector by a scalar.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```sql
|
||||
/// SELECT vec_to_string(vec_scalar_mul(2, "[1, 2, 3]")) as result;
|
||||
///
|
||||
/// +---------+
|
||||
/// | result |
|
||||
/// +---------+
|
||||
/// | [2,4,6] |
|
||||
/// +---------+
|
||||
///
|
||||
/// -- 1/scalar to simulate division
|
||||
/// SELECT vec_to_string(vec_scalar_mul(0.5, "[2, 4, 6]")) as result;
|
||||
///
|
||||
/// +---------+
|
||||
/// | result |
|
||||
/// +---------+
|
||||
/// | [1,2,3] |
|
||||
/// +---------+
|
||||
/// ```
|
||||
#[derive(Debug, Clone, Default)]
|
||||
pub struct ScalarMulFunction;
|
||||
|
||||
impl Function for ScalarMulFunction {
|
||||
fn name(&self) -> &str {
|
||||
NAME
|
||||
}
|
||||
|
||||
fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
|
||||
Ok(ConcreteDataType::binary_datatype())
|
||||
}
|
||||
|
||||
fn signature(&self) -> Signature {
|
||||
helper::one_of_sigs2(
|
||||
vec![ConcreteDataType::float64_datatype()],
|
||||
vec![
|
||||
ConcreteDataType::string_datatype(),
|
||||
ConcreteDataType::binary_datatype(),
|
||||
],
|
||||
)
|
||||
}
|
||||
|
||||
fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
|
||||
ensure!(
|
||||
columns.len() == 2,
|
||||
InvalidFuncArgsSnafu {
|
||||
err_msg: format!(
|
||||
"The length of the args is not correct, expect exactly two, have: {}",
|
||||
columns.len()
|
||||
),
|
||||
}
|
||||
);
|
||||
let arg0 = &columns[0];
|
||||
let arg1 = &columns[1];
|
||||
|
||||
let len = arg0.len();
|
||||
let mut result = BinaryVectorBuilder::with_capacity(len);
|
||||
if len == 0 {
|
||||
return Ok(result.to_vector());
|
||||
}
|
||||
|
||||
let arg1_const = as_veclit_if_const(arg1)?;
|
||||
|
||||
for i in 0..len {
|
||||
let arg0 = arg0.get(i).as_f64_lossy();
|
||||
let Some(arg0) = arg0 else {
|
||||
result.push_null();
|
||||
continue;
|
||||
};
|
||||
|
||||
let arg1 = match arg1_const.as_ref() {
|
||||
Some(arg1) => Some(Cow::Borrowed(arg1.as_ref())),
|
||||
None => as_veclit(arg1.get_ref(i))?,
|
||||
};
|
||||
let Some(arg1) = arg1 else {
|
||||
result.push_null();
|
||||
continue;
|
||||
};
|
||||
|
||||
let vec = DVectorView::from_slice(&arg1, arg1.len());
|
||||
let vec_res = vec.scale(arg0 as _);
|
||||
|
||||
let veclit = vec_res.as_slice();
|
||||
let binlit = veclit_to_binlit(veclit);
|
||||
result.push(Some(&binlit));
|
||||
}
|
||||
|
||||
Ok(result.to_vector())
|
||||
}
|
||||
}
|
||||
|
||||
impl Display for ScalarMulFunction {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(f, "{}", NAME.to_ascii_uppercase())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::sync::Arc;
|
||||
|
||||
use datatypes::vectors::{Float32Vector, StringVector};
|
||||
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_scalar_mul() {
|
||||
let func = ScalarMulFunction;
|
||||
|
||||
let input0 = Arc::new(Float32Vector::from(vec![
|
||||
Some(2.0),
|
||||
Some(-0.5),
|
||||
None,
|
||||
Some(3.0),
|
||||
]));
|
||||
let input1 = Arc::new(StringVector::from(vec![
|
||||
Some("[1.0,2.0,3.0]".to_string()),
|
||||
Some("[8.0,10.0,12.0]".to_string()),
|
||||
Some("[7.0,8.0,9.0]".to_string()),
|
||||
None,
|
||||
]));
|
||||
|
||||
let result = func
|
||||
.eval(FunctionContext::default(), &[input0, input1])
|
||||
.unwrap();
|
||||
|
||||
let result = result.as_ref();
|
||||
assert_eq!(result.len(), 4);
|
||||
assert_eq!(
|
||||
result.get_ref(0).as_binary().unwrap(),
|
||||
Some(veclit_to_binlit(&[2.0, 4.0, 6.0]).as_slice())
|
||||
);
|
||||
assert_eq!(
|
||||
result.get_ref(1).as_binary().unwrap(),
|
||||
Some(veclit_to_binlit(&[-4.0, -5.0, -6.0]).as_slice())
|
||||
);
|
||||
assert!(result.get_ref(2).is_null());
|
||||
assert!(result.get_ref(3).is_null());
|
||||
}
|
||||
}
|
||||
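Because both new functions accept the vector argument as a string or as the binary encoding produced by other vector functions, they compose directly in SQL. A usage sketch; `vec_to_string` is the conversion function referenced in the doc comments above and is assumed to be available:

```sql
-- (v - 1) * 2 for v = [2, 3, 4]  =>  [2, 4, 6]
SELECT vec_to_string(vec_scalar_mul(2, vec_scalar_add(-1, '[2, 3, 4]'))) AS result;
```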
src/common/function/src/utils.rs (new file, 58 lines)
@@ -0,0 +1,58 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
/// Escapes special characters in the provided pattern string for `LIKE`.
|
||||
///
|
||||
/// Specifically, it prefixes the backslash (`\`), percent (`%`), and underscore (`_`)
|
||||
/// characters with an additional backslash to ensure they are treated literally.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```rust
|
||||
/// let escaped = escape_pattern("100%_some\\path");
|
||||
/// assert_eq!(escaped, "100\\%\\_some\\\\path");
|
||||
/// ```
|
||||
pub fn escape_like_pattern(pattern: &str) -> String {
|
||||
pattern
|
||||
.chars()
|
||||
.flat_map(|c| match c {
|
||||
'\\' | '%' | '_' => vec!['\\', c],
|
||||
_ => vec![c],
|
||||
})
|
||||
.collect::<String>()
|
||||
}
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_escape_like_pattern() {
|
||||
assert_eq!(
|
||||
escape_like_pattern("100%_some\\path"),
|
||||
"100\\%\\_some\\\\path"
|
||||
);
|
||||
assert_eq!(escape_like_pattern(""), "");
|
||||
assert_eq!(escape_like_pattern("hello"), "hello");
|
||||
assert_eq!(escape_like_pattern("\\%_"), "\\\\\\%\\_");
|
||||
assert_eq!(escape_like_pattern("%%__\\\\"), "\\%\\%\\_\\_\\\\\\\\");
|
||||
assert_eq!(escape_like_pattern("abc123"), "abc123");
|
||||
assert_eq!(escape_like_pattern("%_\\"), "\\%\\_\\\\");
|
||||
assert_eq!(
|
||||
escape_like_pattern("%%__\\\\another%string"),
|
||||
"\\%\\%\\_\\_\\\\\\\\another\\%string"
|
||||
);
|
||||
assert_eq!(escape_like_pattern("foo%bar_"), "foo\\%bar\\_");
|
||||
assert_eq!(escape_like_pattern("\\_\\%"), "\\\\\\_\\\\\\%");
|
||||
}
|
||||
}
|
||||
src/common/meta/src/cache/container.rs (vendored)
@@ -43,7 +43,7 @@ pub struct CacheContainer<K, V, CacheToken> {
    cache: Cache<K, V>,
    invalidator: Invalidator<K, V, CacheToken>,
    initializer: Initializer<K, V>,
    token_filter: TokenFilter<CacheToken>,
    token_filter: fn(&CacheToken) -> bool,
}

impl<K, V, CacheToken> CacheContainer<K, V, CacheToken>
@@ -58,7 +58,7 @@ where
        cache: Cache<K, V>,
        invalidator: Invalidator<K, V, CacheToken>,
        initializer: Initializer<K, V>,
        token_filter: TokenFilter<CacheToken>,
        token_filter: fn(&CacheToken) -> bool,
    ) -> Self {
        Self {
            name,
@@ -206,10 +206,13 @@ mod tests {
|
||||
name: &'a str,
|
||||
}
|
||||
|
||||
fn always_true_filter(_: &String) -> bool {
|
||||
true
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_get() {
|
||||
let cache: Cache<NameKey, String> = CacheBuilder::new(128).build();
|
||||
let filter: TokenFilter<String> = Box::new(|_| true);
|
||||
let counter = Arc::new(AtomicI32::new(0));
|
||||
let moved_counter = counter.clone();
|
||||
let init: Initializer<NameKey, String> = Arc::new(move |_| {
|
||||
@@ -219,7 +222,13 @@ mod tests {
|
||||
let invalidator: Invalidator<NameKey, String, String> =
|
||||
Box::new(|_, _| Box::pin(async { Ok(()) }));
|
||||
|
||||
let adv_cache = CacheContainer::new("test".to_string(), cache, invalidator, init, filter);
|
||||
let adv_cache = CacheContainer::new(
|
||||
"test".to_string(),
|
||||
cache,
|
||||
invalidator,
|
||||
init,
|
||||
always_true_filter,
|
||||
);
|
||||
let key = NameKey { name: "key" };
|
||||
let value = adv_cache.get(key).await.unwrap().unwrap();
|
||||
assert_eq!(value, "hi");
|
||||
@@ -233,7 +242,6 @@ mod tests {
|
||||
#[tokio::test]
|
||||
async fn test_get_by_ref() {
|
||||
let cache: Cache<String, String> = CacheBuilder::new(128).build();
|
||||
let filter: TokenFilter<String> = Box::new(|_| true);
|
||||
let counter = Arc::new(AtomicI32::new(0));
|
||||
let moved_counter = counter.clone();
|
||||
let init: Initializer<String, String> = Arc::new(move |_| {
|
||||
@@ -243,7 +251,13 @@ mod tests {
|
||||
let invalidator: Invalidator<String, String, String> =
|
||||
Box::new(|_, _| Box::pin(async { Ok(()) }));
|
||||
|
||||
let adv_cache = CacheContainer::new("test".to_string(), cache, invalidator, init, filter);
|
||||
let adv_cache = CacheContainer::new(
|
||||
"test".to_string(),
|
||||
cache,
|
||||
invalidator,
|
||||
init,
|
||||
always_true_filter,
|
||||
);
|
||||
let value = adv_cache.get_by_ref("foo").await.unwrap().unwrap();
|
||||
assert_eq!(value, "hi");
|
||||
let value = adv_cache.get_by_ref("foo").await.unwrap().unwrap();
|
||||
@@ -257,13 +271,18 @@ mod tests {
|
||||
#[tokio::test]
|
||||
async fn test_get_value_not_exits() {
|
||||
let cache: Cache<String, String> = CacheBuilder::new(128).build();
|
||||
let filter: TokenFilter<String> = Box::new(|_| true);
|
||||
let init: Initializer<String, String> =
|
||||
Arc::new(move |_| Box::pin(async { error::ValueNotExistSnafu {}.fail() }));
|
||||
let invalidator: Invalidator<String, String, String> =
|
||||
Box::new(|_, _| Box::pin(async { Ok(()) }));
|
||||
|
||||
let adv_cache = CacheContainer::new("test".to_string(), cache, invalidator, init, filter);
|
||||
let adv_cache = CacheContainer::new(
|
||||
"test".to_string(),
|
||||
cache,
|
||||
invalidator,
|
||||
init,
|
||||
always_true_filter,
|
||||
);
|
||||
let value = adv_cache.get_by_ref("foo").await.unwrap();
|
||||
assert!(value.is_none());
|
||||
}
|
||||
@@ -271,7 +290,6 @@ mod tests {
|
||||
#[tokio::test]
|
||||
async fn test_invalidate() {
|
||||
let cache: Cache<String, String> = CacheBuilder::new(128).build();
|
||||
let filter: TokenFilter<String> = Box::new(|_| true);
|
||||
let counter = Arc::new(AtomicI32::new(0));
|
||||
let moved_counter = counter.clone();
|
||||
let init: Initializer<String, String> = Arc::new(move |_| {
|
||||
@@ -285,7 +303,13 @@ mod tests {
|
||||
})
|
||||
});
|
||||
|
||||
let adv_cache = CacheContainer::new("test".to_string(), cache, invalidator, init, filter);
|
||||
let adv_cache = CacheContainer::new(
|
||||
"test".to_string(),
|
||||
cache,
|
||||
invalidator,
|
||||
init,
|
||||
always_true_filter,
|
||||
);
|
||||
let value = adv_cache.get_by_ref("foo").await.unwrap().unwrap();
|
||||
assert_eq!(value, "hi");
|
||||
let value = adv_cache.get_by_ref("foo").await.unwrap().unwrap();
|
||||
|
||||
@@ -45,7 +45,7 @@ pub fn new_table_flownode_set_cache(
|
||||
let table_flow_manager = Arc::new(TableFlowManager::new(kv_backend));
|
||||
let init = init_factory(table_flow_manager);
|
||||
|
||||
CacheContainer::new(name, cache, Box::new(invalidator), init, Box::new(filter))
|
||||
CacheContainer::new(name, cache, Box::new(invalidator), init, filter)
|
||||
}
|
||||
|
||||
fn init_factory(table_flow_manager: TableFlowManagerRef) -> Initializer<TableId, FlownodeSet> {
|
||||
|
||||
src/common/meta/src/cache/registry.rs (vendored)
@@ -151,12 +151,15 @@ mod tests {
|
||||
use crate::cache::*;
|
||||
use crate::instruction::CacheIdent;
|
||||
|
||||
fn always_true_filter(_: &CacheIdent) -> bool {
|
||||
true
|
||||
}
|
||||
|
||||
fn test_cache(
|
||||
name: &str,
|
||||
invalidator: Invalidator<String, String, CacheIdent>,
|
||||
) -> CacheContainer<String, String, CacheIdent> {
|
||||
let cache: Cache<String, String> = CacheBuilder::new(128).build();
|
||||
let filter: TokenFilter<CacheIdent> = Box::new(|_| true);
|
||||
let counter = Arc::new(AtomicI32::new(0));
|
||||
let moved_counter = counter.clone();
|
||||
let init: Initializer<String, String> = Arc::new(move |_| {
|
||||
@@ -164,7 +167,13 @@ mod tests {
|
||||
Box::pin(async { Ok(Some("hi".to_string())) })
|
||||
});
|
||||
|
||||
CacheContainer::new(name.to_string(), cache, invalidator, init, filter)
|
||||
CacheContainer::new(
|
||||
name.to_string(),
|
||||
cache,
|
||||
invalidator,
|
||||
init,
|
||||
always_true_filter,
|
||||
)
|
||||
}
|
||||
|
||||
fn test_i32_cache(
|
||||
@@ -172,7 +181,6 @@ mod tests {
|
||||
invalidator: Invalidator<i32, String, CacheIdent>,
|
||||
) -> CacheContainer<i32, String, CacheIdent> {
|
||||
let cache: Cache<i32, String> = CacheBuilder::new(128).build();
|
||||
let filter: TokenFilter<CacheIdent> = Box::new(|_| true);
|
||||
let counter = Arc::new(AtomicI32::new(0));
|
||||
let moved_counter = counter.clone();
|
||||
let init: Initializer<i32, String> = Arc::new(move |_| {
|
||||
@@ -180,7 +188,13 @@ mod tests {
|
||||
Box::pin(async { Ok(Some("foo".to_string())) })
|
||||
});
|
||||
|
||||
CacheContainer::new(name.to_string(), cache, invalidator, init, filter)
|
||||
CacheContainer::new(
|
||||
name.to_string(),
|
||||
cache,
|
||||
invalidator,
|
||||
init,
|
||||
always_true_filter,
|
||||
)
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
|
||||
src/common/meta/src/cache/table/schema.rs (vendored)
@@ -36,7 +36,7 @@ pub fn new_schema_cache(
|
||||
let schema_manager = SchemaManager::new(kv_backend.clone());
|
||||
let init = init_factory(schema_manager);
|
||||
|
||||
CacheContainer::new(name, cache, Box::new(invalidator), init, Box::new(filter))
|
||||
CacheContainer::new(name, cache, Box::new(invalidator), init, filter)
|
||||
}
|
||||
|
||||
fn init_factory(schema_manager: SchemaManager) -> Initializer<SchemaName, Arc<SchemaNameValue>> {
|
||||
|
||||
@@ -41,7 +41,7 @@ pub fn new_table_info_cache(
|
||||
let table_info_manager = Arc::new(TableInfoManager::new(kv_backend));
|
||||
let init = init_factory(table_info_manager);
|
||||
|
||||
CacheContainer::new(name, cache, Box::new(invalidator), init, Box::new(filter))
|
||||
CacheContainer::new(name, cache, Box::new(invalidator), init, filter)
|
||||
}
|
||||
|
||||
fn init_factory(table_info_manager: TableInfoManagerRef) -> Initializer<TableId, Arc<TableInfo>> {
|
||||
|
||||
@@ -41,7 +41,7 @@ pub fn new_table_name_cache(
|
||||
let table_name_manager = Arc::new(TableNameManager::new(kv_backend));
|
||||
let init = init_factory(table_name_manager);
|
||||
|
||||
CacheContainer::new(name, cache, Box::new(invalidator), init, Box::new(filter))
|
||||
CacheContainer::new(name, cache, Box::new(invalidator), init, filter)
|
||||
}
|
||||
|
||||
fn init_factory(table_name_manager: TableNameManagerRef) -> Initializer<TableName, TableId> {
|
||||
|
||||
src/common/meta/src/cache/table/table_route.rs (vendored)
@@ -49,14 +49,6 @@ impl TableRoute {
|
||||
TableRoute::Logical(_) => None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns [LogicalTableRouteValue] reference if it's [TableRoute::Logical]; Otherwise it returns [None].
|
||||
pub fn as_logical_table_route_ref(&self) -> Option<&Arc<LogicalTableRouteValue>> {
|
||||
match self {
|
||||
TableRoute::Physical(_) => None,
|
||||
TableRoute::Logical(table_route) => Some(table_route),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// [TableRouteCache] caches the [TableId] to [TableRoute] mapping.
|
||||
@@ -73,7 +65,7 @@ pub fn new_table_route_cache(
|
||||
let table_info_manager = Arc::new(TableRouteManager::new(kv_backend));
|
||||
let init = init_factory(table_info_manager);
|
||||
|
||||
CacheContainer::new(name, cache, Box::new(invalidator), init, Box::new(filter))
|
||||
CacheContainer::new(name, cache, Box::new(invalidator), init, filter)
|
||||
}
|
||||
|
||||
fn init_factory(
|
||||
|
||||
@@ -40,7 +40,7 @@ pub fn new_table_schema_cache(
|
||||
let table_info_manager = TableInfoManager::new(kv_backend);
|
||||
let init = init_factory(table_info_manager);
|
||||
|
||||
CacheContainer::new(name, cache, Box::new(invalidator), init, Box::new(filter))
|
||||
CacheContainer::new(name, cache, Box::new(invalidator), init, filter)
|
||||
}
|
||||
|
||||
fn init_factory(table_info_manager: TableInfoManager) -> Initializer<TableId, Arc<SchemaName>> {
|
||||
|
||||
src/common/meta/src/cache/table/view_info.rs
@@ -40,7 +40,7 @@ pub fn new_view_info_cache(
|
||||
let view_info_manager = Arc::new(ViewInfoManager::new(kv_backend));
|
||||
let init = init_factory(view_info_manager);
|
||||
|
||||
CacheContainer::new(name, cache, Box::new(invalidator), init, Box::new(filter))
|
||||
CacheContainer::new(name, cache, Box::new(invalidator), init, filter)
|
||||
}
|
||||
|
||||
fn init_factory(view_info_manager: ViewInfoManagerRef) -> Initializer<TableId, Arc<ViewInfoValue>> {
|
||||
|
||||
@@ -137,6 +137,7 @@ use self::schema_name::{SchemaManager, SchemaNameKey, SchemaNameValue};
|
||||
use self::table_route::{TableRouteManager, TableRouteValue};
|
||||
use self::tombstone::TombstoneManager;
|
||||
use crate::error::{self, Result, SerdeJsonSnafu};
|
||||
use crate::key::flow::flow_state::FlowStateValue;
|
||||
use crate::key::node_address::NodeAddressValue;
|
||||
use crate::key::table_route::TableRouteKey;
|
||||
use crate::key::txn_helper::TxnOpGetResponseSet;
|
||||
@@ -1262,7 +1263,8 @@ impl_metadata_value! {
|
||||
FlowRouteValue,
|
||||
TableFlowValue,
|
||||
NodeAddressValue,
|
||||
SchemaNameValue
|
||||
SchemaNameValue,
|
||||
FlowStateValue
|
||||
}
|
||||
|
||||
impl_optional_metadata_value! {
|
||||
|
||||
@@ -13,7 +13,6 @@
|
||||
// limitations under the License.
|
||||
|
||||
use std::fmt::Display;
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_catalog::consts::DEFAULT_CATALOG_NAME;
|
||||
use futures::stream::BoxStream;
|
||||
@@ -146,7 +145,7 @@ impl CatalogManager {
|
||||
self.kv_backend.clone(),
|
||||
req,
|
||||
DEFAULT_PAGE_SIZE,
|
||||
Arc::new(catalog_decoder),
|
||||
catalog_decoder,
|
||||
)
|
||||
.into_stream();
|
||||
|
||||
@@ -156,6 +155,8 @@ impl CatalogManager {
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::sync::Arc;
|
||||
|
||||
use super::*;
|
||||
use crate::kv_backend::memory::MemoryKvBackend;
|
||||
|
||||
|
||||
@@ -14,7 +14,6 @@
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::fmt::Display;
|
||||
use std::sync::Arc;
|
||||
|
||||
use futures::stream::BoxStream;
|
||||
use serde::{Deserialize, Serialize};
|
||||
@@ -166,7 +165,7 @@ impl DatanodeTableManager {
|
||||
self.kv_backend.clone(),
|
||||
req,
|
||||
DEFAULT_PAGE_SIZE,
|
||||
Arc::new(datanode_table_value_decoder),
|
||||
datanode_table_value_decoder,
|
||||
)
|
||||
.into_stream();
|
||||
|
||||
|
||||
@@ -15,6 +15,7 @@
|
||||
pub mod flow_info;
|
||||
pub(crate) mod flow_name;
|
||||
pub(crate) mod flow_route;
|
||||
pub mod flow_state;
|
||||
pub(crate) mod flownode_flow;
|
||||
pub(crate) mod table_flow;
|
||||
|
||||
@@ -35,6 +36,7 @@ use crate::ensure_values;
|
||||
use crate::error::{self, Result};
|
||||
use crate::key::flow::flow_info::FlowInfoManager;
|
||||
use crate::key::flow::flow_name::FlowNameManager;
|
||||
use crate::key::flow::flow_state::FlowStateManager;
|
||||
use crate::key::flow::flownode_flow::FlownodeFlowManager;
|
||||
pub use crate::key::flow::table_flow::{TableFlowManager, TableFlowManagerRef};
|
||||
use crate::key::txn_helper::TxnOpGetResponseSet;
|
||||
@@ -102,6 +104,8 @@ pub struct FlowMetadataManager {
|
||||
flownode_flow_manager: FlownodeFlowManager,
|
||||
table_flow_manager: TableFlowManager,
|
||||
flow_name_manager: FlowNameManager,
|
||||
/// Only metasrv has access to its own memory backend, so in other cases this should be None.
|
||||
flow_state_manager: Option<FlowStateManager>,
|
||||
kv_backend: KvBackendRef,
|
||||
}
|
||||
|
||||
@@ -114,6 +118,7 @@ impl FlowMetadataManager {
|
||||
flow_name_manager: FlowNameManager::new(kv_backend.clone()),
|
||||
flownode_flow_manager: FlownodeFlowManager::new(kv_backend.clone()),
|
||||
table_flow_manager: TableFlowManager::new(kv_backend.clone()),
|
||||
flow_state_manager: None,
|
||||
kv_backend,
|
||||
}
|
||||
}
|
||||
@@ -123,6 +128,10 @@ impl FlowMetadataManager {
|
||||
&self.flow_name_manager
|
||||
}
|
||||
|
||||
pub fn flow_state_manager(&self) -> Option<&FlowStateManager> {
|
||||
self.flow_state_manager.as_ref()
|
||||
}
|
||||
|
||||
/// Returns the [`FlowInfoManager`].
|
||||
pub fn flow_info_manager(&self) -> &FlowInfoManager {
|
||||
&self.flow_info_manager
|
||||
|
||||
@@ -12,8 +12,6 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use futures::stream::BoxStream;
|
||||
use lazy_static::lazy_static;
|
||||
use regex::Regex;
|
||||
@@ -201,7 +199,7 @@ impl FlowNameManager {
|
||||
self.kv_backend.clone(),
|
||||
req,
|
||||
DEFAULT_PAGE_SIZE,
|
||||
Arc::new(flow_name_decoder),
|
||||
flow_name_decoder,
|
||||
)
|
||||
.into_stream();
|
||||
|
||||
|
||||
@@ -12,8 +12,6 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use futures::stream::BoxStream;
|
||||
use lazy_static::lazy_static;
|
||||
use regex::Regex;
|
||||
@@ -179,7 +177,7 @@ impl FlowRouteManager {
|
||||
self.kv_backend.clone(),
|
||||
req,
|
||||
DEFAULT_PAGE_SIZE,
|
||||
Arc::new(flow_route_decoder),
|
||||
flow_route_decoder,
|
||||
)
|
||||
.into_stream();
|
||||
|
||||
|
||||
src/common/meta/src/key/flow/flow_state.rs (new file)
@@ -0,0 +1,162 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::collections::BTreeMap;
|
||||
use std::sync::Arc;
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::error::{self, Result};
|
||||
use crate::key::flow::FlowScoped;
|
||||
use crate::key::{FlowId, MetadataKey, MetadataValue};
|
||||
use crate::kv_backend::KvBackendRef;
|
||||
use crate::rpc::store::PutRequest;
|
||||
|
||||
/// The entire FlowId-to-state-size map is stored directly in the value part of the key.
|
||||
const FLOW_STATE_KEY: &str = "state";
|
||||
|
||||
/// The key of flow state.
|
||||
#[derive(Debug, Clone, Copy, PartialEq)]
|
||||
struct FlowStateKeyInner;
|
||||
|
||||
impl FlowStateKeyInner {
|
||||
pub fn new() -> Self {
|
||||
Self
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> MetadataKey<'a, FlowStateKeyInner> for FlowStateKeyInner {
|
||||
fn to_bytes(&self) -> Vec<u8> {
|
||||
FLOW_STATE_KEY.as_bytes().to_vec()
|
||||
}
|
||||
|
||||
fn from_bytes(bytes: &'a [u8]) -> Result<FlowStateKeyInner> {
|
||||
let key = std::str::from_utf8(bytes).map_err(|e| {
|
||||
error::InvalidMetadataSnafu {
|
||||
err_msg: format!(
|
||||
"FlowInfoKeyInner '{}' is not a valid UTF8 string: {e}",
|
||||
String::from_utf8_lossy(bytes)
|
||||
),
|
||||
}
|
||||
.build()
|
||||
})?;
|
||||
if key != FLOW_STATE_KEY {
|
||||
return Err(error::InvalidMetadataSnafu {
|
||||
err_msg: format!("Invalid FlowStateKeyInner '{key}'"),
|
||||
}
|
||||
.build());
|
||||
}
|
||||
Ok(FlowStateKeyInner::new())
|
||||
}
|
||||
}
|
||||
|
||||
/// The key stores the state size of the flow.
|
||||
///
|
||||
/// The layout: `__flow/state`.
|
||||
pub struct FlowStateKey(FlowScoped<FlowStateKeyInner>);
|
||||
|
||||
impl FlowStateKey {
|
||||
/// Returns the [FlowStateKey].
|
||||
pub fn new() -> FlowStateKey {
|
||||
let inner = FlowStateKeyInner::new();
|
||||
FlowStateKey(FlowScoped::new(inner))
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for FlowStateKey {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> MetadataKey<'a, FlowStateKey> for FlowStateKey {
|
||||
fn to_bytes(&self) -> Vec<u8> {
|
||||
self.0.to_bytes()
|
||||
}
|
||||
|
||||
fn from_bytes(bytes: &'a [u8]) -> Result<FlowStateKey> {
|
||||
Ok(FlowStateKey(FlowScoped::<FlowStateKeyInner>::from_bytes(
|
||||
bytes,
|
||||
)?))
|
||||
}
|
||||
}
|
||||
|
||||
/// The value of flow state size
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
|
||||
pub struct FlowStateValue {
|
||||
/// For each flow id, the size of its in-memory state in bytes
|
||||
pub state_size: BTreeMap<FlowId, usize>,
|
||||
}
|
||||
|
||||
impl FlowStateValue {
|
||||
pub fn new(state_size: BTreeMap<FlowId, usize>) -> Self {
|
||||
Self { state_size }
|
||||
}
|
||||
}
|
||||
|
||||
pub type FlowStateManagerRef = Arc<FlowStateManager>;
|
||||
|
||||
/// The manager of [FlowStateKey]. Since state size changes frequently, we store it in memory.
|
||||
///
|
||||
/// This is only used in distributed mode, where metasrv uses heartbeats to update the flow stat report
|
||||
/// and the frontend uses `get` to read the latest flow stat report.
|
||||
pub struct FlowStateManager {
|
||||
in_memory: KvBackendRef,
|
||||
}
|
||||
|
||||
impl FlowStateManager {
|
||||
pub fn new(in_memory: KvBackendRef) -> Self {
|
||||
Self { in_memory }
|
||||
}
|
||||
|
||||
pub async fn get(&self) -> Result<Option<FlowStateValue>> {
|
||||
let key = FlowStateKey::new().to_bytes();
|
||||
self.in_memory
|
||||
.get(&key)
|
||||
.await?
|
||||
.map(|x| FlowStateValue::try_from_raw_value(&x.value))
|
||||
.transpose()
|
||||
}
|
||||
|
||||
pub async fn put(&self, value: FlowStateValue) -> Result<()> {
|
||||
let key = FlowStateKey::new().to_bytes();
|
||||
let value = value.try_as_raw_value()?;
|
||||
let req = PutRequest::new().with_key(key).with_value(value);
|
||||
self.in_memory.put(req).await?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
/// Flow's state report, send regularly through heartbeat message
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct FlowStat {
|
||||
/// For each flow id, the size of its in-memory state in bytes
|
||||
pub state_size: BTreeMap<u32, usize>,
|
||||
}
|
||||
|
||||
impl From<FlowStateValue> for FlowStat {
|
||||
fn from(value: FlowStateValue) -> Self {
|
||||
Self {
|
||||
state_size: value.state_size,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<FlowStat> for FlowStateValue {
|
||||
fn from(value: FlowStat) -> Self {
|
||||
Self {
|
||||
state_size: value.state_size,
|
||||
}
|
||||
}
|
||||
}
|
||||
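A hedged usage sketch of the new manager, intended as an async test inside the crate. The `get`/`put` calls and value types follow the code above; constructing the in-memory backend via `MemoryKvBackend` (referenced elsewhere in this diff) is an assumption:

use std::collections::BTreeMap;
use std::sync::Arc;

// Assumed: any in-memory KvBackendRef works here; metasrv would pass its own.
let in_memory: KvBackendRef = Arc::new(MemoryKvBackend::default());
let manager = FlowStateManager::new(in_memory);

// Report per-flow state sizes (flow id -> bytes held in memory).
let mut state_size = BTreeMap::new();
state_size.insert(1024, 4096);
manager.put(FlowStateValue::new(state_size)).await?;

// Read the latest report back and expose it as a FlowStat.
let stat: Option<FlowStat> = manager.get().await?.map(FlowStat::from);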
@@ -12,8 +12,6 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use futures::stream::BoxStream;
|
||||
use futures::TryStreamExt;
|
||||
use lazy_static::lazy_static;
|
||||
@@ -179,7 +177,7 @@ impl FlownodeFlowManager {
|
||||
self.kv_backend.clone(),
|
||||
req,
|
||||
DEFAULT_PAGE_SIZE,
|
||||
Arc::new(flownode_flow_key_decoder),
|
||||
flownode_flow_key_decoder,
|
||||
)
|
||||
.into_stream();
|
||||
|
||||
|
||||
@@ -206,7 +206,7 @@ impl TableFlowManager {
|
||||
self.kv_backend.clone(),
|
||||
req,
|
||||
DEFAULT_PAGE_SIZE,
|
||||
Arc::new(table_flow_decoder),
|
||||
table_flow_decoder,
|
||||
)
|
||||
.into_stream();
|
||||
|
||||
|
||||
@@ -14,7 +14,6 @@
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::fmt::Display;
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
|
||||
use common_time::DatabaseTimeToLive;
|
||||
@@ -283,7 +282,7 @@ impl SchemaManager {
|
||||
self.kv_backend.clone(),
|
||||
req,
|
||||
DEFAULT_PAGE_SIZE,
|
||||
Arc::new(schema_decoder),
|
||||
schema_decoder,
|
||||
)
|
||||
.into_stream();
|
||||
|
||||
@@ -308,6 +307,7 @@ impl<'a> From<&'a SchemaName> for SchemaNameKey<'a> {
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use super::*;
|
||||
|
||||
@@ -269,7 +269,7 @@ impl TableNameManager {
|
||||
self.kv_backend.clone(),
|
||||
req,
|
||||
DEFAULT_PAGE_SIZE,
|
||||
Arc::new(table_decoder),
|
||||
table_decoder,
|
||||
)
|
||||
.into_stream();
|
||||
|
||||
|
||||
@@ -290,28 +290,6 @@ impl TableRouteManager {
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the [`PhysicalTableRouteValue`] in the first level,
|
||||
/// It won't follow the [`LogicalTableRouteValue`] to find the next level [`PhysicalTableRouteValue`].
|
||||
///
|
||||
/// Returns an error if the first level value is not a [`PhysicalTableRouteValue`].
|
||||
pub async fn try_get_physical_table_route(
|
||||
&self,
|
||||
table_id: TableId,
|
||||
) -> Result<Option<PhysicalTableRouteValue>> {
|
||||
match self.storage.get(table_id).await? {
|
||||
Some(route) => {
|
||||
ensure!(
|
||||
route.is_physical(),
|
||||
UnexpectedLogicalRouteTableSnafu {
|
||||
err_msg: format!("{route:?} is a non-physical TableRouteValue.")
|
||||
}
|
||||
);
|
||||
Ok(Some(route.into_physical_table_route()))
|
||||
}
|
||||
None => Ok(None),
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the [TableId] recursively.
|
||||
///
|
||||
/// Returns a [TableRouteNotFound](crate::error::Error::TableRouteNotFound) Error if:
|
||||
@@ -569,37 +547,6 @@ impl TableRouteStorage {
|
||||
.transpose()
|
||||
}
|
||||
|
||||
/// Returns the physical `DeserializedValueWithBytes<TableRouteValue>` recursively.
|
||||
///
|
||||
/// Returns a [TableRouteNotFound](crate::error::Error::TableRouteNotFound) Error if:
|
||||
/// - the physical table(`logical_or_physical_table_id`) does not exist
|
||||
/// - the corresponding physical table of the logical table(`logical_or_physical_table_id`) does not exist.
|
||||
pub async fn get_physical_table_route_with_raw_bytes(
|
||||
&self,
|
||||
logical_or_physical_table_id: TableId,
|
||||
) -> Result<(TableId, DeserializedValueWithBytes<TableRouteValue>)> {
|
||||
let table_route = self
|
||||
.get_with_raw_bytes(logical_or_physical_table_id)
|
||||
.await?
|
||||
.context(TableRouteNotFoundSnafu {
|
||||
table_id: logical_or_physical_table_id,
|
||||
})?;
|
||||
|
||||
match table_route.get_inner_ref() {
|
||||
TableRouteValue::Physical(_) => Ok((logical_or_physical_table_id, table_route)),
|
||||
TableRouteValue::Logical(x) => {
|
||||
let physical_table_id = x.physical_table_id();
|
||||
let physical_table_route = self
|
||||
.get_with_raw_bytes(physical_table_id)
|
||||
.await?
|
||||
.context(TableRouteNotFoundSnafu {
|
||||
table_id: physical_table_id,
|
||||
})?;
|
||||
Ok((physical_table_id, physical_table_route))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns batch of [`TableRouteValue`] that respects the order of `table_ids`.
|
||||
pub async fn batch_get(&self, table_ids: &[TableId]) -> Result<Vec<Option<TableRouteValue>>> {
|
||||
let mut table_routes = self.batch_get_inner(table_ids).await?;
|
||||
|
||||
@@ -36,7 +36,7 @@ pub mod postgres;
|
||||
pub mod test;
|
||||
pub mod txn;
|
||||
|
||||
pub type KvBackendRef = Arc<dyn KvBackend<Error = Error> + Send + Sync>;
|
||||
pub type KvBackendRef<E = Error> = Arc<dyn KvBackend<Error = E> + Send + Sync>;
|
||||
|
||||
#[async_trait]
|
||||
pub trait KvBackend: TxnService
|
||||
@@ -161,6 +161,9 @@ where
|
||||
Self::Error: ErrorExt,
|
||||
{
|
||||
fn reset(&self);
|
||||
|
||||
/// Upcasts to `KvBackendRef`, since trait upcasting (https://github.com/rust-lang/rust/issues/65991) is not yet stable.
|
||||
fn as_kv_backend_ref(self: Arc<Self>) -> KvBackendRef<Self::Error>;
|
||||
}
|
||||
|
||||
pub type ResettableKvBackendRef = Arc<dyn ResettableKvBackend<Error = Error> + Send + Sync>;
|
||||
pub type ResettableKvBackendRef<E = Error> = Arc<dyn ResettableKvBackend<Error = E> + Send + Sync>;
|
||||
|
||||
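With the default type parameter, existing `KvBackendRef` usages keep compiling, while backends with a different error type can now be handed around as trait objects. A small sketch of what the new `as_kv_backend_ref` upcast allows, based on the `MemoryKvBackend` impl added later in this diff (bounds copied from that impl; everything else is an assumption):

fn upcast<T>(backend: Arc<MemoryKvBackend<T>>) -> KvBackendRef<T>
where
    T: ErrorExt + Send + Sync + 'static,
{
    // `as_kv_backend_ref` takes `self: Arc<Self>`, so the Arc is consumed here.
    backend.as_kv_backend_ref()
}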
@@ -15,6 +15,7 @@
|
||||
use std::any::Any;
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_telemetry::info;
|
||||
use etcd_client::{
|
||||
Client, DeleteOptions, GetOptions, PutOptions, Txn, TxnOp, TxnOpResponse, TxnResponse,
|
||||
};
|
||||
@@ -55,6 +56,7 @@ impl EtcdStore {
|
||||
}
|
||||
|
||||
pub fn with_etcd_client(client: Client, max_txn_ops: usize) -> KvBackendRef {
|
||||
info!("Connected to etcd");
|
||||
Arc::new(Self {
|
||||
client,
|
||||
max_txn_ops,
|
||||
|
||||
@@ -16,13 +16,13 @@ use std::any::Any;
|
||||
use std::collections::BTreeMap;
|
||||
use std::fmt::{Display, Formatter};
|
||||
use std::marker::PhantomData;
|
||||
use std::sync::RwLock;
|
||||
use std::sync::{Arc, RwLock};
|
||||
|
||||
use async_trait::async_trait;
|
||||
use common_error::ext::ErrorExt;
|
||||
use serde::Serializer;
|
||||
|
||||
use super::ResettableKvBackend;
|
||||
use super::{KvBackendRef, ResettableKvBackend};
|
||||
use crate::kv_backend::txn::{Txn, TxnOp, TxnOpResponse, TxnRequest, TxnResponse};
|
||||
use crate::kv_backend::{KvBackend, TxnService};
|
||||
use crate::metrics::METRIC_META_TXN_REQUEST;
|
||||
@@ -311,6 +311,10 @@ impl<T: ErrorExt + Send + Sync + 'static> ResettableKvBackend for MemoryKvBacken
|
||||
fn reset(&self) {
|
||||
self.clear();
|
||||
}
|
||||
|
||||
fn as_kv_backend_ref(self: Arc<Self>) -> KvBackendRef<T> {
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
|
||||
@@ -12,8 +12,6 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use async_stream::try_stream;
|
||||
use common_telemetry::debug;
|
||||
use futures::Stream;
|
||||
@@ -148,7 +146,7 @@ impl PaginationStreamFactory {
|
||||
}
|
||||
|
||||
pub struct PaginationStream<T> {
|
||||
decoder_fn: Arc<KeyValueDecoderFn<T>>,
|
||||
decoder_fn: fn(KeyValue) -> Result<T>,
|
||||
factory: PaginationStreamFactory,
|
||||
}
|
||||
|
||||
@@ -158,7 +156,7 @@ impl<T> PaginationStream<T> {
|
||||
kv: KvBackendRef,
|
||||
req: RangeRequest,
|
||||
page_size: usize,
|
||||
decoder_fn: Arc<KeyValueDecoderFn<T>>,
|
||||
decoder_fn: fn(KeyValue) -> Result<T>,
|
||||
) -> Self {
|
||||
Self {
|
||||
decoder_fn,
|
||||
@@ -191,6 +189,7 @@ mod tests {
|
||||
|
||||
use std::assert_matches::assert_matches;
|
||||
use std::collections::BTreeMap;
|
||||
use std::sync::Arc;
|
||||
|
||||
use futures::TryStreamExt;
|
||||
|
||||
@@ -250,7 +249,7 @@ mod tests {
|
||||
..Default::default()
|
||||
},
|
||||
DEFAULT_PAGE_SIZE,
|
||||
Arc::new(decoder),
|
||||
decoder,
|
||||
)
|
||||
.into_stream();
|
||||
let kv = stream.try_collect::<Vec<_>>().await.unwrap();
|
||||
@@ -290,7 +289,7 @@ mod tests {
|
||||
..Default::default()
|
||||
},
|
||||
2,
|
||||
Arc::new(decoder),
|
||||
decoder,
|
||||
);
|
||||
let kv = stream
|
||||
.into_stream()
|
||||
|
||||
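The decoder is now a plain `fn(KeyValue) -> Result<T>` rather than an `Arc<KeyValueDecoderFn<T>>`, which is why every caller touched in this diff (CatalogManager, DatanodeTableManager, FlowNameManager, FlowRouteManager, SchemaManager, TableNameManager, KvStateStore, and the tests) now passes the decoder function directly instead of wrapping it in `Arc::new`. A minimal sketch of the new call shape; the decoder body and the range request construction are assumptions:

// Assumed decoder: keep only the raw key bytes of each KeyValue.
fn decode_key(kv: KeyValue) -> Result<Vec<u8>> {
    Ok(kv.key)
}

let stream = PaginationStream::new(
    kv_backend.clone(),
    RangeRequest::default(),
    DEFAULT_PAGE_SIZE,
    decode_key, // plain `fn`, no `Arc::new(...)` wrapper
)
.into_stream();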
@@ -89,39 +89,6 @@ pub fn convert_to_region_leader_map(region_routes: &[RegionRoute]) -> HashMap<Re
|
||||
.collect::<HashMap<_, _>>()
|
||||
}
|
||||
|
||||
/// Returns the HashMap<[RegionNumber], HashSet<DatanodeId>>
|
||||
pub fn convert_to_region_peer_map(
|
||||
region_routes: &[RegionRoute],
|
||||
) -> HashMap<RegionNumber, HashSet<u64>> {
|
||||
region_routes
|
||||
.iter()
|
||||
.map(|x| {
|
||||
let set = x
|
||||
.follower_peers
|
||||
.iter()
|
||||
.map(|p| p.id)
|
||||
.chain(x.leader_peer.as_ref().map(|p| p.id))
|
||||
.collect::<HashSet<_>>();
|
||||
|
||||
(x.region.id.region_number(), set)
|
||||
})
|
||||
.collect::<HashMap<_, _>>()
|
||||
}
|
||||
|
||||
/// Returns the HashMap<[RegionNumber], [LeaderState]>;
|
||||
pub fn convert_to_region_leader_state_map(
|
||||
region_routes: &[RegionRoute],
|
||||
) -> HashMap<RegionNumber, LeaderState> {
|
||||
region_routes
|
||||
.iter()
|
||||
.filter_map(|x| {
|
||||
x.leader_state
|
||||
.as_ref()
|
||||
.map(|state| (x.region.id.region_number(), *state))
|
||||
})
|
||||
.collect::<HashMap<_, _>>()
|
||||
}
|
||||
|
||||
pub fn find_region_leader(
|
||||
region_routes: &[RegionRoute],
|
||||
region_number: RegionNumber,
|
||||
@@ -147,19 +114,6 @@ pub fn find_leader_regions(region_routes: &[RegionRoute], datanode: &Peer) -> Ve
|
||||
.collect()
|
||||
}
|
||||
|
||||
pub fn extract_all_peers(region_routes: &[RegionRoute]) -> Vec<Peer> {
|
||||
let mut peers = region_routes
|
||||
.iter()
|
||||
.flat_map(|x| x.leader_peer.iter().chain(x.follower_peers.iter()))
|
||||
.collect::<HashSet<_>>()
|
||||
.into_iter()
|
||||
.cloned()
|
||||
.collect::<Vec<_>>();
|
||||
peers.sort_by_key(|x| x.id);
|
||||
|
||||
peers
|
||||
}
|
||||
|
||||
impl TableRoute {
|
||||
pub fn new(table: Table, region_routes: Vec<RegionRoute>) -> Self {
|
||||
let region_leaders = region_routes
|
||||
|
||||
@@ -12,8 +12,6 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use async_trait::async_trait;
|
||||
use common_error::ext::BoxedError;
|
||||
use common_procedure::error::{DeleteStatesSnafu, ListStateSnafu, PutStateSnafu};
|
||||
@@ -171,7 +169,7 @@ impl StateStore for KvStateStore {
|
||||
self.kv_backend.clone(),
|
||||
req,
|
||||
self.max_num_per_range_request.unwrap_or_default(),
|
||||
Arc::new(decode_kv),
|
||||
decode_kv,
|
||||
)
|
||||
.into_stream();
|
||||
|
||||
|
||||
@@ -544,7 +544,7 @@ mod tests {
|
||||
use common_test_util::temp_dir::create_temp_dir;
|
||||
use futures_util::future::BoxFuture;
|
||||
use futures_util::FutureExt;
|
||||
use object_store::ObjectStore;
|
||||
use object_store::{EntryMode, ObjectStore};
|
||||
use tokio::sync::mpsc;
|
||||
|
||||
use super::*;
|
||||
@@ -578,7 +578,11 @@ mod tests {
|
||||
) {
|
||||
let dir = proc_path!(procedure_store, "{procedure_id}/");
|
||||
let lister = object_store.list(&dir).await.unwrap();
|
||||
let mut files_in_dir: Vec<_> = lister.into_iter().map(|de| de.name().to_string()).collect();
|
||||
let mut files_in_dir: Vec<_> = lister
|
||||
.into_iter()
|
||||
.filter(|x| x.metadata().mode() == EntryMode::FILE)
|
||||
.map(|de| de.name().to_string())
|
||||
.collect();
|
||||
files_in_dir.sort_unstable();
|
||||
assert_eq!(files, files_in_dir);
|
||||
}
|
||||
|
||||
@@ -26,7 +26,6 @@ use std::sync::Arc;
|
||||
|
||||
use adapter::RecordBatchMetrics;
|
||||
use arc_swap::ArcSwapOption;
|
||||
use datafusion::physical_plan::memory::MemoryStream;
|
||||
pub use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
|
||||
use datatypes::arrow::compute::SortOptions;
|
||||
pub use datatypes::arrow::record_batch::RecordBatch as DfRecordBatch;
|
||||
@@ -170,19 +169,6 @@ impl RecordBatches {
|
||||
index: 0,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn into_df_stream(self) -> DfSendableRecordBatchStream {
|
||||
let df_record_batches = self
|
||||
.batches
|
||||
.into_iter()
|
||||
.map(|batch| batch.into_df_record_batch())
|
||||
.collect();
|
||||
// unwrap safety: `MemoryStream::try_new` won't fail
|
||||
Box::pin(
|
||||
MemoryStream::try_new(df_record_batches, self.schema.arrow_schema().clone(), None)
|
||||
.unwrap(),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl IntoIterator for RecordBatches {
|
||||
|
||||
@@ -35,8 +35,6 @@ serde_json.workspace = true
|
||||
snafu.workspace = true
|
||||
tempfile.workspace = true
|
||||
tokio.workspace = true
|
||||
tokio-metrics = "0.3"
|
||||
tokio-metrics-collector = { git = "https://github.com/MichaelScofield/tokio-metrics-collector.git", rev = "89d692d5753d28564a7aac73c6ac5aba22243ba0" }
|
||||
tokio-util.workspace = true
|
||||
|
||||
[dev-dependencies]
|
||||
|
||||
@@ -29,10 +29,6 @@ pub fn format_utc_datetime(utc: &NaiveDateTime, pattern: &str) -> String {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn system_datetime_to_utc(local: &NaiveDateTime) -> LocalResult<NaiveDateTime> {
|
||||
datetime_to_utc(local, get_timezone(None))
|
||||
}
|
||||
|
||||
/// Cast a [`NaiveDateTime`] with the given timezone.
|
||||
pub fn datetime_to_utc(
|
||||
datetime: &NaiveDateTime,
|
||||
|
||||
@@ -49,9 +49,9 @@ impl Default for RaftEngineConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
dir: None,
|
||||
file_size: ReadableSize::mb(256),
|
||||
purge_threshold: ReadableSize::gb(4),
|
||||
purge_interval: Duration::from_secs(600),
|
||||
file_size: ReadableSize::mb(128),
|
||||
purge_threshold: ReadableSize::gb(1),
|
||||
purge_interval: Duration::from_secs(60),
|
||||
read_batch_size: 128,
|
||||
sync_write: false,
|
||||
enable_log_recycle: true,
|
||||
|
||||
@@ -32,7 +32,7 @@ use servers::heartbeat_options::HeartbeatOptions;
|
||||
use servers::http::HttpOptions;
|
||||
use servers::Mode;
|
||||
|
||||
pub const DEFAULT_OBJECT_STORE_CACHE_SIZE: ReadableSize = ReadableSize::gb(1);
|
||||
pub const DEFAULT_OBJECT_STORE_CACHE_SIZE: ReadableSize = ReadableSize::gb(5);
|
||||
|
||||
/// Default data home in file storage
|
||||
const DEFAULT_DATA_HOME: &str = "/tmp/greptimedb";
|
||||
@@ -60,6 +60,11 @@ impl ObjectStoreConfig {
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns true when it's a remote object storage service such as AWS S3, etc.
|
||||
pub fn is_object_storage(&self) -> bool {
|
||||
!matches!(self, Self::File(_))
|
||||
}
|
||||
|
||||
/// Returns the object storage configuration name, return the provider name if it's empty.
|
||||
pub fn config_name(&self) -> &str {
|
||||
let name = match self {
|
||||
@@ -91,6 +96,13 @@ pub struct StorageConfig {
|
||||
pub providers: Vec<ObjectStoreConfig>,
|
||||
}
|
||||
|
||||
impl StorageConfig {
|
||||
/// Returns true when the default storage config is a remote object storage service such as AWS S3, etc.
|
||||
pub fn is_object_storage(&self) -> bool {
|
||||
self.store.is_object_storage()
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for StorageConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
@@ -452,6 +464,20 @@ mod tests {
|
||||
assert_eq!("S3", s3_config.provider_name());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_is_object_storage() {
|
||||
let store = ObjectStoreConfig::default();
|
||||
assert!(!store.is_object_storage());
|
||||
let s3_config = ObjectStoreConfig::S3(S3Config::default());
|
||||
assert!(s3_config.is_object_storage());
|
||||
let oss_config = ObjectStoreConfig::Oss(OssConfig::default());
|
||||
assert!(oss_config.is_object_storage());
|
||||
let gcs_config = ObjectStoreConfig::Gcs(GcsConfig::default());
|
||||
assert!(gcs_config.is_object_storage());
|
||||
let azblob_config = ObjectStoreConfig::Azblob(AzblobConfig::default());
|
||||
assert!(azblob_config.is_object_storage());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_secstr() {
|
||||
let toml_str = r#"
|
||||
|
||||
@@ -428,10 +428,16 @@ impl DatanodeBuilder {
|
||||
async fn build_mito_engine(
|
||||
opts: &DatanodeOptions,
|
||||
object_store_manager: ObjectStoreManagerRef,
|
||||
config: MitoConfig,
|
||||
mut config: MitoConfig,
|
||||
schema_metadata_manager: SchemaMetadataManagerRef,
|
||||
plugins: Plugins,
|
||||
) -> Result<MitoEngine> {
|
||||
if opts.storage.is_object_storage() {
|
||||
// Enable the write cache when the default storage is a remote object storage service
|
||||
config.enable_experimental_write_cache = true;
|
||||
info!("Configured 'enable_experimental_write_cache=true' for mito engine.");
|
||||
}
|
||||
|
||||
let mito_engine = match &opts.wal {
|
||||
DatanodeWalConfig::RaftEngine(raft_engine_config) => MitoEngine::new(
|
||||
&opts.storage.data_home,
|
||||
|
||||
@@ -193,6 +193,14 @@ pub enum Error {
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to build http client"))]
|
||||
BuildHttpClient {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
#[snafu(source)]
|
||||
error: reqwest::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Missing required field: {}", name))]
|
||||
MissingRequiredField {
|
||||
name: String,
|
||||
@@ -406,9 +414,10 @@ impl ErrorExt for Error {
|
||||
| MissingKvBackend { .. }
|
||||
| TomlFormat { .. } => StatusCode::InvalidArguments,
|
||||
|
||||
PayloadNotExist { .. } | Unexpected { .. } | WatchAsyncTaskChange { .. } => {
|
||||
StatusCode::Unexpected
|
||||
}
|
||||
PayloadNotExist { .. }
|
||||
| Unexpected { .. }
|
||||
| WatchAsyncTaskChange { .. }
|
||||
| BuildHttpClient { .. } => StatusCode::Unexpected,
|
||||
|
||||
AsyncTaskExecute { source, .. } => source.status_code(),
|
||||
|
||||
|
||||
@@ -13,6 +13,7 @@
|
||||
// limitations under the License.
|
||||
|
||||
#![feature(assert_matches)]
|
||||
#![feature(let_chains)]
|
||||
|
||||
pub mod alive_keeper;
|
||||
pub mod config;
|
||||
|
||||
@@ -19,21 +19,20 @@ mod fs;
|
||||
mod gcs;
|
||||
mod oss;
|
||||
mod s3;
|
||||
|
||||
use std::path::Path;
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
use std::{env, path};
|
||||
|
||||
use common_base::readable_size::ReadableSize;
|
||||
use common_telemetry::{info, warn};
|
||||
use object_store::layers::{LruCacheLayer, RetryInterceptor, RetryLayer};
|
||||
use object_store::services::Fs;
|
||||
use object_store::util::{join_dir, normalize_dir, with_instrument_layers};
|
||||
use object_store::{Access, Error, HttpClient, ObjectStore, ObjectStoreBuilder};
|
||||
use object_store::{Access, Error, HttpClient, ObjectStore, ObjectStoreBuilder, OBJECT_CACHE_DIR};
|
||||
use snafu::prelude::*;
|
||||
|
||||
use crate::config::{HttpClientConfig, ObjectStoreConfig, DEFAULT_OBJECT_STORE_CACHE_SIZE};
|
||||
use crate::error::{self, Result};
|
||||
use crate::error::{self, BuildHttpClientSnafu, CreateDirSnafu, Result};
|
||||
|
||||
pub(crate) async fn new_raw_object_store(
|
||||
store: &ObjectStoreConfig,
|
||||
@@ -68,7 +67,7 @@ pub(crate) async fn new_object_store_without_cache(
|
||||
) -> Result<ObjectStore> {
|
||||
let object_store = new_raw_object_store(store, data_home).await?;
|
||||
// Enable retry layer and cache layer for non-fs object storages
|
||||
let object_store = if !matches!(store, ObjectStoreConfig::File(..)) {
|
||||
let object_store = if store.is_object_storage() {
|
||||
// Adds retry layer
|
||||
with_retry_layers(object_store)
|
||||
} else {
|
||||
@@ -85,8 +84,8 @@ pub(crate) async fn new_object_store(
|
||||
) -> Result<ObjectStore> {
|
||||
let object_store = new_raw_object_store(&store, data_home).await?;
|
||||
// Enable retry layer and cache layer for non-fs object storages
|
||||
let object_store = if !matches!(store, ObjectStoreConfig::File(..)) {
|
||||
let object_store = if let Some(cache_layer) = build_cache_layer(&store).await? {
|
||||
let object_store = if store.is_object_storage() {
|
||||
let object_store = if let Some(cache_layer) = build_cache_layer(&store, data_home).await? {
|
||||
// Adds cache layer
|
||||
object_store.layer(cache_layer)
|
||||
} else {
|
||||
@@ -105,44 +104,72 @@ pub(crate) async fn new_object_store(
|
||||
|
||||
async fn build_cache_layer(
|
||||
store_config: &ObjectStoreConfig,
|
||||
data_home: &str,
|
||||
) -> Result<Option<LruCacheLayer<impl Access>>> {
|
||||
let (cache_path, cache_capacity) = match store_config {
|
||||
let (name, mut cache_path, cache_capacity) = match store_config {
|
||||
ObjectStoreConfig::S3(s3_config) => {
|
||||
let path = s3_config.cache.cache_path.as_ref();
|
||||
let path = s3_config.cache.cache_path.clone();
|
||||
let name = &s3_config.name;
|
||||
let capacity = s3_config
|
||||
.cache
|
||||
.cache_capacity
|
||||
.unwrap_or(DEFAULT_OBJECT_STORE_CACHE_SIZE);
|
||||
(path, capacity)
|
||||
(name, path, capacity)
|
||||
}
|
||||
ObjectStoreConfig::Oss(oss_config) => {
|
||||
let path = oss_config.cache.cache_path.as_ref();
|
||||
let path = oss_config.cache.cache_path.clone();
|
||||
let name = &oss_config.name;
|
||||
let capacity = oss_config
|
||||
.cache
|
||||
.cache_capacity
|
||||
.unwrap_or(DEFAULT_OBJECT_STORE_CACHE_SIZE);
|
||||
(path, capacity)
|
||||
(name, path, capacity)
|
||||
}
|
||||
ObjectStoreConfig::Azblob(azblob_config) => {
|
||||
let path = azblob_config.cache.cache_path.as_ref();
|
||||
let path = azblob_config.cache.cache_path.clone();
|
||||
let name = &azblob_config.name;
|
||||
let capacity = azblob_config
|
||||
.cache
|
||||
.cache_capacity
|
||||
.unwrap_or(DEFAULT_OBJECT_STORE_CACHE_SIZE);
|
||||
(path, capacity)
|
||||
(name, path, capacity)
|
||||
}
|
||||
ObjectStoreConfig::Gcs(gcs_config) => {
|
||||
let path = gcs_config.cache.cache_path.as_ref();
|
||||
let path = gcs_config.cache.cache_path.clone();
|
||||
let name = &gcs_config.name;
|
||||
let capacity = gcs_config
|
||||
.cache
|
||||
.cache_capacity
|
||||
.unwrap_or(DEFAULT_OBJECT_STORE_CACHE_SIZE);
|
||||
(path, capacity)
|
||||
(name, path, capacity)
|
||||
}
|
||||
_ => (None, ReadableSize(0)),
|
||||
_ => unreachable!("Already checked above"),
|
||||
};
|
||||
|
||||
if let Some(path) = cache_path {
|
||||
// Enable object cache by default
|
||||
// Set the cache_path to be `${data_home}/object_cache/read/{name}` by default
|
||||
// if it's not present
|
||||
if cache_path.is_none() {
|
||||
let object_cache_path = join_dir(data_home, OBJECT_CACHE_DIR);
|
||||
let read_cache_path = join_dir(&object_cache_path, "read");
|
||||
let read_cache_path = join_dir(&read_cache_path, &name.to_lowercase());
|
||||
tokio::fs::create_dir_all(Path::new(&read_cache_path))
|
||||
.await
|
||||
.context(CreateDirSnafu {
|
||||
dir: &read_cache_path,
|
||||
})?;
|
||||
|
||||
info!(
|
||||
"The object storage cache path is not set for '{}', using the default path: '{}'",
|
||||
name, &read_cache_path
|
||||
);
|
||||
|
||||
cache_path = Some(read_cache_path);
|
||||
}
|
||||
|
||||
if let Some(path) = cache_path.as_ref()
|
||||
&& !path.trim().is_empty()
|
||||
{
|
||||
let atomic_temp_dir = join_dir(path, ".tmp/");
|
||||
clean_temp_dir(&atomic_temp_dir)?;
|
||||
|
||||
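A worked example of the default path composed above, assuming `data_home = "/data/greptimedb"`, a provider named "S3", and that `OBJECT_CACHE_DIR` expands to the `object_cache` segment mentioned in the comment (the constant's value is not shown in this diff):

let data_home = "/data/greptimedb";
let name = "S3";
let object_cache_path = join_dir(data_home, OBJECT_CACHE_DIR);
let read_cache_path = join_dir(&join_dir(&object_cache_path, "read"), &name.to_lowercase());
// read_cache_path is roughly "/data/greptimedb/object_cache/read/s3/"
// (exact trailing-slash handling depends on join_dir's normalization).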
@@ -209,7 +236,8 @@ pub(crate) fn build_http_client(config: &HttpClientConfig) -> Result<HttpClient>
|
||||
builder.timeout(config.timeout)
|
||||
};
|
||||
|
||||
HttpClient::build(http_builder).context(error::InitBackendSnafu)
|
||||
let client = http_builder.build().context(BuildHttpClientSnafu)?;
|
||||
Ok(HttpClient::with(client))
|
||||
}
|
||||
struct PrintDetailedError;
|
||||
|
||||
|
||||
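The HTTP client is now built from the underlying builder first and then wrapped with `HttpClient::with`, so builder failures surface through the new `BuildHttpClient` error variant instead of `InitBackendSnafu`. A hedged sketch of the new construction path; only the `timeout` option appears in the surrounding code, the rest is assumed:

let http_builder = reqwest::ClientBuilder::new()
    .timeout(config.timeout); // e.g. Duration::from_secs(30)

let client = http_builder.build().context(BuildHttpClientSnafu)?;
Ok(HttpClient::with(client))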
@@ -370,6 +370,51 @@ impl ConcreteDataType {
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Return the datatype name in postgres type system
|
||||
pub fn postgres_datatype_name(&self) -> &'static str {
|
||||
match self {
|
||||
&ConcreteDataType::Null(_) => "UNKNOWN",
|
||||
&ConcreteDataType::Boolean(_) => "BOOL",
|
||||
&ConcreteDataType::Int8(_) | &ConcreteDataType::UInt8(_) => "CHAR",
|
||||
&ConcreteDataType::Int16(_) | &ConcreteDataType::UInt16(_) => "INT2",
|
||||
&ConcreteDataType::Int32(_) | &ConcreteDataType::UInt32(_) => "INT4",
|
||||
&ConcreteDataType::Int64(_) | &ConcreteDataType::UInt64(_) => "INT8",
|
||||
&ConcreteDataType::Float32(_) => "FLOAT4",
|
||||
&ConcreteDataType::Float64(_) => "FLOAT8",
|
||||
&ConcreteDataType::Binary(_) | &ConcreteDataType::Vector(_) => "BYTEA",
|
||||
&ConcreteDataType::String(_) => "VARCHAR",
|
||||
&ConcreteDataType::Date(_) => "DATE",
|
||||
&ConcreteDataType::DateTime(_) | &ConcreteDataType::Timestamp(_) => "TIMESTAMP",
|
||||
&ConcreteDataType::Time(_) => "TIME",
|
||||
&ConcreteDataType::Interval(_) => "INTERVAL",
|
||||
&ConcreteDataType::Decimal128(_) => "NUMERIC",
|
||||
&ConcreteDataType::Json(_) => "JSON",
|
||||
ConcreteDataType::List(list) => match list.item_type() {
|
||||
&ConcreteDataType::Null(_) => "UNKNOWN",
|
||||
&ConcreteDataType::Boolean(_) => "_BOOL",
|
||||
&ConcreteDataType::Int8(_) | &ConcreteDataType::UInt8(_) => "_CHAR",
|
||||
&ConcreteDataType::Int16(_) | &ConcreteDataType::UInt16(_) => "_INT2",
|
||||
&ConcreteDataType::Int32(_) | &ConcreteDataType::UInt32(_) => "_INT4",
|
||||
&ConcreteDataType::Int64(_) | &ConcreteDataType::UInt64(_) => "_INT8",
|
||||
&ConcreteDataType::Float32(_) => "_FLOAT4",
|
||||
&ConcreteDataType::Float64(_) => "_FLOAT8",
|
||||
&ConcreteDataType::Binary(_) => "_BYTEA",
|
||||
&ConcreteDataType::String(_) => "_VARCHAR",
|
||||
&ConcreteDataType::Date(_) => "_DATE",
|
||||
&ConcreteDataType::DateTime(_) | &ConcreteDataType::Timestamp(_) => "_TIMESTAMP",
|
||||
&ConcreteDataType::Time(_) => "_TIME",
|
||||
&ConcreteDataType::Interval(_) => "_INTERVAL",
|
||||
&ConcreteDataType::Decimal128(_) => "_NUMERIC",
|
||||
&ConcreteDataType::Json(_) => "_JSON",
|
||||
&ConcreteDataType::Duration(_)
|
||||
| &ConcreteDataType::Dictionary(_)
|
||||
| &ConcreteDataType::Vector(_)
|
||||
| &ConcreteDataType::List(_) => "UNKNOWN",
|
||||
},
|
||||
&ConcreteDataType::Duration(_) | &ConcreteDataType::Dictionary(_) => "UNKNOWN",
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<&ConcreteDataType> for ConcreteDataType {
|
||||
|
||||
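For illustration, a couple of mappings implied by the match above; the `ConcreteDataType` constructor names used here are assumptions and may not match the actual datatypes API:

assert_eq!(ConcreteDataType::int32_datatype().postgres_datatype_name(), "INT4");
assert_eq!(ConcreteDataType::binary_datatype().postgres_datatype_name(), "BYTEA");
// A list datatype maps to the corresponding Postgres array type,
// e.g. List(Float64) => "_FLOAT8".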
Some files were not shown because too many files have changed in this diff.