mirror of https://github.com/GreptimeTeam/greptimedb.git (synced 2026-01-05 21:02:58 +00:00)

Compare commits: v0.11.0 ... windows_pd (72 commits)
Commit SHA1s (72):

334dbee590, f33b378e45, 267941bbb5, 074846bbc2, 88d46a38ae, de0beabf34,
68dd2916fb, d51b65a8bf, 2082c4b6e4, c623404fff, fa3b7ed5ea, 8ece853076,
4245bff8f2, 3d4121aefb, 1910d71cb3, a578eea801, 6bf574f098, a4d61bcaf1,
7ea8a44d3a, 2d6f63a504, 422d18da8b, 66f0581f5b, c9ad8c7101, 2107737db1,
548e1988ab, 218236cc5b, f04d380259, fa773cf480, 9b4e8555e2, c6b7caa2ec,
58d6982c93, e662c241e6, 266919c226, 7d1bcc9d49, 18e8c45384, c33cf59398,
421088a868, d821dc5a3e, bfc777e6ac, 8a5384697b, d0245473a9, 043d0bd7c2,
acedff030b, 88f7075a2a, 54698325b6, 5ffda7e971, f82af15eba, 9d7fea902e,
358d5e1d63, 579059d99f, 53d55c0b6b, bef6896280, 4b4c6dbb66, e8e9526738,
fee75a1fad, b8a78b7838, 2137c53274, 03ad6e2a8d, d53fbcb936, 8c1959c580,
e2a41ccaec, a8012147ab, 60f8dbf7f0, 9da2e17d0e, 1a8e77a480, e1e39993f7,
a30d918df2, 2c4ac76754, a6893aad42, d91517688a, 3d1b8c4fac, 7c69ca0502
@@ -54,7 +54,7 @@ runs:
       PROFILE_TARGET: ${{ inputs.cargo-profile == 'dev' && 'debug' || inputs.cargo-profile }}
     with:
       artifacts-dir: ${{ inputs.artifacts-dir }}
-      target-file: ./target/$PROFILE_TARGET/greptime
+      target-files: ./target/$PROFILE_TARGET/greptime
       version: ${{ inputs.version }}
       working-dir: ${{ inputs.working-dir }}

@@ -72,6 +72,6 @@ runs:
     if: ${{ inputs.build-android-artifacts == 'true' }}
     with:
       artifacts-dir: ${{ inputs.artifacts-dir }}
-      target-file: ./target/aarch64-linux-android/release/greptime
+      target-files: ./target/aarch64-linux-android/release/greptime
       version: ${{ inputs.version }}
       working-dir: ${{ inputs.working-dir }}

@@ -90,5 +90,5 @@ runs:
     uses: ./.github/actions/upload-artifacts
     with:
       artifacts-dir: ${{ inputs.artifacts-dir }}
-      target-file: target/${{ inputs.arch }}/${{ inputs.cargo-profile }}/greptime
+      target-files: target/${{ inputs.arch }}/${{ inputs.cargo-profile }}/greptime
       version: ${{ inputs.version }}

@@ -76,5 +76,5 @@ runs:
     uses: ./.github/actions/upload-artifacts
     with:
       artifacts-dir: ${{ inputs.artifacts-dir }}
-      target-file: target/${{ inputs.arch }}/${{ inputs.cargo-profile }}/greptime
+      target-files: target/${{ inputs.arch }}/${{ inputs.cargo-profile }}/greptime,target/${{ inputs.arch }}/${{ inputs.cargo-profile }}/greptime.pdb
       version: ${{ inputs.version }}
@@ -5,7 +5,7 @@ meta:
 
     [datanode]
     [datanode.client]
-    timeout = "60s"
+    timeout = "120s"
 datanode:
   configData: |-
     [runtime]
@@ -21,7 +21,7 @@ frontend:
     global_rt_size = 4
 
     [meta_client]
-    ddl_timeout = "60s"
+    ddl_timeout = "120s"
 objectStorage:
   s3:
     bucket: default
@@ -5,7 +5,7 @@ meta:
 
     [datanode]
     [datanode.client]
-    timeout = "60s"
+    timeout = "120s"
 datanode:
   configData: |-
     [runtime]
@@ -17,7 +17,7 @@ frontend:
     global_rt_size = 4
 
     [meta_client]
-    ddl_timeout = "60s"
+    ddl_timeout = "120s"
 objectStorage:
   s3:
     bucket: default
@@ -11,7 +11,7 @@ meta:
 
     [datanode]
     [datanode.client]
-    timeout = "60s"
+    timeout = "120s"
 datanode:
   configData: |-
     [runtime]
@@ -28,7 +28,7 @@ frontend:
     global_rt_size = 4
 
     [meta_client]
-    ddl_timeout = "60s"
+    ddl_timeout = "120s"
 objectStorage:
   s3:
     bucket: default
@@ -18,6 +18,8 @@ runs:
         --set controller.replicaCount=${{ inputs.controller-replicas }} \
         --set controller.resources.requests.cpu=50m \
         --set controller.resources.requests.memory=128Mi \
+        --set controller.resources.limits.cpu=2000m \
+        --set controller.resources.limits.memory=2Gi \
         --set listeners.controller.protocol=PLAINTEXT \
         --set listeners.client.protocol=PLAINTEXT \
         --create-namespace \
.github/actions/upload-artifacts/action.yml (vendored, 14 changes)

@@ -4,8 +4,8 @@ inputs:
   artifacts-dir:
     description: Directory to store artifacts
     required: true
-  target-file:
-    description: The path of the target artifact
+  target-files:
+    description: The multiple target files to upload, separated by comma
     required: false
   version:
     description: Version of the artifact
@@ -18,12 +18,16 @@ runs:
   using: composite
   steps:
     - name: Create artifacts directory
-      if: ${{ inputs.target-file != '' }}
+      if: ${{ inputs.target-files != '' }}
       working-directory: ${{ inputs.working-dir }}
       shell: bash
       run: |
-        mkdir -p ${{ inputs.artifacts-dir }} && \
-        cp ${{ inputs.target-file }} ${{ inputs.artifacts-dir }}
+        set -e
+        mkdir -p ${{ inputs.artifacts-dir }}
+        IFS=',' read -ra FILES <<< "${{ inputs.target-files }}"
+        for file in "${FILES[@]}"; do
+          cp "$file" ${{ inputs.artifacts-dir }}/
+        done
 
     # The compressed artifacts will use the following layout:
     # greptime-linux-amd64-pyo3-v0.3.0sha256sum
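For reference, the copy logic the action now runs can be exercised outside of CI. A minimal sketch in plain bash; the directory name and file paths below are placeholders, not values taken from this PR:

```bash
#!/usr/bin/env bash
# Split a comma-separated list of target files and copy each one into the
# artifacts directory, mirroring the updated upload-artifacts step.
set -e

artifacts_dir="artifacts"                                            # stand-in for inputs.artifacts-dir
target_files="target/release/greptime,target/release/greptime.pdb"   # stand-in for inputs.target-files

mkdir -p "$artifacts_dir"
IFS=',' read -ra FILES <<< "$target_files"
for file in "${FILES[@]}"; do
  cp "$file" "$artifacts_dir"/
done
```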
.github/cargo-blacklist.txt (vendored, 1 change)

@@ -1,2 +1,3 @@
 native-tls
 openssl
+aws-lc-sys
.github/pull_request_template.md (vendored, 10 changes)

@@ -4,7 +4,8 @@ I hereby agree to the terms of the [GreptimeDB CLA](https://github.com/GreptimeT
 
 ## What's changed and what's your intention?
 
-__!!! DO NOT LEAVE THIS BLOCK EMPTY !!!__
+<!--
+__!!! DO NOT LEAVE THIS BLOCK EMPTY !!!__
 
 Please explain IN DETAIL what the changes are in this PR and why they are needed:
 
@@ -12,9 +13,14 @@ Please explain IN DETAIL what the changes are in this PR and why they are needed
 - How does this PR work? Need a brief introduction for the changed logic (optional)
 - Describe clearly one logical change and avoid lazy messages (optional)
 - Describe any limitations of the current code (optional)
+- Describe if this PR will break **API or data compatibility** (optional)
+-->
 
-## Checklist
+## PR Checklist
+Please convert it to a draft if some of the following conditions are not met.
 
 - [ ] I have written the necessary rustdoc comments.
 - [ ] I have added the necessary unit tests and integration tests.
 - [ ] This PR requires documentation updates.
+- [ ] API changes are backward compatible.
+- [ ] Schema or data changes are backward compatible.
.github/workflows/develop.yml (vendored, 11 changes)

@@ -269,13 +269,6 @@ jobs:
       - name: Install cargo-gc-bin
        shell: bash
         run: cargo install cargo-gc-bin
-      - name: Check aws-lc-sys will not build
-        shell: bash
-        run: |
-          if cargo tree -i aws-lc-sys -e features | grep -q aws-lc-sys; then
-            echo "Found aws-lc-sys, which has compilation problems on older gcc versions. Please replace it with ring until its building experience improves."
-            exit 1
-          fi
       - name: Build greptime bianry
         shell: bash
         # `cargo gc` will invoke `cargo build` with specified args
@@ -330,8 +323,6 @@ jobs:
         uses: ./.github/actions/setup-kafka-cluster
       - name: Setup Etcd cluser
         uses: ./.github/actions/setup-etcd-cluster
-      - name: Setup Postgres cluser
-        uses: ./.github/actions/setup-postgres-cluster
       # Prepares for fuzz tests
       - uses: arduino/setup-protoc@v3
         with:
@@ -481,8 +472,6 @@ jobs:
         uses: ./.github/actions/setup-kafka-cluster
       - name: Setup Etcd cluser
         uses: ./.github/actions/setup-etcd-cluster
-      - name: Setup Postgres cluser
-        uses: ./.github/actions/setup-postgres-cluster
       # Prepares for fuzz tests
       - uses: arduino/setup-protoc@v3
         with:
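The removed step checked for `aws-lc-sys` directly with `cargo tree`, and `aws-lc-sys` is now listed in `.github/cargo-blacklist.txt` instead. How the blacklist file is consumed is not shown in this diff; the sketch below is only an assumption of how such a check could be scripted, reusing the `cargo tree` probe from the removed step:

```bash
#!/usr/bin/env bash
# Assumed sketch: fail if any crate named in the blacklist file appears in the
# dependency graph. Not taken from this repository's scripts.
set -euo pipefail

while IFS= read -r crate; do
  [ -z "$crate" ] && continue                       # skip blank lines
  if cargo tree -i "$crate" -e features 2>/dev/null | grep -q "$crate"; then
    echo "Found blacklisted crate: $crate"
    exit 1
  fi
done < .github/cargo-blacklist.txt
```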
.github/workflows/nightly-build.yml (vendored, 4 changes)

@@ -12,7 +12,7 @@ on:
       linux_amd64_runner:
         type: choice
         description: The runner uses to build linux-amd64 artifacts
-        default: ec2-c6i.2xlarge-amd64
+        default: ec2-c6i.4xlarge-amd64
         options:
           - ubuntu-20.04
           - ubuntu-20.04-8-cores
@@ -27,7 +27,7 @@ on:
       linux_arm64_runner:
         type: choice
         description: The runner uses to build linux-arm64 artifacts
-        default: ec2-c6g.2xlarge-arm64
+        default: ec2-c6g.4xlarge-arm64
         options:
           - ec2-c6g.xlarge-arm64 # 4C8G
           - ec2-c6g.2xlarge-arm64 # 8C16G
.github/workflows/nightly-ci.yml (vendored, 10 changes)

@@ -114,6 +114,16 @@ jobs:
       GT_S3_REGION: ${{ vars.AWS_CI_TEST_BUCKET_REGION }}
       UNITTEST_LOG_DIR: "__unittest_logs"
 
+  cleanbuild-linux-nix:
+    runs-on: ubuntu-latest-8-cores
+    timeout-minutes: 60
+    steps:
+      - uses: actions/checkout@v4
+      - uses: cachix/install-nix-action@v27
+        with:
+          nix_path: nixpkgs=channel:nixos-unstable
+      - run: nix-shell --pure --run "cargo build"
+
   check-status:
     name: Check status
     needs: [sqlness-test, sqlness-windows, test-on-windows]
.github/workflows/release.yml (vendored, 4 changes)

@@ -31,7 +31,7 @@ on:
       linux_arm64_runner:
         type: choice
         description: The runner uses to build linux-arm64 artifacts
-        default: ec2-c6g.4xlarge-arm64
+        default: ec2-c6g.8xlarge-arm64
         options:
           - ubuntu-2204-32-cores-arm
           - ec2-c6g.xlarge-arm64 # 4C8G
@@ -91,7 +91,7 @@ env:
   # The scheduled version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-YYYYMMDD', like v0.2.0-nigthly-20230313;
   NIGHTLY_RELEASE_PREFIX: nightly
   # Note: The NEXT_RELEASE_VERSION should be modified manually by every formal release.
-  NEXT_RELEASE_VERSION: v0.11.0
+  NEXT_RELEASE_VERSION: v0.12.0
 
 # Permission reference: https://docs.github.com/en/actions/using-jobs/assigning-permissions-to-jobs
 permissions:
.gitignore (vendored, 6 changes)

@@ -47,6 +47,10 @@ benchmarks/data
 
 venv/
 
 # Fuzz tests
 tests-fuzz/artifacts/
 tests-fuzz/corpus/
+
+# Nix
+.direnv
+.envrc
Cargo.lock (generated, 620 changes): file diff suppressed because it is too large.
@@ -68,7 +68,7 @@ members = [
 resolver = "2"
 
 [workspace.package]
-version = "0.11.0"
+version = "0.12.0"
 edition = "2021"
 license = "Apache-2.0"
 
@@ -180,6 +180,7 @@ sysinfo = "0.30"
 # on branch v0.44.x
 sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "54a267ac89c09b11c0c88934690530807185d3e7", features = [
     "visitor",
+    "serde",
 ] }
 strum = { version = "0.25", features = ["derive"] }
 tempfile = "3"
@@ -237,6 +238,7 @@ file-engine = { path = "src/file-engine" }
 flow = { path = "src/flow" }
 frontend = { path = "src/frontend", default-features = false }
 index = { path = "src/index" }
+log-query = { path = "src/log-query" }
 log-store = { path = "src/log-store" }
 meta-client = { path = "src/meta-client" }
 meta-srv = { path = "src/meta-srv" }
README.md (18 changes)

@@ -70,23 +70,23 @@ Our core developers have been building time-series data platforms for years. Bas
 
 * **Unified Processing of Metrics, Logs, and Events**
 
-  GreptimeDB unifies time series data processing by treating all data - whether metrics, logs, or events - as timestamped events with context. Users can analyze this data using either [SQL](https://docs.greptime.com/user-guide/query-data/sql) or [PromQL](https://docs.greptime.com/user-guide/query-data/promql) and leverage stream processing ([Flow](https://docs.greptime.com/user-guide/continuous-aggregation/overview)) to enable continuous aggregation. [Read more](https://docs.greptime.com/user-guide/concepts/data-model).
+  GreptimeDB unifies time series data processing by treating all data - whether metrics, logs, or events - as timestamped events with context. Users can analyze this data using either [SQL](https://docs.greptime.com/user-guide/query-data/sql) or [PromQL](https://docs.greptime.com/user-guide/query-data/promql) and leverage stream processing ([Flow](https://docs.greptime.com/user-guide/flow-computation/overview)) to enable continuous aggregation. [Read more](https://docs.greptime.com/user-guide/concepts/data-model).
 
 * **Cloud-native Distributed Database**
 
   Built for [Kubernetes](https://docs.greptime.com/user-guide/deployments/deploy-on-kubernetes/greptimedb-operator-management). GreptimeDB achieves seamless scalability with its [cloud-native architecture](https://docs.greptime.com/user-guide/concepts/architecture) of separated compute and storage, built on object storage (AWS S3, Azure Blob Storage, etc.) while enabling cross-cloud deployment through a unified data access layer.
 
 * **Performance and Cost-effective**
 
   Written in pure Rust for superior performance and reliability. GreptimeDB features a distributed query engine with intelligent indexing to handle high cardinality data efficiently. Its optimized columnar storage achieves 50x cost efficiency on cloud object storage through advanced compression. [Benchmark reports](https://www.greptime.com/blogs/2024-09-09-report-summary).
 
 * **Cloud-Edge Collaboration**
 
   GreptimeDB seamlessly operates across cloud and edge (ARM/Android/Linux), providing consistent APIs and control plane for unified data management and efficient synchronization. [Learn how to run on Android](https://docs.greptime.com/user-guide/deployments/run-on-android/).
 
 * **Multi-protocol Ingestion, SQL & PromQL Ready**
 
   Widely adopted database protocols and APIs, including MySQL, PostgreSQL, InfluxDB, OpenTelemetry, Loki and Prometheus, etc. Effortless Adoption & Seamless Migration. [Supported Protocols Overview](https://docs.greptime.com/user-guide/protocols/overview).
 
 For more detailed info please read [Why GreptimeDB](https://docs.greptime.com/user-guide/concepts/why-greptimedb).
 
@@ -138,7 +138,7 @@ Check the prerequisite:
 
 * [Rust toolchain](https://www.rust-lang.org/tools/install) (nightly)
 * [Protobuf compiler](https://grpc.io/docs/protoc-installation/) (>= 3.15)
-* Python toolchain (optional): Required only if built with PyO3 backend. More detail for compiling with PyO3 can be found in its [documentation](https://pyo3.rs/v0.18.1/building_and_distribution#configuring-the-python-version).
+* Python toolchain (optional): Required only if built with PyO3 backend. More details for compiling with PyO3 can be found in its [documentation](https://pyo3.rs/v0.18.1/building_and_distribution#configuring-the-python-version).
 
 Build GreptimeDB binary:
 
@@ -154,6 +154,10 @@ cargo run -- standalone start
 
 ## Tools & Extensions
 
+### Kubernetes
+
+- [GreptimeDB Operator](https://github.com/GrepTimeTeam/greptimedb-operator)
+
 ### Dashboard
 
 - [The dashboard UI for GreptimeDB](https://github.com/GreptimeTeam/dashboard)
@@ -173,7 +177,7 @@ Our official Grafana dashboard for monitoring GreptimeDB is available at [grafan
 
 ## Project Status
 
 GreptimeDB is currently in Beta. We are targeting GA (General Availability) with v1.0 release by Early 2025.
 
 While in Beta, GreptimeDB is already:
 
@@ -13,11 +13,11 @@
 | Key | Type | Default | Descriptions |
 | --- | -----| ------- | ----------- |
 | `mode` | String | `standalone` | The running mode of the datanode. It can be `standalone` or `distributed`. |
-| `enable_telemetry` | Bool | `true` | Enable telemetry to collect anonymous usage data. |
 | `default_timezone` | String | Unset | The default timezone of the server. |
 | `init_regions_in_background` | Bool | `false` | Initialize all regions in the background during the startup.<br/>By default, it provides services after all regions have been initialized. |
 | `init_regions_parallelism` | Integer | `16` | Parallelism of initializing regions. |
 | `max_concurrent_queries` | Integer | `0` | The maximum current queries allowed to be executed. Zero means unlimited. |
+| `enable_telemetry` | Bool | `true` | Enable telemetry to collect anonymous usage data. Enabled by default. |
 | `runtime` | -- | -- | The runtime options. |
 | `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
 | `runtime.compact_rt_size` | Integer | `4` | The number of threads to execute the runtime for global write operations. |
@@ -61,9 +61,9 @@
 | `wal` | -- | -- | The WAL options. |
 | `wal.provider` | String | `raft_engine` | The provider of the WAL.<br/>- `raft_engine`: the wal is stored in the local file system by raft-engine.<br/>- `kafka`: it's remote wal that data is stored in Kafka. |
 | `wal.dir` | String | Unset | The directory to store the WAL files.<br/>**It's only used when the provider is `raft_engine`**. |
-| `wal.file_size` | String | `256MB` | The size of the WAL segment file.<br/>**It's only used when the provider is `raft_engine`**. |
-| `wal.purge_threshold` | String | `4GB` | The threshold of the WAL size to trigger a flush.<br/>**It's only used when the provider is `raft_engine`**. |
-| `wal.purge_interval` | String | `10m` | The interval to trigger a flush.<br/>**It's only used when the provider is `raft_engine`**. |
+| `wal.file_size` | String | `128MB` | The size of the WAL segment file.<br/>**It's only used when the provider is `raft_engine`**. |
+| `wal.purge_threshold` | String | `1GB` | The threshold of the WAL size to trigger a flush.<br/>**It's only used when the provider is `raft_engine`**. |
+| `wal.purge_interval` | String | `1m` | The interval to trigger a flush.<br/>**It's only used when the provider is `raft_engine`**. |
 | `wal.read_batch_size` | Integer | `128` | The read batch size.<br/>**It's only used when the provider is `raft_engine`**. |
 | `wal.sync_write` | Bool | `false` | Whether to use sync write.<br/>**It's only used when the provider is `raft_engine`**. |
 | `wal.enable_log_recycle` | Bool | `true` | Whether to reuse logically truncated log files.<br/>**It's only used when the provider is `raft_engine`**. |
@@ -150,6 +150,7 @@
 | `region_engine.mito.inverted_index.intermediate_path` | String | `""` | Deprecated, use `region_engine.mito.index.aux_path` instead. |
 | `region_engine.mito.inverted_index.metadata_cache_size` | String | `64MiB` | Cache size for inverted index metadata. |
 | `region_engine.mito.inverted_index.content_cache_size` | String | `128MiB` | Cache size for inverted index content. |
+| `region_engine.mito.inverted_index.content_cache_page_size` | String | `8MiB` | Page size for inverted index content cache. |
 | `region_engine.mito.fulltext_index` | -- | -- | The options for full-text index in Mito engine. |
 | `region_engine.mito.fulltext_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
 | `region_engine.mito.fulltext_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
@@ -286,12 +287,12 @@
 | `bind_addr` | String | `127.0.0.1:3002` | The bind address of metasrv. |
 | `server_addr` | String | `127.0.0.1:3002` | The communication server address for frontend and datanode to connect to metasrv, "127.0.0.1:3002" by default for localhost. |
 | `store_addrs` | Array | -- | Store server address default to etcd store. |
+| `store_key_prefix` | String | `""` | If it's not empty, the metasrv will store all data with this key prefix. |
+| `backend` | String | `EtcdStore` | The datastore for meta server. |
 | `selector` | String | `round_robin` | Datanode selector type.<br/>- `round_robin` (default value)<br/>- `lease_based`<br/>- `load_based`<br/>For details, please see "https://docs.greptime.com/developer-guide/metasrv/selector". |
 | `use_memory_store` | Bool | `false` | Store data in memory. |
-| `enable_telemetry` | Bool | `true` | Whether to enable greptimedb telemetry. |
-| `store_key_prefix` | String | `""` | If it's not empty, the metasrv will store all data with this key prefix. |
 | `enable_region_failover` | Bool | `false` | Whether to enable region failover.<br/>This feature is only available on GreptimeDB running on cluster mode and<br/>- Using Remote WAL<br/>- Using shared storage (e.g., s3). |
-| `backend` | String | `EtcdStore` | The datastore for meta server. |
+| `enable_telemetry` | Bool | `true` | Whether to enable greptimedb telemetry. Enabled by default. |
 | `runtime` | -- | -- | The runtime options. |
 | `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
 | `runtime.compact_rt_size` | Integer | `4` | The number of threads to execute the runtime for global write operations. |
@@ -356,7 +357,6 @@
 | `node_id` | Integer | Unset | The datanode identifier and should be unique in the cluster. |
 | `require_lease_before_startup` | Bool | `false` | Start services after regions have obtained leases.<br/>It will block the datanode start if it can't receive leases in the heartbeat from metasrv. |
 | `init_regions_in_background` | Bool | `false` | Initialize all regions in the background during the startup.<br/>By default, it provides services after all regions have been initialized. |
-| `enable_telemetry` | Bool | `true` | Enable telemetry to collect anonymous usage data. |
 | `init_regions_parallelism` | Integer | `16` | Parallelism of initializing regions. |
 | `max_concurrent_queries` | Integer | `0` | The maximum current queries allowed to be executed. Zero means unlimited. |
 | `rpc_addr` | String | Unset | Deprecated, use `grpc.addr` instead. |
@@ -364,6 +364,7 @@
 | `rpc_runtime_size` | Integer | Unset | Deprecated, use `grpc.runtime_size` instead. |
 | `rpc_max_recv_message_size` | String | Unset | Deprecated, use `grpc.rpc_max_recv_message_size` instead. |
 | `rpc_max_send_message_size` | String | Unset | Deprecated, use `grpc.rpc_max_send_message_size` instead. |
+| `enable_telemetry` | Bool | `true` | Enable telemetry to collect anonymous usage data. Enabled by default. |
 | `http` | -- | -- | The HTTP server options. |
 | `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
 | `http.timeout` | String | `30s` | HTTP request timeout. Set to 0 to disable timeout. |
@@ -398,9 +399,9 @@
 | `wal` | -- | -- | The WAL options. |
 | `wal.provider` | String | `raft_engine` | The provider of the WAL.<br/>- `raft_engine`: the wal is stored in the local file system by raft-engine.<br/>- `kafka`: it's remote wal that data is stored in Kafka. |
 | `wal.dir` | String | Unset | The directory to store the WAL files.<br/>**It's only used when the provider is `raft_engine`**. |
-| `wal.file_size` | String | `256MB` | The size of the WAL segment file.<br/>**It's only used when the provider is `raft_engine`**. |
-| `wal.purge_threshold` | String | `4GB` | The threshold of the WAL size to trigger a flush.<br/>**It's only used when the provider is `raft_engine`**. |
-| `wal.purge_interval` | String | `10m` | The interval to trigger a flush.<br/>**It's only used when the provider is `raft_engine`**. |
+| `wal.file_size` | String | `128MB` | The size of the WAL segment file.<br/>**It's only used when the provider is `raft_engine`**. |
+| `wal.purge_threshold` | String | `1GB` | The threshold of the WAL size to trigger a flush.<br/>**It's only used when the provider is `raft_engine`**. |
+| `wal.purge_interval` | String | `1m` | The interval to trigger a flush.<br/>**It's only used when the provider is `raft_engine`**. |
 | `wal.read_batch_size` | Integer | `128` | The read batch size.<br/>**It's only used when the provider is `raft_engine`**. |
 | `wal.sync_write` | Bool | `false` | Whether to use sync write.<br/>**It's only used when the provider is `raft_engine`**. |
 | `wal.enable_log_recycle` | Bool | `true` | Whether to reuse logically truncated log files.<br/>**It's only used when the provider is `raft_engine`**. |
@@ -475,6 +476,9 @@
 | `region_engine.mito.inverted_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically (default)<br/>- `disable`: never |
 | `region_engine.mito.inverted_index.mem_threshold_on_create` | String | `auto` | Memory threshold for performing an external sort during index creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold |
 | `region_engine.mito.inverted_index.intermediate_path` | String | `""` | Deprecated, use `region_engine.mito.index.aux_path` instead. |
+| `region_engine.mito.inverted_index.metadata_cache_size` | String | `64MiB` | Cache size for inverted index metadata. |
+| `region_engine.mito.inverted_index.content_cache_size` | String | `128MiB` | Cache size for inverted index content. |
+| `region_engine.mito.inverted_index.content_cache_page_size` | String | `8MiB` | Page size for inverted index content cache. |
 | `region_engine.mito.fulltext_index` | -- | -- | The options for full-text index in Mito engine. |
 | `region_engine.mito.fulltext_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
 | `region_engine.mito.fulltext_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
@@ -13,9 +13,6 @@ require_lease_before_startup = false
 ## By default, it provides services after all regions have been initialized.
 init_regions_in_background = false
 
-## Enable telemetry to collect anonymous usage data.
-enable_telemetry = true
-
 ## Parallelism of initializing regions.
 init_regions_parallelism = 16
 
@@ -42,6 +39,8 @@ rpc_max_recv_message_size = "512MB"
 ## @toml2docs:none-default
 rpc_max_send_message_size = "512MB"
 
+## Enable telemetry to collect anonymous usage data. Enabled by default.
+#+ enable_telemetry = true
 
 ## The HTTP server options.
 [http]
@@ -143,15 +142,15 @@ dir = "/tmp/greptimedb/wal"
 
 ## The size of the WAL segment file.
 ## **It's only used when the provider is `raft_engine`**.
-file_size = "256MB"
+file_size = "128MB"
 
 ## The threshold of the WAL size to trigger a flush.
 ## **It's only used when the provider is `raft_engine`**.
-purge_threshold = "4GB"
+purge_threshold = "1GB"
 
 ## The interval to trigger a flush.
 ## **It's only used when the provider is `raft_engine`**.
-purge_interval = "10m"
+purge_interval = "1m"
 
 ## The read batch size.
 ## **It's only used when the provider is `raft_engine`**.
@@ -544,6 +543,15 @@ mem_threshold_on_create = "auto"
 ## Deprecated, use `region_engine.mito.index.aux_path` instead.
 intermediate_path = ""
 
+## Cache size for inverted index metadata.
+metadata_cache_size = "64MiB"
+
+## Cache size for inverted index content.
+content_cache_size = "128MiB"
+
+## Page size for inverted index content cache.
+content_cache_page_size = "8MiB"
+
 ## The options for full-text index in Mito engine.
 [region_engine.mito.fulltext_index]
 
@@ -10,6 +10,12 @@ server_addr = "127.0.0.1:3002"
 ## Store server address default to etcd store.
 store_addrs = ["127.0.0.1:2379"]
 
+## If it's not empty, the metasrv will store all data with this key prefix.
+store_key_prefix = ""
+
+## The datastore for meta server.
+backend = "EtcdStore"
+
 ## Datanode selector type.
 ## - `round_robin` (default value)
 ## - `lease_based`
@@ -20,20 +26,14 @@ selector = "round_robin"
 ## Store data in memory.
 use_memory_store = false
 
-## Whether to enable greptimedb telemetry.
-enable_telemetry = true
-
-## If it's not empty, the metasrv will store all data with this key prefix.
-store_key_prefix = ""
-
 ## Whether to enable region failover.
 ## This feature is only available on GreptimeDB running on cluster mode and
 ## - Using Remote WAL
 ## - Using shared storage (e.g., s3).
 enable_region_failover = false
 
-## The datastore for meta server.
-backend = "EtcdStore"
+## Whether to enable greptimedb telemetry. Enabled by default.
+#+ enable_telemetry = true
 
 ## The runtime options.
 #+ [runtime]
@@ -1,9 +1,6 @@
 ## The running mode of the datanode. It can be `standalone` or `distributed`.
 mode = "standalone"
 
-## Enable telemetry to collect anonymous usage data.
-enable_telemetry = true
-
 ## The default timezone of the server.
 ## @toml2docs:none-default
 default_timezone = "UTC"
@@ -18,6 +15,9 @@ init_regions_parallelism = 16
 ## The maximum current queries allowed to be executed. Zero means unlimited.
 max_concurrent_queries = 0
 
+## Enable telemetry to collect anonymous usage data. Enabled by default.
+#+ enable_telemetry = true
+
 ## The runtime options.
 #+ [runtime]
 ## The number of threads to execute the runtime for global read operations.
@@ -147,15 +147,15 @@ dir = "/tmp/greptimedb/wal"
 
 ## The size of the WAL segment file.
 ## **It's only used when the provider is `raft_engine`**.
-file_size = "256MB"
+file_size = "128MB"
 
 ## The threshold of the WAL size to trigger a flush.
 ## **It's only used when the provider is `raft_engine`**.
-purge_threshold = "4GB"
+purge_threshold = "1GB"
 
 ## The interval to trigger a flush.
 ## **It's only used when the provider is `raft_engine`**.
-purge_interval = "10m"
+purge_interval = "1m"
 
 ## The read batch size.
 ## **It's only used when the provider is `raft_engine`**.
@@ -588,6 +588,9 @@ metadata_cache_size = "64MiB"
 ## Cache size for inverted index content.
 content_cache_size = "128MiB"
 
+## Page size for inverted index content cache.
+content_cache_page_size = "8MiB"
+
 ## The options for full-text index in Mito engine.
 [region_engine.mito.fulltext_index]
 
@@ -15,8 +15,8 @@ RUN apt-get update && \
 RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
     libssl-dev \
     tzdata \
-    protobuf-compiler \
     curl \
+    unzip \
     ca-certificates \
     git \
     build-essential \
@@ -24,6 +24,20 @@ RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
     python3.10 \
     python3.10-dev
 
+ARG TARGETPLATFORM
+RUN echo "target platform: $TARGETPLATFORM"
+
+# Install protobuf, because the one in the apt is too old (v3.12).
+RUN if [ "$TARGETPLATFORM" = "linux/arm64" ]; then \
+        curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v29.1/protoc-29.1-linux-aarch_64.zip && \
+        unzip protoc-29.1-linux-aarch_64.zip -d protoc3; \
+    elif [ "$TARGETPLATFORM" = "linux/amd64" ]; then \
+        curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v29.1/protoc-29.1-linux-x86_64.zip && \
+        unzip protoc-29.1-linux-x86_64.zip -d protoc3; \
+    fi
+RUN mv protoc3/bin/* /usr/local/bin/
+RUN mv protoc3/include/* /usr/local/include/
+
 # https://github.com/GreptimeTeam/greptimedb/actions/runs/10935485852/job/30357457188#step:3:7106
 # `aws-lc-sys` require gcc >= 10.3.0 to work, hence alias to use gcc-10
 RUN apt-get remove -y gcc-9 g++-9 cpp-9 && \
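A quick way to confirm the manually installed protoc is the one on PATH inside the image; the expected output strings are assumptions based on where the Dockerfile moves the binary and on the usual `libprotoc <version>` banner:

```bash
# Run inside the dev-builder image after the protoc install step.
which protoc        # expected: /usr/local/bin/protoc
protoc --version    # expected: libprotoc 29.1
```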
File diff suppressed because it is too large.
@@ -1,2 +1,3 @@
 [toolchain]
 channel = "nightly-2024-10-19"
+components = ["rust-analyzer"]
@@ -58,8 +58,10 @@ def main():
         if not check_snafu_in_files(branch_name, other_rust_files)
     ]
 
-    for name in unused_snafu:
-        print(name)
+    if unused_snafu:
+        print("Unused error variants:")
+        for name in unused_snafu:
+            print(name)
 
     if unused_snafu:
         raise SystemExit(1)
shell.nix (new file, 27 lines)

@@ -0,0 +1,27 @@
+let
+  nixpkgs = fetchTarball "https://github.com/NixOS/nixpkgs/tarball/nixos-unstable";
+  fenix = import (fetchTarball "https://github.com/nix-community/fenix/archive/main.tar.gz") {};
+  pkgs = import nixpkgs { config = {}; overlays = []; };
+in
+
+pkgs.mkShell rec {
+  nativeBuildInputs = with pkgs; [
+    pkg-config
+    git
+    clang
+    gcc
+    protobuf
+    mold
+    (fenix.fromToolchainFile {
+      dir = ./.;
+    })
+    cargo-nextest
+    taplo
+  ];
+
+  buildInputs = with pkgs; [
+    libgit2
+  ];
+
+  LD_LIBRARY_PATH = pkgs.lib.makeLibraryPath buildInputs;
+}
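The new `shell.nix` is what the `cleanbuild-linux-nix` job above consumes. The same environment can be used locally; only the `cargo build` invocation is taken from the workflow, the second command is an illustrative use of the `cargo-nextest` package the shell provides:

```bash
# Reproduce the CI clean build locally (requires Nix).
nix-shell --pure --run "cargo build"

# cargo-nextest comes from the shell as well, e.g.:
nix-shell --run "cargo nextest run"
```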
@@ -16,7 +16,7 @@ use std::collections::HashMap;
 
 use datatypes::schema::{
     ColumnDefaultConstraint, ColumnSchema, FulltextAnalyzer, FulltextOptions, COMMENT_KEY,
-    FULLTEXT_KEY, INVERTED_INDEX_KEY,
+    FULLTEXT_KEY, INVERTED_INDEX_KEY, SKIPPING_INDEX_KEY,
 };
 use greptime_proto::v1::Analyzer;
 use snafu::ResultExt;
@@ -29,6 +29,8 @@ use crate::v1::{ColumnDef, ColumnOptions, SemanticType};
 const FULLTEXT_GRPC_KEY: &str = "fulltext";
 /// Key used to store inverted index options in gRPC column options.
 const INVERTED_INDEX_GRPC_KEY: &str = "inverted_index";
+/// Key used to store skip index options in gRPC column options.
+const SKIPPING_INDEX_GRPC_KEY: &str = "skipping_index";
 
 /// Tries to construct a `ColumnSchema` from the given `ColumnDef`.
 pub fn try_as_column_schema(column_def: &ColumnDef) -> Result<ColumnSchema> {
@@ -60,6 +62,9 @@ pub fn try_as_column_schema(column_def: &ColumnDef) -> Result<ColumnSchema> {
         if let Some(inverted_index) = options.options.get(INVERTED_INDEX_GRPC_KEY) {
             metadata.insert(INVERTED_INDEX_KEY.to_string(), inverted_index.clone());
         }
+        if let Some(skipping_index) = options.options.get(SKIPPING_INDEX_GRPC_KEY) {
+            metadata.insert(SKIPPING_INDEX_KEY.to_string(), skipping_index.clone());
+        }
     }
 
     ColumnSchema::new(&column_def.name, data_type.into(), column_def.is_nullable)
@@ -84,6 +89,11 @@ pub fn options_from_column_schema(column_schema: &ColumnSchema) -> Option<Column
             .options
             .insert(INVERTED_INDEX_GRPC_KEY.to_string(), inverted_index.clone());
     }
+    if let Some(skipping_index) = column_schema.metadata().get(SKIPPING_INDEX_KEY) {
+        options
+            .options
+            .insert(SKIPPING_INDEX_GRPC_KEY.to_string(), skipping_index.clone());
+    }
 
     (!options.options.is_empty()).then_some(options)
 }
@@ -25,6 +25,7 @@ pub enum PermissionReq<'a> {
     GrpcRequest(&'a Request),
     SqlStatement(&'a Statement),
     PromQuery,
+    LogQuery,
     Opentsdb,
     LineProtocol,
     PromStoreWrite,
src/cache/Cargo.toml:
@@ -11,4 +11,3 @@ common-macro.workspace = true
 common-meta.workspace = true
 moka.workspace = true
 snafu.workspace = true
-substrait.workspace = true
@@ -18,7 +18,6 @@ async-stream.workspace = true
 async-trait = "0.1"
 bytes.workspace = true
 common-catalog.workspace = true
-common-config.workspace = true
 common-error.workspace = true
 common-macro.workspace = true
 common-meta.workspace = true
@@ -58,7 +57,5 @@ catalog = { workspace = true, features = ["testing"] }
 chrono.workspace = true
 common-meta = { workspace = true, features = ["testing"] }
 common-query = { workspace = true, features = ["testing"] }
-common-test-util.workspace = true
-log-store.workspace = true
 object-store.workspace = true
 tokio.workspace = true
@@ -64,6 +64,13 @@ pub enum Error {
         source: BoxedError,
     },
 
+    #[snafu(display("Failed to list flow stats"))]
+    ListFlowStats {
+        #[snafu(implicit)]
+        location: Location,
+        source: BoxedError,
+    },
+
     #[snafu(display("Failed to list flows in catalog {catalog}"))]
     ListFlows {
         #[snafu(implicit)]
@@ -326,6 +333,7 @@ impl ErrorExt for Error {
             | Error::ListSchemas { source, .. }
             | Error::ListTables { source, .. }
            | Error::ListFlows { source, .. }
+            | Error::ListFlowStats { source, .. }
            | Error::ListProcedures { source, .. }
            | Error::ListRegionStats { source, .. }
            | Error::ConvertProtoData { source, .. } => source.status_code(),
@@ -17,6 +17,7 @@ use common_error::ext::BoxedError;
 use common_meta::cluster::{ClusterInfo, NodeInfo};
 use common_meta::datanode::RegionStat;
 use common_meta::ddl::{ExecutorContext, ProcedureExecutor};
+use common_meta::key::flow::flow_state::FlowStat;
 use common_meta::rpc::procedure;
 use common_procedure::{ProcedureInfo, ProcedureState};
 use meta_client::MetaClientRef;
@@ -89,4 +90,12 @@ impl InformationExtension for DistributedInformationExtension {
             .map_err(BoxedError::new)
             .context(error::ListRegionStatsSnafu)
     }
+
+    async fn flow_stats(&self) -> std::result::Result<Option<FlowStat>, Self::Error> {
+        self.meta_client
+            .list_flow_stats()
+            .await
+            .map_err(BoxedError::new)
+            .context(crate::error::ListFlowStatsSnafu)
+    }
 }
@@ -38,7 +38,7 @@ pub fn new_table_cache(
 ) -> TableCache {
     let init = init_factory(table_info_cache, table_name_cache);
 
-    CacheContainer::new(name, cache, Box::new(invalidator), init, Box::new(filter))
+    CacheContainer::new(name, cache, Box::new(invalidator), init, filter)
 }
 
 fn init_factory(
@@ -35,6 +35,7 @@ use common_catalog::consts::{self, DEFAULT_CATALOG_NAME, INFORMATION_SCHEMA_NAME
 use common_error::ext::ErrorExt;
 use common_meta::cluster::NodeInfo;
 use common_meta::datanode::RegionStat;
+use common_meta::key::flow::flow_state::FlowStat;
 use common_meta::key::flow::FlowMetadataManager;
 use common_procedure::ProcedureInfo;
 use common_recordbatch::SendableRecordBatchStream;
@@ -192,6 +193,7 @@ impl SystemSchemaProviderInner for InformationSchemaProvider {
             )) as _),
             FLOWS => Some(Arc::new(InformationSchemaFlows::new(
                 self.catalog_name.clone(),
+                self.catalog_manager.clone(),
                 self.flow_metadata_manager.clone(),
             )) as _),
             PROCEDURE_INFO => Some(
@@ -338,6 +340,9 @@ pub trait InformationExtension {
 
     /// Gets the region statistics.
     async fn region_stats(&self) -> std::result::Result<Vec<RegionStat>, Self::Error>;
+
+    /// Get the flow statistics. If no flownode is available, return `None`.
+    async fn flow_stats(&self) -> std::result::Result<Option<FlowStat>, Self::Error>;
 }
 
 pub struct NoopInformationExtension;
@@ -357,4 +362,8 @@ impl InformationExtension for NoopInformationExtension {
     async fn region_stats(&self) -> std::result::Result<Vec<RegionStat>, Self::Error> {
         Ok(vec![])
     }
+
+    async fn flow_stats(&self) -> std::result::Result<Option<FlowStat>, Self::Error> {
+        Ok(None)
+    }
 }
@@ -12,11 +12,12 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-use std::sync::Arc;
+use std::sync::{Arc, Weak};
 
 use common_catalog::consts::INFORMATION_SCHEMA_FLOW_TABLE_ID;
 use common_error::ext::BoxedError;
 use common_meta::key::flow::flow_info::FlowInfoValue;
+use common_meta::key::flow::flow_state::FlowStat;
 use common_meta::key::flow::FlowMetadataManager;
 use common_meta::key::FlowId;
 use common_recordbatch::adapter::RecordBatchStreamAdapter;
@@ -28,7 +29,9 @@ use datatypes::prelude::ConcreteDataType as CDT;
 use datatypes::scalars::ScalarVectorBuilder;
 use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
 use datatypes::value::Value;
-use datatypes::vectors::{Int64VectorBuilder, StringVectorBuilder, UInt32VectorBuilder, VectorRef};
+use datatypes::vectors::{
+    Int64VectorBuilder, StringVectorBuilder, UInt32VectorBuilder, UInt64VectorBuilder, VectorRef,
+};
 use futures::TryStreamExt;
 use snafu::{OptionExt, ResultExt};
 use store_api::storage::{ScanRequest, TableId};
@@ -38,6 +41,8 @@ use crate::error::{
 };
 use crate::information_schema::{Predicates, FLOWS};
 use crate::system_schema::information_schema::InformationTable;
+use crate::system_schema::utils;
+use crate::CatalogManager;
 
 const INIT_CAPACITY: usize = 42;
 
@@ -45,6 +50,7 @@ const INIT_CAPACITY: usize = 42;
 // pk is (flow_name, flow_id, table_catalog)
 pub const FLOW_NAME: &str = "flow_name";
 pub const FLOW_ID: &str = "flow_id";
+pub const STATE_SIZE: &str = "state_size";
 pub const TABLE_CATALOG: &str = "table_catalog";
 pub const FLOW_DEFINITION: &str = "flow_definition";
 pub const COMMENT: &str = "comment";
@@ -55,20 +61,24 @@ pub const FLOWNODE_IDS: &str = "flownode_ids";
 pub const OPTIONS: &str = "options";
 
 /// The `information_schema.flows` to provides information about flows in databases.
+///
 pub(super) struct InformationSchemaFlows {
     schema: SchemaRef,
     catalog_name: String,
+    catalog_manager: Weak<dyn CatalogManager>,
     flow_metadata_manager: Arc<FlowMetadataManager>,
 }
 
 impl InformationSchemaFlows {
     pub(super) fn new(
         catalog_name: String,
+        catalog_manager: Weak<dyn CatalogManager>,
         flow_metadata_manager: Arc<FlowMetadataManager>,
     ) -> Self {
         Self {
             schema: Self::schema(),
             catalog_name,
+            catalog_manager,
             flow_metadata_manager,
         }
     }
@@ -80,6 +90,7 @@ impl InformationSchemaFlows {
         vec![
             (FLOW_NAME, CDT::string_datatype(), false),
             (FLOW_ID, CDT::uint32_datatype(), false),
+            (STATE_SIZE, CDT::uint64_datatype(), true),
             (TABLE_CATALOG, CDT::string_datatype(), false),
             (FLOW_DEFINITION, CDT::string_datatype(), false),
             (COMMENT, CDT::string_datatype(), true),
@@ -99,6 +110,7 @@ impl InformationSchemaFlows {
         InformationSchemaFlowsBuilder::new(
             self.schema.clone(),
             self.catalog_name.clone(),
+            self.catalog_manager.clone(),
             &self.flow_metadata_manager,
         )
     }
@@ -144,10 +156,12 @@ impl InformationTable for InformationSchemaFlows {
 struct InformationSchemaFlowsBuilder {
     schema: SchemaRef,
     catalog_name: String,
+    catalog_manager: Weak<dyn CatalogManager>,
     flow_metadata_manager: Arc<FlowMetadataManager>,
 
     flow_names: StringVectorBuilder,
     flow_ids: UInt32VectorBuilder,
+    state_sizes: UInt64VectorBuilder,
     table_catalogs: StringVectorBuilder,
     raw_sqls: StringVectorBuilder,
     comments: StringVectorBuilder,
@@ -162,15 +176,18 @@ impl InformationSchemaFlowsBuilder {
     fn new(
         schema: SchemaRef,
         catalog_name: String,
+        catalog_manager: Weak<dyn CatalogManager>,
        flow_metadata_manager: &Arc<FlowMetadataManager>,
     ) -> Self {
         Self {
             schema,
             catalog_name,
+            catalog_manager,
             flow_metadata_manager: flow_metadata_manager.clone(),
 
             flow_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
             flow_ids: UInt32VectorBuilder::with_capacity(INIT_CAPACITY),
+            state_sizes: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
             table_catalogs: StringVectorBuilder::with_capacity(INIT_CAPACITY),
             raw_sqls: StringVectorBuilder::with_capacity(INIT_CAPACITY),
             comments: StringVectorBuilder::with_capacity(INIT_CAPACITY),
@@ -195,6 +212,11 @@ impl InformationSchemaFlowsBuilder {
             .flow_names(&catalog_name)
             .await;
 
+        let flow_stat = {
+            let information_extension = utils::information_extension(&self.catalog_manager)?;
+            information_extension.flow_stats().await?
+        };
+
         while let Some((flow_name, flow_id)) = stream
             .try_next()
             .await
@@ -213,7 +235,7 @@ impl InformationSchemaFlowsBuilder {
                 catalog_name: catalog_name.to_string(),
                 flow_name: flow_name.to_string(),
             })?;
-            self.add_flow(&predicates, flow_id.flow_id(), flow_info)?;
+            self.add_flow(&predicates, flow_id.flow_id(), flow_info, &flow_stat)?;
         }
 
         self.finish()
@@ -224,6 +246,7 @@ impl InformationSchemaFlowsBuilder {
         predicates: &Predicates,
         flow_id: FlowId,
         flow_info: FlowInfoValue,
+        flow_stat: &Option<FlowStat>,
     ) -> Result<()> {
         let row = [
             (FLOW_NAME, &Value::from(flow_info.flow_name().to_string())),
@@ -238,6 +261,11 @@ impl InformationSchemaFlowsBuilder {
         }
         self.flow_names.push(Some(flow_info.flow_name()));
         self.flow_ids.push(Some(flow_id));
+        self.state_sizes.push(
+            flow_stat
+                .as_ref()
+                .and_then(|state| state.state_size.get(&flow_id).map(|v| *v as u64)),
+        );
         self.table_catalogs.push(Some(flow_info.catalog_name()));
         self.raw_sqls.push(Some(flow_info.raw_sql()));
         self.comments.push(Some(flow_info.comment()));
@@ -270,6 +298,7 @@ impl InformationSchemaFlowsBuilder {
         let columns: Vec<VectorRef> = vec![
             Arc::new(self.flow_names.finish()),
             Arc::new(self.flow_ids.finish()),
+            Arc::new(self.state_sizes.finish()),
             Arc::new(self.table_catalogs.finish()),
             Arc::new(self.raw_sqls.finish()),
             Arc::new(self.comments.finish()),
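The new `state_size` column slots in next to the existing flow metadata, so checking what a flownode is reporting is an ordinary INFORMATION_SCHEMA query. A minimal sketch (column names taken from the constants in this diff; the output shape is illustrative):

SELECT flow_name, flow_id, state_size
FROM INFORMATION_SCHEMA.FLOWS;

Because the column is declared nullable (uint64, nullable above), flows for which no flownode has reported state simply show NULL rather than zero.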
@@ -54,6 +54,10 @@ const INIT_CAPACITY: usize = 42;
 pub(crate) const PRI_CONSTRAINT_NAME: &str = "PRIMARY";
 /// Time index constraint name
 pub(crate) const TIME_INDEX_CONSTRAINT_NAME: &str = "TIME INDEX";
+/// Inverted index constraint name
+pub(crate) const INVERTED_INDEX_CONSTRAINT_NAME: &str = "INVERTED INDEX";
+/// Fulltext index constraint name
+pub(crate) const FULLTEXT_INDEX_CONSTRAINT_NAME: &str = "FULLTEXT INDEX";
 
 /// The virtual table implementation for `information_schema.KEY_COLUMN_USAGE`.
 pub(super) struct InformationSchemaKeyColumnUsage {
@@ -216,14 +220,13 @@ impl InformationSchemaKeyColumnUsageBuilder {
             let mut stream = catalog_manager.tables(&catalog_name, &schema_name, None);
 
             while let Some(table) = stream.try_next().await? {
-                let mut primary_constraints = vec![];
-
                 let table_info = table.table_info();
                 let table_name = &table_info.name;
                 let keys = &table_info.meta.primary_key_indices;
                 let schema = table.schema();
 
                 for (idx, column) in schema.column_schemas().iter().enumerate() {
+                    let mut constraints = vec![];
                     if column.is_time_index() {
                         self.add_key_column_usage(
                             &predicates,
@@ -236,30 +239,31 @@ impl InformationSchemaKeyColumnUsageBuilder {
                             1, //always 1 for time index
                         );
                     }
-                    if keys.contains(&idx) {
-                        primary_constraints.push((
-                            catalog_name.clone(),
-                            schema_name.clone(),
-                            table_name.to_string(),
-                            column.name.clone(),
-                        ));
-                    }
                     // TODO(dimbtp): foreign key constraint not supported yet
-                }
-
-                for (i, (catalog_name, schema_name, table_name, column_name)) in
-                    primary_constraints.into_iter().enumerate()
-                {
-                    self.add_key_column_usage(
-                        &predicates,
-                        &schema_name,
-                        PRI_CONSTRAINT_NAME,
-                        &catalog_name,
-                        &schema_name,
-                        &table_name,
-                        &column_name,
-                        i as u32 + 1,
-                    );
+                    if keys.contains(&idx) {
+                        constraints.push(PRI_CONSTRAINT_NAME);
+                    }
+                    if column.is_inverted_indexed() {
+                        constraints.push(INVERTED_INDEX_CONSTRAINT_NAME);
+                    }
+                    if column.has_fulltext_index_key() {
+                        constraints.push(FULLTEXT_INDEX_CONSTRAINT_NAME);
+                    }
+
+                    if !constraints.is_empty() {
+                        let aggregated_constraints = constraints.join(", ");
+                        self.add_key_column_usage(
+                            &predicates,
+                            &schema_name,
+                            &aggregated_constraints,
+                            &catalog_name,
+                            &schema_name,
+                            table_name,
+                            &column.name,
+                            idx as u32 + 1,
+                        );
+                    }
                 }
             }
         }
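A quick way to see the effect, assuming a table whose primary-key column also carries an inverted or fulltext index (the table name here is illustrative):

SELECT column_name, constraint_name
FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE
WHERE table_name = 'my_table';

Previously such a column was listed only under the `PRIMARY` constraint; with the aggregation above it appears once, with a combined constraint name such as `PRIMARY, INVERTED INDEX`.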
@@ -23,7 +23,6 @@ common-error.workspace = true
 common-grpc.workspace = true
 common-macro.workspace = true
 common-meta.workspace = true
-common-options.workspace = true
 common-procedure.workspace = true
 common-query.workspace = true
 common-recordbatch.workspace = true
@@ -61,5 +60,4 @@ client = { workspace = true, features = ["testing"] }
 common-test-util.workspace = true
 common-version.workspace = true
 serde.workspace = true
-temp-env = "0.3"
 tempfile.workspace = true
@@ -34,7 +34,7 @@ use common_query::Output;
 use common_recordbatch::RecordBatches;
 use common_telemetry::debug;
 use either::Either;
-use meta_client::client::MetaClientBuilder;
+use meta_client::client::{ClusterKvBackend, MetaClientBuilder};
 use query::datafusion::DatafusionQueryEngine;
 use query::parser::QueryLanguageParser;
 use query::query_engine::{DefaultSerializer, QueryEngineState};
@@ -42,8 +42,6 @@ tonic.workspace = true
 
 [dev-dependencies]
 common-grpc-expr.workspace = true
-datanode.workspace = true
-derive-new = "0.5"
 tracing = "0.1"
 
 [dev-dependencies.substrait_proto]
@@ -59,10 +59,6 @@ impl Instance {
         }
     }
 
-    pub fn datanode_mut(&mut self) -> &mut Datanode {
-        &mut self.datanode
-    }
-
     pub fn datanode(&self) -> &Datanode {
         &self.datanode
     }
@@ -63,10 +63,6 @@ impl Instance {
         }
     }
 
-    pub fn flownode_mut(&mut self) -> &mut FlownodeInstance {
-        &mut self.flownode
-    }
-
     pub fn flownode(&self) -> &FlownodeInstance {
         &self.flownode
     }
@@ -34,6 +34,7 @@ use common_meta::ddl::flow_meta::{FlowMetadataAllocator, FlowMetadataAllocatorRe
 use common_meta::ddl::table_meta::{TableMetadataAllocator, TableMetadataAllocatorRef};
 use common_meta::ddl::{DdlContext, NoopRegionFailureDetectorControl, ProcedureExecutorRef};
 use common_meta::ddl_manager::DdlManager;
+use common_meta::key::flow::flow_state::FlowStat;
 use common_meta::key::flow::{FlowMetadataManager, FlowMetadataManagerRef};
 use common_meta::key::{TableMetadataManager, TableMetadataManagerRef};
 use common_meta::kv_backend::KvBackendRef;
@@ -70,7 +71,7 @@ use servers::http::HttpOptions;
 use servers::tls::{TlsMode, TlsOption};
 use servers::Mode;
 use snafu::ResultExt;
-use tokio::sync::broadcast;
+use tokio::sync::{broadcast, RwLock};
 use tracing_appender::non_blocking::WorkerGuard;
 
 use crate::error::{
@@ -507,7 +508,7 @@ impl StartCommand {
             procedure_manager.clone(),
         ));
         let catalog_manager = KvBackendCatalogManager::new(
-            information_extension,
+            information_extension.clone(),
             kv_backend.clone(),
             layered_cache_registry.clone(),
             Some(procedure_manager.clone()),
@@ -532,6 +533,14 @@ impl StartCommand {
                 .context(OtherSnafu)?,
         );
 
+        // set the ref to query for the local flow state
+        {
+            let flow_worker_manager = flownode.flow_worker_manager();
+            information_extension
+                .set_flow_worker_manager(flow_worker_manager.clone())
+                .await;
+        }
+
         let node_manager = Arc::new(StandaloneDatanodeManager {
             region_server: datanode.region_server(),
             flow_server: flownode.flow_worker_manager(),
@@ -669,6 +678,7 @@ pub struct StandaloneInformationExtension {
     region_server: RegionServer,
     procedure_manager: ProcedureManagerRef,
     start_time_ms: u64,
+    flow_worker_manager: RwLock<Option<Arc<FlowWorkerManager>>>,
 }
 
 impl StandaloneInformationExtension {
@@ -677,8 +687,15 @@ impl StandaloneInformationExtension {
             region_server,
             procedure_manager,
             start_time_ms: common_time::util::current_time_millis() as u64,
+            flow_worker_manager: RwLock::new(None),
         }
     }
+
+    /// Set the flow worker manager for the standalone instance.
+    pub async fn set_flow_worker_manager(&self, flow_worker_manager: Arc<FlowWorkerManager>) {
+        let mut guard = self.flow_worker_manager.write().await;
+        *guard = Some(flow_worker_manager);
+    }
 }
 
 #[async_trait::async_trait]
@@ -750,6 +767,18 @@ impl InformationExtension for StandaloneInformationExtension {
             .collect::<Vec<_>>();
         Ok(stats)
     }
+
+    async fn flow_stats(&self) -> std::result::Result<Option<FlowStat>, Self::Error> {
+        Ok(Some(
+            self.flow_worker_manager
+                .read()
+                .await
+                .as_ref()
+                .unwrap()
+                .gen_state_report()
+                .await,
+        ))
+    }
 }
 
 #[cfg(test)]
@@ -17,6 +17,7 @@ common-macro.workspace = true
 futures.workspace = true
 paste = "1.0"
 pin-project.workspace = true
+rand.workspace = true
 serde = { version = "1.0", features = ["derive"] }
 snafu.workspace = true
 tokio.workspace = true
@@ -36,6 +36,11 @@ pub struct Metadata {
 /// `RangeReader` reads a range of bytes from a source.
 #[async_trait]
 pub trait RangeReader: Send + Unpin {
+    /// Sets the file size hint for the reader.
+    ///
+    /// It's used to optimize the reading process by reducing the number of remote requests.
+    fn with_file_size_hint(&mut self, file_size_hint: u64);
+
     /// Returns the metadata of the source.
     async fn metadata(&mut self) -> io::Result<Metadata>;
 
@@ -70,6 +75,10 @@ pub trait RangeReader: Send + Unpin {
 
 #[async_trait]
 impl<R: ?Sized + RangeReader> RangeReader for &mut R {
+    fn with_file_size_hint(&mut self, file_size_hint: u64) {
+        (*self).with_file_size_hint(file_size_hint)
+    }
+
     async fn metadata(&mut self) -> io::Result<Metadata> {
         (*self).metadata().await
     }
@@ -186,15 +195,17 @@ impl<R: RangeReader + 'static> AsyncRead for AsyncReadAdapter<R> {
 
 #[async_trait]
 impl RangeReader for Vec<u8> {
+    fn with_file_size_hint(&mut self, _file_size_hint: u64) {
+        // do nothing
+    }
+
     async fn metadata(&mut self) -> io::Result<Metadata> {
         Ok(Metadata {
             content_length: self.len() as u64,
         })
     }
 
-    async fn read(&mut self, mut range: Range<u64>) -> io::Result<Bytes> {
-        range.end = range.end.min(self.len() as u64);
-
+    async fn read(&mut self, range: Range<u64>) -> io::Result<Bytes> {
         let bytes = Bytes::copy_from_slice(&self[range.start as usize..range.end as usize]);
         Ok(bytes)
     }
@@ -222,6 +233,10 @@ impl FileReader {
 
 #[async_trait]
 impl RangeReader for FileReader {
+    fn with_file_size_hint(&mut self, _file_size_hint: u64) {
+        // do nothing
+    }
+
     async fn metadata(&mut self) -> io::Result<Metadata> {
         Ok(Metadata {
             content_length: self.content_length,
@@ -19,7 +19,7 @@ pub const GIB: u64 = MIB * BINARY_DATA_MAGNITUDE;
 pub const TIB: u64 = GIB * BINARY_DATA_MAGNITUDE;
 pub const PIB: u64 = TIB * BINARY_DATA_MAGNITUDE;
 
-#[derive(Clone, Copy, PartialEq, Eq, Ord, PartialOrd)]
+#[derive(Clone, Copy, PartialEq, Eq, Ord, PartialOrd, Default)]
 pub struct ReadableSize(pub u64);
 
 impl ReadableSize {
@@ -8,10 +8,5 @@ license.workspace = true
 workspace = true
 
 [dependencies]
-common-error.workspace = true
-common-macro.workspace = true
-snafu.workspace = true
 
 [dev-dependencies]
-chrono.workspace = true
-tokio.workspace = true
@@ -48,5 +48,4 @@ url = "2.3"
 [dev-dependencies]
 common-telemetry.workspace = true
 common-test-util.workspace = true
-dotenv.workspace = true
 uuid.workspace = true
@@ -27,7 +27,7 @@ pub fn build_fs_backend(root: &str) -> Result<ObjectStore> {
             DefaultLoggingInterceptor,
         ))
         .layer(object_store::layers::TracingLayer)
-        .layer(object_store::layers::PrometheusMetricsLayer::new(true))
+        .layer(object_store::layers::build_prometheus_metrics_layer(true))
         .finish();
     Ok(object_store)
 }
@@ -89,7 +89,7 @@ pub fn build_s3_backend(
             DefaultLoggingInterceptor,
         ))
         .layer(object_store::layers::TracingLayer)
-        .layer(object_store::layers::PrometheusMetricsLayer::new(true))
+        .layer(object_store::layers::build_prometheus_metrics_layer(true))
         .finish())
 }
 
@@ -5,12 +5,7 @@ edition.workspace = true
 license.workspace = true
 
 [dependencies]
-api.workspace = true
 async-trait.workspace = true
-common-base.workspace = true
 common-error.workspace = true
 common-macro.workspace = true
-common-query.workspace = true
-session.workspace = true
 snafu.workspace = true
-sql.workspace = true
@@ -51,6 +51,5 @@ wkt = { version = "0.11", optional = true }
 
 [dev-dependencies]
 approx = "0.5"
-ron = "0.7"
 serde = { version = "1.0", features = ["derive"] }
 tokio.workspace = true
@@ -26,3 +26,4 @@ pub mod function_registry;
 pub mod handlers;
 pub mod helper;
 pub mod state;
+pub mod utils;
@@ -204,20 +204,10 @@ impl PatternAst {
     fn convert_literal(column: &str, pattern: &str) -> Expr {
         logical_expr::col(column).like(logical_expr::lit(format!(
             "%{}%",
-            Self::escape_pattern(pattern)
+            crate::utils::escape_like_pattern(pattern)
         )))
     }
 
-    fn escape_pattern(pattern: &str) -> String {
-        pattern
-            .chars()
-            .flat_map(|c| match c {
-                '\\' | '%' | '_' => vec!['\\', c],
-                _ => vec![c],
-            })
-            .collect::<String>()
-    }
-
     /// Transform this AST with preset rules to make it correct.
     fn transform_ast(self) -> Result<Self> {
         self.transform_up(Self::collapse_binary_branch_fn)
@@ -15,6 +15,8 @@
 mod convert;
 mod distance;
 pub(crate) mod impl_conv;
+mod scalar_add;
+mod scalar_mul;
 
 use std::sync::Arc;
 
@@ -32,5 +34,9 @@ impl VectorFunction {
         registry.register(Arc::new(distance::CosDistanceFunction));
         registry.register(Arc::new(distance::DotProductFunction));
         registry.register(Arc::new(distance::L2SqDistanceFunction));
+
+        // scalar calculation
+        registry.register(Arc::new(scalar_add::ScalarAddFunction));
+        registry.register(Arc::new(scalar_mul::ScalarMulFunction));
     }
 }
@@ -109,7 +109,6 @@ pub fn parse_veclit_from_strlit(s: &str) -> Result<Vec<f32>> {
     })
 }
 
-#[allow(unused)]
 /// Convert a vector literal to a binary literal.
 pub fn veclit_to_binlit(vec: &[f32]) -> Vec<u8> {
     if cfg!(target_endian = "little") {
src/common/function/src/scalars/vector/scalar_add.rs (new file, 173 lines):

// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::borrow::Cow;
use std::fmt::Display;

use common_query::error::{InvalidFuncArgsSnafu, Result};
use common_query::prelude::Signature;
use datatypes::prelude::ConcreteDataType;
use datatypes::scalars::ScalarVectorBuilder;
use datatypes::vectors::{BinaryVectorBuilder, MutableVector, VectorRef};
use nalgebra::DVectorView;
use snafu::ensure;

use crate::function::{Function, FunctionContext};
use crate::helper;
use crate::scalars::vector::impl_conv::{as_veclit, as_veclit_if_const, veclit_to_binlit};

const NAME: &str = "vec_scalar_add";

/// Adds a scalar to each element of a vector.
///
/// # Example
///
/// ```sql
/// SELECT vec_to_string(vec_scalar_add(1, "[1, 2, 3]")) as result;
///
/// +---------+
/// | result  |
/// +---------+
/// | [2,3,4] |
/// +---------+
///
/// -- Negative scalar to simulate subtraction
/// SELECT vec_to_string(vec_scalar_add(-1, "[1, 2, 3]")) as result;
///
/// +---------+
/// | result  |
/// +---------+
/// | [0,1,2] |
/// +---------+
/// ```
#[derive(Debug, Clone, Default)]
pub struct ScalarAddFunction;

impl Function for ScalarAddFunction {
    fn name(&self) -> &str {
        NAME
    }

    fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
        Ok(ConcreteDataType::binary_datatype())
    }

    fn signature(&self) -> Signature {
        helper::one_of_sigs2(
            vec![ConcreteDataType::float64_datatype()],
            vec![
                ConcreteDataType::string_datatype(),
                ConcreteDataType::binary_datatype(),
            ],
        )
    }

    fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
        ensure!(
            columns.len() == 2,
            InvalidFuncArgsSnafu {
                err_msg: format!(
                    "The length of the args is not correct, expect exactly two, have: {}",
                    columns.len()
                ),
            }
        );
        let arg0 = &columns[0];
        let arg1 = &columns[1];

        let len = arg0.len();
        let mut result = BinaryVectorBuilder::with_capacity(len);
        if len == 0 {
            return Ok(result.to_vector());
        }

        let arg1_const = as_veclit_if_const(arg1)?;

        for i in 0..len {
            let arg0 = arg0.get(i).as_f64_lossy();
            let Some(arg0) = arg0 else {
                result.push_null();
                continue;
            };

            let arg1 = match arg1_const.as_ref() {
                Some(arg1) => Some(Cow::Borrowed(arg1.as_ref())),
                None => as_veclit(arg1.get_ref(i))?,
            };
            let Some(arg1) = arg1 else {
                result.push_null();
                continue;
            };

            let vec = DVectorView::from_slice(&arg1, arg1.len());
            let vec_res = vec.add_scalar(arg0 as _);

            let veclit = vec_res.as_slice();
            let binlit = veclit_to_binlit(veclit);
            result.push(Some(&binlit));
        }

        Ok(result.to_vector())
    }
}

impl Display for ScalarAddFunction {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", NAME.to_ascii_uppercase())
    }
}

#[cfg(test)]
mod tests {
    use std::sync::Arc;

    use datatypes::vectors::{Float32Vector, StringVector};

    use super::*;

    #[test]
    fn test_scalar_add() {
        let func = ScalarAddFunction;

        let input0 = Arc::new(Float32Vector::from(vec![
            Some(1.0),
            Some(-1.0),
            None,
            Some(3.0),
        ]));
        let input1 = Arc::new(StringVector::from(vec![
            Some("[1.0,2.0,3.0]".to_string()),
            Some("[4.0,5.0,6.0]".to_string()),
            Some("[7.0,8.0,9.0]".to_string()),
            None,
        ]));

        let result = func
            .eval(FunctionContext::default(), &[input0, input1])
            .unwrap();

        let result = result.as_ref();
        assert_eq!(result.len(), 4);
        assert_eq!(
            result.get_ref(0).as_binary().unwrap(),
            Some(veclit_to_binlit(&[2.0, 3.0, 4.0]).as_slice())
        );
        assert_eq!(
            result.get_ref(1).as_binary().unwrap(),
            Some(veclit_to_binlit(&[3.0, 4.0, 5.0]).as_slice())
        );
        assert!(result.get_ref(2).is_null());
        assert!(result.get_ref(3).is_null());
    }
}
src/common/function/src/scalars/vector/scalar_mul.rs (new file, 173 lines):

// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::borrow::Cow;
use std::fmt::Display;

use common_query::error::{InvalidFuncArgsSnafu, Result};
use common_query::prelude::Signature;
use datatypes::prelude::ConcreteDataType;
use datatypes::scalars::ScalarVectorBuilder;
use datatypes::vectors::{BinaryVectorBuilder, MutableVector, VectorRef};
use nalgebra::DVectorView;
use snafu::ensure;

use crate::function::{Function, FunctionContext};
use crate::helper;
use crate::scalars::vector::impl_conv::{as_veclit, as_veclit_if_const, veclit_to_binlit};

const NAME: &str = "vec_scalar_mul";

/// Multiples a scalar to each element of a vector.
///
/// # Example
///
/// ```sql
/// SELECT vec_to_string(vec_scalar_mul(2, "[1, 2, 3]")) as result;
///
/// +---------+
/// | result  |
/// +---------+
/// | [2,4,6] |
/// +---------+
///
/// -- 1/scalar to simulate division
/// SELECT vec_to_string(vec_scalar_mul(0.5, "[2, 4, 6]")) as result;
///
/// +---------+
/// | result  |
/// +---------+
/// | [1,2,3] |
/// +---------+
/// ```
#[derive(Debug, Clone, Default)]
pub struct ScalarMulFunction;

impl Function for ScalarMulFunction {
    fn name(&self) -> &str {
        NAME
    }

    fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
        Ok(ConcreteDataType::binary_datatype())
    }

    fn signature(&self) -> Signature {
        helper::one_of_sigs2(
            vec![ConcreteDataType::float64_datatype()],
            vec![
                ConcreteDataType::string_datatype(),
                ConcreteDataType::binary_datatype(),
            ],
        )
    }

    fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
        ensure!(
            columns.len() == 2,
            InvalidFuncArgsSnafu {
                err_msg: format!(
                    "The length of the args is not correct, expect exactly two, have: {}",
                    columns.len()
                ),
            }
        );
        let arg0 = &columns[0];
        let arg1 = &columns[1];

        let len = arg0.len();
        let mut result = BinaryVectorBuilder::with_capacity(len);
        if len == 0 {
            return Ok(result.to_vector());
        }

        let arg1_const = as_veclit_if_const(arg1)?;

        for i in 0..len {
            let arg0 = arg0.get(i).as_f64_lossy();
            let Some(arg0) = arg0 else {
                result.push_null();
                continue;
            };

            let arg1 = match arg1_const.as_ref() {
                Some(arg1) => Some(Cow::Borrowed(arg1.as_ref())),
                None => as_veclit(arg1.get_ref(i))?,
            };
            let Some(arg1) = arg1 else {
                result.push_null();
                continue;
            };

            let vec = DVectorView::from_slice(&arg1, arg1.len());
            let vec_res = vec.scale(arg0 as _);

            let veclit = vec_res.as_slice();
            let binlit = veclit_to_binlit(veclit);
            result.push(Some(&binlit));
        }

        Ok(result.to_vector())
    }
}

impl Display for ScalarMulFunction {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", NAME.to_ascii_uppercase())
    }
}

#[cfg(test)]
mod tests {
    use std::sync::Arc;

    use datatypes::vectors::{Float32Vector, StringVector};

    use super::*;

    #[test]
    fn test_scalar_mul() {
        let func = ScalarMulFunction;

        let input0 = Arc::new(Float32Vector::from(vec![
            Some(2.0),
            Some(-0.5),
            None,
            Some(3.0),
        ]));
        let input1 = Arc::new(StringVector::from(vec![
            Some("[1.0,2.0,3.0]".to_string()),
            Some("[8.0,10.0,12.0]".to_string()),
            Some("[7.0,8.0,9.0]".to_string()),
            None,
        ]));

        let result = func
            .eval(FunctionContext::default(), &[input0, input1])
            .unwrap();

        let result = result.as_ref();
        assert_eq!(result.len(), 4);
        assert_eq!(
            result.get_ref(0).as_binary().unwrap(),
            Some(veclit_to_binlit(&[2.0, 4.0, 6.0]).as_slice())
        );
        assert_eq!(
            result.get_ref(1).as_binary().unwrap(),
            Some(veclit_to_binlit(&[-4.0, -5.0, -6.0]).as_slice())
        );
        assert!(result.get_ref(2).is_null());
        assert!(result.get_ref(3).is_null());
    }
}
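Both functions take the scalar as a float and the vector as either a string or binary literal, mirroring the existing distance functions, so they compose naturally. A small illustrative query (building on the `vec_to_string`/`vec_scalar_*` usage shown in the doc comments above):

SELECT vec_to_string(vec_scalar_mul(0.5, vec_scalar_add(-1, '[2, 4, 6]'))) AS result;

which shifts the vector to `[1, 3, 5]` and then scales it by 0.5, yielding `[0.5,1.5,2.5]`.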
src/common/function/src/utils.rs (new file, 58 lines):

// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

/// Escapes special characters in the provided pattern string for `LIKE`.
///
/// Specifically, it prefixes the backslash (`\`), percent (`%`), and underscore (`_`)
/// characters with an additional backslash to ensure they are treated literally.
///
/// # Examples
///
/// ```rust
/// let escaped = escape_pattern("100%_some\\path");
/// assert_eq!(escaped, "100\\%\\_some\\\\path");
/// ```
pub fn escape_like_pattern(pattern: &str) -> String {
    pattern
        .chars()
        .flat_map(|c| match c {
            '\\' | '%' | '_' => vec!['\\', c],
            _ => vec![c],
        })
        .collect::<String>()
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_escape_like_pattern() {
        assert_eq!(
            escape_like_pattern("100%_some\\path"),
            "100\\%\\_some\\\\path"
        );
        assert_eq!(escape_like_pattern(""), "");
        assert_eq!(escape_like_pattern("hello"), "hello");
        assert_eq!(escape_like_pattern("\\%_"), "\\\\\\%\\_");
        assert_eq!(escape_like_pattern("%%__\\\\"), "\\%\\%\\_\\_\\\\\\\\");
        assert_eq!(escape_like_pattern("abc123"), "abc123");
        assert_eq!(escape_like_pattern("%_\\"), "\\%\\_\\\\");
        assert_eq!(
            escape_like_pattern("%%__\\\\another%string"),
            "\\%\\%\\_\\_\\\\\\\\another\\%string"
        );
        assert_eq!(escape_like_pattern("foo%bar_"), "foo\\%bar\\_");
        assert_eq!(escape_like_pattern("\\_\\%"), "\\\\\\_\\\\\\%");
    }
}
src/common/meta/src/cache/container.rs (44 changed lines)

@@ -43,7 +43,7 @@ pub struct CacheContainer<K, V, CacheToken> {
     cache: Cache<K, V>,
     invalidator: Invalidator<K, V, CacheToken>,
     initializer: Initializer<K, V>,
-    token_filter: TokenFilter<CacheToken>,
+    token_filter: fn(&CacheToken) -> bool,
 }
 
 impl<K, V, CacheToken> CacheContainer<K, V, CacheToken>
@@ -58,7 +58,7 @@ where
         cache: Cache<K, V>,
         invalidator: Invalidator<K, V, CacheToken>,
         initializer: Initializer<K, V>,
-        token_filter: TokenFilter<CacheToken>,
+        token_filter: fn(&CacheToken) -> bool,
     ) -> Self {
         Self {
             name,
@@ -206,10 +206,13 @@ mod tests {
         name: &'a str,
     }
 
+    fn always_true_filter(_: &String) -> bool {
+        true
+    }
+
     #[tokio::test]
     async fn test_get() {
         let cache: Cache<NameKey, String> = CacheBuilder::new(128).build();
-        let filter: TokenFilter<String> = Box::new(|_| true);
         let counter = Arc::new(AtomicI32::new(0));
         let moved_counter = counter.clone();
         let init: Initializer<NameKey, String> = Arc::new(move |_| {
@@ -219,7 +222,13 @@ mod tests {
         let invalidator: Invalidator<NameKey, String, String> =
             Box::new(|_, _| Box::pin(async { Ok(()) }));
 
-        let adv_cache = CacheContainer::new("test".to_string(), cache, invalidator, init, filter);
+        let adv_cache = CacheContainer::new(
+            "test".to_string(),
+            cache,
+            invalidator,
+            init,
+            always_true_filter,
+        );
         let key = NameKey { name: "key" };
         let value = adv_cache.get(key).await.unwrap().unwrap();
         assert_eq!(value, "hi");
@@ -233,7 +242,6 @@ mod tests {
     #[tokio::test]
     async fn test_get_by_ref() {
         let cache: Cache<String, String> = CacheBuilder::new(128).build();
-        let filter: TokenFilter<String> = Box::new(|_| true);
         let counter = Arc::new(AtomicI32::new(0));
         let moved_counter = counter.clone();
         let init: Initializer<String, String> = Arc::new(move |_| {
@@ -243,7 +251,13 @@ mod tests {
         let invalidator: Invalidator<String, String, String> =
             Box::new(|_, _| Box::pin(async { Ok(()) }));
 
-        let adv_cache = CacheContainer::new("test".to_string(), cache, invalidator, init, filter);
+        let adv_cache = CacheContainer::new(
+            "test".to_string(),
+            cache,
+            invalidator,
+            init,
+            always_true_filter,
+        );
         let value = adv_cache.get_by_ref("foo").await.unwrap().unwrap();
         assert_eq!(value, "hi");
         let value = adv_cache.get_by_ref("foo").await.unwrap().unwrap();
@@ -257,13 +271,18 @@ mod tests {
     #[tokio::test]
     async fn test_get_value_not_exits() {
         let cache: Cache<String, String> = CacheBuilder::new(128).build();
-        let filter: TokenFilter<String> = Box::new(|_| true);
         let init: Initializer<String, String> =
             Arc::new(move |_| Box::pin(async { error::ValueNotExistSnafu {}.fail() }));
         let invalidator: Invalidator<String, String, String> =
             Box::new(|_, _| Box::pin(async { Ok(()) }));
 
-        let adv_cache = CacheContainer::new("test".to_string(), cache, invalidator, init, filter);
+        let adv_cache = CacheContainer::new(
+            "test".to_string(),
+            cache,
+            invalidator,
+            init,
+            always_true_filter,
+        );
         let value = adv_cache.get_by_ref("foo").await.unwrap();
         assert!(value.is_none());
     }
@@ -271,7 +290,6 @@ mod tests {
     #[tokio::test]
     async fn test_invalidate() {
         let cache: Cache<String, String> = CacheBuilder::new(128).build();
-        let filter: TokenFilter<String> = Box::new(|_| true);
         let counter = Arc::new(AtomicI32::new(0));
         let moved_counter = counter.clone();
         let init: Initializer<String, String> = Arc::new(move |_| {
@@ -285,7 +303,13 @@ mod tests {
             })
         });
 
-        let adv_cache = CacheContainer::new("test".to_string(), cache, invalidator, init, filter);
+        let adv_cache = CacheContainer::new(
+            "test".to_string(),
+            cache,
+            invalidator,
+            init,
+            always_true_filter,
+        );
         let value = adv_cache.get_by_ref("foo").await.unwrap().unwrap();
         assert_eq!(value, "hi");
         let value = adv_cache.get_by_ref("foo").await.unwrap().unwrap();
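The container.rs hunks above replace the boxed TokenFilter closure with a plain fn pointer, so call sites now pass a named function instead of Box::new(|_| true). A minimal caller-side sketch under that reading; the generic parameters and the cache/invalidator/init values are stand-ins that would come from the surrounding module:

    // Accept every cache token, mirroring the always_true_filter helper added in the tests above.
    fn accept_all(_token: &String) -> bool {
        true
    }

    fn build_container(
        cache: Cache<String, String>,
        invalidator: Invalidator<String, String, String>,
        init: Initializer<String, String>,
    ) -> CacheContainer<String, String, String> {
        // The filter argument is now a fn pointer rather than a boxed closure.
        CacheContainer::new("example".to_string(), cache, invalidator, init, accept_all)
    }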
@@ -45,7 +45,7 @@ pub fn new_table_flownode_set_cache(
     let table_flow_manager = Arc::new(TableFlowManager::new(kv_backend));
     let init = init_factory(table_flow_manager);
 
-    CacheContainer::new(name, cache, Box::new(invalidator), init, Box::new(filter))
+    CacheContainer::new(name, cache, Box::new(invalidator), init, filter)
 }
 
 fn init_factory(table_flow_manager: TableFlowManagerRef) -> Initializer<TableId, FlownodeSet> {
src/common/meta/src/cache/registry.rs (22 changed lines)

@@ -151,12 +151,15 @@ mod tests {
     use crate::cache::*;
     use crate::instruction::CacheIdent;
 
+    fn always_true_filter(_: &CacheIdent) -> bool {
+        true
+    }
+
     fn test_cache(
         name: &str,
         invalidator: Invalidator<String, String, CacheIdent>,
     ) -> CacheContainer<String, String, CacheIdent> {
         let cache: Cache<String, String> = CacheBuilder::new(128).build();
-        let filter: TokenFilter<CacheIdent> = Box::new(|_| true);
         let counter = Arc::new(AtomicI32::new(0));
         let moved_counter = counter.clone();
         let init: Initializer<String, String> = Arc::new(move |_| {
@@ -164,7 +167,13 @@ mod tests {
             Box::pin(async { Ok(Some("hi".to_string())) })
         });
 
-        CacheContainer::new(name.to_string(), cache, invalidator, init, filter)
+        CacheContainer::new(
+            name.to_string(),
+            cache,
+            invalidator,
+            init,
+            always_true_filter,
+        )
     }
 
     fn test_i32_cache(
@@ -172,7 +181,6 @@ mod tests {
         invalidator: Invalidator<i32, String, CacheIdent>,
     ) -> CacheContainer<i32, String, CacheIdent> {
         let cache: Cache<i32, String> = CacheBuilder::new(128).build();
-        let filter: TokenFilter<CacheIdent> = Box::new(|_| true);
         let counter = Arc::new(AtomicI32::new(0));
         let moved_counter = counter.clone();
         let init: Initializer<i32, String> = Arc::new(move |_| {
@@ -180,7 +188,13 @@ mod tests {
             Box::pin(async { Ok(Some("foo".to_string())) })
         });
 
-        CacheContainer::new(name.to_string(), cache, invalidator, init, filter)
+        CacheContainer::new(
+            name.to_string(),
+            cache,
+            invalidator,
+            init,
+            always_true_filter,
+        )
     }
 
     #[tokio::test]
src/common/meta/src/cache/table/schema.rs (2 changed lines)

@@ -36,7 +36,7 @@ pub fn new_schema_cache(
     let schema_manager = SchemaManager::new(kv_backend.clone());
     let init = init_factory(schema_manager);
 
-    CacheContainer::new(name, cache, Box::new(invalidator), init, Box::new(filter))
+    CacheContainer::new(name, cache, Box::new(invalidator), init, filter)
 }
 
 fn init_factory(schema_manager: SchemaManager) -> Initializer<SchemaName, Arc<SchemaNameValue>> {

@@ -41,7 +41,7 @@ pub fn new_table_info_cache(
     let table_info_manager = Arc::new(TableInfoManager::new(kv_backend));
     let init = init_factory(table_info_manager);
 
-    CacheContainer::new(name, cache, Box::new(invalidator), init, Box::new(filter))
+    CacheContainer::new(name, cache, Box::new(invalidator), init, filter)
 }
 
 fn init_factory(table_info_manager: TableInfoManagerRef) -> Initializer<TableId, Arc<TableInfo>> {

@@ -41,7 +41,7 @@ pub fn new_table_name_cache(
     let table_name_manager = Arc::new(TableNameManager::new(kv_backend));
     let init = init_factory(table_name_manager);
 
-    CacheContainer::new(name, cache, Box::new(invalidator), init, Box::new(filter))
+    CacheContainer::new(name, cache, Box::new(invalidator), init, filter)
 }
 
 fn init_factory(table_name_manager: TableNameManagerRef) -> Initializer<TableName, TableId> {
src/common/meta/src/cache/table/table_route.rs (10 changed lines)

@@ -49,14 +49,6 @@ impl TableRoute {
             TableRoute::Logical(_) => None,
         }
     }
-
-    /// Returns [LogicalTableRouteValue] reference if it's [TableRoute::Logical]; Otherwise it returns [None].
-    pub fn as_logical_table_route_ref(&self) -> Option<&Arc<LogicalTableRouteValue>> {
-        match self {
-            TableRoute::Physical(_) => None,
-            TableRoute::Logical(table_route) => Some(table_route),
-        }
-    }
 }
 
 /// [TableRouteCache] caches the [TableId] to [TableRoute] mapping.
@@ -73,7 +65,7 @@ pub fn new_table_route_cache(
     let table_info_manager = Arc::new(TableRouteManager::new(kv_backend));
     let init = init_factory(table_info_manager);
 
-    CacheContainer::new(name, cache, Box::new(invalidator), init, Box::new(filter))
+    CacheContainer::new(name, cache, Box::new(invalidator), init, filter)
 }
 
 fn init_factory(

@@ -40,7 +40,7 @@ pub fn new_table_schema_cache(
     let table_info_manager = TableInfoManager::new(kv_backend);
     let init = init_factory(table_info_manager);
 
-    CacheContainer::new(name, cache, Box::new(invalidator), init, Box::new(filter))
+    CacheContainer::new(name, cache, Box::new(invalidator), init, filter)
 }
 
 fn init_factory(table_info_manager: TableInfoManager) -> Initializer<TableId, Arc<SchemaName>> {
src/common/meta/src/cache/table/view_info.rs (2 changed lines)

@@ -40,7 +40,7 @@ pub fn new_view_info_cache(
     let view_info_manager = Arc::new(ViewInfoManager::new(kv_backend));
     let init = init_factory(view_info_manager);
 
-    CacheContainer::new(name, cache, Box::new(invalidator), init, Box::new(filter))
+    CacheContainer::new(name, cache, Box::new(invalidator), init, filter)
 }
 
 fn init_factory(view_info_manager: ViewInfoManagerRef) -> Initializer<TableId, Arc<ViewInfoValue>> {

@@ -137,6 +137,7 @@ use self::schema_name::{SchemaManager, SchemaNameKey, SchemaNameValue};
 use self::table_route::{TableRouteManager, TableRouteValue};
 use self::tombstone::TombstoneManager;
 use crate::error::{self, Result, SerdeJsonSnafu};
+use crate::key::flow::flow_state::FlowStateValue;
 use crate::key::node_address::NodeAddressValue;
 use crate::key::table_route::TableRouteKey;
 use crate::key::txn_helper::TxnOpGetResponseSet;
@@ -1262,7 +1263,8 @@ impl_metadata_value! {
     FlowRouteValue,
     TableFlowValue,
     NodeAddressValue,
-    SchemaNameValue
+    SchemaNameValue,
+    FlowStateValue
 }
 
 impl_optional_metadata_value! {
@@ -13,7 +13,6 @@
 // limitations under the License.
 
 use std::fmt::Display;
-use std::sync::Arc;
 
 use common_catalog::consts::DEFAULT_CATALOG_NAME;
 use futures::stream::BoxStream;
@@ -146,7 +145,7 @@ impl CatalogManager {
             self.kv_backend.clone(),
             req,
             DEFAULT_PAGE_SIZE,
-            Arc::new(catalog_decoder),
+            catalog_decoder,
         )
         .into_stream();
 
@@ -156,6 +155,8 @@ impl CatalogManager {
 
 #[cfg(test)]
 mod tests {
+    use std::sync::Arc;
+
     use super::*;
     use crate::kv_backend::memory::MemoryKvBackend;
 
@@ -14,7 +14,6 @@
 
 use std::collections::HashMap;
 use std::fmt::Display;
-use std::sync::Arc;
 
 use futures::stream::BoxStream;
 use serde::{Deserialize, Serialize};
@@ -166,7 +165,7 @@ impl DatanodeTableManager {
             self.kv_backend.clone(),
             req,
             DEFAULT_PAGE_SIZE,
-            Arc::new(datanode_table_value_decoder),
+            datanode_table_value_decoder,
         )
         .into_stream();
 
@@ -15,6 +15,7 @@
 pub mod flow_info;
 pub(crate) mod flow_name;
 pub(crate) mod flow_route;
+pub mod flow_state;
 pub(crate) mod flownode_flow;
 pub(crate) mod table_flow;
 
@@ -35,6 +36,7 @@ use crate::ensure_values;
 use crate::error::{self, Result};
 use crate::key::flow::flow_info::FlowInfoManager;
 use crate::key::flow::flow_name::FlowNameManager;
+use crate::key::flow::flow_state::FlowStateManager;
 use crate::key::flow::flownode_flow::FlownodeFlowManager;
 pub use crate::key::flow::table_flow::{TableFlowManager, TableFlowManagerRef};
 use crate::key::txn_helper::TxnOpGetResponseSet;
@@ -102,6 +104,8 @@ pub struct FlowMetadataManager {
     flownode_flow_manager: FlownodeFlowManager,
     table_flow_manager: TableFlowManager,
     flow_name_manager: FlowNameManager,
+    /// only metasrv have access to itself's memory backend, so for other case it should be None
+    flow_state_manager: Option<FlowStateManager>,
     kv_backend: KvBackendRef,
 }
 
@@ -114,6 +118,7 @@ impl FlowMetadataManager {
             flow_name_manager: FlowNameManager::new(kv_backend.clone()),
             flownode_flow_manager: FlownodeFlowManager::new(kv_backend.clone()),
             table_flow_manager: TableFlowManager::new(kv_backend.clone()),
+            flow_state_manager: None,
             kv_backend,
         }
     }
@@ -123,6 +128,10 @@ impl FlowMetadataManager {
         &self.flow_name_manager
     }
 
+    pub fn flow_state_manager(&self) -> Option<&FlowStateManager> {
+        self.flow_state_manager.as_ref()
+    }
+
     /// Returns the [`FlowInfoManager`].
     pub fn flow_info_manager(&self) -> &FlowInfoManager {
         &self.flow_info_manager
@@ -12,8 +12,6 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-use std::sync::Arc;
-
 use futures::stream::BoxStream;
 use lazy_static::lazy_static;
 use regex::Regex;
@@ -201,7 +199,7 @@ impl FlowNameManager {
             self.kv_backend.clone(),
             req,
             DEFAULT_PAGE_SIZE,
-            Arc::new(flow_name_decoder),
+            flow_name_decoder,
         )
         .into_stream();
 
@@ -12,8 +12,6 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-use std::sync::Arc;
-
 use futures::stream::BoxStream;
 use lazy_static::lazy_static;
 use regex::Regex;
@@ -179,7 +177,7 @@ impl FlowRouteManager {
             self.kv_backend.clone(),
             req,
             DEFAULT_PAGE_SIZE,
-            Arc::new(flow_route_decoder),
+            flow_route_decoder,
        )
        .into_stream();
 
src/common/meta/src/key/flow/flow_state.rs (new file, 162 lines)

@@ -0,0 +1,162 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::collections::BTreeMap;
+use std::sync::Arc;
+
+use serde::{Deserialize, Serialize};
+
+use crate::error::{self, Result};
+use crate::key::flow::FlowScoped;
+use crate::key::{FlowId, MetadataKey, MetadataValue};
+use crate::kv_backend::KvBackendRef;
+use crate::rpc::store::PutRequest;
+
+/// The entire FlowId to Flow Size's Map is stored directly in the value part of the key.
+const FLOW_STATE_KEY: &str = "state";
+
+/// The key of flow state.
+#[derive(Debug, Clone, Copy, PartialEq)]
+struct FlowStateKeyInner;
+
+impl FlowStateKeyInner {
+    pub fn new() -> Self {
+        Self
+    }
+}
+
+impl<'a> MetadataKey<'a, FlowStateKeyInner> for FlowStateKeyInner {
+    fn to_bytes(&self) -> Vec<u8> {
+        FLOW_STATE_KEY.as_bytes().to_vec()
+    }
+
+    fn from_bytes(bytes: &'a [u8]) -> Result<FlowStateKeyInner> {
+        let key = std::str::from_utf8(bytes).map_err(|e| {
+            error::InvalidMetadataSnafu {
+                err_msg: format!(
+                    "FlowInfoKeyInner '{}' is not a valid UTF8 string: {e}",
+                    String::from_utf8_lossy(bytes)
+                ),
+            }
+            .build()
+        })?;
+        if key != FLOW_STATE_KEY {
+            return Err(error::InvalidMetadataSnafu {
+                err_msg: format!("Invalid FlowStateKeyInner '{key}'"),
+            }
+            .build());
+        }
+        Ok(FlowStateKeyInner::new())
+    }
+}
+
+/// The key stores the state size of the flow.
+///
+/// The layout: `__flow/state`.
+pub struct FlowStateKey(FlowScoped<FlowStateKeyInner>);
+
+impl FlowStateKey {
+    /// Returns the [FlowStateKey].
+    pub fn new() -> FlowStateKey {
+        let inner = FlowStateKeyInner::new();
+        FlowStateKey(FlowScoped::new(inner))
+    }
+}
+
+impl Default for FlowStateKey {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
+impl<'a> MetadataKey<'a, FlowStateKey> for FlowStateKey {
+    fn to_bytes(&self) -> Vec<u8> {
+        self.0.to_bytes()
+    }
+
+    fn from_bytes(bytes: &'a [u8]) -> Result<FlowStateKey> {
+        Ok(FlowStateKey(FlowScoped::<FlowStateKeyInner>::from_bytes(
+            bytes,
+        )?))
+    }
+}
+
+/// The value of flow state size
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
+pub struct FlowStateValue {
+    /// For each key, the bytes of the state in memory
+    pub state_size: BTreeMap<FlowId, usize>,
+}
+
+impl FlowStateValue {
+    pub fn new(state_size: BTreeMap<FlowId, usize>) -> Self {
+        Self { state_size }
+    }
+}
+
+pub type FlowStateManagerRef = Arc<FlowStateManager>;
+
+/// The manager of [FlowStateKey]. Since state size changes frequently, we store it in memory.
+///
+/// This is only used in distributed mode. When meta-srv use heartbeat to update the flow stat report
+/// and frontned use get to get the latest flow stat report.
+pub struct FlowStateManager {
+    in_memory: KvBackendRef,
+}
+
+impl FlowStateManager {
+    pub fn new(in_memory: KvBackendRef) -> Self {
+        Self { in_memory }
+    }
+
+    pub async fn get(&self) -> Result<Option<FlowStateValue>> {
+        let key = FlowStateKey::new().to_bytes();
+        self.in_memory
+            .get(&key)
+            .await?
+            .map(|x| FlowStateValue::try_from_raw_value(&x.value))
+            .transpose()
+    }
+
+    pub async fn put(&self, value: FlowStateValue) -> Result<()> {
+        let key = FlowStateKey::new().to_bytes();
+        let value = value.try_as_raw_value()?;
+        let req = PutRequest::new().with_key(key).with_value(value);
+        self.in_memory.put(req).await?;
+        Ok(())
+    }
+}
+
+/// Flow's state report, send regularly through heartbeat message
+#[derive(Debug, Clone)]
+pub struct FlowStat {
+    /// For each key, the bytes of the state in memory
+    pub state_size: BTreeMap<u32, usize>,
+}
+
+impl From<FlowStateValue> for FlowStat {
+    fn from(value: FlowStateValue) -> Self {
+        Self {
+            state_size: value.state_size,
+        }
+    }
+}
+
+impl From<FlowStat> for FlowStateValue {
+    fn from(value: FlowStat) -> Self {
+        Self {
+            state_size: value.state_size,
+        }
+    }
+}
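A rough usage sketch of the new flow-state plumbing, assuming an in-memory backend such as the MemoryKvBackend referenced elsewhere in this diff; error handling and setup are simplified:

    use std::collections::BTreeMap;

    async fn report_and_read(in_memory: KvBackendRef) -> Result<Option<FlowStat>> {
        let manager = FlowStateManager::new(in_memory);

        // Store the per-flow state sizes that flownodes report via heartbeat.
        let mut state_size = BTreeMap::new();
        state_size.insert(1, 4096);
        manager.put(FlowStateValue::new(state_size)).await?;

        // Read the latest report back and convert it to the heartbeat-facing FlowStat.
        Ok(manager.get().await?.map(FlowStat::from))
    }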
@@ -12,8 +12,6 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-use std::sync::Arc;
-
 use futures::stream::BoxStream;
 use futures::TryStreamExt;
 use lazy_static::lazy_static;
@@ -179,7 +177,7 @@ impl FlownodeFlowManager {
             self.kv_backend.clone(),
             req,
             DEFAULT_PAGE_SIZE,
-            Arc::new(flownode_flow_key_decoder),
+            flownode_flow_key_decoder,
         )
         .into_stream();
 
@@ -206,7 +206,7 @@ impl TableFlowManager {
             self.kv_backend.clone(),
             req,
             DEFAULT_PAGE_SIZE,
-            Arc::new(table_flow_decoder),
+            table_flow_decoder,
         )
         .into_stream();
 
@@ -14,7 +14,6 @@
 
 use std::collections::HashMap;
 use std::fmt::Display;
-use std::sync::Arc;
 
 use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
 use common_time::DatabaseTimeToLive;
@@ -283,7 +282,7 @@ impl SchemaManager {
             self.kv_backend.clone(),
             req,
             DEFAULT_PAGE_SIZE,
-            Arc::new(schema_decoder),
+            schema_decoder,
         )
         .into_stream();
 
@@ -308,6 +307,7 @@ impl<'a> From<&'a SchemaName> for SchemaNameKey<'a> {
 
 #[cfg(test)]
 mod tests {
+    use std::sync::Arc;
     use std::time::Duration;
 
     use super::*;
@@ -269,7 +269,7 @@ impl TableNameManager {
             self.kv_backend.clone(),
             req,
             DEFAULT_PAGE_SIZE,
-            Arc::new(table_decoder),
+            table_decoder,
         )
         .into_stream();
 
@@ -290,28 +290,6 @@ impl TableRouteManager {
         }
     }
 
-    /// Returns the [`PhysicalTableRouteValue`] in the first level,
-    /// It won't follow the [`LogicalTableRouteValue`] to find the next level [`PhysicalTableRouteValue`].
-    ///
-    /// Returns an error if the first level value is not a [`PhysicalTableRouteValue`].
-    pub async fn try_get_physical_table_route(
-        &self,
-        table_id: TableId,
-    ) -> Result<Option<PhysicalTableRouteValue>> {
-        match self.storage.get(table_id).await? {
-            Some(route) => {
-                ensure!(
-                    route.is_physical(),
-                    UnexpectedLogicalRouteTableSnafu {
-                        err_msg: format!("{route:?} is a non-physical TableRouteValue.")
-                    }
-                );
-                Ok(Some(route.into_physical_table_route()))
-            }
-            None => Ok(None),
-        }
-    }
-
     /// Returns the [TableId] recursively.
     ///
     /// Returns a [TableRouteNotFound](crate::error::Error::TableRouteNotFound) Error if:
@@ -569,37 +547,6 @@ impl TableRouteStorage {
             .transpose()
     }
 
-    /// Returns the physical `DeserializedValueWithBytes<TableRouteValue>` recursively.
-    ///
-    /// Returns a [TableRouteNotFound](crate::error::Error::TableRouteNotFound) Error if:
-    /// - the physical table(`logical_or_physical_table_id`) does not exist
-    /// - the corresponding physical table of the logical table(`logical_or_physical_table_id`) does not exist.
-    pub async fn get_physical_table_route_with_raw_bytes(
-        &self,
-        logical_or_physical_table_id: TableId,
-    ) -> Result<(TableId, DeserializedValueWithBytes<TableRouteValue>)> {
-        let table_route = self
-            .get_with_raw_bytes(logical_or_physical_table_id)
-            .await?
-            .context(TableRouteNotFoundSnafu {
-                table_id: logical_or_physical_table_id,
-            })?;
-
-        match table_route.get_inner_ref() {
-            TableRouteValue::Physical(_) => Ok((logical_or_physical_table_id, table_route)),
-            TableRouteValue::Logical(x) => {
-                let physical_table_id = x.physical_table_id();
-                let physical_table_route = self
-                    .get_with_raw_bytes(physical_table_id)
-                    .await?
-                    .context(TableRouteNotFoundSnafu {
-                        table_id: physical_table_id,
-                    })?;
-                Ok((physical_table_id, physical_table_route))
-            }
-        }
-    }
-
     /// Returns batch of [`TableRouteValue`] that respects the order of `table_ids`.
     pub async fn batch_get(&self, table_ids: &[TableId]) -> Result<Vec<Option<TableRouteValue>>> {
         let mut table_routes = self.batch_get_inner(table_ids).await?;
@@ -36,7 +36,7 @@ pub mod postgres;
 pub mod test;
 pub mod txn;
 
-pub type KvBackendRef = Arc<dyn KvBackend<Error = Error> + Send + Sync>;
+pub type KvBackendRef<E = Error> = Arc<dyn KvBackend<Error = E> + Send + Sync>;
 
 #[async_trait]
 pub trait KvBackend: TxnService
@@ -161,6 +161,9 @@ where
     Self::Error: ErrorExt,
 {
     fn reset(&self);
+
+    /// Upcast as `KvBackendRef`. Since https://github.com/rust-lang/rust/issues/65991 is not yet stable.
+    fn as_kv_backend_ref(self: Arc<Self>) -> KvBackendRef<Self::Error>;
 }
 
-pub type ResettableKvBackendRef = Arc<dyn ResettableKvBackend<Error = Error> + Send + Sync>;
+pub type ResettableKvBackendRef<E = Error> = Arc<dyn ResettableKvBackend<Error = E> + Send + Sync>;
@@ -15,6 +15,7 @@
 use std::any::Any;
 use std::sync::Arc;
 
+use common_telemetry::info;
 use etcd_client::{
     Client, DeleteOptions, GetOptions, PutOptions, Txn, TxnOp, TxnOpResponse, TxnResponse,
 };
@@ -55,6 +56,7 @@ impl EtcdStore {
     }
 
     pub fn with_etcd_client(client: Client, max_txn_ops: usize) -> KvBackendRef {
+        info!("Connected to etcd");
         Arc::new(Self {
             client,
             max_txn_ops,
@@ -16,13 +16,13 @@ use std::any::Any;
 use std::collections::BTreeMap;
 use std::fmt::{Display, Formatter};
 use std::marker::PhantomData;
-use std::sync::RwLock;
+use std::sync::{Arc, RwLock};
 
 use async_trait::async_trait;
 use common_error::ext::ErrorExt;
 use serde::Serializer;
 
-use super::ResettableKvBackend;
+use super::{KvBackendRef, ResettableKvBackend};
 use crate::kv_backend::txn::{Txn, TxnOp, TxnOpResponse, TxnRequest, TxnResponse};
 use crate::kv_backend::{KvBackend, TxnService};
 use crate::metrics::METRIC_META_TXN_REQUEST;
@@ -311,6 +311,10 @@ impl<T: ErrorExt + Send + Sync + 'static> ResettableKvBackend for MemoryKvBacken
     fn reset(&self) {
         self.clear();
     }
+
+    fn as_kv_backend_ref(self: Arc<Self>) -> KvBackendRef<T> {
+        self
+    }
 }
 
 #[cfg(test)]
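Taken together, the kv_backend hunks above make KvBackendRef generic over the backend's error type and add an explicit as_kv_backend_ref upcast, since dyn-trait upcasting is still unstable. A rough sketch of the intended call pattern; the concrete error type parameter here is an assumption:

    use std::sync::Arc;

    fn upcast(backend: Arc<MemoryKvBackend<Error>>) -> KvBackendRef<Error> {
        // Explicit upcast helper added above; MemoryKvBackend simply returns itself.
        backend.as_kv_backend_ref()
    }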
@@ -12,8 +12,6 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-use std::sync::Arc;
-
 use async_stream::try_stream;
 use common_telemetry::debug;
 use futures::Stream;
@@ -148,7 +146,7 @@ impl PaginationStreamFactory {
 }
 
 pub struct PaginationStream<T> {
-    decoder_fn: Arc<KeyValueDecoderFn<T>>,
+    decoder_fn: fn(KeyValue) -> Result<T>,
     factory: PaginationStreamFactory,
 }
 
@@ -158,7 +156,7 @@ impl<T> PaginationStream<T> {
         kv: KvBackendRef,
         req: RangeRequest,
         page_size: usize,
-        decoder_fn: Arc<KeyValueDecoderFn<T>>,
+        decoder_fn: fn(KeyValue) -> Result<T>,
     ) -> Self {
         Self {
             decoder_fn,
@@ -191,6 +189,7 @@ mod tests {
 
     use std::assert_matches::assert_matches;
     use std::collections::BTreeMap;
+    use std::sync::Arc;
 
     use futures::TryStreamExt;
 
@@ -250,7 +249,7 @@ mod tests {
                 ..Default::default()
             },
             DEFAULT_PAGE_SIZE,
-            Arc::new(decoder),
+            decoder,
         )
         .into_stream();
         let kv = stream.try_collect::<Vec<_>>().await.unwrap();
@@ -290,7 +289,7 @@ mod tests {
                 ..Default::default()
            },
            2,
-            Arc::new(decoder),
+            decoder,
        );
        let kv = stream
            .into_stream()
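Since the decoder is now a plain fn(KeyValue) -> Result<T> instead of Arc<KeyValueDecoderFn<T>>, building a PaginationStream looks roughly like the sketch below; the decoder body and the KeyValue field names are illustrative:

    fn decode(kv: KeyValue) -> Result<(Vec<u8>, Vec<u8>)> {
        // Hypothetical decoder that keeps the raw key/value pair.
        Ok((kv.key, kv.value))
    }

    fn build_stream(kv_backend: KvBackendRef, req: RangeRequest) -> PaginationStream<(Vec<u8>, Vec<u8>)> {
        // The fn item is passed directly; no Arc::new(...) wrapper is needed any more.
        PaginationStream::new(kv_backend, req, DEFAULT_PAGE_SIZE, decode)
    }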
@@ -89,39 +89,6 @@ pub fn convert_to_region_leader_map(region_routes: &[RegionRoute]) -> HashMap<Re
         .collect::<HashMap<_, _>>()
 }
 
-/// Returns the HashMap<[RegionNumber], HashSet<DatanodeId>>
-pub fn convert_to_region_peer_map(
-    region_routes: &[RegionRoute],
-) -> HashMap<RegionNumber, HashSet<u64>> {
-    region_routes
-        .iter()
-        .map(|x| {
-            let set = x
-                .follower_peers
-                .iter()
-                .map(|p| p.id)
-                .chain(x.leader_peer.as_ref().map(|p| p.id))
-                .collect::<HashSet<_>>();
-
-            (x.region.id.region_number(), set)
-        })
-        .collect::<HashMap<_, _>>()
-}
-
-/// Returns the HashMap<[RegionNumber], [LeaderState]>;
-pub fn convert_to_region_leader_state_map(
-    region_routes: &[RegionRoute],
-) -> HashMap<RegionNumber, LeaderState> {
-    region_routes
-        .iter()
-        .filter_map(|x| {
-            x.leader_state
-                .as_ref()
-                .map(|state| (x.region.id.region_number(), *state))
-        })
-        .collect::<HashMap<_, _>>()
-}
-
 pub fn find_region_leader(
     region_routes: &[RegionRoute],
     region_number: RegionNumber,
@@ -147,19 +114,6 @@ pub fn find_leader_regions(region_routes: &[RegionRoute], datanode: &Peer) -> Ve
         .collect()
 }
 
-pub fn extract_all_peers(region_routes: &[RegionRoute]) -> Vec<Peer> {
-    let mut peers = region_routes
-        .iter()
-        .flat_map(|x| x.leader_peer.iter().chain(x.follower_peers.iter()))
-        .collect::<HashSet<_>>()
-        .into_iter()
-        .cloned()
-        .collect::<Vec<_>>();
-    peers.sort_by_key(|x| x.id);
-
-    peers
-}
-
 impl TableRoute {
     pub fn new(table: Table, region_routes: Vec<RegionRoute>) -> Self {
         let region_leaders = region_routes
@@ -12,8 +12,6 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-use std::sync::Arc;
-
 use async_trait::async_trait;
 use common_error::ext::BoxedError;
 use common_procedure::error::{DeleteStatesSnafu, ListStateSnafu, PutStateSnafu};
@@ -171,7 +169,7 @@ impl StateStore for KvStateStore {
             self.kv_backend.clone(),
             req,
             self.max_num_per_range_request.unwrap_or_default(),
-            Arc::new(decode_kv),
+            decode_kv,
         )
         .into_stream();
 
@@ -544,7 +544,7 @@ mod tests {
     use common_test_util::temp_dir::create_temp_dir;
     use futures_util::future::BoxFuture;
     use futures_util::FutureExt;
-    use object_store::ObjectStore;
+    use object_store::{EntryMode, ObjectStore};
     use tokio::sync::mpsc;
 
     use super::*;
@@ -578,7 +578,11 @@ mod tests {
     ) {
         let dir = proc_path!(procedure_store, "{procedure_id}/");
         let lister = object_store.list(&dir).await.unwrap();
-        let mut files_in_dir: Vec<_> = lister.into_iter().map(|de| de.name().to_string()).collect();
+        let mut files_in_dir: Vec<_> = lister
+            .into_iter()
+            .filter(|x| x.metadata().mode() == EntryMode::FILE)
+            .map(|de| de.name().to_string())
+            .collect();
         files_in_dir.sort_unstable();
         assert_eq!(files, files_in_dir);
     }
@@ -26,7 +26,6 @@ use std::sync::Arc;
|
|||||||
|
|
||||||
use adapter::RecordBatchMetrics;
|
use adapter::RecordBatchMetrics;
|
||||||
use arc_swap::ArcSwapOption;
|
use arc_swap::ArcSwapOption;
|
||||||
use datafusion::physical_plan::memory::MemoryStream;
|
|
||||||
pub use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
|
pub use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
|
||||||
use datatypes::arrow::compute::SortOptions;
|
use datatypes::arrow::compute::SortOptions;
|
||||||
pub use datatypes::arrow::record_batch::RecordBatch as DfRecordBatch;
|
pub use datatypes::arrow::record_batch::RecordBatch as DfRecordBatch;
|
||||||
@@ -170,19 +169,6 @@ impl RecordBatches {
|
|||||||
index: 0,
|
index: 0,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn into_df_stream(self) -> DfSendableRecordBatchStream {
|
|
||||||
let df_record_batches = self
|
|
||||||
.batches
|
|
||||||
.into_iter()
|
|
||||||
.map(|batch| batch.into_df_record_batch())
|
|
||||||
.collect();
|
|
||||||
// unwrap safety: `MemoryStream::try_new` won't fail
|
|
||||||
Box::pin(
|
|
||||||
MemoryStream::try_new(df_record_batches, self.schema.arrow_schema().clone(), None)
|
|
||||||
.unwrap(),
|
|
||||||
)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
impl IntoIterator for RecordBatches {
|
impl IntoIterator for RecordBatches {
|
||||||
|
|||||||
@@ -35,8 +35,6 @@ serde_json.workspace = true
 snafu.workspace = true
 tempfile.workspace = true
 tokio.workspace = true
-tokio-metrics = "0.3"
-tokio-metrics-collector = { git = "https://github.com/MichaelScofield/tokio-metrics-collector.git", rev = "89d692d5753d28564a7aac73c6ac5aba22243ba0" }
 tokio-util.workspace = true
 
 [dev-dependencies]
@@ -29,10 +29,6 @@ pub fn format_utc_datetime(utc: &NaiveDateTime, pattern: &str) -> String {
     }
 }
 
-pub fn system_datetime_to_utc(local: &NaiveDateTime) -> LocalResult<NaiveDateTime> {
-    datetime_to_utc(local, get_timezone(None))
-}
-
 /// Cast a [`NaiveDateTime`] with the given timezone.
 pub fn datetime_to_utc(
     datetime: &NaiveDateTime,
@@ -49,9 +49,9 @@ impl Default for RaftEngineConfig {
     fn default() -> Self {
         Self {
             dir: None,
-            file_size: ReadableSize::mb(256),
-            purge_threshold: ReadableSize::gb(4),
-            purge_interval: Duration::from_secs(600),
+            file_size: ReadableSize::mb(128),
+            purge_threshold: ReadableSize::gb(1),
+            purge_interval: Duration::from_secs(60),
             read_batch_size: 128,
             sync_write: false,
             enable_log_recycle: true,
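The hunk above tightens the raft-engine WAL defaults: file size 256MB to 128MB, purge threshold 4GB to 1GB, purge interval 600s to 60s. A quick hedged check written against the struct shown in the hunk; field and type names are taken from the diff:

    use std::time::Duration;

    fn check_new_defaults() {
        let config = RaftEngineConfig::default();
        // Expected values after this change.
        assert_eq!(config.file_size, ReadableSize::mb(128));
        assert_eq!(config.purge_threshold, ReadableSize::gb(1));
        assert_eq!(config.purge_interval, Duration::from_secs(60));
    }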
@@ -193,6 +193,14 @@ pub enum Error {
         location: Location,
     },
 
+    #[snafu(display("Failed to build http client"))]
+    BuildHttpClient {
+        #[snafu(implicit)]
+        location: Location,
+        #[snafu(source)]
+        error: reqwest::Error,
+    },
+
     #[snafu(display("Missing required field: {}", name))]
     MissingRequiredField {
         name: String,
@@ -406,9 +414,10 @@ impl ErrorExt for Error {
             | MissingKvBackend { .. }
             | TomlFormat { .. } => StatusCode::InvalidArguments,
 
-            PayloadNotExist { .. } | Unexpected { .. } | WatchAsyncTaskChange { .. } => {
-                StatusCode::Unexpected
-            }
+            PayloadNotExist { .. }
+            | Unexpected { .. }
+            | WatchAsyncTaskChange { .. }
+            | BuildHttpClient { .. } => StatusCode::Unexpected,
 
             AsyncTaskExecute { source, .. } => source.status_code(),
 
@@ -32,7 +32,7 @@ use object_store::{Access, Error, HttpClient, ObjectStore, ObjectStoreBuilder, O
 use snafu::prelude::*;
 
 use crate::config::{HttpClientConfig, ObjectStoreConfig, DEFAULT_OBJECT_STORE_CACHE_SIZE};
-use crate::error::{self, CreateDirSnafu, Result};
+use crate::error::{self, BuildHttpClientSnafu, CreateDirSnafu, Result};
 
 pub(crate) async fn new_raw_object_store(
     store: &ObjectStoreConfig,
@@ -236,7 +236,8 @@ pub(crate) fn build_http_client(config: &HttpClientConfig) -> Result<HttpClient>
         builder.timeout(config.timeout)
     };
 
-    HttpClient::build(http_builder).context(error::InitBackendSnafu)
+    let client = http_builder.build().context(BuildHttpClientSnafu)?;
+    Ok(HttpClient::with(client))
 }
 struct PrintDetailedError;
 
@@ -370,6 +370,51 @@ impl ConcreteDataType {
             _ => None,
         }
     }
+
+    /// Return the datatype name in postgres type system
+    pub fn postgres_datatype_name(&self) -> &'static str {
+        match self {
+            &ConcreteDataType::Null(_) => "UNKNOWN",
+            &ConcreteDataType::Boolean(_) => "BOOL",
+            &ConcreteDataType::Int8(_) | &ConcreteDataType::UInt8(_) => "CHAR",
+            &ConcreteDataType::Int16(_) | &ConcreteDataType::UInt16(_) => "INT2",
+            &ConcreteDataType::Int32(_) | &ConcreteDataType::UInt32(_) => "INT4",
+            &ConcreteDataType::Int64(_) | &ConcreteDataType::UInt64(_) => "INT8",
+            &ConcreteDataType::Float32(_) => "FLOAT4",
+            &ConcreteDataType::Float64(_) => "FLOAT8",
+            &ConcreteDataType::Binary(_) | &ConcreteDataType::Vector(_) => "BYTEA",
+            &ConcreteDataType::String(_) => "VARCHAR",
+            &ConcreteDataType::Date(_) => "DATE",
+            &ConcreteDataType::DateTime(_) | &ConcreteDataType::Timestamp(_) => "TIMESTAMP",
+            &ConcreteDataType::Time(_) => "TIME",
+            &ConcreteDataType::Interval(_) => "INTERVAL",
+            &ConcreteDataType::Decimal128(_) => "NUMERIC",
+            &ConcreteDataType::Json(_) => "JSON",
+            ConcreteDataType::List(list) => match list.item_type() {
+                &ConcreteDataType::Null(_) => "UNKNOWN",
+                &ConcreteDataType::Boolean(_) => "_BOOL",
+                &ConcreteDataType::Int8(_) | &ConcreteDataType::UInt8(_) => "_CHAR",
+                &ConcreteDataType::Int16(_) | &ConcreteDataType::UInt16(_) => "_INT2",
+                &ConcreteDataType::Int32(_) | &ConcreteDataType::UInt32(_) => "_INT4",
+                &ConcreteDataType::Int64(_) | &ConcreteDataType::UInt64(_) => "_INT8",
+                &ConcreteDataType::Float32(_) => "_FLOAT4",
+                &ConcreteDataType::Float64(_) => "_FLOAT8",
+                &ConcreteDataType::Binary(_) => "_BYTEA",
+                &ConcreteDataType::String(_) => "_VARCHAR",
+                &ConcreteDataType::Date(_) => "_DATE",
+                &ConcreteDataType::DateTime(_) | &ConcreteDataType::Timestamp(_) => "_TIMESTAMP",
+                &ConcreteDataType::Time(_) => "_TIME",
+                &ConcreteDataType::Interval(_) => "_INTERVAL",
+                &ConcreteDataType::Decimal128(_) => "_NUMERIC",
+                &ConcreteDataType::Json(_) => "_JSON",
+                &ConcreteDataType::Duration(_)
+                | &ConcreteDataType::Dictionary(_)
+                | &ConcreteDataType::Vector(_)
+                | &ConcreteDataType::List(_) => "UNKNOWN",
+            },
+            &ConcreteDataType::Duration(_) | &ConcreteDataType::Dictionary(_) => "UNKNOWN",
+        }
+    }
 }
 
 impl From<&ConcreteDataType> for ConcreteDataType {
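A short illustration of the new postgres_datatype_name mapping; the ConcreteDataType constructor helpers used here are assumed from the datatypes crate:

    fn pg_name_examples() {
        // Scalar types map to PostgreSQL type names...
        assert_eq!(ConcreteDataType::int32_datatype().postgres_datatype_name(), "INT4");
        assert_eq!(ConcreteDataType::string_datatype().postgres_datatype_name(), "VARCHAR");
        // ...and list types map to the underscore-prefixed array names.
        let list = ConcreteDataType::list_datatype(ConcreteDataType::float64_datatype());
        assert_eq!(list.postgres_datatype_name(), "_FLOAT8");
    }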
@@ -232,6 +232,12 @@ pub enum Error {
         #[snafu(implicit)]
         location: Location,
     },
+
+    #[snafu(display("Invalid skipping index option: {}", msg))]
+    InvalidSkippingIndexOption {
+        msg: String,
+        #[snafu(implicit)]
+        location: Location,
+    },
 }
 
 impl ErrorExt for Error {
@@ -252,7 +258,8 @@ impl ErrorExt for Error {
             | InvalidPrecisionOrScale { .. }
             | InvalidJson { .. }
             | InvalidVector { .. }
-            | InvalidFulltextOption { .. } => StatusCode::InvalidArguments,
+            | InvalidFulltextOption { .. }
+            | InvalidSkippingIndexOption { .. } => StatusCode::InvalidArguments,
 
             ValueExceedsPrecision { .. }
             | CastType { .. }
@@ -28,10 +28,11 @@ use snafu::{ensure, ResultExt};
 use crate::error::{self, DuplicateColumnSnafu, Error, ProjectArrowSchemaSnafu, Result};
 use crate::prelude::ConcreteDataType;
 pub use crate::schema::column_schema::{
-    ColumnSchema, FulltextAnalyzer, FulltextOptions, Metadata,
+    ColumnSchema, FulltextAnalyzer, FulltextOptions, Metadata, SkippingIndexOptions,
     COLUMN_FULLTEXT_CHANGE_OPT_KEY_ENABLE, COLUMN_FULLTEXT_OPT_KEY_ANALYZER,
-    COLUMN_FULLTEXT_OPT_KEY_CASE_SENSITIVE, COMMENT_KEY, FULLTEXT_KEY, INVERTED_INDEX_KEY,
-    TIME_INDEX_KEY,
+    COLUMN_FULLTEXT_OPT_KEY_CASE_SENSITIVE, COLUMN_SKIPPING_INDEX_OPT_KEY_GRANULARITY,
+    COLUMN_SKIPPING_INDEX_OPT_KEY_TYPE, COMMENT_KEY, FULLTEXT_KEY, INVERTED_INDEX_KEY,
+    SKIPPING_INDEX_KEY, TIME_INDEX_KEY,
 };
 pub use crate::schema::constraint::ColumnDefaultConstraint;
 pub use crate::schema::raw::RawSchema;
@@ -39,12 +39,20 @@ const DEFAULT_CONSTRAINT_KEY: &str = "greptime:default_constraint";
|
|||||||
pub const FULLTEXT_KEY: &str = "greptime:fulltext";
|
pub const FULLTEXT_KEY: &str = "greptime:fulltext";
|
||||||
/// Key used to store whether the column has inverted index in arrow field's metadata.
|
/// Key used to store whether the column has inverted index in arrow field's metadata.
|
||||||
pub const INVERTED_INDEX_KEY: &str = "greptime:inverted_index";
|
pub const INVERTED_INDEX_KEY: &str = "greptime:inverted_index";
|
||||||
|
/// Key used to store skip options in arrow field's metadata.
|
||||||
|
pub const SKIPPING_INDEX_KEY: &str = "greptime:skipping_index";
|
||||||
|
|
||||||
/// Keys used in fulltext options
|
/// Keys used in fulltext options
|
||||||
pub const COLUMN_FULLTEXT_CHANGE_OPT_KEY_ENABLE: &str = "enable";
|
pub const COLUMN_FULLTEXT_CHANGE_OPT_KEY_ENABLE: &str = "enable";
|
||||||
pub const COLUMN_FULLTEXT_OPT_KEY_ANALYZER: &str = "analyzer";
|
pub const COLUMN_FULLTEXT_OPT_KEY_ANALYZER: &str = "analyzer";
|
||||||
pub const COLUMN_FULLTEXT_OPT_KEY_CASE_SENSITIVE: &str = "case_sensitive";
|
pub const COLUMN_FULLTEXT_OPT_KEY_CASE_SENSITIVE: &str = "case_sensitive";
|
||||||
|
|
||||||
|
/// Keys used in SKIPPING index options
|
||||||
|
pub const COLUMN_SKIPPING_INDEX_OPT_KEY_GRANULARITY: &str = "granularity";
|
||||||
|
pub const COLUMN_SKIPPING_INDEX_OPT_KEY_TYPE: &str = "type";
|
||||||
|
|
||||||
|
pub const DEFAULT_GRANULARITY: u32 = 10240;
|
||||||
|
|
||||||
/// Schema of a column, used as an immutable struct.
|
/// Schema of a column, used as an immutable struct.
|
||||||
#[derive(Clone, PartialEq, Eq, Serialize, Deserialize)]
|
#[derive(Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||||
pub struct ColumnSchema {
|
pub struct ColumnSchema {
|
||||||
@@ -156,6 +164,10 @@ impl ColumnSchema {
|
|||||||
.unwrap_or(false)
|
.unwrap_or(false)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub fn has_fulltext_index_key(&self) -> bool {
|
||||||
|
self.metadata.contains_key(FULLTEXT_KEY)
|
||||||
|
}
|
||||||
|
|
||||||
pub fn has_inverted_index_key(&self) -> bool {
|
pub fn has_inverted_index_key(&self) -> bool {
|
||||||
self.metadata.contains_key(INVERTED_INDEX_KEY)
|
self.metadata.contains_key(INVERTED_INDEX_KEY)
|
||||||
}
|
}
|
||||||
@@ -298,6 +310,34 @@ impl ColumnSchema {
         );
         Ok(())
     }
+
+    /// Retrieves the skipping index options for the column.
+    pub fn skipping_index_options(&self) -> Result<Option<SkippingIndexOptions>> {
+        match self.metadata.get(SKIPPING_INDEX_KEY) {
+            None => Ok(None),
+            Some(json) => {
+                let options =
+                    serde_json::from_str(json).context(error::DeserializeSnafu { json })?;
+                Ok(Some(options))
+            }
+        }
+    }
+
+    pub fn with_skipping_options(mut self, options: SkippingIndexOptions) -> Result<Self> {
+        self.metadata.insert(
+            SKIPPING_INDEX_KEY.to_string(),
+            serde_json::to_string(&options).context(error::SerializeSnafu)?,
+        );
+        Ok(self)
+    }
+
+    pub fn set_skipping_options(&mut self, options: &SkippingIndexOptions) -> Result<()> {
+        self.metadata.insert(
+            SKIPPING_INDEX_KEY.to_string(),
+            serde_json::to_string(options).context(error::SerializeSnafu)?,
+        );
+        Ok(())
+    }
 }
 
 /// Column extended type set in column schema's metadata.
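A minimal usage sketch for the accessors added above, assuming the crate's existing ColumnSchema::new constructor and ConcreteDataType::string_datatype(); it is illustrative only and not part of the diff:

    // Hedged sketch: attach skipping-index options to a column and read them back.
    // Error handling is elided with `?`; the column name is hypothetical.
    let column = ColumnSchema::new("trace_id", ConcreteDataType::string_datatype(), false)
        .with_skipping_options(SkippingIndexOptions {
            granularity: DEFAULT_GRANULARITY,
            index_type: SkipIndexType::BloomFilter,
        })?;
    let opts = column.skipping_index_options()?.expect("options were just set");
    assert_eq!(opts.index_type, SkipIndexType::BloomFilter);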
@@ -495,6 +535,76 @@ impl fmt::Display for FulltextAnalyzer {
     }
 }
 
+/// Skipping options for a column.
+#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Default, Visit, VisitMut)]
+#[serde(rename_all = "kebab-case")]
+pub struct SkippingIndexOptions {
+    /// The granularity of the skip index.
+    pub granularity: u32,
+    /// The type of the skip index.
+    #[serde(default)]
+    pub index_type: SkipIndexType,
+}
+
+impl fmt::Display for SkippingIndexOptions {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "granularity={}", self.granularity)?;
+        write!(f, ", index_type={}", self.index_type)?;
+        Ok(())
+    }
+}
+
+/// Skip index types.
+#[derive(Debug, Default, Clone, PartialEq, Eq, Serialize, Deserialize, Visit, VisitMut)]
+pub enum SkipIndexType {
+    #[default]
+    BloomFilter,
+}
+
+impl fmt::Display for SkipIndexType {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self {
+            SkipIndexType::BloomFilter => write!(f, "BLOOM"),
+        }
+    }
+}
+
+impl TryFrom<HashMap<String, String>> for SkippingIndexOptions {
+    type Error = Error;
+
+    fn try_from(options: HashMap<String, String>) -> Result<Self> {
+        // Parse granularity, falling back to DEFAULT_GRANULARITY when not specified.
+        let granularity = match options.get(COLUMN_SKIPPING_INDEX_OPT_KEY_GRANULARITY) {
+            Some(value) => value.parse::<u32>().map_err(|_| {
+                error::InvalidSkippingIndexOptionSnafu {
+                    msg: format!("Invalid granularity: {value}, expected: positive integer"),
+                }
+                .build()
+            })?,
+            None => DEFAULT_GRANULARITY,
+        };
+
+        // Parse index type with default value BloomFilter.
+        let index_type = match options.get(COLUMN_SKIPPING_INDEX_OPT_KEY_TYPE) {
+            Some(typ) => match typ.to_ascii_uppercase().as_str() {
+                "BLOOM" => SkipIndexType::BloomFilter,
+                _ => {
+                    return error::InvalidSkippingIndexOptionSnafu {
+                        msg: format!("Invalid index type: {typ}, expected: 'BLOOM'"),
+                    }
+                    .fail();
+                }
+            },
+            None => SkipIndexType::default(),
+        };
+
+        Ok(SkippingIndexOptions {
+            granularity,
+            index_type,
+        })
+    }
+}
+
 #[cfg(test)]
 mod tests {
     use std::sync::Arc;
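The TryFrom<HashMap<String, String>> impl above is what turns user-supplied column options into SkippingIndexOptions. A small hedged sketch of the expected behaviour, using only the keys and defaults visible in this diff:

    // Hedged sketch: parse skipping-index options from string key/values.
    use std::collections::HashMap;

    let mut opts = HashMap::new();
    opts.insert("granularity".to_string(), "8192".to_string());
    opts.insert("type".to_string(), "bloom".to_string()); // case-insensitive via to_ascii_uppercase
    let parsed = SkippingIndexOptions::try_from(opts)?;
    assert_eq!(parsed.granularity, 8192);
    assert_eq!(parsed.index_type, SkipIndexType::BloomFilter);

    // Omitting both keys falls back to DEFAULT_GRANULARITY and the default BloomFilter type.
    let defaults = SkippingIndexOptions::try_from(HashMap::new())?;
    assert_eq!(defaults.granularity, DEFAULT_GRANULARITY);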
@@ -38,5 +38,4 @@ tokio.workspace = true
 
 [dev-dependencies]
 api.workspace = true
-common-procedure-test.workspace = true
 common-test-util.workspace = true
@@ -46,7 +46,7 @@ impl FileRegionManifest {
     pub async fn store(&self, region_dir: &str, object_store: &ObjectStore) -> Result<()> {
         let path = &region_manifest_path(region_dir);
         let exist = object_store
-            .is_exist(path)
+            .exists(path)
             .await
             .context(CheckObjectSnafu { path })?;
         ensure!(!exist, ManifestExistsSnafu { path });
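Here and in the two test hunks below, `.is_exist(..)` call sites are switched to `.exists(..)`, presumably tracking the underlying OpenDAL Operator API where `is_exist` has been deprecated in favor of `exists`. A minimal hedged sketch of the new call shape (the `op` handle and path are illustrative, not from this diff):

    // Hedged sketch: existence check with the renamed method; `exists` resolves to Result<bool>.
    let found: bool = op.exists("some_region_dir/manifest/_file_manifest").await?;
    if found {
        // e.g. refuse to overwrite an existing manifest, as store() does above.
    }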
@@ -130,7 +130,7 @@ mod tests {
         assert_eq!(region.metadata.primary_key, vec![1]);
 
         assert!(object_store
-            .is_exist("create_region_dir/manifest/_file_manifest")
+            .exists("create_region_dir/manifest/_file_manifest")
             .await
             .unwrap());
 
@@ -198,13 +198,13 @@ mod tests {
             .unwrap();
 
         assert!(object_store
-            .is_exist("drop_region_dir/manifest/_file_manifest")
+            .exists("drop_region_dir/manifest/_file_manifest")
             .await
             .unwrap());
 
         FileRegion::drop(&region, &object_store).await.unwrap();
         assert!(!object_store
-            .is_exist("drop_region_dir/manifest/_file_manifest")
+            .exists("drop_region_dir/manifest/_file_manifest")
            .await
             .unwrap());
 
Some files were not shown because too many files have changed in this diff.